Spaces:
Runtime error
Runtime error
Commit
·
fc0aee9
0
Parent(s):
Duplicate from hra/ChatGPT-Tech-Radar
Browse files
Co-authored-by: hra <hra@users.noreply.huggingface.co>
- .gitattributes +34 -0
- README.md +13 -0
- app.py +233 -0
- lastradartext.txt +51 -0
- lasttechradar.png +0 -0
- requirements.txt +10 -0
.gitattributes
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: ChatGPT Tech Radar
|
| 3 |
+
emoji: 📊
|
| 4 |
+
colorFrom: indigo
|
| 5 |
+
colorTo: blue
|
| 6 |
+
sdk: gradio
|
| 7 |
+
sdk_version: 3.19.1
|
| 8 |
+
app_file: app.py
|
| 9 |
+
pinned: false
|
| 10 |
+
duplicated_from: hra/ChatGPT-Tech-Radar
|
| 11 |
+
---
|
| 12 |
+
|
| 13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from gpt_index import GPTListIndex, SimpleWebPageReader, BeautifulSoupWebReader, GPTSimpleVectorIndex,LLMPredictor
|
| 2 |
+
from IPython.display import Markdown, display
|
| 3 |
+
from langchain.agents import load_tools, Tool, initialize_agent
|
| 4 |
+
from langchain.llms import OpenAI
|
| 5 |
+
from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
|
| 6 |
+
from langchain.agents import initialize_agent, Tool
|
| 7 |
+
from langchain import LLMChain
|
| 8 |
+
from langchain import PromptTemplate
|
| 9 |
+
import gradio as gr
|
| 10 |
+
import pandas as pd
|
| 11 |
+
import openai
|
| 12 |
+
from sklearn.manifold import TSNE
|
| 13 |
+
from sklearn.cluster import KMeans
|
| 14 |
+
from openai.embeddings_utils import get_embedding
|
| 15 |
+
|
| 16 |
+
import numpy as np
|
| 17 |
+
import matplotlib.pyplot as plt
|
| 18 |
+
import matplotlib
|
| 19 |
+
import datetime
|
| 20 |
+
from datetime import datetime, date, time, timedelta
|
| 21 |
+
import os
|
| 22 |
+
from PIL import Image
|
| 23 |
+
from PIL import ImageOps
|
| 24 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 25 |
+
from io import BytesIO
|
| 26 |
+
import requests
|
| 27 |
+
|
| 28 |
+
import gcsfs

# Anonymous (public-read) handle to the GCS bucket that persists the last
# generated radar between Space restarts.
fs = gcsfs.GCSFileSystem(project='createinsightsproject', token='anon')
fs.ls('trends_chrome_extension_bucket')  # connectivity sanity check; result unused
print('Started')

### Download both the text and the image saved by the previous run so the
### UI has something to show before the user triggers a refresh.
with fs.open('trends_chrome_extension_bucket/lastradartext.txt', 'rb') as file:
    data_old = file.read()
print(data_old)
# The text file stores six sections joined by the literal token 'SEPERATOR'
# (sic -- the misspelling is part of the on-disk format; do not "fix" it).
value1, value2, value3, value4, value5, value6 = str(data_old.decode()).split('SEPERATOR')

# FIX: requests.get() has no default timeout and could hang the Space
# forever at startup; bound it explicitly.
img_data = requests.get(
    'https://storage.googleapis.com/trends_chrome_extension_bucket/lasttechradar.png',
    timeout=30,
).content
with open('lasttechradar.png', 'wb') as handler:
    handler.write(img_data)
|
| 42 |
+
def getlastimage():
    """Fetch the last saved radar image and text sections for UI display.

    Downloads the previously generated radar PNG from the public GCS URL
    and re-reads the six text sections from the bucket.

    Returns
    -------
    tuple
        (image_path, mckinsey, bcg, bain, accenture, description, clusters)
        -- matches the seven Gradio outputs wired to ``demo.load``.
    """
    # FIX: bound the request -- requests.get() without a timeout can hang
    # the UI callback indefinitely.
    img_data = requests.get(
        'https://storage.googleapis.com/trends_chrome_extension_bucket/lasttechradar.png',
        timeout=30,
    ).content
    with open('lasttechradar1.png', 'wb') as handler:
        handler.write(img_data)

    with fs.open('trends_chrome_extension_bucket/lastradartext.txt', 'rb') as file:
        data_old = file.read()
    # Six sections separated by the literal token 'SEPERATOR' (sic -- the
    # spelling is part of the stored-file format).
    value1, value2, value3, value4, value5, value6 = str(data_old.decode()).split('SEPERATOR')
    return ('lasttechradar1.png', value1.strip(), value2.strip(), value3.strip(),
            value4.strip(), value5.strip(), value6.strip())
def getstuff(openapikey):
    """Regenerate the tech radar from four consulting-firm home pages.

    Pipeline: scrape each site -> ask a LangChain agent for the top
    technologies -> embed each technology -> t-SNE projection + KMeans
    clustering -> plot the radar -> GPT-write a description and per-cluster
    themes -> persist text + image to GCS for the next startup.

    Parameters
    ----------
    openapikey : str
        OpenAI API key supplied by the user in the UI textbox.

    Returns
    -------
    tuple
        (image_path, mckinsey, bcg, bain, accenture, description, clusters)
        -- matches the seven Gradio outputs wired to the Refresh button.
    """
    dateforfilesave = datetime.today().strftime("%d-%m-%Y %I:%M%p")
    print(dateforfilesave)
    os.environ['OPENAI_API_KEY'] = str(openapikey)

    # --- 1. Ask each consulting site for its "top technologies" -----------
    mainlistofanswers = []
    for each in ['www.mckinsey.com', 'www.bcg.com', 'www.bain.com', 'www.accenture.com']:
        print(each)
        Input_URL = "https://" + each
        documents = SimpleWebPageReader(html_to_text=True).load_data([Input_URL])
        index = GPTSimpleVectorIndex(documents)
        print('Came here 0')

        def querying_db(query: str):
            # Closure over `index` is safe: the agent is built and run
            # within the same loop iteration.
            response = index.query(query)
            return response

        tools = [
            Tool(
                name="QueryingDB",
                func=querying_db,
                description="This function takes a query string as input and returns the most relevant answer from the documentation as output"
            )]
        llm = OpenAI(temperature=0, openai_api_key=openapikey)
        print('Came here 1')
        query_string = "what are the top technologies mentioned?"

        agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
        result = agent.run(query_string)
        mainlistofanswers.append(result)

    print('Came here 2')
    print(mainlistofanswers)

    # --- 2. Parse the free-text answers into individual technologies ------
    newlistoftech = []
    newlistofcompanies = []
    for i in range(len(mainlistofanswers)):
        each = mainlistofanswers[i]
        each = each.replace("The top technologies mentioned are ", "").replace("The technologies mentioned are ", "")
        each = each.replace(":", "").replace(" and ", ",").replace("and ", ",").replace(" and", ",").replace(" the ", " ").replace("the ", " ").replace(" the", " ").strip()
        for item in each.split(","):
            if item != '':
                newlistoftech.append(item.strip())
                newlistofcompanies.append(i)
    tech_df = pd.DataFrame()
    tech_df['tech'] = newlistoftech
    tech_df['company'] = newlistofcompanies
    print(newlistoftech)
    print('Came here 3')

    # --- 3. Embed, project with t-SNE, cluster with KMeans -----------------
    embedding_model = "text-embedding-ada-002"

    tech_df["embedding"] = tech_df['tech'].apply(lambda x: get_embedding(x, engine=embedding_model))
    print('Came here 4')

    matrix = np.array(tech_df['embedding'].to_list())
    # FIX: the original computed max(int(n)/2, 5) -- true division yields a
    # float; use integer halving.  t-SNE also requires perplexity strictly
    # less than n_samples, so clamp for tiny runs.
    perplexityvalue = min(max(len(tech_df) // 2, 5), max(len(tech_df) - 1, 1))
    tsne = TSNE(n_components=2, perplexity=perplexityvalue, random_state=42, init='random', learning_rate=200)
    vis_dims = tsne.fit_transform(matrix)

    # FIX: KMeans raises when n_clusters > n_samples; clamp for tiny runs.
    n_clusters = min(5, len(tech_df))
    kmeans = KMeans(n_clusters=n_clusters, init="k-means++", random_state=42)
    kmeans.fit(matrix)
    labels = kmeans.labels_
    tech_df["Cluster"] = labels
    print('Came here 5')

    # --- 4. Plot the radar --------------------------------------------------
    colors = ["red", "darkorange", "darkgrey", "blue", "darkgreen"]
    x = [x for x, y in vis_dims]
    y = [y for x, y in vis_dims]
    color_indices = tech_df['Cluster'].values

    colormap = matplotlib.colors.ListedColormap(colors)
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.scatter(x, y, c=color_indices, cmap=colormap, alpha=1, s=100)

    for i, txt in enumerate(tech_df['tech'].tolist()):
        ax.annotate(txt, (x[i], y[i]), fontsize=14)

    plt.title("Top Technologies as of " + dateforfilesave, fontsize=20)
    plt.axis('off')
    plt.savefig('lasttechradar.png', bbox_inches='tight')
    plt.close(fig)  # FIX: free the figure; repeated refreshes leaked figures
    print('Came here 6')

    # --- 5. GPT-write a paragraph describing the whole list -----------------
    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt='I will give you top technologies list. Write a paragraph on it.\n\nTechnologies:' + ",".join(tech_df['tech'].tolist()),
        temperature=0,
        max_tokens=1024,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    print(response["choices"][0]["text"].replace("\n", ""))
    desc_tmp = response["choices"][0]["text"].replace("\n", "")
    print('Came here 7')

    # --- 6. Name a theme for every cluster ----------------------------------
    clusterstextlist = []
    for i in range(n_clusters):
        print(f"Cluster {i} Theme:", end=" ")

        reviews = "\n".join(tech_df[tech_df['Cluster'] == i]['tech'].tolist())
        response = openai.Completion.create(
            engine="text-davinci-003",
            prompt=f'What do the following technologies have in common?\n\nCustomer reviews:\n"""\n{reviews}\n"""\n\nTheme:',
            temperature=0,
            max_tokens=64,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0,
        )
        print(response["choices"][0]["text"].replace("\n", ""))

        print(reviews)
        clusterstextlist.append("Cluster " + str(i) + "\nTheme:" + response["choices"][0]["text"].replace("\n", "") + '\n' + reviews + '\n' + "-" * 10 + '\n\n')

    # --- 7. Persist text + image so the next startup can show them ----------
    # 'SEPERATOR' (sic) is the on-disk section delimiter; keep the spelling.
    textlist = [mainlistofanswers[0], "SEPERATOR", mainlistofanswers[1], "SEPERATOR", mainlistofanswers[2], "SEPERATOR", mainlistofanswers[3], "SEPERATOR", desc_tmp, "SEPERATOR", "".join(clusterstextlist)]
    ### create file with new info locally & upload to bucket
    with open('lastradartext.txt', 'w') as f:
        for line in textlist:
            f.write(f"{line}\n")

    with fs.open('trends_chrome_extension_bucket/lastradartext.txt', 'wb') as file:
        for line in textlist:
            file.write(f"{line}\n".encode())

    print('Came here 8')
    # (The original re-read lastradartext.txt into value1..value6 here and
    # never used them -- that dead code is removed.)

    ### upload image to cloud for next run display
    with open('lasttechradar.png', 'rb') as image_file:
        image_string = image_file.read()
    with fs.open('trends_chrome_extension_bucket/lasttechradar.png', 'wb') as file:
        file.write(image_string)

    return 'lasttechradar.png', mainlistofanswers[0], mainlistofanswers[1], mainlistofanswers[2], mainlistofanswers[3], desc_tmp, "".join(clusterstextlist)
###############################################################################
# Gradio UI -- wires the two callbacks above to the widgets.
###############################################################################
with gr.Blocks() as demo:
    gr.Markdown("<h1><center>ChatGPT Technology Radar</center></h1>")
    gr.Markdown(
        """What are the top technologies as of now? Let us query top consulting company websites & use ChatGPT to understand. \n\nShowcases ChatGPT integrated with real data. It shows how to get real-time data and marry it with ChatGPT capabilities. This demonstrates 'Chain of Thought' thinking using ChatGPT.\nLangChain & GPT-Index are both used.\n """
    )

    # Input row: API key plus the refresh trigger.
    with gr.Row():
        textboxopenapi = gr.Textbox(placeholder="Enter OpenAPI Key...", lines=1, label='OpenAPI Key')
        btn = gr.Button("Refresh")

    # Radar image on the left; the four per-firm answers on the right.
    with gr.Row():
        with gr.Column():
            output_image = gr.components.Image(label="Tech Radar", value='lasttechradar.png')
        with gr.Column():
            outputMck = gr.Textbox(placeholder=value1, lines=1, label='McKinsey View')
            outputBcg = gr.Textbox(placeholder=value2, lines=1, label='BCG View')
            outputBain = gr.Textbox(placeholder=value3, lines=1, label='Bain View')
            outputAcc = gr.Textbox(placeholder=value4, lines=1, label='Accenture View')

    # Summary paragraph and cluster themes underneath.
    with gr.Row():
        with gr.Column():
            outputdesc = gr.Textbox(placeholder=value5, lines=1, label='Description')
        with gr.Column():
            outputclusters = gr.Textbox(placeholder=value6, lines=1, label='Clusters')

    # Both callbacks feed the same seven outputs, in the same order.
    all_outputs = [output_image, outputMck, outputBcg, outputBain, outputAcc, outputdesc, outputclusters]

    btn.click(getstuff, inputs=[textboxopenapi], outputs=all_outputs)

    demo.load(getlastimage, [], all_outputs)

demo.launch(debug=True)
|
lastradartext.txt
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
The top technologies mentioned are generative AI, ChatGPT, ERP platforms, and autonomous mobility.
|
| 2 |
+
SEPERATOR
|
| 3 |
+
The top technologies mentioned are Artificial Intelligence, 5G Technology, Web3, and reCaptcha.
|
| 4 |
+
SEPERATOR
|
| 5 |
+
The technologies mentioned are Salesforce integration, automation, data analytics, digital strategy, sustainability and responsibility, voluntary carbon markets, and omnichannel strategy.
|
| 6 |
+
SEPERATOR
|
| 7 |
+
The top technologies mentioned are virtual reality, cloud computing, augmented reality, artificial intelligence, and mainframe technology.
|
| 8 |
+
SEPERATOR
|
| 9 |
+
The world of technology is constantly evolving and advancing, and the list of top technologies is ever-changing. Generative AI, ChatGPT, ERP platforms, autonomous mobility, Artificial Intelligence, 5G Technology, Web3, reCaptcha, Salesforce integration, automation, data analytics, digital strategy, sustainability responsibility, voluntary carbon markets, omnichannel strategy, virtual reality, cloud computing, augmented reality, artificial intelligence, and mainframe technology are all examples of the latest and greatest technologies that are being used in the world today. Generative AI is being used to create new and innovative products, while ChatGPT is being used to create more natural conversations between humans and machines. ERP platforms are being used to streamline business processes, and autonomous mobility is being used to create self-driving cars. 5G Technology is revolutionizing the way we communicate, and Web3 is being used to create a more secure and decentralized internet. reCaptcha is being used to protect websites from malicious bots, and Salesforce integration is being used to create a more unified customer experience. Automation is being used to reduce manual labor, and data analytics is being used to gain insights into customer behavior. Digital strategy is being used to create more effective marketing campaigns, and sustainability responsibility is being used to reduce the environmental impact of businesses. Voluntary carbon markets are being used to incentivize businesses to reduce their carbon emissions, and omnichannel strategy is being used to create a more unified customer experience across multiple channels. Virtual reality, cloud computing, augmented reality, artificial intelligence, and mainframe technology are all being used to create more efficient and powerful systems. All of these technologies are revolutionizing the way we live and work, and they are sure to continue to shape the future of technology.
|
| 10 |
+
SEPERATOR
|
| 11 |
+
Cluster 0
|
| 12 |
+
Theme: All of these technologies are related to online security and user authentication.
|
| 13 |
+
ChatGPT
|
| 14 |
+
Web3
|
| 15 |
+
reCaptcha.
|
| 16 |
+
----------
|
| 17 |
+
|
| 18 |
+
Cluster 1
|
| 19 |
+
Theme: All of the technologies listed are related to digital technology and automation. They all involve the use of computers, software, and/or artificial intelligence to automate processes or create new experiences.
|
| 20 |
+
generative AI
|
| 21 |
+
autonomous mobility.
|
| 22 |
+
Artificial Intelligence
|
| 23 |
+
automation
|
| 24 |
+
virtual reality
|
| 25 |
+
cloud computing
|
| 26 |
+
augmented reality
|
| 27 |
+
artificial intelligence
|
| 28 |
+
mainframe technology.
|
| 29 |
+
----------
|
| 30 |
+
|
| 31 |
+
Cluster 2
|
| 32 |
+
Theme: They are all technologies related to customer relationship management (CRM).
|
| 33 |
+
ERP platforms
|
| 34 |
+
Salesforce integration
|
| 35 |
+
----------
|
| 36 |
+
|
| 37 |
+
Cluster 3
|
| 38 |
+
Theme: The common theme between these technologies is sustainability.
|
| 39 |
+
sustainability responsibility
|
| 40 |
+
voluntary carbon markets
|
| 41 |
+
----------
|
| 42 |
+
|
| 43 |
+
Cluster 4
|
| 44 |
+
Theme: All of the technologies listed are related to digital marketing and customer engagement.
|
| 45 |
+
5G Technology
|
| 46 |
+
data analytics
|
| 47 |
+
digital strategy
|
| 48 |
+
omnichannel strategy.
|
| 49 |
+
----------
|
| 50 |
+
|
| 51 |
+
|
lasttechradar.png
ADDED
|
requirements.txt
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio
|
| 2 |
+
gpt_index
|
| 3 |
+
html2text
|
| 4 |
+
langchain
|
| 5 |
+
ipython
|
| 6 |
+
scikit-learn
|
| 7 |
+
plotly
|
| 8 |
+
Werkzeug
|
| 9 |
+
google-cloud-storage
|
| 10 |
+
gcsfs
|