Spaces:
Sleeping
Sleeping
Update main.py
Browse files
main.py
CHANGED
|
@@ -1,84 +1,103 @@
|
|
| 1 |
from fastapi import FastAPI, UploadFile, File
|
| 2 |
from fastapi.responses import HTMLResponse
|
| 3 |
-
from fastapi
|
| 4 |
-
import
|
| 5 |
-
import io
|
| 6 |
import requests
|
| 7 |
-
import
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
app = FastAPI()
|
| 10 |
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
|
| 24 |
@app.post("/upload/")
|
| 25 |
async def upload_file(file: UploadFile = File(...)):
|
| 26 |
contents = await file.read()
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
content = html_file.read()
|
| 79 |
-
return HTMLResponse(content=content)
|
| 80 |
-
except FileNotFoundError:
|
| 81 |
-
return HTMLResponse(content="Result HTML not found")
|
| 82 |
|
| 83 |
|
| 84 |
|
|
|
|
| 1 |
from fastapi import FastAPI, UploadFile, File
|
| 2 |
from fastapi.responses import HTMLResponse
|
| 3 |
+
from fastapi import FastAPI
|
| 4 |
+
import json
|
|
|
|
| 5 |
import requests
|
| 6 |
+
import csv
|
| 7 |
+
from fastapi import FastAPI, UploadFile, File
|
| 8 |
+
from fastapi.responses import HTMLResponse
|
| 9 |
+
from fastapi.responses import HTMLResponse, FileResponse
|
| 10 |
+
# Dictionaries to store URLs, API counts, and associated names
|
| 11 |
+
url_counts = {}
|
| 12 |
+
api_counts = {}
|
| 13 |
+
url_names = {}
|
| 14 |
|
| 15 |
app = FastAPI()
|
| 16 |
|
| 17 |
+
def ask(query):
    """Send *query* to the Eden AI chat endpoint and return the generated reply.

    A fixed inventory summary is prepended as the chatbot's global context so
    the model answers questions about current stock levels.

    Args:
        query: The user's natural-language question.

    Returns:
        The provider's generated answer text.

    Raises:
        requests.HTTPError: if the Eden AI call returns a non-2xx status.
        KeyError: if the provider response has an unexpected shape.
    """
    # SECURITY: hard-coded bearer token committed to source — this key is now
    # public and should be rotated, then loaded from an environment variable
    # or secret store instead.
    headers = {"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoiM2NlMTljZDItNTk2Zi00NzRhLWE3OGMtZWZhNTM1YWJlNmY0IiwidHlwZSI6ImFwaV90b2tlbiJ9.x_PPBluoOy2GT3ZjDQ3dcS8WtbNn95RfkoFKkhM_a5A"}  # Replace with your API key
    url = "https://api.edenai.run/v2/text/chat"

    # Static inventory summary used as the chatbot's global context.
    content = "The inventory consists of various items, including Milk (50 units in stock, 10 units pending), Bread (100 units in stock, 20 units pending), Eggs (75 dozen in stock, 5 dozen pending), Rice (200 pounds in stock, 15 pounds pending), Chicken (40 pounds in stock, 8 pounds pending), Fresh Vegetables (various in stock, 30 pounds pending), Canned Soup (150 cans in stock, 25 cans pending), Pasta (80 pounds in stock, 10 pounds pending), Toilet Paper (60 rolls in stock, 12 rolls pending), and Shampoo (30 bottles in stock, 5 bottles pending)."

    # Concatenate the inventory context with the user's question.
    payload = {
        "providers": "openai",
        "text": "from the summary: ",
        "chatbot_global_action": content + " " + query,
        "previous_history": [],
        "temperature": 0.0,
        "max_tokens": 300,
    }

    response = requests.post(url, json=payload, headers=headers)
    # BUG FIX: fail loudly on HTTP errors instead of crashing later with a
    # confusing KeyError on the parsed body.
    response.raise_for_status()
    result = response.json()  # equivalent to json.loads(response.text)
    generated_text = result['openai']['generated_text']
    return generated_text
|
| 40 |
+
|
| 41 |
+
@app.get("/ask")
async def ask_question(question_text: str):
    """Answer *question_text* via the Eden AI chatbot and wrap the reply."""
    # Delegate straight to the synchronous helper and return its answer.
    return {"response": ask(question_text)}
|
| 46 |
|
| 47 |
@app.post("/upload/")
async def upload_file(file: UploadFile = File(...)):
    """Accept an uploaded CSV of request logs and return the percentage of
    redundant (duplicated) GET request URLs as an HTML response.
    """
    contents = await file.read()
    # BUG FIX: the previous code passed the raw uploaded *contents* to open()
    # as if it were a filesystem path; parse the bytes in memory instead.
    text = contents.decode('utf-8')
    percentage_redundant_urls = _redundant_url_percentage(text)
    # BUG FIX: HTMLResponse requires str/bytes content, not a float.
    return HTMLResponse(content="{:.2f}".format(percentage_redundant_urls))


def _redundant_url_percentage(text):
    """Parse CSV *text* and return the percentage of URLs seen more than once.

    Column layout assumed from the original code — TODO confirm against the
    real export: name in column 2, URL in column 3, API in column 4, API name
    in column 6.

    Args:
        text: The decoded CSV content, including a header row.

    Returns:
        Percentage (0–100) of distinct redundant URLs relative to the total
        number of data rows; 0 for an empty file.
    """
    import io  # local import: io is not imported at module level

    # BUG FIX: counters are now per-call locals. The old module-level dicts
    # accumulated across requests, and `api_names` was never defined at all,
    # which raised NameError on the first data row.
    url_counts = {}
    api_counts = {}
    url_names = {}
    api_names = {}
    total_records = 0

    csvreader = csv.reader(io.StringIO(text))
    next(csvreader, None)  # skip the header row (None default: empty file OK)
    for row in csvreader:
        url = row[3]
        api = row[4]
        url_name = row[2]
        api_name = row[6]
        total_records += 1

        # Count URLs and remember the name first associated with each one.
        url_counts[url] = url_counts.get(url, 0) + 1
        url_names.setdefault(url, url_name)

        # Count APIs and remember the name first associated with each one.
        api_counts[api] = api_counts.get(api, 0) + 1
        api_names.setdefault(api, api_name)

    # Anything that occurred more than once is considered redundant.
    redundant_urls = [url for url, count in url_counts.items() if count > 1]
    redundant_apis = [api for api, count in api_counts.items() if count > 1]

    # Diagnostic output, preserved from the original implementation.
    print("Total GET Records:", total_records)
    print("\nRedundant GET Requests:")
    for url in redundant_urls:
        print("URL:", url)
        print("Name:", url_names[url])
        print("Count:", url_counts[url])
        print("---")

    percentage = (len(redundant_urls) / total_records) * 100 if total_records > 0 else 0
    print("\nPercentage of Redundant GET Requests: {:.2f}%".format(percentage))
    return percentage
|
| 99 |
+
|
| 100 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
| 101 |
|
| 102 |
|
| 103 |
|