MFF212 committed on
Commit
ba970f2
·
1 Parent(s): cf52350

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +90 -71
main.py CHANGED
@@ -1,84 +1,103 @@
1
  from fastapi import FastAPI, UploadFile, File
2
  from fastapi.responses import HTMLResponse
3
- from fastapi.responses import HTMLResponse, FileResponse
4
- import pandas as pd
5
- import io
6
  import requests
7
- import time
 
 
 
 
 
 
 
8
 
9
  app = FastAPI()
10
 
11
- @app.get("/", response_class=HTMLResponse)
12
- async def analyze_logs():
13
- return """
14
- <html>
15
- <body>
16
- <form action="/upload/" enctype="multipart/form-data" method="post">
17
- <input name="file" type="file">
18
- <input type="submit">
19
- </form>
20
- </body>
21
- </html>
22
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
  @app.post("/upload/")
25
  async def upload_file(file: UploadFile = File(...)):
26
  contents = await file.read()
27
- logs_df = pd.read_parquet(io.BytesIO(contents))
28
-
29
- processing_message = "\n\n Processing files...\n\n"
30
- time.sleep(3) # Simulate processing time (3 seconds)
31
-
32
- logs_df['datetime'] = pd.to_datetime(logs_df['datetime'], format='%d/%m/%Y:%H:%M:%S')
33
- logs_df['day'] = logs_df['datetime'].apply(lambda x: x.day)
34
- logs_df['hour'] = logs_df['datetime'].apply(lambda x: x.hour)
35
- logs_df['minute'] = logs_df['datetime'].apply(lambda x: x.minute)
36
-
37
- ip_address_count_df = (
38
- logs_df.groupby(['method', 'client'], as_index=False)
39
- .size()
40
- .rename(columns={'size': 'count'})
41
- .sort_values('count', ascending=False)
42
- )
43
-
44
- ip_address_count_df = ip_address_count_df.assign(
45
- perc=ip_address_count_df['count'].div(ip_address_count_df['count'].sum()),
46
- cum_perc=lambda df: df['perc'].cumsum(),
47
- )
48
-
49
- result = (
50
- "<h1>Redundant IP Requests....</h1>"
51
- "<p>The Total API Requests from the sample logs are : {total_requests}</p>"
52
- "<p>The Redundant API Requests from the sample logs are : {redundant_requests}</p>"
53
- "<p>The percentage of Redundant API Requests from the sample logs is : {redundant_percentage:.2f}%</p>"
54
- "{dataframe_html}"
55
- ).format(
56
- total_requests=logs_df.shape[0],
57
- redundant_requests=ip_address_count_df.shape[0],
58
- redundant_percentage=(ip_address_count_df.shape[0] / logs_df.shape[0]) * 100,
59
- dataframe_html=ip_address_count_df.head(1000)
60
- .style.background_gradient(subset=['count', 'perc', 'cum_perc'], cmap='cividis')
61
- .format({'count': '{:,}', 'perc': '{:.1%}', 'cum_perc': '{:.1%}'})
62
- .to_html(), # Corrected here
63
- )
64
- # Save the HTML content locally
65
- file_location = "result.html"
66
- with open(file_location, "w") as html_file:
67
- html_file.write(result)
68
-
69
- # Print the location of the saved file
70
- print(f"HTML result saved to: {file_location}")
71
-
72
- return HTMLResponse(content=result)
73
-
74
- @app.get("/view-result/", response_class=HTMLResponse)
75
- async def view_result():
76
- try:
77
- with open("result.html", "r") as html_file:
78
- content = html_file.read()
79
- return HTMLResponse(content=content)
80
- except FileNotFoundError:
81
- return HTMLResponse(content="Result HTML not found")
82
 
83
 
84
 
 
1
  from fastapi import FastAPI, UploadFile, File
2
  from fastapi.responses import HTMLResponse
3
+ from fastapi import FastAPI
4
+ import json
 
5
  import requests
6
+ import csv
7
+ from fastapi import FastAPI, UploadFile, File
8
+ from fastapi.responses import HTMLResponse
9
+ from fastapi.responses import HTMLResponse, FileResponse
10
# Module-level dictionaries holding URL counts, API counts, and the names
# associated with each.
# NOTE(review): this state is shared across ALL requests, so counts keep
# accumulating between uploads — confirm that cross-request accumulation
# is intended and not a bug.
url_counts = {}
api_counts = {}
url_names = {}
# BUG FIX: api_names is written to by the /upload/ handler but was never
# initialised anywhere, raising NameError on the first data row.
api_names = {}

app = FastAPI()
16
 
17
def ask(query):
    """Ask the Eden AI chat endpoint a question grounded in the hard-coded
    inventory summary and return the model's generated answer.

    Parameters
    ----------
    query : str
        Natural-language question appended to the inventory summary.

    Returns
    -------
    str
        The ``generated_text`` field of the OpenAI provider response.

    Raises
    ------
    requests.HTTPError
        If the API responds with a non-2xx status (previously this surfaced
        as an opaque KeyError on the missing 'openai' key).
    KeyError
        If a 2xx response payload lacks the expected provider fields.
    """
    import os  # local import so this block stays self-contained

    # SECURITY: the bearer token was hard-coded in source. Prefer the
    # EDENAI_API_KEY environment variable; the literal fallback preserves
    # the original behaviour, but the exposed key should be rotated.
    api_key = os.environ.get(
        "EDENAI_API_KEY",
        "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoiM2NlMTljZDItNTk2Zi00NzRhLWE3OGMtZWZhNTM1YWJlNmY0IiwidHlwZSI6ImFwaV90b2tlbiJ9.x_PPBluoOy2GT3ZjDQ3dcS8WtbNn95RfkoFKkhM_a5A",
    )
    headers = {"Authorization": "Bearer " + api_key}
    url = "https://api.edenai.run/v2/text/chat"

    # Fixed inventory summary the chatbot is grounded in.
    content = "The inventory consists of various items, including Milk (50 units in stock, 10 units pending), Bread (100 units in stock, 20 units pending), Eggs (75 dozen in stock, 5 dozen pending), Rice (200 pounds in stock, 15 pounds pending), Chicken (40 pounds in stock, 8 pounds pending), Fresh Vegetables (various in stock, 30 pounds pending), Canned Soup (150 cans in stock, 25 cans pending), Pasta (80 pounds in stock, 10 pounds pending), Toilet Paper (60 rolls in stock, 12 rolls pending), and Shampoo (30 bottles in stock, 5 bottles pending)."

    # The user query is concatenated onto the summary as the chatbot's
    # global action, per the original design.
    payload = {
        "providers": "openai",
        "text": "from the summary: ",
        "chatbot_global_action": content + " " + query,
        "previous_history": [],
        "temperature": 0.0,
        "max_tokens": 300,
    }

    # BUG FIX: add a timeout so a stalled API call cannot hang the server
    # forever, and fail fast on HTTP error statuses.
    response = requests.post(url, json=payload, headers=headers, timeout=30)
    response.raise_for_status()

    # response.json() replaces json.loads(response.text) — same result,
    # one fewer intermediate string.
    result = response.json()
    return result["openai"]["generated_text"]
40
+
41
+ @app.get("/ask")
42
+ async def ask_question(question_text: str):
43
+ # Call the question function with the provided question_text
44
+ response = ask(question_text)
45
+ return {"response": response}
46
 
47
  @app.post("/upload/")
48
  async def upload_file(file: UploadFile = File(...)):
49
  contents = await file.read()
50
+ with open(contents, 'r', newline='', encoding='utf-8') as csvfile:
51
+ csvreader = csv.reader(csvfile)
52
+ header = next(csvreader) # Skip the header row
53
+ total_records = 0
54
+ for row in csvreader:
55
+ url = row[3] # Assuming the URL is in the 4th column (index 3)
56
+ api = row[4] # Assuming the API is in the 5th column (index 4)
57
+ url_name = row[2] # Assuming the name associated with URLs is in the 3rd column (index 2)
58
+ api_name = row[6] # Assuming the name associated with APIs is in the 7th column (index 6)
59
+ total_records += 1
60
+
61
+ # Count URLs and associated names
62
+ if url in url_counts:
63
+ url_counts[url] += 1
64
+ else:
65
+ url_counts[url] = 1
66
+ url_names[url] = url_name
67
+
68
+ # Count APIs and associated names
69
+ if api in api_counts:
70
+ api_counts[api] += 1
71
+ else:
72
+ api_counts[api] = 1
73
+ api_names[api] = api_name
74
+
75
+ # Find redundant URLs and APIs with counts greater than 1
76
+ redundant_urls = [url for url, count in url_counts.items() if count > 1]
77
+ redundant_apis = [api for api, count in api_counts.items() if count > 1]
78
+
79
+ # Print the results including names
80
+ print("Total GET Records:", total_records)
81
+
82
+ print("\nRedundant GET Requests:")
83
+ for url in redundant_urls:
84
+ print("URL:", url)
85
+ print("Name:", url_names[url])
86
+ print("Count:", url_counts[url])
87
+ print("---")
88
+
89
+ # Calculate the percentage of redundant URLs and APIs
90
+ percentage_redundant_urls = (len(redundant_urls) / total_records) * 100 if total_records > 0 else 0
91
+
92
+ print("\nPercentage of Redundant GET Requests: {:.2f}%".format(percentage_redundant_urls))
93
+
94
+
95
+
96
+
97
+
98
+ return HTMLResponse(content=percentage_redundant_urls)
99
+
100
+
 
 
 
 
101
 
102
 
103