Mr-Help committed on
Commit
f4cb00a
·
verified ·
1 Parent(s): b2b2efe

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +131 -0
  2. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Imports and module-level configuration for the chat-processing FastAPI service.
import openai
import os
from dotenv import load_dotenv, dotenv_values
from fastapi import FastAPI, Request
from pydantic import BaseModel
from fastapi.responses import RedirectResponse, HTMLResponse
from fastapi.responses import JSONResponse
import urllib.parse
import requests

# Load environment variables (assuming your API key is stored in a `.env` file)
load_dotenv()
# Hugging Face token used to authenticate against the OpenAI-compatible router.
api_key = os.environ.get('HUGGINGFACEHUB_API_TOKEN')

# OpenAI API configuration (specific to Meta-Llama-3-8B)
model_link = "meta-llama/Meta-Llama-3-8B-Instruct"
# Hugging Face's OpenAI-compatible inference endpoint.
base_url = "https://router.huggingface.co/v1"

app = FastAPI()
20
+
21
class Message(BaseModel):
    """Request-body schema: a single free-text message from the user.

    NOTE(review): not referenced anywhere in this file — /processtext reads
    the raw JSON body directly; confirm whether this model is still needed.
    """
    message: str
23
+
24
@app.get("/", response_class=HTMLResponse)
async def read_root():
    """Landing page: a plain greeting confirming the service is running."""
    greeting = """Welcome to Up to 12 Chat Processor"""
    return greeting
27
+
28
@app.post("/processtext")
async def receive_updates(request: Request):
    """Accept a JSON payload, run its "message" field through the LLM
    correction pipeline, and return the result as a JSON response."""
    payload = await request.json()
    print("Received Update:", payload)

    user_message = payload.get("message", "")
    outcome = process_text(user_message)
    print("Assistant:", outcome)

    # Guarantee that the response is always JSON.
    return JSONResponse(content=outcome)
38
+
39
+
40
def process_text(user_text: str) -> dict:
    """Correct *user_text* with an LLM, then list the corrections that were made.

    Runs two chat-completion calls against the configured Llama model:
    1) rewrite the text with grammar/spelling fixes (low temperature),
    2) diff the original against the corrected text and list every mistake.

    Args:
        user_text: Raw text submitted by the user.

    Returns:
        On success: {"ok": True, "Corrected_text": ..., "Mistakes": ...}.
        On failure: {"ok": False, "when": <stage label>, "error": <message>},
        with the error converted to a string so the dict stays JSON-serializable.
    """
    client = openai.OpenAI(api_key=api_key, base_url=base_url)

    # 1) Correct the text.
    try:
        # Plain literal — no interpolation needed, so no f-prefix (ruff F541).
        system_prompt = """
        You are a helpful English grammar corrector for A1/A2 CEFR students.
        Correct grammar, spelling, and literal translation mistakes.
        If the input is only a list of words (not sentences), only correct spelling.
        Return ONLY the revised text (no explanations).
        """

        resp = client.chat.completions.create(
            model=model_link,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_text},
            ],
            max_tokens=800,
            temperature=0.1,  # near-deterministic: we want corrections, not creativity
            stream=False,
        )

        corrected_text = resp.choices[0].message.content.strip()
        print("After correction:", corrected_text)
        print("--------------------------------------")

    except Exception as e:
        # Important: return the error as a string, not the exception object,
        # so the payload can always be serialized into the HTTP response.
        return {"ok": False, "when": "During correcting sentences", "error": str(e)}

    # 2) Extract the mistakes (diff between original and corrected text).
    try:
        mistakes_prompt = f"""
        Compare the user text and the corrected text, and list EVERY correction made.
        Return ONLY a list of mistakes/corrections (no revised text).

        User text: "{user_text}"
        Corrected text: "{corrected_text}"
        """

        resp2 = client.chat.completions.create(
            model=model_link,
            messages=[
                {"role": "system", "content": "You are a grammar checker bot."},
                {"role": "user", "content": mistakes_prompt},
            ],
            max_tokens=800,
            temperature=0,  # fully deterministic listing of the diff
            stream=False,
        )

        mistakes = resp2.choices[0].message.content.strip()

        return {"ok": True, "Corrected_text": corrected_text, "Mistakes": mistakes}

    except Exception as e:
        # Still surface the corrected text even when the mistake-listing step fails.
        return {"ok": False, "when": "During checking mistakes", "error": str(e), "Corrected_text": corrected_text}
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ uvicorn
2
+ requests
3
+ fastapi
4
+ numpy<2
5
+ python-multipart
6
+ openai
7
+ langchain
8
+ python-dotenv
9
+ langchain-community