Update app.py
Browse files
app.py
CHANGED
|
@@ -10,4 +10,62 @@ fileobj.close()
|
|
| 10 |
from sentence_transformers import SentenceTransformer, util
|
| 11 |
import torch
|
| 12 |
|
| 13 |
-
embedder = SentenceTransformer("ramdane/jurimodel")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Legal-QA app: retrieve similar jurisprudence with a sentence-transformer
embedder, then answer the user's question with Gemini using the retrieved
texts as context."""
import os

from sentence_transformers import SentenceTransformer, util
import torch
import google.generativeai as genai

# Embedding model fine-tuned for (Arabic) legal jurisprudence retrieval.
embedder = SentenceTransformer("ramdane/jurimodel")

# SECURITY FIX: the original hard-coded a Google API key in source control.
# Read it from the environment instead; fail fast with a clear message if
# it is missing. The leaked key must also be revoked.
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
|
| 16 |
+
|
| 17 |
+
# Set up the model
|
| 18 |
+
generation_config = {
|
| 19 |
+
"temperature": 0,
|
| 20 |
+
"top_p": 1,
|
| 21 |
+
"top_k": 1,
|
| 22 |
+
"max_output_tokens": 2048,
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
safety_settings = [
|
| 26 |
+
{
|
| 27 |
+
"category": "HARM_CATEGORY_HARASSMENT",
|
| 28 |
+
"threshold": "BLOCK_NONE"
|
| 29 |
+
},
|
| 30 |
+
{
|
| 31 |
+
"category": "HARM_CATEGORY_HATE_SPEECH",
|
| 32 |
+
"threshold": "BLOCK_NONE"
|
| 33 |
+
},
|
| 34 |
+
{
|
| 35 |
+
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
|
| 36 |
+
"threshold": "BLOCK_NONE"
|
| 37 |
+
},
|
| 38 |
+
{
|
| 39 |
+
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
|
| 40 |
+
"threshold": "BLOCK_NONE"
|
| 41 |
+
},
|
| 42 |
+
]
|
| 43 |
+
|
| 44 |
+
model = genai.GenerativeModel(model_name="gemini-1.0-pro-001",
|
| 45 |
+
generation_config=generation_config,
|
| 46 |
+
safety_settings=safety_settings)
|
| 47 |
+
def show(queries):
    """Answer a legal question using retrieved jurisprudence as context.

    Embeds the query, retrieves the 10 most similar corpus entries
    (``corpus`` / ``corpus_embeddings`` are defined earlier in this file),
    feeds them to Gemini as prior chat turns, then asks the model to answer
    based on them, citing the jurisprudence it relied on.

    Args:
        queries: the user's question (a single string).

    Returns:
        The model's final text response.
    """
    query_embedding = embedder.encode(queries, convert_to_tensor=True)
    hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=10)

    # semantic_search returns one hit list per query; we embed one query.
    hits = hits[0]

    history = []
    # FIX: iterate the hits actually returned instead of assuming exactly
    # 10 exist — a corpus smaller than top_k raised IndexError before.
    for hit in hits:
        history.append({"role": "user", "parts": [corpus[hit['corpus_id']]]})
        history.append({"role": "model", "parts": ["حسنا"]})

    convo = model.start_chat(history=history)
    convo.send_message(" اجب من خلال ما سبق من اجتهادات على السؤال التالي مع دكر الاجتهاد الدي اعتمدت عليه"+queries)

    return convo.last.text
|
| 63 |
+
import gradio as gr

# Minimal UI: one question textbox in, one answer text area out, wired to
# the retrieval + Gemini pipeline above.
app = gr.Interface(
    fn=show,
    inputs=gr.Textbox(label="إسئل وسيتم الاجابة عن طريق الاجتهادات القضائية"),
    outputs=gr.TextArea(label="استنتاج النموذج"),
    # Prevents caching conversation history
)

app.launch(share=True, debug=True)
|