riteshkokam committed
Commit c3a27c6 · verified · 1 Parent(s): c6dc0bd

Update app.py

Files changed (1): app.py (+3, -16)
app.py CHANGED

@@ -1,6 +1,7 @@
 import gradio as gr
 import os
 from google import genai
+from google.genai import types
 from PyPDF2 import PdfReader
 from docx import Document
 
@@ -99,31 +100,17 @@ def extract_text_from_docx(docx_file):
         text += para.text + "\n"
     return text
 
-def generate_response(message: str, system_prompt: str, temperature: float, max_tokens: int):
-    # conversation = [
-    #     {"role": "system", "content": system_prompt},
-    #     {"role": "user", "content": message}
-    # ]
-
-    # response = client.chat.completions.create(
-    #     model="llama-3.1-8B-Instant",
-    #     messages=conversation,
-    #     temperature=temperature,
-    #     max_tokens=max_tokens,
-    #     stream=False
-    # )
-
+def generate_response(message: str, system_prompt: str, temperature: float, max_tokens: int):
     response = client.models.generate_content(
         model="gemini-2.5-flash",
         contents=message,
-        config=GenerateContentConfig(
+        config=types.GenerateContentConfig(
             system_instruction=system_prompt,
             temperature=temperature,
             max_output_tokens=max_tokens
         )
     )
 
-    # return response.choices[0].message.content
     return response.text
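For context, here is a minimal, self-contained sketch of the function after this change. It assumes the google-genai package (pip install google-genai) rather than the older google-generativeai SDK, and assumes the app builds its client as genai.Client() with the key taken from a GEMINI_API_KEY environment variable; the client setup is not shown in this diff, so that part is illustrative.

import os

from google import genai
from google.genai import types

# Assumption: the app creates a single client at module load.
# The diff never shows client construction, so this call is a sketch.
client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])

def generate_response(message: str, system_prompt: str, temperature: float, max_tokens: int):
    # With the Gen AI SDK, the system prompt travels in the request config
    # (system_instruction) rather than as a "system" role message, as it did
    # in the removed chat-completions-style code.
    response = client.models.generate_content(
        model="gemini-2.5-flash",
        contents=message,
        config=types.GenerateContentConfig(
            system_instruction=system_prompt,
            temperature=temperature,
            max_output_tokens=max_tokens,
        ),
    )
    return response.text

# Example call (hypothetical arguments):
# generate_response("Summarize this PDF.", "Be concise.", 0.2, 256)

The one behavioral change beyond the import fix is that GenerateContentConfig is now referenced through the types module, which is where the google-genai SDK exposes its request/response dataclasses.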