fudii0921 committed on
Commit
3d8bf6d
·
verified ·
1 Parent(s): 60e0c2d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -5
app.py CHANGED
@@ -9,6 +9,10 @@ from typing_extensions import Annotated
9
  import random
10
  import groq
11
 
 
 
 
 
12
  conn = mysql.connector.connect(
13
  host="www.ryhintl.com",
14
  user="smairuser",
@@ -20,7 +24,7 @@ conn = mysql.connector.connect(
20
  cursor = conn.cursor(dictionary=True)
21
 
22
  # MySQLに接続
23
- def get_api_keys():
24
  select_one_data_query = "SELECT * FROM agentic_apis_copy where api = 'GROQ_KEYS'"
25
  cursor.execute(select_one_data_query)
26
  result = cursor.fetchall()
@@ -34,11 +38,13 @@ mytokens = get_api_keys()
34
 
35
  tokens = eval("["+mytokens+"]")
36
 
37
- client = groq.Client(api_key=tokens[0])
 
 
38
 
39
  def compare_result(eprag: str, llm: str):
40
  completion = client.chat.completions.create(
41
- model="llama-3.3-70b-versatile",
42
  messages=[
43
  {"role": "system", "content": "貴方は優秀なアシスタントです。必ず、日本語で答えてください。"},
44
  {"role": "user", "content": eprag+"\n"+llm+"\n epragとllmの結果を比較分析してください。必ず、日本語で答えてください。"}
@@ -50,7 +56,7 @@ def compare_result(eprag: str, llm: str):
50
 
51
  def get_llm(prompt: str):
52
  completion = client.chat.completions.create(
53
- model="llama-3.3-70b-versatile",
54
  messages=[
55
  {"role": "system", "content": "貴方は優秀なアシスタントです。必ず、日本語で答えてください。"},
56
  {"role": "user", "content": prompt}
@@ -134,7 +140,7 @@ def generate_blog_article(prompt):
134
  config_list.clear
135
  config_list = [
136
  {"model": "llama-3.3-70b-versatile", "api_key": token, "api_type": "groq"},
137
- {"model": "llama3-70b-8192", "api_key": token, "api_type": "groq"},
138
  ]
139
 
140
  print("config token: ",token)
 
9
  import random
10
  import groq
11
 
12
+ from dotenv import load_dotenv
13
+
14
+ load_dotenv(verbose=True)
15
+
16
  conn = mysql.connector.connect(
17
  host="www.ryhintl.com",
18
  user="smairuser",
 
24
  cursor = conn.cursor(dictionary=True)
25
 
26
  # MySQLに接続
27
+ '''def get_api_keys():
28
  select_one_data_query = "SELECT * FROM agentic_apis_copy where api = 'GROQ_KEYS'"
29
  cursor.execute(select_one_data_query)
30
  result = cursor.fetchall()
 
38
 
39
  tokens = eval("["+mytokens+"]")
40
 
41
+ client = groq.Client(api_key=tokens[0])'''
42
+
43
+ client = groq.Client(api_key=os.environ["GROQ_API_KEY"])
44
 
45
  def compare_result(eprag: str, llm: str):
46
  completion = client.chat.completions.create(
47
+ model="openai/gpt-oss-120b",
48
  messages=[
49
  {"role": "system", "content": "貴方は優秀なアシスタントです。必ず、日本語で答えてください。"},
50
  {"role": "user", "content": eprag+"\n"+llm+"\n epragとllmの結果を比較分析してください。必ず、日本語で答えてください。"}
 
56
 
57
  def get_llm(prompt: str):
58
  completion = client.chat.completions.create(
59
+ model="openai/gpt-oss-120b",
60
  messages=[
61
  {"role": "system", "content": "貴方は優秀なアシスタントです。必ず、日本語で答えてください。"},
62
  {"role": "user", "content": prompt}
 
140
  config_list.clear
141
  config_list = [
142
  {"model": "llama-3.3-70b-versatile", "api_key": token, "api_type": "groq"},
143
+ {"model": "openai/gpt-oss-120b", "api_key": token, "api_type": "groq"},
144
  ]
145
 
146
  print("config token: ",token)