yonkoyonks committed on
Commit
da87d84
·
verified ·
1 Parent(s): 93d9888

Update utils.py

Browse files
Files changed (1) hide show
  1. utils.py +9 -8
utils.py CHANGED
@@ -1,10 +1,7 @@
1
  from huggingface_hub import InferenceClient
2
- from dotenv import load_dotenv
3
  import os
4
  import pandas as pd
5
 
6
- load_dotenv()
7
-
8
  def summarize_dataframe(df: pd.DataFrame, max_rows: int = 30) -> str:
9
  summary = f"Columns: {', '.join(df.columns)}\n\n"
10
  if len(df) > max_rows:
@@ -42,13 +39,17 @@ Question:
42
  Answer (with explanation):
43
  """
44
 
45
- # ----------------- Option 2 starts here -----------------
46
  client = InferenceClient(token=os.environ.get("HUGGINGFACE_API_KEY"))
47
- # Use a model that supports text-generation (TGI-compatible)
48
- response = client.text_generation(prompt, model="mistralai/Mistral-7B-Instruct-v0.3")
49
 
50
- # Extract the text
 
 
 
 
 
 
 
51
  answer = response[0]["generated_text"] if isinstance(response, list) and "generated_text" in response[0] else str(response)
52
- # ----------------- Option 2 ends here -----------------
53
 
54
  return answer
 
1
  from huggingface_hub import InferenceClient
 
2
  import os
3
  import pandas as pd
4
 
 
 
5
  def summarize_dataframe(df: pd.DataFrame, max_rows: int = 30) -> str:
6
  summary = f"Columns: {', '.join(df.columns)}\n\n"
7
  if len(df) > max_rows:
 
39
  Answer (with explanation):
40
  """
41
 
42
+ # Initialize the InferenceClient
43
  client = InferenceClient(token=os.environ.get("HUGGINGFACE_API_KEY"))
 
 
44
 
45
+ # Use the text_generation method with the correct parameters
46
+ response = client.text_generation(
47
+ model="google/gemma-2b-it",
48
+ inputs=prompt,
49
+ parameters={"max_new_tokens": 1024, "temperature": 0.7}
50
+ )
51
+
52
+ # Extract the generated text from the response
53
  answer = response[0]["generated_text"] if isinstance(response, list) and "generated_text" in response[0] else str(response)
 
54
 
55
  return answer