MichaelP719 committed on
Commit
6827d3c
·
verified ·
1 Parent(s): f9627e9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -13
app.py CHANGED
@@ -58,12 +58,12 @@ ddg = DuckDuckGoSearchTool()
58
  # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
59
  # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
60
 
61
- model = HfApiModel(
62
- max_tokens=2096,
63
- temperature=0.5,
64
- model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
65
- custom_role_conversions=None,
66
- )
67
 
68
  # model = LiteLLMModel(
69
  # model_id="ollama_chat/qwen2.5:7b", # Ollama model identifier
@@ -72,13 +72,13 @@ custom_role_conversions=None,
72
  # temperature=0.5
73
  # )
74
 
75
- # model = TransformersModel(
76
- # model_id="TheBloke/vicuna-7B-1.1-HF", # or any other Hugging Face repo
77
- # device="cuda", # or "cpu"
78
- # max_new_tokens=1024, # how many tokens to generate
79
- # temperature=0.5 # same as before
80
- # # quantize="4bit" # optional if supported
81
- # )
82
 
83
  # Import tool from Hub
84
  image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
 
58
  # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
59
  # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
60
 
61
+ # model = HfApiModel(
62
+ # max_tokens=2096,
63
+ # temperature=0.5,
64
+ # model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
65
+ # custom_role_conversions=None,
66
+ # )
67
 
68
  # model = LiteLLMModel(
69
  # model_id="ollama_chat/qwen2.5:7b", # Ollama model identifier
 
72
  # temperature=0.5
73
  # )
74
 
75
+ model = TransformersModel(
76
+ model_id="TheBloke/vicuna-7B-1.1-HF", # or any other Hugging Face repo
77
+ device="cuda", # or "cpu"
78
+ max_new_tokens=1024, # how many tokens to generate
79
+ temperature=0.5 # same as before
80
+ # quantize="4bit" # optional if supported
81
+ )
82
 
83
  # Import tool from Hub
84
  image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)