try:
    from transformers import pipeline, AutoTokenizer

    model_id = "HuggingFaceTB/SmolLM3-3B"

    tokenizer = AutoTokenizer.from_pretrained(model_id)

    pipe = pipeline("text-generation", model=model_id, tokenizer=tokenizer)

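    # Default mode: the chat template enables SmolLM3's extended thinking.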
    messages = [
        {"role": "user", "content": "Give me a brief explanation of gravity in simple terms."},
    ]
    pipe(messages)

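    # A "/no_think" system message disables extended thinking.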
    messages = [
        {"role": "system", "content": "/no_think"},
        {"role": "user", "content": "Give me a brief explanation of gravity in simple terms."},
    ]
    pipe(messages)

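    # Lower-level path: load the tokenizer and model explicitly.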
    from transformers import AutoModelForCausalLM

    model_name = "HuggingFaceTB/SmolLM3-3B"
    device = "cuda"

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name).to(device)

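    # Thinking mode (the default): render the chat template, then generate.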
| prompt = "Give me a brief explanation of gravity in simple terms." |
| messages_think = [ |
| {"role": "user", "content": prompt} |
| ] |
| |
| text = tokenizer.apply_chat_template( |
| messages_think, |
| tokenize=False, |
| add_generation_prompt=True, |
| ) |
| model_inputs = tokenizer([text], return_tensors="pt").to(model.device) |
| |
    generated_ids = model.generate(**model_inputs, max_new_tokens=32768)

    # Decode only the newly generated tokens, skipping the prompt.
    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):]
    print(tokenizer.decode(output_ids, skip_special_tokens=True))

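    # Repeat the generation with extended thinking disabled via "/no_think".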
| prompt = "Give me a brief explanation of gravity in simple terms." |
| messages = [ |
| {"role": "system", "content": "/no_think"}, |
| {"role": "user", "content": prompt} |
| ] |
| |
| text = tokenizer.apply_chat_template( |
| messages, |
| tokenize=False, |
| add_generation_prompt=True, |
| ) |
| |
| model_inputs = tokenizer([text], return_tensors="pt").to(model.device) |
| |
    generated_ids = model.generate(**model_inputs, max_new_tokens=32768)

    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):]
    print(tokenizer.decode(output_ids, skip_special_tokens=True))

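    # Tool calling: tool schemas are passed to the chat template as `xml_tools`.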
    tools = [
        {
            "name": "get_weather",
            "description": "Get the weather in a city",
            "parameters": {
                "type": "object",
                "properties": {
                    "city": {
                        "type": "string",
                        "description": "The city to get the weather for",
                    }
                },
            },
        }
    ]

    messages = [
        {
            "role": "user",
            "content": "Hello! How is the weather today in Copenhagen?",
        }
    ]

    inputs = tokenizer.apply_chat_template(
        messages,
        enable_thinking=False,  # run the tool-call demo without extended thinking
        xml_tools=tools,
        add_generation_prompt=True,
        tokenize=True,
        return_tensors="pt",
    ).to(model.device)

    outputs = model.generate(inputs)
    print(tokenizer.decode(outputs[0]))

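    # Record success; the `finally` block uploads whichever file was written.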
    with open('HuggingFaceTB_SmolLM3-3B_0.txt', 'w') as f:
        f.write('Everything was good in HuggingFaceTB_SmolLM3-3B_0.txt')
except Exception:
    # On failure, write the full traceback to the status file instead.
    import traceback
    with open('HuggingFaceTB_SmolLM3-3B_0.txt', 'w') as f:
        traceback.print_exc(file=f)
finally:
    # Upload the status file to the Hub whether the run succeeded or failed.
    from huggingface_hub import upload_file
    upload_file(
        path_or_fileobj='HuggingFaceTB_SmolLM3-3B_0.txt',
        repo_id='model-metadata/custom_code_execution_files',
        path_in_repo='HuggingFaceTB_SmolLM3-3B_0.txt',
        repo_type='dataset',
    )