moo100 committed on
Commit
1168d51
·
verified ·
1 Parent(s): 4d93cf4

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +10 -10
README.md CHANGED
@@ -48,31 +48,31 @@ import torch
48
  from unsloth import FastLanguageModel
49
  from transformers import AutoTokenizer
50
 
51
- # Load model and tokenizer
52
  model_path = "moo100/DeepSeek-R1-telecom-chatbot"
53
  model, tokenizer = FastLanguageModel.from_pretrained(model_path, max_seq_length=1024, dtype=None)
54
 
55
- # Optimize for fast inference
56
  model = FastLanguageModel.for_inference(model)
57
 
58
- # Move model to GPU if available
59
  device = "cuda" if torch.cuda.is_available() else "cpu"
60
  model.to(device)
61
 
62
- # Define system instruction for guided response
63
  system_instruction = """You are an AI assistant. Answer user questions concisely and factually.
64
  Do NOT role-play as a customer service agent. Only answer the user's query."""
65
 
66
- # Define user input
67
  user_input = "What are the benefits of 5G?"
68
 
69
- # Construct full prompt
70
  full_prompt = f"{system_instruction}\n\nUser: {user_input}\nAssistant:"
71
 
72
- # Tokenize input
73
  inputs = tokenizer(full_prompt, return_tensors="pt").to(device)
74
 
75
- # Generate response
76
  outputs = model.generate(
77
  input_ids=inputs.input_ids,
78
  attention_mask=inputs.attention_mask,
@@ -83,7 +83,7 @@ outputs = model.generate(
83
  eos_token_id=tokenizer.eos_token_id,
84
  )
85
 
86
- # Decode and print response
87
  response = tokenizer.decode(outputs[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True)
88
  print(response.strip())
89
 
@@ -94,7 +94,7 @@ print(response.strip())
94
 
95
  <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
96
 
97
-
98
 
99
  ### Training Procedure
100
 
 
48
  from unsloth import FastLanguageModel
49
  from transformers import AutoTokenizer
50
 
51
+ # Load model and tokenizer
52
  model_path = "moo100/DeepSeek-R1-telecom-chatbot"
53
  model, tokenizer = FastLanguageModel.from_pretrained(model_path, max_seq_length=1024, dtype=None)
54
 
55
+ # Optimize for fast inference
56
  model = FastLanguageModel.for_inference(model)
57
 
58
+ # Move model to GPU if available
59
  device = "cuda" if torch.cuda.is_available() else "cpu"
60
  model.to(device)
61
 
62
+ # Define system instruction for guided response
63
  system_instruction = """You are an AI assistant. Answer user questions concisely and factually.
64
  Do NOT role-play as a customer service agent. Only answer the user's query."""
65
 
66
+ # Define user input
67
  user_input = "What are the benefits of 5G?"
68
 
69
+ # Construct full prompt
70
  full_prompt = f"{system_instruction}\n\nUser: {user_input}\nAssistant:"
71
 
72
+ # Tokenize input
73
  inputs = tokenizer(full_prompt, return_tensors="pt").to(device)
74
 
75
+ # Generate response
76
  outputs = model.generate(
77
  input_ids=inputs.input_ids,
78
  attention_mask=inputs.attention_mask,
 
83
  eos_token_id=tokenizer.eos_token_id,
84
  )
85
 
86
+ # Decode and print response
87
  response = tokenizer.decode(outputs[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True)
88
  print(response.strip())
89
 
 
94
 
95
  <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
96
 
97
+ talkmap/telecom-conversation-corpus
98
 
99
  ### Training Procedure
100