doberst committed
Commit d83a7d9 · verified · 1 Parent(s): 54c2b3e

Update README.md

Files changed (1): README.md (+0, -30)
README.md CHANGED
@@ -69,36 +69,6 @@ The model generation output will be a string in the form of a well-formed python
 
      except:
          print("could not convert to python dictionary automatically - ", output_only)
-
- ## How to Get Started with the Model
-
- The fastest way to get started with SLIM is through direct import in transformers:
-
-     import ast
-     from transformers import AutoModelForCausalLM, AutoTokenizer
-
-     model = AutoModelForCausalLM.from_pretrained("llmware/slim-sentiment")
-     tokenizer = AutoTokenizer.from_pretrained("llmware/slim-sentiment")
-
-     text = "The markets declined for a second straight day on news of disappointing earnings."
-
-     keys = "sentiment"
-
-     prompt = "<human>: " + text + "\n" + "<classify> " + keys + "</classify>" + "\n<bot>: "
-
-     # huggingface standard generation script
-     inputs = tokenizer(prompt, return_tensors="pt")
-     start_of_output = len(inputs.input_ids[0])
-
-     outputs = model.generate(inputs.input_ids.to('cpu'), eos_token_id=tokenizer.eos_token_id,
-                              pad_token_id=tokenizer.eos_token_id, do_sample=True, temperature=0.3, max_new_tokens=100)
-
-     output_only = tokenizer.decode(outputs[0][start_of_output:], skip_special_tokens=True)
-
-     print("input text sample - ", text)
-     print("llm_response - ", output_only)
-
-     # where it gets interesting
 
 
  ## Using as Function Call in LLMWare
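
The context lines kept above are the tail of a try/except that survives this commit: the model's generation output is a string holding a well-formed Python dictionary, which the README converts with ast.literal_eval. A minimal standalone sketch of that conversion step, assuming the output_only variable from the removed snippet and an illustrative response string (the exact output format is not shown in this diff):

    import ast

    # illustrative value only - shaped like a slim-sentiment response string
    output_only = "{'sentiment': ['negative']}"

    try:
        # literal_eval parses the dictionary-shaped string without executing code
        response = ast.literal_eval(output_only)
        print("converted to python dictionary - ", response)
    except (ValueError, SyntaxError):
        print("could not convert to python dictionary automatically - ", output_only)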
 
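The "Using as Function Call in LLMWare" section itself sits below this hunk and is untouched by the commit. As a hedged sketch of that pattern, assuming the ModelCatalog loader and function_call interface that llmware's SLIM model cards describe (the model name, parameters, and function label here are assumptions, not taken from this diff):

    from llmware.models import ModelCatalog

    # load the sentiment model through the llmware catalog (name assumed)
    model = ModelCatalog().load_model("llmware/slim-sentiment")

    text = "The markets declined for a second straight day on news of disappointing earnings."

    # function_call wraps prompt construction, generation, and dictionary conversion
    response = model.function_call(text, params=["sentiment"], function="classify")

    print("llm_response - ", response)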