How to use from the Transformers library
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="llmware/slim-extract-phi-3", trust_remote_code=True)
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)
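The chat example above is the generic snippet; to trigger this model's extract behavior, the prompt needs the extract format described under "Prompt format" below. A minimal sketch, assuming the pipeline accepts a raw string prompt (the sample text is illustrative):

# minimal sketch - pass the extract prompt format as a raw string
raw_prompt = "<human>: Tesla stock declined 8% in premarket trading.\n<extract> company </extract>\n<bot>:"
result = pipe(raw_prompt, max_new_tokens=100, do_sample=False, return_full_text=False)
print(result[0]["generated_text"])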
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("llmware/slim-extract-phi-3", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("llmware/slim-extract-phi-3", trust_remote_code=True)
messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))

SLIM-EXTRACT-PHI-3

slim-extract-phi-3 implements a specialized, customizable function-calling 'extract' capability. It takes a context passage and a custom key as input, and outputs a python dictionary whose key corresponds to the custom key and whose value is a list of items extracted from the text for that key, e.g.,

    {'universities': ['Berkeley, Stanford, Yale, University of Florida, ...'] }

Prompt format:

function = "extract"
params = "{custom key}"
prompt = "<human> " + {text} + "\n" +
                      "<{function}> " + {params} + "</{function}>" + "\n<bot>:"

Transformers Script
import ast

from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("llmware/slim-extract-phi-3")
tokenizer = AutoTokenizer.from_pretrained("llmware/slim-extract-phi-3")

function = "extract"
params = "company"

text = "Tesla stock declined yesterday 8% in premarket trading after a poorly-received event in San Francisco yesterday, in which the company indicated a likely shortfall in revenue."  

prompt = "<human>: " + text + "\n" + f"<{function}> {params} </{function}>\n<bot>:"

inputs = tokenizer(prompt, return_tensors="pt")
start_of_input = len(inputs.input_ids[0])

outputs = model.generate(
    inputs.input_ids.to('cpu'),
    eos_token_id=tokenizer.eos_token_id,
    pad_token_id=tokenizer.eos_token_id,
    do_sample=True,
    temperature=0.3,
    max_new_tokens=100
)

output_only = tokenizer.decode(outputs[0][start_of_input:], skip_special_tokens=True)

print("output only: ", output_only)  

# here's the fun part - try to convert the llm string output into a python dictionary
try:
    output_dict = ast.literal_eval(output_only)
    print("success - converted to python dictionary automatically")
except (ValueError, SyntaxError):
    print("fail - could not convert to python dictionary automatically - ", output_only)
Using as Function Call in LLMWare
from llmware.models import ModelCatalog

# load the model through the llmware ModelCatalog and run a function call
slim_model = ModelCatalog().load_model("llmware/slim-extract-phi-3")
response = slim_model.function_call(text, params=["company"], function="extract")

print("llmware - llm_response: ", response)

Model Card Contact

Darren Oberst & llmware team

Join us on Discord
