import getpass

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_huggingface import HuggingFacePipeline
from langchain.tools import Tool

# Hugging Face token for the gated Mistral checkpoint
token = getpass.getpass("Token: ")
members = ["Researcher", "Coder"]
system_prompt = (
    "You are a supervisor tasked with managing a conversation between the"
    " following workers: {members}. Given the following user request,"
    " respond with the worker to act next. Each worker will perform a"
    " task and respond with their results and status. When finished,"
    " respond with FINISH."
)
# Our team supervisor is an LLM node. It just picks the next agent to process
# and decides when the work is completed.
options = ["FINISH"] + members

# Using OpenAI-style function calling can make output parsing easier for us;
# a sketch of binding this schema follows the definition below.
function_def = {
    "name": "route",
    "description": "Select the next role.",
    "parameters": {
        "title": "routeSchema",
        "type": "object",
        "properties": {
            "next": {
                "title": "Next",
                "anyOf": [
                    {"enum": options},
                ],
            }
        },
        "required": ["next"],
    },
}
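# Sketch of how the schema would actually be bound: this needs an API-backed,
# tool-calling chat model (e.g. ChatMistralAI, which the original imports);
# the local HuggingFacePipeline built below cannot bind functions. The model
# name is illustrative:
# from langchain_mistralai import ChatMistralAI
# api_llm = ChatMistralAI(model="mistral-large-latest")
# supervisor_chain = prompt | api_llm.with_structured_output(function_def)
# supervisor_chain.invoke({"messages": [...]})  # -> {"next": "Researcher"}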
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="messages"),
        (
            "system",
            "Given the conversation above, who should act next?"
            " Or should we FINISH? Select one of: {options}",
        ),
    ]
).partial(options=str(options), members=", ".join(members))
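# The MessagesPlaceholder means the prompt still needs a "messages" value at
# run time; a quick smoke test with an illustrative user message:
preview = prompt.invoke({"messages": [("user", "Summarize recent MoE papers")]})
print(preview.to_string())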
path = "mistralai/Mistral-7B-Instruct-v0.3"
model = AutoModelForCausalLM.from_pretrained(
    path,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
    device_map="auto",
    token=token,
)
tokenizer = AutoTokenizer.from_pretrained(path, token=token)
# Mistral ships without a pad token; reuse EOS rather than adding a new
# token, which would leave the embedding matrix out of sync
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "left"
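# If a brand-new [PAD] token were added instead (as the original did), the
# model's embeddings would need resizing to match the larger vocabulary:
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# model.resize_token_embeddings(len(tokenizer))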
pipe = pipeline(
    task="text-generation",
    model=model,
    tokenizer=tokenizer,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    pad_token_id=tokenizer.eos_token_id,
    max_new_tokens=260,
    temperature=0.7,
    do_sample=True,
    return_full_text=False,  # return only the completion, not the echoed prompt
)
llm = HuggingFacePipeline(pipeline=pipe)
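# Optional sketch: wrapping the pipeline in ChatHuggingFace renders chat
# messages with Mistral's own chat template instead of concatenating them as
# plain text, which usually improves instruction following:
# from langchain_huggingface import ChatHuggingFace
# chat_llm = ChatHuggingFace(llm=llm)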
def custom_function(input_text: str) -> str:
    # Example tool logic: mark the input as handled
    return f"Processed: {input_text}"

# Tool is constructed with a name, a callable, and a description; it is not
# subclassed with a bare `call` method
my_tool = Tool(
    name="process_text",
    func=custom_function,
    description="Post-processes the supervisor's raw routing output.",
)
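# Equivalent sketch using the @tool decorator (the function name and
# docstring become the tool's name and description):
# from langchain_core.tools import tool
#
# @tool
# def process_text(input_text: str) -> str:
#     """Post-process the supervisor's raw routing output."""
#     return f"Processed: {input_text}"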
# Tools are Runnables, so the tool is piped directly; a bare list like
# [my_tool] is not a valid LCEL component
chain = prompt | llm | my_tool

# Define the input text; the MessagesPlaceholder means the chain expects a
# "messages" list, not a bare string
input_text = "Your input text here"
result = chain.invoke({"messages": [("user", input_text)]})
print(result)
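# The local pipeline returns free-form text rather than a function call, so
# in a full supervisor loop the chosen worker has to be recovered by parsing.
# A minimal sketch, matching the first routing option the completion mentions:
def parse_route(completion: str) -> str:
    for option in options:
        if option in completion:
            return option
    return "FINISH"  # nothing matched: stop the loop

raw = (prompt | llm).invoke({"messages": [("user", input_text)]})
print(parse_route(raw))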