Instructions for using principled-intelligence/claim-extraction with libraries, inference providers, notebooks, and local apps. Follow the links below to get started.
- Libraries
  - Transformers
How to use principled-intelligence/claim-extraction with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="principled-intelligence/claim-extraction")
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)
```

```python
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("principled-intelligence/claim-extraction")
model = AutoModelForCausalLM.from_pretrained("principled-intelligence/claim-extraction")

messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```

- Notebooks
  - Google Colab
  - Kaggle
- Local Apps
  - vLLM
How to use principled-intelligence/claim-extraction with vLLM:
Install from pip and serve the model
```bash
# Install vLLM from pip:
pip install vllm

# Start the vLLM server:
vllm serve "principled-intelligence/claim-extraction"

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/chat/completions" \
    -H "Content-Type: application/json" \
    --data '{
        "model": "principled-intelligence/claim-extraction",
        "messages": [
            {
                "role": "user",
                "content": "What is the capital of France?"
            }
        ]
    }'
```

Use Docker
```bash
docker model run hf.co/principled-intelligence/claim-extraction
```
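Once the vLLM server is running (e.g. via `vllm serve` above), any OpenAI-compatible client can call it. A minimal sketch using the official `openai` Python package; the `api_key` value is a placeholder, since the local server does not check it, and the port assumes vLLM's default of 8000:

```python
# Minimal sketch: call the local vLLM server through its OpenAI-compatible API.
# Assumes `vllm serve "principled-intelligence/claim-extraction"` is running on
# the default port 8000; the API key is a dummy value required by the client.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="principled-intelligence/claim-extraction",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
)
print(response.choices[0].message.content)
```

The same pattern works against the SGLang server below by swapping the port to 30000.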
  - SGLang
How to use principled-intelligence/claim-extraction with SGLang:
Install from pip and serve the model
```bash
# Install SGLang from pip:
pip install sglang

# Start the SGLang server:
python3 -m sglang.launch_server \
    --model-path "principled-intelligence/claim-extraction" \
    --host 0.0.0.0 \
    --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
    -H "Content-Type: application/json" \
    --data '{
        "model": "principled-intelligence/claim-extraction",
        "messages": [
            {
                "role": "user",
                "content": "What is the capital of France?"
            }
        ]
    }'
```

Use Docker images
```bash
docker run --gpus all \
    --shm-size 32g \
    -p 30000:30000 \
    -v ~/.cache/huggingface:/root/.cache/huggingface \
    --env "HF_TOKEN=<secret>" \
    --ipc=host \
    lmsysorg/sglang:latest \
    python3 -m sglang.launch_server \
    --model-path "principled-intelligence/claim-extraction" \
    --host 0.0.0.0 \
    --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
    -H "Content-Type: application/json" \
    --data '{
        "model": "principled-intelligence/claim-extraction",
        "messages": [
            {
                "role": "user",
                "content": "What is the capital of France?"
            }
        ]
    }'
```
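For programmatic access with incremental output, the same endpoint also supports the standard OpenAI-compatible streaming protocol. A minimal sketch using the `requests` package, assuming the SGLang server above is listening on port 30000:

```python
# Minimal sketch: stream tokens from the local SGLang server, assuming it is
# running on port 30000 as started above. Uses the OpenAI-compatible
# server-sent-events format ("data: {...}" lines, terminated by [DONE]).
import json
import requests

response = requests.post(
    "http://localhost:30000/v1/chat/completions",
    json={
        "model": "principled-intelligence/claim-extraction",
        "messages": [{"role": "user", "content": "What is the capital of France?"}],
        "stream": True,
    },
    stream=True,
)
for line in response.iter_lines():
    if not line or not line.startswith(b"data: "):
        continue
    payload = line[len(b"data: "):]
    if payload == b"[DONE]":
        break
    chunk = json.loads(payload)
    delta = chunk["choices"][0]["delta"].get("content")
    if delta:
        print(delta, end="", flush=True)
print()
```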
  - Docker Model Runner

How to use principled-intelligence/claim-extraction with Docker Model Runner:
```bash
docker model run hf.co/principled-intelligence/claim-extraction
```
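Beyond the interactive chat that the command above opens, `docker model run` also accepts an optional one-shot prompt argument; a small sketch, where the prompt string is only an example:

```bash
# One-shot prompt instead of an interactive chat session;
# the prompt text here is only an example.
docker model run hf.co/principled-intelligence/claim-extraction "What is the capital of France?"
```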
The model repository also defines a custom `ClaimExtractionPipeline`, a `transformers.Pipeline` subclass that handles prompt construction and decoding:

```python
import torch
import transformers
from transformers import Pipeline

try:
    import orbitals.claim_extractor
    import orbitals.claim_extractor.modeling
    import orbitals.claim_extractor.prompting
    import orbitals.types
except ModuleNotFoundError:
    raise ImportError(
        "orbitals.claim_extractor module not found. Please install it: `pip install orbitals`"
    )


class ClaimExtractionPipeline(Pipeline):
    def __init__(
        self,
        model,
        tokenizer=None,
        skip_evidences: bool = True,
        max_new_tokens: int = 20_000,
        do_sample: bool = True,
        temperature: float = 0.7,
        repetition_penalty: float = 1.0,
        top_p: float = 0.8,
        top_k: int = 20,
        min_p: float = 0.0,
        **kwargs,
    ):
        # Accept either loaded objects or Hub identifiers for model/tokenizer.
        if tokenizer is None and isinstance(model, str):
            tokenizer = transformers.AutoTokenizer.from_pretrained(model)
        elif isinstance(tokenizer, str):
            tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer)
        if isinstance(model, str):
            model = transformers.AutoModelForCausalLM.from_pretrained(
                model, dtype="auto", device_map="auto"
            )

        # Set left padding for decoder-only models (required for batched generation)
        if tokenizer is not None:
            tokenizer.padding_side = "left"
            # Ensure pad token is set (use eos_token if pad_token doesn't exist)
            if tokenizer.pad_token is None:
                tokenizer.pad_token = tokenizer.eos_token

        # Generation settings used by _forward; set before super().__init__
        # because Pipeline.__init__ calls _sanitize_parameters.
        self.skip_evidences = skip_evidences
        self.max_new_tokens = max_new_tokens
        self.do_sample = do_sample
        self.temperature = temperature
        self.repetition_penalty = repetition_penalty
        self.top_p = top_p
        self.top_k = top_k
        self.min_p = min_p
        super().__init__(model, tokenizer, **kwargs)

    def _sanitize_parameters(
        self,
        **kwargs,
    ):
        # Split call-time kwargs into (preprocess, forward, postprocess) kwargs;
        # only `skip_evidences` is configurable per call, and it goes to preprocess.
        preprocess_kwargs = {
            "skip_evidences": kwargs.get("skip_evidences", self.skip_evidences)
        }
        return (
            preprocess_kwargs,
            {},
            {},
        )

    def preprocess(
        self,
        inputs: tuple[
            orbitals.claim_extractor.modeling.ClaimExtractorInput,
            str | orbitals.types.AIServiceDescription | None,
        ],
        skip_evidences: bool = True,
    ):
        # Render the (conversation, AI-service description) pair into a prompt string.
        conversation, ai_service_description = inputs
        model_messages = orbitals.claim_extractor.prompting.prepare_messages(
            conversation,
            ai_service_description,
            skip_evidences=skip_evidences,
        )
        text = self.tokenizer.apply_chat_template(
            model_messages,
            tokenize=False,  # we are not tokenizing so as to enable batching
            add_generation_prompt=True,
            enable_thinking=False,
        )
        return {"text": text}

    def _forward(self, model_inputs):
        # Tokenize the rendered prompt(s) and generate with the stored settings.
        tokenized = self.tokenizer(
            model_inputs["text"],
            return_tensors="pt",
            padding=True,
            truncation=True,
        ).to(self.device)
        with torch.inference_mode():
            outputs = self.model.generate(
                **tokenized,
                max_new_tokens=self.max_new_tokens,
                do_sample=self.do_sample,
                temperature=self.temperature,
                repetition_penalty=self.repetition_penalty,
                top_p=self.top_p,
                top_k=self.top_k,
                min_p=self.min_p,
            )
        return {
            "output_ids": outputs,
            "input_ids": tokenized["input_ids"],
        }

    def postprocess(self, model_outputs):
        output_ids = model_outputs["output_ids"]
        input_ids = model_outputs["input_ids"]
        # Decode each output in the batch
        results = []
        for i in range(output_ids.shape[0]):
            # Skip the input tokens to get only the generated text
            generated_ids = output_ids[i][input_ids.shape[1]:]
            generated_output = self.tokenizer.decode(
                generated_ids,
                skip_special_tokens=True,
            )
            results.append({"generated_text": generated_output})
        return results
```
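A hypothetical invocation of this pipeline. As the `preprocess` signature shows, the pipeline expects a `(conversation, ai_service_description)` tuple; how to construct the `ClaimExtractorInput` conversation object is defined by the `orbitals` package and not shown here, and the service description string is only an example:

```python
# Hypothetical usage sketch for ClaimExtractionPipeline. Building a
# ClaimExtractorInput is defined by the `orbitals` package and not shown here;
# the service description string is only an example.
pipe = ClaimExtractionPipeline(
    model="principled-intelligence/claim-extraction",  # tokenizer loaded from the same id
    skip_evidences=True,
)

conversation = ...  # an orbitals.claim_extractor.modeling.ClaimExtractorInput
service_description = "A general-purpose AI chat assistant."

# The pipeline takes a (conversation, ai_service_description) tuple.
result = pipe((conversation, service_description))
print(result[0]["generated_text"])
```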