Instructions for using Intel/NeuroPrompts with libraries, inference providers, notebooks, and local apps. Follow the links below to get started.
- Libraries
- Transformers
How to use Intel/NeuroPrompts with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="Intel/NeuroPrompts")

# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Intel/NeuroPrompts")
model = AutoModelForCausalLM.from_pretrained("Intel/NeuroPrompts")
```
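The pipeline can then be called directly on a plain prompt. A minimal sketch, reusing the example prompt from the model card; the generation settings here are illustrative assumptions, and a plain `generate` call will not apply the constrained NeuroLogic decoding used by the full NeuroPrompts application:

```python
# Expand a plain-text prompt into an optimized text-to-image prompt
result = pipe("A boy and his dog", max_new_tokens=77, num_beams=5, do_sample=False)
print(result[0]["generated_text"])
```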
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- vLLM
How to use Intel/NeuroPrompts with vLLM:
Install from pip and serve the model
```shell
# Install vLLM from pip:
pip install vllm

# Start the vLLM server:
vllm serve "Intel/NeuroPrompts"

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "Intel/NeuroPrompts",
    "prompt": "Once upon a time,",
    "max_tokens": 512,
    "temperature": 0.5
  }'
```
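Because the vLLM server is OpenAI-compatible, the official `openai` Python client can be used in place of curl. A minimal sketch, assuming the server above is running on port 8000 (the API key is a placeholder, since vLLM does not require one by default):

```python
from openai import OpenAI

# Point the client at the local vLLM server instead of api.openai.com
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

completion = client.completions.create(
    model="Intel/NeuroPrompts",
    prompt="Once upon a time,",
    max_tokens=512,
    temperature=0.5,
)
print(completion.choices[0].text)
```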
Use Docker

```shell
docker model run hf.co/Intel/NeuroPrompts
```
- SGLang
How to use Intel/NeuroPrompts with SGLang:
Install from pip and serve the model
```shell
# Install SGLang from pip:
pip install sglang

# Start the SGLang server:
python3 -m sglang.launch_server \
  --model-path "Intel/NeuroPrompts" \
  --host 0.0.0.0 \
  --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "Intel/NeuroPrompts",
    "prompt": "Once upon a time,",
    "max_tokens": 512,
    "temperature": 0.5
  }'
```
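The SGLang server exposes the same OpenAI-compatible `/v1/completions` route, so the request can also be made from Python with `requests`. A sketch mirroring the curl call above, assuming the server is running on port 30000:

```python
import requests

# Same payload as the curl example, sent from Python
response = requests.post(
    "http://localhost:30000/v1/completions",
    json={
        "model": "Intel/NeuroPrompts",
        "prompt": "Once upon a time,",
        "max_tokens": 512,
        "temperature": 0.5,
    },
)
print(response.json()["choices"][0]["text"])
```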
Use Docker images

```shell
docker run --gpus all \
  --shm-size 32g \
  -p 30000:30000 \
  -v ~/.cache/huggingface:/root/.cache/huggingface \
  --env "HF_TOKEN=<secret>" \
  --ipc=host \
  lmsysorg/sglang:latest \
  python3 -m sglang.launch_server \
    --model-path "Intel/NeuroPrompts" \
    --host 0.0.0.0 \
    --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "Intel/NeuroPrompts",
    "prompt": "Once upon a time,",
    "max_tokens": 512,
    "temperature": 0.5
  }'
```
- Docker Model Runner
How to use Intel/NeuroPrompts with Docker Model Runner:
```shell
docker model run hf.co/Intel/NeuroPrompts
```
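`docker model run` pulls the model and opens an interactive chat session in the terminal. Docker Model Runner can also serve an OpenAI-compatible API; the sketch below is an assumption-heavy example (the host TCP port 12434, the `/engines/v1/completions` route, and the `hf.co/Intel/NeuroPrompts` model identifier are Docker Model Runner defaults, not details from this page; consult the Docker Model Runner documentation for your setup):

```python
import requests

# Assumed setup: Docker Model Runner with host-side TCP access enabled
# (e.g. via Docker Desktop settings), listening on its default port 12434.
response = requests.post(
    "http://localhost:12434/engines/v1/completions",
    json={
        "model": "hf.co/Intel/NeuroPrompts",
        "prompt": "Once upon a time,",
        "max_tokens": 512,
    },
)
print(response.json()["choices"][0]["text"])
```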
Update README.md

README.md CHANGED

@@ -13,104 +13,9 @@ NeuroPrompts was accepted to EACL 2024.

## Usage

Removed (the previous self-contained usage example, which required the `neurologic` module from the NeuroPrompts code base to be available locally):

```
pip install torch torchvision gradio==3.39.0 transformers diffusers flair==0.12.2 numpy tqdm webdataset pytorch_lightning datasets openai-clip scipy==1.10.1
```

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import sys
import os
# from categories import styles_list, artists_list, formats_list, perspective_list, booster_list, vibe_list

# Set environment variables and PyTorch configurations
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)

# Set the path for the 'neurologic' module
neurologic_path = os.path.abspath('neurologic/')
os.environ['NEUROLOGIC_PATH'] = neurologic_path
sys.path.insert(0, neurologic_path)
from neurologic_pe import generate_neurologic

# Load the pre-trained model and tokenizer
model_name = "Intel/NeuroPrompts"
model_type = 'finetuned'
# model_type = 'ppo'
rand_seed = 1535471403

model = AutoModelForCausalLM.from_pretrained(model_name).to('cuda')
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token

# Set the inference parameters
length_penalty = 1.0
max_length = 77
beam_size = 5
inference_steps = 25

# Initialize the input constraints
curr_input_artist = None
curr_input_style = None
curr_input_format = None
curr_input_perspective = None
curr_input_booster = None
curr_input_vibe = None
curr_input_negative = ""

# Set the plain text input
plain_text = "A boy and his dog"

# Construct the positive and negative constraints
constraints = []
for clause in [curr_input_artist, curr_input_style, curr_input_format, curr_input_perspective, curr_input_booster, curr_input_vibe]:
    if clause is not None and len(clause) > 0:
        constraints.append([clause.lower(), clause.title()])

print(f"Positive constraints:{constraints}")

neg_constraints = []
neg_inputs = [i.strip() for i in curr_input_negative.split(',')]
for clause in neg_inputs:
    if clause is not None and len(clause) > 0:
        neg_constraints += [clause.lower(), clause.title()]

print(f"Negative constraints:{neg_constraints}")

# Generate the output using the 'generate_neurologic' function
res = generate_neurologic(plain_text,
                          model=model,
                          tokenizer=tokenizer,
                          model_type=model_type,
                          constraint_method='clusters',
                          clusters_file='/home/philliph/mcai/mm-counterfactuals/prompt_engineering/template_keywords.json',
                          user_constraints=constraints if len(constraints) > 0 else None,
                          negative_constraints=neg_constraints if len(neg_constraints) > 0 else None,
                          length_penalty=float(length_penalty),
                          max_tgt_length=int(max_length),
                          beam_size=int(beam_size),
                          num_return_sequences=int(beam_size),
                          ngram_size=2,
                          n_per_cluster=1,
                          seed=None)[0][0]

# Print the result
print(f"\nResult:\n{res}")
```
Added (the new usage instructions):

This model is intended to be used within the NeuroPrompts application.

Please see our GitHub repository for instructions on how to run NeuroPrompts: https://github.com/IntelLabs/multimodal_cognitive_ai/tree/main/Demos/NeuroPrompts