Text Generation
PEFT
Safetensors
GGUF
English
materialsanalyst-ai-7b
MaterialsAnalyst-AI-7B
materials-science
computational-materials
materials-analysis
chain-of-thought
reasoning-model
property-prediction
materials-discovery
crystal-structure
materials-informatics
scientific-ai
7b
quantized
fine-tuned
lora
json-mode
structured-output
materials-engineering
band-gap-prediction
computational-chemistry
materials-characterization
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# INSTRUCTIONS: Replace the JSON below with your material's properties
# Common data sources: materialsproject.org, DFT calculations, experimental databases
JSON_INPUT = """
{
  "material_id": "mp-8062",
  "formula": "SiC",
  "elements": ["Si", "C"],
  "spacegroup": "P63mc",
  "band_gap": 3.26,
  "formation_energy_per_atom": -0.73,
  "density": 3.21,
  "volume": 41.2,
  "nsites": 8,
  "is_stable": true,
  "elastic_modulus": 448,
  "bulk_modulus": 220,
  "thermal_expansion": 4.2e-06,
  "electron_affinity": 4.0,
  "ionization_energy": 6.7,
  "crystal_system": "Hexagonal",
  "magnetic_property": "Non-magnetic",
  "thermal_conductivity": 490,
  "specific_heat": 0.69,
  "is_superconductor": false,
  "band_gap_type": "Indirect"
}
"""
def load_model(model_path):
    # Load in half precision and let device_map="auto" place layers across
    # the available devices
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.float16,
        device_map="auto",
        trust_remote_code=True
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_path,
        trust_remote_code=True
    )
    return model, tokenizer

def generate_response(model, tokenizer, topic):
    topic = topic.strip()
    # Simple USER/ASSISTANT chat format expected by this script
    prompt = f"USER: {topic}\nASSISTANT:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=3000,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.1,
        do_sample=True
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only the text generated after the final "ASSISTANT:" marker
    return response.split("ASSISTANT:")[-1].strip()
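
# The repo's "json-mode" / "structured-output" tags suggest the analysis may
# come back as JSON. A minimal hypothetical helper (a sketch, not used by
# run() below): parse the outermost {...} span in the reply, falling back to
# the raw text if parsing fails.
def try_parse_json(response):
    import json
    start, end = response.find("{"), response.rfind("}")
    if start != -1 and end > start:
        try:
            return json.loads(response[start:end + 1])
        except json.JSONDecodeError:
            pass
    return response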

def run():
    model_path = "./"  # Path to the directory containing your model weight files
    model, tokenizer = load_model(model_path)
    result = generate_response(model, tokenizer, JSON_INPUT)
    print(result)

if __name__ == "__main__":
    run()
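
# --- Optional: loading as a PEFT / LoRA adapter ---
# This repo is tagged "peft" and "lora". If the files here are a LoRA adapter
# rather than fully merged weights, they can be applied on top of the base
# model with the peft library. A minimal sketch, left commented out;
# "BASE_MODEL_ID" is a placeholder (an assumption) for the actual base
# checkpoint this adapter was trained from.
#
#     from peft import PeftModel
#     base = AutoModelForCausalLM.from_pretrained(
#         "BASE_MODEL_ID", torch_dtype=torch.float16, device_map="auto"
#     )
#     model = PeftModel.from_pretrained(base, "./")
#     tokenizer = AutoTokenizer.from_pretrained("BASE_MODEL_ID")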