
This model uses the ONNX external-data format, so it cannot be loaded directly from the hub; you must download all of the files yourself. It can run on a mobile device or a mini PC, and it is much faster than the non-quantized version. Sample code for using this model:

import time

from transformers import AutoTokenizer, TextStreamer, logging, pipeline
from optimum.onnxruntime import ORTModelForCausalLM

logging.set_verbosity_error()

# Local directory holding the downloaded model files
model_id = "pathto/Orca-Mini-3B-Onnx-Quantized"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = ORTModelForCausalLM.from_pretrained(model_id)
# Stream tokens to stdout as they are generated, hiding the echoed prompt
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# Wrap the ONNX model in a standard text-generation pipeline
onnx_gen = pipeline("text-generation", model=model, tokenizer=tokenizer, streamer=streamer, return_text=True)

while True:
    prompt = input("\nBrian: ")
    print("Bot:\n")
    t0 = time.time()
    gen = onnx_gen(prompt)
    elapsed = time.time() - t0
    generated = gen[0]["generated_text"]
    # Rough throughput: whitespace-separated words per second
    print(f"{elapsed:.1f} s, {len(generated.split()) / elapsed:.1f} words/sec")
    

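All of the repository's files, including the large external-data files, must be on disk before loading. A minimal download sketch using huggingface_hub's snapshot_download (the repo id below is the placeholder used above; substitute the real one):

from huggingface_hub import snapshot_download

# Downloads every file in the repo, including the external-data files
# that sit next to the .onnx graph, and returns the local directory.
local_dir = snapshot_download(repo_id="pathto/Orca-Mini-3B-Onnx-Quantized")
print(local_dir)  # point model_id at this directory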
My sample code for quantizing models on Colab:

import os
from pathlib import Path

from transformers import AutoTokenizer
from optimum.onnxruntime import ORTModelForCausalLM, ORTOptimizer, ORTQuantizer
from optimum.onnxruntime.configuration import AutoQuantizationConfig, OptimizationConfig
Gbase="/gdrive/MyDrive/onnx/"
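# On Colab, mount Google Drive first so the Gbase path above exists
# (a minimal sketch; the custom /gdrive mount point matches Gbase):
from google.colab import drive
drive.mount('/gdrive')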

model_checkpoint = "gpt2"
save_directory = Gbase+"onnx/gpt2_arm64"

tasks = [
    'TinyLlama/TinyLlama-1.1B-Chat-v0.6',
    'pankajmathur/orca_mini_3b',
    'Fredithefish/RedPajama-INCITE-Chat-3B-Instruction-Tuning-with-GPT-4',
    'CobraMamba/mamba-gpt-3b-v4',
    'WizardLM/WizardCoder-3B-V1.0',
    'GeneZC/MiniChat-3B',
]


def quantModel(model_checkpoint=model_checkpoint, save_directory=save_directory):
    # Save the tokenizer alongside the quantized model
    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
    tokenizer.save_pretrained(save_directory)

    # Export the checkpoint to ONNX
    ort_model = ORTModelForCausalLM.from_pretrained(model_checkpoint, export=True)

    # Graph-optimization config (prepared here but never applied in this script)
    optimizer = ORTOptimizer.from_pretrained(ort_model)
    optimization_config = OptimizationConfig(optimization_level=3)

    # Dynamic int8 quantization targeting ARM64 (mobile / mini PC)
    qconfig = AutoQuantizationConfig.arm64(is_static=False, per_channel=False)
    quantizer = ORTQuantizer.from_pretrained(ort_model)

    # Models over 2 GB need the ONNX external-data format
    quantizer.quantize(save_dir=save_directory, quantization_config=qconfig, use_external_data_format=True)

def doTasks(tasks=tasks, Gbase=Gbase):
    for model_checkpoint in tasks:
        save_directory = os.path.join(Gbase, Path(model_checkpoint).name)
        try:
            quantModel(model_checkpoint=model_checkpoint, save_directory=save_directory)
        except Exception:
            # Keep going if one model fails to export or quantize
            import traceback
            traceback.print_exc()


doTasks()
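
A quantized export can be sanity-checked by reloading it and generating a few tokens. A minimal sketch, assuming ORTQuantizer's usual output file name model_quantized.onnx and the orca_mini_3b output directory produced above:

from transformers import AutoTokenizer, pipeline
from optimum.onnxruntime import ORTModelForCausalLM

check_dir = os.path.join(Gbase, "orca_mini_3b")
tokenizer = AutoTokenizer.from_pretrained(check_dir)
# file_name selects the quantized graph inside the save directory
model = ORTModelForCausalLM.from_pretrained(check_dir, file_name="model_quantized.onnx")
gen = pipeline("text-generation", model=model, tokenizer=tokenizer)
print(gen("Hello, my name is", max_new_tokens=20)[0]["generated_text"])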