Usage:

    from unsloth import FastLanguageModel
    max_seq_length = 2048  # maximum sequence length
    dtype = None  # None = auto-detect (float16 on T4/V100, bfloat16 on Ampere+)
    load_in_4bit = True  # 4-bit quantization to reduce memory usage
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name = "ai-nightcoder/llama3-uzb",  # the model produced by fine-tuning
        max_seq_length = max_seq_length,
        dtype = dtype,
        load_in_4bit = load_in_4bit,
    )
    FastLanguageModel.for_inference(model)  # enable Unsloth's native 2x faster inference
    
    # The prompt template must match the one used for training. It is the
    # Alpaca template in Uzbek: "Below is an instruction that describes a task,
    # paired with an input that provides further context. Write a response
    # that appropriately completes the request."
    alpaca_prompt = """Quyida vazifani tavsiflovchi ko'rsatma mavjud bo'lib, u qo'shimcha kontekstni ta'minlaydigan kiritish bilan bog'langan. So'rovni to'g'ri to'ldiradigan javob yozing.
    
    ### Instruction:
    {}
    
    ### Input:
    {}
    
    ### Response:
    {}"""
    
    inputs = tokenizer(
        [
            alpaca_prompt.format(
                "Tropik iqlimli uchta davlatni ayting.",  # instruction: "Name three countries with a tropical climate."
                "O'zbekiston, AQSH, Yaponiya",  # input: "Uzbekistan, USA, Japan"
                "",  # output - leave blank for generation
            )
        ],
        return_tensors = "pt",
    ).to("cuda")
    
    outputs = model.generate(**inputs, max_new_tokens = 64, use_cache = True)
    res = tokenizer.batch_decode(outputs)  # decoded strings include the prompt
    
    print(res)
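
The decoded output contains the full prompt followed by the model's completion. A minimal sketch of two common refinements, assuming the `model`, `tokenizer`, `inputs`, and `outputs` objects from above: streaming tokens as they are generated (via the standard `transformers` `TextStreamer`), and keeping only the text after the `### Response:` marker.

    from transformers import TextStreamer

    # Stream tokens to stdout as they are generated, instead of waiting
    # for the whole sequence to finish.
    text_streamer = TextStreamer(tokenizer)
    _ = model.generate(**inputs, streamer = text_streamer, max_new_tokens = 64, use_cache = True)

    # Or decode the finished sequence and keep only the generated answer
    # that follows the "### Response:" marker of the Alpaca template.
    decoded = tokenizer.batch_decode(outputs, skip_special_tokens = True)[0]
    response = decoded.split("### Response:")[-1].strip()
    print(response)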