1-1-3-8 committed on
Commit
2830e08
·
verified ·
1 Parent(s): ca5bc7d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -7,8 +7,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, LogitsProcessor, L
7
 
8
 
9
  # === Model Configuration ===
10
- MODEL_ID = os.getenv("MODEL_ID", "llm-rna-api-rmit/rna-structure-model")
11
- HF_TOKEN = os.getenv("HF_TOKEN", None)
12
  TOKENIZER = None
13
  MODEL = None
14
  VALID_CHARS = re.compile(r"^[AUCG]+$")
@@ -304,6 +304,7 @@ def _precompute_can_open(seq, min_loop=3, allow_gu=True):
304
 
305
 
306
  # === Balanced Parentheses Processor ===
 
307
  class BalancedParenProcessor(LogitsProcessor):
308
  """Custom logits processor that ensures balanced parentheses during structure generation."""
309
  def __init__(self, lp_id, rp_id, dot_id, total_len, can_open,
@@ -372,7 +373,7 @@ def _top_p_sample(logits, top_p=0.5, temperature=0.4):
372
  idx = torch.multinomial(sorted_probs, 1)
373
  return sorted_idx.gather(-1, idx).squeeze(-1)
374
 
375
-
376
  def _generate_db(seq, top_p=0.5, temperature=0.4, min_loop=3, greedy=False):
377
  if TOKENIZER is None or MODEL is None:
378
  raise RuntimeError("Model not initialized — call init_model() first.")
 
7
 
8
 
9
  # === Model Configuration ===
10
+ MODEL_ID = os.getenv("MODEL_ID", "llm-rna-api-rmit/rna-structure-model") # Model can be changed, but it may not work without modifying the eval code
11
+ HF_TOKEN = os.getenv("HF_TOKEN", None) # Currently not required, as the model is public
12
  TOKENIZER = None
13
  MODEL = None
14
  VALID_CHARS = re.compile(r"^[AUCG]+$")
 
304
 
305
 
306
  # === Balanced Parentheses Processor ===
307
+ # dot_bias, paren_penalty and window may be changed to adjust the model's biases and improve accuracy
308
  class BalancedParenProcessor(LogitsProcessor):
309
  """Custom logits processor that ensures balanced parentheses during structure generation."""
310
  def __init__(self, lp_id, rp_id, dot_id, total_len, can_open,
 
373
  idx = torch.multinomial(sorted_probs, 1)
374
  return sorted_idx.gather(-1, idx).squeeze(-1)
375
 
376
+ # top_p, temperature and min_loop can be changed to adjust the model's biases
377
  def _generate_db(seq, top_p=0.5, temperature=0.4, min_loop=3, greedy=False):
378
  if TOKENIZER is None or MODEL is None:
379
  raise RuntimeError("Model not initialized — call init_model() first.")