code stringlengths 17 6.64M |
|---|
def torch_default_param_init_fn_(module: nn.Module, verbose: int=0, **kwargs):
    """Reinitialize ``module`` using its own ``reset_parameters`` method, if it has one.

    Args:
        module: The module to (re)initialize in place.
        verbose: Emit an informational warning when > 1.
        **kwargs: Ignored; accepted so callers can pass a shared config dict.
    """
    del kwargs
    if verbose > 1:
        warnings.warn("Initializing network using module's reset_parameters attribute")
    if hasattr(module, 'reset_parameters'):
        module.reset_parameters()
|
def fused_init_helper_(module: nn.Module, init_fn_):
    """Apply ``init_fn_`` independently to each fused sub-block of ``module.weight``.

    ``module._fused`` must be ``(dim, split_points)``: the weight is partitioned
    along ``dim`` at those interior points (plus the implicit 0 and full size),
    and each slice is initialized on its own so every logical matrix in a fused
    layer (e.g. packed QKV) gets its proper fan.

    Raises:
        RuntimeError: If the module has no ``_fused`` attribute.
    """
    fused = getattr(module, '_fused', None)
    if fused is None:
        raise RuntimeError('Internal logic error')
    dim, interior_splits = fused
    bounds = (0, *interior_splits, module.weight.size(dim))
    for start, end in zip(bounds[:-1], bounds[1:]):
        index = [slice(None)] * module.weight.ndim
        index[dim] = slice(start, end)
        init_fn_(module.weight[index])
|
def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, verbose: int=0, **kwargs):
    """Initialize a single module's parameters based on its type.

    Intended for use with ``model.apply``: Linear, Embedding, registered norm
    classes, and MultiheadAttention get type-specific initialization driven by
    ``init_fn_``; any other module that owns parameters directly raises
    NotImplementedError.

    Args:
        module: The module to initialize in place.
        init_fn_: In-place initializer applied to weight tensors.
        n_layers: Network depth; the default residual rescale is sqrt(2 * n_layers).
        d_model: Model width; required only for fused-QKV MultiheadAttention.
        init_div_is_residual: If truthy, weights of modules flagged
            ``_is_residual`` are divided by a rescale factor after init.
            True -> sqrt(2 * n_layers); a number or numeric string -> that value;
            False disables the rescale.
        emb_init_std: If set, embeddings use N(0, emb_init_std) instead of init_fn_.
        emb_init_uniform_lim: If set (and emb_init_std is None), embeddings use
            U(a, b); a scalar x means U(-x, x).
        verbose: Emit informational warnings when > 1.
        **kwargs: Ignored; accepted for config-dict compatibility.
    """
    del kwargs
    if (verbose > 1):
        warnings.warn(f'If model has bias parameters they are initialized to 0.')
    init_div_is_residual = init_div_is_residual  # NOTE(review): no-op self-assignment; left untouched in this doc-only pass
    # Resolve the divisor applied to weights of modules flagged `_is_residual`.
    if (init_div_is_residual is False):
        div_is_residual = 1.0
    elif (init_div_is_residual is True):
        div_is_residual = math.sqrt((2 * n_layers))
    elif (isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int)):
        div_is_residual = init_div_is_residual
    elif (isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric()):
        div_is_residual = float(init_div_is_residual)
    else:
        div_is_residual = 1.0  # dead assignment: the raise below always fires in this branch
        raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')
    if (init_div_is_residual is not False):
        if (verbose > 1):
            warnings.warn((f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.'))
    if isinstance(module, nn.Linear):
        # Fused layers (e.g. packed QKV) carry a `_fused` attribute and are
        # initialized slice-by-slice so each logical matrix gets its own fan.
        if hasattr(module, '_fused'):
            fused_init_helper_(module, init_fn_)
        else:
            init_fn_(module.weight)
        if (module.bias is not None):
            torch.nn.init.zeros_(module.bias)
        if ((init_div_is_residual is not False) and getattr(module, '_is_residual', False)):
            with torch.no_grad():
                module.weight.div_(div_is_residual)
    elif isinstance(module, nn.Embedding):
        if (emb_init_std is not None):
            # Normal-distribution embedding init overrides init_fn_.
            std = emb_init_std
            if (std == 0):
                warnings.warn(f'Embedding layer initialized to 0.')
            emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
            if (verbose > 1):
                warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')
        elif (emb_init_uniform_lim is not None):
            # Uniform-distribution embedding init; a scalar limit means a symmetric range.
            lim = emb_init_uniform_lim
            if isinstance(lim, Sequence):
                if (len(lim) > 2):
                    raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')
                if (lim[0] == lim[1]):
                    warnings.warn(f'Embedding layer initialized to {lim[0]}.')
            else:
                if (lim == 0):
                    warnings.warn(f'Embedding layer initialized to 0.')
                lim = [(- lim), lim]
            (a, b) = lim  # NOTE(review): a 1-element Sequence passes the len check above but fails to unpack here
            emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
            if (verbose > 1):
                warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')
        else:
            # Fall back to the generic initializer.
            emb_init_fn_ = init_fn_
        emb_init_fn_(module.weight)
    elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):
        # Norm layers: weight -> 1, bias -> 0 (when those attributes exist).
        if (verbose > 1):
            warnings.warn(f'Norm weights are set to 1. If norm layer has a bias it is initialized to 0.')
        if (hasattr(module, 'weight') and (module.weight is not None)):
            torch.nn.init.ones_(module.weight)
        if (hasattr(module, 'bias') and (module.bias is not None)):
            torch.nn.init.zeros_(module.bias)
    elif isinstance(module, nn.MultiheadAttention):
        if module._qkv_same_embed_dim:
            # Packed in_proj_weight: initialize the Q, K, V thirds independently.
            assert (module.in_proj_weight is not None)
            assert ((module.q_proj_weight is None) and (module.k_proj_weight is None) and (module.v_proj_weight is None))
            assert (d_model is not None)
            _d = d_model
            splits = (0, _d, (2 * _d), (3 * _d))
            for (s, e) in zip(splits[:(- 1)], splits[1:]):
                init_fn_(module.in_proj_weight[s:e])
        else:
            # Separate Q/K/V projection weights.
            assert ((module.q_proj_weight is not None) and (module.k_proj_weight is not None) and (module.v_proj_weight is not None))
            assert (module.in_proj_weight is None)
            init_fn_(module.q_proj_weight)
            init_fn_(module.k_proj_weight)
            init_fn_(module.v_proj_weight)
        if (module.in_proj_bias is not None):
            torch.nn.init.zeros_(module.in_proj_bias)
        if (module.bias_k is not None):
            torch.nn.init.zeros_(module.bias_k)
        if (module.bias_v is not None):
            torch.nn.init.zeros_(module.bias_v)
        init_fn_(module.out_proj.weight)
        # The output projection is a residual branch; rescale it like other residual weights.
        if ((init_div_is_residual is not False) and getattr(module.out_proj, '_is_residual', False)):
            with torch.no_grad():
                module.out_proj.weight.div_(div_is_residual)
        if (module.out_proj.bias is not None):
            torch.nn.init.zeros_(module.out_proj.bias)
    else:
        # Modules that own parameters directly (not via children) are unsupported.
        for _ in module.parameters(recurse=False):
            raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')
|
def _normal_init_(std, mean=0.0):
return partial(torch.nn.init.normal_, mean=mean, std=std)
|
def _normal_param_init_fn_(module: nn.Module, std: float, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, verbose: int=0, **kwargs):
    """Initialize ``module`` with a zero-mean normal distribution of the given std.

    Thin wrapper over :func:`generic_param_init_fn_`; see it for the meaning of
    the shared parameters. Extra ``kwargs`` are accepted and ignored.
    """
    del kwargs
    if verbose > 1:
        warnings.warn(f'Using torch.nn.init.normal_ init fn mean=0.0, std={std}')
    generic_param_init_fn_(
        module=module,
        init_fn_=_normal_init_(std=std),
        d_model=d_model,
        n_layers=n_layers,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
|
def baseline_param_init_fn_(module: nn.Module, init_std: float, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, verbose: int=0, **kwargs):
    """Default scheme: normal init with a user-supplied std.

    Raises:
        ValueError: When ``init_std`` is None, since no sensible default exists.
    """
    del kwargs
    if init_std is None:
        raise ValueError("You must set model.init_config['init_std'] to a float value to use the default initialization scheme.")
    _normal_param_init_fn_(
        module=module,
        std=init_std,
        d_model=d_model,
        n_layers=n_layers,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
|
def small_param_init_fn_(module: nn.Module, n_layers: int, d_model: int, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, verbose: int=0, **kwargs):
    """'Small init': normal init with std = sqrt(2 / (5 * d_model))."""
    del kwargs
    # Width-dependent std; shrinks as the model gets wider.
    std = math.sqrt(2 / (5 * d_model))
    _normal_param_init_fn_(
        module=module,
        std=std,
        d_model=d_model,
        n_layers=n_layers,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
|
def neox_param_init_fn_(module: nn.Module, n_layers: int, d_model: int, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, verbose: int=0, **kwargs):
    """Init scheme from section 2.3.1 of GPT-NeoX-20B.

    "An Open-Source Autoregressive Language Model", Black et al. (2022).
    See https://github.com/EleutherAI/gpt-neox/blob/9610391ab319403cef079b438edd016a2443af54/megatron/model/init_functions.py#L151
    and https://github.com/EleutherAI/gpt-neox/blob/main/megatron/model/transformer.py

    Small-init with the residual divisor fixed to n_layers / sqrt(10).
    """
    del kwargs
    residual_div = n_layers / math.sqrt(10)
    if verbose > 1:
        warnings.warn(f'setting init_div_is_residual to {residual_div}')
    small_param_init_fn_(
        module=module,
        d_model=d_model,
        n_layers=n_layers,
        init_div_is_residual=residual_div,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
|
def kaiming_uniform_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, init_gain: float=0, fan_mode: str='fan_in', init_nonlinearity: str='leaky_relu', verbose: int=0, **kwargs):
    """Kaiming-uniform initialization; see generic_param_init_fn_ for shared params."""
    del kwargs
    if verbose > 1:
        warnings.warn(f'Using nn.init.kaiming_uniform_ init fn with parameters: a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}')
    weight_init = partial(nn.init.kaiming_uniform_, a=init_gain, mode=fan_mode, nonlinearity=init_nonlinearity)
    generic_param_init_fn_(
        module=module,
        init_fn_=weight_init,
        d_model=d_model,
        n_layers=n_layers,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
|
def kaiming_normal_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, init_gain: float=0, fan_mode: str='fan_in', init_nonlinearity: str='leaky_relu', verbose: int=0, **kwargs):
    """Kaiming-normal initialization; see generic_param_init_fn_ for shared params."""
    del kwargs
    if verbose > 1:
        warnings.warn(f'Using nn.init.kaiming_normal_ init fn with parameters: a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}')
    weight_init = partial(torch.nn.init.kaiming_normal_, a=init_gain, mode=fan_mode, nonlinearity=init_nonlinearity)
    generic_param_init_fn_(
        module=module,
        init_fn_=weight_init,
        d_model=d_model,
        n_layers=n_layers,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
|
def xavier_uniform_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, init_gain: float=0, verbose: int=0, **kwargs):
    """Xavier-uniform initialization; see generic_param_init_fn_ for shared params."""
    del kwargs
    if verbose > 1:
        warnings.warn(f'Using torch.nn.init.xavier_uniform_ init fn with parameters: gain={init_gain}')
    generic_param_init_fn_(
        module=module,
        init_fn_=partial(torch.nn.init.xavier_uniform_, gain=init_gain),
        d_model=d_model,
        n_layers=n_layers,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
|
def xavier_normal_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, init_gain: float=0, verbose: int=0, **kwargs):
    """Xavier-normal initialization; see generic_param_init_fn_ for shared params."""
    # Consistency fix: every sibling init fn explicitly discards **kwargs;
    # this one silently retained them.
    del kwargs
    xavier_normal_ = partial(torch.nn.init.xavier_normal_, gain=init_gain)
    if (verbose > 1):
        warnings.warn((f'Using torch.nn.init.xavier_normal_ init fn with parameters: ' + f'gain={init_gain}'))
    generic_param_init_fn_(module=module, init_fn_=xavier_normal_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
|
@torch.inference_mode()
def generate_stream(tokenizer, model, params, device, context_len=2048, stream_interval=2):
    """Stream text from a causal LM, yielding the decoded output incrementally.

    Adapted from fastchat/serve/model_worker.py::generate_stream.

    Args:
        tokenizer: HF-style tokenizer (callable; has decode and eos_token_id).
        model: HF-style causal LM returning .logits and .past_key_values.
        params: dict with 'prompt' plus optional 'temperature',
            'max_new_tokens', and 'stop' (a stop string, or None).
        device: torch device string for input tensors.
        context_len: maximum context window; the prompt is truncated to fit.
        stream_interval: decode and yield every N generated tokens.

    Yields:
        The decoded text so far (prompt included), truncated at the stop string.
    """
    prompt = params['prompt']
    l_prompt = len(prompt)
    temperature = float(params.get('temperature', 1.0))
    max_new_tokens = int(params.get('max_new_tokens', 256))
    stop_str = params.get('stop', None)
    input_ids = tokenizer(prompt).input_ids
    output_ids = list(input_ids)
    # Reserve room for the new tokens plus a small safety margin of 8.
    max_src_len = context_len - max_new_tokens - 8
    input_ids = input_ids[-max_src_len:]
    for i in range(max_new_tokens):
        if i == 0:
            # Prime the KV cache with the full (truncated) prompt.
            out = model(torch.as_tensor([input_ids], device=device), use_cache=True)
            logits = out.logits
            past_key_values = out.past_key_values
        else:
            attention_mask = torch.ones(1, past_key_values[0][0].shape[-2] + 1, device=device)
            out = model(input_ids=torch.as_tensor([[token]], device=device), use_cache=True, attention_mask=attention_mask, past_key_values=past_key_values)
            logits = out.logits
            past_key_values = out.past_key_values
        last_token_logits = logits[0][-1]
        if temperature < 0.0001:
            # Effectively greedy decoding.
            token = int(torch.argmax(last_token_logits))
        else:
            probs = torch.softmax(last_token_logits / temperature, dim=-1)
            token = int(torch.multinomial(probs, num_samples=1))
        output_ids.append(token)
        stopped = token == tokenizer.eos_token_id
        if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped:
            output = tokenizer.decode(output_ids, skip_special_tokens=True)
            # BUG FIX: stop_str defaults to None, and str.rfind(None, ...)
            # raises TypeError — only search when a stop string was given.
            if stop_str:
                pos = output.rfind(stop_str, l_prompt)
                if pos != -1:
                    output = output[:pos]
                    stopped = True
            yield output
        if stopped:
            break
    del past_key_values
|
def main(args):
    """Interactive CLI chat loop: load model/tokenizer, then stream responses.

    Reads args.model_name, args.num_gpus, args.device, args.conv_template,
    args.temperature, args.max_new_tokens, and args.debug. Exits on empty
    input or EOF.
    """
    model_name = args.model_name
    num_gpus = args.num_gpus
    # Build model-loading kwargs based on the target device.
    if (args.device == 'cuda'):
        kwargs = {'torch_dtype': torch.float16}
        if (num_gpus == 'auto'):
            kwargs['device_map'] = 'auto'
        else:
            num_gpus = int(num_gpus)
            if (num_gpus != 1):
                # Shard across GPUs, capping per-device memory.
                kwargs.update({'device_map': 'auto', 'max_memory': {i: '13GiB' for i in range(num_gpus)}})
    elif (args.device == 'cpu'):
        kwargs = {}
    else:
        raise ValueError(f'Invalid device: {args.device}')
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name, low_cpu_mem_usage=True, **kwargs)
    if ((args.device == 'cuda') and (num_gpus == 1)):
        model.cuda()
    conv = conv_templates[args.conv_template].copy()
    while True:
        try:
            inp = input(f'{conv.roles[0]}: ')
        except EOFError:
            inp = ''
        if (not inp):
            print('exit...')
            break
        conv.append_message(conv.roles[0], inp)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()
        params = {'model': model_name, 'prompt': prompt, 'temperature': args.temperature, 'max_new_tokens': args.max_new_tokens, 'stop': (conv.sep if (conv.sep_style == SeparatorStyle.SINGLE) else conv.sep2)}
        print(f'{conv.roles[1]}: ', end='', flush=True)
        pre = 0
        # Stream tokens, printing only completed words: everything but the
        # last (possibly partial) whitespace-split chunk.
        for outputs in generate_stream(tokenizer, model, params, args.device):
            outputs = outputs[(len(prompt) + 1):].strip()
            outputs = outputs.split(' ')
            now = len(outputs)
            if ((now - 1) > pre):
                print(' '.join(outputs[pre:(now - 1)]), end=' ', flush=True)
                pre = (now - 1)
        # Flush the remaining words and record the reply in the conversation.
        print(' '.join(outputs[pre:]), flush=True)
        conv.messages[(- 1)][(- 1)] = ' '.join(outputs)
        if args.debug:
            print('\n', {'prompt': prompt, 'outputs': outputs}, '\n')
|
class DispatchMethod(Enum):
    """Strategy for routing a request to a worker."""
    LOTTERY = auto()
    SHORTEST_QUEUE = auto()

    @classmethod
    def from_str(cls, name):
        """Parse a config string into a DispatchMethod.

        Raises:
            ValueError: If ``name`` is not a recognized method.
        """
        by_name = {'lottery': cls.LOTTERY, 'shortest_queue': cls.SHORTEST_QUEUE}
        method = by_name.get(name)
        if method is None:
            raise ValueError('Invalid dispatch method')
        return method
|
@dataclasses.dataclass
class WorkerInfo():
    """Controller-side record of one registered model worker."""
    model_names: List[str]   # models this worker can serve
    speed: int               # relative throughput weight used by lottery dispatch
    queue_length: int        # number of requests currently queued on the worker
    check_heart_beat: bool   # whether the worker is expired on missed heart beats
    # BUG FIX: was annotated `str`, but callers always assign time.time()
    # (a float unix timestamp) and compare it numerically against expiry.
    last_heart_beat: float
|
def heart_beat_controller(controller):
    """Background loop: periodically purge workers whose heart beat expired.

    Runs forever; intended to be started on a daemon/background thread by
    the Controller. Sleeps CONTROLLER_HEART_BEAT_EXPIRATION seconds between
    sweeps.
    """
    while True:
        time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION)
        controller.remove_stable_workers_by_expiration()
|
class Controller():
    """Registry of model workers plus request dispatch between them."""

    def __init__(self, dispatch_method: str):
        """Args:
            dispatch_method: 'lottery' or 'shortest_queue'.
        """
        # Maps worker address -> WorkerInfo.
        self.worker_info = {}
        self.dispatch_method = DispatchMethod.from_str(dispatch_method)
        # Background thread that periodically removes workers with expired heart beats.
        self.heart_beat_thread = threading.Thread(target=heart_beat_controller, args=(self,))
        self.heart_beat_thread.start()
        logger.info('Init controller')

    def register_worker(self, worker_name: str, check_heart_beat: bool, worker_status: dict):
        """Add or refresh a worker entry; returns True on success."""
        if (worker_name not in self.worker_info):
            logger.info(f'Register a new worker: {worker_name}')
        else:
            logger.info(f'Register an existing worker: {worker_name}')
        if (not worker_status):
            # Caller supplied no status: query the worker directly.
            worker_status = self.get_worker_status(worker_name)
            if (not worker_status):
                return False
        self.worker_info[worker_name] = WorkerInfo(worker_status['model_names'], worker_status['speed'], worker_status['queue_length'], check_heart_beat, time.time())
        logger.info(f'Register done: {worker_name}, {worker_status}')
        return True

    def get_worker_status(self, worker_name: str):
        """POST /worker_get_status to the worker; returns its JSON status or None on failure."""
        try:
            r = requests.post((worker_name + '/worker_get_status'), timeout=5)
        except requests.exceptions.RequestException as e:
            logger.error(f'Get status fails: {worker_name}, {e}')
            return None
        if (r.status_code != 200):
            logger.error(f'Get status fails: {worker_name}, {r}')
            return None
        return r.json()

    def remove_worker(self, worker_name: str):
        """Forget a worker. Raises KeyError if it is not registered."""
        del self.worker_info[worker_name]

    def refresh_all_workers(self):
        """Re-register every known worker, dropping those that fail to respond."""
        old_info = dict(self.worker_info)
        self.worker_info = {}
        for (w_name, w_info) in old_info.items():
            if (not self.register_worker(w_name, w_info.check_heart_beat, None)):
                logger.info(f'Remove stale worker: {w_name}')

    def list_models(self):
        """Return the union of model names served across all workers."""
        model_names = set()
        for (w_name, w_info) in self.worker_info.items():
            model_names.update(w_info.model_names)
        return list(model_names)

    def get_worker_address(self, model_name: str):
        """Pick a worker address for ``model_name`` per the dispatch method.

        Returns '' when no suitable worker exists.
        """
        if (self.dispatch_method == DispatchMethod.LOTTERY):
            worker_names = []
            worker_speeds = []
            for (w_name, w_info) in self.worker_info.items():
                if (model_name in w_info.model_names):
                    worker_names.append(w_name)
                    worker_speeds.append(w_info.speed)
            worker_speeds = np.array(worker_speeds, dtype=np.float32)
            norm = np.sum(worker_speeds)
            if (norm < 0.0001):
                return ''
            worker_speeds = (worker_speeds / norm)
            # Speed-weighted random choice. (An unreachable retry loop that
            # re-checked worker liveness followed an unconditional `if True:
            # ... return` here in the original code; that dead code has been
            # removed without changing behavior.)
            pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds)
            return worker_names[pt]
        elif (self.dispatch_method == DispatchMethod.SHORTEST_QUEUE):
            worker_names = []
            worker_qlen = []
            for (w_name, w_info) in self.worker_info.items():
                if (model_name in w_info.model_names):
                    worker_names.append(w_name)
                    # Normalize queue length by speed so faster workers absorb more load.
                    worker_qlen.append((w_info.queue_length / w_info.speed))
            if (len(worker_names) == 0):
                return ''
            min_index = np.argmin(worker_qlen)
            w_name = worker_names[min_index]
            self.worker_info[w_name].queue_length += 1
            logger.info(f'names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}')
            return w_name
        else:
            raise ValueError(f'Invalid dispatch method: {self.dispatch_method}')

    def receive_heart_beat(self, worker_name: str, queue_length: int):
        """Record a heart beat; returns False for unknown workers."""
        if (worker_name not in self.worker_info):
            logger.info(f'Receive unknown heart beat. {worker_name}')
            return False
        self.worker_info[worker_name].queue_length = queue_length
        self.worker_info[worker_name].last_heart_beat = time.time()
        logger.info(f'Receive heart beat. {worker_name}')
        return True

    def remove_stable_workers_by_expiration(self):
        """Drop workers whose heart beat has expired.

        NOTE: 'stable' is likely a typo for 'stale'; the name is kept because
        external callers (heart_beat_controller) use it.
        """
        expire = (time.time() - CONTROLLER_HEART_BEAT_EXPIRATION)
        to_delete = []
        for (worker_name, w_info) in self.worker_info.items():
            if (w_info.check_heart_beat and (w_info.last_heart_beat < expire)):
                to_delete.append(worker_name)
        for worker_name in to_delete:
            self.remove_worker(worker_name)

    def worker_api_generate_stream(self, params):
        """Proxy a streaming generate request to a worker, yielding NUL-delimited chunks."""
        worker_addr = self.get_worker_address(params['model'])
        if (not worker_addr):
            logger.info(f"no worker: {params['model']}")
            ret = {'text': server_error_msg, 'error_code': 2}
            yield (json.dumps(ret).encode() + b'\x00')
            # BUG FIX: previously fell through and POSTed to the empty
            # address after reporting the error; stop the generator here.
            return
        try:
            response = requests.post((worker_addr + '/worker_generate_stream'), json=params, stream=True, timeout=5)
            for chunk in response.iter_lines(decode_unicode=False, delimiter=b'\x00'):
                if chunk:
                    yield (chunk + b'\x00')
        except requests.exceptions.RequestException as e:
            logger.info(f'worker timeout: {worker_addr}')
            ret = {'text': server_error_msg, 'error_code': 3}
            yield (json.dumps(ret).encode() + b'\x00')

    def worker_api_get_status(self):
        """Aggregate model list, speed, and queue length across all workers."""
        model_names = set()
        speed = 0
        queue_length = 0
        for w_name in self.worker_info:
            worker_status = self.get_worker_status(w_name)
            if (worker_status is not None):
                model_names.update(worker_status['model_names'])
                speed += worker_status['speed']
                queue_length += worker_status['queue_length']
        return {'model_names': list(model_names), 'speed': speed, 'queue_length': queue_length}
|
class _Keywords(Enum):
    """Sentinel values used by component update() methods."""
    NO_VALUE = 'NO_VALUE'
    FINISHED_ITERATING = 'FINISHED_ITERATING'
|
@document('style')
class Chatbot(Changeable, Selectable, IOComponent, JSONSerializable):
    '\n    Displays a chatbot output showing both user submitted messages and responses. Supports a subset of Markdown including bold, italics, code, and images.\n    Preprocessing: this component does *not* accept input.\n    Postprocessing: expects function to return a {List[Tuple[str | None | Tuple, str | None | Tuple]]}, a list of tuples with user message and response messages. Messages should be strings, tuples, or Nones. If the message is a string, it can include Markdown. If it is a tuple, it should consist of (string filepath to image/video/audio, [optional string alt text]). Messages that are `None` are not displayed.\n\n    Demos: chatbot_simple, chatbot_multimodal\n    '

    def __init__(self, value: ((List[Tuple[((str | None), (str | None))]] | Callable) | None)=None, color_map: (Dict[(str, str)] | None)=None, *, label: (str | None)=None, every: (float | None)=None, show_label: bool=True, visible: bool=True, elem_id: (str | None)=None, elem_classes: ((List[str] | str) | None)=None, **kwargs):
        "\n        Parameters:\n            value: Default value to show in chatbot. If callable, the function will be called whenever the app loads to set the initial value of the component.\n            label: component name in interface.\n            every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.\n            show_label: if True, will display label.\n            visible: If False, component will be hidden.\n            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.\n            elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.\n        "
        # color_map is accepted only to warn; it has no effect.
        if (color_map is not None):
            warnings.warn("The 'color_map' parameter has been deprecated.")
        # Markdown renderer used to convert string messages to HTML.
        self.md = Markdown(extras=['fenced-code-blocks', 'tables', 'break-on-newline'])
        self.select: EventListenerMethod
        '\n        Event listener for when the user selects message from Chatbot.\n        Uses event data gradio.SelectData to carry `value` referring to text of selected message, and `index` tuple to refer to [message, participant] index.\n        See EventData documentation on how to use this event data.\n        '
        IOComponent.__init__(self, label=label, every=every, show_label=show_label, visible=visible, elem_id=elem_id, elem_classes=elem_classes, value=value, **kwargs)

    def get_config(self):
        # Extend the base component config with this component's value and selectability.
        return {'value': self.value, 'selectable': self.selectable, **IOComponent.get_config(self)}

    @staticmethod
    def update(value: ((Any | Literal[_Keywords.NO_VALUE]) | None)=_Keywords.NO_VALUE, label: (str | None)=None, show_label: (bool | None)=None, visible: (bool | None)=None):
        # Build the dict consumed by gradio's update mechanism; NO_VALUE means "leave unchanged".
        updated_config = {'label': label, 'show_label': show_label, 'visible': visible, 'value': value, '__type__': 'update'}
        return updated_config

    def _process_chat_messages(self, chat_message: ((((str | Tuple) | List) | Dict) | None)) -> ((str | Dict) | None):
        # Normalize one message: None passes through, tuple/list becomes a media
        # file dict, dict passes through, and strings are rendered as Markdown HTML.
        if (chat_message is None):
            return None
        elif isinstance(chat_message, (tuple, list)):
            mime_type = processing_utils.get_mimetype(chat_message[0])
            return {'name': chat_message[0], 'mime_type': mime_type, 'alt_text': (chat_message[1] if (len(chat_message) > 1) else None), 'data': None, 'is_file': True}
        elif isinstance(chat_message, dict):
            return chat_message
        elif isinstance(chat_message, str):
            return str(self.md.convert(chat_message))
        else:
            raise ValueError(f'Invalid message for Chatbot component: {chat_message}')

    def postprocess(self, y: List[Tuple[(((((str | Tuple) | List) | Dict) | None), ((((str | Tuple) | List) | Dict) | None))]]) -> List[Tuple[(((str | Dict) | None), ((str | Dict) | None))]]:
        '\n        Parameters:\n            y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.\n        Returns:\n            List of tuples representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information.\n        '
        if (y is None):
            return []
        processed_messages = []
        for message_pair in y:
            assert isinstance(message_pair, (tuple, list)), f'Expected a list of lists or list of tuples. Received: {message_pair}'
            assert (len(message_pair) == 2), f'Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}'
            # NOTE(review): the user message is wrapped in <pre> via string
            # concatenation — a None or tuple user message would raise a
            # TypeError here despite the docstring allowing them; confirm
            # whether user messages are guaranteed to be strings upstream.
            processed_messages.append(((('<pre style="font-family: var(--font)">' + message_pair[0]) + '</pre>'), self._process_chat_messages(message_pair[1])))
        return processed_messages

    def style(self, height: (int | None)=None, **kwargs):
        '\n        This method can be used to change the appearance of the Chatbot component.\n        '
        if (height is not None):
            self._style['height'] = height
        if (kwargs.get('color_map') is not None):
            warnings.warn("The 'color_map' parameter has been deprecated.")
        # Delegate any remaining style kwargs to the base Component.
        Component.style(self, **kwargs)
        return self
|
def get_conv_log_filename():
    """Return the path of today's conversation log file under LOGDIR."""
    now = datetime.datetime.now()
    return os.path.join(LOGDIR, f'{now.year}-{now.month:02d}-{now.day:02d}-conv.json')
|
def get_model_list():
    """Refresh the controller's worker registry and return the sorted model list."""
    resp = requests.post(args.controller_url + '/refresh_all_workers')
    assert resp.status_code == 200
    resp = requests.post(args.controller_url + '/list_models')
    models = resp.json()['models']
    # Models with an explicit priority sort by it; everything else sorts by name.
    models.sort(key=lambda x: priority.get(x, x))
    logger.info(f'Models: {models}')
    return models
|
def load_demo(url_params, request: gr.Request):
    """Initialize per-session UI state, honoring a ?model= URL parameter if valid."""
    logger.info(f'load_demo. ip: {request.client.host}. params: {url_params}')
    dropdown_update = gr.Dropdown.update(visible=True)
    if 'model' in url_params:
        requested = url_params['model']
        if requested in models:
            dropdown_update = gr.Dropdown.update(value=requested, visible=True)
    state = default_conversation.copy()
    return (
        state,
        dropdown_update,
        gr.Chatbot.update(visible=True),
        gr.Textbox.update(visible=True),
        gr.Button.update(visible=True),
        gr.Row.update(visible=True),
        gr.Accordion.update(visible=True),
    )
|
def load_demo_refresh_model_list(request: gr.Request):
    """Re-query the controller for available models and initialize session state."""
    logger.info(f'load_demo. ip: {request.client.host}')
    models = get_model_list()
    default_model = models[0] if len(models) > 0 else ''
    state = default_conversation.copy()
    return (
        state,
        gr.Dropdown.update(choices=models, value=default_model),
        gr.Chatbot.update(visible=True),
        gr.Textbox.update(visible=True),
        gr.Button.update(visible=True),
        gr.Row.update(visible=True),
        gr.Accordion.update(visible=True),
    )
|
def vote_last_response(state, vote_type, model_selector, request: gr.Request):
    """Append one vote record as a JSON line to today's conversation log."""
    record = {
        'tstamp': round(time.time(), 4),
        'type': vote_type,
        'model': model_selector,
        'state': state.dict(),
        'ip': request.client.host,
    }
    with open(get_conv_log_filename(), 'a') as fout:
        fout.write(json.dumps(record) + '\n')
|
def upvote_last_response(state, model_selector, request: gr.Request):
    """Log an upvote, clear the textbox, and disable the three vote buttons."""
    logger.info(f'upvote. ip: {request.client.host}')
    vote_last_response(state, 'upvote', model_selector, request)
    return ('',) + (disable_btn,) * 3
|
def downvote_last_response(state, model_selector, request: gr.Request):
    """Log a downvote, clear the textbox, and disable the three vote buttons."""
    logger.info(f'downvote. ip: {request.client.host}')
    vote_last_response(state, 'downvote', model_selector, request)
    return ('',) + (disable_btn,) * 3
|
def flag_last_response(state, model_selector, request: gr.Request):
    """Log a flag, clear the textbox, and disable the three vote buttons."""
    logger.info(f'flag. ip: {request.client.host}')
    vote_last_response(state, 'flag', model_selector, request)
    return ('',) + (disable_btn,) * 3
|
def regenerate(state, image_process_mode, request: gr.Request):
    """Drop the last assistant reply so the previous user turn is re-answered.

    Also refreshes the image-process mode on the previous human message when
    it carries an image payload. Returns the state, its chatbot rendering,
    cleared textbox/image, and five disabled buttons.
    """
    logger.info(f'regenerate. ip: {request.client.host}')
    state.messages[(- 1)][(- 1)] = None
    prev_human_msg = state.messages[(- 2)]
    # Idiom fix: isinstance replaces `type(...) in (tuple, list)` — same result
    # for exact tuples/lists, and also handles subclasses correctly.
    if isinstance(prev_human_msg[1], (tuple, list)):
        prev_human_msg[1] = (*prev_human_msg[1][:2], image_process_mode)
    state.skip_next = False
    return ((state, state.to_gradio_chatbot(), '', None) + ((disable_btn,) * 5))
|
def clear_history(request: gr.Request):
    """Replace the session with a fresh default conversation and disable buttons."""
    logger.info(f'clear_history. ip: {request.client.host}')
    fresh_state = default_conversation.copy()
    return (fresh_state, fresh_state.to_gradio_chatbot(), '', None) + (disable_btn,) * 5
|
def add_text(state, text, image, image_process_mode, request: gr.Request):
    """Append a user message (optionally with an image) to the conversation state.

    Returns (state, chatbot rendering, textbox value, image value) plus five
    button updates. Empty or moderated input sets state.skip_next so http_bot
    ignores the turn.
    """
    logger.info(f'add_text. ip: {request.client.host}. len: {len(text)}')
    if ((len(text) <= 0) and (image is None)):
        # Nothing to send: skip this turn and leave the buttons unchanged.
        state.skip_next = True
        return ((state, state.to_gradio_chatbot(), '', None) + ((no_change_btn,) * 5))
    if args.moderate:
        flagged = violates_moderation(text)
        if flagged:
            # Moderated input: skip the turn and surface the moderation message.
            state.skip_next = True
            return ((state, state.to_gradio_chatbot(), moderation_msg, None) + ((no_change_btn,) * 5))
    # Hard length caps on the prompt (tighter when an image is attached).
    text = text[:1536]
    if (image is not None):
        text = text[:1200]
        if ('<image>' not in text):
            text = (text + '\n<image>')
        text = (text, image, image_process_mode)
        # NOTE(review): receiving a new image resets the conversation to the
        # default template, discarding prior history — confirm this single-image
        # behavior is intentional.
        state = default_conversation.copy()
    state.append_message(state.roles[0], text)
    state.append_message(state.roles[1], None)
    state.skip_next = False
    return ((state, state.to_gradio_chatbot(), '', None) + ((disable_btn,) * 5))
|
def post_process_code(code):
    """Undo markdown escaping of underscores inside fenced code blocks.

    Splits on the fence marker; when the fences are balanced (an odd number of
    segments), every odd-indexed segment is code and has '\\_' restored to '_'.
    Unbalanced fences leave the text unchanged.
    """
    fence = '\n```'
    if fence not in code:
        return code
    segments = code.split(fence)
    if len(segments) % 2 == 1:
        segments = [
            seg.replace('\\_', '_') if idx % 2 else seg
            for idx, seg in enumerate(segments)
        ]
    return fence.join(segments)
|
def http_bot(state, model_selector, temperature, max_new_tokens, request: gr.Request):
    """Gradio generator: dispatch the conversation to a worker and stream the reply.

    Yields (state, chatbot rendering, *button updates) tuples as chunks arrive,
    and appends a chat record to the conversation log when generation finishes.
    """
    logger.info(f'http_bot. ip: {request.client.host}')
    start_tstamp = time.time()
    model_name = model_selector
    if state.skip_next:
        # add_text flagged this turn as empty/moderated; echo state unchanged.
        yield ((state, state.to_gradio_chatbot()) + ((no_change_btn,) * 5))
        return
    if (len(state.messages) == (state.offset + 2)):
        # First round of this conversation: pick a template by model name.
        if ('llava' in model_name.lower()):
            if ('v1' in model_name.lower()):
                template_name = 'llava_v1'
            elif ('mpt' in model_name.lower()):
                template_name = 'mpt_multimodal'
            else:
                template_name = 'multimodal'
        elif ('mpt' in model_name):
            template_name = 'mpt_text'
        elif ('koala' in model_name):
            template_name = 'bair_v1'
        elif ('v1' in model_name):
            template_name = 'vicuna_v1_1'
        else:
            template_name = 'v1'
        new_state = conv_templates[template_name].copy()
        new_state.append_message(new_state.roles[0], state.messages[(- 2)][1])
        new_state.append_message(new_state.roles[1], None)
        state = new_state
    controller_url = args.controller_url
    ret = requests.post((controller_url + '/get_worker_address'), json={'model': model_name})
    worker_addr = ret.json()['address']
    logger.info(f'model_name: {model_name}, worker_addr: {worker_addr}')
    if (worker_addr == ''):
        # No worker serves this model: show the error and re-enable regenerate/clear.
        state.messages[(- 1)][(- 1)] = server_error_msg
        yield (state, state.to_gradio_chatbot(), disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
        return
    prompt = state.get_prompt()
    all_images = state.get_images(return_pil=True)
    all_image_hash = [hashlib.md5(image.tobytes()).hexdigest() for image in all_images]
    # Persist each distinct image once, keyed by content hash.
    for (image, img_hash) in zip(all_images, all_image_hash):
        t = datetime.datetime.now()
        filename = os.path.join(LOGDIR, 'serve_images', f'{t.year}-{t.month:02d}-{t.day:02d}', f'{img_hash}.jpg')
        if (not os.path.isfile(filename)):
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            image.save(filename)
    pload = {'model': model_name, 'prompt': prompt, 'temperature': float(temperature), 'max_new_tokens': min(int(max_new_tokens), 1536), 'stop': (state.sep if (state.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.MPT]) else state.sep2), 'images': f'List of {len(state.get_images())} images: {all_image_hash}'}
    logger.info(f'''==== request ====
{pload}''')
    # Replace the log-friendly placeholder with the actual image payload.
    pload['images'] = state.get_images()
    state.messages[(- 1)][(- 1)] = 'β'
    yield ((state, state.to_gradio_chatbot()) + ((disable_btn,) * 5))
    try:
        response = requests.post((worker_addr + '/worker_generate_stream'), headers=headers, json=pload, stream=True, timeout=10)
        for chunk in response.iter_lines(decode_unicode=False, delimiter=b'\x00'):
            if chunk:
                data = json.loads(chunk.decode())
                if (data['error_code'] == 0):
                    output = data['text'][len(prompt):].strip()
                    output = post_process_code(output)
                    # Append the 'β' cursor while streaming.
                    state.messages[(- 1)][(- 1)] = (output + 'β')
                    yield ((state, state.to_gradio_chatbot()) + ((disable_btn,) * 5))
                else:
                    output = (data['text'] + f" (error_code: {data['error_code']})")
                    state.messages[(- 1)][(- 1)] = output
                    yield ((state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn))
                    return
                time.sleep(0.03)
    except requests.exceptions.RequestException as e:
        state.messages[(- 1)][(- 1)] = server_error_msg
        yield ((state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn))
        return
    # Strip the trailing 'β' cursor from the final message.
    state.messages[(- 1)][(- 1)] = state.messages[(- 1)][(- 1)][:(- 1)]
    yield ((state, state.to_gradio_chatbot()) + ((enable_btn,) * 5))
    finish_tstamp = time.time()
    logger.info(f'{output}')
    with open(get_conv_log_filename(), 'a') as fout:
        # BUG FIX: 'finish' previously recorded round(start_tstamp, 4) again,
        # making every logged generation appear to take zero time.
        data = {'tstamp': round(finish_tstamp, 4), 'type': 'chat', 'model': model_name, 'start': round(start_tstamp, 4), 'finish': round(finish_tstamp, 4), 'state': state.dict(), 'images': all_image_hash, 'ip': request.client.host}
        fout.write((json.dumps(data) + '\n'))
|
def build_demo(embed_mode):
    """Build the Gradio Blocks UI for the LLaVA chat demo.

    Args:
        embed_mode: when truthy, the title and TOS markdown are hidden so the
            demo can be embedded inside another page.

    Returns:
        The constructed ``gr.Blocks`` demo (not launched here).

    NOTE(review): relies on module-level globals defined elsewhere in this
    file (`models`, `args`, `css`, `title_markdown`, `grChatbot`, the event
    handler functions, etc.).
    """
    # Created outside the layout so it can be rendered later, inside the chat
    # column, via textbox.render().
    textbox = gr.Textbox(show_label=False, placeholder='Enter text and press ENTER', visible=False).style(container=False)
    with gr.Blocks(title='LLaVA', theme=gr.themes.Base(), css=css) as demo:
        # Per-session conversation state.
        state = gr.State()
        if (not embed_mode):
            gr.Markdown(title_markdown)
        with gr.Row():
            # Left column: model picker, image upload, examples, sampling knobs.
            with gr.Column(scale=3):
                with gr.Row(elem_id='model_selector_row'):
                    model_selector = gr.Dropdown(choices=models, value=(models[0] if (len(models) > 0) else ''), interactive=True, show_label=False).style(container=False)
                imagebox = gr.Image(type='pil')
                image_process_mode = gr.Radio(['Crop', 'Resize', 'Pad'], value='Crop', label='Preprocess for non-square image')
                cur_dir = os.path.dirname(os.path.abspath(__file__))
                gr.Examples(examples=[[f'{cur_dir}/examples/extreme_ironing.jpg', 'What is unusual about this image?'], [f'{cur_dir}/examples/waterview.jpg', 'What are the things I should be cautious about when I visit here?']], inputs=[imagebox, textbox])
                with gr.Accordion('Parameters', open=False, visible=False) as parameter_row:
                    temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.2, step=0.1, interactive=True, label='Temperature')
                    max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=64, interactive=True, label='Max output tokens')
            # Right column: chatbot transcript, input row, and vote buttons.
            with gr.Column(scale=6):
                # grChatbot is presumably a patched gr.Chatbot variant — defined elsewhere.
                chatbot = grChatbot(elem_id='chatbot', label='LLaVA Chatbot', visible=False).style(height=550)
                with gr.Row():
                    with gr.Column(scale=8):
                        textbox.render()
                    with gr.Column(scale=1, min_width=60):
                        submit_btn = gr.Button(value='Submit', visible=False)
                with gr.Row(visible=False) as button_row:
                    upvote_btn = gr.Button(value='👍  Upvote', interactive=False)
                    downvote_btn = gr.Button(value='👎  Downvote', interactive=False)
                    flag_btn = gr.Button(value='⚠️  Flag', interactive=False)
                    regenerate_btn = gr.Button(value='🔄  Regenerate', interactive=False)
                    clear_btn = gr.Button(value='🗑️  Clear history', interactive=False)
        if (not embed_mode):
            gr.Markdown(tos_markdown)
            gr.Markdown(learn_more_markdown)
        # Hidden JSON component used to receive URL query parameters on load.
        url_params = gr.JSON(visible=False)
        # Event wiring: voting, regeneration, clearing, and the two submit paths
        # (ENTER in the textbox or the Submit button), both chained into http_bot.
        btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
        upvote_btn.click(upvote_last_response, [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
        downvote_btn.click(downvote_last_response, [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
        flag_btn.click(flag_last_response, [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
        regenerate_btn.click(regenerate, [state, image_process_mode], ([state, chatbot, textbox, imagebox] + btn_list)).then(http_bot, [state, model_selector, temperature, max_output_tokens], ([state, chatbot] + btn_list))
        clear_btn.click(clear_history, None, ([state, chatbot, textbox, imagebox] + btn_list))
        textbox.submit(add_text, [state, textbox, imagebox, image_process_mode], ([state, chatbot, textbox, imagebox] + btn_list)).then(http_bot, [state, model_selector, temperature, max_output_tokens], ([state, chatbot] + btn_list))
        submit_btn.click(add_text, [state, textbox, imagebox, image_process_mode], ([state, chatbot, textbox, imagebox] + btn_list)).then(http_bot, [state, model_selector, temperature, max_output_tokens], ([state, chatbot] + btn_list))
        # 'once' fetches the model list a single time at page load; 'reload'
        # refreshes it from the controller on every load.
        if (args.model_list_mode == 'once'):
            demo.load(load_demo, [url_params], [state, model_selector, chatbot, textbox, submit_btn, button_row, parameter_row], _js=get_window_url_params)
        elif (args.model_list_mode == 'reload'):
            demo.load(load_demo_refresh_model_list, None, [state, model_selector, chatbot, textbox, submit_btn, button_row, parameter_row])
        else:
            raise ValueError(f'Unknown model list mode: {args.model_list_mode}')
    return demo
|
def heart_beat_worker(controller):
    """Send heart beats to the controller forever, sleeping between beats.

    Intended as the target of a background thread owned by `controller`.
    """
    # iter(int, 1) never exhausts (int() == 0 never equals the sentinel 1),
    # which gives an endless loop without `while True`.
    for _ in iter(int, 1):
        time.sleep(WORKER_HEART_BEAT_INTERVAL)
        controller.send_heart_beat()
|
def load_model(model_path, model_name, num_gpus):
    """Load the tokenizer, model, and (for LLaVA models) the CLIP image processor.

    Args:
        model_path: HF hub id or local path of the checkpoint.
        model_name: display name; 'llava'/'mpt' substrings select the model class.
        num_gpus: 1 -> plain .cuda(); >1 -> HF 'auto' device map across GPUs.

    Returns:
        (tokenizer, model, image_processor, context_len); image_processor is
        None for non-LLaVA models.
    """
    if (num_gpus == 1):
        kwargs = {}
    else:
        # NOTE(review): per-GPU memory cap is hard-coded to 13GiB — presumably
        # sized for ~16GB cards; confirm for other hardware.
        kwargs = {'device_map': 'auto', 'max_memory': {i: '13GiB' for i in range(num_gpus)}}
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    # Pick the model class from the name: LLaVA (MPT or LLaMA backbone),
    # plain MPT (needs trust_remote_code), or a generic causal LM.
    if ('llava' in model_name.lower()):
        if ('mpt' in model_name.lower()):
            model = LlavaMPTForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, **kwargs)
        else:
            model = LlavaLlamaForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, **kwargs)
    elif ('mpt' in model_name.lower()):
        model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)
    else:
        model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, **kwargs)
    image_processor = None
    if ('llava' in model_name.lower()):
        # Multimodal setup: image processor, special tokens, and vision tower.
        from transformers import CLIPImageProcessor, CLIPVisionModel
        image_processor = CLIPImageProcessor.from_pretrained(model.config.mm_vision_tower, torch_dtype=torch.float16)
        mm_use_im_start_end = getattr(model.config, 'mm_use_im_start_end', False)
        tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
        if mm_use_im_start_end:
            tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
        vision_tower = model.get_model().vision_tower[0]
        if (vision_tower.device.type == 'meta'):
            # Tower was loaded lazily (meta device); materialize real weights.
            vision_tower = CLIPVisionModel.from_pretrained(vision_tower.config._name_or_path, torch_dtype=torch.float16, low_cpu_mem_usage=True).cuda()
            model.get_model().vision_tower[0] = vision_tower
        else:
            vision_tower.to(device='cuda', dtype=torch.float16)
        # Record the multimodal token ids on the vision config for the model.
        vision_config = vision_tower.config
        vision_config.im_patch_token = tokenizer.convert_tokens_to_ids([DEFAULT_IMAGE_PATCH_TOKEN])[0]
        vision_config.use_im_start_end = mm_use_im_start_end
        if mm_use_im_start_end:
            (vision_config.im_start_token, vision_config.im_end_token) = tokenizer.convert_tokens_to_ids([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN])
    if (num_gpus == 1):
        model.cuda()
    # Context length falls back to 2048 when the config does not declare one.
    if hasattr(model.config, 'max_sequence_length'):
        context_len = model.config.max_sequence_length
    else:
        context_len = 2048
    return (tokenizer, model, image_processor, context_len)
|
class ModelWorker():
    """Hosts one model and streams generations for the LLaVA serving stack.

    Registers itself with the controller, keeps the registration alive with
    periodic heart beats from a background thread, and serves token-streaming
    generation requests via `generate_stream_gate`.
    """
    def __init__(self, controller_addr, worker_addr, worker_id, no_register, model_path, model_name, keep_aspect_ratio, num_gpus):
        self.controller_addr = controller_addr
        self.worker_addr = worker_addr
        self.worker_id = worker_id
        if model_path.endswith('/'):
            model_path = model_path[:(- 1)]
        if (model_name is None):
            # Derive a display name from the path; prepend the parent folder
            # for 'checkpoint-*' dirs so different runs stay distinguishable.
            model_paths = model_path.split('/')
            if model_paths[(- 1)].startswith('checkpoint-'):
                self.model_name = ((model_paths[(- 2)] + '_') + model_paths[(- 1)])
            else:
                self.model_name = model_paths[(- 1)]
        else:
            self.model_name = model_name
        logger.info(f'Loading the model {self.model_name} on worker {worker_id} ...')
        self.keep_aspect_ratio = keep_aspect_ratio
        (self.tokenizer, self.model, self.image_processor, self.context_len) = load_model(model_path, self.model_name, num_gpus)
        self.is_multimodal = ('llava' in model_path.lower())
        if (not no_register):
            self.register_to_controller()
            self.heart_beat_thread = threading.Thread(target=heart_beat_worker, args=(self,))
            self.heart_beat_thread.start()
    def register_to_controller(self):
        """Announce this worker and its current status to the controller."""
        logger.info('Register to controller')
        url = (self.controller_addr + '/register_worker')
        data = {'worker_name': self.worker_addr, 'check_heart_beat': True, 'worker_status': self.get_status()}
        r = requests.post(url, json=data)
        assert (r.status_code == 200)
    def send_heart_beat(self):
        """Ping the controller; re-register if it no longer knows this worker."""
        logger.info(f'Send heart beat. Models: {[self.model_name]}. Semaphore: {pretty_print_semaphore(model_semaphore)}. global_counter: {global_counter}')
        url = (self.controller_addr + '/receive_heart_beat')
        while True:
            try:
                ret = requests.post(url, json={'worker_name': self.worker_addr, 'queue_length': self.get_queue_length()}, timeout=5)
                exist = ret.json()['exist']
                break
            except requests.exceptions.RequestException as e:
                # Controller may be down/restarting; retry until it answers.
                logger.error(f'heart beat error: {e}')
            time.sleep(5)
        if (not exist):
            self.register_to_controller()
    def get_queue_length(self):
        """Number of requests queued behind the concurrency semaphore."""
        if (model_semaphore is None):
            return 0
        else:
            # NOTE(review): reads asyncio.Semaphore internals (_value/_waiters);
            # may break across Python versions.
            return ((args.limit_model_concurrency - model_semaphore._value) + (len(model_semaphore._waiters) if (model_semaphore._waiters is not None) else 0))
    def get_status(self):
        """Status payload reported to the controller."""
        return {'model_names': [self.model_name], 'speed': 1, 'queue_length': self.get_queue_length()}
    @torch.inference_mode()
    def generate_stream(self, params):
        """Greedy/sampled token-by-token generation, yielding NUL-framed JSON.

        `params` keys: 'prompt', optional 'images' (base64 list), 'temperature',
        'max_new_tokens', 'stop'. Each yielded chunk carries the full prompt +
        output so far and error_code 0.
        """
        (tokenizer, model, image_processor) = (self.tokenizer, self.model, self.image_processor)
        prompt = params['prompt']
        ori_prompt = prompt
        images = params.get('images', None)
        if ((images is not None) and (len(images) > 0) and self.is_multimodal):
            from PIL import Image
            from io import BytesIO
            import base64
            assert (type(images) is list)
            if (len(images) > 0):
                images = [Image.open(BytesIO(base64.b64decode(image))) for image in images]
                assert (len(images) == prompt.count(DEFAULT_IMAGE_TOKEN)), 'Number of images does not match number of <image> tokens in prompt'
                if self.keep_aspect_ratio:
                    new_images = []
                    for (image_idx, image) in enumerate(images):
                        # Resize preserving aspect ratio: shortest edge <= 224,
                        # implied longest edge <= 448.
                        (max_hw, min_hw) = (max(image.size), min(image.size))
                        aspect_ratio = (max_hw / min_hw)
                        (max_len, min_len) = (448, 224)
                        shortest_edge = int(min((max_len / aspect_ratio), min_len))
                        image = image_processor.preprocess(image, return_tensors='pt', do_center_crop=False, size={'shortest_edge': shortest_edge})['pixel_values'][0]
                        new_images.append(image.to(self.model.device, dtype=torch.float16))
                        # 14 is presumably the ViT patch size — token count is
                        # (H/14) * (W/14); confirm against the vision tower.
                        cur_token_len = ((image.shape[1] // 14) * (image.shape[2] // 14))
                        replace_token = (DEFAULT_IMAGE_PATCH_TOKEN * cur_token_len)
                        if getattr(self.model.config, 'mm_use_im_start_end', False):
                            replace_token = ((DEFAULT_IM_START_TOKEN + replace_token) + DEFAULT_IM_END_TOKEN)
                        # Replace one <image> occurrence per image, in order.
                        prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token, 1)
                    images = new_images
                else:
                    # Fixed-size preprocessing: every image expands to 256 patch tokens.
                    images = image_processor(images, return_tensors='pt')['pixel_values']
                    images = images.to(self.model.device, dtype=torch.float16)
                    replace_token = (DEFAULT_IMAGE_PATCH_TOKEN * 256)
                    if getattr(self.model.config, 'mm_use_im_start_end', False):
                        replace_token = ((DEFAULT_IM_START_TOKEN + replace_token) + DEFAULT_IM_END_TOKEN)
                    prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
            else:
                images = None
            image_args = {'images': images}
        else:
            images = None
            image_args = {}
        l_prompt = len(prompt)
        temperature = float(params.get('temperature', 1.0))
        max_new_tokens = min(int(params.get('max_new_tokens', 256)), 1024)
        stop_str = params.get('stop', None)
        stop_idx = None
        if (stop_str is not None):
            # Only use token-level stopping when the stop string is one token.
            stop_idx = tokenizer(stop_str).input_ids
            if (len(stop_idx) == 1):
                stop_idx = stop_idx[0]
            else:
                stop_idx = None
        input_ids = tokenizer(prompt).input_ids
        output_ids = list(input_ids)
        pred_ids = []
        # Leave room for the generation plus an 8-token safety margin.
        max_src_len = ((self.context_len - max_new_tokens) - 8)
        input_ids = input_ids[(- max_src_len):]
        past_key_values = None
        for i in range(max_new_tokens):
            if (i == 0):
                # Prefill: full prompt (and images) in one forward pass.
                out = model(torch.as_tensor([input_ids]).cuda(), use_cache=True, **image_args)
                logits = out.logits
                past_key_values = out.past_key_values
            else:
                # Decode: feed only the last sampled token with the KV cache.
                attention_mask = torch.ones(1, (past_key_values[0][0].shape[(- 2)] + 1), device='cuda')
                out = model(input_ids=torch.as_tensor([[token]], device='cuda'), use_cache=True, attention_mask=attention_mask, past_key_values=past_key_values)
                logits = out.logits
                past_key_values = out.past_key_values
            last_token_logits = logits[0][(- 1)]
            # Near-zero temperature degenerates to greedy argmax.
            if (temperature < 0.0001):
                token = int(torch.argmax(last_token_logits))
            else:
                probs = torch.softmax((last_token_logits / temperature), dim=(- 1))
                token = int(torch.multinomial(probs, num_samples=1))
            output_ids.append(token)
            pred_ids.append(token)
            if ((stop_idx is not None) and (token == stop_idx)):
                stopped = True
            elif (token == tokenizer.eos_token_id):
                stopped = True
            else:
                stopped = False
            # Emit a chunk every stream_interval tokens, and always on the
            # final/stopping token.
            if (((i % args.stream_interval) == 0) or (i == (max_new_tokens - 1)) or stopped):
                cur_out = tokenizer.decode(pred_ids, skip_special_tokens=True)
                # NOTE(review): raises TypeError if 'stop' was absent/None —
                # callers appear to always send it; confirm.
                pos = cur_out.rfind(stop_str)
                if (pos != (- 1)):
                    cur_out = cur_out[:pos]
                    stopped = True
                output = (ori_prompt + cur_out)
                ret = {'text': output, 'error_code': 0}
                (yield (json.dumps(ret).encode() + b'\x00'))
            if stopped:
                break
        if (past_key_values is not None):
            del past_key_values
    def generate_stream_gate(self, params):
        """Wrap generate_stream, converting known failures into error chunks."""
        try:
            for x in self.generate_stream(params):
                (yield x)
        except ValueError as e:
            print('Caught ValueError:', e)
            ret = {'text': server_error_msg, 'error_code': 1}
            (yield (json.dumps(ret).encode() + b'\x00'))
        except torch.cuda.CudaError as e:
            print('Caught torch.cuda.CudaError:', e)
            ret = {'text': server_error_msg, 'error_code': 1}
            (yield (json.dumps(ret).encode() + b'\x00'))
|
def release_model_semaphore(fn=None):
    """Release the global model-concurrency semaphore, then run `fn` if given."""
    model_semaphore.release()
    if fn is not None:
        fn()
|
def main():
    """CLI client: resolve a worker for the model and stream one reply."""
    if args.worker_address:
        worker_addr = args.worker_address
    else:
        # No explicit worker: ask the controller for one serving the model.
        controller_addr = args.controller_address
        # First call refreshes the controller's worker list; its response is
        # discarded on purpose.
        ret = requests.post((controller_addr + '/refresh_all_workers'))
        ret = requests.post((controller_addr + '/list_models'))
        models = ret.json()['models']
        models.sort()
        print(f'Models: {models}')
        ret = requests.post((controller_addr + '/get_worker_address'), json={'model': args.model_name})
        worker_addr = ret.json()['address']
        print(f'worker_addr: {worker_addr}')
    if (worker_addr == ''):
        # Controller has no live worker for this model; nothing to do.
        return
    conv = default_conversation.copy()
    conv.append_message(conv.roles[0], args.message)
    prompt = conv.get_prompt()
    headers = {'User-Agent': 'LLaVA Client'}
    pload = {'model': args.model_name, 'prompt': prompt, 'max_new_tokens': args.max_new_tokens, 'temperature': 0.7, 'stop': conv.sep}
    response = requests.post((worker_addr + '/worker_generate_stream'), headers=headers, json=pload, stream=True)
    print(prompt.replace(conv.sep, '\n'), end='')
    # The worker frames each JSON chunk with a trailing NUL byte.
    for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b'\x00'):
        if chunk:
            data = json.loads(chunk.decode('utf-8'))
            output = data['text'].split(conv.sep)[(- 1)]
            # '\r' rewrites the same console line as the stream grows.
            print(output, end='\r')
    print('')
|
def forward(self, hidden_states: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, use_cache: bool=False) -> Tuple[(torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]])]:
    """Flash-attention replacement for `LlamaAttention.forward`.

    Input shape: Batch x Time x Channel. `attention_mask` here is a
    [bsz, q_len] key-padding mask, not an additive causal mask — see the
    companion `_prepare_decoder_attention_mask` override.

    Returns:
        (attn_output, None, None): attention weights and KV cache are not
        produced by this implementation (asserted below).
    """
    (bsz, q_len, _) = hidden_states.size()
    # Project to per-head Q/K/V: [bsz, num_heads, q_len, head_dim].
    query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    kv_seq_len = key_states.shape[(- 2)]
    offset = 0
    if (past_key_value is not None):
        # NOTE(review): dead in practice — past_key_value is asserted None below.
        offset = past_key_value[0].shape[(- 2)]
        kv_seq_len += offset
    (cos, sin) = self.rotary_emb(value_states, seq_len=kv_seq_len)
    (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin, offset=offset)
    assert (not output_attentions), 'output_attentions is not supported'
    assert (not use_cache), 'use_cache is not supported'
    assert (past_key_value is None), 'past_key_value is not supported'
    # Pack Q/K/V: stack -> [bsz, nh, 3, q_len, hd]; transpose(1, 3) ->
    # [bsz, q_len, 3, nh, hd], the layout the qkv-packed kernel expects.
    qkv = torch.stack([query_states, key_states, value_states], dim=2)
    qkv = qkv.transpose(1, 3)
    key_padding_mask = attention_mask
    if (key_padding_mask is None):
        # No padding: flatten (batch, seq) and use uniform cumulative lengths.
        qkv = rearrange(qkv, 'b s ... -> (b s) ...')
        max_s = q_len
        cu_q_lens = torch.arange(0, ((bsz + 1) * q_len), step=q_len, dtype=torch.int32, device=qkv.device)
        output = flash_attn_unpadded_qkvpacked_func(qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True)
        output = rearrange(output, '(b s) ... -> b s ...', b=bsz)
    else:
        # Padded batch: drop pad tokens, run the kernel, then re-pad.
        nheads = qkv.shape[(- 2)]
        x = rearrange(qkv, 'b s three h d -> b s (three h d)')
        (x_unpad, indices, cu_q_lens, max_s) = unpad_input(x, key_padding_mask)
        x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
        output_unpad = flash_attn_unpadded_qkvpacked_func(x_unpad, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True)
        output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices, bsz, q_len), 'b s (h d) -> b s h d', h=nheads)
    return (self.o_proj(rearrange(output, 'b s h d -> b s (h d)')), None, None)
|
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
return attention_mask
|
def replace_llama_attn_with_flash_attn():
    """Monkey-patch HF LLaMA so attention runs through flash-attention."""
    llama = transformers.models.llama.modeling_llama
    llama.LlamaModel._prepare_decoder_attention_mask = _prepare_decoder_attention_mask
    llama.LlamaAttention.forward = forward
|
def unwrap_model(model: nn.Module) -> nn.Module:
    """Strip distributed-training containers from a model.

    Wrappers such as DDP expose the wrapped network via a `.module`
    attribute; follow that chain iteratively until a bare model remains.

    Args:
        model (`torch.nn.Module`): The model to unwrap.
    """
    while hasattr(model, 'module'):
        model = model.module
    return model
|
class LLaVATrainer(Trainer):
    """Trainer that additionally snapshots multimodal-adapter weights.

    When `tune_mm_mlp_adapter` is set, only the projector / token-embedding
    weights are written to a standalone .bin file so they can be reused
    without carrying the full checkpoint. The normal HF save still runs.
    """
    def _save(self, output_dir: Optional[str]=None, state_dict=None):
        if getattr(self.args, 'tune_mm_mlp_adapter', False):
            _state_dict = state_dict
            if (_state_dict is None):
                # Unwrap DDP/etc. containers so state-dict keys are unprefixed.
                model_to_save = unwrap_model(self.model)
                _state_dict = model_to_save.state_dict()
            weight_to_save = {}
            # Keep only adapter-relevant tensors (projector + embeddings).
            keys_to_match = ['mm_projector', 'embed_tokens', 'embed_in']
            for (k, v) in _state_dict.items():
                if any(((key_match in k) for key_match in keys_to_match)):
                    weight_to_save[k] = v
            # NOTE(review): assumes output_dir is not None here — confirm all callers.
            current_folder = output_dir.split('/')[(- 1)]
            parent_folder = os.path.dirname(output_dir)
            if current_folder.startswith('checkpoint-'):
                # Collect per-checkpoint snapshots under one shared folder.
                mm_projector_folder = os.path.join(parent_folder, 'mm_projector')
                os.makedirs(mm_projector_folder, exist_ok=True)
                torch.save(weight_to_save, os.path.join(mm_projector_folder, f'{current_folder}.bin'))
            else:
                torch.save(weight_to_save, os.path.join(output_dir, f'mm_projector.bin'))
        super(LLaVATrainer, self)._save(output_dir, state_dict)
|
@dataclass
class ModelArguments():
    """Arguments that select and configure the model."""
    # Base LM checkpoint (HF hub id or local path).
    model_name_or_path: Optional[str] = field(default='facebook/opt-125m')
    # Conversation/template version (e.g. 'v0', 'v1', 'mpt').
    version: Optional[str] = field(default='v0')
    # Freeze the LM backbone (requires_grad_(False) on model.model).
    freeze_backbone: bool = field(default=False)
    # Save only mm-projector/embedding weights via LLaVATrainer._save.
    tune_mm_mlp_adapter: bool = field(default=False)
    # Vision tower name/path; None means a text-only model.
    vision_tower: Optional[str] = field(default=None)
    # Which vision-tower layer to take features from (-1 = last).
    mm_vision_select_layer: Optional[int] = field(default=(- 1))
    # Optional pretrained multimodal projector weights to load.
    pretrain_mm_mlp_adapter: Optional[str] = field(default=None)
    # Wrap image patch tokens with <im_start>/<im_end> tokens.
    mm_use_im_start_end: bool = field(default=False)
    # Use the SPI LLaVA variant (see SPILlavaMPTForCausalLM in train()).
    with_spi: bool = field(default=True)
|
@dataclass
class DataArguments():
    """Arguments describing the training data and image preprocessing."""
    # Path to the training data JSON.
    # NOTE(review): default is None despite the `str` annotation; the parser
    # relies on callers always supplying it.
    data_path: str = field(default=None, metadata={'help': 'Path to the training data.'})
    # Tokenize per-item in __getitem__ (LazySupervisedDataset) instead of up front.
    lazy_preprocess: bool = False
    # Whether samples may contain images.
    is_multimodal: bool = False
    # Move the <image> token to the front of the first turn (see preprocess_multimodal).
    sep_image_conv_front: bool = False
    # Number of patch tokens an image expands to (0 until configured elsewhere).
    image_token_len: int = 0
    # Directory containing the image files referenced by the data.
    image_folder: Optional[str] = field(default=None)
    # One of 'square', 'keep', or 'pad' (see LazySupervisedDataset.__getitem__).
    image_aspect_ratio: str = 'square'
|
@dataclass
class TrainingArguments(transformers.TrainingArguments):
    """HF TrainingArguments plus LLaVA-specific knobs."""
    cache_dir: Optional[str] = field(default=None)
    # Optimizer name passed through to HF (torch AdamW by default).
    optim: str = field(default='adamw_torch')
    # Keep all dataset columns; the collator consumes non-model keys like 'image'.
    remove_unused_columns: bool = field(default=False)
    # Freeze the multimodal projector during training.
    freeze_mm_mlp_adapter: bool = field(default=False)
    # NOTE(review): consumed outside this chunk — presumably forces FSDP
    # wrapping; confirm where it is read.
    force_fsdp: bool = field(default=False)
    model_max_length: int = field(default=512, metadata={'help': 'Maximum sequence length. Sequences will be right padded (and possibly truncated).'})
|
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
    """Collect the model state dict on CPU and hand it to the trainer to save.

    The state dict is gathered on every process, but only processes where
    `should_save` is set actually write to disk.
    """
    state_dict = trainer.model.state_dict()
    if not trainer.args.should_save:
        return
    cpu_state_dict = {name: tensor.cpu() for name, tensor in state_dict.items()}
    del state_dict  # drop the (possibly GPU-resident) copy before writing
    trainer._save(output_dir, state_dict=cpu_state_dict)
|
def smart_tokenizer_and_embedding_resize(special_tokens_dict: Dict, tokenizer: transformers.PreTrainedTokenizer, model: transformers.PreTrainedModel):
    """Resize tokenizer and embedding, initializing new rows to the old mean.

    Note: This is the unoptimized version that may make your embedding size
    not be divisible by 64.
    """
    num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
    model.resize_token_embeddings(len(tokenizer))
    if num_new_tokens == 0:
        return
    # New rows were appended at the end of both embedding matrices; fill each
    # with the mean of that matrix's pre-existing rows.
    for embeddings in (model.get_input_embeddings().weight.data, model.get_output_embeddings().weight.data):
        embeddings[-num_new_tokens:] = embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
|
def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Tokenize each string, returning per-example ids and unpadded lengths.

    'labels' aliases 'input_ids' (and 'labels_lens' aliases 'input_ids_lens'):
    callers mask targets later, so no copy is made here.
    """
    input_ids = []
    lens = []
    for text in strings:
        tokenized = tokenizer(text, return_tensors='pt', padding='longest', max_length=tokenizer.model_max_length, truncation=True)
        input_ids.append(tokenized.input_ids[0])
        # Length excludes pad tokens (single example, so padding is a no-op,
        # but the mask keeps the count robust).
        lens.append(tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item())
    return dict(input_ids=input_ids, labels=input_ids, input_ids_lens=lens, labels_lens=lens)
|
def _mask_targets(target, tokenized_lens, speakers):
    """Mask the header and all human turns in `target` with IGNORE_INDEX.

    `tokenized_lens[0]` is the header length; the remaining entries line up
    one-to-one with `speakers`. For human turns, masking starts two tokens
    into the span (presumably to keep the role signal visible — verify).
    """
    offset = tokenized_lens[0]
    target[:offset] = IGNORE_INDEX
    for turn_len, speaker in zip(tokenized_lens[1:], speakers):
        if speaker == 'human':
            target[offset + 2:offset + turn_len] = IGNORE_INDEX
        offset += turn_len
|
def _add_speaker_and_signal(header, source, get_conversation=True):
    """Decorate each sentence with '### <role>: ' and a trailing newline.

    Mutates each sentence's 'value' in place. When `get_conversation` is
    true, also concatenates header + decorated sentences + a final '### '
    and returns that string (otherwise returns header + '### ').
    """
    BEGIN_SIGNAL = '### '
    END_SIGNAL = '\n'
    conversation = header
    for sentence in source:
        from_str = sentence['from']
        lowered = from_str.lower()
        if lowered == 'human':
            from_str = conversation_lib.default_conversation.roles[0]
        elif lowered == 'gpt':
            from_str = conversation_lib.default_conversation.roles[1]
        else:
            from_str = 'unknown'
        sentence['value'] = BEGIN_SIGNAL + from_str + ': ' + sentence['value'] + END_SIGNAL
        if get_conversation:
            conversation += sentence['value']
    # Trailing signal marks where the next (generated) turn would begin.
    conversation += BEGIN_SIGNAL
    return conversation
|
def preprocess_multimodal(sources: Sequence[str], multimodal_cfg: dict, cur_token_len: int) -> Dict:
    """Expand each <image> token into `cur_token_len` image-patch tokens.

    When `sep_image_conv_front` is set, the image token is first moved to the
    very front of the opening turn. Mutates `sources` in place and returns
    it; a no-op when the config is not multimodal.
    """
    if not multimodal_cfg['is_multimodal']:
        return sources
    # The replacement string is the same for every sentence; build it once.
    replace_token = DEFAULT_IMAGE_PATCH_TOKEN * cur_token_len
    if multimodal_cfg['use_im_start_end']:
        replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
    for source in sources:
        if multimodal_cfg['sep_image_conv_front']:
            assert DEFAULT_IMAGE_TOKEN in source[0]['value']
            source[0]['value'] = source[0]['value'].replace(DEFAULT_IMAGE_TOKEN, '').strip()
            source[0]['value'] = (DEFAULT_IMAGE_TOKEN + conversation_lib.default_conversation.sep + conversation_lib.default_conversation.roles[0] + ': ' + source[0]['value'])
        for sentence in source:
            sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN, replace_token)
    return sources
|
def preprocess_v1(sources, tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Build input_ids and masked labels for the vicuna-v1 (TWO-separator) template.

    Each conversation is rendered with the default template, tokenized, and
    the instruction (human) portions of the labels are masked with
    IGNORE_INDEX so loss is computed only on assistant tokens.
    """
    conv = conversation_lib.default_conversation.copy()
    roles = {'human': conv.roles[0], 'gpt': conv.roles[1]}
    conversations = []
    for (i, source) in enumerate(sources):
        if (roles[source[0]['from']] != conv.roles[0]):
            # Drop a leading assistant turn so turns alternate human/gpt.
            source = source[1:]
        conv.messages = []
        for (j, sentence) in enumerate(source):
            role = roles[sentence['from']]
            assert (role == conv.roles[(j % 2)]), f'{i}'
            conv.append_message(role, sentence['value'])
        conversations.append(conv.get_prompt())
    input_ids = tokenizer(conversations, return_tensors='pt', padding='longest', max_length=tokenizer.model_max_length, truncation=True).input_ids
    targets = input_ids.clone()
    assert (conv.sep_style == conversation_lib.SeparatorStyle.TWO)
    # Separator that precedes each assistant reply, e.g. ' ASSISTANT: '.
    sep = ((conv.sep + conv.roles[1]) + ': ')
    for (conversation, target) in zip(conversations, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())
        # sep2 terminates each assistant turn, so splitting on it yields rounds.
        rounds = conversation.split(conv.sep2)
        # Position 0 is presumably the BOS token — always masked.
        cur_len = 1
        target[:cur_len] = IGNORE_INDEX
        for (i, rou) in enumerate(rounds):
            if (rou == ''):
                break
            parts = rou.split(sep)
            if (len(parts) != 2):
                break
            parts[0] += sep
            round_len = len(tokenizer(rou).input_ids)
            # -2 presumably compensates for BOS and a separator token counted
            # twice when re-tokenizing the fragment — verify against tokenizer.
            instruction_len = (len(tokenizer(parts[0]).input_ids) - 2)
            # Mask the instruction part of this round; leave the reply visible.
            target[cur_len:(cur_len + instruction_len)] = IGNORE_INDEX
            cur_len += round_len
        target[cur_len:] = IGNORE_INDEX
        if (cur_len < tokenizer.model_max_length):
            # If reconstruction drifted from the true length, ignore the whole
            # example rather than train on misaligned labels.
            if (cur_len != total_len):
                target[:] = IGNORE_INDEX
                print(f'WARNING: tokenization mismatch: {cur_len} vs. {total_len}. (ignored)')
    return dict(input_ids=input_ids, labels=targets)
|
def preprocess_mpt(sources, tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Build input_ids and masked labels for the MPT chat template.

    Mirrors `preprocess_v1` but re-groups the separator-split segments into
    rounds for the MPT single-separator format before masking instructions.
    """
    conv = conversation_lib.default_conversation.copy()
    roles = {'human': conv.roles[0], 'gpt': conv.roles[1]}
    conversations = []
    for (i, source) in enumerate(sources):
        if (roles[source[0]['from']] != conv.roles[0]):
            # Drop a leading assistant turn so turns alternate human/gpt.
            source = source[1:]
        conv.messages = []
        for (j, sentence) in enumerate(source):
            role = roles[sentence['from']]
            assert (role == conv.roles[(j % 2)]), f'{i}'
            conv.append_message(role, sentence['value'])
        conversations.append(conv.get_prompt())
    input_ids = tokenizer(conversations, return_tensors='pt', padding='longest', max_length=tokenizer.model_max_length, truncation=True).input_ids
    targets = input_ids.clone()
    assert (conv.sep_style == conversation_lib.SeparatorStyle.MPT)
    sep = (conv.sep + conv.roles[1])
    for (conversation, target) in zip(conversations, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())
        rounds = conversation.split(conv.sep)
        # First 3 segments form the opening round (presumably system + first
        # human + first assistant); subsequent rounds come in pairs.
        re_rounds = [conv.sep.join(rounds[:3])]
        for conv_idx in range(3, len(rounds), 2):
            re_rounds.append(conv.sep.join(rounds[conv_idx:(conv_idx + 2)]))
        # NOTE(review): cur_len starts at 0, so this mask slice is empty —
        # unlike preprocess_v1, no leading token is masked here.
        cur_len = 0
        target[:cur_len] = IGNORE_INDEX
        for (i, rou) in enumerate(re_rounds):
            if (rou == ''):
                break
            parts = rou.split(sep)
            if (len(parts) != 2):
                break
            parts[0] += sep
            # Add back the separator consumed by the split when counting tokens.
            round_len = (len(tokenizer(rou).input_ids) + len(tokenizer(conv.sep).input_ids))
            instruction_len = len(tokenizer(parts[0]).input_ids)
            # Mask the instruction part of this round; leave the reply visible.
            target[cur_len:(cur_len + instruction_len)] = IGNORE_INDEX
            cur_len += round_len
        target[cur_len:] = IGNORE_INDEX
        if (cur_len < tokenizer.model_max_length):
            # On drift, ignore the whole example rather than train misaligned.
            if (cur_len != total_len):
                target[:] = IGNORE_INDEX
                print(f'WARNING: tokenization mismatch: {cur_len} vs. {total_len}. (ignored)')
    return dict(input_ids=input_ids, labels=targets)
|
def preprocess(sources: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Turn conversation sources into tokenized inputs and masked labels.

    Given a list of sources, each a conversation list, this transform:
    1. Adds the signal '### ' at the beginning of each sentence, with end
       signal '\\n'.
    2. Concatenates conversations together.
    3. Tokenizes the concatenated conversation.
    4. Makes a deepcopy as the target and masks human words with IGNORE_INDEX.

    Dispatches to the template-specific v1/mpt variants when the default
    conversation requests them.
    """
    if (conversation_lib.default_conversation.version == 'v1'):
        return preprocess_v1(sources, tokenizer)
    if (conversation_lib.default_conversation.version == 'mpt'):
        return preprocess_mpt(sources, tokenizer)
    conversations = []
    for source in sources:
        # System prompt followed by a literal newline (part of the string).
        header = f'''{conversation_lib.default_conversation.system}
'''
        conversation = _add_speaker_and_signal(header, source)
        conversations.append(conversation)
    conversations_tokenized = _tokenize_fn(conversations, tokenizer)
    input_ids = conversations_tokenized['input_ids']
    targets = copy.deepcopy(input_ids)
    for (target, source) in zip(targets, sources):
        # `header` leaks from the loop above; it is the same value for every
        # source (the system prompt is constant), so reuse here is safe.
        tokenized_lens = _tokenize_fn(([header] + [s['value'] for s in source]), tokenizer)['input_ids_lens']
        speakers = [sentence['from'] for sentence in source]
        _mask_targets(target, tokenized_lens, speakers)
    return dict(input_ids=input_ids, labels=targets)
|
class SupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning (text-only, eager).

    Loads a JSON list of conversation records and tokenizes everything
    up front at construction time.
    """

    def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer):
        super(SupervisedDataset, self).__init__()
        logging.warning('Loading data...')
        # Fix: close the data file deterministically instead of leaking the
        # handle from a bare json.load(open(...)).
        with open(data_path, 'r') as f:
            list_data_dict = json.load(f)
        logging.warning('Formatting inputs...')
        sources = [example['conversations'] for example in list_data_dict]
        data_dict = preprocess(sources, tokenizer)
        self.input_ids = data_dict['input_ids']
        self.labels = data_dict['labels']

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, i) -> Dict[(str, torch.Tensor)]:
        return dict(input_ids=self.input_ids[i], labels=self.labels[i])
|
class LazySupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning with lazy per-item preprocessing.

    Records are loaded from JSON at construction, but tokenization and image
    preprocessing are deferred to `__getitem__`.
    """

    def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer, multimodal_cfg: dict):
        super(LazySupervisedDataset, self).__init__()
        logging.warning('Loading data...')
        # Fix: close the data file deterministically instead of leaking the
        # handle from a bare json.load(open(...)).
        with open(data_path, 'r') as f:
            list_data_dict = json.load(f)
        logging.warning('Formatting inputs...Skip in lazy mode')
        self.tokenizer = tokenizer
        self.list_data_dict = list_data_dict
        self.multimodal_cfg = multimodal_cfg

    def __len__(self):
        return len(self.list_data_dict)

    def __getitem__(self, i) -> Dict[(str, torch.Tensor)]:
        sources = self.list_data_dict[i]
        if isinstance(i, int):
            sources = [sources]
        assert (len(sources) == 1), "Don't know why it is wrapped to a list"
        if ('image' in sources[0]):
            image_file = self.list_data_dict[i]['image']
            image_folder = self.multimodal_cfg['image_folder']
            processor = self.multimodal_cfg['image_processor']
            image = Image.open(os.path.join(image_folder, image_file)).convert('RGB')
            if (self.multimodal_cfg['image_aspect_ratio'] == 'keep'):
                # Preserve aspect ratio: shortest edge capped at 224, longest
                # implicitly capped near 448.
                (max_hw, min_hw) = (max(image.size), min(image.size))
                aspect_ratio = (max_hw / min_hw)
                (max_len, min_len) = (448, 224)
                shortest_edge = int(min((max_len / aspect_ratio), min_len))
                image = processor.preprocess(image, return_tensors='pt', do_center_crop=False, size={'shortest_edge': shortest_edge})['pixel_values'][0]
            elif (self.multimodal_cfg['image_aspect_ratio'] == 'pad'):
                def expand2square(pil_img, background_color):
                    # Pad the shorter side so the image becomes square,
                    # centering the original content.
                    (width, height) = pil_img.size
                    if (width == height):
                        return pil_img
                    elif (width > height):
                        result = Image.new(pil_img.mode, (width, width), background_color)
                        result.paste(pil_img, (0, ((width - height) // 2)))
                        return result
                    else:
                        result = Image.new(pil_img.mode, (height, height), background_color)
                        result.paste(pil_img, (((height - width) // 2), 0))
                        return result
                image = expand2square(image, tuple((int((x * 255)) for x in processor.image_mean)))
                image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
            else:
                image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
            # 14 is presumably the ViT patch size; token count = (H/14)*(W/14).
            cur_token_len = ((image.shape[1] // 14) * (image.shape[2] // 14))
            sources = preprocess_multimodal(copy.deepcopy([e['conversations'] for e in sources]), self.multimodal_cfg, cur_token_len)
        else:
            sources = copy.deepcopy([e['conversations'] for e in sources])
        data_dict = preprocess(sources, self.tokenizer)
        if isinstance(i, int):
            data_dict = dict(input_ids=data_dict['input_ids'][0], labels=data_dict['labels'][0])
        if ('image' in self.list_data_dict[i]):
            data_dict['image'] = image
        elif self.multimodal_cfg['is_multimodal']:
            # Text-only sample in a multimodal run: supply an all-zero dummy
            # image so the collator can still stack tensors.
            crop_size = self.multimodal_cfg['image_processor'].crop_size
            data_dict['image'] = torch.zeros(3, crop_size['height'], crop_size['width'])
        return data_dict
|
@dataclass
class DataCollatorForSupervisedDataset(object):
    """Collate examples for supervised fine-tuning.

    Pads `input_ids` with the tokenizer's pad id and `labels` with
    IGNORE_INDEX, derives the attention mask from non-pad positions, and
    stacks images only when every example's image shares one shape.
    """
    tokenizer: transformers.PreTrainedTokenizer

    def __call__(self, instances: Sequence[Dict]) -> Dict[(str, torch.Tensor)]:
        input_ids = [inst['input_ids'] for inst in instances]
        labels = [inst['labels'] for inst in instances]
        input_ids = torch.nn.utils.rnn.pad_sequence(input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
        labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)
        batch = dict(input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(self.tokenizer.pad_token_id))
        if 'image' in instances[0]:
            images = [inst['image'] for inst in instances]
            uniform = all((img is not None) and (img.shape == images[0].shape) for img in images)
            # Mixed shapes (e.g. 'keep' aspect ratio) stay as a plain list.
            batch['images'] = torch.stack(images) if uniform else images
        return batch
|
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
    """Make dataset and collator for supervised fine-tuning.

    Returns a dict whose keys (`train_dataset`, `eval_dataset`,
    `data_collator`) match the keyword arguments of `transformers.Trainer`.
    """
    if data_args.lazy_preprocess:
        multimodal_cfg = dict(is_multimodal=data_args.is_multimodal, sep_image_conv_front=data_args.sep_image_conv_front, image_token_len=data_args.image_token_len, image_folder=data_args.image_folder, image_aspect_ratio=data_args.image_aspect_ratio, use_im_start_end=getattr(data_args, 'mm_use_im_start_end', False), image_processor=getattr(data_args, 'image_processor', None))
        train_dataset = LazySupervisedDataset(tokenizer=tokenizer, data_path=data_args.data_path, multimodal_cfg=multimodal_cfg)
    else:
        # Fix: SupervisedDataset.__init__ takes no multimodal_cfg, so passing
        # it unconditionally (as before) raised TypeError whenever
        # lazy_preprocess was False.
        train_dataset = SupervisedDataset(tokenizer=tokenizer, data_path=data_args.data_path)
    data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
    return dict(train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator)
|
def train():
    """Entry point for supervised fine-tuning of (multimodal) LLaVA models.

    Parses ModelArguments/DataArguments/TrainingArguments from the CLI,
    builds the model and tokenizer, optionally wires up the vision tower,
    then runs the HuggingFace Trainer loop and saves the final state.
    """
    parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
    (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Choose the causal-LM class: multimodal variants when a vision tower is
    # configured, plain LLaMA otherwise.
    if (model_args.vision_tower is not None):
        if ('mpt' in model_args.model_name_or_path):
            model = LlavaMPTForCausalLM.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir)
        elif model_args.with_spi:
            # Imported lazily so the SPI variant is only required when used.
            from gpt4roi.models.spi_llava import SPILlavaMPTForCausalLM
            model = SPILlavaMPTForCausalLM.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir)
        else:
            model = LlavaLlamaForCausalLM.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir)
    else:
        model = transformers.LlamaForCausalLM.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir)
    # Disable the generation KV cache for training.
    model.config.use_cache = False
    if model_args.freeze_backbone:
        model.model.requires_grad_(False)
    if ('mpt' in model_args.model_name_or_path):
        tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side='right')
    else:
        # Non-MPT checkpoints are loaded with the slow tokenizer (use_fast=False).
        tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side='right', use_fast=False)
    if (model_args.version == 'v0'):
        # v0 prompts: ensure a pad token exists, and add the standard special
        # tokens for LLaMA checkpoints.
        if (tokenizer.pad_token is None):
            smart_tokenizer_and_embedding_resize(special_tokens_dict=dict(pad_token=DEFAULT_PAD_TOKEN), tokenizer=tokenizer, model=model)
        if ('llama' in model_args.model_name_or_path):
            tokenizer.add_special_tokens({'eos_token': DEFAULT_EOS_TOKEN, 'bos_token': DEFAULT_BOS_TOKEN, 'unk_token': DEFAULT_UNK_TOKEN})
    else:
        # Later prompt versions reuse the unk token for padding and select a
        # conversation template by model family.
        tokenizer.pad_token = tokenizer.unk_token
        if ('mpt' in model_args.model_name_or_path):
            conversation_lib.default_conversation = conversation_lib.conv_templates['mpt']
        else:
            conversation_lib.default_conversation = conversation_lib.conv_templates['vicuna_v1_1']
    if (model_args.vision_tower is not None):
        # Build/attach the vision encoder plus the multimodal projector.
        model_vision_dict = model.get_model().initialize_vision_modules(vision_tower=model_args.vision_tower, mm_vision_select_layer=model_args.mm_vision_select_layer, pretrain_mm_mlp_adapter=model_args.pretrain_mm_mlp_adapter)
        dtype = torch.float32
        if training_args.fp16:
            dtype = torch.float16
        if training_args.bf16:
            # bf16 takes precedence when both half-precision flags are set.
            dtype = torch.bfloat16
        model.get_model().vision_tower[0].to(dtype=dtype, device=training_args.device)
        vision_config = model_vision_dict['vision_config']
        # Propagate vision-derived settings into the data pipeline config.
        data_args.image_token_len = model_vision_dict['image_token_len']
        data_args.image_processor = model_vision_dict['image_processor']
        data_args.is_multimodal = True
        model.config.tune_mm_mlp_adapter = training_args.tune_mm_mlp_adapter = model_args.tune_mm_mlp_adapter
        if model_args.tune_mm_mlp_adapter:
            # Train only the multimodal projector; freeze everything else.
            model.requires_grad_(False)
            for p in model.get_model().mm_projector.parameters():
                p.requires_grad = True
        model.config.freeze_mm_mlp_adapter = training_args.freeze_mm_mlp_adapter
        if training_args.freeze_mm_mlp_adapter:
            for p in model.get_model().mm_projector.parameters():
                p.requires_grad = False
        model.config.mm_use_im_start_end = data_args.mm_use_im_start_end = model_args.mm_use_im_start_end
        vision_config.use_im_start_end = training_args.use_im_start_end = model_args.mm_use_im_start_end
        model.config.sep_image_conv_front = data_args.sep_image_conv_front
        model.initialize_vision_tokenizer(mm_use_im_start_end=model_args.mm_use_im_start_end, tokenizer=tokenizer, device=training_args.device, tune_mm_mlp_adapter=model_args.tune_mm_mlp_adapter, pretrain_mm_mlp_adapter=model_args.pretrain_mm_mlp_adapter)
        params_no_grad = [n for (n, p) in model.named_parameters() if (not p.requires_grad)]
        if (os.environ.get('SAVE_MEMORY', '0') == '1'):
            # Opt-in memory saver: freeze + halve the whole model, keeping
            # only the LM head trainable.
            model.requires_grad_(False)
            model.half()
            model.lm_head.requires_grad_(True)
        if (len(params_no_grad) > 0):
            # FSDP with partially frozen parameters is experimental: warn and
            # monkey-patch FSDP.__init__ to default use_orig_params=True.
            if ((training_args.fsdp is not None) and (len(training_args.fsdp) > 0)):
                if (len(params_no_grad) < 10):
                    print('[WARNING] Attempting to use FSDP while {} parameters do not require gradients: {}'.format(len(params_no_grad), params_no_grad))
                else:
                    print('[WARNING] Attempting to use FSDP while {} parameters do not require gradients: {}...(omitted)'.format(len(params_no_grad), ', '.join(params_no_grad[:10])))
                print('[WARNING] Attempting to use FSDP with partially frozen paramters, this is experimental.')
                print('[WARNING] As of 4/30/23, this feature requires PyTorch-nightly build. See here for details: https://github.com/haotian-liu/LLaVA#experimental-use-fsdp-to-save-memory-in-pretraining')
                from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
                def patch_FSDP_use_orig_params(func):
                    # Wrap FSDP.__init__ so use_orig_params defaults to True
                    # unless the caller overrides it.
                    def wrap_func(*args, **kwargs):
                        use_orig_params = kwargs.pop('use_orig_params', True)
                        return func(*args, **kwargs, use_orig_params=use_orig_params)
                    return wrap_func
                FSDP.__init__ = patch_FSDP_use_orig_params(FSDP.__init__)
    data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)
    trainer = LLaVATrainer(model=model, tokenizer=tokenizer, args=training_args, **data_module)
    # Resume automatically when the output dir already holds checkpoints.
    if list(pathlib.Path(training_args.output_dir).glob('checkpoint-*')):
        trainer.train(resume_from_checkpoint=True)
    else:
        trainer.train()
    trainer.save_state()
    safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)
|
def build_logger(logger_name, logger_filename):
    """Create a logger that also mirrors stdout/stderr into a rotating file.

    Side effects: reformats the root logger's first handler, replaces
    sys.stdout / sys.stderr with StreamToLogger wrappers, and (once) attaches
    a shared timed-rotating file handler — stored in the module-level
    ``handler`` global — to every logger registered so far.
    """
    global handler
    formatter = logging.Formatter(fmt='%(asctime)s | %(levelname)s | %(name)s | %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    # Ensure the root logger has at least one handler, then apply our format.
    if (not logging.getLogger().handlers):
        logging.basicConfig(level=logging.INFO)
    logging.getLogger().handlers[0].setFormatter(formatter)
    # Redirect stdout -> INFO records and stderr -> ERROR records.
    stdout_logger = logging.getLogger('stdout')
    stdout_logger.setLevel(logging.INFO)
    sl = StreamToLogger(stdout_logger, logging.INFO)
    sys.stdout = sl
    stderr_logger = logging.getLogger('stderr')
    stderr_logger.setLevel(logging.ERROR)
    sl = StreamToLogger(stderr_logger, logging.ERROR)
    sys.stderr = sl
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)
    # Lazily create the shared file handler (rotated daily, UTC timestamps)
    # and attach it to every logger known at this point. NOTE(review):
    # loggers created after this call do not get the file handler.
    if (handler is None):
        os.makedirs(LOGDIR, exist_ok=True)
        filename = os.path.join(LOGDIR, logger_filename)
        handler = logging.handlers.TimedRotatingFileHandler(filename, when='D', utc=True)
        handler.setFormatter(formatter)
        for (name, item) in logging.root.manager.loggerDict.items():
            if isinstance(item, logging.Logger):
                item.addHandler(handler)
    return logger
|
class StreamToLogger(object):
    """Fake file-like stream that redirects writes to a logger instance.

    Complete lines are emitted immediately; a trailing partial line is
    buffered until the next write or an explicit flush().
    """

    def __init__(self, logger, log_level=logging.INFO):
        self.terminal = sys.stdout
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ''

    def __getattr__(self, attr):
        # Delegate any other file-API attribute to the real stdout.
        return getattr(self.terminal, attr)

    def write(self, buf):
        pending = self.linebuf + buf
        self.linebuf = ''
        for piece in pending.splitlines(True):
            if piece.endswith('\n'):
                # Full line: log it without the trailing whitespace.
                self.logger.log(self.log_level, piece.rstrip())
            else:
                # Partial line: keep it for the next write()/flush().
                self.linebuf += piece

    def flush(self):
        if self.linebuf:
            self.logger.log(self.log_level, self.linebuf.rstrip())
        self.linebuf = ''
|
def disable_torch_init():
    """Disable the redundant torch default initialization to accelerate
    model creation (Linear/LayerNorm reset_parameters become no-ops)."""
    import torch
    noop = lambda self: None
    torch.nn.Linear.reset_parameters = noop
    torch.nn.LayerNorm.reset_parameters = noop
|
def violates_moderation(text):
    """
    Check whether the text violates OpenAI moderation API.

    Returns True when the API flags the text; returns False on any request
    failure or unexpected response shape (fail-open, as before).
    """
    import json
    url = 'https://api.openai.com/v1/moderations'
    headers = {'Content-Type': 'application/json', 'Authorization': ('Bearer ' + os.environ['OPENAI_API_KEY'])}
    text = text.replace('\n', '')
    # BUG FIX: the payload was previously assembled by string concatenation,
    # which produced invalid JSON whenever `text` contained '"' or '\'.
    # json.dumps performs proper escaping.
    data = json.dumps({'input': text}).encode('utf-8')
    try:
        ret = requests.post(url, headers=headers, data=data, timeout=5)
        flagged = ret.json()['results'][0]['flagged']
    except (requests.exceptions.RequestException, KeyError):
        # Fail open: treat network errors / malformed responses as not flagged.
        flagged = False
    return flagged
|
def pretty_print_semaphore(semaphore):
    """Return a short human-readable description of a semaphore ('None' for None)."""
    return 'None' if semaphore is None else f'Semaphore(value={semaphore._value}, locked={semaphore.locked()})'
|
def check_installation():
    """Check whether mmcv-full has been installed successfully by running
    the compiled `box_iou_rotated` op on CPU (and on CUDA when available)."""
    np_boxes1 = np.asarray([[1.0, 1.0, 3.0, 4.0, 0.5], [2.0, 2.0, 3.0, 4.0, 0.6], [7.0, 7.0, 8.0, 8.0, 0.4]], dtype=np.float32)
    np_boxes2 = np.asarray([[0.0, 2.0, 2.0, 5.0, 0.3], [2.0, 1.0, 3.0, 3.0, 0.5], [5.0, 5.0, 6.0, 7.0, 0.4]], dtype=np.float32)
    boxes1 = torch.from_numpy(np_boxes1)
    boxes2 = torch.from_numpy(np_boxes2)
    # A successful call proves the CPU extension compiled.
    box_iou_rotated(boxes1, boxes2)
    print('CPU ops were compiled successfully.')
    if not torch.cuda.is_available():
        print('No CUDA runtime is found, skipping the checking of CUDA ops.')
        return
    # Repeat on GPU tensors to exercise the CUDA kernels.
    box_iou_rotated(boxes1.cuda(), boxes2.cuda())
    print('CUDA ops were compiled successfully.')
|
class Model(nn.Module):
    """Small LeNet-style CNN classifier for 3x32x32 inputs (10 classes)."""

    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, x):
        # Two conv -> relu -> pool stages, then the MLP head.
        for conv in (self.conv1, self.conv2):
            x = self.pool(F.relu(conv(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

    def train_step(self, data, optimizer):
        """Compute the classification loss for one (images, labels) batch.

        `optimizer` is accepted for interface compatibility but unused here.
        """
        images, labels = data
        loss = self.loss_fn(self(images), labels)
        return {'loss': loss}
|
def quantize(arr, min_val, max_val, levels, dtype=np.int64):
    """Quantize an array of (-inf, inf) to [0, levels-1].

    Args:
        arr (ndarray): Input array.
        min_val (scalar): Minimum value to be clipped.
        max_val (scalar): Maximum value to be clipped.
        levels (int): Quantization levels.
        dtype (np.type): The type of the quantized array.

    Returns:
        ndarray: Quantized array.
    """
    if not isinstance(levels, int) or levels <= 1:
        raise ValueError(f'levels must be a positive integer, but got {levels}')
    if min_val >= max_val:
        raise ValueError(f'min_val ({min_val}) must be smaller than max_val ({max_val})')
    # Shift the clipped values so the range starts at zero, scale to the
    # number of levels, then cap at levels-1 (max_val would otherwise map
    # to `levels`).
    shifted = np.clip(arr, min_val, max_val) - min_val
    scaled = np.floor(levels * shifted / (max_val - min_val)).astype(dtype)
    return np.minimum(scaled, levels - 1)
|
def dequantize(arr, min_val, max_val, levels, dtype=np.float64):
    """Dequantize an array.

    Args:
        arr (ndarray): Input array.
        min_val (scalar): Minimum value to be clipped.
        max_val (scalar): Maximum value to be clipped.
        levels (int): Quantization levels.
        dtype (np.type): The type of the dequantized array.

    Returns:
        ndarray: Dequantized array.
    """
    if not isinstance(levels, int) or levels <= 1:
        raise ValueError(f'levels must be a positive integer, but got {levels}')
    if min_val >= max_val:
        raise ValueError(f'min_val ({min_val}) must be smaller than max_val ({max_val})')
    # Map each bin index to the center of its bin within [min_val, max_val].
    centers = (arr + 0.5).astype(dtype)
    return centers * (max_val - min_val) / levels + min_val
|
class AlexNet(nn.Module):
    """AlexNet backbone.

    Args:
        num_classes (int): Number of classes for classification. A
            non-positive value disables the classifier head, so forward()
            returns the raw feature map.
    """

    def __init__(self, num_classes=-1):
        super(AlexNet, self).__init__()
        self.num_classes = num_classes
        conv_layers = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*conv_layers)
        if self.num_classes > 0:
            self.classifier = nn.Sequential(
                nn.Dropout(),
                nn.Linear(256 * 6 * 6, 4096),
                nn.ReLU(inplace=True),
                nn.Dropout(),
                nn.Linear(4096, 4096),
                nn.ReLU(inplace=True),
                nn.Linear(4096, num_classes),
            )

    def init_weights(self, pretrained=None):
        """Load pretrained weights from a checkpoint path; no-op for None."""
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError('pretrained must be a str or None')
        logger = logging.getLogger()
        from ..runner import load_checkpoint
        load_checkpoint(self, pretrained, strict=False, logger=logger)

    def forward(self, x):
        x = self.features(x)
        if self.num_classes > 0:
            # Flatten the 256x6x6 feature map before the MLP head.
            x = self.classifier(x.view(x.size(0), 256 * 6 * 6))
        return x
|
@ACTIVATION_LAYERS.register_module(name='Clip')
@ACTIVATION_LAYERS.register_module()
class Clamp(nn.Module):
    """Clamp activation layer.

    Restricts the feature map values to the range ``[min, max]``; see
    ``torch.clamp()`` for details. Registered both as 'Clamp' and 'Clip'.

    Args:
        min (Number | optional): Lower-bound of the clamped range.
            Default: -1.
        max (Number | optional): Upper-bound of the clamped range.
            Default: 1.
    """

    def __init__(self, min=-1.0, max=1.0):
        super(Clamp, self).__init__()
        self.min = min
        self.max = max

    def forward(self, x):
        """Clamp `x` elementwise to ``[self.min, self.max]``."""
        return torch.clamp(x, min=self.min, max=self.max)
|
class GELU(nn.Module):
    """Gaussian Error Linear Unit: ``GELU(x) = x * Phi(x)``, where ``Phi``
    is the CDF of the standard normal distribution.

    Shape:
        - Input: ``(N, *)`` where ``*`` means any number of extra dimensions.
        - Output: ``(N, *)``, same shape as the input.

    Example::

        >>> m = GELU()
        >>> output = m(torch.randn(2))
    """

    def forward(self, input):
        # Delegate to the functional implementation.
        return F.gelu(input)
|
def build_activation_layer(cfg):
    """Build activation layer.

    Args:
        cfg (dict): The activation layer config, which should contain:

            - type (str): Layer type.
            - layer args: Args needed to instantiate an activation layer.

    Returns:
        nn.Module: Created activation layer.
    """
    # Thin wrapper: resolves `type` against the ACTIVATION_LAYERS registry.
    return build_from_cfg(cfg, ACTIVATION_LAYERS)
|
def last_zero_init(m):
    """Zero-initialize ``m``, or its final layer when it is an nn.Sequential."""
    target = m[-1] if isinstance(m, nn.Sequential) else m
    constant_init(target, val=0)
|
@PLUGIN_LAYERS.register_module()
class ContextBlock(nn.Module):
    """ContextBlock module in GCNet.

    See 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and
    Beyond' (https://arxiv.org/abs/1904.11492) for details.

    Args:
        in_channels (int): Channels of the input feature map.
        ratio (float): Ratio of channels of transform bottleneck
        pooling_type (str): Pooling method for context modeling.
            Options are 'att' and 'avg', stand for attention pooling and
            average pooling respectively. Default: 'att'.
        fusion_types (Sequence[str]): Fusion method for feature fusion,
            Options are 'channels_add', 'channel_mul', stand for channelwise
            addition and multiplication respectively. Default: ('channel_add',)
    """
    # Abbreviation used when naming plugin layer instances.
    _abbr_ = 'context_block'

    def __init__(self, in_channels, ratio, pooling_type='att', fusion_types=('channel_add',)):
        super(ContextBlock, self).__init__()
        assert (pooling_type in ['avg', 'att'])
        assert isinstance(fusion_types, (list, tuple))
        valid_fusion_types = ['channel_add', 'channel_mul']
        assert all([(f in valid_fusion_types) for f in fusion_types])
        assert (len(fusion_types) > 0), 'at least one fusion should be used'
        self.in_channels = in_channels
        self.ratio = ratio
        # Bottleneck width of the channel transform branches.
        self.planes = int((in_channels * ratio))
        self.pooling_type = pooling_type
        self.fusion_types = fusion_types
        if (pooling_type == 'att'):
            # 1x1 conv produces one attention logit per spatial position;
            # softmax over the flattened spatial dim (dim=2) yields weights.
            self.conv_mask = nn.Conv2d(in_channels, 1, kernel_size=1)
            self.softmax = nn.Softmax(dim=2)
        else:
            self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Fusion branches: 1x1-conv bottleneck with LayerNorm + ReLU inside.
        if ('channel_add' in fusion_types):
            self.channel_add_conv = nn.Sequential(nn.Conv2d(self.in_channels, self.planes, kernel_size=1), nn.LayerNorm([self.planes, 1, 1]), nn.ReLU(inplace=True), nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
        else:
            self.channel_add_conv = None
        if ('channel_mul' in fusion_types):
            self.channel_mul_conv = nn.Sequential(nn.Conv2d(self.in_channels, self.planes, kernel_size=1), nn.LayerNorm([self.planes, 1, 1]), nn.ReLU(inplace=True), nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
        else:
            self.channel_mul_conv = None
        self.reset_parameters()

    def reset_parameters(self):
        # Kaiming-init the attention mask conv; zero-init the final conv of
        # each fusion branch (via last_zero_init).
        if (self.pooling_type == 'att'):
            kaiming_init(self.conv_mask, mode='fan_in')
            self.conv_mask.inited = True
        if (self.channel_add_conv is not None):
            last_zero_init(self.channel_add_conv)
        if (self.channel_mul_conv is not None):
            last_zero_init(self.channel_mul_conv)

    def spatial_pool(self, x):
        """Pool an (N, C, H, W) input into an (N, C, 1, 1) context vector."""
        (batch, channel, height, width) = x.size()
        if (self.pooling_type == 'att'):
            input_x = x
            # [N, 1, C, H*W]
            input_x = input_x.view(batch, channel, (height * width))
            input_x = input_x.unsqueeze(1)
            # [N, 1, H*W]: per-position attention weights.
            context_mask = self.conv_mask(x)
            context_mask = context_mask.view(batch, 1, (height * width))
            context_mask = self.softmax(context_mask)
            # [N, 1, H*W, 1]
            context_mask = context_mask.unsqueeze((- 1))
            # Attention-weighted sum over positions -> [N, 1, C, 1].
            context = torch.matmul(input_x, context_mask)
            context = context.view(batch, channel, 1, 1)
        else:
            context = self.avg_pool(x)
        return context

    def forward(self, x):
        context = self.spatial_pool(x)
        out = x
        if (self.channel_mul_conv is not None):
            # Channelwise scaling by a sigmoid-gated transform of the context.
            channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))
            out = (out * channel_mul_term)
        if (self.channel_add_conv is not None):
            # Channelwise addition of the transformed context.
            channel_add_term = self.channel_add_conv(context)
            out = (out + channel_add_term)
        return out
|
def build_conv_layer(cfg, *args, **kwargs):
    """Build convolution layer.

    Args:
        cfg (None or dict): The conv layer config, which should contain:
            - type (str): Layer type.
            - layer args: Args needed to instantiate an conv layer.
        args (argument list): Arguments passed to the ``__init__`` method of
            the corresponding conv layer.
        kwargs (keyword arguments): Keyword arguments passed to the
            ``__init__`` method of the corresponding conv layer.

    Returns:
        nn.Module: Created conv layer.
    """
    # Validate the config up front; None falls back to a plain Conv2d.
    if cfg is None:
        cfg_ = dict(type='Conv2d')
    elif not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    elif 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    else:
        cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in CONV_LAYERS:
        raise KeyError(f'Unrecognized layer type {layer_type}')
    conv_layer = CONV_LAYERS.get(layer_type)
    # Remaining cfg entries become constructor keyword arguments.
    return conv_layer(*args, **kwargs, **cfg_)
|
@CONV_LAYERS.register_module()
class Conv2dAdaptivePadding(nn.Conv2d):
    """2D convolution with TensorFlow-style "same" padding.

    Pads the input (when needed) so that it is fully covered by the filter
    and stride: with stride 1 the output matches the input size; with
    stride 2 the output dimensions are halved.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        # The base conv is created with zero padding; all padding is applied
        # dynamically in forward().
        super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)

    def forward(self, x):
        in_h, in_w = x.size()[-2:]
        k_h, k_w = self.weight.size()[-2:]
        s_h, s_w = self.stride
        # Target output size for "same" padding.
        out_h = math.ceil(in_h / s_h)
        out_w = math.ceil(in_w / s_w)
        # Total padding needed so the kernel covers the whole input.
        pad_h = max((out_h - 1) * self.stride[0] + (k_h - 1) * self.dilation[0] + 1 - in_h, 0)
        pad_w = max((out_w - 1) * self.stride[1] + (k_w - 1) * self.dilation[1] + 1 - in_w, 0)
        if pad_h > 0 or pad_w > 0:
            # Split as evenly as possible; the extra pixel goes right/bottom.
            left = pad_w // 2
            top = pad_h // 2
            x = F.pad(x, [left, pad_w - left, top, pad_h - top])
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
|
@PLUGIN_LAYERS.register_module()
class ConvModule(nn.Module):
    """A conv block that bundles conv/norm/activation layers.

    This block simplifies the usage of convolution layers, which are commonly
    used with a norm layer (e.g., BatchNorm) and activation layer (e.g.,
    ReLU). It is based upon three build methods: ``build_conv_layer()``,
    ``build_norm_layer()`` and ``build_activation_layer()``.

    Besides, we add some additional features in this module.
    1. Automatically set ``bias`` of the conv layer.
    2. Spectral norm is supported.
    3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only
       supports zero and circular padding, and we add "reflect" padding mode.

    Args:
        in_channels (int): Number of channels in the input feature map.
            Same as that in ``nn._ConvNd``.
        out_channels (int): Number of channels produced by the convolution.
            Same as that in ``nn._ConvNd``.
        kernel_size (int | tuple[int]): Size of the convolving kernel.
            Same as that in ``nn._ConvNd``.
        stride (int | tuple[int]): Stride of the convolution.
            Same as that in ``nn._ConvNd``.
        padding (int | tuple[int]): Zero-padding added to both sides of
            the input. Same as that in ``nn._ConvNd``.
        dilation (int | tuple[int]): Spacing between kernel elements.
            Same as that in ``nn._ConvNd``.
        groups (int): Number of blocked connections from input channels to
            output channels. Same as that in ``nn._ConvNd``.
        bias (bool | str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if `norm_cfg` is None,
            otherwise False. Default: "auto".
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
        inplace (bool): Whether to use inplace mode for activation.
            Default: True.
        with_spectral_norm (bool): Whether use spectral norm in conv module.
            Default: False.
        padding_mode (str): If the `padding_mode` has not been supported by
            current `Conv2d` in PyTorch, we will use our own padding layer
            instead. Currently, we support ['zeros', 'circular'] with official
            implementation and ['reflect'] with our own implementation.
            Default: 'zeros'.
        order (tuple[str]): The order of conv/norm/activation layers. It is a
            sequence of "conv", "norm" and "act". Common examples are
            ("conv", "norm", "act") and ("act", "conv", "norm").
            Default: ('conv', 'norm', 'act').
    """
    _abbr_ = 'conv_block'

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias='auto', conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), inplace=True, with_spectral_norm=False, padding_mode='zeros', order=('conv', 'norm', 'act')):
        super(ConvModule, self).__init__()
        assert ((conv_cfg is None) or isinstance(conv_cfg, dict))
        assert ((norm_cfg is None) or isinstance(norm_cfg, dict))
        assert ((act_cfg is None) or isinstance(act_cfg, dict))
        # Padding modes handled natively by the conv layer; anything else is
        # done by an explicit padding layer applied before the conv.
        official_padding_mode = ['zeros', 'circular']
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.inplace = inplace
        self.with_spectral_norm = with_spectral_norm
        self.with_explicit_padding = (padding_mode not in official_padding_mode)
        self.order = order
        assert (isinstance(self.order, tuple) and (len(self.order) == 3))
        assert (set(order) == set(['conv', 'norm', 'act']))
        self.with_norm = (norm_cfg is not None)
        self.with_activation = (act_cfg is not None)
        # 'auto': the conv bias is redundant when a norm layer follows.
        if (bias == 'auto'):
            bias = (not self.with_norm)
        self.with_bias = bias
        if self.with_explicit_padding:
            pad_cfg = dict(type=padding_mode)
            self.padding_layer = build_padding_layer(pad_cfg, padding)
        # With explicit padding the conv itself must not pad again.
        conv_padding = (0 if self.with_explicit_padding else padding)
        self.conv = build_conv_layer(conv_cfg, in_channels, out_channels, kernel_size, stride=stride, padding=conv_padding, dilation=dilation, groups=groups, bias=bias)
        # Mirror the conv's attributes for convenient external access.
        self.in_channels = self.conv.in_channels
        self.out_channels = self.conv.out_channels
        self.kernel_size = self.conv.kernel_size
        self.stride = self.conv.stride
        self.padding = padding
        self.dilation = self.conv.dilation
        self.transposed = self.conv.transposed
        self.output_padding = self.conv.output_padding
        self.groups = self.conv.groups
        if self.with_spectral_norm:
            self.conv = nn.utils.spectral_norm(self.conv)
        if self.with_norm:
            # Norm placed after the conv normalizes out_channels; before the
            # conv it normalizes in_channels.
            if (order.index('norm') > order.index('conv')):
                norm_channels = out_channels
            else:
                norm_channels = in_channels
            (self.norm_name, norm) = build_norm_layer(norm_cfg, norm_channels)
            self.add_module(self.norm_name, norm)
            if self.with_bias:
                if isinstance(norm, (_BatchNorm, _InstanceNorm)):
                    warnings.warn('Unnecessary conv bias before batch/instance norm')
        else:
            self.norm_name = None
        if self.with_activation:
            act_cfg_ = act_cfg.copy()
            # Only forward `inplace` to activation types outside this list
            # (the listed ones do not take an `inplace` argument).
            if (act_cfg_['type'] not in ['Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish']):
                act_cfg_.setdefault('inplace', inplace)
            self.activate = build_activation_layer(act_cfg_)
        self.init_weights()

    @property
    def norm(self):
        # Resolve the dynamically named norm submodule; None when absent.
        if self.norm_name:
            return getattr(self, self.norm_name)
        else:
            return None

    def init_weights(self):
        # Conv layers providing their own `init_weights` initialize
        # themselves; otherwise use Kaiming init matched to the activation.
        if (not hasattr(self.conv, 'init_weights')):
            if (self.with_activation and (self.act_cfg['type'] == 'LeakyReLU')):
                nonlinearity = 'leaky_relu'
                a = self.act_cfg.get('negative_slope', 0.01)
            else:
                nonlinearity = 'relu'
                a = 0
            kaiming_init(self.conv, a=a, nonlinearity=nonlinearity)
        if self.with_norm:
            constant_init(self.norm, 1, bias=0)

    def forward(self, x, activate=True, norm=True):
        """Apply the configured layers in ``self.order``.

        Args:
            x (Tensor): Input tensor.
            activate (bool): Whether to run the activation stage this call.
            norm (bool): Whether to run the norm stage this call.
        """
        for layer in self.order:
            if (layer == 'conv'):
                if self.with_explicit_padding:
                    x = self.padding_layer(x)
                x = self.conv(x)
            elif ((layer == 'norm') and norm and self.with_norm):
                x = self.norm(x)
            elif ((layer == 'act') and activate and self.with_activation):
                x = self.activate(x)
        return x
|
def conv_ws_2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, eps=1e-05):
    """conv2d with Weight Standardization: the kernel is standardized to
    zero mean and unit std per output channel before the convolution.

    `eps` guards against division by zero for constant kernels.
    """
    out_channels = weight.size(0)
    flat = weight.view(out_channels, -1)
    mu = flat.mean(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    sigma = flat.std(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    standardized = (weight - mu) / (sigma + eps)
    return F.conv2d(input, standardized, bias, stride, padding, dilation, groups)
|
@CONV_LAYERS.register_module('ConvWS')
class ConvWS2d(nn.Conv2d):
    """Conv2d that standardizes its weights on every forward pass.

    All constructor arguments mirror ``nn.Conv2d``; ``eps`` stabilizes the
    division by the per-channel weight std (see ``conv_ws_2d``).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, eps=1e-05):
        super(ConvWS2d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
        )
        self.eps = eps

    def forward(self, x):
        # Delegate to the functional weight-standardized conv.
        return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, self.eps)
|
@CONV_LAYERS.register_module(name='ConvAWS')
class ConvAWS2d(nn.Conv2d):
    """AWS (Adaptive Weight Standardization)

    This is a variant of Weight Standardization
    (https://arxiv.org/pdf/1903.10520.pdf)
    It is used in DetectoRS to avoid NaN
    (https://arxiv.org/pdf/2006.02334.pdf)

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the conv kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements.
            Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If set True, adds a learnable bias to the
            output. Default: True
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        super().__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        # Per-output-channel scale/shift applied to the standardized weights.
        # Registered as buffers so they travel with the state dict but are
        # not optimized as parameters.
        self.register_buffer('weight_gamma', torch.ones(self.out_channels, 1, 1, 1))
        self.register_buffer('weight_beta', torch.zeros(self.out_channels, 1, 1, 1))

    def _get_weight(self, weight):
        # Standardize per output channel, then rescale/shift with
        # weight_gamma / weight_beta.
        weight_flat = weight.view(weight.size(0), (- 1))
        mean = weight_flat.mean(dim=1).view((- 1), 1, 1, 1)
        std = torch.sqrt((weight_flat.var(dim=1) + 1e-05)).view((- 1), 1, 1, 1)
        weight = ((weight - mean) / std)
        weight = ((self.weight_gamma * weight) + self.weight_beta)
        return weight

    def forward(self, x):
        weight = self._get_weight(self.weight)
        return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        """Override default load function.

        AWS overrides the function _load_from_state_dict to recover
        weight_gamma and weight_beta if they are missing. If weight_gamma and
        weight_beta are found in the checkpoint, this function will return
        after super()._load_from_state_dict. Otherwise, it will compute the
        mean and std of the pretrained weights and store them in weight_beta
        and weight_gamma.
        """
        # Sentinel: fill gamma with -1 first. If the checkpoint carries
        # weight_gamma it overwrites this, making mean() > 0 below.
        self.weight_gamma.data.fill_((- 1))
        local_missing_keys = []
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, local_missing_keys, unexpected_keys, error_msgs)
        if (self.weight_gamma.data.mean() > 0):
            # gamma/beta were present in the checkpoint: forward all missing
            # keys unchanged and finish.
            for k in local_missing_keys:
                missing_keys.append(k)
            return
        # gamma/beta absent: reconstruct them from the statistics of the
        # just-loaded (non-AWS) weights.
        weight = self.weight.data
        weight_flat = weight.view(weight.size(0), (- 1))
        mean = weight_flat.mean(dim=1).view((- 1), 1, 1, 1)
        std = torch.sqrt((weight_flat.var(dim=1) + 1e-05)).view((- 1), 1, 1, 1)
        self.weight_beta.data.copy_(mean)
        self.weight_gamma.data.copy_(std)
        # Don't report gamma/beta as missing — they were just reconstructed.
        missing_gamma_beta = [k for k in local_missing_keys if (k.endswith('weight_gamma') or k.endswith('weight_beta'))]
        for k in missing_gamma_beta:
            local_missing_keys.remove(k)
        for k in local_missing_keys:
            missing_keys.append(k)
|
class DepthwiseSeparableConvModule(nn.Module):
"Depthwise separable convolution module.\n\n See https://arxiv.org/pdf/1704.04861.pdf for details.\n\n This module can replace a ConvModule with the conv block replaced by two\n conv block: depthwise conv block and pointwise conv block. The depthwise\n conv block contains depthwise-conv/norm/activation layers. The pointwise\n conv block contains pointwise-conv/norm/activation layers. It should be\n noted that there will be norm/activation layer in the depthwise conv block\n if `norm_cfg` and `act_cfg` are specified.\n\n Args:\n in_channels (int): Number of channels in the input feature map.\n Same as that in ``nn._ConvNd``.\n out_channels (int): Number of channels produced by the convolution.\n Same as that in ``nn._ConvNd``.\n kernel_size (int | tuple[int]): Size of the convolving kernel.\n Same as that in ``nn._ConvNd``.\n stride (int | tuple[int]): Stride of the convolution.\n Same as that in ``nn._ConvNd``. Default: 1.\n padding (int | tuple[int]): Zero-padding added to both sides of\n the input. Same as that in ``nn._ConvNd``. Default: 0.\n dilation (int | tuple[int]): Spacing between kernel elements.\n Same as that in ``nn._ConvNd``. Default: 1.\n norm_cfg (dict): Default norm config for both depthwise ConvModule and\n pointwise ConvModule. Default: None.\n act_cfg (dict): Default activation config for both depthwise ConvModule\n and pointwise ConvModule. Default: dict(type='ReLU').\n dw_norm_cfg (dict): Norm config of depthwise ConvModule. If it is\n 'default', it will be the same as `norm_cfg`. Default: 'default'.\n dw_act_cfg (dict): Activation config of depthwise ConvModule. If it is\n 'default', it will be the same as `act_cfg`. Default: 'default'.\n pw_norm_cfg (dict): Norm config of pointwise ConvModule. If it is\n 'default', it will be the same as `norm_cfg`. Default: 'default'.\n pw_act_cfg (dict): Activation config of pointwise ConvModule. If it is\n 'default', it will be the same as `act_cfg`. 
Default: 'default'.\n kwargs (optional): Other shared arguments for depthwise and pointwise\n ConvModule. See ConvModule for ref.\n "
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, norm_cfg=None, act_cfg=dict(type='ReLU'), dw_norm_cfg='default', dw_act_cfg='default', pw_norm_cfg='default', pw_act_cfg='default', **kwargs):
    """Build the depthwise and pointwise ConvModule pair.

    ``groups`` must not appear in ``kwargs``: the depthwise conv fixes it
    to ``in_channels`` itself.
    """
    super(DepthwiseSeparableConvModule, self).__init__()
    assert 'groups' not in kwargs, 'groups should not be specified'
    # 'default' means "inherit the shared norm/act config"; an explicit
    # value (including None) overrides it per branch.
    if dw_norm_cfg == 'default':
        dw_norm_cfg = norm_cfg
    if dw_act_cfg == 'default':
        dw_act_cfg = act_cfg
    if pw_norm_cfg == 'default':
        pw_norm_cfg = norm_cfg
    if pw_act_cfg == 'default':
        pw_act_cfg = act_cfg
    # Depthwise block: one filter per input channel (groups == in_channels).
    self.depthwise_conv = ConvModule(
        in_channels,
        in_channels,
        kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=in_channels,
        norm_cfg=dw_norm_cfg,
        act_cfg=dw_act_cfg,
        **kwargs)
    # Pointwise block: 1x1 conv that mixes channels to out_channels.
    self.pointwise_conv = ConvModule(
        in_channels,
        out_channels,
        1,
        norm_cfg=pw_norm_cfg,
        act_cfg=pw_act_cfg,
        **kwargs)
def forward(self, x):
    """Run the depthwise block, then the pointwise block, in sequence."""
    out = self.depthwise_conv(x)
    return self.pointwise_conv(out)
|
def drop_path(x, drop_prob=0.0, training=False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path
    of residual blocks).

    We follow the implementation
    https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501
    """
    # Identity during eval, or when dropping is disabled.
    if not training or drop_prob == 0.0:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli(keep_prob) draw per sample, broadcast across all
    # non-batch dimensions: mask shape is (N, 1, ..., 1).
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    binary_mask = (torch.rand(mask_shape, dtype=x.dtype, device=x.device) + keep_prob).floor()
    # Rescale surviving paths by 1/keep_prob so the expectation is unchanged.
    return x.div(keep_prob) * binary_mask
|
@DROPOUT_LAYERS.register_module()
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path
    of residual blocks).

    We follow the implementation
    https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501

    Args:
        drop_prob (float): Probability of the path to be zeroed. Default: 0.1
    """

    def __init__(self, drop_prob=0.1):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # Delegate to the functional form; only active in training mode.
        return drop_path(x, self.drop_prob, self.training)
|
@DROPOUT_LAYERS.register_module()
class Dropout(nn.Dropout):
    """A wrapper for ``torch.nn.Dropout``. We rename the ``p`` of
    ``torch.nn.Dropout`` to ``drop_prob`` so as to be consistent with
    ``DropPath``.

    Args:
        drop_prob (float): Probability of the elements to be
            zeroed. Default: 0.5.
        inplace (bool): Do the operation inplace or not. Default: False.
    """

    def __init__(self, drop_prob=0.5, inplace=False):
        # Pure renaming shim: forwards drop_prob as nn.Dropout's ``p``.
        super().__init__(p=drop_prob, inplace=inplace)
|
def build_dropout(cfg, default_args=None):
    """Builder for drop out layers.

    Args:
        cfg (dict): The dropout layer config, which should contain a ``type``
            key registered in ``DROPOUT_LAYERS`` plus the layer's
            construction args.
        default_args (dict, optional): Default arguments passed through to
            ``build_from_cfg``. Default: None.

    Returns:
        The dropout layer constructed from the registry.
    """
    return build_from_cfg(cfg, DROPOUT_LAYERS, default_args)
|
@ACTIVATION_LAYERS.register_module()
class HSigmoid(nn.Module):
    """Hard Sigmoid Module. Apply the hard sigmoid function:
    Hsigmoid(x) = min(max((x + bias) / divisor, min_value), max_value)
    Default: Hsigmoid(x) = min(max((x + 3) / 6, 0), 1)

    Note:
        In MMCV v1.4.4, we modified the default value of args to align with
        PyTorch official.

    Args:
        bias (float): Bias of the input feature map. Default: 3.0.
        divisor (float): Divisor of the input feature map. Default: 6.0.
        min_value (float): Lower bound value. Default: 0.0.
        max_value (float): Upper bound value. Default: 1.0.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self, bias=3.0, divisor=6.0, min_value=0.0, max_value=1.0):
        super(HSigmoid, self).__init__()
        warnings.warn(
            'In MMCV v1.4.4, we modified the default value of args to align '
            'with PyTorch official. Previous Implementation: '
            'Hsigmoid(x) = min(max((x + 1) / 2, 0), 1). '
            'Current Implementation: '
            'Hsigmoid(x) = min(max((x + 3) / 6, 0), 1).')
        self.bias = bias
        self.divisor = divisor
        assert self.divisor != 0
        self.min_value = min_value
        self.max_value = max_value

    def forward(self, x):
        shifted = (x + self.bias) / self.divisor
        # clamp_ mutates ``shifted``, a fresh tensor, so the input is untouched.
        return shifted.clamp_(self.min_value, self.max_value)
|
class HSwish(nn.Module):
    """Hard Swish Module.

    This module applies the hard swish function:

    .. math::
        Hswish(x) = x * ReLU6(x + 3) / 6

    Args:
        inplace (bool): can optionally do the operation in-place.
            Default: False.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self, inplace=False):
        super(HSwish, self).__init__()
        # ReLU6 supplies the min(max(x + 3, 0), 6) gate.
        self.act = nn.ReLU6(inplace)

    def forward(self, x):
        gate = self.act(x + 3)
        return x * gate / 6
|
class _NonLocalNd(nn.Module, metaclass=ABCMeta):
    """Basic Non-local module.

    This module is proposed in
    "Non-local Neural Networks"
    Paper reference: https://arxiv.org/abs/1711.07971
    Code reference: https://github.com/AlexHex7/Non-local_pytorch

    Args:
        in_channels (int): Channels of the input feature map.
        reduction (int): Channel reduction ratio. Default: 2.
        use_scale (bool): Whether to scale pairwise_weight by
            `1/sqrt(inter_channels)` when the mode is `embedded_gaussian`.
            Default: True.
        conv_cfg (None | dict): The config dict for convolution layers.
            If not specified, it will use `nn.Conv2d` for convolution layers.
            Default: None.
        norm_cfg (None | dict): The config dict for normalization layers.
            Default: None. (This parameter is only applicable to conv_out.)
        mode (str): Options are `gaussian`, `concatenation`,
            `embedded_gaussian` and `dot_product`. Default: embedded_gaussian.
    """

    def __init__(self, in_channels, reduction=2, use_scale=True, conv_cfg=None, norm_cfg=None, mode='embedded_gaussian', **kwargs):
        super(_NonLocalNd, self).__init__()
        self.in_channels = in_channels
        self.reduction = reduction
        self.use_scale = use_scale
        # Reduced channel count for the internal projections, floored at 1.
        self.inter_channels = max((in_channels // reduction), 1)
        self.mode = mode
        if (mode not in ['gaussian', 'embedded_gaussian', 'dot_product', 'concatenation']):
            raise ValueError(f"Mode should be in 'gaussian', 'concatenation', 'embedded_gaussian' or 'dot_product', but got {mode} instead.")
        # g: value projection (no activation).
        self.g = ConvModule(self.in_channels, self.inter_channels, kernel_size=1, conv_cfg=conv_cfg, act_cfg=None)
        # conv_out: projects back to in_channels; the only branch that
        # receives norm_cfg (see class docstring).
        self.conv_out = ConvModule(self.inter_channels, self.in_channels, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)
        if (self.mode != 'gaussian'):
            # theta/phi: query/key projections. In 'gaussian' mode the raw
            # input x is used instead (see forward), so they are not built.
            self.theta = ConvModule(self.in_channels, self.inter_channels, kernel_size=1, conv_cfg=conv_cfg, act_cfg=None)
            self.phi = ConvModule(self.in_channels, self.inter_channels, kernel_size=1, conv_cfg=conv_cfg, act_cfg=None)
        if (self.mode == 'concatenation'):
            # Maps each concatenated (theta, phi) pair down to a scalar score.
            self.concat_project = ConvModule((self.inter_channels * 2), 1, kernel_size=1, stride=1, padding=0, bias=False, act_cfg=dict(type='ReLU'))
        self.init_weights(**kwargs)

    def init_weights(self, std=0.01, zeros_init=True):
        """Init projection convs with normal noise; optionally zero the
        output path so the block starts as an identity residual."""
        if (self.mode != 'gaussian'):
            for m in [self.g, self.theta, self.phi]:
                normal_init(m.conv, std=std)
        else:
            # 'gaussian' mode builds no theta/phi (see __init__).
            normal_init(self.g.conv, std=std)
        if zeros_init:
            # Zero the last layer actually applied: the conv itself when no
            # norm follows it, otherwise the norm.
            if (self.conv_out.norm_cfg is None):
                constant_init(self.conv_out.conv, 0)
            else:
                constant_init(self.conv_out.norm, 0)
        elif (self.conv_out.norm_cfg is None):
            normal_init(self.conv_out.conv, std=std)
        else:
            normal_init(self.conv_out.norm, std=std)

    def gaussian(self, theta_x, phi_x):
        """Pairwise weights: softmax over the last dim of theta @ phi."""
        pairwise_weight = torch.matmul(theta_x, phi_x)
        pairwise_weight = pairwise_weight.softmax(dim=(- 1))
        return pairwise_weight

    def embedded_gaussian(self, theta_x, phi_x):
        """Like gaussian, but optionally scaled by 1/sqrt(inter_channels)."""
        pairwise_weight = torch.matmul(theta_x, phi_x)
        if self.use_scale:
            # theta_x.shape[-1] is the projected channel dim.
            pairwise_weight /= (theta_x.shape[(- 1)] ** 0.5)
        pairwise_weight = pairwise_weight.softmax(dim=(- 1))
        return pairwise_weight

    def dot_product(self, theta_x, phi_x):
        """Pairwise weights normalized by the position count, no softmax."""
        pairwise_weight = torch.matmul(theta_x, phi_x)
        pairwise_weight /= pairwise_weight.shape[(- 1)]
        return pairwise_weight

    def concatenation(self, theta_x, phi_x):
        """Broadcast theta (n, c, h, 1) and phi (n, c, 1, w) to (n, c, h, w),
        concatenate along channels and project each pair to a scalar."""
        h = theta_x.size(2)
        w = phi_x.size(3)
        theta_x = theta_x.repeat(1, 1, 1, w)
        phi_x = phi_x.repeat(1, 1, h, 1)
        concat_feature = torch.cat([theta_x, phi_x], dim=1)
        pairwise_weight = self.concat_project(concat_feature)
        (n, _, h, w) = pairwise_weight.size()
        pairwise_weight = pairwise_weight.view(n, h, w)
        # Normalize by the number of key positions.
        pairwise_weight /= pairwise_weight.shape[(- 1)]
        return pairwise_weight

    def forward(self, x):
        # x: (n, in_channels, *spatial); spatial rank depends on the subclass.
        n = x.size(0)
        # Flatten spatial dims; g_x becomes (n, num_positions, inter_channels).
        g_x = self.g(x).view(n, self.inter_channels, (- 1))
        g_x = g_x.permute(0, 2, 1)
        if (self.mode == 'gaussian'):
            # Raw input serves as both query and (possibly sub-sampled) key.
            theta_x = x.view(n, self.in_channels, (- 1))
            theta_x = theta_x.permute(0, 2, 1)
            # NOTE(review): `self.sub_sample` is set only by the
            # NonLocal1d/2d/3d subclasses, never by this base class;
            # instantiating _NonLocalNd directly in 'gaussian' mode would
            # raise AttributeError here.
            if self.sub_sample:
                phi_x = self.phi(x).view(n, self.in_channels, (- 1))
            else:
                phi_x = x.view(n, self.in_channels, (- 1))
        elif (self.mode == 'concatenation'):
            theta_x = self.theta(x).view(n, self.inter_channels, (- 1), 1)
            phi_x = self.phi(x).view(n, self.inter_channels, 1, (- 1))
        else:
            theta_x = self.theta(x).view(n, self.inter_channels, (- 1))
            theta_x = theta_x.permute(0, 2, 1)
            phi_x = self.phi(x).view(n, self.inter_channels, (- 1))
        # Dispatch on mode name: gaussian / embedded_gaussian / dot_product /
        # concatenation (validated in __init__).
        pairwise_func = getattr(self, self.mode)
        pairwise_weight = pairwise_func(theta_x, phi_x)
        # Aggregate values and restore the original spatial layout.
        y = torch.matmul(pairwise_weight, g_x)
        y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels, *x.size()[2:])
        # Residual connection around the non-local branch.
        output = (x + self.conv_out(y))
        return output
|
class NonLocal1d(_NonLocalNd):
    """1D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv1d').
    """

    def __init__(self, in_channels, sub_sample=False, conv_cfg=dict(type='Conv1d'), **kwargs):
        super(NonLocal1d, self).__init__(in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample
        if not sub_sample:
            return
        pool = nn.MaxPool1d(kernel_size=2)
        # Halve the spatial size of the value branch; in non-gaussian modes
        # the key branch (phi) is pooled the same way, while in 'gaussian'
        # mode phi becomes the pooling alone (there is no phi conv).
        self.g = nn.Sequential(self.g, pool)
        if self.mode == 'gaussian':
            self.phi = pool
        else:
            self.phi = nn.Sequential(self.phi, pool)
|
@PLUGIN_LAYERS.register_module()
class NonLocal2d(_NonLocalNd):
    """2D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv2d').
    """

    _abbr_ = 'nonlocal_block'

    def __init__(self, in_channels, sub_sample=False, conv_cfg=dict(type='Conv2d'), **kwargs):
        super(NonLocal2d, self).__init__(in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample
        if not sub_sample:
            return
        pool = nn.MaxPool2d(kernel_size=(2, 2))
        # Halve each spatial dim of the value branch; in non-gaussian modes
        # the key branch (phi) is pooled the same way, while in 'gaussian'
        # mode phi becomes the pooling alone (there is no phi conv).
        self.g = nn.Sequential(self.g, pool)
        if self.mode == 'gaussian':
            self.phi = pool
        else:
            self.phi = nn.Sequential(self.phi, pool)
|
class NonLocal3d(_NonLocalNd):
    """3D Non-local module.

    Args:
        in_channels (int): Same as `NonLocalND`.
        sub_sample (bool): Whether to apply max pooling after pairwise
            function (Note that the `sub_sample` is applied on spatial only).
            Default: False.
        conv_cfg (None | dict): Same as `NonLocalND`.
            Default: dict(type='Conv3d').
    """

    def __init__(self, in_channels, sub_sample=False, conv_cfg=dict(type='Conv3d'), **kwargs):
        super(NonLocal3d, self).__init__(in_channels, conv_cfg=conv_cfg, **kwargs)
        self.sub_sample = sub_sample
        if not sub_sample:
            return
        # Pool spatially only (kernel (1, 2, 2)): the temporal dim is kept.
        pool = nn.MaxPool3d(kernel_size=(1, 2, 2))
        self.g = nn.Sequential(self.g, pool)
        if self.mode == 'gaussian':
            self.phi = pool
        else:
            self.phi = nn.Sequential(self.phi, pool)
|
def infer_abbr(class_type):
    """Infer abbreviation from the class name.

    When we build a norm layer with `build_norm_layer()`, we want to preserve
    the norm type in variable names, e.g, self.bn1, self.gn. This method will
    infer the abbreviation to map class types to abbreviations.

    Rule 1: If the class has the property "_abbr_", return the property.
    Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or
    InstanceNorm, the abbreviation of this layer will be "bn", "gn", "ln" and
    "in" respectively.
    Rule 3: If the class name contains "batch", "group", "layer" or
    "instance", the abbreviation of this layer will be "bn", "gn", "ln" and
    "in" respectively.
    Rule 4: Otherwise, the abbreviation falls back to "norm_layer".

    Args:
        class_type (type): The norm layer type.

    Returns:
        str: The inferred abbreviation.
    """
    if not inspect.isclass(class_type):
        raise TypeError(f'class_type must be a type, but got {type(class_type)}')
    # Rule 1: an explicit abbreviation wins.
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_
    # Rule 2: match on ancestry. _InstanceNorm is tested before _BatchNorm
    # to keep the original precedence between the two base classes.
    for base, abbr in ((_InstanceNorm, 'in'), (_BatchNorm, 'bn'), (nn.GroupNorm, 'gn'), (nn.LayerNorm, 'ln')):
        if issubclass(class_type, base):
            return abbr
    # Rule 3: fall back to substring matching on the lowercased class name.
    lowered = class_type.__name__.lower()
    for token, abbr in (('batch', 'bn'), ('group', 'gn'), ('layer', 'ln'), ('instance', 'in')):
        if token in lowered:
            return abbr
    # Rule 4: generic fallback.
    return 'norm_layer'
|
def build_norm_layer(cfg, num_features, postfix=''):
    """Build normalization layer.

    Args:
        cfg (dict): The norm layer config, which should contain:

            - type (str): Layer type.
            - layer args: Args needed to instantiate a norm layer.
            - requires_grad (bool, optional): Whether stop gradient updates.
        num_features (int): Number of input channels.
        postfix (int | str): The postfix to be appended into norm abbreviation
            to create named layer.

    Returns:
        tuple[str, nn.Module]: The first element is the layer name consisting
        of abbreviation and postfix, e.g., bn1, gn. The second element is the
        created norm layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    # Work on a copy so the caller's cfg is never mutated by the pops below.
    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in NORM_LAYERS:
        raise KeyError(f'Unrecognized norm type {layer_type}')
    norm_layer = NORM_LAYERS.get(layer_type)
    assert isinstance(postfix, (int, str))
    name = infer_abbr(norm_layer) + str(postfix)
    # requires_grad is a builder-level option, not a layer constructor arg.
    requires_grad = cfg_.pop('requires_grad', True)
    cfg_.setdefault('eps', 1e-05)
    if layer_type == 'GN':
        # GroupNorm takes num_channels (and requires num_groups in cfg).
        assert 'num_groups' in cfg_
        layer = norm_layer(num_channels=num_features, **cfg_)
    else:
        layer = norm_layer(num_features, **cfg_)
        if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'):
            # Legacy PyTorch SyncBN hook; guarded so newer versions still work.
            layer._specify_ddp_gpu_num(1)
    for param in layer.parameters():
        param.requires_grad = requires_grad
    return name, layer
|
def is_norm(layer, exclude=None):
    """Check if a layer is a normalization layer.

    Args:
        layer (nn.Module): The layer to be checked.
        exclude (type | tuple[type]): Types to be excluded.

    Returns:
        bool: Whether the layer is a norm layer.
    """
    if exclude is not None:
        if not isinstance(exclude, tuple):
            exclude = (exclude,)
        if not is_tuple_of(exclude, type):
            raise TypeError(f'"exclude" must be either None or type or a tuple of types, but got {type(exclude)}: {exclude}')
    if exclude and isinstance(layer, exclude):
        return False
    # BN/IN are matched through their private torch base classes so that all
    # dimensional variants (1d/2d/3d, sync) are covered.
    return isinstance(layer, (_BatchNorm, _InstanceNorm, nn.GroupNorm, nn.LayerNorm))
|
def build_padding_layer(cfg, *args, **kwargs):
    """Build padding layer.

    Args:
        cfg (dict): The padding layer config, which should contain:

            - type (str): Layer type.
            - layer args: Args needed to instantiate a padding layer.

    Returns:
        nn.Module: Created padding layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    # Copy before popping so the caller's cfg stays intact.
    cfg_ = cfg.copy()
    padding_type = cfg_.pop('type')
    if padding_type not in PADDING_LAYERS:
        raise KeyError(f'Unrecognized padding type {padding_type}.')
    padding_layer = PADDING_LAYERS.get(padding_type)
    # Positional/keyword args from the call site, then cfg-supplied kwargs.
    return padding_layer(*args, **kwargs, **cfg_)
|
def infer_abbr(class_type):
    """Infer abbreviation from the class name.

    This method will infer the abbreviation to map class types to
    abbreviations.

    Rule 1: If the class has the property "_abbr_", return the property.
    Rule 2: Otherwise, the abbreviation falls back to snake case of class
    name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``.

    Args:
        class_type (type): The norm layer type.

    Returns:
        str: The inferred abbreviation.
    """
    if not inspect.isclass(class_type):
        raise TypeError(f'class_type must be a type, but got {type(class_type)}')
    # Rule 1: an explicit abbreviation wins.
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_

    def camel2snack(word):
        """Convert camel case word into snack case.

        Modified from `inflection lib
        <https://inflection.readthedocs.io/en/latest/#inflection.underscore>`_.

        Example::

            >>> camel2snack("FancyBlock")
            'fancy_block'
        """
        # Split acronym boundaries first (HTTPResponse -> HTTP_Response),
        # then lower/upper boundaries (FancyBlock -> Fancy_Block).
        word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word)
        word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word)
        return word.replace('-', '_').lower()

    # Rule 2: snake-cased class name.
    return camel2snack(class_type.__name__)
|
def build_plugin_layer(cfg, postfix='', **kwargs):
    """Build plugin layer.

    Args:
        cfg (dict): cfg should contain:

            - type (str): identify plugin layer type.
            - layer args: args needed to instantiate a plugin layer.
        postfix (int, str): appended into norm abbreviation to
            create named layer. Default: ''.

    Returns:
        tuple[str, nn.Module]: The first one is the concatenation of
        abbreviation and postfix. The second is the created plugin layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    # Copy before popping so the caller's cfg stays intact.
    cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if layer_type not in PLUGIN_LAYERS:
        raise KeyError(f'Unrecognized plugin type {layer_type}')
    plugin_layer = PLUGIN_LAYERS.get(layer_type)
    assert isinstance(postfix, (int, str))
    name = infer_abbr(plugin_layer) + str(postfix)
    return name, plugin_layer(**kwargs, **cfg_)
|
class Scale(nn.Module):
    """A learnable scale parameter.

    This layer scales the input by a learnable factor. It multiplies a
    learnable scale parameter of shape (1,) with input of any shape.

    Args:
        scale (float): Initial value of scale factor. Default: 1.0
    """

    def __init__(self, scale=1.0):
        super(Scale, self).__init__()
        # Scalar learnable factor, broadcast over the whole input.
        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

    def forward(self, x):
        return x * self.scale
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.