id
int64
1
6.07M
name
stringlengths
1
295
code
stringlengths
12
426k
language
stringclasses
1 value
source_file
stringlengths
5
202
start_line
int64
1
158k
end_line
int64
1
158k
repo
dict
1,201
add_log_parameters_hook
def add_log_parameters_hook( self, module: "torch.nn.Module", name: str = "", prefix: str = "", log_freq: int = 0, ) -> None: """This instruments hooks into the pytorch module log parameters after a forward pass log_freq - log gradients/parameters every N batches """ # if name is not None: prefix = prefix + name if not hasattr(module, "_wandb_hook_names"): module._wandb_hook_names = [] def parameter_log_hook(module, input_, output, log_track): if not log_track_update(log_track): return for name, parameter in module.named_parameters(): # for pytorch 0.3 Variables if isinstance(parameter, torch.autograd.Variable): data = parameter.data else: data = parameter self.log_tensor_stats(data.cpu(), "parameters/" + prefix + name) log_track_params = log_track_init(log_freq) try: hook = module.register_forward_hook( lambda mod, inp, outp: parameter_log_hook( mod, inp, outp, log_track_params ) ) self._hook_handles["parameters/" + prefix] = hook module._wandb_hook_names.append("parameters/" + prefix) except RuntimeError as e: wandb.termwarn( f"Trying to register forward_hook failed ({e}) - skipping parameter tracking." )
python
wandb/wandb_torch.py
79
119
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,202
parameter_log_hook
def parameter_log_hook(module, input_, output, log_track):
    # Honor the logging frequency before doing any work.
    if not log_track_update(log_track):
        return
    for param_name, param in module.named_parameters():
        # pytorch 0.3 wrapped tensors in Variable; unwrap to the raw data there
        data = param.data if isinstance(param, torch.autograd.Variable) else param
        self.log_tensor_stats(data.cpu(), "parameters/" + prefix + param_name)
python
wandb/wandb_torch.py
96
105
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,203
add_log_gradients_hook
def add_log_gradients_hook(
    self,
    module: "torch.nn.Module",
    name: str = "",
    prefix: str = "",
    log_freq: int = 0,
) -> None:
    """Instrument ``module`` so gradient statistics are logged after backward passes.

    log_freq - log gradients/parameters every N batches
    """
    prefix = prefix + name

    # Keep a record of hook names on the module for later discovery/removal.
    if not hasattr(module, "_wandb_hook_names"):
        module._wandb_hook_names = []

    for param_name, param in module.named_parameters():
        if not param.requires_grad:
            continue
        tracker = log_track_init(log_freq)
        hook_name = "gradients/" + prefix + param_name
        module._wandb_hook_names.append(hook_name)
        self._hook_variable_gradient_stats(param, hook_name, tracker)
python
wandb/wandb_torch.py
121
145
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,204
log_tensor_stats
def log_tensor_stats(self, tensor, name):
    """Add distribution statistics on a tensor's elements to the current History entry"""
    # TODO Handle the case of duplicate names.
    # Nested tuples/lists of tensors are flattened one level at a time, then
    # concatenated into a single 1-D tensor.
    if isinstance(tensor, tuple) or isinstance(tensor, list):
        while (isinstance(tensor, tuple) or isinstance(tensor, list)) and (
            isinstance(tensor[0], tuple) or isinstance(tensor[0], list)
        ):
            tensor = [item for sublist in tensor for item in sublist]
        tensor = torch.cat([t.reshape(-1) for t in tensor])

    # checking for inheritance from _TensorBase didn't work for some reason
    if not hasattr(tensor, "shape"):
        cls = type(tensor)
        raise TypeError(f"Expected Tensor, not {cls.__module__}.{cls.__name__}")

    # HalfTensors on cpu do not support view(), upconvert to 32bit
    if isinstance(tensor, torch.HalfTensor):
        tensor = tensor.clone().type(torch.FloatTensor).detach()

    # Sparse tensors have a bunch of implicit zeros. In order to histo them correctly,
    # we have to count them up and add them to the histo ourselves.
    sparse_zeros = None
    if tensor.is_sparse:
        # Have to call this on a sparse tensor before most other ops.
        tensor = tensor.cpu().coalesce().clone().detach()
        backing_values = tensor._values()
        non_zero_values = backing_values.numel()
        all_values = tensor.numel()
        sparse_zeros = all_values - non_zero_values
        tensor = backing_values

    flat = tensor.reshape(-1)

    # For pytorch 0.3 we use unoptimized numpy histograms (detach is new in 0.4)
    if not hasattr(flat, "detach"):
        tensor = flat.cpu().clone().numpy()
        wandb.run._log({name: wandb.Histogram(tensor)}, commit=False)
        return

    if flat.is_cuda:
        # TODO(jhr): see if pytorch will accept something upstream to check cuda support for ops
        # until then, we are going to have to catch a specific exception to check for histc support.
        # The probe result is cached on self so the try/except runs only once.
        if self._is_cuda_histc_supported is None:
            self._is_cuda_histc_supported = True
            check = torch.cuda.FloatTensor(1).fill_(0)
            try:
                check = flat.histc(bins=self._num_bins)
            except RuntimeError as e:
                # Only work around missing support with specific exception
                # if str(e).startswith("_th_histc is not implemented"):
                #     self._is_cuda_histc_supported = False
                # On second thought, 0.4.1 doesnt have support and maybe there are other issues
                # lets disable more broadly for now
                self._is_cuda_histc_supported = False

        if not self._is_cuda_histc_supported:
            flat = flat.cpu().clone().detach()

        # As of torch 1.0.1.post2+nightly, float16 cuda summary ops are not supported (convert to float32)
        if isinstance(flat, torch.cuda.HalfTensor):
            flat = flat.clone().type(torch.cuda.FloatTensor).detach()

    if isinstance(flat, torch.HalfTensor):
        flat = flat.clone().type(torch.FloatTensor).detach()

    # Skip logging if all values are nan or inf or the tensor is empty.
    if self._no_finite_values(flat):
        return

    # Remove nans and infs if present. There's no good way to represent that in histograms.
    flat = self._remove_infs_nans(flat)

    tmin = flat.min().item()
    tmax = flat.max().item()
    if sparse_zeros:
        # If we've got zeros to add in, make sure zero is in the hist range.
        tmin = 0 if tmin > 0 else tmin
        tmax = 0 if tmax < 0 else tmax
    # Anecdotally, this can somehow happen sometimes. Maybe a precision error
    # in min()/max() above. Swap here to prevent a runtime error.
    if tmin > tmax:
        tmin, tmax = tmax, tmin
    tensor = flat.histc(bins=self._num_bins, min=tmin, max=tmax)
    tensor = tensor.cpu().clone().detach()
    bins = torch.linspace(tmin, tmax, steps=self._num_bins + 1)

    # Add back zeroes from a sparse tensor.
    if sparse_zeros:
        bins_np = bins.numpy()
        tensor_np = tensor.numpy()
        bin_idx = 0
        num_buckets = len(bins_np) - 1
        for i in range(num_buckets):
            start = bins_np[i]
            end = bins_np[i + 1]
            # There are 3 cases to consider here, all of which mean we've found the right bucket
            # 1. The bucket range contains zero.
            # 2. The bucket range lower bound *is* zero.
            # 3. This is the last bucket and the bucket range upper bound is zero.
            if (start <= 0 and end > 0) or (i == num_buckets - 1 and end == 0):
                bin_idx = i
                break

        tensor_np[bin_idx] += sparse_zeros
        tensor = torch.Tensor(tensor_np)
        bins = torch.Tensor(bins_np)

    wandb.run._log(
        {name: wandb.Histogram(np_histogram=(tensor.tolist(), bins.tolist()))},
        commit=False,
    )
python
wandb/wandb_torch.py
147
259
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,205
_hook_variable_gradient_stats
def _hook_variable_gradient_stats(self, var, name, log_track): """Logs a Variable's gradient's distribution statistics next time backward() is called on it. """ if not isinstance(var, torch.autograd.Variable): cls = type(var) raise TypeError( "Expected torch.Variable, not {}.{}".format( cls.__module__, cls.__name__ ) ) handle = self._hook_handles.get(name) if handle is not None and self._torch_hook_handle_is_valid(handle): raise ValueError(f'A hook has already been set under name "{name}"') def _callback(grad, log_track): if not log_track_update(log_track): return self.log_tensor_stats(grad.data, name) handle = var.register_hook(lambda grad: _callback(grad, log_track)) self._hook_handles[name] = handle return handle
python
wandb/wandb_torch.py
261
284
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,206
_callback
def _callback(grad, log_track):
    # Only emit gradient stats when the rate-limit tracker says we're due.
    if log_track_update(log_track):
        self.log_tensor_stats(grad.data, name)
python
wandb/wandb_torch.py
277
280
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,207
unhook_all
def unhook_all(self):
    """Remove every registered hook and clear the registry.

    Bug fix: this used to reset ``_hook_handles`` to a list (``[]``), but the
    rest of the class uses it as a dict keyed by hook name (key assignment,
    ``.get``, ``.pop``, ``.values``), so any hook registered after an
    ``unhook_all()`` would blow up. Reset to an empty dict instead.
    """
    for handle in self._hook_handles.values():
        handle.remove()
    self._hook_handles = {}
python
wandb/wandb_torch.py
286
289
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,208
unhook
def unhook(self, name):
    """Deregister and remove the hook registered under ``name``.

    Raises KeyError when no hook exists under ``name``.
    """
    self._hook_handles.pop(name).remove()
python
wandb/wandb_torch.py
291
293
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,209
_torch_hook_handle_is_valid
def _torch_hook_handle_is_valid(self, handle): d = handle.hooks_dict_ref() if d is None: return False else: return handle.id in d
python
wandb/wandb_torch.py
295
300
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,210
_no_finite_values
def _no_finite_values(self, tensor: "torch.Tensor") -> bool: return tensor.shape == torch.Size([0]) or (~torch.isfinite(tensor)).all().item()
python
wandb/wandb_torch.py
302
303
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,211
_remove_infs_nans
def _remove_infs_nans(self, tensor: "torch.Tensor") -> "torch.Tensor": if not torch.isfinite(tensor).all(): tensor = tensor[torch.isfinite(tensor)] return tensor
python
wandb/wandb_torch.py
305
309
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,212
__init__
def __init__(self):
    # Initialize the base graph with the "torch" format name — presumably
    # consumed by the base Graph class; confirm against its __init__.
    super().__init__("torch")
    # ids of sub-modules whose forward hooks have not yet fired; drained as
    # hooks run so we can tell when the whole graph has been traversed.
    self._graph_hooks = set()
python
wandb/wandb_torch.py
313
315
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,213
hook_torch
def hook_torch(cls, model, criterion=None, graph_idx=0):
    """Create a TorchGraph and attach graph-recording hooks to ``model``."""
    wandb.termlog("logging graph, to disable use `wandb.watch(log_graph=False)`")
    torch_graph = TorchGraph()
    torch_graph.hook_torch_modules(model, criterion, graph_idx=graph_idx)
    return torch_graph
python
wandb/wandb_torch.py
318
322
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,214
create_forward_hook
def create_forward_hook(self, name, graph_idx):
    """Build a forward hook that records ``name``'s node into the graph.

    The hook fires once per hooked sub-module; when the last pending hook
    fires, the finished graph is written to the run summary.
    """
    graph = self

    def after_forward_hook(module, input, output):
        if id(module) not in self._graph_hooks:
            # hook already processed -> noop
            return
        # Normalize single outputs so nested_shape always sees a tuple.
        if not isinstance(output, tuple):
            output = (output,)
        parameters = [
            (pname, list(param.size()))
            for pname, param in module.named_parameters()
        ]

        node = Node(
            id=id(module),
            name=name,
            class_name=str(module),
            output_shape=nested_shape(output),
            parameters=parameters,
            num_parameters=[reduce(mul, size, 1) for (pname, size) in parameters],
        )
        graph.nodes_by_id[id(module)] = node
        # Map parameter ids to the same node so gradients can be attributed.
        for param in module.parameters():
            graph.nodes_by_id[id(param)] = node
        graph.add_node(node)
        if not graph.criterion_passed:
            # Fall back to the output's grad_fn as the criterion when no
            # explicit criterion was supplied.
            if hasattr(output[0], "grad_fn"):
                graph.criterion = output[0].grad_fn
            elif (
                isinstance(output[0], list)
                and output[0]
                and hasattr(output[0][0], "grad_fn")
            ):
                graph.criterion = output[0][0].grad_fn
        # hook has been processed
        self._graph_hooks -= {id(module)}
        if not self._graph_hooks:
            # we went through the entire graph
            wandb.run.summary["graph_%i" % graph_idx] = self

    return after_forward_hook
python
wandb/wandb_torch.py
324
367
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,215
after_forward_hook
def after_forward_hook(module, input, output):
    # Runs after each hooked sub-module's forward pass; records the module as
    # a graph node, then flushes the graph to the summary once every pending
    # hook has fired.
    if id(module) not in self._graph_hooks:
        # hook already processed -> noop
        return
    # Normalize single outputs so nested_shape always sees a tuple.
    if not isinstance(output, tuple):
        output = (output,)
    parameters = [
        (pname, list(param.size()))
        for pname, param in module.named_parameters()
    ]

    node = Node(
        id=id(module),
        name=name,
        class_name=str(module),
        output_shape=nested_shape(output),
        parameters=parameters,
        num_parameters=[reduce(mul, size, 1) for (pname, size) in parameters],
    )
    graph.nodes_by_id[id(module)] = node
    # Parameters resolve to the same node so they can be attributed to it later.
    for param in module.parameters():
        graph.nodes_by_id[id(param)] = node
    graph.add_node(node)
    if not graph.criterion_passed:
        # Fall back to the output's grad_fn as the criterion if none was given.
        if hasattr(output[0], "grad_fn"):
            graph.criterion = output[0].grad_fn
        elif (
            isinstance(output[0], list)
            and output[0]
            and hasattr(output[0][0], "grad_fn")
        ):
            graph.criterion = output[0][0].grad_fn
    # hook has been processed
    self._graph_hooks -= {id(module)}
    if not self._graph_hooks:
        # we went through the entire graph
        wandb.run.summary["graph_%i" % graph_idx] = self
python
wandb/wandb_torch.py
327
365
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,216
hook_torch_modules
def hook_torch_modules(
    self, module, criterion=None, prefix=None, graph_idx=0, parent=None
):
    """Recursively register forward hooks that record ``module``'s graph.

    Containers (Sequential/ModuleList/ModuleDict/Container) are recursed
    into; leaf modules get a forward hook from ``create_forward_hook``.

    Fixes:
    - The recursive call previously dropped ``graph_idx``, so hooks inside
      nested containers always logged to ``graph_0`` regardless of the index
      the caller asked for; it is now propagated.
    - ``module_types`` was rebuilt on every loop iteration although it is
      loop-invariant; it is computed once up front.
    """
    torch = util.get_module("torch", "Could not import torch")
    layers = 0
    graph = self
    if hasattr(module, "_wandb_watch_called") and module._wandb_watch_called:
        raise ValueError(
            "You can only call `wandb.watch` once per model. Pass a new instance of the model if you need to call wandb.watch again in your code."
        )
    module._wandb_watch_called = True
    if criterion:
        graph.criterion = criterion
        graph.criterion_passed = True

    # Trying to support torch >0.3 making this code complicated
    # We want a list of types that we should recurse into
    # Torch 0.3 uses containers
    # 0.4 has ModuleList
    # 0.4.1 has ModuleDict
    module_types = [
        getattr(torch.nn, module_classname)
        for module_classname in (
            "Container",
            "Sequential",
            "ModuleList",
            "ModuleDict",
        )
        if hasattr(torch.nn, module_classname)
    ]

    for name, sub_module in module.named_children():
        name = name or str(layers)
        if prefix:
            name = prefix + "." + name
        layers += 1
        if not isinstance(sub_module, torch.nn.Module):
            # TODO: Why does this happen?
            break

        if parent is None:
            parent = module
        if isinstance(sub_module, tuple(module_types)):
            # Propagate graph_idx so nested containers log to the same
            # "graph_%i" summary key as the root call.
            self.hook_torch_modules(
                sub_module, prefix=name, graph_idx=graph_idx, parent=parent
            )
        else:
            self._graph_hooks |= {id(sub_module)}
            try:
                graph_hook = sub_module.register_forward_hook(
                    self.create_forward_hook(name, graph_idx)
                )
                wandb.run._torch._hook_handles[
                    "topology/" + str(id(graph_hook))
                ] = graph_hook
                if not hasattr(parent, "_wandb_hook_names"):
                    # should never happen but let's be extra safe
                    parent._wandb_hook_names = []
                parent._wandb_hook_names.append("topology/" + str(id(graph_hook)))
            except RuntimeError as e:
                wandb.termwarn(
                    f"Trying to register forward_hook failed ({e}) - skipping graph tracking.",
                    repeat=False,
                )
python
wandb/wandb_torch.py
369
430
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,217
from_torch_layers
def from_torch_layers(cls, module_graph, variable):
    """Recover something like neural net layers from PyTorch Module's and the
    compute graph from a Variable.

    Example output for a multi-layer RNN. We confusingly assign shared embedding values
    to the encoder, but ordered next to the decoder.

    rnns.0.linear.module.weight_raw rnns.0
    rnns.0.linear.module.bias rnns.0
    rnns.1.linear.module.weight_raw rnns.1
    rnns.1.linear.module.bias rnns.1
    rnns.2.linear.module.weight_raw rnns.2
    rnns.2.linear.module.bias rnns.2
    rnns.3.linear.module.weight_raw rnns.3
    rnns.3.linear.module.bias rnns.3
    decoder.weight encoder
    decoder.bias decoder
    """
    # TODO: We're currently not using this, but I left it here incase we want to resurrect! - CVP
    torch = util.get_module("torch", "Could not import torch")

    module_nodes_by_hash = {id(n): n for n in module_graph.nodes}
    module_parameter_nodes = [
        n for n in module_graph.nodes if isinstance(n.obj, torch.nn.Parameter)
    ]

    names_by_pid = {id(n.obj): n.name for n in module_parameter_nodes}

    reachable_param_nodes = module_graph[0].reachable_descendents()
    reachable_params = {}
    module_reachable_params = {}
    names = {}
    # For every Module node, collect the set of Parameters reachable from it.
    for pid, reachable_nodes in reachable_param_nodes.items():
        node = module_nodes_by_hash[pid]
        if not isinstance(node.obj, torch.nn.Module):
            continue
        module = node.obj
        reachable_params = {}  # by object id
        module_reachable_params[id(module)] = reachable_params
        names[node.name] = set()
        for reachable_hash in reachable_nodes:
            reachable = module_nodes_by_hash[reachable_hash]
            if isinstance(reachable.obj, torch.nn.Parameter):
                param = reachable.obj
                reachable_params[id(param)] = param
                names[node.name].add(names_by_pid[id(param)])

    # we look for correspondences between sets of parameters used in subtrees of the
    # computation graph and sets of parameters contained in subtrees of the module
    # graph
    node_depths = {id(n): d for n, d in module_graph[0].descendent_bfs()}
    parameter_module_names = {}
    parameter_modules = {}
    # For each Parameter, pick the containing Module with the smallest
    # reachable-parameter set (tie-broken by depth) as its "layer".
    for param_node in (
        n for n in module_graph.nodes if isinstance(n.obj, torch.nn.Parameter)
    ):
        pid = id(param_node.obj)
        best_node = None
        best_depth = None
        best_reachable_params = None
        for node in module_graph.nodes:
            if not isinstance(node.obj, torch.nn.Module):
                continue
            module = node.obj
            reachable_params = module_reachable_params[id(module)]
            if pid in reachable_params:
                depth = node_depths[id(node)]
                if best_node is None or (len(reachable_params), depth) <= (
                    len(best_reachable_params),
                    best_depth,
                ):
                    best_node = node
                    best_depth = depth
                    best_reachable_params = reachable_params
        parameter_modules[pid] = best_node
        parameter_module_names[param_node.name] = best_node.name

    # contains all parameters but only a minimal set of modules necessary
    # to contain them (and which ideally correspond to conceptual layers)
    reduced_module_graph = cls()
    rmg_ids = itertools.count()
    rmg_root = Node(id=next(rmg_ids), node=module_graph[0])
    reduced_module_graph.add_node(rmg_root)
    reduced_module_graph.root = rmg_root
    rmg_nodes_by_pid = {}

    module_nodes_by_pid = {id(n.obj): n for n in module_graph.nodes}

    compute_graph, compute_node_vars = cls.from_torch_compute_graph(variable)
    for node, _ in reversed(list(compute_graph[0].ancestor_bfs())):
        param = compute_node_vars.get(node.id)
        pid = id(param)
        if not isinstance(param, torch.nn.Parameter):
            continue
        if pid not in module_nodes_by_pid:
            # not all Parameters that occur in the compute graph come from the Module graph
            continue

        # add the nodes in the order we want to display them on the frontend
        mid = id(parameter_modules[pid].obj)
        if mid in rmg_nodes_by_pid:
            rmg_module = rmg_nodes_by_pid[mid]
        else:
            rmg_module = rmg_nodes_by_pid[mid] = Node(
                id=next(rmg_ids), node=module_nodes_by_pid[mid]
            )
            reduced_module_graph.add_node(rmg_module)
            reduced_module_graph.add_edge(rmg_root, rmg_module)

        rmg_param = Node(id=next(rmg_ids), node=module_nodes_by_pid[pid])
        rmg_nodes_by_pid[pid] = rmg_param
        reduced_module_graph.add_node(rmg_param)

        reduced_module_graph.add_edge(rmg_module, rmg_param)
    return reduced_module_graph
python
wandb/wandb_torch.py
433
548
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,218
node_from_module
def node_from_module(cls, nid, module):
    """Summarize a torch module as a wandb Node: id, total parameter count, class name."""
    numpy = util.get_module("numpy", "Could not import numpy")

    node = wandb.Node()
    node.id = nid
    node.child_parameters = sum(
        numpy.prod(parameter.size()) for parameter in module.parameters()
    )
    node.class_name = type(module).__name__

    return node
python
wandb/wandb_torch.py
551
561
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,219
__init__
def __init__(
    self, env=None, command=None, function=None, run_id=None, in_jupyter=None
):
    """Launch an agent run, either as a subprocess (``command``) or as a
    multiprocessing.Process running ``function``.

    Exactly one of ``command`` or ``function`` must be given; otherwise
    AgentError is raised.
    """
    self._popen = None
    self._proc = None
    # queue the child uses to signal completion back to the parent (see poll())
    self._finished_q = multiprocessing.Queue()
    self._proc_killed = False

    if command:
        if platform.system() == "Windows":
            # start the child in a new process group (Windows)
            kwargs = dict(creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
        else:
            # start the child in its own process group (POSIX)
            kwargs = dict(preexec_fn=os.setpgrp)
        self._popen = subprocess.Popen(command, env=env, **kwargs)
    elif function:
        self._proc = multiprocessing.Process(
            target=self._start,
            args=(self._finished_q, env, function, run_id, in_jupyter),
        )
        self._proc.start()
    else:
        raise AgentError("Agent Process requires command or function")
python
wandb/wandb_agent.py
33
54
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,220
_start
def _start(self, finished_q, env, function, run_id, in_jupyter):
    """Child-process entry point: run the user function, then signal completion."""
    if env:
        os.environ.update(env)
    # call user function
    print("wandb: Agent Started Run:", run_id)
    if function:
        function()
    print("wandb: Agent Finished Run:", run_id, "\n")
    # finish the wandb run if one was created
    run = wandb.run
    if run:
        wandb.join()
    # tell the parent we're done
    finished_q.put(True)
python
wandb/wandb_agent.py
56
73
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,221
poll
def poll(self):
    """Non-blocking liveness check: exit code / True when finished, None while running."""
    if self._popen:
        return self._popen.poll()
    if self._proc_killed:
        # we need to join process to prevent zombies
        self._proc.join()
        return True
    try:
        if self._finished_q.get(False, 0):
            return True
    except queue.Empty:
        pass
    return None
python
wandb/wandb_agent.py
75
88
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,222
wait
def wait(self):
    """Block until the child process finishes; return its exit status."""
    if self._popen:
        if platform.system() == "Windows":
            # if on windows, wait() will block and we wont be able to interrupt,
            # so poll in a loop and let Ctrl-C through.
            try:
                while True:
                    status = self._popen.poll()
                    if status is not None:
                        return status
                    time.sleep(1)
            except KeyboardInterrupt:
                raise
        return self._popen.wait()
    return self._proc.join()
python
wandb/wandb_agent.py
90
103
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,223
kill
def kill(self):
    """Forcefully kill the child: delegate to Popen, or SIGKILL the worker process."""
    if self._popen:
        return self._popen.kill()
    pid = self._proc.pid
    if not pid:
        return None
    result = os.kill(pid, signal.SIGKILL)
    # remember so poll() knows to join and reap the process
    self._proc_killed = True
    return result
python
wandb/wandb_agent.py
105
113
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,224
terminate
def terminate(self):
    """Ask the child to stop gracefully."""
    if not self._popen:
        return self._proc.terminate()
    # windows terminate is too strong, send Ctrl-C instead
    if platform.system() == "Windows":
        return self._popen.send_signal(signal.CTRL_C_EVENT)
    return self._popen.terminate()
python
wandb/wandb_agent.py
115
121
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,225
_create_sweep_command_args
def _create_sweep_command_args(command: Dict) -> Dict[str, Any]: """Create various formats of command arguments for the agent. Raises: ValueError: improperly formatted command dict """ if "args" not in command: raise ValueError('No "args" found in command: %s' % command) # four different formats of command args # (1) standard command line flags (e.g. --foo=bar) flags: List[str] = [] # (2) flags without hyphens (e.g. foo=bar) flags_no_hyphens: List[str] = [] # (3) flags with false booleans ommited (e.g. --foo) flags_no_booleans: List[str] = [] # (4) flags as a dictionary (used for constructing a json) flags_dict: Dict[str, Any] = {} for param, config in command["args"].items(): _value: Any = config.get("value", None) if _value is None: raise ValueError('No "value" found for command["args"]["%s"]' % param) _flag: str = f"{param}={_value}" flags.append("--" + _flag) flags_no_hyphens.append(_flag) if isinstance(_value, bool): # omit flags if they are boolean and false if _value: flags_no_booleans.append("--" + param) else: flags_no_booleans.append("--" + _flag) flags_dict[param] = _value return { "args": flags, "args_no_hyphens": flags_no_hyphens, "args_no_boolean_flags": flags_no_booleans, "args_json": [json.dumps(flags_dict)], "args_dict": flags_dict, }
python
wandb/wandb_agent.py
124
162
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,226
__init__
def __init__(
    self, api, queue, sweep_id=None, function=None, in_jupyter=None, count=None
):
    """Sweep agent: pulls commands off ``queue``, launches and reaps run processes.

    api - client providing register_agent/agent_heartbeat/sweep calls
    queue - queue of controller commands (each with a resp_queue)
    function - optional user function to run instead of a subprocess command
    count - optional maximum number of runs to execute before stopping
    """
    self._api = api
    self._queue = queue
    self._run_processes = {}  # keyed by run.id (GQL run name)
    self._server_responses = []
    self._sweep_id = sweep_id
    self._in_jupyter = in_jupyter
    self._log = []
    self._running = True
    self._last_report_time = None
    self._function = function
    # intervals are env-overridable; the class constants are only defaults
    self._report_interval = wandb.env.get_agent_report_interval(
        self.REPORT_INTERVAL
    )
    self._kill_delay = wandb.env.get_agent_kill_delay(self.KILL_DELAY)
    self._finished = 0
    self._failed = 0
    self._count = count
    self._sweep_command = []
    self._max_initial_failures = wandb.env.get_agent_max_initial_failures(
        self.MAX_INITIAL_FAILURES
    )
    if self._report_interval is None:
        raise AgentError("Invalid agent report interval")
    if self._kill_delay is None:
        raise AgentError("Invalid agent kill delay")
    # if the directory to log to is not set, set it
    if os.environ.get("WANDB_DIR") is None:
        os.environ["WANDB_DIR"] = os.path.abspath(os.getcwd())
python
wandb/wandb_agent.py
180
210
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,227
is_flapping
def is_flapping(self):
    """Determine if the process is flapping.

    Flapping occurs if the agent receives FLAPPING_MAX_FAILURES non-0
    exit codes in the first FLAPPING_MAX_SECONDS.
    """
    if os.getenv(wandb.env.AGENT_DISABLE_FLAPPING) == "true":
        return False
    within_startup_window = time.time() < wandb.START_TIME + self.FLAPPING_MAX_SECONDS
    if within_startup_window:
        return self._failed >= self.FLAPPING_MAX_FAILURES
    # NOTE: implicitly returns None (falsy) once outside the startup window,
    # matching the original behavior.
python
wandb/wandb_agent.py
212
221
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,228
is_failing
def is_failing(self):
    """True when failures are at least as many as finishes and have hit the initial-failure cap."""
    failures_dominate = self._failed >= self._finished
    cap_reached = self._failed >= self._max_initial_failures
    return failures_dominate and cap_reached
python
wandb/wandb_agent.py
223
227
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,229
run
def run(self):  # noqa: C901
    """Main agent loop: fetch sweep config, register, then heartbeat/process
    commands and reap run processes until stopped or the run count is hit."""
    # TODO: catch exceptions, handle errors, show validation warnings, and make more generic
    sweep_obj = self._api.sweep(self._sweep_id, "{}")
    if sweep_obj:
        sweep_yaml = sweep_obj.get("config")
        if sweep_yaml:
            sweep_config = yaml.safe_load(sweep_yaml)
            if sweep_config:
                sweep_command = sweep_config.get("command")
                if sweep_command and isinstance(sweep_command, list):
                    self._sweep_command = sweep_command

    # TODO: include sweep ID
    agent = self._api.register_agent(socket.gethostname(), sweep_id=self._sweep_id)
    agent_id = agent["id"]

    try:
        while self._running:
            # Drain controller commands first; each carries its own response queue.
            commands = util.read_many_from_queue(
                self._queue, 100, self.POLL_INTERVAL
            )
            for command in commands:
                command["resp_queue"].put(self._process_command(command))

            now = util.stopwatch_now()
            if self._last_report_time is None or (
                self._report_interval != 0
                and now > self._last_report_time + self._report_interval
            ):
                logger.info("Running runs: %s", list(self._run_processes.keys()))
                self._last_report_time = now
            run_status = {}
            # Iterate over a copy: finished runs are deleted from the dict below.
            for run_id, run_process in list(self._run_processes.items()):
                poll_result = run_process.poll()
                if poll_result is None:
                    run_status[run_id] = True
                    continue
                elif (
                    not isinstance(poll_result, bool)
                    and isinstance(poll_result, int)
                    and poll_result > 0
                ):
                    self._failed += 1
                    if self.is_flapping():
                        logger.error(
                            "Detected %i failed runs in the first %i seconds, shutting down.",
                            self.FLAPPING_MAX_FAILURES,
                            self.FLAPPING_MAX_SECONDS,
                        )
                        logger.info(
                            "To disable this check set WANDB_AGENT_DISABLE_FLAPPING=true"
                        )
                        self._running = False
                        break
                    if self.is_failing():
                        logger.error(
                            "Detected %i failed runs in a row, shutting down.",
                            self._max_initial_failures,
                        )
                        logger.info(
                            "To change this value set WANDB_AGENT_MAX_INITIAL_FAILURES=val"
                        )
                        self._running = False
                        break
                logger.info("Cleaning up finished run: %s", run_id)
                # wandb.teardown() was added with wandb service and is a hammer to make
                # sure that active runs are finished before moving on to another agent run
                #
                # In the future, a lighter weight way to implement this could be to keep a
                # service process open for all the agent instances and inform_finish when
                # the run should be marked complete. This however could require
                # inform_finish on every run created by this process.
                if hasattr(wandb, "teardown"):
                    exit_code = 0
                    # NOTE(review): bool is a subclass of int in Python, so a
                    # bool poll_result takes the first branch and the elif is
                    # effectively unreachable — confirm intent.
                    if isinstance(poll_result, int):
                        exit_code = poll_result
                    elif isinstance(poll_result, bool):
                        exit_code = -1
                    wandb.teardown(exit_code)
                del self._run_processes[run_id]
                self._last_report_time = None
                self._finished += 1

            # precedence: (count-reached) or (not running) — `and` binds tighter
            if self._count and self._finished >= self._count or not self._running:
                self._running = False
                continue

            commands = self._api.agent_heartbeat(agent_id, {}, run_status)

            # TODO: send _server_responses
            self._server_responses = []
            for command in commands:
                self._server_responses.append(self._process_command(command))

    except KeyboardInterrupt:
        try:
            wandb.termlog(
                "Ctrl-c pressed. Waiting for runs to end. Press ctrl-c again to terminate them."
            )
            for _, run_process in self._run_processes.items():
                run_process.wait()
        except KeyboardInterrupt:
            pass
    finally:
        try:
            if not self._in_jupyter:
                wandb.termlog("Terminating and syncing runs. Press ctrl-c to kill.")
            for _, run_process in self._run_processes.items():
                try:
                    run_process.terminate()
                except OSError:
                    pass  # if process is already dead
            for _, run_process in self._run_processes.items():
                run_process.wait()
        except KeyboardInterrupt:
            wandb.termlog("Killing runs and quitting.")
            for _, run_process in self._run_processes.items():
                try:
                    run_process.kill()
                except OSError:
                    pass  # if process is already dead
python
wandb/wandb_agent.py
229
351
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,230
_process_command
def _process_command(self, command):
    """Dispatch one agent command, packaging the result (or the exception) into a response dict."""
    logger.info(
        "Agent received command: %s"
        % (command["type"] if "type" in command else "Unknown")
    )
    response = {
        "id": command.get("id"),
        "result": None,
    }
    try:
        # Dispatch table; built inside try so a missing key surfaces as a
        # captured exception, like the original if/elif chain.
        handlers = {
            "run": self._command_run,
            "stop": self._command_stop,
            "exit": self._command_exit,
            "resume": self._command_run,
        }
        command_type = command["type"]
        handler = handlers.get(command_type)
        if handler is None:
            raise AgentError("No such command: %s" % command_type)
        response["result"] = handler(command)
    except Exception:
        logger.exception("Exception while processing command: %s", command)
        ex_type, ex, tb = sys.exc_info()
        response["exception"] = f"{ex_type.__name__}: {str(ex)}"
        response["traceback"] = traceback.format_tb(tb)
        del tb

    self._log.append((command, response))

    return response
python
wandb/wandb_agent.py
353
384
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,231
_create_sweep_command
def _create_sweep_command(command: Optional[List] = None) -> List:
    """Return sweep command, filling in environment variable macros.

    Bug fix: the command list was previously modified in place, which mutated
    the caller's list — and, when called with no argument, permanently
    rewrote the shared ``Agent.DEFAULT_SWEEP_COMMAND`` class attribute with
    substituted values. We now work on a copy.
    """
    # Start from default sweep command; copy so we never mutate the input.
    command = list(command or Agent.DEFAULT_SWEEP_COMMAND)
    for i, chunk in enumerate(command):
        # Replace environment variable macros
        # Search a str(chunk), but allow matches to be of any (ex: int) type
        if Agent.SWEEP_COMMAND_ENV_VAR_REGEX.search(str(chunk)):
            # Replace from backwards forwards
            matches = list(Agent.SWEEP_COMMAND_ENV_VAR_REGEX.finditer(chunk))
            for m in matches[::-1]:
                # Default to just leaving as is if environment variable does not exist
                _var: str = os.environ.get(m.group(1), m.group(1))
                command[i] = f"{command[i][:m.start()]}{_var}{command[i][m.end():]}"
    return command
python
wandb/wandb_agent.py
387
401
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,232
_command_run
def _command_run(self, command): logger.info( "Agent starting run with config:\n" + "\n".join( ["\t{}: {}".format(k, v["value"]) for k, v in command["args"].items()] ) ) if self._in_jupyter: print( "wandb: Agent Starting Run: {} with config:\n".format( command.get("run_id") ) + "\n".join( [ "\t{}: {}".format(k, v["value"]) for k, v in command["args"].items() ] ) ) # Setup sweep command sweep_command: List[str] = Agent._create_sweep_command(self._sweep_command) run_id = command.get("run_id") sweep_id = os.environ.get(wandb.env.SWEEP_ID) # TODO(jhr): move into settings config_file = os.path.join( "wandb", "sweep-" + sweep_id, "config-" + run_id + ".yaml" ) json_file = os.path.join( "wandb", "sweep-" + sweep_id, "config-" + run_id + ".json" ) os.environ[wandb.env.RUN_ID] = run_id base_dir = os.environ.get(wandb.env.DIR, "") sweep_param_path = os.path.join(base_dir, config_file) os.environ[wandb.env.SWEEP_PARAM_PATH] = sweep_param_path wandb_lib.config_util.save_config_file_from_dict( sweep_param_path, command["args"] ) env = dict(os.environ) sweep_vars: Dict[str, Any] = _create_sweep_command_args(command) if "${args_json_file}" in sweep_command: with open(json_file, "w") as fp: fp.write(sweep_vars["args_json"][0]) if self._function: # make sure that each run regenerates setup singleton wandb_sdk.wandb_setup._setup(_reset=True) proc = AgentProcess( function=self._function, env=env, run_id=run_id, in_jupyter=self._in_jupyter, ) else: sweep_vars["interpreter"] = ["python"] sweep_vars["program"] = [command["program"]] sweep_vars["args_json_file"] = [json_file] if not platform.system() == "Windows": sweep_vars["env"] = ["/usr/bin/env"] command_list = [] for c in sweep_command: c = str(c) if c.startswith("${") and c.endswith("}"): replace_list = sweep_vars.get(c[2:-1]) command_list += replace_list or [] else: command_list += [c] logger.info( "About to run command: {}".format( " ".join('"%s"' % c if " " in c else c for c in command_list) ) ) proc = 
AgentProcess(command=command_list, env=env) self._run_processes[run_id] = proc # we keep track of when we sent the sigterm to give processes a chance # to handle the signal before sending sigkill every heartbeat self._run_processes[run_id].last_sigterm_time = None self._last_report_time = None
python
wandb/wandb_agent.py
403
487
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,233
_command_stop
def _command_stop(self, command): run_id = command["run_id"] if run_id in self._run_processes: proc = self._run_processes[run_id] now = util.stopwatch_now() if proc.last_sigterm_time is None: proc.last_sigterm_time = now logger.info("Stop: %s", run_id) try: proc.terminate() except OSError: # if process is already dead pass elif now > proc.last_sigterm_time + self._kill_delay: logger.info("Kill: %s", run_id) try: proc.kill() except OSError: # if process is already dead pass else: logger.error("Run %s not running", run_id)
python
wandb/wandb_agent.py
489
508
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,234
_command_exit
def _command_exit(self, command): logger.info("Received exit command. Killing runs and quitting.") for _, proc in self._run_processes.items(): try: proc.kill() except OSError: # process is already dead pass self._running = False
python
wandb/wandb_agent.py
510
518
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,235
__init__
def __init__(self, queue): self._queue = queue self._command_id = 0 self._multiproc_manager = multiprocessing.Manager()
python
wandb/wandb_agent.py
522
525
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,236
command
def command(self, command): command["origin"] = "local" command["id"] = "local-%s" % self._command_id self._command_id += 1 resp_queue = self._multiproc_manager.Queue() command["resp_queue"] = resp_queue self._queue.put(command) result = resp_queue.get() print("result:", result) if "exception" in result: print("Exception occurred while running command") for line in result["traceback"]: print(line.strip()) print(result["exception"]) return result
python
wandb/wandb_agent.py
527
541
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,237
run_agent
def run_agent( sweep_id, function=None, in_jupyter=None, entity=None, project=None, count=None ): parts = dict(entity=entity, project=project, name=sweep_id) err = util.parse_sweep_id(parts) if err: wandb.termerror(err) return entity = parts.get("entity") or entity project = parts.get("project") or project sweep_id = parts.get("name") or sweep_id if entity: wandb.env.set_entity(entity) if project: wandb.env.set_project(project) if sweep_id: # TODO(jhr): remove when jobspec is merged os.environ[wandb.env.SWEEP_ID] = sweep_id logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() log_level = logging.DEBUG if in_jupyter: log_level = logging.ERROR ch.setLevel(log_level) formatter = logging.Formatter( "%(asctime)s - %(name)s - %(levelname)s - %(message)s" ) ch.setFormatter(formatter) try: logger.addHandler(ch) api = InternalApi() queue = multiprocessing.Queue() agent = Agent( api, queue, sweep_id=sweep_id, function=function, in_jupyter=in_jupyter, count=count, ) agent.run() finally: # make sure we remove the logging handler (important for jupyter notebooks) logger.removeHandler(ch)
python
wandb/wandb_agent.py
544
589
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,238
agent
def agent(sweep_id, function=None, entity=None, project=None, count=None): """Run a function or program with configuration parameters specified by server. Generic agent entrypoint, used for CLI or jupyter. Arguments: sweep_id: (dict) Sweep ID generated by CLI or sweep API function: (func, optional) A function to call instead of the "program" specifed in the config. entity: (str, optional) W&B Entity project: (str, optional) W&B Project count: (int, optional) the number of trials to run. Examples: Run a sample sweep over a function: <!--yeadoc-test:one-parameter-sweep-agent--> ```python import wandb sweep_configuration = { "name": "my-awesome-sweep", "metric": {"name": "accuracy", "goal": "maximize"}, "method": "grid", "parameters": {"a": {"values": [1, 2, 3, 4]}}, } def my_train_func(): # read the current value of parameter "a" from wandb.config wandb.init() a = wandb.config.a wandb.log({"a": a, "accuracy": a + 1}) sweep_id = wandb.sweep(sweep_configuration) # run the sweep wandb.agent(sweep_id, function=my_train_func) ``` """ global _INSTANCES _INSTANCES += 1 try: # make sure we are logged in wandb_sdk.wandb_login._login(_silent=True) if function: return pyagent(sweep_id, function, entity, project, count) in_jupyter = wandb.wandb_sdk.lib.ipython._get_python_type() != "python" return run_agent( sweep_id, function=function, in_jupyter=in_jupyter, entity=entity, project=project, count=count, ) finally: _INSTANCES -= 1
python
wandb/wandb_agent.py
592
650
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,239
_is_running
def _is_running(): return bool(_INSTANCES)
python
wandb/wandb_agent.py
656
657
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,240
__init__
def __init__(self, id: str, data: Table) -> None: self._id = id self._data = data
python
wandb/viz.py
8
10
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,241
get_config_value
def get_config_value(self, key: str) -> Dict[str, Any]: return { "id": self._id, "historyFieldSettings": {"x-axis": "_step", "key": key}, }
python
wandb/viz.py
12
16
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,242
get_config_key
def get_config_key(key: str) -> Tuple[str, str, str]: return "_wandb", "viz", key
python
wandb/viz.py
19
20
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,243
value
def value(self) -> Table: return self._data
python
wandb/viz.py
23
24
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,244
__init__
def __init__( self, id: str, data: Table, fields: Dict[str, Any], string_fields: Dict[str, Any], ) -> None: self._id = id self._data = data self._fields = fields self._string_fields = string_fields
python
wandb/viz.py
28
38
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,245
get_config_value
def get_config_value( self, panel_type: str, query: Dict[str, Any], ) -> Dict[str, Any]: return { "panel_type": panel_type, "panel_config": { "panelDefId": self._id, "fieldSettings": self._fields, "stringSettings": self._string_fields, "transform": {"name": "tableWithLeafColNames"}, "userQuery": query, }, }
python
wandb/viz.py
40
54
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,246
get_config_key
def get_config_key(key: str) -> Tuple[str, str, str]: return "_wandb", "visualize", key
python
wandb/viz.py
57
58
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,247
user_query
def user_query(table_key: str) -> Dict[str, Any]: return { "queryFields": [ { "name": "runSets", "args": [{"name": "runSets", "value": "${runSets}"}], "fields": [ {"name": "id", "fields": []}, {"name": "name", "fields": []}, {"name": "_defaultColorIndex", "fields": []}, { "name": "summaryTable", "args": [{"name": "tableKey", "value": table_key}], "fields": [], }, ], } ], }
python
wandb/viz.py
61
79
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,248
table
def table(self) -> Table: return self._data
python
wandb/viz.py
82
83
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,249
fields
def fields(self) -> Dict[str, Any]: return self._fields
python
wandb/viz.py
86
87
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,250
string_fields
def string_fields(self) -> Dict[str, Any]: return self._string_fields
python
wandb/viz.py
90
91
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,251
custom_chart
def custom_chart( vega_spec_name: str, data_table: Table, fields: Dict[str, Any], string_fields: Dict[str, Any] = {}, ) -> CustomChart: if not isinstance(data_table, Table): raise Error( f"Expected `data_table` to be `wandb.Table` type, instead got {type(data_table).__name__}" ) return CustomChart( id=vega_spec_name, data=data_table, fields=fields, string_fields=string_fields, )
python
wandb/viz.py
94
109
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,252
visualize
def visualize(id: str, value: Table) -> Visualize: if not isinstance(value, Table): raise Error( f"Expected `value` to be `wandb.Table` type, instead got {type(value).__name__}" ) return Visualize(id=id, data=value)
python
wandb/viz.py
112
117
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,253
reset
def reset(): _triggers.clear()
python
wandb/trigger.py
14
15
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,254
register
def register(event: str, func: Callable): _triggers.setdefault(event, []).append(func)
python
wandb/trigger.py
18
19
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,255
call
def call(event_str: str, *args: Any, **kwargs: Any): for func in _triggers.get(event_str, []): func(*args, **kwargs)
python
wandb/trigger.py
22
24
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,256
unregister
def unregister(event: str, func: Callable): _triggers[event].remove(func)
python
wandb/trigger.py
27
28
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,257
magics_class
def magics_class(*args, **kwargs): return lambda *args, **kwargs: None
python
wandb/jupyter.py
28
29
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,258
magic_arguments
def magic_arguments(*args, **kwargs): return lambda *args, **kwargs: None
python
wandb/jupyter.py
31
32
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,259
argument
def argument(*args, **kwargs): return lambda *args, **kwargs: None
python
wandb/jupyter.py
34
35
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,260
line_cell_magic
def line_cell_magic(*args, **kwargs): return lambda *args, **kwargs: None
python
wandb/jupyter.py
37
38
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,261
maybe_display
def maybe_display(): """Display a run if the user added cell magic and we have run.""" if __IFrame is not None: return __IFrame.maybe_display() return False
python
wandb/jupyter.py
46
50
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,262
quiet
def quiet(): if __IFrame is not None: return __IFrame.opts.get("quiet") return False
python
wandb/jupyter.py
53
56
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,263
__init__
def __init__(self, path=None, opts=None): self.path = path self.api = wandb.Api() self.opts = opts or {} self.displayed = False self.height = self.opts.get("height", 420)
python
wandb/jupyter.py
60
65
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,264
maybe_display
def maybe_display(self) -> bool: if not self.displayed and (self.path or wandb.run): display(self) return self.displayed
python
wandb/jupyter.py
67
70
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,265
_repr_html_
def _repr_html_(self): try: self.displayed = True if self.opts.get("workspace", False): if self.path is None and wandb.run: self.path = wandb.run.path if isinstance(self.path, str): object = self.api.from_path(self.path) else: object = wandb.run if object is None: if wandb.Api().api_key is None: return "You must be logged in to render wandb in jupyter, run `wandb.login()`" else: object = self.api.project( "/".join( [ wandb.Api().default_entity, wandb.util.auto_project_name(None), ] ) ) return object.to_html(self.height, hidden=False) except wandb.Error as e: return f"Can't display wandb interface<br/>{e}"
python
wandb/jupyter.py
72
96
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,266
__init__
def __init__(self, shell, require_interaction=False): super().__init__(shell) self.options = {}
python
wandb/jupyter.py
101
103
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,267
wandb
def wandb(self, line, cell=None): """Display wandb resources in jupyter. This can be used as cell or line magic. %wandb USERNAME/PROJECT/runs/RUN_ID --- %%wandb -h 1024 with wandb.init() as run: run.log({"loss": 1}) """ # Record options args = parse_argstring(self.wandb, line) self.options["height"] = args.height self.options["workspace"] = args.workspace self.options["quiet"] = args.quiet iframe = IFrame(args.path, opts=self.options) displayed = iframe.maybe_display() if cell is not None: if not displayed: # Store the IFrame globally and attempt to display if we have a run cell = ( f"wandb.jupyter.__IFrame = wandb.jupyter.IFrame(opts={self.options})\n" + cell + "\nwandb.jupyter.__IFrame = None" ) get_ipython().run_cell(cell)
python
wandb/jupyter.py
134
158
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,268
notebook_metadata_from_jupyter_servers_and_kernel_id
def notebook_metadata_from_jupyter_servers_and_kernel_id(): servers, kernel_id = jupyter_servers_and_kernel_id() for s in servers: if s.get("password"): raise ValueError("Can't query password protected kernel") res = requests.get( urljoin(s["url"], "api/sessions"), params={"token": s.get("token", "")} ).json() for nn in res: # TODO: wandb/client#400 found a case where res returned an array of # strings... if isinstance(nn, dict) and nn.get("kernel") and "notebook" in nn: if nn["kernel"]["id"] == kernel_id: return { "root": s.get("root_dir", s.get("notebook_dir", os.getcwd())), "path": nn["notebook"]["path"], "name": nn["notebook"]["name"], } return None
python
wandb/jupyter.py
161
179
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,269
notebook_metadata
def notebook_metadata(silent) -> Dict[str, str]: """Attempt to query jupyter for the path and name of the notebook file. This can handle different jupyter environments, specifically: 1. Colab 2. Kaggle 3. JupyterLab 4. Notebooks 5. Other? """ error_message = ( "Failed to detect the name of this notebook, you can set it manually with " "the WANDB_NOTEBOOK_NAME environment variable to enable code saving." ) try: jupyter_metadata = notebook_metadata_from_jupyter_servers_and_kernel_id() # Colab: # request the most recent contents ipynb = attempt_colab_load_ipynb() if ipynb is not None and jupyter_metadata is not None: return { "root": "/content", "path": jupyter_metadata["path"], "name": jupyter_metadata["name"], } # Kaggle: if wandb.util._is_kaggle(): # request the most recent contents ipynb = attempt_kaggle_load_ipynb() if ipynb: return { "root": "/kaggle/working", "path": ipynb["metadata"]["name"], "name": ipynb["metadata"]["name"], } if jupyter_metadata: return jupyter_metadata if not silent: logger.error(error_message) return {} except Exception: # TODO: report this exception # TODO: Fix issue this is not the logger initialized in in wandb.init() # since logger is not attached, outputs to notebook if not silent: logger.error(error_message) return {}
python
wandb/jupyter.py
182
232
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,270
jupyter_servers_and_kernel_id
def jupyter_servers_and_kernel_id(): """Return a list of servers and the current kernel_id. Used to query for the name of the notebook. """ try: import ipykernel kernel_id = re.search( "kernel-(.*).json", ipykernel.connect.get_connection_file() ).group(1) # We're either in jupyterlab or a notebook, lets prefer the newer jupyter_server package serverapp = wandb.util.get_module("jupyter_server.serverapp") notebookapp = wandb.util.get_module("notebook.notebookapp") servers = [] if serverapp is not None: servers.extend(list(serverapp.list_running_servers())) if notebookapp is not None: servers.extend(list(notebookapp.list_running_servers())) return servers, kernel_id except (AttributeError, ValueError, ImportError): return [], None
python
wandb/jupyter.py
235
256
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,271
attempt_colab_load_ipynb
def attempt_colab_load_ipynb(): colab = wandb.util.get_module("google.colab") if colab: # This isn't thread safe, never call in a thread response = colab._message.blocking_request("get_ipynb", timeout_sec=5) if response: return response["ipynb"]
python
wandb/jupyter.py
259
265
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,272
attempt_kaggle_load_ipynb
def attempt_kaggle_load_ipynb(): kaggle = wandb.util.get_module("kaggle_session") if kaggle: try: client = kaggle.UserSessionClient() parsed = json.loads(client.get_exportable_ipynb()["source"]) # TODO: couldn't find a way to get the name of the notebook... parsed["metadata"]["name"] = "kaggle.ipynb" return parsed except Exception: logger.exception("Unable to load kaggle notebook") return None
python
wandb/jupyter.py
268
279
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,273
attempt_colab_login
def attempt_colab_login(app_url): """This renders an iframe to wandb in the hopes it posts back an api key.""" from google.colab import output from google.colab._message import MessageError from IPython import display display.display( display.Javascript( """ window._wandbApiKey = new Promise((resolve, reject) => { function loadScript(url) { return new Promise(function(resolve, reject) { let newScript = document.createElement("script"); newScript.onerror = reject; newScript.onload = resolve; document.body.appendChild(newScript); newScript.src = url; }); } loadScript("https://cdn.jsdelivr.net/npm/postmate/build/postmate.min.js").then(() => { const iframe = document.createElement('iframe') iframe.style.cssText = "width:0;height:0;border:none" document.body.appendChild(iframe) const handshake = new Postmate({ container: iframe, url: '%s/authorize' }); const timeout = setTimeout(() => reject("Couldn't auto authenticate"), 5000) handshake.then(function(child) { child.on('authorize', data => { clearTimeout(timeout) resolve(data) }); }); }) }); """ % app_url.replace("http:", "https:") ) ) try: return output.eval_js("_wandbApiKey") except MessageError: return None
python
wandb/jupyter.py
282
325
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,274
__init__
def __init__(self, settings): self.outputs = {} self.settings = settings self.shell = get_ipython()
python
wandb/jupyter.py
329
332
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,275
save_display
def save_display(self, exc_count, data_with_metadata): self.outputs[exc_count] = self.outputs.get(exc_count, []) # byte values such as images need to be encoded in base64 # otherwise nbformat.v4.new_output will throw a NotebookValidationError data = data_with_metadata["data"] b64_data = {} for key in data: val = data[key] if isinstance(val, bytes): b64_data[key] = b64encode(val).decode("utf-8") else: b64_data[key] = val self.outputs[exc_count].append( {"data": b64_data, "metadata": data_with_metadata["metadata"]} )
python
wandb/jupyter.py
334
350
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,276
probe_ipynb
def probe_ipynb(self): """Return notebook as dict or None.""" relpath = self.settings._jupyter_path if relpath: if os.path.exists(relpath): with open(relpath) as json_file: data = json.load(json_file) return data colab_ipynb = attempt_colab_load_ipynb() if colab_ipynb: return colab_ipynb kaggle_ipynb = attempt_kaggle_load_ipynb() if kaggle_ipynb and len(kaggle_ipynb["cells"]) > 0: return kaggle_ipynb return
python
wandb/jupyter.py
352
369
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,277
save_ipynb
def save_ipynb(self) -> bool: if not self.settings.save_code: logger.info("not saving jupyter notebook") return False ret = False try: ret = self._save_ipynb() except Exception as e: logger.info(f"Problem saving notebook: {repr(e)}") return ret
python
wandb/jupyter.py
371
380
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,278
_save_ipynb
def _save_ipynb(self) -> bool: relpath = self.settings._jupyter_path logger.info("looking for notebook: %s", relpath) if relpath: if os.path.exists(relpath): shutil.copy( relpath, os.path.join( self.settings._tmp_code_dir, os.path.basename(relpath) ), ) return True # TODO: likely only save if the code has changed colab_ipynb = attempt_colab_load_ipynb() if colab_ipynb: try: jupyter_metadata = ( notebook_metadata_from_jupyter_servers_and_kernel_id() ) nb_name = jupyter_metadata["name"] except Exception: nb_name = "colab.ipynb" if not nb_name.endswith(".ipynb"): nb_name += ".ipynb" with open( os.path.join( self.settings._tmp_code_dir, nb_name, ), "w", encoding="utf-8", ) as f: f.write(json.dumps(colab_ipynb)) return True kaggle_ipynb = attempt_kaggle_load_ipynb() if kaggle_ipynb and len(kaggle_ipynb["cells"]) > 0: with open( os.path.join( self.settings._tmp_code_dir, kaggle_ipynb["metadata"]["name"] ), "w", encoding="utf-8", ) as f: f.write(json.dumps(kaggle_ipynb)) return True return False
python
wandb/jupyter.py
382
430
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,279
save_history
def save_history(self): """This saves all cell executions in the current session as a new notebook.""" try: from nbformat import v4, validator, write except ImportError: logger.error("Run pip install nbformat to save notebook history") return # TODO: some tests didn't patch ipython properly? if self.shell is None: return cells = [] hist = list(self.shell.history_manager.get_range(output=True)) if len(hist) <= 1 or not self.settings.save_code: logger.info("not saving jupyter history") return try: for _, execution_count, exc in hist: if exc[1]: # TODO: capture stderr? outputs = [ v4.new_output(output_type="stream", name="stdout", text=exc[1]) ] else: outputs = [] if self.outputs.get(execution_count): for out in self.outputs[execution_count]: outputs.append( v4.new_output( output_type="display_data", data=out["data"], metadata=out["metadata"] or {}, ) ) cells.append( v4.new_code_cell( execution_count=execution_count, source=exc[0], outputs=outputs ) ) if hasattr(self.shell, "kernel"): language_info = self.shell.kernel.language_info else: language_info = {"name": "python", "version": sys.version} logger.info("saving %i cells to _session_history.ipynb", len(cells)) nb = v4.new_notebook( cells=cells, metadata={ "kernelspec": { "display_name": "Python %i" % sys.version_info[0], "name": "python%i" % sys.version_info[0], "language": "python", }, "language_info": language_info, }, ) state_path = os.path.join("code", "_session_history.ipynb") wandb.run._set_config_wandb("session_history", state_path) filesystem.mkdir_exists_ok(os.path.join(wandb.run.dir, "code")) with open( os.path.join(self.settings._tmp_code_dir, "_session_history.ipynb"), "w", encoding="utf-8", ) as f: write(nb, f, version=4) with open( os.path.join(wandb.run.dir, state_path), "w", encoding="utf-8" ) as f: write(nb, f, version=4) except (OSError, validator.NotebookValidationError) as e: logger.error("Unable to save ipython session history:\n%s", e) pass
python
wandb/jupyter.py
432
501
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,280
_add_any
def _add_any( artifact: ArtifactInterface, path_or_obj: Union[ str, ArtifactManifestEntry, data_types.WBValue ], # todo: add dataframe name: Optional[str], ) -> Any: """Add an object to an artifact. High-level wrapper to add object(s) to an artifact - calls any of the .add* methods under Artifact depending on the type of object that's passed in. This will probably be moved to the Artifact class in the future. Args: artifact: `ArtifactInterface` - most likely a LocalArtifact created with `wandb.Artifact(...)` path_or_obj: `Union[str, ArtifactManifestEntry, data_types.WBValue]` - either a str or valid object which indicates what to add to an artifact. name: `str` - the name of the object which is added to an artifact. Returns: Type[Any] - Union[None, ArtifactManifestEntry, etc] """ if isinstance(path_or_obj, ArtifactManifestEntry): return artifact.add_reference(path_or_obj, name) elif isinstance(path_or_obj, data_types.WBValue): return artifact.add(path_or_obj, name) elif isinstance(path_or_obj, str): if os.path.isdir(path_or_obj): return artifact.add_dir(path_or_obj) elif os.path.isfile(path_or_obj): return artifact.add_file(path_or_obj) else: with artifact.new_file(name) as f: f.write(json.dumps(path_or_obj, sort_keys=True)) else: raise ValueError( f"Expected `path_or_obj` to be instance of `ArtifactManifestEntry`, `WBValue`, or `str, found {type(path_or_obj)}" )
python
wandb/beta/workflows.py
12
52
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,281
_log_artifact_version
def _log_artifact_version( name: str, type: str, entries: Dict[str, Union[str, ArtifactManifestEntry, data_types.WBValue]], aliases: Optional[Union[str, List[str]]] = None, description: Optional[str] = None, metadata: Optional[dict] = None, project: Optional[str] = None, scope_project: Optional[bool] = None, job_type: str = "auto", ) -> ArtifactInterface: """Create an artifact, populate it, and log it with a run. If a run is not present, we create one. Args: name: `str` - name of the artifact. If not scoped to a project, name will be suffixed by "-{run_id}". type: `str` - type of the artifact, used in the UI to group artifacts of the same type. entries: `Dict` - dictionary containing the named objects we want added to this artifact. description: `str` - text description of artifact. metadata: `Dict` - users can pass in artifact-specific metadata here, will be visible in the UI. project: `str` - project under which to place this artifact. scope_project: `bool` - if True, we will not suffix `name` with "-{run_id}". job_type: `str` - Only applied if run is not present and we create one. Used to identify runs of a certain job type, i.e "evaluation". Returns: ArtifactInterface """ if wandb.run is None: run = wandb.init( project=project, job_type=job_type, settings=wandb.Settings(silent="true") ) else: run = wandb.run if not scope_project: name = f"{name}-{run.id}" if metadata is None: metadata = {} art = wandb.Artifact(name, type, description, metadata, False, None) for path in entries: _add_any(art, entries[path], path) # "latest" should always be present as an alias aliases = wandb.util._resolve_aliases(aliases) run.log_artifact(art, aliases=aliases) return art
python
wandb/beta/workflows.py
55
111
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,282
log_model
def log_model( model_obj: Any, name: str = "model", aliases: Optional[Union[str, List[str]]] = None, description: Optional[str] = None, metadata: Optional[dict] = None, project: Optional[str] = None, scope_project: Optional[bool] = None, **kwargs: Dict[str, Any], ) -> "_SavedModel": """Log a model object to enable model-centric workflows in the UI. Supported frameworks include PyTorch, Keras, Tensorflow, Scikit-learn, etc. Under the hood, we create a model artifact, bind it to the run that produced this model, associate it with the latest metrics logged with `wandb.log(...)` and more. Args: model_obj: any model object created with the following ML frameworks: PyTorch, Keras, Tensorflow, Scikit-learn. name: `str` - name of the model artifact that will be created to house this model_obj. aliases: `str, List[str]` - optional alias(es) that will be applied on this model and allow for unique identification. The alias "latest" will always be applied to the latest version of a model. description: `str` - text description/notes about the model - will be visible in the Model Card UI. metadata: `Dict` - model-specific metadata goes here - will be visible the UI. project: `str` - project under which to place this artifact. scope_project: `bool` - If true, name of this model artifact will not be suffixed by `-{run_id}`. Returns: _SavedModel instance Example: ```python import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.fc1 = nn.Linear(10, 10) def forward(self, x): x = self.fc1(x) x = F.relu(x) return x model = Net() sm = log_model(model, "my-simple-model", aliases=["best"]) ``` """ model = data_types._SavedModel.init(model_obj, **kwargs) _ = _log_artifact_version( name=name, type="model", entries={ "index": model, }, aliases=aliases, description=description, metadata=metadata, project=project, scope_project=scope_project, job_type="log_model", ) # TODO: handle offline mode appropriately. return model
python
wandb/beta/workflows.py
114
184
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,283
use_model
def use_model(aliased_path: str) -> "_SavedModel": """Fetch a saved model from an alias. Under the hood, we use the alias to fetch the model artifact containing the serialized model files and rebuild the model object from these files. We also declare the fetched model artifact as an input to the run (with `run.use_artifact`). Args: aliased_path: `str` - the following forms are valid: "name:version", "name:alias". May be prefixed with "entity/project". Returns: _SavedModel instance Example: ```python # Assuming you have previously logged a model with the name "my-simple-model": sm = use_model("my-simple-model:latest") model = sm.model_obj() ``` """ if ":" not in aliased_path: raise ValueError( "aliased_path must be of the form 'name:alias' or 'name:version'." ) # Returns a _SavedModel instance if wandb.run: run = wandb.run artifact = run.use_artifact(aliased_path) sm = artifact.get("index") if sm is None or not isinstance(sm, _SavedModel): raise ValueError( "Deserialization into model object failed: _SavedModel instance could not be initialized properly." ) return sm else: raise ValueError( "use_model can only be called inside a run. Please call wandb.init() before use_model(...)" )
python
wandb/beta/workflows.py
187
228
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,284
link_model
def link_model( model: "_SavedModel", target_path: str, aliases: Optional[Union[str, List[str]]] = None, ) -> None: """Link the given model to a portfolio. A portfolio is a promoted collection which contains (in this case) model artifacts. Linking to a portfolio allows for useful model-centric workflows in the UI. Args: model: `_SavedModel` - an instance of _SavedModel, most likely from the output of `log_model` or `use_model`. target_path: `str` - the target portfolio. The following forms are valid for the string: {portfolio}, {project/portfolio},{entity}/{project}/{portfolio}. aliases: `str, List[str]` - optional alias(es) that will only be applied on this linked model inside the portfolio. The alias "latest" will always be applied to the latest version of a model. Returns: None Example: sm = use_model("my-simple-model:latest") link_model(sm, "my-portfolio") """ aliases = wandb.util._resolve_aliases(aliases) if wandb.run: run = wandb.run # _artifact_source, if it exists, points to a Public Artifact. # Its existence means that _SavedModel was deserialized from a logged artifact, most likely from `use_model`. if model._artifact_source: artifact = model._artifact_source.artifact # If the _SavedModel has been added to a Local Artifact (most likely through `.add(WBValue)`), then # model._artifact_target will point to that Local Artifact. elif model._artifact_target and model._artifact_target.artifact._final: artifact = model._artifact_target.artifact else: raise ValueError( "Linking requires that the given _SavedModel belongs to an artifact" ) run.link_artifact(artifact, target_path, aliases) else: if model._artifact_source is not None: model._artifact_source.artifact.link(target_path, aliases) else: raise ValueError( "Linking requires that the given _SavedModel belongs to a logged artifact." )
python
wandb/beta/workflows.py
231
284
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,285
find_config_file
def find_config_file(config_path: Optional[str] = None) -> Optional[str]: paths = list( filter( None, [ config_path, # 1 config_path_from_environment(), # 2 os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3 os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4 ], ) ) log.debug(f"Trying paths: {repr(paths)}") for path in paths: if os.path.exists(path): log.debug(f"Found file at path: {path}") return path log.debug("No config file found") return None
python
wandb/docker/auth.py
37
59
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,286
config_path_from_environment
def config_path_from_environment() -> Optional[str]: config_dir = os.environ.get("DOCKER_CONFIG") if not config_dir: return None return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
python
wandb/docker/auth.py
62
66
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,287
home_dir
def home_dir() -> str: """Get the user's home directory. Uses the same logic as the Docker Engine client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX. """ if IS_WINDOWS_PLATFORM: return os.environ.get("USERPROFILE", "") else: return os.path.expanduser("~")
python
wandb/docker/auth.py
69
78
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,288
load_general_config
def load_general_config(config_path: Optional[str] = None) -> Dict: config_file = find_config_file(config_path) if not config_file: return {} try: with open(config_file) as f: conf: Dict = json.load(f) return conf except (OSError, ValueError) as e: # In the case of a legacy `.dockercfg` file, we won't # be able to load any JSON data. log.debug(e) log.debug("All parsing attempts failed - returning empty config") return {}
python
wandb/docker/auth.py
81
97
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,289
resolve_repository_name
def resolve_repository_name(repo_name: str) -> Tuple[str, str]: if "://" in repo_name: raise InvalidRepositoryError( f"Repository name cannot contain a scheme ({repo_name})" ) index_name, remote_name = split_repo_name(repo_name) if index_name[0] == "-" or index_name[-1] == "-": raise InvalidRepositoryError( "Invalid index name ({}). Cannot begin or end with a" " hyphen.".format(index_name) ) return resolve_index_name(index_name), remote_name
python
wandb/docker/auth.py
100
112
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,290
resolve_index_name
def resolve_index_name(index_name: str) -> str: index_name = convert_to_hostname(index_name) if index_name == "index." + INDEX_NAME: index_name = INDEX_NAME return index_name
python
wandb/docker/auth.py
115
119
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,291
split_repo_name
def split_repo_name(repo_name: str) -> Tuple[str, str]: parts = repo_name.split("/", 1) if len(parts) == 1 or ( "." not in parts[0] and ":" not in parts[0] and parts[0] != "localhost" ): # This is a docker index repo (ex: username/foobar or ubuntu) return INDEX_NAME, repo_name return parts[0], parts[1]
python
wandb/docker/auth.py
122
129
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,292
get_credential_store
def get_credential_store(authconfig: Dict, registry: str) -> Optional[str]: if not isinstance(authconfig, AuthConfig): authconfig = AuthConfig(authconfig) return authconfig.get_credential_store(registry)
python
wandb/docker/auth.py
132
135
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,293
__init__
def __init__(self, dct: Dict, credstore_env: Optional[Mapping] = None) -> None: super().__init__(dct) if "auths" not in dct: dct["auths"] = {} self.update(dct) self._credstore_env = credstore_env self._stores: Dict[str, "dockerpycreds.Store"] = dict()
python
wandb/docker/auth.py
139
145
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,294
parse_auth
def parse_auth( cls, entries: Dict[str, Dict[str, Any]], raise_on_error: bool = False, ) -> Dict[str, Dict[str, Any]]: """Parse authentication entries. Arguments: entries: Dict of authentication entries. raise_on_error: If set to true, an invalid format will raise InvalidConfigFileError Returns: Authentication registry. """ conf = {} for registry, entry in entries.items(): if not isinstance(entry, dict): log.debug(f"Config entry for key {registry} is not auth config") # type: ignore # We sometimes fall back to parsing the whole config as if it # was the auth config by itself, for legacy purposes. In that # case, we fail silently and return an empty conf if any of the # keys is not formatted properly. if raise_on_error: raise InvalidConfigFileError( f"Invalid configuration for registry {registry}" ) return {} if "identitytoken" in entry: log.debug(f"Found an IdentityToken entry for registry {registry}") conf[registry] = {"IdentityToken": entry["identitytoken"]} continue # Other values are irrelevant if we have a token if "auth" not in entry: # Starting with engine v1.11 (API 1.23), an empty dictionary is # a valid value in the auth's config. # https://github.com/docker/compose/issues/3265 log.debug( "Auth data for {} is absent. Client might be using a " "credentials store instead.".format(registry) ) conf[registry] = {} continue username, password = decode_auth(entry["auth"]) log.debug( "Found entry (registry={}, username={})".format( repr(registry), repr(username) ) ) conf[registry] = { "username": username, "password": password, "email": entry.get("email"), "serveraddress": registry, } return conf
python
wandb/docker/auth.py
148
204
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,295
load_config
def load_config( cls, config_path: Optional[str], config_dict: Optional[Dict[str, Any]], credstore_env: Optional[Mapping] = None, ) -> "AuthConfig": """Load authentication data from a Docker configuration file. If the config_path is not passed in it looks for a configuration file in the root directory. Lookup priority: explicit config_path parameter > DOCKER_CONFIG environment variable > ~/.docker/config.json > ~/.dockercfg. """ if not config_dict: config_file = find_config_file(config_path) if not config_file: return cls({}, credstore_env) try: with open(config_file) as f: config_dict = json.load(f) except (OSError, KeyError, ValueError) as e: # Likely missing new Docker config file, or it's in an # unknown format, continue to attempt to read old location # and format. log.debug(e) return cls(_load_legacy_config(config_file), credstore_env) res = {} assert isinstance(config_dict, Dict) # worship mypy if config_dict.get("auths"): log.debug("Found 'auths' section") res.update( {"auths": cls.parse_auth(config_dict.pop("auths"), raise_on_error=True)} ) if config_dict.get("credsStore"): log.debug("Found 'credsStore' section") res.update({"credsStore": config_dict.pop("credsStore")}) if config_dict.get("credHelpers"): log.debug("Found 'credHelpers' section") res.update({"credHelpers": config_dict.pop("credHelpers")}) if res: return cls(res, credstore_env) log.debug( "Couldn't find auth-related section ; attempting to interpret " "as auth-only file" ) return cls({"auths": cls.parse_auth(config_dict)}, credstore_env)
python
wandb/docker/auth.py
207
257
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,296
auths
def auths(self) -> Dict[str, Dict[str, Any]]: return self.get("auths", {})
python
wandb/docker/auth.py
260
261
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,297
creds_store
def creds_store(self) -> Optional[str]: return self.get("credsStore", None)
python
wandb/docker/auth.py
264
265
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,298
cred_helpers
def cred_helpers(self) -> Dict: return self.get("credHelpers", {})
python
wandb/docker/auth.py
268
269
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,299
is_empty
def is_empty(self) -> bool: return not self.auths and not self.creds_store and not self.cred_helpers
python
wandb/docker/auth.py
272
273
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
1,300
resolve_authconfig
def resolve_authconfig( self, registry: Optional[str] = None ) -> Optional[Dict[str, Any]]: """Return the authentication data for a specific registry. As with the Docker client, legacy entries in the config with full URLs are stripped down to hostnames before checking for a match. Returns None if no match was found. """ if self.creds_store or self.cred_helpers: store_name = self.get_credential_store(registry) if store_name is not None: log.debug(f"Using credentials store {store_name!r}") cfg = self._resolve_authconfig_credstore(registry, store_name) if cfg is not None: return cfg log.debug("No entry in credstore - fetching from auth dict") # Default to the public index server registry = resolve_index_name(registry) if registry else INDEX_NAME log.debug(f"Looking for auth entry for {repr(registry)}") if registry in self.auths: log.debug(f"Found {repr(registry)}") return self.auths[registry] for key, conf in self.auths.items(): if resolve_index_name(key) == registry: log.debug(f"Found {repr(key)}") return conf log.debug("No entry found") return None
python
wandb/docker/auth.py
275
307
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }