id
int64
1
6.07M
name
stringlengths
1
295
code
stringlengths
12
426k
language
stringclasses
1 value
source_file
stringlengths
5
202
start_line
int64
1
158k
end_line
int64
1
158k
repo
dict
2,601
config
def config(self) -> Dict[str, Any]: if self._config is not None: return deepcopy(self._config) self._config = {k: v["config"] for (k, v) in self._entries.items()} return deepcopy(self._config)
python
wandb/testing/relay.py
180
185
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,602
get_run_telemetry
def get_run_telemetry(self, run_id: str) -> Dict[str, Any]: return self.config.get(run_id, {}).get("_wandb", {}).get("value", {}).get("t")
python
wandb/testing/relay.py
201
202
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,603
get_run_metrics
def get_run_metrics(self, run_id: str) -> Dict[str, Any]: return self.config.get(run_id, {}).get("_wandb", {}).get("value", {}).get("m")
python
wandb/testing/relay.py
204
205
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,604
get_run_summary
def get_run_summary( self, run_id: str, include_private: bool = False ) -> Dict[str, Any]: # run summary dataframe must have only one row # for the given run id, so we convert it to dict # and extract the first (and only) row. mask_run = self.summary["__run_id"] == run_id run_summary = self.summary[mask_run] ret = ( run_summary.filter(regex="^[^_]", axis=1) if not include_private else run_summary ).to_dict(orient="records") return ret[0] if len(ret) > 0 else {}
python
wandb/testing/relay.py
207
220
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,605
get_run_history
def get_run_history( self, run_id: str, include_private: bool = False ) -> pd.DataFrame: mask_run = self.history["__run_id"] == run_id run_history = self.history[mask_run] return ( run_history.filter(regex="^[^_]", axis=1) if not include_private else run_history )
python
wandb/testing/relay.py
222
231
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,606
get_run_uploaded_files
def get_run_uploaded_files(self, run_id: str) -> Dict[str, Any]: return self.entries.get(run_id, {}).get("uploaded", [])
python
wandb/testing/relay.py
233
234
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,607
get_run_stats
def get_run_stats(self, run_id: str) -> pd.DataFrame: mask_run = self.events["__run_id"] == run_id run_stats = self.events[mask_run] return run_stats
python
wandb/testing/relay.py
236
239
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,608
__init__
def __init__(self): self.resolvers: List["Resolver"] = [ { "name": "upsert_bucket", "resolver": self.resolve_upsert_bucket, }, { "name": "upload_files", "resolver": self.resolve_upload_files, }, { "name": "uploaded_files", "resolver": self.resolve_uploaded_files, }, { "name": "preempting", "resolver": self.resolve_preempting, }, { "name": "upsert_sweep", "resolver": self.resolve_upsert_sweep, }, # { "name": "create_artifact", # "resolver": self.resolve_create_artifact, # }, ]
python
wandb/testing/relay.py
250
275
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,609
resolve_upsert_bucket
def resolve_upsert_bucket( request_data: Dict[str, Any], response_data: Dict[str, Any], **kwargs: Any ) -> Optional[Dict[str, Any]]: if not isinstance(request_data, dict) or not isinstance(response_data, dict): return None query = response_data.get("data", {}).get("upsertBucket") is not None if query: data = response_data["data"]["upsertBucket"].get("bucket") data["config"] = json.loads(data["config"]) return data return None
python
wandb/testing/relay.py
278
288
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,610
resolve_upload_files
def resolve_upload_files( request_data: Dict[str, Any], response_data: Dict[str, Any], **kwargs: Any ) -> Optional[Dict[str, Any]]: if not isinstance(request_data, dict): return None query = request_data.get("files") is not None if query: # todo: refactor this 🤮🤮🤮🤮🤮 eventually? name = kwargs.get("path").split("/")[2] files = { file_name: [ { "content": [ json.loads(k) for k in file_value.get("content", []) ], "offset": file_value.get("offset"), } ] for file_name, file_value in request_data["files"].items() } post_processed_data = { "name": name, "dropped": [request_data["dropped"]], "files": files, } return post_processed_data return None
python
wandb/testing/relay.py
291
317
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,611
resolve_uploaded_files
def resolve_uploaded_files( request_data: Dict[str, Any], response_data: Dict[str, Any], **kwargs: Any ) -> Optional[Dict[str, Any]]: if not isinstance(request_data, dict) or not isinstance(response_data, dict): return None query = "RunUploadUrls" in request_data.get("query", "") if query: # todo: refactor this 🤮🤮🤮🤮🤮 eventually? name = request_data["variables"]["run"] files = ( response_data.get("data", {}) .get("model", {}) .get("bucket", {}) .get("files", {}) .get("edges", []) ) # note: we count all attempts to upload files post_processed_data = { "name": name, "uploaded": [files[0].get("node", {}).get("name")] if files else [""], } return post_processed_data return None
python
wandb/testing/relay.py
320
342
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,612
resolve_preempting
def resolve_preempting( request_data: Dict[str, Any], response_data: Dict[str, Any], **kwargs: Any ) -> Optional[Dict[str, Any]]: if not isinstance(request_data, dict): return None query = "preempting" in request_data if query: name = kwargs.get("path").split("/")[2] post_processed_data = { "name": name, "preempting": [request_data["preempting"]], } return post_processed_data return None
python
wandb/testing/relay.py
345
358
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,613
resolve_upsert_sweep
def resolve_upsert_sweep( request_data: Dict[str, Any], response_data: Dict[str, Any], **kwargs: Any ) -> Optional[Dict[str, Any]]: if not isinstance(response_data, dict): return None query = response_data.get("data", {}).get("upsertSweep") is not None if query: data = response_data["data"]["upsertSweep"].get("sweep") return data return None
python
wandb/testing/relay.py
361
370
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,614
resolve_create_artifact
def resolve_create_artifact( self, request_data: Dict[str, Any], response_data: Dict[str, Any], **kwargs: Any ) -> Optional[Dict[str, Any]]: if not isinstance(request_data, dict): return None query = ( "createArtifact(" in request_data.get("query", "") and request_data.get("variables") is not None and response_data is not None ) if query: name = request_data["variables"]["runName"] post_processed_data = { "name": name, "create_artifact": [ { "variables": request_data["variables"], "response": response_data["data"]["createArtifact"]["artifact"], } ], } return post_processed_data return None
python
wandb/testing/relay.py
372
394
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,615
resolve
def resolve( self, request_data: Dict[str, Any], response_data: Dict[str, Any], **kwargs: Any, ) -> Optional[Dict[str, Any]]: for resolver in self.resolvers: result = resolver.get("resolver")(request_data, response_data, **kwargs) if result is not None: return result return None
python
wandb/testing/relay.py
396
406
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,616
__init__
def __init__(self, pattern: str): known_tokens = {self.APPLY_TOKEN, self.PASS_TOKEN, self.STOP_TOKEN} if not pattern: raise ValueError("Pattern cannot be empty") if set(pattern) - known_tokens: raise ValueError(f"Pattern can only contain {known_tokens}") self.pattern: "Deque[str]" = deque(pattern)
python
wandb/testing/relay.py
414
421
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,617
next
def next(self): if self.pattern[0] == self.STOP_TOKEN: return self.pattern.rotate(-1)
python
wandb/testing/relay.py
423
426
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,618
should_apply
def should_apply(self) -> bool: return self.pattern[0] == self.APPLY_TOKEN
python
wandb/testing/relay.py
428
429
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,619
__eq__
def __eq__( self, other: Union["InjectedResponse", requests.Request, requests.PreparedRequest], ): """Check InjectedResponse object equality. We use this to check if this response should be injected as a replacement of `other`. :param other: :return: """ if not isinstance( other, (InjectedResponse, requests.Request, requests.PreparedRequest) ): return False # always check the method and url ret = self.method == other.method and self.url == other.url # use custom_match_fn to check, e.g. the request body content if self.custom_match_fn is not None: ret = ret and self.custom_match_fn(self, other) return ret
python
wandb/testing/relay.py
467
489
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,620
to_dict
def to_dict(self): excluded_fields = {"application_pattern", "custom_match_fn"} return { k: self.__getattribute__(k) for k in self.__dict__ if (not k.startswith("_") and k not in excluded_fields) }
python
wandb/testing/relay.py
491
497
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,621
process
def process(self, request: "flask.Request") -> None: ... # pragma: no cover
python
wandb/testing/relay.py
501
502
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,622
control
def control(self, request: "flask.Request") -> Mapping[str, str]: ... # pragma: no cover
python
wandb/testing/relay.py
504
505
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,623
__init__
def __init__( self, base_url: str, inject: Optional[List[InjectedResponse]] = None, control: Optional[RelayControlProtocol] = None, ) -> None: # todo for the future: # - consider switching from Flask to Quart # - async app will allow for better failure injection/poor network perf self.relay_control = control self.app = flask.Flask(__name__) self.app.logger.setLevel(logging.INFO) self.app.register_error_handler(DeliberateHTTPError, self.handle_http_exception) self.app.add_url_rule( rule="/graphql", endpoint="graphql", view_func=self.graphql, methods=["POST"], ) self.app.add_url_rule( rule="/files/<path:path>", endpoint="files", view_func=self.file_stream, methods=["POST"], ) self.app.add_url_rule( rule="/storage", endpoint="storage", view_func=self.storage, methods=["PUT", "GET"], ) self.app.add_url_rule( rule="/storage/<path:path>", endpoint="storage_file", view_func=self.storage_file, methods=["PUT", "GET"], ) if control: self.app.add_url_rule( rule="/_control", endpoint="_control", view_func=self.control, methods=["POST"], ) # @app.route("/artifacts/<entity>/<digest>", methods=["GET", "POST"]) self.port = self._get_free_port() self.base_url = urllib.parse.urlparse(base_url) self.session = requests.Session() self.relay_url = f"http://127.0.0.1:{self.port}" # recursively merge-able object to store state self.resolver = QueryResolver() # todo: add an option to add custom resolvers self.context = Context() # injected responses self.inject = inject or [] # useful when debugging: # self.after_request_fn = self.app.after_request(self.after_request_fn)
python
wandb/testing/relay.py
509
568
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,624
handle_http_exception
def handle_http_exception(e): response = e.get_response() return response
python
wandb/testing/relay.py
571
573
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,625
_get_free_port
def _get_free_port() -> int: sock = socket.socket() sock.bind(("", 0)) _, port = sock.getsockname() return port
python
wandb/testing/relay.py
576
581
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,626
start
def start(self) -> None: # run server in a separate thread relay_server_thread = threading.Thread( target=self.app.run, kwargs={"port": self.port}, daemon=True, ) relay_server_thread.start()
python
wandb/testing/relay.py
583
590
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,627
after_request_fn
def after_request_fn(self, response: "requests.Response") -> "requests.Response": # todo: this is useful for debugging, but should be removed in the future # flask.request.url = self.relay_url + flask.request.url print(flask.request) print(flask.request.get_json()) print(response) print(response.json()) return response
python
wandb/testing/relay.py
592
599
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,628
relay
def relay( self, request: "flask.Request", ) -> Union["responses.Response", "requests.Response"]: # replace the relay url with the real backend url (self.base_url) url = ( urllib.parse.urlparse(request.url) ._replace(netloc=self.base_url.netloc, scheme=self.base_url.scheme) .geturl() ) headers = {key: value for (key, value) in request.headers if key != "Host"} prepared_relayed_request = requests.Request( method=request.method, url=url, headers=headers, data=request.get_data(), json=request.get_json(), ).prepare() for injected_response in self.inject: # where are we in the application pattern? should_apply = injected_response.application_pattern.should_apply() # check if an injected response matches the request if injected_response == prepared_relayed_request: # rotate the injection pattern injected_response.application_pattern.next() if should_apply: with responses.RequestsMock() as mocked_responses: # do the actual injection mocked_responses.add(**injected_response.to_dict()) relayed_response = self.session.send(prepared_relayed_request) return relayed_response # normal case: no injected response matches the request relayed_response = self.session.send(prepared_relayed_request) return relayed_response
python
wandb/testing/relay.py
601
637
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,629
snoop_context
def snoop_context( self, request: "flask.Request", response: "requests.Response", time_elapsed: float, **kwargs: Any, ) -> None: request_data = request.get_json() response_data = response.json() or {} if self.relay_control: self.relay_control.process(request) # store raw data raw_data: "RawRequestResponse" = { "url": request.url, "request": request_data, "response": response_data, "time_elapsed": time_elapsed, } self.context.raw_data.append(raw_data) snooped_context = self.resolver.resolve(request_data, response_data, **kwargs) if snooped_context is not None: self.context.upsert(snooped_context) return None
python
wandb/testing/relay.py
639
665
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,630
graphql
def graphql(self) -> Mapping[str, str]: request = flask.request with Timer() as timer: relayed_response = self.relay(request) # print("*****************") # print("GRAPHQL REQUEST:") # print(request.get_json()) # print("GRAPHQL RESPONSE:") # print(relayed_response.status_code, relayed_response.json()) # print("*****************") # snoop work to extract the context self.snoop_context(request, relayed_response, timer.elapsed) # print("*****************") # print("SNOOPED CONTEXT:") # print(self.context.entries) # print(len(self.context.raw_data)) # print("*****************") return relayed_response.json()
python
wandb/testing/relay.py
667
685
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,631
file_stream
def file_stream(self, path) -> Mapping[str, str]: request = flask.request with Timer() as timer: relayed_response = self.relay(request) # print("*****************") # print("FILE STREAM REQUEST:") # print("********PATH*********") # print(path) # print("********ENDPATH*********") # print(request.get_json()) # print("FILE STREAM RESPONSE:") # print(relayed_response) # print(relayed_response.status_code, relayed_response.json()) # print("*****************") self.snoop_context(request, relayed_response, timer.elapsed, path=path) return relayed_response.json()
python
wandb/testing/relay.py
687
704
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,632
storage
def storage(self) -> Mapping[str, str]: request = flask.request with Timer() as timer: relayed_response = self.relay(request) # print("*****************") # print("STORAGE REQUEST:") # print(request.get_json()) # print("STORAGE RESPONSE:") # print(relayed_response.status_code, relayed_response.json()) # print("*****************") self.snoop_context(request, relayed_response, timer.elapsed) return relayed_response.json()
python
wandb/testing/relay.py
706
719
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,633
storage_file
def storage_file(self, path) -> Mapping[str, str]: request = flask.request with Timer() as timer: relayed_response = self.relay(request) # print("*****************") # print("STORAGE FILE REQUEST:") # print("********PATH*********") # print(path) # print("********ENDPATH*********") # print(request.get_json()) # print("STORAGE FILE RESPONSE:") # print(relayed_response.json()) # print("*****************") self.snoop_context(request, relayed_response, timer.elapsed, path=path) return relayed_response.json()
python
wandb/testing/relay.py
721
737
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,634
control
def control(self) -> Mapping[str, str]: assert self.relay_control return self.relay_control.control(flask.request)
python
wandb/testing/relay.py
739
741
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,635
check_against_limit
def check_against_limit(count, chart, limit=None): if limit is None: limit = chart_limit if count > limit: warn_chart_limit(limit, chart) return True else: return False
python
wandb/sklearn/utils.py
14
21
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,636
warn_chart_limit
def warn_chart_limit(limit, chart): warning = f"using only the first {limit} datapoints to create chart {chart}" wandb.termwarn(warning)
python
wandb/sklearn/utils.py
24
26
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,637
encode_labels
def encode_labels(df): le = sklearn.preprocessing.LabelEncoder() # apply le on categorical feature columns categorical_cols = df.select_dtypes( exclude=["int", "float", "float64", "float32", "int32", "int64"] ).columns df[categorical_cols] = df[categorical_cols].apply(lambda col: le.fit_transform(col))
python
wandb/sklearn/utils.py
29
35
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,638
test_types
def test_types(**kwargs): test_passed = True for k, v in kwargs.items(): # check for incorrect types if ( (k == "X") or (k == "X_test") or (k == "y") or (k == "y_test") or (k == "y_true") or (k == "y_probas") ): # FIXME: do this individually if not isinstance( v, ( Sequence, Iterable, np.ndarray, np.generic, pd.DataFrame, pd.Series, list, ), ): wandb.termerror("%s is not an array. Please try again." % (k)) test_passed = False # check for classifier types if k == "model": if (not sklearn.base.is_classifier(v)) and ( not sklearn.base.is_regressor(v) ): wandb.termerror( "%s is not a classifier or regressor. Please try again." % (k) ) test_passed = False elif k == "clf" or k == "binary_clf": if not (sklearn.base.is_classifier(v)): wandb.termerror("%s is not a classifier. Please try again." % (k)) test_passed = False elif k == "regressor": if not sklearn.base.is_regressor(v): wandb.termerror("%s is not a regressor. Please try again." % (k)) test_passed = False elif k == "clusterer": if not (getattr(v, "_estimator_type", None) == "clusterer"): wandb.termerror("%s is not a clusterer. Please try again." % (k)) test_passed = False return test_passed
python
wandb/sklearn/utils.py
38
86
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,639
test_fitted
def test_fitted(model): try: model.predict(np.zeros((7, 3))) except sklearn.exceptions.NotFittedError: wandb.termerror("Please fit the model before passing it in.") return False except AttributeError: # Some clustering models (LDA, PCA, Agglomerative) don't implement ``predict`` try: sklearn.utils.validation.check_is_fitted( model, [ "coef_", "estimator_", "labels_", "n_clusters_", "children_", "components_", "n_components_", "n_iter_", "n_batch_iter_", "explained_variance_", "singular_values_", "mean_", ], all_or_any=any, ) return True except sklearn.exceptions.NotFittedError: wandb.termerror("Please fit the model before passing it in.") return False except Exception: # Assume it's fitted, since ``NotFittedError`` wasn't raised return True
python
wandb/sklearn/utils.py
89
122
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,640
test_missing
def test_missing(**kwargs): test_passed = True for k, v in kwargs.items(): # Missing/empty params/datapoint arrays if v is None: wandb.termerror("%s is None. Please try again." % (k)) test_passed = False if (k == "X") or (k == "X_test"): if isinstance(v, scipy.sparse.csr.csr_matrix): v = v.toarray() elif isinstance(v, (pd.DataFrame, pd.Series)): v = v.to_numpy() elif isinstance(v, list): v = np.asarray(v) # Warn the user about missing values missing = 0 missing = np.count_nonzero(pd.isnull(v)) if missing > 0: wandb.termwarn("%s contains %d missing values. " % (k, missing)) test_passed = False # Ensure the dataset contains only integers non_nums = 0 if v.ndim == 1: non_nums = sum( 1 for val in v if ( not isinstance(val, (int, float, complex)) and not isinstance(val, np.number) ) ) else: non_nums = sum( 1 for sl in v for val in sl if ( not isinstance(val, (int, float, complex)) and not isinstance(val, np.number) ) ) if non_nums > 0: wandb.termerror( "%s contains values that are not numbers. Please vectorize, label encode or one hot encode %s and call the plotting function again." % (k, k) ) test_passed = False return test_passed
python
wandb/sklearn/utils.py
126
174
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,641
round_3
def round_3(n): return round(n, 3)
python
wandb/sklearn/utils.py
177
178
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,642
round_2
def round_2(n): return round(n, 2)
python
wandb/sklearn/utils.py
181
182
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,643
classifier
def classifier( model, X_train, X_test, y_train, y_test, y_pred, y_probas, labels, is_binary=False, model_name="Classifier", feature_names=None, log_learning_curve=False, ): """Generate all sklearn classifier plots supported by W&B. The following plots are generated: feature importances, confusion matrix, summary metrics, class propotions, calibration curve, roc curve, precision-recall curve. Should only be called with a fitted classifer (otherwise an error is thrown). Arguments: model: (classifier) Takes in a fitted classifier. X_train: (arr) Training set features. y_train: (arr) Training set labels. X_test: (arr) Test set features. y_test: (arr) Test set labels. y_pred: (arr) Test set predictions by the model passed. y_probas: (arr) Test set predicted probabilities by the model passed. labels: (list) Named labels for target varible (y). Makes plots easier to read by replacing target values with corresponding index. For example if `labels=['dog', 'cat', 'owl']` all 0s are replaced by dog, 1s by cat. is_binary: (bool) Is the model passed a binary classifier? Defaults to False model_name: (str) Model name. Defaults to 'Classifier' feature_names: (list) Names for features. Makes plots easier to read by replacing feature indexes with corresponding names. log_learning_curve: (bool) Whether or not to log the learning curve. Defaults to False. Returns: None: To see plots, go to your W&B run page then expand the 'media' tab under 'auto visualizations'. Example: ```python wandb.sklearn.plot_classifier( model, X_train, X_test, y_train, y_test, y_pred, y_probas, ["cat", "dog"], False, "RandomForest", ["barks", "drools", "plays_fetch", "breed"], ) ``` """ wandb.termlog("\nPlotting %s." 
% model_name) if not isinstance(model, naive_bayes.MultinomialNB): feature_importances(model, feature_names) wandb.termlog("Logged feature importances.") if log_learning_curve: shared.learning_curve(model, X_train, y_train) wandb.termlog("Logged learning curve.") confusion_matrix(y_test, y_pred, labels) wandb.termlog("Logged confusion matrix.") shared.summary_metrics(model, X=X_train, y=y_train, X_test=X_test, y_test=y_test) wandb.termlog("Logged summary metrics.") class_proportions(y_train, y_test, labels) wandb.termlog("Logged class proportions.") if not isinstance(model, naive_bayes.MultinomialNB): calibration_curve(model, X_train, y_train, model_name) wandb.termlog("Logged calibration curve.") roc(y_test, y_probas, labels) wandb.termlog("Logged roc curve.") precision_recall(y_test, y_probas, labels) wandb.termlog("Logged precision-recall curve.")
python
wandb/sklearn/plot/classifier.py
17
106
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,644
roc
def roc( y_true=None, y_probas=None, labels=None, plot_micro=True, plot_macro=True, classes_to_plot=None, ): """Log the receiver-operating characteristic curve. Arguments: y_true: (arr) Test set labels. y_probas: (arr) Test set predicted probabilities. labels: (list) Named labels for target variable (y). Makes plots easier to read by replacing target values with corresponding index. For example if `labels=['dog', 'cat', 'owl']` all 0s are replaced by dog, 1s by cat. Returns: None: To see plots, go to your W&B run page then expand the 'media' tab under 'auto visualizations'. Example: ```python wandb.sklearn.plot_roc(y_true, y_probas, labels) ``` """ roc_chart = wandb.plots.roc.roc( y_true, y_probas, labels, plot_micro, plot_macro, classes_to_plot ) wandb.log({"roc": roc_chart})
python
wandb/sklearn/plot/classifier.py
109
139
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,645
confusion_matrix
def confusion_matrix( y_true=None, y_pred=None, labels=None, true_labels=None, pred_labels=None, normalize=False, ): """Log a confusion matrix to W&B. Confusion matrices depict the pattern of misclassifications by a model. Arguments: y_true: (arr) Test set labels. y_probas: (arr) Test set predicted probabilities. labels: (list) Named labels for target variable (y). Makes plots easier to read by replacing target values with corresponding index. For example if `labels=['dog', 'cat', 'owl']` all 0s are replaced by dog, 1s by cat. Returns: None: To see plots, go to your W&B run page then expand the 'media' tab under 'auto visualizations'. Example: ```python wandb.sklearn.plot_confusion_matrix(y_true, y_probas, labels) ``` """ y_true = np.asarray(y_true) y_pred = np.asarray(y_pred) not_missing = utils.test_missing(y_true=y_true, y_pred=y_pred) correct_types = utils.test_types(y_true=y_true, y_pred=y_pred) if not_missing and correct_types: confusion_matrix_chart = calculate.confusion_matrix( y_true, y_pred, labels, true_labels, pred_labels, normalize, ) wandb.log({"confusion_matrix": confusion_matrix_chart})
python
wandb/sklearn/plot/classifier.py
142
187
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,646
precision_recall
def precision_recall( y_true=None, y_probas=None, labels=None, plot_micro=True, classes_to_plot=None ): """Log a precision-recall curve to W&B. Precision-recall curves depict the tradeoff between positive predictive value (precision) and true positive rate (recall) as the threshold of a classifier is shifted. Arguments: y_true: (arr) Test set labels. y_probas: (arr) Test set predicted probabilities. labels: (list) Named labels for target variable (y). Makes plots easier to read by replacing target values with corresponding index. For example if `labels=['dog', 'cat', 'owl']` all 0s are replaced by dog, 1s by cat. Returns: None: To see plots, go to your W&B run page then expand the 'media' tab under 'auto visualizations'. Example: ```python wandb.sklearn.plot_precision_recall(y_true, y_probas, labels) ``` """ precision_recall_chart = wandb.plots.precision_recall( y_true, y_probas, labels, plot_micro, classes_to_plot ) wandb.log({"precision_recall": precision_recall_chart})
python
wandb/sklearn/plot/classifier.py
190
219
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,647
feature_importances
def feature_importances(
    model=None, feature_names=None, title="Feature Importance", max_num_features=50
):
    """Log a plot of the relative importance of each feature for a classifier.

    Should only be called with a fitted classifier (otherwise an error is
    thrown). Only works with classifiers that have a `feature_importances_`
    attribute, like trees.

    Arguments:
        model: (clf) Takes in a fitted classifier.
        feature_names: (list) Names for features. Makes plots easier to read by
                              replacing feature indexes with corresponding names.
        title: (str) accepted for backward compatibility; not used in this body.
        max_num_features: (int) accepted for backward compatibility; not used
                          in this body.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
              under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_feature_importances(model, ["width", "height", "length"])
        ```
    """
    # All checks run unconditionally so each can emit its own warning.
    inputs_present = utils.test_missing(model=model)
    types_ok = utils.test_types(model=model)
    fitted = utils.test_fitted(model)

    if not (inputs_present and types_ok and fitted):
        return

    chart = calculate.feature_importances(model, feature_names)
    wandb.log({"feature_importances": chart})
python
wandb/sklearn/plot/classifier.py
222
250
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,648
class_proportions
def class_proportions(y_train=None, y_test=None, labels=None):
    """Plot the distribution of target classes in training and test sets.

    Useful for detecting imbalanced classes.

    Arguments:
        y_train: (arr) Training set labels.
        y_test: (arr) Test set labels.
        labels: (list) Named labels for target variable (y). Makes plots easier
                       to read by replacing target values with corresponding
                       index, e.g. `labels=['dog', 'cat', 'owl']`.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
              under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_class_proportions(y_train, y_test, ["dog", "cat", "owl"])
        ```
    """
    # Both checks run unconditionally so each can emit its own warning.
    inputs_present = utils.test_missing(y_train=y_train, y_test=y_test)
    types_ok = utils.test_types(y_train=y_train, y_test=y_test)

    if not (inputs_present and types_ok):
        return

    y_train, y_test = np.array(y_train), np.array(y_test)
    chart = calculate.class_proportions(y_train, y_test, labels)
    wandb.log({"class_proportions": chart})
python
wandb/sklearn/plot/classifier.py
253
281
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,649
calibration_curve
def calibration_curve(clf=None, X=None, y=None, clf_name="Classifier"):
    """Log a plot depicting how well-calibrated the predicted probabilities of a classifier are.

    Also suggests how to calibrate an uncalibrated classifier.

    Compares estimated predicted probabilities by a baseline logistic regression
    model, the model passed as an argument, and by both its isotonic calibration
    and sigmoid calibrations.

    The closer the calibration curves are to a diagonal the better.
    A sine wave like curve represents an overfitted classifier, while a cosine
    wave like curve represents an underfitted classifier.
    By training isotonic and sigmoid calibrations of the model and comparing
    their curves we can figure out whether the model is over or underfitting and
    if so which calibration (sigmoid or isotonic) might help fix this.

    For more details, see https://scikit-learn.org/stable/auto_examples/calibration/plot_calibration_curve.html.

    Should only be called with a fitted classifer (otherwise an error is thrown).

    Please note this function fits variations of the model on the training set
    when called.

    Arguments:
        clf: (clf) Takes in a fitted classifier.
        X: (arr) Training set features.
        y: (arr) Training set labels; must be binary (0/1), otherwise the
           chart is skipped with a warning.
        clf_name: (str) Classifier name. Defaults to 'Classifier'.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
              under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_calibration_curve(clf, X, y, "RandomForestClassifier")
        ```
    """
    not_missing = utils.test_missing(clf=clf, X=X, y=y)
    correct_types = utils.test_types(clf=clf, X=X, y=y)
    is_fitted = utils.test_fitted(clf)

    if not_missing and correct_types and is_fitted:
        y = np.asarray(y)
        # Binary-only guard: string labels (unicode dtype kind "U") or any
        # value outside {0, 1} aborts the chart.
        if y.dtype.char == "U" or not ((y == 0) | (y == 1)).all():
            wandb.termwarn(
                "This function only supports binary classification at the moment and therefore expects labels to be binary. Skipping calibration curve."
            )
            return
        calibration_curve_chart = calculate.calibration_curves(clf, X, y, clf_name)

        wandb.log({"calibration_curve": calibration_curve_chart})
python
wandb/sklearn/plot/classifier.py
284
330
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,650
summary_metrics
def summary_metrics(model=None, X=None, y=None, X_test=None, y_test=None):
    """Logs a chart depicting summary metrics for a model.

    Should only be called with a fitted model (otherwise an error is thrown).

    Arguments:
        model: (clf or reg) Takes in a fitted regressor or classifier.
        X: (arr) Training set features.
        y: (arr) Training set labels.
        X_test: (arr) Test set features.
        y_test: (arr) Test set labels.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
              under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_summary_metrics(model, X_train, y_train, X_test, y_test)
        ```
    """
    # All checks run unconditionally so each can emit its own warning.
    inputs_present = utils.test_missing(
        model=model, X=X, y=y, X_test=X_test, y_test=y_test
    )
    types_ok = utils.test_types(model=model, X=X, y=y, X_test=X_test, y_test=y_test)
    fitted = utils.test_fitted(model)

    if not (inputs_present and types_ok and fitted):
        return

    chart = calculate.summary_metrics(model, X, y, X_test, y_test)
    wandb.log({"summary_metrics": chart})
python
wandb/sklearn/plot/shared.py
13
44
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,651
learning_curve
def learning_curve(
    model=None,
    X=None,
    y=None,
    cv=None,
    shuffle=False,
    random_state=None,
    train_sizes=None,
    n_jobs=1,
    scoring=None,
):
    """Logs a plot depicting model performance against dataset size.

    Please note this function fits the model to datasets of varying sizes when
    called.

    Arguments:
        model: (clf or reg) Takes in a fitted regressor or classifier.
        X: (arr) Dataset features.
        y: (arr) Dataset labels.

    For details on the other keyword arguments, see the documentation for
    `sklearn.model_selection.learning_curve`.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
              under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_learning_curve(model, X, y)
        ```
    """
    # Both checks run unconditionally so each can emit its own warning.
    inputs_present = utils.test_missing(model=model, X=X, y=y)
    types_ok = utils.test_types(model=model, X=X, y=y)

    if not (inputs_present and types_ok):
        return

    if train_sizes is None:
        # Default: five evenly spaced fractions of the full dataset.
        train_sizes = np.linspace(0.1, 1.0, 5)

    y = np.asarray(y)
    chart = calculate.learning_curve(
        model, X, y, cv, shuffle, random_state, train_sizes, n_jobs, scoring
    )
    wandb.log({"learning_curve": chart})
python
wandb/sklearn/plot/shared.py
47
90
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,652
regressor
def regressor(model, X_train, X_test, y_train, y_test, model_name="Regressor"):
    """Generates all sklearn regressor plots supported by W&B.

    The following plots are generated:
        learning curve, summary metrics, residuals plot, outlier candidates.

    Should only be called with a fitted regressor (otherwise an error is thrown).

    Arguments:
        model: (regressor) Takes in a fitted regressor.
        X_train: (arr) Training set features.
        y_train: (arr) Training set labels.
        X_test: (arr) Test set features.
        y_test: (arr) Test set labels.
        model_name: (str) Model name. Defaults to 'Regressor'

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
              under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_regressor(reg, X_train, X_test, y_train, y_test, "Ridge")
        ```
    """
    wandb.termlog("\nPlotting %s." % model_name)

    # Each helper validates its own inputs and skips logging on failure,
    # so a bad input to one chart does not block the others.
    shared.summary_metrics(model, X_train, y_train, X_test, y_test)
    wandb.termlog("Logged summary metrics.")

    shared.learning_curve(model, X_train, y_train)
    wandb.termlog("Logged learning curve.")

    # Outlier and residual charts only use the training split.
    outlier_candidates(model, X_train, y_train)
    wandb.termlog("Logged outlier candidates.")

    residuals(model, X_train, y_train)
    wandb.termlog("Logged residuals.")
python
wandb/sklearn/plot/regressor.py
15
52
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,653
outlier_candidates
def outlier_candidates(regressor=None, X=None, y=None):
    """Measures a datapoint's influence on regression model via cook's distance.

    Instances with high influences could potentially be outliers.

    Should only be called with a fitted regressor (otherwise an error is thrown).

    Please note this function fits the model on the training set when called.

    Arguments:
        regressor: (regressor) Takes in a fitted regressor.
        X: (arr) Training set features.
        y: (arr) Training set labels.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
              under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_outlier_candidates(model, X, y)
        ```
    """
    # All checks run unconditionally so each can emit its own warning.
    inputs_present = utils.test_missing(regressor=regressor, X=X, y=y)
    types_ok = utils.test_types(regressor=regressor, X=X, y=y)
    fitted = utils.test_fitted(regressor)

    if not (inputs_present and types_ok and fitted):
        return

    y = np.asarray(y)
    chart = calculate.outlier_candidates(regressor, X, y)
    wandb.log({"outlier_candidates": chart})
python
wandb/sklearn/plot/regressor.py
55
86
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,654
residuals
def residuals(regressor=None, X=None, y=None):
    """Measures and plots the regressor's predicted value against the residual.

    The marginal distribution of residuals is also calculated and plotted.

    Should only be called with a fitted regressor (otherwise an error is thrown).

    Please note this function fits variations of the model on the training set
    when called.

    Arguments:
        regressor: (regressor) Takes in a fitted regressor.
        X: (arr) Training set features.
        y: (arr) Training set labels.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
              under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_residuals(model, X, y)
        ```
    """
    # All checks run unconditionally so each can emit its own warning.
    inputs_present = utils.test_missing(regressor=regressor, X=X, y=y)
    types_ok = utils.test_types(regressor=regressor, X=X, y=y)
    fitted = utils.test_fitted(regressor)

    if not (inputs_present and types_ok and fitted):
        return

    y = np.asarray(y)
    chart = calculate.residuals(regressor, X, y)
    wandb.log({"residuals": chart})
python
wandb/sklearn/plot/regressor.py
89
120
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,655
clusterer
def clusterer(model, X_train, cluster_labels, labels=None, model_name="Clusterer"):
    """Generates all sklearn clusterer plots supported by W&B.

    The following plots are generated:
        elbow curve, silhouette plot.

    Should only be called with a fitted clusterer (otherwise an error is thrown).

    Arguments:
        model: (clusterer) Takes in a fitted clusterer.
        X_train: (arr) Training set features.
        cluster_labels: (list) Names for cluster labels. Makes plots easier to
                               read by replacing cluster indexes with
                               corresponding names.
        labels: (list) Named labels for target variable (y). Makes plots easier
                       to read by replacing target values with corresponding
                       index. For example if `labels=['dog', 'cat', 'owl']` all
                       0s are replaced by dog, 1s by cat.
        model_name: (str) Model name. Defaults to 'Clusterer'

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
              under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_clusterer(kmeans, X, cluster_labels, labels, "KMeans")
        ```
    """
    wandb.termlog("\nPlotting %s." % model_name)
    # The elbow curve needs an n_clusters hyperparameter, so it is only drawn
    # for KMeans; other clusterers get the silhouette plot only.
    if isinstance(model, sklearn.cluster.KMeans):
        elbow_curve(model, X_train)
        wandb.termlog("Logged elbow curve.")

        silhouette(model, X_train, cluster_labels, labels=labels, kmeans=True)
    else:
        silhouette(model, X_train, cluster_labels, kmeans=False)
    wandb.termlog("Logged silhouette plot.")
python
wandb/sklearn/plot/clusterer.py
14
52
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,656
elbow_curve
def elbow_curve(
    clusterer=None, X=None, cluster_ranges=None, n_jobs=1, show_cluster_time=True
):
    """Measures and plots variance explained as a function of the number of clusters.

    Useful in picking the optimal number of clusters.

    Should only be called with a fitted clusterer (otherwise an error is thrown).

    Please note this function fits the model on the training set when called.

    Arguments:
        clusterer: (clusterer) Takes in a fitted clusterer.
        X: (arr) Training set features.
        cluster_ranges: (list) Candidate cluster counts; forwarded to
                        calculate.elbow_curve.
        n_jobs: (int) Number of parallel jobs; forwarded to calculate.elbow_curve.
        show_cluster_time: (bool) Forwarded to calculate.elbow_curve.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
              under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_elbow_curve(model, X_train)
        ```
    """
    if not hasattr(clusterer, "n_clusters"):
        wandb.termlog(
            "n_clusters attribute not in classifier. Cannot plot elbow method."
        )
        return

    not_missing = utils.test_missing(clusterer=clusterer)
    # Fix: the original assigned the function object itself
    # (`correct_types = utils.test_types`), which is always truthy, so the
    # type check was silently skipped. Call it like the sibling helpers do.
    correct_types = utils.test_types(clusterer=clusterer)
    is_fitted = utils.test_fitted(clusterer)

    if not_missing and correct_types and is_fitted:
        elbow_curve_chart = calculate.elbow_curve(
            clusterer, X, cluster_ranges, n_jobs, show_cluster_time
        )

        wandb.log({"elbow_curve": elbow_curve_chart})
python
wandb/sklearn/plot/clusterer.py
55
94
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,657
silhouette
def silhouette(
    clusterer=None,
    X=None,
    cluster_labels=None,
    labels=None,
    metric="euclidean",
    kmeans=True,
):
    """Measures & plots silhouette coefficients.

    Silhouette coefficients near +1 indicate that the sample is far away from
    the neighboring clusters. A value near 0 indicates that the sample is on or
    very close to the decision boundary between two neighboring clusters and
    negative values indicate that the samples might have been assigned to the
    wrong cluster.

    Should only be called with a fitted clusterer (otherwise an error is thrown).

    Please note this function fits the model on the training set when called.

    Arguments:
        clusterer: (clusterer) Takes in a fitted clusterer.
        X: (arr) Training set features.
        cluster_labels: (list) Names for cluster labels. Makes plots easier to
                               read by replacing cluster indexes with
                               corresponding names.

    Returns:
        None: To see plots, go to your W&B run page then expand the 'media' tab
              under 'auto visualizations'.

    Example:
        ```python
        wandb.sklearn.plot_silhouette(model, X_train, ["spam", "not spam"])
        ```
    """
    # All checks run unconditionally so each can emit its own warning.
    inputs_present = utils.test_missing(clusterer=clusterer)
    types_ok = utils.test_types(clusterer=clusterer)
    fitted = utils.test_fitted(clusterer)

    if not (inputs_present and types_ok and fitted):
        return

    # The calculate helper indexes columns positionally, so unwrap DataFrames.
    features = X.values if isinstance(X, pd.DataFrame) else X
    chart = calculate.silhouette(
        clusterer, features, cluster_labels, labels, metric, kmeans
    )
    wandb.log({"silhouette_plot": chart})
python
wandb/sklearn/plot/clusterer.py
97
141
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,658
learning_curve
def learning_curve(
    model,
    X,
    y,
    cv=None,
    shuffle=False,
    random_state=None,
    train_sizes=None,
    n_jobs=1,
    scoring=None,
):
    """Train model on datasets of varying size and generates plot of score vs size.

    Called by plot_learning_curve to visualize learning curve. Please use the
    function plot_learning_curve() if you wish to visualize your learning
    curves.
    """
    sizes, train_scores, test_scores = model_selection.learning_curve(
        model,
        X,
        y,
        cv=cv,
        n_jobs=n_jobs,
        train_sizes=train_sizes,
        scoring=scoring,
        shuffle=shuffle,
        random_state=random_state,
    )

    # Average the per-fold scores for each training-set size.
    mean_train_scores = np.mean(train_scores, axis=1)
    mean_test_scores = np.mean(test_scores, axis=1)

    table = make_table(mean_train_scores, mean_test_scores, sizes)
    return wandb.visualize("wandb/learning_curve/v1", table)
python
wandb/sklearn/calculate/learning_curve.py
13
46
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,659
make_table
def make_table(train, test, train_sizes):
    """Build a (dataset, score, train_size) table of mean train/test scores.

    Adds one 'train' and one 'test' row per training-set size, capped at half
    the shared chart limit.
    """
    rows = []
    for idx in range(len(train)):
        # Stop once half the chart limit is reached (two rows per size).
        if utils.check_against_limit(
            idx,
            "learning_curve",
            utils.chart_limit / 2,
        ):
            break
        rows.append(["train", utils.round_2(train[idx]), train_sizes[idx]])
        rows.append(["test", utils.round_2(test[idx]), train_sizes[idx]])

    return wandb.Table(columns=["dataset", "score", "train_size"], data=rows)
python
wandb/sklearn/calculate/learning_curve.py
49
64
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,660
class_proportions
def class_proportions(y_train, y_test, labels):
    """Build the class-proportions chart comparing train vs. test label counts."""
    # Collect the unique class values across the available splits.
    targets = (y_train,) if y_test is None else (y_train, y_test)
    class_ids = np.array(unique_labels(*targets))

    # Count occurrences of each class per split.
    counts_train = np.array([(y_train == cid).sum() for cid in class_ids])
    counts_test = np.array([(y_test == cid).sum() for cid in class_ids])

    class_column, dataset_column, count_column = make_columns(
        class_ids, counts_train, counts_test
    )

    # Replace numeric class ids with user-supplied names when applicable.
    if labels is not None and isinstance(class_column[0], (int, np.integer)):
        class_column = get_named_labels(labels, class_column)

    table = make_table(class_column, dataset_column, count_column)
    return wandb.visualize("wandb/class_proportions/v1", table)
python
wandb/sklearn/calculate/class_proportions.py
13
34
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,661
make_table
def make_table(class_column, dataset_column, count_column):
    """Assemble parallel (class, dataset, count) columns into a W&B table."""
    columns = ["class", "dataset", "count"]
    rows = []
    for row in zip(class_column, dataset_column, count_column):
        rows.append(row)
    return wandb.Table(data=rows, columns=columns)
python
wandb/sklearn/calculate/class_proportions.py
37
41
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,662
make_columns
def make_columns(class_ids, counts_train, counts_test):
    """Expand per-class train/test counts into three parallel chart columns.

    For each class, appends a 'train' entry and a 'test' entry; stops after
    the shared chart limit is reached.
    """
    classes, datasets, counts = [], [], []
    for idx in range(len(class_ids)):
        # One row for the training-set count of this class...
        classes.append(class_ids[idx])
        datasets.append("train")
        counts.append(counts_train[idx])

        # ...and one for the test-set count.
        classes.append(class_ids[idx])
        datasets.append("test")
        counts.append(counts_test[idx])

        if utils.check_against_limit(
            idx,
            "class_proportions",
            utils.chart_limit,
        ):
            break
    return classes, datasets, counts
python
wandb/sklearn/calculate/class_proportions.py
44
64
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,663
get_named_labels
def get_named_labels(labels, numeric_labels):
    """Map numeric class indexes to their human-readable names as an array."""
    named = [labels[index] for index in numeric_labels]
    return np.array(named)
python
wandb/sklearn/calculate/class_proportions.py
67
68
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,664
silhouette
def silhouette(clusterer, X, cluster_labels, labels, metric, kmeans):
    """Build the W&B silhouette chart for already-computed cluster labels.

    Computes per-sample silhouette coefficients and the overall average, then
    packs them — together with the 2D sample positions and (for KMeans) the
    cluster centers — into a table for the "wandb/silhouette_/v1" chart.

    NOTE(review): only X[:, 0] and X[:, 1] are charted, so this assumes X has
    at least two feature columns — confirm against callers.
    """
    # Run clusterer for n_clusters in range(len(cluster_ranges), get cluster labels
    # TODO - keep/delete once we decide if we should train clusterers
    # or ask for trained models
    # clusterer.set_params(n_clusters=n_clusters, random_state=42)
    # cluster_labels = clusterer.fit_predict(X)
    cluster_labels = np.asarray(cluster_labels)
    labels = np.asarray(labels)

    # Encoder is fit for its side effects only; the encoded result is unused.
    le = LabelEncoder()
    _ = le.fit_transform(cluster_labels)
    n_clusters = len(np.unique(cluster_labels))

    # The silhouette_score gives the average value for all the samples.
    # This gives a perspective into the density and separation of the formed
    # clusters
    silhouette_avg = silhouette_score(X, cluster_labels, metric=metric)

    # Compute the silhouette scores for each sample
    sample_silhouette_values = silhouette_samples(X, cluster_labels, metric=metric)

    x_sil, y_sil, color_sil = [], [], []

    # y_lower/y_upper lay each cluster's silhouette bars out as a stacked band,
    # with a 10-unit gap between clusters.
    count, y_lower = 0, 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to
        # cluster i, and sort them
        ith_cluster_silhouette_values = sample_silhouette_values[cluster_labels == i]
        ith_cluster_silhouette_values.sort()

        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i

        y_values = np.arange(y_lower, y_upper)

        for j in range(len(y_values)):
            y_sil.append(y_values[j])
            x_sil.append(ith_cluster_silhouette_values[j])
            color_sil.append(i)
            count += 1
            # Stop charting this cluster once the global point limit is hit.
            if utils.check_against_limit(count, "silhouette", utils.chart_limit):
                break

        # Compute the new y_lower for next plot
        y_lower = y_upper + 10  # 10 for the 0 samples

    if kmeans:
        centers = clusterer.cluster_centers_
        centerx = centers[:, 0]
        centery = centers[:, 1]
    else:
        # Non-KMeans clusterers expose no centers; pad with None placeholders.
        centerx = [None] * len(color_sil)
        centery = [None] * len(color_sil)

    table = make_table(
        X[:, 0],
        X[:, 1],
        cluster_labels,
        centerx,
        centery,
        y_sil,
        x_sil,
        color_sil,
        silhouette_avg,
    )
    chart = wandb.visualize("wandb/silhouette_/v1", table)
    return chart
python
wandb/sklearn/calculate/silhouette.py
14
83
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,665
make_table
def make_table(x, y, colors, centerx, centery, y_sil, x_sil, color_sil, silhouette_avg):
    """Assemble per-sample silhouette rows into a W&B table.

    Each row pairs a sample's 2D position and cluster id with its cluster's
    center coordinates and the sample's silhouette bar (from x1=0 to
    x2=silhouette value, drawn at height y_sil).
    """
    columns = [
        "x",
        "y",
        "colors",
        "centerx",
        "centery",
        "y_sil",
        "x1",
        "x2",
        "color_sil",
        "silhouette_avg",
    ]
    rows = []
    for i in range(len(color_sil)):
        cluster = colors[i]
        rows.append(
            [
                x[i],
                y[i],
                cluster,
                centerx[cluster],
                centery[cluster],
                y_sil[i],
                0,
                x_sil[i],
                color_sil[i],
                silhouette_avg,
            ]
        )
    return wandb.Table(data=rows, columns=columns)
python
wandb/sklearn/calculate/silhouette.py
86
118
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,666
calibration_curves
def calibration_curves(clf, X, y, clf_name):
    """Build the calibration chart comparing `clf` against calibrated variants.

    Fits a logistic-regression baseline plus isotonic- and sigmoid-calibrated
    wrappers of `clf` on a small training split, then records each model's
    (mean predicted value, fraction of positives) calibration curve along with
    a histogram of its predicted probabilities. A "Perfectly calibrated"
    diagonal is included as a reference.

    Called by plot_calibration_curve(); the caller enforces binary 0/1 labels.
    """
    # ComplementNB (introduced in 0.20.0) requires non-negative features.
    # Fix: the original checked only the minor version
    # (int(version.split(".")[1]) >= 20), which is False for sklearn >= 1.0
    # and silently skipped this shift; compare the (major, minor) tuple.
    version = tuple(int(part) for part in sklearn.__version__.split(".")[:2])
    if version >= (0, 20) and isinstance(clf, naive_bayes.ComplementNB):
        X = X - X.min()

    # Calibrated with isotonic calibration
    isotonic = CalibratedClassifierCV(clf, cv=2, method="isotonic")
    # Calibrated with sigmoid calibration
    sigmoid = CalibratedClassifierCV(clf, cv=2, method="sigmoid")
    # Logistic regression with no calibration as baseline
    lr = LogisticRegression(C=1.0)

    model_column = []  # color
    frac_positives_column = []  # y axis
    mean_pred_value_column = []  # x axis
    hist_column = []  # barchart y
    edge_column = []  # barchart x

    # Add the two endpoints of the perfectly calibrated diagonal.
    # format: model, fraction_of_positives, mean_predicted_value
    model_column.append("Perfectly calibrated")
    frac_positives_column.append(0)
    mean_pred_value_column.append(0)
    hist_column.append(0)
    edge_column.append(0)
    model_column.append("Perfectly calibrated")
    hist_column.append(0)
    edge_column.append(0)
    frac_positives_column.append(1)
    mean_pred_value_column.append(1)

    # Only 10% of the data is used for fitting; the rest evaluates calibration.
    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        X, y, test_size=0.9, random_state=42
    )

    # Add curve for LogisticRegression baseline and other models
    models = [lr, isotonic, sigmoid]
    names = ["Logistic", f"{clf_name} Isotonic", f"{clf_name} Sigmoid"]
    for model, name in zip(models, names):
        model.fit(X_train, y_train)
        if hasattr(model, "predict_proba"):
            prob_pos = model.predict_proba(X_test)[:, 1]
        else:  # use decision function, rescaled into [0, 1]
            prob_pos = model.decision_function(X_test)
            prob_pos = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())

        hist, edges = np.histogram(prob_pos, bins=10, density=False)

        frac_positives, mean_pred_value = sklearn.calibration.calibration_curve(
            y_test, prob_pos, n_bins=10
        )

        # format: model, fraction_of_positives, mean_predicted_value
        num_entries = len(frac_positives)
        for i in range(num_entries):
            hist_column.append(hist[i])
            edge_column.append(edges[i])
            model_column.append(name)
            frac_positives_column.append(utils.round_3(frac_positives[i]))
            mean_pred_value_column.append(utils.round_3(mean_pred_value[i]))
            # Reserve two slots for the "Perfectly calibrated" reference rows.
            if utils.check_against_limit(
                i,
                "calibration_curve",
                utils.chart_limit - 2,
            ):
                break
    table = make_table(
        model_column,
        frac_positives_column,
        mean_pred_value_column,
        hist_column,
        edge_column,
    )
    chart = wandb.visualize("wandb/calibration/v1", table)
    return chart
python
wandb/sklearn/calculate/calibration_curves.py
16
98
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,667
make_table
def make_table(
    model_column,
    frac_positives_column,
    mean_pred_value_column,
    hist_column,
    edge_column,
):
    """Pack the parallel calibration-curve columns into a row-oriented table."""
    columns = [
        "model",
        "fraction_of_positives",
        "mean_predicted_value",
        "hist_dict",
        "edge_dict",
    ]
    rows = []
    for row in zip(
        model_column,
        frac_positives_column,
        mean_pred_value_column,
        hist_column,
        edge_column,
    ):
        rows.append(row)
    return wandb.Table(columns=columns, data=rows)
python
wandb/sklearn/calculate/calibration_curves.py
101
126
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,668
outlier_candidates
def outlier_candidates(regressor, X, y):
    """Chart each training instance's influence (Cook's distance) on the model.

    Fits `regressor` on (X, y), computes per-instance Cook's distance from the
    studentized residuals and leverage, and logs a chart that also carries the
    percentage of instances above the 4/n rule-of-thumb influence threshold.
    """
    # Fit a linear model to X and y to compute MSE
    regressor.fit(X, y)

    # Leverage is computed as the diagonal of the projection matrix of X
    # (the hat matrix), via element-wise product with the pseudo-inverse.
    leverage = (X * np.linalg.pinv(X).T).sum(1)

    # Compute the rank and the degrees of freedom of the OLS model
    rank = np.linalg.matrix_rank(X)
    df = X.shape[0] - rank

    # Compute the MSE from the residuals
    residuals = y - regressor.predict(X)
    mse = np.dot(residuals, residuals) / df

    # Compute Cook's distance: squared studentized residuals scaled by the
    # number of features and the leverage ratio.
    residuals_studentized = residuals / np.sqrt(mse) / np.sqrt(1 - leverage)
    distance_ = residuals_studentized**2 / X.shape[1]
    distance_ *= leverage / (1 - leverage)

    # Compute the influence threshold rule of thumb (4 / n)
    influence_threshold_ = 4 / X.shape[0]
    outlier_percentage_ = sum(distance_ >= influence_threshold_) / X.shape[0]
    outlier_percentage_ *= 100.0

    # Cap the number of charted distances at the shared chart limit.
    distance_dict, count = [], 0
    for d in distance_:
        distance_dict.append(d)
        count += 1
        if utils.check_against_limit(
            count,
            "outlier_candidates",
            utils.chart_limit,
        ):
            break

    table = make_table(distance_dict, outlier_percentage_, influence_threshold_)
    chart = wandb.visualize("wandb/outliers/v1", table)
    return chart
python
wandb/sklearn/calculate/outlier_candidates.py
12
51
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,669
make_table
def make_table(distance, outlier_percentage, influence_threshold):
    """Tabulate each instance's Cook's distance with the summary outlier stats.

    Every row repeats the overall outlier percentage and influence threshold so
    the chart can draw them alongside the per-instance distances.
    """
    columns = [
        "distance",
        "instance_indicies",
        "outlier_percentage",
        "influence_threshold",
    ]
    rows = []
    for index, value in enumerate(distance):
        rows.append(
            [value, index, utils.round_3(outlier_percentage), influence_threshold]
        )
    return wandb.Table(columns=columns, data=rows)
python
wandb/sklearn/calculate/outlier_candidates.py
54
69
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,670
residuals
def residuals(regressor, X, y):
    """Fit `regressor` on a fresh 80/20 split and chart predictions vs. residuals.

    NOTE: the split is unseeded, so the chart varies between calls.
    """
    # Create the train and test splits
    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        X, y, test_size=0.2
    )

    # Fit once on the training portion, then score both splits.
    regressor.fit(X_train, y_train)
    score_train = regressor.score(X_train, y_train)
    score_test = regressor.score(X_test, y_test)

    preds_train = regressor.predict(X_train)
    errors_train = preds_train - y_train

    preds_test = regressor.predict(X_test)
    errors_test = preds_test - y_test

    table = make_table(
        preds_train,
        errors_train,
        preds_test,
        errors_test,
        score_train,
        score_test,
    )
    return wandb.visualize("wandb/residuals_plot/v1", table)
python
wandb/sklearn/calculate/residuals.py
12
39
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,671
make_table
def make_table(
    y_pred_train,
    residuals_train,
    y_pred_test,
    residuals_test,
    train_score_,
    test_score_,
):
    """Tabulate per-split predictions and residuals for the residuals chart.

    Each split is capped at 100 datapoints; every row carries the overall
    train/test scores so the chart can display them.
    """
    max_rows = 100
    preds, datasets, resids = [], [], []

    def _collect(split_name, predictions, residual_values):
        # Append rows for one split, stopping at the datapoint cap.
        seen = 0
        for pred, residual in zip(predictions, residual_values):
            preds.append(pred)
            datasets.append(split_name)
            resids.append(residual)
            seen += 1
            if utils.check_against_limit(seen, "residuals", max_rows):
                break

    _collect("train", y_pred_train, residuals_train)
    _collect("test", y_pred_test, residuals_test)

    columns = ["dataset", "y_pred", "residuals", "train_score", "test_score"]
    data = [
        [datasets[i], preds[i], resids[i], train_score_, test_score_]
        for i in range(len(preds))
    ]
    return wandb.Table(columns=columns, data=data)
python
wandb/sklearn/calculate/residuals.py
42
86
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,672
decision_boundaries
def decision_boundaries(
    decision_boundary_x,
    decision_boundary_y,
    decision_boundary_color,
    train_x,
    train_y,
    train_color,
    test_x,
    test_y,
    test_color,
):
    """Build a W&B scatter chart combining boundary, test, and train points.

    Points are subsampled to keep the chart small: at most 100 decision-boundary
    points, 300 test points, and 600 train points.

    Arguments:
        decision_boundary_x, decision_boundary_y: (arr) Boundary coordinates.
        decision_boundary_color: Single color value applied to every boundary
            point.
        train_x, train_y, train_color: (arr) Train coordinates and per-point
            colors.
        test_x, test_y, test_color: (arr) Test coordinates and per-point colors.

    Returns:
        A wandb visualization of the combined point cloud.
    """
    x_dict, y_dict, color_dict = [], [], []
    for i in range(min(len(decision_boundary_x), 100)):
        x_dict.append(decision_boundary_x[i])
        y_dict.append(decision_boundary_y[i])
        color_dict.append(decision_boundary_color)

    # Fix: the original used a hard-coded range(300) here, raising IndexError
    # whenever the test set had fewer than 300 points; cap at the actual
    # length like the boundary and train loops do.
    for i in range(min(len(test_x), 300)):
        x_dict.append(test_x[i])
        y_dict.append(test_y[i])
        color_dict.append(test_color[i])

    for i in range(min(len(train_x), 600)):
        x_dict.append(train_x[i])
        y_dict.append(train_y[i])
        color_dict.append(train_color[i])

    return wandb.visualize(
        "wandb/decision_boundaries/v1",
        wandb.Table(
            columns=["x", "y", "color"],
            data=[[x_dict[i], y_dict[i], color_dict[i]] for i in range(len(x_dict))],
        ),
    )
python
wandb/sklearn/calculate/decision_boundaries.py
9
40
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,673
elbow_curve
def elbow_curve(clusterer, X, cluster_ranges, n_jobs, show_cluster_time):
    """Score clones of `clusterer` across cluster counts and chart the errors.

    `show_cluster_time` is accepted for interface compatibility; timing data is
    always collected and passed to the table.
    """
    if cluster_ranges is None:
        candidate_counts = range(1, 10, 2)
    else:
        candidate_counts = sorted(cluster_ranges)

    scores, fit_times = _compute_results_parallel(
        n_jobs, clusterer, X, candidate_counts
    )
    # Chart absolute scores as "errors".
    errors = np.absolute(scores)

    table = make_table(candidate_counts, errors, fit_times)
    return wandb.visualize("wandb/elbow/v1", table)
python
wandb/sklearn/calculate/elbow_curve.py
14
27
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,674
make_table
def make_table(cluster_ranges, clfs, times):
    """Tabulate (cluster count, error, fit seconds) rows for the elbow chart."""
    columns = ["cluster_ranges", "errors", "clustering_time"]
    rows = []
    for row in zip(cluster_ranges, clfs, times):
        rows.append(row)
    return wandb.Table(columns=columns, data=rows)
python
wandb/sklearn/calculate/elbow_curve.py
30
37
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,675
_compute_results_parallel
def _compute_results_parallel(n_jobs, clusterer, X, cluster_ranges):
    """Fit-and-score a clone of `clusterer` per candidate count, in parallel.

    Returns two tuples: the scores and the per-fit wall-clock durations.
    """
    scorer = delayed(_clone_and_score_clusterer)
    jobs = (scorer(clusterer, X, count) for count in cluster_ranges)
    results = Parallel(n_jobs=n_jobs)(jobs)

    scores, durations = zip(*results)
    return scores, durations
python
wandb/sklearn/calculate/elbow_curve.py
40
47
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,676
_clone_and_score_clusterer
def _clone_and_score_clusterer(clusterer, X, n_clusters):
    """Fit a fresh clone of `clusterer` with `n_clusters` clusters and score it on X.

    Returns:
        (score, elapsed): the fitted clone's score on X, and the wall time in
        seconds spent cloning, fitting, and scoring.
    """
    # perf_counter is monotonic and high-resolution; time.time() can jump
    # (even backwards) when the system clock is adjusted, corrupting timings.
    start = time.perf_counter()
    clusterer = clone(clusterer)
    setattr(clusterer, "n_clusters", n_clusters)
    return clusterer.fit(X).score(X), time.perf_counter() - start
python
wandb/sklearn/calculate/elbow_curve.py
50
55
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,677
summary_metrics
def summary_metrics(model=None, X=None, y=None, X_test=None, y_test=None):
    """Calculate summary metrics for both regressors and classifiers.

    Called by plot_summary_metrics to visualize metrics. Please use the function
    plot_summary_metrics() if you wish to visualize your summary metrics.
    """
    y, y_test = np.asarray(y), np.asarray(y_test)
    model_name = model.__class__.__name__
    y_pred = model.predict(X_test)

    metrics = {}
    if sklearn.base.is_classifier(model):
        metrics["accuracy_score"] = sklearn.metrics.accuracy_score(y_test, y_pred)
        metrics["precision"] = sklearn.metrics.precision_score(
            y_test, y_pred, average="weighted"
        )
        metrics["recall"] = sklearn.metrics.recall_score(
            y_test, y_pred, average="weighted"
        )
        metrics["f1_score"] = sklearn.metrics.f1_score(
            y_test, y_pred, average="weighted"
        )
    elif sklearn.base.is_regressor(model):
        metrics["mae"] = sklearn.metrics.mean_absolute_error(y_test, y_pred)
        metrics["mse"] = sklearn.metrics.mean_squared_error(y_test, y_pred)
        metrics["r2_score"] = sklearn.metrics.r2_score(y_test, y_pred)

    rounded = {name: utils.round_2(value) for name, value in metrics.items()}
    return wandb.visualize("wandb/metrics/v1", make_table(rounded, model_name))
python
wandb/sklearn/calculate/summary_metrics.py
13
53
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,678
make_table
def make_table(metrics, model_name):
    """One table row per (metric name, value), tagged with the model's class name."""
    rows = []
    for metric_name, metric_value in metrics.items():
        rows.append([metric_name, metric_value, model_name])
    return wandb.Table(
        columns=["metric_name", "metric_value", "model_name"], data=rows
    )
python
wandb/sklearn/calculate/summary_metrics.py
56
62
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,679
feature_importances
def feature_importances(model, feature_names):
    """Chart a fitted model's per-feature importances, sorted descending.

    Checks the model for ``feature_importances_``, ``feature_log_prob_``,
    then ``coef_`` (first match wins). Warns and returns ``None`` when no
    such attribute exists, or when the importances array has more than one
    axis of size > 1. Otherwise returns a ``wandb.visualize`` chart.
    """
    attributes_to_check = ["feature_importances_", "feature_log_prob_", "coef_"]
    found_attribute = check_for_attribute_on(model, attributes_to_check)

    if found_attribute is None:
        wandb.termwarn(
            f"could not find any of attributes {', '.join(attributes_to_check)} on classifier. Cannot plot feature importances."
        )
        return
    elif found_attribute == "feature_importances_":
        importances = model.feature_importances_
    elif found_attribute == "coef_":
        # ElasticNet-like linear models expose coefficients
        importances = model.coef_
    elif found_attribute == "feature_log_prob_":
        # models exposing per-class log probabilities (e.g. naive Bayes)
        # instead of coefficients or importances
        importances = model.feature_log_prob_

    if len(importances.shape) > 1:
        # Axes of size 1 can be squeezed away; anything genuinely 2-D+
        # cannot be shown as a single bar per feature.
        n_significant_dims = sum(i > 1 for i in importances.shape)
        if n_significant_dims > 1:
            nd = len(importances.shape)
            wandb.termwarn(
                f"{nd}-dimensional feature importances array passed to plot_feature_importances. "
                f"{nd}-dimensional and higher feature importances arrays are not currently supported. "
                f"These importances will not be plotted."
            )
            return
        else:
            importances = np.squeeze(importances)

    # Sort features by descending importance; fall back to positional
    # indices when no feature names were given.
    indices = np.argsort(importances)[::-1]
    importances = importances[indices]

    if feature_names is None:
        feature_names = indices
    else:
        feature_names = np.array(feature_names)[indices]

    table = make_table(feature_names, importances)
    chart = wandb.visualize("wandb/feature_importances/v1", table)

    return chart
python
wandb/sklearn/calculate/feature_importances.py
11
52
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,680
make_table
def make_table(feature_names, importances):
    """Pair each feature name with its importance as a wandb.Table row."""
    rows = [[name, importance] for name, importance in zip(feature_names, importances)]
    return wandb.Table(columns=["feature_names", "importances"], data=rows)
python
wandb/sklearn/calculate/feature_importances.py
55
60
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,681
check_for_attribute_on
def check_for_attribute_on(model, attributes_to_check):
    """Return the first name in `attributes_to_check` that `model` has, else None."""
    return next(
        (attribute for attribute in attributes_to_check if hasattr(model, attribute)),
        None,
    )
python
wandb/sklearn/calculate/feature_importances.py
63
67
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,682
validate_labels
def validate_labels(known_classes, passed_labels, arg_name, *args, **kwargs):
    """Check that every user-supplied label is one of the known classes.

    Replaces the previous ``assert False`` stub, which unconditionally
    crashed every caller that filtered by ``true_labels``/``pred_labels``
    and would be stripped entirely under ``python -O``.

    Args:
        known_classes: iterable of class labels present in the data.
        passed_labels: iterable of labels the user asked to plot.
        arg_name: name of the caller's argument, used in the error message.

    Raises:
        ValueError: if any passed label is not among the known classes.
    """
    known = set(known_classes)
    unknown = [label for label in passed_labels if label not in known]
    if unknown:
        raise ValueError(
            f"{arg_name} contains labels not found in the data: {unknown}"
        )
python
wandb/sklearn/calculate/confusion_matrix.py
15
16
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,683
confusion_matrix
def confusion_matrix(
    y_true=None,
    y_pred=None,
    labels=None,
    true_labels=None,
    pred_labels=None,
    normalize=False,
):
    """Compute the confusion matrix to evaluate the performance of a classification.

    Called by plot_confusion_matrix to visualize roc curves. Please use the
    function plot_confusion_matrix() if you wish to visualize your confusion
    matrix.
    """
    cm = metrics.confusion_matrix(y_true, y_pred)

    if labels is None:
        classes = unique_labels(y_true, y_pred)
    else:
        classes = np.asarray(labels)

    if normalize:
        # Row-normalize; classes that never occur divide by zero -> NaN,
        # which is zeroed out below.
        cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
        cm = np.around(cm, decimals=2)
        cm[np.isnan(cm)] = 0.0

    if true_labels is None:
        true_classes = classes
    else:
        validate_labels(classes, true_labels, "true_labels")

        # np.isin replaces np.in1d, which was deprecated in NumPy 1.25
        # and removed in NumPy 2.0.
        true_label_indexes = np.isin(classes, true_labels)

        true_classes = classes[true_label_indexes]
        cm = cm[true_label_indexes]

    if pred_labels is None:
        pred_classes = classes
    else:
        validate_labels(classes, pred_labels, "pred_labels")

        pred_label_indexes = np.isin(classes, pred_labels)

        pred_classes = classes[pred_label_indexes]
        cm = cm[:, pred_label_indexes]

    table = make_table(cm, pred_classes, true_classes, labels)
    chart = wandb.visualize("wandb/confusion_matrix/v1", table)

    return chart
python
wandb/sklearn/calculate/confusion_matrix.py
19
67
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,684
make_table
def make_table(cm, pred_classes, true_classes, labels):
    """Flatten a confusion matrix into (Predicted, Actual, Count) table rows.

    ``cm`` follows sklearn's convention: row ``i`` is the true class
    ``true_classes[i]`` and column ``j`` the predicted class
    ``pred_classes[j]``. The previous version indexed the two label arrays
    the other way around (and type-checked ``pred_classes[0]`` instead of
    the current element), which mislabelled every off-diagonal cell and
    could raise IndexError when the true/pred filters differed in length.
    """
    data, count = [], 0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        true = true_classes[i]
        pred = pred_classes[j]
        if labels is not None and isinstance(pred, (int, np.integer)):
            # Integer class ids: map them through the user-supplied labels.
            pred = labels[pred]
            true = labels[true]

        data.append([pred, true, cm[i, j]])

        count += 1
        # Stop once the chart row limit is reached.
        if utils.check_against_limit(
            count,
            "confusion_matrix",
            utils.chart_limit,
        ):
            break
    table = wandb.Table(columns=["Predicted", "Actual", "Count"], data=data)
    return table
python
wandb/sklearn/calculate/confusion_matrix.py
70
92
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,685
__init__
def __init__(self):
    """Initialize the backing stores, bypassing custom attribute machinery.

    object.__setattr__ is used directly so that setting these internal
    fields does not go through any attribute interception this class
    performs (see __getattr__), then the default config values are loaded.
    """
    initial_state = {
        "_items": dict(),
        "_locked": dict(),
        "_users": dict(),
        "_users_inv": dict(),
        "_users_cnt": 0,
        "_callback": None,
        "_settings": None,
        "_artifact_callback": None,
    }
    for attribute, value in initial_state.items():
        object.__setattr__(self, attribute, value)
    self._load_defaults()
python
wandb/sdk/wandb_config.py
95
105
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,686
_set_callback
def _set_callback(self, cb):
    # Store the change-notification callback; object.__setattr__ bypasses
    # any custom attribute handling on this class.
    object.__setattr__(self, "_callback", cb)
python
wandb/sdk/wandb_config.py
107
108
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,687
_set_artifact_callback
def _set_artifact_callback(self, cb):
    # Store the artifact callback; object.__setattr__ bypasses any custom
    # attribute handling on this class.
    object.__setattr__(self, "_artifact_callback", cb)
python
wandb/sdk/wandb_config.py
110
111
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,688
_set_settings
def _set_settings(self, settings):
    # Store the settings object; object.__setattr__ bypasses any custom
    # attribute handling on this class.
    object.__setattr__(self, "_settings", settings)
python
wandb/sdk/wandb_config.py
113
114
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,689
__repr__
def __repr__(self): return str(dict(self))
python
wandb/sdk/wandb_config.py
116
117
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,690
keys
def keys(self):
    """Public config keys; names starting with '_' are internal and skipped."""
    return [name for name in self._items if not name.startswith("_")]
python
wandb/sdk/wandb_config.py
119
120
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,691
_as_dict
def _as_dict(self):
    # Internal view: returns the live backing dict (including '_'-prefixed
    # keys), not a copy — mutations affect the config directly.
    return self._items
python
wandb/sdk/wandb_config.py
122
123
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,692
as_dict
def as_dict(self):
    # TODO: add telemetry, deprecate, then remove
    # Public snapshot: builds a fresh dict of the non-'_' entries via the
    # keys()/__getitem__ mapping protocol.
    return dict(self)
python
wandb/sdk/wandb_config.py
125
127
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,693
__getitem__
def __getitem__(self, key):
    # Direct lookup in the backing store; raises KeyError for unknown keys.
    return self._items[key]
python
wandb/sdk/wandb_config.py
129
130
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,694
_check_locked
def _check_locked(self, key, ignore_locked=False) -> bool: locked = self._locked.get(key) if locked is not None: locked_user = self._users_inv[locked] if not ignore_locked: wandb.termwarn( "Config item '%s' was locked by '%s' (ignored update)." % (key, locked_user) ) return True return False
python
wandb/sdk/wandb_config.py
132
142
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,695
__setitem__
def __setitem__(self, key, val):
    """Set a config entry, honoring locks and notifying the change callback."""
    # Locked keys are silently dropped; _check_locked already warned.
    if self._check_locked(key):
        return
    with wandb.sdk.lib.telemetry.context() as tel:
        tel.feature.set_config_item = True
    # Nested artifacts are rejected before any sanitization happens.
    self._raise_value_error_on_nested_artifact(val, nested=True)
    key, val = self._sanitize(key, val)
    self._items[key] = val
    logger.info("config set %s = %s - %s", key, val, self._callback)
    if self._callback:
        self._callback(key=key, val=val)
python
wandb/sdk/wandb_config.py
144
154
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,696
items
def items(self):
    """Public (key, value) pairs, skipping '_'-prefixed internal entries."""
    return [
        (name, self._items[name])
        for name in self._items
        if not name.startswith("_")
    ]
python
wandb/sdk/wandb_config.py
156
157
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,697
__getattr__
def __getattr__(self, key): try: return self.__getitem__(key) except KeyError as ke: raise AttributeError( f"{self.__class__!r} object has no attribute {key!r}" ) from ke
python
wandb/sdk/wandb_config.py
161
167
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,698
__contains__
def __contains__(self, key):
    # Membership checks the raw backing dict, so '_'-prefixed internal
    # keys also report True.
    return key in self._items
python
wandb/sdk/wandb_config.py
169
170
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,699
_update
def _update(self, d, allow_val_change=None, ignore_locked=None):
    """Parse, lock-filter, sanitize and merge `d`; return the sanitized dict."""
    parsed = wandb_helper.parse_config(d)
    blocked = {
        key
        for key in list(parsed)
        if self._check_locked(key, ignore_locked=ignore_locked)
    }
    sanitized = self._sanitize_dict(parsed, allow_val_change, ignore_keys=blocked)
    self._items.update(sanitized)
    return sanitized
python
wandb/sdk/wandb_config.py
172
182
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }
2,700
update
def update(self, d, allow_val_change=None):
    """Merge `d` into the config and notify the callback with what changed."""
    changed = self._update(d, allow_val_change)
    if self._callback:
        self._callback(data=changed)
python
wandb/sdk/wandb_config.py
184
187
{ "name": "Git-abouvier/wandb", "url": "https://github.com/Git-abouvier/wandb.git", "license": "MIT", "stars": 0, "forks": 0 }