body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
026f9be22829b5dbaa1165ac73c83f3a95539506d98535b45e67c6670d69cdaf
def __init__(self, message, content): '\n :param message: WxBizMsgTypeBase 对象\n :param content: 文字回复内容\n ' super(TextReply, self).__init__(message=message, content=content)
:param message: WxBizMsgTypeBase 对象 :param content: 文字回复内容
core/WxMsgReply.py
__init__
NextStand/WechatBiz
1
python
def __init__(self, message, content): '\n :param message: WxBizMsgTypeBase 对象\n :param content: 文字回复内容\n ' super(TextReply, self).__init__(message=message, content=content)
def __init__(self, message, content): '\n :param message: WxBizMsgTypeBase 对象\n :param content: 文字回复内容\n ' super(TextReply, self).__init__(message=message, content=content)<|docstring|>:param message: WxBizMsgTypeBase 对象 :param content: 文字回复内容<|endoftext|>
7f128b8e844a0e53c07ac07a060f663abdc111de735f381ecff69906bc01f73c
def __init__(self, message, media_id): '\n :param message: WechatMessage 对象\n :param media_id: 图片的 MediaID\n ' super(ImageReply, self).__init__(message=message, media_id=media_id)
:param message: WechatMessage 对象 :param media_id: 图片的 MediaID
core/WxMsgReply.py
__init__
NextStand/WechatBiz
1
python
def __init__(self, message, media_id): '\n :param message: WechatMessage 对象\n :param media_id: 图片的 MediaID\n ' super(ImageReply, self).__init__(message=message, media_id=media_id)
def __init__(self, message, media_id): '\n :param message: WechatMessage 对象\n :param media_id: 图片的 MediaID\n ' super(ImageReply, self).__init__(message=message, media_id=media_id)<|docstring|>:param message: WechatMessage 对象 :param media_id: 图片的 MediaID<|endoftext|>
eeb89caf6bc1dbd8258432f43a3deacceb53f0964c08ed52c468bb28a544e993
def __init__(self, message, media_id): '\n :param message: WechatMessage 对象\n :param media_id: 语音的 MediaID\n ' super(VoiceReply, self).__init__(message=message, media_id=media_id)
:param message: WechatMessage 对象 :param media_id: 语音的 MediaID
core/WxMsgReply.py
__init__
NextStand/WechatBiz
1
python
def __init__(self, message, media_id): '\n :param message: WechatMessage 对象\n :param media_id: 语音的 MediaID\n ' super(VoiceReply, self).__init__(message=message, media_id=media_id)
def __init__(self, message, media_id): '\n :param message: WechatMessage 对象\n :param media_id: 语音的 MediaID\n ' super(VoiceReply, self).__init__(message=message, media_id=media_id)<|docstring|>:param message: WechatMessage 对象 :param media_id: 语音的 MediaID<|endoftext|>
06cb6ca9afbe3781a8afe4e1797870d362330bc5c38ece1ca20a9171294acb86
def __init__(self, message, media_id, title=None, description=None): '\n :param message: WechatMessage对象\n :param media_id: 视频的 MediaID\n :param title: 视频消息的标题\n :param description: 视频消息的描述\n ' title = (title or '') description = (description or '') super(VideoReply, self).__init__(message=message, media_id=media_id, title=title, description=description)
:param message: WechatMessage对象 :param media_id: 视频的 MediaID :param title: 视频消息的标题 :param description: 视频消息的描述
core/WxMsgReply.py
__init__
NextStand/WechatBiz
1
python
def __init__(self, message, media_id, title=None, description=None): '\n :param message: WechatMessage对象\n :param media_id: 视频的 MediaID\n :param title: 视频消息的标题\n :param description: 视频消息的描述\n ' title = (title or ) description = (description or ) super(VideoReply, self).__init__(message=message, media_id=media_id, title=title, description=description)
def __init__(self, message, media_id, title=None, description=None): '\n :param message: WechatMessage对象\n :param media_id: 视频的 MediaID\n :param title: 视频消息的标题\n :param description: 视频消息的描述\n ' title = (title or ) description = (description or ) super(VideoReply, self).__init__(message=message, media_id=media_id, title=title, description=description)<|docstring|>:param message: WechatMessage对象 :param media_id: 视频的 MediaID :param title: 视频消息的标题 :param description: 视频消息的描述<|endoftext|>
e22df265543349432f98c957c4a783b563e38e5996ae3d4bf9168c32ca65995c
def picp_loss(interval_model: ModelWithPredictionInterval, x_test: np.ndarray, y_true: np.ndarray) -> float: '\n Calculate the difference between the desired confidence level and the prediction_interval_coverage_probability for several intervals.\n\n :param interval_model: Some model which makes predictions with a standard deviation\n :param x_test: The variables for which we would like to use to predict a distribution\n :param y_true: The True target value\n\n :return: The loss score\n ' levels = np.array((90, 70, 50, 30, 10)) intervals = interval_model.predict_interval(x_test, conf_level=levels).T lower = intervals[0] upper = intervals[1] loss = 0 for i in range(len(levels)): loss += abs((levels[i] - ((100 * len(np.where(np.logical_and((y_true >= lower[i]), (y_true <= upper[i])))[0])) / len(y_true)))) return (loss / len(levels))
Calculate the difference between the desired confidence level and the prediction_interval_coverage_probability for several intervals. :param interval_model: Some model which makes predictions with a standard deviation :param x_test: The variables for which we would like to use to predict a distribution :param y_true: The True target value :return: The loss score
src/macest/regression/models.py
picp_loss
LaudateCorpus1/macest
88
python
def picp_loss(interval_model: ModelWithPredictionInterval, x_test: np.ndarray, y_true: np.ndarray) -> float: '\n Calculate the difference between the desired confidence level and the prediction_interval_coverage_probability for several intervals.\n\n :param interval_model: Some model which makes predictions with a standard deviation\n :param x_test: The variables for which we would like to use to predict a distribution\n :param y_true: The True target value\n\n :return: The loss score\n ' levels = np.array((90, 70, 50, 30, 10)) intervals = interval_model.predict_interval(x_test, conf_level=levels).T lower = intervals[0] upper = intervals[1] loss = 0 for i in range(len(levels)): loss += abs((levels[i] - ((100 * len(np.where(np.logical_and((y_true >= lower[i]), (y_true <= upper[i])))[0])) / len(y_true)))) return (loss / len(levels))
def picp_loss(interval_model: ModelWithPredictionInterval, x_test: np.ndarray, y_true: np.ndarray) -> float: '\n Calculate the difference between the desired confidence level and the prediction_interval_coverage_probability for several intervals.\n\n :param interval_model: Some model which makes predictions with a standard deviation\n :param x_test: The variables for which we would like to use to predict a distribution\n :param y_true: The True target value\n\n :return: The loss score\n ' levels = np.array((90, 70, 50, 30, 10)) intervals = interval_model.predict_interval(x_test, conf_level=levels).T lower = intervals[0] upper = intervals[1] loss = 0 for i in range(len(levels)): loss += abs((levels[i] - ((100 * len(np.where(np.logical_and((y_true >= lower[i]), (y_true <= upper[i])))[0])) / len(y_true)))) return (loss / len(levels))<|docstring|>Calculate the difference between the desired confidence level and the prediction_interval_coverage_probability for several intervals. :param interval_model: Some model which makes predictions with a standard deviation :param x_test: The variables for which we would like to use to predict a distribution :param y_true: The True target value :return: The loss score<|endoftext|>
2c1c16c3456700f6dcbfa70d387d00fa31a09efa3a50687687cc99495d296b73
def predict(self, x_star: np.ndarray) -> Any: 'Return nothing as is only needed to check method exists.' pass
Return nothing as is only needed to check method exists.
src/macest/regression/models.py
predict
LaudateCorpus1/macest
88
python
def predict(self, x_star: np.ndarray) -> Any: pass
def predict(self, x_star: np.ndarray) -> Any: pass<|docstring|>Return nothing as is only needed to check method exists.<|endoftext|>
58cdc8ea52f865f117619399c8468c8ed64f14e24151cb1e0a466e80d7e0da53
def __init__(self, model: _RegressionPointPredictionModel, x_train: np.ndarray, train_err: np.ndarray, macest_model_params: MacestPredIntervalModelParams=MacestPredIntervalModelParams(), error_dist: Literal[('normal', 'laplace')]='normal', dist_func: Literal[('linear', 'error_weighted_poly')]='linear', precomputed_neighbour_info: Optional[PrecomputedNeighbourInfo]=None, prec_point_preds: Optional[np.ndarray]=None, prec_graph: Optional[nmslib.dist.FloatIndex]=None, search_method_args: HnswGraphArgs=HnswGraphArgs()): '\n Init.\n\n :param model: Any model which takes some variables x and returns a point prediction y\n :param x_train: The variables used to train the model\n :param train_err: The error for each training point\n :param num_neighbours: The number of points which define the local neighbourhood\n :param alpha: co-efficient for distance function (hyper-parameter)\n :param beta: The hyper-parameter used in distance function\n :param error_dist: The assumed distribution for the errors\n :param dist_func: The function to convert distance to confidence (currently linear or error_weighted_poly implemented)\n :param prec_point_preds: The pre-computed model predictions\n :param prec_distance_to_nn: The pre-computed nearest neighbour distances for the calibration and test data\n :param prec_ind_of_nn: The pre-computed nearest neighbour indices for the calibration and test data\n :param prec_graph: The pre-computed graph to use for online hnsw search\n ' self.model = model self.x_train = x_train self.train_err = train_err self.macest_model_params = macest_model_params self._num_neighbours = macest_model_params.num_neighbours self._alpha = macest_model_params.alpha self._beta = macest_model_params.beta self.dist_func = dist_func self.error_dist = error_dist self.prec_graph = prec_graph self.point_preds = prec_point_preds self.precomputed_neighbour_info = precomputed_neighbour_info if (not self.precomputed_neighbour_info): self._distance_to_nn = None self._ind_of_nn = 
None else: self._distance_to_nn = self.precomputed_neighbour_info.prec_distance_to_nn self._ind_of_nn = self.precomputed_neighbour_info.prec_ind_of_nn self.search_method_args = search_method_args self._check_consistent_search_method_args() self._check_data_consistent_with_search_args()
Init. :param model: Any model which takes some variables x and returns a point prediction y :param x_train: The variables used to train the model :param train_err: The error for each training point :param num_neighbours: The number of points which define the local neighbourhood :param alpha: co-efficient for distance function (hyper-parameter) :param beta: The hyper-parameter used in distance function :param error_dist: The assumed distribution for the errors :param dist_func: The function to convert distance to confidence (currently linear or error_weighted_poly implemented) :param prec_point_preds: The pre-computed model predictions :param prec_distance_to_nn: The pre-computed nearest neighbour distances for the calibration and test data :param prec_ind_of_nn: The pre-computed nearest neighbour indices for the calibration and test data :param prec_graph: The pre-computed graph to use for online hnsw search
src/macest/regression/models.py
__init__
LaudateCorpus1/macest
88
python
def __init__(self, model: _RegressionPointPredictionModel, x_train: np.ndarray, train_err: np.ndarray, macest_model_params: MacestPredIntervalModelParams=MacestPredIntervalModelParams(), error_dist: Literal[('normal', 'laplace')]='normal', dist_func: Literal[('linear', 'error_weighted_poly')]='linear', precomputed_neighbour_info: Optional[PrecomputedNeighbourInfo]=None, prec_point_preds: Optional[np.ndarray]=None, prec_graph: Optional[nmslib.dist.FloatIndex]=None, search_method_args: HnswGraphArgs=HnswGraphArgs()): '\n Init.\n\n :param model: Any model which takes some variables x and returns a point prediction y\n :param x_train: The variables used to train the model\n :param train_err: The error for each training point\n :param num_neighbours: The number of points which define the local neighbourhood\n :param alpha: co-efficient for distance function (hyper-parameter)\n :param beta: The hyper-parameter used in distance function\n :param error_dist: The assumed distribution for the errors\n :param dist_func: The function to convert distance to confidence (currently linear or error_weighted_poly implemented)\n :param prec_point_preds: The pre-computed model predictions\n :param prec_distance_to_nn: The pre-computed nearest neighbour distances for the calibration and test data\n :param prec_ind_of_nn: The pre-computed nearest neighbour indices for the calibration and test data\n :param prec_graph: The pre-computed graph to use for online hnsw search\n ' self.model = model self.x_train = x_train self.train_err = train_err self.macest_model_params = macest_model_params self._num_neighbours = macest_model_params.num_neighbours self._alpha = macest_model_params.alpha self._beta = macest_model_params.beta self.dist_func = dist_func self.error_dist = error_dist self.prec_graph = prec_graph self.point_preds = prec_point_preds self.precomputed_neighbour_info = precomputed_neighbour_info if (not self.precomputed_neighbour_info): self._distance_to_nn = None self._ind_of_nn = 
None else: self._distance_to_nn = self.precomputed_neighbour_info.prec_distance_to_nn self._ind_of_nn = self.precomputed_neighbour_info.prec_ind_of_nn self.search_method_args = search_method_args self._check_consistent_search_method_args() self._check_data_consistent_with_search_args()
def __init__(self, model: _RegressionPointPredictionModel, x_train: np.ndarray, train_err: np.ndarray, macest_model_params: MacestPredIntervalModelParams=MacestPredIntervalModelParams(), error_dist: Literal[('normal', 'laplace')]='normal', dist_func: Literal[('linear', 'error_weighted_poly')]='linear', precomputed_neighbour_info: Optional[PrecomputedNeighbourInfo]=None, prec_point_preds: Optional[np.ndarray]=None, prec_graph: Optional[nmslib.dist.FloatIndex]=None, search_method_args: HnswGraphArgs=HnswGraphArgs()): '\n Init.\n\n :param model: Any model which takes some variables x and returns a point prediction y\n :param x_train: The variables used to train the model\n :param train_err: The error for each training point\n :param num_neighbours: The number of points which define the local neighbourhood\n :param alpha: co-efficient for distance function (hyper-parameter)\n :param beta: The hyper-parameter used in distance function\n :param error_dist: The assumed distribution for the errors\n :param dist_func: The function to convert distance to confidence (currently linear or error_weighted_poly implemented)\n :param prec_point_preds: The pre-computed model predictions\n :param prec_distance_to_nn: The pre-computed nearest neighbour distances for the calibration and test data\n :param prec_ind_of_nn: The pre-computed nearest neighbour indices for the calibration and test data\n :param prec_graph: The pre-computed graph to use for online hnsw search\n ' self.model = model self.x_train = x_train self.train_err = train_err self.macest_model_params = macest_model_params self._num_neighbours = macest_model_params.num_neighbours self._alpha = macest_model_params.alpha self._beta = macest_model_params.beta self.dist_func = dist_func self.error_dist = error_dist self.prec_graph = prec_graph self.point_preds = prec_point_preds self.precomputed_neighbour_info = precomputed_neighbour_info if (not self.precomputed_neighbour_info): self._distance_to_nn = None self._ind_of_nn = 
None else: self._distance_to_nn = self.precomputed_neighbour_info.prec_distance_to_nn self._ind_of_nn = self.precomputed_neighbour_info.prec_ind_of_nn self.search_method_args = search_method_args self._check_consistent_search_method_args() self._check_data_consistent_with_search_args()<|docstring|>Init. :param model: Any model which takes some variables x and returns a point prediction y :param x_train: The variables used to train the model :param train_err: The error for each training point :param num_neighbours: The number of points which define the local neighbourhood :param alpha: co-efficient for distance function (hyper-parameter) :param beta: The hyper-parameter used in distance function :param error_dist: The assumed distribution for the errors :param dist_func: The function to convert distance to confidence (currently linear or error_weighted_poly implemented) :param prec_point_preds: The pre-computed model predictions :param prec_distance_to_nn: The pre-computed nearest neighbour distances for the calibration and test data :param prec_ind_of_nn: The pre-computed nearest neighbour indices for the calibration and test data :param prec_graph: The pre-computed graph to use for online hnsw search<|endoftext|>
d0a13f9c92e0c51995ad7c91e564d950bb3ac8dcc5381089d8ef595ffa220ca3
def predict(self, x_star: np.ndarray) -> np.ndarray: '\n Return a point prediction for x_star.\n\n :param x_star: The position for which we would like to predict\n\n :return: pred_star : The point prediction for x_star\n ' pred_star = self.model.predict(x_star) return pred_star
Return a point prediction for x_star. :param x_star: The position for which we would like to predict :return: pred_star : The point prediction for x_star
src/macest/regression/models.py
predict
LaudateCorpus1/macest
88
python
def predict(self, x_star: np.ndarray) -> np.ndarray: '\n Return a point prediction for x_star.\n\n :param x_star: The position for which we would like to predict\n\n :return: pred_star : The point prediction for x_star\n ' pred_star = self.model.predict(x_star) return pred_star
def predict(self, x_star: np.ndarray) -> np.ndarray: '\n Return a point prediction for x_star.\n\n :param x_star: The position for which we would like to predict\n\n :return: pred_star : The point prediction for x_star\n ' pred_star = self.model.predict(x_star) return pred_star<|docstring|>Return a point prediction for x_star. :param x_star: The position for which we would like to predict :return: pred_star : The point prediction for x_star<|endoftext|>
feec748cbf45db35b7eb2bb2c673aa1af47f59545ca803639b03d2b515a66b65
def build_graph(self) -> nmslib.dist.FloatIndex: '\n Build the Hierarchical Navigable Small World (hnsw) index graph.\n\n :return: A queryable HNSW graph\n ' graph = nmslib.init(**self.search_method_args.init_kwargs) graph.addDataPointBatch(self.x_train) graph.createIndex(self.search_method_args.construction_kwargs) graph.setQueryTimeParams(self.search_method_args.query_kwargs) return graph
Build the Hierarchical Navigable Small World (hnsw) index graph. :return: A queryable HNSW graph
src/macest/regression/models.py
build_graph
LaudateCorpus1/macest
88
python
def build_graph(self) -> nmslib.dist.FloatIndex: '\n Build the Hierarchical Navigable Small World (hnsw) index graph.\n\n :return: A queryable HNSW graph\n ' graph = nmslib.init(**self.search_method_args.init_kwargs) graph.addDataPointBatch(self.x_train) graph.createIndex(self.search_method_args.construction_kwargs) graph.setQueryTimeParams(self.search_method_args.query_kwargs) return graph
def build_graph(self) -> nmslib.dist.FloatIndex: '\n Build the Hierarchical Navigable Small World (hnsw) index graph.\n\n :return: A queryable HNSW graph\n ' graph = nmslib.init(**self.search_method_args.init_kwargs) graph.addDataPointBatch(self.x_train) graph.createIndex(self.search_method_args.construction_kwargs) graph.setQueryTimeParams(self.search_method_args.query_kwargs) return graph<|docstring|>Build the Hierarchical Navigable Small World (hnsw) index graph. :return: A queryable HNSW graph<|endoftext|>
367605a5a602792ca7e181d052a058762e129eac4c8a92bc63560a8739d4209e
def calc_nn_dist(self, x_star: np.ndarray) -> Tuple[(np.ndarray, np.ndarray)]: '\n Calculate the distant to a set of k nearest neighbours.\n\n :param x_star: The position for which we would like to predict\n\n :return: The distance to k nearest neighbours and the indices of the k closest neighbours\n ' if (self.prec_graph is None): self.prec_graph = self.build_graph() neighbours = np.array(self.prec_graph.knnQueryBatch(x_star, k=self._num_neighbours, num_threads=num_threads_available)) dist = neighbours[(:, 1, :)] ind = neighbours[(:, 0, :)].astype(int) return (dist, ind)
Calculate the distant to a set of k nearest neighbours. :param x_star: The position for which we would like to predict :return: The distance to k nearest neighbours and the indices of the k closest neighbours
src/macest/regression/models.py
calc_nn_dist
LaudateCorpus1/macest
88
python
def calc_nn_dist(self, x_star: np.ndarray) -> Tuple[(np.ndarray, np.ndarray)]: '\n Calculate the distant to a set of k nearest neighbours.\n\n :param x_star: The position for which we would like to predict\n\n :return: The distance to k nearest neighbours and the indices of the k closest neighbours\n ' if (self.prec_graph is None): self.prec_graph = self.build_graph() neighbours = np.array(self.prec_graph.knnQueryBatch(x_star, k=self._num_neighbours, num_threads=num_threads_available)) dist = neighbours[(:, 1, :)] ind = neighbours[(:, 0, :)].astype(int) return (dist, ind)
def calc_nn_dist(self, x_star: np.ndarray) -> Tuple[(np.ndarray, np.ndarray)]: '\n Calculate the distant to a set of k nearest neighbours.\n\n :param x_star: The position for which we would like to predict\n\n :return: The distance to k nearest neighbours and the indices of the k closest neighbours\n ' if (self.prec_graph is None): self.prec_graph = self.build_graph() neighbours = np.array(self.prec_graph.knnQueryBatch(x_star, k=self._num_neighbours, num_threads=num_threads_available)) dist = neighbours[(:, 1, :)] ind = neighbours[(:, 0, :)].astype(int) return (dist, ind)<|docstring|>Calculate the distant to a set of k nearest neighbours. :param x_star: The position for which we would like to predict :return: The distance to k nearest neighbours and the indices of the k closest neighbours<|endoftext|>
e406c9462efec3cfeaa40e80ccedef13579c87f7073c3906bc104d2dc626fe81
def calc_linear_dist_func(self, x_star: np.ndarray) -> np.ndarray: '\n Calculate the linear sum of average distance to neighbours and average per neighbour error.\n\n :param x_star: The position for which we would like to predict\n\n :return: the sum of average distance to neighbours and average per neighbour error for x_star\n ' if (self._distance_to_nn is not None): local_distance = self._distance_to_nn if (self._ind_of_nn is None): raise ValueError('_ind_of_nn has not been cached during training') ind = self._ind_of_nn else: (local_distance, ind) = self.calc_nn_dist(x_star) if isinstance(local_distance, np.ndarray): dist = (self._alpha * np.average(local_distance, weights=np.arange(local_distance.shape[1], 0, (- 1)), axis=1)) else: raise ValueError('Need to remove pre-cached training neighbour data from training') if isinstance(ind, np.ndarray): error = (self._beta * np.average(abs(self.train_err[ind.astype(int)]), weights=(1.0 / (1 + local_distance)), axis=1)) else: raise ValueError('Need to remove pre-cached training neighbour data from training') return (dist + error)
Calculate the linear sum of average distance to neighbours and average per neighbour error. :param x_star: The position for which we would like to predict :return: the sum of average distance to neighbours and average per neighbour error for x_star
src/macest/regression/models.py
calc_linear_dist_func
LaudateCorpus1/macest
88
python
def calc_linear_dist_func(self, x_star: np.ndarray) -> np.ndarray: '\n Calculate the linear sum of average distance to neighbours and average per neighbour error.\n\n :param x_star: The position for which we would like to predict\n\n :return: the sum of average distance to neighbours and average per neighbour error for x_star\n ' if (self._distance_to_nn is not None): local_distance = self._distance_to_nn if (self._ind_of_nn is None): raise ValueError('_ind_of_nn has not been cached during training') ind = self._ind_of_nn else: (local_distance, ind) = self.calc_nn_dist(x_star) if isinstance(local_distance, np.ndarray): dist = (self._alpha * np.average(local_distance, weights=np.arange(local_distance.shape[1], 0, (- 1)), axis=1)) else: raise ValueError('Need to remove pre-cached training neighbour data from training') if isinstance(ind, np.ndarray): error = (self._beta * np.average(abs(self.train_err[ind.astype(int)]), weights=(1.0 / (1 + local_distance)), axis=1)) else: raise ValueError('Need to remove pre-cached training neighbour data from training') return (dist + error)
def calc_linear_dist_func(self, x_star: np.ndarray) -> np.ndarray: '\n Calculate the linear sum of average distance to neighbours and average per neighbour error.\n\n :param x_star: The position for which we would like to predict\n\n :return: the sum of average distance to neighbours and average per neighbour error for x_star\n ' if (self._distance_to_nn is not None): local_distance = self._distance_to_nn if (self._ind_of_nn is None): raise ValueError('_ind_of_nn has not been cached during training') ind = self._ind_of_nn else: (local_distance, ind) = self.calc_nn_dist(x_star) if isinstance(local_distance, np.ndarray): dist = (self._alpha * np.average(local_distance, weights=np.arange(local_distance.shape[1], 0, (- 1)), axis=1)) else: raise ValueError('Need to remove pre-cached training neighbour data from training') if isinstance(ind, np.ndarray): error = (self._beta * np.average(abs(self.train_err[ind.astype(int)]), weights=(1.0 / (1 + local_distance)), axis=1)) else: raise ValueError('Need to remove pre-cached training neighbour data from training') return (dist + error)<|docstring|>Calculate the linear sum of average distance to neighbours and average per neighbour error. :param x_star: The position for which we would like to predict :return: the sum of average distance to neighbours and average per neighbour error for x_star<|endoftext|>
41faf0a48dae7a0d53699de4ffda7476f5f64ee8cf607415e746f172db0c8524
def calc_error_weighted_dist(self, x_star: np.ndarray) -> np.ndarray: "\n Calculate average distance to neighbours weighted by the per neighbour prediction error.\n\n :param x_star: The position for which we would like to predict\n\n :return: the error weighted distance from x_star point to it's neighbours\n " if (self._distance_to_nn is not None): local_distance = self._distance_to_nn if (self._ind_of_nn is None): raise ValueError('_ind_of_nn has not been cached during training') ind = self._ind_of_nn else: (local_distance, ind) = self.calc_nn_dist(x_star) if isinstance(ind, np.ndarray): train_error = self.train_err[ind.astype(int)] else: raise ValueError('Need to remove pre-cached training neighbour data from training') if isinstance(local_distance, np.ndarray): error_weighted_dist = np.average((local_distance * abs(train_error)), weights=(1.0 / (1 + local_distance)), axis=1) else: raise ValueError('Need to remove pre-cached training neighbour data from training') error_weighted_poly = (self._alpha * (error_weighted_dist ** self._beta)) return error_weighted_poly
Calculate average distance to neighbours weighted by the per neighbour prediction error. :param x_star: The position for which we would like to predict :return: the error weighted distance from x_star point to it's neighbours
src/macest/regression/models.py
calc_error_weighted_dist
LaudateCorpus1/macest
88
python
def calc_error_weighted_dist(self, x_star: np.ndarray) -> np.ndarray: "\n Calculate average distance to neighbours weighted by the per neighbour prediction error.\n\n :param x_star: The position for which we would like to predict\n\n :return: the error weighted distance from x_star point to it's neighbours\n " if (self._distance_to_nn is not None): local_distance = self._distance_to_nn if (self._ind_of_nn is None): raise ValueError('_ind_of_nn has not been cached during training') ind = self._ind_of_nn else: (local_distance, ind) = self.calc_nn_dist(x_star) if isinstance(ind, np.ndarray): train_error = self.train_err[ind.astype(int)] else: raise ValueError('Need to remove pre-cached training neighbour data from training') if isinstance(local_distance, np.ndarray): error_weighted_dist = np.average((local_distance * abs(train_error)), weights=(1.0 / (1 + local_distance)), axis=1) else: raise ValueError('Need to remove pre-cached training neighbour data from training') error_weighted_poly = (self._alpha * (error_weighted_dist ** self._beta)) return error_weighted_poly
def calc_error_weighted_dist(self, x_star: np.ndarray) -> np.ndarray: "\n Calculate average distance to neighbours weighted by the per neighbour prediction error.\n\n :param x_star: The position for which we would like to predict\n\n :return: the error weighted distance from x_star point to it's neighbours\n " if (self._distance_to_nn is not None): local_distance = self._distance_to_nn if (self._ind_of_nn is None): raise ValueError('_ind_of_nn has not been cached during training') ind = self._ind_of_nn else: (local_distance, ind) = self.calc_nn_dist(x_star) if isinstance(ind, np.ndarray): train_error = self.train_err[ind.astype(int)] else: raise ValueError('Need to remove pre-cached training neighbour data from training') if isinstance(local_distance, np.ndarray): error_weighted_dist = np.average((local_distance * abs(train_error)), weights=(1.0 / (1 + local_distance)), axis=1) else: raise ValueError('Need to remove pre-cached training neighbour data from training') error_weighted_poly = (self._alpha * (error_weighted_dist ** self._beta)) return error_weighted_poly<|docstring|>Calculate average distance to neighbours weighted by the per neighbour prediction error. :param x_star: The position for which we would like to predict :return: the error weighted distance from x_star point to it's neighbours<|endoftext|>
e3de3e7fe272a6aec5dea453bf53057b28cd33b24c0d0795cc787065e4df0057
def std_on_y_star(self, x_star: np.ndarray) -> np.ndarray: '\n Return the predicted variance for x_star.\n\n :param x_star: The position for which we would like to predict\n\n :return: sigma: The standard deviation for the prediction at x_star\n ' if (self.dist_func == 'error_weighted_poly'): dist = self.calc_error_weighted_dist(x_star) elif (self.dist_func == 'linear'): dist = self.calc_linear_dist_func(x_star) else: raise ValueError(f'Unknown distance function: {self.dist_func}') sigma = dist return sigma
Return the predicted variance for x_star. :param x_star: The position for which we would like to predict :return: sigma: The standard deviation for the prediction at x_star
src/macest/regression/models.py
std_on_y_star
LaudateCorpus1/macest
88
python
def std_on_y_star(self, x_star: np.ndarray) -> np.ndarray: '\n Return the predicted variance for x_star.\n\n :param x_star: The position for which we would like to predict\n\n :return: sigma: The standard deviation for the prediction at x_star\n ' if (self.dist_func == 'error_weighted_poly'): dist = self.calc_error_weighted_dist(x_star) elif (self.dist_func == 'linear'): dist = self.calc_linear_dist_func(x_star) else: raise ValueError(f'Unknown distance function: {self.dist_func}') sigma = dist return sigma
def std_on_y_star(self, x_star: np.ndarray) -> np.ndarray: '\n Return the predicted variance for x_star.\n\n :param x_star: The position for which we would like to predict\n\n :return: sigma: The standard deviation for the prediction at x_star\n ' if (self.dist_func == 'error_weighted_poly'): dist = self.calc_error_weighted_dist(x_star) elif (self.dist_func == 'linear'): dist = self.calc_linear_dist_func(x_star) else: raise ValueError(f'Unknown distance function: {self.dist_func}') sigma = dist return sigma<|docstring|>Return the predicted variance for x_star. :param x_star: The position for which we would like to predict :return: sigma: The standard deviation for the prediction at x_star<|endoftext|>
577deff9388b3615a45ea9705ed72c983f3cdef11c29da529f700db691d1b290
def laplace_scale_on_y_star(self, x_star: np.ndarray) -> np.ndarray: '\n Return the predicted laplacian variance for x_star.\n\n :param x_star: The position for which we would like to predict\n\n :return: sigma: The laplacian scaler for the prediction at x_star\n ' if (self.dist_func == 'error_weighted_poly'): dist = self.calc_error_weighted_dist(x_star) elif (self.dist_func == 'linear'): dist = self.calc_linear_dist_func(x_star) else: raise ValueError(f'Unknown distance function: {self.dist_func}') sigma = dist return sigma
Return the predicted laplacian variance for x_star. :param x_star: The position for which we would like to predict :return: sigma: The laplacian scaler for the prediction at x_star
src/macest/regression/models.py
laplace_scale_on_y_star
LaudateCorpus1/macest
88
python
def laplace_scale_on_y_star(self, x_star: np.ndarray) -> np.ndarray: '\n Return the predicted laplacian variance for x_star.\n\n :param x_star: The position for which we would like to predict\n\n :return: sigma: The laplacian scaler for the prediction at x_star\n ' if (self.dist_func == 'error_weighted_poly'): dist = self.calc_error_weighted_dist(x_star) elif (self.dist_func == 'linear'): dist = self.calc_linear_dist_func(x_star) else: raise ValueError(f'Unknown distance function: {self.dist_func}') sigma = dist return sigma
def laplace_scale_on_y_star(self, x_star: np.ndarray) -> np.ndarray: '\n Return the predicted laplacian variance for x_star.\n\n :param x_star: The position for which we would like to predict\n\n :return: sigma: The laplacian scaler for the prediction at x_star\n ' if (self.dist_func == 'error_weighted_poly'): dist = self.calc_error_weighted_dist(x_star) elif (self.dist_func == 'linear'): dist = self.calc_linear_dist_func(x_star) else: raise ValueError(f'Unknown distance function: {self.dist_func}') sigma = dist return sigma<|docstring|>Return the predicted laplacian variance for x_star. :param x_star: The position for which we would like to predict :return: sigma: The laplacian scaler for the prediction at x_star<|endoftext|>
ef17197bdf4802134c129a4591a37e118efb70a75601ebd0f6a3f1f868526bec
def _distribution(self, x_star: np.ndarray) -> laplace_gen: '\n Return the distribution that we will predict from.\n\n :return:\n ' if (self.point_preds is not None): point_preds = self.point_preds else: point_preds = self.predict(x_star) if (self.error_dist == 'normal'): scale = self.std_on_y_star(x_star) dist = norm(loc=point_preds, scale=scale) elif (self.error_dist == 'laplace'): scale = self.laplace_scale_on_y_star(x_star) dist = laplace(loc=point_preds, scale=scale) else: raise ValueError(f'Unknown distance function: {self.dist_func}') return dist
Return the distribution that we will predict from. :return:
src/macest/regression/models.py
_distribution
LaudateCorpus1/macest
88
python
def _distribution(self, x_star: np.ndarray) -> laplace_gen: '\n Return the distribution that we will predict from.\n\n :return:\n ' if (self.point_preds is not None): point_preds = self.point_preds else: point_preds = self.predict(x_star) if (self.error_dist == 'normal'): scale = self.std_on_y_star(x_star) dist = norm(loc=point_preds, scale=scale) elif (self.error_dist == 'laplace'): scale = self.laplace_scale_on_y_star(x_star) dist = laplace(loc=point_preds, scale=scale) else: raise ValueError(f'Unknown distance function: {self.dist_func}') return dist
def _distribution(self, x_star: np.ndarray) -> laplace_gen: '\n Return the distribution that we will predict from.\n\n :return:\n ' if (self.point_preds is not None): point_preds = self.point_preds else: point_preds = self.predict(x_star) if (self.error_dist == 'normal'): scale = self.std_on_y_star(x_star) dist = norm(loc=point_preds, scale=scale) elif (self.error_dist == 'laplace'): scale = self.laplace_scale_on_y_star(x_star) dist = laplace(loc=point_preds, scale=scale) else: raise ValueError(f'Unknown distance function: {self.dist_func}') return dist<|docstring|>Return the distribution that we will predict from. :return:<|endoftext|>
45989704d3b9f108e57caa3d3462b7c25a7fe1b4655092ec32bd244584c0f3c6
def predict_interval(self, x_star: np.ndarray, conf_level: Union[(np.ndarray, int, float)]=90) -> np.ndarray: '\n Predict the upper and lower prediction interval bounds for a given confidence level.\n\n :param x_star: The position for which we would like to predict\n :param conf_level:\n\n :return: The confidence bounds for each x_star for each confidence level\n ' dist = self._distribution(x_star) lower_perc = ((100 - conf_level) / 2) upper_perc = (100 - lower_perc) lower_vec = ((0.01 * np.ones((x_star.shape[0], len([conf_level])))) * lower_perc) upper_vec = ((0.01 * np.ones((x_star.shape[0], len([conf_level])))) * upper_perc) return np.array([dist.ppf(lower_vec.T), dist.ppf(upper_vec.T)]).T
Predict the upper and lower prediction interval bounds for a given confidence level. :param x_star: The position for which we would like to predict :param conf_level: :return: The confidence bounds for each x_star for each confidence level
src/macest/regression/models.py
predict_interval
LaudateCorpus1/macest
88
python
def predict_interval(self, x_star: np.ndarray, conf_level: Union[(np.ndarray, int, float)]=90) -> np.ndarray: '\n Predict the upper and lower prediction interval bounds for a given confidence level.\n\n :param x_star: The position for which we would like to predict\n :param conf_level:\n\n :return: The confidence bounds for each x_star for each confidence level\n ' dist = self._distribution(x_star) lower_perc = ((100 - conf_level) / 2) upper_perc = (100 - lower_perc) lower_vec = ((0.01 * np.ones((x_star.shape[0], len([conf_level])))) * lower_perc) upper_vec = ((0.01 * np.ones((x_star.shape[0], len([conf_level])))) * upper_perc) return np.array([dist.ppf(lower_vec.T), dist.ppf(upper_vec.T)]).T
def predict_interval(self, x_star: np.ndarray, conf_level: Union[(np.ndarray, int, float)]=90) -> np.ndarray: '\n Predict the upper and lower prediction interval bounds for a given confidence level.\n\n :param x_star: The position for which we would like to predict\n :param conf_level:\n\n :return: The confidence bounds for each x_star for each confidence level\n ' dist = self._distribution(x_star) lower_perc = ((100 - conf_level) / 2) upper_perc = (100 - lower_perc) lower_vec = ((0.01 * np.ones((x_star.shape[0], len([conf_level])))) * lower_perc) upper_vec = ((0.01 * np.ones((x_star.shape[0], len([conf_level])))) * upper_perc) return np.array([dist.ppf(lower_vec.T), dist.ppf(upper_vec.T)]).T<|docstring|>Predict the upper and lower prediction interval bounds for a given confidence level. :param x_star: The position for which we would like to predict :param conf_level: :return: The confidence bounds for each x_star for each confidence level<|endoftext|>
6308c10904487c5a62ae93788ac42a9b288ed66d6faee8c6525346bfd04cedf0
def calculate_prediction_interval_width(self, x_star: np.ndarray, conf_level: Union[(np.ndarray, int, float)]=90) -> np.ndarray: '\n Calculate the absolute width of a prediction interval for a given confidence level.\n\n :param x_star: The position for which we would like to predict\n :param conf_level:\n\n :return: the absolute width of a prediction interval for each x_star for each confidence level\n ' intervals = self.predict_interval(x_star, conf_level) return np.diff(intervals)
Calculate the absolute width of a prediction interval for a given confidence level. :param x_star: The position for which we would like to predict :param conf_level: :return: the absolute width of a prediction interval for each x_star for each confidence level
src/macest/regression/models.py
calculate_prediction_interval_width
LaudateCorpus1/macest
88
python
def calculate_prediction_interval_width(self, x_star: np.ndarray, conf_level: Union[(np.ndarray, int, float)]=90) -> np.ndarray: '\n Calculate the absolute width of a prediction interval for a given confidence level.\n\n :param x_star: The position for which we would like to predict\n :param conf_level:\n\n :return: the absolute width of a prediction interval for each x_star for each confidence level\n ' intervals = self.predict_interval(x_star, conf_level) return np.diff(intervals)
def calculate_prediction_interval_width(self, x_star: np.ndarray, conf_level: Union[(np.ndarray, int, float)]=90) -> np.ndarray: '\n Calculate the absolute width of a prediction interval for a given confidence level.\n\n :param x_star: The position for which we would like to predict\n :param conf_level:\n\n :return: the absolute width of a prediction interval for each x_star for each confidence level\n ' intervals = self.predict_interval(x_star, conf_level) return np.diff(intervals)<|docstring|>Calculate the absolute width of a prediction interval for a given confidence level. :param x_star: The position for which we would like to predict :param conf_level: :return: the absolute width of a prediction interval for each x_star for each confidence level<|endoftext|>
7f8afeec5ca945b937d397154aa1bb89905bb162e83b7677678ecc5efbf24d47
def sample_prediction(self, x_star: np.ndarray, nsamples: int=(10 ** 3)) -> np.ndarray: '\n Draw samples from any predicted distribution to get a distribution of predictions.\n\n :param x_star: The position in feature space for which we would like to predict\n :param nsamples: The number of samples to draw from the distribution\n\n :return: Samples from the predicted distribution\n ' dist = self._distribution(x_star) return dist.rvs(size=(nsamples, x_star.shape[0])).T
Draw samples from any predicted distribution to get a distribution of predictions. :param x_star: The position in feature space for which we would like to predict :param nsamples: The number of samples to draw from the distribution :return: Samples from the predicted distribution
src/macest/regression/models.py
sample_prediction
LaudateCorpus1/macest
88
python
def sample_prediction(self, x_star: np.ndarray, nsamples: int=(10 ** 3)) -> np.ndarray: '\n Draw samples from any predicted distribution to get a distribution of predictions.\n\n :param x_star: The position in feature space for which we would like to predict\n :param nsamples: The number of samples to draw from the distribution\n\n :return: Samples from the predicted distribution\n ' dist = self._distribution(x_star) return dist.rvs(size=(nsamples, x_star.shape[0])).T
def sample_prediction(self, x_star: np.ndarray, nsamples: int=(10 ** 3)) -> np.ndarray: '\n Draw samples from any predicted distribution to get a distribution of predictions.\n\n :param x_star: The position in feature space for which we would like to predict\n :param nsamples: The number of samples to draw from the distribution\n\n :return: Samples from the predicted distribution\n ' dist = self._distribution(x_star) return dist.rvs(size=(nsamples, x_star.shape[0])).T<|docstring|>Draw samples from any predicted distribution to get a distribution of predictions. :param x_star: The position in feature space for which we would like to predict :param nsamples: The number of samples to draw from the distribution :return: Samples from the predicted distribution<|endoftext|>
0a9731d7ce966e09b27cbd6e32e71c931d3e1d190b3e92e0b60ce525e37202d5
def fit(self, x_cal: np.ndarray, y_cal: np.ndarray, param_range: SearchBounds=SearchBounds(), optimiser_args: Optional[Dict[(Any, Any)]]=None) -> None: '\n Fit MACEst model using the calibration data.\n\n :param x_cal: Calibration data\n :param y_cal: Target values\n :param param_range: The bounds within which to search for MACEst parameters\n :param optimiser_args: Any arguments for the optimiser (see scipy.optimize)\n\n :return: None\n ' if (optimiser_args is None): optimiser_args = {} train_helper = _TrainingHelper(self, x_cal, y_cal, param_range) train_helper.fit(optimiser_args=optimiser_args)
Fit MACEst model using the calibration data. :param x_cal: Calibration data :param y_cal: Target values :param param_range: The bounds within which to search for MACEst parameters :param optimiser_args: Any arguments for the optimiser (see scipy.optimize) :return: None
src/macest/regression/models.py
fit
LaudateCorpus1/macest
88
python
def fit(self, x_cal: np.ndarray, y_cal: np.ndarray, param_range: SearchBounds=SearchBounds(), optimiser_args: Optional[Dict[(Any, Any)]]=None) -> None: '\n Fit MACEst model using the calibration data.\n\n :param x_cal: Calibration data\n :param y_cal: Target values\n :param param_range: The bounds within which to search for MACEst parameters\n :param optimiser_args: Any arguments for the optimiser (see scipy.optimize)\n\n :return: None\n ' if (optimiser_args is None): optimiser_args = {} train_helper = _TrainingHelper(self, x_cal, y_cal, param_range) train_helper.fit(optimiser_args=optimiser_args)
def fit(self, x_cal: np.ndarray, y_cal: np.ndarray, param_range: SearchBounds=SearchBounds(), optimiser_args: Optional[Dict[(Any, Any)]]=None) -> None: '\n Fit MACEst model using the calibration data.\n\n :param x_cal: Calibration data\n :param y_cal: Target values\n :param param_range: The bounds within which to search for MACEst parameters\n :param optimiser_args: Any arguments for the optimiser (see scipy.optimize)\n\n :return: None\n ' if (optimiser_args is None): optimiser_args = {} train_helper = _TrainingHelper(self, x_cal, y_cal, param_range) train_helper.fit(optimiser_args=optimiser_args)<|docstring|>Fit MACEst model using the calibration data. :param x_cal: Calibration data :param y_cal: Target values :param param_range: The bounds within which to search for MACEst parameters :param optimiser_args: Any arguments for the optimiser (see scipy.optimize) :return: None<|endoftext|>
2224ca360368f433799a365fbcec44cb4ab8c9a6f626b54bf0c642be7fdedbaa
def __init__(self, init_conf_model: ModelWithPredictionInterval, x_cal: np.ndarray, y_cal: np.ndarray, param_range: SearchBounds=SearchBounds()): '\n Init.\n\n :param init_conf_model: an initialised ModelWithConfidence object that we want to fit\n :param x_cal: The X variables that we will use to calibrate the confidence predictions\n :param y_cal: The target variables that we will use to calibrate the confidence predictions\n :param param_range: The bounds on the hyper-parameter space we want to search\n ' self.model = init_conf_model self.x_cal = x_cal self.y_cal = y_cal self.param_range = param_range self.prec_graph = self.model.build_graph() self.model.prec_graph = self.prec_graph (self.prec_dist, self.prec_ind) = self._prec_neighbours() self.model.point_preds = self.model.predict(self.x_cal)
Init. :param init_conf_model: an initialised ModelWithConfidence object that we want to fit :param x_cal: The X variables that we will use to calibrate the confidence predictions :param y_cal: The target variables that we will use to calibrate the confidence predictions :param param_range: The bounds on the hyper-parameter space we want to search
src/macest/regression/models.py
__init__
LaudateCorpus1/macest
88
python
def __init__(self, init_conf_model: ModelWithPredictionInterval, x_cal: np.ndarray, y_cal: np.ndarray, param_range: SearchBounds=SearchBounds()): '\n Init.\n\n :param init_conf_model: an initialised ModelWithConfidence object that we want to fit\n :param x_cal: The X variables that we will use to calibrate the confidence predictions\n :param y_cal: The target variables that we will use to calibrate the confidence predictions\n :param param_range: The bounds on the hyper-parameter space we want to search\n ' self.model = init_conf_model self.x_cal = x_cal self.y_cal = y_cal self.param_range = param_range self.prec_graph = self.model.build_graph() self.model.prec_graph = self.prec_graph (self.prec_dist, self.prec_ind) = self._prec_neighbours() self.model.point_preds = self.model.predict(self.x_cal)
def __init__(self, init_conf_model: ModelWithPredictionInterval, x_cal: np.ndarray, y_cal: np.ndarray, param_range: SearchBounds=SearchBounds()): '\n Init.\n\n :param init_conf_model: an initialised ModelWithConfidence object that we want to fit\n :param x_cal: The X variables that we will use to calibrate the confidence predictions\n :param y_cal: The target variables that we will use to calibrate the confidence predictions\n :param param_range: The bounds on the hyper-parameter space we want to search\n ' self.model = init_conf_model self.x_cal = x_cal self.y_cal = y_cal self.param_range = param_range self.prec_graph = self.model.build_graph() self.model.prec_graph = self.prec_graph (self.prec_dist, self.prec_ind) = self._prec_neighbours() self.model.point_preds = self.model.predict(self.x_cal)<|docstring|>Init. :param init_conf_model: an initialised ModelWithConfidence object that we want to fit :param x_cal: The X variables that we will use to calibrate the confidence predictions :param y_cal: The target variables that we will use to calibrate the confidence predictions :param param_range: The bounds on the hyper-parameter space we want to search<|endoftext|>
83ce0f3b334be2b4c6657183db978f82a5752a50e4aa9545e8cd68db76cb8669
def _prec_neighbours(self) -> Tuple[(Dict[(int, np.ndarray)], Dict[(int, np.ndarray)])]: '\n Pre-compute the nearest neighbours and their distances.\n\n :return:\n ' min_nbrs = self.param_range[2][0] max_nbrs = self.param_range[2][1] num_nbrs = np.arange(min_nbrs, (max_nbrs + 0.1), 1) x_cal_len_array = np.arange(len(self.x_cal)) dist_dict = {} ind_dict = {} max_neighbours = np.array(self.prec_graph.knnQueryBatch(self.x_cal, k=int(max_nbrs), num_threads=num_threads_available)) max_dist = max_neighbours[(x_cal_len_array, 1)] max_ind = max_neighbours[(x_cal_len_array, 0)] for k in num_nbrs: dist = max_dist[(x_cal_len_array, 0:int(k))] ind = max_ind[(x_cal_len_array, 0:int(k))] dist_dict[k] = dist ind_dict[k] = ind return (dist_dict, ind_dict)
Pre-compute the nearest neighbours and their distances. :return:
src/macest/regression/models.py
_prec_neighbours
LaudateCorpus1/macest
88
python
def _prec_neighbours(self) -> Tuple[(Dict[(int, np.ndarray)], Dict[(int, np.ndarray)])]: '\n Pre-compute the nearest neighbours and their distances.\n\n :return:\n ' min_nbrs = self.param_range[2][0] max_nbrs = self.param_range[2][1] num_nbrs = np.arange(min_nbrs, (max_nbrs + 0.1), 1) x_cal_len_array = np.arange(len(self.x_cal)) dist_dict = {} ind_dict = {} max_neighbours = np.array(self.prec_graph.knnQueryBatch(self.x_cal, k=int(max_nbrs), num_threads=num_threads_available)) max_dist = max_neighbours[(x_cal_len_array, 1)] max_ind = max_neighbours[(x_cal_len_array, 0)] for k in num_nbrs: dist = max_dist[(x_cal_len_array, 0:int(k))] ind = max_ind[(x_cal_len_array, 0:int(k))] dist_dict[k] = dist ind_dict[k] = ind return (dist_dict, ind_dict)
def _prec_neighbours(self) -> Tuple[(Dict[(int, np.ndarray)], Dict[(int, np.ndarray)])]: '\n Pre-compute the nearest neighbours and their distances.\n\n :return:\n ' min_nbrs = self.param_range[2][0] max_nbrs = self.param_range[2][1] num_nbrs = np.arange(min_nbrs, (max_nbrs + 0.1), 1) x_cal_len_array = np.arange(len(self.x_cal)) dist_dict = {} ind_dict = {} max_neighbours = np.array(self.prec_graph.knnQueryBatch(self.x_cal, k=int(max_nbrs), num_threads=num_threads_available)) max_dist = max_neighbours[(x_cal_len_array, 1)] max_ind = max_neighbours[(x_cal_len_array, 0)] for k in num_nbrs: dist = max_dist[(x_cal_len_array, 0:int(k))] ind = max_ind[(x_cal_len_array, 0:int(k))] dist_dict[k] = dist ind_dict[k] = ind return (dist_dict, ind_dict)<|docstring|>Pre-compute the nearest neighbours and their distances. :return:<|endoftext|>
81cfdf55047817d268e8042bbdbfe8e2f86fe5d826fdcfe8af3f24487103af23
def set_macest_model_params(self) -> MacestPredIntervalModelParams: '\n Return MACEst parameter values.\n\n :return:\n ' params = MacestPredIntervalModelParams(num_neighbours=self.model._num_neighbours, alpha=self.model._alpha, beta=self.model._beta) self.model.macest_model_params = params return params
Return MACEst parameter values. :return:
src/macest/regression/models.py
set_macest_model_params
LaudateCorpus1/macest
88
python
def set_macest_model_params(self) -> MacestPredIntervalModelParams: '\n Return MACEst parameter values.\n\n :return:\n ' params = MacestPredIntervalModelParams(num_neighbours=self.model._num_neighbours, alpha=self.model._alpha, beta=self.model._beta) self.model.macest_model_params = params return params
def set_macest_model_params(self) -> MacestPredIntervalModelParams: '\n Return MACEst parameter values.\n\n :return:\n ' params = MacestPredIntervalModelParams(num_neighbours=self.model._num_neighbours, alpha=self.model._alpha, beta=self.model._beta) self.model.macest_model_params = params return params<|docstring|>Return MACEst parameter values. :return:<|endoftext|>
590103797f63711407248ae42b19ec9ec29fb407a612b8c69937a4e106e23f5a
def loss_func(self, params: MacestPredIntervalModelParams) -> float: '\n Calculate the loss for a given set of parameters, this will then be optimised when fit is called.\n\n :param params: A tuple containing the model hyper-paramters\n :return:\n ' (self.model._alpha, self.model._beta, self.model._num_neighbours) = params self.model._num_neighbours = int(np.round(self.model._num_neighbours)) self.model.prec_graph = self.prec_graph self.model._distance_to_nn = self.prec_dist[self.model._num_neighbours] self.model._ind_of_nn = self.prec_ind[self.model._num_neighbours] return picp_loss(self.model, self.x_cal, self.y_cal)
Calculate the loss for a given set of parameters, this will then be optimised when fit is called. :param params: A tuple containing the model hyper-paramters :return:
src/macest/regression/models.py
loss_func
LaudateCorpus1/macest
88
python
def loss_func(self, params: MacestPredIntervalModelParams) -> float: '\n Calculate the loss for a given set of parameters, this will then be optimised when fit is called.\n\n :param params: A tuple containing the model hyper-paramters\n :return:\n ' (self.model._alpha, self.model._beta, self.model._num_neighbours) = params self.model._num_neighbours = int(np.round(self.model._num_neighbours)) self.model.prec_graph = self.prec_graph self.model._distance_to_nn = self.prec_dist[self.model._num_neighbours] self.model._ind_of_nn = self.prec_ind[self.model._num_neighbours] return picp_loss(self.model, self.x_cal, self.y_cal)
def loss_func(self, params: MacestPredIntervalModelParams) -> float: '\n Calculate the loss for a given set of parameters, this will then be optimised when fit is called.\n\n :param params: A tuple containing the model hyper-paramters\n :return:\n ' (self.model._alpha, self.model._beta, self.model._num_neighbours) = params self.model._num_neighbours = int(np.round(self.model._num_neighbours)) self.model.prec_graph = self.prec_graph self.model._distance_to_nn = self.prec_dist[self.model._num_neighbours] self.model._ind_of_nn = self.prec_ind[self.model._num_neighbours] return picp_loss(self.model, self.x_cal, self.y_cal)<|docstring|>Calculate the loss for a given set of parameters, this will then be optimised when fit is called. :param params: A tuple containing the model hyper-paramters :return:<|endoftext|>
cb93c6be87f02f1c7fbfdb431199189420e4b7c90c249ab975ee6bc0de4c73c8
def fit(self, optimiser: Literal['de']='de', optimiser_args: Optional[Dict[(Any, Any)]]=None) -> ModelWithPredictionInterval: '\n Fit MACEst parameters.\n\n :param optimiser: The optimisation method\n :param optimiser_args: Any arguments for the optimisation strategy\n :return: A ModelWithConfidence object with the hyper-parameters that minimises the loss function\n ' if (optimiser == 'de'): result = differential_evolution(self.loss_func, self.param_range, **optimiser_args) else: raise ValueError('The only optimisation method currently implemented is differential evolution') log.info(f'min_loss = {result.fun}') (alpha, beta, k) = result.x k = int(np.round(k, 0)) log.info(f' best_alpha: {alpha}') log.info(f' best_beta: {beta}') log.info(f' best_k: {k}') self.model._alpha = alpha self.model._beta = beta self.model._num_neighbours = int(np.round(k)) self.model.macest_model_params = self.set_macest_model_params() self.model._distance_to_nn = None self.model._ind_of_nn = None self.model.point_preds = None return self.model
Fit MACEst parameters. :param optimiser: The optimisation method :param optimiser_args: Any arguments for the optimisation strategy :return: A ModelWithConfidence object with the hyper-parameters that minimises the loss function
src/macest/regression/models.py
fit
LaudateCorpus1/macest
88
python
def fit(self, optimiser: Literal['de']='de', optimiser_args: Optional[Dict[(Any, Any)]]=None) -> ModelWithPredictionInterval: '\n Fit MACEst parameters.\n\n :param optimiser: The optimisation method\n :param optimiser_args: Any arguments for the optimisation strategy\n :return: A ModelWithConfidence object with the hyper-parameters that minimises the loss function\n ' if (optimiser == 'de'): result = differential_evolution(self.loss_func, self.param_range, **optimiser_args) else: raise ValueError('The only optimisation method currently implemented is differential evolution') log.info(f'min_loss = {result.fun}') (alpha, beta, k) = result.x k = int(np.round(k, 0)) log.info(f' best_alpha: {alpha}') log.info(f' best_beta: {beta}') log.info(f' best_k: {k}') self.model._alpha = alpha self.model._beta = beta self.model._num_neighbours = int(np.round(k)) self.model.macest_model_params = self.set_macest_model_params() self.model._distance_to_nn = None self.model._ind_of_nn = None self.model.point_preds = None return self.model
def fit(self, optimiser: Literal['de']='de', optimiser_args: Optional[Dict[(Any, Any)]]=None) -> ModelWithPredictionInterval: '\n Fit MACEst parameters.\n\n :param optimiser: The optimisation method\n :param optimiser_args: Any arguments for the optimisation strategy\n :return: A ModelWithConfidence object with the hyper-parameters that minimises the loss function\n ' if (optimiser == 'de'): result = differential_evolution(self.loss_func, self.param_range, **optimiser_args) else: raise ValueError('The only optimisation method currently implemented is differential evolution') log.info(f'min_loss = {result.fun}') (alpha, beta, k) = result.x k = int(np.round(k, 0)) log.info(f' best_alpha: {alpha}') log.info(f' best_beta: {beta}') log.info(f' best_k: {k}') self.model._alpha = alpha self.model._beta = beta self.model._num_neighbours = int(np.round(k)) self.model.macest_model_params = self.set_macest_model_params() self.model._distance_to_nn = None self.model._ind_of_nn = None self.model.point_preds = None return self.model<|docstring|>Fit MACEst parameters. :param optimiser: The optimisation method :param optimiser_args: Any arguments for the optimisation strategy :return: A ModelWithConfidence object with the hyper-parameters that minimises the loss function<|endoftext|>
0f3ac0f2c187ee26fca408270948cc67ed2eddbd2d2d2bb480de2a69cab41d61
@staticmethod def getInstance(path_word2vec): ' Static access method. ' if (Singleton.__instance == None): Singleton(path_word2vec) return Singleton.__instance
Static access method.
acr_module/acr/acr_module_service.py
getInstance
13520505/bigdataproj
0
python
@staticmethod def getInstance(path_word2vec): ' ' if (Singleton.__instance == None): Singleton(path_word2vec) return Singleton.__instance
@staticmethod def getInstance(path_word2vec): ' ' if (Singleton.__instance == None): Singleton(path_word2vec) return Singleton.__instance<|docstring|>Static access method.<|endoftext|>
9f17bbb432163ce5cd4c1c54cb0d970687ecd30d9aa3625d6de84b6a7ee1646d
def __init__(self, path_word2vec): ' Virtually private constructor. ' if (Singleton.__instance != None): raise Exception('This class is a singleton!') else: print('Load model word2vec') self = load_word_embeddings_vietnamese(path_word2vec, binary=False) Singleton.__instance = self
Virtually private constructor.
acr_module/acr/acr_module_service.py
__init__
13520505/bigdataproj
0
python
def __init__(self, path_word2vec): ' ' if (Singleton.__instance != None): raise Exception('This class is a singleton!') else: print('Load model word2vec') self = load_word_embeddings_vietnamese(path_word2vec, binary=False) Singleton.__instance = self
def __init__(self, path_word2vec): ' ' if (Singleton.__instance != None): raise Exception('This class is a singleton!') else: print('Load model word2vec') self = load_word_embeddings_vietnamese(path_word2vec, binary=False) Singleton.__instance = self<|docstring|>Virtually private constructor.<|endoftext|>
10b6e1c299d1d8e7a2d1395a04b72f3c66c66e17646e31944ad38897c914da1e
def __init__(self, nid: dtfcore.NodeId, prediction_col: str, volatility_col: str, portfolio: omportfo.AbstractPortfolio, process_forecasts_config: Dict[(str, Any)]) -> None: '\n Parameters have the same meaning as in `process_forecasts()`.\n ' super().__init__(nid) self._prediction_col = prediction_col self._volatility_col = volatility_col self._portfolio = portfolio self._process_forecasts_config = process_forecasts_config
Parameters have the same meaning as in `process_forecasts()`.
dataflow/system/sink_nodes.py
__init__
alphamatic/amp
5
python
def __init__(self, nid: dtfcore.NodeId, prediction_col: str, volatility_col: str, portfolio: omportfo.AbstractPortfolio, process_forecasts_config: Dict[(str, Any)]) -> None: '\n \n ' super().__init__(nid) self._prediction_col = prediction_col self._volatility_col = volatility_col self._portfolio = portfolio self._process_forecasts_config = process_forecasts_config
def __init__(self, nid: dtfcore.NodeId, prediction_col: str, volatility_col: str, portfolio: omportfo.AbstractPortfolio, process_forecasts_config: Dict[(str, Any)]) -> None: '\n \n ' super().__init__(nid) self._prediction_col = prediction_col self._volatility_col = volatility_col self._portfolio = portfolio self._process_forecasts_config = process_forecasts_config<|docstring|>Parameters have the same meaning as in `process_forecasts()`.<|endoftext|>
3be47214399f080e693553046a94c38d734624d1860cc4789a6c5b191ed18640
def __init__(self, writer=None, trace=False, echo=False): 'stdout must be an object implementing a write() method' super(Logger, self).__init__() if (writer == Logger.BUFFER): self.writer = Buffer() else: self.writer = (writer or sys.__stdout__) self.trace = trace self._echo = echo self.echo = (echo and (self.writer is not sys.__stdout__)) self.buffer = BinaryWriter(self)
stdout must be an object implementing a write() method
yue/core/logger.py
__init__
nsetzer/YueMusicPlayer
0
python
def __init__(self, writer=None, trace=False, echo=False): super(Logger, self).__init__() if (writer == Logger.BUFFER): self.writer = Buffer() else: self.writer = (writer or sys.__stdout__) self.trace = trace self._echo = echo self.echo = (echo and (self.writer is not sys.__stdout__)) self.buffer = BinaryWriter(self)
def __init__(self, writer=None, trace=False, echo=False): super(Logger, self).__init__() if (writer == Logger.BUFFER): self.writer = Buffer() else: self.writer = (writer or sys.__stdout__) self.trace = trace self._echo = echo self.echo = (echo and (self.writer is not sys.__stdout__)) self.buffer = BinaryWriter(self)<|docstring|>stdout must be an object implementing a write() method<|endoftext|>
6a320d0ece7f505a02922b98bf0678924426ece917b8ea5ac8b7b6b4ed764f34
def setUp(self): 'Set up Camera class with fixtures' super(TestCamera, self).setUp() self.gen1_fixture = json.loads(self.fixtures['accessories'])[0] self.test_camera = Camera(self.logi, self.gen1_fixture) self.logi.auth_provider = self.get_authorized_auth_provider()
Set up Camera class with fixtures
tests/test_camera.py
setUp
evanjd/python-logi-circle
22
python
def setUp(self): super(TestCamera, self).setUp() self.gen1_fixture = json.loads(self.fixtures['accessories'])[0] self.test_camera = Camera(self.logi, self.gen1_fixture) self.logi.auth_provider = self.get_authorized_auth_provider()
def setUp(self): super(TestCamera, self).setUp() self.gen1_fixture = json.loads(self.fixtures['accessories'])[0] self.test_camera = Camera(self.logi, self.gen1_fixture) self.logi.auth_provider = self.get_authorized_auth_provider()<|docstring|>Set up Camera class with fixtures<|endoftext|>
fba20ee1808a9381482d310152fae48681e56b2c053c92fb2b14ad4d731bce86
def tearDown(self): 'Remove test Camera instance' super(TestCamera, self).tearDown() del self.gen1_fixture del self.test_camera
Remove test Camera instance
tests/test_camera.py
tearDown
evanjd/python-logi-circle
22
python
def tearDown(self): super(TestCamera, self).tearDown() del self.gen1_fixture del self.test_camera
def tearDown(self): super(TestCamera, self).tearDown() del self.gen1_fixture del self.test_camera<|docstring|>Remove test Camera instance<|endoftext|>
d2cc483d6a3e1cb648b48882c009fe5cfa1e85850d63727d3d8d6a0ea9488bb0
def test_camera_props(self): 'Camera props should match fixtures' gen1_fixture = self.gen1_fixture self.assertEqual(self.test_camera.id, gen1_fixture['accessoryId']) self.assertEqual(self.test_camera.name, gen1_fixture['name']) self.assertEqual(self.test_camera.mac_address, gen1_fixture['mac']) gen1_fixture['cfg'] = gen1_fixture['configuration'] self.assertEqual(self.test_camera.model, gen1_fixture['modelNumber']) self.assertEqual(self.test_camera.mount, GEN_1_MOUNT) self.assertEqual(self.test_camera.connected, gen1_fixture['isConnected']) self.assertEqual(self.test_camera.streaming, gen1_fixture['cfg']['streamingEnabled']) self.assertEqual(self.test_camera.timezone, gen1_fixture['cfg']['timeZone']) self.assertEqual(self.test_camera.battery_level, gen1_fixture['cfg']['batteryLevel']) self.assertEqual(self.test_camera.charging, gen1_fixture['cfg']['batteryCharging']) self.assertEqual(self.test_camera.battery_saving, gen1_fixture['cfg']['saveBattery']) self.assertEqual(self.test_camera.signal_strength_percentage, gen1_fixture['cfg']['wifiSignalStrength']) self.assertEqual(self.test_camera.firmware, gen1_fixture['cfg']['firmwareVersion']) self.assertEqual(self.test_camera.microphone, gen1_fixture['cfg']['microphoneOn']) self.assertEqual(self.test_camera.microphone_gain, gen1_fixture['cfg']['microphoneGain']) self.assertEqual(self.test_camera.speaker, gen1_fixture['cfg']['speakerOn']) self.assertEqual(self.test_camera.speaker_volume, gen1_fixture['cfg']['speakerVolume']) self.assertEqual(self.test_camera.led, gen1_fixture['cfg']['ledEnabled']) self.assertEqual(self.test_camera.recording, (not gen1_fixture['cfg']['privacyMode']))
Camera props should match fixtures
tests/test_camera.py
test_camera_props
evanjd/python-logi-circle
22
python
def test_camera_props(self): gen1_fixture = self.gen1_fixture self.assertEqual(self.test_camera.id, gen1_fixture['accessoryId']) self.assertEqual(self.test_camera.name, gen1_fixture['name']) self.assertEqual(self.test_camera.mac_address, gen1_fixture['mac']) gen1_fixture['cfg'] = gen1_fixture['configuration'] self.assertEqual(self.test_camera.model, gen1_fixture['modelNumber']) self.assertEqual(self.test_camera.mount, GEN_1_MOUNT) self.assertEqual(self.test_camera.connected, gen1_fixture['isConnected']) self.assertEqual(self.test_camera.streaming, gen1_fixture['cfg']['streamingEnabled']) self.assertEqual(self.test_camera.timezone, gen1_fixture['cfg']['timeZone']) self.assertEqual(self.test_camera.battery_level, gen1_fixture['cfg']['batteryLevel']) self.assertEqual(self.test_camera.charging, gen1_fixture['cfg']['batteryCharging']) self.assertEqual(self.test_camera.battery_saving, gen1_fixture['cfg']['saveBattery']) self.assertEqual(self.test_camera.signal_strength_percentage, gen1_fixture['cfg']['wifiSignalStrength']) self.assertEqual(self.test_camera.firmware, gen1_fixture['cfg']['firmwareVersion']) self.assertEqual(self.test_camera.microphone, gen1_fixture['cfg']['microphoneOn']) self.assertEqual(self.test_camera.microphone_gain, gen1_fixture['cfg']['microphoneGain']) self.assertEqual(self.test_camera.speaker, gen1_fixture['cfg']['speakerOn']) self.assertEqual(self.test_camera.speaker_volume, gen1_fixture['cfg']['speakerVolume']) self.assertEqual(self.test_camera.led, gen1_fixture['cfg']['ledEnabled']) self.assertEqual(self.test_camera.recording, (not gen1_fixture['cfg']['privacyMode']))
def test_camera_props(self): gen1_fixture = self.gen1_fixture self.assertEqual(self.test_camera.id, gen1_fixture['accessoryId']) self.assertEqual(self.test_camera.name, gen1_fixture['name']) self.assertEqual(self.test_camera.mac_address, gen1_fixture['mac']) gen1_fixture['cfg'] = gen1_fixture['configuration'] self.assertEqual(self.test_camera.model, gen1_fixture['modelNumber']) self.assertEqual(self.test_camera.mount, GEN_1_MOUNT) self.assertEqual(self.test_camera.connected, gen1_fixture['isConnected']) self.assertEqual(self.test_camera.streaming, gen1_fixture['cfg']['streamingEnabled']) self.assertEqual(self.test_camera.timezone, gen1_fixture['cfg']['timeZone']) self.assertEqual(self.test_camera.battery_level, gen1_fixture['cfg']['batteryLevel']) self.assertEqual(self.test_camera.charging, gen1_fixture['cfg']['batteryCharging']) self.assertEqual(self.test_camera.battery_saving, gen1_fixture['cfg']['saveBattery']) self.assertEqual(self.test_camera.signal_strength_percentage, gen1_fixture['cfg']['wifiSignalStrength']) self.assertEqual(self.test_camera.firmware, gen1_fixture['cfg']['firmwareVersion']) self.assertEqual(self.test_camera.microphone, gen1_fixture['cfg']['microphoneOn']) self.assertEqual(self.test_camera.microphone_gain, gen1_fixture['cfg']['microphoneGain']) self.assertEqual(self.test_camera.speaker, gen1_fixture['cfg']['speakerOn']) self.assertEqual(self.test_camera.speaker_volume, gen1_fixture['cfg']['speakerVolume']) self.assertEqual(self.test_camera.led, gen1_fixture['cfg']['ledEnabled']) self.assertEqual(self.test_camera.recording, (not gen1_fixture['cfg']['privacyMode']))<|docstring|>Camera props should match fixtures<|endoftext|>
09ce8d4d7a1cea7728e372daf59f0c81f42d1bf73a953117f94ad36cccfd53b4
def test_missing_mandatory_props(self): 'Camera should raise if mandatory props missing' incomplete_camera = {'name': 'Incomplete cam', 'accessoryId': '123', 'configuration': {'stuff': '123'}} with self.assertRaises(KeyError): Camera(self.logi, incomplete_camera)
Camera should raise if mandatory props missing
tests/test_camera.py
test_missing_mandatory_props
evanjd/python-logi-circle
22
python
def test_missing_mandatory_props(self): incomplete_camera = {'name': 'Incomplete cam', 'accessoryId': '123', 'configuration': {'stuff': '123'}} with self.assertRaises(KeyError): Camera(self.logi, incomplete_camera)
def test_missing_mandatory_props(self): incomplete_camera = {'name': 'Incomplete cam', 'accessoryId': '123', 'configuration': {'stuff': '123'}} with self.assertRaises(KeyError): Camera(self.logi, incomplete_camera)<|docstring|>Camera should raise if mandatory props missing<|endoftext|>
b95f847bd198b6101aa0e6105e2512a062b1e67884c77491cba02fb243837224
def test_missing_optional_props(self): 'Camera should not raise if optional props missing' incomplete_camera = {'name': 'Incomplete cam', 'accessoryId': '123', 'mac': 'ABC', 'configuration': {'modelNumber': '1234', 'batteryLevel': 1}, 'isConnected': False} camera = Camera(self.logi, incomplete_camera) self.assertEqual(camera.name, 'Incomplete cam') self.assertEqual(camera.id, '123') self.assertEqual(camera.mac_address, 'ABC') self.assertIsNone(camera.charging) self.assertIsNone(camera.battery_saving) self.assertIsNone(camera.signal_strength_percentage) self.assertIsNone(camera.signal_strength_category) self.assertIsNone(camera.firmware) self.assertIsNone(camera.microphone_gain) self.assertIsNone(camera.speaker_volume) self.assertFalse(camera.streaming) self.assertFalse(camera.microphone) self.assertFalse(camera.speaker) self.assertFalse(camera.led) self.assertTrue(camera.recording) self.assertEqual(camera.timezone, 'UTC') self.assertEqual(camera.mount, MOUNT_UNKNOWN)
Camera should not raise if optional props missing
tests/test_camera.py
test_missing_optional_props
evanjd/python-logi-circle
22
python
def test_missing_optional_props(self): incomplete_camera = {'name': 'Incomplete cam', 'accessoryId': '123', 'mac': 'ABC', 'configuration': {'modelNumber': '1234', 'batteryLevel': 1}, 'isConnected': False} camera = Camera(self.logi, incomplete_camera) self.assertEqual(camera.name, 'Incomplete cam') self.assertEqual(camera.id, '123') self.assertEqual(camera.mac_address, 'ABC') self.assertIsNone(camera.charging) self.assertIsNone(camera.battery_saving) self.assertIsNone(camera.signal_strength_percentage) self.assertIsNone(camera.signal_strength_category) self.assertIsNone(camera.firmware) self.assertIsNone(camera.microphone_gain) self.assertIsNone(camera.speaker_volume) self.assertFalse(camera.streaming) self.assertFalse(camera.microphone) self.assertFalse(camera.speaker) self.assertFalse(camera.led) self.assertTrue(camera.recording) self.assertEqual(camera.timezone, 'UTC') self.assertEqual(camera.mount, MOUNT_UNKNOWN)
def test_missing_optional_props(self): incomplete_camera = {'name': 'Incomplete cam', 'accessoryId': '123', 'mac': 'ABC', 'configuration': {'modelNumber': '1234', 'batteryLevel': 1}, 'isConnected': False} camera = Camera(self.logi, incomplete_camera) self.assertEqual(camera.name, 'Incomplete cam') self.assertEqual(camera.id, '123') self.assertEqual(camera.mac_address, 'ABC') self.assertIsNone(camera.charging) self.assertIsNone(camera.battery_saving) self.assertIsNone(camera.signal_strength_percentage) self.assertIsNone(camera.signal_strength_category) self.assertIsNone(camera.firmware) self.assertIsNone(camera.microphone_gain) self.assertIsNone(camera.speaker_volume) self.assertFalse(camera.streaming) self.assertFalse(camera.microphone) self.assertFalse(camera.speaker) self.assertFalse(camera.led) self.assertTrue(camera.recording) self.assertEqual(camera.timezone, 'UTC') self.assertEqual(camera.mount, MOUNT_UNKNOWN)<|docstring|>Camera should not raise if optional props missing<|endoftext|>
ef2679057cef6ed4b7eace57cd06de398fa370077045584a62469e6a389bc75b
def test_camera_mount_prop(self): 'Test mount property correctly infers type from other props' gen2_wired_fixture = json.loads(self.fixtures['accessories'])[1] gen2_wirefree_fixture = json.loads(self.fixtures['accessories'])[2] gen1_camera = Camera(self.logi, self.gen1_fixture) gen2_wired_camera = Camera(self.logi, gen2_wired_fixture) gen2_wirefree_camera = Camera(self.logi, gen2_wirefree_fixture) self.assertEqual(gen1_camera.mount, GEN_1_MOUNT) self.assertEqual(gen1_camera.model, GEN_1_MODEL) self.assertEqual(gen2_wired_camera.mount, GEN_2_MOUNT_WIRE) self.assertEqual(gen2_wired_camera.battery_level, (- 1)) self.assertEqual(gen2_wired_camera.model, GEN_2_MODEL) self.assertEqual(gen2_wirefree_camera.mount, GEN_2_MOUNT_WIREFREE) self.assertNotEqual(gen2_wirefree_camera.battery_level, (- 1)) self.assertEqual(gen2_wirefree_camera.model, GEN_2_MODEL)
Test mount property correctly infers type from other props
tests/test_camera.py
test_camera_mount_prop
evanjd/python-logi-circle
22
python
def test_camera_mount_prop(self): gen2_wired_fixture = json.loads(self.fixtures['accessories'])[1] gen2_wirefree_fixture = json.loads(self.fixtures['accessories'])[2] gen1_camera = Camera(self.logi, self.gen1_fixture) gen2_wired_camera = Camera(self.logi, gen2_wired_fixture) gen2_wirefree_camera = Camera(self.logi, gen2_wirefree_fixture) self.assertEqual(gen1_camera.mount, GEN_1_MOUNT) self.assertEqual(gen1_camera.model, GEN_1_MODEL) self.assertEqual(gen2_wired_camera.mount, GEN_2_MOUNT_WIRE) self.assertEqual(gen2_wired_camera.battery_level, (- 1)) self.assertEqual(gen2_wired_camera.model, GEN_2_MODEL) self.assertEqual(gen2_wirefree_camera.mount, GEN_2_MOUNT_WIREFREE) self.assertNotEqual(gen2_wirefree_camera.battery_level, (- 1)) self.assertEqual(gen2_wirefree_camera.model, GEN_2_MODEL)
def test_camera_mount_prop(self): gen2_wired_fixture = json.loads(self.fixtures['accessories'])[1] gen2_wirefree_fixture = json.loads(self.fixtures['accessories'])[2] gen1_camera = Camera(self.logi, self.gen1_fixture) gen2_wired_camera = Camera(self.logi, gen2_wired_fixture) gen2_wirefree_camera = Camera(self.logi, gen2_wirefree_fixture) self.assertEqual(gen1_camera.mount, GEN_1_MOUNT) self.assertEqual(gen1_camera.model, GEN_1_MODEL) self.assertEqual(gen2_wired_camera.mount, GEN_2_MOUNT_WIRE) self.assertEqual(gen2_wired_camera.battery_level, (- 1)) self.assertEqual(gen2_wired_camera.model, GEN_2_MODEL) self.assertEqual(gen2_wirefree_camera.mount, GEN_2_MOUNT_WIREFREE) self.assertNotEqual(gen2_wirefree_camera.battery_level, (- 1)) self.assertEqual(gen2_wirefree_camera.model, GEN_2_MODEL)<|docstring|>Test mount property correctly infers type from other props<|endoftext|>
f89f2c2dd40ec9d003dc493d9993258e6a59c18374b78949e0a46bc2ce5bdc24
def test_signal_strength_categories(self): 'Test friendly signal strength categorisation' self.test_camera._attrs['signal_strength_percentage'] = 99 self.assertEqual(self.test_camera.signal_strength_category, 'Excellent') self.test_camera._attrs['signal_strength_percentage'] = 79 self.assertEqual(self.test_camera.signal_strength_category, 'Good') self.test_camera._attrs['signal_strength_percentage'] = 59 self.assertEqual(self.test_camera.signal_strength_category, 'Fair') self.test_camera._attrs['signal_strength_percentage'] = 39 self.assertEqual(self.test_camera.signal_strength_category, 'Poor') self.test_camera._attrs['signal_strength_percentage'] = 19 self.assertEqual(self.test_camera.signal_strength_category, 'Bad') self.test_camera._attrs['signal_strength_percentage'] = None self.assertIsNone(self.test_camera.signal_strength_category)
Test friendly signal strength categorisation
tests/test_camera.py
test_signal_strength_categories
evanjd/python-logi-circle
22
python
def test_signal_strength_categories(self): self.test_camera._attrs['signal_strength_percentage'] = 99 self.assertEqual(self.test_camera.signal_strength_category, 'Excellent') self.test_camera._attrs['signal_strength_percentage'] = 79 self.assertEqual(self.test_camera.signal_strength_category, 'Good') self.test_camera._attrs['signal_strength_percentage'] = 59 self.assertEqual(self.test_camera.signal_strength_category, 'Fair') self.test_camera._attrs['signal_strength_percentage'] = 39 self.assertEqual(self.test_camera.signal_strength_category, 'Poor') self.test_camera._attrs['signal_strength_percentage'] = 19 self.assertEqual(self.test_camera.signal_strength_category, 'Bad') self.test_camera._attrs['signal_strength_percentage'] = None self.assertIsNone(self.test_camera.signal_strength_category)
def test_signal_strength_categories(self): self.test_camera._attrs['signal_strength_percentage'] = 99 self.assertEqual(self.test_camera.signal_strength_category, 'Excellent') self.test_camera._attrs['signal_strength_percentage'] = 79 self.assertEqual(self.test_camera.signal_strength_category, 'Good') self.test_camera._attrs['signal_strength_percentage'] = 59 self.assertEqual(self.test_camera.signal_strength_category, 'Fair') self.test_camera._attrs['signal_strength_percentage'] = 39 self.assertEqual(self.test_camera.signal_strength_category, 'Poor') self.test_camera._attrs['signal_strength_percentage'] = 19 self.assertEqual(self.test_camera.signal_strength_category, 'Bad') self.test_camera._attrs['signal_strength_percentage'] = None self.assertIsNone(self.test_camera.signal_strength_category)<|docstring|>Test friendly signal strength categorisation<|endoftext|>
bf319c95e744e9d8d1cea940bb253f759ad54556277b16aca6a8c737156d6040
def test_update(self): 'Test polling for changes in camera properties' endpoint = ('%s/%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'get', aresponses.Response(status=200, text=self.fixtures['accessory'], headers={'content-type': 'application/json'})) self.assertEqual(self.test_camera.battery_level, 100) self.assertEqual(self.test_camera.signal_strength_percentage, 74) (await self.test_camera.update()) self.assertEqual(self.test_camera.battery_level, 99) self.assertEqual(self.test_camera.signal_strength_percentage, 88) self.loop.run_until_complete(run_test())
Test polling for changes in camera properties
tests/test_camera.py
test_update
evanjd/python-logi-circle
22
python
def test_update(self): endpoint = ('%s/%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'get', aresponses.Response(status=200, text=self.fixtures['accessory'], headers={'content-type': 'application/json'})) self.assertEqual(self.test_camera.battery_level, 100) self.assertEqual(self.test_camera.signal_strength_percentage, 74) (await self.test_camera.update()) self.assertEqual(self.test_camera.battery_level, 99) self.assertEqual(self.test_camera.signal_strength_percentage, 88) self.loop.run_until_complete(run_test())
def test_update(self): endpoint = ('%s/%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'get', aresponses.Response(status=200, text=self.fixtures['accessory'], headers={'content-type': 'application/json'})) self.assertEqual(self.test_camera.battery_level, 100) self.assertEqual(self.test_camera.signal_strength_percentage, 74) (await self.test_camera.update()) self.assertEqual(self.test_camera.battery_level, 99) self.assertEqual(self.test_camera.signal_strength_percentage, 88) self.loop.run_until_complete(run_test())<|docstring|>Test polling for changes in camera properties<|endoftext|>
01eb0edc715e9b94a9b5c6c4ae739f37096728cf4299a010eafb264133f54814
def test_set_config_valid(self): 'Test updating configuration for camera' endpoint = ('%s/%s%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id, CONFIG_ENDPOINT)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'put', aresponses.Response(status=200)) arsps.add(API_HOST, endpoint, 'put', aresponses.Response(status=200)) arsps.add(API_HOST, endpoint, 'put', aresponses.Response(status=200)) self.assertEqual(self.test_camera.streaming, True) (await self.test_camera.set_config('streaming', False)) self.assertEqual(self.test_camera.streaming, False) self.assertEqual(self.test_camera.recording, True) (await self.test_camera.set_config('recording_disabled', True)) self.assertEqual(self.test_camera.recording, False) (await self.test_camera.set_config('recording_disabled', False)) self.assertEqual(self.test_camera.recording, True) self.loop.run_until_complete(run_test())
Test updating configuration for camera
tests/test_camera.py
test_set_config_valid
evanjd/python-logi-circle
22
python
def test_set_config_valid(self): endpoint = ('%s/%s%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id, CONFIG_ENDPOINT)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'put', aresponses.Response(status=200)) arsps.add(API_HOST, endpoint, 'put', aresponses.Response(status=200)) arsps.add(API_HOST, endpoint, 'put', aresponses.Response(status=200)) self.assertEqual(self.test_camera.streaming, True) (await self.test_camera.set_config('streaming', False)) self.assertEqual(self.test_camera.streaming, False) self.assertEqual(self.test_camera.recording, True) (await self.test_camera.set_config('recording_disabled', True)) self.assertEqual(self.test_camera.recording, False) (await self.test_camera.set_config('recording_disabled', False)) self.assertEqual(self.test_camera.recording, True) self.loop.run_until_complete(run_test())
def test_set_config_valid(self): endpoint = ('%s/%s%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id, CONFIG_ENDPOINT)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'put', aresponses.Response(status=200)) arsps.add(API_HOST, endpoint, 'put', aresponses.Response(status=200)) arsps.add(API_HOST, endpoint, 'put', aresponses.Response(status=200)) self.assertEqual(self.test_camera.streaming, True) (await self.test_camera.set_config('streaming', False)) self.assertEqual(self.test_camera.streaming, False) self.assertEqual(self.test_camera.recording, True) (await self.test_camera.set_config('recording_disabled', True)) self.assertEqual(self.test_camera.recording, False) (await self.test_camera.set_config('recording_disabled', False)) self.assertEqual(self.test_camera.recording, True) self.loop.run_until_complete(run_test())<|docstring|>Test updating configuration for camera<|endoftext|>
ef3ea98d92d3a044f02d48f854689e3182b20252204cdb9d89709ab4c4476845
def test_set_config_error(self): 'Test updating configuration for camera' endpoint = ('%s/%s%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id, CONFIG_ENDPOINT)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'put', aresponses.Response(status=500)) self.assertEqual(self.test_camera.streaming, True) with self.assertRaises(ClientResponseError): (await self.test_camera.set_config('streaming', False)) self.assertEqual(self.test_camera.streaming, True) self.loop.run_until_complete(run_test())
Test updating configuration for camera
tests/test_camera.py
test_set_config_error
evanjd/python-logi-circle
22
python
def test_set_config_error(self): endpoint = ('%s/%s%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id, CONFIG_ENDPOINT)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'put', aresponses.Response(status=500)) self.assertEqual(self.test_camera.streaming, True) with self.assertRaises(ClientResponseError): (await self.test_camera.set_config('streaming', False)) self.assertEqual(self.test_camera.streaming, True) self.loop.run_until_complete(run_test())
def test_set_config_error(self): endpoint = ('%s/%s%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id, CONFIG_ENDPOINT)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'put', aresponses.Response(status=500)) self.assertEqual(self.test_camera.streaming, True) with self.assertRaises(ClientResponseError): (await self.test_camera.set_config('streaming', False)) self.assertEqual(self.test_camera.streaming, True) self.loop.run_until_complete(run_test())<|docstring|>Test updating configuration for camera<|endoftext|>
eb1fda87037e0171dc18332ef35971588e345a3d1079a3d703d9f826975ea2f0
def test_set_config_invalid(self): 'Test updating invalid configuration prop for camera' async def run_test(): with self.assertRaises(NameError): (await self.test_camera.set_config('firmware', 'Windows 95')) with self.assertRaises(NameError): (await self.test_camera.set_config('nonsense', 123)) self.loop.run_until_complete(run_test())
Test updating invalid configuration prop for camera
tests/test_camera.py
test_set_config_invalid
evanjd/python-logi-circle
22
python
def test_set_config_invalid(self): async def run_test(): with self.assertRaises(NameError): (await self.test_camera.set_config('firmware', 'Windows 95')) with self.assertRaises(NameError): (await self.test_camera.set_config('nonsense', 123)) self.loop.run_until_complete(run_test())
def test_set_config_invalid(self): async def run_test(): with self.assertRaises(NameError): (await self.test_camera.set_config('firmware', 'Windows 95')) with self.assertRaises(NameError): (await self.test_camera.set_config('nonsense', 123)) self.loop.run_until_complete(run_test())<|docstring|>Test updating invalid configuration prop for camera<|endoftext|>
6a7edc98ed92aed7da78f9a0a92e884398ef0700f5c63b79e2111d4c37d23f7f
def test_get_last_activity(self): 'Test get last activity property' endpoint = ('%s/%s%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id, ACTIVITIES_ENDPOINT)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'post', aresponses.Response(status=200, text=self.fixtures['activities'], headers={'content-type': 'application/json'})) self.assertIsInstance((await self.test_camera.get_last_activity()), Activity) self.loop.run_until_complete(run_test())
Test get last activity property
tests/test_camera.py
test_get_last_activity
evanjd/python-logi-circle
22
python
def test_get_last_activity(self): endpoint = ('%s/%s%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id, ACTIVITIES_ENDPOINT)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'post', aresponses.Response(status=200, text=self.fixtures['activities'], headers={'content-type': 'application/json'})) self.assertIsInstance((await self.test_camera.get_last_activity()), Activity) self.loop.run_until_complete(run_test())
def test_get_last_activity(self): endpoint = ('%s/%s%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id, ACTIVITIES_ENDPOINT)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'post', aresponses.Response(status=200, text=self.fixtures['activities'], headers={'content-type': 'application/json'})) self.assertIsInstance((await self.test_camera.get_last_activity()), Activity) self.loop.run_until_complete(run_test())<|docstring|>Test get last activity property<|endoftext|>
f5148708d8ba92c8e51b6405121a761aaf3c061bc1fefa57268a3366063b462a
def test_no_last_activity(self): 'Test last_activity property when no activities reported from server' endpoint = ('%s/%s%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id, ACTIVITIES_ENDPOINT)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'post', aresponses.Response(status=200, text='{ "activities" : [] }', headers={'content-type': 'application/json'})) self.assertIsNone((await self.test_camera.get_last_activity())) self.loop.run_until_complete(run_test())
Test last_activity property when no activities reported from server
tests/test_camera.py
test_no_last_activity
evanjd/python-logi-circle
22
python
def test_no_last_activity(self): endpoint = ('%s/%s%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id, ACTIVITIES_ENDPOINT)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'post', aresponses.Response(status=200, text='{ "activities" : [] }', headers={'content-type': 'application/json'})) self.assertIsNone((await self.test_camera.get_last_activity())) self.loop.run_until_complete(run_test())
def test_no_last_activity(self): endpoint = ('%s/%s%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id, ACTIVITIES_ENDPOINT)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'post', aresponses.Response(status=200, text='{ "activities" : [] }', headers={'content-type': 'application/json'})) self.assertIsNone((await self.test_camera.get_last_activity())) self.loop.run_until_complete(run_test())<|docstring|>Test last_activity property when no activities reported from server<|endoftext|>
a27b899d0f04e25b2107ac718e600ccaff66be325f7cc4c273e3707c0d973641
def test_query_activity_history(self): 'Test get last activity property' endpoint = ('%s/%s%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id, ACTIVITIES_ENDPOINT)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'post', aresponses.Response(status=200, text=self.fixtures['activities'], headers={'content-type': 'application/json'})) activities = (await self.test_camera.query_activity_history(property_filter='prop_filter', date_filter=datetime.now(), date_operator='>', limit=100)) self.assertIsInstance(activities, list) for activity in activities: self.assertIsInstance(activity, Activity) self.loop.run_until_complete(run_test())
Test get last activity property
tests/test_camera.py
test_query_activity_history
evanjd/python-logi-circle
22
python
def test_query_activity_history(self): endpoint = ('%s/%s%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id, ACTIVITIES_ENDPOINT)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'post', aresponses.Response(status=200, text=self.fixtures['activities'], headers={'content-type': 'application/json'})) activities = (await self.test_camera.query_activity_history(property_filter='prop_filter', date_filter=datetime.now(), date_operator='>', limit=100)) self.assertIsInstance(activities, list) for activity in activities: self.assertIsInstance(activity, Activity) self.loop.run_until_complete(run_test())
def test_query_activity_history(self): endpoint = ('%s/%s%s' % (ACCESSORIES_ENDPOINT, self.test_camera.id, ACTIVITIES_ENDPOINT)) async def run_test(): async with aresponses.ResponsesMockServer(loop=self.loop) as arsps: arsps.add(API_HOST, endpoint, 'post', aresponses.Response(status=200, text=self.fixtures['activities'], headers={'content-type': 'application/json'})) activities = (await self.test_camera.query_activity_history(property_filter='prop_filter', date_filter=datetime.now(), date_operator='>', limit=100)) self.assertIsInstance(activities, list) for activity in activities: self.assertIsInstance(activity, Activity) self.loop.run_until_complete(run_test())<|docstring|>Test get last activity property<|endoftext|>
e69a4605309c26360d46c9af9b3a9343d92ba8b94f6dced05740bc4361af92c8
def test_activity_api_limits(self): 'Test requesting more activities then API permits' async def run_test(): with self.assertRaises(ValueError): (await self.test_camera.query_activity_history(limit=(ACTIVITY_API_LIMIT + 1))) self.loop.run_until_complete(run_test())
Test requesting more activities then API permits
tests/test_camera.py
test_activity_api_limits
evanjd/python-logi-circle
22
python
def test_activity_api_limits(self): async def run_test(): with self.assertRaises(ValueError): (await self.test_camera.query_activity_history(limit=(ACTIVITY_API_LIMIT + 1))) self.loop.run_until_complete(run_test())
def test_activity_api_limits(self): async def run_test(): with self.assertRaises(ValueError): (await self.test_camera.query_activity_history(limit=(ACTIVITY_API_LIMIT + 1))) self.loop.run_until_complete(run_test())<|docstring|>Test requesting more activities then API permits<|endoftext|>
f314b0a75aec52551d1ba087b5b604a85c04bc37492f9ae411a0293c9a4abbc8
def test_activity_reject_bad_type(self): "Test rejection of date filter if it's not a datetime object" async def run_test(): with self.assertRaises(TypeError): (await self.test_camera.query_activity_history(date_filter='2018-01-01')) self.loop.run_until_complete(run_test())
Test rejection of date filter if it's not a datetime object
tests/test_camera.py
test_activity_reject_bad_type
evanjd/python-logi-circle
22
python
def test_activity_reject_bad_type(self): async def run_test(): with self.assertRaises(TypeError): (await self.test_camera.query_activity_history(date_filter='2018-01-01')) self.loop.run_until_complete(run_test())
def test_activity_reject_bad_type(self): async def run_test(): with self.assertRaises(TypeError): (await self.test_camera.query_activity_history(date_filter='2018-01-01')) self.loop.run_until_complete(run_test())<|docstring|>Test rejection of date filter if it's not a datetime object<|endoftext|>
6da9198afed6ba6aaabf2b5618965e97e7759869cde9d846c42d2391dc1887a8
def test_slugify_safe_name(self): 'Returns camera ID if camera name string empty after slugification.' valid_name = 'My camera' invalid_name = '!@#$%^&*()' self.test_camera._attrs['name'] = valid_name self.assertEqual(self.test_camera.slugify_safe_name, valid_name) self.test_camera._attrs['name'] = invalid_name self.assertEqual(self.test_camera.slugify_safe_name, self.test_camera.id) self.test_camera._attrs['name'] = ' ' self.assertEqual(self.test_camera.slugify_safe_name, self.test_camera.id) self.test_camera._attrs['name'] = '' self.assertEqual(self.test_camera.slugify_safe_name, self.test_camera.id)
Returns camera ID if camera name string empty after slugification.
tests/test_camera.py
test_slugify_safe_name
evanjd/python-logi-circle
22
python
def test_slugify_safe_name(self): valid_name = 'My camera' invalid_name = '!@#$%^&*()' self.test_camera._attrs['name'] = valid_name self.assertEqual(self.test_camera.slugify_safe_name, valid_name) self.test_camera._attrs['name'] = invalid_name self.assertEqual(self.test_camera.slugify_safe_name, self.test_camera.id) self.test_camera._attrs['name'] = ' ' self.assertEqual(self.test_camera.slugify_safe_name, self.test_camera.id) self.test_camera._attrs['name'] = self.assertEqual(self.test_camera.slugify_safe_name, self.test_camera.id)
def test_slugify_safe_name(self): valid_name = 'My camera' invalid_name = '!@#$%^&*()' self.test_camera._attrs['name'] = valid_name self.assertEqual(self.test_camera.slugify_safe_name, valid_name) self.test_camera._attrs['name'] = invalid_name self.assertEqual(self.test_camera.slugify_safe_name, self.test_camera.id) self.test_camera._attrs['name'] = ' ' self.assertEqual(self.test_camera.slugify_safe_name, self.test_camera.id) self.test_camera._attrs['name'] = self.assertEqual(self.test_camera.slugify_safe_name, self.test_camera.id)<|docstring|>Returns camera ID if camera name string empty after slugification.<|endoftext|>
87e02738f0f682bd4adebeac4b1cb2cbcbfa97429b77b877b97abab7fbe2d2e9
@property def depth(self): '\n Or statements can only be evaluated together, an so depth is minimum\n depth of shared prefix.\n ' common_prefix = ''.join((c[0] for c in itertools.takewhile((lambda x: all(((x[0] == y) for y in x))), zip(*[op.field for op in self.operands if isinstance(op, Constraint)])))) return min([(len(common_prefix.split('/')) - 1), min((op.depth for op in self.operands))])
Or statements can only be evaluated together, an so depth is minimum depth of shared prefix.
mensor/constraints.py
depth
airbnb/mensor
48
python
@property def depth(self): '\n Or statements can only be evaluated together, an so depth is minimum\n depth of shared prefix.\n ' common_prefix = .join((c[0] for c in itertools.takewhile((lambda x: all(((x[0] == y) for y in x))), zip(*[op.field for op in self.operands if isinstance(op, Constraint)])))) return min([(len(common_prefix.split('/')) - 1), min((op.depth for op in self.operands))])
@property def depth(self): '\n Or statements can only be evaluated together, an so depth is minimum\n depth of shared prefix.\n ' common_prefix = .join((c[0] for c in itertools.takewhile((lambda x: all(((x[0] == y) for y in x))), zip(*[op.field for op in self.operands if isinstance(op, Constraint)])))) return min([(len(common_prefix.split('/')) - 1), min((op.depth for op in self.operands))])<|docstring|>Or statements can only be evaluated together, an so depth is minimum depth of shared prefix.<|endoftext|>
e6e3d5e73fd56db3ba04ed21d147f2c4a9fa62a0d59243da16edd8154e9bf4f1
def setUp(self): '\n Set up method to run before each test case\n '
Set up method to run before each test case
tests/test_article.py
setUp
markmurimi/news-high
0
python
def setUp(self): '\n \n '
def setUp(self): '\n \n '<|docstring|>Set up method to run before each test case<|endoftext|>
75c51193d62879a7aafdd861e4a9f3f72409541b49a5adef5963f51e221e7b5c
def test_instance(self): '\n This test is to check \n ' self.assertTrue(isinstance(self.new_article, NewsArticles))
This test is to check
tests/test_article.py
test_instance
markmurimi/news-high
0
python
def test_instance(self): '\n \n ' self.assertTrue(isinstance(self.new_article, NewsArticles))
def test_instance(self): '\n \n ' self.assertTrue(isinstance(self.new_article, NewsArticles))<|docstring|>This test is to check<|endoftext|>
cd957f19c035f5791b0dc7a03757ef3cd4c4ff153dfe06633bc345a23db8e027
def test_init(self): '\n Test case to check if the Article class is initialised\n '
Test case to check if the Article class is initialised
tests/test_article.py
test_init
markmurimi/news-high
0
python
def test_init(self): '\n \n '
def test_init(self): '\n \n '<|docstring|>Test case to check if the Article class is initialised<|endoftext|>
62072611dfca22c6169d14313c38d4fb8c30034cfe7db3f265df43064a6c6533
def keras2tf(h5_path, pd_save_path): '\n Convert a Keras model to a Tensorflow model.\n :param h5_path: Path to Keras model (.h5)\n :param pd_save_path: Path to save the Tensorflow model\n ' model = tf.keras.models.load_model(h5_path) model.save(pd_save_path)
Convert a Keras model to a Tensorflow model. :param h5_path: Path to Keras model (.h5) :param pd_save_path: Path to save the Tensorflow model
jetson_nano/converter.py
keras2tf
t9s9/BeeMeter
0
python
def keras2tf(h5_path, pd_save_path): '\n Convert a Keras model to a Tensorflow model.\n :param h5_path: Path to Keras model (.h5)\n :param pd_save_path: Path to save the Tensorflow model\n ' model = tf.keras.models.load_model(h5_path) model.save(pd_save_path)
def keras2tf(h5_path, pd_save_path): '\n Convert a Keras model to a Tensorflow model.\n :param h5_path: Path to Keras model (.h5)\n :param pd_save_path: Path to save the Tensorflow model\n ' model = tf.keras.models.load_model(h5_path) model.save(pd_save_path)<|docstring|>Convert a Keras model to a Tensorflow model. :param h5_path: Path to Keras model (.h5) :param pd_save_path: Path to save the Tensorflow model<|endoftext|>
a30fecb08011d58774c286669b461178254903952fd79b3c5b8e8389454d1f85
def convert(model_path, precision_mode='FP16'): '\n Convert a Tensorflow model to a TensorRT model. This specific for the device to run the model on.\n :param model_path:\n :param precision_mode:\n :return:\n ' model_path = Path(model_path) conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS conversion_params = conversion_params._replace(max_workspace_size_bytes=(1 << 16)) conversion_params = conversion_params._replace(precision_mode=precision_mode) t1 = time.time() converter = trt.TrtGraphConverterV2(input_saved_model_dir=str(model_path), conversion_params=conversion_params) converter.convert() print('Model converted in {0:.3f}'.format((time.time() - t1))) def my_input_fn(): inp1 = tf.zeros(shape=(1, 200, 400, 3), dtype=tf.float32) (yield [inp1]) converter.build(input_fn=my_input_fn) print('Model build.') converter.save(str((model_path.parent / 'tensorRT_{0}'.format(precision_mode)))) print('Model saved.')
Convert a Tensorflow model to a TensorRT model. This specific for the device to run the model on. :param model_path: :param precision_mode: :return:
jetson_nano/converter.py
convert
t9s9/BeeMeter
0
python
def convert(model_path, precision_mode='FP16'): '\n Convert a Tensorflow model to a TensorRT model. This specific for the device to run the model on.\n :param model_path:\n :param precision_mode:\n :return:\n ' model_path = Path(model_path) conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS conversion_params = conversion_params._replace(max_workspace_size_bytes=(1 << 16)) conversion_params = conversion_params._replace(precision_mode=precision_mode) t1 = time.time() converter = trt.TrtGraphConverterV2(input_saved_model_dir=str(model_path), conversion_params=conversion_params) converter.convert() print('Model converted in {0:.3f}'.format((time.time() - t1))) def my_input_fn(): inp1 = tf.zeros(shape=(1, 200, 400, 3), dtype=tf.float32) (yield [inp1]) converter.build(input_fn=my_input_fn) print('Model build.') converter.save(str((model_path.parent / 'tensorRT_{0}'.format(precision_mode)))) print('Model saved.')
def convert(model_path, precision_mode='FP16'): '\n Convert a Tensorflow model to a TensorRT model. This specific for the device to run the model on.\n :param model_path:\n :param precision_mode:\n :return:\n ' model_path = Path(model_path) conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS conversion_params = conversion_params._replace(max_workspace_size_bytes=(1 << 16)) conversion_params = conversion_params._replace(precision_mode=precision_mode) t1 = time.time() converter = trt.TrtGraphConverterV2(input_saved_model_dir=str(model_path), conversion_params=conversion_params) converter.convert() print('Model converted in {0:.3f}'.format((time.time() - t1))) def my_input_fn(): inp1 = tf.zeros(shape=(1, 200, 400, 3), dtype=tf.float32) (yield [inp1]) converter.build(input_fn=my_input_fn) print('Model build.') converter.save(str((model_path.parent / 'tensorRT_{0}'.format(precision_mode)))) print('Model saved.')<|docstring|>Convert a Tensorflow model to a TensorRT model. This specific for the device to run the model on. :param model_path: :param precision_mode: :return:<|endoftext|>
cf69cf970547b7823334eb6c0212326d7d32708d2c72c07e1eb573c315e359af
def palindromePairs(self, words: List[str]) -> List[List[int]]: '\n Brute force, i, j and then check palindrom\n O(N^2 * L)\n\n Reverse the str, and then check O(N * L). Does it work actually?\n Check: map str -> idx\n\n |---s1---|---s2--| |---s1---|-s2-| |-s1-|---s2---|\n Need to check whether part of the str is palindrome.\n Part of str -> Trie.\n How to check part of the str. Useful\n\n Better way of checking palindrome? Infamouse Manacher\n\n word_i | word_j\n abc pppp | cba\n abc | pppp cba\n\n If palindrome suffix in work_i, we only need to check the "abc" against word_j\n Similarly for palindrome prefix in word_j\n\n Construct Trie for word_j reversely, since word_j is being checked\n ' root = TrieNode() for (idx, w) in enumerate(words): cur = root for i in range((len(w) - 1), (- 1), (- 1)): if self.is_palindrome(w, 0, (i + 1)): cur.pali_prefix_idxes.append(idx) cur = cur.children[w[i]] cur.pali_prefix_idxes.append(idx) cur.word_idx = idx ret = [] for (idx, w) in enumerate(words): cur = root for i in range(len(w)): if (self.is_palindrome(w, i, len(w)) and (cur.word_idx is not None) and (cur.word_idx != idx)): ret.append([idx, cur.word_idx]) cur = cur.children.get(w[i], None) if (cur is None): break else: for idx_j in cur.pali_prefix_idxes: if (idx != idx_j): ret.append([idx, idx_j]) return ret
Brute force, i, j and then check palindrom O(N^2 * L) Reverse the str, and then check O(N * L). Does it work actually? Check: map str -> idx |---s1---|---s2--| |---s1---|-s2-| |-s1-|---s2---| Need to check whether part of the str is palindrome. Part of str -> Trie. How to check part of the str. Useful Better way of checking palindrome? Infamouse Manacher word_i | word_j abc pppp | cba abc | pppp cba If palindrome suffix in work_i, we only need to check the "abc" against word_j Similarly for palindrome prefix in word_j Construct Trie for word_j reversely, since word_j is being checked
336 Palindrome Pairs.py
palindromePairs
scorpionpd/LeetCode-all
872
python
def palindromePairs(self, words: List[str]) -> List[List[int]]: '\n Brute force, i, j and then check palindrom\n O(N^2 * L)\n\n Reverse the str, and then check O(N * L). Does it work actually?\n Check: map str -> idx\n\n |---s1---|---s2--| |---s1---|-s2-| |-s1-|---s2---|\n Need to check whether part of the str is palindrome.\n Part of str -> Trie.\n How to check part of the str. Useful\n\n Better way of checking palindrome? Infamouse Manacher\n\n word_i | word_j\n abc pppp | cba\n abc | pppp cba\n\n If palindrome suffix in work_i, we only need to check the "abc" against word_j\n Similarly for palindrome prefix in word_j\n\n Construct Trie for word_j reversely, since word_j is being checked\n ' root = TrieNode() for (idx, w) in enumerate(words): cur = root for i in range((len(w) - 1), (- 1), (- 1)): if self.is_palindrome(w, 0, (i + 1)): cur.pali_prefix_idxes.append(idx) cur = cur.children[w[i]] cur.pali_prefix_idxes.append(idx) cur.word_idx = idx ret = [] for (idx, w) in enumerate(words): cur = root for i in range(len(w)): if (self.is_palindrome(w, i, len(w)) and (cur.word_idx is not None) and (cur.word_idx != idx)): ret.append([idx, cur.word_idx]) cur = cur.children.get(w[i], None) if (cur is None): break else: for idx_j in cur.pali_prefix_idxes: if (idx != idx_j): ret.append([idx, idx_j]) return ret
def palindromePairs(self, words: List[str]) -> List[List[int]]: '\n Brute force, i, j and then check palindrom\n O(N^2 * L)\n\n Reverse the str, and then check O(N * L). Does it work actually?\n Check: map str -> idx\n\n |---s1---|---s2--| |---s1---|-s2-| |-s1-|---s2---|\n Need to check whether part of the str is palindrome.\n Part of str -> Trie.\n How to check part of the str. Useful\n\n Better way of checking palindrome? Infamouse Manacher\n\n word_i | word_j\n abc pppp | cba\n abc | pppp cba\n\n If palindrome suffix in work_i, we only need to check the "abc" against word_j\n Similarly for palindrome prefix in word_j\n\n Construct Trie for word_j reversely, since word_j is being checked\n ' root = TrieNode() for (idx, w) in enumerate(words): cur = root for i in range((len(w) - 1), (- 1), (- 1)): if self.is_palindrome(w, 0, (i + 1)): cur.pali_prefix_idxes.append(idx) cur = cur.children[w[i]] cur.pali_prefix_idxes.append(idx) cur.word_idx = idx ret = [] for (idx, w) in enumerate(words): cur = root for i in range(len(w)): if (self.is_palindrome(w, i, len(w)) and (cur.word_idx is not None) and (cur.word_idx != idx)): ret.append([idx, cur.word_idx]) cur = cur.children.get(w[i], None) if (cur is None): break else: for idx_j in cur.pali_prefix_idxes: if (idx != idx_j): ret.append([idx, idx_j]) return ret<|docstring|>Brute force, i, j and then check palindrom O(N^2 * L) Reverse the str, and then check O(N * L). Does it work actually? Check: map str -> idx |---s1---|---s2--| |---s1---|-s2-| |-s1-|---s2---| Need to check whether part of the str is palindrome. Part of str -> Trie. How to check part of the str. Useful Better way of checking palindrome? Infamouse Manacher word_i | word_j abc pppp | cba abc | pppp cba If palindrome suffix in work_i, we only need to check the "abc" against word_j Similarly for palindrome prefix in word_j Construct Trie for word_j reversely, since word_j is being checked<|endoftext|>
e90caae6b440fc76bd5176aaf70d751df2c1ec65174b55b8c9b13820ed55ee68
def _getImport(libs, val): '\n Dynamically imports a library into memory for referencing during configuration file parsing.\n \n Args:\n libs (list): The list of libraries already imported.\n val (str): The name of the new library to import.\n \n Returns:\n (str): The name of the newly imported library. If library is already imported then returns None.\n ' if ((val is not None) and isinstance(val, str) and (val.strip() != '')): if ('.' in val): parts = val.split('.') parts.pop() ret = '.'.join(parts) if (ret in libs): return None libs.append(ret) return ret return None
Dynamically imports a library into memory for referencing during configuration file parsing. Args: libs (list): The list of libraries already imported. val (str): The name of the new library to import. Returns: (str): The name of the newly imported library. If library is already imported then returns None.
src/karen_brain/__init__.py
_getImport
lnxusr1/karen-brain
2
python
def _getImport(libs, val): '\n Dynamically imports a library into memory for referencing during configuration file parsing.\n \n Args:\n libs (list): The list of libraries already imported.\n val (str): The name of the new library to import.\n \n Returns:\n (str): The name of the newly imported library. If library is already imported then returns None.\n ' if ((val is not None) and isinstance(val, str) and (val.strip() != )): if ('.' in val): parts = val.split('.') parts.pop() ret = '.'.join(parts) if (ret in libs): return None libs.append(ret) return ret return None
def _getImport(libs, val): '\n Dynamically imports a library into memory for referencing during configuration file parsing.\n \n Args:\n libs (list): The list of libraries already imported.\n val (str): The name of the new library to import.\n \n Returns:\n (str): The name of the newly imported library. If library is already imported then returns None.\n ' if ((val is not None) and isinstance(val, str) and (val.strip() != )): if ('.' in val): parts = val.split('.') parts.pop() ret = '.'.join(parts) if (ret in libs): return None libs.append(ret) return ret return None<|docstring|>Dynamically imports a library into memory for referencing during configuration file parsing. Args: libs (list): The list of libraries already imported. val (str): The name of the new library to import. Returns: (str): The name of the newly imported library. If library is already imported then returns None.<|endoftext|>
d35b7542963f1dfe182cf5a914a821c39d2876d54ff2174fba9aa9b8804966c4
def start(configFile=None, log_level='info', log_file=None, x_wait=True): '\n Static method to start a new instance of karen based on a provided configuration file.\n \n Args:\n configFile (str): Path and Name of the JSON configuration file.\n log_level (str): The level of logging to provide (critical, error, warning, info, and debug). (optional)\n log_file (str): Path and Name of the log file to create (otherwise prints all messages to stderr). (optional)\n x_wait (bool): Set to True to wait until engine has exited before returning.\n ' if ((configFile is None) or (str(configFile).lower() == 'audio')): configFile = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', 'basic_config.json')) elif (str(configFile).lower() == 'video'): configFile = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', 'basic_config_video.json')) configFile = os.path.abspath(configFile) if (not os.path.isfile(configFile)): raise Exception('Configuration file does not exist.') quit(1) try: with open(configFile, 'r') as fp: myConfig = json.load(fp) except: raise Exception('Configuration file does not to be properly formatted') quit(1) logging_level = logging.DEBUG if (str(log_level).lower() == 'debug'): logging_level = logging.DEBUG elif (str(log_level).lower() == 'info'): logging_level = logging.INFO elif (str(log_level).lower() == 'warning'): logging_level = logging.WARNING elif (str(log_level).lower() == 'error'): logging_level = logging.ERROR elif (str(log_level).lower() == 'critical'): logging_level = logging.CRITICAL logging.basicConfig(datefmt='%Y-%m-%d %H:%M:%S %z', filename=log_file, format='%(asctime)s %(name)-12s - %(levelname)-9s - %(message)s', level=logging.DEBUG) logging.getLogger('requests').setLevel(logging.INFO) logging.getLogger('urllib3').setLevel(logging.INFO) logging.getLogger('CTYPES').setLevel(logging_level) logging.getLogger('HTTP').setLevel(logging_level) logging.getLogger('BRAIN').setLevel(logging_level) 
logging.getLogger('SKILLMANAGER').setLevel(logging_level) brain = None importedLibs = [] skillFolder = None authenticationKey = None if (('settings' in myConfig) and ('libraryFolder' in myConfig['settings']) and (myConfig['settings']['libraryFolder'] is not None)): if os.path.isdir(str(myConfig['settings']['libraryFolder'])): sys.path.insert(0, os.path.abspath(str(myConfig['settings']['libraryFolder']))) if (('settings' in myConfig) and ('skillsFolder' in myConfig['settings']) and (myConfig['settings']['skillsFolder'] is not None)): if os.path.isdir(str(myConfig['settings']['skillsFolder'])): sys.path.insert(0, os.path.abspath(str(myConfig['settings']['skillsFolder']))) skillFolder = os.path.abspath(str(myConfig['settings']['skillsFolder'])) if (('settings' in myConfig) and ('authentication' in myConfig['settings']) and isinstance(myConfig['settings']['authentication'], dict) and ('key' in myConfig['settings']['authentication'])): authentication = myConfig['settings']['authentication'] if ('brain' in myConfig): tcp_port = (myConfig['brain']['tcp_port'] if (('tcp_port' in myConfig['brain']) and (myConfig['brain']['tcp_port'] is not None)) else 8080) hostname = (myConfig['brain']['hostname'] if (('hostname' in myConfig['brain']) and (myConfig['brain']['hostname'] is not None)) else '') use_ssl = (myConfig['brain']['ssl']['use_ssl'] if (('ssl' in myConfig['brain']) and ('use_ssl' in myConfig['brain']['ssl'])) else False) ssl_cert_file = (myConfig['brain']['ssl']['cert_file'] if (('ssl' in myConfig['brain']) and ('cert_file' in myConfig['brain']['ssl'])) else None) ssl_key_file = (myConfig['brain']['ssl']['key_file'] if (('ssl' in myConfig['brain']) and ('key_file' in myConfig['brain']['ssl'])) else None) groupName = (myConfig['brain']['groupName'] if ('groupName' in myConfig['brain']) else None) startUPNP = (myConfig['brain']['startUPNP'] if ('startUPNP' in myConfig['brain']) else True) brain_url = ((((('http' + ('s' if use_ssl else '')) + '://') + (hostname if 
((hostname is not None) and (hostname != '')) else 'localhost')) + ':') + str(tcp_port)) if (not use_ssl): ssl_cert_file = None ssl_key_file = None if (('start' not in myConfig['brain']) or myConfig['brain']['start']): try: brain = Brain(tcp_port=tcp_port, hostname=hostname, ssl_cert_file=ssl_cert_file, ssl_key_file=ssl_key_file, groupName=groupName, authentication=authentication) brain.initialize(skillFolder, startUPNP=startUPNP) brain.start() except NameError: pass if (x_wait and (brain is not None)): brain.wait() return brain
Static method to start a new instance of karen based on a provided configuration file. Args: configFile (str): Path and Name of the JSON configuration file. log_level (str): The level of logging to provide (critical, error, warning, info, and debug). (optional) log_file (str): Path and Name of the log file to create (otherwise prints all messages to stderr). (optional) x_wait (bool): Set to True to wait until engine has exited before returning.
src/karen_brain/__init__.py
start
lnxusr1/karen-brain
2
python
def start(configFile=None, log_level='info', log_file=None, x_wait=True): '\n Static method to start a new instance of karen based on a provided configuration file.\n \n Args:\n configFile (str): Path and Name of the JSON configuration file.\n log_level (str): The level of logging to provide (critical, error, warning, info, and debug). (optional)\n log_file (str): Path and Name of the log file to create (otherwise prints all messages to stderr). (optional)\n x_wait (bool): Set to True to wait until engine has exited before returning.\n ' if ((configFile is None) or (str(configFile).lower() == 'audio')): configFile = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', 'basic_config.json')) elif (str(configFile).lower() == 'video'): configFile = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', 'basic_config_video.json')) configFile = os.path.abspath(configFile) if (not os.path.isfile(configFile)): raise Exception('Configuration file does not exist.') quit(1) try: with open(configFile, 'r') as fp: myConfig = json.load(fp) except: raise Exception('Configuration file does not to be properly formatted') quit(1) logging_level = logging.DEBUG if (str(log_level).lower() == 'debug'): logging_level = logging.DEBUG elif (str(log_level).lower() == 'info'): logging_level = logging.INFO elif (str(log_level).lower() == 'warning'): logging_level = logging.WARNING elif (str(log_level).lower() == 'error'): logging_level = logging.ERROR elif (str(log_level).lower() == 'critical'): logging_level = logging.CRITICAL logging.basicConfig(datefmt='%Y-%m-%d %H:%M:%S %z', filename=log_file, format='%(asctime)s %(name)-12s - %(levelname)-9s - %(message)s', level=logging.DEBUG) logging.getLogger('requests').setLevel(logging.INFO) logging.getLogger('urllib3').setLevel(logging.INFO) logging.getLogger('CTYPES').setLevel(logging_level) logging.getLogger('HTTP').setLevel(logging_level) logging.getLogger('BRAIN').setLevel(logging_level) 
logging.getLogger('SKILLMANAGER').setLevel(logging_level) brain = None importedLibs = [] skillFolder = None authenticationKey = None if (('settings' in myConfig) and ('libraryFolder' in myConfig['settings']) and (myConfig['settings']['libraryFolder'] is not None)): if os.path.isdir(str(myConfig['settings']['libraryFolder'])): sys.path.insert(0, os.path.abspath(str(myConfig['settings']['libraryFolder']))) if (('settings' in myConfig) and ('skillsFolder' in myConfig['settings']) and (myConfig['settings']['skillsFolder'] is not None)): if os.path.isdir(str(myConfig['settings']['skillsFolder'])): sys.path.insert(0, os.path.abspath(str(myConfig['settings']['skillsFolder']))) skillFolder = os.path.abspath(str(myConfig['settings']['skillsFolder'])) if (('settings' in myConfig) and ('authentication' in myConfig['settings']) and isinstance(myConfig['settings']['authentication'], dict) and ('key' in myConfig['settings']['authentication'])): authentication = myConfig['settings']['authentication'] if ('brain' in myConfig): tcp_port = (myConfig['brain']['tcp_port'] if (('tcp_port' in myConfig['brain']) and (myConfig['brain']['tcp_port'] is not None)) else 8080) hostname = (myConfig['brain']['hostname'] if (('hostname' in myConfig['brain']) and (myConfig['brain']['hostname'] is not None)) else ) use_ssl = (myConfig['brain']['ssl']['use_ssl'] if (('ssl' in myConfig['brain']) and ('use_ssl' in myConfig['brain']['ssl'])) else False) ssl_cert_file = (myConfig['brain']['ssl']['cert_file'] if (('ssl' in myConfig['brain']) and ('cert_file' in myConfig['brain']['ssl'])) else None) ssl_key_file = (myConfig['brain']['ssl']['key_file'] if (('ssl' in myConfig['brain']) and ('key_file' in myConfig['brain']['ssl'])) else None) groupName = (myConfig['brain']['groupName'] if ('groupName' in myConfig['brain']) else None) startUPNP = (myConfig['brain']['startUPNP'] if ('startUPNP' in myConfig['brain']) else True) brain_url = ((((('http' + ('s' if use_ssl else )) + '://') + (hostname if ((hostname 
is not None) and (hostname != )) else 'localhost')) + ':') + str(tcp_port)) if (not use_ssl): ssl_cert_file = None ssl_key_file = None if (('start' not in myConfig['brain']) or myConfig['brain']['start']): try: brain = Brain(tcp_port=tcp_port, hostname=hostname, ssl_cert_file=ssl_cert_file, ssl_key_file=ssl_key_file, groupName=groupName, authentication=authentication) brain.initialize(skillFolder, startUPNP=startUPNP) brain.start() except NameError: pass if (x_wait and (brain is not None)): brain.wait() return brain
def start(configFile=None, log_level='info', log_file=None, x_wait=True): '\n Static method to start a new instance of karen based on a provided configuration file.\n \n Args:\n configFile (str): Path and Name of the JSON configuration file.\n log_level (str): The level of logging to provide (critical, error, warning, info, and debug). (optional)\n log_file (str): Path and Name of the log file to create (otherwise prints all messages to stderr). (optional)\n x_wait (bool): Set to True to wait until engine has exited before returning.\n ' if ((configFile is None) or (str(configFile).lower() == 'audio')): configFile = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', 'basic_config.json')) elif (str(configFile).lower() == 'video'): configFile = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', 'basic_config_video.json')) configFile = os.path.abspath(configFile) if (not os.path.isfile(configFile)): raise Exception('Configuration file does not exist.') quit(1) try: with open(configFile, 'r') as fp: myConfig = json.load(fp) except: raise Exception('Configuration file does not to be properly formatted') quit(1) logging_level = logging.DEBUG if (str(log_level).lower() == 'debug'): logging_level = logging.DEBUG elif (str(log_level).lower() == 'info'): logging_level = logging.INFO elif (str(log_level).lower() == 'warning'): logging_level = logging.WARNING elif (str(log_level).lower() == 'error'): logging_level = logging.ERROR elif (str(log_level).lower() == 'critical'): logging_level = logging.CRITICAL logging.basicConfig(datefmt='%Y-%m-%d %H:%M:%S %z', filename=log_file, format='%(asctime)s %(name)-12s - %(levelname)-9s - %(message)s', level=logging.DEBUG) logging.getLogger('requests').setLevel(logging.INFO) logging.getLogger('urllib3').setLevel(logging.INFO) logging.getLogger('CTYPES').setLevel(logging_level) logging.getLogger('HTTP').setLevel(logging_level) logging.getLogger('BRAIN').setLevel(logging_level) 
logging.getLogger('SKILLMANAGER').setLevel(logging_level) brain = None importedLibs = [] skillFolder = None authenticationKey = None if (('settings' in myConfig) and ('libraryFolder' in myConfig['settings']) and (myConfig['settings']['libraryFolder'] is not None)): if os.path.isdir(str(myConfig['settings']['libraryFolder'])): sys.path.insert(0, os.path.abspath(str(myConfig['settings']['libraryFolder']))) if (('settings' in myConfig) and ('skillsFolder' in myConfig['settings']) and (myConfig['settings']['skillsFolder'] is not None)): if os.path.isdir(str(myConfig['settings']['skillsFolder'])): sys.path.insert(0, os.path.abspath(str(myConfig['settings']['skillsFolder']))) skillFolder = os.path.abspath(str(myConfig['settings']['skillsFolder'])) if (('settings' in myConfig) and ('authentication' in myConfig['settings']) and isinstance(myConfig['settings']['authentication'], dict) and ('key' in myConfig['settings']['authentication'])): authentication = myConfig['settings']['authentication'] if ('brain' in myConfig): tcp_port = (myConfig['brain']['tcp_port'] if (('tcp_port' in myConfig['brain']) and (myConfig['brain']['tcp_port'] is not None)) else 8080) hostname = (myConfig['brain']['hostname'] if (('hostname' in myConfig['brain']) and (myConfig['brain']['hostname'] is not None)) else ) use_ssl = (myConfig['brain']['ssl']['use_ssl'] if (('ssl' in myConfig['brain']) and ('use_ssl' in myConfig['brain']['ssl'])) else False) ssl_cert_file = (myConfig['brain']['ssl']['cert_file'] if (('ssl' in myConfig['brain']) and ('cert_file' in myConfig['brain']['ssl'])) else None) ssl_key_file = (myConfig['brain']['ssl']['key_file'] if (('ssl' in myConfig['brain']) and ('key_file' in myConfig['brain']['ssl'])) else None) groupName = (myConfig['brain']['groupName'] if ('groupName' in myConfig['brain']) else None) startUPNP = (myConfig['brain']['startUPNP'] if ('startUPNP' in myConfig['brain']) else True) brain_url = ((((('http' + ('s' if use_ssl else )) + '://') + (hostname if ((hostname 
is not None) and (hostname != )) else 'localhost')) + ':') + str(tcp_port)) if (not use_ssl): ssl_cert_file = None ssl_key_file = None if (('start' not in myConfig['brain']) or myConfig['brain']['start']): try: brain = Brain(tcp_port=tcp_port, hostname=hostname, ssl_cert_file=ssl_cert_file, ssl_key_file=ssl_key_file, groupName=groupName, authentication=authentication) brain.initialize(skillFolder, startUPNP=startUPNP) brain.start() except NameError: pass if (x_wait and (brain is not None)): brain.wait() return brain<|docstring|>Static method to start a new instance of karen based on a provided configuration file. Args: configFile (str): Path and Name of the JSON configuration file. log_level (str): The level of logging to provide (critical, error, warning, info, and debug). (optional) log_file (str): Path and Name of the log file to create (otherwise prints all messages to stderr). (optional) x_wait (bool): Set to True to wait until engine has exited before returning.<|endoftext|>
2f65eb128078ba658ed33a1397b6fc04a58ef179e76d673c238f250c7362e052
def install_package(specifier, prefix):
    """Install a pip package (without dependencies) into the prefix directory.

    Args:
        specifier: A pip requirement string (e.g. ``name==version``).
        prefix: Directory to install into (passed as pip's ``--prefix``).

    Raises:
        RuntimeError: if pip exits with a nonzero status.
    """
    from pip._internal.cli.main import main as pip_entry_point

    # Run pip in-process; quiet, compiled, no deps, installed into `prefix` only.
    pip_args = [
        '-q',
        'install',
        '--compile',
        '--no-deps',
        '--disable-pip-version-check',
        '--no-warn-script-location',
        '--prefix={}'.format(prefix),
        '--ignore-installed',
        specifier,
    ]
    status = pip_entry_point(pip_args)
    if status != 0:
        raise RuntimeError('Error installing python dependency: {}'.format(specifier))
Install a pip package (without dependencies) into the prefix directory.
source/neuropod/backends/python_bridge/_neuropod_native_bootstrap/pip_utils.py
install_package
weijiadeng/neuropod
887
python
def install_package(specifier, prefix): '\n \n ' from pip._internal.cli.main import main as pip_entry_point exit_code = pip_entry_point(['-q', 'install', '--compile', '--no-deps', '--disable-pip-version-check', '--no-warn-script-location', '--prefix={}'.format(prefix), '--ignore-installed', specifier]) if (exit_code != 0): raise RuntimeError('Error installing python dependency: {}'.format(specifier))
def install_package(specifier, prefix): '\n \n ' from pip._internal.cli.main import main as pip_entry_point exit_code = pip_entry_point(['-q', 'install', '--compile', '--no-deps', '--disable-pip-version-check', '--no-warn-script-location', '--prefix={}'.format(prefix), '--ignore-installed', specifier]) if (exit_code != 0): raise RuntimeError('Error installing python dependency: {}'.format(specifier))<|docstring|>Install a pip package (without dependencies) into the prefix directory.<|endoftext|>
d3865c63774fc71ef079374610d2d8cc887b7e8a2215d4c3a83c637a1148f4ff
def bootstrap_requirements():
    """Ensure Neuropod's required deps are available before loading Neuropod.

    If we're loading the python library from native code in an isolated
    environment, the required packages must be installed/exposed first.
    This is called from the PythonBridge in native code.
    """
    # Guard: only bootstrap once per process.
    if getattr(bootstrap_requirements, 'did_run', False):
        return
    bootstrap_requirements.did_run = True

    # numpy pin differs between py2 and py3.
    numpy_version = '1.18.0' if sys.version_info.major == 3 else '1.16.6'
    reqs = '\n future==0.18.2\n numpy=={}\n six==1.15.0\n testpath==0.4.4\n '.format(numpy_version)
    _load_deps_internal(reqs)
If we're loading the python library from native code in an isolated environment, we need to make sure that our required deps are available before we try to load Neuropod This is called from the PythonBridge in native code
source/neuropod/backends/python_bridge/_neuropod_native_bootstrap/pip_utils.py
bootstrap_requirements
weijiadeng/neuropod
887
python
def bootstrap_requirements(): "\n If we're loading the python library from native code in an isolated environment,\n we need to make sure that our required deps are available before we try to load\n Neuropod\n\n This is called from the PythonBridge in native code\n " if hasattr(bootstrap_requirements, 'did_run'): return bootstrap_requirements.did_run = True reqs = '\n future==0.18.2\n numpy=={}\n six==1.15.0\n testpath==0.4.4\n '.format(('1.18.0' if (sys.version_info.major == 3) else '1.16.6')) _load_deps_internal(reqs)
def bootstrap_requirements(): "\n If we're loading the python library from native code in an isolated environment,\n we need to make sure that our required deps are available before we try to load\n Neuropod\n\n This is called from the PythonBridge in native code\n " if hasattr(bootstrap_requirements, 'did_run'): return bootstrap_requirements.did_run = True reqs = '\n future==0.18.2\n numpy=={}\n six==1.15.0\n testpath==0.4.4\n '.format(('1.18.0' if (sys.version_info.major == 3) else '1.16.6')) _load_deps_internal(reqs)<|docstring|>If we're loading the python library from native code in an isolated environment, we need to make sure that our required deps are available before we try to load Neuropod This is called from the PythonBridge in native code<|endoftext|>
c46668536f6e4ca5ea2b8d08b992dd233fa8ccf910372b0678763f4f91be1585
def load_deps(lockfile):
    """Install (if necessary) and expose every dependency in ``lockfile``.

    For each dependency in the lockfile, install it to the cachedir if
    needed and add it to ``sys.path``.

    Note: the lockfile contains all transitive deps, so no recursive
    scanning is required.

    This is intended to be used by the native code when running with OPE
    (i.e. one model per process).
    """
    with open(lockfile, 'r') as f:
        contents = f.read()
    _load_deps_internal(contents)
For each dependency in the lockfile, install it to the cachedir if necessary and add it to sys.path Note: the lockfile contains all transitive deps so we don't need to do any recursive scanning This is intended to be used by the native code when running with OPE (i.e. one model per process).
source/neuropod/backends/python_bridge/_neuropod_native_bootstrap/pip_utils.py
load_deps
weijiadeng/neuropod
887
python
def load_deps(lockfile): "\n For each dependency in the lockfile, install it to the cachedir if necessary\n and add it to sys.path\n\n Note: the lockfile contains all transitive deps so we don't need to do any\n recursive scanning\n\n This is intented to be used by the native code when running with OPE (i.e. one\n model per process).\n " with open(lockfile, 'r') as f: lockfile_contents = f.read() _load_deps_internal(lockfile_contents)
def load_deps(lockfile): "\n For each dependency in the lockfile, install it to the cachedir if necessary\n and add it to sys.path\n\n Note: the lockfile contains all transitive deps so we don't need to do any\n recursive scanning\n\n This is intented to be used by the native code when running with OPE (i.e. one\n model per process).\n " with open(lockfile, 'r') as f: lockfile_contents = f.read() _load_deps_internal(lockfile_contents)<|docstring|>For each dependency in the lockfile, install it to the cachedir if necessary and add it to sys.path Note: the lockfile contains all transitive deps so we don't need to do any recursive scanning This is intented to be used by the native code when running with OPE (i.e. one model per process).<|endoftext|>
46eaf6f71f45e0244f935bf6471c6ad1b42fda9c10ad91d68e600a874149355e
def _load_deps_internal(lockfile_contents):
    """Install and expose each requirement listed in ``lockfile_contents``.

    See `load_deps` above for details.

    Args:
        lockfile_contents: Lockfile text with one ``name==version``
            requirement per line. ``#`` starts a comment; blank lines are
            ignored.

    Raises:
        ValueError: if a line is not of the form ``name==version`` or if a
            requirement would resolve to a path outside the package cache.
    """
    # Parse the lockfile into normalized (lowercased) `name==version` strings.
    requirements = []
    for line in lockfile_contents.splitlines():
        # Strip trailing comments.
        pos = line.find('#')
        if pos != -1:
            line = line[:pos]

        line = line.strip()
        if not line:
            continue

        parts = line.split('==')
        if len(parts) != 2:
            raise ValueError('Expected requirements of the form name==version but got {}'.format(line))

        requirements.append(line.lower())

    create_if_not_exists(PACKAGE_BASE_DIR)

    for requirement in requirements:
        req_path = os.path.abspath(os.path.join(PACKAGE_BASE_DIR, requirement))

        # Reject path traversal in the requirement string. Comparing against
        # the separator-terminated prefix prevents escapes into sibling
        # directories (a bare startswith(PACKAGE_BASE_DIR) would accept e.g.
        # `/x/cache2/...` when the base dir is `/x/cache`).
        if not req_path.startswith(PACKAGE_BASE_DIR + os.sep):
            raise ValueError('Invalid dependency: {}'.format(requirement))

        if not os.path.exists(req_path + '.complete'):
            # Serialize installation across processes and re-check under the
            # lock so only one process performs the install.
            lock = FileLock(req_path + '.lock')
            with lock:
                if not os.path.exists(req_path + '.complete'):
                    install_package(requirement, req_path)
                    # Marker file signals a finished install to other processes.
                    open(req_path + '.complete', 'a').close()

        # Make the installed package importable.
        sys.path.insert(0, glob.glob('{}/lib/python*/site-packages'.format(req_path))[0])
See `load_deps` above for details
source/neuropod/backends/python_bridge/_neuropod_native_bootstrap/pip_utils.py
_load_deps_internal
weijiadeng/neuropod
887
python
def _load_deps_internal(lockfile_contents): '\n \n ' requirements = [] for line in lockfile_contents.splitlines(): pos = line.find('#') if (pos != (- 1)): line = line[:pos] line = line.strip() if (not line): continue parts = line.split('==') if (len(parts) != 2): raise ValueError('Expected requirements of the form name==version but got {}'.format(line)) requirements.append(line.lower()) create_if_not_exists(PACKAGE_BASE_DIR) for requirement in requirements: req_path = os.path.abspath(os.path.join(PACKAGE_BASE_DIR, requirement)) if (not req_path.startswith(PACKAGE_BASE_DIR)): raise ValueError('Invalid dependency: {}'.format(requirement)) if (not os.path.exists((req_path + '.complete'))): lock = FileLock((req_path + '.lock')) with lock: if (not os.path.exists((req_path + '.complete'))): install_package(requirement, req_path) open((req_path + '.complete'), 'a').close() sys.path.insert(0, glob.glob('{}/lib/python*/site-packages'.format(req_path))[0])
def _load_deps_internal(lockfile_contents): '\n \n ' requirements = [] for line in lockfile_contents.splitlines(): pos = line.find('#') if (pos != (- 1)): line = line[:pos] line = line.strip() if (not line): continue parts = line.split('==') if (len(parts) != 2): raise ValueError('Expected requirements of the form name==version but got {}'.format(line)) requirements.append(line.lower()) create_if_not_exists(PACKAGE_BASE_DIR) for requirement in requirements: req_path = os.path.abspath(os.path.join(PACKAGE_BASE_DIR, requirement)) if (not req_path.startswith(PACKAGE_BASE_DIR)): raise ValueError('Invalid dependency: {}'.format(requirement)) if (not os.path.exists((req_path + '.complete'))): lock = FileLock((req_path + '.lock')) with lock: if (not os.path.exists((req_path + '.complete'))): install_package(requirement, req_path) open((req_path + '.complete'), 'a').close() sys.path.insert(0, glob.glob('{}/lib/python*/site-packages'.format(req_path))[0])<|docstring|>See `load_deps` above for details<|endoftext|>
ff118801124ef24051e959e689f19d1fbf1858fcd02dd3dd7020d4236b472f91
def estimate_value(budget, exchange_rate):
    """Estimate the foreign-currency value of the budget.

    :param budget: float - amount of money you are planning to exchange.
    :param exchange_rate: float - unit value of the foreign currency.
    :return: float - the estimated value in the foreign currency.
    """
    return budget / exchange_rate
:param budget: float - amount of money you are planning to exchange. :param exchange_rate: float - unit value of the foreign currency. :return:
exercises/concept/currency-exchange/.meta/exemplar.py
estimate_value
mfeif/python
0
python
def estimate_value(budget, exchange_rate): '\n\n\t:param budget: float - amount of money you are planning to exchange.\n\t:param exchange_rate: float - unit value of the foreign currency.\n\t:return:\n\t' return (budget / exchange_rate)
def estimate_value(budget, exchange_rate): '\n\n\t:param budget: float - amount of money you are planning to exchange.\n\t:param exchange_rate: float - unit value of the foreign currency.\n\t:return:\n\t' return (budget / exchange_rate)<|docstring|>:param budget: float - amount of money you are planning to exchange. :param exchange_rate: float - unit value of the foreign currency. :return:<|endoftext|>
b187adf2c4031d4edcf7252be7e425a6e33c1b12aed6c4d6bf51816ae480a0f6
def get_change(budget, exchanging_value):
    """Return the money left over after exchanging part of the budget.

    :param budget: float - amount of money you own.
    :param exchanging_value: int - amount of your money you want to exchange now.
    :return: float - what remains of the budget.
    """
    return budget - exchanging_value
:param budget: float - amount of money you own. :param exchanging_value: int - amount of your money you want to exchange now. :return:
exercises/concept/currency-exchange/.meta/exemplar.py
get_change
mfeif/python
0
python
def get_change(budget, exchanging_value): '\n\n\t:param budget: float - amount of money you own.\n\t:param exchanging_value: int - amount of your money you want to exchange now.\n\t:return:\n\t' return (budget - exchanging_value)
def get_change(budget, exchanging_value): '\n\n\t:param budget: float - amount of money you own.\n\t:param exchanging_value: int - amount of your money you want to exchange now.\n\t:return:\n\t' return (budget - exchanging_value)<|docstring|>:param budget: float - amount of money you own. :param exchanging_value: int - amount of your money you want to exchange now. :return:<|endoftext|>
7f93bb535bed5b39c4eaad887bb91cf8563c8cf158655fb2aded44754b3c1f5d
def get_value(denomination, number_of_bills):
    """Return the total value of a stack of bills.

    :param denomination: int - the value of a bill.
    :param number_of_bills: int - amount of bills you received.
    :return: int - total value of the bills.
    """
    return number_of_bills * denomination
:param denomination: int - the value of a bill. :param number_of_bills: int amount of bills you received. :return:
exercises/concept/currency-exchange/.meta/exemplar.py
get_value
mfeif/python
0
python
def get_value(denomination, number_of_bills): '\n\n\t:param denomination: int - the value of a bill.\n\t:param number_of_bills: int amount of bills you received.\n\t:return:\n\t' return (number_of_bills * denomination)
def get_value(denomination, number_of_bills): '\n\n\t:param denomination: int - the value of a bill.\n\t:param number_of_bills: int amount of bills you received.\n\t:return:\n\t' return (number_of_bills * denomination)<|docstring|>:param denomination: int - the value of a bill. :param number_of_bills: int amount of bills you received. :return:<|endoftext|>
33424ed9dd61c6dfdc8c28414133896d6bda234cf4ed4c626d8e822ce9c26c34
def get_number_of_bills(budget, denomination):
    """Return how many whole bills of ``denomination`` fit in the budget.

    :param budget: float - the amount of money you are planning to exchange.
    :param denomination: int - the value of a single bill.
    :return: int - number of whole bills (fractions discarded).
    """
    whole_bills = int(budget / denomination)
    return whole_bills
:param budget: float - the amount of money you are planning to exchange. :param denomination: int - the value of a single bill. :return:
exercises/concept/currency-exchange/.meta/exemplar.py
get_number_of_bills
mfeif/python
0
python
def get_number_of_bills(budget, denomination): '\n\n\t:param budget: float - the amount of money you are planning to exchange.\n\t:param denomination: int - the value of a single bill.\n\t:return:\n\t' return int((budget / denomination))
def get_number_of_bills(budget, denomination): '\n\n\t:param budget: float - the amount of money you are planning to exchange.\n\t:param denomination: int - the value of a single bill.\n\t:return:\n\t' return int((budget / denomination))<|docstring|>:param budget: float - the amount of money you are planning to exchange. :param denomination: int - the value of a single bill. :return:<|endoftext|>
e499175611796925db8e285a9b84152fcd0ca7bd156e976a1bf76e696bed2ef6
def exchangeable_value(budget, exchange_rate, spread, denomination): '\n\n\t:param budget: float - the amount of your money you are planning to exchange.\n\t:param exchange_rate: float - the unit value of the foreign currency.\n\t:param spread: int - percentage that is taken as an exchange fee.\n\t:param denomination: int - the value of a single bill.\n\t:return:\n\t' exchange_fee = ((exchange_rate / 100) * spread) actual_rate = (exchange_rate + exchange_fee) exchangeable_amount = int(((budget / actual_rate) / denomination)) return (exchangeable_amount * denomination)
:param budget: float - the amount of your money you are planning to exchange. :param exchange_rate: float - the unit value of the foreign currency. :param spread: int - percentage that is taken as an exchange fee. :param denomination: int - the value of a single bill. :return:
exercises/concept/currency-exchange/.meta/exemplar.py
exchangeable_value
mfeif/python
0
python
def exchangeable_value(budget, exchange_rate, spread, denomination): '\n\n\t:param budget: float - the amount of your money you are planning to exchange.\n\t:param exchange_rate: float - the unit value of the foreign currency.\n\t:param spread: int - percentage that is taken as an exchange fee.\n\t:param denomination: int - the value of a single bill.\n\t:return:\n\t' exchange_fee = ((exchange_rate / 100) * spread) actual_rate = (exchange_rate + exchange_fee) exchangeable_amount = int(((budget / actual_rate) / denomination)) return (exchangeable_amount * denomination)
def exchangeable_value(budget, exchange_rate, spread, denomination): '\n\n\t:param budget: float - the amount of your money you are planning to exchange.\n\t:param exchange_rate: float - the unit value of the foreign currency.\n\t:param spread: int - percentage that is taken as an exchange fee.\n\t:param denomination: int - the value of a single bill.\n\t:return:\n\t' exchange_fee = ((exchange_rate / 100) * spread) actual_rate = (exchange_rate + exchange_fee) exchangeable_amount = int(((budget / actual_rate) / denomination)) return (exchangeable_amount * denomination)<|docstring|>:param budget: float - the amount of your money you are planning to exchange. :param exchange_rate: float - the unit value of the foreign currency. :param spread: int - percentage that is taken as an exchange fee. :param denomination: int - the value of a single bill. :return:<|endoftext|>
fa0e61608bdd2ec57410b05e88cd87142acaf75a54a58c58c43dcbfa23facb27
def unexchangeable_value(budget, exchange_rate, spread, denomination):
    """Return the leftover value that cannot be exchanged into whole bills.

    :param budget: float - amount of money you are planning to exchange.
    :param exchange_rate: float - unit value of the foreign currency.
    :param spread: int - the percentage taken as an exchange fee.
    :param denomination: int - the value of a single bill.
    :return: int - value remaining after taking whole bills.
    """
    # Fee-inclusive rate: the nominal rate plus `spread` percent of it.
    fee = exchange_rate / 100 * spread
    rate_with_fee = exchange_rate + fee
    # Remainder after removing whole bills, truncated to an int.
    return int(budget / rate_with_fee % denomination)
:param budget: float - amount of money you are planning to exchange. :param exchange_rate: float - unit value of the foreign currency. :param spread: int - the percentage taken as an exchange fee. :param denomination: int - the value of a single bill. :return:
exercises/concept/currency-exchange/.meta/exemplar.py
unexchangeable_value
mfeif/python
0
python
def unexchangeable_value(budget, exchange_rate, spread, denomination): '\n\n\t:param budget: float - amount of money you are planning to exchange.\n\t:param exchange_rate: float - unit value of the foreign currency.\n\t:param spread: int - the percentage taken as an exchange fee.\n\t:param denomination: int - the value of a single bill.\n\t:return:\n\t' exchange_fee = ((exchange_rate / 100) * spread) actual_rate = (exchange_rate + exchange_fee) unexchangeable_amount = int(((budget / actual_rate) % denomination)) return unexchangeable_amount
def unexchangeable_value(budget, exchange_rate, spread, denomination): '\n\n\t:param budget: float - amount of money you are planning to exchange.\n\t:param exchange_rate: float - unit value of the foreign currency.\n\t:param spread: int - the percentage taken as an exchange fee.\n\t:param denomination: int - the value of a single bill.\n\t:return:\n\t' exchange_fee = ((exchange_rate / 100) * spread) actual_rate = (exchange_rate + exchange_fee) unexchangeable_amount = int(((budget / actual_rate) % denomination)) return unexchangeable_amount<|docstring|>:param budget: float - amount of money you are planning to exchange. :param exchange_rate: float - unit value of the foreign currency. :param spread: int - the percentage taken as an exchange fee. :param denomination: int - the value of a single bill. :return:<|endoftext|>
285bc063026d7c331094357b4c9db02b0b62b8d11ffe1b9fd0380061750d097a
def supports_color():
    """
    Return True if the running system's terminal supports color,
    and False otherwise.
    """
    def vt_codes_enabled_in_windows_registry():
        """
        Check the Windows Registry to see if VT code handling has been enabled
        by default, see https://superuser.com/a/1300251/447564.
        """
        try:
            import winreg
        except ImportError:
            return False
        console_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Console')
        try:
            level, _ = winreg.QueryValueEx(console_key, 'VirtualTerminalLevel')
        except FileNotFoundError:
            return False
        return level == 1

    # A non-tty stream (pipe, redirect) never gets color.
    if not (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()):
        return False
    # Non-Windows ttys are assumed color-capable.
    if sys.platform != 'win32':
        return True
    # On Windows, require one of the known VT-capable environments.
    return (
        HAS_COLORAMA
        or 'ANSICON' in os.environ
        or 'WT_SESSION' in os.environ
        or os.environ.get('TERM_PROGRAM') == 'vscode'
        or vt_codes_enabled_in_windows_registry()
    )
Return True if the running system's terminal supports color, and False otherwise.
django/core/management/color.py
supports_color
mavisguan/django
61,676
python
def supports_color(): "\n Return True if the running system's terminal supports color,\n and False otherwise.\n " def vt_codes_enabled_in_windows_registry(): '\n Check the Windows Registry to see if VT code handling has been enabled\n by default, see https://superuser.com/a/1300251/447564.\n ' try: import winreg except ImportError: return False else: reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Console') try: (reg_key_value, _) = winreg.QueryValueEx(reg_key, 'VirtualTerminalLevel') except FileNotFoundError: return False else: return (reg_key_value == 1) is_a_tty = (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()) return (is_a_tty and ((sys.platform != 'win32') or HAS_COLORAMA or ('ANSICON' in os.environ) or ('WT_SESSION' in os.environ) or (os.environ.get('TERM_PROGRAM') == 'vscode') or vt_codes_enabled_in_windows_registry()))
def supports_color(): "\n Return True if the running system's terminal supports color,\n and False otherwise.\n " def vt_codes_enabled_in_windows_registry(): '\n Check the Windows Registry to see if VT code handling has been enabled\n by default, see https://superuser.com/a/1300251/447564.\n ' try: import winreg except ImportError: return False else: reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Console') try: (reg_key_value, _) = winreg.QueryValueEx(reg_key, 'VirtualTerminalLevel') except FileNotFoundError: return False else: return (reg_key_value == 1) is_a_tty = (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()) return (is_a_tty and ((sys.platform != 'win32') or HAS_COLORAMA or ('ANSICON' in os.environ) or ('WT_SESSION' in os.environ) or (os.environ.get('TERM_PROGRAM') == 'vscode') or vt_codes_enabled_in_windows_registry()))<|docstring|>Return True if the running system's terminal supports color, and False otherwise.<|endoftext|>
f31305a5603b851e3ae47009389350ca880e9f78efe80d75356205a3dc7d4ac5
def make_style(config_string=''):
    """
    Create a Style object from the given config_string.

    If config_string is empty django.utils.termcolors.DEFAULT_PALETTE is used.
    """
    style = Style()
    color_settings = termcolors.parse_color_setting(config_string)
    # Attach a formatter for every known role; identity when colors are off.
    for role in termcolors.PALETTES[termcolors.NOCOLOR_PALETTE]:
        if color_settings:
            role_format = color_settings.get(role, {})
            formatter = termcolors.make_style(**role_format)
        else:
            def formatter(text):
                return text
        setattr(style, role, formatter)
    # ERROR_OUTPUT is an alias kept for backwards compatibility.
    style.ERROR_OUTPUT = style.ERROR
    return style
Create a Style object from the given config_string. If config_string is empty django.utils.termcolors.DEFAULT_PALETTE is used.
django/core/management/color.py
make_style
mavisguan/django
61,676
python
def make_style(config_string=): '\n Create a Style object from the given config_string.\n\n If config_string is empty django.utils.termcolors.DEFAULT_PALETTE is used.\n ' style = Style() color_settings = termcolors.parse_color_setting(config_string) for role in termcolors.PALETTES[termcolors.NOCOLOR_PALETTE]: if color_settings: format = color_settings.get(role, {}) style_func = termcolors.make_style(**format) else: def style_func(x): return x setattr(style, role, style_func) style.ERROR_OUTPUT = style.ERROR return style
def make_style(config_string=): '\n Create a Style object from the given config_string.\n\n If config_string is empty django.utils.termcolors.DEFAULT_PALETTE is used.\n ' style = Style() color_settings = termcolors.parse_color_setting(config_string) for role in termcolors.PALETTES[termcolors.NOCOLOR_PALETTE]: if color_settings: format = color_settings.get(role, {}) style_func = termcolors.make_style(**format) else: def style_func(x): return x setattr(style, role, style_func) style.ERROR_OUTPUT = style.ERROR return style<|docstring|>Create a Style object from the given config_string. If config_string is empty django.utils.termcolors.DEFAULT_PALETTE is used.<|endoftext|>
6d133decb77c5bf7e61aadd53496837f85aff4df02ceae8d47015c0d9774b4e0
@functools.lru_cache(maxsize=None)  # memoized: the no-color Style is stateless and shared
def no_style():
    '''
    Return a Style object with no color scheme.
    '''
    return make_style('nocolor')
Return a Style object with no color scheme.
django/core/management/color.py
no_style
mavisguan/django
61,676
python
@functools.lru_cache(maxsize=None) def no_style(): '\n \n ' return make_style('nocolor')
@functools.lru_cache(maxsize=None) def no_style(): '\n \n ' return make_style('nocolor')<|docstring|>Return a Style object with no color scheme.<|endoftext|>
19a6f6bf7a9e206d6b7454951d6158d525295fedaa0305542bdfd5573e1d1198
def color_style(force_color=False):
    """
    Return a Style object from the Django color scheme.

    When coloring is forced or the terminal supports it, the palette is
    taken from the DJANGO_COLORS environment variable; otherwise a
    no-color style is returned.
    """
    if force_color or supports_color():
        return make_style(os.environ.get('DJANGO_COLORS', ''))
    return no_style()
Return a Style object from the Django color scheme.
django/core/management/color.py
color_style
mavisguan/django
61,676
python
def color_style(force_color=False): '\n \n ' if ((not force_color) and (not supports_color())): return no_style() return make_style(os.environ.get('DJANGO_COLORS', ))
def color_style(force_color=False): '\n \n ' if ((not force_color) and (not supports_color())): return no_style() return make_style(os.environ.get('DJANGO_COLORS', ))<|docstring|>Return a Style object from the Django color scheme.<|endoftext|>
40adbf29e9c0d5b4f87d2801e73da590166e6312b009be22e30d62bb22952a4d
def vt_codes_enabled_in_windows_registry():
    """
    Check the Windows Registry to see if VT code handling has been enabled
    by default, see https://superuser.com/a/1300251/447564.
    """
    try:
        import winreg
    except ImportError:
        # winreg only exists on Windows; elsewhere VT codes can't be
        # registry-enabled.
        return False
    console_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Console')
    try:
        level, _ = winreg.QueryValueEx(console_key, 'VirtualTerminalLevel')
    except FileNotFoundError:
        return False
    return level == 1
Check the Windows Registry to see if VT code handling has been enabled by default, see https://superuser.com/a/1300251/447564.
django/core/management/color.py
vt_codes_enabled_in_windows_registry
mavisguan/django
61,676
python
def vt_codes_enabled_in_windows_registry(): '\n Check the Windows Registry to see if VT code handling has been enabled\n by default, see https://superuser.com/a/1300251/447564.\n ' try: import winreg except ImportError: return False else: reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Console') try: (reg_key_value, _) = winreg.QueryValueEx(reg_key, 'VirtualTerminalLevel') except FileNotFoundError: return False else: return (reg_key_value == 1)
def vt_codes_enabled_in_windows_registry(): '\n Check the Windows Registry to see if VT code handling has been enabled\n by default, see https://superuser.com/a/1300251/447564.\n ' try: import winreg except ImportError: return False else: reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Console') try: (reg_key_value, _) = winreg.QueryValueEx(reg_key, 'VirtualTerminalLevel') except FileNotFoundError: return False else: return (reg_key_value == 1)<|docstring|>Check the Windows Registry to see if VT code handling has been enabled by default, see https://superuser.com/a/1300251/447564.<|endoftext|>
33cf66bfeda80470357981bbc286d573da554ddff32c93d7605b835f3f17752c
@slim.add_arg_scope
def bottleneck_hdc(inputs, depth, depth_bottleneck, stride, rate=1,
                   multi_grid=(1, 2, 4), outputs_collections=None, scope=None,
                   use_bounded_activations=False):
    """Hybrid Dilated Convolution Bottleneck.
    Multi_Grid = (1,2,4)
    See Understanding Convolution for Semantic Segmentation.
    When putting together two consecutive ResNet blocks that use this unit, one
    should use stride = 2 in the last unit of the first block.
    Args:
      inputs: A tensor of size [batch, height, width, channels].
      depth: The depth of the ResNet unit output.
      depth_bottleneck: The depth of the bottleneck layers.
      stride: The ResNet unit's stride. Determines the amount of downsampling of
        the units output compared to its input.
      rate: An integer, rate for atrous convolution.
      multi_grid: multi_grid sturcture.
      outputs_collections: Collection to add the ResNet unit output.
      scope: Optional variable_scope.
      use_bounded_activations: Whether or not to use bounded activations. Bounded
        activations better lend themselves to quantized inference.
    Returns:
      The ResNet unit's output.
    """
    # NOTE(review): `resnet_vn` and `outputs_collections` come from enclosing
    # module scope / are unused here respectively — confirm against the module.
    with tf.variable_scope(scope, 'bottleneck_v{}'.format(resnet_vn), [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        # Shortcut branch: identity (possibly subsampled) when the input depth
        # already matches; otherwise project with a 1x1 conv to `depth`.
        if (depth == depth_in):
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(inputs, depth, [1, 1], stride=stride,
                                   activation_fn=(tf.nn.relu6 if use_bounded_activations else None),
                                   scope='shortcut')
        # Residual branch: 1x1 -> 3x3 -> 1x1, each conv dilated by `rate`
        # scaled by the corresponding multi_grid entry (the HDC scheme).
        residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,
                               rate=(rate * multi_grid[0]), scope='conv1')
        residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
                                            rate=(rate * multi_grid[1]), scope='conv2')
        residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                               rate=(rate * multi_grid[2]), activation_fn=None,
                               scope='conv3')
        if use_bounded_activations:
            # Clip the residual before adding so the sum stays within the
            # relu6 range used for quantized inference.
            residual = tf.clip_by_value(residual, (- 6.0), 6.0)
            output = tf.nn.relu6((shortcut + residual))
        else:
            output = tf.nn.relu((shortcut + residual))
        return output
Hybrid Dilated Convolution Bottleneck. Multi_Grid = (1,2,4) See Understanding Convolution for Semantic Segmentation. When putting together two consecutive ResNet blocks that use this unit, one should use stride = 2 in the last unit of the first block. Args: inputs: A tensor of size [batch, height, width, channels]. depth: The depth of the ResNet unit output. depth_bottleneck: The depth of the bottleneck layers. stride: The ResNet unit's stride. Determines the amount of downsampling of the units output compared to its input. rate: An integer, rate for atrous convolution. multi_grid: multi_grid sturcture. outputs_collections: Collection to add the ResNet unit output. scope: Optional variable_scope. use_bounded_activations: Whether or not to use bounded activations. Bounded activations better lend themselves to quantized inference. Returns: The ResNet unit's output.
examples/Deeplab/experiments/deeplabv3_slim/deeplabv3_noaspp.py
bottleneck_hdc
MarcWong/tensorpack
5
python
@slim.add_arg_scope def bottleneck_hdc(inputs, depth, depth_bottleneck, stride, rate=1, multi_grid=(1, 2, 4), outputs_collections=None, scope=None, use_bounded_activations=False): "Hybrid Dilated Convolution Bottleneck.\n Multi_Grid = (1,2,4)\n See Understanding Convolution for Semantic Segmentation.\n When putting together two consecutive ResNet blocks that use this unit, one\n should use stride = 2 in the last unit of the first block.\n Args:\n inputs: A tensor of size [batch, height, width, channels].\n depth: The depth of the ResNet unit output.\n depth_bottleneck: The depth of the bottleneck layers.\n stride: The ResNet unit's stride. Determines the amount of downsampling of\n the units output compared to its input.\n rate: An integer, rate for atrous convolution.\n multi_grid: multi_grid sturcture.\n outputs_collections: Collection to add the ResNet unit output.\n scope: Optional variable_scope.\n use_bounded_activations: Whether or not to use bounded activations. Bounded\n activations better lend themselves to quantized inference.\n Returns:\n The ResNet unit's output.\n " with tf.variable_scope(scope, 'bottleneck_v{}'.format(resnet_vn), [inputs]) as sc: depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4) if (depth == depth_in): shortcut = resnet_utils.subsample(inputs, stride, 'shortcut') else: shortcut = slim.conv2d(inputs, depth, [1, 1], stride=stride, activation_fn=(tf.nn.relu6 if use_bounded_activations else None), scope='shortcut') residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1, rate=(rate * multi_grid[0]), scope='conv1') residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride, rate=(rate * multi_grid[1]), scope='conv2') residual = slim.conv2d(residual, depth, [1, 1], stride=1, rate=(rate * multi_grid[2]), activation_fn=None, scope='conv3') if use_bounded_activations: residual = tf.clip_by_value(residual, (- 6.0), 6.0) output = tf.nn.relu6((shortcut + residual)) else: output = tf.nn.relu((shortcut + 
residual)) return output
@slim.add_arg_scope def bottleneck_hdc(inputs, depth, depth_bottleneck, stride, rate=1, multi_grid=(1, 2, 4), outputs_collections=None, scope=None, use_bounded_activations=False): "Hybrid Dilated Convolution Bottleneck.\n Multi_Grid = (1,2,4)\n See Understanding Convolution for Semantic Segmentation.\n When putting together two consecutive ResNet blocks that use this unit, one\n should use stride = 2 in the last unit of the first block.\n Args:\n inputs: A tensor of size [batch, height, width, channels].\n depth: The depth of the ResNet unit output.\n depth_bottleneck: The depth of the bottleneck layers.\n stride: The ResNet unit's stride. Determines the amount of downsampling of\n the units output compared to its input.\n rate: An integer, rate for atrous convolution.\n multi_grid: multi_grid sturcture.\n outputs_collections: Collection to add the ResNet unit output.\n scope: Optional variable_scope.\n use_bounded_activations: Whether or not to use bounded activations. Bounded\n activations better lend themselves to quantized inference.\n Returns:\n The ResNet unit's output.\n " with tf.variable_scope(scope, 'bottleneck_v{}'.format(resnet_vn), [inputs]) as sc: depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4) if (depth == depth_in): shortcut = resnet_utils.subsample(inputs, stride, 'shortcut') else: shortcut = slim.conv2d(inputs, depth, [1, 1], stride=stride, activation_fn=(tf.nn.relu6 if use_bounded_activations else None), scope='shortcut') residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1, rate=(rate * multi_grid[0]), scope='conv1') residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride, rate=(rate * multi_grid[1]), scope='conv2') residual = slim.conv2d(residual, depth, [1, 1], stride=1, rate=(rate * multi_grid[2]), activation_fn=None, scope='conv3') if use_bounded_activations: residual = tf.clip_by_value(residual, (- 6.0), 6.0) output = tf.nn.relu6((shortcut + residual)) else: output = tf.nn.relu((shortcut + 
residual)) return output<|docstring|>Hybrid Dilated Convolution Bottleneck. Multi_Grid = (1,2,4) See Understanding Convolution for Semantic Segmentation. When putting together two consecutive ResNet blocks that use this unit, one should use stride = 2 in the last unit of the first block. Args: inputs: A tensor of size [batch, height, width, channels]. depth: The depth of the ResNet unit output. depth_bottleneck: The depth of the bottleneck layers. stride: The ResNet unit's stride. Determines the amount of downsampling of the units output compared to its input. rate: An integer, rate for atrous convolution. multi_grid: multi_grid sturcture. outputs_collections: Collection to add the ResNet unit output. scope: Optional variable_scope. use_bounded_activations: Whether or not to use bounded activations. Bounded activations better lend themselves to quantized inference. Returns: The ResNet unit's output.<|endoftext|>
6d892916b01876c101403fb69c18a91fa1c8a617c3e53f5b4c90cf07523f8497
def deeplabv3(inputs, num_classes, aspp=True, reuse=None, is_training=True, fix_bn=False): "DeepLabV3\n Args:\n inputs: A tensor of size [batch, height, width, channels].\n depth: The number of layers of the ResNet.\n aspp: Whether to use ASPP module, if True, will use 4 blocks with \n multi_grid=(1,2,4), if False, will use 7 blocks with multi_grid=(1,2,1).\n reuse: Whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n Returns:\n net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].\n end_points: A dictionary from components of the network to the \n corresponding activation.\n " if aspp: multi_grid = (1, 2, 4) else: multi_grid = (1, 2, 1) bn_is_training = is_training if fix_bn: bn_is_training = False scope = 'resnet_v{}_101'.format(resnet_vn) with slim.arg_scope(resnet_v2.resnet_arg_scope()): with slim.arg_scope([slim.batch_norm], is_training=bn_is_training): with tf.variable_scope(scope, [inputs], reuse=reuse) as sc: net = inputs with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None): net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1') net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1') with tf.variable_scope('block1', [net]) as sc: base_depth = 64 for i in range(2): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=1) with tf.variable_scope('unit_3', values=[net]): net = bottleneck(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=2) with tf.variable_scope('block2', [net]) as sc: base_depth = 128 for i in range(3): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=1) with tf.variable_scope('unit_4', values=[net]): net = bottleneck(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=2) with tf.variable_scope('block3', [net]) as sc: 
base_depth = 256 for i in range(23): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck(net, depth=(base_depth * 4), depth_bottleneck=base_depth, rate=1, stride=1) stage_scale = (2 if is_training else 1) with tf.variable_scope('block4', [net]) as sc: base_depth = 512 for i in range(3): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck_hdc(net, depth=(base_depth * 4), depth_bottleneck=base_depth, rate=stage_scale, stride=1, multi_grid=multi_grid) with tf.variable_scope('lr_multiply', [net]) as sc: if aspp: with tf.variable_scope('aspp', [net]) as sc: aspp_list = [] branch_1 = slim.conv2d(net, 256, [1, 1], stride=1, scope='1x1conv') aspp_list.append(branch_1) for i in range(3): branch_2 = slim.conv2d(net, 256, [3, 3], stride=1, rate=(6 * (i + 1)), scope='rate{}'.format((6 * (i + 1)))) aspp_list.append(branch_2) aspp = tf.add_n(aspp_list) with tf.variable_scope('img_pool', [net]) as sc: 'Image Pooling\n See ParseNet: Looking Wider to See Better\n ' pooled = tf.reduce_mean(net, [1, 2], name='avg_pool', keep_dims=True) pooled = slim.conv2d(pooled, 256, [1, 1], stride=1, scope='1x1conv') pooled = tf.image.resize_bilinear(pooled, tf.shape(net)[1:3]) with tf.variable_scope('fusion', [aspp, pooled]) as sc: net = tf.concat([aspp, pooled], 3) net = slim.conv2d(net, 256, [1, 1], stride=1, scope='1x1conv') else: with tf.variable_scope('block5', [net]) as sc: base_depth = 512 for i in range(3): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck_hdc(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=1, rate=(stage_scale * 2)) with tf.variable_scope('block6', [net]) as sc: base_depth = 512 for i in range(3): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck_hdc(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=1, rate=(stage_scale * 4)) with tf.variable_scope('block7', [net]) as sc: base_depth = 512 for i in range(3): with 
tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck_hdc(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=1, rate=(stage_scale * 8)) with tf.variable_scope('logits', [net]) as sc: net = slim.conv2d(net, num_classes, [1, 1], stride=1, activation_fn=None, normalizer_fn=None) net = tf.image.resize_bilinear(net, inputs.shape[1:3]) return net
DeepLabV3 Args: inputs: A tensor of size [batch, height, width, channels]. depth: The number of layers of the ResNet. aspp: Whether to use ASPP module, if True, will use 4 blocks with multi_grid=(1,2,4), if False, will use 7 blocks with multi_grid=(1,2,1). reuse: Whether or not the network and its variables should be reused. To be able to reuse 'scope' must be given. Returns: net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. end_points: A dictionary from components of the network to the corresponding activation.
examples/Deeplab/experiments/deeplabv3_slim/deeplabv3_noaspp.py
deeplabv3
MarcWong/tensorpack
5
python
def deeplabv3(inputs, num_classes, aspp=True, reuse=None, is_training=True, fix_bn=False): "DeepLabV3\n Args:\n inputs: A tensor of size [batch, height, width, channels].\n depth: The number of layers of the ResNet.\n aspp: Whether to use ASPP module, if True, will use 4 blocks with \n multi_grid=(1,2,4), if False, will use 7 blocks with multi_grid=(1,2,1).\n reuse: Whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n Returns:\n net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].\n end_points: A dictionary from components of the network to the \n corresponding activation.\n " if aspp: multi_grid = (1, 2, 4) else: multi_grid = (1, 2, 1) bn_is_training = is_training if fix_bn: bn_is_training = False scope = 'resnet_v{}_101'.format(resnet_vn) with slim.arg_scope(resnet_v2.resnet_arg_scope()): with slim.arg_scope([slim.batch_norm], is_training=bn_is_training): with tf.variable_scope(scope, [inputs], reuse=reuse) as sc: net = inputs with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None): net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1') net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1') with tf.variable_scope('block1', [net]) as sc: base_depth = 64 for i in range(2): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=1) with tf.variable_scope('unit_3', values=[net]): net = bottleneck(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=2) with tf.variable_scope('block2', [net]) as sc: base_depth = 128 for i in range(3): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=1) with tf.variable_scope('unit_4', values=[net]): net = bottleneck(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=2) with tf.variable_scope('block3', [net]) as sc: 
base_depth = 256 for i in range(23): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck(net, depth=(base_depth * 4), depth_bottleneck=base_depth, rate=1, stride=1) stage_scale = (2 if is_training else 1) with tf.variable_scope('block4', [net]) as sc: base_depth = 512 for i in range(3): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck_hdc(net, depth=(base_depth * 4), depth_bottleneck=base_depth, rate=stage_scale, stride=1, multi_grid=multi_grid) with tf.variable_scope('lr_multiply', [net]) as sc: if aspp: with tf.variable_scope('aspp', [net]) as sc: aspp_list = [] branch_1 = slim.conv2d(net, 256, [1, 1], stride=1, scope='1x1conv') aspp_list.append(branch_1) for i in range(3): branch_2 = slim.conv2d(net, 256, [3, 3], stride=1, rate=(6 * (i + 1)), scope='rate{}'.format((6 * (i + 1)))) aspp_list.append(branch_2) aspp = tf.add_n(aspp_list) with tf.variable_scope('img_pool', [net]) as sc: 'Image Pooling\n See ParseNet: Looking Wider to See Better\n ' pooled = tf.reduce_mean(net, [1, 2], name='avg_pool', keep_dims=True) pooled = slim.conv2d(pooled, 256, [1, 1], stride=1, scope='1x1conv') pooled = tf.image.resize_bilinear(pooled, tf.shape(net)[1:3]) with tf.variable_scope('fusion', [aspp, pooled]) as sc: net = tf.concat([aspp, pooled], 3) net = slim.conv2d(net, 256, [1, 1], stride=1, scope='1x1conv') else: with tf.variable_scope('block5', [net]) as sc: base_depth = 512 for i in range(3): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck_hdc(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=1, rate=(stage_scale * 2)) with tf.variable_scope('block6', [net]) as sc: base_depth = 512 for i in range(3): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck_hdc(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=1, rate=(stage_scale * 4)) with tf.variable_scope('block7', [net]) as sc: base_depth = 512 for i in range(3): with 
tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck_hdc(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=1, rate=(stage_scale * 8)) with tf.variable_scope('logits', [net]) as sc: net = slim.conv2d(net, num_classes, [1, 1], stride=1, activation_fn=None, normalizer_fn=None) net = tf.image.resize_bilinear(net, inputs.shape[1:3]) return net
def deeplabv3(inputs, num_classes, aspp=True, reuse=None, is_training=True, fix_bn=False): "DeepLabV3\n Args:\n inputs: A tensor of size [batch, height, width, channels].\n depth: The number of layers of the ResNet.\n aspp: Whether to use ASPP module, if True, will use 4 blocks with \n multi_grid=(1,2,4), if False, will use 7 blocks with multi_grid=(1,2,1).\n reuse: Whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n Returns:\n net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].\n end_points: A dictionary from components of the network to the \n corresponding activation.\n " if aspp: multi_grid = (1, 2, 4) else: multi_grid = (1, 2, 1) bn_is_training = is_training if fix_bn: bn_is_training = False scope = 'resnet_v{}_101'.format(resnet_vn) with slim.arg_scope(resnet_v2.resnet_arg_scope()): with slim.arg_scope([slim.batch_norm], is_training=bn_is_training): with tf.variable_scope(scope, [inputs], reuse=reuse) as sc: net = inputs with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None): net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1') net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1') with tf.variable_scope('block1', [net]) as sc: base_depth = 64 for i in range(2): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=1) with tf.variable_scope('unit_3', values=[net]): net = bottleneck(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=2) with tf.variable_scope('block2', [net]) as sc: base_depth = 128 for i in range(3): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=1) with tf.variable_scope('unit_4', values=[net]): net = bottleneck(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=2) with tf.variable_scope('block3', [net]) as sc: 
base_depth = 256 for i in range(23): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck(net, depth=(base_depth * 4), depth_bottleneck=base_depth, rate=1, stride=1) stage_scale = (2 if is_training else 1) with tf.variable_scope('block4', [net]) as sc: base_depth = 512 for i in range(3): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck_hdc(net, depth=(base_depth * 4), depth_bottleneck=base_depth, rate=stage_scale, stride=1, multi_grid=multi_grid) with tf.variable_scope('lr_multiply', [net]) as sc: if aspp: with tf.variable_scope('aspp', [net]) as sc: aspp_list = [] branch_1 = slim.conv2d(net, 256, [1, 1], stride=1, scope='1x1conv') aspp_list.append(branch_1) for i in range(3): branch_2 = slim.conv2d(net, 256, [3, 3], stride=1, rate=(6 * (i + 1)), scope='rate{}'.format((6 * (i + 1)))) aspp_list.append(branch_2) aspp = tf.add_n(aspp_list) with tf.variable_scope('img_pool', [net]) as sc: 'Image Pooling\n See ParseNet: Looking Wider to See Better\n ' pooled = tf.reduce_mean(net, [1, 2], name='avg_pool', keep_dims=True) pooled = slim.conv2d(pooled, 256, [1, 1], stride=1, scope='1x1conv') pooled = tf.image.resize_bilinear(pooled, tf.shape(net)[1:3]) with tf.variable_scope('fusion', [aspp, pooled]) as sc: net = tf.concat([aspp, pooled], 3) net = slim.conv2d(net, 256, [1, 1], stride=1, scope='1x1conv') else: with tf.variable_scope('block5', [net]) as sc: base_depth = 512 for i in range(3): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck_hdc(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=1, rate=(stage_scale * 2)) with tf.variable_scope('block6', [net]) as sc: base_depth = 512 for i in range(3): with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck_hdc(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=1, rate=(stage_scale * 4)) with tf.variable_scope('block7', [net]) as sc: base_depth = 512 for i in range(3): with 
tf.variable_scope(('unit_%d' % (i + 1)), values=[net]): net = bottleneck_hdc(net, depth=(base_depth * 4), depth_bottleneck=base_depth, stride=1, rate=(stage_scale * 8)) with tf.variable_scope('logits', [net]) as sc: net = slim.conv2d(net, num_classes, [1, 1], stride=1, activation_fn=None, normalizer_fn=None) net = tf.image.resize_bilinear(net, inputs.shape[1:3]) return net<|docstring|>DeepLabV3 Args: inputs: A tensor of size [batch, height, width, channels]. depth: The number of layers of the ResNet. aspp: Whether to use ASPP module, if True, will use 4 blocks with multi_grid=(1,2,4), if False, will use 7 blocks with multi_grid=(1,2,1). reuse: Whether or not the network and its variables should be reused. To be able to reuse 'scope' must be given. Returns: net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. end_points: A dictionary from components of the network to the corresponding activation.<|endoftext|>
d9502fac17347ac98f5bd87da1309197b6cfae4e05adea4ed6afe8f4bb7b8233
def __init__(self, rng, input, n_in, n_out, W=None, b=None): ' Initialize the parameters of the logistic regression\n\n :type input: theano.tensor.TensorType\n :param input: symbolic variable that describes the input of the\n architecture (one minibatch)\n\n :type n_in: int\n :param n_in: number of input units, the dimension of the space in\n which the datapoints lie\n\n :type n_out: int\n :param n_out: number of output units, the dimension of the space in\n which the labels lie\n\n ' if (W is None): W = theano.shared(value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX), name='W', borrow=True) self.W = W if (b is None): b = theano.shared(value=numpy.zeros((n_out,), dtype=theano.config.floatX), name='b', borrow=True) self.b = b self.L1 = abs(self.W).sum() self.L2_sqr = (self.W ** 2).sum() self.p_y_given_x = T.nnet.softmax((T.dot(input, self.W) + self.b)) self.y_pred = T.argmax(self.p_y_given_x, axis=1) self.params = [self.W, self.b] self.input = input
Initialize the parameters of the logistic regression :type input: theano.tensor.TensorType :param input: symbolic variable that describes the input of the architecture (one minibatch) :type n_in: int :param n_in: number of input units, the dimension of the space in which the datapoints lie :type n_out: int :param n_out: number of output units, the dimension of the space in which the labels lie
tools/DependencyReordering/nnAdapt/models/logistic_sgd_nll_zeros.py
__init__
nusnlp/neuralreord-aaai2017
2
python
def __init__(self, rng, input, n_in, n_out, W=None, b=None): ' Initialize the parameters of the logistic regression\n\n :type input: theano.tensor.TensorType\n :param input: symbolic variable that describes the input of the\n architecture (one minibatch)\n\n :type n_in: int\n :param n_in: number of input units, the dimension of the space in\n which the datapoints lie\n\n :type n_out: int\n :param n_out: number of output units, the dimension of the space in\n which the labels lie\n\n ' if (W is None): W = theano.shared(value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX), name='W', borrow=True) self.W = W if (b is None): b = theano.shared(value=numpy.zeros((n_out,), dtype=theano.config.floatX), name='b', borrow=True) self.b = b self.L1 = abs(self.W).sum() self.L2_sqr = (self.W ** 2).sum() self.p_y_given_x = T.nnet.softmax((T.dot(input, self.W) + self.b)) self.y_pred = T.argmax(self.p_y_given_x, axis=1) self.params = [self.W, self.b] self.input = input
def __init__(self, rng, input, n_in, n_out, W=None, b=None): ' Initialize the parameters of the logistic regression\n\n :type input: theano.tensor.TensorType\n :param input: symbolic variable that describes the input of the\n architecture (one minibatch)\n\n :type n_in: int\n :param n_in: number of input units, the dimension of the space in\n which the datapoints lie\n\n :type n_out: int\n :param n_out: number of output units, the dimension of the space in\n which the labels lie\n\n ' if (W is None): W = theano.shared(value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX), name='W', borrow=True) self.W = W if (b is None): b = theano.shared(value=numpy.zeros((n_out,), dtype=theano.config.floatX), name='b', borrow=True) self.b = b self.L1 = abs(self.W).sum() self.L2_sqr = (self.W ** 2).sum() self.p_y_given_x = T.nnet.softmax((T.dot(input, self.W) + self.b)) self.y_pred = T.argmax(self.p_y_given_x, axis=1) self.params = [self.W, self.b] self.input = input<|docstring|>Initialize the parameters of the logistic regression :type input: theano.tensor.TensorType :param input: symbolic variable that describes the input of the architecture (one minibatch) :type n_in: int :param n_in: number of input units, the dimension of the space in which the datapoints lie :type n_out: int :param n_out: number of output units, the dimension of the space in which the labels lie<|endoftext|>
25b062defe18976f537a1d94b3667b154681afe4fd901c6a2d4fc598848d8498
def negative_log_likelihood(self, y): 'Return the mean of the negative log-likelihood of the prediction\n of this model under a given target distribution.\n\n .. math::\n\n \x0crac{1}{|\\mathcal{D}|} \\mathcal{L} (\theta=\\{W,b\\}, \\mathcal{D}) =\n \x0crac{1}{|\\mathcal{D}|} \\sum_{i=0}^{|\\mathcal{D}|}\n \\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\\n \\ell (\theta=\\{W,b\\}, \\mathcal{D})\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vector that gives for each example the\n correct label\n\n Note: we use the mean instead of the sum so that\n the learning rate is less dependent on the batch size\n ' return (- T.mean(T.log(self.p_y_given_x)[(T.arange(y.shape[0]), y)]))
Return the mean of the negative log-likelihood of the prediction of this model under a given target distribution. .. math:: rac{1}{|\mathcal{D}|} \mathcal{L} ( heta=\{W,b\}, \mathcal{D}) = rac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \ \ell ( heta=\{W,b\}, \mathcal{D}) :type y: theano.tensor.TensorType :param y: corresponds to a vector that gives for each example the correct label Note: we use the mean instead of the sum so that the learning rate is less dependent on the batch size
tools/DependencyReordering/nnAdapt/models/logistic_sgd_nll_zeros.py
negative_log_likelihood
nusnlp/neuralreord-aaai2017
2
python
def negative_log_likelihood(self, y): 'Return the mean of the negative log-likelihood of the prediction\n of this model under a given target distribution.\n\n .. math::\n\n \x0crac{1}{|\\mathcal{D}|} \\mathcal{L} (\theta=\\{W,b\\}, \\mathcal{D}) =\n \x0crac{1}{|\\mathcal{D}|} \\sum_{i=0}^{|\\mathcal{D}|}\n \\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\\n \\ell (\theta=\\{W,b\\}, \\mathcal{D})\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vector that gives for each example the\n correct label\n\n Note: we use the mean instead of the sum so that\n the learning rate is less dependent on the batch size\n ' return (- T.mean(T.log(self.p_y_given_x)[(T.arange(y.shape[0]), y)]))
def negative_log_likelihood(self, y): 'Return the mean of the negative log-likelihood of the prediction\n of this model under a given target distribution.\n\n .. math::\n\n \x0crac{1}{|\\mathcal{D}|} \\mathcal{L} (\theta=\\{W,b\\}, \\mathcal{D}) =\n \x0crac{1}{|\\mathcal{D}|} \\sum_{i=0}^{|\\mathcal{D}|}\n \\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\\n \\ell (\theta=\\{W,b\\}, \\mathcal{D})\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vector that gives for each example the\n correct label\n\n Note: we use the mean instead of the sum so that\n the learning rate is less dependent on the batch size\n ' return (- T.mean(T.log(self.p_y_given_x)[(T.arange(y.shape[0]), y)]))<|docstring|>Return the mean of the negative log-likelihood of the prediction of this model under a given target distribution. .. math:: rac{1}{|\mathcal{D}|} \mathcal{L} ( heta=\{W,b\}, \mathcal{D}) = rac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \ \ell ( heta=\{W,b\}, \mathcal{D}) :type y: theano.tensor.TensorType :param y: corresponds to a vector that gives for each example the correct label Note: we use the mean instead of the sum so that the learning rate is less dependent on the batch size<|endoftext|>
fb8f1d470424e0d21587f6b6f1554228a68af5e79bb411e1db67d292dcb1a667
def errors(self, y): 'Return a float representing the number of errors in the minibatch\n over the total number of examples of the minibatch ; zero one\n loss over the size of the minibatch\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vector that gives for each example the\n correct label\n ' if (y.ndim != self.y_pred.ndim): raise TypeError('y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type)) if y.dtype.startswith('int'): return T.mean(T.neq(self.y_pred, y)) else: raise NotImplementedError()
Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch ; zero one loss over the size of the minibatch :type y: theano.tensor.TensorType :param y: corresponds to a vector that gives for each example the correct label
tools/DependencyReordering/nnAdapt/models/logistic_sgd_nll_zeros.py
errors
nusnlp/neuralreord-aaai2017
2
python
def errors(self, y): 'Return a float representing the number of errors in the minibatch\n over the total number of examples of the minibatch ; zero one\n loss over the size of the minibatch\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vector that gives for each example the\n correct label\n ' if (y.ndim != self.y_pred.ndim): raise TypeError('y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type)) if y.dtype.startswith('int'): return T.mean(T.neq(self.y_pred, y)) else: raise NotImplementedError()
def errors(self, y): 'Return a float representing the number of errors in the minibatch\n over the total number of examples of the minibatch ; zero one\n loss over the size of the minibatch\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vector that gives for each example the\n correct label\n ' if (y.ndim != self.y_pred.ndim): raise TypeError('y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type)) if y.dtype.startswith('int'): return T.mean(T.neq(self.y_pred, y)) else: raise NotImplementedError()<|docstring|>Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch ; zero one loss over the size of the minibatch :type y: theano.tensor.TensorType :param y: corresponds to a vector that gives for each example the correct label<|endoftext|>
cc74973cbc3e0156709841f966a4b390288a3582c84eec64f2d366c5845e038f
def PLUGIN_ENTRY(): 'Plugin entry point.' return IDAgrapPlugin()
Plugin entry point.
src/IDA/grap/grap.py
PLUGIN_ENTRY
AirbusCyber/grap
171
python
def PLUGIN_ENTRY(): return IDAgrapPlugin()
def PLUGIN_ENTRY(): return IDAgrapPlugin()<|docstring|>Plugin entry point.<|endoftext|>
beea35bfaff3cecf4ab9d6858cdc3472b799d95d2aeeeba62c4917933eca83dc
def init(self): 'Initialization of the IDAgrap plugin.' return idaapi.PLUGIN_KEEP
Initialization of the IDAgrap plugin.
src/IDA/grap/grap.py
init
AirbusCyber/grap
171
python
def init(self): return idaapi.PLUGIN_KEEP
def init(self): return idaapi.PLUGIN_KEEP<|docstring|>Initialization of the IDAgrap plugin.<|endoftext|>
f61f8c642489815c100a7edba92431738d17c7c06b762914dc59fabdd4ca322f
def term(self): 'Exit of the IDAgrap plugin.' pass
Exit of the IDAgrap plugin.
src/IDA/grap/grap.py
term
AirbusCyber/grap
171
python
def term(self): pass
def term(self): pass<|docstring|>Exit of the IDAgrap plugin.<|endoftext|>
8a88543e5b5b465274b400750be0fd4be88e13c10dc4877be010e8eabc633dc0
def run(self, arg): 'Core of the IDAgrap plugin.\n\n Args:\n arg: Plugin argument.\n ' try: auto_wait() except: Wait() form = IDAgrapForm() form.Show() return
Core of the IDAgrap plugin. Args: arg: Plugin argument.
src/IDA/grap/grap.py
run
AirbusCyber/grap
171
python
def run(self, arg): 'Core of the IDAgrap plugin.\n\n Args:\n arg: Plugin argument.\n ' try: auto_wait() except: Wait() form = IDAgrapForm() form.Show() return
def run(self, arg): 'Core of the IDAgrap plugin.\n\n Args:\n arg: Plugin argument.\n ' try: auto_wait() except: Wait() form = IDAgrapForm() form.Show() return<|docstring|>Core of the IDAgrap plugin. Args: arg: Plugin argument.<|endoftext|>
145c2b5c427c7b2f8d5cfc9312bb3c4f064fd42afec5676a3f2fe244c1491bf2
def persist_binpickle(model, dir=None, file=None): '\n Persist a model using binpickle.\n\n Args:\n model: The model to persist.\n dir: The temporary directory for persisting the model object.\n file: The file in which to save the object.\n\n Returns:\n PersistedModel: The persisted object.\n ' if (file is not None): path = pathlib.Path(file) else: if (dir is None): dir = os.environ.get('LK_TEMP_DIR', None) (fd, path) = tempfile.mkstemp(suffix='.bpk', prefix='lkpy-', dir=dir) os.close(fd) path = pathlib.Path(path) _log.debug('persisting %s to %s', model, path) with binpickle.BinPickler.mappable(path) as bp, sharing_mode(): bp.dump(model) return BPKPersisted(path)
Persist a model using binpickle. Args: model: The model to persist. dir: The temporary directory for persisting the model object. file: The file in which to save the object. Returns: PersistedModel: The persisted object.
lenskit/sharing/binpickle.py
persist_binpickle
mky2018/lkpy
210
python
def persist_binpickle(model, dir=None, file=None): '\n Persist a model using binpickle.\n\n Args:\n model: The model to persist.\n dir: The temporary directory for persisting the model object.\n file: The file in which to save the object.\n\n Returns:\n PersistedModel: The persisted object.\n ' if (file is not None): path = pathlib.Path(file) else: if (dir is None): dir = os.environ.get('LK_TEMP_DIR', None) (fd, path) = tempfile.mkstemp(suffix='.bpk', prefix='lkpy-', dir=dir) os.close(fd) path = pathlib.Path(path) _log.debug('persisting %s to %s', model, path) with binpickle.BinPickler.mappable(path) as bp, sharing_mode(): bp.dump(model) return BPKPersisted(path)
def persist_binpickle(model, dir=None, file=None): '\n Persist a model using binpickle.\n\n Args:\n model: The model to persist.\n dir: The temporary directory for persisting the model object.\n file: The file in which to save the object.\n\n Returns:\n PersistedModel: The persisted object.\n ' if (file is not None): path = pathlib.Path(file) else: if (dir is None): dir = os.environ.get('LK_TEMP_DIR', None) (fd, path) = tempfile.mkstemp(suffix='.bpk', prefix='lkpy-', dir=dir) os.close(fd) path = pathlib.Path(path) _log.debug('persisting %s to %s', model, path) with binpickle.BinPickler.mappable(path) as bp, sharing_mode(): bp.dump(model) return BPKPersisted(path)<|docstring|>Persist a model using binpickle. Args: model: The model to persist. dir: The temporary directory for persisting the model object. file: The file in which to save the object. Returns: PersistedModel: The persisted object.<|endoftext|>
b02452ebd125aa88e0f3d9e80f49a7ebda7e771a9f320e671a7b92f0faca77e8
def import_from_labelbox(dataset, json_path, label_prefix=None, download_dir=None, labelbox_id_field='labelbox_id'): 'Imports the labels from the Labelbox project into the FiftyOne dataset.\n\n The ``labelbox_id_field`` of the FiftyOne samples are used to associate the\n corresponding Labelbox labels.\n\n If a ``download_dir`` is provided, any Labelbox IDs with no matching\n FiftyOne sample are added to the FiftyOne dataset, and their media is\n downloaded into ``download_dir``.\n\n The provided ``json_path`` should contain a JSON file in the following\n format::\n\n [\n {\n "DataRow ID": <labelbox-id>,\n "Labeled Data": <url-or-None>,\n "Label": {...}\n }\n ]\n\n When importing image labels, the ``Label`` field should contain a dict of\n `Labelbox image labels <https://labelbox.com/docs/exporting-data/export-format-detail#images>`_::\n\n {\n "objects": [...],\n "classifications": [...]\n }\n\n When importing video labels, the ``Label`` field should contain a dict as\n follows::\n\n {\n "frames": <url-or-filepath>\n }\n\n where the ``frames`` field can either contain a URL, in which case the\n file is downloaded from the web, or the path to NDJSON file on disk of\n `Labelbox video labels <https://labelbox.com/docs/exporting-data/export-format-detail#video>`_::\n\n {"frameNumber": 1, "objects": [...], "classifications": [...]}\n {"frameNumber": 2, "objects": [...], "classifications": [...]}\n ...\n\n Args:\n dataset: a :class:`fiftyone.core.dataset.Dataset`\n json_path: the path to the Labelbox JSON export to load\n label_prefix (None): a prefix to prepend to the sample label field(s)\n that are created, separated by an underscore\n download_dir (None): a directory into which to download the media for\n any Labelbox IDs with no corresponding sample with the matching\n ``labelbox_id_field`` value. 
This can be omitted if all IDs are\n already present or you do not wish to download media and add new\n samples\n labelbox_id_field ("labelbox_id"): the sample field to lookup/store the\n IDs of the Labelbox DataRows\n ' if download_dir: filename_maker = fou.UniqueFilenameMaker(output_dir=download_dir) if (labelbox_id_field not in dataset.get_field_schema()): dataset.add_sample_field(labelbox_id_field, fof.StringField) id_map = {k: v for (k, v) in zip(*dataset.values([labelbox_id_field, 'id']))} if label_prefix: label_key = (lambda k: ((label_prefix + '_') + k)) else: label_key = (lambda k: k) is_video = (dataset.media_type == fomm.VIDEO) d_list = etas.read_json(json_path) with fou.ProgressBar() as pb: for d in pb(d_list): labelbox_id = d['DataRow ID'] if (labelbox_id in id_map): sample = dataset[id_map[labelbox_id]] elif download_dir: image_url = d['Labeled Data'] filepath = filename_maker.get_output_path(image_url) etaw.download_file(image_url, path=filepath, quiet=True) sample = fos.Sample(filepath=filepath) dataset.add_sample(sample) else: logger.info("Skipping labels for unknown Labelbox ID '%s'; provide a `download_dir` if you wish to download media and create samples for new media", labelbox_id) continue if (sample.metadata is None): if is_video: sample.metadata = fom.VideoMetadata.build_for(sample.filepath) else: sample.metadata = fom.ImageMetadata.build_for(sample.filepath) if is_video: frame_size = (sample.metadata.frame_width, sample.metadata.frame_height) frames = _parse_video_labels(d['Label'], frame_size) sample.frames.merge({frame_number: {label_key(fname): flabel for (fname, flabel) in frame_dict.items()} for (frame_number, frame_dict) in frames.items()}) else: frame_size = (sample.metadata.width, sample.metadata.height) labels_dict = _parse_image_labels(d['Label'], frame_size) sample.update_fields({label_key(k): v for (k, v) in labels_dict.items()}) sample.save()
Imports the labels from the Labelbox project into the FiftyOne dataset. The ``labelbox_id_field`` of the FiftyOne samples are used to associate the corresponding Labelbox labels. If a ``download_dir`` is provided, any Labelbox IDs with no matching FiftyOne sample are added to the FiftyOne dataset, and their media is downloaded into ``download_dir``. The provided ``json_path`` should contain a JSON file in the following format:: [ { "DataRow ID": <labelbox-id>, "Labeled Data": <url-or-None>, "Label": {...} } ] When importing image labels, the ``Label`` field should contain a dict of `Labelbox image labels <https://labelbox.com/docs/exporting-data/export-format-detail#images>`_:: { "objects": [...], "classifications": [...] } When importing video labels, the ``Label`` field should contain a dict as follows:: { "frames": <url-or-filepath> } where the ``frames`` field can either contain a URL, in which case the file is downloaded from the web, or the path to NDJSON file on disk of `Labelbox video labels <https://labelbox.com/docs/exporting-data/export-format-detail#video>`_:: {"frameNumber": 1, "objects": [...], "classifications": [...]} {"frameNumber": 2, "objects": [...], "classifications": [...]} ... Args: dataset: a :class:`fiftyone.core.dataset.Dataset` json_path: the path to the Labelbox JSON export to load label_prefix (None): a prefix to prepend to the sample label field(s) that are created, separated by an underscore download_dir (None): a directory into which to download the media for any Labelbox IDs with no corresponding sample with the matching ``labelbox_id_field`` value. This can be omitted if all IDs are already present or you do not wish to download media and add new samples labelbox_id_field ("labelbox_id"): the sample field to lookup/store the IDs of the Labelbox DataRows
fiftyone/utils/labelbox.py
import_from_labelbox
stbdang/fiftyone
3
python
def import_from_labelbox(dataset, json_path, label_prefix=None, download_dir=None, labelbox_id_field='labelbox_id'): 'Imports the labels from the Labelbox project into the FiftyOne dataset.\n\n The ``labelbox_id_field`` of the FiftyOne samples are used to associate the\n corresponding Labelbox labels.\n\n If a ``download_dir`` is provided, any Labelbox IDs with no matching\n FiftyOne sample are added to the FiftyOne dataset, and their media is\n downloaded into ``download_dir``.\n\n The provided ``json_path`` should contain a JSON file in the following\n format::\n\n [\n {\n "DataRow ID": <labelbox-id>,\n "Labeled Data": <url-or-None>,\n "Label": {...}\n }\n ]\n\n When importing image labels, the ``Label`` field should contain a dict of\n `Labelbox image labels <https://labelbox.com/docs/exporting-data/export-format-detail#images>`_::\n\n {\n "objects": [...],\n "classifications": [...]\n }\n\n When importing video labels, the ``Label`` field should contain a dict as\n follows::\n\n {\n "frames": <url-or-filepath>\n }\n\n where the ``frames`` field can either contain a URL, in which case the\n file is downloaded from the web, or the path to NDJSON file on disk of\n `Labelbox video labels <https://labelbox.com/docs/exporting-data/export-format-detail#video>`_::\n\n {"frameNumber": 1, "objects": [...], "classifications": [...]}\n {"frameNumber": 2, "objects": [...], "classifications": [...]}\n ...\n\n Args:\n dataset: a :class:`fiftyone.core.dataset.Dataset`\n json_path: the path to the Labelbox JSON export to load\n label_prefix (None): a prefix to prepend to the sample label field(s)\n that are created, separated by an underscore\n download_dir (None): a directory into which to download the media for\n any Labelbox IDs with no corresponding sample with the matching\n ``labelbox_id_field`` value. 
This can be omitted if all IDs are\n already present or you do not wish to download media and add new\n samples\n labelbox_id_field ("labelbox_id"): the sample field to lookup/store the\n IDs of the Labelbox DataRows\n ' if download_dir: filename_maker = fou.UniqueFilenameMaker(output_dir=download_dir) if (labelbox_id_field not in dataset.get_field_schema()): dataset.add_sample_field(labelbox_id_field, fof.StringField) id_map = {k: v for (k, v) in zip(*dataset.values([labelbox_id_field, 'id']))} if label_prefix: label_key = (lambda k: ((label_prefix + '_') + k)) else: label_key = (lambda k: k) is_video = (dataset.media_type == fomm.VIDEO) d_list = etas.read_json(json_path) with fou.ProgressBar() as pb: for d in pb(d_list): labelbox_id = d['DataRow ID'] if (labelbox_id in id_map): sample = dataset[id_map[labelbox_id]] elif download_dir: image_url = d['Labeled Data'] filepath = filename_maker.get_output_path(image_url) etaw.download_file(image_url, path=filepath, quiet=True) sample = fos.Sample(filepath=filepath) dataset.add_sample(sample) else: logger.info("Skipping labels for unknown Labelbox ID '%s'; provide a `download_dir` if you wish to download media and create samples for new media", labelbox_id) continue if (sample.metadata is None): if is_video: sample.metadata = fom.VideoMetadata.build_for(sample.filepath) else: sample.metadata = fom.ImageMetadata.build_for(sample.filepath) if is_video: frame_size = (sample.metadata.frame_width, sample.metadata.frame_height) frames = _parse_video_labels(d['Label'], frame_size) sample.frames.merge({frame_number: {label_key(fname): flabel for (fname, flabel) in frame_dict.items()} for (frame_number, frame_dict) in frames.items()}) else: frame_size = (sample.metadata.width, sample.metadata.height) labels_dict = _parse_image_labels(d['Label'], frame_size) sample.update_fields({label_key(k): v for (k, v) in labels_dict.items()}) sample.save()
def import_from_labelbox(dataset, json_path, label_prefix=None, download_dir=None, labelbox_id_field='labelbox_id'): 'Imports the labels from the Labelbox project into the FiftyOne dataset.\n\n The ``labelbox_id_field`` of the FiftyOne samples are used to associate the\n corresponding Labelbox labels.\n\n If a ``download_dir`` is provided, any Labelbox IDs with no matching\n FiftyOne sample are added to the FiftyOne dataset, and their media is\n downloaded into ``download_dir``.\n\n The provided ``json_path`` should contain a JSON file in the following\n format::\n\n [\n {\n "DataRow ID": <labelbox-id>,\n "Labeled Data": <url-or-None>,\n "Label": {...}\n }\n ]\n\n When importing image labels, the ``Label`` field should contain a dict of\n `Labelbox image labels <https://labelbox.com/docs/exporting-data/export-format-detail#images>`_::\n\n {\n "objects": [...],\n "classifications": [...]\n }\n\n When importing video labels, the ``Label`` field should contain a dict as\n follows::\n\n {\n "frames": <url-or-filepath>\n }\n\n where the ``frames`` field can either contain a URL, in which case the\n file is downloaded from the web, or the path to NDJSON file on disk of\n `Labelbox video labels <https://labelbox.com/docs/exporting-data/export-format-detail#video>`_::\n\n {"frameNumber": 1, "objects": [...], "classifications": [...]}\n {"frameNumber": 2, "objects": [...], "classifications": [...]}\n ...\n\n Args:\n dataset: a :class:`fiftyone.core.dataset.Dataset`\n json_path: the path to the Labelbox JSON export to load\n label_prefix (None): a prefix to prepend to the sample label field(s)\n that are created, separated by an underscore\n download_dir (None): a directory into which to download the media for\n any Labelbox IDs with no corresponding sample with the matching\n ``labelbox_id_field`` value. 
This can be omitted if all IDs are\n already present or you do not wish to download media and add new\n samples\n labelbox_id_field ("labelbox_id"): the sample field to lookup/store the\n IDs of the Labelbox DataRows\n ' if download_dir: filename_maker = fou.UniqueFilenameMaker(output_dir=download_dir) if (labelbox_id_field not in dataset.get_field_schema()): dataset.add_sample_field(labelbox_id_field, fof.StringField) id_map = {k: v for (k, v) in zip(*dataset.values([labelbox_id_field, 'id']))} if label_prefix: label_key = (lambda k: ((label_prefix + '_') + k)) else: label_key = (lambda k: k) is_video = (dataset.media_type == fomm.VIDEO) d_list = etas.read_json(json_path) with fou.ProgressBar() as pb: for d in pb(d_list): labelbox_id = d['DataRow ID'] if (labelbox_id in id_map): sample = dataset[id_map[labelbox_id]] elif download_dir: image_url = d['Labeled Data'] filepath = filename_maker.get_output_path(image_url) etaw.download_file(image_url, path=filepath, quiet=True) sample = fos.Sample(filepath=filepath) dataset.add_sample(sample) else: logger.info("Skipping labels for unknown Labelbox ID '%s'; provide a `download_dir` if you wish to download media and create samples for new media", labelbox_id) continue if (sample.metadata is None): if is_video: sample.metadata = fom.VideoMetadata.build_for(sample.filepath) else: sample.metadata = fom.ImageMetadata.build_for(sample.filepath) if is_video: frame_size = (sample.metadata.frame_width, sample.metadata.frame_height) frames = _parse_video_labels(d['Label'], frame_size) sample.frames.merge({frame_number: {label_key(fname): flabel for (fname, flabel) in frame_dict.items()} for (frame_number, frame_dict) in frames.items()}) else: frame_size = (sample.metadata.width, sample.metadata.height) labels_dict = _parse_image_labels(d['Label'], frame_size) sample.update_fields({label_key(k): v for (k, v) in labels_dict.items()}) sample.save()<|docstring|>Imports the labels from the Labelbox project into the FiftyOne dataset. 
The ``labelbox_id_field`` of the FiftyOne samples are used to associate the corresponding Labelbox labels. If a ``download_dir`` is provided, any Labelbox IDs with no matching FiftyOne sample are added to the FiftyOne dataset, and their media is downloaded into ``download_dir``. The provided ``json_path`` should contain a JSON file in the following format:: [ { "DataRow ID": <labelbox-id>, "Labeled Data": <url-or-None>, "Label": {...} } ] When importing image labels, the ``Label`` field should contain a dict of `Labelbox image labels <https://labelbox.com/docs/exporting-data/export-format-detail#images>`_:: { "objects": [...], "classifications": [...] } When importing video labels, the ``Label`` field should contain a dict as follows:: { "frames": <url-or-filepath> } where the ``frames`` field can either contain a URL, in which case the file is downloaded from the web, or the path to NDJSON file on disk of `Labelbox video labels <https://labelbox.com/docs/exporting-data/export-format-detail#video>`_:: {"frameNumber": 1, "objects": [...], "classifications": [...]} {"frameNumber": 2, "objects": [...], "classifications": [...]} ... Args: dataset: a :class:`fiftyone.core.dataset.Dataset` json_path: the path to the Labelbox JSON export to load label_prefix (None): a prefix to prepend to the sample label field(s) that are created, separated by an underscore download_dir (None): a directory into which to download the media for any Labelbox IDs with no corresponding sample with the matching ``labelbox_id_field`` value. This can be omitted if all IDs are already present or you do not wish to download media and add new samples labelbox_id_field ("labelbox_id"): the sample field to lookup/store the IDs of the Labelbox DataRows<|endoftext|>
6352bd935877320259fc4339199546654fec4b182d60b3c27417c6ece91c2f5a
def export_to_labelbox(sample_collection, ndjson_path, video_labels_dir=None, labelbox_id_field='labelbox_id', label_field=None, frame_labels_field=None): 'Exports labels from the FiftyOne samples to Labelbox format.\n\n This function is useful for loading predictions into Labelbox for\n `model-assisted labeling <https://labelbox.com/docs/automation/model-assisted-labeling>`_.\n\n You can use :meth:`upload_labels_to_labelbox` to upload the exported labels\n to a Labelbox project.\n\n You can use :meth:`upload_media_to_labelbox` to upload sample media to\n Labelbox and populate the ``labelbox_id_field`` field, if necessary.\n\n The IDs of the Labelbox DataRows corresponding to each sample must be\n stored in the ``labelbox_id_field`` of the samples. Any samples with no\n value in ``labelbox_id_field`` will be skipped.\n\n When exporting frame labels for video datasets, the ``frames`` key of the\n exported labels will contain the paths on disk to per-sample NDJSON files\n that are written to ``video_labels_dir`` as follows::\n\n video_labels_dir/\n <labelbox-id1>.json\n <labelbox-id2>.json\n ...\n\n where each NDJSON file contains the frame labels for the video with the\n corresponding Labelbox ID.\n\n Args:\n sample_collection: a\n :class:`fiftyone.core.collections.SampleCollection`\n ndjson_path: the path to write an NDJSON export of the labels\n video_labels_dir (None): a directory to write the per-sample video\n labels. Only applicable for video datasets\n labelbox_id_field ("labelbox_id"): the sample field to lookup/store the\n IDs of the Labelbox DataRows\n label_field (None): optional label field(s) to export. 
Can be any of\n the following:\n\n - the name of a label field to export\n - a glob pattern of label field(s) to export\n - a list or tuple of label field(s) to export\n - a dictionary mapping label field names to keys to use when\n constructing the exported labels\n\n By default, no labels are exported\n frame_labels_field (None): optional frame label field(s) to export.\n Only applicable to video datasets. Can be any of the following:\n\n - the name of a frame label field to export\n - a glob pattern of frame label field(s) to export\n - a list or tuple of frame label field(s) to export\n - a dictionary mapping frame label field names to keys to use\n when constructing the exported frame labels\n\n By default, no frame labels are exported\n ' is_video = (sample_collection.media_type == fomm.VIDEO) label_fields = sample_collection._parse_label_field(label_field, allow_coercion=False, force_dict=True, required=False) if is_video: frame_label_fields = sample_collection._parse_frame_labels_field(frame_labels_field, allow_coercion=False, force_dict=True, required=False) if (frame_label_fields and (video_labels_dir is None)): raise ValueError('Must provide `video_labels_dir` when exporting frame labels for video datasets') etau.ensure_empty_file(ndjson_path) with fou.ProgressBar() as pb: for sample in pb(sample_collection): labelbox_id = sample[labelbox_id_field] if (labelbox_id is None): logger.warning("Skipping sample '%s' with no '%s' value", sample.id, labelbox_id_field) continue if (sample.metadata is None): if is_video: metadata = fom.VideoMetadata.build_for(sample.filepath) else: metadata = fom.ImageMetadata.build_for(sample.filepath) sample.metadata = metadata sample.save() if is_video: frame_size = (sample.metadata.frame_width, sample.metadata.frame_height) else: frame_size = (sample.metadata.width, sample.metadata.height) if label_fields: labels_dict = _get_labels(sample, label_fields) annos = _to_labelbox_image_labels(labels_dict, frame_size, labelbox_id) 
etas.write_ndjson(annos, ndjson_path, append=True) if (is_video and frame_label_fields): frames = _get_frame_labels(sample, frame_label_fields) video_annos = _to_labelbox_video_labels(frames, frame_size, labelbox_id) video_labels_path = os.path.join(video_labels_dir, (labelbox_id + '.json')) etas.write_ndjson(video_annos, video_labels_path) anno = _make_video_anno(video_labels_path, data_row_id=labelbox_id) etas.write_ndjson([anno], ndjson_path, append=True)
Exports labels from the FiftyOne samples to Labelbox format. This function is useful for loading predictions into Labelbox for `model-assisted labeling <https://labelbox.com/docs/automation/model-assisted-labeling>`_. You can use :meth:`upload_labels_to_labelbox` to upload the exported labels to a Labelbox project. You can use :meth:`upload_media_to_labelbox` to upload sample media to Labelbox and populate the ``labelbox_id_field`` field, if necessary. The IDs of the Labelbox DataRows corresponding to each sample must be stored in the ``labelbox_id_field`` of the samples. Any samples with no value in ``labelbox_id_field`` will be skipped. When exporting frame labels for video datasets, the ``frames`` key of the exported labels will contain the paths on disk to per-sample NDJSON files that are written to ``video_labels_dir`` as follows:: video_labels_dir/ <labelbox-id1>.json <labelbox-id2>.json ... where each NDJSON file contains the frame labels for the video with the corresponding Labelbox ID. Args: sample_collection: a :class:`fiftyone.core.collections.SampleCollection` ndjson_path: the path to write an NDJSON export of the labels video_labels_dir (None): a directory to write the per-sample video labels. Only applicable for video datasets labelbox_id_field ("labelbox_id"): the sample field to lookup/store the IDs of the Labelbox DataRows label_field (None): optional label field(s) to export. Can be any of the following: - the name of a label field to export - a glob pattern of label field(s) to export - a list or tuple of label field(s) to export - a dictionary mapping label field names to keys to use when constructing the exported labels By default, no labels are exported frame_labels_field (None): optional frame label field(s) to export. Only applicable to video datasets. 
Can be any of the following: - the name of a frame label field to export - a glob pattern of frame label field(s) to export - a list or tuple of frame label field(s) to export - a dictionary mapping frame label field names to keys to use when constructing the exported frame labels By default, no frame labels are exported
fiftyone/utils/labelbox.py
export_to_labelbox
stbdang/fiftyone
3
python
def export_to_labelbox(sample_collection, ndjson_path, video_labels_dir=None, labelbox_id_field='labelbox_id', label_field=None, frame_labels_field=None): 'Exports labels from the FiftyOne samples to Labelbox format.\n\n This function is useful for loading predictions into Labelbox for\n `model-assisted labeling <https://labelbox.com/docs/automation/model-assisted-labeling>`_.\n\n You can use :meth:`upload_labels_to_labelbox` to upload the exported labels\n to a Labelbox project.\n\n You can use :meth:`upload_media_to_labelbox` to upload sample media to\n Labelbox and populate the ``labelbox_id_field`` field, if necessary.\n\n The IDs of the Labelbox DataRows corresponding to each sample must be\n stored in the ``labelbox_id_field`` of the samples. Any samples with no\n value in ``labelbox_id_field`` will be skipped.\n\n When exporting frame labels for video datasets, the ``frames`` key of the\n exported labels will contain the paths on disk to per-sample NDJSON files\n that are written to ``video_labels_dir`` as follows::\n\n video_labels_dir/\n <labelbox-id1>.json\n <labelbox-id2>.json\n ...\n\n where each NDJSON file contains the frame labels for the video with the\n corresponding Labelbox ID.\n\n Args:\n sample_collection: a\n :class:`fiftyone.core.collections.SampleCollection`\n ndjson_path: the path to write an NDJSON export of the labels\n video_labels_dir (None): a directory to write the per-sample video\n labels. Only applicable for video datasets\n labelbox_id_field ("labelbox_id"): the sample field to lookup/store the\n IDs of the Labelbox DataRows\n label_field (None): optional label field(s) to export. 
Can be any of\n the following:\n\n - the name of a label field to export\n - a glob pattern of label field(s) to export\n - a list or tuple of label field(s) to export\n - a dictionary mapping label field names to keys to use when\n constructing the exported labels\n\n By default, no labels are exported\n frame_labels_field (None): optional frame label field(s) to export.\n Only applicable to video datasets. Can be any of the following:\n\n - the name of a frame label field to export\n - a glob pattern of frame label field(s) to export\n - a list or tuple of frame label field(s) to export\n - a dictionary mapping frame label field names to keys to use\n when constructing the exported frame labels\n\n By default, no frame labels are exported\n ' is_video = (sample_collection.media_type == fomm.VIDEO) label_fields = sample_collection._parse_label_field(label_field, allow_coercion=False, force_dict=True, required=False) if is_video: frame_label_fields = sample_collection._parse_frame_labels_field(frame_labels_field, allow_coercion=False, force_dict=True, required=False) if (frame_label_fields and (video_labels_dir is None)): raise ValueError('Must provide `video_labels_dir` when exporting frame labels for video datasets') etau.ensure_empty_file(ndjson_path) with fou.ProgressBar() as pb: for sample in pb(sample_collection): labelbox_id = sample[labelbox_id_field] if (labelbox_id is None): logger.warning("Skipping sample '%s' with no '%s' value", sample.id, labelbox_id_field) continue if (sample.metadata is None): if is_video: metadata = fom.VideoMetadata.build_for(sample.filepath) else: metadata = fom.ImageMetadata.build_for(sample.filepath) sample.metadata = metadata sample.save() if is_video: frame_size = (sample.metadata.frame_width, sample.metadata.frame_height) else: frame_size = (sample.metadata.width, sample.metadata.height) if label_fields: labels_dict = _get_labels(sample, label_fields) annos = _to_labelbox_image_labels(labels_dict, frame_size, labelbox_id) 
etas.write_ndjson(annos, ndjson_path, append=True) if (is_video and frame_label_fields): frames = _get_frame_labels(sample, frame_label_fields) video_annos = _to_labelbox_video_labels(frames, frame_size, labelbox_id) video_labels_path = os.path.join(video_labels_dir, (labelbox_id + '.json')) etas.write_ndjson(video_annos, video_labels_path) anno = _make_video_anno(video_labels_path, data_row_id=labelbox_id) etas.write_ndjson([anno], ndjson_path, append=True)
def export_to_labelbox(sample_collection, ndjson_path, video_labels_dir=None, labelbox_id_field='labelbox_id', label_field=None, frame_labels_field=None): 'Exports labels from the FiftyOne samples to Labelbox format.\n\n This function is useful for loading predictions into Labelbox for\n `model-assisted labeling <https://labelbox.com/docs/automation/model-assisted-labeling>`_.\n\n You can use :meth:`upload_labels_to_labelbox` to upload the exported labels\n to a Labelbox project.\n\n You can use :meth:`upload_media_to_labelbox` to upload sample media to\n Labelbox and populate the ``labelbox_id_field`` field, if necessary.\n\n The IDs of the Labelbox DataRows corresponding to each sample must be\n stored in the ``labelbox_id_field`` of the samples. Any samples with no\n value in ``labelbox_id_field`` will be skipped.\n\n When exporting frame labels for video datasets, the ``frames`` key of the\n exported labels will contain the paths on disk to per-sample NDJSON files\n that are written to ``video_labels_dir`` as follows::\n\n video_labels_dir/\n <labelbox-id1>.json\n <labelbox-id2>.json\n ...\n\n where each NDJSON file contains the frame labels for the video with the\n corresponding Labelbox ID.\n\n Args:\n sample_collection: a\n :class:`fiftyone.core.collections.SampleCollection`\n ndjson_path: the path to write an NDJSON export of the labels\n video_labels_dir (None): a directory to write the per-sample video\n labels. Only applicable for video datasets\n labelbox_id_field ("labelbox_id"): the sample field to lookup/store the\n IDs of the Labelbox DataRows\n label_field (None): optional label field(s) to export. 
Can be any of\n the following:\n\n - the name of a label field to export\n - a glob pattern of label field(s) to export\n - a list or tuple of label field(s) to export\n - a dictionary mapping label field names to keys to use when\n constructing the exported labels\n\n By default, no labels are exported\n frame_labels_field (None): optional frame label field(s) to export.\n Only applicable to video datasets. Can be any of the following:\n\n - the name of a frame label field to export\n - a glob pattern of frame label field(s) to export\n - a list or tuple of frame label field(s) to export\n - a dictionary mapping frame label field names to keys to use\n when constructing the exported frame labels\n\n By default, no frame labels are exported\n ' is_video = (sample_collection.media_type == fomm.VIDEO) label_fields = sample_collection._parse_label_field(label_field, allow_coercion=False, force_dict=True, required=False) if is_video: frame_label_fields = sample_collection._parse_frame_labels_field(frame_labels_field, allow_coercion=False, force_dict=True, required=False) if (frame_label_fields and (video_labels_dir is None)): raise ValueError('Must provide `video_labels_dir` when exporting frame labels for video datasets') etau.ensure_empty_file(ndjson_path) with fou.ProgressBar() as pb: for sample in pb(sample_collection): labelbox_id = sample[labelbox_id_field] if (labelbox_id is None): logger.warning("Skipping sample '%s' with no '%s' value", sample.id, labelbox_id_field) continue if (sample.metadata is None): if is_video: metadata = fom.VideoMetadata.build_for(sample.filepath) else: metadata = fom.ImageMetadata.build_for(sample.filepath) sample.metadata = metadata sample.save() if is_video: frame_size = (sample.metadata.frame_width, sample.metadata.frame_height) else: frame_size = (sample.metadata.width, sample.metadata.height) if label_fields: labels_dict = _get_labels(sample, label_fields) annos = _to_labelbox_image_labels(labels_dict, frame_size, labelbox_id) 
etas.write_ndjson(annos, ndjson_path, append=True) if (is_video and frame_label_fields): frames = _get_frame_labels(sample, frame_label_fields) video_annos = _to_labelbox_video_labels(frames, frame_size, labelbox_id) video_labels_path = os.path.join(video_labels_dir, (labelbox_id + '.json')) etas.write_ndjson(video_annos, video_labels_path) anno = _make_video_anno(video_labels_path, data_row_id=labelbox_id) etas.write_ndjson([anno], ndjson_path, append=True)<|docstring|>Exports labels from the FiftyOne samples to Labelbox format. This function is useful for loading predictions into Labelbox for `model-assisted labeling <https://labelbox.com/docs/automation/model-assisted-labeling>`_. You can use :meth:`upload_labels_to_labelbox` to upload the exported labels to a Labelbox project. You can use :meth:`upload_media_to_labelbox` to upload sample media to Labelbox and populate the ``labelbox_id_field`` field, if necessary. The IDs of the Labelbox DataRows corresponding to each sample must be stored in the ``labelbox_id_field`` of the samples. Any samples with no value in ``labelbox_id_field`` will be skipped. When exporting frame labels for video datasets, the ``frames`` key of the exported labels will contain the paths on disk to per-sample NDJSON files that are written to ``video_labels_dir`` as follows:: video_labels_dir/ <labelbox-id1>.json <labelbox-id2>.json ... where each NDJSON file contains the frame labels for the video with the corresponding Labelbox ID. Args: sample_collection: a :class:`fiftyone.core.collections.SampleCollection` ndjson_path: the path to write an NDJSON export of the labels video_labels_dir (None): a directory to write the per-sample video labels. Only applicable for video datasets labelbox_id_field ("labelbox_id"): the sample field to lookup/store the IDs of the Labelbox DataRows label_field (None): optional label field(s) to export. 
Can be any of the following: - the name of a label field to export - a glob pattern of label field(s) to export - a list or tuple of label field(s) to export - a dictionary mapping label field names to keys to use when constructing the exported labels By default, no labels are exported frame_labels_field (None): optional frame label field(s) to export. Only applicable to video datasets. Can be any of the following: - the name of a frame label field to export - a glob pattern of frame label field(s) to export - a list or tuple of frame label field(s) to export - a dictionary mapping frame label field names to keys to use when constructing the exported frame labels By default, no frame labels are exported<|endoftext|>
e4b05e6c10c275c77636f9aeab6d8b8bfedbb2534f465d8cdf0c441e897f6157
def download_labels_from_labelbox(labelbox_project, outpath=None):
    """Downloads the labels for the given Labelbox project.

    Args:
        labelbox_project: a ``labelbox.schema.project.Project``
        outpath (None): the path to write the JSON export on disk

    Returns:
        ``None`` if an ``outpath`` is provided, or the loaded JSON itself if no
        ``outpath`` is provided
    """
    export_url = labelbox_project.export_labels()

    # When a destination is given, stream the export straight to disk
    if outpath:
        etaw.download_file(export_url, path=outpath)
        return None

    # Otherwise download into memory and parse the JSON payload
    return etas.load_json(etaw.download_file(export_url))
Downloads the labels for the given Labelbox project. Args: labelbox_project: a ``labelbox.schema.project.Project`` outpath (None): the path to write the JSON export on disk Returns: ``None`` if an ``outpath`` is provided, or the loaded JSON itself if no ``outpath`` is provided
fiftyone/utils/labelbox.py
download_labels_from_labelbox
stbdang/fiftyone
3
python
def download_labels_from_labelbox(labelbox_project, outpath=None): 'Downloads the labels for the given Labelbox project.\n\n Args:\n labelbox_project: a ``labelbox.schema.project.Project``\n outpath (None): the path to write the JSON export on disk\n\n Returns:\n ``None`` if an ``outpath`` is provided, or the loaded JSON itself if no\n ``outpath`` is provided\n ' export_url = labelbox_project.export_labels() if outpath: etaw.download_file(export_url, path=outpath) return None labels_bytes = etaw.download_file(export_url) return etas.load_json(labels_bytes)
def download_labels_from_labelbox(labelbox_project, outpath=None): 'Downloads the labels for the given Labelbox project.\n\n Args:\n labelbox_project: a ``labelbox.schema.project.Project``\n outpath (None): the path to write the JSON export on disk\n\n Returns:\n ``None`` if an ``outpath`` is provided, or the loaded JSON itself if no\n ``outpath`` is provided\n ' export_url = labelbox_project.export_labels() if outpath: etaw.download_file(export_url, path=outpath) return None labels_bytes = etaw.download_file(export_url) return etas.load_json(labels_bytes)<|docstring|>Downloads the labels for the given Labelbox project. Args: labelbox_project: a ``labelbox.schema.project.Project`` outpath (None): the path to write the JSON export on disk Returns: ``None`` if an ``outpath`` is provided, or the loaded JSON itself if no ``outpath`` is provided<|endoftext|>
199de5dcb70214bf94dfcfb65ebb4b8d9c10e3ca7bb6ba069849b1957923bb13
def upload_media_to_labelbox(labelbox_dataset, sample_collection, labelbox_id_field='labelbox_id'):
    """Uploads the raw media for the FiftyOne samples to Labelbox.

    The IDs of the Labelbox DataRows that are created are stored in the
    ``labelbox_id_field`` of the samples.

    Args:
        labelbox_dataset: a ``labelbox.schema.dataset.Dataset`` to which to
            add the media
        sample_collection: a
            :class:`fiftyone.core.collections.SampleCollection`
        labelbox_id_field ("labelbox_id"): the sample field in which to store
            the IDs of the Labelbox DataRows
    """
    with fou.ProgressBar() as pb:
        for sample in pb(sample_collection):
            # A missing field raises when accessed; treat any lookup failure
            # as "no existing ID". Use ``except Exception`` rather than a
            # bare ``except`` so KeyboardInterrupt/SystemExit still propagate
            try:
                has_id = sample[labelbox_id_field] is not None
            except Exception:
                has_id = False

            if has_id:
                logger.warning(
                    "Skipping sample '%s' with an existing '%s' value",
                    sample.id,
                    labelbox_id_field,
                )
                continue

            filepath = sample.filepath
            data_row = labelbox_dataset.create_data_row(row_data=filepath)

            # Record the new DataRow ID on the sample
            sample[labelbox_id_field] = data_row.uid
            sample.save()
Uploads the raw media for the FiftyOne samples to Labelbox. The IDs of the Labelbox DataRows that are created are stored in the ``labelbox_id_field`` of the samples. Args: labelbox_dataset: a ``labelbox.schema.dataset.Dataset`` to which to add the media sample_collection: a :class:`fiftyone.core.collections.SampleCollection` labelbox_id_field ("labelbox_id"): the sample field in which to store the IDs of the Labelbox DataRows
fiftyone/utils/labelbox.py
upload_media_to_labelbox
stbdang/fiftyone
3
python
def upload_media_to_labelbox(labelbox_dataset, sample_collection, labelbox_id_field='labelbox_id'): 'Uploads the raw media for the FiftyOne samples to Labelbox.\n\n The IDs of the Labelbox DataRows that are created are stored in the\n ``labelbox_id_field`` of the samples.\n\n Args:\n labelbox_dataset: a ``labelbox.schema.dataset.Dataset`` to which to\n add the media\n sample_collection: a\n :class:`fiftyone.core.collections.SampleCollection`\n labelbox_id_field ("labelbox_id"): the sample field in which to store\n the IDs of the Labelbox DataRows\n ' with fou.ProgressBar() as pb: for sample in pb(sample_collection): try: has_id = (sample[labelbox_id_field] is not None) except: has_id = False if has_id: logger.warning("Skipping sample '%s' with an existing '%s' value", sample.id, labelbox_id_field) continue filepath = sample.filepath data_row = labelbox_dataset.create_data_row(row_data=filepath) sample[labelbox_id_field] = data_row.uid sample.save()
def upload_media_to_labelbox(labelbox_dataset, sample_collection, labelbox_id_field='labelbox_id'): 'Uploads the raw media for the FiftyOne samples to Labelbox.\n\n The IDs of the Labelbox DataRows that are created are stored in the\n ``labelbox_id_field`` of the samples.\n\n Args:\n labelbox_dataset: a ``labelbox.schema.dataset.Dataset`` to which to\n add the media\n sample_collection: a\n :class:`fiftyone.core.collections.SampleCollection`\n labelbox_id_field ("labelbox_id"): the sample field in which to store\n the IDs of the Labelbox DataRows\n ' with fou.ProgressBar() as pb: for sample in pb(sample_collection): try: has_id = (sample[labelbox_id_field] is not None) except: has_id = False if has_id: logger.warning("Skipping sample '%s' with an existing '%s' value", sample.id, labelbox_id_field) continue filepath = sample.filepath data_row = labelbox_dataset.create_data_row(row_data=filepath) sample[labelbox_id_field] = data_row.uid sample.save()<|docstring|>Uploads the raw media for the FiftyOne samples to Labelbox. The IDs of the Labelbox DataRows that are created are stored in the ``labelbox_id_field`` of the samples. Args: labelbox_dataset: a ``labelbox.schema.dataset.Dataset`` to which to add the media sample_collection: a :class:`fiftyone.core.collections.SampleCollection` labelbox_id_field ("labelbox_id"): the sample field in which to store the IDs of the Labelbox DataRows<|endoftext|>
1f98a713cd86420a95d22bfdceef54a5f9590f795bfc0f5f9a9eab684e20549a
def upload_labels_to_labelbox(labelbox_project, annos_or_ndjson_path, batch_size=None):
    """Uploads labels to a Labelbox project.

    Use this function to load predictions into Labelbox for
    `model-assisted labeling <https://labelbox.com/docs/automation/model-assisted-labeling>`_.

    Use :meth:`export_to_labelbox` to export annotations in the format expected
    by this method.

    Args:
        labelbox_project: a ``labelbox.schema.project.Project``
        annos_or_ndjson_path: a list of annotation dicts or the path to an
            NDJSON file on disk containing annotations
        batch_size (None): an optional batch size to use when uploading the
            annotations. By default, ``annos_or_ndjson_path`` is passed
            directly to ``labelbox_project.upload_annotations()``
    """
    if batch_size is None:
        # Single upload request containing everything
        name = '%s-upload-request' % labelbox_project.name
        return labelbox_project.upload_annotations(name, annos_or_ndjson_path)

    # Load annotations from disk if a path was provided
    if etau.is_str(annos_or_ndjson_path):
        annos = etas.read_ndjson(annos_or_ndjson_path)
    else:
        annos = annos_or_ndjson_path

    # One upload request per batch, numbered from 1
    requests = []
    for batch_num, anno_batch in enumerate(
        fou.iter_batches(annos, batch_size), 1
    ):
        name = '%s-upload-request-%d' % (labelbox_project.name, batch_num)
        requests.append(labelbox_project.upload_annotations(name, anno_batch))

    return requests
Uploads labels to a Labelbox project. Use this function to load predictions into Labelbox for `model-assisted labeling <https://labelbox.com/docs/automation/model-assisted-labeling>`_. Use :meth:`export_to_labelbox` to export annotations in the format expected by this method. Args: labelbox_project: a ``labelbox.schema.project.Project`` annos_or_ndjson_path: a list of annotation dicts or the path to an NDJSON file on disk containing annotations batch_size (None): an optional batch size to use when uploading the annotations. By default, ``annos_or_ndjson_path`` is passed directly to ``labelbox_project.upload_annotations()``
fiftyone/utils/labelbox.py
upload_labels_to_labelbox
stbdang/fiftyone
3
python
def upload_labels_to_labelbox(labelbox_project, annos_or_ndjson_path, batch_size=None): 'Uploads labels to a Labelbox project.\n\n Use this function to load predictions into Labelbox for\n `model-assisted labeling <https://labelbox.com/docs/automation/model-assisted-labeling>`_.\n\n Use :meth:`export_to_labelbox` to export annotations in the format expected\n by this method.\n\n Args:\n labelbox_project: a ``labelbox.schema.project.Project``\n annos_or_ndjson_path: a list of annotation dicts or the path to an\n NDJSON file on disk containing annotations\n batch_size (None): an optional batch size to use when uploading the\n annotations. By default, ``annos_or_ndjson_path`` is passed\n directly to ``labelbox_project.upload_annotations()``\n ' if (batch_size is None): name = ('%s-upload-request' % labelbox_project.name) return labelbox_project.upload_annotations(name, annos_or_ndjson_path) if etau.is_str(annos_or_ndjson_path): annos = etas.read_ndjson(annos_or_ndjson_path) else: annos = annos_or_ndjson_path requests = [] count = 0 for anno_batch in fou.iter_batches(annos, batch_size): count += 1 name = ('%s-upload-request-%d' % (labelbox_project.name, count)) request = labelbox_project.upload_annotations(name, anno_batch) requests.append(request) return requests
def upload_labels_to_labelbox(labelbox_project, annos_or_ndjson_path, batch_size=None): 'Uploads labels to a Labelbox project.\n\n Use this function to load predictions into Labelbox for\n `model-assisted labeling <https://labelbox.com/docs/automation/model-assisted-labeling>`_.\n\n Use :meth:`export_to_labelbox` to export annotations in the format expected\n by this method.\n\n Args:\n labelbox_project: a ``labelbox.schema.project.Project``\n annos_or_ndjson_path: a list of annotation dicts or the path to an\n NDJSON file on disk containing annotations\n batch_size (None): an optional batch size to use when uploading the\n annotations. By default, ``annos_or_ndjson_path`` is passed\n directly to ``labelbox_project.upload_annotations()``\n ' if (batch_size is None): name = ('%s-upload-request' % labelbox_project.name) return labelbox_project.upload_annotations(name, annos_or_ndjson_path) if etau.is_str(annos_or_ndjson_path): annos = etas.read_ndjson(annos_or_ndjson_path) else: annos = annos_or_ndjson_path requests = [] count = 0 for anno_batch in fou.iter_batches(annos, batch_size): count += 1 name = ('%s-upload-request-%d' % (labelbox_project.name, count)) request = labelbox_project.upload_annotations(name, anno_batch) requests.append(request) return requests<|docstring|>Uploads labels to a Labelbox project. Use this function to load predictions into Labelbox for `model-assisted labeling <https://labelbox.com/docs/automation/model-assisted-labeling>`_. Use :meth:`export_to_labelbox` to export annotations in the format expected by this method. Args: labelbox_project: a ``labelbox.schema.project.Project`` annos_or_ndjson_path: a list of annotation dicts or the path to an NDJSON file on disk containing annotations batch_size (None): an optional batch size to use when uploading the annotations. By default, ``annos_or_ndjson_path`` is passed directly to ``labelbox_project.upload_annotations()``<|endoftext|>
00bd6a642e498b3ebb2b4cdd0690cf46b9d8923404574a4c0f6e503ab10bd527
def convert_labelbox_export_to_import(inpath, outpath=None, video_outdir=None):
    """Converts a Labelbox NDJSON export generated by
    :meth:`export_to_labelbox` into the format expected by
    :meth:`import_from_labelbox`.

    The output JSON file will have the same format that is generated when
    `exporting a Labelbox project's labels <https://labelbox.com/docs/exporting-data/export-overview>`_.

    The ``Labeled Data`` fields of the output labels will be ``None``.

    Args:
        inpath: the path to an NDJSON file generated (for example) by
            :meth:`export_to_labelbox`
        outpath (None): the path to write a JSON file containing the converted
            labels. If omitted, the input file will be overwritten
        video_outdir (None): a directory to write the converted video frame
            labels (if applicable). If omitted, the input frame label files
            will be overwritten
    """
    if outpath is None:
        outpath = inpath

    annos_in = etas.read_ndjson(inpath)

    # Aggregate annotations per DataRow ID
    labels_map = {}
    for anno in annos_in:
        data_row_id = anno.pop('dataRow')['id']
        anno.pop('uuid')

        if 'frames' in anno:
            # Video annotation: convert the per-frame label file it references
            frames_inpath = anno['frames']
            if video_outdir is None:
                frames_outpath = frames_inpath
            else:
                frames_outpath = os.path.join(
                    video_outdir, os.path.basename(frames_inpath)
                )

            _convert_labelbox_frames_export_to_import(
                frames_inpath, frames_outpath
            )
            labels_map[data_row_id] = {
                'DataRow ID': data_row_id,
                'Labeled Data': None,
                'Label': {'frames': frames_outpath},
            }
            continue

        # Image annotation: merge into any existing entry for this DataRow
        if data_row_id not in labels_map:
            labels_map[data_row_id] = {
                'DataRow ID': data_row_id,
                'Labeled Data': None,
                'Label': {'objects': [], 'classifications': []},
            }

        _ingest_label(anno, labels_map[data_row_id]['Label'])

    etas.write_json(list(labels_map.values()), outpath)
Converts a Labelbox NDJSON export generated by :meth:`export_to_labelbox` into the format expected by :meth:`import_from_labelbox`. The output JSON file will have the same format that is generated when `exporting a Labelbox project's labels <https://labelbox.com/docs/exporting-data/export-overview>`_. The ``Labeled Data`` fields of the output labels will be ``None``. Args: inpath: the path to an NDJSON file generated (for example) by :meth:`export_to_labelbox` outpath (None): the path to write a JSON file containing the converted labels. If omitted, the input file will be overwritten video_outdir (None): a directory to write the converted video frame labels (if applicable). If omitted, the input frame label files will be overwritten
fiftyone/utils/labelbox.py
convert_labelbox_export_to_import
stbdang/fiftyone
3
python
def convert_labelbox_export_to_import(inpath, outpath=None, video_outdir=None): "Converts a Labelbox NDJSON export generated by\n :meth:`export_to_labelbox` into the format expected by\n :meth:`import_from_labelbox`.\n\n The output JSON file will have the same format that is generated when\n `exporting a Labelbox project's labels <https://labelbox.com/docs/exporting-data/export-overview>`_.\n\n The ``Labeled Data`` fields of the output labels will be ``None``.\n\n Args:\n inpath: the path to an NDJSON file generated (for example) by\n :meth:`export_to_labelbox`\n outpath (None): the path to write a JSON file containing the converted\n labels. If omitted, the input file will be overwritten\n video_outdir (None): a directory to write the converted video frame\n labels (if applicable). If omitted, the input frame label files\n will be overwritten\n " if (outpath is None): outpath = inpath din_list = etas.read_ndjson(inpath) dout_map = {} for din in din_list: uuid = din.pop('dataRow')['id'] din.pop('uuid') if ('frames' in din): frames_inpath = din['frames'] if (video_outdir is not None): frames_outpath = os.path.join(video_outdir, os.path.basename(frames_inpath)) else: frames_outpath = frames_inpath _convert_labelbox_frames_export_to_import(frames_inpath, frames_outpath) dout_map[uuid] = {'DataRow ID': uuid, 'Labeled Data': None, 'Label': {'frames': frames_outpath}} continue if (uuid not in dout_map): dout_map[uuid] = {'DataRow ID': uuid, 'Labeled Data': None, 'Label': {'objects': [], 'classifications': []}} _ingest_label(din, dout_map[uuid]['Label']) dout = list(dout_map.values()) etas.write_json(dout, outpath)
def convert_labelbox_export_to_import(inpath, outpath=None, video_outdir=None): "Converts a Labelbox NDJSON export generated by\n :meth:`export_to_labelbox` into the format expected by\n :meth:`import_from_labelbox`.\n\n The output JSON file will have the same format that is generated when\n `exporting a Labelbox project's labels <https://labelbox.com/docs/exporting-data/export-overview>`_.\n\n The ``Labeled Data`` fields of the output labels will be ``None``.\n\n Args:\n inpath: the path to an NDJSON file generated (for example) by\n :meth:`export_to_labelbox`\n outpath (None): the path to write a JSON file containing the converted\n labels. If omitted, the input file will be overwritten\n video_outdir (None): a directory to write the converted video frame\n labels (if applicable). If omitted, the input frame label files\n will be overwritten\n " if (outpath is None): outpath = inpath din_list = etas.read_ndjson(inpath) dout_map = {} for din in din_list: uuid = din.pop('dataRow')['id'] din.pop('uuid') if ('frames' in din): frames_inpath = din['frames'] if (video_outdir is not None): frames_outpath = os.path.join(video_outdir, os.path.basename(frames_inpath)) else: frames_outpath = frames_inpath _convert_labelbox_frames_export_to_import(frames_inpath, frames_outpath) dout_map[uuid] = {'DataRow ID': uuid, 'Labeled Data': None, 'Label': {'frames': frames_outpath}} continue if (uuid not in dout_map): dout_map[uuid] = {'DataRow ID': uuid, 'Labeled Data': None, 'Label': {'objects': [], 'classifications': []}} _ingest_label(din, dout_map[uuid]['Label']) dout = list(dout_map.values()) etas.write_json(dout, outpath)<|docstring|>Converts a Labelbox NDJSON export generated by :meth:`export_to_labelbox` into the format expected by :meth:`import_from_labelbox`. The output JSON file will have the same format that is generated when `exporting a Labelbox project's labels <https://labelbox.com/docs/exporting-data/export-overview>`_. 
The ``Labeled Data`` fields of the output labels will be ``None``. Args: inpath: the path to an NDJSON file generated (for example) by :meth:`export_to_labelbox` outpath (None): the path to write a JSON file containing the converted labels. If omitted, the input file will be overwritten video_outdir (None): a directory to write the converted video frame labels (if applicable). If omitted, the input frame label files will be overwritten<|endoftext|>
4636470cac5adc8a3bde3f9221721784cf33ce5773ec8690540f81a3bd9526db
def get_project_users(self, project=None, project_id=None):
    """Returns a list of users that are assigned to the given project.

    Provide either ``project`` or ``project_id`` to this method.

    Args:
        project: a ``labelbox.schema.project.Project``
        project_id: the project ID

    Returns:
        a list of ``labelbox.schema.user.User`` objects
    """
    if project is None:
        if project_id is None:
            raise ValueError('Either `project` or `project_id` must be provided')

        project = self.get_project(project_id)

    # Filter the organization's users down to those assigned to this project
    project_users = []
    for user in project.organization().users():
        if project in user.projects():
            project_users.append(user)

    # Bug fix: previously this returned *all* organization users instead of
    # only the users assigned to the project
    return project_users
Returns a list of users that are assigned to the given project. Provide either ``project`` or ``project_id`` to this method. Args: project: a ``labelbox.schema.project.Project`` project_id: the project ID Returns: a list of ``labelbox.schema.user.User`` objects
fiftyone/utils/labelbox.py
get_project_users
stbdang/fiftyone
3
python
def get_project_users(self, project=None, project_id=None): 'Returns a list of users that are assigned to the given project.\n\n Provide either ``project`` or ``project_id`` to this method.\n\n Args:\n project: a ``labelbox.schema.project.Project``\n project_id: the project ID\n\n Returns:\n a list of ``labelbox.schema.user.User`` objects\n ' if (project is None): if (project_id is None): raise ValueError('Either `project` or `project_id` must be provided') project = self.get_project(project_id) project_users = [] project_id = project.uid users = list(project.organization().users()) for user in users: if (project in user.projects()): project_users.append(user) return users
def get_project_users(self, project=None, project_id=None): 'Returns a list of users that are assigned to the given project.\n\n Provide either ``project`` or ``project_id`` to this method.\n\n Args:\n project: a ``labelbox.schema.project.Project``\n project_id: the project ID\n\n Returns:\n a list of ``labelbox.schema.user.User`` objects\n ' if (project is None): if (project_id is None): raise ValueError('Either `project` or `project_id` must be provided') project = self.get_project(project_id) project_users = [] project_id = project.uid users = list(project.organization().users()) for user in users: if (project in user.projects()): project_users.append(user) return users<|docstring|>Returns a list of users that are assigned to the given project. Provide either ``project`` or ``project_id`` to this method. Args: project: a ``labelbox.schema.project.Project`` project_id: the project ID Returns: a list of ``labelbox.schema.user.User`` objects<|endoftext|>
7c269688bf6ebea2dc1a5b8d4fc725428104825bb35cc7d5d57fbd5cb7a13e05
def add_member(self, project, email, role):
    """Adds a member to the given Labelbox project with the given
    project-level role.

    If the user is not a member of the project's parent organization, an
    email invitation will be sent.

    Args:
        project: the ``labelbox.schema.project.Project``
        email: the email of the user
        role: the role for the user. Supported values are
            ``["LABELER", "REVIEWER", "TEAM_MANAGER", "ADMIN"]``
    """
    if not self._experimental:
        raise ValueError(
            'This method can only be used if the `LabelboxAnnotationAPI` '
            'object was initialized with `_experimental=True`'
        )

    if role not in self.roles or role == 'NONE':
        raise ValueError("Unsupported user role '%s'" % role)

    role_id = self.roles[role]
    organization = self._client.get_organization()

    # If the user already belongs to the organization, just (re)assign
    # their project-level role
    current_members = {user.email: user for user in organization.users()}
    if email in current_members:
        current_members[email].upsert_project_role(project, role_id)
        return

    # New members consume an organization invite; bail out if none remain
    limit = organization.invite_limit()
    if limit.remaining == 0:
        logger.warning(
            "Your organization has reached its limit of %d members. Cannot "
            "invite new member %s to project '%s'",
            limit.limit,
            email,
            project.name,
        )
        return

    # Send an email invitation with the requested project-level role
    project_role = lbs.organization.ProjectRole(project=project, role=role_id)
    organization.invite_user(
        email, self.roles['NONE'], project_roles=[project_role]
    )
Adds a member to the given Labelbox project with the given project-level role. If the user is not a member of the project's parent organization, an email invitation will be sent. Args: project: the ``labelbox.schema.project.Project`` email: the email of the user role: the role for the user. Supported values are ``["LABELER", "REVIEWER", "TEAM_MANAGER", "ADMIN"]``
fiftyone/utils/labelbox.py
add_member
stbdang/fiftyone
3
python
def add_member(self, project, email, role): 'Adds a member to the given Labelbox project with the given\n project-level role.\n\n If the user is not a member of the project\'s parent organization, an\n email invitivation will be sent.\n\n Args:\n project: the ``labelbox.schema.project.Project``\n email: the email of the user\n role: the role for the user. Supported values are\n ``["LABELER", "REVIEWER", "TEAM_MANAGER", "ADMIN"]``\n ' if (not self._experimental): raise ValueError('This method can only be used if the `LabelboxAnnotationAPI` object was initialized with `_experimental=True`') if ((role not in self.roles) or (role == 'NONE')): raise ValueError(("Unsupported user role '%s'" % role)) role_id = self.roles[role] organization = self._client.get_organization() existing_users = {u.email: u for u in organization.users()} if (email in existing_users): user = existing_users[email] user.upsert_project_role(project, role_id) return limit = organization.invite_limit() if (limit.remaining == 0): logger.warning("Your organization has reached its limit of %d members. Cannot invite new member %s to project '%s'", limit.limit, email, project.name) return project_role = lbs.organization.ProjectRole(project=project, role=role_id) organization.invite_user(email, self.roles['NONE'], project_roles=[project_role])
def add_member(self, project, email, role): 'Adds a member to the given Labelbox project with the given\n project-level role.\n\n If the user is not a member of the project\'s parent organization, an\n email invitivation will be sent.\n\n Args:\n project: the ``labelbox.schema.project.Project``\n email: the email of the user\n role: the role for the user. Supported values are\n ``["LABELER", "REVIEWER", "TEAM_MANAGER", "ADMIN"]``\n ' if (not self._experimental): raise ValueError('This method can only be used if the `LabelboxAnnotationAPI` object was initialized with `_experimental=True`') if ((role not in self.roles) or (role == 'NONE')): raise ValueError(("Unsupported user role '%s'" % role)) role_id = self.roles[role] organization = self._client.get_organization() existing_users = {u.email: u for u in organization.users()} if (email in existing_users): user = existing_users[email] user.upsert_project_role(project, role_id) return limit = organization.invite_limit() if (limit.remaining == 0): logger.warning("Your organization has reached its limit of %d members. Cannot invite new member %s to project '%s'", limit.limit, email, project.name) return project_role = lbs.organization.ProjectRole(project=project, role=role_id) organization.invite_user(email, self.roles['NONE'], project_roles=[project_role])<|docstring|>Adds a member to the given Labelbox project with the given project-level role. If the user is not a member of the project's parent organization, an email invitivation will be sent. Args: project: the ``labelbox.schema.project.Project`` email: the email of the user role: the role for the user. Supported values are ``["LABELER", "REVIEWER", "TEAM_MANAGER", "ADMIN"]``<|endoftext|>
0f0cf9af403c84654ba53696ce4fdd5bd58f8439eb7665620e335c0eb0c72e65
def list_datasets(self):
    """Retrieves the list of datasets in your Labelbox account.

    Returns:
        a list of dataset IDs
    """
    return [dataset.uid for dataset in self._client.get_datasets()]
Retrieves the list of datasets in your Labelbox account. Returns: a list of dataset IDs
fiftyone/utils/labelbox.py
list_datasets
stbdang/fiftyone
3
python
def list_datasets(self): 'Retrieves the list of datasets in your Labelbox account.\n\n Returns:\n a list of dataset IDs\n ' datasets = self._client.get_datasets() return [d.uid for d in datasets]
def list_datasets(self): 'Retrieves the list of datasets in your Labelbox account.\n\n Returns:\n a list of dataset IDs\n ' datasets = self._client.get_datasets() return [d.uid for d in datasets]<|docstring|>Retrieves the list of datasets in your Labelbox account. Returns: a list of dataset IDs<|endoftext|>
e7419ae4e770cdfa537943b7357ec925eeafdae41240ed72e9fc5973055ceb97
def delete_datasets(self, dataset_ids):
    """Deletes the given datasets from the Labelbox server.

    Args:
        dataset_ids: an iterable of dataset IDs
    """
    logger.info('Deleting datasets...')

    # Materialize the iterable so the progress bar knows the total count
    with fou.ProgressBar() as pb:
        for dataset_id in pb(list(dataset_ids)):
            self._client.get_dataset(dataset_id).delete()
Deletes the given datasets from the Labelbox server. Args: dataset_ids: an iterable of dataset IDs
fiftyone/utils/labelbox.py
delete_datasets
stbdang/fiftyone
3
python
def delete_datasets(self, dataset_ids): 'Deletes the given datasets from the Labelbox server.\n\n Args:\n dataset_ids: an iterable of dataset IDs\n ' logger.info('Deleting datasets...') with fou.ProgressBar() as pb: for dataset_id in pb(list(dataset_ids)): dataset = self._client.get_dataset(dataset_id) dataset.delete()
def delete_datasets(self, dataset_ids): 'Deletes the given datasets from the Labelbox server.\n\n Args:\n dataset_ids: an iterable of dataset IDs\n ' logger.info('Deleting datasets...') with fou.ProgressBar() as pb: for dataset_id in pb(list(dataset_ids)): dataset = self._client.get_dataset(dataset_id) dataset.delete()<|docstring|>Deletes the given datasets from the Labelbox server. Args: dataset_ids: an iterable of dataset IDs<|endoftext|>
066ce0531ab84fe9044f938c11ebdb5d2d19fb73fdc10675406593265002e428
def list_projects(self):
    """Retrieves the list of projects in your Labelbox account.

    Returns:
        a list of project IDs
    """
    return [project.uid for project in self._client.get_projects()]
Retrieves the list of projects in your Labelbox account. Returns: a list of project IDs
fiftyone/utils/labelbox.py
list_projects
stbdang/fiftyone
3
python
def list_projects(self): 'Retrieves the list of projects in your Labelbox account.\n\n Returns:\n a list of project IDs\n ' projects = self._client.get_projects() return [p.uid for p in projects]
def list_projects(self): 'Retrieves the list of projects in your Labelbox account.\n\n Returns:\n a list of project IDs\n ' projects = self._client.get_projects() return [p.uid for p in projects]<|docstring|>Retrieves the list of projects in your Labelbox account. Returns: a list of project IDs<|endoftext|>
ad601c3eeeb8bf5fe9d632210e1584d3e0abf1dd48dd110fa504bd72786903a4
def get_project(self, project_id):
    """Retrieves the ``labelbox.schema.project.Project`` for the project
    with the given ID.

    Args:
        project_id: the project ID

    Returns:
        a ``labelbox.schema.project.Project``
    """
    # Thin pass-through to the underlying Labelbox client
    client = self._client
    return client.get_project(project_id)
Retrieves the ``labelbox.schema.project.Project`` for the project with the given ID. Args: project_id: the project ID Returns: a ``labelbox.schema.project.Project``
fiftyone/utils/labelbox.py
get_project
stbdang/fiftyone
3
python
def get_project(self, project_id): 'Retrieves the ``labelbox.schema.project.Project`` for the project\n with the given ID.\n\n Args:\n project_id: the project ID\n\n Returns:\n a ``labelbox.schema.project.Project``\n ' return self._client.get_project(project_id)
def get_project(self, project_id): 'Retrieves the ``labelbox.schema.project.Project`` for the project\n with the given ID.\n\n Args:\n project_id: the project ID\n\n Returns:\n a ``labelbox.schema.project.Project``\n ' return self._client.get_project(project_id)<|docstring|>Retrieves the ``labelbox.schema.project.Project`` for the project with the given ID. Args: project_id: the project ID Returns: a ``labelbox.schema.project.Project``<|endoftext|>
0550de4197718535ea30774a6a8f4aa92120ae2e685c21399ab523361d8f2494
def delete_project(self, project_id, delete_datasets=True):
    """Deletes the given project from the Labelbox server.

    Args:
        project_id: the project ID
        delete_datasets: whether to delete the attached datasets as well
    """
    project = self._client.get_project(project_id)

    logger.info("Deleting project '%s'...", project_id)

    # Optionally remove the project's attached datasets first
    if delete_datasets:
        for dataset in project.datasets():
            dataset.delete()

    project.delete()
Deletes the given project from the Labelbox server. Args: project_id: the project ID delete_datasets: whether to delete the attached datasets as well
fiftyone/utils/labelbox.py
delete_project
stbdang/fiftyone
3
python
def delete_project(self, project_id, delete_datasets=True): 'Deletes the given project from the Labelbox server.\n\n Args:\n project_id: the project ID\n delete_datasets: whether to delete the attached datasets as well\n ' project = self._client.get_project(project_id) logger.info("Deleting project '%s'...", project_id) if delete_datasets: for dataset in project.datasets(): dataset.delete() project.delete()
def delete_project(self, project_id, delete_datasets=True): 'Deletes the given project from the Labelbox server.\n\n Args:\n project_id: the project ID\n delete_datasets: whether to delete the attached datasets as well\n ' project = self._client.get_project(project_id) logger.info("Deleting project '%s'...", project_id) if delete_datasets: for dataset in project.datasets(): dataset.delete() project.delete()<|docstring|>Deletes the given project from the Labelbox server. Args: project_id: the project ID delete_datasets: whether to delete the attached datasets as well<|endoftext|>
cf87e23435544d969543dd6dccba045115de79c3bdb284ae5197beccad87966e
def delete_projects(self, project_ids, delete_datasets=True): 'Deletes the given projects from the Labelbox server.\n\n Args:\n project_ids: an iterable of project IDs\n delete_datasets: whether to delete the attached datasets as well\n ' for project_id in project_ids: self.delete_project(project_id, delete_datasets=delete_datasets)
Deletes the given projects from the Labelbox server. Args: project_ids: an iterable of project IDs delete_datasets: whether to delete the attached datasets as well
fiftyone/utils/labelbox.py
delete_projects
stbdang/fiftyone
3
python
def delete_projects(self, project_ids, delete_datasets=True): 'Deletes the given projects from the Labelbox server.\n\n Args:\n project_ids: an iterable of project IDs\n delete_datasets: whether to delete the attached datasets as well\n ' for project_id in project_ids: self.delete_project(project_id, delete_datasets=delete_datasets)
def delete_projects(self, project_ids, delete_datasets=True): 'Deletes the given projects from the Labelbox server.\n\n Args:\n project_ids: an iterable of project IDs\n delete_datasets: whether to delete the attached datasets as well\n ' for project_id in project_ids: self.delete_project(project_id, delete_datasets=delete_datasets)<|docstring|>Deletes the given projects from the Labelbox server. Args: project_ids: an iterable of project IDs delete_datasets: whether to delete the attached datasets as well<|endoftext|>
435ea04fc34bb036ac7bca6c8993644e9557d64e434e0ef942ac5f1c9a9ac0f3
def launch_editor(self, url=None): 'Launches the Labelbox editor in your default web browser.\n\n Args:\n url (None): an optional URL to open. By default, the base URL of\n the server is opened\n ' if (url is None): url = self.projects_url webbrowser.open(url, new=2)
Launches the Labelbox editor in your default web browser. Args: url (None): an optional URL to open. By default, the base URL of the server is opened
fiftyone/utils/labelbox.py
launch_editor
stbdang/fiftyone
3
python
def launch_editor(self, url=None): 'Launches the Labelbox editor in your default web browser.\n\n Args:\n url (None): an optional URL to open. By default, the base URL of\n the server is opened\n ' if (url is None): url = self.projects_url webbrowser.open(url, new=2)
def launch_editor(self, url=None): 'Launches the Labelbox editor in your default web browser.\n\n Args:\n url (None): an optional URL to open. By default, the base URL of\n the server is opened\n ' if (url is None): url = self.projects_url webbrowser.open(url, new=2)<|docstring|>Launches the Labelbox editor in your default web browser. Args: url (None): an optional URL to open. By default, the base URL of the server is opened<|endoftext|>
a3a10caf0d43ffcd50e2a0aa3e84e19c2007efbf86ac4c5ed6c31b2e78aa6f41
def upload_data(self, samples, lb_dataset, media_field='filepath'): 'Uploads the media for the given samples to Labelbox.\n\n This method uses ``labelbox.schema.dataset.Dataset.create_data_rows()``\n to add data in batches, and sets the external ID of each DataRow to the\n ID of the corresponding sample.\n\n Args:\n samples: a :class:`fiftyone.core.collections.SampleCollection`\n containing the media to upload\n lb_dataset: a ``labelbox.schema.dataset.Dataset`` to which to\n add the media\n media_field ("filepath"): string field name containing the paths to\n media files on disk to upload\n ' (media_paths, sample_ids) = samples.values([media_field, 'id']) upload_info = [] for (media_path, sample_id) in zip(media_paths, sample_ids): item_url = self._client.upload_file(media_path) upload_info.append({lb.DataRow.row_data: item_url, lb.DataRow.external_id: sample_id}) task = lb_dataset.create_data_rows(upload_info) task.wait_till_done()
Uploads the media for the given samples to Labelbox. This method uses ``labelbox.schema.dataset.Dataset.create_data_rows()`` to add data in batches, and sets the external ID of each DataRow to the ID of the corresponding sample. Args: samples: a :class:`fiftyone.core.collections.SampleCollection` containing the media to upload lb_dataset: a ``labelbox.schema.dataset.Dataset`` to which to add the media media_field ("filepath"): string field name containing the paths to media files on disk to upload
fiftyone/utils/labelbox.py
upload_data
stbdang/fiftyone
3
python
def upload_data(self, samples, lb_dataset, media_field='filepath'): 'Uploads the media for the given samples to Labelbox.\n\n This method uses ``labelbox.schema.dataset.Dataset.create_data_rows()``\n to add data in batches, and sets the external ID of each DataRow to the\n ID of the corresponding sample.\n\n Args:\n samples: a :class:`fiftyone.core.collections.SampleCollection`\n containing the media to upload\n lb_dataset: a ``labelbox.schema.dataset.Dataset`` to which to\n add the media\n media_field ("filepath"): string field name containing the paths to\n media files on disk to upload\n ' (media_paths, sample_ids) = samples.values([media_field, 'id']) upload_info = [] for (media_path, sample_id) in zip(media_paths, sample_ids): item_url = self._client.upload_file(media_path) upload_info.append({lb.DataRow.row_data: item_url, lb.DataRow.external_id: sample_id}) task = lb_dataset.create_data_rows(upload_info) task.wait_till_done()
def upload_data(self, samples, lb_dataset, media_field='filepath'): 'Uploads the media for the given samples to Labelbox.\n\n This method uses ``labelbox.schema.dataset.Dataset.create_data_rows()``\n to add data in batches, and sets the external ID of each DataRow to the\n ID of the corresponding sample.\n\n Args:\n samples: a :class:`fiftyone.core.collections.SampleCollection`\n containing the media to upload\n lb_dataset: a ``labelbox.schema.dataset.Dataset`` to which to\n add the media\n media_field ("filepath"): string field name containing the paths to\n media files on disk to upload\n ' (media_paths, sample_ids) = samples.values([media_field, 'id']) upload_info = [] for (media_path, sample_id) in zip(media_paths, sample_ids): item_url = self._client.upload_file(media_path) upload_info.append({lb.DataRow.row_data: item_url, lb.DataRow.external_id: sample_id}) task = lb_dataset.create_data_rows(upload_info) task.wait_till_done()<|docstring|>Uploads the media for the given samples to Labelbox. This method uses ``labelbox.schema.dataset.Dataset.create_data_rows()`` to add data in batches, and sets the external ID of each DataRow to the ID of the corresponding sample. Args: samples: a :class:`fiftyone.core.collections.SampleCollection` containing the media to upload lb_dataset: a ``labelbox.schema.dataset.Dataset`` to which to add the media media_field ("filepath"): string field name containing the paths to media files on disk to upload<|endoftext|>
5a6e06f59b9d222213a19c660895245ce6ee6aed02bed2461985e5c91132e023
def upload_samples(self, samples, backend): "Uploads the given samples to Labelbox according to the given\n backend's annotation and server configuration.\n\n Args:\n samples: a :class:`fiftyone.core.collections.SampleCollection` to\n upload to CVAT\n backend: a :class:`LabelboxBackend` to use to perform the upload\n\n Returns:\n a :class:`LabelboxAnnotationResults`\n " config = backend.config label_schema = config.label_schema media_field = config.media_field project_name = config.project_name members = config.members classes_as_attrs = config.classes_as_attrs for (label_field, label_info) in label_schema.items(): if label_info['existing_field']: raise ValueError(("Cannot use existing field '%s'; the Labelbox backend does not yet support uploading existing labels" % label_field)) if (project_name is None): _dataset_name = samples._root_dataset.name.replace(' ', '_') project_name = ('FiftyOne_%s' % _dataset_name) dataset = self._client.create_dataset(name=project_name) self.upload_data(samples, dataset, media_field=media_field) project = self._setup_project(project_name, dataset, label_schema, classes_as_attrs) if members: for (email, role) in members: self.add_member(project, email, role) project_id = project.uid id_map = {} frame_id_map = self._build_frame_id_map(samples) return LabelboxAnnotationResults(samples, config, id_map, project_id, frame_id_map, backend=backend)
Uploads the given samples to Labelbox according to the given backend's annotation and server configuration. Args: samples: a :class:`fiftyone.core.collections.SampleCollection` to upload to CVAT backend: a :class:`LabelboxBackend` to use to perform the upload Returns: a :class:`LabelboxAnnotationResults`
fiftyone/utils/labelbox.py
upload_samples
stbdang/fiftyone
3
python
def upload_samples(self, samples, backend): "Uploads the given samples to Labelbox according to the given\n backend's annotation and server configuration.\n\n Args:\n samples: a :class:`fiftyone.core.collections.SampleCollection` to\n upload to CVAT\n backend: a :class:`LabelboxBackend` to use to perform the upload\n\n Returns:\n a :class:`LabelboxAnnotationResults`\n " config = backend.config label_schema = config.label_schema media_field = config.media_field project_name = config.project_name members = config.members classes_as_attrs = config.classes_as_attrs for (label_field, label_info) in label_schema.items(): if label_info['existing_field']: raise ValueError(("Cannot use existing field '%s'; the Labelbox backend does not yet support uploading existing labels" % label_field)) if (project_name is None): _dataset_name = samples._root_dataset.name.replace(' ', '_') project_name = ('FiftyOne_%s' % _dataset_name) dataset = self._client.create_dataset(name=project_name) self.upload_data(samples, dataset, media_field=media_field) project = self._setup_project(project_name, dataset, label_schema, classes_as_attrs) if members: for (email, role) in members: self.add_member(project, email, role) project_id = project.uid id_map = {} frame_id_map = self._build_frame_id_map(samples) return LabelboxAnnotationResults(samples, config, id_map, project_id, frame_id_map, backend=backend)
def upload_samples(self, samples, backend): "Uploads the given samples to Labelbox according to the given\n backend's annotation and server configuration.\n\n Args:\n samples: a :class:`fiftyone.core.collections.SampleCollection` to\n upload to CVAT\n backend: a :class:`LabelboxBackend` to use to perform the upload\n\n Returns:\n a :class:`LabelboxAnnotationResults`\n " config = backend.config label_schema = config.label_schema media_field = config.media_field project_name = config.project_name members = config.members classes_as_attrs = config.classes_as_attrs for (label_field, label_info) in label_schema.items(): if label_info['existing_field']: raise ValueError(("Cannot use existing field '%s'; the Labelbox backend does not yet support uploading existing labels" % label_field)) if (project_name is None): _dataset_name = samples._root_dataset.name.replace(' ', '_') project_name = ('FiftyOne_%s' % _dataset_name) dataset = self._client.create_dataset(name=project_name) self.upload_data(samples, dataset, media_field=media_field) project = self._setup_project(project_name, dataset, label_schema, classes_as_attrs) if members: for (email, role) in members: self.add_member(project, email, role) project_id = project.uid id_map = {} frame_id_map = self._build_frame_id_map(samples) return LabelboxAnnotationResults(samples, config, id_map, project_id, frame_id_map, backend=backend)<|docstring|>Uploads the given samples to Labelbox according to the given backend's annotation and server configuration. Args: samples: a :class:`fiftyone.core.collections.SampleCollection` to upload to CVAT backend: a :class:`LabelboxBackend` to use to perform the upload Returns: a :class:`LabelboxAnnotationResults`<|endoftext|>
17ecd7463119fe09114b13e3d9f854b0dadd25aeb125abe96f4da39f1c955539
def download_annotations(self, results): 'Download the annotations from the Labelbox server for the given\n results instance and parses them into the appropriate FiftyOne types.\n\n Args:\n results: a :class:`LabelboxAnnotationResults`\n\n Returns:\n the annotations dict\n ' project_id = results.project_id frame_id_map = results.frame_id_map classes_as_attrs = results.config.classes_as_attrs label_schema = results.config.label_schema project = self._client.get_project(project_id) labels_json = self._download_project_labels(project=project) is_video = (results._samples.media_type == fomm.VIDEO) annotations = {} if classes_as_attrs: class_attr = 'class_name' else: class_attr = False for d in labels_json: labelbox_id = d['DataRow ID'] sample_id = d['External ID'] if (sample_id is None): logger.warning("Skipping DataRow '%s' with no sample ID", labelbox_id) continue metadata = self._get_sample_metadata(project, sample_id) if (metadata is None): logger.warning("Skipping sample '%s' with no metadata", sample_id) continue frame_size = (metadata['width'], metadata['height']) if is_video: video_d_list = self._get_video_labels(d['Label']) frames = {} for label_d in video_d_list: frame_number = label_d['frameNumber'] frame_id = frame_id_map[sample_id][frame_number] labels_dict = _parse_image_labels(label_d, frame_size, class_attr=class_attr) if (not classes_as_attrs): labels_dict = self._process_label_fields(label_schema, labels_dict) frames[frame_id] = labels_dict self._add_video_labels_to_results(annotations, frames, sample_id, label_schema) else: labels_dict = _parse_image_labels(d['Label'], frame_size, class_attr=class_attr) if (not classes_as_attrs): labels_dict = self._process_label_fields(label_schema, labels_dict) annotations = self._add_labels_to_results(annotations, labels_dict, sample_id, label_schema) return annotations
Download the annotations from the Labelbox server for the given results instance and parses them into the appropriate FiftyOne types. Args: results: a :class:`LabelboxAnnotationResults` Returns: the annotations dict
fiftyone/utils/labelbox.py
download_annotations
stbdang/fiftyone
3
python
def download_annotations(self, results): 'Download the annotations from the Labelbox server for the given\n results instance and parses them into the appropriate FiftyOne types.\n\n Args:\n results: a :class:`LabelboxAnnotationResults`\n\n Returns:\n the annotations dict\n ' project_id = results.project_id frame_id_map = results.frame_id_map classes_as_attrs = results.config.classes_as_attrs label_schema = results.config.label_schema project = self._client.get_project(project_id) labels_json = self._download_project_labels(project=project) is_video = (results._samples.media_type == fomm.VIDEO) annotations = {} if classes_as_attrs: class_attr = 'class_name' else: class_attr = False for d in labels_json: labelbox_id = d['DataRow ID'] sample_id = d['External ID'] if (sample_id is None): logger.warning("Skipping DataRow '%s' with no sample ID", labelbox_id) continue metadata = self._get_sample_metadata(project, sample_id) if (metadata is None): logger.warning("Skipping sample '%s' with no metadata", sample_id) continue frame_size = (metadata['width'], metadata['height']) if is_video: video_d_list = self._get_video_labels(d['Label']) frames = {} for label_d in video_d_list: frame_number = label_d['frameNumber'] frame_id = frame_id_map[sample_id][frame_number] labels_dict = _parse_image_labels(label_d, frame_size, class_attr=class_attr) if (not classes_as_attrs): labels_dict = self._process_label_fields(label_schema, labels_dict) frames[frame_id] = labels_dict self._add_video_labels_to_results(annotations, frames, sample_id, label_schema) else: labels_dict = _parse_image_labels(d['Label'], frame_size, class_attr=class_attr) if (not classes_as_attrs): labels_dict = self._process_label_fields(label_schema, labels_dict) annotations = self._add_labels_to_results(annotations, labels_dict, sample_id, label_schema) return annotations
def download_annotations(self, results): 'Download the annotations from the Labelbox server for the given\n results instance and parses them into the appropriate FiftyOne types.\n\n Args:\n results: a :class:`LabelboxAnnotationResults`\n\n Returns:\n the annotations dict\n ' project_id = results.project_id frame_id_map = results.frame_id_map classes_as_attrs = results.config.classes_as_attrs label_schema = results.config.label_schema project = self._client.get_project(project_id) labels_json = self._download_project_labels(project=project) is_video = (results._samples.media_type == fomm.VIDEO) annotations = {} if classes_as_attrs: class_attr = 'class_name' else: class_attr = False for d in labels_json: labelbox_id = d['DataRow ID'] sample_id = d['External ID'] if (sample_id is None): logger.warning("Skipping DataRow '%s' with no sample ID", labelbox_id) continue metadata = self._get_sample_metadata(project, sample_id) if (metadata is None): logger.warning("Skipping sample '%s' with no metadata", sample_id) continue frame_size = (metadata['width'], metadata['height']) if is_video: video_d_list = self._get_video_labels(d['Label']) frames = {} for label_d in video_d_list: frame_number = label_d['frameNumber'] frame_id = frame_id_map[sample_id][frame_number] labels_dict = _parse_image_labels(label_d, frame_size, class_attr=class_attr) if (not classes_as_attrs): labels_dict = self._process_label_fields(label_schema, labels_dict) frames[frame_id] = labels_dict self._add_video_labels_to_results(annotations, frames, sample_id, label_schema) else: labels_dict = _parse_image_labels(d['Label'], frame_size, class_attr=class_attr) if (not classes_as_attrs): labels_dict = self._process_label_fields(label_schema, labels_dict) annotations = self._add_labels_to_results(annotations, labels_dict, sample_id, label_schema) return annotations<|docstring|>Download the annotations from the Labelbox server for the given results instance and parses them into the appropriate FiftyOne types. 
Args: results: a :class:`LabelboxAnnotationResults` Returns: the annotations dict<|endoftext|>
635cb88087cfcd065333063ca2257be74d50c6e3802a0195ca4f2e562cbdac0f
def _build_classifications(self, classes, name, general_attrs, label_type, label_field): 'Returns the classifications for the given label field. Generally,\n the classification is a dropdown selection for given classes, but can\n be a text entry for scalars without provided classes.\n\n Attributes are available for Classification and Classifications types\n in nested dropdowns.\n ' classifications = [] options = [] for c in classes: if isinstance(c, dict): sub_classes = c['classes'] attrs = (self._build_attributes(c['attributes']) + general_attrs) else: sub_classes = [c] attrs = general_attrs if (label_type == 'scalar'): attrs = [] for sc in sub_classes: if (label_type == 'scalar'): sub_attrs = attrs else: prefix = ('field:%s_class:%s_attr:' % (label_field, str(sc))) sub_attrs = deepcopy(attrs) for attr in sub_attrs: attr.instructions = (prefix + attr.instructions) options.append(lbo.Option(value=str(sc), options=sub_attrs)) if ((label_type == 'scalar') and (not classes)): classification = lbo.Classification(class_type=lbo.Classification.Type.TEXT, instructions=name) classifications.append(classification) elif (label_type == 'classifications'): classification = lbo.Classification(class_type=lbo.Classification.Type.CHECKLIST, instructions=name, options=options) classifications.append(classification) else: classification = lbo.Classification(class_type=lbo.Classification.Type.RADIO, instructions=name, options=options) classifications.append(classification) return classifications
Returns the classifications for the given label field. Generally, the classification is a dropdown selection for given classes, but can be a text entry for scalars without provided classes. Attributes are available for Classification and Classifications types in nested dropdowns.
fiftyone/utils/labelbox.py
_build_classifications
stbdang/fiftyone
3
python
def _build_classifications(self, classes, name, general_attrs, label_type, label_field): 'Returns the classifications for the given label field. Generally,\n the classification is a dropdown selection for given classes, but can\n be a text entry for scalars without provided classes.\n\n Attributes are available for Classification and Classifications types\n in nested dropdowns.\n ' classifications = [] options = [] for c in classes: if isinstance(c, dict): sub_classes = c['classes'] attrs = (self._build_attributes(c['attributes']) + general_attrs) else: sub_classes = [c] attrs = general_attrs if (label_type == 'scalar'): attrs = [] for sc in sub_classes: if (label_type == 'scalar'): sub_attrs = attrs else: prefix = ('field:%s_class:%s_attr:' % (label_field, str(sc))) sub_attrs = deepcopy(attrs) for attr in sub_attrs: attr.instructions = (prefix + attr.instructions) options.append(lbo.Option(value=str(sc), options=sub_attrs)) if ((label_type == 'scalar') and (not classes)): classification = lbo.Classification(class_type=lbo.Classification.Type.TEXT, instructions=name) classifications.append(classification) elif (label_type == 'classifications'): classification = lbo.Classification(class_type=lbo.Classification.Type.CHECKLIST, instructions=name, options=options) classifications.append(classification) else: classification = lbo.Classification(class_type=lbo.Classification.Type.RADIO, instructions=name, options=options) classifications.append(classification) return classifications
def _build_classifications(self, classes, name, general_attrs, label_type, label_field): 'Returns the classifications for the given label field. Generally,\n the classification is a dropdown selection for given classes, but can\n be a text entry for scalars without provided classes.\n\n Attributes are available for Classification and Classifications types\n in nested dropdowns.\n ' classifications = [] options = [] for c in classes: if isinstance(c, dict): sub_classes = c['classes'] attrs = (self._build_attributes(c['attributes']) + general_attrs) else: sub_classes = [c] attrs = general_attrs if (label_type == 'scalar'): attrs = [] for sc in sub_classes: if (label_type == 'scalar'): sub_attrs = attrs else: prefix = ('field:%s_class:%s_attr:' % (label_field, str(sc))) sub_attrs = deepcopy(attrs) for attr in sub_attrs: attr.instructions = (prefix + attr.instructions) options.append(lbo.Option(value=str(sc), options=sub_attrs)) if ((label_type == 'scalar') and (not classes)): classification = lbo.Classification(class_type=lbo.Classification.Type.TEXT, instructions=name) classifications.append(classification) elif (label_type == 'classifications'): classification = lbo.Classification(class_type=lbo.Classification.Type.CHECKLIST, instructions=name, options=options) classifications.append(classification) else: classification = lbo.Classification(class_type=lbo.Classification.Type.RADIO, instructions=name, options=options) classifications.append(classification) return classifications<|docstring|>Returns the classifications for the given label field. Generally, the classification is a dropdown selection for given classes, but can be a text entry for scalars without provided classes. Attributes are available for Classification and Classifications types in nested dropdowns.<|endoftext|>
06ecb04d4fd9451a554ea9e785a3d12e32d5f2981013106843ea492aea556e2a
def _create_classes_as_attrs(self, classes, general_attrs): 'Creates radio attributes for all classes and formats all\n class-specific attributes.\n ' options = [] for c in classes: if isinstance(c, dict): subset_attrs = self._build_attributes(c['attributes']) for sc in c['classes']: options.append(lbo.Option(value=str(sc), options=subset_attrs)) else: options.append(lbo.Option(value=str(c))) classes_attr = lbo.Classification(class_type=lbo.Classification.Type.RADIO, instructions='class_name', options=options, required=True) return ([classes_attr] + general_attrs)
Creates radio attributes for all classes and formats all class-specific attributes.
fiftyone/utils/labelbox.py
_create_classes_as_attrs
stbdang/fiftyone
3
python
def _create_classes_as_attrs(self, classes, general_attrs): 'Creates radio attributes for all classes and formats all\n class-specific attributes.\n ' options = [] for c in classes: if isinstance(c, dict): subset_attrs = self._build_attributes(c['attributes']) for sc in c['classes']: options.append(lbo.Option(value=str(sc), options=subset_attrs)) else: options.append(lbo.Option(value=str(c))) classes_attr = lbo.Classification(class_type=lbo.Classification.Type.RADIO, instructions='class_name', options=options, required=True) return ([classes_attr] + general_attrs)
def _create_classes_as_attrs(self, classes, general_attrs): 'Creates radio attributes for all classes and formats all\n class-specific attributes.\n ' options = [] for c in classes: if isinstance(c, dict): subset_attrs = self._build_attributes(c['attributes']) for sc in c['classes']: options.append(lbo.Option(value=str(sc), options=subset_attrs)) else: options.append(lbo.Option(value=str(c))) classes_attr = lbo.Classification(class_type=lbo.Classification.Type.RADIO, instructions='class_name', options=options, required=True) return ([classes_attr] + general_attrs)<|docstring|>Creates radio attributes for all classes and formats all class-specific attributes.<|endoftext|>
ece68499516784221bda9467abe895743b3f8648e18111e1f549635bd5c87d8e
def _add_labels_to_results(self, results, labels_dict, sample_id, label_schema): 'Adds the labels in ``labels_dict`` to ``results``.\n\n results::\n\n <label_field>: {\n <label_type>: {\n <sample_id>: {\n <label_id>:\n <fo.Label> or <label - for scalars>\n }\n }\n }\n\n labels_dict::\n\n {\n <label_field>: {\n <label_type>: [<fo.Label>, ...]\n }\n }\n ' attributes = self._gather_classification_attributes(labels_dict, label_schema) results = self._parse_expected_label_fields(results, labels_dict, sample_id, label_schema, attributes) return results
Adds the labels in ``labels_dict`` to ``results``. results:: <label_field>: { <label_type>: { <sample_id>: { <label_id>: <fo.Label> or <label - for scalars> } } } labels_dict:: { <label_field>: { <label_type>: [<fo.Label>, ...] } }
fiftyone/utils/labelbox.py
_add_labels_to_results
stbdang/fiftyone
3
python
def _add_labels_to_results(self, results, labels_dict, sample_id, label_schema): 'Adds the labels in ``labels_dict`` to ``results``.\n\n results::\n\n <label_field>: {\n <label_type>: {\n <sample_id>: {\n <label_id>:\n <fo.Label> or <label - for scalars>\n }\n }\n }\n\n labels_dict::\n\n {\n <label_field>: {\n <label_type>: [<fo.Label>, ...]\n }\n }\n ' attributes = self._gather_classification_attributes(labels_dict, label_schema) results = self._parse_expected_label_fields(results, labels_dict, sample_id, label_schema, attributes) return results
def _add_labels_to_results(self, results, labels_dict, sample_id, label_schema): 'Adds the labels in ``labels_dict`` to ``results``.\n\n results::\n\n <label_field>: {\n <label_type>: {\n <sample_id>: {\n <label_id>:\n <fo.Label> or <label - for scalars>\n }\n }\n }\n\n labels_dict::\n\n {\n <label_field>: {\n <label_type>: [<fo.Label>, ...]\n }\n }\n ' attributes = self._gather_classification_attributes(labels_dict, label_schema) results = self._parse_expected_label_fields(results, labels_dict, sample_id, label_schema, attributes) return results<|docstring|>Adds the labels in ``labels_dict`` to ``results``. results:: <label_field>: { <label_type>: { <sample_id>: { <label_id>: <fo.Label> or <label - for scalars> } } } labels_dict:: { <label_field>: { <label_type>: [<fo.Label>, ...] } }<|endoftext|>