function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def _get_sz_max(self):
    """Populate self.sz_max from 'show virtual-service global' resource limits.

    Maps the device's media names onto the local keys 'cpu', 'memory'
    and 'disk'; unrecognized media rows are skipped.
    """
    output = self.cli('show virtual-service global')
    rows = output['TABLE_resource_limits']['ROW_resource_limits']
    for row in rows:
        media = row['media_name']
        quota = int(row['quota'])
        if 'CPU' in media:
            key = 'cpu'
        elif 'memory' in media:
            key = 'memory'
        elif 'flash' in media:
            key = 'disk'
        else:
            continue
        self.sz_max[key] = quota
Apstra/aeon-venos
[ 2, 4, 2, 1, 1463407794 ]
def error(errtype, msg, code=42):
    """Write a colorized error message to stderr and exit with *code*."""
    template = "{t.red}[ERROR] {t.yellow}{er}: {msg}" "{t.normal}\n"
    rendered = template.format(er=errtype, msg=msg, t=log.term)
    sys.stderr.write(rendered)
    sys.exit(code)
yac/rdoupdate
[ 1, 3, 1, 2, 1380286143 ]
def _get_update_files(args):
    """Resolve the update file list from CLI args.

    Explicit -f files win; otherwise the latest update is looked up in the
    git repo given by -g (defaulting to the current directory). -f and -g
    together are rejected.
    """
    if args.files and args.git:
        error("invalid invocation", "-g and -f are exclusive.", 19)
    if args.files:
        return args.files
    repo = args.git or '.'
    args.git = repo
    latest = actions.get_last_commit_update(repo)
    return [os.path.join(repo, latest)]
yac/rdoupdate
[ 1, 3, 1, 2, 1380286143 ]
def _parse_build_filter(fargs):
    """Parse 'attr:regex' filter arguments into (attr, regex) tuples.

    Args:
        fargs: iterable of 'attr:regex' strings, or None/empty.

    Returns:
        List of (attr, regex) tuples; [] when fargs is falsy.

    Raises:
        exception.InvalidFilter: if an argument has no ':' separator.
    """
    bf = []
    if not fargs:
        return bf
    for f in fargs:
        try:
            # Split on the first ':' only, so the regex may itself contain ':'.
            attr, rex = f.split(':', 1)
        except ValueError:
            # Narrowed from a bare `except Exception` with an unused binding:
            # only the tuple unpack can fail here (no ':' in the argument).
            raise exception.InvalidFilter(what=f)
        bf.append((attr, rex))
    return bf
yac/rdoupdate
[ 1, 3, 1, 2, 1380286143 ]
def do_move(args):
    """CLI action: relocate the selected files into the target directory."""
    files, destination = args.files, args.dir
    actions.move_files(files, destination)
yac/rdoupdate
[ 1, 3, 1, 2, 1380286143 ]
def run(*cargs):
    """Parse command-line arguments and dispatch to the chosen action.

    Returns whatever the selected action callback returns.
    """
    parsed = get_parser().parse_args(cargs)
    return parsed.action(parsed)
yac/rdoupdate
[ 1, 3, 1, 2, 1380286143 ]
def do_GET(self):
    """Serve the Barix HTML status page (every GET path returns the same page)."""
    b_obj = self.barix
    NOT_CONNECTED = "<b>NOT CONNECTED</b>"
    # Averages default to zero until at least one level sample exists.
    left_level_avg = 0
    right_level_avg = 0
    level_avg_window_minutes = 0
    if level_history:
        N = len(level_history)
        # level_history holds (left, right) pairs — see poll_barix, which
        # appends float(left_level), float(right_level).
        left_level_avg = sum(L for L, _ in level_history) / N
        right_level_avg = sum(R for _, R in level_history) / N
        # One sample per poll, so the averaging window follows from the poll rate.
        level_avg_window_minutes = N * _POLLING_FREQUENCY_S / 60
    barix_info = {
        "status_time": b_obj.last_update_time_str,
        "status": b_obj.status,
        "left_level": b_obj.left_level,
        "right_level": b_obj.right_level,
        "left_level_avg": int(left_level_avg),
        "right_level_avg": int(right_level_avg),
        "level_avg_window_minutes": int(level_avg_window_minutes),
        "live365_connected": NOT_CONNECTED,
        "archiver_connected": NOT_CONNECTED,
    }
    # "12345"/"12346" appear to be fixed client ports for Live365 and the
    # archiver respectively — TODO confirm against the Barix configuration.
    # TODO(trow): Check IP address.
    if "12345" in b_obj.clients:
        barix_info["live365_connected"] = "connected"
    # TODO(trow): Check IP address.
    if "12346" in b_obj.clients:
        barix_info["archiver_connected"] = "connected"
    # _STATUS_PAGE is a %-format template filled from barix_info.
    response_str = _STATUS_PAGE % barix_info
    self.send_response(200)
    self.send_header("Content-Type", "text/html")
    self.send_header("Content-Length", str(len(response_str)))
    self.end_headers()
    self.wfile.write(response_str)
chirpradio/chirpradio-machine
[ 12, 8, 12, 2, 1315249848 ]
def log_message(self, format, *args):
    """Discard per-request log output for now (overrides the handler default)."""
    return None
chirpradio/chirpradio-machine
[ 12, 8, 12, 2, 1315249848 ]
def poll_barix(b_obj, log_fh):
    """Poll the Barix once: record levels and optionally log client info.

    Appends the current (left, right) levels to the bounded level_history
    and, when log_fh is given, writes a "<timestamp> <port hex> <ip>" line
    for the client on port 12345. Any error is logged and swallowed so the
    polling loop keeps running.
    """
    try:
        if not b_obj.ping():
            return
        level_history.append(
            (float(b_obj.left_level), float(b_obj.right_level)))
        # Keep the history bounded to the configured window size.
        if len(level_history) > LEVEL_HISTORY_MAX_SIZE:
            level_history.pop(0)
        if log_fh:
            now = int(b_obj.last_update_time)
            ip, far_port = b_obj.clients.get("12345", ("None", 0))
            log_info = "%d %04x %s\n" % (now, int(far_port), ip)
            log_fh.write(log_info)
            log_fh.flush()
    except Exception:
        # Fixed: was the Python-2-only `except Exception, err` form with an
        # unused binding; this spelling works on both Python 2 and 3.
        logging.exception("Swallowed exception")
chirpradio/chirpradio-machine
[ 12, 8, 12, 2, 1315249848 ]
def __init__(self, categories):
    """Store the category list.

    Args:
      categories: list of dicts, each with keys -
        'id': (required) integer uniquely identifying the category.
        'name': (required) category name string, e.g. 'cat', 'dog'.
    """
    self._categories = categories
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
    """Record groundtruth for one image (no-op in this base interface).

    Args:
      image_id: unique string/integer identifier for the image.
      groundtruth_dict: dict of groundtruth numpy arrays needed for evaluation.
    """
    return None
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def add_single_detected_image_info(self, image_id, detections_dict):
    """Record detections for one image (no-op in this base interface).

    Args:
      image_id: unique string/integer identifier for the image.
      detections_dict: dict of detection numpy arrays needed for evaluation.
    """
    return None
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def evaluate(self):
    """Compute and return a metrics dictionary (no-op in this base interface)."""
    return None
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def clear(self):
    """Reset accumulated state before a fresh evaluation (no-op here)."""
    return None
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def __init__(self,
             categories,
             matching_iou_threshold=0.5,
             evaluate_corlocs=False,
             metric_prefix=None,
             use_weighted_mean_ap=False,
             evaluate_masks=False,
             group_of_weight=0.0):
    """Constructor.

    Args:
      categories: A list of dicts, each of which has the following keys -
        'id': (required) an integer id uniquely identifying this category.
        'name': (required) string representing category name e.g., 'cat', 'dog'.
      matching_iou_threshold: IOU threshold to use for matching groundtruth
        boxes to detection boxes.
      evaluate_corlocs: (optional) boolean which determines if corloc scores
        are to be returned or not.
      metric_prefix: (optional) string prefix for metric name; if None, no
        prefix is used.
      use_weighted_mean_ap: (optional) boolean which determines if the mean
        average precision is computed directly from the scores and tp_fp_labels
        of all classes.
      evaluate_masks: If False, evaluation will be performed based on boxes. If
        True, mask evaluation will be performed instead.
      group_of_weight: Weight of group-of boxes. If set to 0, detections of the
        correct class within a group-of box are ignored. If weight is > 0, then
        if at least one detection falls within a group-of box with
        matching_iou_threshold, weight group_of_weight is added to true
        positives. Consequently, if no detection falls within a group-of box,
        weight group_of_weight is added to false negatives.

    Raises:
      ValueError: If the category ids are not 1-indexed.
    """
    super(ObjectDetectionEvaluator, self).__init__(categories)
    # Ids are required to be 1-indexed, so the max id doubles as the class count.
    self._num_classes = max([cat['id'] for cat in categories])
    if min(cat['id'] for cat in categories) < 1:
      raise ValueError('Classes should be 1-indexed.')
    self._matching_iou_threshold = matching_iou_threshold
    self._use_weighted_mean_ap = use_weighted_mean_ap
    # External labels are 1-indexed; the internal evaluation is 0-indexed.
    self._label_id_offset = 1
    self._evaluate_masks = evaluate_masks
    self._group_of_weight = group_of_weight
    self._evaluation = ObjectDetectionEvaluation(
        num_groundtruth_classes=self._num_classes,
        matching_iou_threshold=self._matching_iou_threshold,
        use_weighted_mean_ap=self._use_weighted_mean_ap,
        label_id_offset=self._label_id_offset,
        group_of_weight=self._group_of_weight)
    self._image_ids = set([])
    self._evaluate_corlocs = evaluate_corlocs
    self._metric_prefix = (metric_prefix + '_') if metric_prefix else ''
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def add_single_detected_image_info(self, image_id, detections_dict):
    """Adds detections for a single image to be used for evaluation.

    Args:
      image_id: A unique string/integer identifier for the image.
      detections_dict: A dictionary containing -
        standard_fields.DetectionResultFields.detection_boxes: float32 numpy
          array of shape [num_boxes, 4] containing `num_boxes` detection boxes
          of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
        standard_fields.DetectionResultFields.detection_scores: float32 numpy
          array of shape [num_boxes] containing detection scores for the boxes.
        standard_fields.DetectionResultFields.detection_classes: integer numpy
          array of shape [num_boxes] containing 1-indexed detection classes for
          the boxes.
        standard_fields.DetectionResultFields.detection_masks: uint8 numpy array
          of shape [num_boxes, height, width] containing `num_boxes` masks of
          values ranging between 0 and 1.

    Raises:
      ValueError: If detection masks are not in detections dictionary.
    """
    # Shift the incoming 1-indexed class labels into the evaluator's
    # 0-indexed label space.
    detection_classes = (
        detections_dict[standard_fields.DetectionResultFields.detection_classes]
        - self._label_id_offset)
    detection_masks = None
    if self._evaluate_masks:
      # Masks are mandatory only when mask evaluation was requested.
      if (standard_fields.DetectionResultFields.detection_masks not in
          detections_dict):
        raise ValueError('Detection masks not in detections dictionary.')
      detection_masks = detections_dict[
          standard_fields.DetectionResultFields.detection_masks]
    self._evaluation.add_single_detected_image_info(
        image_key=image_id,
        detected_boxes=detections_dict[
            standard_fields.DetectionResultFields.detection_boxes],
        detected_scores=detections_dict[
            standard_fields.DetectionResultFields.detection_scores],
        detected_class_labels=detection_classes,
        detected_masks=detection_masks)
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def clear(self):
    """Clears the state to prepare for a fresh evaluation."""
    # Rebuild the evaluation with the same settings used in __init__.
    # Fix: group_of_weight was previously omitted here, silently resetting
    # it to the constructor default after every clear().
    self._evaluation = ObjectDetectionEvaluation(
        num_groundtruth_classes=self._num_classes,
        matching_iou_threshold=self._matching_iou_threshold,
        use_weighted_mean_ap=self._use_weighted_mean_ap,
        label_id_offset=self._label_id_offset,
        group_of_weight=self._group_of_weight)
    self._image_ids.clear()
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def __init__(self, categories, matching_iou_threshold=0.5):
    """Configure box evaluation with PASCAL VOC defaults ('PascalBoxes')."""
    super(PascalDetectionEvaluator, self).__init__(
        categories,
        matching_iou_threshold=matching_iou_threshold,
        evaluate_corlocs=False,
        metric_prefix='PascalBoxes',
        use_weighted_mean_ap=False)
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def __init__(self, categories, matching_iou_threshold=0.5):
    """Configure box evaluation with weighted-mAP PASCAL settings."""
    super(WeightedPascalDetectionEvaluator, self).__init__(
        categories,
        matching_iou_threshold=matching_iou_threshold,
        evaluate_corlocs=False,
        metric_prefix='WeightedPascalBoxes',
        use_weighted_mean_ap=True)
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def __init__(self, categories, matching_iou_threshold=0.5):
    """Configure mask evaluation with PASCAL defaults ('PascalMasks')."""
    super(PascalInstanceSegmentationEvaluator, self).__init__(
        categories,
        matching_iou_threshold=matching_iou_threshold,
        evaluate_corlocs=False,
        metric_prefix='PascalMasks',
        use_weighted_mean_ap=False,
        evaluate_masks=True)
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def __init__(self, categories, matching_iou_threshold=0.5):
    """Configure mask evaluation with weighted-mAP PASCAL settings."""
    super(WeightedPascalInstanceSegmentationEvaluator, self).__init__(
        categories,
        matching_iou_threshold=matching_iou_threshold,
        evaluate_corlocs=False,
        metric_prefix='WeightedPascalMasks',
        use_weighted_mean_ap=True,
        evaluate_masks=True)
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def __init__(self,
             categories,
             matching_iou_threshold=0.5,
             evaluate_corlocs=False,
             metric_prefix='OpenImagesV2',
             group_of_weight=0.0):
    """Constructor.

    Args:
      categories: list of dicts with keys -
        'id': (required) integer id uniquely identifying this category.
        'name': (required) category name string, e.g. 'cat', 'dog'.
      matching_iou_threshold: IOU threshold for matching groundtruth boxes
        to detection boxes.
      evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
      metric_prefix: prefix for the reported metric names.
      group_of_weight: weight of a group-of bounding box. With 0 (the Open
        Images V2 protocol default) detections of the correct class inside a
        group-of box are ignored. With a weight > 0, a detection inside a
        group-of box at matching_iou_threshold adds group_of_weight to true
        positives; no such detection adds group_of_weight to false negatives.
    """
    super(OpenImagesDetectionEvaluator, self).__init__(
        categories,
        matching_iou_threshold,
        evaluate_corlocs,
        metric_prefix=metric_prefix,
        group_of_weight=group_of_weight)
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def __init__(self,
             categories,
             matching_iou_threshold=0.5,
             evaluate_corlocs=False,
             group_of_weight=1.0):
    """Constructor.

    Args:
      categories: list of dicts with keys -
        'id': (required) integer id uniquely identifying this category.
        'name': (required) category name string, e.g. 'cat', 'dog'.
      matching_iou_threshold: IOU threshold for matching groundtruth boxes
        to detection boxes.
      evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
      group_of_weight: weight of a group-of box. With 0, detections of the
        correct class inside a group-of box are ignored. With a weight > 0
        (the Open Images Challenge 2018 default), a detection inside a
        group-of box at matching_iou_threshold adds group_of_weight to true
        positives; no such detection adds group_of_weight to false negatives.
    """
    super(OpenImagesDetectionChallengeEvaluator, self).__init__(
        categories,
        matching_iou_threshold,
        evaluate_corlocs,
        metric_prefix='OpenImagesChallenge2018',
        group_of_weight=group_of_weight)
    # Per-image set of labels that are verified (and hence evaluatable).
    self._evaluatable_labels = {}
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def add_single_detected_image_info(self, image_id, detections_dict):
    """Adds detections for a single image to be used for evaluation.

    Args:
      image_id: A unique string/integer identifier for the image.
      detections_dict: A dictionary containing -
        standard_fields.DetectionResultFields.detection_boxes: float32 numpy
          array of shape [num_boxes, 4] containing `num_boxes` detection boxes
          of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
        standard_fields.DetectionResultFields.detection_scores: float32 numpy
          array of shape [num_boxes] containing detection scores for the boxes.
        standard_fields.DetectionResultFields.detection_classes: integer numpy
          array of shape [num_boxes] containing 1-indexed detection classes for
          the boxes.

    Raises:
      ValueError: If detection masks are not in detections dictionary.
    """
    if image_id not in self._image_ids:
      # Groundtruth was never added for this image. NOTE(review): the original
      # comment claimed this "breaks the code", but no error is raised —
      # registering an empty evaluatable-label set means every detection for
      # this image is filtered out below, i.e. it is silently ignored.
      self._image_ids.update([image_id])
      self._evaluatable_labels[image_id] = np.array([])
    # Shift incoming 1-indexed class labels into the 0-indexed label space.
    detection_classes = (
        detections_dict[standard_fields.DetectionResultFields.detection_classes]
        - self._label_id_offset)
    # Keep only detections whose class is evaluatable for this image.
    allowed_classes = np.where(
        np.isin(detection_classes, self._evaluatable_labels[image_id]))
    detection_classes = detection_classes[allowed_classes]
    detected_boxes = detections_dict[
        standard_fields.DetectionResultFields.detection_boxes][allowed_classes]
    detected_scores = detections_dict[
        standard_fields.DetectionResultFields.detection_scores][allowed_classes]
    self._evaluation.add_single_detected_image_info(
        image_key=image_id,
        detected_boxes=detected_boxes,
        detected_scores=detected_scores,
        detected_class_labels=detection_classes)
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def __init__(self,
             num_groundtruth_classes,
             matching_iou_threshold=0.5,
             nms_iou_threshold=1.0,
             nms_max_output_boxes=10000,
             use_weighted_mean_ap=False,
             label_id_offset=0,
             group_of_weight=0.0,
             per_image_eval_class=per_image_evaluation.PerImageEvaluation):
    """Constructor.

    Args:
      num_groundtruth_classes: Number of ground-truth classes.
      matching_iou_threshold: IOU threshold used for matching detected boxes
        to ground-truth boxes.
      nms_iou_threshold: IOU threshold used for non-maximum suppression.
      nms_max_output_boxes: Maximum number of boxes returned by non-maximum
        suppression.
      use_weighted_mean_ap: (optional) boolean which determines if the mean
        average precision is computed directly from the scores and
        tp_fp_labels of all classes.
      label_id_offset: The label id offset.
      group_of_weight: Weight of group-of boxes. If set to 0, detections of
        the correct class within a group-of box are ignored. If weight is > 0,
        then if at least one detection falls within a group-of box with
        matching_iou_threshold, weight group_of_weight is added to true
        positives. Consequently, if no detection falls within a group-of box,
        weight group_of_weight is added to false negatives.
      per_image_eval_class: The class that contains functions for computing
        per image metrics.

    Raises:
      ValueError: if num_groundtruth_classes is smaller than 1.
    """
    if num_groundtruth_classes < 1:
      raise ValueError('Need at least 1 groundtruth class for evaluation.')
    self.per_image_eval = per_image_eval_class(
        num_groundtruth_classes=num_groundtruth_classes,
        matching_iou_threshold=matching_iou_threshold,
        nms_iou_threshold=nms_iou_threshold,
        nms_max_output_boxes=nms_max_output_boxes,
        group_of_weight=group_of_weight)
    self.group_of_weight = group_of_weight
    self.num_class = num_groundtruth_classes
    self.use_weighted_mean_ap = use_weighted_mean_ap
    self.label_id_offset = label_id_offset
    # Per-image groundtruth state, keyed by image key.
    self.groundtruth_boxes = {}
    self.groundtruth_class_labels = {}
    self.groundtruth_masks = {}
    self.groundtruth_is_difficult_list = {}
    self.groundtruth_is_group_of_list = {}
    # Per-class groundtruth counters used when computing AP.
    self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=float)
    self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)
    # Detection-side accumulators live in a helper so clear_detections()
    # can reset them without touching groundtruth.
    self._initialize_detections()
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def clear_detections(self):
    # Reset the accumulated detection-side state by re-running the
    # detection-state initialization; groundtruth dictionaries are untouched.
    self._initialize_detections()
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def add_single_detected_image_info(self, image_key, detected_boxes,
                                   detected_scores, detected_class_labels,
                                   detected_masks=None):
    """Adds detections for a single image to be used for evaluation.

    Args:
      image_key: A unique string/integer identifier for the image.
      detected_boxes: float32 numpy array of shape [num_boxes, 4] containing
        `num_boxes` detection boxes of the format [ymin, xmin, ymax, xmax] in
        absolute image coordinates.
      detected_scores: float32 numpy array of shape [num_boxes] containing
        detection scores for the boxes.
      detected_class_labels: integer numpy array of shape [num_boxes] containing
        0-indexed detection classes for the boxes.
      detected_masks: np.uint8 numpy array of shape [num_boxes, height, width]
        containing `num_boxes` detection masks with values ranging between 0
        and 1.

    Raises:
      ValueError: if the number of boxes, scores and class labels differ in
        length.
    """
    if (len(detected_boxes) != len(detected_scores) or
        len(detected_boxes) != len(detected_class_labels)):
      # Bug fix: the %-arguments were previously placed outside the format
      # expression ("... % len(a), len(b), len(c)"), which made this line
      # raise a TypeError instead of the intended ValueError message.
      raise ValueError(
          'detected_boxes, detected_scores and '
          'detected_class_labels should all have same lengths. Got'
          '[%d, %d, %d]' % (len(detected_boxes), len(detected_scores),
                            len(detected_class_labels)))

    if image_key in self.detection_keys:
      logging.warn(
          'image %s has already been added to the detection result database',
          image_key)
      return

    self.detection_keys.add(image_key)
    if image_key in self.groundtruth_boxes:
      groundtruth_boxes = self.groundtruth_boxes[image_key]
      groundtruth_class_labels = self.groundtruth_class_labels[image_key]
      # Masks are popped instead of look up. The reason is that we do not want
      # to keep all masks in memory which can cause memory overflow.
      groundtruth_masks = self.groundtruth_masks.pop(image_key)
      groundtruth_is_difficult_list = self.groundtruth_is_difficult_list[
          image_key]
      groundtruth_is_group_of_list = self.groundtruth_is_group_of_list[
          image_key]
    else:
      # No groundtruth for this image: evaluate against empty groundtruth so
      # every detection counts as a false positive.
      groundtruth_boxes = np.empty(shape=[0, 4], dtype=float)
      groundtruth_class_labels = np.array([], dtype=int)
      if detected_masks is None:
        groundtruth_masks = None
      else:
        groundtruth_masks = np.empty(shape=[0, 1, 1], dtype=float)
      groundtruth_is_difficult_list = np.array([], dtype=bool)
      groundtruth_is_group_of_list = np.array([], dtype=bool)
    scores, tp_fp_labels, is_class_correctly_detected_in_image = (
        self.per_image_eval.compute_object_detection_metrics(
            detected_boxes=detected_boxes,
            detected_scores=detected_scores,
            detected_class_labels=detected_class_labels,
            groundtruth_boxes=groundtruth_boxes,
            groundtruth_class_labels=groundtruth_class_labels,
            groundtruth_is_difficult_list=groundtruth_is_difficult_list,
            groundtruth_is_group_of_list=groundtruth_is_group_of_list,
            detected_masks=detected_masks,
            groundtruth_masks=groundtruth_masks))

    for i in range(self.num_class):
      if scores[i].shape[0] > 0:
        self.scores_per_class[i].append(scores[i])
        self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
    (self.num_images_correctly_detected_per_class
    ) += is_class_correctly_detected_in_image
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def testNotImplemented(self):
    """Every stubbed-out jnp.linalg entry must raise NotImplementedError."""
    for missing_name in jnp.linalg._NOT_IMPLEMENTED:
      missing_fn = getattr(jnp.linalg, missing_name)
      with self.assertRaises(NotImplementedError):
        missing_fn()
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testCholesky(self, shape, dtype):
    """Checks jnp.linalg.cholesky against numpy, plus compile and grads."""
    rng = jtu.rand_default(self.rng())

    def args_maker():
      # Build a Hermitian positive-definite input as a @ conj(a).T, using a
      # wide factor (n x 2n) so the product is full rank.
      factor_shape = shape[:-1] + (2 * shape[-1],)
      a = rng(factor_shape, dtype)
      return [np.matmul(a, jnp.conj(T(a)))]

    self._CheckAgainstNumpy(np.linalg.cholesky, jnp.linalg.cholesky, args_maker,
                            tol=1e-3)
    self._CompileAndCheck(jnp.linalg.cholesky, args_maker)

    # Gradient checks only at 64-bit precision; lower precision is too noisy.
    if jnp.finfo(dtype).bits == 64:
      jtu.check_grads(jnp.linalg.cholesky, args_maker(), order=2)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testDet(self, n, dtype):
    """jnp.linalg.det matches numpy on a random n x n matrix and compiles."""
    rng = jtu.rand_default(self.rng())
    make_args = lambda: [rng((n, n), dtype)]
    self._CheckAgainstNumpy(np.linalg.det, jnp.linalg.det, make_args, tol=1e-3)
    self._CompileAndCheck(jnp.linalg.det, make_args,
                          rtol={np.float64: 1e-13, np.complex128: 1e-13})
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testDetGrad(self, shape, dtype):
    """Gradient checks for jnp.linalg.det, including at singular inputs."""
    rng = jtu.rand_default(self.rng())
    a = rng(shape, dtype)
    jtu.check_grads(jnp.linalg.det, (a,), 2, atol=1e-1, rtol=1e-1)
    # make sure there are no NaNs when a matrix is zero
    if len(shape) == 2:
      # Removed a stray no-op `pass` statement that preceded this call.
      jtu.check_grads(
        jnp.linalg.det, (jnp.zeros_like(a),), 1, atol=1e-1, rtol=1e-1)
    else:
      # Batched case: zero out one batch member instead of the whole array.
      a[0] = 0
      jtu.check_grads(jnp.linalg.det, (a,), 1, atol=1e-1, rtol=1e-1)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testDetGradOfSingularMatrixCorank1(self):
    """det gradient must be well-defined at a corank-1 (rank 2) matrix."""
    singular = jnp.array([[50, -30, 45],
                          [-30, 90, -81],
                          [45, -81, 81]], dtype=jnp.float32)
    jtu.check_grads(jnp.linalg.det, (singular,), 1, atol=1e-1, rtol=1e-1)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testTensorsolve(self, m, nq, dtype): rng = jtu.rand_default(self.rng()) # According to numpy docs the shapes are as follows: # Coefficient tensor (a), of shape b.shape + Q. # And prod(Q) == prod(b.shape) # Therefore, n = prod(q) n, q = nq b_shape = (n, m) # To accomplish prod(Q) == prod(b.shape) we append the m extra dim # to Q shape Q = q + (m,) args_maker = lambda: [ rng(b_shape + Q, dtype), # = a rng(b_shape, dtype)] # = b a, b = args_maker() result = jnp.linalg.tensorsolve(*args_maker()) self.assertEqual(result.shape, Q) self._CheckAgainstNumpy(np.linalg.tensorsolve, jnp.linalg.tensorsolve, args_maker, tol={np.float32: 1e-2, np.float64: 1e-3}) self._CompileAndCheck(jnp.linalg.tensorsolve, args_maker, rtol={np.float64: 1e-13})
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testSlogdet(self, shape, dtype):
    """jnp.linalg.slogdet matches numpy and compiles."""
    rng = jtu.rand_default(self.rng())
    make_args = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np.linalg.slogdet, jnp.linalg.slogdet, make_args,
                            tol=1e-3)
    self._CompileAndCheck(jnp.linalg.slogdet, make_args)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testSlogdetGrad(self, shape, dtype):
    """Second-order gradient check for jnp.linalg.slogdet."""
    rng = jtu.rand_default(self.rng())
    matrix = rng(shape, dtype)
    jtu.check_grads(jnp.linalg.slogdet, (matrix,), 2, atol=1e-1, rtol=2e-1)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testEig(self, shape, dtype, compute_left_eigenvectors,
            compute_right_eigenvectors):
    """Checks lax.linalg.eig eigenpairs satisfy A v = w v (and the adjoint
    relation for left eigenvectors), for every combination of requested
    left/right eigenvector outputs."""
    rng = jtu.rand_default(self.rng())
    n = shape[-1]
    args_maker = lambda: [rng(shape, dtype)]

    # Norm, adjusted for dimension and type.
    def norm(x):
      norm = np.linalg.norm(x, axis=(-2, -1))
      return norm / ((n + 1) * jnp.finfo(dtype).eps)

    def check_right_eigenvectors(a, w, vr):
      self.assertTrue(
          np.all(norm(np.matmul(a, vr) - w[..., None, :] * vr) < 100))

    def check_left_eigenvectors(a, w, vl):
      # Left eigenvectors of A are right eigenvectors of A^H with conjugated
      # eigenvalues, so reuse the right-eigenvector check.
      rank = len(a.shape)
      aH = jnp.conj(a.transpose(list(range(rank - 2)) + [rank - 1, rank - 2]))
      wC = jnp.conj(w)
      check_right_eigenvectors(aH, wC, vl)

    a, = args_maker()
    results = lax.linalg.eig(a, compute_left_eigenvectors,
                             compute_right_eigenvectors)
    # results layout: eigenvalues first, then left eigenvectors (if
    # requested), then right eigenvectors (if requested).
    w = results[0]

    if compute_left_eigenvectors:
      check_left_eigenvectors(a, w, results[1])
    if compute_right_eigenvectors:
      check_right_eigenvectors(a, w, results[1 + compute_left_eigenvectors])

    self._CompileAndCheck(partial(jnp.linalg.eig), args_maker, rtol=1e-3)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testEigvalsGrad(self, shape, dtype):
    """Forward/reverse gradient check for eigvals on small matrices.

    Kept to small-ish inputs: for large matrices perturbations can reorder
    the eigenvalues, which trips up check_grads (per @j-towns, unverified).
    """
    rng = jtu.rand_default(self.rng())
    mat = rng(shape, dtype)
    tol = 1e-4 if dtype in (np.float64, np.complex128) else 1e-1
    jtu.check_grads(lambda x: jnp.linalg.eigvals(x), (mat,), order=1,
                    modes=['fwd', 'rev'], rtol=tol, atol=tol)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testEigvals(self, shape, dtype):
    """eigvals agrees with the eigenvalues returned by eig."""
    rng = jtu.rand_default(self.rng())
    mat = rng(shape, dtype)
    w_from_eig, _ = jnp.linalg.eig(mat)
    w_direct = jnp.linalg.eigvals(mat)
    self.assertAllClose(w_from_eig, w_direct, rtol={np.complex128: 1e-14})
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testEigvalsInf(self):
    """Regression test: https://github.com/google/jax/issues/2661 —
    eigvals of a matrix containing inf must be NaN, not crash."""
    inf_mat = jnp.array([[jnp.inf]])
    self.assertTrue(jnp.all(jnp.isnan(jnp.linalg.eigvals(inf_mat))))
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testEigBatching(self, shape, dtype):
    """vmap of eig satisfies A v = w v for every batch member."""
    rng = jtu.rand_default(self.rng())
    batched = rng((10,) + shape, dtype)
    ws, vs = vmap(jnp.linalg.eig)(batched)
    residual = np.matmul(batched, vs) - ws[..., None, :] * vs
    self.assertTrue(np.all(np.linalg.norm(residual) < 1e-3))
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testEigh(self, n, dtype, lower):
    """Checks eigh: V is unitary and A v = w v, honoring the UPLO triangle."""
    rng = jtu.rand_default(self.rng())
    tol = 1e-3
    args_maker = lambda: [rng((n, n), dtype)]

    uplo = "L" if lower else "U"

    a, = args_maker()
    # Symmetrize so the reference relation A v = w v below is exact.
    a = (a + np.conj(a.T)) / 2
    # Pass only the requested triangle; symmetrize_input=False ensures eigh
    # reads exactly that triangle.
    w, v = jnp.linalg.eigh(np.tril(a) if lower else np.triu(a),
                           UPLO=uplo, symmetrize_input=False)
    # Eigenvector matrix should be unitary: V^H V == I.
    self.assertLessEqual(
        np.linalg.norm(np.eye(n) - np.matmul(np.conj(T(v)), v)), 1e-3)
    with jax.numpy_rank_promotion('allow'):
      # Eigenpair residual, scaled by ||a||.
      self.assertLessEqual(np.linalg.norm(np.matmul(a, v) - w * v),
                           tol * np.linalg.norm(a))

    self._CompileAndCheck(partial(jnp.linalg.eigh, UPLO=uplo), args_maker,
                          rtol=1e-3)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testEigvalsh(self, shape, dtype):
    """eigvalsh matches numpy on a symmetrized random matrix."""
    rng = jtu.rand_default(self.rng())
    n = shape[-1]

    def make_args():
      raw = rng((n, n), dtype)
      return [(raw + np.conj(raw.T)) / 2]

    self._CheckAgainstNumpy(np.linalg.eigvalsh, jnp.linalg.eigvalsh, make_args,
                            tol=1e-3)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testEighGrad(self, shape, dtype, lower):
    """Gradient check for eigh (currently disabled)."""
    rng = jtu.rand_default(self.rng())
    # NOTE(review): this skip makes everything below dead code until the
    # numeric issues are resolved.
    self.skipTest("Test fails with numeric errors.")
    uplo = "L" if lower else "U"
    a = rng(shape, dtype)
    a = (a + np.conj(T(a))) / 2
    ones = np.ones((a.shape[-1], a.shape[-1]), dtype=dtype)
    a *= np.tril(ones) if lower else np.triu(ones)
    # Gradient checks will fail without symmetrization as the eigh jvp rule
    # is only correct for tangents in the symmetric subspace, whereas the
    # checker checks against unconstrained (co)tangents.
    if dtype not in complex_types:
      f = partial(jnp.linalg.eigh, UPLO=uplo, symmetrize_input=True)
    else:  # only check eigenvalue grads for complex matrices
      f = lambda a: partial(jnp.linalg.eigh, UPLO=uplo,
                            symmetrize_input=True)(a)[0]
    jtu.check_grads(f, (a,), 2, rtol=1e-1)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testEighGradVectorComplex(self, shape, dtype, lower, eps): rng = jtu.rand_default(self.rng()) # Special case to test for complex eigenvector grad correctness. # Exact eigenvector coordinate gradients are hard to test numerically for complex # eigensystem solvers given the extra degrees of per-eigenvector phase freedom. # Instead, we numerically verify the eigensystem properties on the perturbed # eigenvectors. You only ever want to optimize eigenvector directions, not coordinates! uplo = "L" if lower else "U" a = rng(shape, dtype) a = (a + np.conj(a.T)) / 2 a = np.tril(a) if lower else np.triu(a) a_dot = eps * rng(shape, dtype) a_dot = (a_dot + np.conj(a_dot.T)) / 2 a_dot = np.tril(a_dot) if lower else np.triu(a_dot) # evaluate eigenvector gradient and groundtruth eigensystem for perturbed input matrix f = partial(jnp.linalg.eigh, UPLO=uplo) (w, v), (dw, dv) = jvp(f, primals=(a,), tangents=(a_dot,)) self.assertTrue(jnp.issubdtype(w.dtype, jnp.floating)) self.assertTrue(jnp.issubdtype(dw.dtype, jnp.floating)) new_a = a + a_dot new_w, new_v = f(new_a) new_a = (new_a + np.conj(new_a.T)) / 2 # Assert rtol eigenvalue delta between perturbed eigenvectors vs new true eigenvalues. RTOL = 1e-2 with jax.numpy_rank_promotion('allow'): assert np.max( np.abs((np.diag(np.dot(np.conj((v+dv).T), np.dot(new_a,(v+dv)))) - new_w) / new_w)) < RTOL # Redundant to above, but also assert rtol for eigenvector property with new true eigenvalues. assert np.max( np.linalg.norm(np.abs(new_w*(v+dv) - np.dot(new_a, (v+dv))), axis=0) / np.linalg.norm(np.abs(new_w*(v+dv)), axis=0) ) < RTOL
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testEighBatching(self, shape, dtype):
    """vmap of jsp.linalg.eigh satisfies A v = w v for each batch member."""
    rng = jtu.rand_default(self.rng())
    batched = rng((10,) + shape, dtype)
    batched = (batched + np.conj(T(batched))) / 2
    ws, vs = vmap(jsp.linalg.eigh)(batched)
    residual = np.matmul(batched, vs) - ws[..., None, :] * vs
    self.assertTrue(np.all(np.linalg.norm(residual) < 1e-3))
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testLuPivotsToPermutation(self, shape, dtype):
    """Fully reversing pivots must yield the reversed identity permutation."""
    pivots_size = shape[-1]
    permutation_size = 2 * pivots_size
    pivots = jnp.broadcast_to(
        jnp.arange(permutation_size - 1, pivots_size - 1, -1, dtype=dtype),
        shape)
    actual = lax.linalg.lu_pivots_to_permutation(pivots, permutation_size)
    expected = jnp.broadcast_to(
        jnp.arange(permutation_size - 1, -1, -1, dtype=dtype), actual.shape)
    self.assertArraysEqual(actual, expected)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testLuPivotsToPermutationBatching(self, shape, dtype):
    """vmap of lu_pivots_to_permutation matches the unbatched expectation."""
    batched_shape = (10,) + shape
    pivots_size = batched_shape[-1]
    permutation_size = 2 * pivots_size
    pivots = jnp.broadcast_to(
        jnp.arange(permutation_size - 1, pivots_size - 1, -1, dtype=dtype),
        batched_shape)
    batched_fn = vmap(
        lambda x: lax.linalg.lu_pivots_to_permutation(x, permutation_size))
    actual = batched_fn(pivots)
    expected = jnp.broadcast_to(
        jnp.arange(permutation_size - 1, -1, -1, dtype=dtype), actual.shape)
    self.assertArraysEqual(actual, expected)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testNorm(self, shape, dtype, ord, axis, keepdims):
    """jnp.linalg.norm matches numpy for the given ord/axis/keepdims combo."""
    rng = jtu.rand_default(self.rng())
    if (ord in ('nuc', 2, -2) and
        (jtu.device_under_test() != "cpu" or
         (isinstance(axis, tuple) and len(axis) == 2))):
      raise unittest.SkipTest("No adequate SVD implementation available")

    make_args = lambda: [rng(shape, dtype)]
    np_fn = partial(np.linalg.norm, ord=ord, axis=axis, keepdims=keepdims)
    jnp_fn = partial(jnp.linalg.norm, ord=ord, axis=axis, keepdims=keepdims)
    self._CheckAgainstNumpy(np_fn, jnp_fn, make_args, check_dtypes=False,
                            tol=1e-3)
    self._CompileAndCheck(jnp_fn, make_args)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testSVD(self, b, m, n, dtype, full_matrices, compute_uv, hermitian):
    """End-to-end check of jnp.linalg.svd: reconstruction, unitarity, JVPs.

    b is the batch shape, (m, n) the matrix shape. Also exercises the
    hermitian fast path and differentiability in both compute_uv modes.
    """
    if (jnp.issubdtype(dtype, np.complexfloating) and
        jtu.device_under_test() == "tpu"):
        raise unittest.SkipTest("No complex SVD implementation")
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(b + (m, n), dtype)]

    # Norm, adjusted for dimension and type.
    def norm(x):
        norm = np.linalg.norm(x, axis=(-2, -1))
        return norm / (max(1, m, n) * jnp.finfo(dtype).eps)

    a, = args_maker()
    if hermitian:
        a = a + np.conj(T(a))  # make the input Hermitian
    out = jnp.linalg.svd(a, full_matrices=full_matrices,
                         compute_uv=compute_uv, hermitian=hermitian)
    if compute_uv:
        # Check the reconstructed matrices (a ~= U diag(s) V).
        if full_matrices:
            k = min(m, n)
            if m < n:
                self.assertTrue(np.all(
                    norm(a - np.matmul(out[1][..., None, :] * out[0], out[2][..., :k, :])) < 50))
            else:
                self.assertTrue(np.all(
                    norm(a - np.matmul(out[1][..., None, :] * out[0][..., :, :k], out[2])) < 350))
        else:
            self.assertTrue(np.all(
                norm(a - np.matmul(out[1][..., None, :] * out[0], out[2])) < 350))

        # Check the unitary properties of the singular vector matrices.
        self.assertTrue(np.all(norm(np.eye(out[0].shape[-1]) - np.matmul(np.conj(T(out[0])), out[0])) < 15))
        if m >= n:
            self.assertTrue(np.all(norm(np.eye(out[2].shape[-1]) - np.matmul(np.conj(T(out[2])), out[2])) < 10))
        else:
            self.assertTrue(np.all(norm(np.eye(out[2].shape[-2]) - np.matmul(out[2], np.conj(T(out[2])))) < 20))
    else:
        # Singular values only: compare directly against NumPy's.
        self.assertTrue(np.allclose(np.linalg.svd(a, compute_uv=False),
                                    np.asarray(out), atol=1e-4, rtol=1e-4))

    self._CompileAndCheck(partial(jnp.linalg.svd,
                                  full_matrices=full_matrices,
                                  compute_uv=compute_uv),
                          args_maker)

    if not compute_uv:
        svd = partial(jnp.linalg.svd, full_matrices=full_matrices,
                      compute_uv=compute_uv)
        # TODO(phawkins): these tolerances seem very loose.
        if dtype == np.complex128:
            jtu.check_jvp(svd, partial(jvp, svd), (a,), rtol=1e-4, atol=1e-4,
                          eps=1e-8)
        else:
            jtu.check_jvp(svd, partial(jvp, svd), (a,), rtol=5e-2, atol=2e-1)

    if jtu.device_under_test() == "tpu":
        raise unittest.SkipTest("TPU matmul does not have enough precision")
        # TODO(frederikwilde): Find the appropriate precision to use for this
        # test on TPUs.

    if compute_uv and (not full_matrices):
        # Directional-derivative check: d/dx svd-reconstruct(a + x*b) == b.
        b, = args_maker()

        def f(x):
            u, s, v = jnp.linalg.svd(
                a + x * b,
                full_matrices=full_matrices,
                compute_uv=compute_uv)
            vdiag = jnp.vectorize(jnp.diag, signature='(k)->(k,k)')
            return jnp.matmul(jnp.matmul(u, vdiag(s)), v).real

        _, t_out = jvp(f, (1.,), (1.,))
        if dtype == np.complex128:
            atol = 1e-13
        else:
            atol = 5e-4
        self.assertArraysAllClose(t_out, b.real, atol=atol)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testQr(self, shape, dtype, full_matrices):
    """jnp.linalg.qr: compare against NumPy, check unitarity, check JVP."""
    rng = jtu.rand_default(self.rng())
    m, n = shape[-2:]
    if full_matrices:
        mode, k = "complete", m
    else:
        mode, k = "reduced", min(m, n)
    a = rng(shape, dtype)
    lq, lr = jnp.linalg.qr(a, mode=mode)

    # np.linalg.qr doesn't support batch dimensions. But it seems like an
    # inevitable extension so we support it in our version.
    nq = np.zeros(shape[:-2] + (m, k), dtype)
    nr = np.zeros(shape[:-2] + (k, n), dtype)
    for index in np.ndindex(*shape[:-2]):
        nq[index], nr[index] = np.linalg.qr(a[index], mode=mode)

    max_rank = max(m, n)

    # Norm, adjusted for dimension and type.
    def norm(x):
        n = np.linalg.norm(x, axis=(-2, -1))
        return n / (max_rank * jnp.finfo(dtype).eps)

    def compare_orthogonal(q1, q2):
        # Q is unique up to sign, so normalize the sign first.
        sum_of_ratios = np.sum(np.divide(q1, q2), axis=-2, keepdims=True)
        phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
        q1 *= phases
        self.assertTrue(np.all(norm(q1 - q2) < 30))

    # Check a ~= qr
    self.assertTrue(np.all(norm(a - np.matmul(lq, lr)) < 30))

    # Compare the first 'k' vectors of Q; the remainder form an arbitrary
    # orthonormal basis for the null space.
    compare_orthogonal(nq[..., :k], lq[..., :k])

    # Check that q is close to unitary.
    self.assertTrue(np.all(
        norm(np.eye(k) - np.matmul(np.conj(T(lq)), lq)) < 5))

    if not full_matrices and m >= n:
        jtu.check_jvp(jnp.linalg.qr, partial(jvp, jnp.linalg.qr), (a,),
                      atol=3e-3)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testQrBatching(self, shape, dtype):
    """vmap of jsp.linalg.qr reconstructs each batch member (a ~= q @ r)."""
    rng = jtu.rand_default(self.rng())
    args = rng(shape, jnp.float32)
    qs, rs = vmap(jsp.linalg.qr)(args)
    self.assertTrue(np.all(np.linalg.norm(args - np.matmul(qs, rs)) < 1e-3))
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testCond(self, shape, pnorm, dtype):
    """jnp.linalg.cond vs np.linalg.cond, including the non-square error path."""
    if (jnp.issubdtype(dtype, np.complexfloating) and
        jtu.device_under_test() == "tpu"):
        raise unittest.SkipTest("No complex SVD implementation")

    def gen_mat():
        # arr_gen = jtu.rand_some_nan(self.rng())
        arr_gen = jtu.rand_default(self.rng())
        res = arr_gen(shape, dtype)
        return res

    def args_gen(p):
        def _args_gen():
            return [gen_mat(), p]
        return _args_gen

    args_maker = args_gen(pnorm)
    if pnorm not in [2, -2] and len(set(shape[-2:])) != 1:
        # Non-square matrices only support the 2-norm condition number;
        # NumPy raises LinAlgError here, so expect the same from JAX.
        with self.assertRaises(np.linalg.LinAlgError):
            jnp.linalg.cond(*args_maker())
    else:
        self._CheckAgainstNumpy(np.linalg.cond, jnp.linalg.cond, args_maker,
                                check_dtypes=False, tol=1e-3)
        partial_norm = partial(jnp.linalg.cond, p=pnorm)
        self._CompileAndCheck(partial_norm, lambda: [gen_mat()],
                              check_dtypes=False, rtol=1e-03, atol=1e-03)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testTensorinv(self, shape, dtype):
    """jnp.linalg.tensorinv agrees with NumPy on invertible tensors."""
    rng = jtu.rand_default(self.rng())

    def tensor_maker():
        # Rejection-sample until np.linalg.inv succeeds, guaranteeing an
        # invertible input.
        invertible = False
        while not invertible:
            a = rng(shape, dtype)
            try:
                np.linalg.inv(a)
                invertible = True
            except np.linalg.LinAlgError:
                pass
        return a

    # ind splits the tensor's axes in half for the matricized inverse.
    args_maker = lambda: [tensor_maker(), int(np.floor(len(shape) / 2))]
    self._CheckAgainstNumpy(np.linalg.tensorinv, jnp.linalg.tensorinv,
                            args_maker, check_dtypes=False, tol=1e-3)
    partial_inv = partial(jnp.linalg.tensorinv, ind=int(np.floor(len(shape) / 2)))
    self._CompileAndCheck(partial_inv, lambda: [tensor_maker()],
                          check_dtypes=False, rtol=1e-03, atol=1e-03)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testSolve(self, lhs_shape, rhs_shape, dtype):
    """jnp.linalg.solve agrees with np.linalg.solve and compiles cleanly."""
    sampler = jtu.rand_default(self.rng())

    def args_maker():
        return [sampler(lhs_shape, dtype), sampler(rhs_shape, dtype)]

    self._CheckAgainstNumpy(np.linalg.solve, jnp.linalg.solve, args_maker,
                            tol=1e-3)
    self._CompileAndCheck(jnp.linalg.solve, args_maker)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testInv(self, shape, dtype):
    """jnp.linalg.inv agrees with NumPy on invertible matrices."""
    rng = jtu.rand_default(self.rng())
    if jtu.device_under_test() == "gpu" and shape == (200, 200):
        raise unittest.SkipTest("Test is flaky on GPU")

    def args_maker():
        # Rejection-sample until np.linalg.inv succeeds, guaranteeing an
        # invertible input.
        invertible = False
        while not invertible:
            a = rng(shape, dtype)
            try:
                np.linalg.inv(a)
                invertible = True
            except np.linalg.LinAlgError:
                pass
        return [a]

    self._CheckAgainstNumpy(np.linalg.inv, jnp.linalg.inv, args_maker,
                            tol=1e-3)
    self._CompileAndCheck(jnp.linalg.inv, args_maker)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testPinv(self, shape, dtype):
    """jnp.linalg.pinv vs NumPy, plus a gradient check off-TPU."""
    if (jnp.issubdtype(dtype, np.complexfloating) and
        jtu.device_under_test() == "tpu"):
        raise unittest.SkipTest("No complex SVD implementation")
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np.linalg.pinv, jnp.linalg.pinv, args_maker,
                            tol=1e-2)
    self._CompileAndCheck(jnp.linalg.pinv, args_maker)
    if jtu.device_under_test() != "tpu":
        # TODO(phawkins): 1e-1 seems like a very loose tolerance.
        jtu.check_grads(jnp.linalg.pinv, args_maker(), 2, rtol=1e-1,
                        atol=2e-1)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testPinvGradIssue2792(self):
    """Regression test for issue #2792: pinv jacobian at a rank-deficient point."""
    def f(p):
        a = jnp.array([[0., 0.], [-p, 1.]], jnp.float32) * 1 / (1 + p**2)
        return jnp.linalg.pinv(a)
    j = jax.jacobian(f)(jnp.float32(2.))
    self.assertAllClose(jnp.array([[0., -1.], [0., 0.]], jnp.float32), j)

    # Jacobian of pinv at the identity should be the (negative) identity map
    # on matrices, expressed as a 4-D tensor.
    expected = jnp.array([[[[-1., 0.], [0., 0.]], [[0., -1.], [0., 0.]]],
                          [[[0., 0.], [-1., 0.]], [[0., 0.], [0., -1.]]]],
                         dtype=jnp.float32)
    self.assertAllClose(
        expected, jax.jacobian(jnp.linalg.pinv)(jnp.eye(2, dtype=jnp.float32)))
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testMatrixPower(self, shape, dtype, n):
    """jnp.linalg.matrix_power agrees with NumPy for exponent n."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    # TPU matmul has lower precision, so relax the tolerance there.
    tol = 1e-1 if jtu.device_under_test() == "tpu" else 1e-3
    self._CheckAgainstNumpy(partial(np.linalg.matrix_power, n=n),
                            partial(jnp.linalg.matrix_power, n=n),
                            args_maker, tol=tol)
    self._CompileAndCheck(partial(jnp.linalg.matrix_power, n=n), args_maker,
                          rtol=1e-3)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testMatrixRank(self, shape, dtype):
    """jnp.linalg.matrix_rank agrees with NumPy and works under jit.

    Fix: dropped the unused `a, = args_maker()` draw — the value was never
    referenced and only advanced the test rng's stream.
    """
    if (jnp.issubdtype(dtype, np.complexfloating) and
        jtu.device_under_test() == "tpu"):
        raise unittest.SkipTest("No complex SVD implementation")
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np.linalg.matrix_rank, jnp.linalg.matrix_rank,
                            args_maker, check_dtypes=False, tol=1e-3)
    self._CompileAndCheck(jnp.linalg.matrix_rank, args_maker,
                          check_dtypes=False, rtol=1e-3)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testMultiDot(self, shapes, dtype):
    """jnp.linalg.multi_dot agrees with NumPy over a chain of matrices."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [[rng(shape, dtype) for shape in shapes]]

    np_fun = np.linalg.multi_dot
    # HIGHEST precision so the TPU/GPU result is comparable to NumPy's.
    jnp_fun = partial(jnp.linalg.multi_dot, precision=lax.Precision.HIGHEST)
    tol = {np.float32: 1e-4, np.float64: 1e-10,
           np.complex64: 1e-4, np.complex128: 1e-10}

    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testLstsq(self, lhs_shape, rhs_shape, dtype, rcond):
    """jnp.linalg.lstsq vs NumPy (using numpy-compatible residuals)."""
    rng = jtu.rand_default(self.rng())
    np_fun = partial(np.linalg.lstsq, rcond=rcond)
    jnp_fun = partial(jnp.linalg.lstsq, rcond=rcond)
    # numpy_resid=True makes JAX mimic NumPy's residual conventions so the
    # outputs are directly comparable.
    jnp_fun_numpy_resid = partial(jnp.linalg.lstsq, rcond=rcond,
                                  numpy_resid=True)
    tol = {np.float32: 1e-5, np.float64: 1e-12,
           np.complex64: 1e-5, np.complex128: 1e-12}
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]

    self._CheckAgainstNumpy(np_fun, jnp_fun_numpy_resid, args_maker,
                            check_dtypes=False, tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol)

    # Disabled because grad is flaky for low-rank inputs.
    # TODO:
    # jtu.check_grads(lambda *args: jnp_fun(*args)[0], args_maker(), order=2, atol=1e-2, rtol=1e-2)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testIssue669(self):
    """Regression test for issue #669: grad through eigh of a complex identity."""
    def test(x):
        val, vec = jnp.linalg.eigh(x)
        return jnp.real(jnp.sum(val))

    grad_test_jc = jit(grad(jit(test)))
    xc = np.eye(3, dtype=np.complex64)
    # The gradient of tr(eigvals) w.r.t. a Hermitian matrix is the identity.
    self.assertAllClose(xc, grad_test_jc(xc))
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testIssue1151(self):
    """Regression test for issue #1151: batched solve and its jacobians."""
    rng = self.rng()
    A = jnp.array(rng.randn(100, 3, 3), dtype=jnp.float32)
    b = jnp.array(rng.randn(100, 3), dtype=jnp.float32)
    x = jnp.linalg.solve(A, b)
    self.assertAllClose(vmap(jnp.dot)(A, x), b, atol=2e-3, rtol=1e-2)

    # Jacobians must not crash, batched and unbatched, w.r.t. both arguments.
    _ = jax.jacobian(jnp.linalg.solve, argnums=0)(A, b)
    _ = jax.jacobian(jnp.linalg.solve, argnums=1)(A, b)

    _ = jax.jacobian(jnp.linalg.solve, argnums=0)(A[0], b[0])
    _ = jax.jacobian(jnp.linalg.solve, argnums=1)(A[0], b[0])
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testIssue1383(self):
    """Regression test for issue #1383: third-order derivatives through eigh."""
    seed = jax.random.PRNGKey(0)
    tmp = jax.random.uniform(seed, (2, 2))
    a = jnp.dot(tmp, tmp.T)  # symmetric PSD input for eigh

    def f(inp):
        val, vec = jnp.linalg.eigh(inp)
        return jnp.dot(jnp.dot(vec, inp), vec.T)

    grad_func = jax.jacfwd(f)
    hess_func = jax.jacfwd(grad_func)
    cube_func = jax.jacfwd(hess_func)
    # The third derivative must be finite (no NaNs).
    self.assertFalse(np.any(np.isnan(cube_func(a))))
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testBlockDiag(self, args):
    """jsp.linalg.block_diag matches scipy's block_diag and compiles."""
    def make_args():
        return args

    self._CheckAgainstNumpy(osp.linalg.block_diag, jsp.linalg.block_diag,
                            make_args)
    self._CompileAndCheck(jsp.linalg.block_diag, make_args)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testLu(self, shape, dtype):
    """jsp.linalg.lu: reconstruction x == P @ L @ U, plus a jit check."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    x, = args_maker()
    p, l, u = jsp.linalg.lu(x)
    self.assertAllClose(x, np.matmul(p, np.matmul(l, u)),
                        rtol={np.float32: 1e-3, np.float64: 1e-12,
                              np.complex64: 1e-3, np.complex128: 1e-12})
    self._CompileAndCheck(jsp.linalg.lu, args_maker)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testLuGrad(self, shape, dtype):
    """Second-order gradient check of (possibly batched) jsp.linalg.lu."""
    rng = jtu.rand_default(self.rng())
    a = rng(shape, dtype)
    # Batched inputs go through vmap; jsp.linalg.lu itself is 2-D only.
    lu = vmap(jsp.linalg.lu) if len(shape) > 2 else jsp.linalg.lu
    jtu.check_grads(lu, (a,), 2, atol=5e-2, rtol=3e-1)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testLuBatching(self, shape, dtype):
    """vmap of jsp.linalg.lu matches per-matrix scipy.linalg.lu results."""
    rng = jtu.rand_default(self.rng())
    args = [rng(shape, jnp.float32) for _ in range(10)]
    expected = list(osp.linalg.lu(x) for x in args)
    ps = np.stack([out[0] for out in expected])
    ls = np.stack([out[1] for out in expected])
    us = np.stack([out[2] for out in expected])

    actual_ps, actual_ls, actual_us = vmap(jsp.linalg.lu)(jnp.stack(args))
    self.assertAllClose(ps, actual_ps)
    self.assertAllClose(ls, actual_ls, rtol=5e-6)
    self.assertAllClose(us, actual_us)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testLuCPUBackendOnGPU(self):
    # tests running `lu` on cpu when a gpu is present.
    jit(jsp.linalg.lu, backend="cpu")(np.ones((2, 2)))  # does not crash
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testLuFactor(self, n, dtype):
    """jsp.linalg.lu_factor: reconstruct x from packed LU and pivots."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng((n, n), dtype)]

    x, = args_maker()
    lu, piv = jsp.linalg.lu_factor(x)
    # Unpack the combined LU matrix: unit-lower L and upper U.
    l = np.tril(lu, -1) + np.eye(n, dtype=dtype)
    u = np.triu(lu)
    # Apply the pivot row-swaps to x in sequence so that P @ x == L @ U.
    for i in range(n):
        x[[i, piv[i]],] = x[[piv[i], i],]
    self.assertAllClose(x, np.matmul(l, u), rtol=1e-3, atol=1e-3)
    self._CompileAndCheck(jsp.linalg.lu_factor, args_maker)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testLuSolve(self, lhs_shape, rhs_shape, dtype, trans):
    """jsp.linalg.lu_solve agrees with scipy given a scipy lu_factor result."""
    rng = jtu.rand_default(self.rng())
    osp_fun = lambda lu, piv, rhs: osp.linalg.lu_solve((lu, piv), rhs, trans=trans)
    jsp_fun = lambda lu, piv, rhs: jsp.linalg.lu_solve((lu, piv), rhs, trans=trans)

    def args_maker():
        a = rng(lhs_shape, dtype)
        # Factor with scipy so both solvers see identical (lu, piv) inputs.
        lu, piv = osp.linalg.lu_factor(a)
        return [lu, piv, rng(rhs_shape, dtype)]

    self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker, tol=1e-3)
    self._CompileAndCheck(jsp_fun, args_maker)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testSolve(self, lhs_shape, rhs_shape, dtype, sym_pos, lower):
    """jsp.linalg.solve vs scipy, covering sym_pos/lower triangular variants.

    NOTE(review): `sym_pos` is deprecated in newer SciPy (use assume_a) —
    confirm the pinned SciPy version still accepts it.
    """
    rng = jtu.rand_default(self.rng())
    osp_fun = lambda lhs, rhs: osp.linalg.solve(lhs, rhs, sym_pos=sym_pos, lower=lower)
    jsp_fun = lambda lhs, rhs: jsp.linalg.solve(lhs, rhs, sym_pos=sym_pos, lower=lower)

    def args_maker():
        a = rng(lhs_shape, dtype)
        if sym_pos:
            # Make the matrix Hermitian positive semi-definite, then keep only
            # the triangle the solver is told to read.
            a = np.matmul(a, np.conj(T(a)))
            a = np.tril(a) if lower else np.triu(a)
        return [a, rng(rhs_shape, dtype)]

    self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker, tol=1e-3)
    self._CompileAndCheck(jsp_fun, args_maker)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testSolveTriangular(self, lower, transpose_a, unit_diagonal, lhs_shape,
                        rhs_shape, dtype):
    """jsp.linalg.solve_triangular vs an explicit-inverse reference."""
    rng = jtu.rand_default(self.rng())
    k = rng(lhs_shape, dtype)
    # Build a well-conditioned triangular factor via Cholesky of k k^T + c I.
    l = np.linalg.cholesky(np.matmul(k, T(k))
                           + lhs_shape[-1] * np.eye(lhs_shape[-1]))
    l = l.astype(k.dtype)
    b = rng(rhs_shape, dtype)

    if unit_diagonal:
        a = np.tril(l, -1) + np.eye(lhs_shape[-1], dtype=dtype)
    else:
        a = l
    a = a if lower else T(a)

    inv = np.linalg.inv(T(a) if transpose_a else a).astype(a.dtype)
    if len(lhs_shape) == len(rhs_shape):
        np_ans = np.matmul(inv, b)
    else:
        np_ans = np.einsum("...ij,...j->...i", inv, b)

    # The standard scipy.linalg.solve_triangular doesn't support broadcasting.
    # But it seems like an inevitable extension so we support it.
    ans = jsp.linalg.solve_triangular(
        l if lower else T(l), b, trans=1 if transpose_a else 0, lower=lower,
        unit_diagonal=unit_diagonal)

    self.assertAllClose(np_ans, ans,
                        rtol={np.float32: 1e-4, np.float64: 1e-11})
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testTriangularSolveGrad( self, lower, transpose_a, conjugate_a, unit_diagonal, left_side, a_shape, b_shape, dtype): rng = jtu.rand_default(self.rng()) # Test lax.linalg.triangular_solve instead of scipy.linalg.solve_triangular # because it exposes more options. A = jnp.tril(rng(a_shape, dtype) + 5 * np.eye(a_shape[-1], dtype=dtype)) A = A if lower else T(A) B = rng(b_shape, dtype) f = partial(lax.linalg.triangular_solve, lower=lower, transpose_a=transpose_a, conjugate_a=conjugate_a, unit_diagonal=unit_diagonal, left_side=left_side) jtu.check_grads(f, (A, B), 2, rtol=4e-2, eps=1e-3)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testTriangularSolveBatching(self, left_side, a_shape, b_shape, bdims):
    """vmap of triangular_solve: verify A @ X == B (or X @ A for right side)."""
    rng = jtu.rand_default(self.rng())
    # Diagonal shift (+5 I) keeps A well-conditioned.
    A = jnp.tril(rng(a_shape, np.float32)
                 + 5 * np.eye(a_shape[-1], dtype=np.float32))
    B = rng(b_shape, np.float32)
    solve = partial(lax.linalg.triangular_solve, lower=True, transpose_a=False,
                    conjugate_a=False, unit_diagonal=False,
                    left_side=left_side)
    X = vmap(solve, bdims)(A, B)
    matmul = partial(jnp.matmul, precision=lax.Precision.HIGHEST)
    Y = matmul(A, X) if left_side else matmul(X, A)
    self.assertArraysAllClose(Y, jnp.broadcast_to(B, Y.shape), atol=1e-4)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testExpm(self, n, dtype):
    """jsp.linalg.expm vs scipy, including the upper_triangular fast path."""
    rng = jtu.rand_small(self.rng())
    args_maker = lambda: [rng((n, n), dtype)]

    osp_fun = lambda a: osp.linalg.expm(a)
    jsp_fun = lambda a: jsp.linalg.expm(a)
    self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker)
    self._CompileAndCheck(jsp_fun, args_maker)

    # Also exercise the dedicated upper-triangular code path.
    args_maker_triu = lambda: [np.triu(rng((n, n), dtype))]
    jsp_fun_triu = lambda a: jsp.linalg.expm(a, upper_triangular=True)
    self._CheckAgainstNumpy(osp_fun, jsp_fun_triu, args_maker_triu)
    self._CompileAndCheck(jsp_fun_triu, args_maker_triu)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testIssue2131(self, n, dtype):
    """Regression test for issue #2131: expm of the zero matrix."""
    args_maker_zeros = lambda: [np.zeros((n, n), dtype)]
    osp_fun = lambda a: osp.linalg.expm(a)
    jsp_fun = lambda a: jsp.linalg.expm(a)
    self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker_zeros)
    self._CompileAndCheck(jsp_fun, args_maker_zeros)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testChoSolve(self, lhs_shape, rhs_shape, dtype, lower):
    """jsp.linalg.cho_solve vs scipy, for both lower and upper factors."""
    rng = jtu.rand_default(self.rng())

    def args_maker():
        b = rng(rhs_shape, dtype)
        if lower:
            L = np.tril(rng(lhs_shape, dtype))
            return [(L, lower), b]
        else:
            U = np.triu(rng(lhs_shape, dtype))
            return [(U, lower), b]

    self._CheckAgainstNumpy(osp.linalg.cho_solve, jsp.linalg.cho_solve,
                            args_maker, tol=1e-3)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testExpmFrechet(self, n, dtype):
    """jsp.linalg.expm_frechet vs scipy across several input 1-norms."""
    rng = jtu.rand_small(self.rng())
    if dtype == np.float64 or dtype == np.complex128:
        target_norms = [1.0e-2, 2.0e-1, 9.0e-01, 2.0, 3.0]
        # TODO(zhangqiaorjc): Reduce tol to default 1e-15.
        tol = {
            np.dtype(np.float64): 1e-14,
            np.dtype(np.complex128): 1e-14,
        }
    elif dtype == np.float32 or dtype == np.complex64:
        target_norms = [4.0e-1, 1.0, 3.0]
        tol = None
    else:
        raise TypeError("dtype={} is not supported.".format(dtype))
    for norm in target_norms:
        def args_maker():
            # Rescale so the 1-norm of `a` is exactly `norm`; expm_frechet's
            # scaling-and-squaring behavior depends on it.
            a = rng((n, n), dtype)
            a = a / np.linalg.norm(a, 1) * norm
            e = rng((n, n), dtype)
            return [a, e, ]

        # compute_expm is True
        osp_fun = lambda a, e: osp.linalg.expm_frechet(a, e, compute_expm=True)
        jsp_fun = lambda a, e: jsp.linalg.expm_frechet(a, e, compute_expm=True)
        self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker,
                                check_dtypes=False, tol=tol)
        self._CompileAndCheck(jsp_fun, args_maker, check_dtypes=False)
        # compute_expm is False
        osp_fun = lambda a, e: osp.linalg.expm_frechet(a, e, compute_expm=False)
        jsp_fun = lambda a, e: jsp.linalg.expm_frechet(a, e, compute_expm=False)
        self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker,
                                check_dtypes=False, tol=tol)
        self._CompileAndCheck(jsp_fun, args_maker, check_dtypes=False)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testExpmGrad(self, n, dtype):
    """First-order fwd/rev gradient check of jsp.linalg.expm."""
    rng = jtu.rand_small(self.rng())
    a = rng((n, n), dtype)
    if dtype == np.float64 or dtype == np.complex128:
        target_norms = [1.0e-2, 2.0e-1, 9.0e-01, 2.0, 3.0]
    elif dtype == np.float32 or dtype == np.complex64:
        target_norms = [4.0e-1, 1.0, 3.0]
    else:
        raise TypeError("dtype={} is not supported.".format(dtype))
    # TODO(zhangqiaorjc): Reduce tol to default 1e-5.
    # Lower tolerance is due to 2nd order derivative.
    tol = {
        # Note that due to inner_product, float and complex tol are coupled.
        np.dtype(np.float32): 0.02,
        np.dtype(np.complex64): 0.02,
        np.dtype(np.float64): 1e-4,
        np.dtype(np.complex128): 1e-4,
    }
    for norm in target_norms:
        # Rescale `a` so its 1-norm hits the target.
        a = a / np.linalg.norm(a, 1) * norm

        def expm(x):
            return jsp.linalg.expm(x, upper_triangular=False, max_squarings=16)

        jtu.check_grads(expm, (a,), modes=["fwd", "rev"], order=1, atol=tol,
                        rtol=tol)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testSchur(self, shape, dtype):
    """jsp.linalg.schur agrees with scipy.linalg.schur and compiles."""
    sampler = jtu.rand_default(self.rng())

    def make_args():
        return [sampler(shape, dtype)]

    self._CheckAgainstNumpy(osp.linalg.schur, jsp.linalg.schur, make_args)
    self._CompileAndCheck(jsp.linalg.schur, make_args)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testSqrtmPSDMatrix(self, shape, dtype):
    # Checks against scipy.linalg.sqrtm when the principal square root
    # is guaranteed to be unique (i.e no negative real eigenvalue)
    rng = jtu.rand_default(self.rng())
    arg = rng(shape, dtype)
    mat = arg @ arg.T  # PSD by construction
    args_maker = lambda: [mat]
    if dtype == np.float32 or dtype == np.complex64:
        tol = 1e-4
    else:
        tol = 1e-8
    self._CheckAgainstNumpy(osp.linalg.sqrtm,
                            jsp.linalg.sqrtm,
                            args_maker,
                            tol=tol,
                            check_dtypes=False)
    self._CompileAndCheck(jsp.linalg.sqrtm, args_maker)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testSqrtmGenMatrix(self, shape, dtype):
    """jsp.linalg.sqrtm on a general matrix: verify R @ R ~= arg."""
    rng = jtu.rand_default(self.rng())
    arg = rng(shape, dtype)
    if dtype == np.float32 or dtype == np.complex64:
        tol = 1e-3
    else:
        tol = 1e-8
    R = jsp.linalg.sqrtm(arg)
    self.assertAllClose(R @ R, arg, atol=tol, check_dtypes=False)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testSqrtmEdgeCase(self, diag, expected, dtype):
    """ Tests the zero numerator condition """
    mat = jnp.diag(jnp.array(diag)).astype(dtype)
    expected = jnp.diag(jnp.array(expected))
    root = jsp.linalg.sqrtm(mat)

    self.assertAllClose(root, expected, check_dtypes=False)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def run_test(self, alpha, beta): n = alpha.shape[-1] # scipy.linalg.eigh_tridiagonal doesn't support complex inputs, so for # this we call the slower numpy.linalg.eigh. if np.issubdtype(alpha.dtype, np.complexfloating): tridiagonal = np.diag(alpha) + np.diag(beta, 1) + np.diag( np.conj(beta), -1) eigvals_expected, _ = np.linalg.eigh(tridiagonal) else: eigvals_expected = scipy.linalg.eigh_tridiagonal( alpha, beta, eigvals_only=True) eigvals = jax.scipy.linalg.eigh_tridiagonal( alpha, beta, eigvals_only=True) finfo = np.finfo(alpha.dtype) atol = 4 * np.sqrt(n) * finfo.eps * np.amax(np.abs(eigvals_expected)) self.assertAllClose(eigvals_expected, eigvals, atol=atol, rtol=1e-4)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testToeplitz(self, n, dtype):
    """eigh_tridiagonal on constant-diagonal (Toeplitz) matrices,
    including extreme magnitudes."""
    for a, b in [[2, -1], [1, 0], [0, 1], [-1e10, 1e10], [-1e-10, 1e-10]]:
        alpha = a * np.ones([n], dtype=dtype)
        beta = b * np.ones([n - 1], dtype=dtype)
        self.run_test(alpha, beta)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testRandomUniform(self, n, dtype):
    """eigh_tridiagonal on uniformly random diagonal/off-diagonal entries."""
    diag = jtu.rand_uniform(self.rng())((n,), dtype)
    offdiag = jtu.rand_uniform(self.rng())((n - 1,), dtype)
    self.run_test(diag, offdiag)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testSelect(self, dtype):
    """eigh_tridiagonal select='i' returns the right slice of select='a'."""
    n = 5
    alpha = jtu.rand_uniform(self.rng())((n,), dtype)
    beta = jtu.rand_uniform(self.rng())((n - 1,), dtype)
    eigvals_all = jax.scipy.linalg.eigh_tridiagonal(alpha, beta, select="a",
                                                    eigvals_only=True)
    eps = np.finfo(alpha.dtype).eps
    atol = 2 * n * eps
    for first in range(n - 1):
        for last in range(first + 1, n - 1):
            # Check that we get the expected eigenvalues by selecting by
            # index range.
            eigvals_index = jax.scipy.linalg.eigh_tridiagonal(
                alpha, beta, select="i", select_range=(first, last),
                eigvals_only=True)
            self.assertAllClose(
                eigvals_all[first:(last + 1)], eigvals_index, atol=atol)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def test_tridiagonal_solve(self, dtype):
    """lax.linalg.tridiagonal_solve on a small fixed system: check A @ X == B."""
    # dl: sub-diagonal (first entry unused), du: super-diagonal (last unused).
    dl = np.array([0.0, 2.0, 3.0], dtype=dtype)
    d = np.ones(3, dtype=dtype)
    du = np.array([1.0, 2.0, 0.0], dtype=dtype)
    m = 3
    B = np.ones([m, 1], dtype=dtype)
    X = lax.linalg.tridiagonal_solve(dl, d, du, B)
    # Rebuild the dense tridiagonal A to validate the solution.
    A = np.eye(3, dtype=dtype)
    A[[1, 2], [0, 1]] = dl[1:]
    A[[0, 1], [1, 2]] = du[:-1]
    np.testing.assert_allclose(A @ X, B, rtol=1e-6, atol=1e-6)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testSchur(self, shape, dtype):
    """lax.linalg.schur agrees with scipy.linalg.schur and compiles."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]

    self._CheckAgainstNumpy(osp.linalg.schur, lax.linalg.schur, args_maker)
    self._CompileAndCheck(lax.linalg.schur, args_maker)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def testSchurBatching(self, shape, dtype):
    """vmap of lax.linalg.schur: S @ T @ S^H reconstructs each input."""
    rng = jtu.rand_default(self.rng())
    batch_size = 10
    shape = (batch_size, ) + shape
    args = rng(shape, dtype)
    reconstruct = vmap(lambda S, T: S @ T @ jnp.conj(S.T))

    Ts, Ss = vmap(lax.linalg.schur)(args)
    self.assertAllClose(reconstruct(Ss, Ts), args, atol=1e-4)
google/jax
[ 22193, 2080, 22193, 1296, 1540502702 ]
def initialize_options(self):
    """Set the default for the command's only option (distutils hook)."""
    # When True, `run` builds the doctest target instead of HTML.
    self.test = False
puentesarrin/asyncflux
[ 26, 1, 26, 1, 1400633988 ]
def run(self):
    """Build the Sphinx docs (HTML by default, doctest when --test is set).

    Fix: the bare ``except: pass`` around ``os.makedirs`` swallowed every
    exception (including KeyboardInterrupt); narrowed to ``OSError`` so
    only the expected "directory already exists" case is ignored.
    Raises RuntimeError if sphinx-build exits non-zero.
    """
    if self.test:
        path = "docs/_build/doctest"
        mode = "doctest"
    else:
        path = "docs/_build/%s" % __version__
        mode = "html"

    # Best-effort creation; the directory may already exist.
    try:
        os.makedirs(path)
    except OSError:
        pass

    status = subprocess.call(["sphinx-build", "-E", "-b", mode,
                              "docs", path])

    if status:
        raise RuntimeError("documentation step '%s' failed" % (mode,))

    sys.stdout.write("\nDocumentation step '%s' performed, results here:\n"
                     "   %s/\n" % (mode, path))
puentesarrin/asyncflux
[ 26, 1, 26, 1, 1400633988 ]
def setUpTestData(cls):
    """Create one user (and its TheUser profile) shared by all login tests."""
    cls.user = User.objects.create_user(username='api_login',
                                        email='api_login@email.com',
                                        password='123456')
    # TheUser is presumably created by a post-save signal on User — TODO confirm.
    cls.the_user = TheUser.objects.get(id_user=cls.user)
OlegKlimenko/Plamber
[ 9, 1, 9, 28, 1487368387 ]
def test_user_login_missing_params(self):
    """POST the login endpoint without a password parameter."""
    # NOTE(review): no assertion on `response` is visible in this chunk —
    # confirm the status-code check was not lost.
    response = self.client.post(reverse('user_login_api'),
                                {'app_key': self.api_key,
                                 'username': 'username'})
OlegKlimenko/Plamber
[ 9, 1, 9, 28, 1487368387 ]
def test_user_login_too_long_username(self):
    """POST the login endpoint with a 40-char username (over the limit)."""
    # NOTE(review): no assertion on `response` is visible in this chunk —
    # confirm the status-code check was not lost.
    response = self.client.post(reverse('user_login_api'),
                                {'app_key': self.api_key,
                                 'username': 'a' * 40,
                                 'password': 'somepassword'})
OlegKlimenko/Plamber
[ 9, 1, 9, 28, 1487368387 ]
def test_user_login_too_short_username(self):
    """POST the login endpoint with a 1-char username (under the limit)."""
    # NOTE(review): no assertion on `response` is visible in this chunk —
    # confirm the status-code check was not lost.
    response = self.client.post(reverse('user_login_api'),
                                {'app_key': self.api_key,
                                 'username': 'a',
                                 'password': 'somepassword'})
OlegKlimenko/Plamber
[ 9, 1, 9, 28, 1487368387 ]
def test_user_login_username_regex_not_valid(self):
    """Usernames with characters outside the allowed pattern must be rejected."""
    # NOTE(review): only the fixture list is visible here; the loop that
    # POSTs each pattern and asserts rejection appears to be missing/truncated.
    username_patterns = [
        'ab#$@cdev', '#$@username', 'username%#&#&', 'db24!!!db34', '#$@234234', '#123dkf%'
    ]
OlegKlimenko/Plamber
[ 9, 1, 9, 28, 1487368387 ]
def test_user_login_email_regex_not_valid(self):
    """Malformed e-mail addresses must be rejected by the login endpoint."""
    # NOTE(review): only the fixture list is visible here; the loop that
    # POSTs each pattern and asserts rejection appears to be missing/truncated.
    email_patterns = [
        'no_extension@ddd', '@first.missing', 'after_at_miss@', '$%#@474**.om', 'em#$@ail@m.com', '#em@ail@m.com'
    ]
OlegKlimenko/Plamber
[ 9, 1, 9, 28, 1487368387 ]
def test_user_login_too_long_password(self):
    """POST the login endpoint with a 17-char password (over the limit)."""
    # NOTE(review): no assertion on `response` is visible in this chunk —
    # confirm the status-code check was not lost.
    response = self.client.post(reverse('user_login_api'),
                                {'app_key': self.api_key,
                                 'username': 'test_username',
                                 'password': 'p' * 17})
OlegKlimenko/Plamber
[ 9, 1, 9, 28, 1487368387 ]