body
stringlengths 26
98.2k
| body_hash
int64 -9,222,864,604,528,158,000
9,221,803,474B
| docstring
stringlengths 1
16.8k
| path
stringlengths 5
230
| name
stringlengths 1
96
| repository_name
stringlengths 7
89
| lang
stringclasses 1
value | body_without_docstring
stringlengths 20
98.2k
|
|---|---|---|---|---|---|---|---|
def create_corp_faucet_config(self):
    """Build the Faucet configuration for the corp network."""
    vlan = SETUP_VLAN
    # One access port plus the egress port, all on the setup VLAN.
    corp_interfaces = self._build_dp_interfaces(
        CORP_DP_ID,
        tagged_vlans=[vlan],
        access_ports=1,
        access_port_start=1,
        native_vlan=vlan,
        egress_port=CORP_EGRESS_PORT,
    )
    dps = {'corp': self._build_datapath_config(CORP_DP_ID, corp_interfaces)}
    return FaucetConfig(dps=dps, version=2)
| -8,864,064,651,153,820,000
|
Create Faucet config for corp network
|
testing/python_lib/build_config.py
|
create_corp_faucet_config
|
henry54809/forch
|
python
|
def create_corp_faucet_config(self):
    """Build the Faucet configuration for the corp network."""
    vlan = SETUP_VLAN
    # One access port plus the egress port, all on the setup VLAN.
    corp_interfaces = self._build_dp_interfaces(
        CORP_DP_ID,
        tagged_vlans=[vlan],
        access_ports=1,
        access_port_start=1,
        native_vlan=vlan,
        egress_port=CORP_EGRESS_PORT,
    )
    dps = {'corp': self._build_datapath_config(CORP_DP_ID, corp_interfaces)}
    return FaucetConfig(dps=dps, version=2)
|
def scan():
    """Load the API spec, run the endpoint tree and write the report.

    Exits the process with ``ExitCode.USAGE_ERROR`` on any configuration,
    parsing or reporting failure; otherwise delegates the final exit status
    to ``session.exit()``.
    """

    def _abort(message):
        # Centralized fatal-error path: log once, then exit with usage error.
        # (Replaces four copies of the same log-and-raise boilerplate.)
        logger.error(message)
        raise SystemExit(ExitCode.USAGE_ERROR)

    spec_path = settings['spec_path']
    try:
        api_spec = load_config_file(spec_path)
    except FileNotFoundError as e:
        _abort(f'Could not find API spec file: {spec_path}. {str(e)}')
    except EmptyConfigFileError as e:
        _abort(f'API spec file is empty. {str(e)}')
    except yaml.YAMLError as e:
        _abort('Error loading specification file.\nPyYAML: {}'.format(str(e)))
    try:
        root_node = EndpointNode(api_spec)
        results = root_node.run()
    except (InvalidKeyError, KeyError, InvalidPythonCodeError) as e:
        _abort('Error loading API spec. {}'.format(str(e)))
    try:
        write_report(results)
    except (BadConfigurationError, InvalidPythonCodeError) as e:
        # Original logged the raw exception object; stringify for consistency.
        _abort(str(e))
    session.exit()
| -2,529,913,209,857,200,000
|
Caller function that tries to scan the file and write the report.
|
scanapi/scan.py
|
scan
|
hebertjulio/scanapi
|
python
|
def scan():
    """Load the API spec, run the endpoint tree and write the report.

    Exits the process with ``ExitCode.USAGE_ERROR`` on any configuration,
    parsing or reporting failure; otherwise delegates the final exit status
    to ``session.exit()``.
    """

    def _abort(message):
        # Centralized fatal-error path: log once, then exit with usage error.
        # (Replaces four copies of the same log-and-raise boilerplate.)
        logger.error(message)
        raise SystemExit(ExitCode.USAGE_ERROR)

    spec_path = settings['spec_path']
    try:
        api_spec = load_config_file(spec_path)
    except FileNotFoundError as e:
        _abort(f'Could not find API spec file: {spec_path}. {str(e)}')
    except EmptyConfigFileError as e:
        _abort(f'API spec file is empty. {str(e)}')
    except yaml.YAMLError as e:
        _abort('Error loading specification file.\nPyYAML: {}'.format(str(e)))
    try:
        root_node = EndpointNode(api_spec)
        results = root_node.run()
    except (InvalidKeyError, KeyError, InvalidPythonCodeError) as e:
        _abort('Error loading API spec. {}'.format(str(e)))
    try:
        write_report(results)
    except (BadConfigurationError, InvalidPythonCodeError) as e:
        # Original logged the raw exception object; stringify for consistency.
        _abort(str(e))
    session.exit()
|
def write_report(results):
    """Construct a Reporter from the configured output path and template,
    then write *results* to the report file.
    """
    output_path = settings['output_path']
    template = settings['template']
    Reporter(output_path, template).write(results)
| -3,180,117,976,623,210,500
|
Constructs a Reporter object and calls the write method of Reporter to
push the results to a file.
|
scanapi/scan.py
|
write_report
|
hebertjulio/scanapi
|
python
|
def write_report(results):
    """Construct a Reporter from the configured output path and template,
    then write *results* to the report file.
    """
    output_path = settings['output_path']
    template = settings['template']
    Reporter(output_path, template).write(results)
|
def _ConvertBoxToCOCOFormat(box):
'Converts a box in [ymin, xmin, ymax, xmax] format to COCO format.\n\n This is a utility function for converting from our internal\n [ymin, xmin, ymax, xmax] convention to the convention used by the COCO API\n i.e., [xmin, ymin, width, height].\n\n Args:\n box: a [ymin, xmin, ymax, xmax] numpy array\n\n Returns:\n a list of floats representing [xmin, ymin, width, height]\n '
return [float(box[1]), float(box[0]), float((box[3] - box[1])), float((box[2] - box[0]))]
| -6,747,070,920,789,550,000
|
Converts a box in [ymin, xmin, ymax, xmax] format to COCO format.
This is a utility function for converting from our internal
[ymin, xmin, ymax, xmax] convention to the convention used by the COCO API
i.e., [xmin, ymin, width, height].
Args:
box: a [ymin, xmin, ymax, xmax] numpy array
Returns:
a list of floats representing [xmin, ymin, width, height]
|
research/object_detection/metrics/coco_tools.py
|
_ConvertBoxToCOCOFormat
|
1911590204/models
|
python
|
def _ConvertBoxToCOCOFormat(box):
'Converts a box in [ymin, xmin, ymax, xmax] format to COCO format.\n\n This is a utility function for converting from our internal\n [ymin, xmin, ymax, xmax] convention to the convention used by the COCO API\n i.e., [xmin, ymin, width, height].\n\n Args:\n box: a [ymin, xmin, ymax, xmax] numpy array\n\n Returns:\n a list of floats representing [xmin, ymin, width, height]\n '
return [float(box[1]), float(box[0]), float((box[3] - box[1])), float((box[2] - box[0]))]
|
def _RleCompress(masks):
    """Compresses mask using Run-length encoding provided by pycocotools.

    Args:
      masks: uint8 numpy array of shape [mask_height, mask_width] with values
        in {0, 1}.

    Returns:
      A pycocotools Run-length encoding of the mask.
    """
    # pycocotools requires Fortran (column-major) memory layout.
    fortran_mask = np.asfortranarray(masks)
    encoding = mask.encode(fortran_mask)
    # Normalize the counts field to str for JSON-friendly output.
    encoding['counts'] = six.ensure_str(encoding['counts'])
    return encoding
| -4,503,842,151,480,810,000
|
Compresses mask using Run-length encoding provided by pycocotools.
Args:
masks: uint8 numpy array of shape [mask_height, mask_width] with values in
{0, 1}.
Returns:
A pycocotools Run-length encoding of the mask.
|
research/object_detection/metrics/coco_tools.py
|
_RleCompress
|
1911590204/models
|
python
|
def _RleCompress(masks):
    """Compresses mask using Run-length encoding provided by pycocotools.

    Args:
      masks: uint8 numpy array of shape [mask_height, mask_width] with values
        in {0, 1}.

    Returns:
      A pycocotools Run-length encoding of the mask.
    """
    # pycocotools requires Fortran (column-major) memory layout.
    fortran_mask = np.asfortranarray(masks)
    encoding = mask.encode(fortran_mask)
    # Normalize the counts field to str for JSON-friendly output.
    encoding['counts'] = six.ensure_str(encoding['counts'])
    return encoding
|
def ExportSingleImageGroundtruthToCoco(image_id, next_annotation_id, category_id_set, groundtruth_boxes, groundtruth_classes, groundtruth_keypoints=None, groundtruth_keypoint_visibilities=None, groundtruth_masks=None, groundtruth_is_crowd=None, groundtruth_area=None):
    """Export groundtruth of a single image to COCO format.

    This function converts groundtruth detection annotations represented as
    numpy arrays to dictionaries that can be ingested by the COCO evaluation
    API. Note that the image_ids provided here must match the ones given to
    ExportSingleImageDetectionsToCoco. We assume that boxes and classes are in
    correspondence - that is: groundtruth_boxes[i, :], and
    groundtruth_classes[i] are associated with the same groundtruth annotation.

    In the exported result, "area" fields are always set to the area of the
    groundtruth bounding box.

    Args:
      image_id: a unique image identifier either of type integer or string.
      next_annotation_id: integer specifying the first id to use for the
        groundtruth annotations. All annotations are assigned a continuous
        integer id starting from this value.
      category_id_set: A set of valid class ids. Groundtruth with classes not
        in category_id_set are dropped.
      groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4]
      groundtruth_classes: numpy array (int) with shape [num_gt_boxes]
      groundtruth_keypoints: optional float numpy array of keypoints
        with shape [num_gt_boxes, num_keypoints, 2].
      groundtruth_keypoint_visibilities: optional integer numpy array of
        keypoint visibilities with shape [num_gt_boxes, num_keypoints].
        Integer is treated as an enum with 0=not labeled, 1=labeled but not
        visible and 2=labeled and visible.
      groundtruth_masks: optional uint8 numpy array of shape [num_detections,
        image_height, image_width] containing detection_masks.
      groundtruth_is_crowd: optional numpy array (int) with shape
        [num_gt_boxes] indicating whether groundtruth boxes are crowd.
      groundtruth_area: numpy array (float32) with shape [num_gt_boxes]. If
        provided, then the area values (in the original absolute coordinates)
        will be populated instead of calculated from bounding box coordinates.

    Returns:
      a list of groundtruth annotations for a single image in the COCO format.

    Raises:
      ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have
        the right lengths or (2) if each of the elements inside these lists do
        not have the correct shapes or (3) if image_ids are not integers
    """
    # Validate ranks and shapes before exporting anything.
    if (len(groundtruth_classes.shape) != 1):
        raise ValueError('groundtruth_classes is expected to be of rank 1.')
    if (len(groundtruth_boxes.shape) != 2):
        raise ValueError('groundtruth_boxes is expected to be of rank 2.')
    if (groundtruth_boxes.shape[1] != 4):
        raise ValueError('groundtruth_boxes should have shape[1] == 4.')
    num_boxes = groundtruth_classes.shape[0]
    if (num_boxes != groundtruth_boxes.shape[0]):
        raise ValueError(('Corresponding entries in groundtruth_classes, and groundtruth_boxes should have compatible shapes (i.e., agree on the 0th dimension).Classes shape: %d. Boxes shape: %d. Image ID: %s' % (groundtruth_classes.shape[0], groundtruth_boxes.shape[0], image_id)))
    has_is_crowd = (groundtruth_is_crowd is not None)
    if (has_is_crowd and (len(groundtruth_is_crowd.shape) != 1)):
        raise ValueError('groundtruth_is_crowd is expected to be of rank 1.')
    has_keypoints = (groundtruth_keypoints is not None)
    has_keypoint_visibilities = (groundtruth_keypoint_visibilities is not None)
    if (has_keypoints and (not has_keypoint_visibilities)):
        # No visibilities given: default every keypoint to 2 (labeled+visible).
        groundtruth_keypoint_visibilities = np.full((num_boxes, groundtruth_keypoints.shape[1]), 2)
    groundtruth_list = []
    for i in range(num_boxes):
        # Annotations whose class id is outside category_id_set are dropped.
        if (groundtruth_classes[i] in category_id_set):
            iscrowd = (groundtruth_is_crowd[i] if has_is_crowd else 0)
            if ((groundtruth_area is not None) and (groundtruth_area[i] > 0)):
                area = float(groundtruth_area[i])
            else:
                # Fall back to the box area: (ymax - ymin) * (xmax - xmin).
                area = float(((groundtruth_boxes[(i, 2)] - groundtruth_boxes[(i, 0)]) * (groundtruth_boxes[(i, 3)] - groundtruth_boxes[(i, 1)])))
            export_dict = {'id': (next_annotation_id + i), 'image_id': image_id, 'category_id': int(groundtruth_classes[i]), 'bbox': list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])), 'area': area, 'iscrowd': iscrowd}
            if (groundtruth_masks is not None):
                export_dict['segmentation'] = _RleCompress(groundtruth_masks[i])
            if has_keypoints:
                keypoints = groundtruth_keypoints[i]
                visibilities = np.reshape(groundtruth_keypoint_visibilities[i], [(- 1)])
                coco_keypoints = []
                num_valid_keypoints = 0
                for (keypoint, visibility) in zip(keypoints, visibilities):
                    # COCO keypoint triplets are [x, y, v]; internal order is [y, x].
                    coco_keypoints.append(float(keypoint[1]))
                    coco_keypoints.append(float(keypoint[0]))
                    coco_keypoints.append(int(visibility))
                    if (int(visibility) > 0):
                        num_valid_keypoints = (num_valid_keypoints + 1)
                export_dict['keypoints'] = coco_keypoints
                export_dict['num_keypoints'] = num_valid_keypoints
            groundtruth_list.append(export_dict)
    return groundtruth_list
| -6,087,324,160,309,731,000
|
Export groundtruth of a single image to COCO format.
This function converts groundtruth detection annotations represented as numpy
arrays to dictionaries that can be ingested by the COCO evaluation API. Note
that the image_ids provided here must match the ones given to
ExportSingleImageDetectionsToCoco. We assume that boxes and classes are in
correspondence - that is: groundtruth_boxes[i, :], and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box.
Args:
image_id: a unique image identifier either of type integer or string.
next_annotation_id: integer specifying the first id to use for the
groundtruth annotations. All annotations are assigned a continuous integer
id starting from this value.
category_id_set: A set of valid class ids. Groundtruth with classes not in
category_id_set are dropped.
groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4]
groundtruth_classes: numpy array (int) with shape [num_gt_boxes]
groundtruth_keypoints: optional float numpy array of keypoints
with shape [num_gt_boxes, num_keypoints, 2].
groundtruth_keypoint_visibilities: optional integer numpy array of keypoint
visibilities with shape [num_gt_boxes, num_keypoints]. Integer is treated
as an enum with 0=not labels, 1=labeled but not visible and 2=labeled and
visible.
groundtruth_masks: optional uint8 numpy array of shape [num_detections,
image_height, image_width] containing detection_masks.
groundtruth_is_crowd: optional numpy array (int) with shape [num_gt_boxes]
indicating whether groundtruth boxes are crowd.
groundtruth_area: numpy array (float32) with shape [num_gt_boxes]. If
provided, then the area values (in the original absolute coordinates) will
be populated instead of calculated from bounding box coordinates.
Returns:
a list of groundtruth annotations for a single image in the COCO format.
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
|
research/object_detection/metrics/coco_tools.py
|
ExportSingleImageGroundtruthToCoco
|
1911590204/models
|
python
|
def ExportSingleImageGroundtruthToCoco(image_id, next_annotation_id, category_id_set, groundtruth_boxes, groundtruth_classes, groundtruth_keypoints=None, groundtruth_keypoint_visibilities=None, groundtruth_masks=None, groundtruth_is_crowd=None, groundtruth_area=None):
    """Export groundtruth of a single image to COCO format.

    This function converts groundtruth detection annotations represented as
    numpy arrays to dictionaries that can be ingested by the COCO evaluation
    API. Note that the image_ids provided here must match the ones given to
    ExportSingleImageDetectionsToCoco. We assume that boxes and classes are in
    correspondence - that is: groundtruth_boxes[i, :], and
    groundtruth_classes[i] are associated with the same groundtruth annotation.

    In the exported result, "area" fields are always set to the area of the
    groundtruth bounding box.

    Args:
      image_id: a unique image identifier either of type integer or string.
      next_annotation_id: integer specifying the first id to use for the
        groundtruth annotations. All annotations are assigned a continuous
        integer id starting from this value.
      category_id_set: A set of valid class ids. Groundtruth with classes not
        in category_id_set are dropped.
      groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4]
      groundtruth_classes: numpy array (int) with shape [num_gt_boxes]
      groundtruth_keypoints: optional float numpy array of keypoints
        with shape [num_gt_boxes, num_keypoints, 2].
      groundtruth_keypoint_visibilities: optional integer numpy array of
        keypoint visibilities with shape [num_gt_boxes, num_keypoints].
        Integer is treated as an enum with 0=not labeled, 1=labeled but not
        visible and 2=labeled and visible.
      groundtruth_masks: optional uint8 numpy array of shape [num_detections,
        image_height, image_width] containing detection_masks.
      groundtruth_is_crowd: optional numpy array (int) with shape
        [num_gt_boxes] indicating whether groundtruth boxes are crowd.
      groundtruth_area: numpy array (float32) with shape [num_gt_boxes]. If
        provided, then the area values (in the original absolute coordinates)
        will be populated instead of calculated from bounding box coordinates.

    Returns:
      a list of groundtruth annotations for a single image in the COCO format.

    Raises:
      ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have
        the right lengths or (2) if each of the elements inside these lists do
        not have the correct shapes or (3) if image_ids are not integers
    """
    # Validate ranks and shapes before exporting anything.
    if (len(groundtruth_classes.shape) != 1):
        raise ValueError('groundtruth_classes is expected to be of rank 1.')
    if (len(groundtruth_boxes.shape) != 2):
        raise ValueError('groundtruth_boxes is expected to be of rank 2.')
    if (groundtruth_boxes.shape[1] != 4):
        raise ValueError('groundtruth_boxes should have shape[1] == 4.')
    num_boxes = groundtruth_classes.shape[0]
    if (num_boxes != groundtruth_boxes.shape[0]):
        raise ValueError(('Corresponding entries in groundtruth_classes, and groundtruth_boxes should have compatible shapes (i.e., agree on the 0th dimension).Classes shape: %d. Boxes shape: %d. Image ID: %s' % (groundtruth_classes.shape[0], groundtruth_boxes.shape[0], image_id)))
    has_is_crowd = (groundtruth_is_crowd is not None)
    if (has_is_crowd and (len(groundtruth_is_crowd.shape) != 1)):
        raise ValueError('groundtruth_is_crowd is expected to be of rank 1.')
    has_keypoints = (groundtruth_keypoints is not None)
    has_keypoint_visibilities = (groundtruth_keypoint_visibilities is not None)
    if (has_keypoints and (not has_keypoint_visibilities)):
        # No visibilities given: default every keypoint to 2 (labeled+visible).
        groundtruth_keypoint_visibilities = np.full((num_boxes, groundtruth_keypoints.shape[1]), 2)
    groundtruth_list = []
    for i in range(num_boxes):
        # Annotations whose class id is outside category_id_set are dropped.
        if (groundtruth_classes[i] in category_id_set):
            iscrowd = (groundtruth_is_crowd[i] if has_is_crowd else 0)
            if ((groundtruth_area is not None) and (groundtruth_area[i] > 0)):
                area = float(groundtruth_area[i])
            else:
                # Fall back to the box area: (ymax - ymin) * (xmax - xmin).
                area = float(((groundtruth_boxes[(i, 2)] - groundtruth_boxes[(i, 0)]) * (groundtruth_boxes[(i, 3)] - groundtruth_boxes[(i, 1)])))
            export_dict = {'id': (next_annotation_id + i), 'image_id': image_id, 'category_id': int(groundtruth_classes[i]), 'bbox': list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])), 'area': area, 'iscrowd': iscrowd}
            if (groundtruth_masks is not None):
                export_dict['segmentation'] = _RleCompress(groundtruth_masks[i])
            if has_keypoints:
                keypoints = groundtruth_keypoints[i]
                visibilities = np.reshape(groundtruth_keypoint_visibilities[i], [(- 1)])
                coco_keypoints = []
                num_valid_keypoints = 0
                for (keypoint, visibility) in zip(keypoints, visibilities):
                    # COCO keypoint triplets are [x, y, v]; internal order is [y, x].
                    coco_keypoints.append(float(keypoint[1]))
                    coco_keypoints.append(float(keypoint[0]))
                    coco_keypoints.append(int(visibility))
                    if (int(visibility) > 0):
                        num_valid_keypoints = (num_valid_keypoints + 1)
                export_dict['keypoints'] = coco_keypoints
                export_dict['num_keypoints'] = num_valid_keypoints
            groundtruth_list.append(export_dict)
    return groundtruth_list
|
def ExportGroundtruthToCOCO(image_ids, groundtruth_boxes, groundtruth_classes, categories, output_path=None):
    """Export groundtruth detection annotations in numpy arrays to COCO API.

    This function converts a set of groundtruth detection annotations
    represented as numpy arrays to dictionaries that can be ingested by the
    COCO API. Inputs are three parallel lists: image ids, groundtruth boxes
    per image and groundtruth classes per image. The image_ids provided here
    must match the ones given to the ExportDetectionsToCOCO function for
    evaluation to work properly. We assume image_id[i], groundtruth_boxes[i, :]
    and groundtruth_classes[i] belong to the same groundtruth annotation.

    In the exported result, "area" fields are always set to the area of the
    groundtruth bounding box and "iscrowd" fields are always set to 0.
    TODO(jonathanhuang): pass in "iscrowd" array for evaluating on COCO dataset.

    Args:
      image_ids: a list of unique image identifiers either of type integer or
        string.
      groundtruth_boxes: list of numpy arrays with shape [num_gt_boxes, 4]
        (num_gt_boxes can differ per entry)
      groundtruth_classes: list of numpy arrays (int) with shape [num_gt_boxes]
        (num_gt_boxes can differ per entry)
      categories: a list of dictionaries representing all possible categories.
        Each dict has keys: 'id' (required int), 'name' (required str),
        'supercategory' (optional str).
      output_path: (optional) path for exporting result to JSON

    Returns:
      dictionary that can be read by COCO API

    Raises:
      ValueError: if the three input lists do not have matching lengths, or if
        the elements inside them do not have the correct shapes.
    """
    valid_category_ids = {cat['id'] for cat in categories}
    annotations = []
    images = []
    if not (len(image_ids) == len(groundtruth_boxes) == len(groundtruth_classes)):
        raise ValueError('Input lists must have the same length')
    # Annotation ids are assigned consecutively across all images.
    next_annotation_id = 1
    for image_id, boxes, classes in zip(image_ids, groundtruth_boxes, groundtruth_classes):
        images.append({'id': image_id})
        annotations.extend(ExportSingleImageGroundtruthToCoco(
            image_id, next_annotation_id, valid_category_ids, boxes, classes))
        next_annotation_id += classes.shape[0]
    groundtruth_dict = {
        'annotations': annotations,
        'images': images,
        'categories': categories,
    }
    if output_path:
        with tf.gfile.GFile(output_path, 'w') as fid:
            json_utils.Dump(groundtruth_dict, fid, float_digits=4, indent=2)
    return groundtruth_dict
| -3,856,544,612,097,964,000
|
Export groundtruth detection annotations in numpy arrays to COCO API.
This function converts a set of groundtruth detection annotations represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are three lists: image ids for each groundtruth image,
groundtruth boxes for each image and groundtruth classes respectively.
Note that the image_ids provided here must match the ones given to the
ExportDetectionsToCOCO function in order for evaluation to work properly.
We assume that for each image, boxes, scores and classes are in
correspondence --- that is: image_id[i], groundtruth_boxes[i, :] and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box and "iscrowd" fields are always set to 0.
TODO(jonathanhuang): pass in "iscrowd" array for evaluating on COCO dataset.
Args:
image_ids: a list of unique image identifier either of type integer or
string.
groundtruth_boxes: list of numpy arrays with shape [num_gt_boxes, 4]
(note that num_gt_boxes can be different for each entry in the list)
groundtruth_classes: list of numpy arrays (int) with shape [num_gt_boxes]
(note that num_gt_boxes can be different for each entry in the list)
categories: a list of dictionaries representing all possible categories.
Each dict in this list has the following keys:
'id': (required) an integer id uniquely identifying this category
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'
'supercategory': (optional) string representing the supercategory
e.g., 'animal', 'vehicle', 'food', etc
output_path: (optional) path for exporting result to JSON
Returns:
dictionary that can be read by COCO API
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
|
research/object_detection/metrics/coco_tools.py
|
ExportGroundtruthToCOCO
|
1911590204/models
|
python
|
def ExportGroundtruthToCOCO(image_ids, groundtruth_boxes, groundtruth_classes, categories, output_path=None):
    """Export groundtruth detection annotations in numpy arrays to COCO API.

    This function converts a set of groundtruth detection annotations
    represented as numpy arrays to dictionaries that can be ingested by the
    COCO API. Inputs are three parallel lists: image ids, groundtruth boxes
    per image and groundtruth classes per image. The image_ids provided here
    must match the ones given to the ExportDetectionsToCOCO function for
    evaluation to work properly. We assume image_id[i], groundtruth_boxes[i, :]
    and groundtruth_classes[i] belong to the same groundtruth annotation.

    In the exported result, "area" fields are always set to the area of the
    groundtruth bounding box and "iscrowd" fields are always set to 0.
    TODO(jonathanhuang): pass in "iscrowd" array for evaluating on COCO dataset.

    Args:
      image_ids: a list of unique image identifiers either of type integer or
        string.
      groundtruth_boxes: list of numpy arrays with shape [num_gt_boxes, 4]
        (num_gt_boxes can differ per entry)
      groundtruth_classes: list of numpy arrays (int) with shape [num_gt_boxes]
        (num_gt_boxes can differ per entry)
      categories: a list of dictionaries representing all possible categories.
        Each dict has keys: 'id' (required int), 'name' (required str),
        'supercategory' (optional str).
      output_path: (optional) path for exporting result to JSON

    Returns:
      dictionary that can be read by COCO API

    Raises:
      ValueError: if the three input lists do not have matching lengths, or if
        the elements inside them do not have the correct shapes.
    """
    valid_category_ids = {cat['id'] for cat in categories}
    annotations = []
    images = []
    if not (len(image_ids) == len(groundtruth_boxes) == len(groundtruth_classes)):
        raise ValueError('Input lists must have the same length')
    # Annotation ids are assigned consecutively across all images.
    next_annotation_id = 1
    for image_id, boxes, classes in zip(image_ids, groundtruth_boxes, groundtruth_classes):
        images.append({'id': image_id})
        annotations.extend(ExportSingleImageGroundtruthToCoco(
            image_id, next_annotation_id, valid_category_ids, boxes, classes))
        next_annotation_id += classes.shape[0]
    groundtruth_dict = {
        'annotations': annotations,
        'images': images,
        'categories': categories,
    }
    if output_path:
        with tf.gfile.GFile(output_path, 'w') as fid:
            json_utils.Dump(groundtruth_dict, fid, float_digits=4, indent=2)
    return groundtruth_dict
|
def ExportSingleImageDetectionBoxesToCoco(image_id, category_id_set, detection_boxes, detection_scores, detection_classes, detection_keypoints=None, detection_keypoint_visibilities=None):
    """Export detections of a single image to COCO format.

    This function converts detections represented as numpy arrays to
    dictionaries that can be ingested by the COCO evaluation API. Note that
    the image_ids provided here must match the ones given to
    ExporSingleImageDetectionBoxesToCoco. We assume that boxes and classes are
    in correspondence - that is: boxes[i, :] and classes[i] are associated
    with the same groundtruth annotation.

    Args:
      image_id: unique image identifier either of type integer or string.
      category_id_set: A set of valid class ids. Detections with classes not
        in category_id_set are dropped.
      detection_boxes: float numpy array of shape [num_detections, 4]
        containing detection boxes.
      detection_scores: float numpy array of shape [num_detections] containing
        scores for the detection boxes.
      detection_classes: integer numpy array of shape [num_detections]
        containing the classes for detection boxes.
      detection_keypoints: optional float numpy array of keypoints
        with shape [num_detections, num_keypoints, 2].
      detection_keypoint_visibilities: optional integer numpy array of
        keypoint visibilities with shape [num_detections, num_keypoints].
        Integer is treated as an enum with 0=not labeled, 1=labeled but not
        visible and 2=labeled and visible.

    Returns:
      a list of detection annotations for a single image in the COCO format.

    Raises:
      ValueError: if (1) detection_boxes, detection_scores and
        detection_classes do not have the right lengths or (2) if each of the
        elements inside these lists do not have the correct shapes or (3) if
        image_ids are not integers.
    """
    # Rank/shape validation, mirroring the groundtruth exporter.
    if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
        raise ValueError('All entries in detection_classes and detection_scoresexpected to be of rank 1.')
    if len(detection_boxes.shape) != 2:
        raise ValueError('All entries in detection_boxes expected to be of rank 2.')
    if detection_boxes.shape[1] != 4:
        raise ValueError('All entries in detection_boxes should have shape[1] == 4.')
    num_boxes = detection_classes.shape[0]
    if not (num_boxes == detection_boxes.shape[0] == detection_scores.shape[0]):
        raise ValueError(('Corresponding entries in detection_classes, detection_scores and detection_boxes should have compatible shapes (i.e., agree on the 0th dimension). Classes shape: %d. Boxes shape: %d. Scores shape: %d' % (detection_classes.shape[0], detection_boxes.shape[0], detection_scores.shape[0])))
    annotations = []
    for idx in range(num_boxes):
        # Detections whose class id is outside category_id_set are dropped.
        if detection_classes[idx] not in category_id_set:
            continue
        annotation = {
            'image_id': image_id,
            'category_id': int(detection_classes[idx]),
            'bbox': list(_ConvertBoxToCOCOFormat(detection_boxes[idx, :])),
            'score': float(detection_scores[idx]),
        }
        if detection_keypoints is not None:
            keypoints = detection_keypoints[idx]
            num_keypoints = keypoints.shape[0]
            if detection_keypoint_visibilities is None:
                # Default: every keypoint is "labeled and visible" (enum 2).
                detection_keypoint_visibilities = np.full((num_boxes, num_keypoints), 2)
            visibilities = np.reshape(detection_keypoint_visibilities[idx], [(- 1)])
            coco_keypoints = []
            for keypoint, visibility in zip(keypoints, visibilities):
                # COCO keypoint triplets are [x, y, v]; internal order is [y, x].
                coco_keypoints.append(float(keypoint[1]))
                coco_keypoints.append(float(keypoint[0]))
                coco_keypoints.append(int(visibility))
            annotation['keypoints'] = coco_keypoints
            annotation['num_keypoints'] = num_keypoints
        annotations.append(annotation)
    return annotations
| 3,486,113,173,692,428,300
|
Export detections of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the COCO evaluation API. Note that the image_ids
provided here must match the ones given to the
ExporSingleImageDetectionBoxesToCoco. We assume that boxes, and classes are in
correspondence - that is: boxes[i, :], and classes[i]
are associated with the same groundtruth annotation.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_boxes: float numpy array of shape [num_detections, 4] containing
detection boxes.
detection_scores: float numpy array of shape [num_detections] containing
scored for the detection boxes.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection boxes.
detection_keypoints: optional float numpy array of keypoints
with shape [num_detections, num_keypoints, 2].
detection_keypoint_visibilities: optional integer numpy array of keypoint
visibilities with shape [num_detections, num_keypoints]. Integer is
treated as an enum with 0=not labels, 1=labeled but not visible and
2=labeled and visible.
Returns:
a list of detection annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_boxes, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
|
research/object_detection/metrics/coco_tools.py
|
ExportSingleImageDetectionBoxesToCoco
|
1911590204/models
|
python
|
def ExportSingleImageDetectionBoxesToCoco(image_id, category_id_set, detection_boxes, detection_scores, detection_classes, detection_keypoints=None, detection_keypoint_visibilities=None):
    """Export detections of a single image to COCO format.

    Converts detections represented as numpy arrays to dictionaries that can
    be ingested by the COCO evaluation API. Note that the image_id provided
    here must match the one given to ExportSingleImageGroundtruthToCoco.
    Boxes and classes are assumed to be in correspondence: boxes[i, :] and
    classes[i] are associated with the same detection.

    Args:
      image_id: unique image identifier, either integer or string.
      category_id_set: a set of valid class ids. Detections whose class is not
        in category_id_set are dropped.
      detection_boxes: float numpy array of shape [num_detections, 4]
        containing detection boxes.
      detection_scores: float numpy array of shape [num_detections] containing
        scores for the detection boxes.
      detection_classes: integer numpy array of shape [num_detections]
        containing the classes for the detection boxes.
      detection_keypoints: optional float numpy array of keypoints with shape
        [num_detections, num_keypoints, 2].
      detection_keypoint_visibilities: optional integer numpy array of keypoint
        visibilities with shape [num_detections, num_keypoints]. Treated as an
        enum: 0=not labeled, 1=labeled but not visible, 2=labeled and visible.

    Returns:
      A list of detection annotations for a single image in the COCO format.

    Raises:
      ValueError: if detection_boxes, detection_scores and detection_classes
        do not have consistent ranks or lengths.
    """
    if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
        # Bug fix: original message ran 'detection_scores' and 'expected'
        # together with no separating space.
        raise ValueError('All entries in detection_classes and '
                         'detection_scores expected to be of rank 1.')
    if len(detection_boxes.shape) != 2:
        raise ValueError('All entries in detection_boxes expected to be of rank 2.')
    if detection_boxes.shape[1] != 4:
        raise ValueError('All entries in detection_boxes should have shape[1] == 4.')
    num_boxes = detection_classes.shape[0]
    if not num_boxes == detection_boxes.shape[0] == detection_scores.shape[0]:
        raise ValueError(('Corresponding entries in detection_classes, detection_scores and detection_boxes should have compatible shapes (i.e., agree on the 0th dimension). Classes shape: %d. Boxes shape: %d. Scores shape: %d' % (detection_classes.shape[0], detection_boxes.shape[0], detection_scores.shape[0])))
    # When no visibilities are supplied, treat every keypoint as labeled and
    # visible (enum value 2). Hoisted out of the loop: the original rebuilt
    # this array inside the per-box loop on the first keypointed detection.
    if detection_keypoints is not None and detection_keypoint_visibilities is None:
        detection_keypoint_visibilities = np.full(detection_keypoints.shape[:2], 2)
    detections_list = []
    for i in range(num_boxes):
        if detection_classes[i] not in category_id_set:
            continue  # drop detections with class ids outside category_id_set
        export_dict = {
            'image_id': image_id,
            'category_id': int(detection_classes[i]),
            'bbox': list(_ConvertBoxToCOCOFormat(detection_boxes[i, :])),
            'score': float(detection_scores[i]),
        }
        if detection_keypoints is not None:
            keypoints = detection_keypoints[i]
            num_keypoints = keypoints.shape[0]
            visibilities = np.reshape(detection_keypoint_visibilities[i], [-1])
            coco_keypoints = []
            for keypoint, visibility in zip(keypoints, visibilities):
                # Emit the COCO [x, y, visibility] triplet; the input keypoints
                # appear to be stored [y, x], hence the index swap below --
                # NOTE(review): confirm ordering against the caller.
                coco_keypoints.append(float(keypoint[1]))
                coco_keypoints.append(float(keypoint[0]))
                coco_keypoints.append(int(visibility))
            export_dict['keypoints'] = coco_keypoints
            export_dict['num_keypoints'] = num_keypoints
        detections_list.append(export_dict)
    return detections_list
|
def ExportSingleImageDetectionMasksToCoco(image_id, category_id_set, detection_masks, detection_scores, detection_classes):
    """Export detection masks of a single image to COCO format.

    Converts mask detections represented as numpy arrays to dictionaries that
    can be ingested by the COCO evaluation API. detection_masks,
    detection_scores and detection_classes are assumed to be in
    correspondence: detection_masks[i], detection_classes[i] and
    detection_scores[i] are associated with the same annotation.

    Args:
      image_id: unique image identifier, either integer or string.
      category_id_set: a set of valid class ids. Detections whose class is not
        in category_id_set are dropped.
      detection_masks: uint8 numpy array of shape [num_detections,
        image_height, image_width] containing detection masks.
      detection_scores: float numpy array of shape [num_detections] containing
        scores for the detection masks.
      detection_classes: integer numpy array of shape [num_detections]
        containing the classes for the detection masks.

    Returns:
      A list of detection mask annotations for a single image in COCO format.

    Raises:
      ValueError: if detection_masks, detection_scores and detection_classes
        do not have consistent ranks or lengths.
    """
    if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
        # Bug fix: original message ran 'detection_scores' and 'expected'
        # together with no separating space.
        raise ValueError('All entries in detection_classes and '
                         'detection_scores expected to be of rank 1.')
    num_boxes = detection_classes.shape[0]
    if not num_boxes == len(detection_masks) == detection_scores.shape[0]:
        # Bug fix: original message lacked the period between 'shapes' and
        # 'Classes'.
        raise ValueError(('Corresponding entries in detection_classes, detection_scores and detection_masks should have compatible lengths and shapes. Classes length: %d. Masks length: %d. Scores length: %d' % (detection_classes.shape[0], len(detection_masks), detection_scores.shape[0])))
    detections_list = []
    for i in range(num_boxes):
        if detection_classes[i] in category_id_set:
            detections_list.append({
                'image_id': image_id,
                'category_id': int(detection_classes[i]),
                'segmentation': _RleCompress(detection_masks[i]),
                'score': float(detection_scores[i]),
            })
    return detections_list
| 38,152,405,171,328,380
|
Export detection masks of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the COCO evaluation API. We assume that
detection_masks, detection_scores, and detection_classes are in correspondence
- that is: detection_masks[i, :], detection_classes[i] and detection_scores[i]
are associated with the same annotation.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_masks: uint8 numpy array of shape [num_detections, image_height,
image_width] containing detection_masks.
detection_scores: float numpy array of shape [num_detections] containing
scores for detection masks.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection masks.
Returns:
a list of detection mask annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_masks, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
|
research/object_detection/metrics/coco_tools.py
|
ExportSingleImageDetectionMasksToCoco
|
1911590204/models
|
python
|
def ExportSingleImageDetectionMasksToCoco(image_id, category_id_set, detection_masks, detection_scores, detection_classes):
    """Export detection masks of a single image to COCO format.

    Converts mask detections represented as numpy arrays to dictionaries that
    can be ingested by the COCO evaluation API. detection_masks,
    detection_scores and detection_classes are assumed to be in
    correspondence: detection_masks[i], detection_classes[i] and
    detection_scores[i] are associated with the same annotation.

    Args:
      image_id: unique image identifier, either integer or string.
      category_id_set: a set of valid class ids. Detections whose class is not
        in category_id_set are dropped.
      detection_masks: uint8 numpy array of shape [num_detections,
        image_height, image_width] containing detection masks.
      detection_scores: float numpy array of shape [num_detections] containing
        scores for the detection masks.
      detection_classes: integer numpy array of shape [num_detections]
        containing the classes for the detection masks.

    Returns:
      A list of detection mask annotations for a single image in COCO format.

    Raises:
      ValueError: if detection_masks, detection_scores and detection_classes
        do not have consistent ranks or lengths.
    """
    if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
        # Bug fix: original message ran 'detection_scores' and 'expected'
        # together with no separating space.
        raise ValueError('All entries in detection_classes and '
                         'detection_scores expected to be of rank 1.')
    num_boxes = detection_classes.shape[0]
    if not num_boxes == len(detection_masks) == detection_scores.shape[0]:
        # Bug fix: original message lacked the period between 'shapes' and
        # 'Classes'.
        raise ValueError(('Corresponding entries in detection_classes, detection_scores and detection_masks should have compatible lengths and shapes. Classes length: %d. Masks length: %d. Scores length: %d' % (detection_classes.shape[0], len(detection_masks), detection_scores.shape[0])))
    detections_list = []
    for i in range(num_boxes):
        if detection_classes[i] in category_id_set:
            detections_list.append({
                'image_id': image_id,
                'category_id': int(detection_classes[i]),
                'segmentation': _RleCompress(detection_masks[i]),
                'score': float(detection_scores[i]),
            })
    return detections_list
|
def ExportDetectionsToCOCO(image_ids, detection_boxes, detection_scores, detection_classes, categories, output_path=None):
    """Export detection annotations in numpy arrays to COCO API.

    Converts a set of predicted detections represented as numpy arrays to
    dictionaries that can be ingested by the COCO API. Inputs are per-image
    lists of boxes, scores and classes. The image_ids provided here must
    match the ones given to ExportGroundtruthToCOCO for evaluation to work.

    For each image, boxes, scores and classes are assumed to be in
    correspondence: detection_boxes[i, :], detection_scores[i] and
    detection_classes[i] are associated with the same detection.

    Args:
      image_ids: a list of unique image identifiers (integer or string).
      detection_boxes: list of numpy arrays with shape
        [num_detection_boxes, 4].
      detection_scores: list of float numpy arrays with shape
        [num_detection_boxes]; num_detection_boxes may differ per entry.
      detection_classes: list of int numpy arrays with shape
        [num_detection_boxes]; num_detection_boxes may differ per entry.
      categories: a list of dictionaries representing all possible categories.
        Each dict must have an integer 'id' key uniquely identifying it.
      output_path: (optional) path for exporting the result to JSON.

    Returns:
      A list of dictionaries readable by the COCO API, one per detection,
      with keys from ['image_id', 'category_id', 'bbox', 'score'].

    Raises:
      ValueError: if the input lists do not all have the same length.
    """
    # Set comprehension instead of set([...]) -- avoids the throwaway list.
    category_id_set = {cat['id'] for cat in categories}
    detections_export_list = []
    if not (len(image_ids) == len(detection_boxes) == len(detection_scores) == len(detection_classes)):
        raise ValueError('Input lists must have the same length')
    for image_id, boxes, scores, classes in zip(image_ids, detection_boxes, detection_scores, detection_classes):
        detections_export_list.extend(
            ExportSingleImageDetectionBoxesToCoco(image_id, category_id_set, boxes, scores, classes))
    if output_path:
        with tf.gfile.GFile(output_path, 'w') as fid:
            json_utils.Dump(detections_export_list, fid, float_digits=4, indent=2)
    return detections_export_list
| -1,430,712,689,237,600,800
|
Export detection annotations in numpy arrays to COCO API.
This function converts a set of predicted detections represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of boxes, scores and
classes, respectively, corresponding to each image for which detections
have been produced. Note that the image_ids provided here must
match the ones given to the ExportGroundtruthToCOCO function in order
for evaluation to work properly.
We assume that for each image, boxes, scores and classes are in
correspondence --- that is: detection_boxes[i, :], detection_scores[i] and
detection_classes[i] are associated with the same detection.
Args:
image_ids: a list of unique image identifier either of type integer or
string.
detection_boxes: list of numpy arrays with shape [num_detection_boxes, 4]
detection_scores: list of numpy arrays (float) with shape
[num_detection_boxes]. Note that num_detection_boxes can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection_boxes]. Note that num_detection_boxes can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'bbox', 'score'].
Raises:
ValueError: if (1) detection_boxes and detection_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers.
|
research/object_detection/metrics/coco_tools.py
|
ExportDetectionsToCOCO
|
1911590204/models
|
python
|
def ExportDetectionsToCOCO(image_ids, detection_boxes, detection_scores, detection_classes, categories, output_path=None):
    """Export detection annotations in numpy arrays to COCO API.

    Converts a set of predicted detections represented as numpy arrays to
    dictionaries that can be ingested by the COCO API. Inputs are per-image
    lists of boxes, scores and classes. The image_ids provided here must
    match the ones given to ExportGroundtruthToCOCO for evaluation to work.

    For each image, boxes, scores and classes are assumed to be in
    correspondence: detection_boxes[i, :], detection_scores[i] and
    detection_classes[i] are associated with the same detection.

    Args:
      image_ids: a list of unique image identifiers (integer or string).
      detection_boxes: list of numpy arrays with shape
        [num_detection_boxes, 4].
      detection_scores: list of float numpy arrays with shape
        [num_detection_boxes]; num_detection_boxes may differ per entry.
      detection_classes: list of int numpy arrays with shape
        [num_detection_boxes]; num_detection_boxes may differ per entry.
      categories: a list of dictionaries representing all possible categories.
        Each dict must have an integer 'id' key uniquely identifying it.
      output_path: (optional) path for exporting the result to JSON.

    Returns:
      A list of dictionaries readable by the COCO API, one per detection,
      with keys from ['image_id', 'category_id', 'bbox', 'score'].

    Raises:
      ValueError: if the input lists do not all have the same length.
    """
    # Set comprehension instead of set([...]) -- avoids the throwaway list.
    category_id_set = {cat['id'] for cat in categories}
    detections_export_list = []
    if not (len(image_ids) == len(detection_boxes) == len(detection_scores) == len(detection_classes)):
        raise ValueError('Input lists must have the same length')
    for image_id, boxes, scores, classes in zip(image_ids, detection_boxes, detection_scores, detection_classes):
        detections_export_list.extend(
            ExportSingleImageDetectionBoxesToCoco(image_id, category_id_set, boxes, scores, classes))
    if output_path:
        with tf.gfile.GFile(output_path, 'w') as fid:
            json_utils.Dump(detections_export_list, fid, float_digits=4, indent=2)
    return detections_export_list
|
def ExportSegmentsToCOCO(image_ids, detection_masks, detection_scores, detection_classes, categories, output_path=None):
    """Export segmentation masks in numpy arrays to COCO API.

    Converts a set of predicted instance masks represented as numpy arrays to
    dictionaries that can be ingested by the COCO API. Inputs are per-image
    lists of masks, scores and classes.

    Recommended for small datasets; for large datasets use it with a merge
    function (e.g. in map reduce), otherwise memory consumption is large.

    For each image, masks, scores and classes are assumed to be in
    correspondence: detection_masks[i, :, :, :], detection_scores[i] and
    detection_classes[i] are associated with the same detection.

    Args:
      image_ids: list of image ids (typically ints or strings).
      detection_masks: list of uint8 numpy arrays with shape
        [num_detection, h, w, 1]; h and w should match the image.
      detection_scores: list of float numpy arrays with shape
        [num_detection]; num_detection may differ per entry.
      detection_classes: list of int numpy arrays with shape
        [num_detection]; num_detection may differ per entry.
      categories: a list of dictionaries representing all possible categories.
        Each dict must have an integer 'id' key uniquely identifying it.
      output_path: (optional) path for exporting the result to JSON.

    Returns:
      A list of dictionaries readable by the COCO API, one per detection,
      with keys from ['image_id', 'category_id', 'segmentation', 'score'].

    Raises:
      ValueError: if detection_masks and detection_classes do not have the
        right lengths or if elements do not have the correct shapes.
    """
    if not (len(image_ids) == len(detection_masks) == len(detection_scores) == len(detection_classes)):
        raise ValueError('Input lists must have the same length')
    # Loop-invariant hoist: the original rebuilt this set for every image.
    category_id_set = {cat['id'] for cat in categories}
    segment_export_list = []
    for image_id, masks, scores, classes in zip(image_ids, detection_masks, detection_scores, detection_classes):
        if len(classes.shape) != 1 or len(scores.shape) != 1:
            # Bug fix: original message ran 'detection_scores' and 'expected'
            # together with no separating space.
            raise ValueError('All entries in detection_classes and '
                             'detection_scores expected to be of rank 1.')
        if len(masks.shape) != 4:
            raise ValueError('All entries in masks expected to be of rank 4. Given {}'.format(masks.shape))
        num_boxes = classes.shape[0]
        if not num_boxes == masks.shape[0] == scores.shape[0]:
            raise ValueError('Corresponding entries in segment_classes, detection_scores and detection_boxes should have compatible shapes (i.e., agree on the 0th dimension).')
        # Drop the trailing singleton channel axis before the per-image export.
        segment_export_list.extend(ExportSingleImageDetectionMasksToCoco(image_id, category_id_set, np.squeeze(masks, axis=3), scores, classes))
    if output_path:
        with tf.gfile.GFile(output_path, 'w') as fid:
            json_utils.Dump(segment_export_list, fid, float_digits=4, indent=2)
    return segment_export_list
| -927,010,710,476,147,200
|
Export segmentation masks in numpy arrays to COCO API.
This function converts a set of predicted instance masks represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of segments, scores and
classes, respectively, corresponding to each image for which detections
have been produced.
Note this function is recommended to use for small dataset.
For large dataset, it should be used with a merge function
(e.g. in map reduce), otherwise the memory consumption is large.
We assume that for each image, masks, scores and classes are in
correspondence --- that is: detection_masks[i, :, :, :], detection_scores[i]
and detection_classes[i] are associated with the same detection.
Args:
image_ids: list of image ids (typically ints or strings)
detection_masks: list of numpy arrays with shape [num_detection, h, w, 1]
and type uint8. The height and width should match the shape of
corresponding image.
detection_scores: list of numpy arrays (float) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'segmentation', 'score'].
Raises:
ValueError: if detection_masks and detection_classes do not have the
right lengths or if each of the elements inside these lists do not
have the correct shapes.
|
research/object_detection/metrics/coco_tools.py
|
ExportSegmentsToCOCO
|
1911590204/models
|
python
|
def ExportSegmentsToCOCO(image_ids, detection_masks, detection_scores, detection_classes, categories, output_path=None):
    """Export segmentation masks in numpy arrays to COCO API.

    Converts a set of predicted instance masks represented as numpy arrays to
    dictionaries that can be ingested by the COCO API. Inputs are per-image
    lists of masks, scores and classes.

    Recommended for small datasets; for large datasets use it with a merge
    function (e.g. in map reduce), otherwise memory consumption is large.

    For each image, masks, scores and classes are assumed to be in
    correspondence: detection_masks[i, :, :, :], detection_scores[i] and
    detection_classes[i] are associated with the same detection.

    Args:
      image_ids: list of image ids (typically ints or strings).
      detection_masks: list of uint8 numpy arrays with shape
        [num_detection, h, w, 1]; h and w should match the image.
      detection_scores: list of float numpy arrays with shape
        [num_detection]; num_detection may differ per entry.
      detection_classes: list of int numpy arrays with shape
        [num_detection]; num_detection may differ per entry.
      categories: a list of dictionaries representing all possible categories.
        Each dict must have an integer 'id' key uniquely identifying it.
      output_path: (optional) path for exporting the result to JSON.

    Returns:
      A list of dictionaries readable by the COCO API, one per detection,
      with keys from ['image_id', 'category_id', 'segmentation', 'score'].

    Raises:
      ValueError: if detection_masks and detection_classes do not have the
        right lengths or if elements do not have the correct shapes.
    """
    if not (len(image_ids) == len(detection_masks) == len(detection_scores) == len(detection_classes)):
        raise ValueError('Input lists must have the same length')
    # Loop-invariant hoist: the original rebuilt this set for every image.
    category_id_set = {cat['id'] for cat in categories}
    segment_export_list = []
    for image_id, masks, scores, classes in zip(image_ids, detection_masks, detection_scores, detection_classes):
        if len(classes.shape) != 1 or len(scores.shape) != 1:
            # Bug fix: original message ran 'detection_scores' and 'expected'
            # together with no separating space.
            raise ValueError('All entries in detection_classes and '
                             'detection_scores expected to be of rank 1.')
        if len(masks.shape) != 4:
            raise ValueError('All entries in masks expected to be of rank 4. Given {}'.format(masks.shape))
        num_boxes = classes.shape[0]
        if not num_boxes == masks.shape[0] == scores.shape[0]:
            raise ValueError('Corresponding entries in segment_classes, detection_scores and detection_boxes should have compatible shapes (i.e., agree on the 0th dimension).')
        # Drop the trailing singleton channel axis before the per-image export.
        segment_export_list.extend(ExportSingleImageDetectionMasksToCoco(image_id, category_id_set, np.squeeze(masks, axis=3), scores, classes))
    if output_path:
        with tf.gfile.GFile(output_path, 'w') as fid:
            json_utils.Dump(segment_export_list, fid, float_digits=4, indent=2)
    return segment_export_list
|
def ExportKeypointsToCOCO(image_ids, detection_keypoints, detection_scores, detection_classes, categories, output_path=None):
    """Exports keypoints in numpy arrays to COCO API.

    Converts a set of predicted keypoints represented as numpy arrays to
    dictionaries that can be ingested by the COCO API. Inputs are per-image
    lists of keypoints, scores and classes.

    For each image, keypoints, scores and classes are assumed to be in
    correspondence: detection_keypoints[i, :, :, :], detection_scores[i] and
    detection_classes[i] are associated with the same detection.

    Args:
      image_ids: list of image ids (typically ints or strings).
      detection_keypoints: list of float32 numpy arrays with shape
        [num_detection, num_keypoints, 2] in absolute x-y coordinates.
      detection_scores: list of float numpy arrays with shape
        [num_detection]; num_detection may differ per entry.
      detection_classes: list of int numpy arrays with shape
        [num_detection]; num_detection may differ per entry.
      categories: a list of dictionaries representing all possible categories.
        Each dict must have an integer 'id' key uniquely identifying the
        category and an integer 'num_keypoints' key giving the number of
        keypoints the category has.
      output_path: (optional) path for exporting the result to JSON.

    Returns:
      A list of dictionaries readable by the COCO API, one per detection,
      with keys from ['image_id', 'category_id', 'keypoints', 'score'].

    Raises:
      ValueError: if detection_keypoints and detection_classes do not have
        the right lengths, if elements have the wrong shapes, or if a class
        id is not in category_id_set.
    """
    if not (len(image_ids) == len(detection_keypoints) == len(detection_scores) == len(detection_classes)):
        raise ValueError('Input lists must have the same length')
    # Loop-invariant hoist: the original rebuilt both of these inside the
    # per-image loop on every iteration.
    category_id_set = {cat['id'] for cat in categories}
    category_id_to_num_keypoints_map = {cat['id']: cat['num_keypoints'] for cat in categories if 'num_keypoints' in cat}
    keypoints_export_list = []
    for image_id, keypoints, scores, classes in zip(image_ids, detection_keypoints, detection_scores, detection_classes):
        if len(classes.shape) != 1 or len(scores.shape) != 1:
            # Bug fix: original message ran 'detection_scores' and 'expected'
            # together with no separating space.
            raise ValueError('All entries in detection_classes and '
                             'detection_scores expected to be of rank 1.')
        if len(keypoints.shape) != 3:
            raise ValueError('All entries in keypoints expected to be of rank 3. Given {}'.format(keypoints.shape))
        num_boxes = classes.shape[0]
        if not num_boxes == keypoints.shape[0] == scores.shape[0]:
            raise ValueError('Corresponding entries in detection_classes, detection_keypoints, and detection_scores should have compatible shapes (i.e., agree on the 0th dimension).')
        for i in range(num_boxes):
            if classes[i] not in category_id_set:
                raise ValueError('class id should be in category_id_set\n')
            if classes[i] in category_id_to_num_keypoints_map:
                num_keypoints = category_id_to_num_keypoints_map[classes[i]]
                # Append a visibility column of ones (labeled and visible) and
                # flatten to the COCO [x1, y1, v1, x2, y2, v2, ...] layout.
                instance_keypoints = np.concatenate([keypoints[i, 0:num_keypoints, :], np.expand_dims(np.ones(num_keypoints), axis=1)], axis=1).astype(int)
                instance_keypoints = instance_keypoints.flatten().tolist()
                keypoints_export_list.append({
                    'image_id': image_id,
                    'category_id': int(classes[i]),
                    'keypoints': instance_keypoints,
                    'score': float(scores[i]),
                })
    if output_path:
        with tf.gfile.GFile(output_path, 'w') as fid:
            json_utils.Dump(keypoints_export_list, fid, float_digits=4, indent=2)
    return keypoints_export_list
| 7,840,153,829,423,577,000
|
Exports keypoints in numpy arrays to COCO API.
This function converts a set of predicted keypoints represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of keypoints, scores and
classes, respectively, corresponding to each image for which detections
have been produced.
We assume that for each image, keypoints, scores and classes are in
correspondence --- that is: detection_keypoints[i, :, :, :],
detection_scores[i] and detection_classes[i] are associated with the same
detection.
Args:
image_ids: list of image ids (typically ints or strings)
detection_keypoints: list of numpy arrays with shape
[num_detection, num_keypoints, 2] and type float32 in absolute
x-y coordinates.
detection_scores: list of numpy arrays (float) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category and an integer 'num_keypoints' key specifying the number of
keypoints the category has.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'keypoints', 'score'].
Raises:
ValueError: if detection_keypoints and detection_classes do not have the
right lengths or if each of the elements inside these lists do not
have the correct shapes.
|
research/object_detection/metrics/coco_tools.py
|
ExportKeypointsToCOCO
|
1911590204/models
|
python
|
def ExportKeypointsToCOCO(image_ids, detection_keypoints, detection_scores, detection_classes, categories, output_path=None):
"Exports keypoints in numpy arrays to COCO API.\n\n This function converts a set of predicted keypoints represented\n as numpy arrays to dictionaries that can be ingested by the COCO API.\n Inputs to this function are lists, consisting of keypoints, scores and\n classes, respectively, corresponding to each image for which detections\n have been produced.\n\n We assume that for each image, keypoints, scores and classes are in\n correspondence --- that is: detection_keypoints[i, :, :, :],\n detection_scores[i] and detection_classes[i] are associated with the same\n detection.\n\n Args:\n image_ids: list of image ids (typically ints or strings)\n detection_keypoints: list of numpy arrays with shape\n [num_detection, num_keypoints, 2] and type float32 in absolute\n x-y coordinates.\n detection_scores: list of numpy arrays (float) with shape\n [num_detection]. Note that num_detection can be different\n for each entry in the list.\n detection_classes: list of numpy arrays (int) with shape\n [num_detection]. Note that num_detection can be different\n for each entry in the list.\n categories: a list of dictionaries representing all possible categories.\n Each dict in this list must have an integer 'id' key uniquely identifying\n this category and an integer 'num_keypoints' key specifying the number of\n keypoints the category has.\n output_path: (optional) path for exporting result to JSON\n\n Returns:\n list of dictionaries that can be read by COCO API, where each entry\n corresponds to a single detection and has keys from:\n ['image_id', 'category_id', 'keypoints', 'score'].\n\n Raises:\n ValueError: if detection_keypoints and detection_classes do not have the\n right lengths or if each of the elements inside these lists do not\n have the correct shapes.\n "
if (not (len(image_ids) == len(detection_keypoints) == len(detection_scores) == len(detection_classes))):
raise ValueError('Input lists must have the same length')
keypoints_export_list = []
for (image_id, keypoints, scores, classes) in zip(image_ids, detection_keypoints, detection_scores, detection_classes):
if ((len(classes.shape) != 1) or (len(scores.shape) != 1)):
raise ValueError('All entries in detection_classes and detection_scoresexpected to be of rank 1.')
if (len(keypoints.shape) != 3):
raise ValueError('All entries in keypoints expected to be of rank 3. Given {}'.format(keypoints.shape))
num_boxes = classes.shape[0]
if (not (num_boxes == keypoints.shape[0] == scores.shape[0])):
raise ValueError('Corresponding entries in detection_classes, detection_keypoints, and detection_scores should have compatible shapes (i.e., agree on the 0th dimension).')
category_id_set = set([cat['id'] for cat in categories])
category_id_to_num_keypoints_map = {cat['id']: cat['num_keypoints'] for cat in categories if ('num_keypoints' in cat)}
for i in range(num_boxes):
if (classes[i] not in category_id_set):
raise ValueError('class id should be in category_id_set\n')
if (classes[i] in category_id_to_num_keypoints_map):
num_keypoints = category_id_to_num_keypoints_map[classes[i]]
instance_keypoints = np.concatenate([keypoints[i, 0:num_keypoints, :], np.expand_dims(np.ones(num_keypoints), axis=1)], axis=1).astype(int)
instance_keypoints = instance_keypoints.flatten().tolist()
keypoints_export_list.append({'image_id': image_id, 'category_id': int(classes[i]), 'keypoints': instance_keypoints, 'score': float(scores[i])})
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(keypoints_export_list, fid, float_digits=4, indent=2)
return keypoints_export_list
|
def __init__(self, dataset, detection_type='bbox'):
"COCOWrapper constructor.\n\n See http://mscoco.org/dataset/#format for a description of the format.\n By default, the coco.COCO class constructor reads from a JSON file.\n This function duplicates the same behavior but loads from a dictionary,\n allowing us to perform evaluation without writing to external storage.\n\n Args:\n dataset: a dictionary holding bounding box annotations in the COCO format.\n detection_type: type of detections being wrapped. Can be one of ['bbox',\n 'segmentation']\n\n Raises:\n ValueError: if detection_type is unsupported.\n "
supported_detection_types = ['bbox', 'segmentation']
if (detection_type not in supported_detection_types):
raise ValueError('Unsupported detection type: {}. Supported values are: {}'.format(detection_type, supported_detection_types))
self._detection_type = detection_type
coco.COCO.__init__(self)
self.dataset = dataset
self.createIndex()
| 3,777,113,071,917,594,000
|
COCOWrapper constructor.
See http://mscoco.org/dataset/#format for a description of the format.
By default, the coco.COCO class constructor reads from a JSON file.
This function duplicates the same behavior but loads from a dictionary,
allowing us to perform evaluation without writing to external storage.
Args:
dataset: a dictionary holding bounding box annotations in the COCO format.
detection_type: type of detections being wrapped. Can be one of ['bbox',
'segmentation']
Raises:
ValueError: if detection_type is unsupported.
|
research/object_detection/metrics/coco_tools.py
|
__init__
|
1911590204/models
|
python
|
def __init__(self, dataset, detection_type='bbox'):
"COCOWrapper constructor.\n\n See http://mscoco.org/dataset/#format for a description of the format.\n By default, the coco.COCO class constructor reads from a JSON file.\n This function duplicates the same behavior but loads from a dictionary,\n allowing us to perform evaluation without writing to external storage.\n\n Args:\n dataset: a dictionary holding bounding box annotations in the COCO format.\n detection_type: type of detections being wrapped. Can be one of ['bbox',\n 'segmentation']\n\n Raises:\n ValueError: if detection_type is unsupported.\n "
supported_detection_types = ['bbox', 'segmentation']
if (detection_type not in supported_detection_types):
raise ValueError('Unsupported detection type: {}. Supported values are: {}'.format(detection_type, supported_detection_types))
self._detection_type = detection_type
coco.COCO.__init__(self)
self.dataset = dataset
self.createIndex()
|
def LoadAnnotations(self, annotations):
"Load annotations dictionary into COCO datastructure.\n\n See http://mscoco.org/dataset/#format for a description of the annotations\n format. As above, this function replicates the default behavior of the API\n but does not require writing to external storage.\n\n Args:\n annotations: python list holding object detection results where each\n detection is encoded as a dict with required keys ['image_id',\n 'category_id', 'score'] and one of ['bbox', 'segmentation'] based on\n `detection_type`.\n\n Returns:\n a coco.COCO datastructure holding object detection annotations results\n\n Raises:\n ValueError: if annotations is not a list\n ValueError: if annotations do not correspond to the images contained\n in self.\n "
results = coco.COCO()
results.dataset['images'] = [img for img in self.dataset['images']]
tf.logging.info('Loading and preparing annotation results...')
tic = time.time()
if (not isinstance(annotations, list)):
raise ValueError('annotations is not a list of objects')
annotation_img_ids = [ann['image_id'] for ann in annotations]
if (set(annotation_img_ids) != (set(annotation_img_ids) & set(self.getImgIds()))):
raise ValueError('Results do not correspond to current coco set')
results.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
if (self._detection_type == 'bbox'):
for (idx, ann) in enumerate(annotations):
bb = ann['bbox']
ann['area'] = (bb[2] * bb[3])
ann['id'] = (idx + 1)
ann['iscrowd'] = 0
elif (self._detection_type == 'segmentation'):
for (idx, ann) in enumerate(annotations):
ann['area'] = mask.area(ann['segmentation'])
ann['bbox'] = mask.toBbox(ann['segmentation'])
ann['id'] = (idx + 1)
ann['iscrowd'] = 0
tf.logging.info('DONE (t=%0.2fs)', (time.time() - tic))
results.dataset['annotations'] = annotations
results.createIndex()
return results
| 8,178,324,416,221,913,000
|
Load annotations dictionary into COCO datastructure.
See http://mscoco.org/dataset/#format for a description of the annotations
format. As above, this function replicates the default behavior of the API
but does not require writing to external storage.
Args:
annotations: python list holding object detection results where each
detection is encoded as a dict with required keys ['image_id',
'category_id', 'score'] and one of ['bbox', 'segmentation'] based on
`detection_type`.
Returns:
a coco.COCO datastructure holding object detection annotations results
Raises:
ValueError: if annotations is not a list
ValueError: if annotations do not correspond to the images contained
in self.
|
research/object_detection/metrics/coco_tools.py
|
LoadAnnotations
|
1911590204/models
|
python
|
def LoadAnnotations(self, annotations):
"Load annotations dictionary into COCO datastructure.\n\n See http://mscoco.org/dataset/#format for a description of the annotations\n format. As above, this function replicates the default behavior of the API\n but does not require writing to external storage.\n\n Args:\n annotations: python list holding object detection results where each\n detection is encoded as a dict with required keys ['image_id',\n 'category_id', 'score'] and one of ['bbox', 'segmentation'] based on\n `detection_type`.\n\n Returns:\n a coco.COCO datastructure holding object detection annotations results\n\n Raises:\n ValueError: if annotations is not a list\n ValueError: if annotations do not correspond to the images contained\n in self.\n "
results = coco.COCO()
results.dataset['images'] = [img for img in self.dataset['images']]
tf.logging.info('Loading and preparing annotation results...')
tic = time.time()
if (not isinstance(annotations, list)):
raise ValueError('annotations is not a list of objects')
annotation_img_ids = [ann['image_id'] for ann in annotations]
if (set(annotation_img_ids) != (set(annotation_img_ids) & set(self.getImgIds()))):
raise ValueError('Results do not correspond to current coco set')
results.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
if (self._detection_type == 'bbox'):
for (idx, ann) in enumerate(annotations):
bb = ann['bbox']
ann['area'] = (bb[2] * bb[3])
ann['id'] = (idx + 1)
ann['iscrowd'] = 0
elif (self._detection_type == 'segmentation'):
for (idx, ann) in enumerate(annotations):
ann['area'] = mask.area(ann['segmentation'])
ann['bbox'] = mask.toBbox(ann['segmentation'])
ann['id'] = (idx + 1)
ann['iscrowd'] = 0
tf.logging.info('DONE (t=%0.2fs)', (time.time() - tic))
results.dataset['annotations'] = annotations
results.createIndex()
return results
|
def __init__(self, groundtruth=None, detections=None, agnostic_mode=False, iou_type='bbox', oks_sigmas=None):
"COCOEvalWrapper constructor.\n\n Note that for the area-based metrics to be meaningful, detection and\n groundtruth boxes must be in image coordinates measured in pixels.\n\n Args:\n groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding\n groundtruth annotations\n detections: a coco.COCO (or coco_tools.COCOWrapper) object holding\n detections\n agnostic_mode: boolean (default: False). If True, evaluation ignores\n class labels, treating all detections as proposals.\n iou_type: IOU type to use for evaluation. Supports `bbox', `segm`,\n `keypoints`.\n oks_sigmas: Float numpy array holding the OKS variances for keypoints.\n "
cocoeval.COCOeval.__init__(self, groundtruth, detections, iouType=iou_type)
if (oks_sigmas is not None):
self.params.kpt_oks_sigmas = oks_sigmas
if agnostic_mode:
self.params.useCats = 0
self._iou_type = iou_type
| -4,644,386,061,494,226,000
|
COCOEvalWrapper constructor.
Note that for the area-based metrics to be meaningful, detection and
groundtruth boxes must be in image coordinates measured in pixels.
Args:
groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding
groundtruth annotations
detections: a coco.COCO (or coco_tools.COCOWrapper) object holding
detections
agnostic_mode: boolean (default: False). If True, evaluation ignores
class labels, treating all detections as proposals.
iou_type: IOU type to use for evaluation. Supports `bbox', `segm`,
`keypoints`.
oks_sigmas: Float numpy array holding the OKS variances for keypoints.
|
research/object_detection/metrics/coco_tools.py
|
__init__
|
1911590204/models
|
python
|
def __init__(self, groundtruth=None, detections=None, agnostic_mode=False, iou_type='bbox', oks_sigmas=None):
"COCOEvalWrapper constructor.\n\n Note that for the area-based metrics to be meaningful, detection and\n groundtruth boxes must be in image coordinates measured in pixels.\n\n Args:\n groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding\n groundtruth annotations\n detections: a coco.COCO (or coco_tools.COCOWrapper) object holding\n detections\n agnostic_mode: boolean (default: False). If True, evaluation ignores\n class labels, treating all detections as proposals.\n iou_type: IOU type to use for evaluation. Supports `bbox', `segm`,\n `keypoints`.\n oks_sigmas: Float numpy array holding the OKS variances for keypoints.\n "
cocoeval.COCOeval.__init__(self, groundtruth, detections, iouType=iou_type)
if (oks_sigmas is not None):
self.params.kpt_oks_sigmas = oks_sigmas
if agnostic_mode:
self.params.useCats = 0
self._iou_type = iou_type
|
def GetCategory(self, category_id):
"Fetches dictionary holding category information given category id.\n\n Args:\n category_id: integer id\n Returns:\n dictionary holding 'id', 'name'.\n "
return self.cocoGt.cats[category_id]
| -3,998,284,783,981,275,000
|
Fetches dictionary holding category information given category id.
Args:
category_id: integer id
Returns:
dictionary holding 'id', 'name'.
|
research/object_detection/metrics/coco_tools.py
|
GetCategory
|
1911590204/models
|
python
|
def GetCategory(self, category_id):
"Fetches dictionary holding category information given category id.\n\n Args:\n category_id: integer id\n Returns:\n dictionary holding 'id', 'name'.\n "
return self.cocoGt.cats[category_id]
|
def GetAgnosticMode(self):
'Returns true if COCO Eval is configured to evaluate in agnostic mode.'
return (self.params.useCats == 0)
| -4,317,986,916,639,350,300
|
Returns true if COCO Eval is configured to evaluate in agnostic mode.
|
research/object_detection/metrics/coco_tools.py
|
GetAgnosticMode
|
1911590204/models
|
python
|
def GetAgnosticMode(self):
return (self.params.useCats == 0)
|
def GetCategoryIdList(self):
'Returns list of valid category ids.'
return self.params.catIds
| -2,981,913,091,674,385,400
|
Returns list of valid category ids.
|
research/object_detection/metrics/coco_tools.py
|
GetCategoryIdList
|
1911590204/models
|
python
|
def GetCategoryIdList(self):
return self.params.catIds
|
def ComputeMetrics(self, include_metrics_per_category=False, all_metrics_per_category=False):
"Computes detection/keypoint metrics.\n\n Args:\n include_metrics_per_category: If True, will include metrics per category.\n all_metrics_per_category: If true, include all the summery metrics for\n each category in per_category_ap. Be careful with setting it to true if\n you have more than handful of categories, because it will pollute\n your mldash.\n\n Returns:\n 1. summary_metrics: a dictionary holding:\n 'Precision/mAP': mean average precision over classes averaged over IOU\n thresholds ranging from .5 to .95 with .05 increments\n 'Precision/mAP@.50IOU': mean average precision at 50% IOU\n 'Precision/mAP@.75IOU': mean average precision at 75% IOU\n 'Precision/mAP (small)': mean average precision for small objects\n (area < 32^2 pixels). NOTE: not present for 'keypoints'\n 'Precision/mAP (medium)': mean average precision for medium sized\n objects (32^2 pixels < area < 96^2 pixels)\n 'Precision/mAP (large)': mean average precision for large objects\n (96^2 pixels < area < 10000^2 pixels)\n 'Recall/AR@1': average recall with 1 detection\n 'Recall/AR@10': average recall with 10 detections\n 'Recall/AR@100': average recall with 100 detections\n 'Recall/AR@100 (small)': average recall for small objects with 100\n detections. NOTE: not present for 'keypoints'\n 'Recall/AR@100 (medium)': average recall for medium objects with 100\n detections\n 'Recall/AR@100 (large)': average recall for large objects with 100\n detections\n 2. per_category_ap: a dictionary holding category specific results with\n keys of the form: 'Precision mAP ByCategory/category'\n (without the supercategory part if no supercategories exist).\n For backward compatibility 'PerformanceByCategory' is included in the\n output regardless of all_metrics_per_category.\n If evaluating class-agnostic mode, per_category_ap is an empty\n dictionary.\n\n Raises:\n ValueError: If category_stats does not exist.\n "
self.evaluate()
self.accumulate()
self.summarize()
summary_metrics = {}
if (self._iou_type in ['bbox', 'segm']):
summary_metrics = OrderedDict([('Precision/mAP', self.stats[0]), ('Precision/mAP@.50IOU', self.stats[1]), ('Precision/mAP@.75IOU', self.stats[2]), ('Precision/mAP (small)', self.stats[3]), ('Precision/mAP (medium)', self.stats[4]), ('Precision/mAP (large)', self.stats[5]), ('Recall/AR@1', self.stats[6]), ('Recall/AR@10', self.stats[7]), ('Recall/AR@100', self.stats[8]), ('Recall/AR@100 (small)', self.stats[9]), ('Recall/AR@100 (medium)', self.stats[10]), ('Recall/AR@100 (large)', self.stats[11])])
elif (self._iou_type == 'keypoints'):
category_id = self.GetCategoryIdList()[0]
category_name = self.GetCategory(category_id)['name']
summary_metrics = OrderedDict([])
summary_metrics['Precision/mAP ByCategory/{}'.format(category_name)] = self.stats[0]
summary_metrics['Precision/mAP@.50IOU ByCategory/{}'.format(category_name)] = self.stats[1]
summary_metrics['Precision/mAP@.75IOU ByCategory/{}'.format(category_name)] = self.stats[2]
summary_metrics['Precision/mAP (medium) ByCategory/{}'.format(category_name)] = self.stats[3]
summary_metrics['Precision/mAP (large) ByCategory/{}'.format(category_name)] = self.stats[4]
summary_metrics['Recall/AR@1 ByCategory/{}'.format(category_name)] = self.stats[5]
summary_metrics['Recall/AR@10 ByCategory/{}'.format(category_name)] = self.stats[6]
summary_metrics['Recall/AR@100 ByCategory/{}'.format(category_name)] = self.stats[7]
summary_metrics['Recall/AR@100 (medium) ByCategory/{}'.format(category_name)] = self.stats[8]
summary_metrics['Recall/AR@100 (large) ByCategory/{}'.format(category_name)] = self.stats[9]
if (not include_metrics_per_category):
return (summary_metrics, {})
if (not hasattr(self, 'category_stats')):
raise ValueError('Category stats do not exist')
per_category_ap = OrderedDict([])
if self.GetAgnosticMode():
return (summary_metrics, per_category_ap)
for (category_index, category_id) in enumerate(self.GetCategoryIdList()):
category = self.GetCategory(category_id)['name']
per_category_ap['PerformanceByCategory/mAP/{}'.format(category)] = self.category_stats[0][category_index]
if all_metrics_per_category:
per_category_ap['Precision mAP ByCategory/{}'.format(category)] = self.category_stats[0][category_index]
per_category_ap['Precision mAP@.50IOU ByCategory/{}'.format(category)] = self.category_stats[1][category_index]
per_category_ap['Precision mAP@.75IOU ByCategory/{}'.format(category)] = self.category_stats[2][category_index]
per_category_ap['Precision mAP (small) ByCategory/{}'.format(category)] = self.category_stats[3][category_index]
per_category_ap['Precision mAP (medium) ByCategory/{}'.format(category)] = self.category_stats[4][category_index]
per_category_ap['Precision mAP (large) ByCategory/{}'.format(category)] = self.category_stats[5][category_index]
per_category_ap['Recall AR@1 ByCategory/{}'.format(category)] = self.category_stats[6][category_index]
per_category_ap['Recall AR@10 ByCategory/{}'.format(category)] = self.category_stats[7][category_index]
per_category_ap['Recall AR@100 ByCategory/{}'.format(category)] = self.category_stats[8][category_index]
per_category_ap['Recall AR@100 (small) ByCategory/{}'.format(category)] = self.category_stats[9][category_index]
per_category_ap['Recall AR@100 (medium) ByCategory/{}'.format(category)] = self.category_stats[10][category_index]
per_category_ap['Recall AR@100 (large) ByCategory/{}'.format(category)] = self.category_stats[11][category_index]
return (summary_metrics, per_category_ap)
| 5,216,740,938,967,259,000
|
Computes detection/keypoint metrics.
Args:
include_metrics_per_category: If True, will include metrics per category.
all_metrics_per_category: If true, include all the summery metrics for
each category in per_category_ap. Be careful with setting it to true if
you have more than handful of categories, because it will pollute
your mldash.
Returns:
1. summary_metrics: a dictionary holding:
'Precision/mAP': mean average precision over classes averaged over IOU
thresholds ranging from .5 to .95 with .05 increments
'Precision/mAP@.50IOU': mean average precision at 50% IOU
'Precision/mAP@.75IOU': mean average precision at 75% IOU
'Precision/mAP (small)': mean average precision for small objects
(area < 32^2 pixels). NOTE: not present for 'keypoints'
'Precision/mAP (medium)': mean average precision for medium sized
objects (32^2 pixels < area < 96^2 pixels)
'Precision/mAP (large)': mean average precision for large objects
(96^2 pixels < area < 10000^2 pixels)
'Recall/AR@1': average recall with 1 detection
'Recall/AR@10': average recall with 10 detections
'Recall/AR@100': average recall with 100 detections
'Recall/AR@100 (small)': average recall for small objects with 100
detections. NOTE: not present for 'keypoints'
'Recall/AR@100 (medium)': average recall for medium objects with 100
detections
'Recall/AR@100 (large)': average recall for large objects with 100
detections
2. per_category_ap: a dictionary holding category specific results with
keys of the form: 'Precision mAP ByCategory/category'
(without the supercategory part if no supercategories exist).
For backward compatibility 'PerformanceByCategory' is included in the
output regardless of all_metrics_per_category.
If evaluating class-agnostic mode, per_category_ap is an empty
dictionary.
Raises:
ValueError: If category_stats does not exist.
|
research/object_detection/metrics/coco_tools.py
|
ComputeMetrics
|
1911590204/models
|
python
|
def ComputeMetrics(self, include_metrics_per_category=False, all_metrics_per_category=False):
"Computes detection/keypoint metrics.\n\n Args:\n include_metrics_per_category: If True, will include metrics per category.\n all_metrics_per_category: If true, include all the summery metrics for\n each category in per_category_ap. Be careful with setting it to true if\n you have more than handful of categories, because it will pollute\n your mldash.\n\n Returns:\n 1. summary_metrics: a dictionary holding:\n 'Precision/mAP': mean average precision over classes averaged over IOU\n thresholds ranging from .5 to .95 with .05 increments\n 'Precision/mAP@.50IOU': mean average precision at 50% IOU\n 'Precision/mAP@.75IOU': mean average precision at 75% IOU\n 'Precision/mAP (small)': mean average precision for small objects\n (area < 32^2 pixels). NOTE: not present for 'keypoints'\n 'Precision/mAP (medium)': mean average precision for medium sized\n objects (32^2 pixels < area < 96^2 pixels)\n 'Precision/mAP (large)': mean average precision for large objects\n (96^2 pixels < area < 10000^2 pixels)\n 'Recall/AR@1': average recall with 1 detection\n 'Recall/AR@10': average recall with 10 detections\n 'Recall/AR@100': average recall with 100 detections\n 'Recall/AR@100 (small)': average recall for small objects with 100\n detections. NOTE: not present for 'keypoints'\n 'Recall/AR@100 (medium)': average recall for medium objects with 100\n detections\n 'Recall/AR@100 (large)': average recall for large objects with 100\n detections\n 2. per_category_ap: a dictionary holding category specific results with\n keys of the form: 'Precision mAP ByCategory/category'\n (without the supercategory part if no supercategories exist).\n For backward compatibility 'PerformanceByCategory' is included in the\n output regardless of all_metrics_per_category.\n If evaluating class-agnostic mode, per_category_ap is an empty\n dictionary.\n\n Raises:\n ValueError: If category_stats does not exist.\n "
self.evaluate()
self.accumulate()
self.summarize()
summary_metrics = {}
if (self._iou_type in ['bbox', 'segm']):
summary_metrics = OrderedDict([('Precision/mAP', self.stats[0]), ('Precision/mAP@.50IOU', self.stats[1]), ('Precision/mAP@.75IOU', self.stats[2]), ('Precision/mAP (small)', self.stats[3]), ('Precision/mAP (medium)', self.stats[4]), ('Precision/mAP (large)', self.stats[5]), ('Recall/AR@1', self.stats[6]), ('Recall/AR@10', self.stats[7]), ('Recall/AR@100', self.stats[8]), ('Recall/AR@100 (small)', self.stats[9]), ('Recall/AR@100 (medium)', self.stats[10]), ('Recall/AR@100 (large)', self.stats[11])])
elif (self._iou_type == 'keypoints'):
category_id = self.GetCategoryIdList()[0]
category_name = self.GetCategory(category_id)['name']
summary_metrics = OrderedDict([])
summary_metrics['Precision/mAP ByCategory/{}'.format(category_name)] = self.stats[0]
summary_metrics['Precision/mAP@.50IOU ByCategory/{}'.format(category_name)] = self.stats[1]
summary_metrics['Precision/mAP@.75IOU ByCategory/{}'.format(category_name)] = self.stats[2]
summary_metrics['Precision/mAP (medium) ByCategory/{}'.format(category_name)] = self.stats[3]
summary_metrics['Precision/mAP (large) ByCategory/{}'.format(category_name)] = self.stats[4]
summary_metrics['Recall/AR@1 ByCategory/{}'.format(category_name)] = self.stats[5]
summary_metrics['Recall/AR@10 ByCategory/{}'.format(category_name)] = self.stats[6]
summary_metrics['Recall/AR@100 ByCategory/{}'.format(category_name)] = self.stats[7]
summary_metrics['Recall/AR@100 (medium) ByCategory/{}'.format(category_name)] = self.stats[8]
summary_metrics['Recall/AR@100 (large) ByCategory/{}'.format(category_name)] = self.stats[9]
if (not include_metrics_per_category):
return (summary_metrics, {})
if (not hasattr(self, 'category_stats')):
raise ValueError('Category stats do not exist')
per_category_ap = OrderedDict([])
if self.GetAgnosticMode():
return (summary_metrics, per_category_ap)
for (category_index, category_id) in enumerate(self.GetCategoryIdList()):
category = self.GetCategory(category_id)['name']
per_category_ap['PerformanceByCategory/mAP/{}'.format(category)] = self.category_stats[0][category_index]
if all_metrics_per_category:
per_category_ap['Precision mAP ByCategory/{}'.format(category)] = self.category_stats[0][category_index]
per_category_ap['Precision mAP@.50IOU ByCategory/{}'.format(category)] = self.category_stats[1][category_index]
per_category_ap['Precision mAP@.75IOU ByCategory/{}'.format(category)] = self.category_stats[2][category_index]
per_category_ap['Precision mAP (small) ByCategory/{}'.format(category)] = self.category_stats[3][category_index]
per_category_ap['Precision mAP (medium) ByCategory/{}'.format(category)] = self.category_stats[4][category_index]
per_category_ap['Precision mAP (large) ByCategory/{}'.format(category)] = self.category_stats[5][category_index]
per_category_ap['Recall AR@1 ByCategory/{}'.format(category)] = self.category_stats[6][category_index]
per_category_ap['Recall AR@10 ByCategory/{}'.format(category)] = self.category_stats[7][category_index]
per_category_ap['Recall AR@100 ByCategory/{}'.format(category)] = self.category_stats[8][category_index]
per_category_ap['Recall AR@100 (small) ByCategory/{}'.format(category)] = self.category_stats[9][category_index]
per_category_ap['Recall AR@100 (medium) ByCategory/{}'.format(category)] = self.category_stats[10][category_index]
per_category_ap['Recall AR@100 (large) ByCategory/{}'.format(category)] = self.category_stats[11][category_index]
return (summary_metrics, per_category_ap)
|
def accept(self):
'\n Override the accept method so that we can confirm saving an\n invalid configuration.\n '
result = QtWidgets.QMessageBox.Yes
if (not self.validate()):
result = QtWidgets.QMessageBox.warning(self, 'Invalid Configuration', "This configuration is invalid. Unpredictable behaviour may result if you choose 'Yes', are you sure you want to save this configuration?)", (QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No), QtWidgets.QMessageBox.No)
if (result == QtWidgets.QMessageBox.Yes):
QtWidgets.QDialog.accept(self)
| 7,433,577,860,333,540,000
|
Override the accept method so that we can confirm saving an
invalid configuration.
|
mapclientplugins/filechooserstep/configuredialog.py
|
accept
|
mapclient-plugins/mapclientplugins.filechooserstep
|
python
|
def accept(self):
'\n Override the accept method so that we can confirm saving an\n invalid configuration.\n '
result = QtWidgets.QMessageBox.Yes
if (not self.validate()):
result = QtWidgets.QMessageBox.warning(self, 'Invalid Configuration', "This configuration is invalid. Unpredictable behaviour may result if you choose 'Yes', are you sure you want to save this configuration?)", (QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No), QtWidgets.QMessageBox.No)
if (result == QtWidgets.QMessageBox.Yes):
QtWidgets.QDialog.accept(self)
|
def validate(self):
'\n Validate the configuration dialog fields. For any field that is not valid\n set the style sheet to the INVALID_STYLE_SHEET. Return the outcome of the\n overall validity of the configuration.\n '
value = self.identifierOccursCount(self._ui.lineEdit0.text())
valid = ((value == 0) or ((value == 1) and (self._previousIdentifier == self._ui.lineEdit0.text())))
self._ui.lineEdit0.setStyleSheet((DEFAULT_STYLE_SHEET if valid else INVALID_STYLE_SHEET))
non_empty = len(self._ui.lineEditFileLocation.text())
file_path = self._output_location()
if self._workflow_location:
file_path = os.path.join(self._workflow_location, file_path)
location_valid = (non_empty and os.path.isfile(file_path))
self._ui.lineEditFileLocation.setStyleSheet((DEFAULT_STYLE_SHEET if location_valid else INVALID_STYLE_SHEET))
return (valid and location_valid)
| 441,426,544,836,570,000
|
Validate the configuration dialog fields. For any field that is not valid
set the style sheet to the INVALID_STYLE_SHEET. Return the outcome of the
overall validity of the configuration.
|
mapclientplugins/filechooserstep/configuredialog.py
|
validate
|
mapclient-plugins/mapclientplugins.filechooserstep
|
python
|
def validate(self):
'\n Validate the configuration dialog fields. For any field that is not valid\n set the style sheet to the INVALID_STYLE_SHEET. Return the outcome of the\n overall validity of the configuration.\n '
value = self.identifierOccursCount(self._ui.lineEdit0.text())
valid = ((value == 0) or ((value == 1) and (self._previousIdentifier == self._ui.lineEdit0.text())))
self._ui.lineEdit0.setStyleSheet((DEFAULT_STYLE_SHEET if valid else INVALID_STYLE_SHEET))
non_empty = len(self._ui.lineEditFileLocation.text())
file_path = self._output_location()
if self._workflow_location:
file_path = os.path.join(self._workflow_location, file_path)
location_valid = (non_empty and os.path.isfile(file_path))
self._ui.lineEditFileLocation.setStyleSheet((DEFAULT_STYLE_SHEET if location_valid else INVALID_STYLE_SHEET))
return (valid and location_valid)
|
def getConfig(self):
'\n Get the current value of the configuration from the dialog. Also\n set the _previousIdentifier value so that we can check uniqueness of the\n identifier over the whole of the workflow.\n '
self._previousIdentifier = self._ui.lineEdit0.text()
config = {'identifier': self._ui.lineEdit0.text(), 'File': self._output_location()}
if self._previousLocation:
config['previous_location'] = os.path.relpath(self._previousLocation, self._workflow_location)
else:
config['previous_location'] = ''
return config
| -1,545,015,863,487,636,500
|
Get the current value of the configuration from the dialog. Also
set the _previousIdentifier value so that we can check uniqueness of the
identifier over the whole of the workflow.
|
mapclientplugins/filechooserstep/configuredialog.py
|
getConfig
|
mapclient-plugins/mapclientplugins.filechooserstep
|
python
|
def getConfig(self):
'\n Get the current value of the configuration from the dialog. Also\n set the _previousIdentifier value so that we can check uniqueness of the\n identifier over the whole of the workflow.\n '
self._previousIdentifier = self._ui.lineEdit0.text()
config = {'identifier': self._ui.lineEdit0.text(), 'File': self._output_location()}
if self._previousLocation:
config['previous_location'] = os.path.relpath(self._previousLocation, self._workflow_location)
else:
config['previous_location'] =
return config
|
def setConfig(self, config):
'\n Set the current value of the configuration for the dialog. Also\n set the _previousIdentifier value so that we can check uniqueness of the\n identifier over the whole of the workflow.\n '
self._previousIdentifier = config['identifier']
self._ui.lineEdit0.setText(config['identifier'])
self._ui.lineEditFileLocation.setText(config['File'])
if ('previous_location' in config):
self._previousLocation = os.path.join(self._workflow_location, config['previous_location'])
| 5,738,320,274,872,744,000
|
Set the current value of the configuration for the dialog. Also
set the _previousIdentifier value so that we can check uniqueness of the
identifier over the whole of the workflow.
|
mapclientplugins/filechooserstep/configuredialog.py
|
setConfig
|
mapclient-plugins/mapclientplugins.filechooserstep
|
python
|
def setConfig(self, config):
'\n Set the current value of the configuration for the dialog. Also\n set the _previousIdentifier value so that we can check uniqueness of the\n identifier over the whole of the workflow.\n '
self._previousIdentifier = config['identifier']
self._ui.lineEdit0.setText(config['identifier'])
self._ui.lineEditFileLocation.setText(config['File'])
if ('previous_location' in config):
self._previousLocation = os.path.join(self._workflow_location, config['previous_location'])
|
def verify(self, hash, sig):
'Verify a DER signature'
return (ssl.AMBKSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1)
| -2,046,395,906,615,599,900
|
Verify a DER signature
|
test/functional/test_framework/key.py
|
verify
|
Alonewolf-123/AmbankCoin-Core
|
python
|
def verify(self, hash, sig):
return (ssl.AMBKSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1)
|
def parse_python_version(output):
"Parse a Python version output returned by `python --version`.\n\n Return a dict with three keys: major, minor, and micro. Each value is a\n string containing a version part.\n\n Note: The micro part would be `'0'` if it's missing from the input string.\n "
version_pattern = re.compile('\n ^ # Beginning of line.\n Python # Literally "Python".\n \\s # Space.\n (?P<major>\\d+) # Major = one or more digits.\n \\. # Dot.\n (?P<minor>\\d+) # Minor = one or more digits.\n (?: # Unnamed group for dot-micro.\n \\. # Dot.\n (?P<micro>\\d+) # Micro = one or more digit.\n )? # Micro is optional because pypa/pipenv#1893.\n .* # Trailing garbage.\n $ # End of line.\n ', re.VERBOSE)
match = version_pattern.match(output)
if (not match):
return None
return match.groupdict(default='0')
| -7,576,245,133,647,391,000
|
Parse a Python version output returned by `python --version`.
Return a dict with three keys: major, minor, and micro. Each value is a
string containing a version part.
Note: The micro part would be `'0'` if it's missing from the input string.
|
pipenv/utils.py
|
parse_python_version
|
bryant1410/pipenv
|
python
|
def parse_python_version(output):
"Parse a Python version output returned by `python --version`.\n\n Return a dict with three keys: major, minor, and micro. Each value is a\n string containing a version part.\n\n Note: The micro part would be `'0'` if it's missing from the input string.\n "
version_pattern = re.compile('\n ^ # Beginning of line.\n Python # Literally "Python".\n \\s # Space.\n (?P<major>\\d+) # Major = one or more digits.\n \\. # Dot.\n (?P<minor>\\d+) # Minor = one or more digits.\n (?: # Unnamed group for dot-micro.\n \\. # Dot.\n (?P<micro>\\d+) # Micro = one or more digit.\n )? # Micro is optional because pypa/pipenv#1893.\n .* # Trailing garbage.\n $ # End of line.\n ', re.VERBOSE)
match = version_pattern.match(output)
if (not match):
return None
return match.groupdict(default='0')
|
def escape_grouped_arguments(s):
'Prepares a string for the shell (on Windows too!)\n\n Only for use on grouped arguments (passed as a string to Popen)\n '
if (s is None):
return None
if (os.name == 'nt'):
s = '{}'.format(s.replace('\\', '\\\\'))
return (('"' + s.replace("'", "'\\''")) + '"')
| 2,562,507,320,774,941,700
|
Prepares a string for the shell (on Windows too!)
Only for use on grouped arguments (passed as a string to Popen)
|
pipenv/utils.py
|
escape_grouped_arguments
|
bryant1410/pipenv
|
python
|
def escape_grouped_arguments(s):
'Prepares a string for the shell (on Windows too!)\n\n Only for use on grouped arguments (passed as a string to Popen)\n '
if (s is None):
return None
if (os.name == 'nt'):
s = '{}'.format(s.replace('\\', '\\\\'))
return (('"' + s.replace("'", "'\\")) + '"')
|
def clean_pkg_version(version):
'Uses pip to prepare a package version string, from our internal version.'
return six.u(pep440_version(str(version).replace('==', '')))
| 1,798,999,973,971,679,200
|
Uses pip to prepare a package version string, from our internal version.
|
pipenv/utils.py
|
clean_pkg_version
|
bryant1410/pipenv
|
python
|
def clean_pkg_version(version):
return six.u(pep440_version(str(version).replace('==', )))
|
def resolve_deps(deps, which, project, sources=None, verbose=False, python=False, clear=False, pre=False, allow_global=False):
'Given a list of dependencies, return a resolved list of dependencies,\n using pip-tools -- and their hashes, using the warehouse API / pip9.\n '
index_lookup = {}
markers_lookup = {}
python_path = which('python', allow_global=allow_global)
backup_python_path = sys.executable
results = []
with HackedPythonVersion(python_version=python, python_path=python_path):
try:
(resolved_tree, resolver) = actually_resolve_reps(deps, index_lookup, markers_lookup, project, sources, verbose, clear, pre)
except RuntimeError:
resolved_tree = None
if (resolved_tree is None):
with HackedPythonVersion(python_version='.'.join([str(s) for s in sys.version_info[:3]]), python_path=backup_python_path):
try:
(resolved_tree, resolver) = actually_resolve_reps(deps, index_lookup, markers_lookup, project, sources, verbose, clear, pre)
except RuntimeError:
sys.exit(1)
for result in resolved_tree:
if (not result.editable):
name = pep423_name(result.name)
version = clean_pkg_version(result.specifier)
index = index_lookup.get(result.name)
if (not markers_lookup.get(result.name)):
markers = (str(result.markers) if (result.markers and ('extra' not in str(result.markers))) else None)
else:
markers = markers_lookup.get(result.name)
collected_hashes = []
if any(((('python.org' in source['url']) or ('pypi.org' in source['url'])) for source in sources)):
try:
r = requests.get('https://pypi.org/pypi/{0}/json'.format(name), timeout=10)
api_releases = r.json()['releases']
cleaned_releases = {}
for (api_version, api_info) in api_releases.items():
cleaned_releases[clean_pkg_version(api_version)] = api_info
for release in cleaned_releases[version]:
collected_hashes.append(release['digests']['sha256'])
collected_hashes = [('sha256:' + s) for s in collected_hashes]
except (ValueError, KeyError, ConnectionError):
if verbose:
click.echo('{0}: Error generating hash for {1}'.format(crayons.red('Warning', bold=True), name))
try:
collected_hashes = (collected_hashes + list(list(resolver.resolve_hashes([result]).items())[0][1]))
except (ValueError, KeyError, ConnectionError, IndexError):
if verbose:
print('Error generating hash for {}'.format(name))
collected_hashes = sorted(set(collected_hashes))
d = {'name': name, 'version': version, 'hashes': collected_hashes}
if index:
d.update({'index': index})
if markers:
d.update({'markers': markers.replace('"', "'")})
results.append(d)
return results
| 5,097,824,507,640,910,000
|
Given a list of dependencies, return a resolved list of dependencies,
using pip-tools -- and their hashes, using the warehouse API / pip9.
|
pipenv/utils.py
|
resolve_deps
|
bryant1410/pipenv
|
python
|
def resolve_deps(deps, which, project, sources=None, verbose=False, python=False, clear=False, pre=False, allow_global=False):
'Given a list of dependencies, return a resolved list of dependencies,\n using pip-tools -- and their hashes, using the warehouse API / pip9.\n '
index_lookup = {}
markers_lookup = {}
python_path = which('python', allow_global=allow_global)
backup_python_path = sys.executable
results = []
with HackedPythonVersion(python_version=python, python_path=python_path):
try:
(resolved_tree, resolver) = actually_resolve_reps(deps, index_lookup, markers_lookup, project, sources, verbose, clear, pre)
except RuntimeError:
resolved_tree = None
if (resolved_tree is None):
with HackedPythonVersion(python_version='.'.join([str(s) for s in sys.version_info[:3]]), python_path=backup_python_path):
try:
(resolved_tree, resolver) = actually_resolve_reps(deps, index_lookup, markers_lookup, project, sources, verbose, clear, pre)
except RuntimeError:
sys.exit(1)
for result in resolved_tree:
if (not result.editable):
name = pep423_name(result.name)
version = clean_pkg_version(result.specifier)
index = index_lookup.get(result.name)
if (not markers_lookup.get(result.name)):
markers = (str(result.markers) if (result.markers and ('extra' not in str(result.markers))) else None)
else:
markers = markers_lookup.get(result.name)
collected_hashes = []
if any(((('python.org' in source['url']) or ('pypi.org' in source['url'])) for source in sources)):
try:
r = requests.get('https://pypi.org/pypi/{0}/json'.format(name), timeout=10)
api_releases = r.json()['releases']
cleaned_releases = {}
for (api_version, api_info) in api_releases.items():
cleaned_releases[clean_pkg_version(api_version)] = api_info
for release in cleaned_releases[version]:
collected_hashes.append(release['digests']['sha256'])
collected_hashes = [('sha256:' + s) for s in collected_hashes]
except (ValueError, KeyError, ConnectionError):
if verbose:
click.echo('{0}: Error generating hash for {1}'.format(crayons.red('Warning', bold=True), name))
try:
collected_hashes = (collected_hashes + list(list(resolver.resolve_hashes([result]).items())[0][1]))
except (ValueError, KeyError, ConnectionError, IndexError):
if verbose:
print('Error generating hash for {}'.format(name))
collected_hashes = sorted(set(collected_hashes))
d = {'name': name, 'version': version, 'hashes': collected_hashes}
if index:
d.update({'index': index})
if markers:
d.update({'markers': markers.replace('"', "'")})
results.append(d)
return results
|
def multi_split(s, split):
'Splits on multiple given separators.'
for r in split:
s = s.replace(r, '|')
return [i for i in s.split('|') if (len(i) > 0)]
| -6,995,361,326,840,965,000
|
Splits on multiple given separators.
|
pipenv/utils.py
|
multi_split
|
bryant1410/pipenv
|
python
|
def multi_split(s, split):
for r in split:
s = s.replace(r, '|')
return [i for i in s.split('|') if (len(i) > 0)]
|
def convert_deps_from_pip(dep):
'"Converts a pip-formatted dependency to a Pipfile-formatted one.'
dependency = {}
req = get_requirement(dep)
extras = {'extras': req.extras}
if ((req.uri or req.path or is_installable_file(req.name)) and (not req.vcs)):
if ((not req.uri) and (not req.path)):
req.path = os.path.abspath(req.name)
hashable_path = (req.uri if req.uri else req.path)
if (not req.name):
req.name = hashlib.sha256(hashable_path.encode('utf-8')).hexdigest()
req.name = req.name[(len(req.name) - 7):]
if req.uri:
dependency[req.name] = {'file': hashable_path}
else:
dependency[req.name] = {'path': hashable_path}
if req.extras:
dependency[req.name].update(extras)
if req.editable:
dependency[req.name].update({'editable': True})
elif req.vcs:
if (req.name is None):
raise ValueError('pipenv requires an #egg fragment for version controlled dependencies. Please install remote dependency in the form {0}#egg=<package-name>.'.format(req.uri))
if req.uri.startswith('{0}+'.format(req.vcs)):
req.uri = req.uri[(len(req.vcs) + 1):]
dependency.setdefault(req.name, {}).update({req.vcs: req.uri})
if req.editable:
dependency[req.name].update({'editable': True})
if req.subdirectory:
dependency[req.name].update({'subdirectory': req.subdirectory})
if req.revision:
dependency[req.name].update({'ref': req.revision})
if req.extras:
dependency[req.name].update({'extras': req.extras})
elif (req.extras or req.specs or hasattr(req, 'markers')):
specs = None
if req.specs:
r = multi_split(dep, '!=<>~')
specs = dep[len(r[0]):]
dependency[req.name] = specs
if req.extras:
dependency[req.name] = extras
if specs:
dependency[req.name].update({'version': specs})
if hasattr(req, 'markers'):
if isinstance(dependency[req.name], six.string_types):
dependency[req.name] = {'version': specs}
dependency[req.name].update({'markers': req.markers})
else:
dependency[dep] = '*'
if (len(dependency) > 1):
for key in dependency.copy():
if (not hasattr(dependency[key], 'keys')):
del dependency[key]
return dependency
| 6,363,460,669,016,941,000
|
"Converts a pip-formatted dependency to a Pipfile-formatted one.
|
pipenv/utils.py
|
convert_deps_from_pip
|
bryant1410/pipenv
|
python
|
def convert_deps_from_pip(dep):
dependency = {}
req = get_requirement(dep)
extras = {'extras': req.extras}
if ((req.uri or req.path or is_installable_file(req.name)) and (not req.vcs)):
if ((not req.uri) and (not req.path)):
req.path = os.path.abspath(req.name)
hashable_path = (req.uri if req.uri else req.path)
if (not req.name):
req.name = hashlib.sha256(hashable_path.encode('utf-8')).hexdigest()
req.name = req.name[(len(req.name) - 7):]
if req.uri:
dependency[req.name] = {'file': hashable_path}
else:
dependency[req.name] = {'path': hashable_path}
if req.extras:
dependency[req.name].update(extras)
if req.editable:
dependency[req.name].update({'editable': True})
elif req.vcs:
if (req.name is None):
raise ValueError('pipenv requires an #egg fragment for version controlled dependencies. Please install remote dependency in the form {0}#egg=<package-name>.'.format(req.uri))
if req.uri.startswith('{0}+'.format(req.vcs)):
req.uri = req.uri[(len(req.vcs) + 1):]
dependency.setdefault(req.name, {}).update({req.vcs: req.uri})
if req.editable:
dependency[req.name].update({'editable': True})
if req.subdirectory:
dependency[req.name].update({'subdirectory': req.subdirectory})
if req.revision:
dependency[req.name].update({'ref': req.revision})
if req.extras:
dependency[req.name].update({'extras': req.extras})
elif (req.extras or req.specs or hasattr(req, 'markers')):
specs = None
if req.specs:
r = multi_split(dep, '!=<>~')
specs = dep[len(r[0]):]
dependency[req.name] = specs
if req.extras:
dependency[req.name] = extras
if specs:
dependency[req.name].update({'version': specs})
if hasattr(req, 'markers'):
if isinstance(dependency[req.name], six.string_types):
dependency[req.name] = {'version': specs}
dependency[req.name].update({'markers': req.markers})
else:
dependency[dep] = '*'
if (len(dependency) > 1):
for key in dependency.copy():
if (not hasattr(dependency[key], 'keys')):
del dependency[key]
return dependency
|
def convert_deps_to_pip(deps, project=None, r=True, include_index=False):
'"Converts a Pipfile-formatted dependency to a pip-formatted one.'
dependencies = []
for dep in deps.keys():
extra = (deps[dep] if isinstance(deps[dep], six.string_types) else '')
version = ''
index = ''
if (is_star(deps[dep]) or (str(extra) == '{}')):
extra = ''
hash = ''
if ('hash' in deps[dep]):
hash = ' --hash={0}'.format(deps[dep]['hash'])
if ('hashes' in deps[dep]):
hash = '{0} '.format(''.join([' --hash={0} '.format(h) for h in deps[dep]['hashes']]))
if ('extras' in deps[dep]):
extra = '[{0}]'.format(','.join(deps[dep]['extras']))
if ('version' in deps[dep]):
if (not is_star(deps[dep]['version'])):
version = deps[dep]['version']
if ('markers' in deps[dep]):
specs = '; {0}'.format(deps[dep]['markers'])
else:
specs = []
for specifier in specifiers:
if (specifier in deps[dep]):
if (not is_star(deps[dep][specifier])):
specs.append('{0} {1}'.format(specifier, deps[dep][specifier]))
if specs:
specs = '; {0}'.format(' and '.join(specs))
else:
specs = ''
if (include_index and (not is_file(deps[dep])) and (not is_vcs(deps[dep]))):
pip_src_args = []
if ('index' in deps[dep]):
pip_src_args = [project.get_source(deps[dep]['index'])]
else:
pip_src_args = project.sources
pip_args = prepare_pip_source_args(pip_src_args)
index = ' '.join(pip_args)
maybe_vcs = [vcs for vcs in VCS_LIST if (vcs in deps[dep])]
vcs = (maybe_vcs[0] if maybe_vcs else None)
if ('file' in deps[dep]):
extra = '{1}{0}'.format(extra, deps[dep]['file']).strip()
if ('editable' in deps[dep]):
dep = '-e '
else:
dep = ''
elif ('path' in deps[dep]):
extra = '{1}{0}'.format(extra, deps[dep]['path']).strip()
if ('editable' in deps[dep]):
dep = '-e '
else:
dep = ''
if vcs:
extra = '{0}+{1}'.format(vcs, deps[dep][vcs])
if ('ref' in deps[dep]):
extra += '@{0}'.format(deps[dep]['ref'])
extra += '#egg={0}'.format(dep)
if ('subdirectory' in deps[dep]):
extra += '&subdirectory={0}'.format(deps[dep]['subdirectory'])
if ('editable' in deps[dep]):
dep = '-e '
else:
dep = ''
s = '{0}{1}{2}{3}{4} {5}'.format(dep, extra, version, specs, hash, index).strip()
dependencies.append(s)
if (not r):
return dependencies
f = tempfile.NamedTemporaryFile(suffix='-requirements.txt', delete=False)
f.write('\n'.join(dependencies).encode('utf-8'))
f.close()
return f.name
| 3,140,597,842,437,439,500
|
"Converts a Pipfile-formatted dependency to a pip-formatted one.
|
pipenv/utils.py
|
convert_deps_to_pip
|
bryant1410/pipenv
|
python
|
def convert_deps_to_pip(deps, project=None, r=True, include_index=False):
dependencies = []
for dep in deps.keys():
extra = (deps[dep] if isinstance(deps[dep], six.string_types) else )
version =
index =
if (is_star(deps[dep]) or (str(extra) == '{}')):
extra =
hash =
if ('hash' in deps[dep]):
hash = ' --hash={0}'.format(deps[dep]['hash'])
if ('hashes' in deps[dep]):
hash = '{0} '.format(.join([' --hash={0} '.format(h) for h in deps[dep]['hashes']]))
if ('extras' in deps[dep]):
extra = '[{0}]'.format(','.join(deps[dep]['extras']))
if ('version' in deps[dep]):
if (not is_star(deps[dep]['version'])):
version = deps[dep]['version']
if ('markers' in deps[dep]):
specs = '; {0}'.format(deps[dep]['markers'])
else:
specs = []
for specifier in specifiers:
if (specifier in deps[dep]):
if (not is_star(deps[dep][specifier])):
specs.append('{0} {1}'.format(specifier, deps[dep][specifier]))
if specs:
specs = '; {0}'.format(' and '.join(specs))
else:
specs =
if (include_index and (not is_file(deps[dep])) and (not is_vcs(deps[dep]))):
pip_src_args = []
if ('index' in deps[dep]):
pip_src_args = [project.get_source(deps[dep]['index'])]
else:
pip_src_args = project.sources
pip_args = prepare_pip_source_args(pip_src_args)
index = ' '.join(pip_args)
maybe_vcs = [vcs for vcs in VCS_LIST if (vcs in deps[dep])]
vcs = (maybe_vcs[0] if maybe_vcs else None)
if ('file' in deps[dep]):
extra = '{1}{0}'.format(extra, deps[dep]['file']).strip()
if ('editable' in deps[dep]):
dep = '-e '
else:
dep =
elif ('path' in deps[dep]):
extra = '{1}{0}'.format(extra, deps[dep]['path']).strip()
if ('editable' in deps[dep]):
dep = '-e '
else:
dep =
if vcs:
extra = '{0}+{1}'.format(vcs, deps[dep][vcs])
if ('ref' in deps[dep]):
extra += '@{0}'.format(deps[dep]['ref'])
extra += '#egg={0}'.format(dep)
if ('subdirectory' in deps[dep]):
extra += '&subdirectory={0}'.format(deps[dep]['subdirectory'])
if ('editable' in deps[dep]):
dep = '-e '
else:
dep =
s = '{0}{1}{2}{3}{4} {5}'.format(dep, extra, version, specs, hash, index).strip()
dependencies.append(s)
if (not r):
return dependencies
f = tempfile.NamedTemporaryFile(suffix='-requirements.txt', delete=False)
f.write('\n'.join(dependencies).encode('utf-8'))
f.close()
return f.name
|
def mkdir_p(newdir):
'works the way a good mkdir should :)\n - already exists, silently complete\n - regular file in the way, raise an exception\n - parent directory(ies) does not exist, make them as well\n From: http://code.activestate.com/recipes/82465-a-friendly-mkdir/\n '
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError("a file with the same name as the desired dir, '{0}', already exists.".format(newdir))
else:
(head, tail) = os.path.split(newdir)
if (head and (not os.path.isdir(head))):
mkdir_p(head)
if tail:
os.mkdir(newdir)
| -8,025,579,765,829,738,000
|
works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
From: http://code.activestate.com/recipes/82465-a-friendly-mkdir/
|
pipenv/utils.py
|
mkdir_p
|
bryant1410/pipenv
|
python
|
def mkdir_p(newdir):
'works the way a good mkdir should :)\n - already exists, silently complete\n - regular file in the way, raise an exception\n - parent directory(ies) does not exist, make them as well\n From: http://code.activestate.com/recipes/82465-a-friendly-mkdir/\n '
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError("a file with the same name as the desired dir, '{0}', already exists.".format(newdir))
else:
(head, tail) = os.path.split(newdir)
if (head and (not os.path.isdir(head))):
mkdir_p(head)
if tail:
os.mkdir(newdir)
|
def is_required_version(version, specified_version):
"Check to see if there's a hard requirement for version\n number provided in the Pipfile.\n "
if isinstance(specified_version, dict):
specified_version = specified_version.get('version', '')
if specified_version.startswith('=='):
return (version.strip() == specified_version.split('==')[1].strip())
return True
| 3,528,375,736,170,234,000
|
Check to see if there's a hard requirement for version
number provided in the Pipfile.
|
pipenv/utils.py
|
is_required_version
|
bryant1410/pipenv
|
python
|
def is_required_version(version, specified_version):
"Check to see if there's a hard requirement for version\n number provided in the Pipfile.\n "
if isinstance(specified_version, dict):
specified_version = specified_version.get('version', )
if specified_version.startswith('=='):
return (version.strip() == specified_version.split('==')[1].strip())
return True
|
def strip_ssh_from_git_uri(uri):
'Return git+ssh:// formatted URI to git+git@ format'
if isinstance(uri, six.string_types):
uri = uri.replace('git+ssh://', 'git+')
return uri
| -5,153,976,107,256,773,000
|
Return git+ssh:// formatted URI to git+git@ format
|
pipenv/utils.py
|
strip_ssh_from_git_uri
|
bryant1410/pipenv
|
python
|
def strip_ssh_from_git_uri(uri):
if isinstance(uri, six.string_types):
uri = uri.replace('git+ssh://', 'git+')
return uri
|
def clean_git_uri(uri):
'Cleans VCS uris from pip9 format'
if isinstance(uri, six.string_types):
if (uri.startswith('git+') and ('://' not in uri)):
uri = uri.replace('git+', 'git+ssh://')
return uri
| 8,837,214,570,924,101,000
|
Cleans VCS uris from pip9 format
|
pipenv/utils.py
|
clean_git_uri
|
bryant1410/pipenv
|
python
|
def clean_git_uri(uri):
if isinstance(uri, six.string_types):
if (uri.startswith('git+') and ('://' not in uri)):
uri = uri.replace('git+', 'git+ssh://')
return uri
|
def is_installable_file(path):
'Determine if a path can potentially be installed'
from .vendor.pip9.utils import is_installable_dir
from .vendor.pip9.utils.packaging import specifiers
if (hasattr(path, 'keys') and any((key for key in path.keys() if (key in ['file', 'path'])))):
path = (urlparse(path['file']).path if ('file' in path) else path['path'])
if ((not isinstance(path, six.string_types)) or (path == '*')):
return False
if any((path.startswith(spec) for spec in '!=<>~')):
try:
specifiers.SpecifierSet(path)
except specifiers.InvalidSpecifier:
pass
else:
return False
if (not os.path.exists(os.path.abspath(path))):
return False
lookup_path = Path(path)
absolute_path = '{0}'.format(lookup_path.absolute())
if (lookup_path.is_dir() and is_installable_dir(absolute_path)):
return True
elif (lookup_path.is_file() and is_archive_file(absolute_path)):
return True
return False
| -8,326,956,013,517,452,000
|
Determine if a path can potentially be installed
|
pipenv/utils.py
|
is_installable_file
|
bryant1410/pipenv
|
python
|
def is_installable_file(path):
from .vendor.pip9.utils import is_installable_dir
from .vendor.pip9.utils.packaging import specifiers
if (hasattr(path, 'keys') and any((key for key in path.keys() if (key in ['file', 'path'])))):
path = (urlparse(path['file']).path if ('file' in path) else path['path'])
if ((not isinstance(path, six.string_types)) or (path == '*')):
return False
if any((path.startswith(spec) for spec in '!=<>~')):
try:
specifiers.SpecifierSet(path)
except specifiers.InvalidSpecifier:
pass
else:
return False
if (not os.path.exists(os.path.abspath(path))):
return False
lookup_path = Path(path)
absolute_path = '{0}'.format(lookup_path.absolute())
if (lookup_path.is_dir() and is_installable_dir(absolute_path)):
return True
elif (lookup_path.is_file() and is_archive_file(absolute_path)):
return True
return False
|
def is_file(package):
'Determine if a package name is for a File dependency.'
if hasattr(package, 'keys'):
return any((key for key in package.keys() if (key in ['file', 'path'])))
if os.path.exists(str(package)):
return True
for start in SCHEME_LIST:
if str(package).startswith(start):
return True
return False
| 1,091,657,782,702,303,400
|
Determine if a package name is for a File dependency.
|
pipenv/utils.py
|
is_file
|
bryant1410/pipenv
|
python
|
def is_file(package):
if hasattr(package, 'keys'):
return any((key for key in package.keys() if (key in ['file', 'path'])))
if os.path.exists(str(package)):
return True
for start in SCHEME_LIST:
if str(package).startswith(start):
return True
return False
|
def pep440_version(version):
'Normalize version to PEP 440 standards'
from .vendor.pip9.index import parse_version
return str(parse_version(version))
| 5,361,031,010,979,994,000
|
Normalize version to PEP 440 standards
|
pipenv/utils.py
|
pep440_version
|
bryant1410/pipenv
|
python
|
def pep440_version(version):
from .vendor.pip9.index import parse_version
return str(parse_version(version))
|
def pep423_name(name):
'Normalize package name to PEP 423 style standard.'
name = name.lower()
if any(((i not in name) for i in (VCS_LIST + SCHEME_LIST))):
return name.replace('_', '-')
else:
return name
| 6,748,167,606,597,170,000
|
Normalize package name to PEP 423 style standard.
|
pipenv/utils.py
|
pep423_name
|
bryant1410/pipenv
|
python
|
def pep423_name(name):
name = name.lower()
if any(((i not in name) for i in (VCS_LIST + SCHEME_LIST))):
return name.replace('_', '-')
else:
return name
|
def proper_case(package_name):
'Properly case project name from pypi.org.'
r = requests.get('https://pypi.org/pypi/{0}/json'.format(package_name), timeout=0.3, stream=True)
if (not r.ok):
raise IOError('Unable to find package {0} in PyPI repository.'.format(package_name))
r = parse.parse('https://pypi.org/pypi/{name}/json', r.url)
good_name = r['name']
return good_name
| 5,332,965,172,988,998,000
|
Properly case project name from pypi.org.
|
pipenv/utils.py
|
proper_case
|
bryant1410/pipenv
|
python
|
def proper_case(package_name):
r = requests.get('https://pypi.org/pypi/{0}/json'.format(package_name), timeout=0.3, stream=True)
if (not r.ok):
raise IOError('Unable to find package {0} in PyPI repository.'.format(package_name))
r = parse.parse('https://pypi.org/pypi/{name}/json', r.url)
good_name = r['name']
return good_name
|
def split_section(input_file, section_suffix, test_function):
'\n Split a pipfile or a lockfile section out by section name and test function\n\n :param dict input_file: A dictionary containing either a pipfile or lockfile\n :param str section_suffix: A string of the name of the section\n :param func test_function: A test function to test against the value in the key/value pair\n\n >>> split_section(my_lockfile, \'vcs\', is_vcs)\n {\n \'default\': {\n "six": {\n "hashes": [\n "sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb",\n "sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9"\n ],\n "version": "==1.11.0"\n }\n },\n \'default-vcs\': {\n "e1839a8": {\n "editable": true,\n "path": "."\n }\n }\n }\n '
pipfile_sections = ('packages', 'dev-packages')
lockfile_sections = ('default', 'develop')
if any(((section in input_file) for section in pipfile_sections)):
sections = pipfile_sections
elif any(((section in input_file) for section in lockfile_sections)):
sections = lockfile_sections
else:
return input_file
for section in sections:
split_dict = {}
entries = input_file.get(section, {})
for k in list(entries.keys()):
if test_function(entries.get(k)):
split_dict[k] = entries.pop(k)
input_file['-'.join([section, section_suffix])] = split_dict
return input_file
| 3,888,405,553,536,379,400
|
Split a pipfile or a lockfile section out by section name and test function
:param dict input_file: A dictionary containing either a pipfile or lockfile
:param str section_suffix: A string of the name of the section
:param func test_function: A test function to test against the value in the key/value pair
>>> split_section(my_lockfile, 'vcs', is_vcs)
{
'default': {
"six": {
"hashes": [
"sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb",
"sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9"
],
"version": "==1.11.0"
}
},
'default-vcs': {
"e1839a8": {
"editable": true,
"path": "."
}
}
}
|
pipenv/utils.py
|
split_section
|
bryant1410/pipenv
|
python
|
def split_section(input_file, section_suffix, test_function):
'\n Split a pipfile or a lockfile section out by section name and test function\n\n :param dict input_file: A dictionary containing either a pipfile or lockfile\n :param str section_suffix: A string of the name of the section\n :param func test_function: A test function to test against the value in the key/value pair\n\n >>> split_section(my_lockfile, \'vcs\', is_vcs)\n {\n \'default\': {\n "six": {\n "hashes": [\n "sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb",\n "sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9"\n ],\n "version": "==1.11.0"\n }\n },\n \'default-vcs\': {\n "e1839a8": {\n "editable": true,\n "path": "."\n }\n }\n }\n '
pipfile_sections = ('packages', 'dev-packages')
lockfile_sections = ('default', 'develop')
if any(((section in input_file) for section in pipfile_sections)):
sections = pipfile_sections
elif any(((section in input_file) for section in lockfile_sections)):
sections = lockfile_sections
else:
return input_file
for section in sections:
split_dict = {}
entries = input_file.get(section, {})
for k in list(entries.keys()):
if test_function(entries.get(k)):
split_dict[k] = entries.pop(k)
input_file['-'.join([section, section_suffix])] = split_dict
return input_file
|
def split_file(file_dict):
'Split VCS and editable dependencies out from file.'
sections = {'vcs': is_vcs, 'editable': (lambda x: (hasattr(x, 'keys') and x.get('editable')))}
for (k, func) in sections.items():
file_dict = split_section(file_dict, k, func)
return file_dict
| 1,330,811,071,559,589,000
|
Split VCS and editable dependencies out from file.
|
pipenv/utils.py
|
split_file
|
bryant1410/pipenv
|
python
|
def split_file(file_dict):
sections = {'vcs': is_vcs, 'editable': (lambda x: (hasattr(x, 'keys') and x.get('editable')))}
for (k, func) in sections.items():
file_dict = split_section(file_dict, k, func)
return file_dict
|
def merge_deps(file_dict, project, dev=False, requirements=False, ignore_hashes=False, blocking=False, only=False):
'\n Given a file_dict, merges dependencies and converts them to pip dependency lists.\n :param dict file_dict: The result of calling :func:`pipenv.utils.split_file`\n :param :class:`pipenv.project.Project` project: Pipenv project\n :param bool dev=False: Flag indicating whether dev dependencies are to be installed\n :param bool requirements=False: Flag indicating whether to use a requirements file\n :param bool ignore_hashes=False:\n :param bool blocking=False:\n :param bool only=False:\n :return: Pip-converted 3-tuples of [deps, requirements_deps]\n '
deps = []
requirements_deps = []
for section in list(file_dict.keys()):
(section_name, suffix) = (section.rsplit('-', 1) if (('-' in section) and (not (section == 'dev-packages'))) else (section, None))
if ((not file_dict[section]) or (section_name not in ('dev-packages', 'packages', 'default', 'develop'))):
continue
is_dev = (section_name in ('dev-packages', 'develop'))
if (is_dev and (not dev)):
continue
if ignore_hashes:
for (k, v) in file_dict[section]:
if ('hash' in v):
del v['hash']
no_hashes = (True if suffix else ignore_hashes)
block = (True if suffix else blocking)
include_index = (True if (not suffix) else False)
converted = convert_deps_to_pip(file_dict[section], project, r=False, include_index=include_index)
deps.extend(((d, no_hashes, block) for d in converted))
if (dev and is_dev and requirements):
requirements_deps.extend(((d, no_hashes, block) for d in converted))
return (deps, requirements_deps)
| 6,053,193,627,376,801,000
|
Given a file_dict, merges dependencies and converts them to pip dependency lists.
:param dict file_dict: The result of calling :func:`pipenv.utils.split_file`
:param :class:`pipenv.project.Project` project: Pipenv project
:param bool dev=False: Flag indicating whether dev dependencies are to be installed
:param bool requirements=False: Flag indicating whether to use a requirements file
:param bool ignore_hashes=False:
:param bool blocking=False:
:param bool only=False:
:return: Pip-converted 3-tuples of [deps, requirements_deps]
|
pipenv/utils.py
|
merge_deps
|
bryant1410/pipenv
|
python
|
def merge_deps(file_dict, project, dev=False, requirements=False, ignore_hashes=False, blocking=False, only=False):
'\n Given a file_dict, merges dependencies and converts them to pip dependency lists.\n :param dict file_dict: The result of calling :func:`pipenv.utils.split_file`\n :param :class:`pipenv.project.Project` project: Pipenv project\n :param bool dev=False: Flag indicating whether dev dependencies are to be installed\n :param bool requirements=False: Flag indicating whether to use a requirements file\n :param bool ignore_hashes=False:\n :param bool blocking=False:\n :param bool only=False:\n :return: Pip-converted 3-tuples of [deps, requirements_deps]\n '
deps = []
requirements_deps = []
for section in list(file_dict.keys()):
(section_name, suffix) = (section.rsplit('-', 1) if (('-' in section) and (not (section == 'dev-packages'))) else (section, None))
if ((not file_dict[section]) or (section_name not in ('dev-packages', 'packages', 'default', 'develop'))):
continue
is_dev = (section_name in ('dev-packages', 'develop'))
if (is_dev and (not dev)):
continue
if ignore_hashes:
for (k, v) in file_dict[section]:
if ('hash' in v):
del v['hash']
no_hashes = (True if suffix else ignore_hashes)
block = (True if suffix else blocking)
include_index = (True if (not suffix) else False)
converted = convert_deps_to_pip(file_dict[section], project, r=False, include_index=include_index)
deps.extend(((d, no_hashes, block) for d in converted))
if (dev and is_dev and requirements):
requirements_deps.extend(((d, no_hashes, block) for d in converted))
return (deps, requirements_deps)
|
def recase_file(file_dict):
'Recase file before writing to output.'
if (('packages' in file_dict) or ('dev-packages' in file_dict)):
sections = ('packages', 'dev-packages')
elif (('default' in file_dict) or ('develop' in file_dict)):
sections = ('default', 'develop')
for section in sections:
file_section = file_dict.get(section, {})
for key in list(file_section.keys()):
try:
cased_key = proper_case(key)
except IOError:
cased_key = key
file_section[cased_key] = file_section.pop(key)
return file_dict
| -392,200,137,092,393,150
|
Recase file before writing to output.
|
pipenv/utils.py
|
recase_file
|
bryant1410/pipenv
|
python
|
def recase_file(file_dict):
if (('packages' in file_dict) or ('dev-packages' in file_dict)):
sections = ('packages', 'dev-packages')
elif (('default' in file_dict) or ('develop' in file_dict)):
sections = ('default', 'develop')
for section in sections:
file_section = file_dict.get(section, {})
for key in list(file_section.keys()):
try:
cased_key = proper_case(key)
except IOError:
cased_key = key
file_section[cased_key] = file_section.pop(key)
return file_dict
|
def get_windows_path(*args):
'Sanitize a path for windows environments\n\n Accepts an arbitrary list of arguments and makes a clean windows path'
return os.path.normpath(os.path.join(*args))
| -5,803,461,582,242,583,000
|
Sanitize a path for windows environments
Accepts an arbitrary list of arguments and makes a clean windows path
|
pipenv/utils.py
|
get_windows_path
|
bryant1410/pipenv
|
python
|
def get_windows_path(*args):
'Sanitize a path for windows environments\n\n Accepts an arbitrary list of arguments and makes a clean windows path'
return os.path.normpath(os.path.join(*args))
|
def find_windows_executable(bin_path, exe_name):
'Given an executable name, search the given location for an executable'
requested_path = get_windows_path(bin_path, exe_name)
if os.path.exists(requested_path):
return requested_path
exe_name = os.path.splitext(exe_name)[0]
files = ['{0}.{1}'.format(exe_name, ext) for ext in ['', 'py', 'exe', 'bat']]
exec_paths = [get_windows_path(bin_path, f) for f in files]
exec_files = [filename for filename in exec_paths if os.path.isfile(filename)]
if exec_files:
return exec_files[0]
return find_executable(exe_name)
| -2,987,833,260,518,996,000
|
Given an executable name, search the given location for an executable
|
pipenv/utils.py
|
find_windows_executable
|
bryant1410/pipenv
|
python
|
def find_windows_executable(bin_path, exe_name):
requested_path = get_windows_path(bin_path, exe_name)
if os.path.exists(requested_path):
return requested_path
exe_name = os.path.splitext(exe_name)[0]
    files = ['{0}.{1}'.format(exe_name, ext) for ext in ['', 'py', 'exe', 'bat']]
exec_paths = [get_windows_path(bin_path, f) for f in files]
exec_files = [filename for filename in exec_paths if os.path.isfile(filename)]
if exec_files:
return exec_files[0]
return find_executable(exe_name)
|
def get_converted_relative_path(path, relative_to=os.curdir):
'Given a vague relative path, return the path relative to the given location'
return os.path.join('.', os.path.relpath(path, start=relative_to))
| -8,656,903,140,058,767,000
|
Given a vague relative path, return the path relative to the given location
|
pipenv/utils.py
|
get_converted_relative_path
|
bryant1410/pipenv
|
python
|
def get_converted_relative_path(path, relative_to=os.curdir):
return os.path.join('.', os.path.relpath(path, start=relative_to))
|
def walk_up(bottom):
"Mimic os.walk, but walk 'up' instead of down the directory tree.\n From: https://gist.github.com/zdavkeos/1098474\n "
bottom = os.path.realpath(bottom)
try:
names = os.listdir(bottom)
except Exception:
return
(dirs, nondirs) = ([], [])
for name in names:
if os.path.isdir(os.path.join(bottom, name)):
dirs.append(name)
else:
nondirs.append(name)
(yield (bottom, dirs, nondirs))
new_path = os.path.realpath(os.path.join(bottom, '..'))
if (new_path == bottom):
return
for x in walk_up(new_path):
(yield x)
| -7,195,392,152,588,847,000
|
Mimic os.walk, but walk 'up' instead of down the directory tree.
From: https://gist.github.com/zdavkeos/1098474
|
pipenv/utils.py
|
walk_up
|
bryant1410/pipenv
|
python
|
def walk_up(bottom):
"Mimic os.walk, but walk 'up' instead of down the directory tree.\n From: https://gist.github.com/zdavkeos/1098474\n "
bottom = os.path.realpath(bottom)
try:
names = os.listdir(bottom)
except Exception:
return
(dirs, nondirs) = ([], [])
for name in names:
if os.path.isdir(os.path.join(bottom, name)):
dirs.append(name)
else:
nondirs.append(name)
(yield (bottom, dirs, nondirs))
new_path = os.path.realpath(os.path.join(bottom, '..'))
if (new_path == bottom):
return
for x in walk_up(new_path):
(yield x)
|
def find_requirements(max_depth=3):
'Returns the path of a Pipfile in parent directories.'
i = 0
for (c, d, f) in walk_up(os.getcwd()):
i += 1
if (i < max_depth):
if 'requirements.txt':
r = os.path.join(c, 'requirements.txt')
if os.path.isfile(r):
return r
raise RuntimeError('No requirements.txt found!')
| -8,605,925,904,386,501,000
|
Returns the path of a Pipfile in parent directories.
|
pipenv/utils.py
|
find_requirements
|
bryant1410/pipenv
|
python
|
def find_requirements(max_depth=3):
i = 0
for (c, d, f) in walk_up(os.getcwd()):
i += 1
if (i < max_depth):
if 'requirements.txt':
r = os.path.join(c, 'requirements.txt')
if os.path.isfile(r):
return r
raise RuntimeError('No requirements.txt found!')
|
@contextmanager
def temp_environ():
'Allow the ability to set os.environ temporarily'
environ = dict(os.environ)
try:
(yield)
finally:
os.environ.clear()
os.environ.update(environ)
| -5,083,302,786,420,072,000
|
Allow the ability to set os.environ temporarily
|
pipenv/utils.py
|
temp_environ
|
bryant1410/pipenv
|
python
|
@contextmanager
def temp_environ():
environ = dict(os.environ)
try:
(yield)
finally:
os.environ.clear()
os.environ.update(environ)
|
def is_valid_url(url):
'Checks if a given string is an url'
pieces = urlparse(url)
return all([pieces.scheme, pieces.netloc])
| -4,789,592,044,157,309,000
|
Checks if a given string is an url
|
pipenv/utils.py
|
is_valid_url
|
bryant1410/pipenv
|
python
|
def is_valid_url(url):
pieces = urlparse(url)
return all([pieces.scheme, pieces.netloc])
|
def download_file(url, filename):
'Downloads file from url to a path with filename'
r = requests.get(url, stream=True)
if (not r.ok):
raise IOError('Unable to download file')
with open(filename, 'wb') as f:
f.write(r.content)
| -7,474,985,168,864,853,000
|
Downloads file from url to a path with filename
|
pipenv/utils.py
|
download_file
|
bryant1410/pipenv
|
python
|
def download_file(url, filename):
r = requests.get(url, stream=True)
if (not r.ok):
raise IOError('Unable to download file')
with open(filename, 'wb') as f:
f.write(r.content)
|
def need_update_check():
'Determines whether we need to check for updates.'
mkdir_p(PIPENV_CACHE_DIR)
p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check'))
if (not os.path.exists(p)):
return True
out_of_date_time = (time() - ((24 * 60) * 60))
if (os.path.isfile(p) and (os.path.getmtime(p) <= out_of_date_time)):
return True
else:
return False
| -8,032,898,415,673,751,000
|
Determines whether we need to check for updates.
|
pipenv/utils.py
|
need_update_check
|
bryant1410/pipenv
|
python
|
def need_update_check():
mkdir_p(PIPENV_CACHE_DIR)
p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check'))
if (not os.path.exists(p)):
return True
out_of_date_time = (time() - ((24 * 60) * 60))
if (os.path.isfile(p) and (os.path.getmtime(p) <= out_of_date_time)):
return True
else:
return False
|
def touch_update_stamp():
'Touches PIPENV_CACHE_DIR/.pipenv_update_check'
mkdir_p(PIPENV_CACHE_DIR)
p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check'))
try:
os.utime(p, None)
except OSError:
with open(p, 'w') as fh:
fh.write('')
| -4,278,246,743,979,614,000
|
Touches PIPENV_CACHE_DIR/.pipenv_update_check
|
pipenv/utils.py
|
touch_update_stamp
|
bryant1410/pipenv
|
python
|
def touch_update_stamp():
mkdir_p(PIPENV_CACHE_DIR)
p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check'))
try:
os.utime(p, None)
except OSError:
with open(p, 'w') as fh:
            fh.write('')
|
def normalize_drive(path):
'Normalize drive in path so they stay consistent.\n\n This currently only affects local drives on Windows, which can be\n identified with either upper or lower cased drive names. The case is\n always converted to uppercase because it seems to be preferred.\n\n See: <https://github.com/pypa/pipenv/issues/1218>\n '
if ((os.name != 'nt') or (not isinstance(path, six.string_types))):
return path
(drive, tail) = os.path.splitdrive(path)
if (drive.islower() and (len(drive) == 2) and (drive[1] == ':')):
return '{}{}'.format(drive.upper(), tail)
return path
| 7,206,725,071,959,051,000
|
Normalize drive in path so they stay consistent.
This currently only affects local drives on Windows, which can be
identified with either upper or lower cased drive names. The case is
always converted to uppercase because it seems to be preferred.
See: <https://github.com/pypa/pipenv/issues/1218>
|
pipenv/utils.py
|
normalize_drive
|
bryant1410/pipenv
|
python
|
def normalize_drive(path):
'Normalize drive in path so they stay consistent.\n\n This currently only affects local drives on Windows, which can be\n identified with either upper or lower cased drive names. The case is\n always converted to uppercase because it seems to be preferred.\n\n See: <https://github.com/pypa/pipenv/issues/1218>\n '
if ((os.name != 'nt') or (not isinstance(path, six.string_types))):
return path
(drive, tail) = os.path.splitdrive(path)
if (drive.islower() and (len(drive) == 2) and (drive[1] == ':')):
return '{}{}'.format(drive.upper(), tail)
return path
|
def is_readonly_path(fn):
'Check if a provided path exists and is readonly.\n\n Permissions check is `bool(path.stat & stat.S_IREAD)` or `not os.access(path, os.W_OK)`\n '
if os.path.exists(fn):
return ((os.stat(fn).st_mode & stat.S_IREAD) or (not os.access(fn, os.W_OK)))
return False
| 4,072,325,937,409,912,000
|
Check if a provided path exists and is readonly.
Permissions check is `bool(path.stat & stat.S_IREAD)` or `not os.access(path, os.W_OK)`
|
pipenv/utils.py
|
is_readonly_path
|
bryant1410/pipenv
|
python
|
def is_readonly_path(fn):
'Check if a provided path exists and is readonly.\n\n Permissions check is `bool(path.stat & stat.S_IREAD)` or `not os.access(path, os.W_OK)`\n '
if os.path.exists(fn):
return ((os.stat(fn).st_mode & stat.S_IREAD) or (not os.access(fn, os.W_OK)))
return False
|
def handle_remove_readonly(func, path, exc):
'Error handler for shutil.rmtree.\n\n Windows source repo folders are read-only by default, so this error handler\n attempts to set them as writeable and then proceed with deletion.'
default_warning_message = 'Unable to remove file due to permissions restriction: {!r}'
(exc_type, exc_exception, exc_tb) = exc
if is_readonly_path(path):
set_write_bit(path)
try:
func(path)
except (OSError, IOError) as e:
if (e.errno in [errno.EACCES, errno.EPERM]):
warnings.warn(default_warning_message.format(path), ResourceWarning)
return
if (exc_exception.errno in [errno.EACCES, errno.EPERM]):
warnings.warn(default_warning_message.format(path), ResourceWarning)
return
raise
| -2,753,335,397,450,273,000
|
Error handler for shutil.rmtree.
Windows source repo folders are read-only by default, so this error handler
attempts to set them as writeable and then proceed with deletion.
|
pipenv/utils.py
|
handle_remove_readonly
|
bryant1410/pipenv
|
python
|
def handle_remove_readonly(func, path, exc):
'Error handler for shutil.rmtree.\n\n Windows source repo folders are read-only by default, so this error handler\n attempts to set them as writeable and then proceed with deletion.'
default_warning_message = 'Unable to remove file due to permissions restriction: {!r}'
(exc_type, exc_exception, exc_tb) = exc
if is_readonly_path(path):
set_write_bit(path)
try:
func(path)
except (OSError, IOError) as e:
if (e.errno in [errno.EACCES, errno.EPERM]):
warnings.warn(default_warning_message.format(path), ResourceWarning)
return
if (exc_exception.errno in [errno.EACCES, errno.EPERM]):
warnings.warn(default_warning_message.format(path), ResourceWarning)
return
raise
|
def _deduplicate(data):
'Remove duplicated records.'
cnt = collections.Counter((row['id'] for row in data))
nonuniq_ids = set((id for (id, count) in cnt.items() if (count > 1)))
nonuniq_data = [row for row in data if (row['id'] in nonuniq_ids)]
unique_data = [row for row in data if (row['id'] not in nonuniq_ids)]
nonuniq_data = sorted(nonuniq_data, key=(lambda row: row['id']))
for (_, same_id_data) in itertools.groupby(nonuniq_data, (lambda row: row['id'])):
same_id_data = list(same_id_data)
if all(((same_id_data[0] == x) for x in same_id_data)):
unique_data.append(same_id_data[0])
else:
non_deleted_same_id_data = [row for row in same_id_data if (row['author'] != '[deleted]')]
if (len(non_deleted_same_id_data) != 1):
raise ValueError('Found several message with id {} in the original data'.format(non_deleted_same_id_data[0]['id']))
unique_data.append(non_deleted_same_id_data[0])
return sorted(unique_data, key=(lambda row: (row['link_id'], row['created_utc'])))
| 4,788,760,498,953,770,000
|
Remove duplicated records.
|
tensorflow_datasets/text/reddit_disentanglement.py
|
_deduplicate
|
Ak0303/datasets
|
python
|
def _deduplicate(data):
cnt = collections.Counter((row['id'] for row in data))
nonuniq_ids = set((id for (id, count) in cnt.items() if (count > 1)))
nonuniq_data = [row for row in data if (row['id'] in nonuniq_ids)]
unique_data = [row for row in data if (row['id'] not in nonuniq_ids)]
nonuniq_data = sorted(nonuniq_data, key=(lambda row: row['id']))
for (_, same_id_data) in itertools.groupby(nonuniq_data, (lambda row: row['id'])):
same_id_data = list(same_id_data)
if all(((same_id_data[0] == x) for x in same_id_data)):
unique_data.append(same_id_data[0])
else:
non_deleted_same_id_data = [row for row in same_id_data if (row['author'] != '[deleted]')]
if (len(non_deleted_same_id_data) != 1):
raise ValueError('Found several message with id {} in the original data'.format(non_deleted_same_id_data[0]['id']))
unique_data.append(non_deleted_same_id_data[0])
return sorted(unique_data, key=(lambda row: (row['link_id'], row['created_utc'])))
|
def _split_generators(self, dl_manager):
'Returns SplitGenerators.'
return [tfds.core.SplitGenerator(name=tfds.Split.TRAIN, gen_kwargs={'path': os.path.join(dl_manager.manual_dir, 'train.csv')}), tfds.core.SplitGenerator(name=tfds.Split.VALIDATION, gen_kwargs={'path': os.path.join(dl_manager.manual_dir, 'val.csv')}), tfds.core.SplitGenerator(name=tfds.Split.TEST, gen_kwargs={'path': os.path.join(dl_manager.manual_dir, 'test.csv')})]
| -2,188,673,168,850,584,000
|
Returns SplitGenerators.
|
tensorflow_datasets/text/reddit_disentanglement.py
|
_split_generators
|
Ak0303/datasets
|
python
|
def _split_generators(self, dl_manager):
return [tfds.core.SplitGenerator(name=tfds.Split.TRAIN, gen_kwargs={'path': os.path.join(dl_manager.manual_dir, 'train.csv')}), tfds.core.SplitGenerator(name=tfds.Split.VALIDATION, gen_kwargs={'path': os.path.join(dl_manager.manual_dir, 'val.csv')}), tfds.core.SplitGenerator(name=tfds.Split.TEST, gen_kwargs={'path': os.path.join(dl_manager.manual_dir, 'test.csv')})]
|
def _generate_examples(self, path):
'Yields examples.'
data = list(_read_csv(path))
data = _deduplicate(data)
for (link_id, one_topic_data) in itertools.groupby(data, (lambda row: row['link_id'])):
one_topic_data = list(one_topic_data)
for row in one_topic_data:
row['text'] = row.pop('body')
(yield (link_id, {_THREAD_KEY: one_topic_data}))
| 6,543,013,553,364,795,000
|
Yields examples.
|
tensorflow_datasets/text/reddit_disentanglement.py
|
_generate_examples
|
Ak0303/datasets
|
python
|
def _generate_examples(self, path):
data = list(_read_csv(path))
data = _deduplicate(data)
for (link_id, one_topic_data) in itertools.groupby(data, (lambda row: row['link_id'])):
one_topic_data = list(one_topic_data)
for row in one_topic_data:
row['text'] = row.pop('body')
(yield (link_id, {_THREAD_KEY: one_topic_data}))
|
def detect(image: str, verbose: bool=False):
'Detects faces on a given image using dlib and returns matches.\n\n :param image: Path to access the image to be searched\n :type image: [string]\n :param verbose: Wether or not command should output informations\n :type image: [bool], default to False\n\n :raises RuntimeError: When the provided image_path is invalid\n\n :return: The detected faces\n :rtype: [list of dlib.rectangle]\n '
detector = dlib.get_frontal_face_detector()
img = dlib.load_rgb_image(image)
dets = detector.run(img, 1, DLIB_FACE_DETECTING_MIN_SCORE)[0]
(verbose and print(colored(f'''Number of faces detected: {len(dets)}
''', 'yellow')))
detections = []
from face_cropper.cli.output import colored_detection_output
for (index, detection) in enumerate(dets):
detections.append(detection)
(verbose and print(colored(f'Detection {(index + 1)}:', 'green')))
(verbose and colored_detection_output(detection))
return detections
| -7,453,832,317,566,232,000
|
Detects faces on a given image using dlib and returns matches.
:param image: Path to access the image to be searched
:type image: [string]
:param verbose: Wether or not command should output informations
:type image: [bool], default to False
:raises RuntimeError: When the provided image_path is invalid
:return: The detected faces
:rtype: [list of dlib.rectangle]
|
face_cropper/core/detector.py
|
detect
|
Dave-Lopper/face_cropper
|
python
|
def detect(image: str, verbose: bool=False):
'Detects faces on a given image using dlib and returns matches.\n\n :param image: Path to access the image to be searched\n :type image: [string]\n :param verbose: Wether or not command should output informations\n :type image: [bool], default to False\n\n :raises RuntimeError: When the provided image_path is invalid\n\n :return: The detected faces\n :rtype: [list of dlib.rectangle]\n '
detector = dlib.get_frontal_face_detector()
img = dlib.load_rgb_image(image)
dets = detector.run(img, 1, DLIB_FACE_DETECTING_MIN_SCORE)[0]
    (verbose and print(colored(f'''Number of faces detected: {len(dets)}
''', 'yellow')))
detections = []
from face_cropper.cli.output import colored_detection_output
for (index, detection) in enumerate(dets):
detections.append(detection)
(verbose and print(colored(f'Detection {(index + 1)}:', 'green')))
(verbose and colored_detection_output(detection))
return detections
|
def download_progress_hook(count, blockSize, totalSize):
'A hook to report the progress of a download. This is mostly intended for users with\n slow internet connections. Reports every 5% change in download progress.\n '
global last_percent_reported
percent = int((((count * blockSize) * 100) / totalSize))
if (last_percent_reported != percent):
if ((percent % 5) == 0):
sys.stdout.write(('%s%%' % percent))
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
last_percent_reported = percent
| 2,470,292,000,998,774,300
|
A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 5% change in download progress.
|
udacity_deep_learning/download_data.py
|
download_progress_hook
|
fcarsten/ai_playground
|
python
|
def download_progress_hook(count, blockSize, totalSize):
'A hook to report the progress of a download. This is mostly intended for users with\n slow internet connections. Reports every 5% change in download progress.\n '
global last_percent_reported
percent = int((((count * blockSize) * 100) / totalSize))
if (last_percent_reported != percent):
if ((percent % 5) == 0):
sys.stdout.write(('%s%%' % percent))
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
last_percent_reported = percent
|
def maybe_download(filename, expected_bytes, force=False):
"Download a file if not present, and make sure it's the right size."
dest_filename = os.path.join(data_root, filename)
if (force or (not os.path.exists(dest_filename))):
print('Attempting to download:', filename)
(filename, _) = urlretrieve((url + filename), dest_filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(dest_filename)
if (statinfo.st_size == expected_bytes):
print('Found and verified', dest_filename)
else:
raise Exception((('Failed to verify ' + dest_filename) + '. Can you get to it with a browser?'))
return dest_filename
| 2,058,923,476,989,784,600
|
Download a file if not present, and make sure it's the right size.
|
udacity_deep_learning/download_data.py
|
maybe_download
|
fcarsten/ai_playground
|
python
|
def maybe_download(filename, expected_bytes, force=False):
dest_filename = os.path.join(data_root, filename)
if (force or (not os.path.exists(dest_filename))):
print('Attempting to download:', filename)
(filename, _) = urlretrieve((url + filename), dest_filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(dest_filename)
if (statinfo.st_size == expected_bytes):
print('Found and verified', dest_filename)
else:
raise Exception((('Failed to verify ' + dest_filename) + '. Can you get to it with a browser?'))
return dest_filename
|
def channel_split_naive(r, channel_ranges):
'Slower but simpler implementation of straxen.split_channel_ranges'
results = []
for (left, right) in channel_ranges:
results.append(r[np.in1d(r['channel'], np.arange(left, (right + 1)))])
return results
| -3,514,169,492,615,701,500
|
Slower but simpler implementation of straxen.split_channel_ranges
|
tests/test_channel_split.py
|
channel_split_naive
|
AlexElykov/straxen
|
python
|
def channel_split_naive(r, channel_ranges):
results = []
for (left, right) in channel_ranges:
results.append(r[np.in1d(r['channel'], np.arange(left, (right + 1)))])
return results
|
def __init__(self, obs_space, action_space, config, loss_fn, stats_fn=None, grad_stats_fn=None, before_loss_init=None, make_model=None, action_sampler_fn=None, existing_inputs=None, existing_model=None, get_batch_divisibility_req=None, obs_include_prev_action_reward=True):
'Initialize a dynamic TF policy.\n\n Arguments:\n observation_space (gym.Space): Observation space of the policy.\n action_space (gym.Space): Action space of the policy.\n config (dict): Policy-specific configuration data.\n loss_fn (func): function that returns a loss tensor the policy\n graph, and dict of experience tensor placeholders\n stats_fn (func): optional function that returns a dict of\n TF fetches given the policy and batch input tensors\n grad_stats_fn (func): optional function that returns a dict of\n TF fetches given the policy and loss gradient tensors\n before_loss_init (func): optional function to run prior to loss\n init that takes the same arguments as __init__\n make_model (func): optional function that returns a ModelV2 object\n given (policy, obs_space, action_space, config).\n All policy variables should be created in this function. If not\n specified, a default model will be created.\n action_sampler_fn (func): optional function that returns a\n tuple of action and action logp tensors given\n (policy, model, input_dict, obs_space, action_space, config).\n If not specified, a default action distribution will be used.\n existing_inputs (OrderedDict): when copying a policy, this\n specifies an existing dict of placeholders to use instead of\n defining new ones\n existing_model (ModelV2): when copying a policy, this specifies\n an existing model to clone and share weights with\n get_batch_divisibility_req (func): optional function that returns\n the divisibility requirement for sample batches\n obs_include_prev_action_reward (bool): whether to include the\n previous action and reward in the model input\n '
self.config = config
self._loss_fn = loss_fn
self._stats_fn = stats_fn
self._grad_stats_fn = grad_stats_fn
self._obs_include_prev_action_reward = obs_include_prev_action_reward
prev_actions = None
prev_rewards = None
if (existing_inputs is not None):
obs = existing_inputs[SampleBatch.CUR_OBS]
if self._obs_include_prev_action_reward:
prev_actions = existing_inputs[SampleBatch.PREV_ACTIONS]
prev_rewards = existing_inputs[SampleBatch.PREV_REWARDS]
else:
obs = tf.placeholder(tf.float32, shape=([None] + list(obs_space.shape)), name='observation')
if self._obs_include_prev_action_reward:
prev_actions = ModelCatalog.get_action_placeholder(action_space)
prev_rewards = tf.placeholder(tf.float32, [None], name='prev_reward')
self._input_dict = {SampleBatch.CUR_OBS: obs, SampleBatch.PREV_ACTIONS: prev_actions, SampleBatch.PREV_REWARDS: prev_rewards, 'is_training': self._get_is_training_placeholder()}
self._seq_lens = tf.placeholder(dtype=tf.int32, shape=[None], name='seq_lens')
if action_sampler_fn:
if (not make_model):
raise ValueError('make_model is required if action_sampler_fn is given')
self.dist_class = None
else:
(self.dist_class, logit_dim) = ModelCatalog.get_action_dist(action_space, self.config['model'])
if existing_model:
self.model = existing_model
elif make_model:
self.model = make_model(self, obs_space, action_space, config)
else:
self.model = ModelCatalog.get_model_v2(obs_space, action_space, logit_dim, self.config['model'], framework='tf')
if existing_inputs:
self._state_in = [v for (k, v) in existing_inputs.items() if k.startswith('state_in_')]
if self._state_in:
self._seq_lens = existing_inputs['seq_lens']
else:
self._state_in = [tf.placeholder(shape=((None,) + s.shape), dtype=s.dtype) for s in self.model.get_initial_state()]
(model_out, self._state_out) = self.model(self._input_dict, self._state_in, self._seq_lens)
if action_sampler_fn:
(action_sampler, action_logp) = action_sampler_fn(self, self.model, self._input_dict, obs_space, action_space, config)
else:
action_dist = self.dist_class(model_out, self.model)
action_sampler = action_dist.sample()
action_logp = action_dist.sampled_action_logp()
sess = (tf.get_default_session() or tf.Session())
if get_batch_divisibility_req:
batch_divisibility_req = get_batch_divisibility_req(self)
else:
batch_divisibility_req = 1
TFPolicy.__init__(self, obs_space, action_space, sess, obs_input=obs, action_sampler=action_sampler, action_logp=action_logp, loss=None, loss_inputs=[], model=self.model, state_inputs=self._state_in, state_outputs=self._state_out, prev_action_input=prev_actions, prev_reward_input=prev_rewards, seq_lens=self._seq_lens, max_seq_len=config['model']['max_seq_len'], batch_divisibility_req=batch_divisibility_req)
before_loss_init(self, obs_space, action_space, config)
if (not existing_inputs):
self._initialize_loss()
| 5,892,416,507,873,919,000
|
Initialize a dynamic TF policy.
Arguments:
observation_space (gym.Space): Observation space of the policy.
action_space (gym.Space): Action space of the policy.
config (dict): Policy-specific configuration data.
loss_fn (func): function that returns a loss tensor the policy
graph, and dict of experience tensor placeholders
stats_fn (func): optional function that returns a dict of
TF fetches given the policy and batch input tensors
grad_stats_fn (func): optional function that returns a dict of
TF fetches given the policy and loss gradient tensors
before_loss_init (func): optional function to run prior to loss
init that takes the same arguments as __init__
make_model (func): optional function that returns a ModelV2 object
given (policy, obs_space, action_space, config).
All policy variables should be created in this function. If not
specified, a default model will be created.
action_sampler_fn (func): optional function that returns a
tuple of action and action logp tensors given
(policy, model, input_dict, obs_space, action_space, config).
If not specified, a default action distribution will be used.
existing_inputs (OrderedDict): when copying a policy, this
specifies an existing dict of placeholders to use instead of
defining new ones
existing_model (ModelV2): when copying a policy, this specifies
an existing model to clone and share weights with
get_batch_divisibility_req (func): optional function that returns
the divisibility requirement for sample batches
obs_include_prev_action_reward (bool): whether to include the
previous action and reward in the model input
|
rllib/policy/dynamic_tf_policy.py
|
__init__
|
lisadunlap/ray
|
python
|
def __init__(self, obs_space, action_space, config, loss_fn, stats_fn=None, grad_stats_fn=None, before_loss_init=None, make_model=None, action_sampler_fn=None, existing_inputs=None, existing_model=None, get_batch_divisibility_req=None, obs_include_prev_action_reward=True):
    """Initialize a dynamic TF policy.

    Arguments:
        obs_space (gym.Space): Observation space of the policy.
        action_space (gym.Space): Action space of the policy.
        config (dict): Policy-specific configuration data.
        loss_fn (func): function that returns a loss tensor for the
            policy graph, given a dict of experience tensor placeholders
        stats_fn (func): optional function that returns a dict of
            TF fetches given the policy and batch input tensors
        grad_stats_fn (func): optional function that returns a dict of
            TF fetches given the policy and loss gradient tensors
        before_loss_init (func): optional function to run prior to loss
            init that takes the same arguments as __init__
        make_model (func): optional function that returns a ModelV2 object
            given (policy, obs_space, action_space, config).
            All policy variables should be created in this function. If not
            specified, a default model will be created.
        action_sampler_fn (func): optional function that returns a
            tuple of action and action logp tensors given
            (policy, model, input_dict, obs_space, action_space, config).
            If not specified, a default action distribution will be used.
        existing_inputs (OrderedDict): when copying a policy, this
            specifies an existing dict of placeholders to use instead of
            defining new ones
        existing_model (ModelV2): when copying a policy, this specifies
            an existing model to clone and share weights with
        get_batch_divisibility_req (func): optional function that returns
            the divisibility requirement for sample batches
        obs_include_prev_action_reward (bool): whether to include the
            previous action and reward in the model input
    """
    self.config = config
    self._loss_fn = loss_fn
    self._stats_fn = stats_fn
    self._grad_stats_fn = grad_stats_fn
    self._obs_include_prev_action_reward = obs_include_prev_action_reward
    prev_actions = None
    prev_rewards = None
    # Either reuse the placeholders of the policy being copied, or create
    # fresh placeholders for a brand-new policy graph.
    if (existing_inputs is not None):
        obs = existing_inputs[SampleBatch.CUR_OBS]
        if self._obs_include_prev_action_reward:
            prev_actions = existing_inputs[SampleBatch.PREV_ACTIONS]
            prev_rewards = existing_inputs[SampleBatch.PREV_REWARDS]
    else:
        obs = tf.placeholder(tf.float32, shape=([None] + list(obs_space.shape)), name='observation')
        if self._obs_include_prev_action_reward:
            prev_actions = ModelCatalog.get_action_placeholder(action_space)
            prev_rewards = tf.placeholder(tf.float32, [None], name='prev_reward')
    self._input_dict = {SampleBatch.CUR_OBS: obs, SampleBatch.PREV_ACTIONS: prev_actions, SampleBatch.PREV_REWARDS: prev_rewards, 'is_training': self._get_is_training_placeholder()}
    self._seq_lens = tf.placeholder(dtype=tf.int32, shape=[None], name='seq_lens')
    # A custom action sampler bypasses the default action-distribution
    # machinery entirely, so a custom model factory is mandatory with it.
    if action_sampler_fn:
        if (not make_model):
            raise ValueError('make_model is required if action_sampler_fn is given')
        self.dist_class = None
    else:
        (self.dist_class, logit_dim) = ModelCatalog.get_action_dist(action_space, self.config['model'])
    # Model precedence: shared existing model > custom factory > default.
    if existing_model:
        self.model = existing_model
    elif make_model:
        self.model = make_model(self, obs_space, action_space, config)
    else:
        self.model = ModelCatalog.get_model_v2(obs_space, action_space, logit_dim, self.config['model'], framework='tf')
    # RNN state inputs: reuse the copied placeholders (incl. seq_lens) when
    # copying; otherwise make one placeholder per initial-state tensor.
    if existing_inputs:
        self._state_in = [v for (k, v) in existing_inputs.items() if k.startswith('state_in_')]
        if self._state_in:
            self._seq_lens = existing_inputs['seq_lens']
    else:
        self._state_in = [tf.placeholder(shape=((None,) + s.shape), dtype=s.dtype) for s in self.model.get_initial_state()]
    (model_out, self._state_out) = self.model(self._input_dict, self._state_in, self._seq_lens)
    # Build the sampled action and its log-probability, either via the
    # user-provided sampler or from the default action distribution.
    if action_sampler_fn:
        (action_sampler, action_logp) = action_sampler_fn(self, self.model, self._input_dict, obs_space, action_space, config)
    else:
        action_dist = self.dist_class(model_out, self.model)
        action_sampler = action_dist.sample()
        action_logp = action_dist.sampled_action_logp()
    sess = (tf.get_default_session() or tf.Session())
    if get_batch_divisibility_req:
        batch_divisibility_req = get_batch_divisibility_req(self)
    else:
        batch_divisibility_req = 1
    # The loss is deferred (loss=None, loss_inputs=[]): it is initialized
    # below, once the placeholders that the loss depends on exist.
    TFPolicy.__init__(self, obs_space, action_space, sess, obs_input=obs, action_sampler=action_sampler, action_logp=action_logp, loss=None, loss_inputs=[], model=self.model, state_inputs=self._state_in, state_outputs=self._state_out, prev_action_input=prev_actions, prev_reward_input=prev_rewards, seq_lens=self._seq_lens, max_seq_len=config['model']['max_seq_len'], batch_divisibility_req=batch_divisibility_req)
    before_loss_init(self, obs_space, action_space, config)
    # When copying (existing_inputs given), copy() initializes the loss
    # against the reused placeholders instead of doing it here.
    if (not existing_inputs):
        self._initialize_loss()
|
@override(TFPolicy)
def copy(self, existing_inputs):
    """Creates a copy of self using existing input placeholders.

    The placeholders in ``existing_inputs`` must match this policy's loss
    inputs, followed by its RNN state inputs and a trailing ``seq_lens``
    placeholder (when the policy is recurrent).
    """
    # Expected placeholder count: one per loss input, plus the RNN state
    # inputs and their trailing seq_lens placeholder (hence the +1).
    if self._state_inputs:
        num_state_inputs = (len(self._state_inputs) + 1)
    else:
        num_state_inputs = 0
    if ((len(self._loss_inputs) + num_state_inputs) != len(existing_inputs)):
        raise ValueError('Tensor list mismatch', self._loss_inputs, self._state_inputs, existing_inputs)
    for (i, (k, v)) in enumerate(self._loss_inputs):
        if (v.shape.as_list() != existing_inputs[i].shape.as_list()):
            raise ValueError('Tensor shape mismatch', i, k, v.shape, existing_inputs[i].shape)
    # RNN placeholders come after the loss inputs; seq_lens is last.
    rnn_inputs = []
    for i in range(len(self._state_inputs)):
        rnn_inputs.append(('state_in_{}'.format(i), existing_inputs[(len(self._loss_inputs) + i)]))
    if rnn_inputs:
        rnn_inputs.append(('seq_lens', existing_inputs[(- 1)]))
    input_dict = OrderedDict(([(k, existing_inputs[i]) for (i, (k, _)) in enumerate(self._loss_inputs)] + rnn_inputs))
    instance = self.__class__(self.observation_space, self.action_space, self.config, existing_inputs=input_dict, existing_model=self.model)
    instance._loss_input_dict = input_dict
    # Initialize the copy's loss against the reused placeholders (the
    # constructor skips loss init when existing_inputs is given).
    loss = instance._do_loss_init(input_dict)
    loss_inputs = [(k, existing_inputs[i]) for (i, (k, _)) in enumerate(self._loss_inputs)]
    TFPolicy._initialize_loss(instance, loss, loss_inputs)
    if instance._grad_stats_fn:
        instance._stats_fetches.update(instance._grad_stats_fn(instance, input_dict, instance._grads))
    return instance
| -2,234,550,702,876,427,800
|
Creates a copy of self using existing input placeholders.
|
rllib/policy/dynamic_tf_policy.py
|
copy
|
lisadunlap/ray
|
python
|
@override(TFPolicy)
def copy(self, existing_inputs):
if self._state_inputs:
num_state_inputs = (len(self._state_inputs) + 1)
else:
num_state_inputs = 0
if ((len(self._loss_inputs) + num_state_inputs) != len(existing_inputs)):
raise ValueError('Tensor list mismatch', self._loss_inputs, self._state_inputs, existing_inputs)
for (i, (k, v)) in enumerate(self._loss_inputs):
if (v.shape.as_list() != existing_inputs[i].shape.as_list()):
raise ValueError('Tensor shape mismatch', i, k, v.shape, existing_inputs[i].shape)
rnn_inputs = []
for i in range(len(self._state_inputs)):
rnn_inputs.append(('state_in_{}'.format(i), existing_inputs[(len(self._loss_inputs) + i)]))
if rnn_inputs:
rnn_inputs.append(('seq_lens', existing_inputs[(- 1)]))
input_dict = OrderedDict(([(k, existing_inputs[i]) for (i, (k, _)) in enumerate(self._loss_inputs)] + rnn_inputs))
instance = self.__class__(self.observation_space, self.action_space, self.config, existing_inputs=input_dict, existing_model=self.model)
instance._loss_input_dict = input_dict
loss = instance._do_loss_init(input_dict)
loss_inputs = [(k, existing_inputs[i]) for (i, (k, _)) in enumerate(self._loss_inputs)]
TFPolicy._initialize_loss(instance, loss, loss_inputs)
if instance._grad_stats_fn:
instance._stats_fetches.update(instance._grad_stats_fn(instance, input_dict, instance._grads))
return instance
|
def main():
    """Entry point for the xbmcswift2 console script.

    The script is command based: the second CLI argument names the command
    to execute, and each command contributes its own parser options and
    usage text. With no command at all (or only the ``-h`` flag) the
    general help message is shown instead.
    """
    parser = OptionParser()
    if len(sys.argv) == 1:
        parser.set_usage(USAGE)
        parser.error('At least one command is required.')
    command = sys.argv[1]
    if command == '-h':
        # optparse handles -h itself: it prints the general help and exits.
        parser.set_usage(USAGE)
        opts, args = parser.parse_args()
    if command not in COMMANDS.keys():
        parser.error('Invalid command')
    manager = COMMANDS[command]
    # Let the selected command customize the parser before parsing.
    if hasattr(manager, 'option_list'):
        for option_args, option_kwargs in manager.option_list:
            parser.add_option(*option_args, **option_kwargs)
    if hasattr(manager, 'usage'):
        parser.set_usage(manager.usage)
    opts, args = parser.parse_args()
    # args[0] is the command name itself; hand over only what follows it.
    manager.run(opts, args[1:])
| -6,102,954,789,046,832,000
|
The entry point for the console script xbmcswift2.
The 'xbcmswift2' script is command bassed, so the second argument is always
the command to execute. Each command has its own parser options and usages.
If no command is provided or the -h flag is used without any other
commands, the general help message is shown.
|
resources/lib/xbmcswift2/cli/cli.py
|
main
|
liberty-developer/plugin.video.metalliq-forqed
|
python
|
def main():
"The entry point for the console script xbmcswift2.\n\n The 'xbcmswift2' script is command bassed, so the second argument is always\n the command to execute. Each command has its own parser options and usages.\n If no command is provided or the -h flag is used without any other\n commands, the general help message is shown.\n "
parser = OptionParser()
if (len(sys.argv) == 1):
parser.set_usage(USAGE)
parser.error('At least one command is required.')
command = sys.argv[1]
if (command == '-h'):
parser.set_usage(USAGE)
(opts, args) = parser.parse_args()
if (command not in COMMANDS.keys()):
parser.error('Invalid command')
manager = COMMANDS[command]
if hasattr(manager, 'option_list'):
for (args, kwargs) in manager.option_list:
parser.add_option(*args, **kwargs)
if hasattr(manager, 'usage'):
parser.set_usage(manager.usage)
(opts, args) = parser.parse_args()
manager.run(opts, args[1:])
|
def compute_benchmark(synthesizer, datasets=DEFAULT_DATASETS, iterations=3):
    """Compute the raw benchmark scores of a synthesizer over several datasets.

    The result is a ``pandas.DataFrame`` with one row per dataset and
    scoring method, one column per computed metric, and the extra columns
    ``dataset``, ``distance``, ``name`` (of the scoring method) and
    ``iteration``. An iteration that raises is logged and skipped so the
    remaining datasets and iterations still run.

    Args:
        synthesizer (callable):
            Function called as ``synthesizer(train, categoricals, ordinals)``.
        datasets (list[str]):
            Names of the datasets to evaluate. Defaults to all available.
        iterations (int):
            Number of iterations per dataset. Defaults to 3.

    Returns:
        pandas.DataFrame:
            Concatenation of all per-iteration score frames.
    """
    all_scores = []
    for dataset_name in datasets:
        LOGGER.info('Evaluating dataset %s', dataset_name)
        train, test, meta, categoricals, ordinals = load_dataset(dataset_name, benchmark=True)
        for iteration in range(iterations):
            try:
                synthesized = synthesizer(train, categoricals, ordinals)
                iteration_scores = compute_scores(train, test, synthesized, meta)
                iteration_scores['dataset'] = dataset_name
                iteration_scores['iteration'] = iteration
                all_scores.append(iteration_scores)
            except Exception:
                # Best-effort: a failing synthesizer run must not abort the
                # whole benchmark, only this iteration.
                LOGGER.exception('Error computing scores for %s on dataset %s - iteration %s', _get_synthesizer_name(synthesizer), dataset_name, iteration)
    return pd.concat(all_scores, sort=False)
| 6,867,888,405,591,949,000
|
Compute the scores of a synthesizer over a list of datasets.
The results are returned in a raw format as a ``pandas.DataFrame`` containing:
- One row for each dataset+scoring method (for example, a classifier)
- One column for each computed metric
- The columns:
- dataset
- distance
- name (of the scoring method)
- iteration
For example, evaluating a synthesizer on the ``adult`` and ``asia`` datasets with 2
iterations produces a table similar to this::
dataset name iter distance accuracy f1 syn_likelihood test_likelihood
adult DecisionTree... 0 0.0 0.79 0.65 NaN NaN
adult AdaBoost... 0 0.0 0.85 0.67 NaN NaN
adult Logistic... 0 0.0 0.79 0.66 NaN NaN
adult MLP... 0 0.0 0.84 0.67 NaN NaN
adult DecisionTree... 1 0.0 0.80 0.66 NaN NaN
adult AdaBoost... 1 0.0 0.86 0.68 NaN NaN
adult Logistic... 1 0.0 0.79 0.65 NaN NaN
adult MLP... 1 0.0 0.84 0.64 NaN NaN
asia Bayesian ... 0 0.0 NaN NaN -2.23 -2.24
asia Bayesian ... 1 0.0 NaN NaN -2.23 -2.24
|
sdgym/benchmark.py
|
compute_benchmark
|
csala/SDGym
|
python
|
def compute_benchmark(synthesizer, datasets=DEFAULT_DATASETS, iterations=3):
'Compute the scores of a synthesizer over a list of datasets.\n\n The results are returned in a raw format as a ``pandas.DataFrame`` containing:\n - One row for each dataset+scoring method (for example, a classifier)\n - One column for each computed metric\n - The columns:\n - dataset\n - distance\n - name (of the scoring method)\n - iteration\n\n For example, evaluating a synthesizer on the ``adult`` and ``asia`` datasets with 2\n iterations produces a table similar to this::\n\n dataset name iter distance accuracy f1 syn_likelihood test_likelihood\n adult DecisionTree... 0 0.0 0.79 0.65 NaN NaN\n adult AdaBoost... 0 0.0 0.85 0.67 NaN NaN\n adult Logistic... 0 0.0 0.79 0.66 NaN NaN\n adult MLP... 0 0.0 0.84 0.67 NaN NaN\n adult DecisionTree... 1 0.0 0.80 0.66 NaN NaN\n adult AdaBoost... 1 0.0 0.86 0.68 NaN NaN\n adult Logistic... 1 0.0 0.79 0.65 NaN NaN\n adult MLP... 1 0.0 0.84 0.64 NaN NaN\n asia Bayesian ... 0 0.0 NaN NaN -2.23 -2.24\n asia Bayesian ... 1 0.0 NaN NaN -2.23 -2.24\n '
results = list()
for dataset_name in datasets:
LOGGER.info('Evaluating dataset %s', dataset_name)
(train, test, meta, categoricals, ordinals) = load_dataset(dataset_name, benchmark=True)
for iteration in range(iterations):
try:
synthesized = synthesizer(train, categoricals, ordinals)
scores = compute_scores(train, test, synthesized, meta)
scores['dataset'] = dataset_name
scores['iteration'] = iteration
results.append(scores)
except Exception:
LOGGER.exception('Error computing scores for %s on dataset %s - iteration %s', _get_synthesizer_name(synthesizer), dataset_name, iteration)
return pd.concat(results, sort=False)
|
def _summarize_scores(scores):
    """Summarize raw benchmark scores into one value per dataset and metric.

    The raw scores produced by ``compute_benchmark`` are grouped by dataset
    and reduced with ``_dataset_summary``, yielding a ``pandas.Series``
    indexed like ``adult/accuracy`` or ``asia/syn_likelihood``.

    Args:
        scores (pandas.DataFrame):
            Raw scores dataframe as returned by ``compute_benchmark``.

    Returns:
        pandas.Series:
            Summarized scores series, one entry per dataset/metric pair.
    """
    # Metadata columns are not metrics and must not be aggregated.
    metrics = scores.drop(['distance', 'iteration', 'name'], axis=1, errors='ignore')
    summary = metrics.groupby('dataset').apply(_dataset_summary)
    if not isinstance(summary, pd.Series):
        # groupby.apply returned a one-row frame; its only row is the series.
        return summary.iloc[0]
    # Drop the outer (dataset) level added by the groupby.
    return summary.droplevel(0)
| -9,160,691,643,630,375,000
|
Computes a summary of the scores obtained by a synthesizer.
The raw scores returned by the ``compute_benchmark`` function are summarized
by grouping them by dataset and computing the average.
The results are then put in a ``pandas.Series`` object with one value per
dataset and metric.
As an example, the summary of a synthesizer that has been evaluated on the
``adult`` and the ``asia`` datasets produces the following output::
adult/accuracy 0.8765
adult/f1_micro 0.7654
adult/f1_macro 0.7654
asia/syn_likelihood -2.5364
asia/test_likelihood -2.4321
dtype: float64
Args:
scores (pandas.DataFrame):
Raw Scores dataframe as returned by the ``compute_benchmark`` function.
Returns:
pandas.Series:
Summarized scores series in the format described above.
|
sdgym/benchmark.py
|
_summarize_scores
|
csala/SDGym
|
python
|
def _summarize_scores(scores):
'Computes a summary of the scores obtained by a synthesizer.\n\n The raw scores returned by the ``compute_benchmark`` function are summarized\n by grouping them by dataset and computing the average.\n\n The results are then put in a ``pandas.Series`` object with one value per\n dataset and metric.\n\n As an example, the summary of a synthesizer that has been evaluated on the\n ``adult`` and the ``asia`` datasets produces the following output::\n\n adult/accuracy 0.8765\n adult/f1_micro 0.7654\n adult/f1_macro 0.7654\n asia/syn_likelihood -2.5364\n asia/test_likelihood -2.4321\n dtype: float64\n\n Args:\n scores (pandas.DataFrame):\n Raw Scores dataframe as returned by the ``compute_benchmark`` function.\n\n Returns:\n pandas.Series:\n Summarized scores series in the format described above.\n '
scores = scores.drop(['distance', 'iteration', 'name'], axis=1, errors='ignore')
grouped = scores.groupby('dataset').apply(_dataset_summary)
if isinstance(grouped, pd.Series):
return grouped.droplevel(0)
return grouped.iloc[0]
|
def _get_synthesizer_name(synthesizer):
'Get the name of the synthesizer function or class.\n\n If the given synthesizer is a function, return its name.\n If it is a method, return the name of the class to which\n the method belongs.\n\n Args:\n synthesizer (function or method):\n The synthesizer function or method.\n\n Returns:\n str:\n Name of the function or the class to which the method belongs.\n '
if isinstance(synthesizer, types.MethodType):
synthesizer_name = synthesizer.__self__.__class__.__name__
else:
synthesizer_name = synthesizer.__name__
return synthesizer_name
| 6,233,313,625,423,672,000
|
Get the name of the synthesizer function or class.
If the given synthesizer is a function, return its name.
If it is a method, return the name of the class to which
the method belongs.
Args:
synthesizer (function or method):
The synthesizer function or method.
Returns:
str:
Name of the function or the class to which the method belongs.
|
sdgym/benchmark.py
|
_get_synthesizer_name
|
csala/SDGym
|
python
|
def _get_synthesizer_name(synthesizer):
'Get the name of the synthesizer function or class.\n\n If the given synthesizer is a function, return its name.\n If it is a method, return the name of the class to which\n the method belongs.\n\n Args:\n synthesizer (function or method):\n The synthesizer function or method.\n\n Returns:\n str:\n Name of the function or the class to which the method belongs.\n '
if isinstance(synthesizer, types.MethodType):
synthesizer_name = synthesizer.__self__.__class__.__name__
else:
synthesizer_name = synthesizer.__name__
return synthesizer_name
|
def _get_synthesizers(synthesizers):
    """Normalize the ``synthesizers`` argument into a name -> callable dict.

    A single callable is wrapped into a one-entry dict keyed by its name;
    lists and tuples are keyed by each synthesizer's name; dicts are used
    as-is. Any entry that is a ``BaseSynthesizer`` subclass is replaced in
    place by the bound ``fit_sample`` method of a fresh instance.

    Args:
        synthesizers (function, class, list, tuple or dict):
            A synthesizer (function or method or class), an iterable of
            synthesizers, or a dict mapping names to synthesizers.

    Returns:
        dict[str, function]:
            dict containing synthesizer names as keys and callables as values.

    Raises:
        TypeError:
            If the input is neither a callable, an iterable nor a dict.
    """
    if callable(synthesizers):
        synthesizers = {_get_synthesizer_name(synthesizers): synthesizers}
    if isinstance(synthesizers, (list, tuple)):
        synthesizers = {_get_synthesizer_name(item): item for item in synthesizers}
    elif not isinstance(synthesizers, dict):
        raise TypeError('`synthesizers` can only be a function, a class, a list or a dict')
    # Instantiate synthesizer classes in place, keeping the original keys.
    for name, entry in synthesizers.items():
        if isinstance(entry, type) and issubclass(entry, BaseSynthesizer):
            synthesizers[name] = entry().fit_sample
    return synthesizers
| 256,732,817,812,438,270
|
Get the dict of synthesizers from the input value.
If the input is a synthesizer or an iterable of synthesizers, get their names
and put them on a dict.
Args:
synthesizers (function, class, list, tuple or dict):
A synthesizer (function or method or class) or an iterable of synthesizers
or a dict containing synthesizer names as keys and synthesizers as values.
Returns:
dict[str, function]:
dict containing synthesizer names as keys and function as values.
Raises:
TypeError:
if neither a synthesizer or an iterable or a dict is passed.
|
sdgym/benchmark.py
|
_get_synthesizers
|
csala/SDGym
|
python
|
def _get_synthesizers(synthesizers):
'Get the dict of synthesizers from the input value.\n\n If the input is a synthesizer or an iterable of synthesizers, get their names\n and put them on a dict.\n\n Args:\n synthesizers (function, class, list, tuple or dict):\n A synthesizer (function or method or class) or an iterable of synthesizers\n or a dict containing synthesizer names as keys and synthesizers as values.\n\n Returns:\n dict[str, function]:\n dict containing synthesizer names as keys and function as values.\n\n Raises:\n TypeError:\n if neither a synthesizer or an iterable or a dict is passed.\n '
if callable(synthesizers):
synthesizers = {_get_synthesizer_name(synthesizers): synthesizers}
if isinstance(synthesizers, (list, tuple)):
synthesizers = {_get_synthesizer_name(synthesizer): synthesizer for synthesizer in synthesizers}
elif (not isinstance(synthesizers, dict)):
raise TypeError('`synthesizers` can only be a function, a class, a list or a dict')
for (name, synthesizer) in synthesizers.items():
if (isinstance(synthesizer, type) and issubclass(synthesizer, BaseSynthesizer)):
synthesizers[name] = synthesizer().fit_sample
return synthesizers
|
def benchmark(synthesizers, datasets=DEFAULT_DATASETS, iterations=3, add_leaderboard=True, leaderboard_path=LEADERBOARD_PATH, replace_existing=True):
    """Compute the benchmark scores for the synthesizers and return a leaderboard.

    The ``synthesizers`` object can either be a single synthesizer, an
    iterable of synthesizers, or a dict containing synthesizer names as
    keys and synthesizers as values.

    If ``add_leaderboard`` is ``True``, append the obtained scores to the
    leaderboard stored at ``leaderboard_path``. By default, the leaderboard
    used is the one included in the package, which contains the scores
    obtained by the SDGym Synthesizers.

    If ``replace_existing`` is ``True`` and any of the given synthesizers
    already existed in the leaderboard, the old rows are dropped.

    Args:
        synthesizers (function, class, list, tuple or dict):
            The synthesizer or synthesizers to evaluate. If the input is not
            a dict, synthesizer names are extracted from the given object.
        datasets (list[str]):
            Names of the datasets to use for the benchmark. Defaults to all
            the ones available.
        iterations (int):
            Number of iterations per dataset and synthesizer. Defaults to 3.
        add_leaderboard (bool):
            Whether to append the obtained scores to the previous
            leaderboard. Defaults to ``True``.
        leaderboard_path (str):
            Path where the leaderboard is stored. Defaults to the
            leaderboard included with the package.
        replace_existing (bool):
            Whether to replace old scores or keep them in the returned
            leaderboard. Defaults to ``True``.

    Returns:
        pandas.DataFrame:
            One row per synthesizer, one column per dataset and metric.
    """
    synthesizers = _get_synthesizers(synthesizers)
    scores = list()
    for synthesizer_name, synthesizer in synthesizers.items():
        synthesizer_scores = compute_benchmark(synthesizer, datasets, iterations)
        summary_row = _summarize_scores(synthesizer_scores)
        # The row name becomes the leaderboard index entry.
        summary_row.name = synthesizer_name
        scores.append(summary_row)
    leaderboard = pd.DataFrame(scores)
    leaderboard['timestamp'] = datetime.utcnow()
    if add_leaderboard:
        old_leaderboard = pd.read_csv(leaderboard_path, index_col=0, parse_dates=['timestamp'])[leaderboard.columns]
        if replace_existing:
            # Fix: pass the Index itself, not a one-element list wrapping it.
            # The wrapped Index never matched any label, so with
            # errors='ignore' the old rows were silently kept.
            old_leaderboard.drop(labels=leaderboard.index, errors='ignore', inplace=True)
        # pd.concat replaces DataFrame.append, which was removed in pandas 2.0;
        # the result is identical (rows stacked, columns aligned).
        leaderboard = pd.concat([old_leaderboard, leaderboard], sort=False)
    return leaderboard
| -6,008,760,859,194,131,000
|
Compute the benchmark scores for the synthesizers and return a leaderboard.
The ``synthesizers`` object can either be a single synthesizer or, an iterable of
synthesizers or a dict containing synthesizer names as keys and synthesizers as values.
If ``add_leaderboard`` is ``True``, append the obtained scores to the leaderboard
stored in the ``lederboard_path``. By default, the leaderboard used is the one which
is included in the package, which contains the scores obtained by the SDGym Synthesizers.
If ``replace_existing`` is ``True`` and any of the given synthesizers already existed
in the leaderboard, the old rows are dropped.
Args:
synthesizers (function, class, list, tuple or dict):
The synthesizer or synthesizers to evaluate. It can be a single synthesizer
(function or method or class), or an iterable of synthesizers, or a dict
containing synthesizer names as keys and synthesizers as values. If the input
is not a dict, synthesizer names will be extracted from the given object.
datasets (list[str]):
Names of the datasets to use for the benchmark. Defaults to all the ones available.
iterations (int):
Number of iterations to perform over each dataset and synthesizer. Defaults to 3.
add_leaderboard (bool):
Whether to append the obtained scores to the previous leaderboard or not. Defaults
to ``True``.
leaderboard_path (str):
Path to where the leaderboard is stored. Defaults to the leaderboard included
with the package, which contains the scores obtained by the SDGym synthesizers.
replace_existing (bool):
Whether to replace old scores or keep them in the returned leaderboard. Defaults
to ``True``.
Returns:
pandas.DataFrame:
Table containing one row per synthesizer and one column for each dataset and metric.
|
sdgym/benchmark.py
|
benchmark
|
csala/SDGym
|
python
|
def benchmark(synthesizers, datasets=DEFAULT_DATASETS, iterations=3, add_leaderboard=True, leaderboard_path=LEADERBOARD_PATH, replace_existing=True):
'Compute the benchmark scores for the synthesizers and return a leaderboard.\n\n The ``synthesizers`` object can either be a single synthesizer or, an iterable of\n synthesizers or a dict containing synthesizer names as keys and synthesizers as values.\n\n If ``add_leaderboard`` is ``True``, append the obtained scores to the leaderboard\n stored in the ``lederboard_path``. By default, the leaderboard used is the one which\n is included in the package, which contains the scores obtained by the SDGym Synthesizers.\n\n If ``replace_existing`` is ``True`` and any of the given synthesizers already existed\n in the leaderboard, the old rows are dropped.\n\n Args:\n synthesizers (function, class, list, tuple or dict):\n The synthesizer or synthesizers to evaluate. It can be a single synthesizer\n (function or method or class), or an iterable of synthesizers, or a dict\n containing synthesizer names as keys and synthesizers as values. If the input\n is not a dict, synthesizer names will be extracted from the given object.\n datasets (list[str]):\n Names of the datasets to use for the benchmark. Defaults to all the ones available.\n iterations (int):\n Number of iterations to perform over each dataset and synthesizer. Defaults to 3.\n add_leaderboard (bool):\n Whether to append the obtained scores to the previous leaderboard or not. Defaults\n to ``True``.\n leaderboard_path (str):\n Path to where the leaderboard is stored. Defaults to the leaderboard included\n with the package, which contains the scores obtained by the SDGym synthesizers.\n replace_existing (bool):\n Whether to replace old scores or keep them in the returned leaderboard. Defaults\n to ``True``.\n\n Returns:\n pandas.DataFrame:\n Table containing one row per synthesizer and one column for each dataset and metric.\n '
synthesizers = _get_synthesizers(synthesizers)
scores = list()
for (synthesizer_name, synthesizer) in synthesizers.items():
synthesizer_scores = compute_benchmark(synthesizer, datasets, iterations)
summary_row = _summarize_scores(synthesizer_scores)
summary_row.name = synthesizer_name
scores.append(summary_row)
leaderboard = pd.DataFrame(scores)
leaderboard['timestamp'] = datetime.utcnow()
if add_leaderboard:
old_leaderboard = pd.read_csv(leaderboard_path, index_col=0, parse_dates=['timestamp'])[leaderboard.columns]
if replace_existing:
old_leaderboard.drop(labels=[leaderboard.index], errors='ignore', inplace=True)
leaderboard = old_leaderboard.append(leaderboard, sort=False)
return leaderboard
|
def hello():
    """Print the greeting 'hello' to stdout."""
    print('hello')
| -6,392,466,694,877,974,000
|
This is a docstring
|
tests/example.py
|
hello
|
bwohlberg/py2jn
|
python
|
def hello():
'\n \n '
print('hello')
|
def parse_env(config_schema, env):
    """Parse the values from a given environment against a given config schema.

    Args:
        config_schema: A dict which maps each variable name to a Schema
            object that describes the requested value.
        env: A dict which represents the value of each variable in the
            environment.

    Raises:
        MissingConfigError: If a required variable is absent from ``env``
            and its schema provides no default.
    """
    parsed = {}
    try:
        for key, item_schema in config_schema.items():
            # Schema.parse raises KeyError when the variable is required
            # but missing from the environment.
            parsed[key] = item_schema.parse(key, env.get(key))
    except KeyError as error:
        raise MissingConfigError('Required config not set: {}'.format(error.args[0]))
    return parsed
| 2,493,724,030,623,137,300
|
Parse the values from a given environment against a given config schema
Args:
config_schema: A dict which maps the variable name to a Schema object
that describes the requested value.
env: A dict which represents the value of each variable in the
environment.
|
envpy/parser.py
|
parse_env
|
jonathanlloyd/envpy
|
python
|
def parse_env(config_schema, env):
'Parse the values from a given environment against a given config schema\n\n Args:\n config_schema: A dict which maps the variable name to a Schema object\n that describes the requested value.\n env: A dict which represents the value of each variable in the\n environment.\n '
try:
return {key: item_schema.parse(key, env.get(key)) for (key, item_schema) in config_schema.items()}
except KeyError as error:
raise MissingConfigError('Required config not set: {}'.format(error.args[0]))
|
def parse(self, key, value):
    """Parse the environment value for a given key against the schema.

    Args:
        key: The name of the environment variable.
        value: The raw value to be parsed, or ``None`` when the variable
            is not set.

    Raises:
        ParsingError: If the parser rejects the value.
        KeyError: If the variable is unset and no default was configured.
    """
    if value is None:
        # Variable not present in the environment: use the default if one
        # was configured, otherwise signal the missing key.
        if self._default is not SENTINAL:
            return self._default
        raise KeyError(key)
    try:
        return self._parser(value)
    except Exception:
        raise ParsingError('Error parsing {}'.format(key))
| -3,832,913,277,313,911,300
|
Parse the environment value for a given key against the schema.
Args:
key: The name of the environment variable.
value: The value to be parsed.
|
envpy/parser.py
|
parse
|
jonathanlloyd/envpy
|
python
|
def parse(self, key, value):
'Parse the environment value for a given key against the schema.\n\n Args:\n key: The name of the environment variable.\n value: The value to be parsed.\n '
if (value is not None):
try:
return self._parser(value)
except Exception:
raise ParsingError('Error parsing {}'.format(key))
elif (self._default is not SENTINAL):
return self._default
else:
raise KeyError(key)
|
def global_scope():
    """Return the global/default scope instance.

    Many APIs use this scope as their default value, e.g. ``Executor.run``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy

            fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
            numpy.array(fluid.global_scope().find_var("data").get_tensor())

    Returns:
        Scope: The global/default scope instance.
    """
    # g_scope is the module-level default scope (defined outside this chunk).
    return g_scope
| -2,561,556,626,074,283,000
|
Get the global/default scope instance. There are a lot of APIs use
:code:`global_scope` as its default value, e.g., :code:`Executor.run`
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
numpy.array(fluid.global_scope().find_var("data").get_tensor())
Returns:
Scope: The global/default scope instance.
|
python/paddle/fluid/executor.py
|
global_scope
|
AnKingOne/Paddle
|
python
|
def global_scope():
    """Return the global/default scope instance (module-level ``g_scope``).

    Many APIs use this scope as their default value, e.g. ``Executor.run``.

    Returns:
        Scope: The global/default scope instance.
    """
    # NOTE(review): duplicates `global_scope` defined earlier in the dump.
    return g_scope
|
@signature_safe_contextmanager
def scope_guard(scope):
    """Temporarily switch the global/default scope via a ``with`` statement.

    All variables created at runtime inside the ``with`` block are assigned
    to the new scope; the previous scope is restored on exit.

    Args:
        scope: The new global/default scope.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy

            new_scope = fluid.Scope()
            with fluid.scope_guard(new_scope):
                fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
            numpy.array(new_scope.find_var("data").get_tensor())
    """
    # Swap in the new scope, remembering the one it replaces.
    ex = _switch_scope(scope)
    (yield)
    # Restore the previously active scope on exit.
    # NOTE(review): if the body raises, the old scope is NOT restored
    # (no try/finally around the yield) -- presumably intentional upstream.
    _switch_scope(ex)
| 1,367,163,491,478,758,700
|
Change the global/default scope instance by Python `with` statement. All
variable in runtime will assigned to the new scope.
Args:
scope: The new global/default scope.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
new_scope = fluid.Scope()
with fluid.scope_guard(new_scope):
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
numpy.array(new_scope.find_var("data").get_tensor())
|
python/paddle/fluid/executor.py
|
scope_guard
|
AnKingOne/Paddle
|
python
|
@signature_safe_contextmanager
def scope_guard(scope):
    """Temporarily switch the global/default scope via a ``with`` statement.

    All variables created at runtime inside the ``with`` block are assigned
    to the new scope; the previous scope is restored on exit.

    Args:
        scope: The new global/default scope.
    """
    # NOTE(review): duplicates `scope_guard` defined earlier in the dump.
    ex = _switch_scope(scope)
    (yield)
    _switch_scope(ex)
|
def as_numpy(tensor):
    """Convert a Tensor (or a container of Tensors) to numpy.ndarray.

    Only Tensors without LoD information are supported; for higher
    dimensional sequence data, use LoDTensor directly.

    Args:
        tensor: a LoDTensor, a LoDTensorArray, or a list of either.

    Returns:
        numpy.ndarray, a list of ndarrays, or None when the tensor is
        not initialized.

    Raises:
        RuntimeError: if the tensor holds LoD information.
    """
    # Containers are converted element-wise via recursion.
    if isinstance(tensor, (core.LoDTensorArray, list)):
        return [as_numpy(element) for element in tensor]
    assert isinstance(tensor, core.LoDTensor)
    if len(tensor.lod()) > 0:
        raise RuntimeError("Some of your fetched tensors hold LoD information. They can not be completely cast to Python ndarray. Please set the parameter 'return_numpy' as 'False' to return LoDTensor itself directly.")
    # Uninitialized tensors map to None rather than raising.
    return np.array(tensor) if tensor._is_initialized() else None
| -7,444,017,813,485,285,000
|
Convert a Tensor to a numpy.ndarray, its only support Tensor without LoD information.
For higher dimensional sequence data, please use LoDTensor directly.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
new_scope = fluid.Scope()
with fluid.scope_guard(new_scope):
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
tensor = new_scope.find_var("data").get_tensor()
fluid.executor.as_numpy(tensor) # or numpy.array(new_scope.find_var("data").get_tensor())
Args:
tensor(Variable): a instance of Tensor
Returns:
numpy.ndarray
|
python/paddle/fluid/executor.py
|
as_numpy
|
AnKingOne/Paddle
|
python
|
def as_numpy(tensor):
    """Convert a Tensor (or container of Tensors) to numpy.ndarray.

    Only supports Tensors without LoD information; for higher dimensional
    sequence data, use LoDTensor directly.

    Args:
        tensor: a LoDTensor, LoDTensorArray, or list of them.

    Returns:
        numpy.ndarray, a list of ndarrays, or None for uninitialized tensors.

    Raises:
        RuntimeError: if the tensor carries LoD information.
    """
    # NOTE(review): duplicates `as_numpy` defined earlier in the dump.
    if isinstance(tensor, core.LoDTensorArray):
        return [as_numpy(t) for t in tensor]
    if isinstance(tensor, list):
        return [as_numpy(t) for t in tensor]
    assert isinstance(tensor, core.LoDTensor)
    lod = tensor.lod()
    if (len(lod) > 0):
        raise RuntimeError("Some of your fetched tensors hold LoD information. They can not be completely cast to Python ndarray. Please set the parameter 'return_numpy' as 'False' to return LoDTensor itself directly.")
    if tensor._is_initialized():
        return np.array(tensor)
    else:
        # Uninitialized tensor: return None instead of raising.
        return None
|
def has_feed_operators(block, feed_targets, feed_holder_name):
    """Check whether the block already has feed operators.

    Returns False if the block has no feed operators. If some feed operators
    have been prepended to the block, verifies that the info contained in
    them matches *feed_targets* and *feed_holder_name*, raising on mismatch.

    Args:
        block: a block instance (typically the global block of a program)
        feed_targets: a dictionary of {feed_target_name: feed_target_data}
        feed_holder_name: the name of the variable holding the data of all
            feed targets (type FEED_MINIBATCH, essentially vector<LoDTensor>).

    Returns:
        bool: whether the block has feed operators matching the given info.
    """
    num_feeds = 0
    for op in block.ops:
        if op.desc.type() != 'feed':
            # Feed ops are prepended, so stop at the first non-feed op.
            break
        num_feeds += 1
        assert op.desc.input('X')[0] == feed_holder_name
        target_name = op.desc.output('Out')[0]
        if target_name not in feed_targets:
            raise Exception("'feed_targets' does not have {} variable".format(target_name))
    if num_feeds > 0 and num_feeds != len(feed_targets):
        raise Exception("Feed operators in program desc do not match 'feed_targets'")
    return num_feeds > 0
| -4,258,719,829,844,028,000
|
Check whether the block already has feed operators.
Return false if the block does not have any feed operators.
If some feed operators have been prepended to the block, check that
the info contained in these feed operators matches the feed_targets
and feed_holder_name. Raise exception when any mismatch is found.
Return true when the block has feed operators with matching info.
Args:
block: a block instance (typically global block of a program)
feed_targets: a dictionary of {feed_target_name: feed_target_data}
feed_holder_name: the name of the variable that holds the data of
all feed targets. The type of this feed_holder variable is
FEED_MINIBATCH, which is essentially vector<LoDTensor>.
Returns:
A boolean value that indicates whether a block has feed operators
that match the info contained in feed_targets and feed_holder_name.
|
python/paddle/fluid/executor.py
|
has_feed_operators
|
AnKingOne/Paddle
|
python
|
def has_feed_operators(block, feed_targets, feed_holder_name):
    """Check whether the block already has feed operators.

    Returns False if the block has no feed operators; otherwise verifies the
    prepended feed operators match feed_targets and feed_holder_name,
    raising on any mismatch.

    Args:
        block: a block instance (typically the global block of a program)
        feed_targets: a dictionary of {feed_target_name: feed_target_data}
        feed_holder_name: the name of the variable holding the data of all
            feed targets (type FEED_MINIBATCH, essentially vector<LoDTensor>).

    Returns:
        bool: whether the block has matching feed operators.
    """
    # NOTE(review): duplicates `has_feed_operators` defined earlier in the dump.
    feed_count = 0
    for op in block.ops:
        if (op.desc.type() == 'feed'):
            feed_count += 1
            assert (op.desc.input('X')[0] == feed_holder_name)
            feed_target_name = op.desc.output('Out')[0]
            if (feed_target_name not in feed_targets):
                raise Exception("'feed_targets' does not have {} variable".format(feed_target_name))
        else:
            # Feed ops are prepended, so stop at the first non-feed op.
            break
    if ((feed_count > 0) and (feed_count != len(feed_targets))):
        raise Exception("Feed operators in program desc do not match 'feed_targets'")
    return (feed_count > 0)
|
def has_fetch_operators(block, fetch_targets, fetch_holder_name):
    """Check whether the block already has fetch operators.

    Returns False if the block has no fetch operators. If some fetch
    operators have been appended to the block, verifies that the info
    contained in them matches *fetch_targets* and *fetch_holder_name*,
    raising on any mismatch.

    Args:
        block: a block instance (typically the global block of a program)
        fetch_targets: a dictionary of {fetch_target_name: fetch_target_data}
        fetch_holder_name: the name of the variable holding the data of all
            fetch targets (type FETCH_LIST, essentially vector<LoDTensor>).

    Returns:
        bool: whether the block has fetch operators matching the given info.
    """
    num_fetches = 0
    for op in block.ops:
        # Non-fetch ops are simply skipped (fetch ops are appended at the end).
        if op.desc.type() != 'fetch':
            continue
        num_fetches += 1
        assert op.desc.output('Out')[0] == fetch_holder_name
        target_name = op.desc.input('X')[0]
        if target_name not in [var.desc.name() for var in fetch_targets]:
            raise Exception("'fetch_targets' does not have {} variable".format(target_name))
        # The 'col' attribute indexes into fetch_targets; cross-check it.
        idx = op.desc.attr('col')
        assert target_name == fetch_targets[idx].desc.name()
    if num_fetches > 0 and num_fetches != len(fetch_targets):
        raise Exception("Fetch operators in program desc do not match 'fetch_targets'")
    return num_fetches > 0
| -1,140,413,373,672,059,300
|
Check whether the block already has fetch operators.
Return false if the block does not have any fetch operators.
If some fetch operators have been appended to the block, check that
the info contained in these fetch operators matches the fetch_targets
and fetch_holder_name. Raise exception when any mismatch is found.
Return true when the block has fetch operators with matching info.
Args:
block: a block instance (typically global block of a program)
fetch_targets: a dictionary of {fetch_target_name: fetch_target_data}
fetch_holder_name: the name of the variable that holds the data of
all fetch targets. The type of this fetch_holder variable is
FETCH_LIST, which is essentially vector<LoDTensor>.
Return:
A boolean value that indicates whether a block has fetch operators
that match the info contained in fetch_targets and fetch_holder_name.
|
python/paddle/fluid/executor.py
|
has_fetch_operators
|
AnKingOne/Paddle
|
python
|
def has_fetch_operators(block, fetch_targets, fetch_holder_name):
    """Check whether the block already has fetch operators.

    Returns False if the block has no fetch operators; otherwise verifies
    the appended fetch operators match fetch_targets and fetch_holder_name,
    raising on any mismatch.

    Args:
        block: a block instance (typically the global block of a program)
        fetch_targets: a dictionary of {fetch_target_name: fetch_target_data}
        fetch_holder_name: the name of the variable holding the data of all
            fetch targets (type FETCH_LIST, essentially vector<LoDTensor>).

    Returns:
        bool: whether the block has matching fetch operators.
    """
    # NOTE(review): duplicates `has_fetch_operators` defined earlier in the dump.
    fetch_count = 0
    for op in block.ops:
        if (op.desc.type() == 'fetch'):
            fetch_count += 1
            assert (op.desc.output('Out')[0] == fetch_holder_name)
            fetch_target_name = op.desc.input('X')[0]
            if (fetch_target_name not in [var.desc.name() for var in fetch_targets]):
                raise Exception("'fetch_targets' does not have {} variable".format(fetch_target_name))
            # The 'col' attribute indexes into fetch_targets; cross-check it.
            idx = op.desc.attr('col')
            assert (fetch_target_name == fetch_targets[idx].desc.name())
    if ((fetch_count > 0) and (fetch_count != len(fetch_targets))):
        raise Exception("Fetch operators in program desc do not match 'fetch_targets'")
    return (fetch_count > 0)
|
def _fetch_var(name, scope=None, return_numpy=True):
'\n Fetch the value of the variable with the given name from the\n given scope.\n\n Args:\n name(str): name of the variable. Typically, only persistable variables\n can be found in the scope used for running the program.\n scope(core.Scope|None): scope object. It should be the scope where\n you pass to Executor.run() when running your program.\n If None, global_scope() will be used. Default None.\n return_numpy(bool): whether convert the tensor to numpy.ndarray.\n Default True.\n\n Returns:\n LodTensor|numpy.ndarray\n '
assert isinstance(name, str)
if (scope is None):
scope = global_scope()
assert isinstance(scope, core._Scope)
var = scope.find_var(name)
assert (var is not None), (('Cannot find ' + name) + ' in scope. Perhaps you need to make the variable persistable by using var.persistable = True in your program.')
tensor = var.get_tensor()
if return_numpy:
tensor = as_numpy(tensor)
return tensor
| -6,382,690,931,197,901,000
|
Fetch the value of the variable with the given name from the
given scope.
Args:
name(str): name of the variable. Typically, only persistable variables
can be found in the scope used for running the program.
scope(core.Scope|None): scope object. It should be the scope where
you pass to Executor.run() when running your program.
If None, global_scope() will be used. Default None.
return_numpy(bool): whether convert the tensor to numpy.ndarray.
Default True.
Returns:
LodTensor|numpy.ndarray
|
python/paddle/fluid/executor.py
|
_fetch_var
|
AnKingOne/Paddle
|
python
|
def _fetch_var(name, scope=None, return_numpy=True):
    """Fetch the value of the variable with the given name from a scope.

    Args:
        name(str): name of the variable. Typically, only persistable
            variables can be found in the scope used for running the program.
        scope(core.Scope|None): the scope passed to Executor.run(); if None,
            global_scope() is used. Default None.
        return_numpy(bool): whether to convert the tensor to numpy.ndarray.
            Default True.

    Returns:
        LodTensor|numpy.ndarray
    """
    # NOTE(review): duplicates `_fetch_var` defined earlier in the dump.
    assert isinstance(name, str)
    if (scope is None):
        scope = global_scope()
    assert isinstance(scope, core._Scope)
    var = scope.find_var(name)
    assert (var is not None), (('Cannot find ' + name) + ' in scope. Perhaps you need to make the variable persistable by using var.persistable = True in your program.')
    tensor = var.get_tensor()
    if return_numpy:
        tensor = as_numpy(tensor)
    return tensor
|
def _as_lodtensor(data, place):
'\n Convert numpy.ndarray to Tensor, its only support Tensor without LoD information.\n For higher dimensional sequence data, please use LoDTensor directly.\n\n Examples:\n >>> import paddle.fluid as fluid\n >>> place = fluid.CPUPlace()\n >>> exe = fluid.executor(place)\n >>> data = np.array(size=(100, 200, 300))\n >>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data)\n >>> ...\n\n Args:\n data(numpy.ndarray): a instance of array\n\n Returns:\n LoDTensor\n '
if isinstance(data, list):
raise RuntimeError('Some of your feed data hold LoD information. They can not be completely cast from a list of Python ndarray to LoDTensor. Please convert data to LoDTensor directly before feeding the data. ')
tensor = core.LoDTensor()
tensor.set(data, place)
return tensor
| -7,073,756,137,704,113,000
|
Convert numpy.ndarray to Tensor, its only support Tensor without LoD information.
For higher dimensional sequence data, please use LoDTensor directly.
Examples:
>>> import paddle.fluid as fluid
>>> place = fluid.CPUPlace()
>>> exe = fluid.executor(place)
>>> data = np.array(size=(100, 200, 300))
>>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data)
>>> ...
Args:
data(numpy.ndarray): a instance of array
Returns:
LoDTensor
|
python/paddle/fluid/executor.py
|
_as_lodtensor
|
AnKingOne/Paddle
|
python
|
def _as_lodtensor(data, place):
    """Convert a numpy.ndarray to a LoDTensor.

    Only supports Tensors without LoD information; for higher dimensional
    sequence data, use LoDTensor directly.

    Args:
        data(numpy.ndarray): the array to convert.
        place: the device place for the tensor.

    Returns:
        LoDTensor

    Raises:
        RuntimeError: if *data* is a list (would require LoD information).
    """
    # NOTE(review): duplicates `_as_lodtensor` defined earlier in the dump.
    if isinstance(data, list):
        raise RuntimeError('Some of your feed data hold LoD information. They can not be completely cast from a list of Python ndarray to LoDTensor. Please convert data to LoDTensor directly before feeding the data. ')
    tensor = core.LoDTensor()
    tensor.set(data, place)
    return tensor
|
def close(self):
    """Close this executor; it must not be used after this call.

    For distributed training, this frees the resources on PServers related
    to the current Trainer.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            cpu = fluid.CPUPlace()
            exe = fluid.Executor(cpu)
            # execute training or testing
            exe.close()
    """
    # Idempotent: a second call is a no-op.
    if self._closed:
        return
    self._default_executor.close()
    self._closed = True
| -7,197,737,734,027,222,000
|
Close this executor.
You can no longer use this executor after calling this method.
For the distributed training, this method would free the resource
on PServers related to the current Trainer.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cpu = fluid.CPUPlace()
exe = fluid.Executor(cpu)
# execute training or testing
exe.close()
|
python/paddle/fluid/executor.py
|
close
|
AnKingOne/Paddle
|
python
|
def close(self):
    """Close this executor; it must not be used after this call.

    For distributed training, this frees the resources on PServers related
    to the current Trainer.
    """
    # NOTE(review): duplicates `close` defined earlier in the dump.
    if (not self._closed):
        self._default_executor.close()
        self._closed = True
|
def run(self, program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True, use_program_cache=False):
    """Run a program with this Executor.

    Feed data via *feed*, fetch results via *fetch_list*. The executor adds
    feed and fetch operators to the program according to the feed map and
    fetch list. Note: the executor runs all operators in the program, not
    only those the fetch_list depends on.

    Args:
        program(Program|CompiledProgram): the program to run; if not
            provided, default_main_program (not compiled) is used.
        feed(dict): feed variable map, e.g. {"image": ImageData,
            "label": LabelData}.
        fetch_list(list): variables (or names) to return after the run.
        feed_var_name(str): name of the input variable of the feed operator.
        fetch_var_name(str): name of the output variable of the fetch
            operator.
        scope(Scope): the scope used to run this program; defaults to
            global_scope.
        return_numpy(bool): whether to convert fetched tensors to numpy.
        use_program_cache(bool): whether to reuse cached program settings
            across batches; only faster when the program is not compiled
            with data parallel and program/feed/fetch names are unchanged
            between steps.

    Returns:
        list(numpy.array): fetch results according to fetch_list.
    """
    try:
        return self._run_impl(
            program=program,
            feed=feed,
            fetch_list=fetch_list,
            feed_var_name=feed_var_name,
            fetch_var_name=fetch_var_name,
            scope=scope,
            return_numpy=return_numpy,
            use_program_cache=use_program_cache)
    except Exception as err:
        # EOFException signals normal end-of-data; re-raise it silently.
        if not isinstance(err, core.EOFException):
            print('An exception was thrown!\n {}'.format(str(err)))
        raise err
| -8,958,766,470,868,862,000
|
Run program by this Executor. Feed data by feed map, fetch result by
fetch_list. Python executor takes a program, add feed operators and
fetch operators to this program according to feed map and fetch_list.
Feed map provides input data for the program. fetch_list provides
the variables(or names) that user want to get after program run.
Note: the executor will run all operators in the program but not
only the operators dependent by the fetch_list.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
# First create the Executor.
place = fluid.CPUPlace() # fluid.CUDAPlace(0)
exe = fluid.Executor(place)
data = fluid.layers.data(name='X', shape=[1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
adam = fluid.optimizer.Adam()
adam.minimize(loss)
# Run the startup program once and only once.
exe.run(fluid.default_startup_program())
x = numpy.random.random(size=(10, 1)).astype('float32')
outs = exe.run(feed={'X': x},
fetch_list=[loss.name])
Args:
program(Program|CompiledProgram): the program that need to run,
if not provided, then default_main_program (not compiled) will be used.
feed(dict): feed variable map, e.g. {"image": ImageData, "label": LabelData}
fetch_list(list): a list of variable or variable names that user
wants to get, this method will return them according to this list.
feed_var_name(str): the name for the input variable of
feed Operator.
fetch_var_name(str): the name for the output variable of
fetch Operator.
scope(Scope): the scope used to run this program, you can switch
it to different scope. default is global_scope
return_numpy(bool): if convert the fetched tensor to numpy
use_program_cache(bool): whether to use the cached program
settings across batches. Setting it be true would be faster
only when (1) the program is not compiled with data parallel,
and (2) program, feed variable names and fetch_list variable
names do not changed compared to the last step.
Returns:
list(numpy.array): fetch result according to fetch_list.
|
python/paddle/fluid/executor.py
|
run
|
AnKingOne/Paddle
|
python
|
def run(self, program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True, use_program_cache=False):
    """Run a program with this Executor, feeding *feed* and fetching
    *fetch_list*.

    Note: the executor runs all operators in the program, not only those
    the fetch_list depends on. See the earlier `run` definition in this
    dump for full parameter documentation.

    Returns:
        list(numpy.array): fetch results according to fetch_list.
    """
    # NOTE(review): duplicates `run` defined earlier in the dump; the
    # original docstring literal was garbled across two physical lines in
    # this extraction and has been replaced with a valid one.
    try:
        return self._run_impl(program=program, feed=feed, fetch_list=fetch_list, feed_var_name=feed_var_name, fetch_var_name=fetch_var_name, scope=scope, return_numpy=return_numpy, use_program_cache=use_program_cache)
    except Exception as e:
        # EOFException signals normal end-of-data; re-raise it silently.
        if (not isinstance(e, core.EOFException)):
            print('An exception was thrown!\n {}'.format(str(e)))
        raise e
|
def infer_from_dataset(self, program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100):
    """Run inference over a Dataset.

    Almost the same as train_from_dataset, except that in distributed
    training, pushing gradients is disabled. Convenient for multi-thread
    evaluation.

    Args:
        program(Program|CompiledProgram): the program to run; if not
            provided, default_main_program (not compiled) is used.
        dataset(paddle.fluid.Dataset): a well-defined dataset created by
            the caller before this call. Required; default is None.
        scope(Scope): the scope used to run this program; defaults to
            global_scope.
        thread(int): number of threads to run with. The actual number is
            min(Dataset.thread_num, thread) if thread > 0. Default 0.
        debug(bool): whether to run in debug mode. Default False.
        fetch_list(Variable List): variables printed during inference.
            Default None.
        fetch_info(String List): print information for each variable.
            Default None.
        print_period(int): number of mini-batches between prints.
            Default 100.

    Returns:
        None

    Raises:
        RuntimeError: if *dataset* is not provided.
    """
    # Fixed: use `is None` rather than `== None` (PEP 8; equality may
    # invoke an arbitrary __eq__ overload on dataset-like objects).
    if dataset is None:
        raise RuntimeError('dataset is needed and should be initialized')
    dataset._prepare_to_run()
    (scope, trainer) = self._prepare_trainer(program=program, dataset=dataset, scope=scope, thread=thread, debug=debug, fetch_list=fetch_list, fetch_info=fetch_info, print_period=print_period)
    # Inference mode: disables gradient push during distributed training.
    trainer._set_infer(True)
    trainer._gen_trainer_desc()
    self._dump_debug_info(program=program, trainer=trainer)
    self._default_executor.run_from_dataset(program.desc, scope, dataset.dataset, trainer._desc())
    dataset._finish_to_run()
    return None
| 5,420,110,943,490,376,000
|
The document of infer_from_dataset is almost the same as
train_from_dataset, except that in distributed training,
push gradients will be disabled in infer_from_dataset.
infer_from_dataset() can be used for evaluation in multi-thread
very easily.
Args:
program(Program|CompiledProgram): the program that needs to be run,
if not provided, then default_main_program (not compiled) will be used.
dataset(paddle.fluid.Dataset): dataset created outside this function,
a user should provide a well-defined dataset before calling this function.
Please check the document of Dataset if needed. default is None
scope(Scope): the scope used to run this program, you can switch it to different scope
for each run. default is global_scope
thread(int): number of thread a user wants to run in this function. The actual number
of thread will be min(Dataset.thread_num, thread) if thread > 0, default is 0
debug(bool): whether a user wants to run infer_from_dataset, default is False
fetch_list(Variable List): fetch variable list, each variable
will be printed during training, default is None
fetch_info(String List): print information for each variable, default is None
print_period(int): the number of mini-batches for each print, default is 100
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
exe = fluid.Executor(place)
x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_use_var([x, y])
dataset.set_thread(1)
filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
dataset.set_filelist(filelist)
exe.run(fluid.default_startup_program())
exe.infer_from_dataset(program=fluid.default_main_program(),
dataset=dataset)
|
python/paddle/fluid/executor.py
|
infer_from_dataset
|
AnKingOne/Paddle
|
python
|
def infer_from_dataset(self, program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100):
'\n The document of infer_from_dataset is almost the same as\n train_from_dataset, except that in distributed training,\n push gradients will be disabled in infer_from_dataset.\n infer_from_dataset() can be used for evaluation in multi-thread\n very easily.\n\n Args:\n program(Program|CompiledProgram): the program that needs to be run,\n if not provided, then default_main_program (not compiled) will be used.\n dataset(paddle.fluid.Dataset): dataset created outside this function,\n a user should provide a well-defined dataset before calling this function.\n Please check the document of Dataset if needed. default is None\n scope(Scope): the scope used to run this program, you can switch it to different scope\n for each run. default is global_scope\n thread(int): number of thread a user wants to run in this function. The actual number\n of thread will be min(Dataset.thread_num, thread) if thread > 0, default is 0\n debug(bool): whether a user wants to run infer_from_dataset, default is False\n fetch_list(Variable List): fetch variable list, each variable\n will be printed during training, default is None\n fetch_info(String List): print information for each variable, default is None\n print_period(int): the number of mini-batches for each print, default is 100\n\n Returns:\n None\n\n Examples:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu\n exe = fluid.Executor(place)\n x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")\n y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_use_var([x, y])\n dataset.set_thread(1)\n filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]\n dataset.set_filelist(filelist)\n exe.run(fluid.default_startup_program())\n exe.infer_from_dataset(program=fluid.default_main_program(),\n dataset=dataset) \n\n '
if (dataset == None):
raise RuntimeError('dataset is needed and should be initialized')
dataset._prepare_to_run()
(scope, trainer) = self._prepare_trainer(program=program, dataset=dataset, scope=scope, thread=thread, debug=debug, fetch_list=fetch_list, fetch_info=fetch_info, print_period=print_period)
trainer._set_infer(True)
trainer._gen_trainer_desc()
self._dump_debug_info(program=program, trainer=trainer)
self._default_executor.run_from_dataset(program.desc, scope, dataset.dataset, trainer._desc())
dataset._finish_to_run()
return None
|
def train_from_dataset(self, program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100):
'\n Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.\n Given a program, either a program or compiled program, train_from_dataset will\n consume all data samples in dataset. Input scope can be given by users. By default,\n scope is global_scope(). The total number of thread run in training is `thread`.\n Thread number used in training will be minimum value of threadnum in Dataset and\n the value of thread in this interface. Debug can be set so that executor will display\n Run-Time for all operators and the throughputs of current training task.\n \n Note: train_from_dataset will destroy all resources created within executor for each run.\n\n Args:\n program(Program|CompiledProgram): the program that needs to be run,\n if not provided, then default_main_program (not compiled) will be used.\n dataset(paddle.fluid.Dataset): dataset created outside this function,\n a user should provide a well-defined dataset before calling this function.\n Please check the document of Dataset if needed.\n scope(Scope): the scope used to run this program, you can switch it to different scope\n for each run. default is global_scope\n thread(int): number of thread a user wants to run in this function. The actual number\n of thread will be min(Dataset.thread_num, thread)\n debug(bool): whether a user wants to run train_from_dataset \n fetch_list(Variable List): fetch variable list, each variable\n will be printed during training\n fetch_info(String List): print information for each variable\n print_period(int): the number of mini-batches for each print\n\n Returns:\n None\n \n Examples:\n \n .. 
code-block:: python\n\n import paddle.fluid as fluid\n\n place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu\n exe = fluid.Executor(place)\n x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")\n y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_use_var([x, y])\n dataset.set_thread(1)\n filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]\n dataset.set_filelist(filelist)\n exe.run(fluid.default_startup_program())\n exe.train_from_dataset(program=fluid.default_main_program(),\n dataset=dataset)\n\n '
if (dataset == None):
raise RuntimeError('dataset is need and should be initialized')
if program._pipeline_opt:
thread = self._adjust_pipeline_resource(program._pipeline_opt, dataset, thread)
dataset._prepare_to_run()
(scope, trainer) = self._prepare_trainer(program=program, dataset=dataset, scope=scope, thread=thread, debug=debug, fetch_list=fetch_list, fetch_info=fetch_info, print_period=print_period)
trainer._gen_trainer_desc()
self._dump_debug_info(program=program, trainer=trainer)
self._default_executor.run_from_dataset(program.desc, scope, dataset.dataset, trainer._desc())
dataset._finish_to_run()
return None
| -4,721,268,134,907,001,000
|
Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.
Given a program, either a program or compiled program, train_from_dataset will
consume all data samples in dataset. Input scope can be given by users. By default,
scope is global_scope(). The total number of thread run in training is `thread`.
Thread number used in training will be minimum value of threadnum in Dataset and
the value of thread in this interface. Debug can be set so that executor will display
Run-Time for all operators and the throughputs of current training task.
Note: train_from_dataset will destroy all resources created within executor for each run.
Args:
program(Program|CompiledProgram): the program that needs to be run,
if not provided, then default_main_program (not compiled) will be used.
dataset(paddle.fluid.Dataset): dataset created outside this function,
a user should provide a well-defined dataset before calling this function.
Please check the document of Dataset if needed.
scope(Scope): the scope used to run this program, you can switch it to different scope
for each run. default is global_scope
thread(int): number of thread a user wants to run in this function. The actual number
of thread will be min(Dataset.thread_num, thread)
debug(bool): whether a user wants to run train_from_dataset
fetch_list(Variable List): fetch variable list, each variable
will be printed during training
fetch_info(String List): print information for each variable
print_period(int): the number of mini-batches for each print
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
exe = fluid.Executor(place)
x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_use_var([x, y])
dataset.set_thread(1)
filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
dataset.set_filelist(filelist)
exe.run(fluid.default_startup_program())
exe.train_from_dataset(program=fluid.default_main_program(),
dataset=dataset)
|
python/paddle/fluid/executor.py
|
train_from_dataset
|
AnKingOne/Paddle
|
python
|
def train_from_dataset(self, program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100):
'\n Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.\n Given a program, either a program or compiled program, train_from_dataset will\n consume all data samples in dataset. Input scope can be given by users. By default,\n scope is global_scope(). The total number of thread run in training is `thread`.\n Thread number used in training will be minimum value of threadnum in Dataset and\n the value of thread in this interface. Debug can be set so that executor will display\n Run-Time for all operators and the throughputs of current training task.\n \n Note: train_from_dataset will destroy all resources created within executor for each run.\n\n Args:\n program(Program|CompiledProgram): the program that needs to be run,\n if not provided, then default_main_program (not compiled) will be used.\n dataset(paddle.fluid.Dataset): dataset created outside this function,\n a user should provide a well-defined dataset before calling this function.\n Please check the document of Dataset if needed.\n scope(Scope): the scope used to run this program, you can switch it to different scope\n for each run. default is global_scope\n thread(int): number of thread a user wants to run in this function. The actual number\n of thread will be min(Dataset.thread_num, thread)\n debug(bool): whether a user wants to run train_from_dataset \n fetch_list(Variable List): fetch variable list, each variable\n will be printed during training\n fetch_info(String List): print information for each variable\n print_period(int): the number of mini-batches for each print\n\n Returns:\n None\n \n Examples:\n \n .. 
code-block:: python\n\n import paddle.fluid as fluid\n\n place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu\n exe = fluid.Executor(place)\n x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")\n y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_use_var([x, y])\n dataset.set_thread(1)\n filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]\n dataset.set_filelist(filelist)\n exe.run(fluid.default_startup_program())\n exe.train_from_dataset(program=fluid.default_main_program(),\n dataset=dataset)\n\n '
if (dataset == None):
raise RuntimeError('dataset is need and should be initialized')
if program._pipeline_opt:
thread = self._adjust_pipeline_resource(program._pipeline_opt, dataset, thread)
dataset._prepare_to_run()
(scope, trainer) = self._prepare_trainer(program=program, dataset=dataset, scope=scope, thread=thread, debug=debug, fetch_list=fetch_list, fetch_info=fetch_info, print_period=print_period)
trainer._gen_trainer_desc()
self._dump_debug_info(program=program, trainer=trainer)
self._default_executor.run_from_dataset(program.desc, scope, dataset.dataset, trainer._desc())
dataset._finish_to_run()
return None
|
def placeholder_inputs(batch_size):
'Generate placeholder variables to represent the input tensors.\n These placeholders are used as inputs by the rest of the model building\n code and will be fed from the downloaded data in the .run() loop, below.\n Args:\n batch_size: The batch size will be baked into both placeholders.\n Returns:\n images_placeholder: Images placeholder.\n labels_placeholder: Labels placeholder.\n '
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, c3d_model.NUM_FRAMES_PER_CLIP, c3d_model.CROP_SIZE, c3d_model.CROP_SIZE, c3d_model.CHANNELS))
labels_placeholder = tf.placeholder(tf.int64, shape=batch_size)
return (images_placeholder, labels_placeholder)
| 4,792,516,056,658,818,000
|
Generate placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
code and will be fed from the downloaded data in the .run() loop, below.
Args:
batch_size: The batch size will be baked into both placeholders.
Returns:
images_placeholder: Images placeholder.
labels_placeholder: Labels placeholder.
|
c3d_model/predict_c3d_ucf101.py
|
placeholder_inputs
|
b-safwat/multi_action_recognition
|
python
|
def placeholder_inputs(batch_size):
'Generate placeholder variables to represent the input tensors.\n These placeholders are used as inputs by the rest of the model building\n code and will be fed from the downloaded data in the .run() loop, below.\n Args:\n batch_size: The batch size will be baked into both placeholders.\n Returns:\n images_placeholder: Images placeholder.\n labels_placeholder: Labels placeholder.\n '
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, c3d_model.NUM_FRAMES_PER_CLIP, c3d_model.CROP_SIZE, c3d_model.CROP_SIZE, c3d_model.CHANNELS))
labels_placeholder = tf.placeholder(tf.int64, shape=batch_size)
return (images_placeholder, labels_placeholder)
|
def GenerateCSRFToken(user_id, time):
'Generates a CSRF token based on a secret key, id and time.'
precondition.AssertType(user_id, Text)
precondition.AssertOptionalType(time, int)
time = (time or rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch())
secret = config.CONFIG.Get('AdminUI.csrf_secret_key', None)
if (secret is None):
raise ValueError('CSRF secret not available.')
digester = hmac.new(secret.encode('ascii'), digestmod=hashlib.sha256)
digester.update(user_id.encode('ascii'))
digester.update(CSRF_DELIMITER)
digester.update(str(time).encode('ascii'))
digest = digester.digest()
token = base64.urlsafe_b64encode((b'%s%s%d' % (digest, CSRF_DELIMITER, time)))
return token.rstrip(b'=')
| 6,125,651,692,541,662,000
|
Generates a CSRF token based on a secret key, id and time.
|
grr/server/grr_response_server/gui/wsgiapp.py
|
GenerateCSRFToken
|
Codehardt/grr
|
python
|
def GenerateCSRFToken(user_id, time):
precondition.AssertType(user_id, Text)
precondition.AssertOptionalType(time, int)
time = (time or rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch())
secret = config.CONFIG.Get('AdminUI.csrf_secret_key', None)
if (secret is None):
raise ValueError('CSRF secret not available.')
digester = hmac.new(secret.encode('ascii'), digestmod=hashlib.sha256)
digester.update(user_id.encode('ascii'))
digester.update(CSRF_DELIMITER)
digester.update(str(time).encode('ascii'))
digest = digester.digest()
token = base64.urlsafe_b64encode((b'%s%s%d' % (digest, CSRF_DELIMITER, time)))
return token.rstrip(b'=')
|
def StoreCSRFCookie(user, response):
'Decorator for WSGI handler that inserts CSRF cookie into response.'
csrf_token = GenerateCSRFToken(user, None)
response.set_cookie('csrftoken', csrf_token, max_age=CSRF_TOKEN_DURATION.seconds)
| 4,536,204,827,103,691,300
|
Decorator for WSGI handler that inserts CSRF cookie into response.
|
grr/server/grr_response_server/gui/wsgiapp.py
|
StoreCSRFCookie
|
Codehardt/grr
|
python
|
def StoreCSRFCookie(user, response):
csrf_token = GenerateCSRFToken(user, None)
response.set_cookie('csrftoken', csrf_token, max_age=CSRF_TOKEN_DURATION.seconds)
|
def ValidateCSRFTokenOrRaise(request):
'Decorator for WSGI handler that checks CSRF cookie against the request.'
if (request.method in ('GET', 'HEAD')):
return
csrf_token = request.headers.get('X-CSRFToken', '').encode('ascii')
if (not csrf_token):
logging.info('Did not find headers CSRF token for: %s', request.path)
raise werkzeug_exceptions.Forbidden('CSRF token is missing')
try:
decoded = base64.urlsafe_b64decode((csrf_token + b'=='))
(digest, token_time) = decoded.rsplit(CSRF_DELIMITER, 1)
token_time = int(token_time)
except (TypeError, ValueError):
logging.info('Malformed CSRF token for: %s', request.path)
raise werkzeug_exceptions.Forbidden('Malformed CSRF token')
if (len(digest) != hashlib.sha256().digest_size):
logging.info('Invalid digest size for: %s', request.path)
raise werkzeug_exceptions.Forbidden('Malformed CSRF token digest')
expected = GenerateCSRFToken(request.user, token_time)
if (not constant_time.bytes_eq(csrf_token, expected)):
logging.info('Non-matching CSRF token for: %s', request.path)
raise werkzeug_exceptions.Forbidden('Non-matching CSRF token')
current_time = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
if ((current_time - token_time) > CSRF_TOKEN_DURATION.microseconds):
logging.info('Expired CSRF token for: %s', request.path)
raise werkzeug_exceptions.Forbidden('Expired CSRF token')
| -7,794,270,443,633,931,000
|
Decorator for WSGI handler that checks CSRF cookie against the request.
|
grr/server/grr_response_server/gui/wsgiapp.py
|
ValidateCSRFTokenOrRaise
|
Codehardt/grr
|
python
|
def ValidateCSRFTokenOrRaise(request):
if (request.method in ('GET', 'HEAD')):
return
csrf_token = request.headers.get('X-CSRFToken', ).encode('ascii')
if (not csrf_token):
logging.info('Did not find headers CSRF token for: %s', request.path)
raise werkzeug_exceptions.Forbidden('CSRF token is missing')
try:
decoded = base64.urlsafe_b64decode((csrf_token + b'=='))
(digest, token_time) = decoded.rsplit(CSRF_DELIMITER, 1)
token_time = int(token_time)
except (TypeError, ValueError):
logging.info('Malformed CSRF token for: %s', request.path)
raise werkzeug_exceptions.Forbidden('Malformed CSRF token')
if (len(digest) != hashlib.sha256().digest_size):
logging.info('Invalid digest size for: %s', request.path)
raise werkzeug_exceptions.Forbidden('Malformed CSRF token digest')
expected = GenerateCSRFToken(request.user, token_time)
if (not constant_time.bytes_eq(csrf_token, expected)):
logging.info('Non-matching CSRF token for: %s', request.path)
raise werkzeug_exceptions.Forbidden('Non-matching CSRF token')
current_time = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
if ((current_time - token_time) > CSRF_TOKEN_DURATION.microseconds):
logging.info('Expired CSRF token for: %s', request.path)
raise werkzeug_exceptions.Forbidden('Expired CSRF token')
|
def LogAccessWrapper(func):
'Decorator that ensures that HTTP access is logged.'
def Wrapper(request, *args, **kwargs):
'Wrapping function.'
try:
response = func(request, *args, **kwargs)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response)
except Exception:
response = werkzeug_wrappers.Response('', status=500)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response)
raise
return response
return Wrapper
| -115,557,866,535,678,200
|
Decorator that ensures that HTTP access is logged.
|
grr/server/grr_response_server/gui/wsgiapp.py
|
LogAccessWrapper
|
Codehardt/grr
|
python
|
def LogAccessWrapper(func):
def Wrapper(request, *args, **kwargs):
'Wrapping function.'
try:
response = func(request, *args, **kwargs)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response)
except Exception:
response = werkzeug_wrappers.Response(, status=500)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response)
raise
return response
return Wrapper
|
def Wrapper(request, *args, **kwargs):
'Wrapping function.'
try:
response = func(request, *args, **kwargs)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response)
except Exception:
response = werkzeug_wrappers.Response('', status=500)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response)
raise
return response
| -986,668,722,510,930,300
|
Wrapping function.
|
grr/server/grr_response_server/gui/wsgiapp.py
|
Wrapper
|
Codehardt/grr
|
python
|
def Wrapper(request, *args, **kwargs):
try:
response = func(request, *args, **kwargs)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response)
except Exception:
response = werkzeug_wrappers.Response(, status=500)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response)
raise
return response
|
def _BuildToken(self, request, execution_time):
'Build an ACLToken from the request.'
token = access_control.ACLToken(username=request.user, reason=request.args.get('reason', ''), process='GRRAdminUI', expiry=(rdfvalue.RDFDatetime.Now() + execution_time))
for field in ['Remote_Addr', 'X-Forwarded-For']:
remote_addr = request.headers.get(field, '')
if remote_addr:
token.source_ips.append(remote_addr)
return token
| 3,942,364,055,699,815,400
|
Build an ACLToken from the request.
|
grr/server/grr_response_server/gui/wsgiapp.py
|
_BuildToken
|
Codehardt/grr
|
python
|
def _BuildToken(self, request, execution_time):
token = access_control.ACLToken(username=request.user, reason=request.args.get('reason', ), process='GRRAdminUI', expiry=(rdfvalue.RDFDatetime.Now() + execution_time))
for field in ['Remote_Addr', 'X-Forwarded-For']:
remote_addr = request.headers.get(field, )
if remote_addr:
token.source_ips.append(remote_addr)
return token
|
def _HandleHomepage(self, request):
'Renders GRR home page by rendering base.html Jinja template.'
_ = request
env = jinja2.Environment(loader=jinja2.FileSystemLoader(config.CONFIG['AdminUI.template_root']), autoescape=True)
create_time = psutil.Process(os.getpid()).create_time()
context = {'heading': config.CONFIG['AdminUI.heading'], 'report_url': config.CONFIG['AdminUI.report_url'], 'help_url': config.CONFIG['AdminUI.help_url'], 'timestamp': utils.SmartStr(create_time), 'use_precompiled_js': config.CONFIG['AdminUI.use_precompiled_js'], 'firebase_api_key': config.CONFIG['AdminUI.firebase_api_key'], 'firebase_auth_domain': config.CONFIG['AdminUI.firebase_auth_domain'], 'firebase_auth_provider': config.CONFIG['AdminUI.firebase_auth_provider'], 'grr_version': config.CONFIG['Source.version_string']}
template = env.get_template('base.html')
response = werkzeug_wrappers.Response(template.render(context), mimetype='text/html')
try:
StoreCSRFCookie(request.user, response)
except RequestHasNoUser:
pass
return response
| 6,814,278,485,432,112,000
|
Renders GRR home page by rendering base.html Jinja template.
|
grr/server/grr_response_server/gui/wsgiapp.py
|
_HandleHomepage
|
Codehardt/grr
|
python
|
def _HandleHomepage(self, request):
_ = request
env = jinja2.Environment(loader=jinja2.FileSystemLoader(config.CONFIG['AdminUI.template_root']), autoescape=True)
create_time = psutil.Process(os.getpid()).create_time()
context = {'heading': config.CONFIG['AdminUI.heading'], 'report_url': config.CONFIG['AdminUI.report_url'], 'help_url': config.CONFIG['AdminUI.help_url'], 'timestamp': utils.SmartStr(create_time), 'use_precompiled_js': config.CONFIG['AdminUI.use_precompiled_js'], 'firebase_api_key': config.CONFIG['AdminUI.firebase_api_key'], 'firebase_auth_domain': config.CONFIG['AdminUI.firebase_auth_domain'], 'firebase_auth_provider': config.CONFIG['AdminUI.firebase_auth_provider'], 'grr_version': config.CONFIG['Source.version_string']}
template = env.get_template('base.html')
response = werkzeug_wrappers.Response(template.render(context), mimetype='text/html')
try:
StoreCSRFCookie(request.user, response)
except RequestHasNoUser:
pass
return response
|
def _HandleApi(self, request):
'Handles API requests.'
ValidateCSRFTokenOrRaise(request)
response = http_api.RenderHttpResponse(request)
if (('csrftoken' not in request.cookies) or (response.headers.get('X-API-Method', '') == 'GetPendingUserNotificationsCount')):
StoreCSRFCookie(request.user, response)
return response
| 6,756,775,622,371,802,000
|
Handles API requests.
|
grr/server/grr_response_server/gui/wsgiapp.py
|
_HandleApi
|
Codehardt/grr
|
python
|
def _HandleApi(self, request):
ValidateCSRFTokenOrRaise(request)
response = http_api.RenderHttpResponse(request)
if (('csrftoken' not in request.cookies) or (response.headers.get('X-API-Method', ) == 'GetPendingUserNotificationsCount')):
StoreCSRFCookie(request.user, response)
return response
|
def _RedirectToRemoteHelp(self, path):
'Redirect to GitHub-hosted documentation.'
allowed_chars = set(((string.ascii_letters + string.digits) + '._-/'))
if (not (set(path) <= allowed_chars)):
raise RuntimeError(('Unusual chars in path %r - possible exploit attempt.' % path))
target_path = os.path.join(config.CONFIG['AdminUI.docs_location'], path)
return werkzeug_wrappers.Response(("\n<script>\nvar friendly_hash = window.location.hash;\nwindow.location = '%s' + friendly_hash;\n</script>\n" % target_path), mimetype='text/html')
| -4,929,114,115,641,130,000
|
Redirect to GitHub-hosted documentation.
|
grr/server/grr_response_server/gui/wsgiapp.py
|
_RedirectToRemoteHelp
|
Codehardt/grr
|
python
|
def _RedirectToRemoteHelp(self, path):
allowed_chars = set(((string.ascii_letters + string.digits) + '._-/'))
if (not (set(path) <= allowed_chars)):
raise RuntimeError(('Unusual chars in path %r - possible exploit attempt.' % path))
target_path = os.path.join(config.CONFIG['AdminUI.docs_location'], path)
return werkzeug_wrappers.Response(("\n<script>\nvar friendly_hash = window.location.hash;\nwindow.location = '%s' + friendly_hash;\n</script>\n" % target_path), mimetype='text/html')
|
def _HandleHelp(self, request):
'Handles help requests.'
help_path = request.path.split('/', 2)[(- 1)]
if (not help_path):
raise werkzeug_exceptions.Forbidden('Error: Invalid help path.')
return self._RedirectToRemoteHelp(help_path)
| -810,152,685,980,187,800
|
Handles help requests.
|
grr/server/grr_response_server/gui/wsgiapp.py
|
_HandleHelp
|
Codehardt/grr
|
python
|
def _HandleHelp(self, request):
help_path = request.path.split('/', 2)[(- 1)]
if (not help_path):
raise werkzeug_exceptions.Forbidden('Error: Invalid help path.')
return self._RedirectToRemoteHelp(help_path)
|
@werkzeug_wsgi.responder
def __call__(self, environ, start_response):
'Dispatches a request.'
request = self._BuildRequest(environ)
matcher = self.routing_map.bind_to_environ(environ)
try:
(endpoint, _) = matcher.match(request.path, request.method)
return endpoint(request)
except werkzeug_exceptions.NotFound as e:
logging.info('Request for non existent url: %s [%s]', request.path, request.method)
return e
except werkzeug_exceptions.HTTPException as e:
logging.exception('http exception: %s [%s]', request.path, request.method)
return e
| -6,936,825,454,743,817,000
|
Dispatches a request.
|
grr/server/grr_response_server/gui/wsgiapp.py
|
__call__
|
Codehardt/grr
|
python
|
@werkzeug_wsgi.responder
def __call__(self, environ, start_response):
request = self._BuildRequest(environ)
matcher = self.routing_map.bind_to_environ(environ)
try:
(endpoint, _) = matcher.match(request.path, request.method)
return endpoint(request)
except werkzeug_exceptions.NotFound as e:
logging.info('Request for non existent url: %s [%s]', request.path, request.method)
return e
except werkzeug_exceptions.HTTPException as e:
logging.exception('http exception: %s [%s]', request.path, request.method)
return e
|
def WSGIHandler(self):
"Returns GRR's WSGI handler."
sdm = werkzeug_wsgi.SharedDataMiddleware(self, {'/': config.CONFIG['AdminUI.document_root']})
return werkzeug_wsgi.DispatcherMiddleware(self, {'/static': sdm})
| -4,133,702,679,565,647,400
|
Returns GRR's WSGI handler.
|
grr/server/grr_response_server/gui/wsgiapp.py
|
WSGIHandler
|
Codehardt/grr
|
python
|
def WSGIHandler(self):
sdm = werkzeug_wsgi.SharedDataMiddleware(self, {'/': config.CONFIG['AdminUI.document_root']})
return werkzeug_wsgi.DispatcherMiddleware(self, {'/static': sdm})
|
def scope_vars(scope, trainable_only=False):
'\n Get variables inside a scope\n The scope can be specified as a string\n Parameters\n ----------\n scope: str or VariableScope\n scope in which the variables reside.\n trainable_only: bool\n whether or not to return only the variables that were marked as trainable.\n Returns\n -------\n vars: [tf.Variable]\n list of variables in `scope`.\n '
return tf.compat.v1.get_collection((tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.compat.v1.GraphKeys.GLOBAL_VARIABLES), scope=(scope if isinstance(scope, str) else scope.name))
| -3,037,051,232,383,622,000
|
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`.
|
baselines/deepq/build_graph.py
|
scope_vars
|
rwill128/baselines
|
python
|
def scope_vars(scope, trainable_only=False):
'\n Get variables inside a scope\n The scope can be specified as a string\n Parameters\n ----------\n scope: str or VariableScope\n scope in which the variables reside.\n trainable_only: bool\n whether or not to return only the variables that were marked as trainable.\n Returns\n -------\n vars: [tf.Variable]\n list of variables in `scope`.\n '
return tf.compat.v1.get_collection((tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.compat.v1.GraphKeys.GLOBAL_VARIABLES), scope=(scope if isinstance(scope, str) else scope.name))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.