body
stringlengths 26
98.2k
| body_hash
int64 -9,222,864,604,528,158,000
9,221,803,474B
| docstring
stringlengths 1
16.8k
| path
stringlengths 5
230
| name
stringlengths 1
96
| repository_name
stringlengths 7
89
| lang
stringclasses 1
value | body_without_docstring
stringlengths 20
98.2k
|
|---|---|---|---|---|---|---|---|
def find_job_id_by_name(self, job_name: str) -> Optional[int]:
    """Find a job id by its name.

    :param job_name: The name of the job to look up.
    :return: The job_id as an int, or None if no job was found.
    :raises AirflowException: if more than one job carries the given name.
    """
    matches = [
        job for job in self.list_jobs()
        if job['settings']['name'] == job_name
    ]
    if len(matches) > 1:
        raise AirflowException(f'There are more than one job with name {job_name}. Please delete duplicated jobs first')
    if not matches:
        return None
    return matches[0]['job_id']
| -3,557,110,850,345,249,300
|
Finds job id by its name. If there are multiple jobs with the same name, raises AirflowException.
:param job_name: The name of the job to look up.
:return: The job_id as an int or None if no job was found.
|
airflow/providers/databricks/hooks/databricks.py
|
find_job_id_by_name
|
AMS-Kepler/airflow
|
python
|
def find_job_id_by_name(self, job_name: str) -> Optional[int]:
    """Find a job id by its name.

    :param job_name: The name of the job to look up.
    :return: The job_id as an int, or None if no job was found.
    :raises AirflowException: if more than one job carries the given name.
    """
    matches = [
        job for job in self.list_jobs()
        if job['settings']['name'] == job_name
    ]
    if len(matches) > 1:
        raise AirflowException(f'There are more than one job with name {job_name}. Please delete duplicated jobs first')
    if not matches:
        return None
    return matches[0]['job_id']
|
def get_run_page_url(self, run_id: int) -> str:
    """Retrieve the run_page_url of a run.

    :param run_id: id of the run
    :return: URL of the run page
    """
    response = self._do_api_call(GET_RUN_ENDPOINT, {'run_id': run_id})
    return response['run_page_url']
| 3,152,716,416,204,417,000
|
Retrieves run_page_url.
:param run_id: id of the run
:return: URL of the run page
|
airflow/providers/databricks/hooks/databricks.py
|
get_run_page_url
|
AMS-Kepler/airflow
|
python
|
def get_run_page_url(self, run_id: int) -> str:
    """Retrieve the run_page_url of a run.

    :param run_id: id of the run
    :return: URL of the run page
    """
    response = self._do_api_call(GET_RUN_ENDPOINT, {'run_id': run_id})
    return response['run_page_url']
|
def get_job_id(self, run_id: int) -> int:
    """Retrieve the job_id a run belongs to.

    :param run_id: id of the run
    :return: job id for the given Databricks run
    """
    response = self._do_api_call(GET_RUN_ENDPOINT, {'run_id': run_id})
    return response['job_id']
| -6,716,865,040,276,354,000
|
Retrieves job_id from run_id.
:param run_id: id of the run
:return: Job id for given Databricks run
|
airflow/providers/databricks/hooks/databricks.py
|
get_job_id
|
AMS-Kepler/airflow
|
python
|
def get_job_id(self, run_id: int) -> int:
    """Retrieve the job_id a run belongs to.

    :param run_id: id of the run
    :return: job id for the given Databricks run
    """
    response = self._do_api_call(GET_RUN_ENDPOINT, {'run_id': run_id})
    return response['job_id']
|
def get_run_state(self, run_id: int) -> RunState:
    """Retrieve the run state of the run.

    Note that any Airflow task calling ``get_run_state`` will fail unless
    xcom pickling is enabled via the environment variable
    ``AIRFLOW__CORE__ENABLE_XCOM_PICKLING``.

    If you do not want to enable xcom pickling, use ``get_run_state_str``
    to get a string describing the state, or ``get_run_state_lifecycle``,
    ``get_run_state_result``, or ``get_run_state_message`` to get the
    individual components of the run state.

    :param run_id: id of the run
    :return: state of the run
    """
    response = self._do_api_call(GET_RUN_ENDPOINT, {'run_id': run_id})
    return RunState(**response['state'])
| 4,658,948,002,425,555,000
|
Retrieves run state of the run.
Please note that any Airflow tasks that call the ``get_run_state`` method will result in
failure unless you have enabled xcom pickling. This can be done using the following
environment variable: ``AIRFLOW__CORE__ENABLE_XCOM_PICKLING``
If you do not want to enable xcom pickling, use the ``get_run_state_str`` method to get
a string describing state, or ``get_run_state_lifecycle``, ``get_run_state_result``, or
``get_run_state_message`` to get individual components of the run state.
:param run_id: id of the run
:return: state of the run
|
airflow/providers/databricks/hooks/databricks.py
|
get_run_state
|
AMS-Kepler/airflow
|
python
|
def get_run_state(self, run_id: int) -> RunState:
    """Retrieve the run state of the run.

    Note that any Airflow task calling ``get_run_state`` will fail unless
    xcom pickling is enabled via the environment variable
    ``AIRFLOW__CORE__ENABLE_XCOM_PICKLING``.

    If you do not want to enable xcom pickling, use ``get_run_state_str``
    to get a string describing the state, or ``get_run_state_lifecycle``,
    ``get_run_state_result``, or ``get_run_state_message`` to get the
    individual components of the run state.

    :param run_id: id of the run
    :return: state of the run
    """
    response = self._do_api_call(GET_RUN_ENDPOINT, {'run_id': run_id})
    return RunState(**response['state'])
|
def get_run_state_str(self, run_id: int) -> str:
    """Return a human-readable string representation of the RunState.

    :param run_id: id of the run
    :return: string describing run state
    """
    state = self.get_run_state(run_id)
    return (
        f'State: {state.life_cycle_state}. '
        f'Result: {state.result_state}. {state.state_message}'
    )
| 3,414,676,769,183,912,000
|
Return the string representation of RunState.
:param run_id: id of the run
:return: string describing run state
|
airflow/providers/databricks/hooks/databricks.py
|
get_run_state_str
|
AMS-Kepler/airflow
|
python
|
def get_run_state_str(self, run_id: int) -> str:
    """Return a human-readable string representation of the RunState.

    :param run_id: id of the run
    :return: string describing run state
    """
    state = self.get_run_state(run_id)
    return (
        f'State: {state.life_cycle_state}. '
        f'Result: {state.result_state}. {state.state_message}'
    )
|
def get_run_state_lifecycle(self, run_id: int) -> str:
    """Return the lifecycle state of the run.

    :param run_id: id of the run
    :return: string with lifecycle state
    """
    state = self.get_run_state(run_id)
    return state.life_cycle_state
| 556,569,400,692,368,640
|
Returns the lifecycle state of the run
:param run_id: id of the run
:return: string with lifecycle state
|
airflow/providers/databricks/hooks/databricks.py
|
get_run_state_lifecycle
|
AMS-Kepler/airflow
|
python
|
def get_run_state_lifecycle(self, run_id: int) -> str:
    """Return the lifecycle state of the run.

    :param run_id: id of the run
    :return: string with lifecycle state
    """
    state = self.get_run_state(run_id)
    return state.life_cycle_state
|
def get_run_state_result(self, run_id: int) -> str:
    """Return the resulting state of the run.

    :param run_id: id of the run
    :return: string with resulting state
    """
    state = self.get_run_state(run_id)
    return state.result_state
| -7,836,864,939,624,392,000
|
Returns the resulting state of the run
:param run_id: id of the run
:return: string with resulting state
|
airflow/providers/databricks/hooks/databricks.py
|
get_run_state_result
|
AMS-Kepler/airflow
|
python
|
def get_run_state_result(self, run_id: int) -> str:
    """Return the resulting state of the run.

    :param run_id: id of the run
    :return: string with resulting state
    """
    state = self.get_run_state(run_id)
    return state.result_state
|
def get_run_state_message(self, run_id: int) -> str:
    """Return the state message for the run.

    :param run_id: id of the run
    :return: string with state message
    """
    state = self.get_run_state(run_id)
    return state.state_message
| -2,459,958,259,318,688,300
|
Returns the state message for the run
:param run_id: id of the run
:return: string with state message
|
airflow/providers/databricks/hooks/databricks.py
|
get_run_state_message
|
AMS-Kepler/airflow
|
python
|
def get_run_state_message(self, run_id: int) -> str:
    """Return the state message for the run.

    :param run_id: id of the run
    :return: string with state message
    """
    state = self.get_run_state(run_id)
    return state.state_message
|
def get_run_output(self, run_id: int) -> dict:
    """Retrieve the output of the run.

    :param run_id: id of the run
    :return: output of the run
    """
    return self._do_api_call(OUTPUT_RUNS_JOB_ENDPOINT, {'run_id': run_id})
| -1,827,507,862,642,975,700
|
Retrieves run output of the run.
:param run_id: id of the run
:return: output of the run
|
airflow/providers/databricks/hooks/databricks.py
|
get_run_output
|
AMS-Kepler/airflow
|
python
|
def get_run_output(self, run_id: int) -> dict:
    """Retrieve the output of the run.

    :param run_id: id of the run
    :return: output of the run
    """
    return self._do_api_call(OUTPUT_RUNS_JOB_ENDPOINT, {'run_id': run_id})
|
def cancel_run(self, run_id: int) -> None:
    """Cancel the run.

    :param run_id: id of the run
    """
    self._do_api_call(CANCEL_RUN_ENDPOINT, {'run_id': run_id})
| -7,019,494,973,630,746,000
|
Cancels the run.
:param run_id: id of the run
|
airflow/providers/databricks/hooks/databricks.py
|
cancel_run
|
AMS-Kepler/airflow
|
python
|
def cancel_run(self, run_id: int) -> None:
    """Cancel the run.

    :param run_id: id of the run
    """
    self._do_api_call(CANCEL_RUN_ENDPOINT, {'run_id': run_id})
|
def restart_cluster(self, json: dict) -> None:
    """Restart the cluster.

    :param json: json dictionary containing the cluster specification.
    """
    self._do_api_call(RESTART_CLUSTER_ENDPOINT, json)
| 6,216,211,878,414,445,000
|
Restarts the cluster.
:param json: json dictionary containing cluster specification.
|
airflow/providers/databricks/hooks/databricks.py
|
restart_cluster
|
AMS-Kepler/airflow
|
python
|
def restart_cluster(self, json: dict) -> None:
    """Restart the cluster.

    :param json: json dictionary containing the cluster specification.
    """
    self._do_api_call(RESTART_CLUSTER_ENDPOINT, json)
|
def start_cluster(self, json: dict) -> None:
    """Start the cluster.

    :param json: json dictionary containing the cluster specification.
    """
    self._do_api_call(START_CLUSTER_ENDPOINT, json)
| 1,305,953,017,967,516,700
|
Starts the cluster.
:param json: json dictionary containing cluster specification.
|
airflow/providers/databricks/hooks/databricks.py
|
start_cluster
|
AMS-Kepler/airflow
|
python
|
def start_cluster(self, json: dict) -> None:
    """Start the cluster.

    :param json: json dictionary containing the cluster specification.
    """
    self._do_api_call(START_CLUSTER_ENDPOINT, json)
|
def terminate_cluster(self, json: dict) -> None:
    """Terminate the cluster.

    :param json: json dictionary containing the cluster specification.
    """
    self._do_api_call(TERMINATE_CLUSTER_ENDPOINT, json)
| -5,048,575,919,681,346,000
|
Terminates the cluster.
:param json: json dictionary containing cluster specification.
|
airflow/providers/databricks/hooks/databricks.py
|
terminate_cluster
|
AMS-Kepler/airflow
|
python
|
def terminate_cluster(self, json: dict) -> None:
    """Terminate the cluster.

    :param json: json dictionary containing the cluster specification.
    """
    self._do_api_call(TERMINATE_CLUSTER_ENDPOINT, json)
|
def install(self, json: dict) -> None:
    """Install libraries on the cluster.

    Utility wrapper around the ``2.0/libraries/install`` endpoint.

    :param json: json dictionary containing cluster_id and an array of library
    """
    self._do_api_call(INSTALL_LIBS_ENDPOINT, json)
| -789,050,516,703,948,400
|
Install libraries on the cluster.
Utility function to call the ``2.0/libraries/install`` endpoint.
:param json: json dictionary containing cluster_id and an array of library
|
airflow/providers/databricks/hooks/databricks.py
|
install
|
AMS-Kepler/airflow
|
python
|
def install(self, json: dict) -> None:
    """Install libraries on the cluster.

    Utility wrapper around the ``2.0/libraries/install`` endpoint.

    :param json: json dictionary containing cluster_id and an array of library
    """
    self._do_api_call(INSTALL_LIBS_ENDPOINT, json)
|
def uninstall(self, json: dict) -> None:
    """Uninstall libraries on the cluster.

    Utility wrapper around the ``2.0/libraries/uninstall`` endpoint.

    :param json: json dictionary containing cluster_id and an array of library
    """
    self._do_api_call(UNINSTALL_LIBS_ENDPOINT, json)
| -3,274,519,760,249,699,000
|
Uninstall libraries on the cluster.
Utility function to call the ``2.0/libraries/uninstall`` endpoint.
:param json: json dictionary containing cluster_id and an array of library
|
airflow/providers/databricks/hooks/databricks.py
|
uninstall
|
AMS-Kepler/airflow
|
python
|
def uninstall(self, json: dict) -> None:
    """Uninstall libraries on the cluster.

    Utility wrapper around the ``2.0/libraries/uninstall`` endpoint.

    :param json: json dictionary containing cluster_id and an array of library
    """
    self._do_api_call(UNINSTALL_LIBS_ENDPOINT, json)
|
def update_repo(self, repo_id: str, json: Dict[str, Any]) -> dict:
    """Update the given Databricks Repo.

    :param repo_id: ID of Databricks Repos
    :param json: payload
    :return: metadata from update
    """
    endpoint = ('PATCH', f'api/2.0/repos/{repo_id}')
    return self._do_api_call(endpoint, json)
| -8,024,518,480,074,445,000
|
Updates given Databricks Repos
:param repo_id: ID of Databricks Repos
:param json: payload
:return: metadata from update
|
airflow/providers/databricks/hooks/databricks.py
|
update_repo
|
AMS-Kepler/airflow
|
python
|
def update_repo(self, repo_id: str, json: Dict[str, Any]) -> dict:
    """Update the given Databricks Repo.

    :param repo_id: ID of Databricks Repos
    :param json: payload
    :return: metadata from update
    """
    endpoint = ('PATCH', f'api/2.0/repos/{repo_id}')
    return self._do_api_call(endpoint, json)
|
def delete_repo(self, repo_id: str):
    """Delete the given Databricks Repo.

    :param repo_id: ID of Databricks Repos
    :return: None
    """
    endpoint = ('DELETE', f'api/2.0/repos/{repo_id}')
    self._do_api_call(endpoint)
| 5,674,904,661,011,425,000
|
Deletes given Databricks Repos
:param repo_id: ID of Databricks Repos
:return:
|
airflow/providers/databricks/hooks/databricks.py
|
delete_repo
|
AMS-Kepler/airflow
|
python
|
def delete_repo(self, repo_id: str):
    """Delete the given Databricks Repo.

    :param repo_id: ID of Databricks Repos
    :return: None
    """
    endpoint = ('DELETE', f'api/2.0/repos/{repo_id}')
    self._do_api_call(endpoint)
|
def create_repo(self, json: Dict[str, Any]) -> dict:
    """Create a Databricks Repo.

    :param json: payload
    :return: response metadata from the API call
    """
    endpoint = ('POST', 'api/2.0/repos')
    return self._do_api_call(endpoint, json)
| -7,546,461,420,643,207,000
|
Creates a Databricks Repos
:param json: payload
:return:
|
airflow/providers/databricks/hooks/databricks.py
|
create_repo
|
AMS-Kepler/airflow
|
python
|
def create_repo(self, json: Dict[str, Any]) -> dict:
    """Create a Databricks Repo.

    :param json: payload
    :return: response metadata from the API call
    """
    endpoint = ('POST', 'api/2.0/repos')
    return self._do_api_call(endpoint, json)
|
def get_repo_by_path(self, path: str) -> Optional[str]:
    """Obtain a Repos ID by path.

    :param path: path to a repository
    :return: Repos ID if it exists, None if it doesn't.
    """
    try:
        result = self._do_api_call(
            WORKSPACE_GET_STATUS_ENDPOINT, {'path': path}, wrap_http_errors=False
        )
        if result.get('object_type', '') == 'REPO':
            return str(result['object_id'])
    except requests_exceptions.HTTPError as e:
        # A 404 simply means the path does not exist; anything else is a real error.
        if e.response.status_code != 404:
            raise e
    return None
| 4,764,001,046,479,572,000
|
Obtains Repos ID by path
:param path: path to a repository
:return: Repos ID if it exists, None if doesn't.
|
airflow/providers/databricks/hooks/databricks.py
|
get_repo_by_path
|
AMS-Kepler/airflow
|
python
|
def get_repo_by_path(self, path: str) -> Optional[str]:
    """Obtain a Repos ID by path.

    :param path: path to a repository
    :return: Repos ID if it exists, None if it doesn't.
    """
    try:
        result = self._do_api_call(WORKSPACE_GET_STATUS_ENDPOINT, {'path': path}, wrap_http_errors=False)
        # Fix: this copy had `result.get('object_type', )` — a dangling comma that
        # dropped the '' default present in the sibling implementation. Restored
        # for consistency (comparison behavior against 'REPO' is unchanged).
        if (result.get('object_type', '') == 'REPO'):
            return str(result['object_id'])
    except requests_exceptions.HTTPError as e:
        # A 404 simply means the path does not exist; anything else is a real error.
        if (e.response.status_code != 404):
            raise e
    return None
|
def start(self):
    """Placeholder: this detector just reads out whatever buffer is on the
    scancontrol device; that device is managed manually from macros.
    """
    pass
| 7,744,862,992,731,217,000
|
Placeholder, this detector just reads out whatever buffer is on the
scancontrol device. That device is managed manually from macros.
|
contrast/detectors/LC400Buffer.py
|
start
|
alexbjorling/acquisition-framework
|
python
|
def start(self):
    """Placeholder: this detector just reads out whatever buffer is on the
    scancontrol device; that device is managed manually from macros.
    """
    pass
|
def get_coco_dataset():
    """Return a dummy COCO dataset that includes only the 'classes' field."""
    classes = ['__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
    dataset = AttrDict()
    # Map contiguous class index -> class name (index 0 is background).
    dataset.classes = dict(enumerate(classes))
    return dataset
| 2,974,285,091,224,692,700
|
A dummy COCO dataset that includes only the 'classes' field.
|
lib/datasets/dummy_datasets.py
|
get_coco_dataset
|
Bigwode/FPN-Pytorch
|
python
|
def get_coco_dataset():
    """Return a dummy COCO dataset that includes only the 'classes' field."""
    classes = ['__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
    dataset = AttrDict()
    # Map contiguous class index -> class name (index 0 is background).
    dataset.classes = dict(enumerate(classes))
    return dataset
|
def get_detections(self, frames):
    """Return all detections on the given frames.

    Submits every frame asynchronously, then collects and decodes the
    network outputs in order.
    """
    assert len(frames) <= self.max_num_frames
    for frame in frames:
        self.net.forward_async(frame)
    raw_outputs = self.net.grab_all_async()
    all_detections = []
    for frame_idx, raw_out in enumerate(raw_outputs):
        decoded = self.__decode_detections(raw_out, frames[frame_idx].shape)
        all_detections.append(decoded)
    return all_detections
| -5,370,912,130,922,129,000
|
Returns all detections on frames
|
multi_camera_multi_person_tracking/utils/network_wrappers.py
|
get_detections
|
565353780/open-vino
|
python
|
def get_detections(self, frames):
    """Return all detections on the given frames.

    Submits every frame asynchronously, then collects and decodes the
    network outputs in order.
    """
    assert len(frames) <= self.max_num_frames
    for frame in frames:
        self.net.forward_async(frame)
    raw_outputs = self.net.grab_all_async()
    all_detections = []
    for frame_idx, raw_out in enumerate(raw_outputs):
        decoded = self.__decode_detections(raw_out, frames[frame_idx].shape)
        all_detections.append(decoded)
    return all_detections
|
def __decode_detections(self, out, frame_shape):
    """Decode raw SSD output into [((left, top, right, bottom), confidence), ...].

    Boxes below ``self.confidence`` are dropped; surviving boxes may be
    grown by ``self.expand_ratio`` and the result is sorted by confidence,
    highest first.
    """
    height, width = frame_shape[0], frame_shape[1]
    decoded = []
    for det in out[(0, 0)]:
        score = det[2]
        if score <= self.confidence:
            continue
        # Normalized coordinates -> pixel coordinates, clamped at 0.
        left = int(max(det[3], 0) * width)
        top = int(max(det[4], 0) * height)
        right = int(max(det[5], 0) * width)
        bottom = int(max(det[6], 0) * height)
        if self.expand_ratio != (1.0, 1.0):
            # Grow the box symmetrically around its center.
            half_dw = (right - left) * (self.expand_ratio[0] - 1.0) / 2
            half_dh = (bottom - top) * (self.expand_ratio[1] - 1.0) / 2
            left = max(int(left - half_dw), 0)
            right = int(right + half_dw)
            top = max(int(top - half_dh), 0)
            bottom = int(bottom + half_dh)
        decoded.append(((left, top, right, bottom), score))
    if len(decoded) > 1:
        decoded.sort(key=lambda item: item[1], reverse=True)
    return decoded
| 6,288,819,005,664,865,000
|
Decodes raw SSD output
|
multi_camera_multi_person_tracking/utils/network_wrappers.py
|
__decode_detections
|
565353780/open-vino
|
python
|
def __decode_detections(self, out, frame_shape):
    """Decode raw SSD output into [((left, top, right, bottom), confidence), ...].

    Boxes below ``self.confidence`` are dropped; surviving boxes may be
    grown by ``self.expand_ratio`` and the result is sorted by confidence,
    highest first.
    """
    height, width = frame_shape[0], frame_shape[1]
    decoded = []
    for det in out[(0, 0)]:
        score = det[2]
        if score <= self.confidence:
            continue
        # Normalized coordinates -> pixel coordinates, clamped at 0.
        left = int(max(det[3], 0) * width)
        top = int(max(det[4], 0) * height)
        right = int(max(det[5], 0) * width)
        bottom = int(max(det[6], 0) * height)
        if self.expand_ratio != (1.0, 1.0):
            # Grow the box symmetrically around its center.
            half_dw = (right - left) * (self.expand_ratio[0] - 1.0) / 2
            half_dh = (bottom - top) * (self.expand_ratio[1] - 1.0) / 2
            left = max(int(left - half_dw), 0)
            right = int(right + half_dw)
            top = max(int(top - half_dh), 0)
            bottom = int(bottom + half_dh)
        decoded.append(((left, top, right, bottom), score))
    if len(decoded) > 1:
        decoded.sort(key=lambda item: item[1], reverse=True)
    return decoded
|
def forward(self, batch):
    """Run the underlying network on a batch and return the raw outputs."""
    assert len(batch) <= self.max_reqs
    for frame in batch:
        self.net.forward_async(frame)
    return self.net.grab_all_async()
| -5,311,186,696,799,280,000
|
Performs forward of the underlying network on a given batch
|
multi_camera_multi_person_tracking/utils/network_wrappers.py
|
forward
|
565353780/open-vino
|
python
|
def forward(self, batch):
    """Run the underlying network on a batch and return the raw outputs."""
    assert len(batch) <= self.max_reqs
    for frame in batch:
        self.net.forward_async(frame)
    return self.net.grab_all_async()
|
def convert_location_from_source_to_agent(self, source: carla.Location) -> Location:
    """Convert a carla.Location into the agent's Location type.

    The y and z components are swapped (per the original note, this moves
    the point into a right-handed coordinate system).

    :param source: carla.Location
    :return: agent-side Location
    """
    converted = Location(x=source.x, y=source.z, z=source.y)
    return converted
| -6,020,882,996,146,611,000
|
Convert Location data from Carla.location to Agent's lcoation data type
invert the Z axis to make it into right hand coordinate system
Args:
source: carla.location
Returns:
|
Bridges/carla_bridge.py
|
convert_location_from_source_to_agent
|
Amanda-Chiang/ROAR
|
python
|
def convert_location_from_source_to_agent(self, source: carla.Location) -> Location:
    """Convert a carla.Location into the agent's Location type.

    The y and z components are swapped (per the original note, this moves
    the point into a right-handed coordinate system).

    :param source: carla.Location
    :return: agent-side Location
    """
    converted = Location(x=source.x, y=source.z, z=source.y)
    return converted
|
def convert_rotation_from_source_to_agent(self, source: carla.Rotation) -> Rotation:
    """Convert a CARLA raw rotation to Rotation(pitch, yaw, roll).

    Note that pitch and yaw are swapped between the two representations.
    """
    converted = Rotation(pitch=source.yaw, yaw=source.pitch, roll=source.roll)
    return converted
| 264,018,444,264,146,080
|
Convert a CARLA raw rotation to Rotation(pitch=float,yaw=float,roll=float).
|
Bridges/carla_bridge.py
|
convert_rotation_from_source_to_agent
|
Amanda-Chiang/ROAR
|
python
|
def convert_rotation_from_source_to_agent(self, source: carla.Rotation) -> Rotation:
    """Convert a CARLA raw rotation to Rotation(pitch, yaw, roll).

    Note that pitch and yaw are swapped between the two representations.
    """
    converted = Rotation(pitch=source.yaw, yaw=source.pitch, roll=source.roll)
    return converted
|
def convert_transform_from_source_to_agent(self, source: carla.Transform) -> Transform:
    """Convert CARLA raw location and rotation into Transform(location, rotation)."""
    location = self.convert_location_from_source_to_agent(source=source.location)
    rotation = self.convert_rotation_from_source_to_agent(source=source.rotation)
    return Transform(location=location, rotation=rotation)
| -2,277,312,415,609,952,000
|
Convert CARLA raw location and rotation to Transform(location,rotation).
|
Bridges/carla_bridge.py
|
convert_transform_from_source_to_agent
|
Amanda-Chiang/ROAR
|
python
|
def convert_transform_from_source_to_agent(self, source: carla.Transform) -> Transform:
    """Convert CARLA raw location and rotation into Transform(location, rotation)."""
    location = self.convert_location_from_source_to_agent(source=source.location)
    rotation = self.convert_rotation_from_source_to_agent(source=source.rotation)
    return Transform(location=location, rotation=rotation)
|
def convert_control_from_source_to_agent(self, source: carla.VehicleControl) -> VehicleControl:
    """Convert CARLA raw vehicle control to VehicleControl(throttle, steering).

    Throttle is negated when the vehicle is in reverse.
    """
    throttle = source.throttle
    if source.reverse:
        throttle = -1 * throttle
    return VehicleControl(throttle=throttle, steering=source.steer)
| -6,737,263,032,983,955,000
|
Convert CARLA raw vehicle control to VehicleControl(throttle,steering).
|
Bridges/carla_bridge.py
|
convert_control_from_source_to_agent
|
Amanda-Chiang/ROAR
|
python
|
def convert_control_from_source_to_agent(self, source: carla.VehicleControl) -> VehicleControl:
    """Convert CARLA raw vehicle control to VehicleControl(throttle, steering).

    Throttle is negated when the vehicle is in reverse.
    """
    throttle = source.throttle
    if source.reverse:
        throttle = -1 * throttle
    return VehicleControl(throttle=throttle, steering=source.steer)
|
def convert_rgb_from_source_to_agent(self, source: carla.Image) -> Union[(RGBData, None)]:
    """Convert a CARLA raw Image into RGBData; return None if conversion fails.

    :param source: carla.Image from the camera sensor
    :return: RGBData wrapping an RGB numpy array, or None on failure
    """
    try:
        source.convert(cc.Raw)
        return RGBData(data=self._to_rgb_array(source))
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Keeps the original best-effort None fallback.
        return None
| -3,428,005,910,081,119,700
|
Convert CARLA raw Image to a Union with RGB numpy array
|
Bridges/carla_bridge.py
|
convert_rgb_from_source_to_agent
|
Amanda-Chiang/ROAR
|
python
|
def convert_rgb_from_source_to_agent(self, source: carla.Image) -> Union[(RGBData, None)]:
    """Convert a CARLA raw Image into RGBData; return None if conversion fails.

    :param source: carla.Image from the camera sensor
    :return: RGBData wrapping an RGB numpy array, or None on failure
    """
    try:
        source.convert(cc.Raw)
        return RGBData(data=self._to_rgb_array(source))
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Keeps the original best-effort None fallback.
        return None
|
def convert_depth_from_source_to_agent(self, source: carla.Image) -> Union[(DepthData, None)]:
    """Convert a CARLA raw depth image into DepthData; return None on failure.

    :param source: carla.Image from the depth camera
    :return: DepthData wrapping the decoded depth array, or None if decoding fails
    """
    try:
        # Raw BGRA bytes -> (H, W, 4), drop the alpha channel, then reverse
        # the channel order before handing off to the depth decoder.
        array = np.frombuffer(source.raw_data, dtype=np.dtype('uint8'))
        array = np.reshape(array, (source.height, source.width, 4))
        array = array[:, :, :3]
        array = array[:, :, ::(- 1)]
        array = png_to_depth(array)
        return DepthData(data=array)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Keeps the original best-effort None fallback.
        return None
| -4,212,296,384,200,390,700
|
Convert CARLA raw depth info to
|
Bridges/carla_bridge.py
|
convert_depth_from_source_to_agent
|
Amanda-Chiang/ROAR
|
python
|
def convert_depth_from_source_to_agent(self, source: carla.Image) -> Union[(DepthData, None)]:
    """Convert a CARLA raw depth image into DepthData; return None on failure.

    :param source: carla.Image from the depth camera
    :return: DepthData wrapping the decoded depth array, or None if decoding fails
    """
    try:
        # Raw BGRA bytes -> (H, W, 4), drop the alpha channel, then reverse
        # the channel order before handing off to the depth decoder.
        array = np.frombuffer(source.raw_data, dtype=np.dtype('uint8'))
        array = np.reshape(array, (source.height, source.width, 4))
        array = array[:, :, :3]
        array = array[:, :, ::(- 1)]
        array = png_to_depth(array)
        return DepthData(data=array)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Keeps the original best-effort None fallback.
        return None
|
def _to_bgra_array(self, image):
    """Convert a CARLA raw image to a BGRA numpy array of shape (H, W, 4)."""
    if not isinstance(image, carla.Image):
        raise ValueError('Argument must be a carla.sensor.Image')
    flat = np.frombuffer(image.raw_data, dtype=np.dtype('uint8'))
    return np.reshape(flat, (image.height, image.width, 4))
| 998,347,524,162,644,400
|
Convert a CARLA raw image to a BGRA numpy array.
|
Bridges/carla_bridge.py
|
_to_bgra_array
|
Amanda-Chiang/ROAR
|
python
|
def _to_bgra_array(self, image):
    """Convert a CARLA raw image to a BGRA numpy array of shape (H, W, 4)."""
    if not isinstance(image, carla.Image):
        raise ValueError('Argument must be a carla.sensor.Image')
    flat = np.frombuffer(image.raw_data, dtype=np.dtype('uint8'))
    return np.reshape(flat, (image.height, image.width, 4))
|
def _to_rgb_array(self, image):
    """Convert a CARLA raw image to an RGB numpy array.

    NOTE(review): this keeps the first three channels of a BGRA buffer,
    which are in B, G, R order — callers appear to treat the result as
    RGB; confirm the intended channel order.
    """
    bgra = self._to_bgra_array(image)
    return bgra[:, :, :3]
| -8,760,952,046,813,695,000
|
Convert a CARLA raw image to a RGB numpy array.
|
Bridges/carla_bridge.py
|
_to_rgb_array
|
Amanda-Chiang/ROAR
|
python
|
def _to_rgb_array(self, image):
    """Convert a CARLA raw image to an RGB numpy array.

    NOTE(review): this keeps the first three channels of a BGRA buffer,
    which are in B, G, R order — callers appear to treat the result as
    RGB; confirm the intended channel order.
    """
    bgra = self._to_bgra_array(image)
    return bgra[:, :, :3]
|
def find_plugins():
    """Yield the path of each .py plugin file found under PLUGINS_DIR.

    (This is a generator, not a list, despite the original docstring.)
    """
    for root, _dirs, filenames in os.walk(PLUGINS_DIR):
        for filename in filenames:
            if filename.endswith('.py'):
                yield os.path.join(root, filename)
| 1,893,547,758,489,075,200
|
Returns a list of plugin path names.
|
app/processor.py
|
find_plugins
|
glombard/python-plugin-experiment
|
python
|
def find_plugins():
    """Yield the path of each .py plugin file found under PLUGINS_DIR.

    (This is a generator, not a list, despite the original docstring.)
    """
    for root, _dirs, filenames in os.walk(PLUGINS_DIR):
        for filename in filenames:
            if filename.endswith('.py'):
                yield os.path.join(root, filename)
|
def load_plugins(hook_plugins, command_plugins):
    """Populate the given plugin lists.

    Imports each plugin file as a module under PLUGINS_DIR and appends an
    instance of every Hook/Command subclass defined in that module. Files
    that raise ImportError or NotImplementedError anywhere in the process
    are skipped.
    """
    for plugin_file in find_plugins():
        try:
            module_name = os.path.splitext(os.path.basename(plugin_file))[0]
            module = importlib.import_module(PLUGINS_DIR + '.' + module_name)
            for attr_name in dir(module):
                attr = getattr(module, attr_name)
                # Only consider classes defined in this module itself.
                if not inspect.isclass(attr) or inspect.getmodule(attr) != module:
                    continue
                if issubclass(attr, Hook):
                    hook_plugins.append(attr())
                elif issubclass(attr, Command):
                    command_plugins.append(attr())
        except (ImportError, NotImplementedError):
            continue
| 3,624,441,520,412,969,000
|
Populates the plugin lists.
|
app/processor.py
|
load_plugins
|
glombard/python-plugin-experiment
|
python
|
def load_plugins(hook_plugins, command_plugins):
    """Populate the given plugin lists.

    Imports each plugin file as a module under PLUGINS_DIR and appends an
    instance of every Hook/Command subclass defined in that module. Files
    that raise ImportError or NotImplementedError anywhere in the process
    are skipped.
    """
    for plugin_file in find_plugins():
        try:
            module_name = os.path.splitext(os.path.basename(plugin_file))[0]
            module = importlib.import_module(PLUGINS_DIR + '.' + module_name)
            for attr_name in dir(module):
                attr = getattr(module, attr_name)
                # Only consider classes defined in this module itself.
                if not inspect.isclass(attr) or inspect.getmodule(attr) != module:
                    continue
                if issubclass(attr, Hook):
                    hook_plugins.append(attr())
                elif issubclass(attr, Command):
                    command_plugins.append(attr())
        except (ImportError, NotImplementedError):
            continue
|
def build_cmsinfo(cm_list, qreq_):
"\n Helper function to report results over multiple queries (chip matches).\n Basically given a group of queries of the same name, we only care if one of\n them is correct. This emulates encounters.\n\n Runs queries of a specific configuration returns the best rank of each\n query.\n\n Args:\n cm_list (list): list of chip matches\n qreq_ (QueryRequest): request that computed the chip matches.\n\n Returns:\n dict: cmsinfo - info about multiple chip matches cm_list\n\n CommandLine:\n python -m wbia get_query_result_info\n python -m wbia get_query_result_info:0 --db lynx \\\n -a :qsame_imageset=True,been_adjusted=True,excluderef=True -t :K=1\n python -m wbia get_query_result_info:0 --db lynx \\\n -a :qsame_imageset=True,been_adjusted=True,excluderef=True -t :K=1 --cmd\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> qreq_ = wbia.main_helpers.testdata_qreq_(a=[':qindex=0:3,dindex=0:5'])\n >>> cm_list = qreq_.execute()\n >>> cmsinfo = build_cmsinfo(cm_list, qreq_)\n >>> print(ut.repr2(cmsinfo))\n\n Ignore:\n wbia -e rank_cmc --db humpbacks -a :has_any=hasnotch,mingt=2 \\\n -t :proot=BC_DTW --show --nocache-big\n\n wbia -e rank_cmc --db humpbacks -a :is_known=True,mingt=2 \\\n -t :pipeline_root=BC_DTW\n\n wbia -e rank_cmc --db humpbacks -a :is_known=True \\\n -t :pipeline_root=BC_DTW \\\n --qaid=1,9,15,16,18 --daid-override=1,9,15,16,18,21,22 \\\n --show --debug-depc\n\n --clear-all-depcache\n "
ibs = qreq_.ibs
qaids = qreq_.qaids
daids = qreq_.daids
qx2_cminfo = []
for cm in cm_list:
if hasattr(cm, 'extend_results'):
cminfo = cm.extend_results(qreq_).summarize(qreq_)
else:
cminfo = cm.summarize(qreq_)
qx2_cminfo.append(cminfo)
cmsinfo = ut.dict_stack(qx2_cminfo, 'qx2_')
cmsinfo['qx2_gt_rank'] = ut.replace_nones(cmsinfo['qx2_gt_rank'], (- 1))
if False:
qx2_gtaids = ibs.get_annot_groundtruth(qaids, daid_list=daids)
qx2_avepercision = np.array([cm.get_average_percision(ibs=ibs, gt_aids=gt_aids) for (cm, gt_aids) in zip(cm_list, qx2_gtaids)])
cmsinfo['qx2_avepercision'] = qx2_avepercision
qaids = qreq_.qaids
qnids = ibs.get_annot_nids(qaids)
unique_dnids = np.unique(ibs.get_annot_nids(qreq_.daids))
(unique_qnids, groupxs) = ut.group_indices(qnids)
cm_group_list = ut.apply_grouping(cm_list, groupxs)
qnid2_aggnamescores = {}
qnx2_nameres_info = []
nameres_info_list = []
for (qnid, cm_group) in zip(unique_qnids, cm_group_list):
nid2_name_score_group = [dict([(nid, cm.name_score_list[nidx]) for (nid, nidx) in cm.nid2_nidx.items()]) for cm in cm_group]
aligned_name_scores = np.array([ut.dict_take(nid_to_name_score, unique_dnids.tolist(), (- np.inf)) for nid_to_name_score in nid2_name_score_group]).T
name_score_list = np.nanmax(aligned_name_scores, axis=1)
qnid2_aggnamescores[qnid] = name_score_list
sortx = name_score_list.argsort()[::(- 1)]
sorted_namescores = name_score_list[sortx]
sorted_dnids = unique_dnids[sortx]
success = (sorted_dnids == qnid)
failure = np.logical_and((~ success), (sorted_dnids > 0))
gt_name_rank = (None if (not np.any(success)) else np.where(success)[0][0])
gf_name_rank = (None if (not np.any(failure)) else np.nonzero(failure)[0][0])
gt_nid = sorted_dnids[gt_name_rank]
gf_nid = sorted_dnids[gf_name_rank]
gt_name_score = sorted_namescores[gt_name_rank]
gf_name_score = sorted_namescores[gf_name_rank]
if (gt_name_score <= 0):
if hasattr(qreq_, 'dnids'):
gt_name_rank = (len(qreq_.dnids) + 1)
else:
dnids = list(set(ibs.get_annot_nids(qreq_.daids)))
gt_name_rank = (len(dnids) + 1)
qnx2_nameres_info = {}
qnx2_nameres_info['qnid'] = qnid
qnx2_nameres_info['gt_nid'] = gt_nid
qnx2_nameres_info['gf_nid'] = gf_nid
qnx2_nameres_info['gt_name_rank'] = gt_name_rank
qnx2_nameres_info['gf_name_rank'] = gf_name_rank
qnx2_nameres_info['gt_name_score'] = gt_name_score
qnx2_nameres_info['gf_name_score'] = gf_name_score
nameres_info_list.append(qnx2_nameres_info)
nameres_info = ut.dict_stack(nameres_info_list, 'qnx2_')
cmsinfo.update(nameres_info)
return cmsinfo
| 1,409,253,002,260,995,000
|
Helper function to report results over multiple queries (chip matches).
Basically given a group of queries of the same name, we only care if one of
them is correct. This emulates encounters.
Runs queries of a specific configuration returns the best rank of each
query.
Args:
cm_list (list): list of chip matches
qreq_ (QueryRequest): request that computed the chip matches.
Returns:
dict: cmsinfo - info about multiple chip matches cm_list
CommandLine:
python -m wbia get_query_result_info
python -m wbia get_query_result_info:0 --db lynx \
-a :qsame_imageset=True,been_adjusted=True,excluderef=True -t :K=1
python -m wbia get_query_result_info:0 --db lynx \
-a :qsame_imageset=True,been_adjusted=True,excluderef=True -t :K=1 --cmd
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> import wbia
>>> qreq_ = wbia.main_helpers.testdata_qreq_(a=[':qindex=0:3,dindex=0:5'])
>>> cm_list = qreq_.execute()
>>> cmsinfo = build_cmsinfo(cm_list, qreq_)
>>> print(ut.repr2(cmsinfo))
Ignore:
wbia -e rank_cmc --db humpbacks -a :has_any=hasnotch,mingt=2 \
-t :proot=BC_DTW --show --nocache-big
wbia -e rank_cmc --db humpbacks -a :is_known=True,mingt=2 \
-t :pipeline_root=BC_DTW
wbia -e rank_cmc --db humpbacks -a :is_known=True \
-t :pipeline_root=BC_DTW \
--qaid=1,9,15,16,18 --daid-override=1,9,15,16,18,21,22 \
--show --debug-depc
--clear-all-depcache
|
wbia/expt/test_result.py
|
build_cmsinfo
|
WildMeOrg/wildbook-ia
|
python
|
def build_cmsinfo(cm_list, qreq_):
"\n Helper function to report results over multiple queries (chip matches).\n Basically given a group of queries of the same name, we only care if one of\n them is correct. This emulates encounters.\n\n Runs queries of a specific configuration returns the best rank of each\n query.\n\n Args:\n cm_list (list): list of chip matches\n qreq_ (QueryRequest): request that computed the chip matches.\n\n Returns:\n dict: cmsinfo - info about multiple chip matches cm_list\n\n CommandLine:\n python -m wbia get_query_result_info\n python -m wbia get_query_result_info:0 --db lynx \\\n -a :qsame_imageset=True,been_adjusted=True,excluderef=True -t :K=1\n python -m wbia get_query_result_info:0 --db lynx \\\n -a :qsame_imageset=True,been_adjusted=True,excluderef=True -t :K=1 --cmd\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> qreq_ = wbia.main_helpers.testdata_qreq_(a=[':qindex=0:3,dindex=0:5'])\n >>> cm_list = qreq_.execute()\n >>> cmsinfo = build_cmsinfo(cm_list, qreq_)\n >>> print(ut.repr2(cmsinfo))\n\n Ignore:\n wbia -e rank_cmc --db humpbacks -a :has_any=hasnotch,mingt=2 \\\n -t :proot=BC_DTW --show --nocache-big\n\n wbia -e rank_cmc --db humpbacks -a :is_known=True,mingt=2 \\\n -t :pipeline_root=BC_DTW\n\n wbia -e rank_cmc --db humpbacks -a :is_known=True \\\n -t :pipeline_root=BC_DTW \\\n --qaid=1,9,15,16,18 --daid-override=1,9,15,16,18,21,22 \\\n --show --debug-depc\n\n --clear-all-depcache\n "
ibs = qreq_.ibs
qaids = qreq_.qaids
daids = qreq_.daids
qx2_cminfo = []
for cm in cm_list:
if hasattr(cm, 'extend_results'):
cminfo = cm.extend_results(qreq_).summarize(qreq_)
else:
cminfo = cm.summarize(qreq_)
qx2_cminfo.append(cminfo)
cmsinfo = ut.dict_stack(qx2_cminfo, 'qx2_')
cmsinfo['qx2_gt_rank'] = ut.replace_nones(cmsinfo['qx2_gt_rank'], (- 1))
if False:
qx2_gtaids = ibs.get_annot_groundtruth(qaids, daid_list=daids)
qx2_avepercision = np.array([cm.get_average_percision(ibs=ibs, gt_aids=gt_aids) for (cm, gt_aids) in zip(cm_list, qx2_gtaids)])
cmsinfo['qx2_avepercision'] = qx2_avepercision
qaids = qreq_.qaids
qnids = ibs.get_annot_nids(qaids)
unique_dnids = np.unique(ibs.get_annot_nids(qreq_.daids))
(unique_qnids, groupxs) = ut.group_indices(qnids)
cm_group_list = ut.apply_grouping(cm_list, groupxs)
qnid2_aggnamescores = {}
qnx2_nameres_info = []
nameres_info_list = []
for (qnid, cm_group) in zip(unique_qnids, cm_group_list):
nid2_name_score_group = [dict([(nid, cm.name_score_list[nidx]) for (nid, nidx) in cm.nid2_nidx.items()]) for cm in cm_group]
aligned_name_scores = np.array([ut.dict_take(nid_to_name_score, unique_dnids.tolist(), (- np.inf)) for nid_to_name_score in nid2_name_score_group]).T
name_score_list = np.nanmax(aligned_name_scores, axis=1)
qnid2_aggnamescores[qnid] = name_score_list
sortx = name_score_list.argsort()[::(- 1)]
sorted_namescores = name_score_list[sortx]
sorted_dnids = unique_dnids[sortx]
success = (sorted_dnids == qnid)
failure = np.logical_and((~ success), (sorted_dnids > 0))
gt_name_rank = (None if (not np.any(success)) else np.where(success)[0][0])
gf_name_rank = (None if (not np.any(failure)) else np.nonzero(failure)[0][0])
gt_nid = sorted_dnids[gt_name_rank]
gf_nid = sorted_dnids[gf_name_rank]
gt_name_score = sorted_namescores[gt_name_rank]
gf_name_score = sorted_namescores[gf_name_rank]
if (gt_name_score <= 0):
if hasattr(qreq_, 'dnids'):
gt_name_rank = (len(qreq_.dnids) + 1)
else:
dnids = list(set(ibs.get_annot_nids(qreq_.daids)))
gt_name_rank = (len(dnids) + 1)
qnx2_nameres_info = {}
qnx2_nameres_info['qnid'] = qnid
qnx2_nameres_info['gt_nid'] = gt_nid
qnx2_nameres_info['gf_nid'] = gf_nid
qnx2_nameres_info['gt_name_rank'] = gt_name_rank
qnx2_nameres_info['gf_name_rank'] = gf_name_rank
qnx2_nameres_info['gt_name_score'] = gt_name_score
qnx2_nameres_info['gf_name_score'] = gf_name_score
nameres_info_list.append(qnx2_nameres_info)
nameres_info = ut.dict_stack(nameres_info_list, 'qnx2_')
cmsinfo.update(nameres_info)
return cmsinfo
|
def combine_testres_list(ibs, testres_list):
"\n combine test results over multiple annot configs\n\n The combination of pipeline and annotation config is indexed by cfgx.\n A cfgx corresponds to a unique query request\n\n CommandLine:\n python -m wbia --tf combine_testres_list\n\n python -m wbia --tf -draw_rank_cmc --db PZ_MTEST --show\n python -m wbia --tf -draw_rank_cmc --db PZ_Master1 --show\n python -m wbia --tf -draw_rank_cmc --db PZ_MTEST --show -a varysize -t default\n python -m wbia --tf -draw_rank_cmc --db PZ_MTEST --show -a varysize -t default\n\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.expt import harness\n >>> ibs, testres = harness.testdata_expts('PZ_MTEST', ['varysize'])\n "
import copy
from wbia.expt import annotation_configs
acfg_list = [tr.acfg for tr in testres_list]
acfg_lbl_list = annotation_configs.get_varied_acfg_labels(acfg_list)
flat_acfg_list = annotation_configs.flatten_acfg_list(acfg_list)
(nonvaried_acfg, varied_acfg_list) = ut.partition_varied_cfg_list(flat_acfg_list)
def combine_lbls(lbl, acfg_lbl):
if (len(lbl) == 0):
return acfg_lbl
if (len(acfg_lbl) == 0):
return lbl
return ((lbl + '+') + acfg_lbl)
agg_cfg_list = ut.flatten([tr.cfg_list for tr in testres_list])
agg_cfgx2_qreq_ = ut.flatten([tr.cfgx2_qreq_ for tr in testres_list])
agg_cfgdict_list = ut.flatten([tr.cfgdict_list for tr in testres_list])
agg_cfgx2_cmsinfo = ut.flatten([tr.cfgx2_cmsinfo for tr in testres_list])
agg_varied_acfg_list = ut.flatten([([acfg] * len(tr.cfg_list)) for (tr, acfg) in zip(testres_list, varied_acfg_list)])
agg_cfgx2_lbls = ut.flatten([[combine_lbls(lbl, acfg_lbl) for lbl in tr.cfgx2_lbl] for (tr, acfg_lbl) in zip(testres_list, acfg_lbl_list)])
agg_cfgx2_acfg = ut.flatten([([copy.deepcopy(acfg)] * len(tr.cfg_list)) for (tr, acfg) in zip(testres_list, acfg_list)])
big_testres = TestResult(agg_cfg_list, agg_cfgx2_lbls, agg_cfgx2_cmsinfo, agg_cfgx2_qreq_)
big_testres.acfg = annotation_configs.unflatten_acfgdict(nonvaried_acfg)
big_testres.cfgdict_list = agg_cfgdict_list
big_testres.common_acfg = annotation_configs.compress_aidcfg(big_testres.acfg)
big_testres.common_cfgdict = reduce(ut.dict_intersection, big_testres.cfgdict_list)
big_testres.varied_acfg_list = agg_varied_acfg_list
big_testres.nonvaried_acfg = nonvaried_acfg
big_testres.varied_cfg_list = [ut.delete_dict_keys(cfgdict.copy(), list(big_testres.common_cfgdict.keys())) for cfgdict in big_testres.cfgdict_list]
big_testres.acfg_list = acfg_list
big_testres.cfgx2_acfg = agg_cfgx2_acfg
big_testres.cfgx2_pcfg = agg_cfgdict_list
assert (len(agg_cfgdict_list) == len(agg_cfgx2_acfg))
testres = big_testres
return testres
| 260,257,291,209,548,700
|
combine test results over multiple annot configs
The combination of pipeline and annotation config is indexed by cfgx.
A cfgx corresponds to a unique query request
CommandLine:
python -m wbia --tf combine_testres_list
python -m wbia --tf -draw_rank_cmc --db PZ_MTEST --show
python -m wbia --tf -draw_rank_cmc --db PZ_Master1 --show
python -m wbia --tf -draw_rank_cmc --db PZ_MTEST --show -a varysize -t default
python -m wbia --tf -draw_rank_cmc --db PZ_MTEST --show -a varysize -t default
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.expt import harness
>>> ibs, testres = harness.testdata_expts('PZ_MTEST', ['varysize'])
|
wbia/expt/test_result.py
|
combine_testres_list
|
WildMeOrg/wildbook-ia
|
python
|
def combine_testres_list(ibs, testres_list):
"\n combine test results over multiple annot configs\n\n The combination of pipeline and annotation config is indexed by cfgx.\n A cfgx corresponds to a unique query request\n\n CommandLine:\n python -m wbia --tf combine_testres_list\n\n python -m wbia --tf -draw_rank_cmc --db PZ_MTEST --show\n python -m wbia --tf -draw_rank_cmc --db PZ_Master1 --show\n python -m wbia --tf -draw_rank_cmc --db PZ_MTEST --show -a varysize -t default\n python -m wbia --tf -draw_rank_cmc --db PZ_MTEST --show -a varysize -t default\n\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.expt import harness\n >>> ibs, testres = harness.testdata_expts('PZ_MTEST', ['varysize'])\n "
import copy
from wbia.expt import annotation_configs
acfg_list = [tr.acfg for tr in testres_list]
acfg_lbl_list = annotation_configs.get_varied_acfg_labels(acfg_list)
flat_acfg_list = annotation_configs.flatten_acfg_list(acfg_list)
(nonvaried_acfg, varied_acfg_list) = ut.partition_varied_cfg_list(flat_acfg_list)
def combine_lbls(lbl, acfg_lbl):
if (len(lbl) == 0):
return acfg_lbl
if (len(acfg_lbl) == 0):
return lbl
return ((lbl + '+') + acfg_lbl)
agg_cfg_list = ut.flatten([tr.cfg_list for tr in testres_list])
agg_cfgx2_qreq_ = ut.flatten([tr.cfgx2_qreq_ for tr in testres_list])
agg_cfgdict_list = ut.flatten([tr.cfgdict_list for tr in testres_list])
agg_cfgx2_cmsinfo = ut.flatten([tr.cfgx2_cmsinfo for tr in testres_list])
agg_varied_acfg_list = ut.flatten([([acfg] * len(tr.cfg_list)) for (tr, acfg) in zip(testres_list, varied_acfg_list)])
agg_cfgx2_lbls = ut.flatten([[combine_lbls(lbl, acfg_lbl) for lbl in tr.cfgx2_lbl] for (tr, acfg_lbl) in zip(testres_list, acfg_lbl_list)])
agg_cfgx2_acfg = ut.flatten([([copy.deepcopy(acfg)] * len(tr.cfg_list)) for (tr, acfg) in zip(testres_list, acfg_list)])
big_testres = TestResult(agg_cfg_list, agg_cfgx2_lbls, agg_cfgx2_cmsinfo, agg_cfgx2_qreq_)
big_testres.acfg = annotation_configs.unflatten_acfgdict(nonvaried_acfg)
big_testres.cfgdict_list = agg_cfgdict_list
big_testres.common_acfg = annotation_configs.compress_aidcfg(big_testres.acfg)
big_testres.common_cfgdict = reduce(ut.dict_intersection, big_testres.cfgdict_list)
big_testres.varied_acfg_list = agg_varied_acfg_list
big_testres.nonvaried_acfg = nonvaried_acfg
big_testres.varied_cfg_list = [ut.delete_dict_keys(cfgdict.copy(), list(big_testres.common_cfgdict.keys())) for cfgdict in big_testres.cfgdict_list]
big_testres.acfg_list = acfg_list
big_testres.cfgx2_acfg = agg_cfgx2_acfg
big_testres.cfgx2_pcfg = agg_cfgdict_list
assert (len(agg_cfgdict_list) == len(agg_cfgx2_acfg))
testres = big_testres
return testres
|
def get_infoprop_list(testres, key, qaids=None):
"\n key = 'qx2_gt_rank'\n key = 'qx2_gt_rank'\n qaids = testres.get_test_qaids()\n "
if (key == 'participant'):
cfgx2_infoprop = [np.in1d(qaids, aids_) for aids_ in testres.cfgx2_qaids]
else:
_tmp1_cfgx2_infoprop = ut.get_list_column(testres.cfgx2_cmsinfo, key)
_tmp2_cfgx2_infoprop = list(map(np.array, ut.util_list.replace_nones(_tmp1_cfgx2_infoprop, np.nan)))
if (qaids is None):
cfgx2_infoprop = _tmp2_cfgx2_infoprop
else:
cfgx2_qaid2_qx = [dict(zip(aids_, range(len(aids_)))) for aids_ in testres.cfgx2_qaids]
qxs_list = [ut.dict_take(qaid2_qx, qaids, None) for qaid2_qx in cfgx2_qaid2_qx]
cfgx2_infoprop = [[(np.nan if (x is None) else props[x]) for x in qxs] for (props, qxs) in zip(_tmp2_cfgx2_infoprop, qxs_list)]
if ((key == 'qx2_gt_rank') or key.endswith('_rank')):
wpr = testres.get_worst_possible_rank()
cfgx2_infoprop = [np.array([(wpr if (rank == (- 1)) else rank) for rank in infoprop]) for infoprop in cfgx2_infoprop]
return cfgx2_infoprop
| -6,915,283,949,004,807,000
|
key = 'qx2_gt_rank'
key = 'qx2_gt_rank'
qaids = testres.get_test_qaids()
|
wbia/expt/test_result.py
|
get_infoprop_list
|
WildMeOrg/wildbook-ia
|
python
|
def get_infoprop_list(testres, key, qaids=None):
"\n key = 'qx2_gt_rank'\n key = 'qx2_gt_rank'\n qaids = testres.get_test_qaids()\n "
if (key == 'participant'):
cfgx2_infoprop = [np.in1d(qaids, aids_) for aids_ in testres.cfgx2_qaids]
else:
_tmp1_cfgx2_infoprop = ut.get_list_column(testres.cfgx2_cmsinfo, key)
_tmp2_cfgx2_infoprop = list(map(np.array, ut.util_list.replace_nones(_tmp1_cfgx2_infoprop, np.nan)))
if (qaids is None):
cfgx2_infoprop = _tmp2_cfgx2_infoprop
else:
cfgx2_qaid2_qx = [dict(zip(aids_, range(len(aids_)))) for aids_ in testres.cfgx2_qaids]
qxs_list = [ut.dict_take(qaid2_qx, qaids, None) for qaid2_qx in cfgx2_qaid2_qx]
cfgx2_infoprop = [[(np.nan if (x is None) else props[x]) for x in qxs] for (props, qxs) in zip(_tmp2_cfgx2_infoprop, qxs_list)]
if ((key == 'qx2_gt_rank') or key.endswith('_rank')):
wpr = testres.get_worst_possible_rank()
cfgx2_infoprop = [np.array([(wpr if (rank == (- 1)) else rank) for rank in infoprop]) for infoprop in cfgx2_infoprop]
return cfgx2_infoprop
|
def get_infoprop_mat(testres, key, qaids=None):
"\n key = 'qx2_gf_raw_score'\n key = 'qx2_gt_raw_score'\n "
cfgx2_infoprop = testres.get_infoprop_list(key, qaids)
infoprop_mat = np.vstack(cfgx2_infoprop).T
return infoprop_mat
| 4,360,242,293,059,248,600
|
key = 'qx2_gf_raw_score'
key = 'qx2_gt_raw_score'
|
wbia/expt/test_result.py
|
get_infoprop_mat
|
WildMeOrg/wildbook-ia
|
python
|
def get_infoprop_mat(testres, key, qaids=None):
"\n key = 'qx2_gf_raw_score'\n key = 'qx2_gt_raw_score'\n "
cfgx2_infoprop = testres.get_infoprop_list(key, qaids)
infoprop_mat = np.vstack(cfgx2_infoprop).T
return infoprop_mat
|
def get_rank_histograms(testres, bins=None, key=None, join_acfgs=False):
"\n Ignore:\n testres.get_infoprop_mat('qnx2_gt_name_rank')\n testres.get_infoprop_mat('qnx2_gf_name_rank')\n testres.get_infoprop_mat('qnx2_qnid')\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts('testdb1', a=['default'])\n >>> bins = 'dense'\n >>> key = 'qnx2_gt_name_rank'\n >>> config_hists = testres.get_rank_histograms(bins, key=key)\n "
if (key is None):
key = 'qx2_gt_rank'
if (bins is None):
bins = testres.get_rank_histogram_bins()
elif (bins == 'dense'):
bins = np.arange((testres.get_worst_possible_rank() + 1))
cfgx2_ranks = testres.get_infoprop_list(key=key)
cfgx2_hist = np.zeros((len(cfgx2_ranks), (len(bins) - 1)), dtype=np.int32)
for (cfgx, ranks) in enumerate(cfgx2_ranks):
freq = np.histogram(ranks, bins=bins)[0]
cfgx2_hist[cfgx] = freq
if join_acfgs:
groupxs = testres.get_cfgx_groupxs()
cfgx2_hist = np.array([np.sum(group, axis=0) for group in ut.apply_grouping(cfgx2_hist, groupxs)])
return (cfgx2_hist, bins)
| 3,132,119,514,067,143,700
|
Ignore:
testres.get_infoprop_mat('qnx2_gt_name_rank')
testres.get_infoprop_mat('qnx2_gf_name_rank')
testres.get_infoprop_mat('qnx2_qnid')
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.init import main_helpers
>>> ibs, testres = main_helpers.testdata_expts('testdb1', a=['default'])
>>> bins = 'dense'
>>> key = 'qnx2_gt_name_rank'
>>> config_hists = testres.get_rank_histograms(bins, key=key)
|
wbia/expt/test_result.py
|
get_rank_histograms
|
WildMeOrg/wildbook-ia
|
python
|
def get_rank_histograms(testres, bins=None, key=None, join_acfgs=False):
"\n Ignore:\n testres.get_infoprop_mat('qnx2_gt_name_rank')\n testres.get_infoprop_mat('qnx2_gf_name_rank')\n testres.get_infoprop_mat('qnx2_qnid')\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts('testdb1', a=['default'])\n >>> bins = 'dense'\n >>> key = 'qnx2_gt_name_rank'\n >>> config_hists = testres.get_rank_histograms(bins, key=key)\n "
if (key is None):
key = 'qx2_gt_rank'
if (bins is None):
bins = testres.get_rank_histogram_bins()
elif (bins == 'dense'):
bins = np.arange((testres.get_worst_possible_rank() + 1))
cfgx2_ranks = testres.get_infoprop_list(key=key)
cfgx2_hist = np.zeros((len(cfgx2_ranks), (len(bins) - 1)), dtype=np.int32)
for (cfgx, ranks) in enumerate(cfgx2_ranks):
freq = np.histogram(ranks, bins=bins)[0]
cfgx2_hist[cfgx] = freq
if join_acfgs:
groupxs = testres.get_cfgx_groupxs()
cfgx2_hist = np.array([np.sum(group, axis=0) for group in ut.apply_grouping(cfgx2_hist, groupxs)])
return (cfgx2_hist, bins)
|
def get_rank_percentage_cumhist(testres, bins='dense', key=None, join_acfgs=False):
"\n Args:\n bins (unicode): (default = u'dense')\n key (None): (default = None)\n join_acfgs (bool): (default = False)\n\n Returns:\n tuple: (config_cdfs, edges)\n\n CommandLine:\n python -m wbia --tf TestResult.get_rank_percentage_cumhist\n python -m wbia --tf TestResult.get_rank_percentage_cumhist \\\n -t baseline -a unctrl ctrl\n\n python -m wbia --tf TestResult.get_rank_percentage_cumhist \\\n --db lynx \\\n -a default:qsame_imageset=True,been_adjusted=True,excluderef=True \\\n -t default:K=1 --show --cmd\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts(\n >>> 'testdb1', a=['default:num_names=1,name_offset=[0,1]'])\n >>> bins = u'dense'\n >>> key = None\n >>> (config_cdfs, edges) = testres.get_rank_percentage_cumhist(bins)\n >>> result = ('(config_cdfs, edges) = %s' % (str((config_cdfs, edges)),))\n >>> print(result)\n "
(cfgx2_hist, edges) = testres.get_rank_histograms(bins, key=key, join_acfgs=join_acfgs)
cfgx2_cumhist = np.cumsum(cfgx2_hist, axis=1)
cfgx2_cumhist_percent = ((100 * cfgx2_cumhist) / cfgx2_cumhist.T[(- 1)].T[:, None])
return (cfgx2_cumhist_percent, edges)
| -5,253,765,832,227,622,000
|
Args:
bins (unicode): (default = u'dense')
key (None): (default = None)
join_acfgs (bool): (default = False)
Returns:
tuple: (config_cdfs, edges)
CommandLine:
python -m wbia --tf TestResult.get_rank_percentage_cumhist
python -m wbia --tf TestResult.get_rank_percentage_cumhist \
-t baseline -a unctrl ctrl
python -m wbia --tf TestResult.get_rank_percentage_cumhist \
--db lynx \
-a default:qsame_imageset=True,been_adjusted=True,excluderef=True \
-t default:K=1 --show --cmd
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.init import main_helpers
>>> ibs, testres = main_helpers.testdata_expts(
>>> 'testdb1', a=['default:num_names=1,name_offset=[0,1]'])
>>> bins = u'dense'
>>> key = None
>>> (config_cdfs, edges) = testres.get_rank_percentage_cumhist(bins)
>>> result = ('(config_cdfs, edges) = %s' % (str((config_cdfs, edges)),))
>>> print(result)
|
wbia/expt/test_result.py
|
get_rank_percentage_cumhist
|
WildMeOrg/wildbook-ia
|
python
|
def get_rank_percentage_cumhist(testres, bins='dense', key=None, join_acfgs=False):
"\n Args:\n bins (unicode): (default = u'dense')\n key (None): (default = None)\n join_acfgs (bool): (default = False)\n\n Returns:\n tuple: (config_cdfs, edges)\n\n CommandLine:\n python -m wbia --tf TestResult.get_rank_percentage_cumhist\n python -m wbia --tf TestResult.get_rank_percentage_cumhist \\\n -t baseline -a unctrl ctrl\n\n python -m wbia --tf TestResult.get_rank_percentage_cumhist \\\n --db lynx \\\n -a default:qsame_imageset=True,been_adjusted=True,excluderef=True \\\n -t default:K=1 --show --cmd\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts(\n >>> 'testdb1', a=['default:num_names=1,name_offset=[0,1]'])\n >>> bins = u'dense'\n >>> key = None\n >>> (config_cdfs, edges) = testres.get_rank_percentage_cumhist(bins)\n >>> result = ('(config_cdfs, edges) = %s' % (str((config_cdfs, edges)),))\n >>> print(result)\n "
(cfgx2_hist, edges) = testres.get_rank_histograms(bins, key=key, join_acfgs=join_acfgs)
cfgx2_cumhist = np.cumsum(cfgx2_hist, axis=1)
cfgx2_cumhist_percent = ((100 * cfgx2_cumhist) / cfgx2_cumhist.T[(- 1)].T[:, None])
return (cfgx2_cumhist_percent, edges)
|
def get_cfgx_groupxs(testres):
"\n Returns the group indices of configurations specified to be joined.\n\n Ignore:\n a = [\n 'default:minqual=good,require_timestamp=True,view=left,crossval_enc=True,joinme=1',\n 'default:minqual=good,require_timestamp=True,view=right,crossval_enc=True,joinme=1',\n 'default:minqual=ok,require_timestamp=True,view=left,crossval_enc=True,joinme=2',\n 'default:minqual=ok,require_timestamp=True,view=right,crossval_enc=True,joinme=2',\n ]\n >>> a = [\n >>> 'default:minqual=good,require_timestamp=True,view=left,crossval_enc=True,joinme=1',\n >>> 'default:minqual=good,require_timestamp=True,view=right,crossval_enc=True,joinme=1',\n >>> 'default:minqual=ok,require_timestamp=True,view=left,crossval_enc=True,joinme=2',\n >>> 'default:minqual=ok,require_timestamp=True,view=right,crossval_enc=True,joinme=2',\n >>> ]\n >>> from wbia.init import main_helpers\n >>> #a = 'default:minqual=good,require_timestamp=True,crossval_enc=True,view=[right,left]'\n >>> t = 'default:K=[1]'\n >>> ibs, testres = main_helpers.testdata_expts('WWF_Lynx_Copy', a=a, t=t)\n >>> testres.get_cfgx_groupxs()\n\n ut.lmap(sum, ut.apply_grouping([len(ut.unique(ibs.annots(aids).nids)) for aids in testres.cfgx2_qaids], testres.get_cfgx_groupxs()))\n ut.lmap(sum, ut.apply_grouping([len(ut.unique(ibs.annots(aids))) for aids in testres.cfgx2_qaids], testres.get_cfgx_groupxs()))\n\n Example:\n >>> # xdoctest: +REQUIRES(--slow)\n >>> # ENABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts(\n >>> 'PZ_MTEST',\n >>> a=['default:qnum_names=1,qname_offset=[0,1],joinme=1,dpername=1',\n >>> 'default:qsize=1,dpername=[1,2]'],\n >>> t=['default:K=[1,2]'])\n >>> groupxs = testres.get_cfgx_groupxs()\n >>> result = groupxs\n >>> print(result)\n [[6], [4], [0, 2], [7], [5], [1, 3]]\n "
acfg_joinid = [acfg['qcfg']['joinme'] for acfg in testres.cfgx2_acfg]
gen_groupid = it.count((- 1), step=(- 1))
acfg_groupids = [(next(gen_groupid) if (grpid is None) else grpid) for grpid in acfg_joinid]
pcfg_groupids = ut.get_varied_cfg_lbls(testres.cfgx2_pcfg)
cfg_groupids = list(zip(pcfg_groupids, acfg_groupids))
groupxs = ut.group_indices(cfg_groupids)[1]
return groupxs
| -37,846,633,727,779,384
|
Returns the group indices of configurations specified to be joined.
Ignore:
a = [
'default:minqual=good,require_timestamp=True,view=left,crossval_enc=True,joinme=1',
'default:minqual=good,require_timestamp=True,view=right,crossval_enc=True,joinme=1',
'default:minqual=ok,require_timestamp=True,view=left,crossval_enc=True,joinme=2',
'default:minqual=ok,require_timestamp=True,view=right,crossval_enc=True,joinme=2',
]
>>> a = [
>>> 'default:minqual=good,require_timestamp=True,view=left,crossval_enc=True,joinme=1',
>>> 'default:minqual=good,require_timestamp=True,view=right,crossval_enc=True,joinme=1',
>>> 'default:minqual=ok,require_timestamp=True,view=left,crossval_enc=True,joinme=2',
>>> 'default:minqual=ok,require_timestamp=True,view=right,crossval_enc=True,joinme=2',
>>> ]
>>> from wbia.init import main_helpers
>>> #a = 'default:minqual=good,require_timestamp=True,crossval_enc=True,view=[right,left]'
>>> t = 'default:K=[1]'
>>> ibs, testres = main_helpers.testdata_expts('WWF_Lynx_Copy', a=a, t=t)
>>> testres.get_cfgx_groupxs()
ut.lmap(sum, ut.apply_grouping([len(ut.unique(ibs.annots(aids).nids)) for aids in testres.cfgx2_qaids], testres.get_cfgx_groupxs()))
ut.lmap(sum, ut.apply_grouping([len(ut.unique(ibs.annots(aids))) for aids in testres.cfgx2_qaids], testres.get_cfgx_groupxs()))
Example:
>>> # xdoctest: +REQUIRES(--slow)
>>> # ENABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.init import main_helpers
>>> ibs, testres = main_helpers.testdata_expts(
>>> 'PZ_MTEST',
>>> a=['default:qnum_names=1,qname_offset=[0,1],joinme=1,dpername=1',
>>> 'default:qsize=1,dpername=[1,2]'],
>>> t=['default:K=[1,2]'])
>>> groupxs = testres.get_cfgx_groupxs()
>>> result = groupxs
>>> print(result)
[[6], [4], [0, 2], [7], [5], [1, 3]]
|
wbia/expt/test_result.py
|
get_cfgx_groupxs
|
WildMeOrg/wildbook-ia
|
python
|
def get_cfgx_groupxs(testres):
"\n Returns the group indices of configurations specified to be joined.\n\n Ignore:\n a = [\n 'default:minqual=good,require_timestamp=True,view=left,crossval_enc=True,joinme=1',\n 'default:minqual=good,require_timestamp=True,view=right,crossval_enc=True,joinme=1',\n 'default:minqual=ok,require_timestamp=True,view=left,crossval_enc=True,joinme=2',\n 'default:minqual=ok,require_timestamp=True,view=right,crossval_enc=True,joinme=2',\n ]\n >>> a = [\n >>> 'default:minqual=good,require_timestamp=True,view=left,crossval_enc=True,joinme=1',\n >>> 'default:minqual=good,require_timestamp=True,view=right,crossval_enc=True,joinme=1',\n >>> 'default:minqual=ok,require_timestamp=True,view=left,crossval_enc=True,joinme=2',\n >>> 'default:minqual=ok,require_timestamp=True,view=right,crossval_enc=True,joinme=2',\n >>> ]\n >>> from wbia.init import main_helpers\n >>> #a = 'default:minqual=good,require_timestamp=True,crossval_enc=True,view=[right,left]'\n >>> t = 'default:K=[1]'\n >>> ibs, testres = main_helpers.testdata_expts('WWF_Lynx_Copy', a=a, t=t)\n >>> testres.get_cfgx_groupxs()\n\n ut.lmap(sum, ut.apply_grouping([len(ut.unique(ibs.annots(aids).nids)) for aids in testres.cfgx2_qaids], testres.get_cfgx_groupxs()))\n ut.lmap(sum, ut.apply_grouping([len(ut.unique(ibs.annots(aids))) for aids in testres.cfgx2_qaids], testres.get_cfgx_groupxs()))\n\n Example:\n >>> # xdoctest: +REQUIRES(--slow)\n >>> # ENABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts(\n >>> 'PZ_MTEST',\n >>> a=['default:qnum_names=1,qname_offset=[0,1],joinme=1,dpername=1',\n >>> 'default:qsize=1,dpername=[1,2]'],\n >>> t=['default:K=[1,2]'])\n >>> groupxs = testres.get_cfgx_groupxs()\n >>> result = groupxs\n >>> print(result)\n [[6], [4], [0, 2], [7], [5], [1, 3]]\n "
acfg_joinid = [acfg['qcfg']['joinme'] for acfg in testres.cfgx2_acfg]
gen_groupid = it.count((- 1), step=(- 1))
acfg_groupids = [(next(gen_groupid) if (grpid is None) else grpid) for grpid in acfg_joinid]
pcfg_groupids = ut.get_varied_cfg_lbls(testres.cfgx2_pcfg)
cfg_groupids = list(zip(pcfg_groupids, acfg_groupids))
groupxs = ut.group_indices(cfg_groupids)[1]
return groupxs
|
def get_rank_histogram_bins(testres):
'easy to see histogram bins'
worst_possible_rank = testres.get_worst_possible_rank()
if (worst_possible_rank > 50):
bins = [0, 1, 5, 50, worst_possible_rank, (worst_possible_rank + 1)]
elif (worst_possible_rank > 5):
bins = [0, 1, 5, worst_possible_rank, (worst_possible_rank + 1)]
else:
bins = [0, 1, 5]
return bins
| 2,918,817,636,958,619,600
|
easy to see histogram bins
|
wbia/expt/test_result.py
|
get_rank_histogram_bins
|
WildMeOrg/wildbook-ia
|
python
|
def get_rank_histogram_bins(testres):
worst_possible_rank = testres.get_worst_possible_rank()
if (worst_possible_rank > 50):
bins = [0, 1, 5, 50, worst_possible_rank, (worst_possible_rank + 1)]
elif (worst_possible_rank > 5):
bins = [0, 1, 5, worst_possible_rank, (worst_possible_rank + 1)]
else:
bins = [0, 1, 5]
return bins
|
def get_X_LIST(testres):
'DEPRICATE or refactor'
X_LIST = ut.get_argval('--rank-lt-list', type_=list, default=[1, 5])
return X_LIST
| -3,195,641,197,643,784,700
|
DEPRICATE or refactor
|
wbia/expt/test_result.py
|
get_X_LIST
|
WildMeOrg/wildbook-ia
|
python
|
def get_X_LIST(testres):
X_LIST = ut.get_argval('--rank-lt-list', type_=list, default=[1, 5])
return X_LIST
|
def get_nLessX_dict(testres):
'\n Build a (histogram) dictionary mapping X (as in #ranks < X) to a list\n of cfg scores\n '
X_LIST = testres.get_X_LIST()
nLessX_dict = {int(X): np.zeros(testres.nConfig) for X in X_LIST}
cfgx2_qx2_gt_rank = testres.get_infoprop_list('qx2_gt_rank')
for X in X_LIST:
cfgx2_lessX_mask = [np.logical_and((0 <= qx2_gt_ranks), (qx2_gt_ranks < X)) for qx2_gt_ranks in cfgx2_qx2_gt_rank]
cfgx2_nLessX = np.array([lessX_.sum(axis=0) for lessX_ in cfgx2_lessX_mask])
nLessX_dict[int(X)] = cfgx2_nLessX
return nLessX_dict
| 6,837,458,029,855,711,000
|
Build a (histogram) dictionary mapping X (as in #ranks < X) to a list
of cfg scores
|
wbia/expt/test_result.py
|
get_nLessX_dict
|
WildMeOrg/wildbook-ia
|
python
|
def get_nLessX_dict(testres):
'\n Build a (histogram) dictionary mapping X (as in #ranks < X) to a list\n of cfg scores\n '
X_LIST = testres.get_X_LIST()
nLessX_dict = {int(X): np.zeros(testres.nConfig) for X in X_LIST}
cfgx2_qx2_gt_rank = testres.get_infoprop_list('qx2_gt_rank')
for X in X_LIST:
cfgx2_lessX_mask = [np.logical_and((0 <= qx2_gt_ranks), (qx2_gt_ranks < X)) for qx2_gt_ranks in cfgx2_qx2_gt_rank]
cfgx2_nLessX = np.array([lessX_.sum(axis=0) for lessX_ in cfgx2_lessX_mask])
nLessX_dict[int(X)] = cfgx2_nLessX
return nLessX_dict
|
def get_all_varied_params(testres):
"\n Returns the parameters that were varied between different\n configurations in this test\n\n Returns:\n list: varied_params\n\n CommandLine:\n python -m wbia TestResult.get_all_varied_params\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> testres = wbia.testdata_expts(\n >>> 'PZ_MTEST', t='default:K=[1,2]')[1]\n >>> varied_params = sorted(testres.get_all_varied_params())\n >>> result = ('varied_params = %s' % (ut.repr2(varied_params),))\n >>> print(result)\n varied_params = ['K', '_cfgindex']\n "
varied_cfg_params = list(set(ut.flatten([cfgdict.keys() for cfgdict in testres.varied_cfg_list])))
varied_acfg_params = list(set(ut.flatten([acfg.keys() for acfg in testres.varied_acfg_list])))
varied_params = (varied_acfg_params + varied_cfg_params)
return varied_params
| -8,763,251,522,791,680,000
|
Returns the parameters that were varied between different
configurations in this test
Returns:
list: varied_params
CommandLine:
python -m wbia TestResult.get_all_varied_params
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> import wbia
>>> testres = wbia.testdata_expts(
>>> 'PZ_MTEST', t='default:K=[1,2]')[1]
>>> varied_params = sorted(testres.get_all_varied_params())
>>> result = ('varied_params = %s' % (ut.repr2(varied_params),))
>>> print(result)
varied_params = ['K', '_cfgindex']
|
wbia/expt/test_result.py
|
get_all_varied_params
|
WildMeOrg/wildbook-ia
|
python
|
def get_all_varied_params(testres):
"\n Returns the parameters that were varied between different\n configurations in this test\n\n Returns:\n list: varied_params\n\n CommandLine:\n python -m wbia TestResult.get_all_varied_params\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> testres = wbia.testdata_expts(\n >>> 'PZ_MTEST', t='default:K=[1,2]')[1]\n >>> varied_params = sorted(testres.get_all_varied_params())\n >>> result = ('varied_params = %s' % (ut.repr2(varied_params),))\n >>> print(result)\n varied_params = ['K', '_cfgindex']\n "
varied_cfg_params = list(set(ut.flatten([cfgdict.keys() for cfgdict in testres.varied_cfg_list])))
varied_acfg_params = list(set(ut.flatten([acfg.keys() for acfg in testres.varied_acfg_list])))
varied_params = (varied_acfg_params + varied_cfg_params)
return varied_params
|
def get_param_basis(testres, key):
"\n Returns what a param was varied between over all tests\n key = 'K'\n key = 'dcfg_sample_size'\n "
if (key == 'len(daids)'):
basis = sorted(list(set([len(daids) for daids in testres.cfgx2_daids])))
elif any([(key in cfgdict) for cfgdict in testres.varied_cfg_list]):
basis = sorted(list(set([cfgdict[key] for cfgdict in testres.varied_cfg_list])))
elif any([(key in cfgdict) for cfgdict in testres.varied_acfg_list]):
basis = sorted(list(set([acfg[key] for acfg in testres.varied_acfg_list])))
elif (key in testres.common_cfgdict):
basis = [testres.common_cfgdict[key]]
elif (key in testres.nonvaried_acfg):
basis = [testres.nonvaried_acfg[key]]
else:
assert False, ('param=%r doesnt exist' % (key,))
return basis
| -1,060,106,364,104,284,900
|
Returns what a param was varied between over all tests
key = 'K'
key = 'dcfg_sample_size'
|
wbia/expt/test_result.py
|
get_param_basis
|
WildMeOrg/wildbook-ia
|
python
|
def get_param_basis(testres, key):
"\n Returns what a param was varied between over all tests\n key = 'K'\n key = 'dcfg_sample_size'\n "
if (key == 'len(daids)'):
basis = sorted(list(set([len(daids) for daids in testres.cfgx2_daids])))
elif any([(key in cfgdict) for cfgdict in testres.varied_cfg_list]):
basis = sorted(list(set([cfgdict[key] for cfgdict in testres.varied_cfg_list])))
elif any([(key in cfgdict) for cfgdict in testres.varied_acfg_list]):
basis = sorted(list(set([acfg[key] for acfg in testres.varied_acfg_list])))
elif (key in testres.common_cfgdict):
basis = [testres.common_cfgdict[key]]
elif (key in testres.nonvaried_acfg):
basis = [testres.nonvaried_acfg[key]]
else:
assert False, ('param=%r doesnt exist' % (key,))
return basis
|
def get_cfgx_with_param(testres, key, val):
'\n Gets configs where the given parameter is held constant\n '
if (key == 'len(daids)'):
cfgx_list = [cfgx for (cfgx, daids) in enumerate(testres.cfgx2_daids) if (len(daids) == val)]
elif any([(key in cfgdict) for cfgdict in testres.varied_cfg_list]):
cfgx_list = [cfgx for (cfgx, cfgdict) in enumerate(testres.varied_cfg_list) if (cfgdict[key] == val)]
elif any([(key in cfgdict) for cfgdict in testres.varied_acfg_list]):
cfgx_list = [cfgx for (cfgx, acfg) in enumerate(testres.varied_acfg_list) if (acfg[key] == val)]
elif (key in testres.common_cfgdict):
cfgx_list = list(range(testres.nConfig))
elif (key in testres.nonvaried_acfg):
cfgx_list = list(range(testres.nConfig))
else:
assert False, ('param=%r doesnt exist' % (key,))
return cfgx_list
| -2,903,806,636,783,050,000
|
Gets configs where the given parameter is held constant
|
wbia/expt/test_result.py
|
get_cfgx_with_param
|
WildMeOrg/wildbook-ia
|
python
|
def get_cfgx_with_param(testres, key, val):
'\n \n '
if (key == 'len(daids)'):
cfgx_list = [cfgx for (cfgx, daids) in enumerate(testres.cfgx2_daids) if (len(daids) == val)]
elif any([(key in cfgdict) for cfgdict in testres.varied_cfg_list]):
cfgx_list = [cfgx for (cfgx, cfgdict) in enumerate(testres.varied_cfg_list) if (cfgdict[key] == val)]
elif any([(key in cfgdict) for cfgdict in testres.varied_acfg_list]):
cfgx_list = [cfgx for (cfgx, acfg) in enumerate(testres.varied_acfg_list) if (acfg[key] == val)]
elif (key in testres.common_cfgdict):
cfgx_list = list(range(testres.nConfig))
elif (key in testres.nonvaried_acfg):
cfgx_list = list(range(testres.nConfig))
else:
assert False, ('param=%r doesnt exist' % (key,))
return cfgx_list
|
def get_annotcfg_args(testres):
'\n CommandLine:\n # TODO: More robust fix\n # To reproduce the error\n wbia -e rank_cmc --db humpbacks_fb -a default:mingt=2,qsize=10,dsize=100 default:qmingt=2,qsize=10,dsize=100 -t default:proot=BC_DTW,decision=max,crop_dim_size=500,crop_enabled=True,manual_extract=False,use_te_scorer=True,ignore_notch=True,te_score_weight=0.5 --show\n '
if ('_cfgstr' in testres.common_acfg['common']):
annotcfg_args = [testres.common_acfg['common']['_cfgstr']]
else:
try:
annotcfg_args = ut.unique_ordered([acfg['common']['_cfgstr'] for acfg in testres.varied_acfg_list])
except KeyError:
try:
annotcfg_args = ut.unique_ordered([acfg['_cfgstr'] for acfg in testres.varied_acfg_list])
except KeyError:
annotcfg_args = ut.unique_ordered([acfg['qcfg__cfgstr'] for acfg in testres.varied_acfg_list])
return ' '.join(annotcfg_args)
| -8,628,802,053,686,688,000
|
CommandLine:
# TODO: More robust fix
# To reproduce the error
wbia -e rank_cmc --db humpbacks_fb -a default:mingt=2,qsize=10,dsize=100 default:qmingt=2,qsize=10,dsize=100 -t default:proot=BC_DTW,decision=max,crop_dim_size=500,crop_enabled=True,manual_extract=False,use_te_scorer=True,ignore_notch=True,te_score_weight=0.5 --show
|
wbia/expt/test_result.py
|
get_annotcfg_args
|
WildMeOrg/wildbook-ia
|
python
|
def get_annotcfg_args(testres):
'\n CommandLine:\n # TODO: More robust fix\n # To reproduce the error\n wbia -e rank_cmc --db humpbacks_fb -a default:mingt=2,qsize=10,dsize=100 default:qmingt=2,qsize=10,dsize=100 -t default:proot=BC_DTW,decision=max,crop_dim_size=500,crop_enabled=True,manual_extract=False,use_te_scorer=True,ignore_notch=True,te_score_weight=0.5 --show\n '
if ('_cfgstr' in testres.common_acfg['common']):
annotcfg_args = [testres.common_acfg['common']['_cfgstr']]
else:
try:
annotcfg_args = ut.unique_ordered([acfg['common']['_cfgstr'] for acfg in testres.varied_acfg_list])
except KeyError:
try:
annotcfg_args = ut.unique_ordered([acfg['_cfgstr'] for acfg in testres.varied_acfg_list])
except KeyError:
annotcfg_args = ut.unique_ordered([acfg['qcfg__cfgstr'] for acfg in testres.varied_acfg_list])
return ' '.join(annotcfg_args)
|
def get_full_cfgstr(testres, cfgx):
'both qannots and dannots included'
full_cfgstr = testres.cfgx2_qreq_[cfgx].get_full_cfgstr()
return full_cfgstr
| 4,782,968,721,399,948,000
|
both qannots and dannots included
|
wbia/expt/test_result.py
|
get_full_cfgstr
|
WildMeOrg/wildbook-ia
|
python
|
def get_full_cfgstr(testres, cfgx):
full_cfgstr = testres.cfgx2_qreq_[cfgx].get_full_cfgstr()
return full_cfgstr
|
@ut.memoize
def get_cfgstr(testres, cfgx):
'just dannots and config_str'
cfgstr = testres.cfgx2_qreq_[cfgx].get_cfgstr()
return cfgstr
| 5,608,703,665,681,617,000
|
just dannots and config_str
|
wbia/expt/test_result.py
|
get_cfgstr
|
WildMeOrg/wildbook-ia
|
python
|
@ut.memoize
def get_cfgstr(testres, cfgx):
cfgstr = testres.cfgx2_qreq_[cfgx].get_cfgstr()
return cfgstr
|
def _shorten_lbls(testres, lbl):
'\n hacky function\n '
import re
repl_list = [('candidacy_', ''), ('viewpoint_compare', 'viewpoint'), ('fg_on=True', 'FG=True'), ('fg_on=False,?', 'FG=False'), ('lnbnn_on=True', 'LNBNN'), ('lnbnn_on=False,?', ''), ('normonly_on=True', 'normonly'), ('normonly_on=False,?', ''), ('bar_l2_on=True', 'dist'), ('bar_l2_on=False,?', ''), ('joinme=\\d+,?', ''), ('dcrossval_enc', 'denc_per_name'), ('sv_on', 'SV'), ('rotation_invariance', 'RI'), ('affine_invariance', 'AI'), ('query_rotation_heuristic', 'QRH'), ('nNameShortlistSVER', 'nRR'), ('sample_per_ref_name', 'per_gt_name'), ('require_timestamp=True', 'require_timestamp'), ('require_timestamp=False,?', ''), ('require_timestamp=None,?', ''), ('[_A-Za-z]*=None,?', ''), ('dpername=None,?', ''), ("prescore_method='?csum'?,score_method='?csum'?,?", 'mech=annot'), ("prescore_method='?nsum'?,score_method='?nsum'?,?", 'mech=name'), ('force_const_size=[^,]+,?', ''), ('[dq]?_true_size=\\d+,?', ''), ('[dq]?_orig_size=[^,]+,?', ''), ((('[qd]?exclude_reference=' + ut.regex_or(['True', 'False', 'None'])) + '\\,?'), ''), ('=True', '=T'), ('=False', '=F'), (',$', '')]
for (ser, rep) in repl_list:
lbl = re.sub(ser, rep, lbl)
return lbl
| -4,939,464,198,397,053,000
|
hacky function
|
wbia/expt/test_result.py
|
_shorten_lbls
|
WildMeOrg/wildbook-ia
|
python
|
def _shorten_lbls(testres, lbl):
'\n \n '
import re
repl_list = [('candidacy_', ), ('viewpoint_compare', 'viewpoint'), ('fg_on=True', 'FG=True'), ('fg_on=False,?', 'FG=False'), ('lnbnn_on=True', 'LNBNN'), ('lnbnn_on=False,?', ), ('normonly_on=True', 'normonly'), ('normonly_on=False,?', ), ('bar_l2_on=True', 'dist'), ('bar_l2_on=False,?', ), ('joinme=\\d+,?', ), ('dcrossval_enc', 'denc_per_name'), ('sv_on', 'SV'), ('rotation_invariance', 'RI'), ('affine_invariance', 'AI'), ('query_rotation_heuristic', 'QRH'), ('nNameShortlistSVER', 'nRR'), ('sample_per_ref_name', 'per_gt_name'), ('require_timestamp=True', 'require_timestamp'), ('require_timestamp=False,?', ), ('require_timestamp=None,?', ), ('[_A-Za-z]*=None,?', ), ('dpername=None,?', ), ("prescore_method='?csum'?,score_method='?csum'?,?", 'mech=annot'), ("prescore_method='?nsum'?,score_method='?nsum'?,?", 'mech=name'), ('force_const_size=[^,]+,?', ), ('[dq]?_true_size=\\d+,?', ), ('[dq]?_orig_size=[^,]+,?', ), ((('[qd]?exclude_reference=' + ut.regex_or(['True', 'False', 'None'])) + '\\,?'), ), ('=True', '=T'), ('=False', '=F'), (',$', )]
for (ser, rep) in repl_list:
lbl = re.sub(ser, rep, lbl)
return lbl
|
def get_short_cfglbls(testres, join_acfgs=False):
"\n Labels for published tables\n\n cfg_lbls = ['baseline:nRR=200+default:', 'baseline:+default:']\n\n CommandLine:\n python -m wbia --tf TestResult.get_short_cfglbls\n\n Example:\n >>> # SLOW_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('PZ_MTEST', a=['ctrl:size=10'],\n >>> t=['default:dim_size=[450,550]'])\n >>> cfg_lbls = testres.get_short_cfglbls()\n >>> result = ('cfg_lbls = %s' % (ut.repr2(cfg_lbls),))\n >>> print(result)\n cfg_lbls = [\n 'default:dim_size=450+ctrl',\n 'default:dim_size=550+ctrl',\n ]\n "
from wbia.expt import annotation_configs
if False:
acfg_names = [acfg['qcfg']['_cfgstr'] for acfg in testres.cfgx2_acfg]
pcfg_names = [pcfg['_cfgstr'] for pcfg in testres.cfgx2_pcfg]
acfg_hashes = np.array(list(map(hash, acfg_names)))
(unique_hashes, a_groupxs) = vt.group_indices(acfg_hashes)
a_label_groups = []
for groupx in a_groupxs:
acfg_list = ut.take(testres.cfgx2_acfg, groupx)
varied_lbls = annotation_configs.get_varied_acfg_labels(acfg_list, mainkey='_cfgstr')
a_label_groups.append(varied_lbls)
acfg_lbls = vt.invert_apply_grouping(a_label_groups, a_groupxs)
pcfg_hashes = np.array(list(map(hash, pcfg_names)))
(unique_hashes, p_groupxs) = vt.group_indices(pcfg_hashes)
p_label_groups = []
for groupx in p_groupxs:
pcfg_list = ut.take(testres.cfgx2_pcfg, groupx)
varied_lbls = ut.get_varied_cfg_lbls(pcfg_list, mainkey='_cfgstr')
p_label_groups.append(varied_lbls)
pcfg_lbls = vt.invert_apply_grouping(p_label_groups, p_groupxs)
cfg_lbls = [((albl + '+') + plbl) for (albl, plbl) in zip(acfg_lbls, pcfg_lbls)]
else:
cfg_lbls_ = testres.cfgx2_lbl[:]
cfg_lbls_ = [testres._shorten_lbls(lbl) for lbl in cfg_lbls_]
pa_tups = [lbl.split('+') for lbl in cfg_lbls_]
cfg_lbls = []
for pa in pa_tups:
new_parts = []
for part in pa:
_tup = part.split(ut.NAMEVARSEP)
(name, settings) = (_tup if (len(_tup) > 1) else (_tup[0], ''))
new_parts.append((part if settings else name))
if ((len(new_parts) == 2) and (new_parts[1] == 'default')):
newlbl = new_parts[0]
else:
newlbl = '+'.join(new_parts)
cfg_lbls.append(newlbl)
if join_acfgs:
groupxs = testres.get_cfgx_groupxs()
group_lbls = []
for group in ut.apply_grouping(cfg_lbls, groupxs):
num_parts = 0
part_dicts = []
for lbl in group:
parts = []
for (count, pa) in enumerate(lbl.split('+')):
num_parts = max(num_parts, (count + 1))
cfgdict = cfghelpers.parse_cfgstr_list2([pa], strict=False)[0][0]
parts.append(cfgdict)
part_dicts.append(parts)
group_lbl_parts = []
for px in range(num_parts):
cfgs = ut.take_column(part_dicts, px)
nonvaried_cfg = ut.partition_varied_cfg_list(cfgs)[0]
group_lbl_parts.append(ut.get_cfg_lbl(nonvaried_cfg))
group_lbl = '+'.join(group_lbl_parts)
group_lbls.append(group_lbl)
cfg_lbls = group_lbls
return cfg_lbls
| 2,210,508,630,745,849,600
|
Labels for published tables
cfg_lbls = ['baseline:nRR=200+default:', 'baseline:+default:']
CommandLine:
python -m wbia --tf TestResult.get_short_cfglbls
Example:
>>> # SLOW_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> import wbia
>>> ibs, testres = wbia.testdata_expts('PZ_MTEST', a=['ctrl:size=10'],
>>> t=['default:dim_size=[450,550]'])
>>> cfg_lbls = testres.get_short_cfglbls()
>>> result = ('cfg_lbls = %s' % (ut.repr2(cfg_lbls),))
>>> print(result)
cfg_lbls = [
'default:dim_size=450+ctrl',
'default:dim_size=550+ctrl',
]
|
wbia/expt/test_result.py
|
get_short_cfglbls
|
WildMeOrg/wildbook-ia
|
python
|
def get_short_cfglbls(testres, join_acfgs=False):
"\n Labels for published tables\n\n cfg_lbls = ['baseline:nRR=200+default:', 'baseline:+default:']\n\n CommandLine:\n python -m wbia --tf TestResult.get_short_cfglbls\n\n Example:\n >>> # SLOW_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('PZ_MTEST', a=['ctrl:size=10'],\n >>> t=['default:dim_size=[450,550]'])\n >>> cfg_lbls = testres.get_short_cfglbls()\n >>> result = ('cfg_lbls = %s' % (ut.repr2(cfg_lbls),))\n >>> print(result)\n cfg_lbls = [\n 'default:dim_size=450+ctrl',\n 'default:dim_size=550+ctrl',\n ]\n "
from wbia.expt import annotation_configs
if False:
acfg_names = [acfg['qcfg']['_cfgstr'] for acfg in testres.cfgx2_acfg]
pcfg_names = [pcfg['_cfgstr'] for pcfg in testres.cfgx2_pcfg]
acfg_hashes = np.array(list(map(hash, acfg_names)))
(unique_hashes, a_groupxs) = vt.group_indices(acfg_hashes)
a_label_groups = []
for groupx in a_groupxs:
acfg_list = ut.take(testres.cfgx2_acfg, groupx)
varied_lbls = annotation_configs.get_varied_acfg_labels(acfg_list, mainkey='_cfgstr')
a_label_groups.append(varied_lbls)
acfg_lbls = vt.invert_apply_grouping(a_label_groups, a_groupxs)
pcfg_hashes = np.array(list(map(hash, pcfg_names)))
(unique_hashes, p_groupxs) = vt.group_indices(pcfg_hashes)
p_label_groups = []
for groupx in p_groupxs:
pcfg_list = ut.take(testres.cfgx2_pcfg, groupx)
varied_lbls = ut.get_varied_cfg_lbls(pcfg_list, mainkey='_cfgstr')
p_label_groups.append(varied_lbls)
pcfg_lbls = vt.invert_apply_grouping(p_label_groups, p_groupxs)
cfg_lbls = [((albl + '+') + plbl) for (albl, plbl) in zip(acfg_lbls, pcfg_lbls)]
else:
cfg_lbls_ = testres.cfgx2_lbl[:]
cfg_lbls_ = [testres._shorten_lbls(lbl) for lbl in cfg_lbls_]
pa_tups = [lbl.split('+') for lbl in cfg_lbls_]
cfg_lbls = []
for pa in pa_tups:
new_parts = []
for part in pa:
_tup = part.split(ut.NAMEVARSEP)
(name, settings) = (_tup if (len(_tup) > 1) else (_tup[0], ))
new_parts.append((part if settings else name))
if ((len(new_parts) == 2) and (new_parts[1] == 'default')):
newlbl = new_parts[0]
else:
newlbl = '+'.join(new_parts)
cfg_lbls.append(newlbl)
if join_acfgs:
groupxs = testres.get_cfgx_groupxs()
group_lbls = []
for group in ut.apply_grouping(cfg_lbls, groupxs):
num_parts = 0
part_dicts = []
for lbl in group:
parts = []
for (count, pa) in enumerate(lbl.split('+')):
num_parts = max(num_parts, (count + 1))
cfgdict = cfghelpers.parse_cfgstr_list2([pa], strict=False)[0][0]
parts.append(cfgdict)
part_dicts.append(parts)
group_lbl_parts = []
for px in range(num_parts):
cfgs = ut.take_column(part_dicts, px)
nonvaried_cfg = ut.partition_varied_cfg_list(cfgs)[0]
group_lbl_parts.append(ut.get_cfg_lbl(nonvaried_cfg))
group_lbl = '+'.join(group_lbl_parts)
group_lbls.append(group_lbl)
cfg_lbls = group_lbls
return cfg_lbls
|
def get_varied_labels(testres, shorten=False, join_acfgs=False, sep=''):
'\n Returns labels indicating only the parameters that have been varied between\n different annot/pipeline configurations.\n\n Helper for consistent figure titles\n\n CommandLine:\n python -m wbia --tf TestResult.make_figtitle --prefix "Seperability " --db GIRM_Master1 -a timectrl -t Ell:K=2 --hargv=scores\n python -m wbia --tf TestResult.make_figtitle\n python -m wbia TestResult.get_varied_labels\n\n Example:\n >>> # SLOW_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts(\n >>> \'PZ_MTEST\', t=\'default:K=[1,2]\',\n >>> #a=[\'timectrl:qsize=[1,2],dsize=[3,4]\']\n >>> a=[\n >>> \'default:qsize=[1,2],dsize=2,joinme=1,view=left\',\n >>> \'default:qsize=2,dsize=3,joinme=1,view=primary\',\n >>> \'default:qsize=[3,2],dsize=4,joinme=2,view=left\',\n >>> \'default:qsize=4,dsize=5,joinme=2,view=primary\',\n >>> ]\n >>> )\n >>> # >>> ibs, testres = wbia.testdata_expts(\n >>> # >>> \'WWF_Lynx_Copy\', t=\'default:K=1\',\n >>> # >>> a=[\n >>> # >>> \'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=1,joinme=1\',\n >>> # >>> \'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=2,joinme=2\',\n >>> # >>> #\'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=3,joinme=3\',\n >>> # >>> \'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=1,joinme=1\',\n >>> # >>> \'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=2,joinme=2\',\n >>> # >>> #\'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=3,joinme=3\',\n >>> # >>> ]\n >>> # >>> )\n >>> varied_lbls = testres.get_varied_labels(shorten=False, join_acfgs=True)\n >>> result = (\'varied_lbls = %s\' % (ut.repr2(varied_lbls, strvals=True, nl=2),))\n >>> print(result)\n\n varied_lbls = [u\'K=1+qsize=1\', u\'K=2+qsize=1\', u\'K=1+qsize=2\', u\'K=2+qsize=2\']\n '
from wbia.expt import annotation_configs
varied_acfgs = annotation_configs.get_varied_acfg_labels(testres.cfgx2_acfg, checkname=True)
varied_pcfgs = ut.get_varied_cfg_lbls(testres.cfgx2_pcfg, checkname=True)
name_sep = ':'
cfg_sep = '+'
if join_acfgs:
new_varied_acfgs = []
groupxs = testres.get_cfgx_groupxs()
grouped_acfgs = ut.apply_grouping(varied_acfgs, groupxs)
grouped_pcfgs = ut.apply_grouping(varied_pcfgs, groupxs)
for group in grouped_acfgs:
group = [(p if (name_sep in p) else (name_sep + p)) for p in group]
cfgdicts_ = cfghelpers.parse_cfgstr_list2(group, strict=False)
cfgdicts = ut.take_column(cfgdicts_, 0)
new_acfgs = ut.partition_varied_cfg_list(cfgdicts)
new_acfg = new_acfgs[0]
if True:
internal_cfgs = new_acfgs[1]
import pandas as pd
intern_variations = pd.DataFrame.from_dict(internal_cfgs).to_dict(orient='list')
op_prefixes = {'sum': (np.sum, 'Σ-', ''), 'mean': (np.mean, 'µ-', ''), 'set': ((lambda x: '&'.join(set(map(str, x)))), '', 's')}
known_modes = {'dsize': 'mean', 'qsize': 'sum', 'view': 'set'}
for key in intern_variations.keys():
if key.startswith('_'):
continue
mode = known_modes.get(key, None)
vals = intern_variations[key]
if (mode is None):
mode = 'set'
if (key == 'crossval_idx'):
new_acfg['folds'] = len(intern_variations['crossval_idx'])
else:
(op, pref, suff) = op_prefixes[mode]
c = op(vals)
if isinstance(c, str):
new_acfg[((pref + key) + suff)] = c
else:
new_acfg[((pref + key) + suff)] = ut.repr2(c, precision=2)
new_varied_acfgs.append(new_acfg)
common_new_acfg = ut.partition_varied_cfg_list(new_varied_acfgs)[0]
for key in common_new_acfg.keys():
if (not key.startswith('_')):
for new_acfg in new_varied_acfgs:
del new_acfg[key]
varied_pcfgs = ut.take_column(grouped_pcfgs, 0)
varied_acfgs = [ut.get_cfg_lbl(new_acfg_, with_name=False, sep=sep) for new_acfg_ in new_varied_acfgs]
def combo_lbls(lbla, lblp):
parts = []
if ((lbla != name_sep) and lbla):
parts.append(lbla)
if ((lblp != name_sep) and lblp):
parts.append(lblp)
return (sep + cfg_sep).join(parts)
varied_lbls = [combo_lbls(lbla, lblp) for (lblp, lbla) in zip(varied_acfgs, varied_pcfgs)]
if shorten:
varied_lbls = [testres._shorten_lbls(lbl) for lbl in varied_lbls]
return varied_lbls
| 5,242,806,394,810,212,000
|
Returns labels indicating only the parameters that have been varied between
different annot/pipeline configurations.
Helper for consistent figure titles
CommandLine:
python -m wbia --tf TestResult.make_figtitle --prefix "Seperability " --db GIRM_Master1 -a timectrl -t Ell:K=2 --hargv=scores
python -m wbia --tf TestResult.make_figtitle
python -m wbia TestResult.get_varied_labels
Example:
>>> # SLOW_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> import wbia
>>> ibs, testres = wbia.testdata_expts(
>>> 'PZ_MTEST', t='default:K=[1,2]',
>>> #a=['timectrl:qsize=[1,2],dsize=[3,4]']
>>> a=[
>>> 'default:qsize=[1,2],dsize=2,joinme=1,view=left',
>>> 'default:qsize=2,dsize=3,joinme=1,view=primary',
>>> 'default:qsize=[3,2],dsize=4,joinme=2,view=left',
>>> 'default:qsize=4,dsize=5,joinme=2,view=primary',
>>> ]
>>> )
>>> # >>> ibs, testres = wbia.testdata_expts(
>>> # >>> 'WWF_Lynx_Copy', t='default:K=1',
>>> # >>> a=[
>>> # >>> 'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=1,joinme=1',
>>> # >>> 'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=2,joinme=2',
>>> # >>> #'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=3,joinme=3',
>>> # >>> 'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=1,joinme=1',
>>> # >>> 'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=2,joinme=2',
>>> # >>> #'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=3,joinme=3',
>>> # >>> ]
>>> # >>> )
>>> varied_lbls = testres.get_varied_labels(shorten=False, join_acfgs=True)
>>> result = ('varied_lbls = %s' % (ut.repr2(varied_lbls, strvals=True, nl=2),))
>>> print(result)
varied_lbls = [u'K=1+qsize=1', u'K=2+qsize=1', u'K=1+qsize=2', u'K=2+qsize=2']
|
wbia/expt/test_result.py
|
get_varied_labels
|
WildMeOrg/wildbook-ia
|
python
|
def get_varied_labels(testres, shorten=False, join_acfgs=False, sep=):
'\n Returns labels indicating only the parameters that have been varied between\n different annot/pipeline configurations.\n\n Helper for consistent figure titles\n\n CommandLine:\n python -m wbia --tf TestResult.make_figtitle --prefix "Seperability " --db GIRM_Master1 -a timectrl -t Ell:K=2 --hargv=scores\n python -m wbia --tf TestResult.make_figtitle\n python -m wbia TestResult.get_varied_labels\n\n Example:\n >>> # SLOW_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts(\n >>> \'PZ_MTEST\', t=\'default:K=[1,2]\',\n >>> #a=[\'timectrl:qsize=[1,2],dsize=[3,4]\']\n >>> a=[\n >>> \'default:qsize=[1,2],dsize=2,joinme=1,view=left\',\n >>> \'default:qsize=2,dsize=3,joinme=1,view=primary\',\n >>> \'default:qsize=[3,2],dsize=4,joinme=2,view=left\',\n >>> \'default:qsize=4,dsize=5,joinme=2,view=primary\',\n >>> ]\n >>> )\n >>> # >>> ibs, testres = wbia.testdata_expts(\n >>> # >>> \'WWF_Lynx_Copy\', t=\'default:K=1\',\n >>> # >>> a=[\n >>> # >>> \'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=1,joinme=1\',\n >>> # >>> \'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=2,joinme=2\',\n >>> # >>> #\'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=3,joinme=3\',\n >>> # >>> \'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=1,joinme=1\',\n >>> # >>> \'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=2,joinme=2\',\n >>> # >>> #\'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=3,joinme=3\',\n >>> # >>> ]\n >>> # >>> )\n >>> varied_lbls = testres.get_varied_labels(shorten=False, join_acfgs=True)\n >>> result = (\'varied_lbls = %s\' % (ut.repr2(varied_lbls, strvals=True, nl=2),))\n >>> print(result)\n\n varied_lbls = [u\'K=1+qsize=1\', u\'K=2+qsize=1\', u\'K=1+qsize=2\', u\'K=2+qsize=2\']\n '
from wbia.expt import annotation_configs
varied_acfgs = annotation_configs.get_varied_acfg_labels(testres.cfgx2_acfg, checkname=True)
varied_pcfgs = ut.get_varied_cfg_lbls(testres.cfgx2_pcfg, checkname=True)
name_sep = ':'
cfg_sep = '+'
if join_acfgs:
new_varied_acfgs = []
groupxs = testres.get_cfgx_groupxs()
grouped_acfgs = ut.apply_grouping(varied_acfgs, groupxs)
grouped_pcfgs = ut.apply_grouping(varied_pcfgs, groupxs)
for group in grouped_acfgs:
group = [(p if (name_sep in p) else (name_sep + p)) for p in group]
cfgdicts_ = cfghelpers.parse_cfgstr_list2(group, strict=False)
cfgdicts = ut.take_column(cfgdicts_, 0)
new_acfgs = ut.partition_varied_cfg_list(cfgdicts)
new_acfg = new_acfgs[0]
if True:
internal_cfgs = new_acfgs[1]
import pandas as pd
intern_variations = pd.DataFrame.from_dict(internal_cfgs).to_dict(orient='list')
op_prefixes = {'sum': (np.sum, 'Σ-', ), 'mean': (np.mean, 'µ-', ), 'set': ((lambda x: '&'.join(set(map(str, x)))), , 's')}
known_modes = {'dsize': 'mean', 'qsize': 'sum', 'view': 'set'}
for key in intern_variations.keys():
if key.startswith('_'):
continue
mode = known_modes.get(key, None)
vals = intern_variations[key]
if (mode is None):
mode = 'set'
if (key == 'crossval_idx'):
new_acfg['folds'] = len(intern_variations['crossval_idx'])
else:
(op, pref, suff) = op_prefixes[mode]
c = op(vals)
if isinstance(c, str):
new_acfg[((pref + key) + suff)] = c
else:
new_acfg[((pref + key) + suff)] = ut.repr2(c, precision=2)
new_varied_acfgs.append(new_acfg)
common_new_acfg = ut.partition_varied_cfg_list(new_varied_acfgs)[0]
for key in common_new_acfg.keys():
if (not key.startswith('_')):
for new_acfg in new_varied_acfgs:
del new_acfg[key]
varied_pcfgs = ut.take_column(grouped_pcfgs, 0)
varied_acfgs = [ut.get_cfg_lbl(new_acfg_, with_name=False, sep=sep) for new_acfg_ in new_varied_acfgs]
def combo_lbls(lbla, lblp):
parts = []
if ((lbla != name_sep) and lbla):
parts.append(lbla)
if ((lblp != name_sep) and lblp):
parts.append(lblp)
return (sep + cfg_sep).join(parts)
varied_lbls = [combo_lbls(lbla, lblp) for (lblp, lbla) in zip(varied_acfgs, varied_pcfgs)]
if shorten:
varied_lbls = [testres._shorten_lbls(lbl) for lbl in varied_lbls]
return varied_lbls
|
def get_sorted_config_labels(testres):
'\n helper\n '
key = 'qx2_gt_rank'
(cfgx2_cumhist_percent, edges) = testres.get_rank_percentage_cumhist(bins='dense', key=key)
label_list = testres.get_short_cfglbls()
label_list = [((('%6.2f%%' % (percent,)) + ' - ') + label) for (percent, label) in zip(cfgx2_cumhist_percent.T[0], label_list)]
sortx = cfgx2_cumhist_percent.T[0].argsort()[::(- 1)]
label_list = ut.take(label_list, sortx)
return label_list
| 7,623,049,211,645,293,000
|
helper
|
wbia/expt/test_result.py
|
get_sorted_config_labels
|
WildMeOrg/wildbook-ia
|
python
|
def get_sorted_config_labels(testres):
'\n \n '
key = 'qx2_gt_rank'
(cfgx2_cumhist_percent, edges) = testres.get_rank_percentage_cumhist(bins='dense', key=key)
label_list = testres.get_short_cfglbls()
label_list = [((('%6.2f%%' % (percent,)) + ' - ') + label) for (percent, label) in zip(cfgx2_cumhist_percent.T[0], label_list)]
sortx = cfgx2_cumhist_percent.T[0].argsort()[::(- 1)]
label_list = ut.take(label_list, sortx)
return label_list
|
def make_figtitle(testres, plotname='', filt_cfg=None):
'\n Helper for consistent figure titles\n\n CommandLine:\n python -m wbia --tf TestResult.make_figtitle --prefix "Seperability " --db GIRM_Master1 -a timectrl -t Ell:K=2 --hargv=scores\n python -m wbia --tf TestResult.make_figtitle\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts(\'PZ_MTEST\')\n >>> plotname = \'\'\n >>> figtitle = testres.make_figtitle(plotname)\n >>> result = (\'figtitle = %r\' % (figtitle,))\n >>> print(result)\n '
figtitle_prefix = ut.get_argval('--prefix', type_=str, default='')
if (figtitle_prefix != ''):
figtitle_prefix = (figtitle_prefix.rstrip() + ' ')
figtitle = (figtitle_prefix + plotname)
hasprefix = (figtitle_prefix == '')
if hasprefix:
figtitle += '\n'
title_aug = testres.get_title_aug(friendly=True, with_cfg=hasprefix)
figtitle += (' ' + title_aug)
if (filt_cfg is not None):
filt_cfgstr = ut.get_cfg_lbl(filt_cfg)
if (filt_cfgstr.strip() != ':'):
figtitle += (' ' + filt_cfgstr)
return figtitle
| -4,311,391,420,868,987,400
|
Helper for consistent figure titles
CommandLine:
python -m wbia --tf TestResult.make_figtitle --prefix "Seperability " --db GIRM_Master1 -a timectrl -t Ell:K=2 --hargv=scores
python -m wbia --tf TestResult.make_figtitle
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> import wbia
>>> ibs, testres = wbia.testdata_expts('PZ_MTEST')
>>> plotname = ''
>>> figtitle = testres.make_figtitle(plotname)
>>> result = ('figtitle = %r' % (figtitle,))
>>> print(result)
|
wbia/expt/test_result.py
|
make_figtitle
|
WildMeOrg/wildbook-ia
|
python
|
def make_figtitle(testres, plotname=, filt_cfg=None):
'\n Helper for consistent figure titles\n\n CommandLine:\n python -m wbia --tf TestResult.make_figtitle --prefix "Seperability " --db GIRM_Master1 -a timectrl -t Ell:K=2 --hargv=scores\n python -m wbia --tf TestResult.make_figtitle\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts(\'PZ_MTEST\')\n >>> plotname = \'\'\n >>> figtitle = testres.make_figtitle(plotname)\n >>> result = (\'figtitle = %r\' % (figtitle,))\n >>> print(result)\n '
figtitle_prefix = ut.get_argval('--prefix', type_=str, default=)
if (figtitle_prefix != ):
figtitle_prefix = (figtitle_prefix.rstrip() + ' ')
figtitle = (figtitle_prefix + plotname)
hasprefix = (figtitle_prefix == )
if hasprefix:
figtitle += '\n'
title_aug = testres.get_title_aug(friendly=True, with_cfg=hasprefix)
figtitle += (' ' + title_aug)
if (filt_cfg is not None):
filt_cfgstr = ut.get_cfg_lbl(filt_cfg)
if (filt_cfgstr.strip() != ':'):
figtitle += (' ' + filt_cfgstr)
return figtitle
|
def get_title_aug(testres, with_size=True, with_db=True, with_cfg=True, friendly=False):
"\n Args:\n with_size (bool): (default = True)\n\n Returns:\n str: title_aug\n\n CommandLine:\n python -m wbia --tf TestResult.get_title_aug --db PZ_Master1 -a timequalctrl::timectrl\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('PZ_MTEST')\n >>> with_size = True\n >>> title_aug = testres.get_title_aug(with_size)\n >>> res = u'title_aug = %s' % (title_aug,)\n >>> print(res)\n "
ibs = testres.ibs
title_aug = ''
if with_db:
title_aug += ('db=' + ibs.get_dbname())
if with_cfg:
try:
if ('_cfgname' in testres.common_acfg['common']):
try:
annot_cfgname = testres.common_acfg['common']['_cfgstr']
except KeyError:
annot_cfgname = testres.common_acfg['common']['_cfgname']
else:
cfgname_list = [cfg['dcfg__cfgname'] for cfg in testres.varied_acfg_list]
cfgname_list = ut.unique_ordered(cfgname_list)
annot_cfgname = (('[' + ','.join(cfgname_list)) + ']')
try:
pipeline_cfgname = testres.common_cfgdict['_cfgstr']
except KeyError:
cfgstr_list = [cfg['_cfgstr'] for cfg in testres.varied_cfg_list]
uniuqe_cfgstrs = ut.unique_ordered(cfgstr_list)
pipeline_cfgname = (('[' + ','.join(uniuqe_cfgstrs)) + ']')
annot_cfgname = testres._shorten_lbls(annot_cfgname)
pipeline_cfgname = testres._shorten_lbls(pipeline_cfgname)
if (len(annot_cfgname) < 64):
title_aug += (' a=' + annot_cfgname)
if (len(pipeline_cfgname) < 64):
title_aug += (' t=' + pipeline_cfgname)
except Exception as ex:
logger.info(ut.repr2(testres.common_acfg))
logger.info(ut.repr2(testres.common_cfgdict))
ut.printex(ex)
raise
if with_size:
if ut.get_argflag('--hack_size_nl'):
title_aug += '\n'
if testres.has_constant_qaids():
title_aug += (' #qaids=%r' % (len(testres.qaids),))
elif testres.has_constant_length_qaids():
title_aug += (' #qaids=%r*' % (len(testres.cfgx2_qaids[0]),))
if testres.has_constant_daids():
daids = testres.cfgx2_daids[0]
title_aug += (' #daids=%r' % (len(testres.cfgx2_daids[0]),))
if testres.has_constant_qaids():
all_daid_per_name_stats = ut.get_stats(ibs.get_num_annots_per_name(daids)[0], use_nan=True)
if (all_daid_per_name_stats['std'] == 0):
title_aug += (' dper_name=%s' % (ut.scalar_str(all_daid_per_name_stats['mean'], max_precision=2),))
else:
title_aug += (' dper_name=%s±%s' % (ut.scalar_str(all_daid_per_name_stats['mean'], precision=2), ut.scalar_str(all_daid_per_name_stats['std'], precision=2)))
elif testres.has_constant_length_daids():
daids = testres.cfgx2_daids[0]
title_aug += (' #daids=%r*' % (len(testres.cfgx2_daids[0]),))
if friendly:
title_aug = ut.multi_replace(title_aug, list(ibs.const.DBNAME_ALIAS.keys()), list(ibs.const.DBNAME_ALIAS.values()))
return title_aug
| 8,572,646,108,948,270,000
|
Args:
with_size (bool): (default = True)
Returns:
str: title_aug
CommandLine:
python -m wbia --tf TestResult.get_title_aug --db PZ_Master1 -a timequalctrl::timectrl
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> import wbia
>>> ibs, testres = wbia.testdata_expts('PZ_MTEST')
>>> with_size = True
>>> title_aug = testres.get_title_aug(with_size)
>>> res = u'title_aug = %s' % (title_aug,)
>>> print(res)
|
wbia/expt/test_result.py
|
get_title_aug
|
WildMeOrg/wildbook-ia
|
python
|
def get_title_aug(testres, with_size=True, with_db=True, with_cfg=True, friendly=False):
"\n Args:\n with_size (bool): (default = True)\n\n Returns:\n str: title_aug\n\n CommandLine:\n python -m wbia --tf TestResult.get_title_aug --db PZ_Master1 -a timequalctrl::timectrl\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('PZ_MTEST')\n >>> with_size = True\n >>> title_aug = testres.get_title_aug(with_size)\n >>> res = u'title_aug = %s' % (title_aug,)\n >>> print(res)\n "
ibs = testres.ibs
title_aug =
if with_db:
title_aug += ('db=' + ibs.get_dbname())
if with_cfg:
try:
if ('_cfgname' in testres.common_acfg['common']):
try:
annot_cfgname = testres.common_acfg['common']['_cfgstr']
except KeyError:
annot_cfgname = testres.common_acfg['common']['_cfgname']
else:
cfgname_list = [cfg['dcfg__cfgname'] for cfg in testres.varied_acfg_list]
cfgname_list = ut.unique_ordered(cfgname_list)
annot_cfgname = (('[' + ','.join(cfgname_list)) + ']')
try:
pipeline_cfgname = testres.common_cfgdict['_cfgstr']
except KeyError:
cfgstr_list = [cfg['_cfgstr'] for cfg in testres.varied_cfg_list]
uniuqe_cfgstrs = ut.unique_ordered(cfgstr_list)
pipeline_cfgname = (('[' + ','.join(uniuqe_cfgstrs)) + ']')
annot_cfgname = testres._shorten_lbls(annot_cfgname)
pipeline_cfgname = testres._shorten_lbls(pipeline_cfgname)
if (len(annot_cfgname) < 64):
title_aug += (' a=' + annot_cfgname)
if (len(pipeline_cfgname) < 64):
title_aug += (' t=' + pipeline_cfgname)
except Exception as ex:
logger.info(ut.repr2(testres.common_acfg))
logger.info(ut.repr2(testres.common_cfgdict))
ut.printex(ex)
raise
if with_size:
if ut.get_argflag('--hack_size_nl'):
title_aug += '\n'
if testres.has_constant_qaids():
title_aug += (' #qaids=%r' % (len(testres.qaids),))
elif testres.has_constant_length_qaids():
title_aug += (' #qaids=%r*' % (len(testres.cfgx2_qaids[0]),))
if testres.has_constant_daids():
daids = testres.cfgx2_daids[0]
title_aug += (' #daids=%r' % (len(testres.cfgx2_daids[0]),))
if testres.has_constant_qaids():
all_daid_per_name_stats = ut.get_stats(ibs.get_num_annots_per_name(daids)[0], use_nan=True)
if (all_daid_per_name_stats['std'] == 0):
title_aug += (' dper_name=%s' % (ut.scalar_str(all_daid_per_name_stats['mean'], max_precision=2),))
else:
title_aug += (' dper_name=%s±%s' % (ut.scalar_str(all_daid_per_name_stats['mean'], precision=2), ut.scalar_str(all_daid_per_name_stats['std'], precision=2)))
elif testres.has_constant_length_daids():
daids = testres.cfgx2_daids[0]
title_aug += (' #daids=%r*' % (len(testres.cfgx2_daids[0]),))
if friendly:
title_aug = ut.multi_replace(title_aug, list(ibs.const.DBNAME_ALIAS.keys()), list(ibs.const.DBNAME_ALIAS.values()))
return title_aug
|
def print_pcfg_info(testres):
'\n Prints verbose information about each pipeline configuration\n\n >>> from wbia.expt.test_result import * # NOQA\n '
experiment_helpers.print_pipe_configs(testres.cfgx2_pcfg, testres.cfgx2_qreq_)
| -2,667,704,884,458,157,600
|
Prints verbose information about each pipeline configuration
>>> from wbia.expt.test_result import * # NOQA
|
wbia/expt/test_result.py
|
print_pcfg_info
|
WildMeOrg/wildbook-ia
|
python
|
def print_pcfg_info(testres):
'\n Prints verbose information about each pipeline configuration\n\n >>> from wbia.expt.test_result import * # NOQA\n '
experiment_helpers.print_pipe_configs(testres.cfgx2_pcfg, testres.cfgx2_qreq_)
|
def print_acfg_info(testres, **kwargs):
"\n Prints verbose information about the annotations used in each test\n configuration\n\n CommandLine:\n python -m wbia --tf TestResult.print_acfg_info\n\n Kwargs:\n see ibs.get_annot_stats_dict\n hashid, per_name, per_qual, per_vp, per_name_vpedge, per_image,\n min_name_hourdist\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('PZ_MTEST',\n >>> a=['ctrl::unctrl_comp'],\n >>> t=['candk:K=[1,2]'])\n >>> ibs = None\n >>> result = testres.print_acfg_info()\n >>> print(result)\n "
from wbia.expt import annotation_configs
ibs = testres.ibs
cfgx2_acfg_label = annotation_configs.get_varied_acfg_labels(testres.cfgx2_acfg)
flags = ut.flag_unique_items(cfgx2_acfg_label)
qreq_list = ut.compress(testres.cfgx2_qreq_, flags)
acfg_list = ut.compress(testres.cfgx2_acfg, flags)
expanded_aids_list = [(qreq_.qaids, qreq_.daids) for qreq_ in qreq_list]
annotation_configs.print_acfg_list(acfg_list, expanded_aids_list, ibs, **kwargs)
| 8,689,599,207,488,858,000
|
Prints verbose information about the annotations used in each test
configuration
CommandLine:
python -m wbia --tf TestResult.print_acfg_info
Kwargs:
see ibs.get_annot_stats_dict
hashid, per_name, per_qual, per_vp, per_name_vpedge, per_image,
min_name_hourdist
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> import wbia
>>> ibs, testres = wbia.testdata_expts('PZ_MTEST',
>>> a=['ctrl::unctrl_comp'],
>>> t=['candk:K=[1,2]'])
>>> ibs = None
>>> result = testres.print_acfg_info()
>>> print(result)
|
wbia/expt/test_result.py
|
print_acfg_info
|
WildMeOrg/wildbook-ia
|
python
|
def print_acfg_info(testres, **kwargs):
"\n Prints verbose information about the annotations used in each test\n configuration\n\n CommandLine:\n python -m wbia --tf TestResult.print_acfg_info\n\n Kwargs:\n see ibs.get_annot_stats_dict\n hashid, per_name, per_qual, per_vp, per_name_vpedge, per_image,\n min_name_hourdist\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('PZ_MTEST',\n >>> a=['ctrl::unctrl_comp'],\n >>> t=['candk:K=[1,2]'])\n >>> ibs = None\n >>> result = testres.print_acfg_info()\n >>> print(result)\n "
from wbia.expt import annotation_configs
ibs = testres.ibs
cfgx2_acfg_label = annotation_configs.get_varied_acfg_labels(testres.cfgx2_acfg)
flags = ut.flag_unique_items(cfgx2_acfg_label)
qreq_list = ut.compress(testres.cfgx2_qreq_, flags)
acfg_list = ut.compress(testres.cfgx2_acfg, flags)
expanded_aids_list = [(qreq_.qaids, qreq_.daids) for qreq_ in qreq_list]
annotation_configs.print_acfg_list(acfg_list, expanded_aids_list, ibs, **kwargs)
|
def print_unique_annot_config_stats(testres, ibs=None):
"\n Args:\n ibs (IBEISController): wbia controller object(default = None)\n\n CommandLine:\n python -m wbia TestResult.print_unique_annot_config_stats\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> testres = wbia.testdata_expts('PZ_MTEST', a=['ctrl::unctrl_comp'])\n >>> ibs = None\n >>> result = testres.print_unique_annot_config_stats(ibs)\n >>> print(result)\n "
if (ibs is None):
ibs = testres.ibs
cfx2_dannot_hashid = [ibs.get_annot_hashid_visual_uuid(daids) for daids in testres.cfgx2_daids]
unique_daids = ut.compress(testres.cfgx2_daids, ut.flag_unique_items(cfx2_dannot_hashid))
with ut.Indenter('[acfgstats]'):
logger.info('+====')
logger.info(('Printing %d unique annotconfig stats' % len(unique_daids)))
common_acfg = testres.common_acfg
common_acfg['common'] = ut.dict_filter_nones(common_acfg['common'])
logger.info(('testres.common_acfg = ' + ut.repr2(common_acfg)))
logger.info(('param_basis(len(daids)) = %r' % (testres.get_param_basis('len(daids)'),)))
for (count, daids) in enumerate(unique_daids):
logger.info('+---')
logger.info(('acfgx = %r/%r' % (count, len(unique_daids))))
if testres.has_constant_qaids():
ibs.print_annotconfig_stats(testres.qaids, daids)
else:
ibs.print_annot_stats(daids, prefix='d')
logger.info('L___')
| -6,608,052,288,272,814,000
|
Args:
ibs (IBEISController): wbia controller object(default = None)
CommandLine:
python -m wbia TestResult.print_unique_annot_config_stats
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> import wbia
>>> testres = wbia.testdata_expts('PZ_MTEST', a=['ctrl::unctrl_comp'])
>>> ibs = None
>>> result = testres.print_unique_annot_config_stats(ibs)
>>> print(result)
|
wbia/expt/test_result.py
|
print_unique_annot_config_stats
|
WildMeOrg/wildbook-ia
|
python
|
def print_unique_annot_config_stats(testres, ibs=None):
"\n Args:\n ibs (IBEISController): wbia controller object(default = None)\n\n CommandLine:\n python -m wbia TestResult.print_unique_annot_config_stats\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> testres = wbia.testdata_expts('PZ_MTEST', a=['ctrl::unctrl_comp'])\n >>> ibs = None\n >>> result = testres.print_unique_annot_config_stats(ibs)\n >>> print(result)\n "
if (ibs is None):
ibs = testres.ibs
cfx2_dannot_hashid = [ibs.get_annot_hashid_visual_uuid(daids) for daids in testres.cfgx2_daids]
unique_daids = ut.compress(testres.cfgx2_daids, ut.flag_unique_items(cfx2_dannot_hashid))
with ut.Indenter('[acfgstats]'):
logger.info('+====')
logger.info(('Printing %d unique annotconfig stats' % len(unique_daids)))
common_acfg = testres.common_acfg
common_acfg['common'] = ut.dict_filter_nones(common_acfg['common'])
logger.info(('testres.common_acfg = ' + ut.repr2(common_acfg)))
logger.info(('param_basis(len(daids)) = %r' % (testres.get_param_basis('len(daids)'),)))
for (count, daids) in enumerate(unique_daids):
logger.info('+---')
logger.info(('acfgx = %r/%r' % (count, len(unique_daids))))
if testres.has_constant_qaids():
ibs.print_annotconfig_stats(testres.qaids, daids)
else:
ibs.print_annot_stats(daids, prefix='d')
logger.info('L___')
|
def print_results(testres, **kwargs):
"\n CommandLine:\n python -m wbia --tf TestResult.print_results\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.expt import harness\n >>> ibs, testres = harness.testdata_expts('PZ_MTEST')\n >>> result = testres.print_results()\n >>> print(result)\n "
from wbia.expt import experiment_printres
ibs = testres.ibs
experiment_printres.print_results(ibs, testres, **kwargs)
| -4,263,183,096,241,799,000
|
CommandLine:
python -m wbia --tf TestResult.print_results
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.expt import harness
>>> ibs, testres = harness.testdata_expts('PZ_MTEST')
>>> result = testres.print_results()
>>> print(result)
|
wbia/expt/test_result.py
|
print_results
|
WildMeOrg/wildbook-ia
|
python
|
def print_results(testres, **kwargs):
"\n CommandLine:\n python -m wbia --tf TestResult.print_results\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.expt import harness\n >>> ibs, testres = harness.testdata_expts('PZ_MTEST')\n >>> result = testres.print_results()\n >>> print(result)\n "
from wbia.expt import experiment_printres
ibs = testres.ibs
experiment_printres.print_results(ibs, testres, **kwargs)
|
def get_all_tags(testres):
"\n CommandLine:\n python -m wbia --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :\n python -m wbia --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :min_gf_timedelta=24h\n python -m wbia --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :min_gf_timedelta=24h,max_gt_rank=5\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts('PZ_Master1', a=['timectrl'])\n >>> filt_cfg = main_helpers.testdata_filtcfg()\n >>> case_pos_list = testres.case_sample2(filt_cfg)\n >>> all_tags = testres.get_all_tags()\n >>> selected_tags = ut.take(all_tags, case_pos_list.T[0])\n >>> flat_tags = list(map(str, ut.flatten(ut.flatten(selected_tags))))\n >>> print(ut.repr2(ut.dict_hist(flat_tags), key_order_metric='val'))\n >>> ut.quit_if_noshow()\n >>> import wbia.plottool as pt\n >>> pt.word_histogram2(flat_tags, fnum=1, pnum=(1, 2, 1))\n >>> pt.wordcloud(' '.join(flat_tags), fnum=1, pnum=(1, 2, 2))\n >>> pt.set_figtitle(ut.get_cfg_lbl(filt_cfg))\n >>> ut.show_if_requested()\n "
gt_tags = testres.get_gt_tags()
gf_tags = testres.get_gf_tags()
all_tags = [ut.list_zipflatten(*item) for item in zip(gf_tags, gt_tags)]
return all_tags
| 7,433,998,893,923,260,000
|
CommandLine:
python -m wbia --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :
python -m wbia --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :min_gf_timedelta=24h
python -m wbia --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :min_gf_timedelta=24h,max_gt_rank=5
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.init import main_helpers
>>> ibs, testres = main_helpers.testdata_expts('PZ_Master1', a=['timectrl'])
>>> filt_cfg = main_helpers.testdata_filtcfg()
>>> case_pos_list = testres.case_sample2(filt_cfg)
>>> all_tags = testres.get_all_tags()
>>> selected_tags = ut.take(all_tags, case_pos_list.T[0])
>>> flat_tags = list(map(str, ut.flatten(ut.flatten(selected_tags))))
>>> print(ut.repr2(ut.dict_hist(flat_tags), key_order_metric='val'))
>>> ut.quit_if_noshow()
>>> import wbia.plottool as pt
>>> pt.word_histogram2(flat_tags, fnum=1, pnum=(1, 2, 1))
>>> pt.wordcloud(' '.join(flat_tags), fnum=1, pnum=(1, 2, 2))
>>> pt.set_figtitle(ut.get_cfg_lbl(filt_cfg))
>>> ut.show_if_requested()
|
wbia/expt/test_result.py
|
get_all_tags
|
WildMeOrg/wildbook-ia
|
python
|
def get_all_tags(testres):
"\n CommandLine:\n python -m wbia --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :\n python -m wbia --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :min_gf_timedelta=24h\n python -m wbia --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :min_gf_timedelta=24h,max_gt_rank=5\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts('PZ_Master1', a=['timectrl'])\n >>> filt_cfg = main_helpers.testdata_filtcfg()\n >>> case_pos_list = testres.case_sample2(filt_cfg)\n >>> all_tags = testres.get_all_tags()\n >>> selected_tags = ut.take(all_tags, case_pos_list.T[0])\n >>> flat_tags = list(map(str, ut.flatten(ut.flatten(selected_tags))))\n >>> print(ut.repr2(ut.dict_hist(flat_tags), key_order_metric='val'))\n >>> ut.quit_if_noshow()\n >>> import wbia.plottool as pt\n >>> pt.word_histogram2(flat_tags, fnum=1, pnum=(1, 2, 1))\n >>> pt.wordcloud(' '.join(flat_tags), fnum=1, pnum=(1, 2, 2))\n >>> pt.set_figtitle(ut.get_cfg_lbl(filt_cfg))\n >>> ut.show_if_requested()\n "
gt_tags = testres.get_gt_tags()
gf_tags = testres.get_gf_tags()
all_tags = [ut.list_zipflatten(*item) for item in zip(gf_tags, gt_tags)]
return all_tags
|
def get_gf_tags(testres):
"\n Returns:\n list: case_pos_list\n\n CommandLine:\n python -m wbia --tf TestResult.get_gf_tags --db PZ_Master1 --show\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts('PZ_Master1', a=['timectrl'])\n >>> filt_cfg = main_helpers.testdata_filtcfg()\n >>> case_pos_list = testres.case_sample2(filt_cfg)\n >>> gf_tags = testres.get_gf_tags()\n "
ibs = testres.ibs
(truth2_prop, prop2_mat) = testres.get_truth2_prop()
gf_annotmatch_rowids = truth2_prop['gf']['annotmatch_rowid']
gf_tags = ibs.unflat_map(ibs.get_annotmatch_case_tags, gf_annotmatch_rowids)
return gf_tags
| -6,750,314,083,588,924,000
|
Returns:
list: case_pos_list
CommandLine:
python -m wbia --tf TestResult.get_gf_tags --db PZ_Master1 --show
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.init import main_helpers
>>> ibs, testres = main_helpers.testdata_expts('PZ_Master1', a=['timectrl'])
>>> filt_cfg = main_helpers.testdata_filtcfg()
>>> case_pos_list = testres.case_sample2(filt_cfg)
>>> gf_tags = testres.get_gf_tags()
|
wbia/expt/test_result.py
|
get_gf_tags
|
WildMeOrg/wildbook-ia
|
python
|
def get_gf_tags(testres):
"\n Returns:\n list: case_pos_list\n\n CommandLine:\n python -m wbia --tf TestResult.get_gf_tags --db PZ_Master1 --show\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts('PZ_Master1', a=['timectrl'])\n >>> filt_cfg = main_helpers.testdata_filtcfg()\n >>> case_pos_list = testres.case_sample2(filt_cfg)\n >>> gf_tags = testres.get_gf_tags()\n "
ibs = testres.ibs
(truth2_prop, prop2_mat) = testres.get_truth2_prop()
gf_annotmatch_rowids = truth2_prop['gf']['annotmatch_rowid']
gf_tags = ibs.unflat_map(ibs.get_annotmatch_case_tags, gf_annotmatch_rowids)
return gf_tags
|
def case_sample2(testres, filt_cfg, qaids=None, return_mask=False, verbose=None):
"\n Filters individual test result cases based on how they performed, what\n tags they had, and various other things.\n\n Args:\n filt_cfg (dict):\n\n Returns:\n list: case_pos_list (list of (qx, cfgx)) or isvalid mask\n\n CommandLine:\n python -m wbia TestResult.case_sample2\n python -m wbia TestResult.case_sample2:0\n python -m wbia TestResult.case_sample2:1 --db GZ_ALL --filt :min_tags=1\n python -m wbia TestResult.case_sample2:1 --db PZ_Master1 --filt :min_gf_tags=1\n\n python -m wbia TestResult.case_sample2:2 --db PZ_Master1\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> # The same results is achievable with different filter config settings\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> verbose = True\n >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'])\n >>> filt_cfg1 = {'fail': True}\n >>> case_pos_list1 = testres.case_sample2(filt_cfg1)\n >>> filt_cfg2 = {'min_gtrank': 1}\n >>> case_pos_list2 = testres.case_sample2(filt_cfg2)\n >>> filt_cfg3 = {'min_gtrank': 0}\n >>> case_pos_list3 = testres.case_sample2(filt_cfg3)\n >>> filt_cfg4 = {}\n >>> case_pos_list4 = testres.case_sample2(filt_cfg4)\n >>> assert np.all(case_pos_list1 == case_pos_list2), 'should be equiv configs'\n >>> assert np.any(case_pos_list2 != case_pos_list3), 'should be diff configs'\n >>> assert np.all(case_pos_list3 == case_pos_list4), 'should be equiv configs'\n >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'], t=['default:sv_on=[True,False]'])\n >>> filt_cfg5 = filt_cfg1.copy()\n >>> mask5 = testres.case_sample2(filt_cfg5, return_mask=True)\n >>> case_pos_list5 = testres.case_sample2(filt_cfg5, return_mask=False)\n >>> assert len(mask5.shape) == 2\n >>> assert np.all(mask5.T[0] == mask5.T[1])\n >>> filt_cfg6 = {'fail': True, 'allcfg': True}\n >>> mask6 = testres.case_sample2(filt_cfg6, return_mask=True)\n >>> assert np.all(mask6.T[0] == mask6.T[1])\n >>> print(mask5)\n >>> print(case_pos_list5)\n >>> 
filt_cfg = filt_cfg7 = {'disagree': True}\n >>> case_pos_list7 = testres.case_sample2(filt_cfg7, verbose=verbose)\n >>> print(case_pos_list7)\n\n Example:\n >>> # SCRIPT\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'])\n >>> filt_cfg = main_helpers.testdata_filtcfg()\n >>> case_pos_list = testres.case_sample2(filt_cfg)\n >>> result = ('case_pos_list = %s' % (str(case_pos_list),))\n >>> print(result)\n >>> # Extra stuff\n >>> all_tags = testres.get_all_tags()\n >>> selcted_tags = ut.take(all_tags, case_pos_list.T[0])\n >>> print('selcted_tags = %r' % (selcted_tags,))\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'], t=['default:K=[1,2,3]'])\n >>> ut.exec_funckw(testres.case_sample2, globals())\n >>> filt_cfg = {'fail': True, 'min_gtrank': 1, 'max_gtrank': None, 'min_gf_timedelta': '24h'}\n >>> ibs, testres = main_helpers.testdata_expts('humpbacks_fb', a=['default:has_any=hasnotch,mingt=2,qindex=0:300,dindex=0:300'], t=['default:proot=BC_DTW,decision=max,crop_dim_size=500,crop_enabled=True,manual_extract=False,use_te_scorer=True,ignore_notch=True,te_net=annot_simple', 'default:proot=vsmany'], qaid_override=[12])\n >>> filt_cfg = ':disagree=True,index=0:8,min_gtscore=.00001,require_all_cfg=True'\n >>> #filt_cfg = cfghelpers.parse_argv_cfg('--filt')[0]\n >>> case_pos_list = testres.case_sample2(filt_cfg, verbose=True)\n >>> result = ('case_pos_list = %s' % (str(case_pos_list),))\n >>> print(result)\n >>> # Extra stuff\n >>> all_tags = testres.get_all_tags()\n >>> selcted_tags = ut.take(all_tags, case_pos_list.T[0])\n >>> print('selcted_tags = %r' % (selcted_tags,))\n\n\n logger.info('qaid = %r' % (qaid,))\n logger.info('qx = %r' % (qx,))\n logger.info('cfgxs = %r' % (cfgxs,))\n # print testres info about this 
item\n take_cfgs = ut.partial(ut.take, index_list=cfgxs)\n take_qx = ut.partial(ut.take, index_list=qx)\n truth_cfgs = ut.hmap_vals(take_qx, truth2_prop)\n truth_item = ut.hmap_vals(take_cfgs, truth_cfgs, max_depth=1)\n prop_cfgs = ut.hmap_vals(take_qx, prop2_mat)\n prop_item = ut.hmap_vals(take_cfgs, prop_cfgs, max_depth=0)\n logger.info('truth2_prop[item] = ' + ut.repr3(truth_item, nl=2))\n logger.info('prop2_mat[item] = ' + ut.repr3(prop_item, nl=1))\n "
from wbia.expt import cfghelpers
if (verbose is None):
verbose = ut.NOT_QUIET
if verbose:
logger.info('[testres] case_sample2')
if isinstance(filt_cfg, str):
filt_cfg = [filt_cfg]
if isinstance(filt_cfg, list):
_combos = cfghelpers.parse_cfgstr_list2(filt_cfg, strict=False)
filt_cfg = ut.flatten(_combos)[0]
if isinstance(filt_cfg, str):
_combos = cfghelpers.parse_cfgstr_list2([filt_cfg], strict=False)
filt_cfg = ut.flatten(_combos)[0]
if (filt_cfg is None):
filt_cfg = {}
qaids = (testres.get_test_qaids() if (qaids is None) else qaids)
(truth2_prop, prop2_mat) = testres.get_truth2_prop(qaids)
ibs = testres.ibs
participates = prop2_mat['participates']
is_valid = participates.copy()
def unflat_tag_filterflags(tags_list, **kwargs):
from wbia import tag_funcs
(flat_tags, cumsum) = ut.invertible_flatten2(tags_list)
flat_flags = tag_funcs.filterflags_general_tags(flat_tags, **kwargs)
flags = np.array(ut.unflatten2(flat_flags, cumsum))
return flags
UTFF = unflat_tag_filterflags
def cols_disagree(mat, val):
"\n is_success = prop2_mat['is_success']\n "
nCols = mat.shape[1]
sums = mat.sum(axis=1)
disagree_flags1d = np.logical_and((sums > 0), (sums < nCols))
disagree_flags2d = np.tile(disagree_flags1d[:, None], (1, nCols))
if (not val):
flags = np.logical_not(disagree_flags2d)
else:
flags = disagree_flags2d
return flags
def cfg_scoresep(mat, val, op):
"\n Compares scores between different configs\n\n op = operator.ge\n is_success = prop2_mat['is_success']\n "
nCols = mat.shape[1]
pdistx = vt.pdist_indicies(nCols)
pdist_list = np.array([vt.safe_pdist(row) for row in mat])
flags_list = op(pdist_list, val)
colx_list = [np.unique(ut.flatten(ut.compress(pdistx, flags))) for flags in flags_list]
offsets = np.arange(0, (nCols * len(mat)), step=nCols)
idx_list = ut.flatten([(colx + offset) for (colx, offset) in zip(colx_list, offsets)])
mask = vt.index_to_boolmask(idx_list, maxval=(offsets[(- 1)] + nCols))
flags = mask.reshape(mat.shape)
return flags
rule_list = [('disagree', (lambda val: cols_disagree(prop2_mat['is_failure'], val))), ('min_gt_cfg_scoresep', (lambda val: cfg_scoresep(truth2_prop['gt']['score'], val, operator.ge))), ('fail', prop2_mat['is_failure']), ('success', prop2_mat['is_success']), ('min_gtrank', partial(operator.ge, truth2_prop['gt']['rank'])), ('max_gtrank', partial(operator.le, truth2_prop['gt']['rank'])), ('max_gtscore', partial(operator.le, truth2_prop['gt']['score'])), ('min_gtscore', partial(operator.ge, truth2_prop['gt']['score'])), ('min_gf_timedelta', partial(operator.ge, truth2_prop['gf']['timedelta'])), ('max_gf_timedelta', partial(operator.le, truth2_prop['gf']['timedelta'])), ('min_tags', (lambda val: UTFF(testres.get_all_tags(), min_num=val))), ('max_tags', (lambda val: UTFF(testres.get_all_tags(), max_num=val))), ('min_gf_tags', (lambda val: UTFF(testres.get_gf_tags(), min_num=val))), ('max_gf_tags', (lambda val: UTFF(testres.get_gf_tags(), max_num=val))), ('min_gt_tags', (lambda val: UTFF(testres.get_gt_tags(), min_num=val))), ('max_gt_tags', (lambda val: UTFF(testres.get_gt_tags(), max_num=val))), ('min_query_annot_tags', (lambda val: UTFF(testres.get_query_annot_tags(), min_num=val))), ('min_gt_annot_tags', (lambda val: UTFF(testres.get_gt_annot_tags(), min_num=val))), ('min_gtq_tags', (lambda val: UTFF(testres.get_gtquery_annot_tags(), min_num=val))), ('max_gtq_tags', (lambda val: UTFF(testres.get_gtquery_annot_tags(), max_num=val))), ('without_gf_tag', (lambda val: UTFF(testres.get_gf_tags(), has_none=val))), ('without_gt_tag', (lambda val: UTFF(testres.get_gt_tags(), has_none=val))), ('with_gf_tag', (lambda val: UTFF(testres.get_gf_tags(), has_any=val))), ('with_gt_tag', (lambda val: UTFF(testres.get_gt_tags(), has_any=val))), ('with_tag', (lambda val: UTFF(testres.get_all_tags(), has_any=val))), ('without_tag', (lambda val: UTFF(testres.get_all_tags(), has_none=val)))]
rule_dict = ut.odict(rule_list)
rule_list.append(('max_gf_td', rule_dict['max_gf_timedelta']))
rule_list.append(('min_gf_td', rule_dict['min_gf_timedelta']))
filt_cfg_ = copy.deepcopy(filt_cfg)
for tdkey in filt_cfg_.keys():
if tdkey.endswith('_timedelta'):
filt_cfg_[tdkey] = ut.ensure_timedelta(filt_cfg_[tdkey])
class VerbFilterInfo(object):
def __init__(self):
self.prev_num_valid = None
def print_pre(self, is_valid, filt_cfg_):
num_valid = is_valid.sum()
logger.info(('[testres] Sampling from is_valid.size=%r with filt=%r' % (is_valid.size, ut.get_cfg_lbl(filt_cfg_))))
logger.info((' * is_valid.shape = %r' % (is_valid.shape,)))
logger.info((' * num_valid = %r' % (num_valid,)))
self.prev_num_valid = num_valid
def print_post(self, is_valid, flags, msg):
if (flags is not None):
num_passed = flags.sum()
num_valid = is_valid.sum()
num_invalidated = (self.prev_num_valid - num_valid)
logger.info(msg)
if (num_invalidated == 0):
if (flags is not None):
logger.info((' * num_passed = %r' % (num_passed,)))
logger.info((' * num_invalided = %r' % (num_invalidated,)))
else:
logger.info((' * prev_num_valid = %r' % (self.prev_num_valid,)))
logger.info((' * num_valid = %r' % (num_valid,)))
self.prev_num_valid = num_valid
verbinfo = VerbFilterInfo()
if verbose:
verbinfo.print_pre(is_valid, filt_cfg_)
ut.delete_keys(filt_cfg_, ['_cfgstr', '_cfgindex', '_cfgname', '_cfgtype'])
valid_rules = []
def poprule(rulename, default):
valid_rules.append(rulename)
return filt_cfg_.pop(rulename, default)
allcfg = poprule('allcfg', None)
orderby = poprule('orderby', None)
reverse = poprule('reverse', None)
sortasc = poprule('sortasc', None)
sortdsc = poprule('sortdsc', poprule('sortdesc', None))
max_pername = poprule('max_pername', None)
require_all_cfg = poprule('require_all_cfg', None)
index = poprule('index', None)
rule_value_list = [poprule(key, None) for (key, rule) in rule_list]
if (len(filt_cfg_) > 0):
logger.info('ERROR')
logger.info(('filtcfg valid rules are = %s' % (ut.repr2(valid_rules, nl=1),)))
for key in filt_cfg_.keys():
logger.info(('did you mean %r instead of %r?' % (ut.closet_words(key, valid_rules)[0], key)))
raise NotImplementedError(('Unhandled filt_cfg.keys() = %r' % filt_cfg_.keys()))
chosen_rule_idxs = ut.where([(val is not None) for val in rule_value_list])
chosen_rules = ut.take(rule_list, chosen_rule_idxs)
chosen_vals = ut.take(rule_value_list, chosen_rule_idxs)
for ((key, rule), val) in zip(chosen_rules, chosen_vals):
if isinstance(rule, np.ndarray):
flags = (rule == val)
else:
flags = rule(val)
flags = np.logical_and(flags, participates)
is_valid = np.logical_and(is_valid, flags)
if verbose:
verbinfo.print_post(is_valid, flags, ('SampleRule: %s = %r' % (key, val)))
if allcfg:
is_valid = np.logical_or(np.logical_or.reduce(is_valid.T)[:, None], is_valid)
is_valid = np.logical_and(is_valid, participates)
(qx_list, cfgx_list) = np.nonzero(is_valid)
if (sortdsc is not None):
assert (orderby is None), 'use orderby or sortasc'
assert (reverse is None), 'reverse does not work with sortdsc'
orderby = sortdsc
reverse = True
elif (sortasc is not None):
assert (reverse is None), 'reverse does not work with sortasc'
assert (orderby is None), 'use orderby or sortasc'
orderby = sortasc
reverse = False
else:
reverse = False
if (orderby is not None):
import re
order_values = None
for prefix_pattern in ['^gt_?', '^gf_?']:
prefix_match = re.match(prefix_pattern, orderby)
if (prefix_match is not None):
truth = prefix_pattern[1:3]
propname = orderby[prefix_match.end():]
if verbose:
logger.info(('Ordering by truth=%s propname=%s' % (truth, propname)))
order_values = truth2_prop[truth][propname]
break
if (order_values is None):
raise NotImplementedError(('Unknown orerby=%r' % (orderby,)))
else:
order_values = np.arange(is_valid.size).reshape(is_valid.shape)
flat_order = order_values[is_valid]
if verbose:
if verbose:
logger.info('Reversing ordering (descending)')
else:
logger.info('Normal ordering (ascending)')
if reverse:
sortx = flat_order.argsort()[::(- 1)]
else:
sortx = flat_order.argsort()
qx_list = qx_list.take(sortx, axis=0)
cfgx_list = cfgx_list.take(sortx, axis=0)
if (max_pername is not None):
if verbose:
logger.info(('Returning at most %d cases per name ' % (max_pername,)))
_qaid_list = np.take(qaids, qx_list)
_qnid_list = ibs.get_annot_nids(_qaid_list)
_valid_idxs = []
seen_ = ut.ddict((lambda : 0))
for (idx, _qnid) in enumerate(_qnid_list):
if (seen_[_qnid] < max_pername):
seen_[_qnid] += 1
_valid_idxs.append(idx)
_qx_list = qx_list[_valid_idxs]
_cfgx_list = cfgx_list[_valid_idxs]
_valid_index = np.vstack((_qx_list, _cfgx_list)).T
is_valid = vt.index_to_boolmask(_valid_index, is_valid.shape, isflat=False)
qx_list = _qx_list
cfgx_list = _cfgx_list
if require_all_cfg:
if verbose:
prev_num_valid = is_valid.sum()
logger.info('Enforcing that all configs must pass filters')
logger.info((' * prev_num_valid = %r' % (prev_num_valid,)))
qx2_valid_cfgs = ut.group_items(cfgx_list, qx_list)
hasall_cfg = [(len(qx2_valid_cfgs[qx]) == testres.nConfig) for qx in qx_list]
_qx_list = qx_list.compress(hasall_cfg)
_cfgx_list = cfgx_list.compress(hasall_cfg)
_valid_index = np.vstack((_qx_list, _cfgx_list)).T
is_valid = vt.index_to_boolmask(_valid_index, is_valid.shape, isflat=False)
qx_list = _qx_list
cfgx_list = _cfgx_list
if verbose:
verbinfo.print_post(is_valid, None, 'Enforcing that all configs must pass filters')
if (index is not None):
if isinstance(index, str):
index = ut.smart_cast(index, slice)
_qx_list = ut.take(qx_list, index)
_cfgx_list = ut.take(cfgx_list, index)
_valid_index = np.vstack((_qx_list, _cfgx_list)).T
is_valid = vt.index_to_boolmask(_valid_index, is_valid.shape, isflat=False)
qx_list = _qx_list
cfgx_list = _cfgx_list
if verbose:
verbinfo.print_post(is_valid, None, ('Taking index=%r sample from len(qx_list) = %r' % (index, len(qx_list))))
if (not return_mask):
case_pos_list = np.vstack((qx_list, cfgx_list)).T
case_identifier = case_pos_list
else:
if verbose:
logger.info('Converting cases indicies to a 2d-mask')
case_identifier = is_valid
if verbose:
logger.info('Finished case filtering')
logger.info('Final case stats:')
qx_hist = ut.dict_hist(qx_list)
logger.info(('config per query stats: %r' % (ut.get_stats_str(qx_hist.values()),)))
logger.info(('query per config stats: %r' % (ut.get_stats_str(ut.dict_hist(cfgx_list).values()),)))
return case_identifier
| 8,126,729,369,772,073,000
|
Filters individual test result cases based on how they performed, what
tags they had, and various other things.
Args:
filt_cfg (dict):
Returns:
list: case_pos_list (list of (qx, cfgx)) or isvalid mask
CommandLine:
python -m wbia TestResult.case_sample2
python -m wbia TestResult.case_sample2:0
python -m wbia TestResult.case_sample2:1 --db GZ_ALL --filt :min_tags=1
python -m wbia TestResult.case_sample2:1 --db PZ_Master1 --filt :min_gf_tags=1
python -m wbia TestResult.case_sample2:2 --db PZ_Master1
Example:
>>> # DISABLE_DOCTEST
>>> # The same results is achievable with different filter config settings
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.init import main_helpers
>>> verbose = True
>>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'])
>>> filt_cfg1 = {'fail': True}
>>> case_pos_list1 = testres.case_sample2(filt_cfg1)
>>> filt_cfg2 = {'min_gtrank': 1}
>>> case_pos_list2 = testres.case_sample2(filt_cfg2)
>>> filt_cfg3 = {'min_gtrank': 0}
>>> case_pos_list3 = testres.case_sample2(filt_cfg3)
>>> filt_cfg4 = {}
>>> case_pos_list4 = testres.case_sample2(filt_cfg4)
>>> assert np.all(case_pos_list1 == case_pos_list2), 'should be equiv configs'
>>> assert np.any(case_pos_list2 != case_pos_list3), 'should be diff configs'
>>> assert np.all(case_pos_list3 == case_pos_list4), 'should be equiv configs'
>>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'], t=['default:sv_on=[True,False]'])
>>> filt_cfg5 = filt_cfg1.copy()
>>> mask5 = testres.case_sample2(filt_cfg5, return_mask=True)
>>> case_pos_list5 = testres.case_sample2(filt_cfg5, return_mask=False)
>>> assert len(mask5.shape) == 2
>>> assert np.all(mask5.T[0] == mask5.T[1])
>>> filt_cfg6 = {'fail': True, 'allcfg': True}
>>> mask6 = testres.case_sample2(filt_cfg6, return_mask=True)
>>> assert np.all(mask6.T[0] == mask6.T[1])
>>> print(mask5)
>>> print(case_pos_list5)
>>> filt_cfg = filt_cfg7 = {'disagree': True}
>>> case_pos_list7 = testres.case_sample2(filt_cfg7, verbose=verbose)
>>> print(case_pos_list7)
Example:
>>> # SCRIPT
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.init import main_helpers
>>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'])
>>> filt_cfg = main_helpers.testdata_filtcfg()
>>> case_pos_list = testres.case_sample2(filt_cfg)
>>> result = ('case_pos_list = %s' % (str(case_pos_list),))
>>> print(result)
>>> # Extra stuff
>>> all_tags = testres.get_all_tags()
>>> selcted_tags = ut.take(all_tags, case_pos_list.T[0])
>>> print('selcted_tags = %r' % (selcted_tags,))
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.init import main_helpers
>>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'], t=['default:K=[1,2,3]'])
>>> ut.exec_funckw(testres.case_sample2, globals())
>>> filt_cfg = {'fail': True, 'min_gtrank': 1, 'max_gtrank': None, 'min_gf_timedelta': '24h'}
>>> ibs, testres = main_helpers.testdata_expts('humpbacks_fb', a=['default:has_any=hasnotch,mingt=2,qindex=0:300,dindex=0:300'], t=['default:proot=BC_DTW,decision=max,crop_dim_size=500,crop_enabled=True,manual_extract=False,use_te_scorer=True,ignore_notch=True,te_net=annot_simple', 'default:proot=vsmany'], qaid_override=[12])
>>> filt_cfg = ':disagree=True,index=0:8,min_gtscore=.00001,require_all_cfg=True'
>>> #filt_cfg = cfghelpers.parse_argv_cfg('--filt')[0]
>>> case_pos_list = testres.case_sample2(filt_cfg, verbose=True)
>>> result = ('case_pos_list = %s' % (str(case_pos_list),))
>>> print(result)
>>> # Extra stuff
>>> all_tags = testres.get_all_tags()
>>> selcted_tags = ut.take(all_tags, case_pos_list.T[0])
>>> print('selcted_tags = %r' % (selcted_tags,))
logger.info('qaid = %r' % (qaid,))
logger.info('qx = %r' % (qx,))
logger.info('cfgxs = %r' % (cfgxs,))
# print testres info about this item
take_cfgs = ut.partial(ut.take, index_list=cfgxs)
take_qx = ut.partial(ut.take, index_list=qx)
truth_cfgs = ut.hmap_vals(take_qx, truth2_prop)
truth_item = ut.hmap_vals(take_cfgs, truth_cfgs, max_depth=1)
prop_cfgs = ut.hmap_vals(take_qx, prop2_mat)
prop_item = ut.hmap_vals(take_cfgs, prop_cfgs, max_depth=0)
logger.info('truth2_prop[item] = ' + ut.repr3(truth_item, nl=2))
logger.info('prop2_mat[item] = ' + ut.repr3(prop_item, nl=1))
|
wbia/expt/test_result.py
|
case_sample2
|
WildMeOrg/wildbook-ia
|
python
|
def case_sample2(testres, filt_cfg, qaids=None, return_mask=False, verbose=None):
    """
    Filters individual test result cases based on how they performed, what
    tags they had, and various other things.

    Args:
        filt_cfg (dict or str or list): filter rules. May be a cfgstr such as
            ':disagree=True,index=0:8' which is parsed into a dict. Besides
            the per-case rules (``fail``, ``success``, ``min_gtrank``, tag
            rules, ...) the following meta-rules are recognized: ``allcfg``,
            ``orderby``, ``reverse``, ``sortasc``, ``sortdsc``/``sortdesc``,
            ``max_pername``, ``require_all_cfg`` and ``index``.
        qaids (list): query annot ids (default: ``testres.get_test_qaids()``)
        return_mask (bool): if True return a 2d boolean (qx, cfgx) mask
            instead of an array of (qx, cfgx) index pairs.
        verbose (bool): verbosity flag (default: ``ut.NOT_QUIET``)

    Returns:
        ndarray: case_pos_list (list of (qx, cfgx)) or isvalid mask

    Raises:
        NotImplementedError: on unknown filter keys or an unknown ``orderby``
            property.

    CommandLine:
        python -m wbia TestResult.case_sample2
        python -m wbia TestResult.case_sample2:0
        python -m wbia TestResult.case_sample2:1 --db GZ_ALL --filt :min_tags=1
        python -m wbia TestResult.case_sample2:1 --db PZ_Master1 --filt :min_gf_tags=1
        python -m wbia TestResult.case_sample2:2 --db PZ_Master1

    Example:
        >>> # DISABLE_DOCTEST
        >>> # The same results is achievable with different filter config settings
        >>> from wbia.expt.test_result import *  # NOQA
        >>> from wbia.init import main_helpers
        >>> verbose = True
        >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'])
        >>> filt_cfg1 = {'fail': True}
        >>> case_pos_list1 = testres.case_sample2(filt_cfg1)
        >>> filt_cfg2 = {'min_gtrank': 1}
        >>> case_pos_list2 = testres.case_sample2(filt_cfg2)
        >>> filt_cfg3 = {'min_gtrank': 0}
        >>> case_pos_list3 = testres.case_sample2(filt_cfg3)
        >>> filt_cfg4 = {}
        >>> case_pos_list4 = testres.case_sample2(filt_cfg4)
        >>> assert np.all(case_pos_list1 == case_pos_list2), 'should be equiv configs'
        >>> assert np.any(case_pos_list2 != case_pos_list3), 'should be diff configs'
        >>> assert np.all(case_pos_list3 == case_pos_list4), 'should be equiv configs'

    Example:
        >>> # SCRIPT
        >>> from wbia.expt.test_result import *  # NOQA
        >>> from wbia.init import main_helpers
        >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'])
        >>> filt_cfg = main_helpers.testdata_filtcfg()
        >>> case_pos_list = testres.case_sample2(filt_cfg)
        >>> result = ('case_pos_list = %s' % (str(case_pos_list),))
        >>> print(result)
    """
    from wbia.expt import cfghelpers

    if verbose is None:
        verbose = ut.NOT_QUIET
    if verbose:
        logger.info('[testres] case_sample2')
    # --- Normalize filt_cfg (cfgstr / list-of-cfgstrs / dict / None) into a
    # single flat dict of rules.
    if isinstance(filt_cfg, str):
        filt_cfg = [filt_cfg]
    if isinstance(filt_cfg, list):
        _combos = cfghelpers.parse_cfgstr_list2(filt_cfg, strict=False)
        filt_cfg = ut.flatten(_combos)[0]
    if isinstance(filt_cfg, str):
        _combos = cfghelpers.parse_cfgstr_list2([filt_cfg], strict=False)
        filt_cfg = ut.flatten(_combos)[0]
    if filt_cfg is None:
        filt_cfg = {}
    qaids = testres.get_test_qaids() if qaids is None else qaids
    (truth2_prop, prop2_mat) = testres.get_truth2_prop(qaids)
    ibs = testres.ibs
    # A (qx, cfgx) case starts valid iff the query participated in that config
    participates = prop2_mat['participates']
    is_valid = participates.copy()

    def unflat_tag_filterflags(tags_list, **kwargs):
        # Apply the general tag filter to a jagged list-of-tag-lists by
        # flattening, filtering, then restoring the original nesting.
        from wbia import tag_funcs

        (flat_tags, cumsum) = ut.invertible_flatten2(tags_list)
        flat_flags = tag_funcs.filterflags_general_tags(flat_tags, **kwargs)
        flags = np.array(ut.unflatten2(flat_flags, cumsum))
        return flags

    UTFF = unflat_tag_filterflags

    def cols_disagree(mat, val):
        """
        is_success = prop2_mat['is_success']
        """
        nCols = mat.shape[1]
        sums = mat.sum(axis=1)
        # A row "disagrees" when some but not all configs flag it
        disagree_flags1d = np.logical_and(sums > 0, sums < nCols)
        disagree_flags2d = np.tile(disagree_flags1d[:, None], (1, nCols))
        if not val:
            flags = np.logical_not(disagree_flags2d)
        else:
            flags = disagree_flags2d
        return flags

    def cfg_scoresep(mat, val, op):
        """
        Compares scores between different configs

        op = operator.ge
        is_success = prop2_mat['is_success']
        """
        nCols = mat.shape[1]
        pdistx = vt.pdist_indicies(nCols)
        pdist_list = np.array([vt.safe_pdist(row) for row in mat])
        flags_list = op(pdist_list, val)
        # Columns involved in any pair whose score separation passes `op`
        colx_list = [
            np.unique(ut.flatten(ut.compress(pdistx, flags))) for flags in flags_list
        ]
        offsets = np.arange(0, nCols * len(mat), step=nCols)
        idx_list = ut.flatten(
            [colx + offset for (colx, offset) in zip(colx_list, offsets)]
        )
        mask = vt.index_to_boolmask(idx_list, maxval=offsets[-1] + nCols)
        flags = mask.reshape(mat.shape)
        return flags

    # Each rule is either a precomputed boolean matrix (compared to the rule's
    # value) or a callable mapping the value to a boolean matrix.
    rule_list = [
        ('disagree', lambda val: cols_disagree(prop2_mat['is_failure'], val)),
        (
            'min_gt_cfg_scoresep',
            lambda val: cfg_scoresep(truth2_prop['gt']['score'], val, operator.ge),
        ),
        ('fail', prop2_mat['is_failure']),
        ('success', prop2_mat['is_success']),
        ('min_gtrank', partial(operator.ge, truth2_prop['gt']['rank'])),
        ('max_gtrank', partial(operator.le, truth2_prop['gt']['rank'])),
        ('max_gtscore', partial(operator.le, truth2_prop['gt']['score'])),
        ('min_gtscore', partial(operator.ge, truth2_prop['gt']['score'])),
        ('min_gf_timedelta', partial(operator.ge, truth2_prop['gf']['timedelta'])),
        ('max_gf_timedelta', partial(operator.le, truth2_prop['gf']['timedelta'])),
        ('min_tags', lambda val: UTFF(testres.get_all_tags(), min_num=val)),
        ('max_tags', lambda val: UTFF(testres.get_all_tags(), max_num=val)),
        ('min_gf_tags', lambda val: UTFF(testres.get_gf_tags(), min_num=val)),
        ('max_gf_tags', lambda val: UTFF(testres.get_gf_tags(), max_num=val)),
        ('min_gt_tags', lambda val: UTFF(testres.get_gt_tags(), min_num=val)),
        ('max_gt_tags', lambda val: UTFF(testres.get_gt_tags(), max_num=val)),
        (
            'min_query_annot_tags',
            lambda val: UTFF(testres.get_query_annot_tags(), min_num=val),
        ),
        (
            'min_gt_annot_tags',
            lambda val: UTFF(testres.get_gt_annot_tags(), min_num=val),
        ),
        (
            'min_gtq_tags',
            lambda val: UTFF(testres.get_gtquery_annot_tags(), min_num=val),
        ),
        (
            'max_gtq_tags',
            lambda val: UTFF(testres.get_gtquery_annot_tags(), max_num=val),
        ),
        ('without_gf_tag', lambda val: UTFF(testres.get_gf_tags(), has_none=val)),
        ('without_gt_tag', lambda val: UTFF(testres.get_gt_tags(), has_none=val)),
        ('with_gf_tag', lambda val: UTFF(testres.get_gf_tags(), has_any=val)),
        ('with_gt_tag', lambda val: UTFF(testres.get_gt_tags(), has_any=val)),
        ('with_tag', lambda val: UTFF(testres.get_all_tags(), has_any=val)),
        ('without_tag', lambda val: UTFF(testres.get_all_tags(), has_none=val)),
    ]
    rule_dict = ut.odict(rule_list)
    # Short aliases for the timedelta rules
    rule_list.append(('max_gf_td', rule_dict['max_gf_timedelta']))
    rule_list.append(('min_gf_td', rule_dict['min_gf_timedelta']))
    filt_cfg_ = copy.deepcopy(filt_cfg)
    # Convert timedelta strings like '24h' into numeric values
    for tdkey in filt_cfg_.keys():
        if tdkey.endswith('_timedelta'):
            filt_cfg_[tdkey] = ut.ensure_timedelta(filt_cfg_[tdkey])

    class VerbFilterInfo(object):
        # Tracks how many cases each filter step invalidates, for logging
        def __init__(self):
            self.prev_num_valid = None

        def print_pre(self, is_valid, filt_cfg_):
            num_valid = is_valid.sum()
            logger.info(
                '[testres] Sampling from is_valid.size=%r with filt=%r'
                % (is_valid.size, ut.get_cfg_lbl(filt_cfg_))
            )
            logger.info(' * is_valid.shape = %r' % (is_valid.shape,))
            logger.info(' * num_valid = %r' % (num_valid,))
            self.prev_num_valid = num_valid

        def print_post(self, is_valid, flags, msg):
            if flags is not None:
                num_passed = flags.sum()
            num_valid = is_valid.sum()
            num_invalidated = self.prev_num_valid - num_valid
            logger.info(msg)
            if num_invalidated == 0:
                if flags is not None:
                    logger.info(' * num_passed = %r' % (num_passed,))
                logger.info(' * num_invalided = %r' % (num_invalidated,))
            else:
                logger.info(' * prev_num_valid = %r' % (self.prev_num_valid,))
                logger.info(' * num_valid = %r' % (num_valid,))
            self.prev_num_valid = num_valid

    verbinfo = VerbFilterInfo()
    if verbose:
        verbinfo.print_pre(is_valid, filt_cfg_)
    # Remove parser bookkeeping keys before rule extraction
    ut.delete_keys(filt_cfg_, ['_cfgstr', '_cfgindex', '_cfgname', '_cfgtype'])
    valid_rules = []

    def poprule(rulename, default):
        # Record every recognized rule name so errors can suggest near-misses
        valid_rules.append(rulename)
        return filt_cfg_.pop(rulename, default)

    allcfg = poprule('allcfg', None)
    orderby = poprule('orderby', None)
    reverse = poprule('reverse', None)
    sortasc = poprule('sortasc', None)
    sortdsc = poprule('sortdsc', poprule('sortdesc', None))
    max_pername = poprule('max_pername', None)
    require_all_cfg = poprule('require_all_cfg', None)
    index = poprule('index', None)
    rule_value_list = [poprule(key, None) for (key, rule) in rule_list]
    # Anything left over is an unknown rule name
    if len(filt_cfg_) > 0:
        logger.info('ERROR')
        logger.info('filtcfg valid rules are = %s' % (ut.repr2(valid_rules, nl=1),))
        for key in filt_cfg_.keys():
            logger.info(
                'did you mean %r instead of %r?'
                % (ut.closet_words(key, valid_rules)[0], key)
            )
        raise NotImplementedError('Unhandled filt_cfg.keys() = %r' % filt_cfg_.keys())
    # Apply each rule the user actually specified
    chosen_rule_idxs = ut.where([val is not None for val in rule_value_list])
    chosen_rules = ut.take(rule_list, chosen_rule_idxs)
    chosen_vals = ut.take(rule_value_list, chosen_rule_idxs)
    for ((key, rule), val) in zip(chosen_rules, chosen_vals):
        if isinstance(rule, np.ndarray):
            # Precomputed matrix: compare against the requested value
            flags = rule == val
        else:
            flags = rule(val)
        flags = np.logical_and(flags, participates)
        is_valid = np.logical_and(is_valid, flags)
        if verbose:
            verbinfo.print_post(is_valid, flags, 'SampleRule: %s = %r' % (key, val))
    if allcfg:
        # If a query passed in any config, mark it valid in every config it
        # participates in
        is_valid = np.logical_or(np.logical_or.reduce(is_valid.T)[:, None], is_valid)
        is_valid = np.logical_and(is_valid, participates)
    (qx_list, cfgx_list) = np.nonzero(is_valid)
    # --- Resolve sorting directives into (orderby, reverse)
    if sortdsc is not None:
        assert orderby is None, 'use orderby or sortasc'
        assert reverse is None, 'reverse does not work with sortdsc'
        orderby = sortdsc
        reverse = True
    elif sortasc is not None:
        assert reverse is None, 'reverse does not work with sortasc'
        assert orderby is None, 'use orderby or sortasc'
        orderby = sortasc
        reverse = False
    else:
        reverse = False
    if orderby is not None:
        import re

        order_values = None
        # orderby looks like 'gt_score' / 'gf_timedelta' etc.
        for prefix_pattern in ['^gt_?', '^gf_?']:
            prefix_match = re.match(prefix_pattern, orderby)
            if prefix_match is not None:
                truth = prefix_pattern[1:3]
                propname = orderby[prefix_match.end():]
                if verbose:
                    logger.info('Ordering by truth=%s propname=%s' % (truth, propname))
                order_values = truth2_prop[truth][propname]
                break
        if order_values is None:
            # BUGFIX: error message previously read 'Unknown orerby'
            raise NotImplementedError('Unknown orderby=%r' % (orderby,))
    else:
        # No explicit ordering: keep natural (row-major) case order
        order_values = np.arange(is_valid.size).reshape(is_valid.shape)
    flat_order = order_values[is_valid]
    if verbose:
        # BUGFIX: this previously tested `verbose` again instead of `reverse`,
        # so the ascending branch was unreachable and the log always claimed
        # a descending sort.
        if reverse:
            logger.info('Reversing ordering (descending)')
        else:
            logger.info('Normal ordering (ascending)')
    if reverse:
        sortx = flat_order.argsort()[::-1]
    else:
        sortx = flat_order.argsort()
    qx_list = qx_list.take(sortx, axis=0)
    cfgx_list = cfgx_list.take(sortx, axis=0)
    if max_pername is not None:
        # Keep at most `max_pername` cases per individual name
        if verbose:
            logger.info('Returning at most %d cases per name ' % (max_pername,))
        _qaid_list = np.take(qaids, qx_list)
        _qnid_list = ibs.get_annot_nids(_qaid_list)
        _valid_idxs = []
        seen_ = ut.ddict(lambda: 0)
        for (idx, _qnid) in enumerate(_qnid_list):
            if seen_[_qnid] < max_pername:
                seen_[_qnid] += 1
                _valid_idxs.append(idx)
        _qx_list = qx_list[_valid_idxs]
        _cfgx_list = cfgx_list[_valid_idxs]
        _valid_index = np.vstack((_qx_list, _cfgx_list)).T
        is_valid = vt.index_to_boolmask(_valid_index, is_valid.shape, isflat=False)
        qx_list = _qx_list
        cfgx_list = _cfgx_list
    if require_all_cfg:
        # Keep only queries that passed the filters in every config
        if verbose:
            prev_num_valid = is_valid.sum()
            logger.info('Enforcing that all configs must pass filters')
            logger.info(' * prev_num_valid = %r' % (prev_num_valid,))
        qx2_valid_cfgs = ut.group_items(cfgx_list, qx_list)
        hasall_cfg = [len(qx2_valid_cfgs[qx]) == testres.nConfig for qx in qx_list]
        _qx_list = qx_list.compress(hasall_cfg)
        _cfgx_list = cfgx_list.compress(hasall_cfg)
        _valid_index = np.vstack((_qx_list, _cfgx_list)).T
        is_valid = vt.index_to_boolmask(_valid_index, is_valid.shape, isflat=False)
        qx_list = _qx_list
        cfgx_list = _cfgx_list
        if verbose:
            verbinfo.print_post(
                is_valid, None, 'Enforcing that all configs must pass filters'
            )
    if index is not None:
        # Take a positional sub-sample (supports slice strings like '0:8')
        if isinstance(index, str):
            index = ut.smart_cast(index, slice)
        _qx_list = ut.take(qx_list, index)
        _cfgx_list = ut.take(cfgx_list, index)
        _valid_index = np.vstack((_qx_list, _cfgx_list)).T
        is_valid = vt.index_to_boolmask(_valid_index, is_valid.shape, isflat=False)
        qx_list = _qx_list
        cfgx_list = _cfgx_list
        if verbose:
            verbinfo.print_post(
                is_valid,
                None,
                'Taking index=%r sample from len(qx_list) = %r'
                % (index, len(qx_list)),
            )
    if not return_mask:
        case_pos_list = np.vstack((qx_list, cfgx_list)).T
        case_identifier = case_pos_list
    else:
        if verbose:
            logger.info('Converting cases indicies to a 2d-mask')
        case_identifier = is_valid
    if verbose:
        logger.info('Finished case filtering')
        logger.info('Final case stats:')
        qx_hist = ut.dict_hist(qx_list)
        logger.info(
            'config per query stats: %r' % (ut.get_stats_str(qx_hist.values()),)
        )
        logger.info(
            'query per config stats: %r'
            % (ut.get_stats_str(ut.dict_hist(cfgx_list).values()),)
        )
    return case_identifier
|
def get_truth2_prop(testres, qaids=None, join_acfg=False):
    """
    Collects per-query properties of the groundtrue ('gt') and groundfalse
    ('gf') matches for every test config.

    Args:
        qaids (list): query annotation ids
            (default: ``testres.get_test_qaids()``)
        join_acfg (bool): if True, config columns belonging to the same
            group (``testres.get_cfgx_groupxs()``) are merged into a single
            column, taking each value from whichever member config the
            query participated in.

    Returns:
        tuple: (truth2_prop, prop2_mat)
            - truth2_prop: maps 'gt'/'gf' to dicts of (nQaids, nConfigs)
              matrices keyed by 'aid', 'rank', 'score', 'timedelta' and
              'annotmatch_rowid'.
            - prop2_mat: maps 'is_success', 'is_failure' and 'participates'
              to boolean matrices of the same shape.

    CommandLine:
        python -m wbia.expt.test_result --exec-get_truth2_prop --show

    Example:
        >>> # xdoctest: +REQUIRES(--slow)
        >>> # ENABLE_DOCTEST
        >>> from wbia.expt.test_result import *  # NOQA
        >>> import wbia
        >>> ibs, testres = wbia.testdata_expts('PZ_MTEST', a=['ctrl'])
        >>> (truth2_prop, prop2_mat) = testres.get_truth2_prop()
        >>> result = '(truth2_prop, prop2_mat) = %s' % str((truth2_prop, prop2_mat))
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import wbia.plottool as pt
        >>> ut.show_if_requested()
    """
    ibs = testres.ibs
    test_qaids = testres.get_test_qaids() if qaids is None else qaids
    truth2_prop = ut.ddict(ut.odict)
    # participates[qx, cfgx] is True when query qx was run under config cfgx
    participates = testres.get_infoprop_mat('participant', test_qaids)
    truth2_prop['gt']['aid'] = testres.get_infoprop_mat('qx2_gt_aid', test_qaids)
    truth2_prop['gf']['aid'] = testres.get_infoprop_mat('qx2_gf_aid', test_qaids)
    truth2_prop['gt']['rank'] = testres.get_infoprop_mat('qx2_gt_rank', test_qaids)
    truth2_prop['gf']['rank'] = testres.get_infoprop_mat('qx2_gf_rank', test_qaids)
    truth2_prop['gt']['score'] = testres.get_infoprop_mat(
        'qx2_gt_raw_score', test_qaids
    )
    truth2_prop['gf']['score'] = testres.get_infoprop_mat(
        'qx2_gf_raw_score', test_qaids
    )
    # Missing scores become 0 so downstream comparisons never see NaN
    truth2_prop['gt']['score'] = np.nan_to_num(truth2_prop['gt']['score'])
    truth2_prop['gf']['score'] = np.nan_to_num(truth2_prop['gf']['score'])
    for truth in ['gt', 'gf']:
        rank_mat = truth2_prop[truth]['rank']
        # A participating query with a NaN rank never retrieved its match;
        # substitute the worst possible rank (mutates rank_mat in place).
        flags = np.logical_and(np.isnan(rank_mat), participates)
        rank_mat[flags] = testres.get_worst_possible_rank()
    # A case succeeds iff its groundtrue match is ranked first.
    # (A tautological assert that re-checked is_success against the very
    # expression defining it was removed here.)
    is_success = truth2_prop['gt']['rank'] == 0
    is_failure = np.logical_not(is_success)
    for truth in ['gt', 'gf']:
        aid_mat = truth2_prop[truth]['aid']
        timedelta_mat = np.vstack(
            [ibs.get_annot_pair_timedelta(test_qaids, aids) for aids in aid_mat.T]
        ).T
        annotmatch_rowid_mat = np.vstack(
            [
                ibs.get_annotmatch_rowid_from_undirected_superkey(test_qaids, aids)
                for aids in aid_mat.T
            ]
        ).T
        truth2_prop[truth]['annotmatch_rowid'] = annotmatch_rowid_mat
        truth2_prop[truth]['timedelta'] = timedelta_mat
    prop2_mat = {}
    prop2_mat['is_success'] = is_success
    prop2_mat['is_failure'] = is_failure
    prop2_mat['participates'] = participates
    groupxs = testres.get_cfgx_groupxs()

    def group_prop(val, grouped_flags, groupxs):
        # Collapse each group of config columns into one column, taking each
        # value from the member config in which the query participated.
        nRows = len(val)
        new_shape = (nRows, len(groupxs))
        if val.dtype == object or val.dtype.type == object:
            new_val = np.full(new_shape, None, dtype=val.dtype)
        elif ut.is_float(val):
            new_val = np.full(new_shape, np.nan, dtype=val.dtype)
        else:
            new_val = np.zeros(new_shape, dtype=val.dtype)
        grouped_vals = vt.apply_grouping(val.T, groupxs)
        _iter = enumerate(zip(grouped_flags, grouped_vals))
        for (new_col, (flags, group)) in _iter:
            (rows, cols) = np.where(flags.T)
            new_val[(rows, new_col)] = group.T[(rows, cols)]
        return new_val

    if join_acfg:
        # Every query must participate in the same number of configs for the
        # join to be well defined
        assert ut.allsame(participates.sum(axis=1))
        grouped_flags = vt.apply_grouping(participates.T, groupxs)
        new_prop2_mat = {}
        for (key, val) in prop2_mat.items():
            new_prop2_mat[key] = group_prop(val, grouped_flags, groupxs)
        new_truth2_prop = {}
        for (truth, props) in truth2_prop.items():
            new_props = {}
            for (key, val) in props.items():
                new_props[key] = group_prop(val, grouped_flags, groupxs)
            new_truth2_prop[truth] = new_props
        prop2_mat_ = new_prop2_mat
        truth2_prop_ = new_truth2_prop
    else:
        prop2_mat_ = prop2_mat
        truth2_prop_ = truth2_prop
    return (truth2_prop_, prop2_mat_)
| -7,367,768,152,250,038,000
|
Returns:
tuple: (truth2_prop, prop2_mat)
CommandLine:
python -m wbia.expt.test_result --exec-get_truth2_prop --show
Example:
>>> # xdoctest: +REQUIRES(--slow)
>>> # ENABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> import wbia
>>> ibs, testres = wbia.testdata_expts('PZ_MTEST', a=['ctrl'])
>>> (truth2_prop, prop2_mat) = testres.get_truth2_prop()
>>> result = '(truth2_prop, prop2_mat) = %s' % str((truth2_prop, prop2_mat))
>>> print(result)
>>> ut.quit_if_noshow()
>>> import wbia.plottool as pt
>>> ut.show_if_requested()
|
wbia/expt/test_result.py
|
get_truth2_prop
|
WildMeOrg/wildbook-ia
|
python
|
def get_truth2_prop(testres, qaids=None, join_acfg=False):
    """
    Collects per-query properties of the groundtrue ('gt') and groundfalse
    ('gf') matches for every test config.

    Args:
        qaids (list): query annotation ids
            (default: ``testres.get_test_qaids()``)
        join_acfg (bool): if True, config columns belonging to the same
            group (``testres.get_cfgx_groupxs()``) are merged into a single
            column, taking each value from whichever member config the
            query participated in.

    Returns:
        tuple: (truth2_prop, prop2_mat)
            - truth2_prop: maps 'gt'/'gf' to dicts of (nQaids, nConfigs)
              matrices keyed by 'aid', 'rank', 'score', 'timedelta' and
              'annotmatch_rowid'.
            - prop2_mat: maps 'is_success', 'is_failure' and 'participates'
              to boolean matrices of the same shape.

    CommandLine:
        python -m wbia.expt.test_result --exec-get_truth2_prop --show

    Example:
        >>> # xdoctest: +REQUIRES(--slow)
        >>> # ENABLE_DOCTEST
        >>> from wbia.expt.test_result import *  # NOQA
        >>> import wbia
        >>> ibs, testres = wbia.testdata_expts('PZ_MTEST', a=['ctrl'])
        >>> (truth2_prop, prop2_mat) = testres.get_truth2_prop()
        >>> result = '(truth2_prop, prop2_mat) = %s' % str((truth2_prop, prop2_mat))
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import wbia.plottool as pt
        >>> ut.show_if_requested()
    """
    ibs = testres.ibs
    test_qaids = testres.get_test_qaids() if qaids is None else qaids
    truth2_prop = ut.ddict(ut.odict)
    # participates[qx, cfgx] is True when query qx was run under config cfgx
    participates = testres.get_infoprop_mat('participant', test_qaids)
    truth2_prop['gt']['aid'] = testres.get_infoprop_mat('qx2_gt_aid', test_qaids)
    truth2_prop['gf']['aid'] = testres.get_infoprop_mat('qx2_gf_aid', test_qaids)
    truth2_prop['gt']['rank'] = testres.get_infoprop_mat('qx2_gt_rank', test_qaids)
    truth2_prop['gf']['rank'] = testres.get_infoprop_mat('qx2_gf_rank', test_qaids)
    truth2_prop['gt']['score'] = testres.get_infoprop_mat(
        'qx2_gt_raw_score', test_qaids
    )
    truth2_prop['gf']['score'] = testres.get_infoprop_mat(
        'qx2_gf_raw_score', test_qaids
    )
    # Missing scores become 0 so downstream comparisons never see NaN
    truth2_prop['gt']['score'] = np.nan_to_num(truth2_prop['gt']['score'])
    truth2_prop['gf']['score'] = np.nan_to_num(truth2_prop['gf']['score'])
    for truth in ['gt', 'gf']:
        rank_mat = truth2_prop[truth]['rank']
        # A participating query with a NaN rank never retrieved its match;
        # substitute the worst possible rank (mutates rank_mat in place).
        flags = np.logical_and(np.isnan(rank_mat), participates)
        rank_mat[flags] = testres.get_worst_possible_rank()
    # A case succeeds iff its groundtrue match is ranked first.
    # (A tautological assert that re-checked is_success against the very
    # expression defining it was removed here.)
    is_success = truth2_prop['gt']['rank'] == 0
    is_failure = np.logical_not(is_success)
    for truth in ['gt', 'gf']:
        aid_mat = truth2_prop[truth]['aid']
        timedelta_mat = np.vstack(
            [ibs.get_annot_pair_timedelta(test_qaids, aids) for aids in aid_mat.T]
        ).T
        annotmatch_rowid_mat = np.vstack(
            [
                ibs.get_annotmatch_rowid_from_undirected_superkey(test_qaids, aids)
                for aids in aid_mat.T
            ]
        ).T
        truth2_prop[truth]['annotmatch_rowid'] = annotmatch_rowid_mat
        truth2_prop[truth]['timedelta'] = timedelta_mat
    prop2_mat = {}
    prop2_mat['is_success'] = is_success
    prop2_mat['is_failure'] = is_failure
    prop2_mat['participates'] = participates
    groupxs = testres.get_cfgx_groupxs()

    def group_prop(val, grouped_flags, groupxs):
        # Collapse each group of config columns into one column, taking each
        # value from the member config in which the query participated.
        nRows = len(val)
        new_shape = (nRows, len(groupxs))
        if val.dtype == object or val.dtype.type == object:
            new_val = np.full(new_shape, None, dtype=val.dtype)
        elif ut.is_float(val):
            new_val = np.full(new_shape, np.nan, dtype=val.dtype)
        else:
            new_val = np.zeros(new_shape, dtype=val.dtype)
        grouped_vals = vt.apply_grouping(val.T, groupxs)
        _iter = enumerate(zip(grouped_flags, grouped_vals))
        for (new_col, (flags, group)) in _iter:
            (rows, cols) = np.where(flags.T)
            new_val[(rows, new_col)] = group.T[(rows, cols)]
        return new_val

    if join_acfg:
        # Every query must participate in the same number of configs for the
        # join to be well defined
        assert ut.allsame(participates.sum(axis=1))
        grouped_flags = vt.apply_grouping(participates.T, groupxs)
        new_prop2_mat = {}
        for (key, val) in prop2_mat.items():
            new_prop2_mat[key] = group_prop(val, grouped_flags, groupxs)
        new_truth2_prop = {}
        for (truth, props) in truth2_prop.items():
            new_props = {}
            for (key, val) in props.items():
                new_props[key] = group_prop(val, grouped_flags, groupxs)
            new_truth2_prop[truth] = new_props
        prop2_mat_ = new_prop2_mat
        truth2_prop_ = new_truth2_prop
    else:
        prop2_mat_ = prop2_mat
        truth2_prop_ = truth2_prop
    return (truth2_prop_, prop2_mat_)
|
def draw_score_diff_disti(testres):
"\n\n CommandLine:\n python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td -t best --db PZ_Master1\n python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td -t best --db GZ_Master1\n python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td1h -t best --db GIRM_Master1\n\n python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td:qmin_pername=3,dpername=2 -t best --db PZ_Master1\n\n python -m wbia --tf get_annotcfg_list -a varynannots_td -t best --db PZ_Master1\n 13502\n python -m wbia --tf draw_match_cases --db PZ_Master1 -a varynannots_td:dsample_size=.01 -t best --show --qaid 13502\n python -m wbia --tf draw_match_cases --db PZ_Master1 -a varynannots_td -t best --show\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('PZ_Master1', a=['varynannots_td'], t=['best'])\n >>> result = testres.draw_score_diff_disti()\n >>> print(result)\n >>> ut.show_if_requested()\n "
import wbia.plottool as pt
import vtool as vt
ibs = testres.ibs
qaids = testres.get_test_qaids()
qaids = ibs.get_annot_tag_filterflags(qaids, {'has_none': 'timedeltaerror'})
gt_rawscore = testres.get_infoprop_mat('qx2_gt_raw_score', qaids=qaids)
gf_rawscore = testres.get_infoprop_mat('qx2_gf_raw_score', qaids=qaids)
gt_valid_flags_list = np.isfinite(gt_rawscore).T
gf_valid_flags_list = np.isfinite(gf_rawscore).T
cfgx2_gt_scores = vt.zipcompress(gt_rawscore.T, gt_valid_flags_list)
cfgx2_gf_scores = vt.zipcompress(gf_rawscore.T, gf_valid_flags_list)
gt_rank = testres.get_infoprop_mat('qx2_gt_rank', qaids=qaids)
gf_ranks = testres.get_infoprop_mat('qx2_gf_rank', qaids=qaids)
cfgx2_gt_ranks = vt.zipcompress(gt_rank.T, gt_valid_flags_list)
cfgx2_rank0_gt_scores = vt.zipcompress(cfgx2_gt_scores, [(ranks == 0) for ranks in cfgx2_gt_ranks])
cfgx2_rankX_gt_scores = vt.zipcompress(cfgx2_gt_scores, [(ranks > 0) for ranks in cfgx2_gt_ranks])
cfgx2_gf_ranks = vt.zipcompress(gf_ranks.T, gf_valid_flags_list)
cfgx2_rank0_gf_scores = vt.zipcompress(cfgx2_gf_scores, [(ranks == 0) for ranks in cfgx2_gf_ranks])
xdata = list(map(len, testres.cfgx2_daids))
USE_MEDIAN = True
USE_LOG = False
if USE_MEDIAN:
ave = np.median
dev = vt.median_abs_dev
else:
ave = np.mean
dev = np.std
def make_interval_args(arr_list, ave=ave, dev=dev, **kwargs):
import utool as ut
if USE_LOG:
arr_list = list(map((lambda x: np.log((x + 1))), arr_list))
sizes_ = list(map(len, arr_list))
ydata_ = list(map(ave, arr_list))
spread_ = list(map(dev, arr_list))
label = kwargs.get('label', '')
label += (' ' + ut.get_funcname(ave))
kwargs['label'] = label
logger.info(((label + 'score stats : ') + ut.repr2(ut.get_jagged_stats(arr_list, use_median=True), nl=1, precision=1)))
return (ydata_, spread_, kwargs, sizes_)
args_list1 = [make_interval_args(cfgx2_gt_scores, label='GT', color=pt.TRUE_BLUE), make_interval_args(cfgx2_gf_scores, label='GF', color=pt.FALSE_RED)]
args_list2 = [make_interval_args(cfgx2_rank0_gt_scores, label='GT-rank = 0', color=pt.LIGHT_GREEN), make_interval_args(cfgx2_rankX_gt_scores, label='GT-rank > 0', color=pt.YELLOW), make_interval_args(cfgx2_rank0_gf_scores, label='GF-rank = 0', color=pt.PINK)]
plotargs_list = [args_list1, args_list2]
ymax = (- np.inf)
ymin = np.inf
for args_list in plotargs_list:
ydata_list = np.array(ut.get_list_column(args_list, 0))
spread = np.array(ut.get_list_column(args_list, 1))
ymax = max(ymax, np.array((ydata_list + spread)).max())
ymin = min(ymax, np.array((ydata_list - spread)).min())
ylabel = ('log name score' if USE_LOG else 'name score')
statickw = dict(xlabel='database size (number of annotations)', ylabel=ylabel, linewidth=2, spread_alpha=0.5, lightbg=True, marker='o', ymax=ymax, ymin=ymin, xmax='data', xmin='data')
fnum = pt.ensure_fnum(None)
pnum_ = pt.make_pnum_nextgen(len(plotargs_list), 1)
for args_list in plotargs_list:
ydata_list = ut.get_list_column(args_list, 0)
spread_list = ut.get_list_column(args_list, 1)
kwargs_list = ut.get_list_column(args_list, 2)
sizes_list = ut.get_list_column(args_list, 3)
logger.info(('sizes_list = %s' % (ut.repr2(sizes_list, nl=1),)))
plotkw = ut.dict_stack2(kwargs_list, '_list')
plotkw2 = ut.merge_dicts(statickw, plotkw)
pt.multi_plot(xdata, ydata_list, spread_list=spread_list, fnum=fnum, pnum=pnum_(), **plotkw2)
figtitle = ('Score vs DBSize: %s' % testres.get_title_aug())
pt.set_figtitle(figtitle)
| 1,651,632,516,879,799,300
|
CommandLine:
python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td -t best --db PZ_Master1
python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td -t best --db GZ_Master1
python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td1h -t best --db GIRM_Master1
python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td:qmin_pername=3,dpername=2 -t best --db PZ_Master1
python -m wbia --tf get_annotcfg_list -a varynannots_td -t best --db PZ_Master1
13502
python -m wbia --tf draw_match_cases --db PZ_Master1 -a varynannots_td:dsample_size=.01 -t best --show --qaid 13502
python -m wbia --tf draw_match_cases --db PZ_Master1 -a varynannots_td -t best --show
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
>>> import wbia
>>> ibs, testres = wbia.testdata_expts('PZ_Master1', a=['varynannots_td'], t=['best'])
>>> result = testres.draw_score_diff_disti()
>>> print(result)
>>> ut.show_if_requested()
|
wbia/expt/test_result.py
|
draw_score_diff_disti
|
WildMeOrg/wildbook-ia
|
python
|
def draw_score_diff_disti(testres):
"\n\n CommandLine:\n python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td -t best --db PZ_Master1\n python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td -t best --db GZ_Master1\n python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td1h -t best --db GIRM_Master1\n\n python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td:qmin_pername=3,dpername=2 -t best --db PZ_Master1\n\n python -m wbia --tf get_annotcfg_list -a varynannots_td -t best --db PZ_Master1\n 13502\n python -m wbia --tf draw_match_cases --db PZ_Master1 -a varynannots_td:dsample_size=.01 -t best --show --qaid 13502\n python -m wbia --tf draw_match_cases --db PZ_Master1 -a varynannots_td -t best --show\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('PZ_Master1', a=['varynannots_td'], t=['best'])\n >>> result = testres.draw_score_diff_disti()\n >>> print(result)\n >>> ut.show_if_requested()\n "
import wbia.plottool as pt
import vtool as vt
ibs = testres.ibs
qaids = testres.get_test_qaids()
qaids = ibs.get_annot_tag_filterflags(qaids, {'has_none': 'timedeltaerror'})
gt_rawscore = testres.get_infoprop_mat('qx2_gt_raw_score', qaids=qaids)
gf_rawscore = testres.get_infoprop_mat('qx2_gf_raw_score', qaids=qaids)
gt_valid_flags_list = np.isfinite(gt_rawscore).T
gf_valid_flags_list = np.isfinite(gf_rawscore).T
cfgx2_gt_scores = vt.zipcompress(gt_rawscore.T, gt_valid_flags_list)
cfgx2_gf_scores = vt.zipcompress(gf_rawscore.T, gf_valid_flags_list)
gt_rank = testres.get_infoprop_mat('qx2_gt_rank', qaids=qaids)
gf_ranks = testres.get_infoprop_mat('qx2_gf_rank', qaids=qaids)
cfgx2_gt_ranks = vt.zipcompress(gt_rank.T, gt_valid_flags_list)
cfgx2_rank0_gt_scores = vt.zipcompress(cfgx2_gt_scores, [(ranks == 0) for ranks in cfgx2_gt_ranks])
cfgx2_rankX_gt_scores = vt.zipcompress(cfgx2_gt_scores, [(ranks > 0) for ranks in cfgx2_gt_ranks])
cfgx2_gf_ranks = vt.zipcompress(gf_ranks.T, gf_valid_flags_list)
cfgx2_rank0_gf_scores = vt.zipcompress(cfgx2_gf_scores, [(ranks == 0) for ranks in cfgx2_gf_ranks])
xdata = list(map(len, testres.cfgx2_daids))
USE_MEDIAN = True
USE_LOG = False
if USE_MEDIAN:
ave = np.median
dev = vt.median_abs_dev
else:
ave = np.mean
dev = np.std
def make_interval_args(arr_list, ave=ave, dev=dev, **kwargs):
import utool as ut
if USE_LOG:
arr_list = list(map((lambda x: np.log((x + 1))), arr_list))
sizes_ = list(map(len, arr_list))
ydata_ = list(map(ave, arr_list))
spread_ = list(map(dev, arr_list))
label = kwargs.get('label', )
label += (' ' + ut.get_funcname(ave))
kwargs['label'] = label
logger.info(((label + 'score stats : ') + ut.repr2(ut.get_jagged_stats(arr_list, use_median=True), nl=1, precision=1)))
return (ydata_, spread_, kwargs, sizes_)
args_list1 = [make_interval_args(cfgx2_gt_scores, label='GT', color=pt.TRUE_BLUE), make_interval_args(cfgx2_gf_scores, label='GF', color=pt.FALSE_RED)]
args_list2 = [make_interval_args(cfgx2_rank0_gt_scores, label='GT-rank = 0', color=pt.LIGHT_GREEN), make_interval_args(cfgx2_rankX_gt_scores, label='GT-rank > 0', color=pt.YELLOW), make_interval_args(cfgx2_rank0_gf_scores, label='GF-rank = 0', color=pt.PINK)]
plotargs_list = [args_list1, args_list2]
ymax = (- np.inf)
ymin = np.inf
for args_list in plotargs_list:
ydata_list = np.array(ut.get_list_column(args_list, 0))
spread = np.array(ut.get_list_column(args_list, 1))
ymax = max(ymax, np.array((ydata_list + spread)).max())
ymin = min(ymax, np.array((ydata_list - spread)).min())
ylabel = ('log name score' if USE_LOG else 'name score')
statickw = dict(xlabel='database size (number of annotations)', ylabel=ylabel, linewidth=2, spread_alpha=0.5, lightbg=True, marker='o', ymax=ymax, ymin=ymin, xmax='data', xmin='data')
fnum = pt.ensure_fnum(None)
pnum_ = pt.make_pnum_nextgen(len(plotargs_list), 1)
for args_list in plotargs_list:
ydata_list = ut.get_list_column(args_list, 0)
spread_list = ut.get_list_column(args_list, 1)
kwargs_list = ut.get_list_column(args_list, 2)
sizes_list = ut.get_list_column(args_list, 3)
logger.info(('sizes_list = %s' % (ut.repr2(sizes_list, nl=1),)))
plotkw = ut.dict_stack2(kwargs_list, '_list')
plotkw2 = ut.merge_dicts(statickw, plotkw)
pt.multi_plot(xdata, ydata_list, spread_list=spread_list, fnum=fnum, pnum=pnum_(), **plotkw2)
figtitle = ('Score vs DBSize: %s' % testres.get_title_aug())
pt.set_figtitle(figtitle)
|
def draw_rank_cmc(testres):
'\n Wrapper\n '
from wbia.expt import experiment_drawing
experiment_drawing.draw_rank_cmc(testres.ibs, testres)
| -2,835,022,498,734,089,000
|
Wrapper
|
wbia/expt/test_result.py
|
draw_rank_cmc
|
WildMeOrg/wildbook-ia
|
python
|
def draw_rank_cmc(testres):
'\n \n '
from wbia.expt import experiment_drawing
experiment_drawing.draw_rank_cmc(testres.ibs, testres)
|
def draw_match_cases(testres, **kwargs):
'\n Wrapper\n '
from wbia.expt import experiment_drawing
experiment_drawing.draw_match_cases(testres.ibs, testres, **kwargs)
| 413,246,073,130,097,800
|
Wrapper
|
wbia/expt/test_result.py
|
draw_match_cases
|
WildMeOrg/wildbook-ia
|
python
|
def draw_match_cases(testres, **kwargs):
'\n \n '
from wbia.expt import experiment_drawing
experiment_drawing.draw_match_cases(testres.ibs, testres, **kwargs)
|
def draw_failure_cases(testres, **kwargs):
"\n >>> from wbia.other.dbinfo import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts(defaultdb='PZ_MTEST', a='timectrl:qsize=2', t='invar:AI=[False],RI=False', use_cache=False)\n "
from wbia.expt import experiment_drawing
orig_filter = ':'
kwargs['f'] = (orig_filter + 'fail')
case_pos_list = testres.case_sample2(':fail=True,index=0:5')
experiment_drawing.draw_match_cases(testres.ibs, testres, case_pos_list=case_pos_list, annot_modes=[1], interact=True)
| 4,938,227,401,944,836,000
|
>>> from wbia.other.dbinfo import * # NOQA
>>> import wbia
>>> ibs, testres = wbia.testdata_expts(defaultdb='PZ_MTEST', a='timectrl:qsize=2', t='invar:AI=[False],RI=False', use_cache=False)
|
wbia/expt/test_result.py
|
draw_failure_cases
|
WildMeOrg/wildbook-ia
|
python
|
def draw_failure_cases(testres, **kwargs):
"\n >>> from wbia.other.dbinfo import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts(defaultdb='PZ_MTEST', a='timectrl:qsize=2', t='invar:AI=[False],RI=False', use_cache=False)\n "
from wbia.expt import experiment_drawing
orig_filter = ':'
kwargs['f'] = (orig_filter + 'fail')
case_pos_list = testres.case_sample2(':fail=True,index=0:5')
experiment_drawing.draw_match_cases(testres.ibs, testres, case_pos_list=case_pos_list, annot_modes=[1], interact=True)
|
def find_score_thresh_cutoff(testres):
'\n FIXME\n DUPLICATE CODE\n rectify with experiment_drawing\n '
import vtool as vt
if ut.VERBOSE:
logger.info('[dev] FIX DUPLICATE CODE find_thresh_cutoff')
assert (len(testres.cfgx2_qreq_) == 1), 'can only specify one config here'
cfgx = 0
test_qaids = testres.get_test_qaids()
gt_rawscore = testres.get_infoprop_mat('qx2_gt_raw_score', qaids=test_qaids).T[cfgx]
gf_rawscore = testres.get_infoprop_mat('qx2_gf_raw_score', qaids=test_qaids).T[cfgx]
tp_nscores = gt_rawscore
tn_nscores = gf_rawscore
tn_qaids = tp_qaids = test_qaids
part_attrs = {1: {'qaid': tp_qaids}, 0: {'qaid': tn_qaids}}
fpr = None
tpr = 0.85
encoder = vt.ScoreNormalizer(adjust=8, fpr=fpr, tpr=tpr, monotonize=True)
(name_scores, labels, attrs) = encoder._to_xy(tp_nscores, tn_nscores, part_attrs)
encoder.fit(name_scores, labels, attrs)
score_thresh = encoder.learn_threshold2()
return score_thresh
| 316,158,937,080,194,100
|
FIXME
DUPLICATE CODE
rectify with experiment_drawing
|
wbia/expt/test_result.py
|
find_score_thresh_cutoff
|
WildMeOrg/wildbook-ia
|
python
|
def find_score_thresh_cutoff(testres):
'\n FIXME\n DUPLICATE CODE\n rectify with experiment_drawing\n '
import vtool as vt
if ut.VERBOSE:
logger.info('[dev] FIX DUPLICATE CODE find_thresh_cutoff')
assert (len(testres.cfgx2_qreq_) == 1), 'can only specify one config here'
cfgx = 0
test_qaids = testres.get_test_qaids()
gt_rawscore = testres.get_infoprop_mat('qx2_gt_raw_score', qaids=test_qaids).T[cfgx]
gf_rawscore = testres.get_infoprop_mat('qx2_gf_raw_score', qaids=test_qaids).T[cfgx]
tp_nscores = gt_rawscore
tn_nscores = gf_rawscore
tn_qaids = tp_qaids = test_qaids
part_attrs = {1: {'qaid': tp_qaids}, 0: {'qaid': tn_qaids}}
fpr = None
tpr = 0.85
encoder = vt.ScoreNormalizer(adjust=8, fpr=fpr, tpr=tpr, monotonize=True)
(name_scores, labels, attrs) = encoder._to_xy(tp_nscores, tn_nscores, part_attrs)
encoder.fit(name_scores, labels, attrs)
score_thresh = encoder.learn_threshold2()
return score_thresh
|
def print_percent_identification_success(testres):
'\n Prints names identified (at rank 1) / names queried.\n This combines results over multiple queries of a particular name using\n max\n\n OLD, MAYBE DEPRIATE\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n '
ibs = testres.ibs
qaids = testres.get_test_qaids()
(unique_nids, groupxs) = ut.group_indices(ibs.get_annot_nids(qaids))
qx2_gt_raw_score = testres.get_infoprop_mat('qx2_gt_raw_score', qaids=qaids)
qx2_gf_raw_score = testres.get_infoprop_mat('qx2_gf_raw_score', qaids=qaids)
nx2_gt_raw_score = np.array([np.nanmax(scores, axis=0) for scores in vt.apply_grouping(qx2_gt_raw_score, groupxs)])
nx2_gf_raw_score = np.array([np.nanmax(scores, axis=0) for scores in vt.apply_grouping(qx2_gf_raw_score, groupxs)])
cfgx2_success = (nx2_gt_raw_score > nx2_gf_raw_score).T
logger.info('Identification success (names identified / names queried)')
for (cfgx, success) in enumerate(cfgx2_success):
pipelbl = testres.cfgx2_lbl[cfgx]
percent = ((100 * success.sum()) / len(success))
logger.info(('%2d) success = %r/%r = %.2f%% -- %s' % (cfgx, success.sum(), len(success), percent, pipelbl)))
| -5,736,439,353,157,751,000
|
Prints names identified (at rank 1) / names queried.
This combines results over multiple queries of a particular name using
max
OLD, MAYBE DEPRIATE
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.expt.test_result import * # NOQA
|
wbia/expt/test_result.py
|
print_percent_identification_success
|
WildMeOrg/wildbook-ia
|
python
|
def print_percent_identification_success(testres):
'\n Prints names identified (at rank 1) / names queried.\n This combines results over multiple queries of a particular name using\n max\n\n OLD, MAYBE DEPRIATE\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n '
ibs = testres.ibs
qaids = testres.get_test_qaids()
(unique_nids, groupxs) = ut.group_indices(ibs.get_annot_nids(qaids))
qx2_gt_raw_score = testres.get_infoprop_mat('qx2_gt_raw_score', qaids=qaids)
qx2_gf_raw_score = testres.get_infoprop_mat('qx2_gf_raw_score', qaids=qaids)
nx2_gt_raw_score = np.array([np.nanmax(scores, axis=0) for scores in vt.apply_grouping(qx2_gt_raw_score, groupxs)])
nx2_gf_raw_score = np.array([np.nanmax(scores, axis=0) for scores in vt.apply_grouping(qx2_gf_raw_score, groupxs)])
cfgx2_success = (nx2_gt_raw_score > nx2_gf_raw_score).T
logger.info('Identification success (names identified / names queried)')
for (cfgx, success) in enumerate(cfgx2_success):
pipelbl = testres.cfgx2_lbl[cfgx]
percent = ((100 * success.sum()) / len(success))
logger.info(('%2d) success = %r/%r = %.2f%% -- %s' % (cfgx, success.sum(), len(success), percent, pipelbl)))
|
def map_score(testres):
"\n For each query compute a precision recall curve.\n Then, for each query compute the average precision.\n Then take the mean of all average precisions to obtain the mAP.\n\n Script:\n >>> #ibs = wbia.opendb('Oxford')\n >>> #ibs, testres = wbia.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False,True]')\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False,True],can_match_sameimg=True')\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False],can_match_sameimg=True')\n "
import sklearn.metrics
qaids = testres.get_test_qaids()
ibs = testres.ibs
PLOT = True
PLOT = False
cfgx2_cms = []
for qreq_ in testres.cfgx2_qreq_:
cm_list = qreq_.execute(qaids)
cm_list = [cm.extend_results(qreq_) for cm in cm_list]
for cm in cm_list:
cm.score_annot_csum(qreq_)
cfgx2_cms.append(cm_list)
map_list = []
(unique_names, groupxs) = ut.group_indices(ibs.annots(qaids).names)
for (cm_list, qreq_) in zip(cfgx2_cms, testres.cfgx2_qreq_):
if PLOT:
import wbia.plottool as pt
pt.qt4ensure()
fnum = pt.ensure_fnum(None)
pt.figure(fnum=fnum)
avep_list = []
for cm in cm_list:
flags = (np.array(ibs.annots(cm.daid_list).quality_texts) != 'junk')
assert np.all(flags)
daid_list = cm.daid_list
dnid_list = cm.dnid_list
y_true = (cm.qnid == dnid_list).compress(flags).astype(np.int)
y_score = cm.annot_score_list.compress(flags)
y_score[(~ np.isfinite(y_score))] = 0
y_score = np.nan_to_num(y_score)
sortx = np.argsort(y_score)[::(- 1)]
daid_list = daid_list.take(sortx)
dnid_list = dnid_list.take(sortx)
y_true = y_true.take(sortx)
y_score = y_score.take(sortx)
(precision, recall, thresholds) = sklearn.metrics.precision_recall_curve(y_true, y_score)
if PLOT:
pt.plot2(recall, precision, marker='', linestyle='-', x_label='recall', y_label='precision')
avep = sklearn.metrics.average_precision_score(y_true, y_score)
avep_list.append(avep)
name_to_ave = [np.mean(a) for a in ut.apply_grouping(avep_list, groupxs)]
name_to_ave_ = dict(zip(unique_names, name_to_ave))
logger.info(('name_to_ave_ = %s' % ut.align(ut.repr3(name_to_ave_, precision=3), ':')))
mean_ave_precision = np.mean(name_to_ave)
logger.info(('mean_ave_precision = %r' % (mean_ave_precision,)))
map_list.append(mean_ave_precision)
return map_list
| -8,849,611,054,417,056,000
|
For each query compute a precision recall curve.
Then, for each query compute the average precision.
Then take the mean of all average precisions to obtain the mAP.
Script:
>>> #ibs = wbia.opendb('Oxford')
>>> #ibs, testres = wbia.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False,True]')
>>> import wbia
>>> ibs, testres = wbia.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False,True],can_match_sameimg=True')
>>> import wbia
>>> ibs, testres = wbia.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False],can_match_sameimg=True')
|
wbia/expt/test_result.py
|
map_score
|
WildMeOrg/wildbook-ia
|
python
|
def map_score(testres):
"\n For each query compute a precision recall curve.\n Then, for each query compute the average precision.\n Then take the mean of all average precisions to obtain the mAP.\n\n Script:\n >>> #ibs = wbia.opendb('Oxford')\n >>> #ibs, testres = wbia.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False,True]')\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False,True],can_match_sameimg=True')\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False],can_match_sameimg=True')\n "
import sklearn.metrics
qaids = testres.get_test_qaids()
ibs = testres.ibs
PLOT = True
PLOT = False
cfgx2_cms = []
for qreq_ in testres.cfgx2_qreq_:
cm_list = qreq_.execute(qaids)
cm_list = [cm.extend_results(qreq_) for cm in cm_list]
for cm in cm_list:
cm.score_annot_csum(qreq_)
cfgx2_cms.append(cm_list)
map_list = []
(unique_names, groupxs) = ut.group_indices(ibs.annots(qaids).names)
for (cm_list, qreq_) in zip(cfgx2_cms, testres.cfgx2_qreq_):
if PLOT:
import wbia.plottool as pt
pt.qt4ensure()
fnum = pt.ensure_fnum(None)
pt.figure(fnum=fnum)
avep_list = []
for cm in cm_list:
flags = (np.array(ibs.annots(cm.daid_list).quality_texts) != 'junk')
assert np.all(flags)
daid_list = cm.daid_list
dnid_list = cm.dnid_list
y_true = (cm.qnid == dnid_list).compress(flags).astype(np.int)
y_score = cm.annot_score_list.compress(flags)
y_score[(~ np.isfinite(y_score))] = 0
y_score = np.nan_to_num(y_score)
sortx = np.argsort(y_score)[::(- 1)]
daid_list = daid_list.take(sortx)
dnid_list = dnid_list.take(sortx)
y_true = y_true.take(sortx)
y_score = y_score.take(sortx)
(precision, recall, thresholds) = sklearn.metrics.precision_recall_curve(y_true, y_score)
if PLOT:
pt.plot2(recall, precision, marker=, linestyle='-', x_label='recall', y_label='precision')
avep = sklearn.metrics.average_precision_score(y_true, y_score)
avep_list.append(avep)
name_to_ave = [np.mean(a) for a in ut.apply_grouping(avep_list, groupxs)]
name_to_ave_ = dict(zip(unique_names, name_to_ave))
logger.info(('name_to_ave_ = %s' % ut.align(ut.repr3(name_to_ave_, precision=3), ':')))
mean_ave_precision = np.mean(name_to_ave)
logger.info(('mean_ave_precision = %r' % (mean_ave_precision,)))
map_list.append(mean_ave_precision)
return map_list
|
def embed_testres(testres):
"\n CommandLine:\n python -m wbia TestResults.embed_testres\n\n Example:\n >>> # SCRIPT\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts(defaultdb='PZ_MTEST')\n >>> embed_testres(testres)\n "
ut.embed()
| -6,328,134,899,002,221,000
|
CommandLine:
python -m wbia TestResults.embed_testres
Example:
>>> # SCRIPT
>>> from wbia.expt.test_result import * # NOQA
>>> from wbia.init import main_helpers
>>> ibs, testres = main_helpers.testdata_expts(defaultdb='PZ_MTEST')
>>> embed_testres(testres)
|
wbia/expt/test_result.py
|
embed_testres
|
WildMeOrg/wildbook-ia
|
python
|
def embed_testres(testres):
"\n CommandLine:\n python -m wbia TestResults.embed_testres\n\n Example:\n >>> # SCRIPT\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts(defaultdb='PZ_MTEST')\n >>> embed_testres(testres)\n "
ut.embed()
|
def cols_disagree(mat, val):
"\n is_success = prop2_mat['is_success']\n "
nCols = mat.shape[1]
sums = mat.sum(axis=1)
disagree_flags1d = np.logical_and((sums > 0), (sums < nCols))
disagree_flags2d = np.tile(disagree_flags1d[:, None], (1, nCols))
if (not val):
flags = np.logical_not(disagree_flags2d)
else:
flags = disagree_flags2d
return flags
| -4,581,002,074,630,044,700
|
is_success = prop2_mat['is_success']
|
wbia/expt/test_result.py
|
cols_disagree
|
WildMeOrg/wildbook-ia
|
python
|
def cols_disagree(mat, val):
"\n \n "
nCols = mat.shape[1]
sums = mat.sum(axis=1)
disagree_flags1d = np.logical_and((sums > 0), (sums < nCols))
disagree_flags2d = np.tile(disagree_flags1d[:, None], (1, nCols))
if (not val):
flags = np.logical_not(disagree_flags2d)
else:
flags = disagree_flags2d
return flags
|
def cfg_scoresep(mat, val, op):
"\n Compares scores between different configs\n\n op = operator.ge\n is_success = prop2_mat['is_success']\n "
nCols = mat.shape[1]
pdistx = vt.pdist_indicies(nCols)
pdist_list = np.array([vt.safe_pdist(row) for row in mat])
flags_list = op(pdist_list, val)
colx_list = [np.unique(ut.flatten(ut.compress(pdistx, flags))) for flags in flags_list]
offsets = np.arange(0, (nCols * len(mat)), step=nCols)
idx_list = ut.flatten([(colx + offset) for (colx, offset) in zip(colx_list, offsets)])
mask = vt.index_to_boolmask(idx_list, maxval=(offsets[(- 1)] + nCols))
flags = mask.reshape(mat.shape)
return flags
| -5,820,052,321,142,968,000
|
Compares scores between different configs
op = operator.ge
is_success = prop2_mat['is_success']
|
wbia/expt/test_result.py
|
cfg_scoresep
|
WildMeOrg/wildbook-ia
|
python
|
def cfg_scoresep(mat, val, op):
"\n Compares scores between different configs\n\n op = operator.ge\n is_success = prop2_mat['is_success']\n "
nCols = mat.shape[1]
pdistx = vt.pdist_indicies(nCols)
pdist_list = np.array([vt.safe_pdist(row) for row in mat])
flags_list = op(pdist_list, val)
colx_list = [np.unique(ut.flatten(ut.compress(pdistx, flags))) for flags in flags_list]
offsets = np.arange(0, (nCols * len(mat)), step=nCols)
idx_list = ut.flatten([(colx + offset) for (colx, offset) in zip(colx_list, offsets)])
mask = vt.index_to_boolmask(idx_list, maxval=(offsets[(- 1)] + nCols))
flags = mask.reshape(mat.shape)
return flags
|
@pytest.fixture
def response():
'Sample pytest fixture.\n\n See more at: http://doc.pytest.org/en/latest/fixture.html\n '
import requests
return requests.get('https://github.com/torvalds/linux')
| 8,155,420,939,485,564,000
|
Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
|
tests/test_SEIR.py
|
response
|
sellisd/seir
|
python
|
@pytest.fixture
def response():
'Sample pytest fixture.\n\n See more at: http://doc.pytest.org/en/latest/fixture.html\n '
import requests
return requests.get('https://github.com/torvalds/linux')
|
def test_content(response):
'Sample pytest test function with the pytest fixture as an argument.'
| -9,075,191,156,716,607,000
|
Sample pytest test function with the pytest fixture as an argument.
|
tests/test_SEIR.py
|
test_content
|
sellisd/seir
|
python
|
def test_content(response):
|
def test_command_line_interface():
'Test the CLI.'
runner = CliRunner()
help_result = runner.invoke(cli.main, ['--help'])
assert (help_result.exit_code == 0)
assert ('Show this message and exit.' in help_result.output)
| 3,112,977,249,146,923,500
|
Test the CLI.
|
tests/test_SEIR.py
|
test_command_line_interface
|
sellisd/seir
|
python
|
def test_command_line_interface():
runner = CliRunner()
help_result = runner.invoke(cli.main, ['--help'])
assert (help_result.exit_code == 0)
assert ('Show this message and exit.' in help_result.output)
|
def image_path_at(self, i):
'\n Return the absolute path to image i in the image sequence.\n '
return self.image_path_from_index(self._image_index[i])
| -1,883,448,437,578,965,500
|
Return the absolute path to image i in the image sequence.
|
faster_rcnn/datasets/pascal_voc2.py
|
image_path_at
|
zjjszj/PS_DM_mydetector_faster_rcnn_pytorch
|
python
|
def image_path_at(self, i):
'\n \n '
return self.image_path_from_index(self._image_index[i])
|
def image_path_from_index(self, index):
'\n Construct an image path from the image\'s "index" identifier.\n '
image_path = os.path.join(self._data_path, 'JPEGImages', (index + self._image_ext))
assert os.path.exists(image_path), 'Path does not exist: {}'.format(image_path)
return image_path
| 6,881,484,836,329,622,000
|
Construct an image path from the image's "index" identifier.
|
faster_rcnn/datasets/pascal_voc2.py
|
image_path_from_index
|
zjjszj/PS_DM_mydetector_faster_rcnn_pytorch
|
python
|
def image_path_from_index(self, index):
'\n Construct an image path from the image\'s "index" identifier.\n '
image_path = os.path.join(self._data_path, 'JPEGImages', (index + self._image_ext))
assert os.path.exists(image_path), 'Path does not exist: {}'.format(image_path)
return image_path
|
def _load_image_set_index(self):
"\n Load the indexes listed in this dataset's image set file.\n "
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main', (self._image_set + '.txt'))
assert os.path.exists(image_set_file), 'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
| -9,143,730,377,540,284,000
|
Load the indexes listed in this dataset's image set file.
|
faster_rcnn/datasets/pascal_voc2.py
|
_load_image_set_index
|
zjjszj/PS_DM_mydetector_faster_rcnn_pytorch
|
python
|
def _load_image_set_index(self):
"\n \n "
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main', (self._image_set + '.txt'))
assert os.path.exists(image_set_file), 'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
|
def _get_default_path(self):
'\n Return the default path where PASCAL VOC is expected to be installed.\n '
return os.path.join(ROOT_DIR, 'data', 'PASCAL')
| -5,326,098,657,779,618,000
|
Return the default path where PASCAL VOC is expected to be installed.
|
faster_rcnn/datasets/pascal_voc2.py
|
_get_default_path
|
zjjszj/PS_DM_mydetector_faster_rcnn_pytorch
|
python
|
def _get_default_path(self):
'\n \n '
return os.path.join(ROOT_DIR, 'data', 'PASCAL')
|
def gt_roidb(self):
'\n Return the database of ground-truth regions of interest.\n\n This function loads/saves from/to a cache file to speed up future calls.\n '
cache_file = os.path.join(self.cache_path, (self.name + '_gt_roidb.pkl'))
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_pascal_subcategory_exemplar_annotation(index) for index in self.image_index]
if cfg.IS_RPN:
for i in range(1, self.num_classes):
print('{}: Total number of boxes {:d}'.format(self.classes[i], self._num_boxes_all[i]))
print('{}: Number of boxes covered {:d}'.format(self.classes[i], self._num_boxes_covered[i]))
print('{}: Recall {:f}'.format(self.classes[i], (float(self._num_boxes_covered[i]) / float(self._num_boxes_all[i]))))
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
| 2,501,634,246,087,732,700
|
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
|
faster_rcnn/datasets/pascal_voc2.py
|
gt_roidb
|
zjjszj/PS_DM_mydetector_faster_rcnn_pytorch
|
python
|
def gt_roidb(self):
'\n Return the database of ground-truth regions of interest.\n\n This function loads/saves from/to a cache file to speed up future calls.\n '
cache_file = os.path.join(self.cache_path, (self.name + '_gt_roidb.pkl'))
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_pascal_subcategory_exemplar_annotation(index) for index in self.image_index]
if cfg.IS_RPN:
for i in range(1, self.num_classes):
print('{}: Total number of boxes {:d}'.format(self.classes[i], self._num_boxes_all[i]))
print('{}: Number of boxes covered {:d}'.format(self.classes[i], self._num_boxes_covered[i]))
print('{}: Recall {:f}'.format(self.classes[i], (float(self._num_boxes_covered[i]) / float(self._num_boxes_all[i]))))
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
|
def _load_pascal_annotation(self, index):
'\n Load image and bounding boxes info from XML file in the PASCAL VOC\n format.\n '
filename = os.path.join(self._data_path, 'Annotations', (index + '.xml'))
def get_data_from_tag(node, tag):
return node.getElementsByTagName(tag)[0].childNodes[0].data
with open(filename) as f:
data = minidom.parseString(f.read())
objs = data.getElementsByTagName('object')
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros(num_objs, dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
for (ix, obj) in enumerate(objs):
x1 = (float(get_data_from_tag(obj, 'xmin')) - 1)
y1 = (float(get_data_from_tag(obj, 'ymin')) - 1)
x2 = (float(get_data_from_tag(obj, 'xmax')) - 1)
y2 = (float(get_data_from_tag(obj, 'ymax')) - 1)
cls = self._class_to_ind[str(get_data_from_tag(obj, 'name')).lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[(ix, cls)] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
gt_subclasses = np.zeros(num_objs, dtype=np.int32)
gt_subclasses_flipped = np.zeros(num_objs, dtype=np.int32)
subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)
if cfg.IS_RPN:
if cfg.IS_MULTISCALE:
boxes_all = np.zeros((0, 4), dtype=np.float32)
for scale in cfg.TRAIN.SCALES:
boxes_all = np.vstack((boxes_all, (boxes * scale)))
gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
(boxes_grid, _, _) = get_boxes_grid(image_height, image_width)
overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float), boxes_all.astype(np.float))
if (num_objs != 0):
index = np.tile(range(num_objs), len(cfg.TRAIN.SCALES))
max_overlaps = overlaps_grid.max(axis=0)
fg_inds = []
for k in range(1, self.num_classes):
fg_inds.extend(np.where(((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[(k - 1)])))[0])
index_covered = np.unique(index[fg_inds])
for i in range(self.num_classes):
self._num_boxes_all[i] += len(np.where((gt_classes == i))[0])
self._num_boxes_covered[i] += len(np.where((gt_classes[index_covered] == i))[0])
else:
assert (len(cfg.TRAIN.SCALES_BASE) == 1)
scale = cfg.TRAIN.SCALES_BASE[0]
feat_stride = 16
anchors = generate_anchors()
num_anchors = anchors.shape[0]
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
height = np.round(((((image_height * scale) - 1) / 4.0) + 1))
height = np.floor(((((height - 1) / 2) + 1) + 0.5))
height = np.floor(((((height - 1) / 2) + 1) + 0.5))
width = np.round(((((image_width * scale) - 1) / 4.0) + 1))
width = np.floor(((((width - 1) / 2.0) + 1) + 0.5))
width = np.floor(((((width - 1) / 2.0) + 1) + 0.5))
gt_boxes = (boxes * scale)
shift_x = (np.arange(0, width) * feat_stride)
shift_y = (np.arange(0, height) * feat_stride)
(shift_x, shift_y) = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
A = num_anchors
K = shifts.shape[0]
all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape(((K * A), 4))
overlaps_grid = bbox_overlaps(all_anchors.astype(np.float), gt_boxes.astype(np.float))
if (num_objs != 0):
max_overlaps = overlaps_grid.max(axis=0)
fg_inds = []
for k in range(1, self.num_classes):
fg_inds.extend(np.where(((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[(k - 1)])))[0])
for i in range(self.num_classes):
self._num_boxes_all[i] += len(np.where((gt_classes == i))[0])
self._num_boxes_covered[i] += len(np.where((gt_classes[fg_inds] == i))[0])
return {'boxes': boxes, 'gt_classes': gt_classes, 'gt_subclasses': gt_subclasses, 'gt_subclasses_flipped': gt_subclasses_flipped, 'gt_overlaps': overlaps, 'gt_subindexes': subindexes, 'gt_subindexes_flipped': subindexes_flipped, 'flipped': False}
| -1,646,110,813,317,172,000
|
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
|
faster_rcnn/datasets/pascal_voc2.py
|
_load_pascal_annotation
|
zjjszj/PS_DM_mydetector_faster_rcnn_pytorch
|
python
|
def _load_pascal_annotation(self, index):
    """
    Load image and bounding boxes info from XML file in the PASCAL VOC
    format.

    :param index: image identifier (stem of the annotation XML file name)
    :return: roidb entry dict with boxes, gt classes/subclasses, a sparse
        class-overlap matrix and subclass index matrices for the image
    """
    filename = os.path.join(self._data_path, 'Annotations', (index + '.xml'))

    def get_data_from_tag(node, tag):
        # Text of the first child node of the first element named `tag`.
        return node.getElementsByTagName(tag)[0].childNodes[0].data
    with open(filename) as f:
        data = minidom.parseString(f.read())
    objs = data.getElementsByTagName('object')
    num_objs = len(objs)
    boxes = np.zeros((num_objs, 4), dtype=np.uint16)
    gt_classes = np.zeros(num_objs, dtype=np.int32)
    overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
    for (ix, obj) in enumerate(objs):
        # VOC coordinates are 1-based; convert to 0-based pixel indices.
        x1 = (float(get_data_from_tag(obj, 'xmin')) - 1)
        y1 = (float(get_data_from_tag(obj, 'ymin')) - 1)
        x2 = (float(get_data_from_tag(obj, 'xmax')) - 1)
        y2 = (float(get_data_from_tag(obj, 'ymax')) - 1)
        cls = self._class_to_ind[str(get_data_from_tag(obj, 'name')).lower().strip()]
        boxes[ix, :] = [x1, y1, x2, y2]
        gt_classes[ix] = cls
        overlaps[(ix, cls)] = 1.0
    overlaps = scipy.sparse.csr_matrix(overlaps)
    # Plain VOC has no subcategory labels; keep zero-filled placeholders so
    # the entry schema matches the subcategory-exemplar loader.
    gt_subclasses = np.zeros(num_objs, dtype=np.int32)
    gt_subclasses_flipped = np.zeros(num_objs, dtype=np.int32)
    subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
    subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)
    if cfg.IS_RPN:
        if cfg.IS_MULTISCALE:
            # Replicate the GT boxes at every training scale and record which
            # objects are covered by the fixed output grid.
            boxes_all = np.zeros((0, 4), dtype=np.float32)
            for scale in cfg.TRAIN.SCALES:
                boxes_all = np.vstack((boxes_all, (boxes * scale)))
            gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))
            s = PIL.Image.open(self.image_path_from_index(index)).size
            image_height = s[1]
            image_width = s[0]
            (boxes_grid, _, _) = get_boxes_grid(image_height, image_width)
            # np.float was removed in NumPy 1.24; use np.float64 explicitly.
            overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float64), boxes_all.astype(np.float64))
            if (num_objs != 0):
                # Renamed from `index` to avoid shadowing the image-index parameter.
                obj_index = np.tile(range(num_objs), len(cfg.TRAIN.SCALES))
                max_overlaps = overlaps_grid.max(axis=0)
                fg_inds = []
                for k in range(1, self.num_classes):
                    fg_inds.extend(np.where(((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[(k - 1)])))[0])
                index_covered = np.unique(obj_index[fg_inds])
                for i in range(self.num_classes):
                    self._num_boxes_all[i] += len(np.where((gt_classes == i))[0])
                    self._num_boxes_covered[i] += len(np.where((gt_classes[index_covered] == i))[0])
        else:
            assert (len(cfg.TRAIN.SCALES_BASE) == 1)
            scale = cfg.TRAIN.SCALES_BASE[0]
            feat_stride = 16
            anchors = generate_anchors()
            num_anchors = anchors.shape[0]
            s = PIL.Image.open(self.image_path_from_index(index)).size
            image_height = s[1]
            image_width = s[0]
            # Mirror the backbone's conv/pool arithmetic to get the
            # feature-map size (stride-4 stage followed by two stride-2 pools).
            height = np.round(((((image_height * scale) - 1) / 4.0) + 1))
            height = np.floor(((((height - 1) / 2) + 1) + 0.5))
            height = np.floor(((((height - 1) / 2) + 1) + 0.5))
            width = np.round(((((image_width * scale) - 1) / 4.0) + 1))
            width = np.floor(((((width - 1) / 2.0) + 1) + 0.5))
            width = np.floor(((((width - 1) / 2.0) + 1) + 0.5))
            gt_boxes = (boxes * scale)
            # Enumerate all anchors shifted over the feature map.
            shift_x = (np.arange(0, width) * feat_stride)
            shift_y = (np.arange(0, height) * feat_stride)
            (shift_x, shift_y) = np.meshgrid(shift_x, shift_y)
            shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
            A = num_anchors
            K = shifts.shape[0]
            all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
            all_anchors = all_anchors.reshape(((K * A), 4))
            # np.float was removed in NumPy 1.24; use np.float64 explicitly.
            overlaps_grid = bbox_overlaps(all_anchors.astype(np.float64), gt_boxes.astype(np.float64))
            if (num_objs != 0):
                max_overlaps = overlaps_grid.max(axis=0)
                fg_inds = []
                for k in range(1, self.num_classes):
                    fg_inds.extend(np.where(((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[(k - 1)])))[0])
                for i in range(self.num_classes):
                    self._num_boxes_all[i] += len(np.where((gt_classes == i))[0])
                    self._num_boxes_covered[i] += len(np.where((gt_classes[fg_inds] == i))[0])
    return {'boxes': boxes, 'gt_classes': gt_classes, 'gt_subclasses': gt_subclasses, 'gt_subclasses_flipped': gt_subclasses_flipped, 'gt_overlaps': overlaps, 'gt_subindexes': subindexes, 'gt_subindexes_flipped': subindexes_flipped, 'flipped': False}
|
def _load_pascal_subcategory_exemplar_annotation(self, index):
    """
    Load image and bounding boxes info from txt file in the pascal
    subcategory exemplar format.

    :param index: image identifier (stem of the annotation txt file name)
    :return: roidb entry dict for the image
    """
    if (self._image_set == 'test'):
        # Test split has no exemplar labels; fall back to plain VOC XML.
        return self._load_pascal_annotation(index)
    filename = os.path.join(self._pascal_path, 'subcategory_exemplars', (index + '.txt'))
    assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)
    # Each line: class subcls is_flip x1 y1 x2 y2 ...; subcls == -1 means
    # "ignore". Flipped and unflipped entries must pair up one-to-one.
    lines = []
    lines_flipped = []
    with open(filename) as f:
        for line in f:
            words = line.split()
            subcls = int(words[1])
            is_flip = int(words[2])
            if (subcls != (- 1)):
                if (is_flip == 0):
                    lines.append(line)
                else:
                    lines_flipped.append(line)
    num_objs = len(lines)
    assert (num_objs == len(lines_flipped)), 'The number of flipped objects is not the same!'
    gt_subclasses_flipped = np.zeros(num_objs, dtype=np.int32)
    for (ix, line) in enumerate(lines_flipped):
        words = line.split()
        subcls = int(words[1])
        gt_subclasses_flipped[ix] = subcls
    boxes = np.zeros((num_objs, 4), dtype=np.float32)
    gt_classes = np.zeros(num_objs, dtype=np.int32)
    gt_subclasses = np.zeros(num_objs, dtype=np.int32)
    overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
    subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
    subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)
    for (ix, line) in enumerate(lines):
        words = line.split()
        cls = self._class_to_ind[words[0]]
        subcls = int(words[1])
        # Coordinates are 1-based in the annotation file; make them 0-based.
        boxes[ix, :] = [(float(n) - 1) for n in words[3:7]]
        gt_classes[ix] = cls
        gt_subclasses[ix] = subcls
        overlaps[(ix, cls)] = 1.0
        subindexes[(ix, cls)] = subcls
        subindexes_flipped[(ix, cls)] = gt_subclasses_flipped[ix]
    overlaps = scipy.sparse.csr_matrix(overlaps)
    if cfg.IS_RPN:
        if cfg.IS_MULTISCALE:
            # Replicate the GT boxes at every training scale and record which
            # objects are covered by the fixed output grid.
            boxes_all = np.zeros((0, 4), dtype=np.float32)
            for scale in cfg.TRAIN.SCALES:
                boxes_all = np.vstack((boxes_all, (boxes * scale)))
            gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))
            s = PIL.Image.open(self.image_path_from_index(index)).size
            image_height = s[1]
            image_width = s[0]
            (boxes_grid, _, _) = get_boxes_grid(image_height, image_width)
            # np.float was removed in NumPy 1.24; use np.float64 explicitly.
            overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float64), boxes_all.astype(np.float64))
            if (num_objs != 0):
                # Renamed from `index` to avoid shadowing the image-index parameter.
                obj_index = np.tile(range(num_objs), len(cfg.TRAIN.SCALES))
                max_overlaps = overlaps_grid.max(axis=0)
                fg_inds = []
                for k in range(1, self.num_classes):
                    fg_inds.extend(np.where(((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[(k - 1)])))[0])
                index_covered = np.unique(obj_index[fg_inds])
                for i in range(self.num_classes):
                    self._num_boxes_all[i] += len(np.where((gt_classes == i))[0])
                    self._num_boxes_covered[i] += len(np.where((gt_classes[index_covered] == i))[0])
        else:
            assert (len(cfg.TRAIN.SCALES_BASE) == 1)
            scale = cfg.TRAIN.SCALES_BASE[0]
            feat_stride = 16
            # Custom anchor configuration for the subcategory exemplars.
            base_size = 16
            ratios = [3.0, 2.0, 1.5, 1.0, 0.75, 0.5, 0.25]
            scales = (2 ** np.arange(1, 6, 0.5))
            anchors = generate_anchors(base_size, ratios, scales)
            num_anchors = anchors.shape[0]
            s = PIL.Image.open(self.image_path_from_index(index)).size
            image_height = s[1]
            image_width = s[0]
            # Mirror the backbone's conv/pool arithmetic to get the
            # feature-map size (stride-4 stage followed by two stride-2 pools).
            height = np.round(((((image_height * scale) - 1) / 4.0) + 1))
            height = np.floor(((((height - 1) / 2) + 1) + 0.5))
            height = np.floor(((((height - 1) / 2) + 1) + 0.5))
            width = np.round(((((image_width * scale) - 1) / 4.0) + 1))
            width = np.floor(((((width - 1) / 2.0) + 1) + 0.5))
            width = np.floor(((((width - 1) / 2.0) + 1) + 0.5))
            gt_boxes = (boxes * scale)
            # Enumerate all anchors shifted over the feature map.
            shift_x = (np.arange(0, width) * feat_stride)
            shift_y = (np.arange(0, height) * feat_stride)
            (shift_x, shift_y) = np.meshgrid(shift_x, shift_y)
            shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
            A = num_anchors
            K = shifts.shape[0]
            all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
            all_anchors = all_anchors.reshape(((K * A), 4))
            # np.float was removed in NumPy 1.24; use np.float64 explicitly.
            overlaps_grid = bbox_overlaps(all_anchors.astype(np.float64), gt_boxes.astype(np.float64))
            if (num_objs != 0):
                max_overlaps = overlaps_grid.max(axis=0)
                fg_inds = []
                for k in range(1, self.num_classes):
                    fg_inds.extend(np.where(((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[(k - 1)])))[0])
                for i in range(self.num_classes):
                    self._num_boxes_all[i] += len(np.where((gt_classes == i))[0])
                    self._num_boxes_covered[i] += len(np.where((gt_classes[fg_inds] == i))[0])
    return {'boxes': boxes, 'gt_classes': gt_classes, 'gt_subclasses': gt_subclasses, 'gt_subclasses_flipped': gt_subclasses_flipped, 'gt_overlaps': overlaps, 'gt_subindexes': subindexes, 'gt_subindexes_flipped': subindexes_flipped, 'flipped': False}
| 2,134,446,988,790,855,400
|
Load image and bounding boxes info from txt file in the pascal subcategory exemplar format.
|
faster_rcnn/datasets/pascal_voc2.py
|
_load_pascal_subcategory_exemplar_annotation
|
zjjszj/PS_DM_mydetector_faster_rcnn_pytorch
|
python
|
def _load_pascal_subcategory_exemplar_annotation(self, index):
'\n \n '
if (self._image_set == 'test'):
return self._load_pascal_annotation(index)
filename = os.path.join(self._pascal_path, 'subcategory_exemplars', (index + '.txt'))
assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)
lines = []
lines_flipped = []
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[1])
is_flip = int(words[2])
if (subcls != (- 1)):
if (is_flip == 0):
lines.append(line)
else:
lines_flipped.append(line)
num_objs = len(lines)
assert (num_objs == len(lines_flipped)), 'The number of flipped objects is not the same!'
gt_subclasses_flipped = np.zeros(num_objs, dtype=np.int32)
for (ix, line) in enumerate(lines_flipped):
words = line.split()
subcls = int(words[1])
gt_subclasses_flipped[ix] = subcls
boxes = np.zeros((num_objs, 4), dtype=np.float32)
gt_classes = np.zeros(num_objs, dtype=np.int32)
gt_subclasses = np.zeros(num_objs, dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)
for (ix, line) in enumerate(lines):
words = line.split()
cls = self._class_to_ind[words[0]]
subcls = int(words[1])
boxes[ix, :] = [(float(n) - 1) for n in words[3:7]]
gt_classes[ix] = cls
gt_subclasses[ix] = subcls
overlaps[(ix, cls)] = 1.0
subindexes[(ix, cls)] = subcls
subindexes_flipped[(ix, cls)] = gt_subclasses_flipped[ix]
overlaps = scipy.sparse.csr_matrix(overlaps)
if cfg.IS_RPN:
if cfg.IS_MULTISCALE:
boxes_all = np.zeros((0, 4), dtype=np.float32)
for scale in cfg.TRAIN.SCALES:
boxes_all = np.vstack((boxes_all, (boxes * scale)))
gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
(boxes_grid, _, _) = get_boxes_grid(image_height, image_width)
overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float), boxes_all.astype(np.float))
if (num_objs != 0):
index = np.tile(range(num_objs), len(cfg.TRAIN.SCALES))
max_overlaps = overlaps_grid.max(axis=0)
fg_inds = []
for k in range(1, self.num_classes):
fg_inds.extend(np.where(((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[(k - 1)])))[0])
index_covered = np.unique(index[fg_inds])
for i in range(self.num_classes):
self._num_boxes_all[i] += len(np.where((gt_classes == i))[0])
self._num_boxes_covered[i] += len(np.where((gt_classes[index_covered] == i))[0])
else:
assert (len(cfg.TRAIN.SCALES_BASE) == 1)
scale = cfg.TRAIN.SCALES_BASE[0]
feat_stride = 16
base_size = 16
ratios = [3.0, 2.0, 1.5, 1.0, 0.75, 0.5, 0.25]
scales = (2 ** np.arange(1, 6, 0.5))
anchors = generate_anchors(base_size, ratios, scales)
num_anchors = anchors.shape[0]
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
height = np.round(((((image_height * scale) - 1) / 4.0) + 1))
height = np.floor(((((height - 1) / 2) + 1) + 0.5))
height = np.floor(((((height - 1) / 2) + 1) + 0.5))
width = np.round(((((image_width * scale) - 1) / 4.0) + 1))
width = np.floor(((((width - 1) / 2.0) + 1) + 0.5))
width = np.floor(((((width - 1) / 2.0) + 1) + 0.5))
gt_boxes = (boxes * scale)
shift_x = (np.arange(0, width) * feat_stride)
shift_y = (np.arange(0, height) * feat_stride)
(shift_x, shift_y) = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
A = num_anchors
K = shifts.shape[0]
all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape(((K * A), 4))
overlaps_grid = bbox_overlaps(all_anchors.astype(np.float), gt_boxes.astype(np.float))
if (num_objs != 0):
max_overlaps = overlaps_grid.max(axis=0)
fg_inds = []
for k in range(1, self.num_classes):
fg_inds.extend(np.where(((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[(k - 1)])))[0])
for i in range(self.num_classes):
self._num_boxes_all[i] += len(np.where((gt_classes == i))[0])
self._num_boxes_covered[i] += len(np.where((gt_classes[fg_inds] == i))[0])
return {'boxes': boxes, 'gt_classes': gt_classes, 'gt_subclasses': gt_subclasses, 'gt_subclasses_flipped': gt_subclasses_flipped, 'gt_overlaps': overlaps, 'gt_subindexes': subindexes, 'gt_subindexes_flipped': subindexes_flipped, 'flipped': False}
|
def region_proposal_roidb(self):
    """
    Return the database of regions of interest.
    Ground-truth ROIs are also included.

    This function loads/saves from/to a cache file to speed up future calls.
    """
    cache_file = os.path.join(
        self.cache_path,
        self.name + '_' + cfg.REGION_PROPOSAL + '_region_proposal_roidb.pkl')
    # Fast path: reuse a previously pickled roidb when available.
    if os.path.exists(cache_file):
        with open(cache_file, 'rb') as fid:
            cached = cPickle.load(fid)
        print('{} roidb loaded from {}'.format(self.name, cache_file))
        return cached
    model = cfg.REGION_PROPOSAL
    if self._image_set == 'test':
        # No ground truth on the test split: proposals only.
        print('Loading region proposal network boxes...')
        roidb = self._load_rpn_roidb(None, model)
        print('Region proposal network boxes loaded')
    else:
        gt_roidb = self.gt_roidb()
        print('Loading region proposal network boxes...')
        rpn_roidb = self._load_rpn_roidb(gt_roidb, model)
        print('Region proposal network boxes loaded')
        roidb = imdb.merge_roidbs(rpn_roidb, gt_roidb)
    print('{} region proposals per image'.format((self._num_boxes_proposal / len(self.image_index))))
    with open(cache_file, 'wb') as fid:
        cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
    print('wrote roidb to {}'.format(cache_file))
    return roidb
| -1,463,459,393,962,246,000
|
Return the database of regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
|
faster_rcnn/datasets/pascal_voc2.py
|
region_proposal_roidb
|
zjjszj/PS_DM_mydetector_faster_rcnn_pytorch
|
python
|
def region_proposal_roidb(self):
'\n Return the database of regions of interest.\n Ground-truth ROIs are also included.\n\n This function loads/saves from/to a cache file to speed up future calls.\n '
cache_file = os.path.join(self.cache_path, (((self.name + '_') + cfg.REGION_PROPOSAL) + '_region_proposal_roidb.pkl'))
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print('{} roidb loaded from {}'.format(self.name, cache_file))
return roidb
if (self._image_set != 'test'):
gt_roidb = self.gt_roidb()
print('Loading region proposal network boxes...')
model = cfg.REGION_PROPOSAL
rpn_roidb = self._load_rpn_roidb(gt_roidb, model)
print('Region proposal network boxes loaded')
roidb = imdb.merge_roidbs(rpn_roidb, gt_roidb)
else:
print('Loading region proposal network boxes...')
model = cfg.REGION_PROPOSAL
roidb = self._load_rpn_roidb(None, model)
print('Region proposal network boxes loaded')
print('{} region proposals per image'.format((self._num_boxes_proposal / len(self.image_index))))
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print('wrote roidb to {}'.format(cache_file))
return roidb
|
def selective_search_roidb(self):
    """
    Return the database of selective search regions of interest.
    Ground-truth ROIs are also included.

    This function loads/saves from/to a cache file to speed up future calls.
    """
    cache_file = os.path.join(self.cache_path, self.name + '_selective_search_roidb.pkl')
    # Fast path: reuse a previously pickled roidb when available.
    if os.path.exists(cache_file):
        with open(cache_file, 'rb') as fid:
            cached = cPickle.load(fid)
        print('{} ss roidb loaded from {}'.format(self.name, cache_file))
        return cached
    # VOC 2007 ships test-set annotations, so ground truth can be merged in;
    # for other years only the non-test splits have it.
    use_gt = (int(self._year) == 2007) or (self._image_set != 'test')
    if use_gt:
        gt_roidb = self.gt_roidb()
        ss_roidb = self._load_selective_search_roidb(gt_roidb)
        roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
    else:
        roidb = self._load_selective_search_roidb(None)
    with open(cache_file, 'wb') as fid:
        cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
    print('wrote ss roidb to {}'.format(cache_file))
    return roidb
| 8,759,385,287,031,027,000
|
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
|
faster_rcnn/datasets/pascal_voc2.py
|
selective_search_roidb
|
zjjszj/PS_DM_mydetector_faster_rcnn_pytorch
|
python
|
def selective_search_roidb(self):
'\n Return the database of selective search regions of interest.\n Ground-truth ROIs are also included.\n\n This function loads/saves from/to a cache file to speed up future calls.\n '
cache_file = os.path.join(self.cache_path, (self.name + '_selective_search_roidb.pkl'))
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print('{} ss roidb loaded from {}'.format(self.name, cache_file))
return roidb
if ((int(self._year) == 2007) or (self._image_set != 'test')):
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
else:
roidb = self._load_selective_search_roidb(None)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print('wrote ss roidb to {}'.format(cache_file))
return roidb
|
def selective_search_IJCV_roidb(self):
    """
    Return the database of selective search regions of interest.
    Ground-truth ROIs are also included.

    This function loads/saves from/to a cache file to speed up future calls.
    """
    cache_file = os.path.join(
        self.cache_path,
        '{:s}_selective_search_IJCV_top_{:d}_roidb.pkl'.format(self.name, self.config['top_k']))
    # Fast path: reuse a previously pickled roidb when available.
    if os.path.exists(cache_file):
        with open(cache_file, 'rb') as fid:
            cached = cPickle.load(fid)
        print('{} ss roidb loaded from {}'.format(self.name, cache_file))
        return cached
    gt_roidb = self.gt_roidb()
    ss_roidb = self._load_selective_search_IJCV_roidb(gt_roidb)
    merged = imdb.merge_roidbs(gt_roidb, ss_roidb)
    with open(cache_file, 'wb') as fid:
        cPickle.dump(merged, fid, cPickle.HIGHEST_PROTOCOL)
    print('wrote ss roidb to {}'.format(cache_file))
    return merged
| 8,391,850,426,552,893,000
|
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
|
faster_rcnn/datasets/pascal_voc2.py
|
selective_search_IJCV_roidb
|
zjjszj/PS_DM_mydetector_faster_rcnn_pytorch
|
python
|
def selective_search_IJCV_roidb(self):
'\n Return the database of selective search regions of interest.\n Ground-truth ROIs are also included.\n\n This function loads/saves from/to a cache file to speed up future calls.\n '
cache_file = os.path.join(self.cache_path, '{:s}_selective_search_IJCV_top_{:d}_roidb.pkl'.format(self.name, self.config['top_k']))
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print('{} ss roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_IJCV_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print('wrote ss roidb to {}'.format(cache_file))
return roidb
|
def run(args: 'argparse.Namespace', name: str, runtime_cls, envs: Dict[(str, str)], is_started: Union[('multiprocessing.Event', 'threading.Event')], is_shutdown: Union[('multiprocessing.Event', 'threading.Event')], is_ready: Union[('multiprocessing.Event', 'threading.Event')], cancel_event: Union[('multiprocessing.Event', 'threading.Event')]):
    "Method representing the :class:`BaseRuntime` activity.\n\n This method is the target for the Pea's `thread` or `process`\n\n .. note::\n :meth:`run` is running in subprocess/thread, the exception can not be propagated to the main process.\n Hence, please do not raise any exception here.\n\n .. note::\n Please note that env variables are process-specific. Subprocess inherits envs from\n the main process. But Subprocess's envs do NOT affect the main process. It does NOT\n mess up user local system envs.\n\n .. warning::\n If you are using ``thread`` as backend, envs setting will likely be overidden by others\n\n :param args: namespace args from the Pea\n :param name: name of the Pea to have proper logging\n :param runtime_cls: the runtime class to instantiate\n :param envs: a dictionary of environment variables to be set in the new Process\n :param is_started: concurrency event to communicate runtime is properly started. Used for better logging\n :param is_shutdown: concurrency event to communicate runtime is terminated\n :param is_ready: concurrency event to communicate runtime is ready to receive messages\n :param cancel_event: concurrency event to receive cancelling signal from the Pea. Needed by some runtimes\n "
    logger = JinaLogger(name, **vars(args))
    def _unset_envs():
        # Remove the env vars set below; skipped for the thread backend since
        # env vars are process-wide and would affect sibling threads.
        # NOTE(review): os.unsetenv does not update the os.environ mapping —
        # confirm whether os.environ.pop(k, None) was intended here.
        if (envs and (args.runtime_backend != RuntimeBackendType.THREAD)):
            for k in envs.keys():
                os.unsetenv(k)
    def _set_envs():
        # Apply user-provided env vars to this (sub)process only.
        if args.env:
            if (args.runtime_backend == RuntimeBackendType.THREAD):
                logger.warning('environment variables should not be set when runtime="thread".')
            else:
                os.environ.update({k: str(v) for (k, v) in envs.items()})
    try:
        _set_envs()
        # Construct the runtime; any failure here is reported via logging
        # because exceptions cannot propagate to the parent process.
        runtime = runtime_cls(args=args, cancel_event=cancel_event)
    except Exception as ex:
        logger.error(((f'{ex!r} during {runtime_cls!r} initialization' + f'''
add "--quiet-error" to suppress the exception details''') if (not args.quiet_error) else ''), exc_info=(not args.quiet_error))
    else:
        # Construction succeeded: signal "started", then "ready" once the
        # runtime context is entered, and block in the serve loop.
        is_started.set()
        with runtime:
            is_ready.set()
            runtime.run_forever()
    finally:
        # Always clean up env vars and signal shutdown, even on failure.
        _unset_envs()
        is_shutdown.set()
| 8,911,172,289,690,000,000
|
Method representing the :class:`BaseRuntime` activity.
This method is the target for the Pea's `thread` or `process`
.. note::
:meth:`run` is running in subprocess/thread, the exception can not be propagated to the main process.
Hence, please do not raise any exception here.
.. note::
Please note that env variables are process-specific. Subprocess inherits envs from
the main process. But Subprocess's envs do NOT affect the main process. It does NOT
mess up user local system envs.
.. warning::
If you are using ``thread`` as backend, envs setting will likely be overidden by others
:param args: namespace args from the Pea
:param name: name of the Pea to have proper logging
:param runtime_cls: the runtime class to instantiate
:param envs: a dictionary of environment variables to be set in the new Process
:param is_started: concurrency event to communicate runtime is properly started. Used for better logging
:param is_shutdown: concurrency event to communicate runtime is terminated
:param is_ready: concurrency event to communicate runtime is ready to receive messages
:param cancel_event: concurrency event to receive cancelling signal from the Pea. Needed by some runtimes
|
jina/peapods/peas/__init__.py
|
run
|
MaxielMrvaljevic/jina
|
python
|
def run(args: 'argparse.Namespace', name: str, runtime_cls, envs: Dict[(str, str)], is_started: Union[('multiprocessing.Event', 'threading.Event')], is_shutdown: Union[('multiprocessing.Event', 'threading.Event')], is_ready: Union[('multiprocessing.Event', 'threading.Event')], cancel_event: Union[('multiprocessing.Event', 'threading.Event')]):
"Method representing the :class:`BaseRuntime` activity.\n\n This method is the target for the Pea's `thread` or `process`\n\n .. note::\n :meth:`run` is running in subprocess/thread, the exception can not be propagated to the main process.\n Hence, please do not raise any exception here.\n\n .. note::\n Please note that env variables are process-specific. Subprocess inherits envs from\n the main process. But Subprocess's envs do NOT affect the main process. It does NOT\n mess up user local system envs.\n\n .. warning::\n If you are using ``thread`` as backend, envs setting will likely be overidden by others\n\n :param args: namespace args from the Pea\n :param name: name of the Pea to have proper logging\n :param runtime_cls: the runtime class to instantiate\n :param envs: a dictionary of environment variables to be set in the new Process\n :param is_started: concurrency event to communicate runtime is properly started. Used for better logging\n :param is_shutdown: concurrency event to communicate runtime is terminated\n :param is_ready: concurrency event to communicate runtime is ready to receive messages\n :param cancel_event: concurrency event to receive cancelling signal from the Pea. Needed by some runtimes\n "
logger = JinaLogger(name, **vars(args))
def _unset_envs():
if (envs and (args.runtime_backend != RuntimeBackendType.THREAD)):
for k in envs.keys():
os.unsetenv(k)
def _set_envs():
if args.env:
if (args.runtime_backend == RuntimeBackendType.THREAD):
logger.warning('environment variables should not be set when runtime="thread".')
else:
os.environ.update({k: str(v) for (k, v) in envs.items()})
try:
_set_envs()
runtime = runtime_cls(args=args, cancel_event=cancel_event)
except Exception as ex:
        logger.error(((f'{ex!r} during {runtime_cls!r} initialization' + f'''
add "--quiet-error" to suppress the exception details''') if (not args.quiet_error) else ''), exc_info=(not args.quiet_error))
else:
is_started.set()
with runtime:
is_ready.set()
runtime.run_forever()
finally:
_unset_envs()
is_shutdown.set()
|
def _set_ctrl_adrr(self):
'Sets control address for different runtimes'
self.runtime_ctrl_address = self.runtime_cls.get_control_address(host=self.args.host, port=self.args.port_ctrl, docker_kwargs=getattr(self.args, 'docker_kwargs', None))
if (not self.runtime_ctrl_address):
self.runtime_ctrl_address = f'{self.args.host}:{self.args.port_in}'
| 5,640,064,089,109,753,000
|
Sets control address for different runtimes
|
jina/peapods/peas/__init__.py
|
_set_ctrl_adrr
|
MaxielMrvaljevic/jina
|
python
|
def _set_ctrl_adrr(self):
self.runtime_ctrl_address = self.runtime_cls.get_control_address(host=self.args.host, port=self.args.port_ctrl, docker_kwargs=getattr(self.args, 'docker_kwargs', None))
if (not self.runtime_ctrl_address):
self.runtime_ctrl_address = f'{self.args.host}:{self.args.port_in}'
|
def start(self):
    """Launch the Pea's worker (thread or process), optionally blocking
    until it reports readiness.
    .. #noqa: DAR201
    """
    self.worker.start()
    block_on_start = not self.args.noblock_on_start
    if block_on_start:
        self.wait_start_success()
    return self
| -4,257,458,394,315,266,000
|
Start the Pea.
This method calls :meth:`start` in :class:`threading.Thread` or :class:`multiprocesssing.Process`.
.. #noqa: DAR201
|
jina/peapods/peas/__init__.py
|
start
|
MaxielMrvaljevic/jina
|
python
|
def start(self):
'Start the Pea.\n This method calls :meth:`start` in :class:`threading.Thread` or :class:`multiprocesssing.Process`.\n .. #noqa: DAR201\n '
self.worker.start()
if (not self.args.noblock_on_start):
self.wait_start_success()
return self
|
def join(self, *args, **kwargs):
    """Wait for the underlying worker (thread or process) to finish.

    :param args: extra positional arguments to pass to join
    :param kwargs: extra keyword arguments to pass to join
    """
    worker = self.worker
    worker.join(*args, **kwargs)
| -4,591,021,977,245,066,000
|
Joins the Pea.
This method calls :meth:`join` in :class:`threading.Thread` or :class:`multiprocesssing.Process`.
:param args: extra positional arguments to pass to join
:param kwargs: extra keyword arguments to pass to join
|
jina/peapods/peas/__init__.py
|
join
|
MaxielMrvaljevic/jina
|
python
|
def join(self, *args, **kwargs):
'Joins the Pea.\n This method calls :meth:`join` in :class:`threading.Thread` or :class:`multiprocesssing.Process`.\n\n :param args: extra positional arguments to pass to join\n :param kwargs: extra keyword arguments to pass to join\n '
self.worker.join(*args, **kwargs)
|
def terminate(self):
    """Terminate the worker when the backend exposes ``terminate``
    (processes do; threads do not).
    """
    term = getattr(self.worker, 'terminate', None)
    if term is not None:
        term()
| -1,042,618,979,188,756,700
|
Terminate the Pea.
This method calls :meth:`terminate` in :class:`threading.Thread` or :class:`multiprocesssing.Process`.
|
jina/peapods/peas/__init__.py
|
terminate
|
MaxielMrvaljevic/jina
|
python
|
def terminate(self):
'Terminate the Pea.\n This method calls :meth:`terminate` in :class:`threading.Thread` or :class:`multiprocesssing.Process`.\n '
if hasattr(self.worker, 'terminate'):
self.worker.terminate()
|
def activate_runtime(self):
    """Send activate control message."""
    activate_kwargs = dict(
        logger=self.logger,
        socket_in_type=self.args.socket_in,
        control_address=self.runtime_ctrl_address,
        timeout_ctrl=self._timeout_ctrl,
    )
    self.runtime_cls.activate(**activate_kwargs)
| -2,365,994,081,478,579,000
|
Send activate control message.
|
jina/peapods/peas/__init__.py
|
activate_runtime
|
MaxielMrvaljevic/jina
|
python
|
def activate_runtime(self):
' '
self.runtime_cls.activate(logger=self.logger, socket_in_type=self.args.socket_in, control_address=self.runtime_ctrl_address, timeout_ctrl=self._timeout_ctrl)
|
def _cancel_runtime(self, skip_deactivate: bool=False):
'\n Send terminate control message.\n\n :param skip_deactivate: Mark that the DEACTIVATE signal may be missed if set to True\n '
self.runtime_cls.cancel(cancel_event=self.cancel_event, logger=self.logger, socket_in_type=self.args.socket_in, control_address=self.runtime_ctrl_address, timeout_ctrl=self._timeout_ctrl, skip_deactivate=skip_deactivate)
| -7,147,230,928,784,672,000
|
Send terminate control message.
:param skip_deactivate: Mark that the DEACTIVATE signal may be missed if set to True
|
jina/peapods/peas/__init__.py
|
_cancel_runtime
|
MaxielMrvaljevic/jina
|
python
|
def _cancel_runtime(self, skip_deactivate: bool=False):
'\n Send terminate control message.\n\n :param skip_deactivate: Mark that the DEACTIVATE signal may be missed if set to True\n '
self.runtime_cls.cancel(cancel_event=self.cancel_event, logger=self.logger, socket_in_type=self.args.socket_in, control_address=self.runtime_ctrl_address, timeout_ctrl=self._timeout_ctrl, skip_deactivate=skip_deactivate)
|
def _wait_for_ready_or_shutdown(self, timeout: Optional[float]):
'\n Waits for the process to be ready or to know it has failed.\n\n :param timeout: The time to wait before readiness or failure is determined\n .. # noqa: DAR201\n '
return self.runtime_cls.wait_for_ready_or_shutdown(timeout=timeout, ready_or_shutdown_event=self.ready_or_shutdown.event, ctrl_address=self.runtime_ctrl_address, timeout_ctrl=self._timeout_ctrl, shutdown_event=self.is_shutdown)
| -6,739,447,112,481,603,000
|
Waits for the process to be ready or to know it has failed.
:param timeout: The time to wait before readiness or failure is determined
.. # noqa: DAR201
|
jina/peapods/peas/__init__.py
|
_wait_for_ready_or_shutdown
|
MaxielMrvaljevic/jina
|
python
|
def _wait_for_ready_or_shutdown(self, timeout: Optional[float]):
'\n Waits for the process to be ready or to know it has failed.\n\n :param timeout: The time to wait before readiness or failure is determined\n .. # noqa: DAR201\n '
return self.runtime_cls.wait_for_ready_or_shutdown(timeout=timeout, ready_or_shutdown_event=self.ready_or_shutdown.event, ctrl_address=self.runtime_ctrl_address, timeout_ctrl=self._timeout_ctrl, shutdown_event=self.is_shutdown)
|
def wait_start_success(self):
    """Block until all peas start successfully.

    If startup does not succeed, an error is raised for the outer caller to catch.
    """
    # a non-positive --timeout-ready means "wait forever" (None)
    wait_s = None if self.args.timeout_ready <= 0 else self.args.timeout_ready / 1000.0
    if not self._wait_for_ready_or_shutdown(wait_s):
        # neither readiness nor shutdown arrived in time: clean up and fail loudly
        wait_s = wait_s or -1
        self.logger.warning(
            f'{self.runtime_cls!r} timeout after waiting for {self.args.timeout_ready}ms, if your executor takes time to load, you may increase --timeout-ready'
        )
        self.close()
        raise TimeoutError(
            f'{typename(self)}:{self.name} can not be initialized after {wait_s * 1000.0}ms'
        )
    if not self.is_shutdown.is_set():
        # happy path: runtime is up and running
        self.logger.success(__ready_msg__)
        return
    # the runtime shut down already: distinguish "never started" from "died early"
    if not self.is_started.is_set():
        raise RuntimeFailToStart
    raise RuntimeRunForeverEarlyError
| 5,697,993,069,352,098,000
|
Block until all peas starts successfully.
If not success, it will raise an error hoping the outer function to catch it
|
jina/peapods/peas/__init__.py
|
wait_start_success
|
MaxielMrvaljevic/jina
|
python
|
def wait_start_success(self):
'Block until all peas starts successfully.\n\n If not success, it will raise an error hoping the outer function to catch it\n '
_timeout = self.args.timeout_ready
if (_timeout <= 0):
_timeout = None
else:
_timeout /= 1000.0
if self._wait_for_ready_or_shutdown(_timeout):
if self.is_shutdown.is_set():
if (not self.is_started.is_set()):
raise RuntimeFailToStart
else:
raise RuntimeRunForeverEarlyError
else:
self.logger.success(__ready_msg__)
else:
_timeout = (_timeout or (- 1))
self.logger.warning(f'{self.runtime_cls!r} timeout after waiting for {self.args.timeout_ready}ms, if your executor takes time to load, you may increase --timeout-ready')
self.close()
raise TimeoutError(f'{typename(self)}:{self.name} can not be initialized after {(_timeout * 1000.0)}ms')
|
@property
def _is_dealer(self):
    """Whether this `Pea` must act as a Dealer responding to a Router.

    .. # noqa: DAR201
    """
    dealer_socket = SocketType.DEALER_CONNECT
    return self.args.socket_in == dealer_socket
| 7,485,969,000,715,377,000
|
Return true if this `Pea` must act as a Dealer responding to a Router
.. # noqa: DAR201
|
jina/peapods/peas/__init__.py
|
_is_dealer
|
MaxielMrvaljevic/jina
|
python
|
@property
def _is_dealer(self):
'Return true if this `Pea` must act as a Dealer responding to a Router\n .. # noqa: DAR201\n '
return (self.args.socket_in == SocketType.DEALER_CONNECT)
|
def close(self) -> None:
    """Close the Pea.

    Makes sure the backing `Process/thread` is properly finished and its
    resources properly released, whatever state startup reached.
    """
    self.logger.debug('waiting for ready or shutdown signal from runtime')
    # Normal case: the runtime reported ready and has not shut down yet.
    if (self.is_ready.is_set() and (not self.is_shutdown.is_set())):
        try:
            # Ask the runtime to terminate gracefully first.
            self._cancel_runtime()
            if (not self.is_shutdown.wait(timeout=self._timeout_ctrl)):
                # Graceful shutdown did not happen in time: force-kill and report.
                self.terminate()
                time.sleep(0.1)
                raise Exception(f'Shutdown signal was not received for {self._timeout_ctrl}')
        except Exception as ex:
            self.logger.error(((f'{ex!r} during {self.close!r}' + f'''
add "--quiet-error" to suppress the exception details''') if (not self.args.quiet_error) else ''), exc_info=(not self.args.quiet_error))
        # Only join non-daemon peas; daemon peas are left to die with the parent.
        if (not self.args.daemon):
            self.join()
    elif self.is_shutdown.is_set():
        # The runtime already shut down on its own; nothing left to do.
        pass
    else:
        # Pea is being closed before it ever became ready (likely another Pea
        # in the Flow/Pod failed): wait briefly, then shut down or force-kill.
        self.logger.warning('Pea is being closed before being ready. Most likely some other Pea in the Flow or Pod failed to start')
        _timeout = self.args.timeout_ready
        # a non-positive --timeout-ready means "wait forever" (None)
        if (_timeout <= 0):
            _timeout = None
        else:
            _timeout /= 1000.0
        self.logger.debug('waiting for ready or shutdown signal from runtime')
        if self._wait_for_ready_or_shutdown(_timeout):
            if (not self.is_shutdown.is_set()):
                # It became ready after all: cancel it, tolerating a missed
                # DEACTIVATE signal at this stage.
                self._cancel_runtime(skip_deactivate=True)
                if (not self.is_shutdown.wait(timeout=self._timeout_ctrl)):
                    self.terminate()
                    time.sleep(0.1)
                    raise Exception(f'Shutdown signal was not received for {self._timeout_ctrl}')
        else:
            self.logger.warning('Terminating process after waiting for readiness signal for graceful shutdown')
            self.terminate()
            time.sleep(0.1)
    self.logger.debug(__stop_msg__)
    self.logger.close()
| 7,289,301,697,291,670,000
|
Close the Pea
This method makes sure that the `Process/thread` is properly finished and its resources properly released
|
jina/peapods/peas/__init__.py
|
close
|
MaxielMrvaljevic/jina
|
python
|
def close(self) -> None:
'Close the Pea\n\n This method makes sure that the `Process/thread` is properly finished and its resources properly released\n '
self.logger.debug('waiting for ready or shutdown signal from runtime')
if (self.is_ready.is_set() and (not self.is_shutdown.is_set())):
try:
self._cancel_runtime()
if (not self.is_shutdown.wait(timeout=self._timeout_ctrl)):
self.terminate()
time.sleep(0.1)
raise Exception(f'Shutdown signal was not received for {self._timeout_ctrl}')
except Exception as ex:
            self.logger.error(((f'{ex!r} during {self.close!r}' + f'''
add "--quiet-error" to suppress the exception details''') if (not self.args.quiet_error) else ''), exc_info=(not self.args.quiet_error))
if (not self.args.daemon):
self.join()
elif self.is_shutdown.is_set():
pass
else:
self.logger.warning('Pea is being closed before being ready. Most likely some other Pea in the Flow or Pod failed to start')
_timeout = self.args.timeout_ready
if (_timeout <= 0):
_timeout = None
else:
_timeout /= 1000.0
self.logger.debug('waiting for ready or shutdown signal from runtime')
if self._wait_for_ready_or_shutdown(_timeout):
if (not self.is_shutdown.is_set()):
self._cancel_runtime(skip_deactivate=True)
if (not self.is_shutdown.wait(timeout=self._timeout_ctrl)):
self.terminate()
time.sleep(0.1)
raise Exception(f'Shutdown signal was not received for {self._timeout_ctrl}')
else:
self.logger.warning('Terminating process after waiting for readiness signal for graceful shutdown')
self.terminate()
time.sleep(0.1)
self.logger.debug(__stop_msg__)
self.logger.close()
|
@property
def role(self) -> 'PeaRoleType':
    """The role this pea plays inside its pod.


    .. #noqa: DAR201"""
    return self.args.pea_role
| -7,939,200,317,559,389,000
|
Get the role of this pea in a pod
.. #noqa: DAR201
|
jina/peapods/peas/__init__.py
|
role
|
MaxielMrvaljevic/jina
|
python
|
@property
def role(self) -> 'PeaRoleType':
'Get the role of this pea in a pod\n\n\n .. #noqa: DAR201'
return self.args.pea_role
|
@property
def _is_inner_pea(self) -> bool:
    """Whether this pea is an inner pea rather than a head/tail.


    .. #noqa: DAR201"""
    # identity comparison is deliberate: roles are singleton enum members
    inner_roles = (PeaRoleType.SINGLETON, PeaRoleType.PARALLEL)
    return any(self.role is r for r in inner_roles)
| 8,180,830,302,830,605,000
|
Determine whether this is a inner pea or a head/tail
.. #noqa: DAR201
|
jina/peapods/peas/__init__.py
|
_is_inner_pea
|
MaxielMrvaljevic/jina
|
python
|
@property
def _is_inner_pea(self) -> bool:
'Determine whether this is a inner pea or a head/tail\n\n\n .. #noqa: DAR201'
return ((self.role is PeaRoleType.SINGLETON) or (self.role is PeaRoleType.PARALLEL))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.