Dataset columns:

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 4–721
- content_id: string, length 40
- detected_licenses: list, length 0–57
- license_type: string, 2 classes
- repo_name: string, length 5–91
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 321 classes
- visit_date: timestamp[ns], 2016-08-12 09:31:09 – 2023-09-06 10:45:07
- revision_date: timestamp[ns], 2010-09-28 14:01:40 – 2023-09-06 06:22:19
- committer_date: timestamp[ns], 2010-09-28 14:01:40 – 2023-09-06 06:22:19
- github_id: int64, 426 – 681M
- star_events_count: int64, 101 – 243k
- fork_events_count: int64, 0 – 110k
- gha_license_id: string, 23 classes
- gha_event_created_at: timestamp[ns], 2012-06-28 18:51:49 – 2023-09-14 21:59:16 (nullable)
- gha_created_at: timestamp[ns], 2008-02-11 22:55:26 – 2023-08-10 11:14:58 (nullable)
- gha_language: string, 147 classes
- src_encoding: string, 26 classes
- language: string, 2 classes
- is_vendor: bool
- is_generated: bool
- length_bytes: int64, 6 – 10.2M
- extension: string, 115 classes
- filename: string, length 3–113
- content: string, length 6 – 10.2M

---
blob_id: 76bbd2810770224bb57dde28bea1edec6834da43
directory_id: b049a961f100444dde14599bab06a0a4224d869b
path: /sdk/python/pulumi_azure_native/securityinsights/v20230501preview/_inputs.py
content_id: 6df00df97bf4b518a34a280590df448e24502f58
detected_licenses: ["BSD-3-Clause", "Apache-2.0"]
license_type: permissive
repo_name: pulumi/pulumi-azure-native
snapshot_id: b390c88beef8381f9a71ab2bed5571e0dd848e65
revision_id: 4c499abe17ec6696ce28477dde1157372896364e
branch_name: refs/heads/master
visit_date: 2023-08-30T08:19:41.564780
revision_date: 2023-08-28T19:29:04
committer_date: 2023-08-28T19:29:04
github_id: 172,386,632
star_events_count: 107
fork_events_count: 29
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T13:17:00
gha_created_at: 2019-02-24T20:30:21
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 18,851
extension: py
filename: _inputs.py
content:
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'AzureDevOpsResourceInfoArgs',
'ContentPathMapArgs',
'DeploymentInfoArgs',
'DeploymentArgs',
'GitHubResourceInfoArgs',
'RepositoryResourceInfoArgs',
'RepositoryArgs',
'WebhookArgs',
]
@pulumi.input_type
class AzureDevOpsResourceInfoArgs:
def __init__(__self__, *,
pipeline_id: Optional[pulumi.Input[str]] = None,
service_connection_id: Optional[pulumi.Input[str]] = None):
"""
Resources created in Azure DevOps repository.
:param pulumi.Input[str] pipeline_id: Id of the pipeline created for the source-control.
:param pulumi.Input[str] service_connection_id: Id of the service-connection created for the source-control.
"""
if pipeline_id is not None:
pulumi.set(__self__, "pipeline_id", pipeline_id)
if service_connection_id is not None:
pulumi.set(__self__, "service_connection_id", service_connection_id)
@property
@pulumi.getter(name="pipelineId")
def pipeline_id(self) -> Optional[pulumi.Input[str]]:
"""
Id of the pipeline created for the source-control.
"""
return pulumi.get(self, "pipeline_id")
@pipeline_id.setter
def pipeline_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pipeline_id", value)
@property
@pulumi.getter(name="serviceConnectionId")
def service_connection_id(self) -> Optional[pulumi.Input[str]]:
"""
Id of the service-connection created for the source-control.
"""
return pulumi.get(self, "service_connection_id")
@service_connection_id.setter
def service_connection_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_connection_id", value)
@pulumi.input_type
class ContentPathMapArgs:
def __init__(__self__, *,
content_type: Optional[pulumi.Input[Union[str, 'ContentType']]] = None,
path: Optional[pulumi.Input[str]] = None):
"""
The mapping of content type to a repo path.
:param pulumi.Input[Union[str, 'ContentType']] content_type: Content type.
:param pulumi.Input[str] path: The path to the content.
"""
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if path is not None:
pulumi.set(__self__, "path", path)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[pulumi.Input[Union[str, 'ContentType']]]:
"""
Content type.
"""
return pulumi.get(self, "content_type")
@content_type.setter
def content_type(self, value: Optional[pulumi.Input[Union[str, 'ContentType']]]):
pulumi.set(self, "content_type", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
The path to the content.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@pulumi.input_type
class DeploymentInfoArgs:
def __init__(__self__, *,
deployment: Optional[pulumi.Input['DeploymentArgs']] = None,
deployment_fetch_status: Optional[pulumi.Input[Union[str, 'DeploymentFetchStatus']]] = None,
message: Optional[pulumi.Input[str]] = None):
"""
Information regarding a deployment.
:param pulumi.Input['DeploymentArgs'] deployment: Deployment information.
:param pulumi.Input[Union[str, 'DeploymentFetchStatus']] deployment_fetch_status: Status while fetching the last deployment.
:param pulumi.Input[str] message: Additional details about the deployment that can be shown to the user.
"""
if deployment is not None:
pulumi.set(__self__, "deployment", deployment)
if deployment_fetch_status is not None:
pulumi.set(__self__, "deployment_fetch_status", deployment_fetch_status)
if message is not None:
pulumi.set(__self__, "message", message)
@property
@pulumi.getter
def deployment(self) -> Optional[pulumi.Input['DeploymentArgs']]:
"""
Deployment information.
"""
return pulumi.get(self, "deployment")
@deployment.setter
def deployment(self, value: Optional[pulumi.Input['DeploymentArgs']]):
pulumi.set(self, "deployment", value)
@property
@pulumi.getter(name="deploymentFetchStatus")
def deployment_fetch_status(self) -> Optional[pulumi.Input[Union[str, 'DeploymentFetchStatus']]]:
"""
Status while fetching the last deployment.
"""
return pulumi.get(self, "deployment_fetch_status")
@deployment_fetch_status.setter
def deployment_fetch_status(self, value: Optional[pulumi.Input[Union[str, 'DeploymentFetchStatus']]]):
pulumi.set(self, "deployment_fetch_status", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
"""
Additional details about the deployment that can be shown to the user.
"""
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@pulumi.input_type
class DeploymentArgs:
def __init__(__self__, *,
deployment_id: Optional[pulumi.Input[str]] = None,
deployment_logs_url: Optional[pulumi.Input[str]] = None,
deployment_result: Optional[pulumi.Input[Union[str, 'DeploymentResult']]] = None,
deployment_state: Optional[pulumi.Input[Union[str, 'DeploymentState']]] = None,
deployment_time: Optional[pulumi.Input[str]] = None):
"""
Description about a deployment.
:param pulumi.Input[str] deployment_id: Deployment identifier.
:param pulumi.Input[str] deployment_logs_url: Url to access repository action logs.
:param pulumi.Input[Union[str, 'DeploymentResult']] deployment_result: The outcome of the deployment.
:param pulumi.Input[Union[str, 'DeploymentState']] deployment_state: Current status of the deployment.
:param pulumi.Input[str] deployment_time: The time when the deployment finished.
"""
if deployment_id is not None:
pulumi.set(__self__, "deployment_id", deployment_id)
if deployment_logs_url is not None:
pulumi.set(__self__, "deployment_logs_url", deployment_logs_url)
if deployment_result is not None:
pulumi.set(__self__, "deployment_result", deployment_result)
if deployment_state is not None:
pulumi.set(__self__, "deployment_state", deployment_state)
if deployment_time is not None:
pulumi.set(__self__, "deployment_time", deployment_time)
@property
@pulumi.getter(name="deploymentId")
def deployment_id(self) -> Optional[pulumi.Input[str]]:
"""
Deployment identifier.
"""
return pulumi.get(self, "deployment_id")
@deployment_id.setter
def deployment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deployment_id", value)
@property
@pulumi.getter(name="deploymentLogsUrl")
def deployment_logs_url(self) -> Optional[pulumi.Input[str]]:
"""
Url to access repository action logs.
"""
return pulumi.get(self, "deployment_logs_url")
@deployment_logs_url.setter
def deployment_logs_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deployment_logs_url", value)
@property
@pulumi.getter(name="deploymentResult")
def deployment_result(self) -> Optional[pulumi.Input[Union[str, 'DeploymentResult']]]:
"""
The outcome of the deployment.
"""
return pulumi.get(self, "deployment_result")
@deployment_result.setter
def deployment_result(self, value: Optional[pulumi.Input[Union[str, 'DeploymentResult']]]):
pulumi.set(self, "deployment_result", value)
@property
@pulumi.getter(name="deploymentState")
def deployment_state(self) -> Optional[pulumi.Input[Union[str, 'DeploymentState']]]:
"""
Current status of the deployment.
"""
return pulumi.get(self, "deployment_state")
@deployment_state.setter
def deployment_state(self, value: Optional[pulumi.Input[Union[str, 'DeploymentState']]]):
pulumi.set(self, "deployment_state", value)
@property
@pulumi.getter(name="deploymentTime")
def deployment_time(self) -> Optional[pulumi.Input[str]]:
"""
The time when the deployment finished.
"""
return pulumi.get(self, "deployment_time")
@deployment_time.setter
def deployment_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deployment_time", value)
@pulumi.input_type
class GitHubResourceInfoArgs:
def __init__(__self__, *,
app_installation_id: Optional[pulumi.Input[str]] = None):
"""
Resources created in GitHub repository.
:param pulumi.Input[str] app_installation_id: GitHub application installation id.
"""
if app_installation_id is not None:
pulumi.set(__self__, "app_installation_id", app_installation_id)
@property
@pulumi.getter(name="appInstallationId")
def app_installation_id(self) -> Optional[pulumi.Input[str]]:
"""
GitHub application installation id.
"""
return pulumi.get(self, "app_installation_id")
@app_installation_id.setter
def app_installation_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_installation_id", value)
@pulumi.input_type
class RepositoryResourceInfoArgs:
def __init__(__self__, *,
azure_dev_ops_resource_info: Optional[pulumi.Input['AzureDevOpsResourceInfoArgs']] = None,
git_hub_resource_info: Optional[pulumi.Input['GitHubResourceInfoArgs']] = None,
webhook: Optional[pulumi.Input['WebhookArgs']] = None):
"""
Resources created in user's repository for the source-control.
:param pulumi.Input['AzureDevOpsResourceInfoArgs'] azure_dev_ops_resource_info: Resources created in Azure DevOps for this source-control.
:param pulumi.Input['GitHubResourceInfoArgs'] git_hub_resource_info: Resources created in GitHub for this source-control.
:param pulumi.Input['WebhookArgs'] webhook: The webhook object created for the source-control.
"""
if azure_dev_ops_resource_info is not None:
pulumi.set(__self__, "azure_dev_ops_resource_info", azure_dev_ops_resource_info)
if git_hub_resource_info is not None:
pulumi.set(__self__, "git_hub_resource_info", git_hub_resource_info)
if webhook is not None:
pulumi.set(__self__, "webhook", webhook)
@property
@pulumi.getter(name="azureDevOpsResourceInfo")
def azure_dev_ops_resource_info(self) -> Optional[pulumi.Input['AzureDevOpsResourceInfoArgs']]:
"""
Resources created in Azure DevOps for this source-control.
"""
return pulumi.get(self, "azure_dev_ops_resource_info")
@azure_dev_ops_resource_info.setter
def azure_dev_ops_resource_info(self, value: Optional[pulumi.Input['AzureDevOpsResourceInfoArgs']]):
pulumi.set(self, "azure_dev_ops_resource_info", value)
@property
@pulumi.getter(name="gitHubResourceInfo")
def git_hub_resource_info(self) -> Optional[pulumi.Input['GitHubResourceInfoArgs']]:
"""
Resources created in GitHub for this source-control.
"""
return pulumi.get(self, "git_hub_resource_info")
@git_hub_resource_info.setter
def git_hub_resource_info(self, value: Optional[pulumi.Input['GitHubResourceInfoArgs']]):
pulumi.set(self, "git_hub_resource_info", value)
@property
@pulumi.getter
def webhook(self) -> Optional[pulumi.Input['WebhookArgs']]:
"""
The webhook object created for the source-control.
"""
return pulumi.get(self, "webhook")
@webhook.setter
def webhook(self, value: Optional[pulumi.Input['WebhookArgs']]):
pulumi.set(self, "webhook", value)
@pulumi.input_type
class RepositoryArgs:
def __init__(__self__, *,
branch: Optional[pulumi.Input[str]] = None,
deployment_logs_url: Optional[pulumi.Input[str]] = None,
display_url: Optional[pulumi.Input[str]] = None,
path_mapping: Optional[pulumi.Input[Sequence[pulumi.Input['ContentPathMapArgs']]]] = None,
url: Optional[pulumi.Input[str]] = None):
"""
metadata of a repository.
:param pulumi.Input[str] branch: Branch name of repository.
:param pulumi.Input[str] deployment_logs_url: Url to access repository action logs.
:param pulumi.Input[str] display_url: Display url of repository.
:param pulumi.Input[Sequence[pulumi.Input['ContentPathMapArgs']]] path_mapping: Dictionary of source control content type and path mapping.
:param pulumi.Input[str] url: Url of repository.
"""
if branch is not None:
pulumi.set(__self__, "branch", branch)
if deployment_logs_url is not None:
pulumi.set(__self__, "deployment_logs_url", deployment_logs_url)
if display_url is not None:
pulumi.set(__self__, "display_url", display_url)
if path_mapping is not None:
pulumi.set(__self__, "path_mapping", path_mapping)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter
def branch(self) -> Optional[pulumi.Input[str]]:
"""
Branch name of repository.
"""
return pulumi.get(self, "branch")
@branch.setter
def branch(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "branch", value)
@property
@pulumi.getter(name="deploymentLogsUrl")
def deployment_logs_url(self) -> Optional[pulumi.Input[str]]:
"""
Url to access repository action logs.
"""
return pulumi.get(self, "deployment_logs_url")
@deployment_logs_url.setter
def deployment_logs_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deployment_logs_url", value)
@property
@pulumi.getter(name="displayUrl")
def display_url(self) -> Optional[pulumi.Input[str]]:
"""
Display url of repository.
"""
return pulumi.get(self, "display_url")
@display_url.setter
def display_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_url", value)
@property
@pulumi.getter(name="pathMapping")
def path_mapping(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContentPathMapArgs']]]]:
"""
Dictionary of source control content type and path mapping.
"""
return pulumi.get(self, "path_mapping")
@path_mapping.setter
def path_mapping(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ContentPathMapArgs']]]]):
pulumi.set(self, "path_mapping", value)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
"""
Url of repository.
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
@pulumi.input_type
class WebhookArgs:
def __init__(__self__, *,
rotate_webhook_secret: Optional[pulumi.Input[bool]] = None,
webhook_id: Optional[pulumi.Input[str]] = None,
webhook_secret_update_time: Optional[pulumi.Input[str]] = None,
webhook_url: Optional[pulumi.Input[str]] = None):
"""
Detail about the webhook object.
:param pulumi.Input[bool] rotate_webhook_secret: A flag to instruct the backend service to rotate webhook secret.
:param pulumi.Input[str] webhook_id: Unique identifier for the webhook.
:param pulumi.Input[str] webhook_secret_update_time: Time when the webhook secret was updated.
:param pulumi.Input[str] webhook_url: URL that gets invoked by the webhook.
"""
if rotate_webhook_secret is not None:
pulumi.set(__self__, "rotate_webhook_secret", rotate_webhook_secret)
if webhook_id is not None:
pulumi.set(__self__, "webhook_id", webhook_id)
if webhook_secret_update_time is not None:
pulumi.set(__self__, "webhook_secret_update_time", webhook_secret_update_time)
if webhook_url is not None:
pulumi.set(__self__, "webhook_url", webhook_url)
@property
@pulumi.getter(name="rotateWebhookSecret")
def rotate_webhook_secret(self) -> Optional[pulumi.Input[bool]]:
"""
A flag to instruct the backend service to rotate webhook secret.
"""
return pulumi.get(self, "rotate_webhook_secret")
@rotate_webhook_secret.setter
def rotate_webhook_secret(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "rotate_webhook_secret", value)
@property
@pulumi.getter(name="webhookId")
def webhook_id(self) -> Optional[pulumi.Input[str]]:
"""
Unique identifier for the webhook.
"""
return pulumi.get(self, "webhook_id")
@webhook_id.setter
def webhook_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "webhook_id", value)
@property
@pulumi.getter(name="webhookSecretUpdateTime")
def webhook_secret_update_time(self) -> Optional[pulumi.Input[str]]:
"""
Time when the webhook secret was updated.
"""
return pulumi.get(self, "webhook_secret_update_time")
@webhook_secret_update_time.setter
def webhook_secret_update_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "webhook_secret_update_time", value)
@property
@pulumi.getter(name="webhookUrl")
def webhook_url(self) -> Optional[pulumi.Input[str]]:
"""
URL that gets invoked by the webhook.
"""
return pulumi.get(self, "webhook_url")
@webhook_url.setter
def webhook_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "webhook_url", value)
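
As a quick usage sketch (illustrative values, not taken from the SDK documentation; it assumes the pulumi and pulumi-azure-native packages are installed and that the version module re-exports these input types), the classes above compose like this when describing a Sentinel source-control repository:

```python
from pulumi_azure_native.securityinsights.v20230501preview import (
    ContentPathMapArgs,
    RepositoryArgs,
    WebhookArgs,
)

# A repository pointing at a hypothetical GitHub repo, mapping one content
# type to a folder inside it (content_type accepts a plain str or ContentType).
repository = RepositoryArgs(
    url="https://github.com/example-org/sentinel-content",
    branch="main",
    path_mapping=[
        ContentPathMapArgs(content_type="AnalyticRule", path="rules/"),
    ],
)

# Ask the backend service to rotate the webhook secret on the next update.
webhook = WebhookArgs(rotate_webhook_secret=True)
```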

---
blob_id: 3a3d38c81104e0ab5f20c0eac36904cd6bb7426b
directory_id: 96dcea595e7c16cec07b3f649afd65f3660a0bad
path: /homeassistant/components/rapt_ble/sensor.py
content_id: 9967a36faeef2441bc29b87c523f0d9d8a29f7d7
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: home-assistant/core
snapshot_id: 3455eac2e9d925c92d30178643b1aaccf3a6484f
revision_id: 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
branch_name: refs/heads/dev
visit_date: 2023-08-31T15:41:06.299469
revision_date: 2023-08-31T14:50:53
committer_date: 2023-08-31T14:50:53
github_id: 12,888,993
star_events_count: 35,501
fork_events_count: 20,617
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T21:50:15
gha_created_at: 2013-09-17T07:29:48
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,696
extension: py
filename: sensor.py
content:
"""Support for RAPT Pill hydrometers."""
from __future__ import annotations
from rapt_ble import DeviceClass, DeviceKey, SensorUpdate, Units
from homeassistant import config_entries
from homeassistant.components.bluetooth.passive_update_processor import (
PassiveBluetoothDataProcessor,
PassiveBluetoothDataUpdate,
PassiveBluetoothEntityKey,
PassiveBluetoothProcessorCoordinator,
PassiveBluetoothProcessorEntity,
)
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.const import (
PERCENTAGE,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
EntityCategory,
UnitOfTemperature,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.sensor import sensor_device_info_to_hass_device_info
from .const import DOMAIN
SENSOR_DESCRIPTIONS = {
(DeviceClass.TEMPERATURE, Units.TEMP_CELSIUS): SensorEntityDescription(
key=f"{DeviceClass.TEMPERATURE}_{Units.TEMP_CELSIUS}",
device_class=SensorDeviceClass.TEMPERATURE,
native_unit_of_measurement=UnitOfTemperature.CELSIUS,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.SPECIFIC_GRAVITY, Units.SPECIFIC_GRAVITY): SensorEntityDescription(
key=f"{DeviceClass.SPECIFIC_GRAVITY}_{Units.SPECIFIC_GRAVITY}",
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.BATTERY, Units.PERCENTAGE): SensorEntityDescription(
key=f"{DeviceClass.BATTERY}_{Units.PERCENTAGE}",
device_class=SensorDeviceClass.BATTERY,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
(
DeviceClass.SIGNAL_STRENGTH,
Units.SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
): SensorEntityDescription(
key=f"{DeviceClass.SIGNAL_STRENGTH}_{Units.SIGNAL_STRENGTH_DECIBELS_MILLIWATT}",
device_class=SensorDeviceClass.SIGNAL_STRENGTH,
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
state_class=SensorStateClass.MEASUREMENT,
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
}
def _device_key_to_bluetooth_entity_key(
device_key: DeviceKey,
) -> PassiveBluetoothEntityKey:
"""Convert a device key to an entity key."""
return PassiveBluetoothEntityKey(device_key.key, device_key.device_id)
def sensor_update_to_bluetooth_data_update(
sensor_update: SensorUpdate,
) -> PassiveBluetoothDataUpdate:
"""Convert a sensor update to a bluetooth data update."""
return PassiveBluetoothDataUpdate(
devices={
device_id: sensor_device_info_to_hass_device_info(device_info)
for device_id, device_info in sensor_update.devices.items()
},
entity_descriptions={
_device_key_to_bluetooth_entity_key(device_key): SENSOR_DESCRIPTIONS[
(description.device_class, description.native_unit_of_measurement)
]
for device_key, description in sensor_update.entity_descriptions.items()
if description.device_class and description.native_unit_of_measurement
},
entity_data={
_device_key_to_bluetooth_entity_key(device_key): sensor_values.native_value
for device_key, sensor_values in sensor_update.entity_values.items()
},
entity_names={
_device_key_to_bluetooth_entity_key(device_key): sensor_values.name
for device_key, sensor_values in sensor_update.entity_values.items()
},
)
async def async_setup_entry(
hass: HomeAssistant,
entry: config_entries.ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the RAPT Pill BLE sensors."""
coordinator: PassiveBluetoothProcessorCoordinator = hass.data[DOMAIN][
entry.entry_id
]
processor = PassiveBluetoothDataProcessor(sensor_update_to_bluetooth_data_update)
entry.async_on_unload(
processor.async_add_entities_listener(
RAPTPillBluetoothSensorEntity, async_add_entities
)
)
entry.async_on_unload(coordinator.async_register_processor(processor))
class RAPTPillBluetoothSensorEntity(
PassiveBluetoothProcessorEntity[PassiveBluetoothDataProcessor[float | int | None]],
SensorEntity,
):
"""Representation of a RAPT Pill BLE sensor."""
@property
def native_value(self) -> int | float | None:
"""Return the native value."""
return self.processor.entity_data.get(self.entity_key)

---
blob_id: 9e2c4ea5c8a74be261ccfe88dce12aa809ee1278
directory_id: dd221d1ab80a49190a0c93277e2471debaa2db95
path: /hanlp/components/mtl/tasks/ud.py
content_id: f9abb0822b6814505071a1bbfbc8d259a5c76ba0
detected_licenses: ["Apache-2.0", "CC-BY-NC-SA-4.0"]
license_type: permissive
repo_name: hankcs/HanLP
snapshot_id: 29a22d4e240617e4dc67929c2f9760a822402cf7
revision_id: be2f04905a12990a527417bd47b79b851874a201
branch_name: refs/heads/doc-zh
visit_date: 2023-08-18T12:48:43.533453
revision_date: 2020-02-15T17:19:28
committer_date: 2023-03-14T02:46:03
github_id: 24,976,755
star_events_count: 32,454
fork_events_count: 9,770
gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-13T03:11:39
gha_created_at: 2014-10-09T06:36:16
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 7,550
extension: py
filename: ud.py
content:
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-12-17 21:54
import logging
from typing import Dict, Any, List, Union, Iterable, Callable
import torch
from torch.utils.data import DataLoader
from hanlp.common.dataset import SamplerBuilder, PadSequenceDataLoader
from hanlp_common.document import Document
from hanlp.common.transform import VocabDict, PunctuationMask
from hanlp.components.mtl.tasks import Task
from hanlp_common.conll import CoNLLUWord
from hanlp.components.parsers.ud.ud_model import UniversalDependenciesDecoder
from hanlp.components.parsers.ud.ud_parser import UniversalDependenciesParser
from hanlp.components.parsers.ud.util import generate_lemma_rule, append_bos
from hanlp.layers.scalar_mix import ScalarMixWithDropoutBuilder
from hanlp.metrics.metric import Metric
from hanlp.metrics.mtl import MetricDict
from hanlp_common.util import merge_locals_kwargs
class UniversalDependenciesParsing(Task, UniversalDependenciesParser):
def __init__(self,
trn: str = None,
dev: str = None,
tst: str = None,
sampler_builder: SamplerBuilder = None,
dependencies: str = None,
scalar_mix: ScalarMixWithDropoutBuilder = None,
use_raw_hidden_states=False,
lr=None,
separate_optimizer=False,
cls_is_bos=True,
sep_is_eos=False,
n_mlp_arc=768,
n_mlp_rel=256,
mlp_dropout=.33,
tree=False,
proj=False,
punct=False,
max_seq_len=None,
**kwargs) -> None:
r"""Universal Dependencies Parsing (lemmatization, features, PoS tagging and dependency parsing) implementation
of "75 Languages, 1 Model: Parsing Universal Dependencies Universally" (:cite:`kondratyuk-straka-2019-75`).
Args:
trn: Path to training set.
dev: Path to dev set.
tst: Path to test set.
sampler_builder: A builder which builds a sampler.
dependencies: Its dependencies on other tasks.
scalar_mix: A builder which builds a `ScalarMixWithDropout` object.
use_raw_hidden_states: Whether to use raw hidden states from transformer without any pooling.
lr: Learning rate for this task.
separate_optimizer: Use customized separate optimizer for this task.
cls_is_bos: ``True`` to treat the first token as ``BOS``.
sep_is_eos: ``True`` to treat the last token as ``EOS``.
n_mlp_arc: Number of features for arc representation.
n_mlp_rel: Number of features for rel representation.
mlp_dropout: Dropout applied to MLPs.
tree: ``True`` to enforce tree constraint.
proj: ``True`` for projective parsing.
punct: ``True`` to include punctuations in evaluation.
max_seq_len: Prune samples longer than this length. Useful for reducing GPU consumption.
**kwargs: Not used.
"""
super().__init__(**merge_locals_kwargs(locals(), kwargs))
self.vocabs = VocabDict()
def build_dataloader(self, data, transform: Callable = None, training=False, device=None,
logger: logging.Logger = None, cache=False, gradient_accumulation=1, **kwargs) -> DataLoader:
_transform = [generate_lemma_rule, append_bos, self.vocabs, transform]
if isinstance(data, str) and not self.config.punct:
_transform.append(PunctuationMask('token', 'punct_mask'))
dataset = UniversalDependenciesParser.build_dataset(self, data, _transform)
dataset.purge_cache()
if self.vocabs.mutable:
UniversalDependenciesParser.build_vocabs(self, dataset, logger, transformer=True)
max_seq_len = self.config.get('max_seq_len', None)
if max_seq_len and isinstance(data, str):
dataset.prune(lambda x: len(x['token_input_ids']) > max_seq_len, logger)
return PadSequenceDataLoader(
batch_sampler=self.sampler_builder.build(self.compute_lens(data, dataset),
shuffle=training, gradient_accumulation=gradient_accumulation),
device=device,
dataset=dataset,
pad={'arc': 0})
def compute_loss(self, batch: Dict[str, Any],
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any], criterion) -> \
Union[torch.FloatTensor, Dict[str, torch.FloatTensor]]:
return output[0]['loss'] / 4 # we have 4 tasks
def decode_output(self, output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
mask: torch.BoolTensor, batch: Dict[str, Any], decoder: torch.nn.Module, **kwargs) -> Union[
Dict[str, Any], Any]:
return UniversalDependenciesParser.decode_output(self, *output, batch)
def update_metrics(self, batch: Dict[str, Any],
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
prediction: Dict[str, Any], metric: Union[MetricDict, Metric]):
UniversalDependenciesParser.update_metrics(self, metric, batch, *output)
# noinspection PyMethodOverriding
def build_model(self,
encoder_size,
n_mlp_arc,
n_mlp_rel,
mlp_dropout,
training=True,
**kwargs) -> torch.nn.Module:
return UniversalDependenciesDecoder(
encoder_size,
n_mlp_arc,
n_mlp_rel,
mlp_dropout,
len(self.vocabs.rel),
len(self.vocabs.lemma),
len(self.vocabs.pos),
len(self.vocabs.feat),
0,
0
)
def build_metric(self, **kwargs):
return UniversalDependenciesParser.build_metric(self)
def input_is_flat(self, data) -> bool:
return UniversalDependenciesParser.input_is_flat(self, data)
def prediction_to_result(self, prediction: Dict[str, Any], batch: Dict[str, Any]) -> List:
yield from UniversalDependenciesParser.prediction_to_human(self, prediction, batch)
def feed_batch(self, h: torch.FloatTensor, batch: Dict[str, torch.Tensor], mask: torch.BoolTensor,
decoder: torch.nn.Module):
mask = self.compute_mask(batch)
output_dict = decoder(h, batch, mask)
if decoder.training:
mask = mask.clone()
mask[:, 0] = 0
return output_dict, mask
def finalize_document(self, doc: Document, task_name: str):
lem = []
pos = []
feat = []
dep = []
for sent in doc[task_name]:
sent: List[CoNLLUWord] = sent
lem.append([x.lemma for x in sent])
pos.append([x.upos for x in sent])
feat.append([x.feats for x in sent])
dep.append([(x.head, x.deprel) for x in sent])
promoted = 0
if 'lem' not in doc:
doc['lem'] = lem
promoted += 1
if 'pos' not in doc:
doc['pos'] = pos
promoted += 1
if 'feat' not in doc:
doc['fea'] = feat
promoted += 1
if 'dep' not in doc:
doc['dep'] = dep
promoted += 1
if promoted == 4:
doc.pop(task_name)

---
blob_id: e9affed3e3a4a6566d12b2b05ca8eb63781f79b8
directory_id: 05b0c763ab92086e69a8d00ae6465009c596f6bc
path: /tests/cpu/test_launcher.py
content_id: b736e11d8d6f074e544021cbcc6fdde00e9e56a5
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: intel/intel-extension-for-pytorch
snapshot_id: 60ce2af2ec3a1dacae0d0db13dd51a5b44512e61
revision_id: 7f9266789de7ca9d8bcf55606f3204f1a3640640
branch_name: refs/heads/master
visit_date: 2023-09-01T09:13:16.866410
revision_date: 2023-08-31T08:00:37
committer_date: 2023-08-31T08:00:37
github_id: 256,061,008
star_events_count: 991
fork_events_count: 144
gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-13T13:56:07
gha_created_at: 2020-04-15T23:35:29
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 28,110
extension: py
filename: test_launcher.py
content:
import unittest
from common_utils import TestCase
from utils.cpuinfo import construct_numa_config
from intel_extension_for_pytorch.cpu.launch import (
CPUPoolList,
Launcher,
DistributedTrainingLauncher,
)
import os
from os.path import expanduser
import glob
import subprocess
class TestLauncher(TestCase):
launch_scripts = [
["python", "-m", "intel_extension_for_pytorch.cpu.launch"],
["ipexrun"],
]
def find_lib(self, lib_type):
library_paths = []
if "CONDA_PREFIX" in os.environ:
library_paths.append(f'{os.environ["CONDA_PREFIX"]}/lib/')
elif "VIRTUAL_ENV" in os.environ:
library_paths.append(f'{os.environ["VIRTUAL_ENV"]}/lib/')
library_paths += [
f'{expanduser("~")}/.local/lib/',
"/usr/local/lib/",
"/usr/local/lib64/",
"/usr/lib/",
"/usr/lib64/",
]
lib_find = False
for lib_path in library_paths:
library_file = f"{lib_path}/lib{lib_type}.so"
matches = glob.glob(library_file)
if len(matches) > 0:
lib_find = True
break
return lib_find
def del_env(self, env_name):
if env_name in os.environ:
del os.environ[env_name]
def test_memory_allocator_setup(self):
launcher = Launcher()
# tcmalloc
find_tcmalloc = self.find_lib("tcmalloc")
launcher.set_memory_allocator(memory_allocator="tcmalloc")
ld_preload = (
":".join(launcher.ld_preload) if len(launcher.ld_preload) > 0 else ""
)
tcmalloc_enabled = "libtcmalloc.so" in ld_preload
self.assertEqual(find_tcmalloc, tcmalloc_enabled)
# jemalloc
find_jemalloc = self.find_lib("jemalloc")
launcher.set_memory_allocator(memory_allocator="jemalloc")
ld_preload = (
":".join(launcher.ld_preload) if len(launcher.ld_preload) > 0 else ""
)
jemalloc_enabled = "libjemalloc.so" in ld_preload
self.assertEqual(find_jemalloc, jemalloc_enabled)
if jemalloc_enabled:
self.assertTrue("MALLOC_CONF" in launcher.environ_set)
self.assertTrue(
launcher.environ_set["MALLOC_CONF"]
== "oversize_threshold:1,background_thread:true,metadata_thp:auto"
)
self.del_env("MALLOC_CONF")
launcher.set_memory_allocator(memory_allocator="jemalloc", benchmark=True)
if jemalloc_enabled:
self.assertTrue("MALLOC_CONF" in launcher.environ_set)
self.assertTrue(
launcher.environ_set["MALLOC_CONF"]
== "oversize_threshold:1,background_thread:false,metadata_thp:always,dirty_decay_ms:-1,muzzy_decay_ms:-1"
)
def test_mpi_pin_domain_and_ccl_worker_affinity(self):
# HT ON, use_logical_cores ON
nprocs_per_node = 2
ccl_worker_count = 4
lscpu_txt = construct_numa_config(
nprocs_per_node, 28, enable_ht=True, numa_mode=1
)
launcher = DistributedTrainingLauncher(lscpu_txt=lscpu_txt)
launcher.cpuinfo.gen_pools_ondemand(
ninstances=nprocs_per_node, use_logical_cores=True
)
pin_domain_affinity = launcher.get_pin_domain_affinity(
launcher.cpuinfo.pools_ondemand, ccl_worker_count
)
expect_pin_domain = "[0xffffff0,0xffffff00000000]"
self.assertEqual(pin_domain_affinity["pin_domain"], expect_pin_domain)
expected_ccl_worker_affinity = "0,1,2,3,28,29,30,31"
self.assertEqual(pin_domain_affinity["affinity"], expected_ccl_worker_affinity)
# HT ON, use_logical_cores OFF
nprocs_per_node = 2
ccl_worker_count = 4
lscpu_txt = construct_numa_config(
nprocs_per_node, 28, enable_ht=True, numa_mode=1
)
launcher = DistributedTrainingLauncher(lscpu_txt=lscpu_txt)
launcher.cpuinfo.gen_pools_ondemand(
ninstances=nprocs_per_node, use_logical_cores=True
)
pin_domain_affinity = launcher.get_pin_domain_affinity(
launcher.cpuinfo.pools_ondemand,
ccl_worker_count,
logical_cores_for_ccl=True,
)
expect_pin_domain = "[0xfffffff,0xfffffff0000000]"
self.assertEqual(pin_domain_affinity["pin_domain"], expect_pin_domain)
expected_ccl_worker_affinity = "56,57,58,59,84,85,86,87"
self.assertEqual(pin_domain_affinity["affinity"], expected_ccl_worker_affinity)
# HT OFF, use_logical_cores ON
nprocs_per_node = 2
ccl_worker_count = 4
lscpu_txt = construct_numa_config(
nprocs_per_node, 28, enable_ht=False, numa_mode=1
)
launcher = DistributedTrainingLauncher(lscpu_txt=lscpu_txt)
launcher.cpuinfo.gen_pools_ondemand(
ninstances=nprocs_per_node, use_logical_cores=True
)
pin_domain_affinity = launcher.get_pin_domain_affinity(
launcher.cpuinfo.pools_ondemand,
ccl_worker_count,
logical_cores_for_ccl=True,
)
expect_pin_domain = "[0xffffff0,0xffffff00000000]"
self.assertEqual(pin_domain_affinity["pin_domain"], expect_pin_domain)
expected_ccl_worker_affinity = "0,1,2,3,28,29,30,31"
self.assertEqual(pin_domain_affinity["affinity"], expected_ccl_worker_affinity)
# nodes_list
nprocs_per_node = 2
ccl_worker_count = 2
lscpu_txt = construct_numa_config(4, 14, enable_ht=True, numa_mode=1)
launcher = DistributedTrainingLauncher(lscpu_txt=lscpu_txt)
launcher.cpuinfo.gen_pools_ondemand(
ninstances=nprocs_per_node, nodes_list=[1, 2], use_logical_cores=True
)
pin_domain_affinity = launcher.get_pin_domain_affinity(
launcher.cpuinfo.pools_ondemand, ccl_worker_count
)
expect_pin_domain = "[0xfff0000,0x3ffc0000000]"
self.assertEqual(pin_domain_affinity["pin_domain"], expect_pin_domain)
expected_ccl_worker_affinity = "14,15,28,29"
self.assertEqual(pin_domain_affinity["affinity"], expected_ccl_worker_affinity)
# ncores_per_instance
nprocs_per_node = 2
ccl_worker_count = 4
lscpu_txt = construct_numa_config(
nprocs_per_node, 28, enable_ht=True, numa_mode=1
)
launcher = DistributedTrainingLauncher(lscpu_txt=lscpu_txt)
launcher.cpuinfo.gen_pools_ondemand(
ninstances=nprocs_per_node,
ncores_per_instance=(8 + ccl_worker_count) * nprocs_per_node,
use_logical_cores=True,
)
pin_domain_affinity = launcher.get_pin_domain_affinity(
launcher.cpuinfo.pools_ondemand, ccl_worker_count
)
expect_pin_domain = "[0xff0,0xff0000]"
self.assertEqual(pin_domain_affinity["pin_domain"], expect_pin_domain)
expected_ccl_worker_affinity = "0,1,2,3,12,13,14,15"
self.assertEqual(pin_domain_affinity["affinity"], expected_ccl_worker_affinity)
# e-cores
nprocs_per_node = 2
ccl_worker_count = 4
lscpu_txt = construct_numa_config(
nprocs_per_node, 28, enable_ht=True, n_e_cores=4, numa_mode=0
)
launcher = DistributedTrainingLauncher(lscpu_txt=lscpu_txt)
launcher.cpuinfo.gen_pools_ondemand(
ninstances=nprocs_per_node, use_logical_cores=True
)
pin_domain_affinity = launcher.get_pin_domain_affinity(
launcher.cpuinfo.pools_ondemand,
ccl_worker_count,
logical_cores_for_ccl=True,
)
expect_pin_domain = "[0xfffffff,0xfffffff000000000000000]"
self.assertEqual(pin_domain_affinity["pin_domain"], expect_pin_domain)
expected_ccl_worker_affinity = "28,29,30,31,88,89,90,91"
self.assertEqual(pin_domain_affinity["affinity"], expected_ccl_worker_affinity)
def test_launcher_scripts(self):
for launch_script in self.launch_scripts:
cmd = launch_script + ["--help"]
r = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
self.assertEqual(r.returncode, 0)
def verify_affinity(self, pools, ground_truth):
self.assertEqual(len(pools), ground_truth["ninstances"])
self.assertEqual(len(pools[0]), ground_truth["ncores_per_instance"])
self.assertEqual(
len(set([c.cpu for p in pools for c in p])), ground_truth["num_cores_sum"]
)
self.assertEqual(
len(set([c.node for p in pools for c in p])), ground_truth["num_nodes_sum"]
)
for i in range(ground_truth["ninstances"]):
self.assertEqual(
len(set([c.cpu for c in pools[i]])), ground_truth["num_cores"][i]
)
self.assertEqual(
len(set([c.node for c in pools[i]])), ground_truth["num_nodes"][i]
)
pool_txt = pools[i].get_pool_txt()
self.assertEqual(pool_txt["cores"], ground_truth["pools_cores"][i])
self.assertEqual(pool_txt["nodes"], ground_truth["pools_nodes"][i])
def test_core_affinity(self):
# mode 0
num_nodes = 2
n_phycores_per_node = 28
lscpu_txt = construct_numa_config(
num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=0
)
cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
ground_truth = {
"ninstances": 1,
"ncores_per_instance": 112,
"num_cores_sum": 112,
"num_nodes_sum": 2,
"num_cores": [112],
"num_nodes": [2],
"pools_cores": ["0-111"],
"pools_nodes": ["0,1"],
}
self.verify_affinity([cpuinfo.pool_all], ground_truth)
cpuinfo.gen_pools_ondemand(ninstances=2)
ground_truth = {
"ninstances": 2,
"ncores_per_instance": 28,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [28, 28],
"num_nodes": [1, 1],
"pools_cores": ["0-27", "56-83"],
"pools_nodes": ["0", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
cpuinfo.gen_pools_ondemand(ninstances=4)
ground_truth = {
"ninstances": 4,
"ncores_per_instance": 14,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [14, 14, 14, 14],
"num_nodes": [1, 1, 1, 1],
"pools_cores": ["0-13", "14-27", "56-69", "70-83"],
"pools_nodes": ["0", "0", "1", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
cpuinfo.gen_pools_ondemand(ncores_per_instance=28)
ground_truth = {
"ninstances": 2,
"ncores_per_instance": 28,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [28, 28],
"num_nodes": [1, 1],
"pools_cores": ["0-27", "56-83"],
"pools_nodes": ["0", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
cpuinfo.gen_pools_ondemand(ncores_per_instance=14)
ground_truth = {
"ninstances": 4,
"ncores_per_instance": 14,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [14, 14, 14, 14],
"num_nodes": [1, 1, 1, 1],
"pools_cores": ["0-13", "14-27", "56-69", "70-83"],
"pools_nodes": ["0", "0", "1", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
cores_list_local = []
cores_list_local.extend(list(i for i in range(14, 28)))
cores_list_local.extend(list(i for i in range(42, 56)))
cpuinfo.gen_pools_ondemand(cores_list=cores_list_local)
ground_truth = {
"ninstances": 1,
"ncores_per_instance": 28,
"num_cores_sum": 28,
"num_nodes_sum": 1,
"num_cores": [28],
"num_nodes": [1],
"pools_cores": ["14-27,42-55"],
"pools_nodes": ["0"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
num_nodes = 4
n_phycores_per_node = 14
lscpu_txt = construct_numa_config(
num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=0
)
cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
ground_truth = {
"ninstances": 1,
"ncores_per_instance": 112,
"num_cores_sum": 112,
"num_nodes_sum": 4,
"num_cores": [112],
"num_nodes": [4],
"pools_cores": ["0-111"],
"pools_nodes": ["0,1,2,3"],
}
self.verify_affinity([cpuinfo.pool_all], ground_truth)
cpuinfo.gen_pools_ondemand(nodes_list=[1, 2])
ground_truth = {
"ninstances": 1,
"ncores_per_instance": 28,
"num_cores_sum": 28,
"num_nodes_sum": 2,
"num_cores": [28],
"num_nodes": [2],
"pools_cores": ["28-41,56-69"],
"pools_nodes": ["1,2"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
num_nodes = 2
n_phycores_per_node = 28
lscpu_txt = construct_numa_config(
num_nodes, n_phycores_per_node, enable_ht=True, n_e_cores=4, numa_mode=0
)
cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
cpuinfo.gen_pools_ondemand(ninstances=2)
ground_truth = {
"ninstances": 2,
"ncores_per_instance": 28,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [28, 28],
"num_nodes": [1, 1],
"pools_cores": ["0-27", "60-87"],
"pools_nodes": ["0", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
# mode 1
num_nodes = 2
n_phycores_per_node = 28
lscpu_txt = construct_numa_config(
num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=1
)
cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
cpuinfo.gen_pools_ondemand(ninstances=1)
ground_truth = {
"ninstances": 1,
"ncores_per_instance": 56,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [56],
"num_nodes": [2],
"pools_cores": ["0-55"],
"pools_nodes": ["0,1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
cpuinfo.gen_pools_ondemand(ninstances=2)
ground_truth = {
"ninstances": 2,
"ncores_per_instance": 28,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [28, 28],
"num_nodes": [1, 1],
"pools_cores": ["0-27", "28-55"],
"pools_nodes": ["0", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
cpuinfo.gen_pools_ondemand(ninstances=4)
ground_truth = {
"ninstances": 4,
"ncores_per_instance": 14,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [14, 14, 14, 14],
"num_nodes": [1, 1, 1, 1],
"pools_cores": ["0-13", "14-27", "28-41", "42-55"],
"pools_nodes": ["0", "0", "1", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
cpuinfo.gen_pools_ondemand(ncores_per_instance=28)
ground_truth = {
"ninstances": 2,
"ncores_per_instance": 28,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [28, 28],
"num_nodes": [1, 1],
"pools_cores": ["0-27", "28-55"],
"pools_nodes": ["0", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
cpuinfo.gen_pools_ondemand(ncores_per_instance=14)
ground_truth = {
"ninstances": 4,
"ncores_per_instance": 14,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [14, 14, 14, 14],
"num_nodes": [1, 1, 1, 1],
"pools_cores": ["0-13", "14-27", "28-41", "42-55"],
"pools_nodes": ["0", "0", "1", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
cores_list_local = []
cores_list_local.extend(list(i for i in range(14, 28)))
cores_list_local.extend(list(i for i in range(42, 56)))
cpuinfo.gen_pools_ondemand(ninstances=2, cores_list=cores_list_local)
ground_truth = {
"ninstances": 2,
"ncores_per_instance": 14,
"num_cores_sum": 28,
"num_nodes_sum": 2,
"num_cores": [14, 14],
"num_nodes": [1, 1],
"pools_cores": ["14-27", "42-55"],
"pools_nodes": ["0", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
num_nodes = 4
n_phycores_per_node = 14
lscpu_txt = construct_numa_config(
num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=1
)
cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
cpuinfo.gen_pools_ondemand(ninstances=2, nodes_list=[1, 2])
ground_truth = {
"ninstances": 2,
"ncores_per_instance": 14,
"num_cores_sum": 28,
"num_nodes_sum": 2,
"num_cores": [14, 14],
"num_nodes": [1, 1],
"pools_cores": ["14-27", "28-41"],
"pools_nodes": ["1", "2"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
num_nodes = 2
n_phycores_per_node = 28
lscpu_txt = construct_numa_config(
num_nodes, n_phycores_per_node, enable_ht=True, n_e_cores=4, numa_mode=1
)
cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
cpuinfo.gen_pools_ondemand(ninstances=2)
ground_truth = {
"ninstances": 2,
"ncores_per_instance": 28,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [28, 28],
"num_nodes": [1, 1],
"pools_cores": ["0-27", "28-55"],
"pools_nodes": ["0", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
# mode 2
num_nodes = 2
n_phycores_per_node = 28
lscpu_txt = construct_numa_config(
num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=2
)
cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
cpuinfo.gen_pools_ondemand(ninstances=2)
ground_truth = {
"ninstances": 2,
"ncores_per_instance": 28,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [28, 28],
"num_nodes": [1, 1],
"pools_cores": [
"0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54",
"56,58,60,62,64,66,68,70,72,74,76,78,80,82,84,86,88,90,92,94,96,98,100,102,104,106,108,110",
],
"pools_nodes": ["0", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
cpuinfo.gen_pools_ondemand(ninstances=4)
ground_truth = {
"ninstances": 4,
"ncores_per_instance": 14,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [14, 14, 14, 14],
"num_nodes": [1, 1, 1, 1],
"pools_cores": [
"0,2,4,6,8,10,12,14,16,18,20,22,24,26",
"28,30,32,34,36,38,40,42,44,46,48,50,52,54",
"56,58,60,62,64,66,68,70,72,74,76,78,80,82",
"84,86,88,90,92,94,96,98,100,102,104,106,108,110",
],
"pools_nodes": ["0", "0", "1", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
cpuinfo.gen_pools_ondemand(ncores_per_instance=28)
ground_truth = {
"ninstances": 2,
"ncores_per_instance": 28,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [28, 28],
"num_nodes": [1, 1],
"pools_cores": [
"0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54",
"56,58,60,62,64,66,68,70,72,74,76,78,80,82,84,86,88,90,92,94,96,98,100,102,104,106,108,110",
],
"pools_nodes": ["0", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
cpuinfo.gen_pools_ondemand(ncores_per_instance=14)
ground_truth = {
"ninstances": 4,
"ncores_per_instance": 14,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [14, 14, 14, 14],
"num_nodes": [1, 1, 1, 1],
"pools_cores": [
"0,2,4,6,8,10,12,14,16,18,20,22,24,26",
"28,30,32,34,36,38,40,42,44,46,48,50,52,54",
"56,58,60,62,64,66,68,70,72,74,76,78,80,82",
"84,86,88,90,92,94,96,98,100,102,104,106,108,110",
],
"pools_nodes": ["0", "0", "1", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
cpuinfo.gen_pools_ondemand(ninstances=3)
ground_truth = {
"ninstances": 3,
"ncores_per_instance": 18,
"num_cores_sum": 54,
"num_nodes_sum": 2,
"num_cores": [18, 18, 18],
"num_nodes": [1, 2, 1],
"pools_cores": [
"0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34",
"36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,66,68,70",
"72,74,76,78,80,82,84,86,88,90,92,94,96,98,100,102,104,106",
],
"pools_nodes": ["0", "0,1", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
cores_list_local = []
cores_list_local.extend(list(i for i in range(14, 28)))
cores_list_local.extend(list(i for i in range(98, 112)))
cpuinfo.gen_pools_ondemand(ninstances=2, cores_list=cores_list_local)
ground_truth = {
"ninstances": 2,
"ncores_per_instance": 14,
"num_cores_sum": 28,
"num_nodes_sum": 2,
"num_cores": [14, 14],
"num_nodes": [1, 1],
"pools_cores": ["14-27", "98-111"],
"pools_nodes": ["0", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
num_nodes = 4
n_phycores_per_node = 14
lscpu_txt = construct_numa_config(
num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=2
)
cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
cpuinfo.gen_pools_ondemand(nodes_list=[1, 2])
ground_truth = {
"ninstances": 1,
"ncores_per_instance": 28,
"num_cores_sum": 28,
"num_nodes_sum": 2,
"num_cores": [28],
"num_nodes": [2],
"pools_cores": [
"28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,66,68,70,72,74,76,78,80,82"
],
"pools_nodes": ["1,2"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
num_nodes = 2
n_phycores_per_node = 28
lscpu_txt = construct_numa_config(
num_nodes, n_phycores_per_node, enable_ht=True, n_e_cores=4, numa_mode=2
)
cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
cpuinfo.gen_pools_ondemand(ninstances=2)
ground_truth = {
"ninstances": 2,
"ncores_per_instance": 28,
"num_cores_sum": 56,
"num_nodes_sum": 2,
"num_cores": [28, 28],
"num_nodes": [1, 1],
"pools_cores": [
"0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54",
"60,62,64,66,68,70,72,74,76,78,80,82,84,86,88,90,92,94,96,98,100,102,104,106,108,110,112,114",
],
"pools_nodes": ["0", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
def test_core_affinity_with_logical_cores(self):
num_nodes = 2
n_phycores_per_node = 28
lscpu_txt = construct_numa_config(
num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=1
)
cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
cpuinfo.gen_pools_ondemand(ninstances=2, use_logical_cores=True)
ground_truth = {
"ninstances": 2,
"ncores_per_instance": 56,
"num_cores_sum": 112,
"num_nodes_sum": 2,
"num_cores": [56, 56],
"num_nodes": [1, 1],
"pools_cores": ["0-27,56-83", "28-55,84-111"],
"pools_nodes": ["0", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
def test_core_affinity_with_skip_cross_node_cores(self):
num_nodes = 2
n_phycores_per_node = 28
lscpu_txt = construct_numa_config(
num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=1
)
cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
cpuinfo.gen_pools_ondemand(ninstances=3, skip_cross_node_cores=True)
ground_truth = {
"ninstances": 3,
"ncores_per_instance": 14,
"num_cores_sum": 42,
"num_nodes_sum": 2,
"num_cores": [14, 14, 14],
"num_nodes": [1, 1, 1],
"pools_cores": ["0-13", "14-27", "28-41"],
"pools_nodes": ["0", "0", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
def test_core_affinity_with_skip_cross_node_cores_and_use_logical_core(self):
num_nodes = 2
n_phycores_per_node = 28
lscpu_txt = construct_numa_config(
num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=1
)
cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
cpuinfo.gen_pools_ondemand(
ninstances=7, use_logical_cores=True, skip_cross_node_cores=True
)
ground_truth = {
"ninstances": 7,
"ncores_per_instance": 14,
"num_cores_sum": 98,
"num_nodes_sum": 2,
"num_cores": [14, 14, 14, 14, 14, 14, 14],
"num_nodes": [1, 1, 1, 1, 1, 1, 1],
"pools_cores": [
"0-6,56-62",
"7-13,63-69",
"14-20,70-76",
"21-27,77-83",
"28-34,84-90",
"35-41,91-97",
"42-48,98-104",
],
"pools_nodes": ["0", "0", "0", "0", "1", "1", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
def test_core_affinity_with_skip_cross_node_cores_and_node_id_use_logical_core(
self,
):
num_nodes = 4
n_phycores_per_node = 14
lscpu_txt = construct_numa_config(
num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=1
)
cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
cpuinfo.gen_pools_ondemand(
ninstances=3,
nodes_list=[1, 2],
use_logical_cores=True,
skip_cross_node_cores=True,
)
ground_truth = {
"ninstances": 3,
"ncores_per_instance": 14,
"num_cores_sum": 42,
"num_nodes_sum": 2,
"num_cores": [14, 14, 14],
"num_nodes": [1, 1, 1],
"pools_cores": ["14-20,70-76", "21-27,77-83", "28-34,84-90"],
"pools_nodes": ["1", "1", "2"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
if __name__ == "__main__":
test = unittest.main()
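
The bracketed hex strings asserted above (for example `[0xffffff0,0xffffff00000000]`) read as one bitmask per rank over logical CPU ids. A back-of-envelope sketch, assuming that interpretation rather than quoting launcher code, reproduces the first ground truth, where cores 0-3 and 28-31 are reserved for the CCL workers:

```python
# Minimal sketch: build a pin-domain mask from a list of logical CPU ids.
def cores_to_mask(cores):
    mask = 0
    for cpu in cores:
        mask |= 1 << cpu
    return hex(mask)

# Rank 0 computes on cores 4-27, rank 1 on cores 32-55; the remaining cores
# 0-3 and 28-31 match the expected CCL worker affinity "0,1,2,3,28,29,30,31".
assert cores_to_mask(range(4, 28)) == "0xffffff0"
assert cores_to_mask(range(32, 56)) == "0xffffff00000000"
```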

---
blob_id: 9b2d4e62f9294d157355aff4fc6b18a31c1c3606
directory_id: a69294c7d5ee75441759b66bc20cce727350bd59
path: /tests/keybench.py
content_id: 736c4d311dfa2192e5e43ded27cda07f1cd4dd99
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: GoogleCloudPlatform/datastore-ndb-python
snapshot_id: 58b57437e11104bfe924fca3de2ee28319f2976f
revision_id: 59cb209ed95480025d26531fc91397575438d2fe
branch_name: refs/heads/master
visit_date: 2023-08-21T01:16:54.021744
revision_date: 2022-10-20T23:12:51
committer_date: 2022-10-20T23:12:51
github_id: 37,215,291
star_events_count: 127
fork_events_count: 56
gha_license_id: Apache-2.0
gha_event_created_at: 2022-10-20T23:12:53
gha_created_at: 2015-06-10T18:34:30
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,330
extension: py
filename: keybench.py
content:
#
# Copyright 2008 The ndb Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Benchmark for Key comparison."""
import cProfile
import os
import pstats
import sys
from ndb import key
from ndb import utils
# Hack: replace os.environ with a plain dict. This is to make the
# benchmark more similar to the production environment, where
# os.environ is also a plain dict. In the environment where we run
# the benchmark, however, it is a UserDict instance, which makes the
# benchmark run slower -- but we don't want to measure this since it
# doesn't apply to production.
os.environ = dict(os.environ)
def bench1(n):
"""Benchmark Key comparison and hashing."""
a = key.Key('Foo', 42, 'Bar', 1, 'Hopla', 'lala')
b = key.Key('Foo', 42, 'Bar', 1, 'Hopla', 'lala')
assert a is not b
assert a == b
for _ in xrange(n):
a == b
hash(a)
def bench2(n):
"""Benchmark Key creation."""
for _ in xrange(n):
key.Key('Foo', 42, 'Bar', 1, 'Hopla', 'lala')
def bench3(n):
"""Benchmark Key creation with parent."""
p = key.Key('Foo', 42, 'Bar', 1)
for _ in xrange(n):
key.Key('Hopla', 'lala', parent=p)
def bench(n):
"""Toplevel benchmark function."""
return bench3(n)
def main():
utils.tweak_logging() # Interpret -v and -q flags.
n = 10000
for arg in sys.argv[1:]:
try:
n = int(arg)
break
except Exception:
pass
prof = cProfile.Profile()
prof = prof.runctx('bench(%d)' % n, globals(), locals())
stats = pstats.Stats(prof)
stats.strip_dirs()
stats.sort_stats('time') # 'time', 'cumulative' or 'calls'
stats.print_stats(20) # Arg: how many to print (optional)
# Uncomment (and tweak) the following calls for more details.
# stats.print_callees(10)
# stats.print_callers(10)
if __name__ == '__main__':
main()
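
The script above is Python 2 (`xrange`). A minimal Python 3 sketch of the same profile-and-report pattern, with a stand-in workload instead of ndb Key construction, looks like this:

```python
import cProfile
import pstats

def bench(n):
    # Stand-in workload; the original benchmarks ndb Key comparison/creation.
    total = 0
    for i in range(n):  # range replaces Python 2's xrange
        total += hash(("Foo", 42, "Bar", 1, "Hopla", i))
    return total

prof = cProfile.Profile()
prof.runctx("bench(%d)" % 10000, globals(), locals())
stats = pstats.Stats(prof)
stats.strip_dirs()
stats.sort_stats("time")   # 'time', 'cumulative' or 'calls'
stats.print_stats(20)      # how many entries to print
```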

---
blob_id: f90c0a6e03a297dafb019b3f37d9b993cf417a7c
directory_id: 31cf77b4c0342c6148b35ae2613d5e2501d5e755
path: /src/encoded/tests/test_upgrade_star_qc.py
content_id: c8af8f31d399af2f8fe5be4c9ecc5b7c8a2f45e8
detected_licenses: ["MIT"]
license_type: permissive
repo_name: ENCODE-DCC/encoded
snapshot_id: 096de8a6d60c959a783cc9517f1d60bd6c21b71f
revision_id: 80e05610c79b46d0890228555bb03e436b2fef11
branch_name: refs/heads/dev
visit_date: 2023-08-08T15:45:07.493187
revision_date: 2023-08-03T20:01:24
committer_date: 2023-08-03T20:01:24
github_id: 7,045,549
star_events_count: 110
fork_events_count: 69
gha_license_id: MIT
gha_event_created_at: 2023-09-12T23:59:45
gha_created_at: 2012-12-07T00:52:21
gha_language: JavaScript
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 480
extension: py
filename: test_upgrade_star_qc.py
content:
import pytest
def test_star_quality_metric_upgrade(registry, star_quality_metric_0,
bam_file, lab, award):
from snovault import UPGRADER
upgrader = registry[UPGRADER]
value = upgrader.upgrade('star_quality_metric',
star_quality_metric_0, registry=registry,
current_version='2', target_version='3')
assert value['lab'] == lab['@id'] and value['award'] == award['@id']

---
blob_id: db7569d1590daaab6c087209a18904e21a2d17c9
directory_id: df40e97d889659c320afcb16427c2478a7d263f3
path: /functions/utils.py
content_id: 2b2712a6ea233ec905a90a11648d601b35687e0a
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"]
license_type: permissive
repo_name: Esri/raster-functions
snapshot_id: 954d7f055a2a00c73458b67176b9b383c5b4da88
revision_id: 8548ca2ac156ea0a50e978e7465fc8a0edc727e8
branch_name: refs/heads/master
visit_date: 2023-08-28T05:08:56.637185
revision_date: 2023-08-02T18:45:00
committer_date: 2023-08-02T18:45:00
github_id: 22,187,946
star_events_count: 191
fork_events_count: 82
gha_license_id: Apache-2.0
gha_event_created_at: 2022-03-23T16:32:09
gha_created_at: 2014-07-24T00:13:43
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 10,073
extension: py
filename: utils.py
content:
#------------------------------------------------------------------------------
# Copyright 2016 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
__all__ = ['isProductVersionOK',
'computePixelBlockExtents',
'computeCellSize',
'Projection',
'Trace',
'ZonalAttributesTable',
'projectCellSize',]
# ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- #
degreeToMeter = 111319.49079327357264771338267056
pi = 3.14159265358979323846
def isProductVersionOK(productInfo, major, minor, build):
v = productInfo['major']*1.e+10 + int(0.5+productInfo['minor']*10)*1.e+6 + productInfo['build']
return v >= major*1e+10 + int(0.5+minor*10)*1.e+6 + build
def computePixelBlockExtents(tlc, shape, props):
nRows, nCols = shape if len(shape) == 2 else shape[1:] # dimensions of request pixel block
e, w, h = props['extent'], props['width'], props['height'] # dimensions of parent raster
dX, dY = (e[2]-e[0])/w, (e[3]-e[1])/h # cell size of parent raster
xMin, yMax = e[0]+tlc[0]*dX, e[3]-tlc[1]*dY # top-left corner of request on map
return (xMin, yMax-nRows*dY, xMin+nCols*dX, yMax) # extents of request on map
def computeCellSize(props, sr=None, proj=None):
e, w, h = props['extent'], props['width'], props['height'] # dimensions of parent raster
if sr is None:
return (e[2]-e[0])/w, (e[3]-e[1])/h # cell size of parent raster
if proj is None:
proj = Projection() # reproject extents
(xMin, yMin) = proj.transform(props['spatialReference'], sr, e[0], e[1])
(xMax, yMax) = proj.transform(props['spatialReference'], sr, e[2], e[3])
return (xMax-xMin)/w, (yMax-yMin)/h # cell size of parent raster
def projectCellSize(cellSize, inSR, outSR, proj=None):
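    # Convert a cell size from inSR to outSR: like-for-like spatial references scale by their
    # unit ratios, while mixed geographic/projected cases convert degrees to meters via degreeToMeter.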
    if proj is None:
        proj = Projection()
    inSRS = proj.createSR(inSR)
outSRS = proj.createSR(outSR)
if isGeographic(inSR) and isGeographic(outSR):
x = cellSize[0] * (inSRS.radiansPerUnit/outSRS.radiansPerUnit)
y = cellSize[1] * (inSRS.radiansPerUnit/outSRS.radiansPerUnit)
elif not isGeographic(inSR) and not isGeographic(outSR):
x = cellSize[0] * (inSRS.metersPerUnit/outSRS.metersPerUnit)
y = cellSize[1] * (inSRS.metersPerUnit/outSRS.metersPerUnit)
elif isGeographic(inSR):
factor1 = inSRS.radiansPerUnit
factor1 = factor1/pi*180
factor2 = outSRS.metersPerUnit
if factor2 is None:
factor2 = 1
x = cellSize[0] * (factor1 * degreeToMeter)/factor2
y = cellSize[1] * (factor1 * degreeToMeter)/factor2
elif isGeographic(outSR):
factor2 = outSRS.radiansPerUnit
factor2 = pi/180/factor2
factor1 = inSRS.metersPerUnit
if factor1 is None:
factor1 = 1
x = cellSize[0] * (factor2/degreeToMeter) * factor1
y = cellSize[1] * (factor2/degreeToMeter) * factor1
return x, y
def isGeographic(s):
arcpy = __import__('arcpy')
sr = arcpy.SpatialReference()
sr.loadFromString(str(s) if isinstance(s, (str, int)) else s.exportToString())
return bool(sr.type == 'Geographic' and sr.angularUnitName)
def loadJSON(s):
if s is None:
return None
json = __import__('json')
from os import path
if path.exists(s):
with open(s) as f:
return json.load(f)
else:
return json.loads(s)
# ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- #
class Projection():
def __init__(self):
self.arcpy = __import__('arcpy')
self.inSR, self.outSR = None, None
def transform(self, inSR, outSR, x, y):
if self.inSR != inSR:
self.inSR = self.createSR(inSR)
if self.outSR != outSR:
self.outSR = self.createSR(outSR)
p = self.arcpy.PointGeometry(self.arcpy.Point(x, y), self.inSR, False, False)
q = p.projectAs(self.outSR)
return q.firstPoint.X, q.firstPoint.Y
def createSR(self, s):
sr = self.arcpy.SpatialReference()
sr.loadFromString(str(s) if isinstance(s, (str, int)) else s.exportToString())
return sr
# ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- #
class Trace():
def __init__(self):
ctypes = __import__('ctypes')
self.trace = ctypes.windll.kernel32.OutputDebugStringA
self.trace.argtypes = [ctypes.c_char_p]
self.c_char_p = ctypes.c_char_p
def log(self, s):
self.trace(self.c_char_p(s.encode('utf-8')))
return s
# ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- #
# TODO: support early termination (when only one row is needed), like in non-zonal rasterize attributes.
class ZonalAttributesTable():
def __init__(self, tableUri, idField=None, attribList=None):
if tableUri is None:
            raise ValueError("ZonalAttributesTable requires a table path or feature-service URL.")
self.tableUri = tableUri
self.idField, self.idFI = (idField.lower(), 0) if idField else (None, None)
self.attribList = attribList or []
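        # Build the fields to query plus, for each requested attribute, the index of its column
        # in a result row (None for blank names); the ID field, when present, occupies column 0.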
k = 0
self.fi, self.queryFields = [], []
for a in self.attribList:
if a is not None and len(a):
self.queryFields.append(a)
self.fi.append(k)
k = k + 1
else:
self.fi.append(None)
if self.idField:
self.fi = [k+1 if k is not None else None for k in self.fi]
self.tupleSize = len(self.fi)
self.queryFields = ([self.idField] if self.idField else []) + self.queryFields
if not len(self.queryFields):
            raise ValueError("ZonalAttributesTable requires an ID field or at least one attribute name.")
self.fieldCSV = ",".join(self.queryFields)
self.arcpy = None
self.queryUrl = None # indicator of remote URL vs local table
s = tableUri.lower()
if s.startswith('http://') or s.startswith('https://'):
self.queryUrl = tableUri + ('/query' if tableUri[-1] != '/' else 'query')
self.urllib = __import__('urllib')
self.json = __import__('json')
def query(self, idList=[], where=None, extent=None, sr=None):
if self.arcpy is None:
self.arcpy = __import__('arcpy')
w = self._constructWhereClause(idList, where)
if not self.queryUrl:
return self._queryTable(w)
else:
return self._queryFeatureService(w, extent, sr)
def _queryTable(self, where=None):
T = {}
with self.arcpy.da.SearchCursor(self.tableUri, self.queryFields, where_clause=where) as cursor:
for row in cursor:
I = []
for k in range(self.tupleSize):
I.append(row[self.fi[k]] if self.fi[k] is not None else None)
self._addAttributes(T, row[self.idFI] if self.idFI is not None else None, tuple(I))
return T
def _queryFeatureService(self, where=None, extent=None, sr=None):
p = {'f': 'json', 'returnGeometry': 'false'}
p.update({'outFields': self.fieldCSV})
if where and len(where):
p.update({'where': where})
if extent and len(extent) == 4 and sr:
_sr = sr
if not isinstance(sr, self.arcpy.SpatialReference) and isinstance(sr, (str, int)):
_sr = self.arcpy.SpatialReference()
_sr.loadFromString(str(sr))
if _sr.factoryCode > 0:
p.update({'inSR': {'latestWkid': _sr.factoryCode}})
else:
p.update({'inSR': {'wkt': _sr.exportToString()}})
p.update({'geometryType': 'esriGeometryEnvelope',
'geometry': {'xmin': extent[0],
'ymin': extent[1],
'xmax': extent[2],
'ymax': extent[3]},
'spatialRel': 'esriSpatialRelEnvelopeIntersects'})
T = {}
r = self.urllib.urlopen(self.queryUrl, self.urllib.urlencode(p)).read()
responseJO = self.json.loads(r)
featuresJA = responseJO.get('features', None)
if featuresJA is not None:
for featureJO in featuresJA:
attrJO = featureJO.get('attributes', None)
if attrJO is not None:
A = []
for z in self.attribList:
A = A + [attrJO.get(z, None)]
self._addAttributes(T, attrJO.get(self.idField, None), tuple(A))
return T
def _constructWhereClause(self, idList=[], where=None):
w1 = "( " + where + " )" if where and len(where) else None
if self.idField and idList is not None and len(idList):
w2 = "( {0} IN ({1}) )".format(self.idField, ",".join(str(z) for z in idList))
else:
w2 = None
return "{0}{1}{2}".format(w1 if w1 else "",
" AND " if w1 and w2 else "",
w2 if w2 else "")
def _addAttributes(self, T, zoneId, attribValues):
T[zoneId] = T.get(zoneId, []) + [attribValues]
|
1f0319f7fcf6ad8ca88ef4233dd5f5bb5476c44f
|
f2e027bf993bdad87b894d606c1bc8335c032c29
|
/fw/make_thermistor_table.py
|
1ed5499355058f8735daf4f6f6b0d5515fd6c429
|
[
"Apache-2.0"
] |
permissive
|
mjbots/moteus
|
857546df67c4140a4c55ae73baab2077ada83849
|
da016eb5bb77c6d3a6cbc2191576579f26238a59
|
refs/heads/main
| 2023-08-08T22:02:53.352368
| 2023-08-08T15:00:02
| 2023-08-08T15:00:02
| 174,456,820
| 638
| 215
|
Apache-2.0
| 2023-05-06T16:05:15
| 2019-03-08T02:43:21
|
C++
|
UTF-8
|
Python
| false
| false
| 967
|
py
|
make_thermistor_table.py
|
#!/usr/bin/python3
# Copyright 2019 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
def temp(counts):
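    # Convert a raw 12-bit ADC count to degrees Celsius: recover the thermistor resistance
    # from the 3.3V / 10k voltage divider, then apply the Beta equation
    # (B = 4050, 47k reference resistance at 25 C).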
v = 3.3 * max(1, counts) / 4096.0
B = 4050.0
R = 10000.0
r_t = 3.3 * R / v - R
return 1.0 / (1.0 / (273.15 + 25.0) + (1.0 / B) * math.log(r_t / 47000)) - 273.15
def main():
for x in range(0, 4096, 128):
print(" {:.2f}f, // {}".format(temp(x), x))
if __name__ == '__main__':
main()
|
706b92dadf775e3c090e50a5b7f6c50ec8dc666e
|
643410b6ed94f3b11fb7fcab1dcb9f9f9532a8bb
|
/rolepermissions/decorators.py
|
6d61ef67fad4d5bec4bb2adae4b657e79ec1f0a9
|
[
"MIT"
] |
permissive
|
vintasoftware/django-role-permissions
|
51158a4c550fbce93b088a2563d0dbc93cdd962b
|
e74613d78d188788ee4c5c84b99341648e06e78c
|
refs/heads/master
| 2023-09-01T19:12:34.472365
| 2023-06-09T20:27:07
| 2023-06-09T20:27:07
| 13,388,701
| 666
| 130
|
MIT
| 2023-06-09T20:27:08
| 2013-10-07T16:13:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,581
|
py
|
decorators.py
|
from __future__ import unicode_literals
from functools import wraps
from django.conf import settings
from django.contrib.auth.views import redirect_to_login as dj_redirect_to_login
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect as dj_redirect
from rolepermissions.checkers import has_role, has_permission
from rolepermissions.utils import user_is_authenticated
def _role_permission_checker(function, subject, redirect_to_login, redirect_url):
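    # Build a view decorator: authenticated users for whom `function(user, subject)` is true reach
    # the view; everyone else is redirected to `redirect_url`, sent to the login page when
    # configured, or refused with PermissionDenied.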
def request_decorator(dispatch):
@wraps(dispatch)
def wrapper(request, *args, **kwargs):
user = request.user
if user_is_authenticated(user):
if function(user, subject):
return dispatch(request, *args, **kwargs)
if redirect_url:
return dj_redirect(redirect_url)
redirect = redirect_to_login
if redirect is None:
redirect = getattr(
settings, 'ROLEPERMISSIONS_REDIRECT_TO_LOGIN', False)
if redirect:
return dj_redirect_to_login(request.get_full_path())
raise PermissionDenied
return wrapper
return request_decorator
def has_role_decorator(role, redirect_to_login=None, redirect_url=None):
return _role_permission_checker(has_role, role, redirect_to_login, redirect_url)
def has_permission_decorator(permission_name, redirect_to_login=None, redirect_url=None):
return _role_permission_checker(has_permission, permission_name, redirect_to_login, redirect_url)
|
10748acef5837fa43c9e10142d1432f1bf824778
|
ccc00ce0041bd050a33880a84e68d6231610bfe5
|
/elasticdl/python/tests/elasticdl_job_service_test.py
|
616100bd2e732fe916b837123e49949956a2ff99
|
[
"MIT"
] |
permissive
|
sql-machine-learning/elasticdl
|
009bfa5bcd0c2e8ffd032c559af5d3b0d13e2ed2
|
86cd6ff7e175939a4bcd7938e334c4865acacb6d
|
refs/heads/develop
| 2023-08-31T06:13:50.904537
| 2023-08-15T10:39:31
| 2023-08-15T10:39:31
| 154,232,678
| 724
| 122
|
MIT
| 2023-08-15T10:39:33
| 2018-10-22T23:53:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,241
|
py
|
elasticdl_job_service_test.py
|
# Copyright 2020 The ElasticDL Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from elasticai_api.proto import elasticai_api_pb2
from elasticdl.python.common.args import parse_master_args
from elasticdl.python.master.elasticdl_job_service import ElasticdlJobService
from elasticdl.python.tests.test_utils import (
DatasetName,
TaskManager,
create_recordio_file,
)
from elasticdl_client.common.constants import DistributionStrategy
class ElasticdlJobServiceTest(unittest.TestCase):
def setUp(self):
self._model_zoo_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../../../model_zoo"
)
self.arguments = {
"num_ps_pods": "1",
"num_workers": "2",
"job_type": str(elasticai_api_pb2.TRAINING),
"minibatch_size": "32",
"model_zoo": self._model_zoo_path,
"model_def": "mnist.mnist_functional_api.custom_model",
"job_name": "test",
"worker_image": "ubuntu:18.04",
}
self._num_records = 128
def _get_args(self):
args = []
for key, value in self.arguments.items():
args.append("--" + key)
args.append(value)
return args
def test_create_master_for_allreduce(self):
self.arguments[
"distribution_strategy"
] = DistributionStrategy.ALLREDUCE
with tempfile.TemporaryDirectory() as temp_dir_name:
create_recordio_file(
self._num_records,
DatasetName.TEST_MODULE,
1,
temp_dir=temp_dir_name,
)
self.arguments["training_data"] = temp_dir_name
args = self._get_args()
args = parse_master_args(args)
master = ElasticdlJobService(args, TaskManager(args))
self.assertIsNotNone(master)
def test_create_master_without_eval(self):
self.arguments[
"distribution_strategy"
] = DistributionStrategy.ALLREDUCE
self.arguments["model_def"] = "mnist.mnist_functional_api.custom_model"
with tempfile.TemporaryDirectory() as temp_dir_name:
create_recordio_file(
self._num_records,
DatasetName.TEST_MODULE,
1,
temp_dir=temp_dir_name,
)
self.arguments["training_data"] = temp_dir_name
args = self._get_args()
args = parse_master_args(args)
master = ElasticdlJobService(args, TaskManager(args))
self.assertIsNone(master.evaluation_service)
if __name__ == "__main__":
unittest.main()
|
cd28afdf3ce5cac8fd3da4cdd50ef1404ea9a310
|
b60686a2e351a756f249e0d9faab8fe154a08f11
|
/tests/util/test_config.py
|
302eb9918a647da1829588fc550a37ad634c615e
|
[
"MIT",
"LicenseRef-scancode-generic-export-compliance"
] |
permissive
|
intel/dffml
|
86483b47229b9b62c9f8dfef51491aa02563347e
|
7d381bf67a72fe1ecb1012393d5726085564cb0e
|
refs/heads/main
| 2023-08-28T00:35:04.219193
| 2023-06-06T18:29:16
| 2023-06-06T18:29:16
| 149,512,216
| 237
| 204
|
MIT
| 2023-05-05T15:39:35
| 2018-09-19T21:06:34
|
Python
|
UTF-8
|
Python
| false
| false
| 824
|
py
|
test_config.py
|
import unittest
from dffml.util.config.numpy import numpy_docstring_args
from .double_ret import double_ret as numpy_double_ret
class TestMakeConfig(unittest.TestCase):
def test_numpy_docstring_args(self):
args = numpy_docstring_args(numpy_double_ret)
self.assertIn("super_cool_arg", args)
dtype, field = args["super_cool_arg"]
self.assertEqual(dtype, str)
self.assertEqual(
field.metadata["description"],
"Argument we want the string value of.",
)
self.assertIn("other_very_cool_arg", args)
dtype, field = args["other_very_cool_arg"]
self.assertEqual(dtype, dict)
self.assertEqual(
field.metadata["description"],
"Dictionary we want to turn into a tuple of (keys, values).",
)
|
6a57695606b2580523f64139c74591c639a0d3c9
|
f487532281c1c6a36a5c62a29744d8323584891b
|
/sdk/python/pulumi_azure/elasticcloud/get_elasticsearch.py
|
210ed3b126e52a93795f96d04dd864c96cc86738
|
[
"MPL-2.0",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure
|
a8f8f21c46c802aecf1397c737662ddcc438a2db
|
c16962e5c4f5810efec2806b8bb49d0da960d1ea
|
refs/heads/master
| 2023-08-25T00:17:05.290397
| 2023-08-24T06:11:55
| 2023-08-24T06:11:55
| 103,183,737
| 129
| 57
|
Apache-2.0
| 2023-09-13T05:44:10
| 2017-09-11T20:19:15
|
Java
|
UTF-8
|
Python
| false
| false
| 12,276
|
py
|
get_elasticsearch.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetElasticsearchResult',
'AwaitableGetElasticsearchResult',
'get_elasticsearch',
'get_elasticsearch_output',
]
@pulumi.output_type
class GetElasticsearchResult:
"""
A collection of values returned by getElasticsearch.
"""
def __init__(__self__, elastic_cloud_deployment_id=None, elastic_cloud_email_address=None, elastic_cloud_sso_default_url=None, elastic_cloud_user_id=None, elasticsearch_service_url=None, id=None, kibana_service_url=None, kibana_sso_uri=None, location=None, logs=None, monitoring_enabled=None, name=None, resource_group_name=None, sku_name=None, tags=None):
if elastic_cloud_deployment_id and not isinstance(elastic_cloud_deployment_id, str):
raise TypeError("Expected argument 'elastic_cloud_deployment_id' to be a str")
pulumi.set(__self__, "elastic_cloud_deployment_id", elastic_cloud_deployment_id)
if elastic_cloud_email_address and not isinstance(elastic_cloud_email_address, str):
raise TypeError("Expected argument 'elastic_cloud_email_address' to be a str")
pulumi.set(__self__, "elastic_cloud_email_address", elastic_cloud_email_address)
if elastic_cloud_sso_default_url and not isinstance(elastic_cloud_sso_default_url, str):
raise TypeError("Expected argument 'elastic_cloud_sso_default_url' to be a str")
pulumi.set(__self__, "elastic_cloud_sso_default_url", elastic_cloud_sso_default_url)
if elastic_cloud_user_id and not isinstance(elastic_cloud_user_id, str):
raise TypeError("Expected argument 'elastic_cloud_user_id' to be a str")
pulumi.set(__self__, "elastic_cloud_user_id", elastic_cloud_user_id)
if elasticsearch_service_url and not isinstance(elasticsearch_service_url, str):
raise TypeError("Expected argument 'elasticsearch_service_url' to be a str")
pulumi.set(__self__, "elasticsearch_service_url", elasticsearch_service_url)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kibana_service_url and not isinstance(kibana_service_url, str):
raise TypeError("Expected argument 'kibana_service_url' to be a str")
pulumi.set(__self__, "kibana_service_url", kibana_service_url)
if kibana_sso_uri and not isinstance(kibana_sso_uri, str):
raise TypeError("Expected argument 'kibana_sso_uri' to be a str")
pulumi.set(__self__, "kibana_sso_uri", kibana_sso_uri)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if logs and not isinstance(logs, list):
raise TypeError("Expected argument 'logs' to be a list")
pulumi.set(__self__, "logs", logs)
if monitoring_enabled and not isinstance(monitoring_enabled, bool):
raise TypeError("Expected argument 'monitoring_enabled' to be a bool")
pulumi.set(__self__, "monitoring_enabled", monitoring_enabled)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if sku_name and not isinstance(sku_name, str):
raise TypeError("Expected argument 'sku_name' to be a str")
pulumi.set(__self__, "sku_name", sku_name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="elasticCloudDeploymentId")
def elastic_cloud_deployment_id(self) -> str:
"""
The ID of the Deployment within Elastic Cloud.
"""
return pulumi.get(self, "elastic_cloud_deployment_id")
@property
@pulumi.getter(name="elasticCloudEmailAddress")
def elastic_cloud_email_address(self) -> str:
"""
The Email Address which is associated with this Elasticsearch account.
"""
return pulumi.get(self, "elastic_cloud_email_address")
@property
@pulumi.getter(name="elasticCloudSsoDefaultUrl")
def elastic_cloud_sso_default_url(self) -> str:
"""
The Default URL used for Single Sign On (SSO) to Elastic Cloud.
"""
return pulumi.get(self, "elastic_cloud_sso_default_url")
@property
@pulumi.getter(name="elasticCloudUserId")
def elastic_cloud_user_id(self) -> str:
"""
The ID of the User Account within Elastic Cloud.
"""
return pulumi.get(self, "elastic_cloud_user_id")
@property
@pulumi.getter(name="elasticsearchServiceUrl")
def elasticsearch_service_url(self) -> str:
"""
The URL to the Elasticsearch Service associated with this Elasticsearch.
"""
return pulumi.get(self, "elasticsearch_service_url")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="kibanaServiceUrl")
def kibana_service_url(self) -> str:
"""
The URL to the Kibana Dashboard associated with this Elasticsearch.
"""
return pulumi.get(self, "kibana_service_url")
@property
@pulumi.getter(name="kibanaSsoUri")
def kibana_sso_uri(self) -> str:
"""
The URI used for SSO to the Kibana Dashboard associated with this Elasticsearch.
"""
return pulumi.get(self, "kibana_sso_uri")
@property
@pulumi.getter
def location(self) -> str:
"""
The Azure Region in which this Elasticsearch exists.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def logs(self) -> Sequence['outputs.GetElasticsearchLogResult']:
"""
A `logs` block as defined below.
"""
return pulumi.get(self, "logs")
@property
@pulumi.getter(name="monitoringEnabled")
def monitoring_enabled(self) -> bool:
"""
Specifies if monitoring is enabled on this Elasticsearch or not.
"""
return pulumi.get(self, "monitoring_enabled")
@property
@pulumi.getter
def name(self) -> str:
"""
        The name of this Elasticsearch resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="skuName")
def sku_name(self) -> str:
"""
The name of the SKU used for this Elasticsearch.
"""
return pulumi.get(self, "sku_name")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A mapping of tags assigned to the Elasticsearch.
"""
return pulumi.get(self, "tags")
class AwaitableGetElasticsearchResult(GetElasticsearchResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetElasticsearchResult(
elastic_cloud_deployment_id=self.elastic_cloud_deployment_id,
elastic_cloud_email_address=self.elastic_cloud_email_address,
elastic_cloud_sso_default_url=self.elastic_cloud_sso_default_url,
elastic_cloud_user_id=self.elastic_cloud_user_id,
elasticsearch_service_url=self.elasticsearch_service_url,
id=self.id,
kibana_service_url=self.kibana_service_url,
kibana_sso_uri=self.kibana_sso_uri,
location=self.location,
logs=self.logs,
monitoring_enabled=self.monitoring_enabled,
name=self.name,
resource_group_name=self.resource_group_name,
sku_name=self.sku_name,
tags=self.tags)
def get_elasticsearch(logs: Optional[Sequence[pulumi.InputType['GetElasticsearchLogArgs']]] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetElasticsearchResult:
"""
Use this data source to access information about an existing Elasticsearch resource.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.elasticcloud.get_elasticsearch(name="my-elastic-search",
resource_group_name="example-resources")
pulumi.export("elasticsearchEndpoint", example.elasticsearch_service_url)
pulumi.export("kibanaEndpoint", example.kibana_service_url)
```
:param Sequence[pulumi.InputType['GetElasticsearchLogArgs']] logs: A `logs` block as defined below.
:param str name: The name of the Elasticsearch resource.
:param str resource_group_name: The name of the resource group in which the Elasticsearch exists.
"""
__args__ = dict()
__args__['logs'] = logs
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:elasticcloud/getElasticsearch:getElasticsearch', __args__, opts=opts, typ=GetElasticsearchResult).value
return AwaitableGetElasticsearchResult(
elastic_cloud_deployment_id=pulumi.get(__ret__, 'elastic_cloud_deployment_id'),
elastic_cloud_email_address=pulumi.get(__ret__, 'elastic_cloud_email_address'),
elastic_cloud_sso_default_url=pulumi.get(__ret__, 'elastic_cloud_sso_default_url'),
elastic_cloud_user_id=pulumi.get(__ret__, 'elastic_cloud_user_id'),
elasticsearch_service_url=pulumi.get(__ret__, 'elasticsearch_service_url'),
id=pulumi.get(__ret__, 'id'),
kibana_service_url=pulumi.get(__ret__, 'kibana_service_url'),
kibana_sso_uri=pulumi.get(__ret__, 'kibana_sso_uri'),
location=pulumi.get(__ret__, 'location'),
logs=pulumi.get(__ret__, 'logs'),
monitoring_enabled=pulumi.get(__ret__, 'monitoring_enabled'),
name=pulumi.get(__ret__, 'name'),
resource_group_name=pulumi.get(__ret__, 'resource_group_name'),
sku_name=pulumi.get(__ret__, 'sku_name'),
tags=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(get_elasticsearch)
def get_elasticsearch_output(logs: Optional[pulumi.Input[Optional[Sequence[pulumi.InputType['GetElasticsearchLogArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetElasticsearchResult]:
"""
Use this data source to access information about an existing Elasticsearch resource.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.elasticcloud.get_elasticsearch(name="my-elastic-search",
resource_group_name="example-resources")
pulumi.export("elasticsearchEndpoint", example.elasticsearch_service_url)
pulumi.export("kibanaEndpoint", example.kibana_service_url)
```
:param Sequence[pulumi.InputType['GetElasticsearchLogArgs']] logs: A `logs` block as defined below.
:param str name: The name of the Elasticsearch resource.
:param str resource_group_name: The name of the resource group in which the Elasticsearch exists.
"""
...
|
6dee6e3aa10697a737bb9bdfd504a2a5e9db3c57
|
422a48c4deaee6a02e4d9de5243dcda00f9a1d89
|
/sparse/_dok.py
|
1aa330b704b61c1e551d804a76532837bb2b8d6e
|
[
"BSD-3-Clause"
] |
permissive
|
pydata/sparse
|
3510bc2f2ef15d9a28299dd550ae1bf7931744c9
|
096f9b04e9abf666e442f6cfd87b18c9395bc157
|
refs/heads/master
| 2023-08-28T18:43:35.349118
| 2023-07-01T06:15:17
| 2023-07-01T06:15:17
| 88,426,738
| 467
| 102
|
BSD-3-Clause
| 2023-09-07T21:08:35
| 2017-04-16T16:12:10
|
Python
|
UTF-8
|
Python
| false
| false
| 16,399
|
py
|
_dok.py
|
from math import ceil
from numbers import Integral
from collections.abc import Iterable
import numpy as np
import scipy.sparse
from numpy.lib.mixins import NDArrayOperatorsMixin
from ._slicing import normalize_index
from ._utils import equivalent
from ._sparse_array import SparseArray
class DOK(SparseArray, NDArrayOperatorsMixin):
"""
A class for building sparse multidimensional arrays.
Parameters
----------
shape : tuple[int] (DOK.ndim,)
The shape of the array.
data : dict, optional
The key-value pairs for the data in this array.
dtype : np.dtype, optional
The data type of this array. If left empty, it is inferred from
the first element.
fill_value : scalar, optional
The fill value of this array.
Attributes
----------
dtype : numpy.dtype
The datatype of this array. Can be :code:`None` if no elements
have been set yet.
shape : tuple[int]
The shape of this array.
data : dict
The keys of this dictionary contain all the indices and the values
contain the nonzero entries.
See Also
--------
COO : A read-only sparse array.
Examples
--------
You can create :obj:`DOK` objects from Numpy arrays.
>>> x = np.eye(5, dtype=np.uint8)
>>> x[2, 3] = 5
>>> s = DOK.from_numpy(x)
>>> s
<DOK: shape=(5, 5), dtype=uint8, nnz=6, fill_value=0>
You can also create them from just shapes, and use slicing assignment.
>>> s2 = DOK((5, 5), dtype=np.int64)
>>> s2[1:3, 1:3] = [[4, 5], [6, 7]]
>>> s2
<DOK: shape=(5, 5), dtype=int64, nnz=4, fill_value=0>
You can convert :obj:`DOK` arrays to :obj:`COO` arrays, or :obj:`numpy.ndarray`
objects.
>>> from sparse import COO
>>> s3 = COO(s2)
>>> s3
<COO: shape=(5, 5), dtype=int64, nnz=4, fill_value=0>
>>> s2.todense() # doctest: +NORMALIZE_WHITESPACE
array([[0, 0, 0, 0, 0],
[0, 4, 5, 0, 0],
[0, 6, 7, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
>>> s4 = COO.from_numpy(np.eye(4, dtype=np.uint8))
>>> s4
<COO: shape=(4, 4), dtype=uint8, nnz=4, fill_value=0>
>>> s5 = DOK.from_coo(s4)
>>> s5
<DOK: shape=(4, 4), dtype=uint8, nnz=4, fill_value=0>
You can also create :obj:`DOK` arrays from a shape and a dict of
values. Zeros are automatically ignored.
>>> values = {
... (1, 2, 3): 4,
... (3, 2, 1): 0,
... }
>>> s6 = DOK((5, 5, 5), values)
>>> s6
<DOK: shape=(5, 5, 5), dtype=int64, nnz=1, fill_value=0.0>
"""
def __init__(self, shape, data=None, dtype=None, fill_value=None):
from ._coo import COO
self.data = dict()
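        # COO, ndarray and scipy.sparse inputs may be passed in place of a shape; they are converted directly.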
if isinstance(shape, COO):
ar = DOK.from_coo(shape)
self._make_shallow_copy_of(ar)
return
if isinstance(shape, np.ndarray):
ar = DOK.from_numpy(shape)
self._make_shallow_copy_of(ar)
return
if isinstance(shape, scipy.sparse.spmatrix):
ar = DOK.from_scipy_sparse(shape)
self._make_shallow_copy_of(ar)
return
self.dtype = np.dtype(dtype)
if not data:
data = dict()
super().__init__(shape, fill_value=fill_value)
if isinstance(data, dict):
if not dtype:
if not len(data):
self.dtype = np.dtype("float64")
else:
self.dtype = np.result_type(
*map(lambda x: np.asarray(x).dtype, data.values())
)
for c, d in data.items():
self[c] = d
else:
raise ValueError("data must be a dict.")
@classmethod
def from_scipy_sparse(cls, x):
"""
Create a :obj:`DOK` array from a :obj:`scipy.sparse.spmatrix`.
Parameters
----------
x : scipy.sparse.spmatrix
The matrix to convert.
Returns
-------
DOK
The equivalent :obj:`DOK` array.
Examples
--------
>>> x = scipy.sparse.rand(6, 3, density=0.2)
>>> s = DOK.from_scipy_sparse(x)
>>> np.array_equal(x.todense(), s.todense())
True
"""
from sparse import COO
return COO.from_scipy_sparse(x).asformat(cls)
@classmethod
def from_coo(cls, x):
"""
Get a :obj:`DOK` array from a :obj:`COO` array.
Parameters
----------
x : COO
The array to convert.
Returns
-------
DOK
The equivalent :obj:`DOK` array.
Examples
--------
>>> from sparse import COO
>>> s = COO.from_numpy(np.eye(4))
>>> s2 = DOK.from_coo(s)
>>> s2
<DOK: shape=(4, 4), dtype=float64, nnz=4, fill_value=0.0>
"""
ar = cls(x.shape, dtype=x.dtype, fill_value=x.fill_value)
for c, d in zip(x.coords.T, x.data):
ar.data[tuple(c)] = d
return ar
def to_coo(self):
"""
Convert this :obj:`DOK` array to a :obj:`COO` array.
Returns
-------
COO
The equivalent :obj:`COO` array.
Examples
--------
>>> s = DOK((5, 5))
>>> s[1:3, 1:3] = [[4, 5], [6, 7]]
>>> s
<DOK: shape=(5, 5), dtype=float64, nnz=4, fill_value=0.0>
>>> s2 = s.to_coo()
>>> s2
<COO: shape=(5, 5), dtype=float64, nnz=4, fill_value=0.0>
"""
from ._coo import COO
return COO(self)
@classmethod
def from_numpy(cls, x):
"""
Get a :obj:`DOK` array from a Numpy array.
Parameters
----------
x : np.ndarray
The array to convert.
Returns
-------
DOK
The equivalent :obj:`DOK` array.
Examples
--------
>>> s = DOK.from_numpy(np.eye(4))
>>> s
<DOK: shape=(4, 4), dtype=float64, nnz=4, fill_value=0.0>
"""
ar = cls(x.shape, dtype=x.dtype)
coords = np.nonzero(x)
data = x[coords]
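        # pair each nonzero value with its coordinates: c[0] is the value, c[1:] the index tuple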
for c in zip(data, *coords):
d, c = c[0], c[1:]
ar.data[c] = d
return ar
@property
def nnz(self):
"""
The number of nonzero elements in this array.
Returns
-------
int
The number of nonzero elements.
See Also
--------
COO.nnz : Equivalent :obj:`COO` array property.
numpy.count_nonzero : A similar Numpy function.
scipy.sparse.dok_matrix.nnz : The Scipy equivalent property.
Examples
--------
>>> values = {
... (1, 2, 3): 4,
... (3, 2, 1): 0,
... }
>>> s = DOK((5, 5, 5), values)
>>> s.nnz
1
"""
return len(self.data)
@property
def format(self):
"""
The storage format of this array.
Returns
-------
str
The storage format of this array.
See Also
-------
scipy.sparse.dok_matrix.format : The Scipy equivalent property.
Examples
-------
>>> import sparse
>>> s = sparse.random((5,5), density=0.2, format='dok')
>>> s.format
'dok'
>>> t = sparse.random((5,5), density=0.2, format='coo')
>>> t.format
'coo'
"""
return "dok"
@property
def nbytes(self):
"""
The number of bytes taken up by this object. Note that for small arrays,
this may undercount the number of bytes due to the large constant overhead.
Returns
-------
int
The approximate bytes of memory taken by this object.
See Also
--------
numpy.ndarray.nbytes : The equivalent Numpy property.
Examples
--------
>>> import sparse
>>> x = sparse.random((100,100),density=.1,format='dok')
>>> x.nbytes
8000
"""
return self.nnz * self.dtype.itemsize
def __getitem__(self, key):
if not isinstance(key, tuple):
key = (key,)
if all(isinstance(k, Iterable) for k in key):
if len(key) != self.ndim:
raise NotImplementedError(
f"Index sequences for all {self.ndim} array dimensions needed!"
)
if not all(len(key[0]) == len(k) for k in key):
raise IndexError("Unequal length of index sequences!")
return self._fancy_getitem(key)
key = normalize_index(key, self.shape)
ret = self.asformat("coo")[key]
if isinstance(ret, SparseArray):
ret = ret.asformat("dok")
return ret
def _fancy_getitem(self, key):
"""Subset of fancy indexing, when all dimensions are accessed"""
new_data = {}
for i, k in enumerate(zip(*key)):
if k in self.data:
new_data[i] = self.data[k]
return DOK(
            shape=(len(key[0]),),
data=new_data,
dtype=self.dtype,
fill_value=self.fill_value,
)
def __setitem__(self, key, value):
value = np.asarray(value, dtype=self.dtype)
# 1D fancy indexing
if (
self.ndim == 1
and isinstance(key, Iterable)
and all(isinstance(i, (int, np.integer)) for i in key)
):
key = (key,)
if isinstance(key, tuple) and all(isinstance(k, Iterable) for k in key):
if len(key) != self.ndim:
raise NotImplementedError(
f"Index sequences for all {self.ndim} array dimensions needed!"
)
if not all(len(key[0]) == len(k) for k in key):
raise IndexError("Unequal length of index sequences!")
self._fancy_setitem(key, value)
return
key = normalize_index(key, self.shape)
key_list = [int(k) if isinstance(k, Integral) else k for k in key]
self._setitem(key_list, value)
def _fancy_setitem(self, idxs, values):
idxs = tuple(np.asanyarray(idxs) for idxs in idxs)
if not all(np.issubdtype(k.dtype, np.integer) for k in idxs):
raise IndexError("Indices must be sequences of integer types!")
if idxs[0].ndim != 1:
raise IndexError("Indices are not 1d sequences!")
if values.ndim == 0:
values = np.full(idxs[0].size, values, self.dtype)
elif values.ndim > 1:
raise ValueError(f"Dimension of values ({values.ndim}) must be 0 or 1!")
if not idxs[0].shape == values.shape:
raise ValueError(
f"Shape mismatch of indices ({idxs[0].shape}) and values ({values.shape})!"
)
fill_value = self.fill_value
data = self.data
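        # keep only entries that differ from the fill value; assigning the fill value removes any stored entry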
for idx, value in zip(zip(*idxs), values):
if not value == fill_value:
data[idx] = value
elif idx in data:
del data[idx]
def _setitem(self, key_list, value):
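        # Expand slice indices one dimension at a time, recursing with each slice replaced by the
        # integers it covers; once only integers remain, the single element is stored or removed.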
value_missing_dims = (
len([ind for ind in key_list if isinstance(ind, slice)]) - value.ndim
)
if value_missing_dims < 0:
raise ValueError("setting an array element with a sequence.")
for i, ind in enumerate(key_list):
if isinstance(ind, slice):
step = ind.step if ind.step is not None else 1
if step > 0:
start = ind.start if ind.start is not None else 0
start = max(start, 0)
stop = ind.stop if ind.stop is not None else self.shape[i]
stop = min(stop, self.shape[i])
if start > stop:
start = stop
else:
start = ind.start or self.shape[i] - 1
stop = ind.stop if ind.stop is not None else -1
start = min(start, self.shape[i] - 1)
stop = max(stop, -1)
if start < stop:
start = stop
key_list_temp = key_list[:]
for v_idx, ki in enumerate(range(start, stop, step)):
key_list_temp[i] = ki
vi = (
value
if value_missing_dims > 0
else (value[0] if value.shape[0] == 1 else value[v_idx])
)
self._setitem(key_list_temp, vi)
return
elif not isinstance(ind, Integral):
raise IndexError(
"All indices must be slices or integers when setting an item."
)
key = tuple(key_list)
if not equivalent(value, self.fill_value):
self.data[key] = value[()]
elif key in self.data:
del self.data[key]
def __str__(self):
return "<DOK: shape={!s}, dtype={!s}, nnz={:d}, fill_value={!s}>".format(
self.shape, self.dtype, self.nnz, self.fill_value
)
__repr__ = __str__
def todense(self):
"""
Convert this :obj:`DOK` array into a Numpy array.
Returns
-------
numpy.ndarray
The equivalent dense array.
See Also
--------
COO.todense : Equivalent :obj:`COO` array method.
scipy.sparse.dok_matrix.todense : Equivalent Scipy method.
Examples
--------
>>> s = DOK((5, 5))
>>> s[1:3, 1:3] = [[4, 5], [6, 7]]
>>> s.todense() # doctest: +SKIP
array([[0., 0., 0., 0., 0.],
[0., 4., 5., 0., 0.],
[0., 6., 7., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
"""
result = np.full(self.shape, self.fill_value, self.dtype)
for c, d in self.data.items():
result[c] = d
return result
def asformat(self, format, **kwargs):
"""
Convert this sparse array to a given format.
Parameters
----------
format : str
A format string.
Returns
-------
out : SparseArray
The converted array.
Raises
------
NotImplementedError
If the format isn't supported.
"""
from ._utils import convert_format
format = convert_format(format)
if format == "dok":
return self
if format == "coo":
from ._coo import COO
if len(kwargs) != 0:
raise ValueError(f"Extra kwargs found: {kwargs}")
return COO.from_iter(
self.data,
shape=self.shape,
fill_value=self.fill_value,
dtype=self.dtype,
)
return self.asformat("coo").asformat(format, **kwargs)
def reshape(self, shape, order="C"):
"""
Returns a new :obj:`DOK` array that is a reshaped version of this array.
Parameters
----------
shape : tuple[int]
The desired shape of the output array.
Returns
-------
DOK
The reshaped output array.
See Also
--------
numpy.ndarray.reshape : The equivalent Numpy function.
Notes
-----
The :code:`order` parameter is provided just for compatibility with
Numpy and isn't actually supported.
Examples
--------
>>> s = DOK.from_numpy(np.arange(25))
>>> s2 = s.reshape((5, 5))
>>> s2.todense() # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
"""
if order not in {"C", None}:
raise NotImplementedError("The 'order' parameter is not supported")
return DOK.from_coo(self.to_coo().reshape(shape))
def to_slice(k):
"""Convert integer indices to one-element slices for consistency"""
if isinstance(k, Integral):
return slice(k, k + 1, 1)
return k
|
25ff85c3c868c36be14f90d30a2e14fe98afbb26
|
d47abf79e58b2982bd2f5359057126864fdb6e48
|
/b2sdk/transfer/outbound/copy_source.py
|
0c66f89d98fced30b15b157601d880e2d0d07f10
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
Backblaze/b2-sdk-python
|
ca90f4dca6a1a9a52c8dbccd0294bfc01290970a
|
072f96dfe90ff191cb74dd2b657564ed5649553c
|
refs/heads/master
| 2023-08-16T23:53:48.691286
| 2023-08-10T13:37:40
| 2023-08-10T13:37:40
| 168,011,367
| 160
| 67
|
NOASSERTION
| 2023-08-31T11:01:52
| 2019-01-28T18:13:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,078
|
py
|
copy_source.py
|
######################################################################
#
# File: b2sdk/transfer/outbound/copy_source.py
#
# Copyright 2020 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from __future__ import annotations
from b2sdk.encryption.setting import EncryptionSetting
from b2sdk.http_constants import LARGE_FILE_SHA1
from b2sdk.transfer.outbound.outbound_source import OutboundTransferSource
class CopySource(OutboundTransferSource):
def __init__(
self,
file_id,
offset=0,
length=None,
encryption: EncryptionSetting | None = None,
source_file_info=None,
source_content_type=None,
):
if not length and offset > 0:
raise ValueError('Cannot copy with non zero offset and unknown or zero length')
self.file_id = file_id
self.length = length
self.offset = offset
self.encryption = encryption
self.source_file_info = source_file_info
self.source_content_type = source_content_type
def __repr__(self):
return (
            '<{classname} file_id={file_id} offset={offset} length={length} id={id}, encryption={encryption}, '
            'source_content_type={source_content_type}, source_file_info={source_file_info}>'
).format(
classname=self.__class__.__name__,
file_id=self.file_id,
offset=self.offset,
length=self.length,
id=id(self),
encryption=self.encryption,
source_content_type=self.source_content_type,
source_file_info=self.source_file_info,
)
def get_content_length(self):
return self.length
def is_upload(self):
return False
def is_copy(self):
return True
def get_bytes_range(self):
if not self.length:
if self.offset > 0:
# auto mode should get file info and create correct copy source (with length)
raise ValueError(
'cannot return bytes range for non zero offset and unknown or zero length'
)
return None
return self.offset, self.offset + self.length - 1
def get_copy_source_range(self, relative_offset, range_length):
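        # Return a new CopySource covering a sub-range of this source; the offset is relative to this source's own offset.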
if self.length is not None and range_length + relative_offset > self.length:
raise ValueError('Range length overflow source length')
range_offset = self.offset + relative_offset
return self.__class__(
self.file_id,
range_offset,
range_length,
encryption=self.encryption,
source_file_info=self.source_file_info,
source_content_type=self.source_content_type
)
def get_content_sha1(self):
if self.offset or self.length:
# this is a copy of only a range of the source, can't copy the SHA1
return None
return self.source_file_info.get(LARGE_FILE_SHA1)
|
8dae7fb54725c97f67c1a66a4f3bd8b08e53b0b1
|
4cb9ceb8840ed18c9d5b17faee4eb5841f890c83
|
/tests/rustworkx_tests/graph/test_connected_components.py
|
f8b254abc75f2f506391e5f3eaf72fb09edf524f
|
[
"Apache-2.0"
] |
permissive
|
Qiskit/rustworkx
|
f5c4b94d21bee2fcac999cd2057246c307570bad
|
e0af0a26c71eadb6d15807efb6ec1cd4d3893244
|
refs/heads/main
| 2023-09-01T03:45:25.807566
| 2023-08-30T13:48:41
| 2023-08-30T13:48:41
| 186,025,444
| 250
| 38
|
Apache-2.0
| 2023-09-14T14:08:45
| 2019-05-10T17:09:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,177
|
py
|
test_connected_components.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import rustworkx
class TestConnectedComponents(unittest.TestCase):
def test_number_connected(self):
graph = rustworkx.PyGraph()
graph.add_nodes_from(range(3))
graph.add_edge(0, 1, None)
self.assertEqual(rustworkx.number_connected_components(graph), 2)
def test_number_connected_direct(self):
graph = rustworkx.PyDiGraph()
graph.add_nodes_from(range(4))
graph.add_edges_from_no_data([(3, 2), (2, 1), (1, 0)])
self.assertEqual(len(rustworkx.weakly_connected_components(graph)), 1)
def test_number_connected_node_holes(self):
graph = rustworkx.PyGraph()
graph.add_nodes_from(range(3))
graph.remove_node(1)
self.assertEqual(rustworkx.number_connected_components(graph), 2)
def test_connected_components(self):
graph = rustworkx.PyGraph()
graph.extend_from_edge_list(
[(0, 1), (1, 2), (2, 3), (3, 0), (4, 5), (5, 6), (6, 7), (7, 4)]
)
components = rustworkx.connected_components(graph)
self.assertEqual([{0, 1, 2, 3}, {4, 5, 6, 7}], components)
def test_node_connected_component(self):
graph = rustworkx.PyGraph()
graph.extend_from_edge_list(
[(0, 1), (1, 2), (2, 3), (3, 0), (4, 5), (5, 6), (6, 7), (7, 4)]
)
component = rustworkx.node_connected_component(graph, 0)
self.assertEqual({0, 1, 2, 3}, component)
def test_node_connected_component_invalid_node(self):
graph = rustworkx.PyGraph()
graph.extend_from_edge_list(
[(0, 1), (1, 2), (2, 3), (3, 0), (4, 5), (5, 6), (6, 7), (7, 4)]
)
with self.assertRaises(rustworkx.InvalidNode):
rustworkx.node_connected_component(graph, 10)
def test_is_connected_false(self):
graph = rustworkx.PyGraph()
graph.extend_from_edge_list(
[(0, 1), (1, 2), (2, 3), (3, 0), (4, 5), (5, 6), (6, 7), (7, 4)]
)
self.assertFalse(rustworkx.is_connected(graph))
def test_is_connected_true(self):
graph = rustworkx.PyGraph()
graph.extend_from_edge_list(
[
(0, 1),
(1, 2),
(2, 3),
(3, 0),
(2, 4),
(4, 5),
(5, 6),
(6, 7),
(7, 4),
]
)
self.assertTrue(rustworkx.is_connected(graph))
def test_is_connected_null_graph(self):
graph = rustworkx.PyGraph()
with self.assertRaises(rustworkx.NullGraph):
rustworkx.is_connected(graph)
|
628539d011c91be898d32fc785793416e2d7ecf3
|
b2fef77e77f77b6cfd83da4ec2f89cbe73330844
|
/monai/transforms/smooth_field/array.py
|
c9df5f1dbb8cc7ce1c4e224bfc0b75333db6c610
|
[
"Apache-2.0"
] |
permissive
|
Project-MONAI/MONAI
|
8ef2593cc5fd1cd16e13464f927fe563fe3f5bac
|
e48c3e2c741fa3fc705c4425d17ac4a5afac6c47
|
refs/heads/dev
| 2023-09-02T00:21:04.532596
| 2023-09-01T06:46:45
| 2023-09-01T06:46:45
| 214,485,001
| 4,805
| 996
|
Apache-2.0
| 2023-09-14T15:19:30
| 2019-10-11T16:41:38
|
Python
|
UTF-8
|
Python
| false
| false
| 17,833
|
py
|
array.py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transforms using a smooth spatial field generated by interpolating from smaller randomized fields."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Any
import numpy as np
import torch
from torch.nn.functional import grid_sample, interpolate
from monai.config.type_definitions import NdarrayOrTensor
from monai.data.meta_obj import get_track_meta
from monai.networks.utils import meshgrid_ij
from monai.transforms.transform import Randomizable, RandomizableTransform
from monai.transforms.utils_pytorch_numpy_unification import moveaxis
from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode
from monai.utils.enums import TransformBackends
from monai.utils.module import look_up_option
from monai.utils.type_conversion import convert_to_dst_type, convert_to_tensor
__all__ = ["SmoothField", "RandSmoothFieldAdjustContrast", "RandSmoothFieldAdjustIntensity", "RandSmoothDeform"]
class SmoothField(Randomizable):
"""
Generate a smooth field array by defining a smaller randomized field and then reinterpolating to the desired size.
This exploits interpolation to create a smoothly varying field used for other applications. An initial randomized
field is defined with `rand_size` dimensions with `pad` number of values padding it along each dimension using
`pad_val` as the value. If `spatial_size` is given this is interpolated to that size, otherwise if None the random
array is produced uninterpolated. The output is always a Pytorch tensor allocated on the specified device.
Args:
rand_size: size of the randomized field to start from
pad: number of pixels/voxels along the edges of the field to pad with `pad_val`
pad_val: value with which to pad field edges
low: low value for randomized field
high: high value for randomized field
channels: number of channels of final output
spatial_size: final output size of the array, None to produce original uninterpolated field
mode: interpolation mode for resizing the field
align_corners: if True align the corners when upsampling field
device: Pytorch device to define field on
"""
backend = [TransformBackends.TORCH]
def __init__(
self,
rand_size: Sequence[int],
pad: int = 0,
pad_val: float = 0,
low: float = -1.0,
high: float = 1.0,
channels: int = 1,
spatial_size: Sequence[int] | None = None,
mode: str = InterpolateMode.AREA,
align_corners: bool | None = None,
device: torch.device | None = None,
):
self.rand_size = tuple(rand_size)
self.pad = pad
self.low = low
self.high = high
self.channels = channels
self.mode = mode
self.align_corners = align_corners
self.device = device
self.spatial_size: Sequence[int] | None = None
self.spatial_zoom: Sequence[float] | None = None
if low >= high:
raise ValueError("Value for `low` must be less than `high` otherwise field will be zeros")
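        # The field is a padded buffer filled with pad_val; randomize() only refills the inner, unpadded region.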
self.total_rand_size = tuple(rs + self.pad * 2 for rs in self.rand_size)
self.field = torch.ones((1, self.channels) + self.total_rand_size, device=self.device) * pad_val
self.crand_size = (self.channels,) + self.rand_size
pad_slice = slice(None) if self.pad == 0 else slice(self.pad, -self.pad)
self.rand_slices = (0, slice(None)) + (pad_slice,) * len(self.rand_size)
self.set_spatial_size(spatial_size)
def randomize(self, data: Any | None = None) -> None:
self.field[self.rand_slices] = torch.from_numpy(self.R.uniform(self.low, self.high, self.crand_size))
def set_spatial_size(self, spatial_size: Sequence[int] | None) -> None:
"""
Set the `spatial_size` and `spatial_zoom` attributes used for interpolating the field to the given
dimension, or not interpolate at all if None.
Args:
spatial_size: new size to interpolate to, or None to not interpolate
"""
if spatial_size is None:
self.spatial_size = None
self.spatial_zoom = None
else:
self.spatial_size = tuple(spatial_size)
self.spatial_zoom = tuple(s / f for s, f in zip(self.spatial_size, self.total_rand_size))
def set_mode(self, mode: str) -> None:
self.mode = mode
def __call__(self, randomize=False) -> torch.Tensor:
if randomize:
self.randomize()
field = self.field.clone()
if self.spatial_zoom is not None:
resized_field = interpolate(
input=field,
scale_factor=self.spatial_zoom,
mode=look_up_option(self.mode, InterpolateMode),
align_corners=self.align_corners,
recompute_scale_factor=False,
)
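            # rescale the interpolated field back into the value range of the original random field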
mina = resized_field.min()
maxa = resized_field.max()
minv = self.field.min()
maxv = self.field.max()
# faster than rescale_array, this uses in-place operations and doesn't perform unneeded range checks
norm_field = (resized_field.squeeze(0) - mina).div_(maxa - mina)
field = norm_field.mul_(maxv - minv).add_(minv)
return field
class RandSmoothFieldAdjustContrast(RandomizableTransform):
"""
Randomly adjust the contrast of input images by calculating a randomized smooth field for each invocation.
This uses SmoothField internally to define the adjustment over the image. If `pad` is greater than 0 the
edges of the input volume of that width will be mostly unchanged. Contrast is changed by raising input
values by the power of the smooth field so the range of values given by `gamma` should be chosen with this
in mind. For example, a minimum value of 0 in `gamma` will produce white areas so this should be avoided.
After the contrast is adjusted the values of the result are rescaled to the range of the original input.
Args:
spatial_size: size of input array's spatial dimensions
rand_size: size of the randomized field to start from
pad: number of pixels/voxels along the edges of the field to pad with 1
mode: interpolation mode to use when upsampling
align_corners: if True align the corners when upsampling field
prob: probability transform is applied
gamma: (min, max) range for exponential field
device: Pytorch device to define field on
"""
backend = [TransformBackends.TORCH]
def __init__(
self,
spatial_size: Sequence[int],
rand_size: Sequence[int],
pad: int = 0,
mode: str = InterpolateMode.AREA,
align_corners: bool | None = None,
prob: float = 0.1,
gamma: Sequence[float] | float = (0.5, 4.5),
device: torch.device | None = None,
):
super().__init__(prob)
if isinstance(gamma, (int, float)):
self.gamma = (0.5, gamma)
else:
if len(gamma) != 2:
raise ValueError("Argument `gamma` should be a number or pair of numbers.")
self.gamma = (min(gamma), max(gamma))
self.sfield = SmoothField(
rand_size=rand_size,
pad=pad,
pad_val=1,
low=self.gamma[0],
high=self.gamma[1],
channels=1,
spatial_size=spatial_size,
mode=mode,
align_corners=align_corners,
device=device,
)
def set_random_state(
self, seed: int | None = None, state: np.random.RandomState | None = None
) -> RandSmoothFieldAdjustContrast:
super().set_random_state(seed, state)
self.sfield.set_random_state(seed, state)
return self
def randomize(self, data: Any | None = None) -> None:
super().randomize(None)
if self._do_transform:
self.sfield.randomize()
def set_mode(self, mode: str) -> None:
self.sfield.set_mode(mode)
def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
"""
Apply the transform to `img`, if `randomize` randomizing the smooth field otherwise reusing the previous.
"""
img = convert_to_tensor(img, track_meta=get_track_meta())
if randomize:
self.randomize()
if not self._do_transform:
return img
img_min = img.min()
img_max = img.max()
img_rng = img_max - img_min
field = self.sfield()
rfield, *_ = convert_to_dst_type(field, img)
# everything below here is to be computed using the destination type (numpy, tensor, etc.)
img = (img - img_min) / (img_rng + 1e-10) # rescale to unit values
img = img**rfield # contrast is changed by raising image data to a power, in this case the field
out = (img * img_rng) + img_min # rescale back to the original image value range
return out
class RandSmoothFieldAdjustIntensity(RandomizableTransform):
"""
Randomly adjust the intensity of input images by calculating a randomized smooth field for each invocation.
This uses SmoothField internally to define the adjustment over the image. If `pad` is greater than 0 the
edges of the input volume of that width will be mostly unchanged. Intensity is changed by multiplying the
inputs by the smooth field, so the values of `gamma` should be chosen with this in mind. The default values
of `(0.1, 1.0)` are sensible in that values will not be zeroed out by the field nor multiplied greater than
the original value range.
Args:
spatial_size: size of input array
rand_size: size of the randomized field to start from
pad: number of pixels/voxels along the edges of the field to pad with 1
mode: interpolation mode to use when upsampling
align_corners: if True align the corners when upsampling field
prob: probability transform is applied
gamma: (min, max) range of intensity multipliers
device: Pytorch device to define field on
"""
backend = [TransformBackends.TORCH]
def __init__(
self,
spatial_size: Sequence[int],
rand_size: Sequence[int],
pad: int = 0,
mode: str = InterpolateMode.AREA,
align_corners: bool | None = None,
prob: float = 0.1,
gamma: Sequence[float] | float = (0.1, 1.0),
device: torch.device | None = None,
):
super().__init__(prob)
if isinstance(gamma, (int, float)):
self.gamma = (0.5, gamma)
else:
if len(gamma) != 2:
raise ValueError("Argument `gamma` should be a number or pair of numbers.")
self.gamma = (min(gamma), max(gamma))
self.sfield = SmoothField(
rand_size=rand_size,
pad=pad,
pad_val=1,
low=self.gamma[0],
high=self.gamma[1],
channels=1,
spatial_size=spatial_size,
mode=mode,
align_corners=align_corners,
device=device,
)
def set_random_state(
self, seed: int | None = None, state: np.random.RandomState | None = None
) -> RandSmoothFieldAdjustIntensity:
super().set_random_state(seed, state)
self.sfield.set_random_state(seed, state)
return self
def randomize(self, data: Any | None = None) -> None:
super().randomize(None)
if self._do_transform:
self.sfield.randomize()
def set_mode(self, mode: str) -> None:
self.sfield.set_mode(mode)
def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
"""
Apply the transform to `img`, if `randomize` randomizing the smooth field otherwise reusing the previous.
"""
img = convert_to_tensor(img, track_meta=get_track_meta())
if randomize:
self.randomize()
if not self._do_transform:
return img
field = self.sfield()
rfield, *_ = convert_to_dst_type(field, img)
# everything below here is to be computed using the destination type (numpy, tensor, etc.)
out = img * rfield
return out
class RandSmoothDeform(RandomizableTransform):
"""
Deform an image using a random smooth field and Pytorch's grid_sample.
The amount of deformation is given by `def_range` in fractions of the size of the image. The size of each dimension
of the input image is always defined as 2 regardless of actual image voxel dimensions, that is the coordinates in
every dimension range from -1 to 1. A value of 0.1 means pixels/voxels can be moved by up to 5% of the image's size.
Args:
spatial_size: input array size to which deformation grid is interpolated
rand_size: size of the randomized field to start from
pad: number of pixels/voxels along the edges of the field to pad with 0
field_mode: interpolation mode to use when upsampling the deformation field
align_corners: if True align the corners when upsampling field
prob: probability transform is applied
def_range: value of the deformation range in image size fractions, single min/max value or min/max pair
grid_dtype: type for the deformation grid calculated from the field
grid_mode: interpolation mode used for sampling input using deformation grid
grid_padding_mode: padding mode used for sampling input using deformation grid
grid_align_corners: if True align the corners when sampling the deformation grid
device: Pytorch device to define field on
"""
backend = [TransformBackends.TORCH]
def __init__(
self,
spatial_size: Sequence[int],
rand_size: Sequence[int],
pad: int = 0,
field_mode: str = InterpolateMode.AREA,
align_corners: bool | None = None,
prob: float = 0.1,
def_range: Sequence[float] | float = 1.0,
grid_dtype=torch.float32,
grid_mode: str = GridSampleMode.NEAREST,
grid_padding_mode: str = GridSamplePadMode.BORDER,
grid_align_corners: bool | None = False,
device: torch.device | None = None,
):
super().__init__(prob)
self.grid_dtype = grid_dtype
self.grid_mode = grid_mode
self.def_range = def_range
self.device = device
self.grid_align_corners = grid_align_corners
self.grid_padding_mode = grid_padding_mode
if isinstance(def_range, (int, float)):
self.def_range = (-def_range, def_range)
else:
if len(def_range) != 2:
raise ValueError("Argument `def_range` should be a number or pair of numbers.")
self.def_range = (min(def_range), max(def_range))
self.sfield = SmoothField(
spatial_size=spatial_size,
rand_size=rand_size,
pad=pad,
low=self.def_range[0],
high=self.def_range[1],
channels=len(rand_size),
mode=field_mode,
align_corners=align_corners,
device=device,
)
grid_space = tuple(spatial_size) if spatial_size is not None else self.sfield.field.shape[2:]
grid_ranges = [torch.linspace(-1, 1, d) for d in grid_space]
grid = meshgrid_ij(*grid_ranges)
self.grid = torch.stack(grid).unsqueeze(0).to(self.device, self.grid_dtype)
def set_random_state(self, seed: int | None = None, state: np.random.RandomState | None = None) -> Randomizable:
super().set_random_state(seed, state)
self.sfield.set_random_state(seed, state)
return self
def randomize(self, data: Any | None = None) -> None:
super().randomize(None)
if self._do_transform:
self.sfield.randomize()
def set_field_mode(self, mode: str) -> None:
self.sfield.set_mode(mode)
def set_grid_mode(self, mode: str) -> None:
self.grid_mode = mode
def __call__(
self, img: NdarrayOrTensor, randomize: bool = True, device: torch.device | None = None
) -> NdarrayOrTensor:
img = convert_to_tensor(img, track_meta=get_track_meta())
if randomize:
self.randomize()
if not self._do_transform:
return img
device = device if device is not None else self.device
field = self.sfield()
dgrid = self.grid + field.to(self.grid_dtype)
dgrid = moveaxis(dgrid, 1, -1) # type: ignore
dgrid = dgrid[..., list(range(dgrid.shape[-1] - 1, -1, -1))] # invert order of coordinates
img_t = convert_to_tensor(img[None], torch.float32, device)
out = grid_sample(
input=img_t,
grid=dgrid,
mode=look_up_option(self.grid_mode, GridSampleMode),
align_corners=self.grid_align_corners,
padding_mode=look_up_option(self.grid_padding_mode, GridSamplePadMode),
)
out_t, *_ = convert_to_dst_type(out.squeeze(0), img)
return out_t
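# Illustrative usage sketch (not part of the source; values are assumptions): deform a
# channel-first image with displacements of at most ~5% of the image extent, sampling
# the deformed image bilinearly.
#
#     xform = RandSmoothDeform(
#         spatial_size=(64, 64), rand_size=(8, 8), prob=1.0,
#         def_range=0.1, grid_mode="bilinear"
#     )
#     out = xform(img)  # img: e.g. a (1, 64, 64) array/tensor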
|
8f70ab938b89d775729c84df58b7b573cf19759d
|
b7163b44b679e082fe97cf7fcd0c73b2fcdb38eb
|
/modules/dbnd/src/dbnd/_vendor/ascii_graph/colordata.py
|
0073e1cdd0c78f24e8109eeb0eabd4bfbee7c691
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
databand-ai/dbnd
|
70c95d95e12bfb8ab471a6dce27691ed658cb92d
|
d59c99dcdcd280d7eec36a693dd80f8c8c831ea2
|
refs/heads/develop
| 2023-06-24T18:07:56.524526
| 2023-05-28T07:57:36
| 2023-05-28T07:57:36
| 231,361,064
| 257
| 33
|
Apache-2.0
| 2023-08-06T08:30:28
| 2020-01-02T10:42:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,376
|
py
|
colordata.py
|
def vcolor(data, pattern):
""" Color a graph line by line
:param data: the data
:type data: list of tuples (info, value)
:param pattern: list of colors, this list defines
the pattern to color each line of the graph.
:type pattern: list of 'colors' (str)
:return: the colored graph
    :rtype: list of tuples (<info>, <value>, <color>)
"""
ret = []
l = len(pattern)
c = 0
for info, value in data:
ret.append((info, value, pattern[c]))
c = (c + 1) % l
return ret
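# Illustrative usage (data and colors are assumed, not from the source): cycle two
# ANSI colors over the lines of a graph.
#
#     data = [("sda1", 120), ("sdb1", 90), ("sdc1", 300)]
#     vcolor(data, ["\033[94m", "\033[92m"])
#     # -> [("sda1", 120, "\033[94m"), ("sdb1", 90, "\033[92m"), ("sdc1", 300, "\033[94m")]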
def hcolor(data, thresholds):
""" Multicolor a graph according to thresholds
:param data: the data
:type data: list of tuples (info, value)
:param thresholds: dict of thresholds, format
{<threshold>: <color>,}
:type thresholds: dict
:return: the colored graph
    :rtype: list of tuples (<info>, [(<value>, <color>), ...])
"""
ret = []
for info, value in data:
newval = []
minover = None
maxt = 0
for t in thresholds:
if maxt < t:
maxt = t
if value > t:
newval.append((t, thresholds[t]))
else:
if minover is None or minover > t:
minover = t
if minover is None:
minover = maxt
newval.append((value, thresholds[minover]))
ret.append((info, newval))
return ret
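# Illustrative usage (thresholds are assumed, not from the source): each value is split
# into colored segments, one per threshold it exceeds, plus a final segment colored by
# the smallest threshold the value does not exceed (or the largest threshold overall).
#
#     hcolor([("sda1", 150)], {100: "\033[92m", 200: "\033[93m"})
#     # -> [("sda1", [(100, "\033[92m"), (150, "\033[93m")])]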
|
b9f670d1962fd606991be35e4fc77f2059b33a74
|
d4412fbe37540e2c4cbe59ed6503d3661ccb7d9c
|
/tests/test_context/test_hybrid_parallel.py
|
9f26a5af53ce6d13fc0d00e4c4d8b949b1172b17
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] |
permissive
|
hpcaitech/ColossalAI
|
a082ed08a3807b53c49d1f86835b9808590d9042
|
c7b60f75470f067d1342705708810a660eabd684
|
refs/heads/main
| 2023-09-01T04:13:13.834565
| 2023-08-30T15:07:21
| 2023-08-30T15:07:21
| 422,274,596
| 32,044
| 4,084
|
Apache-2.0
| 2023-09-14T15:19:54
| 2021-10-28T16:19:44
|
Python
|
UTF-8
|
Python
| false
| false
| 6,006
|
py
|
test_hybrid_parallel.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from pathlib import Path
import pytest
import torch
from colossalai import launch
from colossalai.context import reset_seeds
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.global_variables import tensor_parallel_env as tp_env
from colossalai.testing import free_port, rerun_if_address_is_in_use, spawn
CONFIG_PATH_LIST = list(Path(__file__).parent.glob('configs/*.py'))
def check_data_parallel_rank(rank):
global_world_size = gpc.get_world_size(ParallelMode.GLOBAL)
mp_size = gpc.get_world_size(ParallelMode.MODEL)
num_dp_groups = global_world_size // mp_size
dp_local_rank = gpc.get_local_rank(ParallelMode.DATA)
assert gpc.get_world_size(ParallelMode.DATA) == num_dp_groups
for group_idx in range(num_dp_groups):
ranks_in_dp_group = range(group_idx * mp_size, (group_idx + 1) * mp_size)
if rank in ranks_in_dp_group:
assert dp_local_rank == group_idx
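# Worked example of the arithmetic above (sizes are assumed, not taken from the configs):
# with a global world size of 8 and a model-parallel size of 4 (e.g. tensor=2 x pipeline=2),
# there are 8 // 4 = 2 data-parallel groups; ranks 0-3 get data-parallel local rank 0 and
# ranks 4-7 get data-parallel local rank 1.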
def check_pipeline_parallel_rank(rank):
mp_world_size = gpc.get_world_size(ParallelMode.MODEL)
tp_world_size = gpc.get_world_size(ParallelMode.TENSOR)
num_pipeline_stage = mp_world_size // tp_world_size
pipeline_local_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
for stage_idx in range(num_pipeline_stage):
ranks_in_current_stage = range(stage_idx * tp_world_size, (stage_idx + 1) * tp_world_size)
if rank in ranks_in_current_stage:
assert stage_idx == pipeline_local_rank
def check_model_parallel_rank(rank):
mp_size = gpc.get_world_size(ParallelMode.MODEL)
rank_within_mp_group = rank % mp_size
mp_local_rank = gpc.get_local_rank(ParallelMode.MODEL)
assert rank_within_mp_group == mp_local_rank
def check_tensor_parallel_rank(rank):
if tp_env.mode == '2d':
check_2d_tensor_parallel_rank(rank)
    elif tp_env.mode == '2.5d':
check_2p5d_tensor_parallel_rank(rank)
    elif tp_env.mode == '3d':
check_3d_tensor_parallel_rank(rank)
def get_tp_info():
global_world_size = gpc.get_world_size(ParallelMode.GLOBAL)
tp_world_size = gpc.get_world_size(ParallelMode.TENSOR)
num_tp_groups = global_world_size // tp_world_size
tp_local_rank = gpc.get_local_rank(ParallelMode.TENSOR)
return tp_local_rank, tp_world_size, num_tp_groups
def check_2d_tensor_parallel_rank(rank):
tp_local_rank, tp_world_size, num_tp_groups = get_tp_info()
for group_id in range(num_tp_groups):
ranks_in_current_tp_group = range(group_id * tp_world_size, (group_id + 1) * tp_world_size)
if rank in ranks_in_current_tp_group:
col_local_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
row_local_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
assert col_local_rank == tp_local_rank // tp_env.summa_dim
assert row_local_rank == tp_local_rank % tp_env.summa_dim
def check_2p5d_tensor_parallel_rank(rank):
tp_local_rank, tp_world_size, num_tp_groups = get_tp_info()
for group_id in range(num_tp_groups):
ranks_in_current_tp_group = range(group_id * tp_world_size, (group_id + 1) * tp_world_size)
if rank in ranks_in_current_tp_group:
rp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
cp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
dp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
xp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_XZ)
assert rp_rank == tp_local_rank % tp_env.summa_dim
assert cp_rank == tp_local_rank // tp_env.tesseract_dim
assert dp_rank == tp_local_rank // (tp_env.summa_dim**2)
assert xp_rank == tp_local_rank // tp_env.summa_dim
def check_3d_tensor_parallel_rank(rank):
tp_local_rank, tp_world_size, num_tp_groups = get_tp_info()
for group_id in range(num_tp_groups):
ranks_in_current_tp_group = range(group_id * tp_world_size, (group_id + 1) * tp_world_size)
if rank in ranks_in_current_tp_group:
ip_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_INPUT)
wp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT)
op_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT)
assert ip_rank == tp_local_rank % tp_env.depth_3d
assert wp_rank == tp_local_rank // tp_env.depth_3d
assert op_rank == tp_local_rank // (tp_env.depth_3d**2)
def init_context(config_path, rank, world_size, backend, port, host):
dist_args = dict(config=config_path,
rank=rank,
world_size=world_size,
backend=backend,
port=port,
host=host,
verbose=True)
launch(**dist_args)
check_tensor_parallel_rank(rank)
check_data_parallel_rank(rank)
check_pipeline_parallel_rank(rank)
check_model_parallel_rank(rank)
gpc.destroy()
torch.cuda.empty_cache()
def run_dist(rank, world_size, port, backend, port_list, host):
for config_path, current_port in zip(CONFIG_PATH_LIST, port_list):
init_context(config_path=config_path,
rank=rank,
world_size=world_size,
backend=backend,
port=current_port,
host=host)
reset_seeds()
@pytest.mark.cpu
@rerun_if_address_is_in_use()
def test_context():
"""
As no computation or communication is done, we can run this test on CPU.
"""
world_size = 32
port_list = []
for _ in range(len(CONFIG_PATH_LIST)):
while True:
port = free_port()
if port not in port_list:
port_list.append(port)
break
spawn(run_dist, world_size, backend='gloo', port_list=port_list, host='localhost')
if __name__ == '__main__':
test_context()
|
0c93433c868da48ecca7219858b926457f374eed
|
d6a165271729a11bbe20c3724e877f29bdb580d1
|
/yabgp/tests/unit/message/test_notification.py
|
ac58b4f43f5beaf0965130e26d37b03fb06bce7f
|
[
"Apache-2.0"
] |
permissive
|
smartbgp/yabgp
|
67482f4368a39f777a99f9428ee5034fe8672977
|
24cbb732d4380ab54d000ac08690e521c60d4f2a
|
refs/heads/master
| 2023-06-22T23:44:20.481844
| 2023-06-21T11:01:08
| 2023-06-21T11:01:08
| 33,908,998
| 227
| 82
|
Apache-2.0
| 2023-06-21T11:01:10
| 2015-04-14T03:36:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,300
|
py
|
test_notification.py
|
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Test Notification message"""
import unittest
from yabgp.message.notification import Notification
class TestNotification(unittest.TestCase):
def test_parse(self):
msg_hex = b'\x03\x05\x00\x00'
noti_msg = Notification().parse(msg_hex)
self.assertEqual((3, 5, b'\x00\x00'), noti_msg)
def test_construct(self):
msg_hex = Notification().construct(error=3, suberror=5, data=b'\x00\x00')
hope_msg = b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff' \
b'\xff\xff\x00\x17\x03\x03\x05\x00\x00'
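        # Byte layout of the expected message: 16 x 0xff marker, 2-byte length
        # 0x0017 (= 23 bytes total), message type 0x03 (NOTIFICATION), then
        # error code 0x03, error subcode 0x05 and the 2-byte data payload.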
self.assertEqual(hope_msg, msg_hex)
if __name__ == '__main__':
unittest.main()
|
9148eda8555ce4741055c587f35af356370150a6
|
c530897cb72b6943c7226b25824444cad5f3503b
|
/usaspending_api/references/tests/integration/test_adv_search_filters_and_hashes.py
|
ed05e65b8ce952a4a7c3974123614ceabee62de2
|
[
"CC0-1.0"
] |
permissive
|
fedspendingtransparency/usaspending-api
|
fc63a22d32ea0207b7273d3e1ef26ba9dbabc42a
|
38f920438697930ae3ac57bbcaae9034877d8fb7
|
refs/heads/master
| 2023-09-01T22:00:36.633612
| 2023-08-29T18:39:18
| 2023-08-29T18:39:18
| 65,394,827
| 276
| 118
|
CC0-1.0
| 2023-09-14T20:33:15
| 2016-08-10T15:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,423
|
py
|
test_adv_search_filters_and_hashes.py
|
import pytest
from model_bakery import baker
from rest_framework import status
from usaspending_api.references.models import FilterHash
HASH_ENDPOINT = "/api/v2/references/hash/"
FILTER_ENDPOINT = "/api/v2/references/filter/"
@pytest.fixture
def stored_hashes(db):
baker.make("references.FilterHash", filter={}, hash="")
@pytest.mark.django_db
def test_missing_hash(client):
resp = client.post(
HASH_ENDPOINT, content_type="application/json", data={"hash": "1c89eccf09b7dc74a75b651af79602e7"}
)
assert resp.status_code == status.HTTP_400_BAD_REQUEST
@pytest.mark.django_db
def test_generate_hash_success(client):
resp = client.post(
FILTER_ENDPOINT, content_type="application/json", data={"filters": "Department of Transportation"}
)
assert resp.status_code == status.HTTP_200_OK
assert resp.data["hash"] == "1c89eccf09b7dc74a75b651af79602e7"
@pytest.mark.django_db
def test_new_hash(client):
filter_payload = {"filters": "Department of Transportation"}
resp = client.post(FILTER_ENDPOINT, content_type="application/json", data=filter_payload)
resp = client.post(
HASH_ENDPOINT, content_type="application/json", data={"hash": "1c89eccf09b7dc74a75b651af79602e7"}
)
assert resp.status_code == status.HTTP_200_OK
assert resp.data["filter"] == filter_payload
@pytest.mark.django_db
def test_hash_algorithm(client):
import hashlib
import json
filter_payloads = [
{"filters": "Department of Transportation"},
{"filters": {"agency": {"name": "Department of Transportation"}}},
{"filters": {"agency": {"name": "DOT", "level": "toptier"}}},
{"filters": {"def_codes": ["A", "B", "C", "9"], "cfda": ["10.987", "19.001"]}},
{"filters": {"agency": {"name": "Department of Transportation"}}},
{"empty": None},
]
def get_hash_from_api(payload):
return client.post(FILTER_ENDPOINT, content_type="application/json", data=payload).data["hash"]
def hash_payload(payload):
m = hashlib.md5()
m.update(json.dumps(payload).encode("utf8"))
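        # hexdigest() already yields a str; encoding it and slicing str(bytes)[2:-1]
        # just strips the surrounding b'...' wrapper, so this is equivalent to
        # returning m.hexdigest() directly.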
return str(m.hexdigest().encode("utf8"))[2:-1]
def get_filters_from_db(provided_hash):
return FilterHash.objects.get(hash=provided_hash).filter
for fp in filter_payloads:
assert get_hash_from_api(fp) == hash_payload(fp)
assert fp == get_filters_from_db(hash_payload(fp))
|
6426c7f482f35e594c28cec9a1430ef9a3c840f0
|
36977d5aba5592ec4ee2090d16958f90df11997d
|
/dit/math/combinatorics.py
|
90dfa15af0f6a0ffd1c06f9a0ebd85da5c5971f9
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
dit/dit
|
8589f969973b204fed828d1f8133f8c30de2cb6b
|
b13c5020a2b8524527a4a0db5a81d8549142228c
|
refs/heads/master
| 2023-08-31T03:58:57.651496
| 2023-08-30T21:55:54
| 2023-08-30T21:55:54
| 13,201,610
| 468
| 95
|
BSD-3-Clause
| 2023-08-29T03:54:31
| 2013-09-29T23:03:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,637
|
py
|
combinatorics.py
|
"""
Combinatorial functions.
"""
__all__ = (
'slots',
'unitsum_tuples',
)
def unitsum_tuples(n, k, mn, mx):
"""Generates unitsum k-tuples with elements from mn to mx.
This function is more general than slots(n,k,normalized=True), as it can
return unitsum vectors with elements outside of (0,1).
In order to generate unitsum samples, the following must be satisfied:
1 = mx + (k-1) * mn
Parameters
----------
n : int
The number of increments to include between mn and mx. n >= 1. The
meaning of n is similar to the n in slots(n,k) and represents the
number of ``items'' to place in each slot.
k : int
The length of the tuples (equivalently, the number of slots).
mn : float
The minimum value in the unitsum samples.
mx : float
The maximum value in the unitsum samples.
Examples
--------
>>> s = unitsum_tuples(3, 2, .2, .8)
>>> s.next()
(0.20000000000000001, 0.80000000000000004)
>>> s.next()
(0.40000000000000008, 0.60000000000000009)
>>> s.next()
(0.60000000000000009, 0.40000000000000002)
>>> s.next()
(0.80000000000000004, 0.19999999999999996)
"""
# In order to add up to 1 properly...we must have:
# sum((mx, mn/(k-1), ... , mn/(k-1))) == 1
s = mx + (k - 1) * mn
tol = 1e-9
if not (abs(s - 1) <= tol):
msg = "Specified min and max will not create unitsum tuples."
e = Exception(msg)
raise e
# Now we convert from "number of increments/items" to "number of points"
# The number of points behaviors similar to numpy.linspace(mn,mx,n)
n += 1
if mn < 0:
shift = float(abs(mn))
else:
shift = -float(mn)
seq, i = [mx + shift] * k + [0], k
while i:
t = tuple((seq[i] - seq[i + 1] - shift) for i in range(k))
# This should be a unitsum tuple.
s = float(sum(t))
assert s > .001
yield tuple(t)
for idx, val in enumerate(seq): # pragma: no branch
if abs(val) < 1e-9:
i = idx - 1
break
seq[i:k] = [seq[i] - (mx - mn) / float(n - 1)] * (k - i)
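# Illustrative usage (arguments are assumed): with mn=0 and mx=1 the unitsum constraint
# 1 = mx + (k-1)*mn holds for any k, and two increments over three slots should yield the
# six unitsum 3-tuples:
#
#     list(unitsum_tuples(2, 3, 0.0, 1.0))
#     # -> [(0.0, 0.0, 1.0), (0.0, 0.5, 0.5), (0.0, 1.0, 0.0),
#     #     (0.5, 0.0, 0.5), (0.5, 0.5, 0.0), (1.0, 0.0, 0.0)]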
# Thanks to Arnaud Delobelle
def slots(n, k, normalized=False):
"""Generates distributions of n identical items into k distinct slots.
A generator over distributions of n indistinguishable items into k
distinguishable slots, where each slot can hold up to n items.
Selection of items is done without replacement, and the order within the
slots cannot matter since the items are indistinguishable.
The number of distributions is (n + k - 1)! / n! / (k-1)!
Parameters
----------
n : int
The number of indistinguishable items.
k : int
The number of distinguishable slots.
normalized : bool
If True, then we divide each term in the tuple by the number of items.
The default value is False.
Yields
------
t : tuple
A tuple of length k where each element is an integer representing
the number of indistinguishable items within the slot.
Examples
--------
>>> list(slots(3,2))
[(0, 3), (1, 2), (2, 1), (3, 0)]
"""
seq, i = [n] * k + [0], k
if normalized:
nf = float(n)
while i:
yield tuple((seq[i] - seq[i + 1]) / nf for i in range(k))
i = seq.index(0) - 1
seq[i:k] = [seq[i] - 1] * (k - i)
else:
while i:
yield tuple((seq[i] - seq[i + 1]) for i in range(k))
i = seq.index(0) - 1
seq[i:k] = [seq[i] - 1] * (k - i)
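# Illustrative usage of the normalized form (arguments are assumed): distributing
# n=2 items over k=2 slots and dividing each count by n gives the unitsum pairs:
#
#     list(slots(2, 2, normalized=True))
#     # -> [(0.0, 1.0), (0.5, 0.5), (1.0, 0.0)]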
|
9918c61e08fb4e583167caea4b6b0bfb2958e286
|
3f763cf893b09a3be562858613c928703ff349e4
|
/client/verta/verta/_protos/public/modeldb/DatasetService_pb2.py
|
0296cb61ba0753b5eebd233401638c3cdb4cfd05
|
[
"Apache-2.0"
] |
permissive
|
VertaAI/modeldb
|
636e46fc025b01a514d599b10e228c8735503357
|
ec9ac7712500adb13fd815dfd476ce9f536c6921
|
refs/heads/main
| 2023-08-31T00:45:37.220628
| 2023-08-30T18:45:13
| 2023-08-30T18:45:13
| 71,305,435
| 844
| 142
|
Apache-2.0
| 2023-09-14T19:24:13
| 2016-10-19T01:07:26
|
Java
|
UTF-8
|
Python
| false
| true
| 83,489
|
py
|
DatasetService_pb2.py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modeldb/DatasetService.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from ..common import CommonService_pb2 as common_dot_CommonService__pb2
from ..modeldb import CommonService_pb2 as modeldb_dot_CommonService__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from ..modeldb import ExperimentService_pb2 as modeldb_dot_ExperimentService__pb2
from ..modeldb import ExperimentRunService_pb2 as modeldb_dot_ExperimentRunService__pb2
from ..uac import Collaborator_pb2 as uac_dot_Collaborator__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='modeldb/DatasetService.proto',
package='ai.verta.modeldb',
syntax='proto3',
serialized_options=b'P\001Z>github.com/VertaAI/modeldb/protos/gen/go/protos/public/modeldb',
serialized_pb=b'\n\x1cmodeldb/DatasetService.proto\x12\x10\x61i.verta.modeldb\x1a\x1a\x63ommon/CommonService.proto\x1a\x1bmodeldb/CommonService.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1fmodeldb/ExperimentService.proto\x1a\"modeldb/ExperimentRunService.proto\x1a\x16uac/Collaborator.proto\"\xb4\x05\n\x07\x44\x61taset\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05owner\x18\x03 \x01(\t\x12\x12\n\x08owner_id\x18\x11 \x01(\x04H\x00\x12\x32\n\x0egroup_owner_id\x18\x12 \x01(\x0b\x32\x18.ai.verta.common.GroupIdH\x00\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\x12\x0c\n\x04tags\x18\x05 \x03(\t\x12U\n\x12\x64\x61taset_visibility\x18\x06 \x01(\x0e\x32\x39.ai.verta.modeldb.DatasetVisibilityEnum.DatasetVisibility\x12\x43\n\x0c\x64\x61taset_type\x18\x07 \x01(\x0e\x32-.ai.verta.modeldb.DatasetTypeEnum.DatasetType\x12-\n\nattributes\x18\x08 \x03(\x0b\x32\x19.ai.verta.common.KeyValue\x12\x14\n\x0ctime_created\x18\t \x01(\x04\x12\x14\n\x0ctime_updated\x18\n \x01(\x04\x12\x14\n\x0cworkspace_id\x18\x0b \x01(\t\x12H\n\x0eworkspace_type\x18\x0c \x01(\x0e\x32\x30.ai.verta.common.WorkspaceTypeEnum.WorkspaceType\x12\x1c\n\x14workspace_service_id\x18\r \x01(\x04\x12@\n\x11\x63ustom_permission\x18\x0e \x01(\x0b\x32%.ai.verta.uac.CollaboratorPermissions\x12\x34\n\nvisibility\x18\x0f \x01(\x0e\x32 .ai.verta.uac.ResourceVisibility\x12\x16\n\x0eversion_number\x18\x10 \x01(\x04\x42\x10\n\x0eowner_tracking\">\n\x0f\x44\x61tasetTypeEnum\"+\n\x0b\x44\x61tasetType\x12\x07\n\x03RAW\x10\x00\x12\x08\n\x04PATH\x10\x01\x12\t\n\x05QUERY\x10\x02\"\\\n\x15\x44\x61tasetVisibilityEnum\"C\n\x11\x44\x61tasetVisibility\x12\x0b\n\x07PRIVATE\x10\x00\x12\n\n\x06PUBLIC\x10\x01\x12\x15\n\x11ORG_SCOPED_PUBLIC\x10\x02\"\xe9\x03\n\rCreateDataset\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0c\n\x04tags\x18\x03 \x03(\t\x12-\n\nattributes\x18\x04 \x03(\x0b\x32\x19.ai.verta.common.KeyValue\x12U\n\x12\x64\x61taset_visibility\x18\x05 \x01(\x0e\x32\x39.ai.verta.modeldb.DatasetVisibilityEnum.DatasetVisibility\x12\x43\n\x0c\x64\x61taset_type\x18\x06 \x01(\x0e\x32-.ai.verta.modeldb.DatasetTypeEnum.DatasetType\x12\x16\n\x0eworkspace_name\x18\x07 \x01(\t\x12\x14\n\x0ctime_created\x18\x08 \x01(\x04\x12@\n\x11\x63ustom_permission\x18\t \x01(\x0b\x32%.ai.verta.uac.CollaboratorPermissions\x12\x34\n\nvisibility\x18\n \x01(\x0e\x32 .ai.verta.uac.ResourceVisibility\x1a\x36\n\x08Response\x12*\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32\x19.ai.verta.modeldb.Dataset\"\xc6\x01\n\x0eGetAllDatasets\x12\x13\n\x0bpage_number\x18\x01 \x01(\x05\x12\x12\n\npage_limit\x18\x02 \x01(\x05\x12\x11\n\tascending\x18\x03 \x01(\x08\x12\x10\n\x08sort_key\x18\x04 \x01(\t\x12\x16\n\x0eworkspace_name\x18\x07 \x01(\t\x1aN\n\x08Response\x12+\n\x08\x64\x61tasets\x18\x01 \x03(\x0b\x32\x19.ai.verta.modeldb.Dataset\x12\x15\n\rtotal_records\x18\x02 \x01(\x04\"T\n\x0eGetDatasetById\x12\n\n\x02id\x18\x01 \x01(\t\x1a\x36\n\x08Response\x12*\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32\x19.ai.verta.modeldb.Dataset\"\xac\x01\n\x10GetDatasetByName\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x16\n\x0eworkspace_name\x18\x02 \x01(\t\x1ar\n\x08Response\x12\x32\n\x0f\x64\x61taset_by_user\x18\x01 \x01(\x0b\x32\x19.ai.verta.modeldb.Dataset\x12\x32\n\x0fshared_datasets\x18\x02 \x03(\x0b\x32\x19.ai.verta.modeldb.Dataset\"7\n\rDeleteDataset\x12\n\n\x02id\x18\x01 \x01(\t\x1a\x1a\n\x08Response\x12\x0e\n\x06status\x18\x01 \x01(\x08\"9\n\x0e\x44\x65leteDatasets\x12\x0b\n\x03ids\x18\x01 \x03(\t\x1a\x1a\n\x08Response\x12\x0e\n\x06status\x18\x01 
\x01(\x08\"\x9f\x02\n\x0c\x46indDatasets\x12\x13\n\x0b\x64\x61taset_ids\x18\x01 \x03(\t\x12\x32\n\npredicates\x18\x02 \x03(\x0b\x32\x1e.ai.verta.common.KeyValueQuery\x12\x10\n\x08ids_only\x18\x03 \x01(\x08\x12\x16\n\x0eworkspace_name\x18\x08 \x01(\t\x12\x13\n\x0bpage_number\x18\x04 \x01(\x05\x12\x12\n\npage_limit\x18\x05 \x01(\x05\x12\x11\n\tascending\x18\x06 \x01(\x08\x12\x10\n\x08sort_key\x18\x07 \x01(\t\x1aN\n\x08Response\x12+\n\x08\x64\x61tasets\x18\x01 \x03(\x0b\x32\x19.ai.verta.modeldb.Dataset\x12\x15\n\rtotal_records\x18\x02 \x01(\x03\"e\n\x11UpdateDatasetName\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x1a\x36\n\x08Response\x12*\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32\x19.ai.verta.modeldb.Dataset\"s\n\x18UpdateDatasetDescription\x12\n\n\x02id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x1a\x36\n\x08Response\x12*\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32\x19.ai.verta.modeldb.Dataset\"b\n\x0e\x41\x64\x64\x44\x61tasetTags\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04tags\x18\x02 \x03(\t\x1a\x36\n\x08Response\x12*\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32\x19.ai.verta.modeldb.Dataset\"y\n\x11\x44\x65leteDatasetTags\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04tags\x18\x02 \x03(\t\x12\x12\n\ndelete_all\x18\x03 \x01(\x08\x1a\x36\n\x08Response\x12*\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32\x19.ai.verta.modeldb.Dataset\"\x89\x01\n\x14\x41\x64\x64\x44\x61tasetAttributes\x12\n\n\x02id\x18\x01 \x01(\t\x12-\n\nattributes\x18\x02 \x03(\x0b\x32\x19.ai.verta.common.KeyValue\x1a\x36\n\x08Response\x12*\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32\x19.ai.verta.modeldb.Dataset\"\x8b\x01\n\x17UpdateDatasetAttributes\x12\n\n\x02id\x18\x01 \x01(\t\x12,\n\tattribute\x18\x02 \x01(\x0b\x32\x19.ai.verta.common.KeyValue\x1a\x36\n\x08Response\x12*\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32\x19.ai.verta.modeldb.Dataset\"\x89\x01\n\x17\x44\x65leteDatasetAttributes\x12\n\n\x02id\x18\x01 \x01(\t\x12\x16\n\x0e\x61ttribute_keys\x18\x02 \x03(\t\x12\x12\n\ndelete_all\x18\x03 \x01(\x08\x1a\x36\n\x08Response\x12*\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32\x19.ai.verta.modeldb.Dataset\"m\n\x19LastExperimentByDatasetId\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x1a<\n\x08Response\x12\x30\n\nexperiment\x18\x01 \x01(\x0b\x32\x1c.ai.verta.modeldb.Experiment\"u\n\x19GetExperimentRunByDataset\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x1a\x44\n\x08Response\x12\x38\n\x0f\x65xperiment_runs\x18\x01 \x03(\x0b\x32\x1f.ai.verta.modeldb.ExperimentRun2\xde\x13\n\x0e\x44\x61tasetService\x12\x80\x01\n\rcreateDataset\x12\x1f.ai.verta.modeldb.CreateDataset\x1a(.ai.verta.modeldb.CreateDataset.Response\"$\x82\xd3\xe4\x93\x02\x1e\"\x19/v1/dataset/createDataset:\x01*\x12\x81\x01\n\x0egetAllDatasets\x12 .ai.verta.modeldb.GetAllDatasets\x1a).ai.verta.modeldb.GetAllDatasets.Response\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/v1/dataset/getAllDatasets\x12\x81\x01\n\x0egetDatasetById\x12 .ai.verta.modeldb.GetDatasetById\x1a).ai.verta.modeldb.GetDatasetById.Response\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/v1/dataset/getDatasetById\x12\x89\x01\n\x10getDatasetByName\x12\".ai.verta.modeldb.GetDatasetByName\x1a+.ai.verta.modeldb.GetDatasetByName.Response\"$\x82\xd3\xe4\x93\x02\x1e\x12\x1c/v1/dataset/getDatasetByName\x12\x80\x01\n\rdeleteDataset\x12\x1f.ai.verta.modeldb.DeleteDataset\x1a(.ai.verta.modeldb.DeleteDataset.Response\"$\x82\xd3\xe4\x93\x02\x1e*\x19/v1/dataset/deleteDataset:\x01*\x12\x84\x01\n\x0e\x64\x65leteDatasets\x12 
.ai.verta.modeldb.DeleteDatasets\x1a).ai.verta.modeldb.DeleteDatasets.Response\"%\x82\xd3\xe4\x93\x02\x1f*\x1a/v1/dataset/deleteDatasets:\x01*\x12|\n\x0c\x66indDatasets\x12\x1e.ai.verta.modeldb.FindDatasets\x1a\'.ai.verta.modeldb.FindDatasets.Response\"#\x82\xd3\xe4\x93\x02\x1d\"\x18/v1/dataset/findDatasets:\x01*\x12\x90\x01\n\x11updateDatasetName\x12#.ai.verta.modeldb.UpdateDatasetName\x1a,.ai.verta.modeldb.UpdateDatasetName.Response\"(\x82\xd3\xe4\x93\x02\"\"\x1d/v1/dataset/updateDatasetName:\x01*\x12\xac\x01\n\x18updateDatasetDescription\x12*.ai.verta.modeldb.UpdateDatasetDescription\x1a\x33.ai.verta.modeldb.UpdateDatasetDescription.Response\"/\x82\xd3\xe4\x93\x02)\"$/v1/dataset/updateDatasetDescription:\x01*\x12\x84\x01\n\x0e\x61\x64\x64\x44\x61tasetTags\x12 .ai.verta.modeldb.AddDatasetTags\x1a).ai.verta.modeldb.AddDatasetTags.Response\"%\x82\xd3\xe4\x93\x02\x1f\"\x1a/v1/dataset/addDatasetTags:\x01*\x12s\n\x0egetDatasetTags\x12\x19.ai.verta.modeldb.GetTags\x1a\".ai.verta.modeldb.GetTags.Response\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/v1/dataset/getDatasetTags\x12\x90\x01\n\x11\x64\x65leteDatasetTags\x12#.ai.verta.modeldb.DeleteDatasetTags\x1a,.ai.verta.modeldb.DeleteDatasetTags.Response\"(\x82\xd3\xe4\x93\x02\"*\x1d/v1/dataset/deleteDatasetTags:\x01*\x12\x9c\x01\n\x14\x61\x64\x64\x44\x61tasetAttributes\x12&.ai.verta.modeldb.AddDatasetAttributes\x1a/.ai.verta.modeldb.AddDatasetAttributes.Response\"+\x82\xd3\xe4\x93\x02%\" /v1/dataset/addDatasetAttributes:\x01*\x12\xa8\x01\n\x17updateDatasetAttributes\x12).ai.verta.modeldb.UpdateDatasetAttributes\x1a\x32.ai.verta.modeldb.UpdateDatasetAttributes.Response\".\x82\xd3\xe4\x93\x02(\"#/v1/dataset/updateDatasetAttributes:\x01*\x12\xa8\x01\n\x17\x64\x65leteDatasetAttributes\x12).ai.verta.modeldb.DeleteDatasetAttributes\x1a\x32.ai.verta.modeldb.DeleteDatasetAttributes.Response\".\x82\xd3\xe4\x93\x02(*#/v1/dataset/deleteDatasetAttributes:\x01*\x12\xb3\x01\n\x1cgetLastExperimentByDatasetId\x12+.ai.verta.modeldb.LastExperimentByDatasetId\x1a\x34.ai.verta.modeldb.LastExperimentByDatasetId.Response\"0\x82\xd3\xe4\x93\x02*\x12(/v1/dataset/getLastExperimentByDatasetId\x12\xb0\x01\n\x19getExperimentRunByDataset\x12+.ai.verta.modeldb.GetExperimentRunByDataset\x1a\x34.ai.verta.modeldb.GetExperimentRunByDataset.Response\"0\x82\xd3\xe4\x93\x02*\"%/v1/dataset/getExperimentRunByDataset:\x01*BBP\x01Z>github.com/VertaAI/modeldb/protos/gen/go/protos/public/modeldbb\x06proto3'
,
dependencies=[common_dot_CommonService__pb2.DESCRIPTOR,modeldb_dot_CommonService__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,modeldb_dot_ExperimentService__pb2.DESCRIPTOR,modeldb_dot_ExperimentRunService__pb2.DESCRIPTOR,uac_dot_Collaborator__pb2.DESCRIPTOR,])
_DATASETTYPEENUM_DATASETTYPE = _descriptor.EnumDescriptor(
name='DatasetType',
full_name='ai.verta.modeldb.DatasetTypeEnum.DatasetType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='RAW', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PATH', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUERY', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=944,
serialized_end=987,
)
_sym_db.RegisterEnumDescriptor(_DATASETTYPEENUM_DATASETTYPE)
_DATASETVISIBILITYENUM_DATASETVISIBILITY = _descriptor.EnumDescriptor(
name='DatasetVisibility',
full_name='ai.verta.modeldb.DatasetVisibilityEnum.DatasetVisibility',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PRIVATE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PUBLIC', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ORG_SCOPED_PUBLIC', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1014,
serialized_end=1081,
)
_sym_db.RegisterEnumDescriptor(_DATASETVISIBILITYENUM_DATASETVISIBILITY)
_DATASET = _descriptor.Descriptor(
name='Dataset',
full_name='ai.verta.modeldb.Dataset',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.Dataset.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='ai.verta.modeldb.Dataset.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='owner', full_name='ai.verta.modeldb.Dataset.owner', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='owner_id', full_name='ai.verta.modeldb.Dataset.owner_id', index=3,
number=17, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='group_owner_id', full_name='ai.verta.modeldb.Dataset.group_owner_id', index=4,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='ai.verta.modeldb.Dataset.description', index=5,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='ai.verta.modeldb.Dataset.tags', index=6,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataset_visibility', full_name='ai.verta.modeldb.Dataset.dataset_visibility', index=7,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataset_type', full_name='ai.verta.modeldb.Dataset.dataset_type', index=8,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='attributes', full_name='ai.verta.modeldb.Dataset.attributes', index=9,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_created', full_name='ai.verta.modeldb.Dataset.time_created', index=10,
number=9, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_updated', full_name='ai.verta.modeldb.Dataset.time_updated', index=11,
number=10, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='workspace_id', full_name='ai.verta.modeldb.Dataset.workspace_id', index=12,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='workspace_type', full_name='ai.verta.modeldb.Dataset.workspace_type', index=13,
number=12, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='workspace_service_id', full_name='ai.verta.modeldb.Dataset.workspace_service_id', index=14,
number=13, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='custom_permission', full_name='ai.verta.modeldb.Dataset.custom_permission', index=15,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='visibility', full_name='ai.verta.modeldb.Dataset.visibility', index=16,
number=15, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version_number', full_name='ai.verta.modeldb.Dataset.version_number', index=17,
number=16, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='owner_tracking', full_name='ai.verta.modeldb.Dataset.owner_tracking',
index=0, containing_type=None, fields=[]),
],
serialized_start=231,
serialized_end=923,
)
_DATASETTYPEENUM = _descriptor.Descriptor(
name='DatasetTypeEnum',
full_name='ai.verta.modeldb.DatasetTypeEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_DATASETTYPEENUM_DATASETTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=925,
serialized_end=987,
)
_DATASETVISIBILITYENUM = _descriptor.Descriptor(
name='DatasetVisibilityEnum',
full_name='ai.verta.modeldb.DatasetVisibilityEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_DATASETVISIBILITYENUM_DATASETVISIBILITY,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=989,
serialized_end=1081,
)
_CREATEDATASET_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.CreateDataset.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset', full_name='ai.verta.modeldb.CreateDataset.Response.dataset', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1519,
serialized_end=1573,
)
_CREATEDATASET = _descriptor.Descriptor(
name='CreateDataset',
full_name='ai.verta.modeldb.CreateDataset',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='ai.verta.modeldb.CreateDataset.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='ai.verta.modeldb.CreateDataset.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='ai.verta.modeldb.CreateDataset.tags', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='attributes', full_name='ai.verta.modeldb.CreateDataset.attributes', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataset_visibility', full_name='ai.verta.modeldb.CreateDataset.dataset_visibility', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataset_type', full_name='ai.verta.modeldb.CreateDataset.dataset_type', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='workspace_name', full_name='ai.verta.modeldb.CreateDataset.workspace_name', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_created', full_name='ai.verta.modeldb.CreateDataset.time_created', index=7,
number=8, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='custom_permission', full_name='ai.verta.modeldb.CreateDataset.custom_permission', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='visibility', full_name='ai.verta.modeldb.CreateDataset.visibility', index=9,
number=10, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CREATEDATASET_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1084,
serialized_end=1573,
)
_GETALLDATASETS_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.GetAllDatasets.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='datasets', full_name='ai.verta.modeldb.GetAllDatasets.Response.datasets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total_records', full_name='ai.verta.modeldb.GetAllDatasets.Response.total_records', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1696,
serialized_end=1774,
)
_GETALLDATASETS = _descriptor.Descriptor(
name='GetAllDatasets',
full_name='ai.verta.modeldb.GetAllDatasets',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='page_number', full_name='ai.verta.modeldb.GetAllDatasets.page_number', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_limit', full_name='ai.verta.modeldb.GetAllDatasets.page_limit', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ascending', full_name='ai.verta.modeldb.GetAllDatasets.ascending', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sort_key', full_name='ai.verta.modeldb.GetAllDatasets.sort_key', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='workspace_name', full_name='ai.verta.modeldb.GetAllDatasets.workspace_name', index=4,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GETALLDATASETS_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1576,
serialized_end=1774,
)
_GETDATASETBYID_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.GetDatasetById.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset', full_name='ai.verta.modeldb.GetDatasetById.Response.dataset', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1519,
serialized_end=1573,
)
_GETDATASETBYID = _descriptor.Descriptor(
name='GetDatasetById',
full_name='ai.verta.modeldb.GetDatasetById',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.GetDatasetById.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GETDATASETBYID_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1776,
serialized_end=1860,
)
_GETDATASETBYNAME_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.GetDatasetByName.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset_by_user', full_name='ai.verta.modeldb.GetDatasetByName.Response.dataset_by_user', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shared_datasets', full_name='ai.verta.modeldb.GetDatasetByName.Response.shared_datasets', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1921,
serialized_end=2035,
)
_GETDATASETBYNAME = _descriptor.Descriptor(
name='GetDatasetByName',
full_name='ai.verta.modeldb.GetDatasetByName',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='ai.verta.modeldb.GetDatasetByName.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='workspace_name', full_name='ai.verta.modeldb.GetDatasetByName.workspace_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GETDATASETBYNAME_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1863,
serialized_end=2035,
)
_DELETEDATASET_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.DeleteDataset.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ai.verta.modeldb.DeleteDataset.Response.status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2066,
serialized_end=2092,
)
_DELETEDATASET = _descriptor.Descriptor(
name='DeleteDataset',
full_name='ai.verta.modeldb.DeleteDataset',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.DeleteDataset.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DELETEDATASET_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2037,
serialized_end=2092,
)
_DELETEDATASETS_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.DeleteDatasets.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ai.verta.modeldb.DeleteDatasets.Response.status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2066,
serialized_end=2092,
)
_DELETEDATASETS = _descriptor.Descriptor(
name='DeleteDatasets',
full_name='ai.verta.modeldb.DeleteDatasets',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ids', full_name='ai.verta.modeldb.DeleteDatasets.ids', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DELETEDATASETS_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2094,
serialized_end=2151,
)
_FINDDATASETS_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.FindDatasets.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='datasets', full_name='ai.verta.modeldb.FindDatasets.Response.datasets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total_records', full_name='ai.verta.modeldb.FindDatasets.Response.total_records', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2363,
serialized_end=2441,
)
_FINDDATASETS = _descriptor.Descriptor(
name='FindDatasets',
full_name='ai.verta.modeldb.FindDatasets',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset_ids', full_name='ai.verta.modeldb.FindDatasets.dataset_ids', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='predicates', full_name='ai.verta.modeldb.FindDatasets.predicates', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ids_only', full_name='ai.verta.modeldb.FindDatasets.ids_only', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='workspace_name', full_name='ai.verta.modeldb.FindDatasets.workspace_name', index=3,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_number', full_name='ai.verta.modeldb.FindDatasets.page_number', index=4,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_limit', full_name='ai.verta.modeldb.FindDatasets.page_limit', index=5,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ascending', full_name='ai.verta.modeldb.FindDatasets.ascending', index=6,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sort_key', full_name='ai.verta.modeldb.FindDatasets.sort_key', index=7,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_FINDDATASETS_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2154,
serialized_end=2441,
)
_UPDATEDATASETNAME_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.UpdateDatasetName.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset', full_name='ai.verta.modeldb.UpdateDatasetName.Response.dataset', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1519,
serialized_end=1573,
)
_UPDATEDATASETNAME = _descriptor.Descriptor(
name='UpdateDatasetName',
full_name='ai.verta.modeldb.UpdateDatasetName',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.UpdateDatasetName.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='ai.verta.modeldb.UpdateDatasetName.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_UPDATEDATASETNAME_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2443,
serialized_end=2544,
)
_UPDATEDATASETDESCRIPTION_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.UpdateDatasetDescription.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset', full_name='ai.verta.modeldb.UpdateDatasetDescription.Response.dataset', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1519,
serialized_end=1573,
)
_UPDATEDATASETDESCRIPTION = _descriptor.Descriptor(
name='UpdateDatasetDescription',
full_name='ai.verta.modeldb.UpdateDatasetDescription',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.UpdateDatasetDescription.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='ai.verta.modeldb.UpdateDatasetDescription.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_UPDATEDATASETDESCRIPTION_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2546,
serialized_end=2661,
)
_ADDDATASETTAGS_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.AddDatasetTags.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset', full_name='ai.verta.modeldb.AddDatasetTags.Response.dataset', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1519,
serialized_end=1573,
)
_ADDDATASETTAGS = _descriptor.Descriptor(
name='AddDatasetTags',
full_name='ai.verta.modeldb.AddDatasetTags',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.AddDatasetTags.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='ai.verta.modeldb.AddDatasetTags.tags', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ADDDATASETTAGS_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2663,
serialized_end=2761,
)
_DELETEDATASETTAGS_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.DeleteDatasetTags.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset', full_name='ai.verta.modeldb.DeleteDatasetTags.Response.dataset', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1519,
serialized_end=1573,
)
_DELETEDATASETTAGS = _descriptor.Descriptor(
name='DeleteDatasetTags',
full_name='ai.verta.modeldb.DeleteDatasetTags',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.DeleteDatasetTags.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='ai.verta.modeldb.DeleteDatasetTags.tags', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='delete_all', full_name='ai.verta.modeldb.DeleteDatasetTags.delete_all', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DELETEDATASETTAGS_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2763,
serialized_end=2884,
)
_ADDDATASETATTRIBUTES_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.AddDatasetAttributes.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset', full_name='ai.verta.modeldb.AddDatasetAttributes.Response.dataset', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1519,
serialized_end=1573,
)
_ADDDATASETATTRIBUTES = _descriptor.Descriptor(
name='AddDatasetAttributes',
full_name='ai.verta.modeldb.AddDatasetAttributes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.AddDatasetAttributes.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='attributes', full_name='ai.verta.modeldb.AddDatasetAttributes.attributes', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ADDDATASETATTRIBUTES_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2887,
serialized_end=3024,
)
_UPDATEDATASETATTRIBUTES_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.UpdateDatasetAttributes.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset', full_name='ai.verta.modeldb.UpdateDatasetAttributes.Response.dataset', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1519,
serialized_end=1573,
)
_UPDATEDATASETATTRIBUTES = _descriptor.Descriptor(
name='UpdateDatasetAttributes',
full_name='ai.verta.modeldb.UpdateDatasetAttributes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.UpdateDatasetAttributes.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='attribute', full_name='ai.verta.modeldb.UpdateDatasetAttributes.attribute', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_UPDATEDATASETATTRIBUTES_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3027,
serialized_end=3166,
)
_DELETEDATASETATTRIBUTES_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.DeleteDatasetAttributes.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset', full_name='ai.verta.modeldb.DeleteDatasetAttributes.Response.dataset', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1519,
serialized_end=1573,
)
_DELETEDATASETATTRIBUTES = _descriptor.Descriptor(
name='DeleteDatasetAttributes',
full_name='ai.verta.modeldb.DeleteDatasetAttributes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.DeleteDatasetAttributes.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='attribute_keys', full_name='ai.verta.modeldb.DeleteDatasetAttributes.attribute_keys', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='delete_all', full_name='ai.verta.modeldb.DeleteDatasetAttributes.delete_all', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DELETEDATASETATTRIBUTES_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3169,
serialized_end=3306,
)
_LASTEXPERIMENTBYDATASETID_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.LastExperimentByDatasetId.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='experiment', full_name='ai.verta.modeldb.LastExperimentByDatasetId.Response.experiment', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3357,
serialized_end=3417,
)
_LASTEXPERIMENTBYDATASETID = _descriptor.Descriptor(
name='LastExperimentByDatasetId',
full_name='ai.verta.modeldb.LastExperimentByDatasetId',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset_id', full_name='ai.verta.modeldb.LastExperimentByDatasetId.dataset_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LASTEXPERIMENTBYDATASETID_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3308,
serialized_end=3417,
)
_GETEXPERIMENTRUNBYDATASET_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.GetExperimentRunByDataset.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='experiment_runs', full_name='ai.verta.modeldb.GetExperimentRunByDataset.Response.experiment_runs', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3468,
serialized_end=3536,
)
_GETEXPERIMENTRUNBYDATASET = _descriptor.Descriptor(
name='GetExperimentRunByDataset',
full_name='ai.verta.modeldb.GetExperimentRunByDataset',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset_id', full_name='ai.verta.modeldb.GetExperimentRunByDataset.dataset_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GETEXPERIMENTRUNBYDATASET_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3419,
serialized_end=3536,
)
_DATASET.fields_by_name['group_owner_id'].message_type = common_dot_CommonService__pb2._GROUPID
_DATASET.fields_by_name['dataset_visibility'].enum_type = _DATASETVISIBILITYENUM_DATASETVISIBILITY
_DATASET.fields_by_name['dataset_type'].enum_type = _DATASETTYPEENUM_DATASETTYPE
_DATASET.fields_by_name['attributes'].message_type = common_dot_CommonService__pb2._KEYVALUE
_DATASET.fields_by_name['workspace_type'].enum_type = common_dot_CommonService__pb2._WORKSPACETYPEENUM_WORKSPACETYPE
_DATASET.fields_by_name['custom_permission'].message_type = uac_dot_Collaborator__pb2._COLLABORATORPERMISSIONS
_DATASET.fields_by_name['visibility'].enum_type = uac_dot_Collaborator__pb2._RESOURCEVISIBILITY
_DATASET.oneofs_by_name['owner_tracking'].fields.append(
_DATASET.fields_by_name['owner_id'])
_DATASET.fields_by_name['owner_id'].containing_oneof = _DATASET.oneofs_by_name['owner_tracking']
_DATASET.oneofs_by_name['owner_tracking'].fields.append(
_DATASET.fields_by_name['group_owner_id'])
_DATASET.fields_by_name['group_owner_id'].containing_oneof = _DATASET.oneofs_by_name['owner_tracking']
_DATASETTYPEENUM_DATASETTYPE.containing_type = _DATASETTYPEENUM
_DATASETVISIBILITYENUM_DATASETVISIBILITY.containing_type = _DATASETVISIBILITYENUM
_CREATEDATASET_RESPONSE.fields_by_name['dataset'].message_type = _DATASET
_CREATEDATASET_RESPONSE.containing_type = _CREATEDATASET
_CREATEDATASET.fields_by_name['attributes'].message_type = common_dot_CommonService__pb2._KEYVALUE
_CREATEDATASET.fields_by_name['dataset_visibility'].enum_type = _DATASETVISIBILITYENUM_DATASETVISIBILITY
_CREATEDATASET.fields_by_name['dataset_type'].enum_type = _DATASETTYPEENUM_DATASETTYPE
_CREATEDATASET.fields_by_name['custom_permission'].message_type = uac_dot_Collaborator__pb2._COLLABORATORPERMISSIONS
_CREATEDATASET.fields_by_name['visibility'].enum_type = uac_dot_Collaborator__pb2._RESOURCEVISIBILITY
_GETALLDATASETS_RESPONSE.fields_by_name['datasets'].message_type = _DATASET
_GETALLDATASETS_RESPONSE.containing_type = _GETALLDATASETS
_GETDATASETBYID_RESPONSE.fields_by_name['dataset'].message_type = _DATASET
_GETDATASETBYID_RESPONSE.containing_type = _GETDATASETBYID
_GETDATASETBYNAME_RESPONSE.fields_by_name['dataset_by_user'].message_type = _DATASET
_GETDATASETBYNAME_RESPONSE.fields_by_name['shared_datasets'].message_type = _DATASET
_GETDATASETBYNAME_RESPONSE.containing_type = _GETDATASETBYNAME
_DELETEDATASET_RESPONSE.containing_type = _DELETEDATASET
_DELETEDATASETS_RESPONSE.containing_type = _DELETEDATASETS
_FINDDATASETS_RESPONSE.fields_by_name['datasets'].message_type = _DATASET
_FINDDATASETS_RESPONSE.containing_type = _FINDDATASETS
_FINDDATASETS.fields_by_name['predicates'].message_type = common_dot_CommonService__pb2._KEYVALUEQUERY
_UPDATEDATASETNAME_RESPONSE.fields_by_name['dataset'].message_type = _DATASET
_UPDATEDATASETNAME_RESPONSE.containing_type = _UPDATEDATASETNAME
_UPDATEDATASETDESCRIPTION_RESPONSE.fields_by_name['dataset'].message_type = _DATASET
_UPDATEDATASETDESCRIPTION_RESPONSE.containing_type = _UPDATEDATASETDESCRIPTION
_ADDDATASETTAGS_RESPONSE.fields_by_name['dataset'].message_type = _DATASET
_ADDDATASETTAGS_RESPONSE.containing_type = _ADDDATASETTAGS
_DELETEDATASETTAGS_RESPONSE.fields_by_name['dataset'].message_type = _DATASET
_DELETEDATASETTAGS_RESPONSE.containing_type = _DELETEDATASETTAGS
_ADDDATASETATTRIBUTES_RESPONSE.fields_by_name['dataset'].message_type = _DATASET
_ADDDATASETATTRIBUTES_RESPONSE.containing_type = _ADDDATASETATTRIBUTES
_ADDDATASETATTRIBUTES.fields_by_name['attributes'].message_type = common_dot_CommonService__pb2._KEYVALUE
_UPDATEDATASETATTRIBUTES_RESPONSE.fields_by_name['dataset'].message_type = _DATASET
_UPDATEDATASETATTRIBUTES_RESPONSE.containing_type = _UPDATEDATASETATTRIBUTES
_UPDATEDATASETATTRIBUTES.fields_by_name['attribute'].message_type = common_dot_CommonService__pb2._KEYVALUE
_DELETEDATASETATTRIBUTES_RESPONSE.fields_by_name['dataset'].message_type = _DATASET
_DELETEDATASETATTRIBUTES_RESPONSE.containing_type = _DELETEDATASETATTRIBUTES
_LASTEXPERIMENTBYDATASETID_RESPONSE.fields_by_name['experiment'].message_type = modeldb_dot_ExperimentService__pb2._EXPERIMENT
_LASTEXPERIMENTBYDATASETID_RESPONSE.containing_type = _LASTEXPERIMENTBYDATASETID
_GETEXPERIMENTRUNBYDATASET_RESPONSE.fields_by_name['experiment_runs'].message_type = modeldb_dot_ExperimentRunService__pb2._EXPERIMENTRUN
_GETEXPERIMENTRUNBYDATASET_RESPONSE.containing_type = _GETEXPERIMENTRUNBYDATASET
DESCRIPTOR.message_types_by_name['Dataset'] = _DATASET
DESCRIPTOR.message_types_by_name['DatasetTypeEnum'] = _DATASETTYPEENUM
DESCRIPTOR.message_types_by_name['DatasetVisibilityEnum'] = _DATASETVISIBILITYENUM
DESCRIPTOR.message_types_by_name['CreateDataset'] = _CREATEDATASET
DESCRIPTOR.message_types_by_name['GetAllDatasets'] = _GETALLDATASETS
DESCRIPTOR.message_types_by_name['GetDatasetById'] = _GETDATASETBYID
DESCRIPTOR.message_types_by_name['GetDatasetByName'] = _GETDATASETBYNAME
DESCRIPTOR.message_types_by_name['DeleteDataset'] = _DELETEDATASET
DESCRIPTOR.message_types_by_name['DeleteDatasets'] = _DELETEDATASETS
DESCRIPTOR.message_types_by_name['FindDatasets'] = _FINDDATASETS
DESCRIPTOR.message_types_by_name['UpdateDatasetName'] = _UPDATEDATASETNAME
DESCRIPTOR.message_types_by_name['UpdateDatasetDescription'] = _UPDATEDATASETDESCRIPTION
DESCRIPTOR.message_types_by_name['AddDatasetTags'] = _ADDDATASETTAGS
DESCRIPTOR.message_types_by_name['DeleteDatasetTags'] = _DELETEDATASETTAGS
DESCRIPTOR.message_types_by_name['AddDatasetAttributes'] = _ADDDATASETATTRIBUTES
DESCRIPTOR.message_types_by_name['UpdateDatasetAttributes'] = _UPDATEDATASETATTRIBUTES
DESCRIPTOR.message_types_by_name['DeleteDatasetAttributes'] = _DELETEDATASETATTRIBUTES
DESCRIPTOR.message_types_by_name['LastExperimentByDatasetId'] = _LASTEXPERIMENTBYDATASETID
DESCRIPTOR.message_types_by_name['GetExperimentRunByDataset'] = _GETEXPERIMENTRUNBYDATASET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Dataset = _reflection.GeneratedProtocolMessageType('Dataset', (_message.Message,), {
'DESCRIPTOR' : _DATASET,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.Dataset)
})
_sym_db.RegisterMessage(Dataset)
DatasetTypeEnum = _reflection.GeneratedProtocolMessageType('DatasetTypeEnum', (_message.Message,), {
'DESCRIPTOR' : _DATASETTYPEENUM,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.DatasetTypeEnum)
})
_sym_db.RegisterMessage(DatasetTypeEnum)
DatasetVisibilityEnum = _reflection.GeneratedProtocolMessageType('DatasetVisibilityEnum', (_message.Message,), {
'DESCRIPTOR' : _DATASETVISIBILITYENUM,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.DatasetVisibilityEnum)
})
_sym_db.RegisterMessage(DatasetVisibilityEnum)
CreateDataset = _reflection.GeneratedProtocolMessageType('CreateDataset', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _CREATEDATASET_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.CreateDataset.Response)
})
,
'DESCRIPTOR' : _CREATEDATASET,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.CreateDataset)
})
_sym_db.RegisterMessage(CreateDataset)
_sym_db.RegisterMessage(CreateDataset.Response)
GetAllDatasets = _reflection.GeneratedProtocolMessageType('GetAllDatasets', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _GETALLDATASETS_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.GetAllDatasets.Response)
})
,
'DESCRIPTOR' : _GETALLDATASETS,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.GetAllDatasets)
})
_sym_db.RegisterMessage(GetAllDatasets)
_sym_db.RegisterMessage(GetAllDatasets.Response)
GetDatasetById = _reflection.GeneratedProtocolMessageType('GetDatasetById', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _GETDATASETBYID_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.GetDatasetById.Response)
})
,
'DESCRIPTOR' : _GETDATASETBYID,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.GetDatasetById)
})
_sym_db.RegisterMessage(GetDatasetById)
_sym_db.RegisterMessage(GetDatasetById.Response)
GetDatasetByName = _reflection.GeneratedProtocolMessageType('GetDatasetByName', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _GETDATASETBYNAME_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.GetDatasetByName.Response)
})
,
'DESCRIPTOR' : _GETDATASETBYNAME,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.GetDatasetByName)
})
_sym_db.RegisterMessage(GetDatasetByName)
_sym_db.RegisterMessage(GetDatasetByName.Response)
DeleteDataset = _reflection.GeneratedProtocolMessageType('DeleteDataset', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _DELETEDATASET_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.DeleteDataset.Response)
})
,
'DESCRIPTOR' : _DELETEDATASET,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.DeleteDataset)
})
_sym_db.RegisterMessage(DeleteDataset)
_sym_db.RegisterMessage(DeleteDataset.Response)
DeleteDatasets = _reflection.GeneratedProtocolMessageType('DeleteDatasets', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _DELETEDATASETS_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.DeleteDatasets.Response)
})
,
'DESCRIPTOR' : _DELETEDATASETS,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.DeleteDatasets)
})
_sym_db.RegisterMessage(DeleteDatasets)
_sym_db.RegisterMessage(DeleteDatasets.Response)
FindDatasets = _reflection.GeneratedProtocolMessageType('FindDatasets', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _FINDDATASETS_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.FindDatasets.Response)
})
,
'DESCRIPTOR' : _FINDDATASETS,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.FindDatasets)
})
_sym_db.RegisterMessage(FindDatasets)
_sym_db.RegisterMessage(FindDatasets.Response)
UpdateDatasetName = _reflection.GeneratedProtocolMessageType('UpdateDatasetName', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _UPDATEDATASETNAME_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.UpdateDatasetName.Response)
})
,
'DESCRIPTOR' : _UPDATEDATASETNAME,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.UpdateDatasetName)
})
_sym_db.RegisterMessage(UpdateDatasetName)
_sym_db.RegisterMessage(UpdateDatasetName.Response)
UpdateDatasetDescription = _reflection.GeneratedProtocolMessageType('UpdateDatasetDescription', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _UPDATEDATASETDESCRIPTION_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.UpdateDatasetDescription.Response)
})
,
'DESCRIPTOR' : _UPDATEDATASETDESCRIPTION,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.UpdateDatasetDescription)
})
_sym_db.RegisterMessage(UpdateDatasetDescription)
_sym_db.RegisterMessage(UpdateDatasetDescription.Response)
AddDatasetTags = _reflection.GeneratedProtocolMessageType('AddDatasetTags', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _ADDDATASETTAGS_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.AddDatasetTags.Response)
})
,
'DESCRIPTOR' : _ADDDATASETTAGS,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.AddDatasetTags)
})
_sym_db.RegisterMessage(AddDatasetTags)
_sym_db.RegisterMessage(AddDatasetTags.Response)
DeleteDatasetTags = _reflection.GeneratedProtocolMessageType('DeleteDatasetTags', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _DELETEDATASETTAGS_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.DeleteDatasetTags.Response)
})
,
'DESCRIPTOR' : _DELETEDATASETTAGS,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.DeleteDatasetTags)
})
_sym_db.RegisterMessage(DeleteDatasetTags)
_sym_db.RegisterMessage(DeleteDatasetTags.Response)
AddDatasetAttributes = _reflection.GeneratedProtocolMessageType('AddDatasetAttributes', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _ADDDATASETATTRIBUTES_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.AddDatasetAttributes.Response)
})
,
'DESCRIPTOR' : _ADDDATASETATTRIBUTES,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.AddDatasetAttributes)
})
_sym_db.RegisterMessage(AddDatasetAttributes)
_sym_db.RegisterMessage(AddDatasetAttributes.Response)
UpdateDatasetAttributes = _reflection.GeneratedProtocolMessageType('UpdateDatasetAttributes', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _UPDATEDATASETATTRIBUTES_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.UpdateDatasetAttributes.Response)
})
,
'DESCRIPTOR' : _UPDATEDATASETATTRIBUTES,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.UpdateDatasetAttributes)
})
_sym_db.RegisterMessage(UpdateDatasetAttributes)
_sym_db.RegisterMessage(UpdateDatasetAttributes.Response)
DeleteDatasetAttributes = _reflection.GeneratedProtocolMessageType('DeleteDatasetAttributes', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _DELETEDATASETATTRIBUTES_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.DeleteDatasetAttributes.Response)
})
,
'DESCRIPTOR' : _DELETEDATASETATTRIBUTES,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.DeleteDatasetAttributes)
})
_sym_db.RegisterMessage(DeleteDatasetAttributes)
_sym_db.RegisterMessage(DeleteDatasetAttributes.Response)
LastExperimentByDatasetId = _reflection.GeneratedProtocolMessageType('LastExperimentByDatasetId', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _LASTEXPERIMENTBYDATASETID_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.LastExperimentByDatasetId.Response)
})
,
'DESCRIPTOR' : _LASTEXPERIMENTBYDATASETID,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.LastExperimentByDatasetId)
})
_sym_db.RegisterMessage(LastExperimentByDatasetId)
_sym_db.RegisterMessage(LastExperimentByDatasetId.Response)
GetExperimentRunByDataset = _reflection.GeneratedProtocolMessageType('GetExperimentRunByDataset', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _GETEXPERIMENTRUNBYDATASET_RESPONSE,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.GetExperimentRunByDataset.Response)
})
,
'DESCRIPTOR' : _GETEXPERIMENTRUNBYDATASET,
'__module__' : 'modeldb.DatasetService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.GetExperimentRunByDataset)
})
_sym_db.RegisterMessage(GetExperimentRunByDataset)
_sym_db.RegisterMessage(GetExperimentRunByDataset.Response)
DESCRIPTOR._options = None
_DATASETSERVICE = _descriptor.ServiceDescriptor(
name='DatasetService',
full_name='ai.verta.modeldb.DatasetService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=3539,
serialized_end=6065,
methods=[
_descriptor.MethodDescriptor(
name='createDataset',
full_name='ai.verta.modeldb.DatasetService.createDataset',
index=0,
containing_service=None,
input_type=_CREATEDATASET,
output_type=_CREATEDATASET_RESPONSE,
serialized_options=b'\202\323\344\223\002\036\"\031/v1/dataset/createDataset:\001*',
),
_descriptor.MethodDescriptor(
name='getAllDatasets',
full_name='ai.verta.modeldb.DatasetService.getAllDatasets',
index=1,
containing_service=None,
input_type=_GETALLDATASETS,
output_type=_GETALLDATASETS_RESPONSE,
serialized_options=b'\202\323\344\223\002\034\022\032/v1/dataset/getAllDatasets',
),
_descriptor.MethodDescriptor(
name='getDatasetById',
full_name='ai.verta.modeldb.DatasetService.getDatasetById',
index=2,
containing_service=None,
input_type=_GETDATASETBYID,
output_type=_GETDATASETBYID_RESPONSE,
serialized_options=b'\202\323\344\223\002\034\022\032/v1/dataset/getDatasetById',
),
_descriptor.MethodDescriptor(
name='getDatasetByName',
full_name='ai.verta.modeldb.DatasetService.getDatasetByName',
index=3,
containing_service=None,
input_type=_GETDATASETBYNAME,
output_type=_GETDATASETBYNAME_RESPONSE,
serialized_options=b'\202\323\344\223\002\036\022\034/v1/dataset/getDatasetByName',
),
_descriptor.MethodDescriptor(
name='deleteDataset',
full_name='ai.verta.modeldb.DatasetService.deleteDataset',
index=4,
containing_service=None,
input_type=_DELETEDATASET,
output_type=_DELETEDATASET_RESPONSE,
serialized_options=b'\202\323\344\223\002\036*\031/v1/dataset/deleteDataset:\001*',
),
_descriptor.MethodDescriptor(
name='deleteDatasets',
full_name='ai.verta.modeldb.DatasetService.deleteDatasets',
index=5,
containing_service=None,
input_type=_DELETEDATASETS,
output_type=_DELETEDATASETS_RESPONSE,
serialized_options=b'\202\323\344\223\002\037*\032/v1/dataset/deleteDatasets:\001*',
),
_descriptor.MethodDescriptor(
name='findDatasets',
full_name='ai.verta.modeldb.DatasetService.findDatasets',
index=6,
containing_service=None,
input_type=_FINDDATASETS,
output_type=_FINDDATASETS_RESPONSE,
serialized_options=b'\202\323\344\223\002\035\"\030/v1/dataset/findDatasets:\001*',
),
_descriptor.MethodDescriptor(
name='updateDatasetName',
full_name='ai.verta.modeldb.DatasetService.updateDatasetName',
index=7,
containing_service=None,
input_type=_UPDATEDATASETNAME,
output_type=_UPDATEDATASETNAME_RESPONSE,
serialized_options=b'\202\323\344\223\002\"\"\035/v1/dataset/updateDatasetName:\001*',
),
_descriptor.MethodDescriptor(
name='updateDatasetDescription',
full_name='ai.verta.modeldb.DatasetService.updateDatasetDescription',
index=8,
containing_service=None,
input_type=_UPDATEDATASETDESCRIPTION,
output_type=_UPDATEDATASETDESCRIPTION_RESPONSE,
serialized_options=b'\202\323\344\223\002)\"$/v1/dataset/updateDatasetDescription:\001*',
),
_descriptor.MethodDescriptor(
name='addDatasetTags',
full_name='ai.verta.modeldb.DatasetService.addDatasetTags',
index=9,
containing_service=None,
input_type=_ADDDATASETTAGS,
output_type=_ADDDATASETTAGS_RESPONSE,
serialized_options=b'\202\323\344\223\002\037\"\032/v1/dataset/addDatasetTags:\001*',
),
_descriptor.MethodDescriptor(
name='getDatasetTags',
full_name='ai.verta.modeldb.DatasetService.getDatasetTags',
index=10,
containing_service=None,
input_type=modeldb_dot_CommonService__pb2._GETTAGS,
output_type=modeldb_dot_CommonService__pb2._GETTAGS_RESPONSE,
serialized_options=b'\202\323\344\223\002\034\022\032/v1/dataset/getDatasetTags',
),
_descriptor.MethodDescriptor(
name='deleteDatasetTags',
full_name='ai.verta.modeldb.DatasetService.deleteDatasetTags',
index=11,
containing_service=None,
input_type=_DELETEDATASETTAGS,
output_type=_DELETEDATASETTAGS_RESPONSE,
serialized_options=b'\202\323\344\223\002\"*\035/v1/dataset/deleteDatasetTags:\001*',
),
_descriptor.MethodDescriptor(
name='addDatasetAttributes',
full_name='ai.verta.modeldb.DatasetService.addDatasetAttributes',
index=12,
containing_service=None,
input_type=_ADDDATASETATTRIBUTES,
output_type=_ADDDATASETATTRIBUTES_RESPONSE,
serialized_options=b'\202\323\344\223\002%\" /v1/dataset/addDatasetAttributes:\001*',
),
_descriptor.MethodDescriptor(
name='updateDatasetAttributes',
full_name='ai.verta.modeldb.DatasetService.updateDatasetAttributes',
index=13,
containing_service=None,
input_type=_UPDATEDATASETATTRIBUTES,
output_type=_UPDATEDATASETATTRIBUTES_RESPONSE,
serialized_options=b'\202\323\344\223\002(\"#/v1/dataset/updateDatasetAttributes:\001*',
),
_descriptor.MethodDescriptor(
name='deleteDatasetAttributes',
full_name='ai.verta.modeldb.DatasetService.deleteDatasetAttributes',
index=14,
containing_service=None,
input_type=_DELETEDATASETATTRIBUTES,
output_type=_DELETEDATASETATTRIBUTES_RESPONSE,
serialized_options=b'\202\323\344\223\002(*#/v1/dataset/deleteDatasetAttributes:\001*',
),
_descriptor.MethodDescriptor(
name='getLastExperimentByDatasetId',
full_name='ai.verta.modeldb.DatasetService.getLastExperimentByDatasetId',
index=15,
containing_service=None,
input_type=_LASTEXPERIMENTBYDATASETID,
output_type=_LASTEXPERIMENTBYDATASETID_RESPONSE,
serialized_options=b'\202\323\344\223\002*\022(/v1/dataset/getLastExperimentByDatasetId',
),
_descriptor.MethodDescriptor(
name='getExperimentRunByDataset',
full_name='ai.verta.modeldb.DatasetService.getExperimentRunByDataset',
index=16,
containing_service=None,
input_type=_GETEXPERIMENTRUNBYDATASET,
output_type=_GETEXPERIMENTRUNBYDATASET_RESPONSE,
serialized_options=b'\202\323\344\223\002*\"%/v1/dataset/getExperimentRunByDataset:\001*',
),
])
_sym_db.RegisterServiceDescriptor(_DATASETSERVICE)
DESCRIPTOR.services_by_name['DatasetService'] = _DATASETSERVICE
# @@protoc_insertion_point(module_scope)
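# Usage sketch: the message classes registered above accept keyword arguments matching the
# field names in their descriptors, so requests can be constructed directly. The values
# below are placeholders; invoking DatasetService itself is normally done through the
# companion *_pb2_grpc stub module that protoc emits alongside this file (assumed here,
# not shown).
if __name__ == "__main__":
    _example_request = FindDatasets(
        workspace_name="example-workspace", # placeholder workspace name
        page_number=1,
        page_limit=10,
        ascending=False,
        sort_key="name",
    )
    print(_example_request)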
|
3256ea930094eb5366ab29672f1108499b68737a
|
5f800554fe91198d1abd45b7598b885c57731880
|
/torchtext/models/t5/modules.py
|
2b658efcfb6888782af407f5e4262b79093432c0
|
[
"BSD-3-Clause"
] |
permissive
|
pytorch/text
|
314ac5d550c6971ac8dd065d7b3fd14e65738ebb
|
45e4b8ca3615016625de15326a14668c8b58595d
|
refs/heads/main
| 2023-09-02T01:52:17.987567
| 2023-09-01T00:41:04
| 2023-09-01T00:41:04
| 76,210,069
| 3,640
| 1,002
|
BSD-3-Clause
| 2023-09-01T00:41:05
| 2016-12-12T00:56:03
|
Python
|
UTF-8
|
Python
| false
| false
| 55,358
|
py
|
modules.py
|
# /* Portions Copyright (c) Meta Platforms, Inc. and affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Parts of code are originally from the HuggingFace team and can be found here
# https://github.com/huggingface/transformers/blob/8581a798c0a48fca07b29ce2ca2ef55adcae8c7e/src/transformers/models/t5/modeling_t5.py
# */
import math
import warnings
from typing import Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn.modules.linear import NonDynamicallyQuantizableLinear
# Define reusable types for past_key_values
PAST_KEY_VALUES_TYPE = Tuple[Tensor, Tensor, Tensor, Tensor]
PAST_KEY_VALUE_TYPE = Tuple[Tensor, Tensor]
# If running the forward pass in encoder-only mode, there won't be KVs from cross-attention, so we need a version with optional tensors
PAST_KEY_VALUES_UNFILLED_TYPE = Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]
# Define reusable types for encoder/decoder outputs
SEQ_2_SEQ_OUTPUTS_TYPE = Dict[
str, Union[Optional[Tensor], List[Tensor], List[Optional[Tensor]], Optional[List[PAST_KEY_VALUES_UNFILLED_TYPE]]]
]
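# Illustrative layout (a sketch inferred from how these aliases are used below, not a strict
# contract): a filled per-layer entry is
#     (self_attn_key, self_attn_value, cross_attn_key, cross_attn_value)
# with each tensor shaped (batch, num_heads, seq_len, head_dim); in encoder-only runs the
# two cross-attention slots stay None, which is what the "UNFILLED" variant expresses.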
class T5MultiheadAttention(nn.MultiheadAttention):
def __init__(
self,
embed_dim: int,
num_heads: int,
is_decoder: bool = False,
dropout: float = 0.0,
bias: bool = False,
qkv_dim: int = 64,
compute_relative_attention_bias: bool = False,
relative_attention_num_buckets: int = 32,
relative_attention_max_distance: int = 128,
device: Optional[torch.device] = None,
dtype=None,
) -> None:
r"""T5MultiheadAttention based on `nn.MultiheadAttention`.
Args:
embed_dim: Total dimension of the model.
num_heads: Parallel attention heads.
is_decoder: Whether or not multihead attention is being performed on a decoder layer. Default: `False`
dropout: Probability of an element to be zeroed. Default: 0.0
bias: If specified, adds bias to input / output projection layers. Default: `False`.
            qkv_dim: Projection dimension (per head) for query, keys, and values. Default: 64.
            compute_relative_attention_bias: Whether or not the relative position embeddings
                need to be computed. Typically occurs in the first layer of the encoder/decoder
                and the resulting position embeddings are returned to be passed up to higher layers. (default: False)
relative_attention_num_buckets: Number of relative position buckets. Default: `32`
relative_attention_max_distance: Maximum threshold on the relative distance used to
allocate buckets. Anything larger gets placed in the same bucket. Default: `128`
"""
super().__init__(embed_dim, num_heads, dropout, bias, False, False, qkv_dim, qkv_dim, True, device, dtype)
factory_kwargs = {"device": device, "dtype": dtype}
self.is_decoder = is_decoder
self.inner_dim = qkv_dim * num_heads
self.q_proj_weight = nn.Parameter(torch.empty((self.inner_dim, embed_dim), **factory_kwargs))
self.k_proj_weight = nn.Parameter(torch.empty((self.inner_dim, embed_dim), **factory_kwargs))
self.v_proj_weight = nn.Parameter(torch.empty((self.inner_dim, embed_dim), **factory_kwargs))
self.out_proj = NonDynamicallyQuantizableLinear(self.inner_dim, embed_dim, bias=bias, **factory_kwargs)
self.register_parameter("in_proj_weight", None)
self.compute_relative_attention_bias = compute_relative_attention_bias
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
if compute_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(relative_attention_num_buckets, num_heads)
else:
self.relative_attention_bias = None
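    # Construction sketch (illustrative values, not prescriptive):
    #     self_attn = T5MultiheadAttention(
    #         embed_dim=512, num_heads=8, is_decoder=False, qkv_dim=64,
    #         compute_relative_attention_bias=True,
    #     )
    # With these values inner_dim = qkv_dim * num_heads = 512, and the relative attention
    # bias table is an nn.Embedding whose weight has shape
    # (relative_attention_num_buckets, num_heads) = (32, 8).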
def forward(
self,
query: Tensor,
key: Tensor,
value: Tensor,
query_length: Optional[int] = None,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
average_attn_weights: bool = False,
position_bias: Optional[Tensor] = None,
past_key_value: Optional[PAST_KEY_VALUE_TYPE] = None,
) -> Tuple[Tensor, Tensor, Optional[Tensor], PAST_KEY_VALUE_TYPE]:
r"""Allows the model to jointly attend to information from different representation subspaces
as described in the paper: `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`.
        Also incorporates relative attention bias when computing attention scores as described in the paper:
`Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer <https://arxiv.org/pdf/1910.10683.pdf>`.
Args:
query: Query embeddings of shape :math:`(N, L, E_q)`, where :math:`N` is the batch size, :math:`L` is the target sequence length,
and :math:`E_q` is the query embedding dimension `embed_dim`.
Queries are compared against key-value pairs to produce the output.
See "Attention Is All You Need" for more details.
key: Key embeddings of shape :math:`(N, S, E_k)`, where :math:`N` is the batch size, :math:`S` is the source sequence length,
and :math:`E_k` is the key embedding dimension `kdim`.
See "Attention Is All You Need" for more details.
value: Value embeddings of shape :math:`(N, S, E_v)`, where :math:`N` is the batch size, :math:`S` is the source
sequence length, and :math:`E_v` is the value embedding dimension `vdim`.
See "Attention Is All You Need" for more details.
key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within `key`
to ignore for the purpose of attention (i.e. treat as "padding").
Binary masks are supported. For a binary mask, a `True` value indicates that the corresponding `key`
value will be ignored for the purpose of attention.
need_weights: If specified, returns `attn_output_weights` in addition to `attn_outputs`.
Default: `True`.
attn_mask: If specified, a 2D mask preventing attention to certain positions. Must be of shape
                :math:`(L, S)`, where :math:`L` is the target sequence length and :math:`S` is the source sequence length. A 2D mask will be
                broadcasted across the batch. Binary and float masks are supported.
For a binary mask, a `True` value indicates that the corresponding position is not allowed to attend.
For a float mask, the mask values will be added to the attention weight. Default: `None`
average_attn_weights: If true, indicates that the returned `attn_weights` should be averaged across
heads. Otherwise, `attn_weights` are provided separately per head. Note that this flag only has an
                effect when `need_weights=True`. Default: `False` (i.e. weights are provided separately per head)
            position_bias: Position bias tensor used to add relative attention bias to attention scores. Default: `None`
Returns:
attn_output: Attention outputs of shape :math:`(N, L, E)`, where :math:`N` is the batch size,
:math:`L` is the target sequence length, and :math:`E` is the embedding dimension `embed_dim`.
attn_output_weights: Only returned when `need_weights=True`. If `average_attn_weights=True`,
returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
:math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
                :math:`S` is the source sequence length. If `average_attn_weights=False`, returns attention weights per
head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched or :math:`(N, \text{num\_heads}, L, S)`.
position_bias: Used in attention scoring. Only computed when `compute_relative_attention_bias=True`
and `position_bias=None`. Has shape :math:`(1, num_heads, L, S)`.
key_value: Calculated weights for keys and values. Used for incremental decoding.
"""
attn_output, position_bias, attn_output_weights, key_value = self._t5_multi_head_attention_forward(
query,
key,
value,
query_length=query_length,
position_bias=position_bias,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
average_attn_weights=average_attn_weights,
past_key_value=past_key_value,
)
return attn_output, position_bias, attn_output_weights, key_value
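    # Call sketch: for self-attention the same (N, L, embed_dim) tensor is passed as query,
    # key, and value, e.g.
    #     attn_out, pos_bias, attn_weights, key_value = self_attn(x, x, x)
    # The returned `key_value` pair can be fed back as `past_key_value` on the next step of
    # incremental decoding, and `pos_bias` can be handed to subsequent layers that were
    # built with compute_relative_attention_bias=False.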
def _t5_multi_head_attention_forward(
self,
query: Tensor,
key: Tensor,
value: Tensor,
query_length: Optional[int],
position_bias: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
average_attn_weights: bool = False,
past_key_value: Optional[PAST_KEY_VALUE_TYPE] = None,
) -> Tuple[Tensor, Tensor, Optional[Tensor], PAST_KEY_VALUE_TYPE]:
"""Modified from https://github.com/pytorch/pytorch/blob/5953fd9133c0bdcc0158acf1472fac403bc5f636/torch/nn/functional.py#L4909."""
is_self_attention = torch.equal(query, key)
is_batched = F._mha_shape_check(query, key, value, key_padding_mask, attn_mask, self.num_heads)
        # For unbatched input, we unsqueeze at the expected batch dim to pretend that the input
        # is batched, run the computation, and squeeze the batch dimension before returning
        # so that the output doesn't carry this temporary batch dimension.
if not is_batched:
# Unsqueeze if the input is unbatched
query = query.unsqueeze(1)
key = key.unsqueeze(1)
value = value.unsqueeze(1)
if key_padding_mask is not None:
key_padding_mask = key_padding_mask.unsqueeze(0)
# Set up shape vars
bsz, tgt_len, embed_dim = query.shape
real_seq_length = tgt_len
if past_key_value is not None:
assert (
len(past_key_value) == 2
), f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states"
real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
src_len = real_seq_length if is_self_attention else key.shape[1]
assert (
embed_dim == self.embed_dim
), f"was expecting embedding dimension of {self.embed_dim}, but got {embed_dim}"
head_dim = self.inner_dim // self.num_heads
# Allow MHA to have different embedding dimensions when separate projection weights are used
assert (
key.shape[:2] == value.shape[:2]
), f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
# Compute in-projection
assert self.q_proj_weight is not None, "q_proj_weight is None"
assert self.k_proj_weight is not None, "k_proj_weight is None"
assert self.v_proj_weight is not None, "v_proj_weight is None"
if self.in_proj_bias is None:
b_q = b_k = b_v = None
else:
b_q, b_k, b_v = self.in_proj_bias.chunk(3)
q, k, v = self._t5_in_projection(
query,
key,
value,
bsz,
head_dim,
self.q_proj_weight,
self.k_proj_weight,
self.v_proj_weight,
b_q,
b_k,
b_v,
is_self_attention,
past_key_value,
)
if attn_mask is None:
if self.is_decoder:
if is_self_attention:
attn_mask = torch.triu(
torch.ones((tgt_len, tgt_len), dtype=torch.bool, device=query.device), diagonal=1
)
else:
attn_mask = torch.zeros((tgt_len, src_len), device=query.device)
else:
attn_mask = torch.zeros((src_len, src_len), device=query.device, dtype=torch.bool)
# Prep attention mask
if attn_mask is not None:
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask is not supported. Using bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
else:
assert (
attn_mask.is_floating_point() or attn_mask.dtype == torch.bool
), f"Only float and bool types are supported for attn_mask, not {attn_mask.dtype}"
if attn_mask.dim() == 2:
x, y = attn_mask.shape
attn_mask = attn_mask.view(1, 1, x, y).expand(bsz, self.num_heads, -1, -1)
else:
raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
# Prep key padding mask
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask is not supported. Using bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
# Reshape q, k, v for multihead attention and make them batch first
q = q.view(bsz, -1, self.num_heads, head_dim).transpose(1, 2)
if past_key_value is None:
k = k.view(bsz, -1, self.num_heads, head_dim).transpose(1, 2)
v = v.view(bsz, -1, self.num_heads, head_dim).transpose(1, 2)
# Have to check this after resize
src_len = k.size(2)
if key_padding_mask is not None:
if key_padding_mask.shape != (bsz, src_len):
                # It's possible that the padding mask only accounts for the current tgt_length rather than the full length including past_key_value
assert (
past_key_value is not None
), "Must provide past_key_value if key_padding_mask needs to be expanded."
key_padding_mask = key_padding_mask.expand(bsz, src_len)
assert key_padding_mask.shape == (
bsz,
src_len,
), f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len).expand(-1, self.num_heads, tgt_len, -1)
if attn_mask is None:
attn_mask = key_padding_mask
elif attn_mask.dtype == torch.bool:
attn_mask = attn_mask.logical_or(key_padding_mask)
else:
attn_mask = attn_mask.masked_fill(key_padding_mask, float("-inf"))
# Convert mask to float
if attn_mask is not None and attn_mask.dtype == torch.bool:
tmp_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
attn_mask = tmp_attn_mask.masked_fill(attn_mask, float("-inf"))
# Adjust dropout probability
if not self.training:
dropout_p = 0.0
else:
dropout_p = self.dropout
# Modification to torch.nn.functional._multi_head_attention_forward to incorporate relative attention bias
if position_bias is None:
if not self.compute_relative_attention_bias:
position_bias = torch.zeros(
(1, self.num_heads, real_seq_length, src_len), device=k.device, dtype=k.dtype
)
else:
position_bias = self._compute_bias(
real_seq_length, src_len, bidirectional=(not self.is_decoder), device=k.device
)
if past_key_value is not None:
position_bias = position_bias[:, :, -query.size(1) :, :]
# Always return KV pair; let user discard if they don't want it
new_key_val = (k, v)
# Calculate attention and out projection
attn_output, attn_output_weights = self._t5_dot_product_attention(q, k, v, position_bias, attn_mask, dropout_p)
attn_output = F.linear(attn_output, self.out_proj.weight, self.out_proj.bias)
if need_weights:
# Optionally average attention weights over heads
if average_attn_weights:
attn_output_weights = attn_output_weights.sum(dim=1) / self.num_heads
if not is_batched:
# Squeeze the output if input was unbatched
attn_output = attn_output.squeeze(1)
attn_output_weights = attn_output_weights.squeeze(0)
return attn_output, position_bias, attn_output_weights, new_key_val
else:
if not is_batched:
# Squeeze the output if input was unbatched
attn_output = attn_output.squeeze(1)
return attn_output, position_bias, None, new_key_val
def _t5_in_projection(
self,
query: Tensor,
key: Tensor,
value: Tensor,
bsz: int,
head_dim: int,
w_q: Tensor,
w_k: Tensor,
w_v: Tensor,
b_q: Optional[Tensor] = None,
b_k: Optional[Tensor] = None,
b_v: Optional[Tensor] = None,
is_self_attention: bool = True,
past_key_value: Optional[PAST_KEY_VALUE_TYPE] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
r"""Performs the in-projection step of the attention operation. This is simply
a triple of linear projections, with shape constraints on the weights which
ensure embedding dimension uniformity in the projected outputs.
Output is a triple containing projection tensors for query, key and value.
Modified from https://github.com/pytorch/pytorch/blob/5953fd9133c0bdcc0158acf1472fac403bc5f636/torch/nn/functional.py#L4761.
Args:
q, k, v: query, key and value tensors to be projected.
w_q, w_k, w_v: weights for q, k and v, respectively.
b_q, b_k, b_v: optional biases for q, k and v, respectively.
Shape:
Inputs:
- q: :math:`(Qdims..., Eq)` where Eq is the query embedding dimension and Qdims are any
number of leading dimensions.
- k: :math:`(Kdims..., Ek)` where Ek is the key embedding dimension and Kdims are any
number of leading dimensions.
- v: :math:`(Vdims..., Ev)` where Ev is the value embedding dimension and Vdims are any
number of leading dimensions.
- w_q: :math:`(Ei, Eq)` where Ei is the dimension to which the query, key, and value
                embeddings are to be projected
- w_k: :math:`(Ei, Ek)`
- w_v: :math:`(Ei, Ev)`
- b_q: :math:`(Ei)`
- b_k: :math:`(Ei)`
- b_v: :math:`(Ei)`
Output: in output triple :math:`(q', k', v')`,
- q': :math:`[Qdims..., Ei]`
- k': :math:`[Kdims..., Ei]`
- v': :math:`[Vdims..., Ei]`
"""
Eq, Ek, Ev = query.size(-1), key.size(-1), value.size(-1)
assert w_q.shape == (
self.inner_dim,
Eq,
), f"expecting query weights shape of {(self.inner_dim, Eq)}, but got {w_q.shape}"
assert w_k.shape == (
self.inner_dim,
Ek,
), f"expecting key weights shape of {(self.inner_dim, Ek)}, but got {w_k.shape}"
assert w_v.shape == (
self.inner_dim,
Ev,
), f"expecting value weights shape of {(self.inner_dim, Ev)}, but got {w_v.shape}"
assert b_q is None or b_q.shape == (
self.inner_dim,
), f"expecting query bias shape of {(self.inner_dim,)}, but got {b_q.shape}"
assert b_k is None or b_k.shape == (
self.inner_dim,
), f"expecting key bias shape of {(self.inner_dim,)}, but got {b_k.shape}"
assert b_v is None or b_v.shape == (
self.inner_dim,
), f"expecting value bias shape of {(self.inner_dim,)}, but got {b_v.shape}"
query_proj = F.linear(query, w_q, b_q)
if is_self_attention:
# Self-attention over query (hidden states)
key_proj = F.linear(query, w_k, b_k)
value_proj = F.linear(query, w_v, b_v)
else:
if past_key_value is None:
# Cross-attention (over current key/val states)
key_proj = F.linear(key, w_k, b_k)
value_proj = F.linear(value, w_v, b_v)
else:
# Should never reach this branch
key_proj = key
value_proj = value
if past_key_value is not None:
if is_self_attention:
# Concat old key vals w/ new calculated ones for speed in decoding
key_proj = key_proj.view(bsz, -1, self.num_heads, head_dim).transpose(1, 2)
value_proj = value_proj.view(bsz, -1, self.num_heads, head_dim).transpose(1, 2)
key_proj = torch.cat([past_key_value[0], key_proj], dim=2)
value_proj = torch.cat([past_key_value[1], value_proj], dim=2)
else:
# Cross-attention context
key_proj = past_key_value[0]
value_proj = past_key_value[1]
assert key_proj is not None
assert value_proj is not None
return query_proj, key_proj, value_proj
def _t5_dot_product_attention(
self,
q: Tensor,
k: Tensor,
v: Tensor,
position_bias: Tensor,
attn_mask: Optional[Tensor] = None,
dropout_p: float = 0.0,
) -> Tuple[Tensor, Tensor]:
r"""Computes scaled dot product attention on query, key and value tensors, using
an optional attention mask if passed, and applying dropout if a probability
greater than 0.0 is specified.
Modified from https://github.com/pytorch/pytorch/blob/5953fd9133c0bdcc0158acf1472fac403bc5f636/torch/nn/functional.py#L4814.
Args:
q, k, v: Query, key and value tensors. See Shape section for shape details.
attn_mask: Optional tensor containing mask values to be added to calculated
attention. May be 2D or 3D; see Shape section for details.
dropout_p: Dropout probability. If greater than 0.0, dropout is applied.
position_bias: Position bias used to incorporate relative attention bias in attention scores
Shape:
- q: :math:`(B, H, Nt, E)` where B is the batch size, H is the number of heads, Nt is the target sequence length,
and E is the head dimension.
- key: :math:`(B, H, Ns, E)` where B is the batch size, H is the number of heads, Ns is the source sequence length,
and E is the head dimension.
- value: :math:`(B, H, Ns, E)` where B is the batch size, H is the number of heads, Ns is the source sequence length,
and E is the head dimension.
- attn_mask: a 4D tensor of shape :math:`(B, H, Nt, Ns)`
- position_bias: :math:`(1, H, Nt, Ns)`
- Output: attention values have shape :math:`(B, Nt, H*E)`; attention weights
have shape :math:`(B, H, Nt, Ns)`
Returns:
Tensor pair containing attended values and attention weights.
"""
B, H, _, E = q.shape
# HF implementation does not perform this normalization. For the sake of matching test results, we have commented it out
# q = q / math.sqrt(E)
attn = torch.matmul(q, k.transpose(3, 2))
# NOTE: modification from torch.nn.functional._scaled_dot_product_attention to incorporate relative attention bias
position_bias = position_bias.repeat(B, 1, 1, 1)
if attn_mask is not None:
position_bias += attn_mask
attn += position_bias
attn = F.softmax(attn, dim=-1)
if dropout_p > 0.0:
attn = F.dropout(attn, p=dropout_p)
output = torch.matmul(attn, v)
output = output.transpose(1, 2).contiguous().view(B, -1, H * E)
return output, attn
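# In effect, the block above computes softmax(q @ k.T + position_bias [+ attn_mask]) @ v,
# with the usual 1/sqrt(E) query scaling deliberately skipped to match the reference
# implementation (see the comment above), followed by merging the head dimension back into H*E.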
def _compute_bias(
self, query_length: int, key_length: int, bidirectional: bool = True, device: Optional[torch.device] = None
) -> Tensor:
"""Compute binned relative position bias.
Modified from https://github.com/huggingface/transformers/blob/8581a798c0a48fca07b29ce2ca2ef55adcae8c7e/src/transformers/models/t5/modeling_t5.py#L421.
"""
assert self.relative_attention_bias is not None
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
relative_position = memory_position - context_position # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=bidirectional,
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance,
)
values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
return values
def _relative_position_bucket(
self, relative_position: Tensor, bidirectional: bool = True, num_buckets: int = 32, max_distance: int = 128
) -> Tensor:
r"""Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
and https://github.com/huggingface/transformers/blob/8581a798c0a48fca07b29ce2ca2ef55adcae8c7e/src/transformers/models/t5/modeling_t5.py#L374.
Translates relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on.
Args:
relative_position: Tensor w/ initially constructed relative positions.
bidirectional: If attention is bidirectional; when in decoder, this should be False.
num_buckets: Number of buckets to utilize.
max_distance: Maximum distance between positions.
Returns:
Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets).
"""
relative_buckets = torch.zeros(relative_position.shape, dtype=torch.long, device=relative_position.device)
if bidirectional:
num_buckets = num_buckets // 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# Ensure relative_position is in the range [0, inf)
# Half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
relative_position_if_large = max_exact + (
torch.log(relative_position.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_position_if_large = torch.min(
relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
)
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
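# Worked example (illustrative sketch) of the bucketing above with the encoder defaults
# bidirectional=True, num_buckets=32, max_distance=128, i.e. 16 buckets per direction
# and max_exact = 8:
# relative_position = -3 -> |.| = 3 < 8, exact bucket -> bucket 3
# relative_position = +5 -> direction offset 16 + exact 5 -> bucket 21
# relative_position = -20 -> 8 + floor(log(20/8) / log(128/8) * 8) -> bucket 10
# relative_position = +200 -> log bucket capped at 15, + offset 16 -> bucket 31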
class T5LayerNorm(nn.Module):
def __init__(self, d_model: int, eps: float = 1e-6) -> None:
r"""Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
Based on https://github.com/huggingface/transformers/blob/8581a798c0a48fca07b29ce2ca2ef55adcae8c7e/src/transformers/models/t5/modeling_t5.py#L239.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(d_model))
self.variance_epsilon = eps
def forward(self, hidden_states: Tensor) -> Tensor:
r"""T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
Square Layer Normalization (https://arxiv.org/abs/1910.07467); thus the variance is calculated
without the mean and there is no bias. Additionally, we want to make sure that the accumulation for
half-precision inputs is done in fp32.
Args:
hidden_states: Tensor to be normalized. Final dimension must be model dimension (i.e. number of expected features in the input).
Returns:
Tensor with the same shape as hidden_states after having been normalized.
"""
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# Convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
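# Minimal usage sketch (hypothetical sizes): the layer rescales each vector by the
# root mean square of its last dimension, y = weight * x / sqrt(mean(x**2) + eps),
# preserving the input shape.
# norm = T5LayerNorm(d_model=768)
# y = norm(torch.rand(32, 20, 768)) # y.shape == (32, 20, 768)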
class T5Layer(nn.Module):
r"""T5Layer is made up of a self-attn block, optional cross-attn block, and feed-forward network.
This T5 layer is based on the paper:
"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer".
Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena,
Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Journal of Machine Learning Research.
Volume 21 Issue 140 pages 1-67. http://jmlr.org/papers/v21/20-074.html
Users may modify or implement in a different way during application.
Args:
d_model: Number of expected features in the input (required).
nhead: Number of heads in the multihead attention models (required).
dim_feedforward: Dimension of the feedforward network model (default: 3072).
qkv_dim: Projection dimension (per head) for query, keys, and values. (default: 64).
dropout: Dropout value (default: 0.1).
activation: Activation function of the intermediate layer, can be a string
("relu", "gelu", or "gelu_new") or a unary callable. (default: F.relu)
is_gated_act: Option to include gated activation as done in FLAN-T5, see
https://huggingface.co/google/flan-t5-xxl. (default: False)
layer_norm_eps: The eps value in layer normalization components. (default=1e-6)
relative_attention_num_buckets: Number of relative position buckets. (default: 32)
relative_attention_max_distance: Maximum threshold on the relative distance used to
allocate buckets. Anything larger gets placed in the same bucket. (default: 128)
compute_relative_attention_bias: Whether or not the relative position embeddings
need to be computed. Typically occurs in the first layer of the encoder
and resulting position embeddings are returned to be passed up to higher layers. (default: False)
is_decoder: Whether the T5Layer will be instantiated as a decoder layer or encoder layer. (default: False)
device: Device to use any newly constructed Tensors. (optional)
dtype: Datatype to use on any newly constructed Tensors. (optional)
Examples::
>>> single_encoder_layer = T5Layer(d_model=768, nhead=12)
>>> src = torch.rand(32, 20, 768)
>>> single_encoder_layer(src)
>>> single_decoder_layer = T5Layer(d_model=768, nhead=12, is_decoder=True)
>>> src = torch.rand(32, 20, 768)
>>> tgt = torch.rand(32, 1, 768)
>>> single_decoder_layer(tgt, src)
"""
def __init__(
self,
d_model: int,
nhead: int,
dim_feedforward: int = 3072,
qkv_dim: int = 64,
dropout: float = 0.1,
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
is_gated_act: bool = False,
layer_norm_eps: float = 1e-6,
relative_attention_num_buckets: int = 32,
relative_attention_max_distance: int = 128,
compute_relative_attention_bias: bool = False,
is_decoder: bool = False,
device: Optional[torch.device] = None,
dtype=None,
) -> None:
super().__init__()
self.is_gated_act = is_gated_act
self.compute_relative_attention_bias = compute_relative_attention_bias
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
self.is_decoder = is_decoder
self.self_attn = T5MultiheadAttention(
d_model,
nhead,
is_decoder=is_decoder,
dropout=dropout,
qkv_dim=qkv_dim,
compute_relative_attention_bias=compute_relative_attention_bias,
relative_attention_num_buckets=relative_attention_num_buckets,
relative_attention_max_distance=relative_attention_max_distance,
device=device,
dtype=dtype,
)
if self.is_decoder:
self.cross_attn = T5MultiheadAttention(
d_model,
nhead,
is_decoder=True,
dropout=dropout,
qkv_dim=qkv_dim,
compute_relative_attention_bias=False,
relative_attention_num_buckets=relative_attention_num_buckets,
relative_attention_max_distance=relative_attention_max_distance,
device=device,
dtype=dtype,
)
self.norm3 = T5LayerNorm(d_model, eps=layer_norm_eps)
self.dropout4 = nn.Dropout(dropout)
else:
self.cross_attn = None
self.norm3 = None
self.dropout4 = None
if self.is_gated_act:
self.linear1 = None
self.linear1_0 = nn.Linear(d_model, dim_feedforward, bias=False)
self.linear1_1 = nn.Linear(d_model, dim_feedforward, bias=False)
else:
self.linear1 = nn.Linear(d_model, dim_feedforward, bias=False)
self.linear1_0 = None
self.linear1_1 = None
self.linear2 = nn.Linear(dim_feedforward, d_model, bias=False)
self.norm1 = T5LayerNorm(d_model, eps=layer_norm_eps)
self.norm2 = T5LayerNorm(d_model, eps=layer_norm_eps)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
if isinstance(activation, str):
assert activation in (
"relu",
"gelu",
"gelu_new",
), f"Do not support '{activation}' activation. Use 'relu' or 'gelu' or 'gelu_new'"
if activation == "relu":
self.activation = F.relu
elif activation == "gelu":
self.activation = F.gelu
elif activation == "gelu_new":
# The following should match the math of https://github.com/huggingface/transformers/blob/main/src/transformers/activations.py
self.activation = nn.GELU(approximate="tanh")
else:
self.activation = activation
def forward(
self,
seq: Tensor,
memory: Optional[Tensor] = None,
mask: Optional[Tensor] = None,
seq_key_padding_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
position_bias: Optional[Tensor] = None,
past_key_values: Optional[PAST_KEY_VALUES_TYPE] = None,
) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor], Optional[Tensor], PAST_KEY_VALUES_UNFILLED_TYPE]:
r"""Pass the inputs (and mask) through the encoder layer.
Args:
seq: Input sequence (required).
Must have shape (B, Ns, E) where B is the batch size, Ns is the sequence length,
and E is the model dimension. This will be the src sequence if `self.is_decoder = False`
and tgt sequence if `self.is_decoder = True`.
memory: Encoder sequence (optional).
Output from encoder layer, only needs to be included when in decoding context.
mask: Attention mask for self-attention. (optional).
Must have shape (Ns, Ns).
seq_key_padding_mask: Mask for the seq keys per batch (optional).
Must have shape (B, Ns).
memory_mask: Attention mask for attention in decoding context. (optional)
Must have shape (Nm, Nm).
memory_key_padding_mask: Mask for the memory keys per batch (optional).
Must have shape (B, Ns).
position_bias: Relative attention bias to be used when computing self-attention scores (optional)
Must have shape (B, H, Ns, Ns) where H is the number of heads.
past_key_values: Past key values used for incremental decoding (optional).
Tuple with Tensors of shape (B, H, N) (TODO: verify the exact shapes).
Returns:
Tuple of Tensors being hidden states, position bias, self-attention scores, cross-attention scores,
and key-value pairs.
"""
# See Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
if past_key_values is not None:
self_attn_past_key_value = past_key_values[:2]
cross_attn_past_key_value = past_key_values[2:]
else:
self_attn_past_key_value, cross_attn_past_key_value = None, None
x = seq
sa_out, position_bias, sa_scores, sa_kv = self._sa_block(
self.norm1(x), mask, seq_key_padding_mask, position_bias, self_attn_past_key_value
)
x = x + sa_out
if self.is_decoder:
assert memory is not None, "Must provide memory (encoder hidden states)."
assert self.norm3 is not None
query_length = sa_kv[0].shape[2]
ca_out, ca_scores, ca_kv = self._ca_block(
self.norm3(x), memory, query_length, memory_mask, memory_key_padding_mask, cross_attn_past_key_value
)
x = x + ca_out
else:
ca_scores, ca_kv = None, None
x = x + self._ff_block(self.norm2(x))
new_key_value = sa_kv + (
ca_kv
if ca_kv is not None
else (
None,
None,
)
)
assert torch.jit.isinstance(new_key_value, PAST_KEY_VALUES_UNFILLED_TYPE)
return x, position_bias, sa_scores, ca_scores, new_key_value
def _sa_block(
self,
x: Tensor,
attn_mask: Optional[Tensor],
key_padding_mask: Optional[Tensor],
position_bias: Optional[Tensor],
past_key_value: Optional[PAST_KEY_VALUE_TYPE] = None,
) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor], PAST_KEY_VALUE_TYPE]:
"""Self-attention block."""
attn, curr_position_bias, scores, curr_key_value = self.self_attn(
x,
x,
x,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=True,
position_bias=position_bias,
past_key_value=past_key_value,
)
if self.compute_relative_attention_bias:
position_bias = curr_position_bias
return self.dropout1(attn), position_bias, scores, curr_key_value
def _ca_block(
self,
x: Tensor,
mem: Tensor,
query_length: Optional[int],
attn_mask: Optional[Tensor],
key_padding_mask: Optional[Tensor],
past_key_value: Optional[PAST_KEY_VALUE_TYPE] = None,
) -> Tuple[Tensor, Optional[Tensor], PAST_KEY_VALUE_TYPE]:
"""Cross-attention block."""
assert self.cross_attn is not None
assert self.dropout4 is not None
attn, _, scores, curr_key_value = self.cross_attn(
x,
mem, # Pass in memory (enc) states as keys
mem, # Pass in memory (enc) states as values
query_length=query_length,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=True,
past_key_value=past_key_value,
)
return self.dropout4(attn), scores, curr_key_value
def _ff_block(self, x: Tensor) -> Tensor:
"""Feed-forward block."""
if self.is_gated_act:
assert self.linear1_0 is not None
assert self.linear1_1 is not None
wi_0 = self.activation(self.linear1_0(x))
wi_1 = self.linear1_1(x)
hidden_states = wi_0 * wi_1
hidden_states = self.dropout2(hidden_states)
hidden_states = self.linear2(hidden_states)
else:
assert self.linear1 is not None
hidden_states = self.linear2(self.dropout2(self.activation(self.linear1(x))))
return self.dropout3(hidden_states)
class T5Encoder(nn.Module):
"""T5Encoder is a stack of N encoder layers.
Args:
d_model: Number of expected features in the input (required).
nhead: Number of heads in the multihead attention models (required).
num_layers: Number of encoder layers in the stack (required)
dim_feedforward: Dimension of the feedforward network model (default=3072).
qkv_dim: Projection dimension (per head) for query, keys, and values. (default=64).
dropout: Dropout value (default=0.1).
activation: Activation function of the intermediate layer, can be a string
("relu", "gelu", or "gelu_new") or a unary callable. (default: F.relu)
is_gated_act: Option to include gated activation as done in FLAN-T5, see
https://huggingface.co/google/flan-t5-xxl. (default: False)
layer_norm_eps: The eps value in layer normalization components (default=1e-6).
relative_attention_num_buckets: Number of relative position buckets (default: 32)
relative_attention_max_distance: Maximum threshold on the relative distance used to
allocate buckets. Anything larger gets placed in the same bucket (default: 128)
token_embeddings (nn.Module): Embedding layer to be passed in the case that the input to `forward`
is not already embedded.
device: Device to use any newly constructed Tensors. (optional)
dtype: Datatype to use on any newly constructed Tensors. (optional)
Examples::
>>> encoder = T5Encoder(d_model=768, nhead=12, num_layers=12)
>>> src = torch.rand(32, 10, 768)
>>> encoder(embedded_src=src)
"""
def __init__(
self,
d_model: int,
nhead: int,
num_layers: int,
dim_feedforward: int = 3072,
qkv_dim: int = 64,
dropout: float = 0.1,
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
is_gated_act: bool = False,
layer_norm_eps: float = 1e-6,
relative_attention_num_buckets: int = 32,
relative_attention_max_distance: int = 128,
token_embeddings: Optional[nn.Module] = None,
device: Optional[torch.device] = None,
dtype=None,
) -> None:
super().__init__()
self.token_embeddings = token_embeddings
self.layers = nn.ModuleList(
[
T5Layer(
d_model,
nhead,
dim_feedforward=dim_feedforward,
qkv_dim=qkv_dim,
dropout=dropout,
activation=activation,
is_gated_act=is_gated_act,
layer_norm_eps=layer_norm_eps,
relative_attention_num_buckets=relative_attention_num_buckets,
relative_attention_max_distance=relative_attention_max_distance,
compute_relative_attention_bias=True if i == 0 else False,
is_decoder=False,
device=device,
dtype=dtype,
)
for i in range(num_layers)
]
)
self.num_layers = num_layers
self.norm = T5LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
def forward(
self,
src: Optional[Tensor] = None,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
embedded_src: Optional[Tensor] = None,
) -> SEQ_2_SEQ_OUTPUTS_TYPE:
r"""Pass the input (and masks) through the stack of encoder layers.
Args:
src (Optional[Tensor]): Tokenized input sequence to the encoder.
Must be batch first with shape (B, Ne) where B is the batch size and Ne is the
encoder input sequence length.
mask (Optional[Tensor]): Attention mask for self-attention.
Must have shape (Ne, Ne).
src_key_padding_mask (Optional[Tensor]): Mask for the src keys per batch.
Must have shape (B, Ne).
embedded_src (Optional[Tensor]): Embedded input sequence to the encoder layer.
Must have shape (B, Ne, E) where B is the batch size, Ne is the encoder input sequence
length, and E is the model dimension.
*Note*: If you do not provide this `embedded_src`, you must have provided a `token_embeddings` layer \
in the initialization of the T5Encoder.
Returns:
Dictionary of last hidden layer, all hidden layers, position bias, and self-attention scores.
"""
# This keeps the encoder self-contained and easy to use individually
if embedded_src is None:
assert (
self.token_embeddings is not None and src is not None
), "Must provide `token_embeddings` and `src` if not providing already embedded tokens."
embedded_src = self.token_embeddings(src)
output = self.dropout1(embedded_src)
position_bias = None
all_outputs = torch.jit.annotate(List[Tensor], [])
all_sa_scores = torch.jit.annotate(List[Optional[Tensor]], [])
for mod in self.layers:
all_outputs.append(output)
output, position_bias, sa_score, _, _ = mod(
output,
mask=mask,
seq_key_padding_mask=src_key_padding_mask,
position_bias=position_bias,
)
all_sa_scores.append(sa_score)
output = self.norm(output)
output = self.dropout2(output)
all_outputs.append(output)
return {
"encoder_output": output,
"encoder_hidden_states": all_outputs,
"encoder_position_bias": position_bias,
"encoder_sa_scores": all_sa_scores,
}
class T5Decoder(nn.Module):
r"""T5Decoder is a stack of N decoder layers.
Args:
d_model: Number of expected features in the input (required).
nhead: Number of heads in the multihead attention models (required).
num_layers: Number of decoder layers in the stack (required)
dim_feedforward: Dimension of the feedforward network model (default=3072).
qkv_dim: Projection dimension (per head) for query, keys, and values. (default=64).
dropout: Dropout value (default=0.1).
activation: Activation function of the intermediate layer, can be a string
("relu", "gelu", or "gelu_new") or a unary callable. (default: F.relu)
is_gated_act: Option to include gated activation as done in FLAN-T5, see
https://huggingface.co/google/flan-t5-xxl. (default: False)
layer_norm_eps: The eps value in layer normalization components (default=1e-6).
relative_attention_num_buckets: Number of relative position buckets (default: 32)
relative_attention_max_distance: Maximum threshold on the relative distance used to
allocate buckets. Anything larger gets placed in the same bucket (default: 128)
device: Device to use any newly constructed Tensors. (optional)
dtype: Datatype to use on any newly constructed Tensors. (optional)
Examples::
>>> decoder = T5Decoder(d_model=768, nhead=12, num_layers=12)
>>> memory = torch.rand(32, 10, 768)
>>> tgt = torch.rand(32, 1, 768)
>>> decoder(tgt, memory)
"""
def __init__(
self,
d_model: int,
nhead: int,
num_layers: int,
dim_feedforward: int = 3072,
qkv_dim: int = 64,
dropout: float = 0.1,
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
is_gated_act: bool = False,
layer_norm_eps: float = 1e-6,
relative_attention_num_buckets: int = 32,
relative_attention_max_distance: int = 128,
device: Optional[torch.device] = None,
dtype=None,
) -> None:
super().__init__()
self.layers = nn.ModuleList(
[
T5Layer(
d_model,
nhead,
dim_feedforward=dim_feedforward,
qkv_dim=qkv_dim,
dropout=dropout,
activation=activation,
is_gated_act=is_gated_act,
layer_norm_eps=layer_norm_eps,
relative_attention_num_buckets=relative_attention_num_buckets,
relative_attention_max_distance=relative_attention_max_distance,
compute_relative_attention_bias=True if i == 0 else False,
is_decoder=True,
device=device,
dtype=dtype,
)
for i in range(num_layers)
]
)
self.norm = T5LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.num_layers = num_layers
def forward(
self,
embedded_tgt: Tensor,
memory: Tensor,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
past_key_values: Optional[List[PAST_KEY_VALUES_TYPE]] = None,
return_past_key_values: bool = False,
) -> SEQ_2_SEQ_OUTPUTS_TYPE:
r"""Pass the inputs (and masks) through the stack of decoder layers.
Args:
embedded_tgt: Input sequence to the decoder layer. (required).
Must have shape (B, Nt, E) where B is the batch size, Nt is the target sequence
length, and E is the model dimension.
memory: Sequence from the last layer of the encoder. (required).
Must have shape (B, Ns, E) where B is the batch size, Ns is the source sequence
length, and E is the model dimension.
tgt_mask: Attention mask for self-attention. (optional).
Must have shape (Nt, Nt).
memory_mask: Attention mask for cross-attention (optional).
Must have shape (Nt, Ns).
tgt_key_padding_mask: Mask for the tgt keys per batch (optional).
Must have shape (B, Nt).
memory_key_padding_mask: Mask for the memory keys per batch (optional).
Must have shape (B, Ns).
past_key_values: Past key values used for incremental decoding (optional).
List of Tuples with Tensors of shape (B, H, N) (TODO: verify the exact shapes).
return_past_key_values: Boolean stating whether to return past_key_values from model. (default: False)
Returns:
Dictionary of last hidden state, all hidden states, position bias, self-attention scores, cross-attention scores
and past key values (if requested).
"""
output = self.dropout1(embedded_tgt)
position_bias = None
all_outputs = torch.jit.annotate(List[Tensor], [])
all_sa_scores = torch.jit.annotate(List[Optional[Tensor]], [])
all_ca_scores = torch.jit.annotate(List[Optional[Tensor]], [])
all_key_values = torch.jit.annotate(List[Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]], [])
for i, mod in enumerate(self.layers):
all_outputs.append(output)
output, position_bias, sa_score, ca_score, past_key_value = mod(
output,
memory,
mask=tgt_mask,
memory_mask=memory_mask,
seq_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
position_bias=position_bias,
past_key_values=past_key_values[i] if past_key_values is not None else None,
)
all_sa_scores.append(sa_score)
all_ca_scores.append(ca_score)
# TODO: Can pass in enc-dec position_bias to avoid recalculating in cross-attn
if past_key_value is not None and return_past_key_values:
all_key_values.append(past_key_value)
output = self.norm(output)
output = self.dropout2(output)
all_outputs.append(output)
return {
"decoder_output": output,
"decoder_hidden_states": all_outputs,
"decoder_position_bias": position_bias,
"decoder_sa_scores": all_sa_scores,
"decoder_ca_scores": all_ca_scores,
"past_key_values": all_key_values,
}
|
a33e6e9464d4db34f93d9b953ed3db8dadf96efa
|
e4ba41f7efc71fefb22032a64d456d9c633f32f7
|
/Re-ID/reid/prepare/add_aic_gps.py
|
9e922adad0d523e7f63c39f9fa65eb8991d59656
|
[] |
no_license
|
yorkeyao/VehicleX
|
61d2dd8dd7769b50680de49246d5201d25907c0e
|
c5cbc3196abef7a285d2159acee4c9a716b98aeb
|
refs/heads/master
| 2022-05-29T18:02:40.613703
| 2022-04-15T09:40:21
| 2022-04-15T09:40:21
| 227,411,112
| 163
| 25
| null | 2020-10-25T10:40:31
| 2019-12-11T16:28:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,314
|
py
|
add_aic_gps.py
|
import os
import os.path as osp
import cv2
import numpy as np
from numpy.linalg import inv
data_path = 'D:/Data/AIC19/' if os.name == 'nt' else osp.expanduser('~/Data/AIC19/')
scenes = [1, 2, 3, 4, 5]
folder_by_scene = {1: 'train',
2: 'test',
3: 'train',
4: 'train',
5: 'test', }
world_centers = {1: np.array([42.525678, -90.723601]),
2: np.array([42.491916, -90.723723]),
3: np.array([42.498780, -90.686393]),
4: np.array([42.498780, -90.686393]),
5: np.array([42.498780, -90.686393]), }
world_scale = 6371000 / 180 * np.pi  # ~metres per degree of latitude (Earth radius ~6371 km)
def image2gps(feet_pos, parameters, scene):
feet_pos = feet_pos.reshape(-1, 1, 2)
if 'intrinsic' in parameters:
# Have to provide P matrix for appropriate scaling
feet_pos = cv2.undistortPoints(feet_pos, parameters['intrinsic'], parameters['distortion'],
P=parameters['intrinsic'])
world_pos = cv2.perspectiveTransform(feet_pos, inv(parameters['homography'])).reshape(-1, 2)
world_pos = (world_pos - world_centers[scene]) * world_scale
return world_pos[:, ::-1]
def gps2image(world_pos, parameters, scene):
world_pos = world_pos[:, ::-1] / world_scale + world_centers[scene]
world_pos = world_pos.reshape(-1, 1, 2)
feet_pos = cv2.perspectiveTransform(world_pos, parameters['homography']).reshape(-1, 2)
if 'intrinsic' in parameters:
rvec = np.array([0, 0, 0], dtype=np.float32)
tvec = np.array([0, 0, 0], dtype=np.float32)
feet_pos, _ = cv2.projectPoints(
np.matmul(inv(parameters['intrinsic']),
np.concatenate((feet_pos, np.ones(feet_pos.shape[0]).reshape(-1, 1)), axis=1).T,
).T,
rvec, tvec, parameters['intrinsic'], parameters['distortion'])
return feet_pos
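# Round-trip sketch (hypothetical pixel coordinates; assumes `parameters` holds the
# calibration for the chosen scene as loaded in the main block below): mapping image
# points to scene-centred metric coordinates and back should approximately recover the
# original pixels, which is what the `error` value computed below measures.
# pts = np.array([[640.0, 480.0]])
# world = image2gps(pts, parameters, scene)
# back = gps2image(world, parameters, scene)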
if __name__ == '__main__':
for scene in scenes:
scene_path = osp.join(data_path, folder_by_scene[scene], 'S{:02d}'.format(scene))
frame_offset_fname = osp.join(data_path, 'cam_timestamp', 'S{:02d}.txt'.format(scene))
frame_offset = {}
with open(frame_offset_fname) as f:
for line in f:
(key, val) = line.split(' ')
key = int(key[1:])
val = 10 * float(val)
frame_offset[key] = val
for camera_dir in sorted(os.listdir(scene_path)):
iCam = int(camera_dir[1:])
calibration_fname = osp.join(data_path, 'calibration', camera_dir, 'calibration.txt')
parameters = {}
with open(calibration_fname) as f:
for line in f:
(key, val) = line.split(':')
key = key.split(' ')[0].lower()
if key == 'reprojection': key = 'error'
if ';' in val:
val = np.fromstring(val.replace(';', ' '), dtype=float, sep=' ').reshape([3, 3])
else:
val = np.fromstring(val, dtype=float, sep=' ')
parameters[key] = val
pass
bbox_types = ['gt', 'det'] if folder_by_scene[scene] == 'train' else ['det']
for bbox_type in bbox_types:
bbox_file = osp.join(scene_path, camera_dir, bbox_type,
'gt.txt' if bbox_type == 'gt' else 'det_ssd512.txt')
bboxs = np.loadtxt(bbox_file, delimiter=',')
feet_pos = np.array([bboxs[:, 2] + bboxs[:, 4] / 2, bboxs[:, 3] + bboxs[:, 5]]).T
world_pos = image2gps(feet_pos, parameters, scene)
new_feet_pos = gps2image(world_pos, parameters, scene)
error = np.mean(np.sum(new_feet_pos - feet_pos, axis=1))
bboxs[:, 7] = iCam
bboxs[:, 8] = bboxs[:, 0] + frame_offset[iCam]
bboxs = bboxs[:, :9]
bboxs = np.concatenate((bboxs, world_pos), axis=1)
bbox_gps_file = osp.join(scene_path, camera_dir, bbox_type,
'gt_gps.txt' if bbox_type == 'gt' else 'det_ssd512_gps.txt')
np.savetxt(bbox_gps_file, bboxs, delimiter=',', fmt='%g')
pass
|
f42d4d3d7753003048e772168861fca6d139c901
|
f7f3dd55fa7ab6833f3c2d8e457884c127cc203d
|
/tests/test_enhancements/output/enumeration/notebook_model_1.py
|
01a1f98d73a64cf31879e1bc41196f5f1f3c6528
|
[
"CC0-1.0"
] |
permissive
|
linkml/linkml
|
0fe41590ea729f10b1a6e2de4a85c585f284dc22
|
2354a45838c6207b01ffabc6eda92512c3fb147b
|
refs/heads/main
| 2023-08-17T05:59:08.486218
| 2023-08-11T21:31:59
| 2023-08-11T21:31:59
| 348,419,208
| 198
| 63
|
CC0-1.0
| 2023-09-14T16:04:30
| 2021-03-16T16:34:43
|
Python
|
UTF-8
|
Python
| false
| false
| 2,893
|
py
|
notebook_model_1.py
|
# Auto generated from notebook_model_1.yaml by pythongen.py version: 0.9.0
# Generation date: 2022-01-27T02:54:10
# Schema: simple
#
# id: http://example.org/test/simple
# description: Very simple enumeration
# license: https://creativecommons.org/publicdomain/zero/1.0/
import dataclasses
import re
import sys
from dataclasses import dataclass
from typing import Any, ClassVar, Dict, List, Optional, Union
from jsonasobj2 import JsonObj, as_dict
from linkml_runtime.linkml_model.meta import (EnumDefinition, PermissibleValue,
PvFormulaOptions)
from linkml_runtime.linkml_model.types import String
from linkml_runtime.utils.curienamespace import CurieNamespace
from linkml_runtime.utils.dataclass_extensions_376 import \
dataclasses_init_fn_with_kwargs
from linkml_runtime.utils.enumerations import EnumDefinitionImpl
from linkml_runtime.utils.formatutils import camelcase, sfx, underscore
from linkml_runtime.utils.metamodelcore import bnode, empty_dict, empty_list
from linkml_runtime.utils.slot import Slot
from linkml_runtime.utils.yamlutils import (YAMLRoot, extended_float,
extended_int, extended_str)
from rdflib import Namespace, URIRef
metamodel_version = "1.7.0"
version = None
# Overwrite dataclasses _init_fn to add **kwargs in __init__
dataclasses._init_fn = dataclasses_init_fn_with_kwargs
# Namespaces
LINKML = CurieNamespace('linkml', 'https://w3id.org/linkml/')
PLAY = CurieNamespace('play', 'http://example.org/test/play/')
DEFAULT_ = PLAY
# Types
# Class references
class PositionalRecordId(extended_str):
pass
@dataclass
class PositionalRecord(YAMLRoot):
id: Union[str, PositionalRecordId] = None
position: Union[str, "OpenEnum"] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.id):
self.MissingRequiredField("id")
if not isinstance(self.id, PositionalRecordId):
self.id = PositionalRecordId(self.id)
if self._is_empty(self.position):
self.MissingRequiredField("position")
if not isinstance(self.position, OpenEnum):
self.position = OpenEnum(self.position)
super().__post_init__(**kwargs)
# Enumerations
class OpenEnum(EnumDefinitionImpl):
"""
Baseline enumeration -- simple code/value pairs, where the value (description) is optional
"""
a = PermissibleValue(text="a",
description="top")
b = PermissibleValue(text="b",
description="middle")
c = PermissibleValue(text="c",
description="bottom")
d = PermissibleValue(text="d")
_defn = EnumDefinition(
name="OpenEnum",
description="Baseline enumeration -- simple code/value pairs, where the value (description) is optional",
)
# Slots
|
c88a2a5e1f0853c28c45e5bae072bd439aefc866
|
01427af18635fe286c91c28716231e0005fc4129
|
/libs/colbert/colbert/utility/preprocess/queries_split.py
|
5ebc5b8a37618d374563f66dc31034f0bb92321d
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
IntelLabs/fastRAG
|
1441bdb95a86e74e5e44a8dfbe6493135e3d3427
|
5fb5bb18b39ee22d71349fbfe282e464f7094494
|
refs/heads/main
| 2023-08-30T19:41:39.216795
| 2023-08-22T10:55:09
| 2023-08-22T10:55:09
| 592,391,289
| 324
| 27
|
Apache-2.0
| 2023-08-22T10:55:11
| 2023-01-23T16:25:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,253
|
py
|
queries_split.py
|
"""
Divide a query set into two.
"""
import math
import os
import random
from argparse import ArgumentParser
from collections import OrderedDict
import ujson
from colbert.utils.utils import print_message
def main(args):
random.seed(12345)
"""
Load the queries
"""
Queries = OrderedDict()
print_message(f"#> Loading queries from {args.input}..")
with open(args.input) as f:
for line in f:
qid, query = line.strip().split("\t")
assert qid not in Queries
Queries[qid] = query
"""
Apply the splitting
"""
size_a = len(Queries) - args.holdout
size_b = args.holdout
size_a, size_b = max(size_a, size_b), min(size_a, size_b)
assert size_a > 0 and size_b > 0, (len(Queries), size_a, size_b)
print_message(
f"#> Deterministically splitting the queries into ({size_a}, {size_b})-sized splits."
)
keys = list(Queries.keys())
sample_b_indices = sorted(list(random.sample(range(len(keys)), size_b)))
sample_a_indices = sorted(
list(set.difference(set(list(range(len(keys)))), set(sample_b_indices)))
)
assert len(sample_a_indices) == size_a
assert len(sample_b_indices) == size_b
sample_a = [keys[idx] for idx in sample_a_indices]
sample_b = [keys[idx] for idx in sample_b_indices]
"""
Write the output
"""
output_path_a = f"{args.input}.a"
output_path_b = f"{args.input}.b"
assert not os.path.exists(output_path_a), output_path_a
assert not os.path.exists(output_path_b), output_path_b
print_message(f"#> Writing the splits out to {output_path_a} and {output_path_b} ...")
for output_path, sample in [(output_path_a, sample_a), (output_path_b, sample_b)]:
with open(output_path, "w") as f:
for qid in sample:
query = Queries[qid]
line = "\t".join([qid, query]) + "\n"
f.write(line)
if __name__ == "__main__":
parser = ArgumentParser(description="queries_split.")
# Input Arguments.
parser.add_argument("--input", dest="input", required=True)
parser.add_argument("--holdout", dest="holdout", required=True, type=int)
args = parser.parse_args()
main(args)
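# Example invocation (illustrative file name): given a tab-separated "qid<TAB>query"
# file, hold out 500 queries into a second split:
# python queries_split.py --input queries.tsv --holdout 500
# The splits are written next to the input as queries.tsv.a and queries.tsv.b.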
|
924313c2daf1abd17faf1af82ece3008ff1f6923
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/3_linkedList/篮球比赛分组-优先队列+双向链表.py
|
b7e190e04f2db546520109eaba28f10d98aacde3
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,011
|
py
|
篮球比赛分组-优先队列+双向链表.py
|
"""
Before a basketball game, n players standing in a row must be split into two teams
(the two teams may differ in size). Each player has an ability value a_i.
Two coaches take turns picking players, and the first coach picks first.
On each turn, a coach picks the player with the highest ability value among all remaining players.
When a player is picked, the m players immediately to their left and the m players immediately
to their right are taken along as well (if a side has fewer than m players available,
all remaining players on that side are taken).
Output which team each player is assigned to under this rule.
"""
from heapq import heapify, heappop
from typing import List, Optional
class MaxCycleNode:
__slots__ = ("index", "value", "left", "right", "deleted")
def __init__(
self,
index: int,
value: int,
left: Optional["MaxCycleNode"] = None,
right: Optional["MaxCycleNode"] = None,
) -> None:
self.index = index
self.value = value
self.left = left
self.right = right
self.deleted = False
def __eq__(self, other: "MaxCycleNode") -> bool:
return self.value == other.value
def __lt__(self, other: "MaxCycleNode") -> bool:
return self.value > other.value
def __repr__(self) -> str:
return f"{self.index} {self.value} {self.deleted}"
def remove(node: Optional["MaxCycleNode"]) -> None:
if node is None:
return
if node.left:
node.left.right = node.right
if node.right:
node.right.left = node.left
node.deleted = True  # Mark as deleted (lazy deletion in the heap)
def solve(n: int, m: int, nums: List[int]) -> List[str]:
def select(team: str) -> None:
maxNode = None
while pq:
cur = heappop(pq)
if not cur.deleted:
res[cur.index] = team
maxNode = cur
break
if maxNode is None:
return
left, right = maxNode.left, maxNode.right
remove(maxNode)
count = m
while count > 0 and left:
res[left.index] = team
remove(left)
left = left.left
count -= 1
count = m
while count > 0 and right:
res[right.index] = team
remove(right)
right = right.right
count -= 1
res = [""] * n
pq = [MaxCycleNode(index, value) for index, value in enumerate(nums)]
for i in range(n):  # Build the doubly linked list
if i - 1 >= 0:
pq[i].left = pq[(i - 1)]
if i + 1 < n:
pq[i].right = pq[(i + 1)]
heapify(pq)
while pq:
select("A")
select("B")
return res
if __name__ == "__main__":
assert solve(7, 1, [3, 6, 1, 7, 2, 5, 4]) == ["B", "B", "A", "A", "A", "B", "A"]
assert solve(10, 2, [4, 8, 9, 10, 7, 6, 5, 3, 2, 1]) == [
"B",
"A",
"A",
"A",
"A",
"A",
"B",
"B",
"B",
"A",
]
|
fdf91d139eae0108d5dee7de69e103bb710d465c
|
d47abf79e58b2982bd2f5359057126864fdb6e48
|
/b2sdk/v2/session.py
|
9deee3f0137d0b1c8edebc7ca25ee52ed62cd8be
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
Backblaze/b2-sdk-python
|
ca90f4dca6a1a9a52c8dbccd0294bfc01290970a
|
072f96dfe90ff191cb74dd2b657564ed5649553c
|
refs/heads/master
| 2023-08-16T23:53:48.691286
| 2023-08-10T13:37:40
| 2023-08-10T13:37:40
| 168,011,367
| 160
| 67
|
NOASSERTION
| 2023-08-31T11:01:52
| 2019-01-28T18:13:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,454
|
py
|
session.py
|
######################################################################
#
# File: b2sdk/v2/session.py
#
# Copyright 2021 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from __future__ import annotations
from b2sdk import _v3 as v3
from .b2http import B2Http
from ._compat import _file_infos_rename
# Override to use legacy B2Http
class B2Session(v3.B2Session):
B2HTTP_CLASS = staticmethod(B2Http)
@_file_infos_rename
def upload_file(
self,
bucket_id,
file_name,
content_length,
content_type,
content_sha1,
file_info,
data_stream,
server_side_encryption: v3.EncryptionSetting | None = None,
file_retention: v3.FileRetentionSetting | None = None,
legal_hold: v3.LegalHold | None = None,
custom_upload_timestamp: int | None = None,
cache_control: str | None = None,
*args,
**kwargs
):
return super().upload_file(
bucket_id,
file_name,
content_length,
content_type,
content_sha1,
file_info,
data_stream,
server_side_encryption,
file_retention,
legal_hold,
custom_upload_timestamp,
cache_control,
*args,
**kwargs,
)
|
91aeba7ad2e2b1903612a62f434cc44408313021
|
0c41f2fd4c1ad9b954097b0662e556b3eb288987
|
/cellbender/remove_background/tests/test_dataprep.py
|
8fcb1562ee818f03ace75ac15eeb997277f03a60
|
[] |
permissive
|
broadinstitute/CellBender
|
e884a5520fc3e0fc2f422f8cd6dcdc6c594b5094
|
4990df713f296256577c92cab3314daeeca0f3d7
|
refs/heads/master
| 2023-08-21T14:55:33.619290
| 2023-08-08T18:40:14
| 2023-08-08T18:40:14
| 171,951,233
| 207
| 40
|
BSD-3-Clause
| 2023-08-30T05:27:18
| 2019-02-21T21:53:57
|
Python
|
UTF-8
|
Python
| false
| false
| 3,177
|
py
|
test_dataprep.py
|
"""Test functions in dataprep.py"""
import pytest
import scipy.sparse as sp
import numpy as np
import torch
from cellbender.remove_background.data.dataprep import DataLoader
from cellbender.remove_background.sparse_utils import dense_to_sparse_op_torch
from .conftest import sparse_matrix_equal, simulated_dataset
USE_CUDA = torch.cuda.is_available()
@pytest.mark.parametrize('cuda',
[False,
pytest.param(True, marks=pytest.mark.skipif(not USE_CUDA,
reason='requires CUDA'))],
ids=lambda b: 'cuda' if b else 'cpu')
def test_dataloader_sorting(simulated_dataset, cuda):
"""Test DataLoader sorting in dataprep.py via the `sort_by` argument."""
d = simulated_dataset
data_loader = DataLoader(
d['matrix'],
empty_drop_dataset=None,
batch_size=5,
fraction_empties=0.,
shuffle=False,
use_cuda=cuda,
)
sorted_data_loader = DataLoader(
d['matrix'],
empty_drop_dataset=None,
batch_size=5,
fraction_empties=0.,
shuffle=False,
sort_by=lambda x: -1 * np.array(x.max(axis=1).todense()).squeeze(),
use_cuda=cuda,
)
# try to shuffle and sort at the same time, and expect a failure
with pytest.raises(AssertionError):
sorted_data_loader2 = DataLoader(
d['matrix'],
empty_drop_dataset=None,
batch_size=5,
fraction_empties=0.,
shuffle=True,
sort_by=lambda x: -1 * np.array(x.max(axis=1).todense()).squeeze(),
use_cuda=cuda,
)
# this is copied from infer.BasePosterior._get_mean() which is not ideal
out = []
for loader in [data_loader, sorted_data_loader]:
barcodes = []
genes = []
counts = []
ind = 0
for data in loader:
dense_counts = data # just make it the same!
# Convert to sparse.
bcs_i_chunk, genes_i, counts_i = dense_to_sparse_op_torch(dense_counts)
# Barcode index in the dataloader.
bcs_i = bcs_i_chunk + ind
# Obtain the real barcode index after unsorting the dataloader.
bcs_i = loader.unsort_inds(bcs_i)
# Add sparse matrix values to lists.
barcodes.append(bcs_i)
genes.append(genes_i)
counts.append(counts_i)
# Increment barcode index counter.
ind += data.shape[0] # Same as data_loader.batch_size
# Convert the lists to numpy arrays.
counts = np.concatenate(counts).astype(np.uint32)
barcodes = np.concatenate(barcodes).astype(np.uint32)
genes = np.concatenate(genes).astype(np.uint32) # uint16 is too small!
print('counts')
print(counts)
print('barcodes')
print(barcodes)
print('genes')
print(genes)
# Put the counts into a sparse csc_matrix.
out.append(sp.csc_matrix((counts, (barcodes, genes)),
shape=d['matrix'].shape))
assert sparse_matrix_equal(out[0], out[1])
|
5075dbd1c0d35c5797e437b9ae980de4d7c92d66
|
d4a88b3b102e20e727cae8fbd4167dcb4b57d1ec
|
/dump/typingissues/no-overload-matches.py
|
7bc960e1c3e1652df41983591ffe1fb845547bd7
|
[
"MIT"
] |
permissive
|
viblo/pymunk
|
ca64888e45706db431788368ff8464edf2912d5f
|
20ac14f665fb38b4ef1bef5acea36a3d612dd0d5
|
refs/heads/master
| 2023-08-27T16:37:14.740653
| 2023-08-16T19:26:16
| 2023-08-16T19:26:16
| 13,273,472
| 855
| 255
|
MIT
| 2023-01-13T10:13:47
| 2013-10-02T14:36:46
|
Python
|
UTF-8
|
Python
| false
| false
| 773
|
py
|
no-overload-matches.py
|
import collections.abc
from typing import Any, Optional, Sequence, Tuple, Union, overload
class Vec(Sequence[float]):
@overload
def __init__(self, x_or_pair: Sequence[float], y: None) -> None:
...
@overload
def __init__(self, x_or_pair: float, y: float) -> None:
...
def __init__(
self, x_or_pair: Union[Sequence[float], float] = None, y: Optional[float] = None
) -> None:
pass
@overload
def __getitem__(self, index: int) -> float:
...
@overload
def __getitem__(self, index: slice) -> Sequence[float]:
...
def __getitem__(self, index: Union[int, slice]) -> Union[float, Sequence[float]]:
return 0
def __len__(self) -> int:
return 2
Vec((1.0, 2.0)) #
|
92f79d29af94ee0adf986a17c58b3091ba4ea9dc
|
219a938ab3b084f8a9a0c4e0fe552ae40a42b991
|
/liminal/runners/airflow/operators/operator_with_variable_resolving.py
|
9f00f8ed5248a32616e4bd0a74466953fd4c16e0
|
[
"LGPL-2.0-or-later",
"Apache-2.0"
] |
permissive
|
apache/incubator-liminal
|
0b9c510ed30826fcd416a5b3aaf991d675b0347a
|
57246ec472dc79529a68b2c6edd76e5fef677f1b
|
refs/heads/master
| 2023-08-31T09:25:43.704740
| 2023-01-22T08:13:30
| 2023-01-22T08:13:30
| 271,182,596
| 141
| 44
|
Apache-2.0
| 2023-09-12T19:01:36
| 2020-06-10T04:57:10
|
Python
|
UTF-8
|
Python
| false
| false
| 7,532
|
py
|
operator_with_variable_resolving.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import logging
import re
from datetime import datetime
from typing import Any, Dict, Optional, Set
import jinja2
from airflow.models import BaseOperator
from airflow.settings import Session
from jinja2 import Environment
from liminal.runners.airflow.config import standalone_variable_backend
_VAR_REGEX = '(.*){{([^}]*)}}(.*)'
_BASE_OPERATOR_ATTRIBUTES = list(inspect.signature(BaseOperator.__init__).parameters.keys())
class OperatorWithVariableResolving(BaseOperator):
"""
Operator delegator that handles liminal variable substitution at run time
"""
def __init__(self, dag, task_config: dict, variables: dict = None, liminal_task_instance=None, **kwargs):
self.operator_delegate: BaseOperator = kwargs.pop('operator')
self.liminal_task_instance = liminal_task_instance.serialize() if liminal_task_instance else None
if variables:
self.variables = variables.copy()
else:
self.variables = {}
self.task_config = task_config
super().__init__(task_id=self.operator_delegate.task_id, dag=dag)
self._LOG = logging.getLogger(self.__class__.__name__)
def execute(self, context):
attributes = self._get_operator_delegate_attributes()
self._LOG.info(f'task_config: {self.task_config}')
self._LOG.info(f'variables: {self.variables}')
self.operator_delegate.template_fields = set(list(self.operator_delegate.template_fields) + attributes)
self.operator_delegate.render_template_fields(context, LiminalEnvironment(self.variables, self.task_config))
self.operator_delegate.render_template_fields(context)
if 'ti' in context:
context['ti'].xcom_push(key="liminal_task_instance", value=self.liminal_task_instance)
return self.operator_delegate.execute(context)
def post_execute(self, context, result=None):
self.operator_delegate.post_execute(context, result)
def _get_operator_delegate_attributes(self):
return [
attr
for attr in dir(self.operator_delegate)
if attr not in _BASE_OPERATOR_ATTRIBUTES
and attr not in dir(BaseOperator)
and not attr.startswith('_')
and attr not in ('args', 'kwargs', 'lineage_data', 'subdag', 'template_fields')
]
def pre_execute(self, context: Any):
return self.operator_delegate.pre_execute(context)
def on_kill(self) -> None:
self.operator_delegate.on_kill()
def render_template_fields(self, context: Dict, jinja_env: Optional[jinja2.Environment] = None) -> None:
pass
def render_template(
self,
content: Any,
context: Dict,
jinja_env: Optional[jinja2.Environment] = None,
seen_oids: Optional[Set] = None,
) -> Any:
value = self.operator_delegate.render_template(
content, context, LiminalEnvironment(self.variables, self.task_config)
)
return self.operator_delegate.render_template(value, context, jinja_env, seen_oids)
def get_template_env(self) -> jinja2.Environment:
return self.operator_delegate.get_template_env()
def prepare_template(self) -> None:
self.operator_delegate.prepare_template()
def resolve_template_files(self) -> None:
self.operator_delegate.resolve_template_files()
def clear(
self,
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None,
upstream: bool = False,
downstream: bool = False,
session: Session = None,
):
return self.operator_delegate.clear(start_date, end_date, upstream, downstream, session)
def run(
self,
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None,
ignore_first_depends_on_past: bool = True,
ignore_ti_state: bool = False,
mark_success: bool = False,
) -> None:
self.operator_delegate.run(start_date, end_date, ignore_first_depends_on_past, ignore_ti_state, mark_success)
class LiminalEnvironment(Environment):
def __init__(self, variables, task_config=None):
super().__init__()
self.val = None
self.variables = variables.copy()
logging.info(f'variables: {variables}')
if task_config and 'variables' in task_config:
task_variables = task_config['variables']
if isinstance(task_variables, dict):
self.variables.update(task_variables)
elif isinstance(task_variables, str):
variables_key = self.from_string(task_variables).render()
if variables_key in variables:
self.variables.update(variables[variables_key])
def from_string(self, val, **kwargs):
self.val = val
return self
def render(self, *_, **kwargs):
"""
Implements jinja2.environment.Template.render
"""
conf = kwargs['dag_run'].conf if 'dag_run' in kwargs else {}
return self.__render(self.val, conf, set())
def __render(self, val: str, dag_run_conf: dict, unresolved_tags: set):
token = re.match(_VAR_REGEX, val)
if token and token[2].strip() not in unresolved_tags:
tag_name = token[2].strip()
prefix = self.__render(token[1], dag_run_conf, unresolved_tags)
suffix = self.__render(token[3], dag_run_conf, unresolved_tags)
if dag_run_conf and tag_name in dag_run_conf:
return self.__render(prefix + str(dag_run_conf[tag_name]) + suffix, dag_run_conf, unresolved_tags)
elif tag_name in self.variables:
return self.__render(prefix + str(self.variables[tag_name]) + suffix, dag_run_conf, unresolved_tags)
else:
backend_value = standalone_variable_backend.get_variable(tag_name, None)
if backend_value:
return self.__render(prefix + backend_value + suffix, dag_run_conf, unresolved_tags)
else:
unresolved_tags.add(tag_name)
return self.__render(prefix + '{{' + token[2] + '}}' + suffix, dag_run_conf, unresolved_tags)
else:
return val
def add_variables_to_operator(operator, task) -> BaseOperator:
"""
:param operator: Airflow operator
:type operator: BaseOperator
:param task: Task instance
:type task: Task
:returns: OperatorWithVariableResolving wrapping given operator
"""
return OperatorWithVariableResolving(
dag=task.dag,
task_config=task.task_config,
variables=task.variables,
liminal_task_instance=task,
operator=operator,
)
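# Resolution sketch (hypothetical variable values): LiminalEnvironment resolves
# {{tags}} with dag_run conf taking precedence over pipeline/task variables, which in
# turn take precedence over the standalone variable backend; unresolved tags are kept verbatim.
# env = LiminalEnvironment({'image': 'my_app:latest'})
# env.from_string('docker run {{image}}').render() # -> 'docker run my_app:latest'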
|
5ade632fe0c1dede0f3e95561c7fbd8f0ed227e3
|
f1973e136f49f0b5ea2ec63c4d862188d197e5a5
|
/ans/upgrade/cfgdb/v3.0.0/files/cfgdb_zk_rest_server.py
|
e90c01b98705a6c4be44b64a9cd3c9e03579fe41
|
[
"Apache-2.0"
] |
permissive
|
erigones/esdc-ce
|
65dc7d84e1bca3e3fcec668f54acae20183096a2
|
7e3dedddbe821283d909393f333eed4acd452953
|
refs/heads/master
| 2023-02-07T17:57:15.970089
| 2022-02-03T12:55:14
| 2022-02-03T12:55:14
| 73,122,985
| 123
| 36
|
Apache-2.0
| 2023-01-24T23:22:54
| 2016-11-07T21:34:53
|
Python
|
UTF-8
|
Python
| false
| false
| 8,786
|
py
|
cfgdb_zk_rest_server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import ssl
import json
import signal
import logging
import subprocess
try:
# noinspection PyCompatibility,PyUnresolvedReferences
import urlparse
except ImportError:
# noinspection PyCompatibility,PyUnresolvedReferences
from urllib import parse as urlparse
try:
# noinspection PyCompatibility,PyUnresolvedReferences
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# noinspection PyCompatibility,PyUnresolvedReferences
from http.server import BaseHTTPRequestHandler, HTTPServer
PY3 = sys.version_info[0] >= 3
if PY3:
string_types = (str,)
else:
# noinspection PyUnresolvedReferences,PyCompatibility
string_types = (basestring,) # noqa: F821
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format='%(asctime)s %(levelname)-8s %(name)s: %(message)s')
logger = logging.getLogger(__name__)
VERSION = '0.1'
DEFAULT_HTTP_ADDRESS = ''
DEFAULT_HTTP_PORT = 12181
class ValidationError(ValueError):
def __init__(self, attr, detail, status=400):
self.attr = attr
self.detail = detail
self.status = status
@property
def as_json(self):
return {self.attr: self.detail}
# noinspection PyPep8Naming
class RESTRequestHandler(BaseHTTPRequestHandler):
method = None
_content = None
@property
def content(self):
if self._content is None:
content_length = int(self.headers.getheader('Content-Length', 0))
if content_length:
self._content = self.rfile.read(content_length)
else:
self._content = ''
return self._content
def send_json_response(self, data, status=200):
self.send_response(status)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(data))
def parse_json_content(self):
content = self.content
if content:
return json.loads(content)
else:
return {}
def handle_request(self):
raise NotImplementedError
def _handle_request(self):
try:
self.handle_request()
except ValidationError as exc:
logger.exception(exc)
self.send_json_response(exc.as_json, status=exc.status)
except Exception as exc:
logger.exception(exc)
self.send_error(500, 'Internal Server Error')
def do_GET(self):
self.method = 'GET'
self._handle_request()
def do_POST(self):
self.method = 'POST'
self._handle_request()
def do_PUT(self):
self.method = 'PUT'
self._handle_request()
def do_DELETE(self):
self.method = 'DELETE'
self._handle_request()
class ZKRESTRequestHandler(RESTRequestHandler):
default_string_max_length = 4019
zk_data_size_limit = 2097152
zk_commands = frozenset((
'exists',
'get',
'ls',
'lsr',
'create',
'creater',
'set',
'delete',
'rm',
'deleter',
'rmr',
'getacl',
'setacl'
))
method_to_zk_command = {
'GET': 'get',
'POST': 'create',
'PUT': 'set',
'DELETE': 'delete',
}
zk_servers = os.environ.get('ZK_REST_ZK_SERVERS', '127.0.0.1')
zk_base_cmd = (os.environ.get('ZK_REST_ZK_CLI', 'zookeepercli'), '-servers', zk_servers)
def version_string(self):
return 'ZooKeeper REST Service / ' + VERSION
@classmethod
def validate_string_input(cls, attr, value, max_length=default_string_max_length):
if not isinstance(value, string_types):
raise ValidationError(attr, 'Invalid value.')
if max_length and len(value) > max_length:
raise ValidationError(attr, 'Too large.', status=413)
return value
def run_zk_cmd(self, command, node, data=None, force=False, username=None, password=None):
cmd = list(self.zk_base_cmd)
if force:
cmd.append('-force')
if username is not None:
cmd.extend(('-auth_usr', self.validate_string_input('username', username)))
if password is not None:
cmd.extend(('-auth_pwd', self.validate_string_input('password', password)))
cmd.extend(('-c', command, self.validate_string_input('node', node)))
if data is not None:
cmd.append(self.validate_string_input('data', data, max_length=self.zk_data_size_limit))
logger.debug('Running command: %s', cmd)
exc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = exc.communicate()
res = {'returncode': exc.returncode, 'stdout': stdout.strip(), 'stderr': stderr.strip()}
logger.info('Command "%s" finished with %s', cmd, res)
return res
def handle_request(self):
url = urlparse.urlparse(self.path)
qs = urlparse.parse_qs(url.query)
zk_cmd = qs.get('cmd', None)
if zk_cmd:
zk_cmd = zk_cmd[0]
else:
zk_cmd = self.method_to_zk_command.get(self.method, None)
logger.info('Got request: [%s %s]', zk_cmd, url.path)
if not zk_cmd or zk_cmd not in self.zk_commands:
logger.error('Request [%s %s] command is invalid', zk_cmd, url.path)
self.send_json_response({'detail': 'Invalid command'}, status=400)
return
try:
data = self.parse_json_content()
if not isinstance(data, dict):
raise TypeError
except (TypeError, ValueError):
logger.error('Request [%s %s] has invalid JSON content: "%s"', zk_cmd, url.path, self.content)
            self.send_json_response({'detail': 'Malformed request'}, status=400) # same error shape as the invalid-command branch
return
else:
logger.debug('Request [%s %s] has JSON content: "%s"', zk_cmd, url.path, data)
res = self.run_zk_cmd(
zk_cmd,
url.path,
data=data.get('data', None),
force=bool(data.get('force', False)),
username=self.headers.get('zk-username', None),
password=self.headers.get('zk-password', None)
)
if res['returncode'] == 0:
status = 200
else:
if 'node does not exist' in res['stderr']:
status = 404
elif 'node already exists' in res['stderr']:
status = 406
else:
status = 400
logger.info('Request [%s %s] response: "%s"', zk_cmd, url.path, res)
self.send_json_response(res, status=status)
# noinspection PyPep8Naming
def do_HEAD(self):
if self.path == '/':
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
else:
self.send_error(501, 'Unsupported method')
class ESDCZKRESTRequestHandler(ZKRESTRequestHandler):
def version_string(self):
return 'ESDC ' + ZKRESTRequestHandler.version_string(self)
def handle_request(self):
if self.path.startswith('/esdc'):
ZKRESTRequestHandler.handle_request(self)
else:
self.send_json_response({'detail': 'Permission Denied'}, status=403)
def run_server(address=DEFAULT_HTTP_ADDRESS, port=DEFAULT_HTTP_PORT, ssl_cert=None, ssl_key=None, ca_certs=None,
request_handler=ESDCZKRESTRequestHandler):
http_server = HTTPServer((address, port), request_handler)
if ssl_cert:
http_server.socket = ssl.wrap_socket(http_server.socket, keyfile=ssl_key, certfile=ssl_cert, ca_certs=ca_certs,
server_side=True)
# noinspection PyUnusedLocal
def stop_server(signum, frame):
logger.info('Stopping HTTP server with signal %s', signum)
raise KeyboardInterrupt
signal.signal(signal.SIGINT, stop_server)
signal.signal(signal.SIGTERM, stop_server)
logger.info('Starting HTTP [ssl=%s] server at %s:%s', ssl_cert, address, port)
try:
http_server.serve_forever()
except KeyboardInterrupt:
http_server.shutdown()
logger.info('Stopped HTTP server')
http_server.server_close()
def main():
run_server(
address=os.environ.get('ZK_REST_HTTP_ADDRESS', DEFAULT_HTTP_ADDRESS),
        port=int(os.environ.get('ZK_REST_HTTP_PORT', DEFAULT_HTTP_PORT)), # env values are strings; the socket API needs an int
ssl_cert=os.environ.get('ZK_REST_HTTP_SSL_CERT', None),
ssl_key=os.environ.get('ZK_REST_HTTP_SSL_KEY', None),
ca_certs=os.environ.get('ZK_REST_HTTP_CA_CERTS', None),
)
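# ---------------------------------------------------------------------------
# Editor's note: an illustrative client sketch, not used by the service above.
# It shows the request shape this handler expects: the HTTP verb selects the
# ZooKeeper command (see method_to_zk_command), the URL path is the znode, an
# optional JSON body carries "data"/"force", and credentials travel in the
# zk-username/zk-password headers. The host, port and node path are assumed
# values for illustration only.
# ---------------------------------------------------------------------------
def example_zk_rest_call(method, node, payload=None, username=None, password=None,
                         base_url='http://127.0.0.1:%d' % DEFAULT_HTTP_PORT):
    """Send one request to a running instance of this service and return the parsed JSON reply."""
    try:
        from urllib.request import Request, urlopen # Python 3
    except ImportError:
        from urllib2 import Request, urlopen # Python 2
    request = Request(base_url + node, data=json.dumps(payload or {}).encode('utf-8'),
                      headers={'Content-Type': 'application/json'})
    request.get_method = lambda: method # force the desired HTTP verb
    if username:
        request.add_header('zk-username', username)
    if password:
        request.add_header('zk-password', password)
    return json.loads(urlopen(request).read().decode('utf-8'))
# Example calls (ESDCZKRESTRequestHandler only accepts paths under /esdc):
# example_zk_rest_call('POST', '/esdc/demo', payload={'data': 'hello', 'force': True})
# example_zk_rest_call('GET', '/esdc/demo')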
if __name__ == '__main__':
main()
|
a3f736efb4d2c364877181ccf1f98ca22426dba2
|
98dbb9cd9523809b4ee0e6b92334fa6a2a6af2a3
|
/bingads/v13/bulk/file_writer.py
|
f539d2836eeb2ee10d0917bdc32154e16b890b50
|
[
"MIT"
] |
permissive
|
BingAds/BingAds-Python-SDK
|
a2f9b0c099b574a4495d0052218f263af55cdb32
|
373a586402bf24af7137b7c49321dbc70c859fce
|
refs/heads/main
| 2023-07-27T15:31:41.354708
| 2023-07-10T03:21:03
| 2023-07-10T03:21:03
| 31,927,550
| 105
| 182
|
NOASSERTION
| 2023-09-04T06:51:20
| 2015-03-09T23:09:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,146
|
py
|
file_writer.py
|
from bingads.v13.internal.bulk.object_writer import _BulkObjectWriter
class BulkFileWriter:
""" Provides methods to write bulk entities to a file.
For more information about the Bulk File Schema, see https://go.microsoft.com/fwlink/?linkid=846127.
:param file_path: The file path of the bulk file to write.
:type file_path: str
:param file_type: The bulk file type.
:type file_type: str
"""
def __init__(self, file_path, file_type='Csv'):
self._file_path = file_path
self._file_type = file_type
self._bulk_object_writer = _BulkObjectWriter(file_path=self.file_path, file_type=self.file_type)
self._bulk_object_writer.__enter__()
self._bulk_object_writer.write_file_metadata()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._bulk_object_writer.__exit__(exc_type, exc_value, traceback)
def close(self):
self.__exit__(None, None, None)
def write_entity(self, entity, exclude_readonly_data=False):
""" Writes the specified :class:`.BulkEntity` to the file.
Bulk entities that are derived from :class:`._SingleRecordBulkEntity` will be written to a single row in the file.
Bulk entities that are derived from :class:`._MultiRecordBulkEntity` will be written to multiple rows in the file.
:param entity: The bulk entity to write to the file.
:type entity: BulkEntity
        :param exclude_readonly_data: whether read-only data (such as errors, performance data, etc.)
            should be excluded when writing to the file
:type exclude_readonly_data: bool
:rtype: None
"""
entity.write_to_stream(self._bulk_object_writer, exclude_readonly_data=exclude_readonly_data)
@property
def file_path(self):
""" The file path of the bulk file to write.
:rtype: str
"""
return self._file_path
@property
def file_type(self):
""" The bulk file type.
:rtype: str
"""
return self._file_type
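# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the SDK surface. It only
# relies on the methods defined above; constructing concrete BulkEntity
# instances depends on SDK types not shown in this file, so they are taken as
# an argument here.
# ---------------------------------------------------------------------------
def _example_write_entities(file_path, entities):
    """Write the given bulk entities to file_path, skipping read-only columns."""
    with BulkFileWriter(file_path, file_type='Csv') as writer:
        for entity in entities:
            writer.write_entity(entity, exclude_readonly_data=True)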
|
500931a601a860c092aedf078c718e208112125c
|
6d58cdc52b4f882b498d44791ea41d89f2691445
|
/nipyapi/nifi/models/node_dto.py
|
34d4111b8dab801a9377d7013f6962d4567e7373
|
[
"Apache-2.0"
] |
permissive
|
Chaffelson/nipyapi
|
8cb47c1f13e9b3d53d4add8829c2efcee24349b6
|
c687fb811486d7bcada099ac0785b55cfb30aea8
|
refs/heads/main
| 2022-12-02T15:39:27.685280
| 2022-12-01T12:39:10
| 2022-12-01T12:39:10
| 101,291,622
| 229
| 84
|
NOASSERTION
| 2023-08-27T15:55:31
| 2017-08-24T12:17:36
|
Python
|
UTF-8
|
Python
| false
| false
| 10,824
|
py
|
node_dto.py
|
# coding: utf-8
"""
NiFi Rest API
The Rest API provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.19.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class NodeDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'node_id': 'str',
'address': 'str',
'api_port': 'int',
'status': 'str',
'heartbeat': 'str',
'connection_requested': 'str',
'roles': 'list[str]',
'active_thread_count': 'int',
'queued': 'str',
'events': 'list[NodeEventDTO]',
'node_start_time': 'str'
}
attribute_map = {
'node_id': 'nodeId',
'address': 'address',
'api_port': 'apiPort',
'status': 'status',
'heartbeat': 'heartbeat',
'connection_requested': 'connectionRequested',
'roles': 'roles',
'active_thread_count': 'activeThreadCount',
'queued': 'queued',
'events': 'events',
'node_start_time': 'nodeStartTime'
}
def __init__(self, node_id=None, address=None, api_port=None, status=None, heartbeat=None, connection_requested=None, roles=None, active_thread_count=None, queued=None, events=None, node_start_time=None):
"""
NodeDTO - a model defined in Swagger
"""
self._node_id = None
self._address = None
self._api_port = None
self._status = None
self._heartbeat = None
self._connection_requested = None
self._roles = None
self._active_thread_count = None
self._queued = None
self._events = None
self._node_start_time = None
if node_id is not None:
self.node_id = node_id
if address is not None:
self.address = address
if api_port is not None:
self.api_port = api_port
if status is not None:
self.status = status
if heartbeat is not None:
self.heartbeat = heartbeat
if connection_requested is not None:
self.connection_requested = connection_requested
if roles is not None:
self.roles = roles
if active_thread_count is not None:
self.active_thread_count = active_thread_count
if queued is not None:
self.queued = queued
if events is not None:
self.events = events
if node_start_time is not None:
self.node_start_time = node_start_time
@property
def node_id(self):
"""
Gets the node_id of this NodeDTO.
The id of the node.
:return: The node_id of this NodeDTO.
:rtype: str
"""
return self._node_id
@node_id.setter
def node_id(self, node_id):
"""
Sets the node_id of this NodeDTO.
The id of the node.
:param node_id: The node_id of this NodeDTO.
:type: str
"""
self._node_id = node_id
@property
def address(self):
"""
Gets the address of this NodeDTO.
The node's host/ip address.
:return: The address of this NodeDTO.
:rtype: str
"""
return self._address
@address.setter
def address(self, address):
"""
Sets the address of this NodeDTO.
The node's host/ip address.
:param address: The address of this NodeDTO.
:type: str
"""
self._address = address
@property
def api_port(self):
"""
Gets the api_port of this NodeDTO.
The port the node is listening for API requests.
:return: The api_port of this NodeDTO.
:rtype: int
"""
return self._api_port
@api_port.setter
def api_port(self, api_port):
"""
Sets the api_port of this NodeDTO.
The port the node is listening for API requests.
:param api_port: The api_port of this NodeDTO.
:type: int
"""
self._api_port = api_port
@property
def status(self):
"""
Gets the status of this NodeDTO.
The node's status.
:return: The status of this NodeDTO.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this NodeDTO.
The node's status.
:param status: The status of this NodeDTO.
:type: str
"""
self._status = status
@property
def heartbeat(self):
"""
Gets the heartbeat of this NodeDTO.
        The time of the node's last heartbeat.
:return: The heartbeat of this NodeDTO.
:rtype: str
"""
return self._heartbeat
@heartbeat.setter
def heartbeat(self, heartbeat):
"""
Sets the heartbeat of this NodeDTO.
        The time of the node's last heartbeat.
:param heartbeat: The heartbeat of this NodeDTO.
:type: str
"""
self._heartbeat = heartbeat
@property
def connection_requested(self):
"""
Gets the connection_requested of this NodeDTO.
The time of the node's last connection request.
:return: The connection_requested of this NodeDTO.
:rtype: str
"""
return self._connection_requested
@connection_requested.setter
def connection_requested(self, connection_requested):
"""
Sets the connection_requested of this NodeDTO.
The time of the node's last connection request.
:param connection_requested: The connection_requested of this NodeDTO.
:type: str
"""
self._connection_requested = connection_requested
@property
def roles(self):
"""
Gets the roles of this NodeDTO.
The roles of this node.
:return: The roles of this NodeDTO.
:rtype: list[str]
"""
return self._roles
@roles.setter
def roles(self, roles):
"""
Sets the roles of this NodeDTO.
The roles of this node.
:param roles: The roles of this NodeDTO.
:type: list[str]
"""
self._roles = roles
@property
def active_thread_count(self):
"""
Gets the active_thread_count of this NodeDTO.
The active threads for the NiFi on the node.
:return: The active_thread_count of this NodeDTO.
:rtype: int
"""
return self._active_thread_count
@active_thread_count.setter
def active_thread_count(self, active_thread_count):
"""
Sets the active_thread_count of this NodeDTO.
The active threads for the NiFi on the node.
:param active_thread_count: The active_thread_count of this NodeDTO.
:type: int
"""
self._active_thread_count = active_thread_count
@property
def queued(self):
"""
Gets the queued of this NodeDTO.
        The queue for the NiFi on the node.
:return: The queued of this NodeDTO.
:rtype: str
"""
return self._queued
@queued.setter
def queued(self, queued):
"""
Sets the queued of this NodeDTO.
        The queue for the NiFi on the node.
:param queued: The queued of this NodeDTO.
:type: str
"""
self._queued = queued
@property
def events(self):
"""
Gets the events of this NodeDTO.
The node's events.
:return: The events of this NodeDTO.
:rtype: list[NodeEventDTO]
"""
return self._events
@events.setter
def events(self, events):
"""
Sets the events of this NodeDTO.
The node's events.
:param events: The events of this NodeDTO.
:type: list[NodeEventDTO]
"""
self._events = events
@property
def node_start_time(self):
"""
Gets the node_start_time of this NodeDTO.
The time at which this Node was last refreshed.
:return: The node_start_time of this NodeDTO.
:rtype: str
"""
return self._node_start_time
@node_start_time.setter
def node_start_time(self, node_start_time):
"""
Sets the node_start_time of this NodeDTO.
The time at which this Node was last refreshed.
:param node_start_time: The node_start_time of this NodeDTO.
:type: str
"""
self._node_start_time = node_start_time
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, NodeDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
6089575b00a8c46ab6f3e219ce26aac0a973eec3
|
b60686a2e351a756f249e0d9faab8fe154a08f11
|
/dffml/source/dir.py
|
d15aae7bc076ba189541e1c1052cee5e3d0cb523
|
[
"MIT",
"LicenseRef-scancode-generic-export-compliance"
] |
permissive
|
intel/dffml
|
86483b47229b9b62c9f8dfef51491aa02563347e
|
7d381bf67a72fe1ecb1012393d5726085564cb0e
|
refs/heads/main
| 2023-08-28T00:35:04.219193
| 2023-06-06T18:29:16
| 2023-06-06T18:29:16
| 149,512,216
| 237
| 204
|
MIT
| 2023-05-05T15:39:35
| 2018-09-19T21:06:34
|
Python
|
UTF-8
|
Python
| false
| false
| 4,124
|
py
|
dir.py
|
"""
Loads files from a directory
"""
import os
import glob
import pathlib
from typing import List
from ..record import Record
from ..base import config, field
from .memory import MemorySource
from ..util.entrypoint import entrypoint
from ..source.source import BaseSource
from ..configloader.configloader import ConfigLoaders
from ..high_level.source import save
class FolderNotFoundError(Exception):
"""
Folder doesn't exist.
"""
@config
class DirectorySourceConfig:
foldername: str
feature: str = field("Name of the feature the data will be referenced as")
labels: List[str] = field(
"Image labels", default_factory=lambda: ["unlabelled"]
)
save: BaseSource = None
@entrypoint("dir")
class DirectorySource(MemorySource):
"""
Source to read files in a folder.
"""
CONFIG = DirectorySourceConfig
CONFIG_LOADER = ConfigLoaders()
def __init__(self, config):
super().__init__(config)
if isinstance(getattr(self.config, "foldername", None), str):
with self.config.no_enforce_immutable():
self.config.foldername = pathlib.Path(self.config.foldername)
async def __aenter__(self) -> "BaseSourceContext":
await self._open()
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self._close()
async def _open(self):
        # Raise if the path is missing or is not a directory
        if not os.path.exists(self.config.foldername) or not os.path.isdir(
            self.config.foldername
        ):
raise FolderNotFoundError(f"Folder path: {self.config.foldername}")
if (
self.config.labels != ["unlabelled"]
and len(self.config.labels) == 1
):
if os.path.isfile(self.config.labels[0]):
# Update labels with list read from the file
with self.config.no_enforce_immutable():
self.config.labels = pathlib.Path.read_text(
pathlib.Path(self.config.labels[0])
).split(",")
elif self.config.labels != ["unlabelled"]:
label_folders = [
labels
for labels in os.listdir(self.config.foldername)
if os.path.isdir(os.path.join(self.config.foldername, labels))
]
# Check if all existing label folders are given to `labels` list
if set(label_folders) > set(self.config.labels):
self.logger.warning(
"All labels not specified. Folders present: %s \nLabels entered: %s",
label_folders,
self.config.labels,
)
await self.load_fd()
async def _close(self):
if self.config.save:
await save(self.config.save, self.mem)
async def load_fd(self):
self.mem = {}
# Iterate over the labels list
for label in self.config.labels:
if self.config.labels == ["unlabelled"]:
folders = self.config.foldername
else:
folders = self.config.foldername.joinpath(label)
# Go through all image files and read them using pngconfigloader
for file_name in map(
os.path.basename, glob.glob(str(folders) + "/*")
):
image_filename = folders.joinpath(file_name)
async with self.CONFIG_LOADER as cfgl:
_, feature_data = await cfgl.load_file(image_filename)
if self.config.labels != ["unlabelled"]:
file_name = label + "/" + file_name
self.mem[file_name] = Record(
file_name,
data={
"features": {
self.config.feature: feature_data,
"label": label,
}
},
)
if self.config.labels == ["unlabelled"]:
del self.mem[file_name].features()["label"]
self.logger.debug("%r loaded %d records", self, len(self.mem))
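# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch of driving this source directly; the
# folder name, feature name and labels are placeholder values. Records end up
# in ``source.mem`` keyed by "<label>/<filename>" (or just the file name when
# the source is unlabelled), as implemented in load_fd() above.
# ---------------------------------------------------------------------------
async def _example_load_directory(folder, feature="image", labels=None):
    config = DirectorySourceConfig(
        foldername=folder,
        feature=feature,
        labels=labels if labels is not None else ["unlabelled"],
    )
    async with DirectorySource(config) as source:
        return dict(source.mem)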
|
e1d60bf59d0e2b1143eea190b03c262551f6ee2a
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-HealthKit/PyObjCTest/test_healthkit.py
|
4105173b86e83137de8a4638a25b412b3e297720
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 194
|
py
|
test_healthkit.py
|
from PyObjCTools.TestSupport import TestCase
import HealthKit
class TestHealthKit(TestCase):
def test_callable_metadata_is_sane(self):
self.assertCallableMetadataIsSane(HealthKit)
|
6f6495d32fef698ed1919f427c62d665d5abf8b0
|
a5ae6cdf4288c5d0f015dd2fa4718ca8d922ceda
|
/scraper/services/__init__.py
|
fe45e7e34cb5d5188f6eae57eb0e4f7a5c28b60c
|
[] |
no_license
|
itsToggle/plex_debrid
|
e2de847abd0f59259c3285cb4672200a79ef0e8f
|
8deca8c9a57deed35e065d52ca211a5f90fcbdc1
|
refs/heads/main
| 2023-08-03T11:29:13.705811
| 2023-07-20T09:09:38
| 2023-07-20T09:09:38
| 490,273,040
| 761
| 72
| null | 2023-09-04T20:06:44
| 2022-05-09T12:27:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,472
|
py
|
__init__.py
|
from base import *
#import child modules
from scraper.services import rarbg
from scraper.services import x1337
from scraper.services import jackett
from scraper.services import prowlarr
from scraper.services import orionoid
from scraper.services import nyaa
from scraper.services import torrentio
#define subclass method
def __subclasses__():
return [rarbg,x1337,jackett,prowlarr,orionoid,nyaa,torrentio]
active = ['torrentio']
overwrite = []
def setup(cls, new=False):
from settings import settings_list
global active
settings = []
for category, allsettings in settings_list:
for setting in allsettings:
if setting.cls == cls:
settings += [setting]
if settings == []:
if not cls.name in active:
active += [cls.name]
back = False
if not new:
while not back:
print("0) Back")
indices = []
for index, setting in enumerate(settings):
print(str(index + 1) + ') ' + setting.name)
indices += [str(index + 1)]
print()
if settings == []:
print("Nothing to edit!")
print()
time.sleep(3)
return
choice = input("Choose an action: ")
if choice in indices:
settings[int(choice) - 1].setup()
if not cls.name in active:
active += [cls.name]
back = True
elif choice == '0':
back = True
else:
print()
indices = []
for setting in settings:
setting.setup()
if not cls.name in active:
active += [cls.name]
def get():
cls = sys.modules[__name__]
activeservices = []
for servicename in active:
for service in cls.__subclasses__():
if service.name == servicename:
activeservices += [service]
return activeservices
def sequential():
global overwrite
cls = sys.modules[__name__]
activeservices = []
for sequence in overwrite:
activesequence = []
for servicename in sequence:
for service in cls.__subclasses__():
if service.name == servicename:
activesequence += [service]
activeservices += [activesequence]
return activeservices
|
f67f89ee9bb0ec45da5b47a57c2a2e00c54b0f38
|
3d986080cde791938fdfa11cc3dbf31ca37b5936
|
/examples/jsonfile.py
|
daf99bcd351610315053f79613a1447368ef5ff9
|
[
"MIT"
] |
permissive
|
yukinarit/pyserde
|
266545ac4183b883bf23f81e730ed626639f308c
|
89680ed429401d41e2a4584b8023cb6366786e41
|
refs/heads/main
| 2023-08-07T19:05:47.496365
| 2023-07-31T09:56:19
| 2023-07-31T09:56:19
| 160,464,105
| 533
| 40
|
MIT
| 2023-09-07T10:51:47
| 2018-12-05T05:10:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
jsonfile.py
|
"""
jsonfile.py
Deserialize JSON into an object.
Usage:
$ poetry install
$ poetry run python jsonfile.py
"""
from dataclasses import dataclass
from typing import List, Optional
from serde import serde
from serde.json import from_json
@serde
@dataclass
class Slide:
title: str
type: str
items: Optional[List[str]]
@serde
@dataclass
class Slideshow:
author: str
date: str
slides: List[Slide]
title: str
@serde
@dataclass
class Data:
slideshow: Slideshow
def main() -> None:
text = r"""{
"slideshow": {
"author": "Yours Truly",
"date": "date of publication",
"slides": [
{
"title": "Wake up to WonderWidgets!",
"type": "all"
},
{
"items": [
"Why <em>WonderWidgets</em> are great",
"Who <em>buys</em> WonderWidgets"
],
"title": "Overview",
"type": "all"
}
],
"title": "Sample Slide Show"
}
}
"""
data = from_json(Data, text)
print(data)
if __name__ == "__main__":
main()
|
6df03bc778bde720a8c9ca9ee9973138860e990e
|
753cd066a9bd26b6c37c8d53a86c7a9c659ec18c
|
/tutorials/simple_applications/pytorch/bert/test_bert_inference.py
|
7838183170a59c67c337e8e9e089f6925b390a80
|
[
"MIT"
] |
permissive
|
graphcore/examples
|
ac872015808ed2a913d4d7bf0d63202ce15ebbae
|
e2f834dd60e7939672c1795b4ac62e89ad0bca49
|
refs/heads/master
| 2023-08-05T02:08:12.341836
| 2023-07-27T11:13:10
| 2023-07-27T11:13:10
| 143,977,106
| 311
| 80
|
MIT
| 2023-09-11T16:42:56
| 2018-08-08T07:29:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,800
|
py
|
test_bert_inference.py
|
#!/usr/bin/env python3
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import os
import re
import pytest
import tutorials_tests.testing_util as testing_util
EXPECTED_RESULTS = [
("How many islands are there in Scotland?", "more than 790"),
("What sea is to the south of Scotland?", "irish sea"),
("How long is Scotland's border in km?", "154"),
("Where is England in relation to scotland?", "southeast"),
]
def parse_results(out):
lines = out.split("\n")
questions, answers = [], []
for line in lines:
match_question = re.match("Question: (.*)", line)
match_answer = re.match("Answer: (.*)", line)
if match_question:
questions.append(match_question.group(1))
if match_answer:
answers.append(match_answer.group(1))
return list(zip(questions, answers))
def run_poptorch_bert_inference(**kwargs):
cmd = ["python3", "./bert_inference.py"]
# Flatten kwargs and convert to strings
args = [str(item) for sublist in kwargs.items() for item in sublist if item != ""]
cmd.extend(args)
out = testing_util.run_command_fail_explicitly(cmd, os.path.dirname(__file__))
return out
"""High-level integration tests for BERT inference in PopTorch"""
@pytest.mark.ipus(2)
@pytest.mark.category2
def test_poptorch_bert_batch_size_2():
out = run_poptorch_bert_inference(**{"--batch-size": 2})
results = parse_results(out)
# Check both lists match in sizes and contents.
assert results == EXPECTED_RESULTS
@pytest.mark.ipus(2)
@pytest.mark.category2
def test_poptorch_bert_batch_size_4():
out = run_poptorch_bert_inference(**{"--batch-size": 4})
results = parse_results(out)
# Check both lists match in sizes and contents.
assert results == EXPECTED_RESULTS
|
d22a1d62dd80bd884d59ff3c51b675e81f1fe924
|
5095200e9ca55cd3a37af34ed44448c02e2a1bb5
|
/modules/text/language_model/lda_news/config.py
|
da0651c0bbd52e0c1bcf72d547a65c6e19559d3c
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleHub
|
8712603ef486c45e83eb0bc5725b0b3ed3ddbbde
|
b402610a6f0b382a978e82473b541ea1fc6cf09a
|
refs/heads/develop
| 2023-07-24T06:03:13.172978
| 2023-03-28T11:49:55
| 2023-03-28T11:49:55
| 162,672,577
| 12,914
| 2,239
|
Apache-2.0
| 2023-07-06T21:38:19
| 2018-12-21T06:00:48
|
Python
|
UTF-8
|
Python
| false
| false
| 259
|
py
|
config.py
|
"""
This file defines the basic config information of LDA/SLDA model.
"""
class ModelType:
LDA = 0
SLDA = 1
class ModelConfig:
type = None
num_topics = None
alpha = None
beta = None
word_topic_file = None
vocab_file = None
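# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch of filling in a config object; every
# value and file name below is a placeholder, not the module's shipped
# configuration.
# ---------------------------------------------------------------------------
def _example_lda_config():
    config = ModelConfig()
    config.type = ModelType.LDA
    config.num_topics = 100 # placeholder
    config.alpha = 0.05 # placeholder
    config.beta = 0.01 # placeholder
    config.word_topic_file = "lda_news_word_topic.model" # placeholder path
    config.vocab_file = "vocab_info.txt" # placeholder path
    return config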
|
e03487109a0673a98b56442254557bdac0dcb015
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/sa/profiles/Zyxel/MSAN/get_version.py
|
df2379b2c9719a69d8c1e235c08b15a77c07b511
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,998
|
py
|
get_version.py
|
# ---------------------------------------------------------------------
# Zyxel.MSAN.get_version
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
class Script(BaseScript):
name = "Zyxel.MSAN.get_version"
interface = IGetVersion
cache = True
rx_ver1 = re.compile(
r"^\s*product model\s*:\s+(?P<platform>\S+)\s*\n"
r"^\s*system up time\s*:\s+(?P<uptime>\S+)\s*\n"
r"^\s*f/w version\s*:\s+(?P<version>\S+) \| \S+\s*\n"
r"^\s*bootbase version\s*:\s+(?P<bootprom>\S+) \| \S+\s*\n",
re.MULTILINE,
)
rx_ver2 = re.compile(
r"^\s*Model: (?:\S+ \/ )?(?P<platform>\S+)\s*\n"
r"^\s*ZyNOS version: (?P<version>\S+) \| \S+\s*\n"
r".+?\n"
r"^\s*Bootbase version: (?P<bootprom>\S+) \| \S+\s*\n"
r".+?\n"
r"(^\s*Hardware version: (?P<hardware>\S+)\s*\n)?"
r"^\s*Serial number: (?P<serial>\S+)\s*\n",
re.MULTILINE | re.DOTALL,
)
rx_ver3 = re.compile(
r"^\s*ZyNOS version\s*: (?P<version>\S+) \| \S+\s*\n"
r".+?\n"
r".+?\n"
r"^\s*bootbase version\s*: (?P<bootprom>\S+)"
r"\((?P<platform>MSC\S+)\) \| \S+\s*\n",
re.MULTILINE,
)
rx_ver4 = re.compile(
r"^\s*Bootcode Version: (?P<bootprom>.+)\s*\n"
r"^\s*Hardware Version: (?P<hardware>.+)\s*\n"
r"^\s*Serial Number: (?P<serial>.+)\s*\n"
r"^\s*F/W Version: (?P<version>\S+)\s*\n",
re.MULTILINE,
)
rx_chips = re.compile(r"^\s*(?P<platform>\S+?)(/\S+)?\s+")
def execute(self):
slots = self.profile.get_slots_n(self)
try:
c = self.cli("sys version", cached=True)
match = self.rx_ver1.search(c)
except self.CLISyntaxError:
c = self.cli("sys info show", cached=True)
match = self.rx_ver2.search(c)
if not match:
match = self.rx_ver3.search(c)
if match:
platform = self.profile.get_platform(self, slots, match.group("platform"))
else:
match = self.rx_ver4.search(self.cli("sys info show", cached=True))
if match:
match1 = self.rx_chips.search(self.cli("chips info", cached=True))
r = {
"vendor": "ZyXEL",
"platform": match1.group("platform"),
"version": match.group("version"),
}
if match.group("bootprom") != "not defined":
if "attributes" not in r:
r["attributes"] = {}
r["attributes"]["Boot PROM"] = match.group("bootprom")
if match.group("hardware") != "not defined":
if "attributes" not in r:
r["attributes"] = {}
r["attributes"]["HW version"] = match.group("hardware")
if match.group("serial") != "not defined":
if "attributes" not in r:
r["attributes"] = {}
r["attributes"]["Serial Number"] = match.group("serial").strip()
return r
else:
raise self.NotSupportedError()
r = {
"vendor": "ZyXEL",
"platform": platform,
"version": match.group("version"),
"attributes": {"Boot PROM": match.group("bootprom")},
}
if ("hardware" in match.groupdict()) and (match.group("hardware")):
r["attributes"]["HW version"] = match.group("hardware")
if ("serial" in match.groupdict()) and (match.group("serial")):
r["attributes"]["Serial Number"] = match.group("serial")
return r
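# ---------------------------------------------------------------------------
# Editor's note: a small self-check sketch (never invoked by NOC) showing the
# CLI output shape that rx_ver1 expects; the model and version strings are
# hypothetical.
# ---------------------------------------------------------------------------
def _example_parse_sys_version():
    sample = (
        " product model : IES-1000\n"
        " system up time : 123:45:10\n"
        " f/w version : V3.53(ABCD.1) | 05/12/2015\n"
        " bootbase version : V1.05 | 01/02/2012\n"
    )
    match = Script.rx_ver1.search(sample)
    # -> {'platform': 'IES-1000', 'uptime': '123:45:10',
    #     'version': 'V3.53(ABCD.1)', 'bootprom': 'V1.05'}
    return match.groupdict() if match else None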
|
ed76f21a4a73ac19117bce256b71757d15eedde1
|
3395a234e7c80d011607e79c49cd48bf516f256b
|
/dependencies/jedi/inference/gradual/__init__.py
|
5c86b7b349988f5dd5e0e6c3e957c59918ff997d
|
[
"MIT"
] |
permissive
|
srusskih/SublimeJEDI
|
67329b72e184bc9584843968dcc534a002c797a1
|
95c185d778425c04536d53517b0e3fe6dedf8e59
|
refs/heads/master
| 2023-08-24T11:30:37.801834
| 2022-08-30T09:04:17
| 2022-08-30T09:04:17
| 6,241,108
| 669
| 125
|
MIT
| 2022-08-30T09:04:18
| 2012-10-16T08:23:57
|
Python
|
UTF-8
|
Python
| false
| false
| 143
|
py
|
__init__.py
|
"""
It is unfortunately not well documented how stubs and annotations work in Jedi.
If somebody needs an introduction, please let me know.
"""
|
75bc4f8f23ee1240ebba0443d2e53e2508715b4e
|
8ca19f1a31070738b376c0370c4bebf6b7efcb43
|
/tests/test_graph_client.py
|
7e9336d580753536e293aec6e159ea63af5500b0
|
[
"MIT"
] |
permissive
|
vgrem/Office365-REST-Python-Client
|
2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3
|
cbd245d1af8d69e013c469cfc2a9851f51c91417
|
refs/heads/master
| 2023-09-02T14:20:40.109462
| 2023-08-31T19:14:05
| 2023-08-31T19:14:05
| 51,305,798
| 1,006
| 326
|
MIT
| 2023-08-28T05:38:02
| 2016-02-08T15:24:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,446
|
py
|
test_graph_client.py
|
import uuid
from office365.onedrive.internal.paths.url import UrlPath
from office365.runtime.odata.path_builder import ODataPathBuilder
from office365.runtime.paths.resource_path import ResourcePath
from tests import test_team_site_url
from tests.graph_case import GraphTestCase
class TestGraphClient(GraphTestCase):
def test1_execute_batch_get_requests(self):
current_user = self.client.me.get() # 1.1: construct query to retrieve current user
my_drive = self.client.me.drive.get() # 1.2: construct query to retrieve my drive
self.client.execute_batch() # 2:submit query to the server
self.assertIsNotNone(current_user.id)
self.assertIsNotNone(my_drive.web_url)
def test2_build_resource_path(self):
drive = self.client.me.drive.root.get().execute_query()
self.assertEqual("/me/drive/items/{0}".format(drive.id), str(drive.resource_path))
def test3_build_url_resource_path(self):
path = UrlPath("Sample.docx", ResourcePath("root", ResourcePath("drive", self.client.me.resource_path)))
self.assertEqual(str(path), "/me/drive/root:/Sample.docx:/")
def test4_build_url_nested_resource_path(self):
parent_path = ResourcePath("root", ResourcePath("drive", self.client.me.resource_path))
path = UrlPath("Sample.docx", UrlPath("2018", UrlPath("archive", parent_path)))
self.assertEqual("/me/drive/root:/archive/2018/Sample.docx:/", str(path))
def test5_resolve_drive_url_path(self):
parent_path = self.client.me.drive.root.resource_path
path = UrlPath("Sample.docx", UrlPath("2018", UrlPath("archive", parent_path)))
item_id = uuid.uuid4().hex
path.patch(item_id, inplace=True)
self.assertEqual(f"/me/drive/items/{item_id}", str(path))
def test6_resolve_drive_children_path(self):
path = self.client.me.drive.root.children.resource_path
item_id = uuid.uuid4().hex
path.patch(item_id, inplace=True)
self.assertEqual(f"/me/drive/items/{item_id}", str(path))
def test7_build_drive_children_path(self):
item_id = uuid.uuid4().hex
path = self.client.sites.root.drive.items[item_id].children.resource_path
self.assertEqual(f"/sites/root/drive/items/{item_id}/children", str(path))
def test8_resolve_site_url_path(self):
site = self.client.sites.get_by_url(test_team_site_url).execute_query()
self.assertEqual(f"{str(self.client.sites.resource_path)}/{site.id}", str(site.resource_path))
def test9_resolve_drive_root_path(self):
path = self.client.me.drive.root.resource_path
item_id = uuid.uuid4().hex
path.patch(item_id, inplace=True)
self.assertEqual(f"/me/drive/items/{item_id}", str(path))
def test_10_build_site_root_path(self):
site = self.client.sites.root.get().execute_query()
self.assertEqual(f"/sites/{site.id}", str(site.resource_path))
def test_11_resolve_term_children_path(self):
group_id = uuid.uuid4().hex
set_id = uuid.uuid4().hex
term_id = uuid.uuid4().hex
path = self.client.sites.root.term_store.groups[group_id].sets[set_id].children.resource_path
path = path.patch(term_id)
self.assertEqual(f"/sites/root/termStore/groups/{group_id}/sets/{set_id}/terms/{term_id}", str(path))
#def test_12_build_operation_resource_path(self):
# result = self.client.me.drive.root.get_by_path("archive/Sample.rtf").get_activities_by_interval().execute_query()
# self.assertEqual("/me/drive/root/getActivitiesByInterval()", str(result.resource_path))
def test_13_resolve_me_resource_path(self):
current_user = self.client.me.get().execute_query()
self.assertEqual("/users/{0}".format(current_user.id), str(current_user.resource_path))
def test_15_resolve_my_drive_resource_path(self):
my_drive = self.client.me.drive.get().execute_query()
self.assertEqual("/drives/{0}".format(my_drive.id), str(my_drive.resource_path))
def test_16_resolve_entity_type_name(self):
name = self.client.me.joined_teams.entity_type_name
self.assertEqual("Collection(microsoft.graph.team)", name)
def test_17_(self):
path_str = "/teams('7f919b9f-c220-4290-a4d8-5ff9300d1296')/operations('dc97f61a-0040-436f-ac09-427cd2456fd8')"
path = ODataPathBuilder.parse(path_str)
self.assertIsNotNone(path.key)
|
61f14f92dec1139a9d15f3a967bc0e645809a515
|
217c4e211060b9d699655b434a86be712324d509
|
/tests/test_bqplot.py
|
d3ceb2d93fb963523e0eb234ed54fe3c9b1eb54d
|
[
"Apache-2.0"
] |
permissive
|
Kaggle/docker-python
|
29ecfcb983839f91660665b238a48024ac87b6dc
|
cf60eda37be580511e9fc4b3ca7a25de40ee9f71
|
refs/heads/main
| 2023-08-29T08:45:31.581555
| 2023-08-24T13:14:55
| 2023-08-24T13:14:55
| 33,904,449
| 2,365
| 1,066
|
Apache-2.0
| 2023-09-01T19:16:31
| 2015-04-14T01:45:38
|
Python
|
UTF-8
|
Python
| false
| false
| 383
|
py
|
test_bqplot.py
|
import unittest
import numpy as np
import bqplot.pyplot as plt
class TestBqplot(unittest.TestCase):
def test_figure(self):
size = 100
scale = 100.0
np.random.seed(0)
x_data = np.arange(size)
y_data = np.cumsum(np.random.randn(size) * scale)
fig = plt.figure(title='First Example')
plt.plot(y_data)
fig.save_png()
|
e1d1328b16e3338f3310a4995f74d0ace8a6e05d
|
e2e34d01afc5b6bc6923a721ef92e8ffa8884f86
|
/tests/endtoend/servicebus_functions/servicebus_functions_stein/generic/function_app.py
|
4314116579b6ae09ab5273e1252f46b1071f886a
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-functions-python-worker
|
094340eeb0c4728e3202749027f01ab75e908bd8
|
d4bdf7edc544b6c15e541930f890da790b180ebd
|
refs/heads/dev
| 2023-08-22T22:48:01.645722
| 2023-08-14T14:52:42
| 2023-08-14T14:52:42
| 117,730,503
| 329
| 122
|
MIT
| 2023-09-01T16:54:58
| 2018-01-16T19:23:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,645
|
py
|
function_app.py
|
import json
import azure.functions as func
app = func.FunctionApp(http_auth_level=func.AuthLevel.ANONYMOUS)
@app.function_name(name="put_message")
@app.generic_trigger(arg_name="req", type="httpTrigger", route="put_message")
@app.generic_output_binding(arg_name="msg",
type="serviceBus",
connection="AzureWebJobsServiceBusConnectionString",
queue_name="testqueue")
@app.generic_output_binding(arg_name="$return", type="http")
def put_message(req: func.HttpRequest, msg: func.Out[str]):
msg.set(req.get_body().decode('utf-8'))
return 'OK'
@app.function_name(name="get_servicebus_triggered")
@app.generic_trigger(arg_name="req", type="httpTrigger",
route="get_servicebus_triggered")
@app.generic_input_binding(arg_name="file",
type="blob",
path="python-worker-tests/test-servicebus-triggered.txt", # NoQA
connection="AzureWebJobsStorage")
@app.generic_output_binding(arg_name="$return", type="http")
def get_servicebus_triggered(req: func.HttpRequest,
file: func.InputStream) -> str:
return func.HttpResponse(
file.read().decode('utf-8'), mimetype='application/json')
@app.generic_trigger(
arg_name="msg",
type="serviceBusTrigger",
connection="AzureWebJobsServiceBusConnectionString",
queue_name="testqueue")
@app.generic_output_binding(arg_name="$return",
path="python-worker-tests/test-servicebus-triggered.txt", # NoQA
type="blob",
connection="AzureWebJobsStorage")
def servicebus_trigger(msg: func.ServiceBusMessage) -> str:
result = json.dumps({
'message_id': msg.message_id,
'body': msg.get_body().decode('utf-8'),
'content_type': msg.content_type,
'delivery_count': msg.delivery_count,
'expiration_time': (msg.expiration_time.isoformat() if
msg.expiration_time else None),
'label': msg.label,
'partition_key': msg.partition_key,
'reply_to': msg.reply_to,
'reply_to_session_id': msg.reply_to_session_id,
'scheduled_enqueue_time': (msg.scheduled_enqueue_time.isoformat() if
msg.scheduled_enqueue_time else None),
'session_id': msg.session_id,
'time_to_live': msg.time_to_live,
'to': msg.to,
'user_properties': msg.user_properties,
})
return result
|
71de5b8b70ff3f1d1013f5e2af766f46d3e0ed8c
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/python/training/adagrad_test.py
|
5c60262337bac67845a9ffe26f7c1de337b3aa0f
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 15,029
|
py
|
adagrad_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for aggregate operations."""
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
class AdagradOptimizerTest(test.TestCase):
def doTestBasic(self,
use_locking=False,
use_resource=False,
use_callable_params=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
if use_resource:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
else:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
learning_rate = lambda: 3.0
if not use_callable_params:
learning_rate = learning_rate()
ada_opt = adagrad.AdagradOptimizer(
learning_rate, initial_accumulator_value=0.1, use_locking=use_locking)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for _ in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Validate updated params
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), v0_val)
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), v1_val)
def testBasic(self):
self.doTestBasic(use_locking=False)
@test_util.run_in_graph_and_eager_modes
def testBasicResource(self):
self.doTestBasic(use_locking=False, use_resource=True)
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(
use_locking=False, use_resource=True, use_callable_params=True)
def testBasicLocked(self):
self.doTestBasic(use_locking=True)
def testMinimizeSparseResourceVariable(self):
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable(
[[1.0, 2.0], [3.0, 4.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = adagrad.AdagradOptimizer(1.0).minimize(loss)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0], [3.0, 4.0]],
self.evaluate(var0))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([[0, 1], [3, 4]],
self.evaluate(var0),
atol=0.01)
def testTensorLearningRate(self):
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(
constant_op.constant(3.0), initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1))
def testSparseBasic(self):
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = indexed_slices.IndexedSlices(
constant_op.constant(
[0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]),
constant_op.constant([2, 1]))
grads1 = indexed_slices.IndexedSlices(
constant_op.constant(
[0.01], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([[1.0], [2.0]], self.evaluate(var0))
self.assertAllClose([[3.0], [4.0]], self.evaluate(var1))
          # Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([[-1.6026098728179932], [2.0]]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([[3.0], [3.715679168701172]]), self.evaluate(var1))
def testSparseRepeatedIndices(self):
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = indexed_slices.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = indexed_slices.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
def testSparseRepeatedIndicesResourceVariable(self):
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var_repeated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_repeated = math_ops.reduce_sum(
embedding_ops.embedding_lookup(var_repeated, [0, 0]))
var_aggregated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_aggregated = 2 * math_ops.reduce_sum(
embedding_ops.embedding_lookup(var_aggregated, [0]))
update_op_repeated = adagrad.AdagradOptimizer(
2.0).minimize(loss_repeated)
update_op_aggregated = adagrad.AdagradOptimizer(
2.0).minimize(loss_aggregated)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType(
self.evaluate(var_repeated), self.evaluate(var_aggregated))
for _ in range(3):
update_op_repeated.run()
update_op_aggregated.run()
self.assertAllCloseAccordingToType(
self.evaluate(var_repeated), self.evaluate(var_aggregated))
def testSparseStability(self):
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
shape = [1, 6]
var0 = variables.Variable(
[[
0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257,
-0.0105945
]],
dtype=dtype)
grads0 = indexed_slices.IndexedSlices(
constant_op.constant(
[[
-5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05,
-8.4877e-05, -9.48906e-05
]],
shape=shape,
dtype=dtype),
constant_op.constant([0]),
constant_op.constant(shape))
ada_opt = adagrad.AdagradOptimizer(1.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
init = variables.global_variables_initializer()
for _ in range(100):
init.run()
ada_update.run()
self.assertAllCloseAccordingToType(
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([[
0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573,
-0.01029443
]]), self.evaluate(var0))
def testSharing(self):
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(3.0)
# Apply the optimizer twice. Both applications will use
# the same accums.
ada_update1 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
ada_update2 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
self.assertEqual(slot0.get_shape(), var0.get_shape())
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEqual(slot1.get_shape(), var1.get_shape())
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Mix the first and the second adagrad for 3 steps.
ada_update1.run()
ada_update2.run()
ada_update1.run()
# Validate updated params (the same as with only 1 Adagrad).
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1))
def testDynamicShapeVariableWithCallableInit(self):
with ops.Graph().as_default():
var0 = variable_scope.get_variable("var0",
initializer=constant_op.constant(1.),
validate_shape=False)
grads0 = constant_op.constant(0.1, dtype=dtypes.float32)
learning_rate = lambda: 3.0
ada_opt = adagrad.AdagradOptimizer(
learning_rate, initial_accumulator_value=0.1, use_locking=True)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0], [var0]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val = self.evaluate([var0])
self.assertAllClose([1.0], v0_val)
# Run 3 steps of adagrad
for _ in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0], [var0]))
# Validate updated params
v0_val = self.evaluate([var0])
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932]), v0_val)
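# ---------------------------------------------------------------------------
# Editor's note: a reference computation (not run by the test suite) showing
# where hard-coded expectations such as -1.6026098728179932 come from. With
# learning rate 3.0 and an accumulator initialised to 0.1, each Adagrad step
# does accum += g**2 followed by var -= lr * g / sqrt(accum).
# ---------------------------------------------------------------------------
def _reference_dense_adagrad(var, grad, lr=3.0, accum=0.1, steps=3):
  var = np.array(var, dtype=np.float64)
  for _ in range(steps):
    accum += grad * grad
    var = var - lr * grad / np.sqrt(accum)
  return var
# _reference_dense_adagrad([1.0, 2.0], 0.1)  -> approx [-1.60261, -0.60261]
# _reference_dense_adagrad([3.0, 4.0], 0.01) -> approx [ 2.71568,  3.71568]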
if __name__ == "__main__":
test.main()
|
16c38cb24bf343c4a2a0450dcf8590715682bf7f
|
db957e448e0247f92a33efb72a4d76e22d58e55e
|
/docs/mkdocs/prepare_mkdocs.py
|
b1c2ecf1e6d0255d39fbd783a75e116c21de6e4b
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
lf-edge/eve
|
5852c0d738e3f7675f2d7c96c825ef99f30799df
|
4240dd362cefdf156c68f7126b1d60e388ec94d1
|
refs/heads/master
| 2023-09-06T09:12:03.778918
| 2023-09-01T11:59:26
| 2023-09-05T18:52:11
| 182,198,941
| 435
| 288
|
Apache-2.0
| 2023-09-14T17:13:47
| 2019-04-19T04:01:17
|
Go
|
UTF-8
|
Python
| false
| false
| 4,038
|
py
|
prepare_mkdocs.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script prepares documentation files for mkdocs."""
import os
import sys
import shutil
import re
# Output directory for document sources
if len(sys.argv) <= 2:
print("Use: python", sys.argv[0], "<output-directory> <repository-url>")
sys.exit(1)
else:
outdir = sys.argv[1]
repourl = sys.argv[2]
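# Example invocation (output directory and repository URL are placeholders):
#   python prepare_mkdocs.py build/docs https://github.com/lf-edge/eve/blob/master
# Links to non-document files found in markdown sources are rewritten to
# <repository-url>/<relative-dir>/<file> before the docs are copied.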
# Regex
reLinks = re.compile(r'\[([^\]]+)\]\(([^)]+)\)')
reURL = re.compile(r'[A-Za-z0-9]+://[A-Za-z0-9%-_]+(/[A-Za-z0-9%-_])*(#|\\?)[A-Za-z0-9%-_&=]*')
reMAIL = re.compile(r'mailto:.*')
reANCHOR = re.compile(r'^#.*')
# All extensions for document sources
docsources = [".md", ".markdown", ".txt", ".png", ".svg", ".gif", ".jpg", ".jpeg"]
# Extensions for markdown files
mkdfiles = [".md", ".markdown"]
# Create output directory
os.makedirs(outdir, exist_ok=True)
# Search for all document sources in repository
pwd = os.getcwd()
for root, sub, files in os.walk(pwd):
for f in files:
fstr = os.path.splitext(f)
fext = fstr[len(fstr) - 1]
try:
# Only get files with extensions in the list
ext = docsources.index(fext)
fpath = os.path.join(root, f)
rpath = os.path.relpath(fpath, start=pwd)
# Get directory path from file
dpath = os.path.dirname(rpath)
# Don't look on output directory
if os.path.commonpath([dpath, outdir]) == outdir:
break
# Create corresponding directory
npath = os.path.join(outdir, dpath)
if os.path.isdir(npath) is False:
try:
os.makedirs(npath, exist_ok=True)
except OSError as error:
print(error)
# Destination file
destfile = os.path.join(npath, f)
# Process document source file
try:
sidx = mkdfiles.index(docsources[ext])
with open(fpath, encoding=sys.getdefaultencoding()) as fp:
with open(destfile, "w", encoding=sys.getdefaultencoding()) as fout:
ftxt = fp.read()
# Search for all markdown links in the file
mdlinks = reLinks.findall(ftxt)
for l in mdlinks:
# If the link is not an URL, check if it points to a
# non-document source file. If so, convert it to
# an URL for source code repository
link = l[1].strip()
if reURL.match(link) or reANCHOR.match(link) or reMAIL.match(link):
continue
# Not an URL, remove anchor link (if exist)
flink = re.sub(r'#.*','',link)
try:
# Get file extension of the link
lstr = os.path.splitext(flink)
lext = lstr[len(lstr) - 1]
didx = docsources.index(lext)
# It's a document source, we don't need to process
continue
except ValueError:
# Build the full URL for the link
lrpath = os.path.relpath(fpath, start=pwd)
ldpath = os.path.dirname(lrpath)
lurl = repourl + "/" + ldpath + "/" + flink
# Substitute link with the full URL in the text
ftxt = ftxt.replace("(" + flink, "(" + lurl)
fout.write(ftxt)
fout.close()
fp.close()
except ValueError:
# Not a source file, just copy the file
shutil.copy(fpath, destfile)
finally:
continue
|
5b33501ff4ff50648db63f082775ce9bbd33d7c2
|
bffbde8cc7a544f1b5d6c1bc4b84ca607226e134
|
/tests/test_split.py
|
9aff270c867b0899163fbabd5be87b8de976193c
|
[
"MIT"
] |
permissive
|
VainF/Torch-Pruning
|
c006d274e69c5c592ca1e302a70f6603504b8e07
|
e2478a72022c96af3b9053da359a726939e1adaf
|
refs/heads/master
| 2023-09-05T02:38:36.804176
| 2023-09-04T11:26:29
| 2023-09-04T11:26:29
| 228,203,350
| 1,606
| 231
|
MIT
| 2023-09-06T16:45:28
| 2019-12-15T15:07:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,618
|
py
|
test_split.py
|
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import torch
import torch_pruning as tp
import torch.nn as nn
class Net(nn.Module):
def __init__(self, in_dim):
super().__init__()
self.block1 = nn.Sequential(
nn.Conv2d(in_dim, in_dim, 1),
nn.BatchNorm2d(in_dim),
nn.GELU(),
nn.Conv2d(in_dim, in_dim*4, 1),
nn.BatchNorm2d(in_dim*4)
)
self.block2_1 = nn.Sequential(
nn.Conv2d(in_dim, in_dim, 1),
nn.BatchNorm2d(in_dim)
)
self.block2_2 = nn.Sequential(
nn.Conv2d(2*in_dim, in_dim, 1),
nn.BatchNorm2d(in_dim)
)
def forward(self, x):
x = self.block1(x)
num_ch = x.shape[1]
c1, c2 = self.block2_1[0].in_channels, self.block2_2[0].in_channels
x1, x2, x3 = torch.split(x, [c1, c1, c2], dim=1)
x1 = self.block2_1(x1)
x2 = self.block2_1(x2)
x3 = self.block2_2(x3)
return x1, x2, x3
def test_pruner():
dim = 128
model = Net(dim)
print(model)
# Global metrics
example_inputs = torch.randn(1, dim, 7, 7)
imp = tp.importance.RandomImportance()
ignored_layers = []
# DO NOT prune the final classifier!
for m in model.modules():
if isinstance(m, torch.nn.Linear) and m.out_features == 1000:
ignored_layers.append(m)
iterative_steps = 1
pruner = tp.pruner.MagnitudePruner(
model,
example_inputs,
importance=imp,
iterative_steps=iterative_steps,
ch_sparsity=0.5, # remove 50% channels, ResNet18 = {64, 128, 256, 512} => ResNet18_Half = {32, 64, 128, 256}
ignored_layers=ignored_layers,
)
for g in pruner.DG.get_all_groups():
pass
base_macs, base_nparams = tp.utils.count_ops_and_params(model, example_inputs)
for i in range(iterative_steps):
for g in pruner.step(interactive=True):
#print(g.details())
g.prune()
print(model)
macs, nparams = tp.utils.count_ops_and_params(model, example_inputs)
print([o.shape for o in model(example_inputs)])
print(
" Iter %d/%d, Params: %.2f M => %.2f M"
% (i+1, iterative_steps, base_nparams / 1e6, nparams / 1e6)
)
print(
" Iter %d/%d, MACs: %.2f G => %.2f G"
% (i+1, iterative_steps, base_macs / 1e9, macs / 1e9)
)
# finetune your model here
# finetune(model)
# ...
if __name__=='__main__':
test_pruner()
|
69105ae710431b6eb9028812ef85a3f8165eacca
|
77ff40ab8d90d15013f9cc884c2eb3ed24cd5cf7
|
/houdini/scripts/456.py
|
076ceeab36525009834c90e971a6c81eca08a169
|
[
"MIT"
] |
permissive
|
captainhammy/Houdini-Toolbox
|
78655b943244d2c086966812031bb94ab0ace3a3
|
f233232d8fbf5934ac99dad1aa72bdd5faa63b8b
|
refs/heads/master
| 2023-06-23T09:37:30.745036
| 2023-06-12T19:59:23
| 2023-06-12T19:59:23
| 3,946,767
| 159
| 34
|
MIT
| 2022-10-01T15:31:26
| 2012-04-06T04:12:28
|
Python
|
UTF-8
|
Python
| false
| false
| 630
|
py
|
456.py
|
"""Perform tasks when a .hip file is loaded."""
# =============================================================================
# IMPORTS
# =============================================================================
# Houdini Toolbox
from houdini_toolbox.events import SceneEvents, run_event
# =============================================================================
# FUNCTIONS
# =============================================================================
def main():
"""Main function."""
run_event(SceneEvents.Load)
# =============================================================================
main()
|
f7deb93c3401c27918dae53c9c41fd4cf1a725a8
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/api/dataflow/shared/api/modules/meta.py
|
0d0d5399a2907d3967935517e52aad725db3c53b
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 5,742
|
py
|
meta.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from common.api.base import DataAPI, DataDRFAPISet, DRFActionAPI
from common.api.modules.utils import add_app_info_before_request
from django.utils.translation import ugettext_lazy as _
from dataflow.pizza_settings import BASE_META_URL
from .test.test_call_meta import TestMeta
class _MetaApi(object):
test_meta = TestMeta()
def __init__(self):
self.result_tables = DataDRFAPISet(
url=BASE_META_URL + "result_tables/",
primary_key="result_table_id",
module="meta",
description=_("获取结果表元信息"),
default_return_value=self.test_meta.set_return_value("result_tables"),
before_request=add_app_info_before_request,
custom_config={
"storages": DRFActionAPI(
method="get",
default_return_value=self.test_meta.set_return_value("storages"),
),
"fields": DRFActionAPI(
method="get",
default_return_value=self.test_meta.set_return_value("fields"),
),
},
)
self.data_processings = DataDRFAPISet(
url=BASE_META_URL + "data_processings/",
primary_key="processing_id",
module="meta",
description=_("获取数据处理表元信息"),
default_return_value=self.test_meta.set_return_value("data_processings"),
before_request=add_app_info_before_request,
custom_config={"bulk": DRFActionAPI(method="delete", detail=False)},
)
self.projects = DataDRFAPISet(
url=BASE_META_URL + "projects/",
primary_key="project_id",
module="meta",
description=_("获取项目元信息"),
default_return_value=self.test_meta.set_return_value("retrieve_projects"),
before_request=add_app_info_before_request,
)
self.data_transferrings = DataDRFAPISet(
url=BASE_META_URL + "data_transferrings/",
primary_key="transferring_id",
module="meta",
default_return_value=self.test_meta.set_return_value("data_transferrings"),
description=_("数据传输元信息"),
before_request=add_app_info_before_request,
)
self.meta_transaction = DataDRFAPISet(
url=BASE_META_URL + "meta_transaction/",
primary_key=None,
module="meta",
description=_("MetaApi集合事务接口"),
default_return_value=self.test_meta.set_return_value("meta_transaction"),
before_request=add_app_info_before_request,
)
self.lineage = DataAPI(
url=BASE_META_URL + "lineage/",
method="GET",
module="meta",
description=_("查询血缘关系"),
)
self.tdw_app_group = DataDRFAPISet(
url=BASE_META_URL + "tdw/app_groups/",
primary_key="app_group_name",
module="meta",
default_return_value=self.test_meta.set_return_value("data_transferrings"),
description=_("获取tdw应用组信息"),
custom_config={"mine": DRFActionAPI(method="get", detail=False)},
)
self.cluster_group_configs = DataDRFAPISet(
url=BASE_META_URL + "cluster_group_configs/",
primary_key="cluster_group_id",
module="meta",
description="获取集群组详情",
)
self.tag = DataDRFAPISet(
url=BASE_META_URL + "tag/",
primary_key=None,
module="meta",
description="获取集群组详情",
custom_config={"geog_tags": DRFActionAPI(method="get", detail=False)},
)
self.field_type_configs = DataDRFAPISet(
url=BASE_META_URL + "field_type_configs/",
primary_key="field_type",
module="meta",
description="获取字段类型配置",
)
self.assets = DataDRFAPISet(
url=BASE_META_URL + "basic/asset/",
primary_key=None,
module="meta",
description="元数据 asset",
custom_config={"query_via_erp": DRFActionAPI(method="post", detail=False)},
)
MetaApi = _MetaApi()
|
a315bd9ed9c73a895c94336613e5d2b581eda426
|
8ca19f1a31070738b376c0370c4bebf6b7efcb43
|
/office365/onedrive/sharepoint_ids.py
|
03cf8a3ba1187f88a3243dcd61551a1dc308dd6f
|
[
"MIT"
] |
permissive
|
vgrem/Office365-REST-Python-Client
|
2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3
|
cbd245d1af8d69e013c469cfc2a9851f51c91417
|
refs/heads/master
| 2023-09-02T14:20:40.109462
| 2023-08-31T19:14:05
| 2023-08-31T19:14:05
| 51,305,798
| 1,006
| 326
|
MIT
| 2023-08-28T05:38:02
| 2016-02-08T15:24:51
|
Python
|
UTF-8
|
Python
| false
| false
| 816
|
py
|
sharepoint_ids.py
|
from office365.runtime.client_value import ClientValue
class SharePointIds(ClientValue):
"""The SharePointIds resource groups the various identifiers for an item stored in a SharePoint site or OneDrive
for Business into a single structure. """
def __init__(self, list_id=None, list_item_id=None, list_item_unique_id=None):
"""
:param str list_id: The unique identifier (guid) for the item's list in SharePoint.
:param str list_item_id: An integer identifier for the item within the containing list.
:param str list_item_unique_id: The unique identifier (guid) for the item within OneDrive for Business
or a SharePoint site.
"""
self.listId = list_id
self.listItemId = list_item_id
self.listItemUniqueId = list_item_unique_id
|
555d30d4475b01fd0c229fcdce192dcfdf44ad6a
|
bf00669ed38e95a82b2ca3f5f058817ca97f4704
|
/Algorithmia/__main__.py
|
1b5f7b5a8800e772e215d031466240d58ecf606a
|
[
"MIT"
] |
permissive
|
algorithmiaio/algorithmia-python
|
e049ee9bb9aa6ad6571471b4b1f5e57ae285dc20
|
31f0e9047483bea72b4fdec9ef42b9174b2aa2dc
|
refs/heads/develop
| 2022-08-25T14:07:18.714220
| 2022-08-03T16:30:46
| 2022-08-03T16:30:46
| 38,772,089
| 144
| 48
|
MIT
| 2023-02-24T20:50:36
| 2015-07-08T18:25:17
|
Python
|
UTF-8
|
Python
| false
| false
| 9,445
|
py
|
__main__.py
|
import sys
import os
import json
sys.path = ['../'] + sys.path
import Algorithmia
import six
from Algorithmia.CLI import CLI
import argparse
import re
#bind input to raw input
try:
input = raw_input
except NameError:
pass
#CLI app to allow a user to run algorithms and manage data collections
usage = """CLI for interaction with Algorithmia\n
Usage:\n
algo [<cmd>] [options] [<args>...]\n
algo [<cmd>] [--help | --version]\n\n
General commands include:\n
auth configure authentication\n\n
Algorithm commands include:\n
run Runs an algorithm\n
clone Clones an algorithm source\n\n
Data commands include:\n
ls list the contents of a data directory\n
mkdir create a data directory\n
rmdir remove a data directory\n
rm remove a file from a data directory\n
cp copy file(s) to or from a data directory\n
cat concatenate and print file(s) in a data directory\n\n
Global options:\n
--help\n
--profile <name>\n\n
"""
def main():
parser = argparse.ArgumentParser('algo', description = "algo [<cmd>] [options] [<args>...] [--help] [--profile]")
subparsers = parser.add_subparsers(help = 'sub cmd',dest = 'cmd')
parser_auth = subparsers.add_parser('auth', help = 'save api key and api address for profile')
parser_auth.add_argument('--profile', action = 'store', type = str, default = 'default')
parser_clone = subparsers.add_parser('clone', help = 'clone <algo> clone the algorithm repository')
parser_clone.add_argument('algo')
parser_clone.add_argument('--profile', action = 'store', type = str, default = 'default')
#parse options for the run command
parser_run = subparsers.add_parser('run', help = 'algo run <algo> [input options] <args..> [output options] run an algorithm')
parser_run.add_argument('algo')
parser_run.add_argument('-d','--data', action = 'store', help = 'detect input type', default = None)
parser_run.add_argument('-t','--text', action = 'store', help = 'treat input as text', default = None)
parser_run.add_argument('-j','--json', action = 'store', help = 'treat input as json data', default = None)
parser_run.add_argument('-b','--binary', action = 'store', help = 'treat input as binary data', default = None)
parser_run.add_argument('-D','--data-file', action = 'store', help = 'specify a path to an input file', default = None)
parser_run.add_argument('-T','--text-file', action = 'store', help = 'specify a path to a text file', default = None)
parser_run.add_argument('-J','--json-file', action = 'store', help = 'specify a path to a json file', default = None)
parser_run.add_argument('-B','--binary-file', action = 'store', help = 'specify a path to a binary file', default = None)
parser_run.add_argument('--timeout', action = 'store',type = int, default = 300, help = 'specify a timeout (seconds)')
parser_run.add_argument('--debug', action = 'store_true', help = 'print the stdout from the algo <this only works for the owner>')
parser_run.add_argument('--profile', action = 'store', type = str, default = 'default')
parser_run.add_argument('-o', '--output', action = 'store', default = None, type = str)
#subparser for ls
parser_ls = subparsers.add_parser('ls', help = 'ls [-l] [directory] list the contents of a directory', )
parser_ls.add_argument('-l', '--long', action = 'store_true')
parser_ls.add_argument('path', nargs = '?', default = None)
parser_ls.add_argument('--profile', action = 'store', type = str, default = 'default')
#subparser for rm
parser_rm = subparsers.add_parser('rm', help = 'rm <path> remove a file', )
parser_rm.add_argument('path', nargs = '?', default = None)
parser_rm.add_argument('--profile', action = 'store', type = str, default = 'default')
#subparser for mkdir
parser_mkdir = subparsers.add_parser('mkdir', help = 'mkdir <directory> create a directory')
parser_mkdir.add_argument('path', help = 'directory to create')
parser_mkdir.add_argument('--profile', action = 'store', type = str, default = 'default')
#subparser for rmdir
parser_rmdir = subparsers.add_parser('rmdir', help = 'rmdir [-f] <directory> remove a directory')
parser_rmdir.add_argument('-f', '--force', action = 'store_true', help = 'force directory removal if it is not empty')
parser_rmdir.add_argument('path', help = 'directory to remove')
parser_rmdir.add_argument('--profile', action = 'store', type = str, default = 'default')
#subparser for cp
parser_cp = subparsers.add_parser('cp', help = 'cp <src,...> <dest> copy file(s) to the destination',)
parser_cp.add_argument('src', nargs = '*', type = str, help = 'file(s) to be copied')
parser_cp.add_argument('dest', help = 'destination for file(s) to be copied to')
parser_cp.add_argument('--profile', action = 'store', type = str, default = 'default')
#sub parser for cat
parser_cat = subparsers.add_parser('cat', help = 'cat <path,...> concatenate and print file(s)')
parser_cat.add_argument('path', nargs = '*', help = 'file(s) to concatenate and print')
parser_cat.add_argument('--profile', action = 'store', type = str, default = 'default')
#sub parser for getting environment template
parser_template = subparsers.add_parser('template', help='template <envid> <dest> downloads an environment template to the destination')
parser_template.add_argument('envid',help='environment specification id')
parser_template.add_argument('dest',help='destination for template download')
#sub parser for getting environment by language name
parser_env = subparsers.add_parser('environment', help = 'environment <language> gets environment info by language')
parser_env.add_argument('language', help='supported language name')
#sub parser for listing languages
subparsers.add_parser('languages', help = 'lists supported languages')
#sub parser for builds
parser_builds = subparsers.add_parser('builds', help = 'builds <user> <algo> gets build logs for algorithm')
parser_builds.add_argument('user')
parser_builds.add_argument('algo',help='algorithm name')
#sub parser for help
subparsers.add_parser('help')
parser.add_argument('--profile', action = 'store', type = str, default = 'default')
#sub parser for freeze
subparsers.add_parser('freeze', help="freezes a model_manifest.json file into a model_manifest.json.freeze")
args = parser.parse_args()
#run auth before trying to create a client
if args.cmd == 'auth':
print("Configuring authentication for profile: " + args.profile)
APIaddress = input("enter API address [https://api.algorithmia.com]: ")
APIkey = input("enter API key: ")
CACert = input('(optional) enter path to custom CA certificate: ')
if APIaddress == "" or not APIaddress.startswith("https://api."):
print("invalid API address")
else:
if len(APIkey) == 28 and APIkey.startswith("sim"):
CLI().auth(apikey=APIkey, apiaddress=APIaddress, cacert=CACert, profile=args.profile)
else:
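                # not a "sim..." API key: expect a JWT bearer token (three dot-separated segments, roughly header.payload.signature)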
jwt = re.compile(r"^([a-zA-Z0-9_=]+)\.([a-zA-Z0-9_=]+)\.([a-zA-Z0-9_\-\+\/=]*)")
Bearer = input("enter JWT token: ")
if jwt.match(Bearer):
CLI().auth(apikey=APIkey, bearer=Bearer, apiaddress=APIaddress, cacert=CACert, profile=args.profile)
else:
print("invalid authentication")
if args.cmd == 'help':
parser.parse_args(['-h'])
    #create a client with the appropriate api address and key
client = CLI().getClient(args.profile)
if args.cmd == 'run':
print(CLI().runalgo(args, client))
elif args.cmd == 'clone':
algo_name = args.algo
print("cloning src for " + algo_name)
if CLI().getAPIaddress(args.profile) == None:
exitcode = os.system("git clone https://git.algorithmia.com/git/"+algo_name+".git")
else:
#replace https://api.<domain> with https://git.<domain>
exitcode = os.system("git clone " + (CLI().getAPIaddress(args.profile).replace("//api.", "//git."))+"/git/"+algo_name+".git")
if exitcode != 0:
print("failed to clone\nis git installed?")
elif args.cmd == 'ls':
print(CLI().ls(args.path, client, args.long))
elif args.cmd == 'mkdir':
CLI().mkdir(args.path, client)
elif args.cmd == 'rmdir':
CLI().rmdir(args.path, client, args.force)
elif args.cmd == 'rm':
CLI().rm(args.path, client)
elif args.cmd == 'cp':
CLI().cp(args.src,args.dest, client)
elif args.cmd == 'cat':
print(CLI().cat(args.path, client))
elif args.cmd == 'languages':
response = CLI().list_languages(client)
for line in response:
print(line)
elif args.cmd == 'template':
CLI().get_template(args.envid,args.dest,client)
elif args.cmd == 'environment':
response = CLI().get_environment_by_language(args.language,client)
print(response)
elif args.cmd == 'builds':
print(CLI().getBuildLogs(args.user, args.algo, client))
elif args.cmd == "freeze":
print(CLI().freezeAlgo(client))
else:
parser.parse_args(['-h'])
if __name__ == '__main__':
#main()
main()
|
761d128183d22869d5b1d240d501a662a3d54195
|
b8106a0cd1e1604de98ef2b4b5311ec3f36d122a
|
/ask-sdk-core/tests/unit/test_template_factory.py
|
7618c3753adf10c1888b9561a2cf7e8f1e8dffbb
|
[
"Apache-2.0"
] |
permissive
|
alexa/alexa-skills-kit-sdk-for-python
|
b5e8288c6dd7a3ff6e13b19a7f0026561087ed93
|
7e13ca69b240985584dff6ec633a27598a154ca1
|
refs/heads/master
| 2023-06-26T02:01:58.858446
| 2023-06-08T18:20:05
| 2023-06-08T18:20:05
| 130,283,857
| 560
| 239
|
Apache-2.0
| 2023-05-23T18:51:30
| 2018-04-19T23:40:46
|
Python
|
UTF-8
|
Python
| false
| false
| 7,401
|
py
|
test_template_factory.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
#
import unittest
import mock
from ask_sdk_core.view_resolvers.template_factory import TemplateFactory
from ask_sdk_core.view_resolvers.template_content import TemplateContent
from ask_sdk_runtime.view_resolvers.abstract_template_renderer import AbstractTemplateRenderer
from ask_sdk_runtime.view_resolvers.abstract_template_loader import AbstractTemplateLoader
from ask_sdk_core.exceptions import TemplateLoaderException, TemplateRendererException
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model.response import Response
class TestTemplateFactory(unittest.TestCase):
def setUp(self):
self.test_loader = mock.MagicMock(spec=AbstractTemplateLoader)
self.list_loaders = [self.test_loader]
self.test_renderer = mock.MagicMock(spec=AbstractTemplateRenderer)
self.test_template_content = mock.MagicMock(spec=TemplateContent)
self.test_template_factory = TemplateFactory(
template_loaders=self.list_loaders,
template_renderer=self.test_renderer)
self.test_template_name = 'test_template_name'
self.test_data_map = {
'test': 'test_data'
}
self.test_handler_input = mock.MagicMock(HandlerInput, autospec=True)
def test_process_template_with_null_loaders(self):
with self.assertRaises(ValueError) as exc:
test_factory = TemplateFactory(template_loaders=None,
template_renderer=self.test_renderer)
test_factory.process_template(template_name=self.test_template_name,
data_map=self.test_data_map,
handler_input=self.test_handler_input)
self.assertEqual(
"Template Loaders list is null", str(exc.exception),
"TemplateFactory did not raise ValueError for "
"null list of loaders"
)
def test_process_template_with_null_renderer(self):
with self.assertRaises(ValueError) as exc:
test_factory = TemplateFactory(template_loaders=self.list_loaders,
template_renderer=None)
test_factory.process_template(template_name=self.test_template_name,
data_map=self.test_data_map,
handler_input=self.test_handler_input)
self.assertEqual(
"Template Renderer is null", str(exc.exception),
"TemplateFactory did not raise ValueError for "
"null renderer"
)
def test_process_template_for_null_template_name(self):
with self.assertRaises(ValueError) as exc:
self.test_template_factory.process_template(
template_name=None, data_map=self.test_data_map,
handler_input=self.test_handler_input)
self.assertEqual(
"Template Name is null", str(exc.exception),
"TemplateFactory process_template did not raise ValueError for "
"null template name"
)
def test_process_template_for_null_data_map(self):
with self.assertRaises(ValueError) as exc:
self.test_template_factory.process_template(
template_name=self.test_template_name, data_map=None,
handler_input=self.test_handler_input)
self.assertEqual(
"Data Map is null", str(exc.exception),
"TemplateFactory process_template did not raise ValueError for "
"null data map"
)
def test_process_template_with_no_matching_loader(self):
with self.assertRaises(TemplateLoaderException) as exc:
self.test_loader.load.return_value = None
self.test_template_factory.process_template(
template_name=self.test_template_name,
data_map=self.test_data_map,
handler_input=self.test_handler_input)
self.assertEqual("Unable to load template: {} using provided loaders."
.format(self.test_template_name), str(exc.exception),
"TemplateFactory did not raise "
"TemplateResolverException if none of provided "
"loaders were unable to load the templates.")
def test_process_template_raise_exception_at_load(self):
with self.assertRaises(TemplateLoaderException) as exc:
self.test_loader.load.side_effect = TemplateLoaderException(
"Test Error")
self.test_template_factory.process_template(
template_name=self.test_template_name,
data_map=self.test_data_map,
handler_input=self.test_handler_input)
self.assertEqual("Failed to load the template: {} using {} with error "
": {}".format(self.test_template_name,
self.test_loader, "Test Error"),
str(exc.exception), "TemplateFactory did not raise "
"TemplateResolverException if none"
" of provided loaders were unable"
" to load the templates.")
def test_process_template_raise_exception_at_render(self):
with self.assertRaises(TemplateRendererException) as exc:
self.test_loader.load.return_value = self.test_template_content
self.test_renderer.render.side_effect = TemplateLoaderException(
"Renderer Error")
self.test_template_factory.process_template(
template_name=self.test_template_name,
data_map=self.test_data_map,
handler_input=self.test_handler_input)
self.assertEqual("Failed to render template: {} using {} with error: "
"{}".format(self.test_template_content, self.test_renderer,
"Renderer Error"), str(exc.exception),
"TemplateFactory did not raise "
"TemplateResolverException if none of provided "
"loaders were unable to load the templates.")
def test_process_template_returns_response(self):
self.test_renderer.render.return_value = mock.MagicMock(
Response, autospec=True)
response = self.test_template_factory.process_template(
template_name=self.test_template_name, data_map=self.test_data_map,
handler_input=self.test_handler_input)
self.assertIsInstance(response, Response,
"TemplateFactory process_template did not return"
"a Reponse object")
|
68881642c46be5a8415d24979379941c61613556
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/前端笔记/牛客/牛客网前端笔试题/算法题/美团10/正则序列-贪心.py
|
c30ae6fc2bee3bd8fed84e7c815e9483c148bf73
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 566
|
py
|
正则序列-贪心.py
|
# One day Xiao Tuan is given an arbitrary sequence s of length n. Within a finite number of operations he must turn it into a regular sequence; in each operation he may pick any number in the sequence and add one to it or subtract one from it.
# What is the minimum number of operations needed to turn the sequence into a regular sequence (a permutation of 1 to n)?
# The plan with the fewest changes always pairs elements of equal rank in the input sequence and in the regular sequence (greedy: sort the input, then match the i-th smallest value with i+1).
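# e.g. n=3, nums=[1,1,5]: after sorting [1,1,5], cost = |1-1| + |2-1| + |3-5| = 0 + 1 + 2 = 3 operations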
n = int(input())
nums = [int(i) for i in input().split()]
nums.sort()
res = 0
for i in range(n):
res += abs(i + 1 - nums[i])
print(res)
|
d9401b442c562e539f80b69f8bb70478d0afdee7
|
df4361db61d10a10c46ed5f18973d89e4efda82c
|
/armi/reactor/grids/__init__.py
|
598295a65d5ade75c6615bc2c7c5d85958dc2134
|
[
"Apache-2.0",
"GPL-1.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
terrapower/armi
|
5524741c5e80781e136ea3422aed0db8398f76ae
|
360791847227df3f3a337a996ef561e00f846a09
|
refs/heads/main
| 2023-09-04T05:16:29.080518
| 2023-09-01T16:10:29
| 2023-09-01T16:10:29
| 218,863,590
| 204
| 75
|
Apache-2.0
| 2023-09-14T20:42:24
| 2019-10-31T21:18:34
|
Python
|
UTF-8
|
Python
| false
| false
| 3,717
|
py
|
__init__.py
|
# Copyright 2023 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
This contains structured meshes in multiple geometries and spatial locators (i.e. locations).
:py:class:`Grids <Grid>` are objects that map indices (i, j, k) to spatial locations
(x,y,z) or (t,r,z). They are useful for arranging things in reactors, such as:
* Fuel assemblies in a reactor
* Plates in a heat exchanger
* Pins in a fuel assembly
* Blocks in a fuel assembly (1-D)
Fast reactors often use a hexagonal grid, while other reactors may be better suited for
Cartesian or RZT grids. This module contains representations of all these.
``Grid``\ s can be defined by any arbitrary combination of absolute grid boundaries and
unit step directions.
Associated with grids are :py:class:`IndexLocations <IndexLocation>`. Each of these maps
to a single cell in a grid, or to an arbitrary point in the continuous space represented
by a grid. When a ``Grid`` is built, it builds a collection of ``IndexLocation``\ s, one
for each cell.
In the ARMI :py:mod:`armi.reactor` module, each object is assigned a locator either from
a grid or in arbitrary, continuous space (using a :py:class:`CoordinateLocation`) on the
``spatialLocator`` attribute.
Below is a basic example of how to use a 2-D grid::
>>> grid = CartesianGrid.fromRectangle(1.0, 1.0) # 1 cm square-pitch Cartesian grid
>>> location = grid[1,2,0]
>>> location.getGlobalCoordinates()
array([ 1., 2., 0.])
Grids can be chained together in a parent-child relationship. This is often used in ARMI
where a 1-D axial grid (e.g. in an assembly) is being positioned in a core or spent-fuel
pool. See example in
:py:meth:`armi.reactor.tests.test_grids.TestSpatialLocator.test_recursion`.
The "radial" (ring, position) indexing used in DIF3D can be converted to and from the
more quasi-Cartesian indexing in a hex mesh easily with the utility methods
:py:meth:`HexGrid.getRingPos` and :py:func:`indicesToRingPos`.
This module is designed to satisfy the spatial arrangement requirements of :py:mod:`the
Reactor package <armi.reactor>`.
Throughout the module, the term **global** refers to the top-level coordinate system
while the word **local** refers to within the current coordinate system defined by the
current grid.
"""
from typing import Tuple, Optional
from .constants import (
BOUNDARY_CENTER,
BOUNDARY_0_DEGREES,
BOUNDARY_120_DEGREES,
BOUNDARY_60_DEGREES,
)
from .locations import (
LocationBase,
IndexLocation,
MultiIndexLocation,
CoordinateLocation,
addingIsValid,
)
from .grid import Grid
from .structuredgrid import StructuredGrid, GridParameters, _tuplify
from .axial import AxialGrid, axialUnitGrid
from .cartesian import CartesianGrid
from .hexagonal import HexGrid, COS30, SIN30, TRIANGLES_IN_HEXAGON
from .thetarz import ThetaRZGrid, TAU
def locatorLabelToIndices(label: str) -> Tuple[int, int, Optional[int]]:
"""
Convert a locator label to numerical i,j,k indices.
If there are only i,j indices, make the last item None
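    e.g. "1-2-3" becomes (1, 2, 3) and "4-5" becomes (4, 5, None).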
"""
intVals = tuple(int(idx) for idx in label.split("-"))
if len(intVals) == 2:
intVals = (intVals[0], intVals[1], None)
return intVals
|
469b9491121d55efaec5268c2e0c2a749f3d89ae
|
4805a71711625735215cc1a773a85712be305b59
|
/Cython/Compiler/UtilNodes.py
|
81d3038ead414e8cb19b1cd1b0af674080a6b545
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cython/cython
|
0a75b75b7eaf19eeedaaebca9d49adb603e3e8f5
|
6ba3daf319d94058de74e8e7f53f932092b38441
|
refs/heads/master
| 2023-09-04T11:09:56.569277
| 2023-09-04T07:45:47
| 2023-09-04T07:45:47
| 1,099,265
| 8,352
| 1,704
|
Apache-2.0
| 2023-09-14T06:33:34
| 2010-11-21T07:44:20
|
Python
|
UTF-8
|
Python
| false
| false
| 12,463
|
py
|
UtilNodes.py
|
#
# Nodes used as utilities and support for transforms etc.
# These often make up sets including both Nodes and ExprNodes
# so it is convenient to have them in a separate module.
#
from __future__ import absolute_import
from . import Nodes
from . import ExprNodes
from .Nodes import Node
from .ExprNodes import AtomicExprNode
from .PyrexTypes import c_ptr_type, c_bint_type
class TempHandle(object):
# THIS IS DEPRECATED, USE LetRefNode instead
temp = None
needs_xdecref = False
def __init__(self, type, needs_cleanup=None):
self.type = type
if needs_cleanup is None:
self.needs_cleanup = type.is_pyobject
else:
self.needs_cleanup = needs_cleanup
def ref(self, pos):
return TempRefNode(pos, handle=self, type=self.type)
class TempRefNode(AtomicExprNode):
# THIS IS DEPRECATED, USE LetRefNode instead
# handle TempHandle
def analyse_types(self, env):
assert self.type == self.handle.type
return self
def analyse_target_types(self, env):
assert self.type == self.handle.type
return self
def analyse_target_declaration(self, env):
pass
def calculate_result_code(self):
result = self.handle.temp
if result is None: result = "<error>" # might be called and overwritten
return result
def generate_result_code(self, code):
pass
def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
if self.type.is_pyobject:
rhs.make_owned_reference(code)
# TODO: analyse control flow to see if this is necessary
code.put_xdecref(self.result(), self.ctype())
code.putln('%s = %s;' % (
self.result(),
rhs.result() if overloaded_assignment else rhs.result_as(self.ctype()),
))
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
class TempsBlockNode(Node):
# THIS IS DEPRECATED, USE LetNode instead
"""
Creates a block which allocates temporary variables.
This is used by transforms to output constructs that need
to make use of a temporary variable. Simply pass the types
of the needed temporaries to the constructor.
The variables can be referred to using a TempRefNode
(which can be constructed by calling get_ref_node).
"""
# temps [TempHandle]
# body StatNode
child_attrs = ["body"]
def generate_execution_code(self, code):
for handle in self.temps:
handle.temp = code.funcstate.allocate_temp(
handle.type, manage_ref=handle.needs_cleanup)
self.body.generate_execution_code(code)
for handle in self.temps:
if handle.needs_cleanup:
if handle.needs_xdecref:
code.put_xdecref_clear(handle.temp, handle.type)
else:
code.put_decref_clear(handle.temp, handle.type)
code.funcstate.release_temp(handle.temp)
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
def annotate(self, code):
self.body.annotate(code)
class ResultRefNode(AtomicExprNode):
# A reference to the result of an expression. The result_code
# must be set externally (usually a temp name).
subexprs = []
lhs_of_first_assignment = False
def __init__(self, expression=None, pos=None, type=None, may_hold_none=True, is_temp=False):
self.expression = expression
self.pos = None
self.may_hold_none = may_hold_none
if expression is not None:
self.pos = expression.pos
self.type = getattr(expression, "type", None)
if pos is not None:
self.pos = pos
if type is not None:
self.type = type
if is_temp:
self.is_temp = True
assert self.pos is not None
def clone_node(self):
# nothing to do here
return self
def type_dependencies(self, env):
if self.expression:
return self.expression.type_dependencies(env)
else:
return ()
def update_expression(self, expression):
self.expression = expression
type = getattr(expression, "type", None)
if type:
self.type = type
def analyse_target_declaration(self, env):
pass # OK - we can assign to this
def analyse_types(self, env):
if self.expression is not None:
if not self.expression.type:
self.expression = self.expression.analyse_types(env)
self.type = self.expression.type
return self
def infer_type(self, env):
if self.type is not None:
return self.type
if self.expression is not None:
if self.expression.type is not None:
return self.expression.type
return self.expression.infer_type(env)
assert False, "cannot infer type of ResultRefNode"
def may_be_none(self):
if not self.type.is_pyobject:
return False
return self.may_hold_none
def _DISABLED_may_be_none(self):
# not sure if this is safe - the expression may not be the
# only value that gets assigned
if self.expression is not None:
return self.expression.may_be_none()
if self.type is not None:
return self.type.is_pyobject
return True # play it safe
def is_simple(self):
return True
def result(self):
try:
return self.result_code
except AttributeError:
if self.expression is not None:
self.result_code = self.expression.result()
return self.result_code
def generate_evaluation_code(self, code):
pass
def generate_result_code(self, code):
pass
def generate_disposal_code(self, code):
pass
def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
if self.type.is_pyobject:
rhs.make_owned_reference(code)
if not self.lhs_of_first_assignment:
code.put_decref(self.result(), self.ctype())
code.putln('%s = %s;' % (
self.result(),
rhs.result() if overloaded_assignment else rhs.result_as(self.ctype()),
))
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
def allocate_temps(self, env):
pass
def release_temp(self, env):
pass
def free_temps(self, code):
pass
class LetNodeMixin:
def set_temp_expr(self, lazy_temp):
self.lazy_temp = lazy_temp
self.temp_expression = lazy_temp.expression
def setup_temp_expr(self, code):
self.temp_expression.generate_evaluation_code(code)
self.temp_type = self.temp_expression.type
if self.temp_type.is_array:
self.temp_type = c_ptr_type(self.temp_type.base_type)
self._result_in_temp = self.temp_expression.result_in_temp()
if self._result_in_temp:
self.temp = self.temp_expression.result()
else:
if self.temp_type.is_memoryviewslice:
self.temp_expression.make_owned_memoryviewslice(code)
else:
self.temp_expression.make_owned_reference(code)
self.temp = code.funcstate.allocate_temp(
self.temp_type, manage_ref=True)
code.putln("%s = %s;" % (self.temp, self.temp_expression.result()))
self.temp_expression.generate_disposal_code(code)
self.temp_expression.free_temps(code)
self.lazy_temp.result_code = self.temp
def teardown_temp_expr(self, code):
if self._result_in_temp:
self.temp_expression.generate_disposal_code(code)
self.temp_expression.free_temps(code)
else:
if self.temp_type.needs_refcounting:
code.put_decref_clear(self.temp, self.temp_type)
code.funcstate.release_temp(self.temp)
class EvalWithTempExprNode(ExprNodes.ExprNode, LetNodeMixin):
# A wrapper around a subexpression that moves an expression into a
# temp variable and provides it to the subexpression.
subexprs = ['temp_expression', 'subexpression']
def __init__(self, lazy_temp, subexpression):
self.set_temp_expr(lazy_temp)
self.pos = subexpression.pos
self.subexpression = subexpression
# if called after type analysis, we already know the type here
self.type = self.subexpression.type
def infer_type(self, env):
return self.subexpression.infer_type(env)
def may_be_none(self):
return self.subexpression.may_be_none()
def result(self):
return self.subexpression.result()
def analyse_types(self, env):
self.temp_expression = self.temp_expression.analyse_types(env)
self.lazy_temp.update_expression(self.temp_expression) # overwrite in case it changed
self.subexpression = self.subexpression.analyse_types(env)
self.type = self.subexpression.type
return self
def free_subexpr_temps(self, code):
self.subexpression.free_temps(code)
def generate_subexpr_disposal_code(self, code):
self.subexpression.generate_disposal_code(code)
def generate_evaluation_code(self, code):
self.setup_temp_expr(code)
self.subexpression.generate_evaluation_code(code)
self.teardown_temp_expr(code)
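# Preferred public name: LetRefNode is simply an alias for ResultRefNode
# (the deprecated TempHandle/TempRefNode notes above say to use LetRefNode instead).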
LetRefNode = ResultRefNode
class LetNode(Nodes.StatNode, LetNodeMixin):
# Implements a local temporary variable scope. Imagine this
# syntax being present:
# let temp = VALUE:
# BLOCK (can modify temp)
# if temp is an object, decref
#
# Usually used after analysis phase, but forwards analysis methods
# to its children
child_attrs = ['temp_expression', 'body']
def __init__(self, lazy_temp, body):
self.set_temp_expr(lazy_temp)
self.pos = body.pos
self.body = body
def analyse_declarations(self, env):
self.temp_expression.analyse_declarations(env)
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.temp_expression = self.temp_expression.analyse_expressions(env)
self.body = self.body.analyse_expressions(env)
return self
def generate_execution_code(self, code):
self.setup_temp_expr(code)
self.body.generate_execution_code(code)
self.teardown_temp_expr(code)
def generate_function_definitions(self, env, code):
self.temp_expression.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
class TempResultFromStatNode(ExprNodes.ExprNode):
# An ExprNode wrapper around a StatNode that executes the StatNode
# body. Requires a ResultRefNode that it sets up to refer to its
# own temp result. The StatNode must assign a value to the result
# node, which then becomes the result of this node.
subexprs = []
child_attrs = ['body']
def __init__(self, result_ref, body):
self.result_ref = result_ref
self.pos = body.pos
self.body = body
self.type = result_ref.type
self.is_temp = 1
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
def analyse_types(self, env):
self.body = self.body.analyse_expressions(env)
return self
def may_be_none(self):
return self.result_ref.may_be_none()
def generate_result_code(self, code):
self.result_ref.result_code = self.result()
self.body.generate_execution_code(code)
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
class HasGilNode(AtomicExprNode):
"""
Simple node that evaluates to 0 or 1 depending on whether we're
in a nogil context
"""
type = c_bint_type
def analyse_types(self, env):
return self
def generate_result_code(self, code):
self.has_gil = code.funcstate.gil_owned
def calculate_result_code(self):
return "1" if self.has_gil else "0"
|
0a4b962526c9983e62c8737557f823ad3b36bcde
|
b8441dc1987be9e64fa3081d456b2a3060ec44d1
|
/mars/serialization/scipy.py
|
d3cdf9ab60c7e7e303abe4fd1e1941e5eca72c75
|
[
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"CC0-1.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mars-project/mars
|
f99fefbce999d58a9249bc72046787a9731c9c73
|
c36c53fa22e10ef9477d9c454401a2f281375f31
|
refs/heads/master
| 2023-07-23T00:23:55.133015
| 2023-07-03T11:44:54
| 2023-07-03T11:44:54
| 160,543,708
| 2,704
| 362
|
Apache-2.0
| 2023-09-11T07:57:35
| 2018-12-05T16:04:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,435
|
py
|
scipy.py
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Tuple
import numpy as np
try:
import scipy.sparse as sps
except ImportError: # pragma: no cover
sps = None
from .core import Serializer, buffered, serialize, deserialize
class CsrMatrixSerializer(Serializer):
@buffered
def serial(self, obj: Any, context: Dict):
data_header, data_buffers = serialize(obj.data)
idx_header, idx_buffers = serialize(obj.indices)
indptr_header, indptr_buffers = serialize(obj.indptr)
header = (
data_header, # data_header
len(data_buffers), # data_buf_num
idx_header, # idx_header
len(idx_buffers), # idx_buf_num
indptr_header, # indptr_header
obj.shape, # shape
)
return header, data_buffers + idx_buffers + indptr_buffers, True
def deserial(self, serialized: Tuple, context: Dict, subs: List):
(
data_header,
data_buf_num,
idx_header,
idx_buf_num,
indptr_header,
shape,
) = serialized
data_buffers = subs[:data_buf_num]
idx_buffers = subs[data_buf_num : data_buf_num + idx_buf_num]
indptr_buffers = subs[data_buf_num + idx_buf_num :]
data = deserialize(data_header, data_buffers)
indices = deserialize(idx_header, idx_buffers)
indptr = deserialize(indptr_header, indptr_buffers)
shape = tuple(shape)
empty_arr = np.zeros(0, dtype=data.dtype)
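        # construct an empty CSR matrix with the target shape/dtype, then attach the
        # deserialized data/indices/indptr buffers to it directly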
target_csr = sps.coo_matrix(
(empty_arr, (empty_arr,) * 2), dtype=data.dtype, shape=shape
).tocsr()
target_csr.data, target_csr.indices, target_csr.indptr = data, indices, indptr
return target_csr
if sps: # pragma: no branch
CsrMatrixSerializer.register(sps.csr_matrix)
|
a876b0eba0334bc287a732052bf1b6be7caf0a76
|
2ab8c172bc4e9d3b3c75659585e83ade1e2eb832
|
/tests/test_basic.py
|
234a83b4f8143ea2d2569c2d440016e81deb9932
|
[
"BSD-3-Clause"
] |
permissive
|
theislab/scvelo
|
0a5d717f8f025d7b6cf96ded2a7d5868f0484f43
|
d89ca6aecbe93256fbcdd8a521fdee2b9f2a673a
|
refs/heads/master
| 2023-07-25T16:28:58.484128
| 2023-07-25T15:21:12
| 2023-07-25T15:21:12
| 145,459,109
| 372
| 143
|
BSD-3-Clause
| 2023-07-25T15:21:14
| 2018-08-20T19:03:07
|
Python
|
UTF-8
|
Python
| false
| false
| 3,916
|
py
|
test_basic.py
|
import numpy as np
import scvelo as scv
from scvelo.tools import ExpectationMaximizationModel
def test_einsum():
from scvelo.core import l2_norm, prod_sum
Ms, Mu = np.random.rand(5, 4), np.random.rand(5, 4)
assert np.allclose(prod_sum(Ms, Mu, axis=0), np.sum(Ms * Mu, 0))
assert np.allclose(prod_sum(Ms, Mu, axis=1), np.sum(Ms * Mu, 1))
assert np.allclose(l2_norm(Ms), np.linalg.norm(Ms, axis=1))
def test_neighbors():
adata = scv.datasets.simulation(random_seed=0, n_vars=100)
scv.pp.filter_and_normalize(adata)
scv.pp.pca(adata)
scv.pp.neighbors(adata)
adata_ = scv.pp.neighbors(adata, method="sklearn", copy=True)
dists = np.round(adata.obsp["distances"][0].data, 2)
dists_ = np.round(adata_.obsp["distances"][0].data, 2)
assert np.all(dists == dists_)
def test_dynamical_model():
adata = scv.datasets.simulation(random_seed=0, n_vars=10)
scv.pp.filter_and_normalize(adata)
scv.pp.moments(adata)
em_model = ExpectationMaximizationModel(
adata=adata, var_names_key=adata.var_names[0]
)
em_model.fit(return_model=False, copy=False)
assert np.round(adata[:, adata.var_names[0]].var["fit_alpha"][0], 4) == 4.7409
def test_pipeline():
adata = scv.datasets.simulation(random_seed=0, n_vars=10)
scv.pp.filter_and_normalize(adata, n_top_genes=5)
scv.pp.pca(adata)
scv.pp.moments(adata)
em_model = ExpectationMaximizationModel(adata=adata)
em_model.fit(copy=False)
scv.tl.velocity(adata)
scv.tl.velocity(adata, vkey="dynamical_velocity", mode="dynamical")
adata.var.velocity_genes = True
scv.tl.velocity_graph(adata)
scv.tl.velocity_embedding(adata)
scv.tl.velocity_confidence(adata)
scv.tl.latent_time(adata)
with scv.GridSpec() as pl:
pl.velocity_graph(adata)
pl.velocity_embedding(adata, arrow_length=3, arrow_size=3, c="latent_time")
pl.velocity_embedding_grid(adata, scale=0.5, c="latent_time", cmap="gnuplot")
pl.velocity_embedding_stream(adata, c=adata.var_names[0], layer="velocity")
pl.scatter(adata, basis=adata.var_names[0], c="velocity", use_raw=True)
pl.hist([adata.obs.initial_size_spliced, adata.obs.initial_size_unspliced])
Ms, Mu = adata.layers["Ms"][0], adata.layers["Mu"][0]
Vs, Vd = adata.layers["velocity"][0], adata.layers["dynamical_velocity"][0]
Vgraph = adata.uns["velocity_graph"].data[:5]
pars = adata[:, 0].var[["fit_alpha", "fit_gamma"]].values
assert np.allclose(Ms, [0.8269, 1.0772, 0.9396, 1.0846, 1.0398], rtol=1e-2)
assert np.allclose(Mu, [3.8412, 3.1976, 3.5523, 3.3433, 3.8006], rtol=1e-2)
assert np.allclose(adata.X[0], [0.0, 0.0, 0.0, 0.4981, 0.0], rtol=1e-2)
# assert np.allclose(Vpca, [0.0163, 0.0185, 0.0472, 0.0025], rtol=1e-2)
assert np.allclose(Vd, [1.7582, 2.0214, 1.73, 0.6615, 1.5118], rtol=1e-2)
assert np.allclose(Vs, [3.2961, 2.5254, 2.9926, 2.634, 3.1352], rtol=1e-2)
assert np.allclose(Vgraph, [0.915, 0.5997, 0.8494, 0.1615, 0.7708], rtol=1e-2)
assert np.allclose(pars, [4.9257, 0.3239], rtol=1e-2)
def test_highly_variable_subset():
adata = scv.datasets.simulation(random_seed=0, n_vars=10)
bdata = adata.copy()
scv.pp.filter_and_normalize(adata, n_top_genes=5, subset_highly_variable=True)
scv.pp.filter_and_normalize(bdata, n_top_genes=5, subset_highly_variable=False)
scv.pp.pca(adata)
scv.pp.pca(bdata)
scv.pp.moments(adata, use_rep="pca")
scv.pp.moments(bdata, use_rep="pca")
scv.tl.velocity_graph(adata)
scv.tl.velocity_graph(bdata)
bdata._inplace_subset_var(bdata.var["highly_variable"])
assert np.allclose(adata.layers["Ms"][0], bdata.layers["Ms"][0])
assert np.allclose(adata.layers["velocity"][0], bdata.layers["velocity"][0])
assert np.allclose(
adata.uns["velocity_graph"].data[:5], bdata.uns["velocity_graph"].data[:5]
)
|
492a95e9522506f954e076b536971f91de09ba31
|
94c1805df5a09c39159d502f420d19ad54b567fc
|
/runtime/deps/gyp/test/sanitize-rule-names/gyptest-sanitize-rule-names.py
|
968a0ce5ce48e8001091e75953655791dc0982e1
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
tmikov/jscomp
|
9805a5a4d06520549c57380f0df4a1c0aa0dab56
|
83828441cb38ec96603a6a60be06977d4852940a
|
refs/heads/develop
| 2021-01-19T02:56:35.102659
| 2016-04-12T06:19:30
| 2016-04-12T06:19:30
| 36,981,674
| 237
| 13
|
Apache-2.0
| 2018-10-14T09:48:12
| 2015-06-06T13:49:26
|
C
|
UTF-8
|
Python
| false
| false
| 479
|
py
|
gyptest-sanitize-rule-names.py
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure rule names with non-"normal" characters in them don't cause
broken build files. This test was originally causing broken .ninja files.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('sanitize-rule-names.gyp')
test.build('sanitize-rule-names.gyp', test.ALL)
test.pass_test()
|
605993b1e10ebacfc8f25e03c9c176de9e88a9b8
|
3a24f63c8742560993b5465b26339e7c0ed05a27
|
/crates/ruff/resources/test/fixtures/pandas_vet/pandas_use_of_dot_read_table.py
|
a0e48e223ebb47fb8f64031aefa8ea90b0c2c2a2
|
[
"BSD-3-Clause",
"0BSD",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0"
] |
permissive
|
astral-sh/ruff
|
8f1de11263474c6293454b02c728df2f113801db
|
82410524d9612f11387c2675a03869d489bb97ef
|
refs/heads/main
| 2023-08-02T23:20:34.351174
| 2023-08-02T21:32:43
| 2023-08-02T21:32:43
| 523,043,277
| 2,264
| 122
|
MIT
| 2023-09-14T20:08:59
| 2022-08-09T17:17:44
|
Rust
|
UTF-8
|
Python
| false
| false
| 606
|
py
|
pandas_use_of_dot_read_table.py
|
import pandas as pd
# Errors.
df = pd.read_table("data.csv", sep=",")
df = pd.read_table("data.csv", sep=",", header=0)
filename = "data.csv"
df = pd.read_table(filename, sep=",")
df = pd.read_table(filename, sep=",", header=0)
# Non-errors.
df = pd.read_csv("data.csv")
df = pd.read_table("data.tsv")
df = pd.read_table("data.tsv", sep="\t")
df = pd.read_table("data.tsv", sep=",,")
df = pd.read_table("data.tsv", sep=", ")
df = pd.read_table("data.tsv", sep=" ,")
df = pd.read_table("data.tsv", sep=" , ")
not_pd.read_table("data.csv", sep=",")
data = read_table("data.csv", sep=",")
data = read_table
|
353e81dc519df9fb9826c725e82908e56ff51046
|
85373d45a83e4096affafa4f4e5b400787413e57
|
/src/programy/storage/stores/file/store/errors.py
|
9ffdb61a8fa3cc3a696b14beace46b491efd74cc
|
[
"MIT"
] |
permissive
|
keiffster/program-y
|
a02bb9d8278835547cc875f4f9cd668d5b1f44da
|
fc7b0a3afa4fa6ed683e0c817a9aa89f9543bb20
|
refs/heads/master
| 2023-08-23T13:55:39.255535
| 2022-12-13T09:51:57
| 2022-12-13T09:51:57
| 74,462,571
| 379
| 173
|
NOASSERTION
| 2023-05-23T00:51:21
| 2016-11-22T10:43:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,619
|
py
|
errors.py
|
"""
Copyright (c) 2016-2020 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import os.path
import shutil
from programy.utils.logging.ylogger import YLogger
from programy.storage.stores.file.store.filestore import FileStore
from programy.storage.entities.errors import ErrorsStore
class FileErrorsStore(FileStore, ErrorsStore):
def __init__(self, storage_engine):
FileStore.__init__(self, storage_engine)
ErrorsStore.__init__(self)
def _get_storage_path(self):
return self.storage_engine.configuration.errors_storage.file
def get_storage(self):
return self.storage_engine.configuration.errors_storage
def empty(self):
filename = self._get_storage_path()
if os.path.exists(filename) is True:
shutil.rmtree(filename)
def _write_errors_to_file(self, filename, errors):
with open(filename, "w+") as errors_file:
errors_file.write("Error,File,Start Line,End Line\n")
for error in errors:
errors_file.write("%s,%s,%s,%s\n" % (error[0], error[1], error[2], error[3]))
errors_file.flush()
def save_errors(self, errors, commit=True):
filename = self._get_storage_path()
file_dir = self._get_dir_from_path(filename)
self._ensure_dir_exists(file_dir)
try:
YLogger.debug(self, "Saving errors to [%s]", filename)
self._write_errors_to_file(filename, errors)
except Exception as excep:
YLogger.exception_nostack(self, "Failed to write errors file [%s]", excep, filename)
|
37c684a902f89ff4555a6261ebaf99734549ef08
|
2617bfec230858814b32795c6a47249c54a15cac
|
/tests/clpy_tests/opencl_tests/test_api.py
|
f254f6ba9b2cb821e8472673b581a19dd1133873
|
[
"MIT",
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
fixstars/clpy
|
a06a1281887470d8faee3ec204b56fbef2496fab
|
693485f85397cc110fa45803c36c30c24c297df0
|
refs/heads/clpy
| 2021-06-10T04:00:30.974447
| 2021-02-28T06:01:26
| 2021-02-28T06:01:26
| 136,439,592
| 154
| 20
|
NOASSERTION
| 2021-04-07T02:41:03
| 2018-06-07T07:33:04
|
Python
|
UTF-8
|
Python
| false
| false
| 303
|
py
|
test_api.py
|
# -*- coding: utf-8 -*-
import unittest
class TestApi(unittest.TestCase):
"""test class of OpenCL API"""
def test_import(self):
        self.assertTrue(True) # Always OK if no exception from import
# TODO(LWisteria): Implement more case
if __name__ == "__main__":
unittest.main()
|
b3b94e3a9e154ec45183935b21c63fc42c762540
|
73305ddcc6dc9775b1e9a71506e2f3c74f678edc
|
/dags/deal_finder_dag.py
|
5ceb63e16bbfb78cdc174f80f235d83fe56353d1
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
google/starthinker
|
ef359557da4140275a8524d0d813eecf022ece9e
|
b596df09c52511e2e0c0987f6245aa4607190dd0
|
refs/heads/master
| 2023-08-25T21:16:45.578012
| 2023-07-17T22:19:18
| 2023-07-17T22:20:10
| 123,017,995
| 167
| 64
|
Apache-2.0
| 2023-08-02T01:24:51
| 2018-02-26T19:15:09
|
Python
|
UTF-8
|
Python
| false
| false
| 14,740
|
py
|
deal_finder_dag.py
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see starthinker/scripts for possible source):
# - Command: "python starthinker_ui/manage.py airflow"
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
  1. Ensure a RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
  1. Ensure a RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
DV360 Deal Finder
Compares open vs. deal CPM, CPC, and CPA so that clients can decide which sites, inventory, and deals work best.
- Wait for BigQuery->->->Deal_Finder_Dashboard to be created.
- Join the 1-StarThinker Assets Group to access the following assets
- Copy 2-Deal Finder Sample Data.
- Click Edit Connection, and change to BigQuery->StarThinker Data->->Deal_Finder_Dashboard.
- Copy 3-Deal Finder Sample Report.
- When prompted choose the new data source you just created.
  - Or give these instructions to the client.
1-StarThinker Assets Group: https://groups.google.com/d/forum/starthinker-assets
2-Deal Finder Sample Data: https://datastudio.google.com/open/1QrWNTurvQT6nx20vnzdDveSzSmRjqHxQ
3-Deal Finder Sample Report: https://datastudio.google.com/open/1fjRI5AIKTYTA4fWs-pYkJbIMgCumlMyO
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
INPUTS = {
'recipe_slug':'', # Place where tables will be written in BigQuery.
'recipe_timezone':'America/Los_Angeles', # Timezone for report dates.
'recipe_name':'', # Name of report in DV360, should be unique.
'auth_write':'service', # Credentials used for writing data.
'auth_read':'user', # Credentials used for reading data.
'partners':[], # DV360 partner id.
'advertisers':[], # Comma delimited list of DV360 advertiser ids.
}
RECIPE = {
'setup':{
'day':[
'Mon',
'Tue',
'Wed',
'Thu',
'Fri',
'Sat',
'Sun'
],
'hour':[
3,
4
]
},
'tasks':[
{
'dataset':{
'description':'Create a dataset for bigquery tables.',
'hour':[
4
],
'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},
'dataset':{'field':{'name':'recipe_slug','kind':'string','description':'Place where tables will be created in BigQuery.'}}
}
},
{
'dbm':{
'description':'Create a DV360 report.',
'hour':[
3
],
'auth':{'field':{'name':'auth_read','kind':'authentication','order':1,'default':'user','description':'Credentials used for reading data.'}},
'report':{
'filters':{
'FILTER_PARTNER':{
'values':{'field':{'name':'partners','kind':'integer_list','order':5,'default':[],'description':'DV360 partner id.'}}
},
'FILTER_ADVERTISER':{
'values':{'field':{'name':'advertisers','kind':'integer_list','order':6,'default':[],'description':'Comma delimited list of DV360 advertiser ids.'}}
}
},
'body':{
'timezoneCode':{'field':{'name':'recipe_timezone','kind':'timezone','description':'Timezone for report dates.','default':'America/Los_Angeles'}},
'metadata':{
'title':{'field':{'name':'recipe_name','kind':'string','prefix':'Deal Finder For ','description':'Name of report in DV360, should be unique.'}},
'dataRange':'LAST_30_DAYS',
'format':'CSV'
},
'params':{
'type':'TYPE_CROSS_PARTNER',
'groupBys':[
'FILTER_PARTNER_NAME',
'FILTER_PARTNER',
'FILTER_ADVERTISER_NAME',
'FILTER_ADVERTISER',
'FILTER_APP_URL',
'FILTER_SITE_ID',
'FILTER_INVENTORY_SOURCE_NAME',
'FILTER_INVENTORY_SOURCE',
'FILTER_INVENTORY_SOURCE_TYPE',
'FILTER_ADVERTISER_CURRENCY',
'FILTER_CREATIVE_WIDTH',
'FILTER_CREATIVE_HEIGHT',
'FILTER_CREATIVE_TYPE'
],
'metrics':[
'METRIC_IMPRESSIONS',
'METRIC_CLICKS',
'METRIC_TOTAL_CONVERSIONS',
'METRIC_TOTAL_MEDIA_COST_ADVERTISER',
'METRIC_REVENUE_ADVERTISER',
'METRIC_ACTIVE_VIEW_MEASURABLE_IMPRESSIONS',
'METRIC_ACTIVE_VIEW_VIEWABLE_IMPRESSIONS'
]
}
}
}
}
},
{
'dbm':{
'description':'Copy a DV360 report to BigQuery.',
'hour':[
4
],
'auth':{'field':{'name':'auth_read','kind':'authentication','order':1,'default':'user','description':'Credentials used for reading data.'}},
'report':{
'name':{'field':{'name':'recipe_name','kind':'string','prefix':'Deal Finder For ','description':'Name of report in DV360, should be unique.'}},
'timeout':10
},
'out':{
'bigquery':{
'dataset':{'field':{'name':'recipe_slug','kind':'string','description':'Place where tables will be written in BigQuery.'}},
'table':'Deal_Finder_DV360_Report',
'header':True,
'schema':[
{
'name':'Partner',
'type':'STRING'
},
{
'name':'Partner_ID',
'type':'INTEGER'
},
{
'name':'Advertiser',
'type':'STRING'
},
{
'name':'Advertiser_ID',
'type':'INTEGER'
},
{
'name':'Site',
'type':'STRING'
},
{
'name':'Site_ID',
'type':'INTEGER'
},
{
'name':'Inventory',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Inventory_ID',
'type':'INTEGER',
'mode':'NULLABLE'
},
{
'name':'Inventory_Type',
'type':'STRING'
},
{
'name':'Advertiser_Currency',
'type':'STRING'
},
{
'name':'Creative_Width',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Creative_Height',
'type':'STRING',
'mode':'NULLABLE'
},
{
'name':'Creative_Type',
'type':'STRING'
},
{
'name':'Impressions',
'type':'INTEGER'
},
{
'name':'Clicks',
'type':'INTEGER'
},
{
'name':'Conversions',
'type':'FLOAT'
},
{
'name':'Cost',
'type':'FLOAT'
},
{
'name':'Revenue',
'type':'FLOAT'
},
{
'name':'AV_Impressions_Measurable',
'type':'INTEGER'
},
{
'name':'AV_Impressions_Viewable',
'type':'INTEGER'
}
]
}
}
}
},
{
'bigquery':{
'description':'The logic query for Deal Finder, transforms report into view used by datastudio.',
'hour':[
4
],
'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},
'from':{
'query':"SELECT Partner, Partner_ID, Advertiser, Advertiser_ID, Site, Site_ID, Inventory, Inventory_Type, Creative_Type, Creative_Size, Always_On, Deal_Impressions, Open_Impressions, Rank_Impressions, Deal_Clicks, Open_Clicks, Rank_Clicks, Deal_Conversions, Open_Conversions, Rank_Conversions, Deal_Impressions_Viewable, Open_Impressions_Viewable, Rank_Impressions_Viewable, Deal_Impressions_Measurable, Open_Impressions_Measurable, Rank_Impressions_Measurable, Deal_Cost, Open_Cost, Rank_Cost, FROM ( SELECT FIRST(Partner) AS Partner, FIRST(Partner_ID) AS Partner_ID, FIRST(Advertiser) AS Advertiser, Advertiser_ID, First(Site) AS Site, Site_ID, Inventory, Inventory_Type, Creative_Type, Creative_Width + ' x ' + Creative_Height AS Creative_Size, IF (LEFT(Inventory, 5) == 'AO - ', True, False) AS Always_On, SUM(Deal_Impressions) AS Deal_Impressions, SUM(Open_Impressions) AS Open_Impressions, SUM(Open_Impressions) + SUM(Deal_Impressions) AS Total_Impressions, ROW_NUMBER() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Impressions DESC) AS Rank_Impressions, SUM(Deal_Clicks) AS Deal_Clicks, SUM(Open_Clicks) AS Open_Clicks, SUM(Open_Clicks) + SUM(Deal_Clicks) AS Total_Clicks, ROW_NUMBER() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Clicks DESC) AS Rank_Clicks, SUM(Deal_Conversions) AS Deal_Conversions, SUM(Open_Conversions) AS Open_Conversions, SUM(Open_Conversions) + SUM(Deal_Conversions) AS Total_Conversions, ROW_NUMBER() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Conversions DESC) AS Rank_Conversions, SUM(Deal_Cost) AS Deal_Cost, SUM(Open_Cost) AS Open_Cost, SUM(Open_Cost) + SUM(Deal_Cost) AS Total_Cost, RANK() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Cost DESC) AS Rank_Cost, SUM(Deal_Impressions_Viewable) AS Deal_Impressions_Viewable, SUM(Open_Impressions_Viewable) AS Open_Impressions_Viewable, SUM(Open_Impressions_Viewable) + SUM(Deal_Impressions_Viewable) AS Total_Impressions_Viewable, ROW_NUMBER() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Impressions_Viewable DESC) AS Rank_Impressions_Viewable, SUM(Deal_Impressions_Measurable) AS Deal_Impressions_Measurable, SUM(Open_Impressions_Measurable) AS Open_Impressions_Measurable, SUM(Open_Impressions_Measurable) + SUM(Deal_Impressions_Measurable) AS Total_Impressions_Measurable, ROW_NUMBER() OVER (PARTITION BY Advertiser_ID ORDER BY Total_Impressions_Measurable DESC) AS Rank_Impressions_Measurable, FROM ( SELECT Partner, Partner_ID, Advertiser, Advertiser_ID, Site, Site_ID, Inventory, Inventory_Type, Creative_Type, Creative_Width, Creative_Height, IF(Inventory_ID IS NULL, Impressions, 0) AS Open_Impressions, IF(Inventory_ID IS NULL, 0, Impressions) AS Deal_Impressions, IF(Inventory_ID IS NULL, Clicks, 0) AS Open_Clicks, IF(Inventory_ID IS NULL, 0, Clicks) AS Deal_Clicks, IF(Inventory_ID IS NULL, Conversions, 0) AS Open_Conversions, IF(Inventory_ID IS NULL, 0, Conversions) AS Deal_Conversions, IF(Inventory_ID IS NULL, Cost, 0) AS Open_Cost, IF(Inventory_ID IS NULL, 0, Cost) AS Deal_Cost, IF(Inventory_ID IS NULL, AV_Impressions_Viewable, 0) AS Open_Impressions_Viewable, IF(Inventory_ID IS NULL, 0, AV_Impressions_Viewable) AS Deal_Impressions_Viewable, IF(Inventory_ID IS NULL, AV_Impressions_Measurable, 0) AS Open_Impressions_Measurable, IF(Inventory_ID IS NULL, 0, AV_Impressions_Measurable) AS Deal_Impressions_Measurable, FROM [[PARAMETER].Deal_Finder_DV360_Report] OMIT RECORD IF Site == 'Low volume inventory') GROUP By Advertiser_ID, Site_ID, Inventory, Inventory_Type, Creative_Type, Creative_Size, Always_On) WHERE Rank_Impressions 
< 100 OR Rank_Clicks < 100 OR Rank_Conversions < 100 OR Rank_Cost < 100;",
'parameters':[
{'field':{'name':'recipe_slug','kind':'string','description':'Place where tables will be written in BigQuery.'}}
]
},
'to':{
'dataset':{'field':{'name':'recipe_slug','kind':'string','description':'Place where tables will be written in BigQuery.'}},
'view':'Deal_Finder_Dashboard'
}
}
}
]
}
dag_maker = DAG_Factory('deal_finder', RECIPE, INPUTS)
dag = dag_maker.generate()
if __name__ == "__main__":
dag_maker.print_commandline()
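# Note: the sketch below is illustrative and not part of the original recipe. It
# shows one way to satisfy the docstring's requirement that tasks with
# 'auth':'service' read credentials embedded in the recipe itself; the JSON
# string is a placeholder for real service-account credentials.
#
#   RECIPE['setup']['auth'] = {'service': '{"type": "service_account", "...": "..."}'}
#   dag_maker = DAG_Factory('deal_finder', RECIPE, INPUTS)
#   dag = dag_maker.generate()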
|
25ea74cef302f87e2073c9bff1949cc403ab7183
|
d48e09166db1ff0dae2c8a4ddbbe453606689081
|
/selfdrive/can/libdbc_py.py
|
fbf36a3d4d07643530a9cdd46fac546925685657
|
[
"MIT"
] |
permissive
|
Gernby/raspberry-pilot
|
a8857fdb2a50ade237d657ccd9d1049c5aa65515
|
0909e7594dff1c6fb524b5502ce3258f1969b6a5
|
refs/heads/resonant-rails
| 2023-04-06T10:09:35.457160
| 2023-01-06T01:57:31
| 2023-01-06T01:57:31
| 242,575,807
| 141
| 62
|
MIT
| 2023-03-05T05:22:49
| 2020-02-23T19:15:12
|
C
|
UTF-8
|
Python
| false
| false
| 1,884
|
py
|
libdbc_py.py
|
import os
import subprocess
from cffi import FFI
can_dir = os.path.dirname(os.path.abspath(__file__))
libdbc_fn = os.path.join(can_dir, "libdbc.so")
subprocess.check_call(["make"], cwd=can_dir)
ffi = FFI()
ffi.cdef("""
typedef struct {
const char* name;
double value;
} SignalPackValue;
typedef struct {
uint32_t address;
const char* name;
double default_value;
} SignalParseOptions;
typedef struct {
uint32_t address;
int check_frequency;
} MessageParseOptions;
typedef struct {
uint32_t address;
uint16_t ts;
const char* name;
double value;
} SignalValue;
typedef enum {
DEFAULT,
HONDA_CHECKSUM,
HONDA_COUNTER,
TOYOTA_CHECKSUM,
PEDAL_CHECKSUM,
PEDAL_COUNTER,
} SignalType;
typedef struct {
const char* name;
int b1, b2, bo;
bool is_signed;
double factor, offset;
SignalType type;
} Signal;
typedef struct {
const char* name;
uint32_t address;
unsigned int size;
size_t num_sigs;
const Signal *sigs;
} Msg;
typedef struct {
const char* name;
uint32_t address;
const char* def_val;
const Signal *sigs;
} Val;
typedef struct {
const char* name;
size_t num_msgs;
const Msg *msgs;
const Val *vals;
size_t num_vals;
} DBC;
void* can_init(int bus, const char* dbc_name,
size_t num_message_options, const MessageParseOptions* message_options,
size_t num_signal_options, const SignalParseOptions* signal_options, bool sendcan,
const char* tcp_addr, int timeout);
int can_update(void* can, uint64_t sec, bool wait);
size_t can_query_latest(void* can, bool *out_can_valid, size_t out_values_size, SignalValue* out_values);
const DBC* dbc_lookup(const char* dbc_name);
void* canpack_init(const char* dbc_name);
uint64_t canpack_pack(void* inst, uint32_t address, size_t num_vals, const SignalPackValue *vals, int counter);
""")
libdbc = ffi.dlopen(libdbc_fn)
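# Illustrative usage only (not part of the original module). The DBC name below
# is a placeholder; any DBC compiled into libdbc.so would work:
#
#   dbc = libdbc.dbc_lookup(b"honda_civic_touring_2016_can_generated")
#   if dbc != ffi.NULL:
#       print(ffi.string(dbc.name).decode(), dbc.num_msgs)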
|
d9c8c9d1b442bdc20cbef0ae31657fb10a006b80
|
5eb52c07e5b1bd00af77306f927f382b684cd6ff
|
/indy_common/test/types/test_get_rich_schema_object_by_metadata_schema.py
|
a1919821c22d758cb64539d55f0ff6f7e558d383
|
[
"Apache-2.0"
] |
permissive
|
hyperledger/indy-node
|
bce39486988f5114581cff4f6d14fc1b7684143c
|
e6bb87d4c605aff9914491d062248b6ec857334c
|
refs/heads/main
| 2023-09-03T15:33:08.187153
| 2023-05-08T22:48:21
| 2023-05-08T22:48:21
| 77,021,566
| 691
| 783
|
Apache-2.0
| 2023-05-09T15:42:43
| 2016-12-21T05:45:04
|
Python
|
UTF-8
|
Python
| false
| false
| 831
|
py
|
test_get_rich_schema_object_by_metadata_schema.py
|
from collections import OrderedDict
from indy_common.types import ClientGetRichSchemaObjectByMetadataOperation
from plenum.common.messages.fields import ConstantField, LimitedLengthStringField, VersionField, NonEmptyStringField
EXPECTED_ORDERED_FIELDS = OrderedDict([
("type", ConstantField),
("rsType", NonEmptyStringField),
("rsName", LimitedLengthStringField),
("rsVersion", VersionField),
])
def test_has_expected_fields():
actual_field_names = OrderedDict(ClientGetRichSchemaObjectByMetadataOperation.schema).keys()
assert actual_field_names == EXPECTED_ORDERED_FIELDS.keys()
def test_has_expected_validators():
schema = dict(ClientGetRichSchemaObjectByMetadataOperation.schema)
for field, validator in EXPECTED_ORDERED_FIELDS.items():
assert isinstance(schema[field], validator)
|
7c7ea9a31bd032227bdf71ce2ec2c2b199d81fc9
|
ea57d267ab31480d8d731b2c095e9da9ad989133
|
/aea/runtime.py
|
ef93e6212561c20f129b32ac4a4ef3dab5b4db20
|
[
"Apache-2.0"
] |
permissive
|
fetchai/agents-aea
|
6d034f1db6f3beacf31dac2f5a1baaa60c8edb7d
|
bec49adaeba661d8d0f03ac9935dc89f39d95a0d
|
refs/heads/main
| 2023-08-08T23:19:06.276643
| 2023-02-04T10:46:39
| 2023-02-04T10:46:39
| 203,558,879
| 192
| 58
|
Apache-2.0
| 2023-07-19T04:45:26
| 2019-08-21T10:12:47
|
Python
|
UTF-8
|
Python
| false
| false
| 14,470
|
py
|
runtime.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2023 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the implementation of runtime for economic agent (AEA)."""
import asyncio
from asyncio.events import AbstractEventLoop
from concurrent.futures._base import CancelledError
from contextlib import suppress
from enum import Enum
from typing import Dict, Optional, Type, cast
from aea.abstract_agent import AbstractAgent
from aea.agent_loop import (
AgentLoopStates,
AsyncAgentLoop,
AsyncState,
BaseAgentLoop,
SyncAgentLoop,
)
from aea.connections.base import ConnectionStates
from aea.decision_maker.base import DecisionMaker, DecisionMakerHandler
from aea.exceptions import _StopRuntime
from aea.helpers.async_utils import Runnable
from aea.helpers.exception_policy import ExceptionPolicyEnum
from aea.helpers.logging import WithLogger, get_logger
from aea.helpers.storage.generic_storage import Storage
from aea.multiplexer import AsyncMultiplexer
from aea.skills.tasks import ProcessTaskManager, TaskManager, ThreadedTaskManager
class RuntimeStates(Enum):
"""Runtime states."""
starting = "starting"
running = "running"
stopping = "stopping"
stopped = "stopped"
error = "error"
class BaseRuntime(Runnable, WithLogger):
"""Abstract runtime class to create implementations."""
RUN_LOOPS: Dict[str, Type[BaseAgentLoop]] = {
"async": AsyncAgentLoop,
"sync": SyncAgentLoop,
}
DEFAULT_RUN_LOOP: str = "async"
TASKMANAGERS = {"threaded": ThreadedTaskManager, "multiprocess": ProcessTaskManager}
DEFAULT_TASKMANAGER = "threaded"
def __init__(
self,
agent: AbstractAgent,
multiplexer_options: Dict,
loop_mode: Optional[str] = None,
loop: Optional[AbstractEventLoop] = None,
threaded: bool = False,
task_manager_mode: Optional[str] = None,
) -> None:
"""
Init runtime.
:param agent: Agent to run.
:param multiplexer_options: options for the multiplexer.
:param loop_mode: agent main loop mode.
:param loop: optional event loop. if not provided a new one will be created.
:param threaded: if True, run in threaded mode, else async
:param task_manager_mode: mode of the task manager.
"""
Runnable.__init__(self, threaded=threaded, loop=loop if not threaded else None)
logger = get_logger(__name__, agent.name)
WithLogger.__init__(self, logger=logger)
self._agent: AbstractAgent = agent
self._state: AsyncState = AsyncState(RuntimeStates.stopped, RuntimeStates)
self._state.add_callback(self._log_runtime_state)
self._multiplexer: AsyncMultiplexer = self._get_multiplexer_instance(
multiplexer_options
)
self._task_manager_mode = task_manager_mode or self.DEFAULT_TASKMANAGER
self._task_manager = self._get_taskmanager_instance()
self._decision_maker: Optional[DecisionMaker] = None
self._storage: Optional[Storage] = self._get_storage(agent)
self._loop_mode = loop_mode or self.DEFAULT_RUN_LOOP
self._agent_loop: BaseAgentLoop = self._get_agent_loop_instance(self._loop_mode)
def _log_runtime_state(self, state: RuntimeStates) -> None:
"""Log a runtime state changed."""
self.logger.debug(f"[{self._agent.name}]: Runtime state changed to {state}.")
def _get_taskmanager_instance(self) -> TaskManager:
"""Get taskmanager instance."""
if self._task_manager_mode not in self.TASKMANAGERS:
raise ValueError( # pragma: nocover
f"Task manager mode `{self._task_manager_mode} is not supported. valid are: `{list(self.TASKMANAGERS.keys())}`"
)
cls = self.TASKMANAGERS[self._task_manager_mode]
return cls()
def _get_multiplexer_instance(
self, multiplexer_options: Dict, threaded: bool = False
) -> AsyncMultiplexer:
"""Create multiplexer instance."""
loop: Optional[AbstractEventLoop] = None
if not threaded:
loop = self.loop
return AsyncMultiplexer(
loop=loop,
threaded=threaded,
agent_name=self._agent.name,
connections=multiplexer_options["connections"],
exception_policy=multiplexer_options.get(
"connection_exception_policy", ExceptionPolicyEnum.propagate
),
default_routing=multiplexer_options.get("default_routing"),
default_connection=multiplexer_options.get("default_connection"),
protocols=multiplexer_options.get("protocols", []),
)
@staticmethod
def _get_storage(agent: AbstractAgent) -> Optional[Storage]:
"""Get storage instance if storage_uri provided."""
if agent.storage_uri:
# threaded has to always be True, because synchronous operations are supported
return Storage(agent.storage_uri, threaded=True)
return None # pragma: nocover
def _get_agent_loop_instance(self, loop_mode: str) -> BaseAgentLoop:
"""
Construct agent loop instance.
:param: loop_mode: str.
:return: AgentLoop instance
"""
loop_cls = self._get_agent_loop_class(loop_mode)
return loop_cls(self._agent)
def _get_agent_loop_class(self, loop_mode: str) -> Type[BaseAgentLoop]:
"""
Get agent loop class based on loop mode.
:param: loop_mode: str.
:return: AgentLoop class
"""
if loop_mode not in self.RUN_LOOPS: # pragma: nocover
raise ValueError(
f"Loop `{loop_mode} is not supported. valid are: `{list(self.RUN_LOOPS.keys())}`"
)
return self.RUN_LOOPS[loop_mode]
@property
def storage(self) -> Optional[Storage]:
"""Get optional storage."""
return self._storage
@property
def loop_mode(self) -> str: # pragma: nocover
"""Get current loop mode."""
return self._loop_mode
@property
def task_manager(self) -> TaskManager:
"""Get the task manager."""
return self._task_manager
@property
def loop(self) -> Optional[AbstractEventLoop]:
"""Get event loop."""
return self._loop
@property
def agent_loop(self) -> BaseAgentLoop:
"""Get the agent loop."""
return self._agent_loop
@property
def multiplexer(self) -> AsyncMultiplexer:
"""Get multiplexer."""
return self._multiplexer
@property
def is_running(self) -> bool:
"""Get running state of the runtime."""
return self._state.get() == RuntimeStates.running
@property
def is_stopped(self) -> bool: # pragma: nocover
"""Get stopped state of the runtime."""
return self._state.get() in [RuntimeStates.stopped]
@property
def state(self) -> RuntimeStates: # pragma: nocover
"""
Get runtime state.
:return: RuntimeStates
"""
return cast(RuntimeStates, self._state.get())
@property
def decision_maker(self) -> DecisionMaker:
"""Return decision maker if set."""
if self._decision_maker is None: # pragma: nocover
raise ValueError("call `set_decision_maker` first!")
return self._decision_maker
def _set_task(self) -> None:
"""Set task."""
if self._loop is None:
raise ValueError("Loop not set!") # pragma: nocover
self._task = self._loop.create_task(self._run_wrapper())
def set_decision_maker(self, decision_maker_handler: DecisionMakerHandler) -> None:
"""Set decision maker with handler provided."""
self._decision_maker = DecisionMaker(
decision_maker_handler=decision_maker_handler
)
def _teardown(self) -> None:
"""Tear down runtime."""
self.logger.debug("[{}]: Runtime teardown...".format(self._agent.name))
if self._decision_maker is not None: # pragma: nocover
self.decision_maker.stop()
self.task_manager.stop()
self.logger.debug("[{}]: Calling teardown method...".format(self._agent.name))
self._agent.teardown()
self.logger.debug("[{}]: Runtime teardown completed".format(self._agent.name))
def set_loop(self, loop: AbstractEventLoop) -> None:
"""
Set event loop to be used.
:param loop: event loop to use.
"""
self._loop = loop
asyncio.set_event_loop(self._loop)
class AsyncRuntime(BaseRuntime):
"""Asynchronous runtime: uses asyncio loop for multiplexer and async agent main loop."""
AGENT_LOOP_STARTED_TIMEOUT: float = 5
def __init__(
self,
agent: AbstractAgent,
multiplexer_options: Dict,
loop_mode: Optional[str] = None,
loop: Optional[AbstractEventLoop] = None,
threaded: bool = False,
task_manager_mode: Optional[str] = None,
) -> None:
"""
Init runtime.
:param agent: Agent to run.
:param multiplexer_options: options for the multiplexer.
:param loop_mode: agent main loop mode.
:param loop: optional event loop. if not provided a new one will be created.
:param threaded: if True, run in threaded mode, else async
:param task_manager_mode: mode of the task manager.
"""
super().__init__(
agent=agent,
multiplexer_options=multiplexer_options,
loop_mode=loop_mode,
loop=loop,
threaded=threaded,
task_manager_mode=task_manager_mode,
)
self._task: Optional[asyncio.Task] = None
def set_loop(self, loop: AbstractEventLoop) -> None:
"""
Set event loop to be used.
:param loop: event loop to use.
"""
BaseRuntime.set_loop(self, loop)
async def run(self) -> None:
"""
Start runtime task.
Starts multiplexer and agent loop.
"""
terminal_state = RuntimeStates.error
try:
await self.run_runtime()
except _StopRuntime as e:
self._state.set(RuntimeStates.stopping)
terminal_state = RuntimeStates.stopped
if e.reraise:
raise e.reraise
except (asyncio.CancelledError, CancelledError, KeyboardInterrupt):
self._state.set(RuntimeStates.stopping)
terminal_state = RuntimeStates.stopped
finally:
await self.stop_runtime()
self._state.set(terminal_state)
async def stop_runtime(self) -> None:
"""
Stop runtime coroutine.
Stop main loop.
Tear down the agent.
Disconnect multiplexer.
"""
self.agent_loop.stop()
with suppress(_StopRuntime):
await self.agent_loop.wait_completed()
self._teardown()
if self._storage is not None:
self._storage.stop()
await self._storage.wait_completed()
self.multiplexer.stop()
await self.multiplexer.wait_completed()
self.logger.debug("Runtime loop stopped!")
async def run_runtime(self) -> None:
"""Run runtime which means start agent loop, multiplexer and storage."""
self._state.set(RuntimeStates.starting)
await asyncio.gather(
self._start_multiplexer(), self._start_agent_loop(), self._start_storage()
)
async def _start_storage(self) -> None:
"""Start storage component asynchronously."""
if self._storage is not None:
self._storage.start()
await self._storage.wait_completed()
async def _start_multiplexer(self) -> None:
"""Call multiplexer connect asynchronous way."""
if not self._loop: # pragma: nocover
raise ValueError("no loop is set for runtime.")
self.multiplexer.set_loop(self._loop)
self.multiplexer.start()
await self.multiplexer.wait_completed()
async def _start_agent_loop(self) -> None:
"""Start agent main loop asynchronous way."""
self.logger.debug("[{}] Runtime started".format(self._agent.name))
await self.multiplexer.connection_status.wait(ConnectionStates.connected)
self.logger.debug("[{}] Multiplexer connected.".format(self._agent.name))
if self.storage:
await self.storage.wait_connected()
self.logger.debug("[{}] Storage connected.".format(self._agent.name))
self.task_manager.start()
if self._decision_maker is not None: # pragma: nocover
self.decision_maker.start()
self.logger.debug("[{}] Calling setup method...".format(self._agent.name))
self._agent.setup()
self.logger.debug("[{}] Run main loop...".format(self._agent.name))
self.agent_loop.start()
await asyncio.wait_for(
self.agent_loop.wait_state(AgentLoopStates.started),
timeout=self.AGENT_LOOP_STARTED_TIMEOUT,
)
self._state.set(RuntimeStates.running)
try:
await self.agent_loop.wait_completed()
except asyncio.CancelledError:
self.agent_loop.stop()
await self.agent_loop.wait_completed()
raise
class ThreadedRuntime(AsyncRuntime):
"""Run agent and multiplexer in different threads with own asyncio loops."""
def _get_multiplexer_instance(
self, multiplexer_options: Dict, threaded: bool = True
) -> AsyncMultiplexer:
"""Create multiplexer instance."""
return super()._get_multiplexer_instance(
multiplexer_options=multiplexer_options, threaded=threaded
)
|
60ba60cffc380f4777bdd6000639b3e40546f1d1
|
f9308d5a8efe2dbb48e9cc87cd06405b60a9dc7b
|
/samples/python/apidocs/ee_image_arrayargmax.py
|
e0b33b07dbbbb7bae1f32c57125918cad4d1e1f7
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
google/earthengine-community
|
4e054b421f66f03507d58668084aee981062fc24
|
ce931040c518860f8788b4888c0acfdebd2952fc
|
refs/heads/master
| 2023-09-01T14:47:54.812703
| 2023-08-31T23:01:00
| 2023-08-31T23:01:39
| 200,732,820
| 428
| 552
|
Apache-2.0
| 2023-09-13T21:46:51
| 2019-08-05T21:42:11
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,733
|
py
|
ee_image_arrayargmax.py
|
# Copyright 2023 The Google Earth Engine Community Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START earthengine__apidocs__ee_image_arrayargmax]
# A function to print the array for a selected pixel in the following examples.
def samp_arr_img(arr_img):
point = ee.Geometry.Point([-121, 42])
return arr_img.sample(point, 500).first().get('array')
# Create a 1D array image.
array_img_1d = ee.Image([0, 1, 5, 2, 3, 4]).toArray()
print('1D array image (pixel):', samp_arr_img(array_img_1d).getInfo())
# [0, 1, 5, 2, 3, 4]
# Get the position of the maximum value in a 1D array.
max_value_1d = array_img_1d.arrayArgmax()
print(
'Position of the maximum 1D array value:',
samp_arr_img(max_value_1d).getInfo()
)
# [2]
# Create a 2D 2x3 array image (reshape the 1D array image).
array_img_2d = array_img_1d.arrayReshape(ee.Image([2, 3]).toArray(), 2)
print('2D 2x3 array image (pixel):', samp_arr_img(array_img_2d).getInfo())
# [[0, 1, 5],
# [2, 3, 4]]
# Get the position of the maximum value in a 2D array.
max_value_2d = array_img_2d.arrayArgmax()
print(
'Position of the maximum 2D array value:',
samp_arr_img(max_value_2d).getInfo()
)
# [0, 2]
# [END earthengine__apidocs__ee_image_arrayargmax]
|
2ffff106d97ba4dd03a27b10a435701fec23dad8
|
a902290fb3b911676358ae4d93f83061a6c2bd0f
|
/InvenTree/company/migrations/0014_auto_20200407_0116.py
|
03985a1ef3297e868cf40a1ca188e1a0fdd5cc68
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
inventree/InvenTree
|
a15e54182c9bfafdf5348cc9a66da1004e23e760
|
e88a8e99a5f0b201c67a95cba097c729f090d5e2
|
refs/heads/master
| 2023-09-03T19:32:35.438375
| 2023-08-30T00:25:40
| 2023-08-30T00:25:40
| 85,894,461
| 3,077
| 549
|
MIT
| 2023-09-14T14:21:01
| 2017-03-23T01:44:10
|
Python
|
UTF-8
|
Python
| false
| false
| 492
|
py
|
0014_auto_20200407_0116.py
|
# Generated by Django 2.2.10 on 2020-04-07 01:16
import company.models
from django.db import migrations
import stdimage.models
class Migration(migrations.Migration):
dependencies = [
('company', '0013_auto_20200406_0131'),
]
operations = [
migrations.AlterField(
model_name='company',
name='image',
field=stdimage.models.StdImageField(blank=True, null=True, upload_to=company.models.rename_company_image),
),
]
|
94f73c99e6171e386bdaf2d71a4c3d204a12f32d
|
87ddcf61c2faaaa795b9c25af334a76018337f62
|
/tests/schema/field/test_general.py
|
160ee3c7224aa3c1a877a5d3ea11c9c15599417d
|
[
"MIT"
] |
permissive
|
frictionlessdata/frictionless-py
|
17d663ad34c18964113c97e4d657004610fe0df0
|
740319edeee58f12cc6956a53356f3065ff18cbb
|
refs/heads/main
| 2023-08-26T16:24:25.353929
| 2023-08-04T07:55:37
| 2023-08-04T07:55:37
| 28,409,905
| 295
| 79
|
MIT
| 2023-09-04T05:01:33
| 2014-12-23T17:11:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,985
|
py
|
test_general.py
|
import textwrap
from importlib import import_module
import pytest
from frictionless import Field
# General
DESCRIPTOR = {
"name": "id",
"type": "integer",
"format": "default",
"missingValues": ["-"],
"constraints": {"required": True},
}
def test_field():
field = Field.from_descriptor(DESCRIPTOR)
assert field.name == "id"
assert field.type == "integer"
assert field.format == "default"
assert field.missing_values == ["-"]
assert field.constraints == {"required": True}
assert field.required is True
def test_field_defaults():
field = Field.from_descriptor({"name": "id", "type": "any"})
assert field.name == "id"
assert field.type == "any"
assert field.format == "default"
assert field.missing_values == [""]
assert field.constraints == {}
assert field.required is False
@pytest.mark.parametrize("create_descriptor", [(False,), (True,)])
def test_field_standard_specs_properties(create_descriptor):
helpers = import_module("frictionless.helpers")
options = dict(
name="name",
title="title",
description="description",
type="string",
format="default",
missing_values=["na"],
constraints={},
rdf_type="rdf",
)
field = (
Field(**options) # type: ignore
if not create_descriptor
else Field.from_descriptor(helpers.create_descriptor(**options))
)
assert field.name == "name"
assert field.title == "title"
assert field.description == "description"
assert field.type == "string"
assert field.format == "default"
assert field.missing_values == ["na"]
assert field.constraints == {}
assert field.rdf_type == "rdf"
def test_field_description_html():
field = Field(name="name", description="**test**")
assert field.description == "**test**"
assert field.description_html == "<p><strong>test</strong></p>"
def test_field_description_html_multiline():
field = Field(name="name", description="**test**\n\nline")
assert field.description == "**test**\n\nline"
assert field.description_html == "<p><strong>test</strong></p><p>line</p>"
def test_field_description_html_not_set():
field = Field(
name="name",
)
assert field.description is None
assert field.description_html == ""
def test_field_description_text():
field = Field(name="name", description="**test**\n\nline")
assert field.description == "**test**\n\nline"
assert field.description_text == "test line"
def test_field_description_text_plain():
field = Field(name="name", description="It's just a plain text. Another sentence")
assert field.description == "It's just a plain text. Another sentence"
assert field.description_text == "It's just a plain text. Another sentence"
def test_field_pprint():
field = Field.from_descriptor(
{
"name": "name",
"type": "string",
"constraints": {"maxLength": 2},
}
)
expected = """
{'name': 'name', 'type': 'string', 'constraints': {'maxLength': 2}}
"""
assert repr(field) == textwrap.dedent(expected).strip()
@pytest.mark.parametrize("example_value", [(None), ("42"), ("foo")])
def test_field_with_example_set(example_value):
field = Field.from_descriptor(
{
"name": "name",
"type": "string",
"example": example_value,
}
)
assert field.example == example_value
@pytest.mark.parametrize(
"type, example_value, format",
[
("date", "15/03/2023", "%d/%m/%Y"),
("date", "2001-01-01T12:00:00Z", "%Y-%m-%dT%H:%M:%SZ"),
],
)
def test_field_with_example_set_for_datetime(type, example_value, format):
field = Field.from_descriptor(
{
"name": "name",
"type": type,
"example": example_value,
"format": format,
}
)
assert field.example == example_value
|
5a64309f1909c5871346288705c1141003c9b432
|
2ed0210bc41f848a0e67fce3ad6b7a3e85228261
|
/src/pykeen/triples/utils.py
|
922f67737afb5261c062f09aab8502327795785e
|
[
"MIT"
] |
permissive
|
pykeen/pykeen
|
f7483445bd99d3a404bc4ff42538550d56702b66
|
5ff3597b18ab9a220e34361d3c3f262060811df1
|
refs/heads/master
| 2023-08-25T20:29:55.021639
| 2023-08-24T20:05:20
| 2023-08-24T20:05:20
| 242,672,435
| 1,308
| 199
|
MIT
| 2023-09-13T18:18:36
| 2020-02-24T07:26:03
|
Python
|
UTF-8
|
Python
| false
| false
| 6,683
|
py
|
utils.py
|
# -*- coding: utf-8 -*-
"""Instance creation utilities."""
import pathlib
from typing import Callable, List, Mapping, Optional, Sequence, Set, TextIO, Tuple, Union
import numpy as np
import pandas
import torch
from pkg_resources import iter_entry_points
from ..typing import LabeledTriples, MappedTriples
__all__ = [
"compute_compressed_adjacency_list",
"load_triples",
"get_entities",
"get_relations",
"tensor_to_df",
]
TRIPLES_DF_COLUMNS = ("head_id", "head_label", "relation_id", "relation_label", "tail_id", "tail_label")
def _load_importers(group_subname: str) -> Mapping[str, Callable[[str], LabeledTriples]]:
return {
entry_point.name: entry_point.load()
for entry_point in iter_entry_points(group=f"pykeen.triples.{group_subname}")
}
#: Functions for specifying exotic resources with a given prefix
PREFIX_IMPORTERS: Mapping[str, Callable[[str], LabeledTriples]] = _load_importers("prefix_importer")
#: Functions for specifying exotic resources based on their file extension
EXTENSION_IMPORTERS: Mapping[str, Callable[[str], LabeledTriples]] = _load_importers("extension_importer")
def load_triples(
path: Union[str, pathlib.Path, TextIO],
delimiter: str = "\t",
encoding: Optional[str] = None,
column_remapping: Optional[Sequence[int]] = None,
) -> LabeledTriples:
"""Load triples saved as tab separated values.
:param path: The key for the data to be loaded. Typically, this will be a file path ending in ``.tsv``
that points to a file with three columns - the head, relation, and tail. This can also be used to
invoke PyKEEN data importer entrypoints (see below).
:param delimiter: The delimiter between the columns in the file
:param encoding: The encoding for the file. Defaults to utf-8.
:param column_remapping: A remapping if the three columns do not follow the order head-relation-tail.
For example, if the order is head-tail-relation, pass ``(0, 2, 1)``
:returns: A numpy array representing "labeled" triples.
:raises ValueError: if a column remapping was passed but it was not a length 3 sequence
Besides TSV handling, PyKEEN does not come with any importers pre-installed. A few can be found at:
- :mod:`pybel.io.pykeen`
- :mod:`bio2bel.io.pykeen`
"""
if isinstance(path, (str, pathlib.Path)):
path = str(path)
for extension, handler in EXTENSION_IMPORTERS.items():
if path.endswith(f".{extension}"):
return handler(path)
for prefix, handler in PREFIX_IMPORTERS.items():
if path.startswith(f"{prefix}:"):
return handler(path[len(f"{prefix}:") :])
if encoding is None:
encoding = "utf-8"
if column_remapping is not None:
if len(column_remapping) != 3:
raise ValueError("remapping must have length of three")
df = pandas.read_csv(
path,
sep=delimiter,
encoding=encoding,
dtype=str,
header=None,
usecols=column_remapping,
keep_default_na=False,
)
if column_remapping is not None:
df = df[[df.columns[c] for c in column_remapping]]
return df.to_numpy()
def get_entities(triples: torch.LongTensor) -> Set[int]:
"""Get all entities from the triples."""
return set(triples[:, [0, 2]].flatten().tolist())
def get_relations(triples: torch.LongTensor) -> Set[int]:
"""Get all relations from the triples."""
return set(triples[:, 1].tolist())
def tensor_to_df(
tensor: torch.LongTensor,
**kwargs: Union[torch.Tensor, np.ndarray, Sequence],
) -> pandas.DataFrame:
"""Take a tensor of triples and make a pandas dataframe with labels.
:param tensor: shape: (n, 3)
The triples, ID-based and in format (head_id, relation_id, tail_id).
:param kwargs:
Any additional number of columns. Each column needs to be of shape (n,). Reserved column names:
{"head_id", "head_label", "relation_id", "relation_label", "tail_id", "tail_label"}.
:return:
A dataframe with n rows, and 3 + len(kwargs) columns.
:raises ValueError:
If a reserved column name appears in kwargs.
"""
# Input validation
additional_columns = set(kwargs.keys())
forbidden = additional_columns.intersection(TRIPLES_DF_COLUMNS)
if len(forbidden) > 0:
raise ValueError(
f"The key-words for additional arguments must not be in {TRIPLES_DF_COLUMNS}, but {forbidden} were "
f"used.",
)
# convert to numpy
tensor = tensor.cpu().numpy()
data = dict(zip(["head_id", "relation_id", "tail_id"], tensor.T))
# Additional columns
for key, values in kwargs.items():
# convert PyTorch tensors to numpy
if isinstance(values, torch.Tensor):
values = values.cpu().numpy()
data[key] = values
# convert to dataframe
rv = pandas.DataFrame(data=data)
# Re-order columns
columns = list(TRIPLES_DF_COLUMNS[::2]) + sorted(set(rv.columns).difference(TRIPLES_DF_COLUMNS))
return rv.loc[:, columns]
def compute_compressed_adjacency_list(
mapped_triples: MappedTriples,
num_entities: Optional[int] = None,
) -> Tuple[torch.LongTensor, torch.LongTensor, torch.LongTensor]:
"""Compute compressed undirected adjacency list representation for efficient sampling.
The compressed adjacency list format is inspired by CSR sparse matrix format.
:param mapped_triples:
the ID-based triples
:param num_entities:
the number of entities.
:return: a tuple `(degrees, offsets, compressed_adj_lists)` where
- degrees: shape: `(num_entities,)`
- offsets: shape: `(num_entities,)`
- compressed_adj_list: shape: `(2 * num_triples, 2)`
with
.. code::
adj_list[i] = compressed_adj_list[offsets[i]:offsets[i+1]]
"""
num_entities = num_entities or mapped_triples[:, [0, 2]].max().item() + 1
num_triples = mapped_triples.shape[0]
adj_lists: List[List[Tuple[int, float]]] = [[] for _ in range(num_entities)]
for i, (s, _, o) in enumerate(mapped_triples):
adj_lists[s].append((i, o.item()))
adj_lists[o].append((i, s.item()))
degrees = torch.tensor([len(a) for a in adj_lists], dtype=torch.long)
assert torch.sum(degrees) == 2 * num_triples
offset = torch.empty(num_entities, dtype=torch.long)
offset[0] = 0
offset[1:] = torch.cumsum(degrees, dim=0)[:-1]
compressed_adj_lists = torch.cat([torch.as_tensor(adj_list, dtype=torch.long) for adj_list in adj_lists], dim=0)
return degrees, offset, compressed_adj_lists
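# Illustrative usage only (not part of this module); the file name is a placeholder.
# Loading a TSV whose columns are ordered head-tail-relation instead of the
# default head-relation-tail:
#
#   labeled_triples = load_triples("train.tsv", column_remapping=(0, 2, 1))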
|
14d7e4473b37812c6f13c61478f453e4ad51748b
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/impl/gen/view_models/common/__init__.py
|
246529802b71fcefa0e2d3107e781c7d85b85fd2
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 137
|
py
|
__init__.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/gen/view_models/common/__init__.py
pass
|
fdcd60e6950a8cd9994ce09b797ff55c0a18c173
|
6146e33102797407ede06ce2daa56c28fdfa2812
|
/python/GafferUI/SpreadsheetUI/_Formatting.py
|
80656005820b75fb532fd10482989be31e4807e5
|
[
"BSD-3-Clause"
] |
permissive
|
GafferHQ/gaffer
|
e1eb78ba8682bfbb7b17586d6e7b47988c3b7d64
|
59cab96598c59b90bee6d3fc1806492a5c03b4f1
|
refs/heads/main
| 2023-09-01T17:36:45.227956
| 2023-08-30T09:10:56
| 2023-08-30T09:10:56
| 9,043,124
| 707
| 144
|
BSD-3-Clause
| 2023-09-14T09:05:37
| 2013-03-27T00:04:53
|
Python
|
UTF-8
|
Python
| false
| false
| 3,704
|
py
|
_Formatting.py
|
##########################################################################
#
# Copyright (c) 2020, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import imath
import Gaffer
import GafferUI
# _Formatting
# -----------
#
# Formatters are used to present plug values as strings in a table cell.
__valueFormatters = {}
## Returns the value of the plug as it will be formatted in a Spreadsheet.
def formatValue( plug, forToolTip = False ) :
currentPreset = Gaffer.NodeAlgo.currentPreset( plug )
if currentPreset is not None :
return currentPreset
formatter = __valueFormatters.get( plug.__class__, __defaultValueFormatter )
return formatter( plug, forToolTip )
## Registers a custom formatter for the specified `plugType`.
# `formatter` must have the same signature as `formatValue()`.
def registerValueFormatter( plugType, formatter ) :
__valueFormatters[ plugType ] = formatter
# Standard formatters
# -------------------
def __defaultValueFormatter( plug, forToolTip ) :
if not hasattr( plug, "getValue" ) :
return ""
value = plug.getValue()
if isinstance( value, str ) :
return value
elif isinstance( value, ( int, float ) ) :
return GafferUI.NumericWidget.valueToString( value )
elif isinstance( value, ( imath.V2i, imath.V2f, imath.V3i, imath.V3f ) ) :
return ", ".join( GafferUI.NumericWidget.valueToString( v ) for v in value )
# Unknown type. If iteration is supported then use that to
# format as a list, otherwise just cast to string.
try :
strings = [ str( x ) for x in value ]
except :
return str( value )
if forToolTip and not strings :
return "Empty"
separator = "\n" if forToolTip else ", "
return separator.join( strings )
def __transformPlugFormatter( plug, forToolTip ) :
separator = "\n" if forToolTip else " "
return separator.join(
"{label} : {value}".format(
label = c.getName().title() if forToolTip else c.getName()[0].title(),
value = formatValue( c, forToolTip )
)
for c in plug.children()
)
registerValueFormatter( Gaffer.TransformPlug, __transformPlugFormatter )
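# Illustrative only (not part of the original module): a custom formatter can be
# registered the same way for other plug types, e.g. assuming Gaffer.BoolPlug :
#
#   def __boolPlugFormatter( plug, forToolTip ) :
#   	return "Yes" if plug.getValue() else "No"
#
#   registerValueFormatter( Gaffer.BoolPlug, __boolPlugFormatter )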
|
b1aa409965c272d64ff825793379950965f864ef
|
b2bcf07493b5a1bbfb7e29c7f13ac0b380cefead
|
/deprecated/scripts/dcgan_celeba_lightning.py
|
deadf65b98d99c11e834c74b7e59c24564fb3bda
|
[
"MIT"
] |
permissive
|
probml/pyprobml
|
e1952927bceec676eb414f9342470ba4b8e6703b
|
9cc22f3238ae092c2b9bff65d6283c93d38d25d4
|
refs/heads/master
| 2023-08-31T07:36:11.603301
| 2023-08-13T02:47:12
| 2023-08-13T02:47:12
| 65,924,871
| 6,263
| 1,598
|
MIT
| 2023-01-20T23:34:23
| 2016-08-17T16:42:24
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 13,455
|
py
|
dcgan_celeba_lightning.py
|
# -*- coding: utf-8 -*-
"""
Author: Ang Ming Liang
Please run the following command before running the script
wget -q https://raw.githubusercontent.com/sayantanauddy/vae_lightning/main/data.py
or curl https://raw.githubusercontent.com/sayantanauddy/vae_lightning/main/data.py > data.py
Then, make sure to get your kaggle.json from kaggle.com and run
mkdir /root/.kaggle
cp kaggle.json /root/.kaggle/kaggle.json
chmod 600 /root/.kaggle/kaggle.json
rm kaggle.json
to move kaggle.json into the folder the Kaggle API expects.
"""
import superimport
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import truncnorm
from torchvision.utils import make_grid
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from pytorch_lightning import LightningModule, Trainer
from einops import rearrange
from tqdm import tqdm
from data import CelebADataset, CelebADataModule
from torch import Tensor
from argparse import ArgumentParser
from typing import Any, Optional
import torch.backends.cudnn as cudnn
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, StochasticWeightAveraging
from argparse import ArgumentParser
class DCGANGenerator(nn.Module):
def __init__(self, latent_dim: int, feature_maps: int, image_channels: int) -> None:
"""
Args:
latent_dim: Dimension of the latent space
feature_maps: Number of feature maps to use
image_channels: Number of channels of the images from the dataset
"""
super().__init__()
self.gen = nn.Sequential(
self._make_gen_block(latent_dim, feature_maps * 8, kernel_size=4, stride=1, padding=0),
self._make_gen_block(feature_maps * 8, feature_maps * 4),
self._make_gen_block(feature_maps * 4, feature_maps * 2),
self._make_gen_block(feature_maps * 2, feature_maps),
self._make_gen_block(feature_maps, image_channels, last_block=True),
)
@staticmethod
def _make_gen_block(
in_channels: int,
out_channels: int,
kernel_size: int = 4,
stride: int = 2,
padding: int = 1,
bias: bool = False,
last_block: bool = False,
use_relu: bool = False
) -> nn.Sequential:
if not last_block:
gen_block = nn.Sequential(
nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias),
nn.BatchNorm2d(out_channels),
nn.ReLU() if use_relu else nn.Mish(),
)
else:
gen_block = nn.Sequential(
nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias),
nn.Sigmoid(),
)
return gen_block
def forward(self, noise: Tensor) -> Tensor:
return self.gen(noise)
class DCGANDiscriminator(nn.Module):
def __init__(self, feature_maps: int, image_channels: int) -> None:
"""
Args:
feature_maps: Number of feature maps to use
image_channels: Number of channels of the images from the dataset
"""
super().__init__()
self.disc = nn.Sequential(
self._make_disc_block(image_channels, feature_maps, batch_norm=False),
self._make_disc_block(feature_maps, feature_maps * 2),
self._make_disc_block(feature_maps * 2, feature_maps * 4),
self._make_disc_block(feature_maps * 4, feature_maps * 8),
self._make_disc_block(feature_maps * 8, 1, kernel_size=4, stride=1, padding=0, last_block=True),
)
@staticmethod
def _make_disc_block(
in_channels: int,
out_channels: int,
kernel_size: int = 4,
stride: int = 2,
padding: int = 1,
bias: bool = False,
batch_norm: bool = True,
last_block: bool = False,
) -> nn.Sequential:
if not last_block:
disc_block = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias),
nn.BatchNorm2d(out_channels) if batch_norm else nn.Identity(),
nn.LeakyReLU(0.2, inplace=True),
)
else:
disc_block = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias),
nn.Sigmoid(),
)
return disc_block
def forward(self, x: Tensor) -> Tensor:
return self.disc(x).view(-1, 1).squeeze(1)
class DCGAN(LightningModule):
"""
DCGAN implementation.
Example::
from pl_bolts.models.gans import DCGAN
m = DCGAN()
Trainer(gpus=2).fit(m)
Example CLI::
# mnist
python dcgan_module.py --gpus 1
# cifar10
python dcgan_module.py --gpus 1 --dataset cifar10 --image_channels 3
"""
def __init__(
self,
beta1: float = 0.5,
feature_maps_gen: int = 64,
feature_maps_disc: int = 64,
image_channels: int = 3,
latent_dim: int = 100,
learning_rate: float = 0.0002,
topk: Optional[int] = 144,
**kwargs: Any,
) -> None:
"""
Args:
beta1: Beta1 value for Adam optimizer
feature_maps_gen: Number of feature maps to use for the generator
feature_maps_disc: Number of feature maps to use for the discriminator
image_channels: Number of channels of the images from the dataset
latent_dim: Dimension of the latent space
learning_rate: Learning rate
"""
super().__init__()
self.save_hyperparameters()
self.generator = self._get_generator()
self.discriminator = self._get_discriminator()
self.criterion = nn.BCELoss()
self.noise_factor=0
self.topk= topk
def _get_generator(self) -> nn.Module:
generator = DCGANGenerator(self.hparams.latent_dim, self.hparams.feature_maps_gen, self.hparams.image_channels)
generator.apply(self._weights_init)
return generator
def _get_discriminator(self) -> nn.Module:
discriminator = DCGANDiscriminator(self.hparams.feature_maps_disc, self.hparams.image_channels)
discriminator.apply(self._weights_init)
return discriminator
@staticmethod
def _weights_init(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
torch.nn.init.normal_(m.weight, 0.0, 0.02)
elif classname.find("BatchNorm") != -1:
torch.nn.init.normal_(m.weight, 1.0, 0.02)
torch.nn.init.zeros_(m.bias)
def configure_optimizers(self):
lr = self.hparams.learning_rate
betas = (self.hparams.beta1, 0.999)
opt_disc = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=betas)
opt_gen = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=betas)
return [opt_disc, opt_gen], []
def forward(self, noise: Tensor) -> Tensor:
"""
Generates an image given input noise
Example::
noise = torch.rand(batch_size, latent_dim)
gan = GAN.load_from_checkpoint(PATH)
img = gan(noise)
"""
noise = noise.view(*noise.shape, 1, 1)
return self.generator(noise)
def training_step(self, batch, batch_idx, optimizer_idx):
real, _ = batch
# Train discriminator
result = None
if optimizer_idx == 0:
result = self._disc_step(real)
# Train generator
if optimizer_idx == 1:
result = self._gen_step(real)
return result
def _disc_step(self, real: Tensor) -> Tensor:
disc_loss = self._get_disc_loss(real)
self.log("loss/disc", disc_loss, on_epoch=True)
return disc_loss
def _gen_step(self, real: Tensor) -> Tensor:
gen_loss = self._get_gen_loss(real)
self.log("loss/gen", gen_loss, on_epoch=True)
return gen_loss
def _get_disc_loss(self, real: Tensor, smooth=0) -> Tensor:
# Train with real
real = real + self.noise_factor*torch.rand_like(real)
real_pred = self.discriminator(real)
real_gt = smooth*torch.rand_like(real_pred)+(1-smooth)
real_loss = self.criterion(real_pred, real_gt)
# Train with fake
fake_pred = self._get_fake_pred(real)
fake_gt = smooth*torch.rand_like(fake_pred)
fake_loss = self.criterion(fake_pred, fake_gt)
disc_loss = real_loss + fake_loss
return disc_loss
def _get_gen_loss(self, real: Tensor) -> Tensor:
# Train with fake
fake_pred = self._get_fake_pred(real)
topk_predictions = torch.topk( fake_pred , self.topk )[0]
fake_gt = torch.ones_like(topk_predictions)
gen_loss = self.criterion(topk_predictions, fake_gt)
return gen_loss
def _get_fake_pred(self, real: Tensor) -> Tensor:
batch_size = len(real)
noise = self._get_noise(batch_size, self.hparams.latent_dim)
fake = self(noise)
fake = fake + self.noise_factor*torch.rand_like(real)
fake_pred = self.discriminator(fake)
return fake_pred
def _get_noise(self, n_samples: int, latent_dim: int) -> Tensor:
return torch.randn(n_samples, latent_dim, device=self.device)
@staticmethod
def add_model_specific_args(parent_parser: ArgumentParser) -> ArgumentParser:
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--beta1", default=0.5, type=float)
parser.add_argument("--feature_maps_gen", default=64, type=int)
parser.add_argument("--feature_maps_disc", default=64, type=int)
parser.add_argument("--latent_dim", default=100, type=int)
parser.add_argument("--learning_rate", default=0.0002, type=float)
parser.add_argument("--topk", default=10, type=float)
return parser
def plt_image_generated(m, size, threshold=1, fname="generated.png"):
plt.figure(figsize=(size,size))
values = truncnorm.rvs(-threshold, threshold, size=(64, 100))
z = torch.from_numpy(values).float()
imgs = rearrange(make_grid(m(z)), 'c h w -> h w c').detach().numpy()
plt.imshow(imgs)
plt.savefig(fname)
def test_scaling(dm):
# Making sure the scaling is between 0 and 1
for batch in tqdm(dm.train_dataloader()):
x, y = batch
assert 1 >= x.max()
assert 0 <= x.min()
assert torch.any(x < 1)
assert torch.any(x > 0)
def ewa(
averaged_model_parameter: torch.Tensor, model_parameter: torch.Tensor, num_averaged: torch.LongTensor
, smooth=0.9) -> torch.FloatTensor:
"""
Adapted from https://github.com/pytorch/pytorch/blob/v1.7.1/torch/optim/swa_utils.py#L95-L97
"""
alpha = smooth/ (num_averaged + 1)
return averaged_model_parameter*(1-alpha) + model_parameter * alpha
if __name__ == "__main__":
parser = ArgumentParser(description='Hyperparameters for our experiments')
parser.add_argument('--seed', type=int, default=1, help="random seed")
parser.add_argument('--image-size', type=int, default=64, help="Image size")
parser.add_argument('--crop-size', type=int, default=128, help="Crop size")
parser.add_argument('--bs', type=int, default=144, help="batch size")
parser.add_argument('--data-path', type=str, default="kaggle", help="batch size")
parser.add_argument('--gpus', type=int, default=1, help="gpu use")
parser.add_argument('--epochs', type=int, default=50, help="Num of epochs")
parser = DCGAN.add_model_specific_args(parser)
# Hyperparameters
hparams = parser.parse_args()
SEED = hparams.seed
torch.manual_seed(SEED)
np.random.seed(SEED)
cudnn.deterministic = True
cudnn.benchmark = False
IMAGE_SIZE = hparams.image_size
BATCH_SIZE = hparams.bs
CROP = hparams.crop_size
DATA_PATH = hparams.data_path
trans = []
trans.append(transforms.RandomHorizontalFlip())
if CROP > 0:
trans.append(transforms.CenterCrop(CROP))
trans.append(transforms.Resize(IMAGE_SIZE))
trans.append(transforms.ToTensor())
transform = transforms.Compose(trans)
ds = CelebADataset(root='kaggle', split='test', target_type='attr', download=True)
dm = CelebADataModule(data_dir=DATA_PATH,
target_type='attr',
train_transform=transform,
val_transform=transform,
download=True,
batch_size=BATCH_SIZE,
num_workers=1)
dm.prepare_data() # force download now
dm.setup() # force make data loaders now
m = DCGAN()
checkpoint_callback = ModelCheckpoint(monitor='loss/gen_epoch',
dirpath='./checkpoints',
filename='sample-celeba-{epoch:02d}-{gan_loss:.2f}',
save_top_k=3)
runner = Trainer(
logger=None,
gpus = hparams.gpus,
max_epochs = hparams.epochs,
callbacks=[checkpoint_callback])
runner.fit(m, datamodule=dm)
torch.save(m.state_dict(), "dcgan.ckpt")
plt_image_generated(m, 10)
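# Illustrative only (not part of the original script): the ewa() helper above
# matches the avg_fn signature used by torch.optim.swa_utils.AveragedModel, so
# exponential weight averaging could be wired up as, for example:
#
#   from torch.optim.swa_utils import AveragedModel
#   averaged = AveragedModel(m, avg_fn=ewa)
#   averaged.update_parameters(m)  # call after each optimizer step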
|
21aee623068c32901dfcd10da718b1fef21fc3ff
|
32ebd1bf59f0e9be34363e3c9e34b10d2cf3eb9e
|
/2019/Reversing/BIGbadEASYvm/Admin/flag_gen.py
|
46c454ed04fbb39ca6d2b76686feccd7383bc9ad
|
[] |
no_license
|
teambi0s/InCTFi
|
d7fb450ec7b8b08e36dcee656d6111d9bd14127c
|
b249e5b41dba80bcbfc6ccd986c8fd64d8afa87c
|
refs/heads/master
| 2022-07-22T09:54:58.393301
| 2021-08-19T18:14:40
| 2021-08-19T18:14:40
| 152,749,662
| 139
| 64
| null | 2022-07-05T22:14:31
| 2018-10-12T12:48:23
|
CSS
|
UTF-8
|
Python
| false
| false
| 753
|
py
|
flag_gen.py
|
import random
import time
random.seed(time.time())
s = '''mov cnt r2 {}
chk lt r1 r2
jmp chk 4095'''
s2 = '''mov cnt r2 {}
chk gt r1 r2
jmp chk 4095'''
flag = "inctf{1_kN0w_1t5_R3411y_3z_&_fuNNy_but_1ts_h0n3st_w0rk!}"
for letter in flag:
k = random.randrange(13,29)
f_loc = random.randrange(0,123) % k
f_loc2 = random.randrange(0,123) % k
print "in r1"
if(f_loc == f_loc2):
f_loc = f_loc + 1
for i in range(k+1):
if(i==f_loc):
print s.format(ord(letter))
elif(i==f_loc2):
print s2.format(ord(letter))
elif(random.choice([0,1])):
print s.format(random.randrange(0,ord(letter)-1))
else:
print s2.format(random.randrange(ord(letter)+1,254))
|
39729318ce7ae70e9174b85c608f29fb0cc4e07a
|
07e810873aa0134ba5017ccfef641d1038ca9b92
|
/hs_core/hydroshare/utils.py
|
24b92fdcc8f8a19cffdbd735a43cc2ad4a299667
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
hydroshare/hydroshare
|
9093e6dce047a30d4b2b7720257a7841d209353f
|
69855813052243c702c9b0108d2eac3f4f1a768f
|
refs/heads/develop
| 2023-09-04T12:52:30.816709
| 2023-08-30T16:46:20
| 2023-08-30T16:46:20
| 24,703,136
| 207
| 57
|
BSD-3-Clause
| 2023-09-14T20:20:16
| 2014-10-02T02:19:41
|
Python
|
UTF-8
|
Python
| false
| false
| 48,245
|
py
|
utils.py
|
import mimetypes
import os
import tempfile
import logging
import shutil
import copy
from uuid import uuid4
from urllib.parse import quote
import errno
import urllib
import aiohttp
import asyncio
from asgiref.sync import sync_to_async
from urllib.request import pathname2url, url2pathname
from django.apps import apps
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils.timezone import now
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.contrib.auth.models import User, Group
from django.core.files import File
from django.core.files.uploadedfile import UploadedFile
from django.core.files.storage import DefaultStorage
from django.core.validators import validate_email, URLValidator
from hs_access_control.models.community import Community
from mezzanine.conf import settings
from hs_core.signals import pre_create_resource, post_create_resource, pre_add_files_to_resource, \
post_add_files_to_resource
from hs_core.models import AbstractResource, BaseResource, ResourceFile, GeospatialRelation
from hs_core.hydroshare.hs_bagit import create_bag_metadata_files
from django_irods.icommands import SessionException
from django_irods.storage import IrodsStorage
from theme.models import QuotaMessage
logger = logging.getLogger(__name__)
class ResourceFileSizeException(Exception):
pass
class ResourceFileValidationException(Exception):
pass
class QuotaException(Exception):
pass
class ResourceCopyException(Exception):
pass
class ResourceVersioningException(Exception):
pass
def get_resource_types():
resource_types = []
for model in apps.get_models():
if issubclass(model, AbstractResource) and model != BaseResource:
if not getattr(model, 'archived_model', False):
resource_types.append(model)
return resource_types
def get_content_types():
content_types = []
from hs_file_types.models.base import AbstractLogicalFile
for model in apps.get_models():
if issubclass(model, AbstractLogicalFile):
content_types.append(model)
return content_types
def get_resource_instance(app, model_name, pk, or_404=True):
model = apps.get_model(app, model_name)
if or_404:
return get_object_or_404(model, pk=pk)
else:
return model.objects.get(pk=pk)
def get_resource_by_shortkey(shortkey, or_404=True):
try:
res = BaseResource.objects.select_related("raccess").get(short_id=shortkey)
except BaseResource.DoesNotExist:
if or_404:
raise Http404(shortkey)
else:
raise
content = res.get_content_model()
content.raccess = res.raccess
assert content, (res, res.content_model)
return content
def get_resource_by_doi(doi, or_404=True):
try:
res = BaseResource.objects.get(doi=doi)
except BaseResource.DoesNotExist:
if or_404:
raise Http404(doi)
else:
raise
content = res.get_content_model()
assert content, (res, res.content_model)
return content
def user_from_id(user, raise404=True):
if isinstance(user, User):
return user
tgt = None
if str(user).isnumeric():
try:
tgt = User.objects.get(pk=int(user))
except ValueError:
pass
except ObjectDoesNotExist:
pass
else:
try:
tgt = User.objects.get(username__iexact=user)
except ObjectDoesNotExist:
try:
tgt = User.objects.get(email__iexact=user)
except ObjectDoesNotExist:
pass
if tgt is None:
if raise404:
raise Http404('User not found')
else:
raise ObjectDoesNotExist('User not found')
return tgt
def group_from_id(grp):
if isinstance(grp, Group):
return grp
try:
tgt = Group.objects.get(name=grp)
except ObjectDoesNotExist:
try:
tgt = Group.objects.get(pk=int(grp))
except ValueError:
raise Http404('Group not found')
except TypeError:
raise Http404('Group not found')
except ObjectDoesNotExist:
raise Http404('Group not found')
return tgt
def community_from_id(community):
if isinstance(community, Community):
return community
try:
tgt = Community.objects.get(name=community)
except ObjectDoesNotExist:
try:
tgt = Community.objects.get(id=int(community))
except ValueError:
raise Http404('Community not found')
except TypeError:
raise Http404('Community not found')
except ObjectDoesNotExist:
raise Http404('Community not found')
return tgt
def get_user_zone_status_info(user):
"""
This function should be called to determine whether user zone functionality should be
enabled or not on the web site front end
Args:
user: the requesting user
Returns:
enable_user_zone boolean indicating whether user zone functionality should be enabled or
not on the web site front end
"""
if user is None:
return None
if not hasattr(user, 'userprofile') or user.userprofile is None:
return None
enable_user_zone = user.userprofile.create_irods_user_account and settings.REMOTE_USE_IRODS
return enable_user_zone
def is_federated(homepath):
"""
Check if the selected file via the iRODS browser is from a federated zone or not
Args:
homepath: the logical iRODS file name with full logical path, e.g., selected from
iRODS browser
Returns:
        True if the selected file indicated by homepath is from a federated zone, False otherwise
"""
homepath = homepath.strip()
homepath_list = homepath.split('/')
# homepath is an iRODS logical path in the format of
# /irods_zone/home/irods_account_username/collection_relative_path, so homepath_list[1]
# is the irods_zone which we can use to form the fed_proxy_path to check whether
# fed_proxy_path exists to hold hydroshare resources in a federated zone
if homepath_list[1]:
fed_proxy_path = os.path.join(homepath_list[1], 'home',
settings.HS_IRODS_PROXY_USER_IN_USER_ZONE)
fed_proxy_path = '/' + fed_proxy_path
else:
# the test path input is invalid, return False meaning it is not federated
return False
if settings.REMOTE_USE_IRODS:
irods_storage = IrodsStorage('federated')
else:
irods_storage = IrodsStorage()
# if the iRODS proxy user in hydroshare zone can list homepath and the federation zone proxy
# user path, it is federated; otherwise, it is not federated
return irods_storage.exists(homepath) and irods_storage.exists(fed_proxy_path)
def get_federated_zone_home_path(filepath):
"""
Args:
filepath: the iRODS data object file path that included zone name in the format of
/zone_name/home/user_name/file_path
Returns:
the zone name extracted from filepath
"""
if filepath and filepath.startswith('/'):
split_path_strs = filepath.split('/')
# the Zone name should follow the first slash
zone = split_path_strs[1]
return '/{zone}/home/{local_proxy_user}'.format(
zone=zone, local_proxy_user=settings.HS_IRODS_PROXY_USER_IN_USER_ZONE)
else:
return ''
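# Illustrative sketch (not part of the original module; zone and file names below
# are hypothetical): given the documented path format, a call such as
#   get_federated_zone_home_path('/myZone/home/someuser/data/file.nc')
# would return '/myZone/home/<HS_IRODS_PROXY_USER_IN_USER_ZONE>'.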
# TODO: replace with a cache facility that has automatic cleanup
# TODO: pass a list rather than a string to allow commas in filenames.
def get_fed_zone_files(irods_fnames):
"""
Get the files from iRODS federated zone to Django server for metadata extraction on-demand
for specific resource types
Args:
irods_fnames: the logical iRODS file names with full logical path separated by comma
Returns:
a list of the named temp files which have been copied over to local Django server
or raise exceptions if input parameter is wrong or iRODS operations fail
Note: application must delete these files after use.
"""
ret_file_list = []
if isinstance(irods_fnames, str):
ifnames = irods_fnames.split(',')
elif isinstance(irods_fnames, list):
ifnames = irods_fnames
else:
raise ValueError("Input parameter to get_fed_zone_files() must be String or List")
irods_storage = IrodsStorage('federated')
for ifname in ifnames:
fname = os.path.basename(ifname.rstrip(os.sep))
# TODO: this is statistically unique but not guaranteed to be unique.
tmpdir = os.path.join(settings.TEMP_FILE_DIR, uuid4().hex)
tmpfile = os.path.join(tmpdir, fname)
try:
os.makedirs(tmpdir)
except OSError as ex:
if ex.errno == errno.EEXIST:
shutil.rmtree(tmpdir)
os.makedirs(tmpdir)
else:
raise Exception(str(ex))
irods_storage.getFile(ifname, tmpfile)
ret_file_list.append(tmpfile)
return ret_file_list
# TODO: make the local cache file (and cleanup) part of ResourceFile state?
def get_file_from_irods(resource, file_path, temp_dir=None):
"""
Copy the file (given by file_path) from iRODS (local or federated zone)
over to django (temp directory) which is
necessary for manipulating the file (e.g. metadata extraction, zipping etc.).
Note: The caller is responsible for cleaning the temp directory
:param resource: an instance of CompositeResource
:param file_path: storage path (absolute path) of a file in iRODS
:param temp_dir: (optional) existing temp directory to which the file will be copied from
irods. If temp_dir is None then a new temporary directory will be created.
:return: path of the copied file
"""
istorage = resource.get_irods_storage()
file_name = os.path.basename(file_path)
if temp_dir is not None:
if not temp_dir.startswith(settings.TEMP_FILE_DIR):
raise ValueError("Specified temp directory is not valid")
elif not os.path.exists(temp_dir):
raise ValueError("Specified temp directory doesn't exist")
tmpdir = temp_dir
else:
tmpdir = get_temp_dir()
tmpfile = os.path.join(tmpdir, file_name)
istorage.getFile(file_path, tmpfile)
copied_file = tmpfile
return copied_file
def get_temp_dir():
"""Creates a temporary directory"""
tmpdir = os.path.join(settings.TEMP_FILE_DIR, uuid4().hex)
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
os.makedirs(tmpdir)
return tmpdir
# TODO: should be ResourceFile.replace
def replace_resource_file_on_irods(new_file, original_resource_file, user):
"""
Replaces the specified resource file with file (new_file) by copying to iRODS
(local or federated zone)
:param new_file: file path for the file to be copied to iRODS
:param original_resource_file: an instance of ResourceFile that is to be replaced
:param user: user who is replacing the resource file.
:return:
"""
ori_res = original_resource_file.resource
istorage = ori_res.get_irods_storage()
ori_storage_path = original_resource_file.storage_path
# Note: this doesn't update metadata at all.
istorage.saveFile(new_file, ori_storage_path, True)
# do this so that the bag will be regenerated prior to download of the bag
resource_modified(ori_res, by_user=user, overwrite_bag=False)
# TODO: should be inside ResourceFile, and federation logic should be transparent.
def get_resource_file_name_and_extension(res_file):
"""
Gets the full file name with path, file base name, and extension of the specified resource file
:param res_file: an instance of ResourceFile for which file extension to be retrieved
:return: (full filename with path, full file base name, file extension)
ex: "/my_path_to/ABC.nc" --> ("/my_path_to/ABC.nc", "ABC.nc", ".nc")
"""
f_fullname = res_file.storage_path
f_basename = os.path.basename(f_fullname)
_, file_ext = os.path.splitext(f_fullname)
return f_fullname, f_basename, file_ext
# TODO: should be classmethod of ResourceFile
def get_resource_files_by_extension(resource, file_extension):
matching_files = []
for res_file in resource.files.all():
_, _, file_ext = get_resource_file_name_and_extension(res_file)
if file_ext == file_extension:
matching_files.append(res_file)
return matching_files
def get_resource_file_by_name(resource, file_name):
for res_file in resource.files.all():
_, fl_name, _ = get_resource_file_name_and_extension(res_file)
if fl_name == file_name:
return res_file
return None
def get_resource_file_by_id(resource, file_id):
return resource.files.filter(id=file_id).first()
def copy_resource_files_and_AVUs(src_res_id, dest_res_id):
"""
Copy resource files and AVUs from source resource to target resource including both
on iRODS storage and on Django database
:param src_res_id: source resource uuid
:param dest_res_id: target resource uuid
:return:
"""
avu_list = ['bag_modified', 'metadata_dirty', 'isPublic', 'resourceType']
src_res = get_resource_by_shortkey(src_res_id)
tgt_res = get_resource_by_shortkey(dest_res_id)
# This makes the assumption that the destination is in the same exact zone.
# Also, bags and similar attached files are not copied.
istorage = src_res.get_irods_storage()
# This makes an exact copy of all physical files.
src_files = os.path.join(src_res.root_path, 'data')
# This has to be one segment short of the source because it is a target directory.
dest_files = tgt_res.root_path
istorage.copyFiles(src_files, dest_files)
src_coll = src_res.root_path
tgt_coll = tgt_res.root_path
for avu_name in avu_list:
value = istorage.getAVU(src_coll, avu_name)
# make formerly public things private
if avu_name == 'isPublic':
istorage.setAVU(tgt_coll, avu_name, 'false')
# bag_modified AVU needs to be set to true for copied resource
elif avu_name == 'bag_modified':
istorage.setAVU(tgt_coll, avu_name, 'true')
# everything else gets copied literally
else:
istorage.setAVU(tgt_coll, avu_name, value)
# link copied resource files to Django resource model
files = src_res.files.all()
# if resource has logical files, then those logical files also need copying
map_logical_files = {}
for src_logical_file in src_res.logical_files:
map_logical_files[src_logical_file] = src_logical_file.get_copy(tgt_res)
def copy_file_to_target_resource(scr_file, save_to_db=True):
kwargs = {}
src_storage_path = scr_file.get_storage_path(resource=src_res)
tgt_storage_path = src_storage_path.replace(src_res.short_id, tgt_res.short_id)
kwargs['content_object'] = tgt_res
kwargs['file_folder'] = scr_file.file_folder
if tgt_res.is_federated:
kwargs['resource_file'] = None
kwargs['fed_resource_file'] = tgt_storage_path
else:
kwargs['resource_file'] = tgt_storage_path
kwargs['fed_resource_file'] = None
if save_to_db:
return ResourceFile.objects.create(**kwargs)
else:
return ResourceFile(**kwargs)
# use bulk_create for files without logical file to copy all files at once
files_bulk_create = []
files_without_logical_file = files.filter(logical_file_object_id__isnull=True)
for f in files_without_logical_file:
file_to_save = copy_file_to_target_resource(f, save_to_db=False)
files_bulk_create.append(file_to_save)
if files_bulk_create:
ResourceFile.objects.bulk_create(files_bulk_create)
# copy files with logical file one at a time
files_with_logical_file = files\
.filter(logical_file_object_id__isnull=False)\
.select_related('logical_file_content_type')
seen_logical_files = {}
for f in files_with_logical_file:
if (f.logical_file_object_id, f.logical_file_content_type.id) not in seen_logical_files:
# accessing logical_file for each file (f.logical_file) generates one database query
seen_logical_files[(f.logical_file_object_id, f.logical_file_content_type.id)] = f.logical_file
logical_file = seen_logical_files[(f.logical_file_object_id, f.logical_file_content_type.id)]
new_resource_file = copy_file_to_target_resource(f)
tgt_logical_file = map_logical_files[logical_file]
tgt_logical_file.add_resource_file(new_resource_file)
for lf in map_logical_files:
if lf.type_name() == 'ModelProgramLogicalFile':
# for any model program logical files in original resource need to copy the model program file types
lf.copy_mp_file_types(tgt_logical_file=map_logical_files[lf])
elif lf.type_name() == 'ModelInstanceLogicalFile':
# for any model instance logical files in original resource need to set the executed_by (FK) relation
lf.copy_executed_by(tgt_logical_file=map_logical_files[lf])
if src_res.resource_type.lower() == "collectionresource":
# clone contained_res list of original collection and add to new collection
# note that new collection resource will not contain "deleted resources"
tgt_res.resources.set(src_res.resources.all())
@sync_to_async
def _get_relations():
return list(GeospatialRelation.objects.all())
@sync_to_async
def _save_relation(relation, json):
return relation.update_from_geoconnex_response(json)
async def get_jsonld_from_geoconnex(relation, client):
relative_id = relation.value.split("ref/").pop()
collection = relative_id.split("/")[0]
id = relative_id.split("/")[1]
url = f"/collections/{collection}/items/{id}?" \
"f=jsonld&lang=en-US&skipGeometry=true"
logger.debug(f"CHECKING RELATION '{relation.text}'")
async with client.get(url) as resp:
return await _save_relation(relation, await resp.json())
async def update_geoconnex_texts(relations=[]):
# Task to update Relations from Geoconnex API
if not relations:
relations = await _get_relations()
validator = URLValidator(regex="geoconnex")
relations = [r for r in relations if isGeoconnexUrl(r.value, validator)]
async with aiohttp.ClientSession("https://reference.geoconnex.us") as client:
await asyncio.gather(*[
get_jsonld_from_geoconnex(relation, client)
for relation in relations
])
logger.debug("DONE CHECKING RELATIONS")
def isGeoconnexUrl(text, validator=None):
if not validator:
validator = URLValidator(regex="geoconnex")
try:
validator(text)
return True
except ValidationError:
return False
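# Illustrative sketch (hypothetical URLs): since the validator is constructed with
# regex="geoconnex", a value roughly passes when it has an accepted URL scheme and
# its text contains that substring, e.g.
#   isGeoconnexUrl('https://geoconnex.us/ref/gages/1000001') -> True
#   isGeoconnexUrl('https://example.org/some/other/page')    -> False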
def copy_and_create_metadata(src_res, dest_res):
"""
Copy metadata from source resource to target resource except identifier, publisher, and date
which need to be created for the target resource as appropriate. This method is used for
resource copying and versioning.
:param src_res: source resource
:param dest_res: target resource
:return:
"""
# copy metadata from source resource to target resource except three elements
exclude_elements = ['identifier', 'publisher', 'date']
dest_res.metadata.copy_all_elements_from(src_res.metadata, exclude_elements)
# create Identifier element that is specific to the new resource
dest_res.metadata.create_element('identifier', name='hydroShareIdentifier',
url='{0}/resource/{1}'.format(current_site_url(),
dest_res.short_id))
# create date element that is specific to the new resource
dest_res.metadata.create_element('date', type='created', start_date=dest_res.created)
dest_res.metadata.create_element('date', type='modified', start_date=dest_res.updated)
# copy date element to the new resource if exists
src_res_valid_date_filter = src_res.metadata.dates.all().filter(type='valid')
if src_res_valid_date_filter:
res_valid_date = src_res_valid_date_filter[0]
dest_res.metadata.create_element('date', type='valid', start_date=res_valid_date.start_date,
end_date=res_valid_date.end_date)
src_res_avail_date_filter = src_res.metadata.dates.all().filter(type='available')
if src_res_avail_date_filter:
res_avail_date = src_res_avail_date_filter[0]
dest_res.metadata.create_element('date', type='available',
start_date=res_avail_date.start_date,
end_date=res_avail_date.end_date)
# create the key/value metadata
dest_res.extra_metadata = copy.deepcopy(src_res.extra_metadata)
dest_res.save()
# generate metadata and map xml files for logical files in the target resource
for logical_file in dest_res.logical_files:
logical_file.create_aggregation_xml_documents()
# TODO: should be BaseResource.mark_as_modified.
def resource_modified(resource, by_user=None, overwrite_bag=True):
"""
Set an AVU flag that forces the bag to be recreated before fetch.
This indicates that some content of the bag has been edited.
"""
if not by_user:
user = None
else:
if isinstance(by_user, User):
user = by_user
else:
try:
user = User.objects.get(username=by_user)
except User.DoesNotExist:
user = None
if user:
resource.last_changed_by = user
resource.updated = now().isoformat()
# seems this is the best place to sync resource title with metadata title
resource.title = resource.metadata.title.value
resource.save()
res_modified_date = resource.metadata.dates.all().filter(type='modified').first()
if res_modified_date:
resource.metadata.update_element('date', res_modified_date.id)
if overwrite_bag:
create_bag_metadata_files(resource)
# set bag_modified-true AVU pair for the modified resource in iRODS to indicate
# the resource is modified for on-demand bagging.
set_dirty_bag_flag(resource)
# TODO: should be part of BaseResource
def set_dirty_bag_flag(resource):
"""
Set bag_modified=true AVU pair for the modified resource in iRODS
to indicate that the resource is modified for on-demand bagging.
set metadata_dirty (AVU) to 'true' to indicate that metadata has been modified for the
resource so that xml metadata files need to be generated on-demand
This is done so that the bag creation can be "lazy", in the sense that the
bag is recreated only after multiple changes to the bag files, rather than
after each change. It is created when someone attempts to download it.
"""
res_coll = resource.root_path
istorage = resource.get_irods_storage()
istorage.setAVU(res_coll, "bag_modified", "true")
istorage.setAVU(res_coll, "metadata_dirty", "true")
def _validate_email(email):
try:
validate_email(email)
return True
except ValidationError:
return False
def get_profile(user):
return user.userprofile
def current_site_url():
"""Returns fully qualified URL (no trailing slash) for the current site."""
from django.contrib.sites.models import Site
current_site = Site.objects.get_current()
protocol = getattr(settings, 'MY_SITE_PROTOCOL', 'http')
port = getattr(settings, 'MY_SITE_PORT', '')
url = '%s://%s' % (protocol, current_site.domain)
if port:
url += ':%s' % port
return url
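# Illustrative sketch (hypothetical settings): with MY_SITE_PROTOCOL='https', no
# MY_SITE_PORT, and a current Site domain of 'www.hydroshare.org', this returns
# 'https://www.hydroshare.org' (no trailing slash).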
def get_file_mime_type(file_name):
# TODO: looks like the mimetypes module can't find all mime types
    # We may need to use the python magic module instead
file_name = "{}".format(file_name)
file_format_type = mimetypes.guess_type(file_name)[0]
if not file_format_type:
# TODO: this is probably not the right way to get the mime type
file_format_type = 'application/%s' % os.path.splitext(file_name)[1][1:]
return file_format_type
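# Illustrative sketch (hypothetical file names):
#   get_file_mime_type('readme.txt') -> 'text/plain'
#   get_file_mime_type('data.foo')   -> 'application/foo' (fallback built from the extension)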
def check_file_dict_for_error(file_validation_dict):
if 'are_files_valid' in file_validation_dict:
if not file_validation_dict['are_files_valid']:
error_message = file_validation_dict.get('message',
"Uploaded file(s) failed validation.")
raise ResourceFileValidationException(error_message)
def raise_file_size_exception():
from .resource import FILE_SIZE_LIMIT_FOR_DISPLAY
error_msg = 'The resource file is larger than the supported size limit: %s.' \
% FILE_SIZE_LIMIT_FOR_DISPLAY
raise ResourceFileSizeException(error_msg)
def validate_resource_file_size(resource_files):
from .resource import check_resource_files
valid, size = check_resource_files(resource_files)
if not valid:
raise_file_size_exception()
# if no exception, return the total size of all files
return size
def validate_resource_file_type(resource_cls, files):
supported_file_types = resource_cls.get_supported_upload_file_types()
# see if file type checking is needed
if '.*' in supported_file_types:
# all file types are supported
return
supported_file_types = [x.lower() for x in supported_file_types]
for f in files:
file_ext = os.path.splitext(f.name)[1]
if file_ext.lower() not in supported_file_types:
err_msg = "{file_name} is not a supported file type for {res_type} resource"
err_msg = err_msg.format(file_name=f.name, res_type=resource_cls)
raise ResourceFileValidationException(err_msg)
def validate_resource_file_count(resource_cls, files, resource=None):
if len(files) > 0:
if len(resource_cls.get_supported_upload_file_types()) == 0:
err_msg = "Content files are not allowed in {res_type} resource"
err_msg = err_msg.format(res_type=resource_cls)
raise ResourceFileValidationException(err_msg)
err_msg = "Multiple content files are not supported in {res_type} resource"
err_msg = err_msg.format(res_type=resource_cls)
if len(files) > 1:
if not resource_cls.allow_multiple_file_upload():
raise ResourceFileValidationException(err_msg)
if resource is not None and resource.files.all().count() > 0:
if not resource_cls.can_have_multiple_files():
raise ResourceFileValidationException(err_msg)
def convert_file_size_to_unit(size, unit):
"""
Convert file size to unit for quota comparison
:param size: in byte unit
:param unit: should be one of the four: 'KB', 'MB', 'GB', or 'TB'
:return: the size converted to the pass-in unit
"""
unit = unit.lower()
if unit not in ('kb', 'mb', 'gb', 'tb'):
raise ValidationError('Pass-in unit for file size conversion must be one of KB, MB, GB, '
'or TB')
factor = 1024.0
kbsize = size / factor
if unit == 'kb':
return kbsize
mbsize = kbsize / factor
if unit == 'mb':
return mbsize
gbsize = mbsize / factor
if unit == 'gb':
return gbsize
tbsize = gbsize / factor
if unit == 'tb':
return tbsize
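# Illustrative sketch (hypothetical sizes): converting one GiB (1073741824 bytes):
#   convert_file_size_to_unit(1073741824, 'KB') -> 1048576.0
#   convert_file_size_to_unit(1073741824, 'MB') -> 1024.0
#   convert_file_size_to_unit(1073741824, 'GB') -> 1.0
#   convert_file_size_to_unit(1073741824, 'PB') raises ValidationError (unsupported unit)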
def validate_user_quota(user_or_username, size):
"""
validate to make sure the user is not over quota with the newly added size
:param user_or_username: the user to be validated
:param size: the newly added file size to add on top of the user's used quota to be validated.
size input parameter should be in byte unit
:return: raise exception for the over quota case
"""
if user_or_username:
if isinstance(user_or_username, User):
user = user_or_username
else:
try:
user = User.objects.get(username=user_or_username)
except User.DoesNotExist:
user = None
else:
user = None
if user:
# validate it is within quota hard limit
uq = user.quotas.filter(zone='hydroshare').first()
if uq:
if not QuotaMessage.objects.exists():
QuotaMessage.objects.create()
qmsg = QuotaMessage.objects.first()
enforce_flag = qmsg.enforce_quota
if enforce_flag:
hard_limit = qmsg.hard_limit_percent
used_size = uq.add_to_used_value(size)
used_percent = uq.used_percent
rounded_percent = round(used_percent, 2)
rounded_used_val = round(used_size, 4)
if used_percent >= hard_limit or uq.remaining_grace_period == 0:
msg_template_str = '{}{}\n\n'.format(qmsg.enforce_content_prepend,
qmsg.content)
msg_str = msg_template_str.format(used=rounded_used_val,
unit=uq.unit,
allocated=uq.allocated_value,
zone=uq.zone,
percent=rounded_percent)
raise QuotaException(msg_str)
def resource_pre_create_actions(resource_type, resource_title, page_redirect_url_key,
files=(), metadata=None,
requesting_user=None, **kwargs):
from .resource import check_resource_type
from hs_core.views.utils import validate_metadata
if not resource_title:
resource_title = 'Untitled resource'
else:
resource_title = resource_title.strip()
if len(resource_title) == 0:
resource_title = 'Untitled resource'
resource_cls = check_resource_type(resource_type)
if len(files) > 0:
size = validate_resource_file_size(files)
validate_resource_file_count(resource_cls, files)
validate_resource_file_type(resource_cls, files)
# validate it is within quota hard limit
validate_user_quota(requesting_user, size)
if not metadata:
metadata = []
else:
validate_metadata(metadata, resource_type)
page_url_dict = {}
# receivers need to change the values of this dict if file validation fails
file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}
# Send pre-create resource signal - let any other app populate the empty metadata list object
# also pass title to other apps, and give other apps a chance to populate page_redirect_url
# if they want to redirect to their own page for resource creation rather than use core
# resource creation code
pre_create_resource.send(sender=resource_cls, metadata=metadata, files=files,
title=resource_title,
url_key=page_redirect_url_key, page_url_dict=page_url_dict,
validate_files=file_validation_dict,
user=requesting_user, **kwargs)
if len(files) > 0:
check_file_dict_for_error(file_validation_dict)
return page_url_dict, resource_title, metadata
def resource_post_create_actions(resource, user, metadata, **kwargs):
# receivers need to change the values of this dict if file validation fails
file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}
# Send post-create resource signal
post_create_resource.send(sender=type(resource), resource=resource, user=user,
metadata=metadata,
validate_files=file_validation_dict, **kwargs)
check_file_dict_for_error(file_validation_dict)
def prepare_resource_default_metadata(resource, metadata, res_title):
add_title = True
for element in metadata:
if 'title' in element:
if 'value' in element['title']:
res_title = element['title']['value']
add_title = False
else:
metadata.remove(element)
break
if add_title:
metadata.append({'title': {'value': res_title}})
add_language = True
for element in metadata:
if 'language' in element:
if 'code' in element['language']:
add_language = False
else:
metadata.remove(element)
break
if add_language:
metadata.append({'language': {'code': 'eng'}})
add_rights = True
for element in metadata:
if 'rights' in element:
if 'statement' in element['rights'] and 'url' in element['rights']:
add_rights = False
else:
metadata.remove(element)
break
if add_rights:
# add the default rights/license element
statement = 'This resource is shared under the Creative Commons Attribution CC BY.'
url = 'http://creativecommons.org/licenses/by/4.0/'
metadata.append({'rights': {'statement': statement, 'url': url}})
metadata.append({'identifier': {'name': 'hydroShareIdentifier',
'url': '{0}/resource/{1}'.format(current_site_url(),
resource.short_id)}})
# remove if there exists the 'type' element as system generates this element
# remove if there exists 'format' elements - since format elements are system generated based
# on resource content files
# remove any 'date' element which is not of type 'valid'. All other date elements are
# system generated
for element in list(metadata):
if 'type' in element or 'format' in element:
metadata.remove(element)
if 'date' in element:
if 'type' in element['date']:
if element['date']['type'] != 'valid':
metadata.remove(element)
metadata.append({'type': {'url': '{0}/terms/{1}'.format(current_site_url(),
resource.__class__.__name__)}})
metadata.append({'date': {'type': 'created', 'start_date': resource.created}})
metadata.append({'date': {'type': 'modified', 'start_date': resource.updated}})
# only add the resource creator as the creator for metadata if there is not already
# creator data in the metadata object
metadata_keys = [list(element.keys())[0].lower() for element in metadata]
if 'creator' not in metadata_keys:
creator_data = get_party_data_from_user(resource.creator)
metadata.append({'creator': creator_data})
def get_user_party_name(user):
user_profile = get_profile(user)
if user.last_name and user.first_name:
if user_profile.middle_name:
party_name = '%s, %s %s' % (user.last_name, user.first_name,
user_profile.middle_name)
else:
party_name = '%s, %s' % (user.last_name, user.first_name)
elif user.last_name:
party_name = user.last_name
elif user.first_name:
party_name = user.first_name
elif user_profile.middle_name:
party_name = user_profile.middle_name
else:
party_name = ''
return party_name
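# Illustrative sketch (hypothetical user): for first_name='Jane', last_name='Doe'
# and profile middle_name='Q.' the party name is 'Doe, Jane Q.'; without a middle
# name it is 'Doe, Jane'; if only one of the name fields is set, that field is
# returned on its own.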
def get_party_data_from_user(user):
party_data = {}
user_profile = get_profile(user)
party_name = get_user_party_name(user)
party_data['name'] = party_name
party_data['email'] = user.email
party_data['hydroshare_user_id'] = user.pk
party_data['phone'] = user_profile.phone_1
party_data['organization'] = user_profile.organization
party_data['identifiers'] = user_profile.identifiers
return party_data
# TODO: make this part of resource api. resource --> self.
def resource_file_add_pre_process(resource, files, user, extract_metadata=False,
source_names=[], **kwargs):
if __debug__:
assert (isinstance(source_names, list))
if resource.raccess.published and not user.is_superuser:
raise ValidationError("Only admin can add files to a published resource")
resource_cls = resource.__class__
if len(files) > 0:
size = validate_resource_file_size(files)
validate_user_quota(resource.get_quota_holder(), size)
validate_resource_file_type(resource_cls, files)
validate_resource_file_count(resource_cls, files, resource)
file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}
pre_add_files_to_resource.send(sender=resource_cls, files=files, resource=resource, user=user,
source_names=source_names,
validate_files=file_validation_dict,
extract_metadata=extract_metadata, **kwargs)
check_file_dict_for_error(file_validation_dict)
# TODO: make this part of resource api. resource --> self.
def resource_file_add_process(resource, files, user, extract_metadata=False,
source_names=[], **kwargs):
from .resource import add_resource_files
if __debug__:
assert (isinstance(source_names, list))
if resource.raccess.published and not user.is_superuser:
raise ValidationError("Only admin can add files to a published resource")
folder = kwargs.pop('folder', '')
full_paths = kwargs.pop('full_paths', {})
auto_aggregate = kwargs.pop('auto_aggregate', True)
resource_file_objects = add_resource_files(resource.short_id, *files, folder=folder,
source_names=source_names, full_paths=full_paths,
auto_aggregate=auto_aggregate, user=user)
resource.refresh_from_db()
# receivers need to change the values of this dict if file validation fails
# in case of file validation failure it is assumed the resource type also deleted the file
file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}
post_add_files_to_resource.send(sender=resource.__class__, files=files,
source_names=source_names,
resource=resource, user=user,
validate_files=file_validation_dict,
extract_metadata=extract_metadata,
res_files=resource_file_objects, **kwargs)
check_file_dict_for_error(file_validation_dict)
return resource_file_objects
# TODO: move this to BaseResource
def create_empty_contents_directory(resource):
res_contents_dir = resource.file_path
istorage = resource.get_irods_storage()
if not istorage.exists(res_contents_dir):
istorage.session.run("imkdir", None, '-p', res_contents_dir)
def add_file_to_resource(resource, f, folder='', source_name='',
check_target_folder=False, add_to_aggregation=True, user=None):
"""
Add a ResourceFile to a Resource. Adds the 'format' metadata element to the resource.
:param resource: Resource to which file should be added
:param f: File-like object to add to a resource
:param folder: folder at which the file will live
:param source_name: the logical file name of the resource content file for
federated iRODS resource or the federated zone name;
By default, it is empty. A non-empty value indicates
the file needs to be added into the federated zone, either
from local disk where f holds the uploaded file from local
disk, or from the federated zone directly where f is empty
but source_name has the whole data object
iRODS path in the federated zone
:param check_target_folder: if true and the resource is a composite resource then uploading
a file to the specified folder will be validated before adding the file to the resource
:param add_to_aggregation: if true and the resource is a composite resource then the file
being added to the resource also will be added to a fileset aggregation if such an aggregation
exists in the file path
:param user: user who is adding file to the resource
:return: The identifier of the ResourceFile added.
"""
# validate parameters
if resource.raccess.published:
if user is None or not user.is_superuser:
raise ValidationError("Only admin can add files to a published resource")
if check_target_folder and resource.resource_type != 'CompositeResource':
raise ValidationError("Resource must be a CompositeResource for validating target folder")
if f:
if check_target_folder and folder:
tgt_full_upload_path = os.path.join(resource.file_path, folder)
if not resource.can_add_files(target_full_path=tgt_full_upload_path):
err_msg = "File can't be added to this folder which represents an aggregation"
raise ValidationError(err_msg)
openfile = File(f) if not isinstance(f, UploadedFile) else f
ret = ResourceFile.create(resource, openfile, folder=folder, source=None)
if add_to_aggregation:
if folder and resource.resource_type == 'CompositeResource':
aggregation = resource.get_model_aggregation_in_path(folder)
if aggregation is None:
aggregation = resource.get_fileset_aggregation_in_path(folder)
if aggregation is not None:
# make the added file part of the fileset or model program/instance aggregation
aggregation.add_resource_file(ret)
# add format metadata element if necessary
file_format_type = get_file_mime_type(f.name)
elif source_name:
try:
# create from existing iRODS file
ret = ResourceFile.create(resource, file=None, folder=folder, source=source_name)
except SessionException as ex:
try:
ret.delete()
except Exception:
pass
# raise the exception for the calling function to inform the error on the page interface
raise SessionException(ex.exitcode, ex.stdout, ex.stderr)
# add format metadata element if necessary
file_format_type = get_file_mime_type(source_name)
else:
raise ValueError('Invalid input parameter is passed into this add_file_to_resource() '
'function')
# TODO: generate this from data in ResourceFile rather than extension
if not resource.metadata.formats.filter(value=file_format_type).exists():
resource.metadata.create_element('format', value=file_format_type)
ret.calculate_size()
return ret
class ZipContents(object):
"""
Extract the contents of a zip file one file at a time
using a generator.
"""
def __init__(self, zip_file):
self.zip_file = zip_file
def black_list_path(self, file_path):
return file_path.startswith('__MACOSX/')
def black_list_name(self, file_name):
return file_name == '.DS_Store'
def get_files(self):
temp_dir = tempfile.mkdtemp()
try:
for name_path in self.zip_file.namelist():
if not self.black_list_path(name_path):
name = os.path.basename(name_path)
if name != '':
if not self.black_list_name(name):
self.zip_file.extract(name_path, temp_dir)
file_path = os.path.join(temp_dir, name_path)
logger.debug("Opening {0} as File with name {1}".format(file_path,
name_path))
f = File(file=open(file_path, 'rb'),
name=name_path)
f.size = os.stat(file_path).st_size
yield f
finally:
shutil.rmtree(temp_dir)
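# Illustrative usage sketch (hypothetical archive path): iterate over the files of
# an uploaded zip without keeping the whole extraction around; the temporary
# directory is removed once the generator is exhausted.
#   import zipfile
#   with zipfile.ZipFile('/tmp/upload.zip') as zf:
#       for f in ZipContents(zf).get_files():
#           print(f.name, f.size)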
def get_file_storage():
return IrodsStorage() if getattr(settings, 'USE_IRODS', False) else DefaultStorage()
def resolve_request(request):
if request.POST:
return request.POST
if request.data:
return request.data
return {}
def check_aggregations(resource, res_files):
"""
A helper to support creating aggregations for a given composite resource when new files are
added to the resource
Checks for aggregations in each folder first, then checks for aggregations in each file
:param resource: resource object
:param res_files: list of ResourceFile objects to check for aggregations creation
:return:
"""
new_logical_files = []
if resource.resource_type == "CompositeResource":
from hs_file_types.utils import set_logical_file_type
# check files for aggregation creation
for res_file in res_files:
if not res_file.has_logical_file or (res_file.logical_file.is_fileset
or res_file.logical_file.is_model_instance):
# create aggregation from file 'res_file'
logical_file = set_logical_file_type(res=resource, user=None, file_id=res_file.pk,
fail_feedback=False)
if logical_file:
new_logical_files.append(logical_file)
return new_logical_files
def build_preview_data_url(resource, folder_path, spatial_coverage):
"""Get a GeoServer layer preview link."""
if resource.raccess.public is True:
try:
geoserver_url = settings.HSWS_GEOSERVER_URL
resource_id = resource.short_id
layer_id = '.'.join('/'.join(folder_path.split('/')[2:]).split('.')[:-1])
for k, v in settings.HSWS_GEOSERVER_ESCAPE.items():
layer_id = layer_id.replace(k, v)
layer_id = quote(f'HS-{resource_id}:{layer_id}')
extent = quote(','.join((
str(spatial_coverage['westlimit']),
str(spatial_coverage['southlimit']),
str(spatial_coverage['eastlimit']),
str(spatial_coverage['northlimit']),
)))
layer_srs = quote(spatial_coverage['projection'][-9:])
preview_data_url = (
f'{geoserver_url}/HS-{resource_id}/wms'
f'?service=WMS&version=1.1&request=GetMap'
f'&layers={layer_id}'
f'&bbox={extent}'
f'&width=800&height=500'
f'&srs={layer_srs}'
f'&format=application/openlayers'
)
except Exception as e:
logger.exception("build_preview_data_url: " + str(e))
preview_data_url = None
else:
preview_data_url = None
return preview_data_url
def encode_resource_url(url):
"""
URL encodes a full resource file/folder url.
:param url: a string url
:return: url encoded string
"""
parsed_url = urllib.parse.urlparse(url)
url_encoded_path = pathname2url(parsed_url.path)
encoded_url = parsed_url._replace(path=url_encoded_path).geturl()
return encoded_url
def decode_resource_url(url):
"""
URL decodes a full resource file/folder url.
:param url: an encoded string url
:return: url decoded string
"""
parsed_url = urllib.parse.urlparse(url)
url_encoded_path = url2pathname(parsed_url.path)
encoded_url = parsed_url._replace(path=url_encoded_path).geturl()
return encoded_url
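# Illustrative sketch (hypothetical URL, POSIX host): only the path component is
# percent-encoded, and decode_resource_url() reverses the operation.
#   url = 'https://www.hydroshare.org/resource/abc123/data/contents/my file.txt'
#   encode_resource_url(url)
#   -> 'https://www.hydroshare.org/resource/abc123/data/contents/my%20file.txt'
#   decode_resource_url(encode_resource_url(url)) == url -> True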
|
5674780aecfbeffa105efe82ffd1855c00461558
|
a0eb6744e6f7f509b96d21f0bc8b3f8387f6861c
|
/notebook/test_main.py
|
c130833421d6b41566b15e0524b76f4f8fc595dd
|
[
"MIT"
] |
permissive
|
nkmk/python-snippets
|
a6c66bdf999502e52f4795a3074ced63bf440817
|
f9dd286a9cf93f474e20371f8fffc4732cb3c4d5
|
refs/heads/master
| 2023-08-03T04:20:05.606293
| 2023-07-26T13:21:11
| 2023-07-26T13:21:11
| 98,900,570
| 253
| 77
|
MIT
| 2020-10-25T01:12:53
| 2017-07-31T14:54:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 172
|
py
|
test_main.py
|
import test_module
print('This is test_main.py')
print('test_module.__name__ is', test_module.__name__)
print('---')
print('call test_module.func()')
test_module.func()
|
f8d24578762c4b70183a493f19ddd3d6235cd75a
|
81f2e2c5ae46c93a76dd612326089e1219e38694
|
/utils/tests/test_method_timer.py
|
0f9964ce17e0c5b51b30c069ff860a604ab1c07c
|
[
"MIT"
] |
permissive
|
istresearch/scrapy-cluster
|
02a4931f26b215efda1e4934c6e2a2421c2115a2
|
01861c2dca1563aab740417d315cc4ebf9b73f72
|
refs/heads/dev
| 2023-08-13T21:14:17.893387
| 2021-04-07T18:46:01
| 2021-04-07T18:46:01
| 33,957,281
| 1,220
| 408
|
MIT
| 2023-09-07T20:12:24
| 2015-04-14T21:12:04
|
Python
|
UTF-8
|
Python
| false
| false
| 818
|
py
|
test_method_timer.py
|
'''
Offline utility tests
'''
from unittest import TestCase
from scutils.method_timer import MethodTimer
import time
class TestMethodTimer(TestCase):
def test_under(self):
@MethodTimer.timeout(1, False)
def method():
time.sleep(0.5)
return True
result = method()
self.assertTrue(result)
def test_over(self):
@MethodTimer.timeout(1, "STUFF")
def method():
time.sleep(1.5)
return True
result = method()
self.assertEqual(result, "STUFF")
def test_params(self):
@MethodTimer.timeout(1, "STUFF2")
def method(param1, param2, param3):
time.sleep(1.5)
return True
result = method(True, "Stuff", ['item'])
self.assertEqual(result, "STUFF2")
|
7fbb661985a38a53975de4d93a9020aa9f0e1e06
|
119646d6e1f13582c577fd7b87c9654839a0b806
|
/tests/spec/crm/test_products.py
|
2748b64f4e7b5e83b9586c29afd4139cce5549d5
|
[] |
permissive
|
HubSpot/hubspot-api-python
|
446daaceeb3a6ce27edcd0414603c6d4bc07e327
|
d51a64c413461c0b82d8a41743e752d878747ca1
|
refs/heads/master
| 2023-08-31T09:52:56.583803
| 2023-08-07T11:00:27
| 2023-08-07T11:00:27
| 248,865,684
| 227
| 98
|
Apache-2.0
| 2023-09-14T15:25:19
| 2020-03-20T22:41:24
|
Python
|
UTF-8
|
Python
| false
| false
| 417
|
py
|
test_products.py
|
from hubspot import HubSpot
from hubspot.crm.products import BasicApi, BatchApi, SearchApi, PublicObjectApi
def test_is_discoverable():
apis = HubSpot().crm.products
assert isinstance(apis.basic_api, BasicApi)
assert isinstance(apis.batch_api, BatchApi)
assert isinstance(apis.search_api, SearchApi)
assert isinstance(apis.public_object_api, PublicObjectApi)
assert hasattr(apis, "get_all")
|
02bcf32fa881658071fd26597df124ccdcc8ffba
|
6c8305ea1df9687df1c0d2b0ace56733516c6322
|
/readthedocs/proxito/tests/test_headers.py
|
2f8fd6626d6ca94c353772053c51b4fedf7114ae
|
[
"MIT"
] |
permissive
|
readthedocs/readthedocs.org
|
9806083aa744c2308267919480a692e1e003e45d
|
bf88ce6d1085d922322a5fadce63a22c5544c830
|
refs/heads/main
| 2023-09-05T20:22:34.281891
| 2023-09-05T12:41:52
| 2023-09-05T12:41:52
| 841,835
| 2,894
| 1,509
|
MIT
| 2023-09-14T20:36:00
| 2010-08-16T19:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 8,839
|
py
|
test_headers.py
|
import django_dynamic_fixture as fixture
from django.test import override_settings
from django.urls import reverse
from readthedocs.builds.constants import LATEST
from readthedocs.projects.models import Domain, HTTPHeader
from .base import BaseDocServing
@override_settings(
PUBLIC_DOMAIN="dev.readthedocs.io",
PUBLIC_DOMAIN_USES_HTTPS=True,
)
class ProxitoHeaderTests(BaseDocServing):
def test_redirect_headers(self):
r = self.client.get(
"", secure=True, headers={"host": "project.dev.readthedocs.io"}
)
self.assertEqual(r.status_code, 302)
self.assertEqual(r["X-RTD-Redirect"], "system")
self.assertEqual(
r["Location"],
"https://project.dev.readthedocs.io/en/latest/",
)
self.assertEqual(r["Cache-Tag"], "project")
self.assertEqual(r["X-RTD-Project"], "project")
self.assertEqual(r["X-RTD-Project-Method"], "public_domain")
self.assertEqual(r["X-RTD-Domain"], "project.dev.readthedocs.io")
self.assertIsNone(r.get("X-RTD-Version"))
self.assertIsNone(r.get("X-RTD-Path"))
def test_serve_headers(self):
r = self.client.get(
"/en/latest/", secure=True, headers={"host": "project.dev.readthedocs.io"}
)
self.assertEqual(r.status_code, 200)
self.assertEqual(r["Cache-Tag"], "project,project:latest")
self.assertEqual(r["X-RTD-Domain"], "project.dev.readthedocs.io")
self.assertEqual(r["X-RTD-Project"], "project")
self.assertEqual(r["X-RTD-Project-Method"], "public_domain")
self.assertEqual(r["X-RTD-Version"], "latest")
self.assertEqual(r["X-RTD-version-Method"], "path")
self.assertEqual(
r["X-RTD-Path"], "/proxito/media/html/project/latest/index.html"
)
def test_subproject_serve_headers(self):
r = self.client.get(
"/projects/subproject/en/latest/",
secure=True,
headers={"host": "project.dev.readthedocs.io"},
)
self.assertEqual(r.status_code, 200)
self.assertEqual(r["Cache-Tag"], "subproject,subproject:latest")
self.assertEqual(r["X-RTD-Domain"], "project.dev.readthedocs.io")
self.assertEqual(r["X-RTD-Project"], "subproject")
        # Note: the reported method is not strictly accurate here, since the
        # project slug is actually resolved from the URL's path; a separate
        # method value for that case is not implemented.
self.assertEqual(r["X-RTD-Project-Method"], "public_domain")
self.assertEqual(r["X-RTD-Version"], "latest")
self.assertEqual(r["X-RTD-version-Method"], "path")
self.assertEqual(
r["X-RTD-Path"], "/proxito/media/html/subproject/latest/index.html"
)
def test_404_headers(self):
r = self.client.get(
"/foo/bar.html", secure=True, headers={"host": "project.dev.readthedocs.io"}
)
self.assertEqual(r.status_code, 404)
self.assertEqual(r["Cache-Tag"], "project")
self.assertEqual(r["X-RTD-Domain"], "project.dev.readthedocs.io")
self.assertEqual(r["X-RTD-Project"], "project")
self.assertEqual(r["X-RTD-Project-Method"], "public_domain")
self.assertEqual(r["X-RTD-version-Method"], "path")
self.assertIsNone(r.get("X-RTD-Version"))
self.assertIsNone(r.get("X-RTD-Path"))
def test_custom_domain_headers(self):
hostname = "docs.random.com"
self.domain = fixture.get(
Domain,
project=self.project,
domain=hostname,
https=False,
)
r = self.client.get("/en/latest/", headers={"host": hostname})
self.assertEqual(r.status_code, 200)
self.assertEqual(r["Cache-Tag"], "project,project:latest")
self.assertEqual(r["X-RTD-Domain"], self.domain.domain)
self.assertEqual(r["X-RTD-Project"], self.project.slug)
self.assertEqual(r["X-RTD-Project-Method"], "custom_domain")
self.assertEqual(r["X-RTD-Version"], "latest")
self.assertEqual(r["X-RTD-version-Method"], "path")
self.assertEqual(
r["X-RTD-Path"], "/proxito/media/html/project/latest/index.html"
)
def test_footer_headers(self):
version = self.project.versions.get(slug=LATEST)
url = (
reverse("footer_html")
+ f"?project={self.project.slug}&version={version.slug}"
)
r = self.client.get(url, headers={"host": "project.dev.readthedocs.io"})
self.assertEqual(r.status_code, 200)
self.assertEqual(r["Cache-Tag"], "project,project:latest,project:rtd-footer")
def test_user_domain_headers(self):
hostname = "docs.domain.com"
self.domain = fixture.get(
Domain,
project=self.project,
domain=hostname,
https=False,
)
http_header = "X-My-Header"
http_header_secure = "X-My-Secure-Header"
http_header_value = "Header Value; Another Value;"
fixture.get(
HTTPHeader,
domain=self.domain,
name=http_header,
value=http_header_value,
only_if_secure_request=False,
)
fixture.get(
HTTPHeader,
domain=self.domain,
name=http_header_secure,
value=http_header_value,
only_if_secure_request=True,
)
r = self.client.get("/en/latest/", headers={"host": hostname})
self.assertEqual(r.status_code, 200)
self.assertEqual(r[http_header], http_header_value)
self.assertFalse(r.has_header(http_header_secure))
r = self.client.get("/en/latest/", headers={"host": hostname}, secure=True)
self.assertEqual(r.status_code, 200)
self.assertEqual(r[http_header], http_header_value)
self.assertEqual(r[http_header_secure], http_header_value)
def test_hosting_integrations_header(self):
version = self.project.versions.get(slug=LATEST)
version.addons = True
version.save()
r = self.client.get(
"/en/latest/", secure=True, headers={"host": "project.dev.readthedocs.io"}
)
self.assertEqual(r.status_code, 200)
self.assertIsNotNone(r.get("X-RTD-Hosting-Integrations"))
self.assertEqual(r["X-RTD-Hosting-Integrations"], "true")
@override_settings(ALLOW_PRIVATE_REPOS=False)
def test_cache_headers_public_version_with_private_projects_not_allowed(self):
r = self.client.get(
"/en/latest/", secure=True, headers={"host": "project.dev.readthedocs.io"}
)
self.assertEqual(r.status_code, 200)
self.assertEqual(r["CDN-Cache-Control"], "public")
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_cache_headers_public_version_with_private_projects_allowed(self):
r = self.client.get(
"/en/latest/", secure=True, headers={"host": "project.dev.readthedocs.io"}
)
self.assertEqual(r.status_code, 200)
self.assertEqual(r["CDN-Cache-Control"], "public")
@override_settings(ALLOW_PRIVATE_REPOS=False)
def test_cache_headers_robots_txt_with_private_projects_not_allowed(self):
r = self.client.get(
"/robots.txt", headers={"host": "project.dev.readthedocs.io"}
)
self.assertEqual(r.status_code, 200)
self.assertEqual(r["CDN-Cache-Control"], "public")
self.assertEqual(r["Cache-Tag"], "project,project:robots.txt")
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_cache_headers_robots_txt_with_private_projects_allowed(self):
r = self.client.get(
"/robots.txt", headers={"host": "project.dev.readthedocs.io"}
)
self.assertEqual(r.status_code, 200)
self.assertEqual(r["CDN-Cache-Control"], "public")
self.assertEqual(r["Cache-Tag"], "project,project:robots.txt")
@override_settings(ALLOW_PRIVATE_REPOS=False)
    def test_cache_headers_sitemap_xml_with_private_projects_not_allowed(self):
r = self.client.get(
"/sitemap.xml", secure=True, headers={"host": "project.dev.readthedocs.io"}
)
self.assertEqual(r.status_code, 200)
self.assertEqual(r["CDN-Cache-Control"], "public")
self.assertEqual(r["Cache-Tag"], "project,project:sitemap.xml")
@override_settings(ALLOW_PRIVATE_REPOS=True)
    def test_cache_headers_sitemap_xml_with_private_projects_allowed(self):
r = self.client.get(
"/sitemap.xml", secure=True, headers={"host": "project.dev.readthedocs.io"}
)
self.assertEqual(r.status_code, 200)
self.assertEqual(r["CDN-Cache-Control"], "public")
self.assertEqual(r["Cache-Tag"], "project,project:sitemap.xml")
|
279108c19e2dd074bb8092478daca9942a09626a
|
b049a961f100444dde14599bab06a0a4224d869b
|
/sdk/python/pulumi_azure_native/hybridcompute/v20220510preview/_enums.py
|
3c91a2b34cccb2fcfa0d81b0d063de3abe84398e
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure-native
|
b390c88beef8381f9a71ab2bed5571e0dd848e65
|
4c499abe17ec6696ce28477dde1157372896364e
|
refs/heads/master
| 2023-08-30T08:19:41.564780
| 2023-08-28T19:29:04
| 2023-08-28T19:29:04
| 172,386,632
| 107
| 29
|
Apache-2.0
| 2023-09-14T13:17:00
| 2019-02-24T20:30:21
|
Python
|
UTF-8
|
Python
| false
| false
| 945
|
py
|
_enums.py
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'AssessmentModeTypes',
'PatchModeTypes',
'ResourceIdentityType',
'StatusLevelTypes',
]
class AssessmentModeTypes(str, Enum):
"""
Specifies the assessment mode.
"""
IMAGE_DEFAULT = "ImageDefault"
AUTOMATIC_BY_PLATFORM = "AutomaticByPlatform"
class PatchModeTypes(str, Enum):
"""
Specifies the patch mode.
"""
IMAGE_DEFAULT = "ImageDefault"
AUTOMATIC_BY_PLATFORM = "AutomaticByPlatform"
AUTOMATIC_BY_OS = "AutomaticByOS"
MANUAL = "Manual"
class ResourceIdentityType(str, Enum):
"""
The identity type.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
class StatusLevelTypes(str, Enum):
"""
The level code.
"""
INFO = "Info"
WARNING = "Warning"
ERROR = "Error"
|
973e4862a1c54da4efa6d5a10d8fe4adf9fb581a
|
26cadb387da6dc71f5536b9d74ad44b7b974d26d
|
/launch_xml/test/launch_xml/test_include.py
|
9fc23fc2e4a05ea6f915783e7573f4e1dddd686a
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ros2/launch
|
84971e86f6131976bdfaf872fca12f1a6a377cd6
|
f2b232555900d62c3cec839a49afd4cdc01cda58
|
refs/heads/rolling
| 2023-08-24T14:33:18.237122
| 2023-08-23T17:12:30
| 2023-08-23T17:12:30
| 32,485,326
| 116
| 139
|
Apache-2.0
| 2023-09-14T12:07:30
| 2015-03-18T21:27:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,666
|
py
|
test_include.py
|
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test parsing an include action."""
import io
from pathlib import Path
import textwrap
from launch import LaunchService
from launch.actions import IncludeLaunchDescription
from launch.frontend import Parser
from launch.launch_description_sources import AnyLaunchDescriptionSource
def test_include():
"""Parse node xml example."""
# Always use posix style paths in launch XML files.
path = (Path(__file__).parent / 'executable.xml').as_posix()
xml_file = \
"""\
<launch>
<include file="{}"/>
</launch>
""".format(path) # noqa: E501
xml_file = textwrap.dedent(xml_file)
root_entity, parser = Parser.load(io.StringIO(xml_file))
ld = parser.parse_description(root_entity)
include = ld.entities[0]
assert isinstance(include, IncludeLaunchDescription)
assert isinstance(include.launch_description_source, AnyLaunchDescriptionSource)
ls = LaunchService(debug=True)
ls.include_launch_description(ld)
assert 0 == ls.run()
if __name__ == '__main__':
test_include()
|
e963ee93e3436e24020a235f4f07d25e6723f2df
|
3afe7348e830a0c5139fb7cf393736e18b59ab4a
|
/src/clusterfuzz/_internal/tests/core/base/bisection_test.py
|
04190fc5c4981965f47476ff0fbc66bada723be9
|
[
"Apache-2.0"
] |
permissive
|
google/clusterfuzz
|
00845899e081dbbb89b70a75ce0b7eba3da73b02
|
6501a839b27a264500244f32bace8bee4d5cb9a2
|
refs/heads/master
| 2023-09-03T17:34:17.821599
| 2023-09-01T16:11:51
| 2023-09-01T16:11:51
| 168,060,021
| 5,420
| 639
|
Apache-2.0
| 2023-09-13T16:40:54
| 2019-01-29T00:19:40
|
Python
|
UTF-8
|
Python
| false
| false
| 7,933
|
py
|
bisection_test.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for bisection."""
import datetime
import unittest
from unittest import mock
from clusterfuzz._internal.base import bisection
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.tests.test_libs import helpers
from clusterfuzz._internal.tests.test_libs import mock_config
from clusterfuzz._internal.tests.test_libs import test_utils
@test_utils.with_cloud_emulators('datastore')
class RequestBisectionTest(unittest.TestCase):
"""Tests request_bisection."""
def setUp(self):
helpers.patch_environ(self)
helpers.patch(self, [
'clusterfuzz._internal.build_management.build_manager.get_primary_bucket_path',
'clusterfuzz._internal.build_management.build_manager.get_revisions_list',
'clusterfuzz._internal.build_management.revisions.get_component_range_list',
'clusterfuzz._internal.config.local_config.ProjectConfig',
'clusterfuzz._internal.google_cloud_utils.blobs.read_key',
'clusterfuzz._internal.google_cloud_utils.pubsub.PubSubClient.publish',
])
self.mock.ProjectConfig.return_value = mock_config.MockConfig({
'env': {
'PROJECT_NAME': 'test-project',
},
'bisect_service': {
'pubsub_topic': '/projects/project/topics/topic',
}
})
data_types.FuzzTarget(
id='libFuzzer_proj_target',
engine='libFuzzer',
project='proj',
binary='target').put()
self.testcase = data_types.Testcase(
timestamp=datetime.datetime(2021, 1, 1),
crash_type='crash-type',
crash_state='A\nB\nC',
security_flag=True,
bug_information='1337',
job_type='libfuzzer_asan_proj',
fuzzer_name='libFuzzer',
overridden_fuzzer_name='libFuzzer_proj_target',
regression='123:456',
fixed='123:456',
crash_revision=3,
security_severity=data_types.SecuritySeverity.MEDIUM,
additional_metadata='{"last_tested_crash_revision": 4}')
self.testcase.put()
data_types.Job(
name='libfuzzer_asan_proj',
environment_string='MAIN_REPO = https://repo_url').put()
self.mock.read_key.return_value = b'reproducer'
self.mock.get_component_range_list.return_value = [
{
'link_text': 'old:new',
},
]
def _test(self, sanitizer, old_commit='old', new_commit='new', repo_url=''):
"""Test task publication."""
bisection.request_bisection(self.testcase)
publish_calls = self.mock.publish.call_args_list
bisect_types = ('regressed', 'fixed')
self.assertEqual(2, len(publish_calls))
for bisect_type, publish_call in zip(bisect_types, publish_calls):
topic = publish_call[0][1]
message = publish_call[0][2][0]
self.assertEqual('/projects/project/topics/topic', topic)
self.assertEqual(b'reproducer', message.data)
self.assertDictEqual({
'crash_state': 'A\nB\nC',
'crash_type': 'crash-type',
'security': 'True',
'severity': 'Medium',
'fuzz_target': 'target',
'new_commit': new_commit,
'old_commit': old_commit,
'project_name': 'proj',
'repo_url': repo_url,
'sanitizer': sanitizer,
'testcase_id': '1',
'issue_id': '1337',
'type': bisect_type,
'timestamp': '2021-01-01T00:00:00',
}, message.attributes)
testcase = self.testcase.key.get()
self.assertTrue(testcase.get_metadata('requested_regressed_bisect'))
self.assertTrue(testcase.get_metadata('requested_fixed_bisect'))
def test_request_bisection_asan(self):
"""Basic regressed test (asan)."""
self.testcase.job_type = 'libfuzzer_asan_proj'
self.testcase.put()
self._test('address', repo_url='https://repo_url')
def test_request_bisection_msan(self):
"""Basic regressed test (asan)."""
self.testcase.job_type = 'libfuzzer_msan_proj'
self.testcase.put()
self._test('memory')
def test_request_bisection_ubsan(self):
"""Basic regressed test (ubsan)."""
self.testcase.job_type = 'libfuzzer_ubsan_proj'
self.testcase.put()
self._test('undefined')
def test_request_bisection_blackbox(self):
"""Test request bisection for blackbox."""
self.testcase.job_type = 'blackbox'
self.testcase.overridden_fuzzer_name = None
self.testcase.put()
bisection.request_bisection(self.testcase)
self.assertEqual(0, self.mock.publish.call_count)
def test_request_bisection_non_security(self):
"""Test request bisection for non-security testcases."""
self.testcase.job_type = 'libfuzzer_asan_proj'
self.testcase.security_flag = False
self.testcase.put()
bisection.request_bisection(self.testcase)
self.assertEqual(0, self.mock.publish.call_count)
def test_request_bisection_flaky(self):
"""Test request bisection for flaky testcases."""
self.testcase.job_type = 'libfuzzer_asan_proj'
self.testcase.one_time_crasher_flag = True
self.testcase.put()
bisection.request_bisection(self.testcase)
self.assertEqual(0, self.mock.publish.call_count)
def test_request_bisection_no_bug(self):
"""Test request bisection for testcases with no bug attached."""
self.testcase.job_type = 'libfuzzer_asan_proj'
self.testcase.bug_information = ''
self.testcase.put()
bisection.request_bisection(self.testcase)
self.assertEqual(0, self.mock.publish.call_count)
def test_request_bisection_invalid_range(self):
"""Test request bisection for testcases with no bug attached."""
self.testcase.job_type = 'libfuzzer_asan_proj'
self.testcase.regression = 'NA'
self.testcase.fixed = 'NA'
self.testcase.put()
bisection.request_bisection(self.testcase)
publish_calls = self.mock.publish.call_args_list
self.assertEqual(1, len(publish_calls))
publish_call = publish_calls[0]
topic = publish_call[0][1]
message = publish_call[0][2][0]
self.assertEqual('/projects/project/topics/topic', topic)
self.assertEqual(b'', message.data)
self.assertDictEqual({
'testcase_id': '1',
'type': 'invalid',
}, message.attributes)
def test_request_bisection_once_only(self):
"""Test request bisection for testcases isn't repeated if already
requested."""
self.testcase.set_metadata('requested_regressed_bisect', True)
self.testcase.set_metadata('requested_fixed_bisect', True)
self.testcase.put()
bisection.request_bisection(self.testcase)
self.assertEqual(0, self.mock.publish.call_count)
def test_request_single_commit_range(self):
"""Request bisection with a single commit (invalid range)."""
self.mock.get_primary_bucket_path.return_value = 'bucket'
self.mock.get_revisions_list.return_value = list(range(6))
self.mock.get_component_range_list.return_value = [
{
'link_text': 'one',
},
]
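    # With only a single commit in the range, both ends of the published
    # bisect range collapse to the same commit ('one').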
bisection.request_bisection(self.testcase)
self._test(
'address',
old_commit='one',
new_commit='one',
repo_url='https://repo_url')
self.mock.get_component_range_list.assert_has_calls([
mock.call(123, 456, 'libfuzzer_asan_proj'),
mock.call(0, 3, 'libfuzzer_asan_proj'),
mock.call(123, 456, 'libfuzzer_asan_proj'),
mock.call(4, 5, 'libfuzzer_asan_proj'),
])
|
30719bb57fc34191a821577be22da73f4d605278
|
c8f7773f80acf75345af37c67f0d925cf0234118
|
/python_prototype/tir4.py
|
aa17a3e13b50fc125322b54e4b324ca31a203b08
|
[
"MIT"
] |
permissive
|
uglyDwarf/linuxtrack
|
14a8854b826d57fa28ca276ec6ba5c54a1ddaa31
|
fe9b98b51f6ee6521d38bd1f7edf84839227e588
|
refs/heads/master
| 2023-05-25T02:05:04.332165
| 2023-02-22T20:20:35
| 2023-02-22T20:20:35
| 39,029,490
| 156
| 35
|
MIT
| 2023-05-19T07:27:16
| 2015-07-13T18:21:31
|
C
|
UTF-8
|
Python
| false
| false
| 19,721
|
py
|
tir4.py
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
##bc##################################################################
## (C) Copyright 2009, All Rights Reserved.
##
## Name : tir4.py
## Author : DT Austin
## Created : 07/02/2009
## SVN date : $Date$
##
######################################################################
## Description: python device driver for the TIR4 device
##ec##################################################################
import sys
import time
try:
import usb
except ImportError:
print("ERROR: python lib USB missing!")
sys.exit(1)
try:
import bulk_config_data
except ImportError:
print("ERROR: bulk_config_data missing!")
sys.exit(1)
# public Constants
TIR_VENDOR_ID = 0x131d
TIR_PRODUCT_ID = 0x0156
CROPPED_NUM_VLINES = 288
CROPPED_NUM_HPIX = 710
RAW_NUM_VLINES = 512
RAW_NUM_HPIX = 1024
FRAME_QUEUE_MAX_DEPTH = 2
# #define v4l2_fourcc(a,b,c,d) (((__u32)(a)<<0)|((__u32)(b)<<8)|((__u32)(c)<<16)|((__u32)(d)<<24))
# #define V4L2_PIX_FMT_TIR4 v4l2_fourcc('T','I','R','4') /* TIR4 compress */
# #define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R','G','B','1') /* 8 RGB-3-3-2 */
V4L2_PIX_FMT_TIR4 = "TIR4"
V4L2_PIX_FMT_GREY = "RGB1"
# private Constants
NOP_MSGLEN = 0XEF
TBD0_MSGLEN = 0X07
TBD0_MSGID = 0X20
VALID_MIN_MSGLEN = 0x02
VALID_MAX_MSGLEN = 0x3E
VALID_MSGID = 0x1c
DEVICE_STRIPE_LEN = 4
VSYNC_DEVICE_STRIPE = (0x00, 0x00, 0x00, 0x00)
STRIPE_LEN = 3
TIR_INTERFACE_ID = 0
TIR_BULK_IN_EP = 0x82
TIR_BULK_OUT_EP = 0x01
TIR_CONFIGURATION = 0x01
TIR_ALTINTERFACE = 0x00
LINE_NUM_0X100_BIT_MASK = 0X20
START_PIX_0X100_BIT_MASK = 0X80
START_PIX_0X200_BIT_MASK = 0X10
STOP_PIX_0X100_BIT_MASK = 0X40
STOP_PIX_0X200_BIT_MASK = 0X08
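# The *_BIT_MASK values above select bits of the fourth byte of a device
# stripe; they extend the 8-bit line/start/stop values held in the first
# three bytes by 0x100 or 0x200 (see DeviceStripe.do_xlate_device_stripe).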
BULK_READ_SIZE = 0X4000
BULK_READ_TIMEOUT = 20 # milliseconds
BULK_WRITE_TIMEOUT = 1000 # milliseconds
TIR_LED_MSGID = 0x10
TIR_IR_LED_BIT_MASK = 0x80
TIR_GREEN_LED_BIT_MASK = 0x20
TIR_RED_LED_BIT_MASK = 0x10
TIR_BLUE_LED_BIT_MASK = 0x40
TIR_ALL_LED_BIT_MASK = TIR_IR_LED_BIT_MASK | TIR_GREEN_LED_BIT_MASK | TIR_RED_LED_BIT_MASK | TIR_BLUE_LED_BIT_MASK
READ_DISCONNECT_TIMEOUT = 2 # seconds
# private Static members
vline_offset = 12
hpix_offset = 80
crop_frames = True
# public data structures
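# Minimal stand-in for an enum: each name in the given sequence becomes an
# instance attribute holding its position in that sequence.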
class Enumeration(object):
def __init__(self, names):
for number, name in enumerate(names):
setattr(self, name, number)
TIR4EXCEPTION_ENUM = Enumeration(("USB_LIST_FAILED",
"FIND_DEVICE_FAILED",
"CREATE_HANDLE_FAILED",
"CLAIM_FAILED",
"DISCONNECT",
"UNKNOWN_READ_ERROR",
"UNKNOWN_PACKET"))
class TIR4Exception(Exception):
def __init__(self, ID):
self.args = (ID,)
class TIR4Control(object):
def __init__(self):
self.do_reset()
def do_reset(self):
self.readbyteq = ByteQueue()
self.writebyteq = ByteQueue()
self.mp = MessageProcessor()
self.init_step_5percent_callback = None
self.device_notpresent_callback = None
self.on_read_disconnect_watch = False
self.first_read_missing_timestamp = 0
def is_device_present(self):
return (self.find_device() != None)
def find_device(self):
buses = usb.busses()
if not(buses):
raise TIR4Exception(TIR4EXCEPTION_ENUM.USB_LIST_FAILED)
for bus in buses:
for device in bus.devices:
if device.idVendor == TIR_VENDOR_ID:
if device.idProduct == TIR_PRODUCT_ID:
return device
return None
def do_full_init(self):
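        # Convenience wrapper: run the staged initialization until every
        # packet of the bulk configuration has been written to the device.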
self.do_init_step_start()
while not self.is_init_step_done():
self.do_init_step()
def do_init_step_start(self):
self.device = self.find_device()
if not(self.device):
raise TIR4Exception(TIR4EXCEPTION_ENUM.FIND_DEVICE_FAILED)
self.device_handle = self.device.open()
if not(self.device_handle):
raise TIR4Exception(TIR4EXCEPTION_ENUM.CREATE_HANDLE_FAILED)
try:
self.device_handle.claimInterface(TIR_INTERFACE_ID)
except usb.USBError:
raise TIR4Exception(TIR4EXCEPTION_ENUM.CLAIM_FAILED)
self.device_handle.setAltInterface(TIR_ALTINTERFACE)
self.desc = self.device_handle.getDescriptor(0x0000002, # type
0x0000000, # index
0x0000009) # length
self.device_handle.releaseInterface()
self.device_handle.setConfiguration(TIR_CONFIGURATION)
self.device_handle.claimInterface(TIR_INTERFACE_ID)
self.device_handle.setAltInterface(TIR_ALTINTERFACE)
self.bulk_config_len = len(bulk_config_data.bulk_config)
self.bulk_config_index = 0
self.five_percent_current_thresh = 5
def do_init_step(self):
if not(self.is_init_step_done()):
packet = bulk_config_data.bulk_config[self.bulk_config_index]
self.nq_write_usb(packet)
self.bulk_config_index += 1
if self.init_step_5percent_callback != None:
if self.get_init_step_percent_complete() > self.five_percent_current_thresh:
self.five_percent_current_thresh += 5
self.init_step_5percent_callback()
def is_init_step_done(self):
return (self.bulk_config_len == self.bulk_config_index)
def get_init_step_percent_complete(self):
return 100.0*self.bulk_config_index/self.bulk_config_len
def set_init_step_5percent_callback(self,func):
self.init_step_5percent_callback = func
def do_read_usb(self):
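        # Bulk-read from the device; a read that times out with 'No error'
        # starts a disconnect watch, and if reads keep failing for more than
        # READ_DISCONNECT_TIMEOUT seconds the device is treated as unplugged.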
try:
readbytes = self.device_handle.bulkRead(TIR_BULK_IN_EP,
BULK_READ_SIZE,
BULK_READ_TIMEOUT)
self.readbyteq.append_bytes(readbytes)
self.on_read_disconnect_watch = False
except usb.USBError, errorcode:
if errorcode.args == ('No error',):
if self.on_read_disconnect_watch:
if (time.clock()-self.first_read_missing_timestamp) > READ_DISCONNECT_TIMEOUT:
raise TIR4Exception(TIR4EXCEPTION_ENUM.DISCONNECT)
else:
pass # continue on
else:
self.first_read_missing_timestamp = time.clock()
self.on_read_disconnect_watch = True
elif errorcode.args == ('error reaping URB: No such device',):
raise TIR4Exception(TIR4EXCEPTION_ENUM.DISCONNECT)
else:
print errorcode.args
raise TIR4Exception(TIR4EXCEPTION_ENUM.UNKNOWN_READ_ERROR)
def do_write_usb_queued(self):
bytes_written = self.device_handle.bulkWrite(TIR_BULK_OUT_EP,
self.writebyteq.peek_bytes(),
BULK_WRITE_TIMEOUT)
self.writebyteq.drop_bytes(bytes_written)
def nq_write_usb(self, buf):
self.writebyteq.append_bytes(buf)
self.do_write_usb_queued()
def process_readbyteq(self):
self.mp.add_bytes(self.readbyteq.pop_bytes())
def peek_frames(self):
return self.mp.get_frameq().peek_frames()
def is_frame_available(self):
return not(self.mp.get_frameq().is_empty())
def pop_frame(self):
return self.mp.get_frameq().pop()
def set_vline_offset(self, offset):
global vline_offset
vline_offset = offset
def set_hpix_offset(self, offset):
global hpix_offset
hpix_offset = offset
def get_vline_offset(self):
global vline_offset
return vline_offset
def get_hpix_offset(self):
global hpix_offset
return hpix_offset
def init_leds(self):
self.set_all_led_off()
def set_all_led_off(self):
self.set_led_worker(0,
TIR_ALL_LED_BIT_MASK)
self.ir_led_on = False
self.green_led_on = False
self.red_led_on = False
self.blue_led_on = False
def set_ir_led_on(self, arg):
if arg:
cmd = TIR_IR_LED_BIT_MASK
else:
cmd = 0
self.set_led_worker(cmd,
TIR_IR_LED_BIT_MASK)
self.ir_led_on = arg
def set_green_led_on(self, arg):
if arg:
cmd = TIR_GREEN_LED_BIT_MASK
else:
cmd = 0
self.set_led_worker(cmd,
TIR_GREEN_LED_BIT_MASK)
self.green_led_on = arg
def set_red_led_on(self, arg):
if arg:
cmd = TIR_RED_LED_BIT_MASK
else:
cmd = 0
self.set_led_worker(cmd,
TIR_RED_LED_BIT_MASK)
self.red_led_on = arg
def set_blue_led_on(self, arg):
if arg:
cmd = TIR_BLUE_LED_BIT_MASK
else:
cmd = 0
self.set_led_worker(cmd,
TIR_BLUE_LED_BIT_MASK)
self.blue_led_on = arg
def is_ir_led_on(self, arg):
return self.ir_led_on
def is_green_led_on(self, arg):
return self.green_led_on
def is_red_led_on(self, arg):
return self.red_led_on
def is_blue_led_on(self, arg):
return self.blue_led_on
def set_led_worker(self, cmd, mask):
self.nq_write_usb((TIR_LED_MSGID,
cmd,
mask))
def set_device_notpresent_callback(self, func):
self.device_notpresent_callback = func
def set_crop_frames(self, arg):
global crop_frames
crop_frames = arg
def is_crop_frames(self):
global crop_frames
return crop_frames
def trim(self):
self.mp.trim()
def set_frame_format(self):
#TBD
pass
def get_frame_format(self):
#TBD
pass
# stripes must be added in vline sorted order!
class Blob(list):
def __init__(self,stripe=None):
list.__init__(self)
self.area = 0
if stripe != None:
self.append(stripe)
def __cmp__(self,other):
return cmp(other.get_area(), self.get_area())
def append(self, stripe):
list.append(self,stripe)
self.area += (stripe.hstop-stripe.hstart)
def extend(self, blob):
list.extend(self,blob)
self.area += blob.get_area()
def head(self):
return self[0]
def tail(self):
return self[len(self)-1]
def is_contact(self,arg_stripe):
for self_stripe in reversed(self):
if self_stripe.vline < arg_stripe.vline - 2:
return False
elif self_stripe.is_h_contact(arg_stripe):
return True
return False
def get_center_coords(self):
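        # Area-weighted centroid of the blob: each stripe contributes its
        # pixel count (hstop - hstart) as the weight for its line number and
        # its horizontal midpoint.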
self.cum_line_area_product = 0
self.cum_2x_hcenter_area_product = 0
for stripe in self:
area = (stripe.hstop-stripe.hstart)
self.cum_line_area_product += stripe.vline*area
self.cum_2x_hcenter_area_product += (stripe.hstop+stripe.hstart)*area
if self.area > 0:
self.vcenter = 1.0*self.cum_line_area_product / self.area
self.hcenter = 1.0*self.cum_2x_hcenter_area_product / self.area / 2
else:
self.vcenter = 1.0*self.cum_line_area_product / (self.area+0.001)
self.hcenter = 1.0*self.cum_2x_hcenter_area_product / (self.area+0.001) / 2
return (self.vcenter, self.hcenter)
def get_area(self):
return self.area
def __str__(self):
returnstr = ""
returnstr += "------ Blob Start ------\n"
for stripe in self:
returnstr += " " + str(stripe) + "\n"
returnstr += "------ Blob End ------\n"
return returnstr
class Frame(list):
def __init__(self,device_stripes=None):
list.__init__(self)
if device_stripes:
for device_stripe in device_stripes:
stripe = device_stripe.do_xlate_device_stripe()
self.append(stripe)
def __str__(self):
returnstr = ""
returnstr += "------ Frame Start ------\n"
for stripe in self:
returnstr += " " + str(stripe) + "\n"
returnstr += "------ Frame End ------\n"
return returnstr
def find_blobs(self):
# fix the ^ case
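        # Stripes arrive in vline-sorted order; open blobs whose last stripe
        # is more than two lines above the current stripe are closed, and all
        # open blobs the current stripe touches are merged into a single blob.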
open_blobs = []
closed_blobs = []
for stripe in self:
# find open blobs that match this stripe's vline-1,
# if they don't close them
for blob in open_blobs:
if blob.tail().vline < stripe.vline - 2:
closed_blobs.append(blob)
open_blobs.remove(blob)
blob_contact_indices = []
for i, blob in zip(range(len(open_blobs)),open_blobs):
if blob.is_contact(stripe):
blob_contact_indices.append(i)
if len(blob_contact_indices) == 0:
newblob = Blob(stripe)
open_blobs.append(newblob)
else:
condensed_blob = Blob()
newblobs = []
for i, blob in zip(range(len(open_blobs)),open_blobs):
if i in blob_contact_indices:
condensed_blob.extend(blob)
else:
newblobs.append(blob)
condensed_blob.append(stripe)
newblobs.append(condensed_blob)
open_blobs = newblobs
result = closed_blobs + open_blobs
result.sort(cmp=lambda x,y: x.__cmp__(y))
return result
class FrameQueue(object):
def __init__(self,frames = None):
if frames:
self.frames = frames
else:
self.frames = []
def pop(self):
return self.frames.pop(0)
def append_frame(self,frame):
self.frames.append(frame)
def append_frames(self,frames):
self.frames += frames
def is_empty(self):
return (len(self.frames) == 0)
def peek_frames(self):
return self.frames
def trim(self):
if len(self.frames) > FRAME_QUEUE_MAX_DEPTH:
self.frames = self.frames[0:FRAME_QUEUE_MAX_DEPTH]
# private data structures
class ByteQueue(object):
def __init__(self, bytes = None):
if bytes:
self.bytes = list(bytes)
else:
self.bytes = []
def pop(self):
return self.bytes.pop(0)
def drop_bytes(self,num):
self.bytes=self.bytes[num:len(self.bytes)]
def pop_bytes(self):
returnval = self.bytes
self.bytes = []
return returnval
def append(self,byte):
self.bytes.append(byte)
def append_bytes(self,bytes):
self.bytes += bytes
def peek_bytes(self):
return self.bytes
def peek_2bytes(self):
return (self.bytes[0], self.bytes[1])
def __len__(self):
return len(self.bytes)
class DeviceStripe(object):
def __init__(self,bytes):
self.bytes = bytes
if len(self.bytes) != DEVICE_STRIPE_LEN:
raise TIR4Exception("ERROR: Attempt to create a device stripe of an invalid length")
def is_vsync(self):
for selfbyte,vsync_byte in zip(self.bytes, VSYNC_DEVICE_STRIPE):
if selfbyte != vsync_byte:
return False
return True
def do_xlate_device_stripe(self):
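        # Byte layout: X holds the low 8 bits of the line number, Y and Z the
        # low 8 bits of the start and stop pixels, and W carries the extra
        # 0x100/0x200 bits for each of them.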
X=self.bytes[0]
Y=self.bytes[1]
Z=self.bytes[2]
W=self.bytes[3]
line_num = X
line_num_0x100_bit = W & 0x20
if line_num_0x100_bit != 0:
line_num += 0x100
start_pix = Y
start_pix_0x100_bit = W & 0x80
start_pix_0x200_bit = W & 0x10
if start_pix_0x200_bit:
start_pix += 0x200
if start_pix_0x100_bit:
start_pix += 0x100
stop_pix = Z
stop_pix_0x100_bit = W & 0x40
stop_pix_0x200_bit = W & 0x08
if stop_pix_0x200_bit:
stop_pix += 0x200
if stop_pix_0x100_bit:
stop_pix += 0x100
return Stripe((line_num, start_pix, stop_pix))
class Stripe(object):
def __init__(self,init):
if len(init) != STRIPE_LEN:
raise TIR4Exception("ERROR: Attempt to create a stripe of an invalid length")
self.vline = init[0]
self.hstart = init[1]
self.hstop = init[2]
global crop_frames
if crop_frames:
self.do_crop()
def do_crop(self):
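        # Shift the stripe by the configured offsets and clamp it into the
        # cropped frame (CROPPED_NUM_VLINES x CROPPED_NUM_HPIX).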
global vline_offset
global hpix_offset
self.vline -= vline_offset
if self.vline < 0:
            self.vline = 0
if self.vline >= CROPPED_NUM_VLINES:
self.vline = CROPPED_NUM_VLINES-1
self.hstart -= hpix_offset
if self.hstart < 0:
self.hstart = 0
if self.hstart >= CROPPED_NUM_HPIX:
self.hstart = CROPPED_NUM_HPIX-1
self.hstop -= hpix_offset
if self.hstop < 0:
self.hstop = 0
if self.hstop >= CROPPED_NUM_HPIX:
self.hstop = CROPPED_NUM_HPIX-1
def is_h_contact(self,stripe):
# tests if this stripe overlaps the argument
# in the h-axis. Vertical overlap not tested!
# note: overlap is true if they share a single common
# pixel
if self.hstop < stripe.hstart:
return False
elif self.hstart > stripe.hstop:
return False
else:
return True
def __str__(self):
returnstr = ""
# returnstr += "(0x%03x, 0x%03x, 0x%03x)" % (self.vline,
# self.hstart,
# self.hstop)
# returnstr += " aka "
returnstr += "(%03d, %04d, %04d)" % (self.vline,
self.hstart,
self.hstop)
return returnstr
# turns raw usb reads into TIR4 native format frames
class MessageProcessor(object):
def __init__(self):
self.inbyteq = ByteQueue()
self.outframeq = FrameQueue()
self.pending_frame = Frame()
self.msglen = 0
self.msgid = -1
self.msgcnt = 0
self.updating = False
self.state_enum = Enumeration(("AWAITING_HEADER",
"PROCESSING_MSG",
"CHOMPING_MSG"))
self.state = self.state_enum.AWAITING_HEADER
def add_bytes(self, bytes):
self.inbyteq.append_bytes(bytes)
self.updating = True
while self.updating:
self.process_pending_bytes()
def process_pending_bytes(self):
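        # Small state machine: wait for a 2-byte (length, id) header, then
        # either consume stripe data four bytes at a time (PROCESSING_MSG) or
        # discard the payload of unhandled message types (CHOMPING_MSG).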
if self.state == self.state_enum.AWAITING_HEADER:
if len(self.inbyteq) > VALID_MIN_MSGLEN:
(self.msglen,self.msgid) = self.inbyteq.peek_2bytes()
if self.msglen == NOP_MSGLEN:
# chomp and continue AWAITING_HEADER
self.inbyteq.drop_bytes(VALID_MIN_MSGLEN)
elif (self.msglen <= VALID_MAX_MSGLEN and
self.msglen >= VALID_MIN_MSGLEN and
self.msgid == VALID_MSGID):
self.msgcnt = VALID_MIN_MSGLEN
self.inbyteq.drop_bytes(VALID_MIN_MSGLEN)
self.state = self.state_enum.PROCESSING_MSG
elif self.msglen == TBD0_MSGLEN and self.msgid == TBD0_MSGID:
self.msgcnt = VALID_MIN_MSGLEN
self.inbyteq.drop_bytes(VALID_MIN_MSGLEN)
self.state = self.state_enum.CHOMPING_MSG
else:
# maybe we're off by one?
# drop one and try again
print "Warning READERR: 0x%02x" % self.msglen
self.inbyteq.pop()
else:
self.updating = False
elif self.state == self.state_enum.PROCESSING_MSG:
if len(self.inbyteq) < DEVICE_STRIPE_LEN:
self.updating = False
elif self.msgcnt >= self.msglen:
self.state = self.state_enum.AWAITING_HEADER
else:
ds = DeviceStripe((self.inbyteq.pop(),
self.inbyteq.pop(),
self.inbyteq.pop(),
self.inbyteq.pop()))
self.add_device_stripe(ds)
self.msgcnt += 4
elif self.state == self.state_enum.CHOMPING_MSG:
if len(self.inbyteq) == 0:
self.updating = False
elif self.msgcnt >= self.msglen:
self.state = self.state_enum.AWAITING_HEADER
else:
byte = self.inbyteq.pop()
self.msgcnt += 1
else:
self.updating = False
def add_device_stripe(self,device_stripe):
if device_stripe.is_vsync():
self.outframeq.append_frame(self.pending_frame)
self.pending_frame = Frame()
else:
txs = device_stripe.do_xlate_device_stripe()
self.pending_frame.append(txs)
def get_frameq(self):
return self.outframeq
def trim(self):
self.outframeq.trim()
def runtests(argv=None):
t4 = TIR4Control()
print "t4.is_device_present(): ", t4.is_device_present()
if not(t4.is_device_present()):
sys.exit()
t4.do_full_init()
t4.set_all_led_off()
t4.set_green_led_on(True)
t4.set_ir_led_on(True)
for i in range(0,10):
t4.do_read_usb()
t4.process_readbyteq()
if t4.is_frame_available():
frame = t4.pop_frame()
print frame
if __name__ == "__main__":
sys.exit(runtests(sys.argv))
|
40e9338e644bc04590fbe5fb08912f511f7ba059
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/lib/python3.7/site-packages/networkx/algorithms/connectivity/edge_augmentation.py
|
a7f16aaa88538074a72405110553b138eedf7d3c
|
[
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 45,102
|
py
|
edge_augmentation.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2019 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Authors: Jon Crall (erotemic@gmail.com)
"""
Algorithms for finding k-edge-augmentations
A k-edge-augmentation is a set of edges that, once added to a graph, ensures
that the graph is k-edge-connected; i.e. the graph cannot be disconnected
unless k or more edges are removed. Typically, the goal is to find the
augmentation with minimum weight. In general, it is not guaranteed that a
k-edge-augmentation exists.
See Also
--------
:mod:`edge_kcomponents` : algorithms for finding k-edge-connected components
:mod:`connectivity` : algorithms for determining edge connectivity.
"""
import math
import sys
import itertools as it
import networkx as nx
from networkx.utils import not_implemented_for, py_random_state
from collections import defaultdict, namedtuple
__all__ = [
'k_edge_augmentation',
'is_k_edge_connected',
'is_locally_k_edge_connected',
]
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def is_k_edge_connected(G, k):
"""Tests to see if a graph is k-edge-connected.
Is it impossible to disconnect the graph by removing fewer than k edges?
If so, then G is k-edge-connected.
Parameters
----------
G : NetworkX graph
An undirected graph.
k : integer
edge connectivity to test for
Returns
-------
boolean
True if G is k-edge-connected.
See Also
--------
:func:`is_locally_k_edge_connected`
Example
-------
>>> G = nx.barbell_graph(10, 0)
>>> nx.is_k_edge_connected(G, k=1)
True
>>> nx.is_k_edge_connected(G, k=2)
False
"""
if k < 1:
raise ValueError('k must be positive, not {}'.format(k))
# First try to quickly determine if G is not k-edge-connected
if G.number_of_nodes() < k + 1:
return False
elif any(d < k for n, d in G.degree()):
return False
else:
# Otherwise perform the full check
if k == 1:
return nx.is_connected(G)
elif k == 2:
return not nx.has_bridges(G)
else:
return nx.edge_connectivity(G, cutoff=k) >= k
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def is_locally_k_edge_connected(G, s, t, k):
"""Tests to see if an edge in a graph is locally k-edge-connected.
Is it impossible to disconnect s and t by removing fewer than k edges?
If so, then s and t are locally k-edge-connected in G.
Parameters
----------
G : NetworkX graph
An undirected graph.
s : node
Source node
t : node
Target node
k : integer
local edge connectivity for nodes s and t
Returns
-------
boolean
True if s and t are locally k-edge-connected in G.
See Also
--------
:func:`is_k_edge_connected`
Example
-------
>>> from networkx.algorithms.connectivity import is_locally_k_edge_connected
>>> G = nx.barbell_graph(10, 0)
>>> is_locally_k_edge_connected(G, 5, 15, k=1)
True
>>> is_locally_k_edge_connected(G, 5, 15, k=2)
False
>>> is_locally_k_edge_connected(G, 1, 5, k=2)
True
"""
if k < 1:
raise ValueError('k must be positive, not {}'.format(k))
    # First try to quickly determine if s and t are not locally k-edge-connected in G
if G.degree(s) < k or G.degree(t) < k:
return False
else:
# Otherwise perform the full check
if k == 1:
return nx.has_path(G, s, t)
else:
localk = nx.connectivity.local_edge_connectivity(G, s, t, cutoff=k)
return localk >= k
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def k_edge_augmentation(G, k, avail=None, weight=None, partial=False):
"""Finds set of edges to k-edge-connect G.
Adding edges from the augmentation to G make it impossible to disconnect G
unless k or more edges are removed. This function uses the most efficient
function available (depending on the value of k and if the problem is
weighted or unweighted) to search for a minimum weight subset of available
edges that k-edge-connects G. In general, finding a k-edge-augmentation is
    NP-hard, so solutions are not guaranteed to be minimal. Furthermore, a
k-edge-augmentation may not exist.
Parameters
----------
G : NetworkX graph
An undirected graph.
k : integer
Desired edge connectivity
avail : dict or a set of 2 or 3 tuples
The available edges that can be used in the augmentation.
If unspecified, then all edges in the complement of G are available.
Otherwise, each item is an available edge (with an optional weight).
In the unweighted case, each item is an edge ``(u, v)``.
In the weighted case, each item is a 3-tuple ``(u, v, d)`` or a dict
with items ``(u, v): d``. The third item, ``d``, can be a dictionary
        or a real number. If ``d`` is a dictionary, then ``d[weight]``
        corresponds to the weight.
weight : string
key to use to find weights if ``avail`` is a set of 3-tuples where the
third item in each tuple is a dictionary.
partial : boolean
        If partial is True and no feasible k-edge-augmentation exists, then
        a partial k-edge-augmentation is generated. Adding the edges in a
partial augmentation to G, minimizes the number of k-edge-connected
components and maximizes the edge connectivity between those
components. For details, see :func:`partial_k_edge_augmentation`.
Yields
------
edge : tuple
Edges that, once added to G, would cause G to become k-edge-connected.
If partial is False, an error is raised if this is not possible.
Otherwise, generated edges form a partial augmentation, which
k-edge-connects any part of G where it is possible, and maximally
connects the remaining parts.
Raises
------
NetworkXUnfeasible:
If partial is False and no k-edge-augmentation exists.
NetworkXNotImplemented:
If the input graph is directed or a multigraph.
ValueError:
If k is less than 1
Notes
-----
When k=1 this returns an optimal solution.
When k=2 and ``avail`` is None, this returns an optimal solution.
Otherwise when k=2, this returns a 2-approximation of the optimal solution.
    For k>=3, this problem is NP-hard and this uses a randomized algorithm that
produces a feasible solution, but provides no guarantees on the
solution weight.
Example
-------
>>> # Unweighted cases
>>> G = nx.path_graph((1, 2, 3, 4))
>>> G.add_node(5)
>>> sorted(nx.k_edge_augmentation(G, k=1))
[(1, 5)]
>>> sorted(nx.k_edge_augmentation(G, k=2))
[(1, 5), (5, 4)]
>>> sorted(nx.k_edge_augmentation(G, k=3))
[(1, 4), (1, 5), (2, 5), (3, 5), (4, 5)]
>>> complement = list(nx.k_edge_augmentation(G, k=5, partial=True))
>>> G.add_edges_from(complement)
>>> nx.edge_connectivity(G)
4
Example
-------
>>> # Weighted cases
>>> G = nx.path_graph((1, 2, 3, 4))
>>> G.add_node(5)
>>> # avail can be a tuple with a dict
>>> avail = [(1, 5, {'weight': 11}), (2, 5, {'weight': 10})]
>>> sorted(nx.k_edge_augmentation(G, k=1, avail=avail, weight='weight'))
[(2, 5)]
>>> # or avail can be a 3-tuple with a real number
>>> avail = [(1, 5, 11), (2, 5, 10), (4, 3, 1), (4, 5, 51)]
>>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail))
[(1, 5), (2, 5), (4, 5)]
>>> # or avail can be a dict
>>> avail = {(1, 5): 11, (2, 5): 10, (4, 3): 1, (4, 5): 51}
>>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail))
[(1, 5), (2, 5), (4, 5)]
>>> # If augmentation is infeasible, then a partial solution can be found
>>> avail = {(1, 5): 11}
>>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail, partial=True))
[(1, 5)]
"""
try:
if k <= 0:
raise ValueError('k must be a positive integer, not {}'.format(k))
elif G.number_of_nodes() < k + 1:
msg = 'impossible to {} connect in graph with less than {} nodes'
raise nx.NetworkXUnfeasible(msg.format(k, k + 1))
elif avail is not None and len(avail) == 0:
if not nx.is_k_edge_connected(G, k):
raise nx.NetworkXUnfeasible('no available edges')
aug_edges = []
elif k == 1:
aug_edges = one_edge_augmentation(G, avail=avail, weight=weight,
partial=partial)
elif k == 2:
aug_edges = bridge_augmentation(G, avail=avail, weight=weight)
else:
# raise NotImplementedError(
# 'not implemented for k>2. k={}'.format(k))
aug_edges = greedy_k_edge_augmentation(
G, k=k, avail=avail, weight=weight, seed=0)
        # Do eager evaluation so we can catch any exceptions
        # before executing partial code.
for edge in list(aug_edges):
yield edge
except nx.NetworkXUnfeasible:
if partial:
# Return all available edges
if avail is None:
aug_edges = complement_edges(G)
else:
# If we can't k-edge-connect the entire graph, try to
# k-edge-connect as much as possible
aug_edges = partial_k_edge_augmentation(G, k=k, avail=avail,
weight=weight)
for edge in aug_edges:
yield edge
else:
raise
def partial_k_edge_augmentation(G, k, avail, weight=None):
"""Finds augmentation that k-edge-connects as much of the graph as possible.
When a k-edge-augmentation is not possible, we can still try to find a
small set of edges that partially k-edge-connects as much of the graph as
possible. All possible edges are generated between remaining parts.
This minimizes the number of k-edge-connected subgraphs in the resulting
    graph and maximizes the edge connectivity between those subgraphs.
Parameters
----------
G : NetworkX graph
An undirected graph.
k : integer
Desired edge connectivity
avail : dict or a set of 2 or 3 tuples
For more details, see :func:`k_edge_augmentation`.
weight : string
key to use to find weights if ``avail`` is a set of 3-tuples.
For more details, see :func:`k_edge_augmentation`.
Yields
------
edge : tuple
Edges in the partial augmentation of G. These edges k-edge-connect any
part of G where it is possible, and maximally connects the remaining
parts. In other words, all edges from avail are generated except for
those within subgraphs that have already become k-edge-connected.
Notes
-----
Construct H that augments G with all edges in avail.
Find the k-edge-subgraphs of H.
For each k-edge-subgraph, if the number of nodes is more than k, then find
the k-edge-augmentation of that graph and add it to the solution. Then add
all edges in avail between k-edge subgraphs to the solution.
See Also
--------
:func:`k_edge_augmentation`
Example
-------
>>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7))
>>> G.add_node(8)
>>> avail = [(1, 3), (1, 4), (1, 5), (2, 4), (2, 5), (3, 5), (1, 8)]
>>> sorted(partial_k_edge_augmentation(G, k=2, avail=avail))
[(1, 5), (1, 8)]
"""
def _edges_between_disjoint(H, only1, only2):
""" finds edges between disjoint nodes """
only1_adj = {u: set(H.adj[u]) for u in only1}
for u, neighbs in only1_adj.items():
# Find the neighbors of u in only1 that are also in only2
neighbs12 = neighbs.intersection(only2)
for v in neighbs12:
yield (u, v)
avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G)
# Find which parts of the graph can be k-edge-connected
H = G.copy()
H.add_edges_from(
((u, v, {'weight': w, 'generator': (u, v)})
for (u, v), w in zip(avail, avail_w)))
k_edge_subgraphs = list(nx.k_edge_subgraphs(H, k=k))
# Generate edges to k-edge-connect internal subgraphs
for nodes in k_edge_subgraphs:
if len(nodes) > 1:
# Get the k-edge-connected subgraph
C = H.subgraph(nodes).copy()
# Find the internal edges that were available
sub_avail = {
d['generator']: d['weight']
for (u, v, d) in C.edges(data=True)
if 'generator' in d
}
# Remove potential augmenting edges
C.remove_edges_from(sub_avail.keys())
            # Find a subset of these edges that makes the component
# k-edge-connected and ignore the rest
for edge in nx.k_edge_augmentation(C, k=k, avail=sub_avail):
yield edge
# Generate all edges between CCs that could not be k-edge-connected
for cc1, cc2 in it.combinations(k_edge_subgraphs, 2):
for (u, v) in _edges_between_disjoint(H, cc1, cc2):
d = H.get_edge_data(u, v)
edge = d.get('generator', None)
if edge is not None:
yield edge
@not_implemented_for('multigraph')
@not_implemented_for('directed')
def one_edge_augmentation(G, avail=None, weight=None, partial=False):
"""Finds minimum weight set of edges to connect G.
Equivalent to :func:`k_edge_augmentation` when k=1. Adding the resulting
edges to G will make it 1-edge-connected. The solution is optimal for both
weighted and non-weighted variants.
Parameters
----------
G : NetworkX graph
An undirected graph.
avail : dict or a set of 2 or 3 tuples
For more details, see :func:`k_edge_augmentation`.
weight : string
key to use to find weights if ``avail`` is a set of 3-tuples.
For more details, see :func:`k_edge_augmentation`.
partial : boolean
If partial is True and no feasible k-edge-augmentation exists, then the
augmenting edges minimize the number of connected components.
Yields
------
edge : tuple
Edges in the one-augmentation of G
Raises
------
NetworkXUnfeasible:
If partial is False and no one-edge-augmentation exists.
Notes
-----
Uses either :func:`unconstrained_one_edge_augmentation` or
:func:`weighted_one_edge_augmentation` depending on whether ``avail`` is
specified. Both algorithms are based on finding a minimum spanning tree.
As such both algorithms find optimal solutions and run in linear time.
See Also
--------
:func:`k_edge_augmentation`
"""
if avail is None:
return unconstrained_one_edge_augmentation(G)
else:
return weighted_one_edge_augmentation(G, avail=avail, weight=weight,
partial=partial)
@not_implemented_for('multigraph')
@not_implemented_for('directed')
def bridge_augmentation(G, avail=None, weight=None):
"""Finds the a set of edges that bridge connects G.
Equivalent to :func:`k_edge_augmentation` when k=2, and partial=False.
Adding the resulting edges to G will make it 2-edge-connected. If no
    constraints are specified, the returned set of edges is minimal and
    optimal; otherwise the solution is approximated.
Parameters
----------
G : NetworkX graph
An undirected graph.
avail : dict or a set of 2 or 3 tuples
For more details, see :func:`k_edge_augmentation`.
weight : string
key to use to find weights if ``avail`` is a set of 3-tuples.
For more details, see :func:`k_edge_augmentation`.
Yields
------
edge : tuple
Edges in the bridge-augmentation of G
Raises
------
NetworkXUnfeasible:
If no bridge-augmentation exists.
Notes
-----
If there are no constraints the solution can be computed in linear time
using :func:`unconstrained_bridge_augmentation`. Otherwise, the problem
    becomes NP-hard and the solution is approximated by
:func:`weighted_bridge_augmentation`.
See Also
--------
:func:`k_edge_augmentation`
"""
if G.number_of_nodes() < 3:
raise nx.NetworkXUnfeasible(
'impossible to bridge connect less than 3 nodes')
if avail is None:
return unconstrained_bridge_augmentation(G)
else:
return weighted_bridge_augmentation(G, avail, weight=weight)
# --- Algorithms and Helpers ---
def _ordered(u, v):
"""Returns the nodes in an undirected edge in lower-triangular order"""
return (u, v) if u < v else (v, u)
def _unpack_available_edges(avail, weight=None, G=None):
"""Helper to separate avail into edges and corresponding weights"""
if weight is None:
weight = 'weight'
if isinstance(avail, dict):
avail_uv = list(avail.keys())
avail_w = list(avail.values())
else:
def _try_getitem(d):
try:
return d[weight]
except TypeError:
return d
avail_uv = [tup[0:2] for tup in avail]
avail_w = [1 if len(tup) == 2 else _try_getitem(tup[-1])
for tup in avail]
if G is not None:
# Edges already in the graph are filtered
flags = [not G.has_edge(u, v) for u, v in avail_uv]
avail_uv = list(it.compress(avail_uv, flags))
avail_w = list(it.compress(avail_w, flags))
return avail_uv, avail_w
MetaEdge = namedtuple('MetaEdge', ('meta_uv', 'uv', 'w'))
def _lightest_meta_edges(mapping, avail_uv, avail_w):
"""Maps available edges in the original graph to edges in the metagraph.
Parameters
----------
mapping : dict
mapping produced by :func:`collapse`, that maps each node in the
original graph to a node in the meta graph
avail_uv : list
list of edges
avail_w : list
list of edge weights
Notes
-----
Each node in the metagraph is a k-edge-connected component in the original
graph. We don't care about any edge within the same k-edge-connected
    component, so we ignore self edges. We are also only interested in the
    minimum weight edge bridging each k-edge-connected component, so we group
the edges by meta-edge and take the lightest in each group.
Example
-------
>>> # Each group represents a meta-node
>>> groups = ([1, 2, 3], [4, 5], [6])
>>> mapping = {n: meta_n for meta_n, ns in enumerate(groups) for n in ns}
>>> avail_uv = [(1, 2), (3, 6), (1, 4), (5, 2), (6, 1), (2, 6), (3, 1)]
>>> avail_w = [ 20, 99, 20, 15, 50, 99, 20]
>>> sorted(_lightest_meta_edges(mapping, avail_uv, avail_w))
[MetaEdge(meta_uv=(0, 1), uv=(5, 2), w=15), MetaEdge(meta_uv=(0, 2), uv=(6, 1), w=50)]
"""
grouped_wuv = defaultdict(list)
for w, (u, v) in zip(avail_w, avail_uv):
# Order the meta-edge so it can be used as a dict key
meta_uv = _ordered(mapping[u], mapping[v])
# Group each available edge using the meta-edge as a key
grouped_wuv[meta_uv].append((w, u, v))
# Now that all available edges are grouped, choose one per group
for (mu, mv), choices_wuv in grouped_wuv.items():
# Ignore available edges within the same meta-node
if mu != mv:
# Choose the lightest available edge belonging to each meta-edge
w, u, v = min(choices_wuv)
yield MetaEdge((mu, mv), (u, v), w)
def unconstrained_one_edge_augmentation(G):
"""Finds the smallest set of edges to connect G.
This is a variant of the unweighted MST problem.
If G is not empty, a feasible solution always exists.
Parameters
----------
G : NetworkX graph
An undirected graph.
Yields
------
edge : tuple
Edges in the one-edge-augmentation of G
See Also
--------
:func:`one_edge_augmentation`
:func:`k_edge_augmentation`
Example
-------
>>> G = nx.Graph([(1, 2), (2, 3), (4, 5)])
>>> G.add_nodes_from([6, 7, 8])
>>> sorted(unconstrained_one_edge_augmentation(G))
[(1, 4), (4, 6), (6, 7), (7, 8)]
"""
ccs1 = list(nx.connected_components(G))
C = collapse(G, ccs1)
# When we are not constrained, we can just make a meta graph tree.
meta_nodes = list(C.nodes())
# build a path in the metagraph
meta_aug = list(zip(meta_nodes, meta_nodes[1:]))
# map that path to the original graph
inverse = defaultdict(list)
for k, v in C.graph['mapping'].items():
inverse[v].append(k)
for mu, mv in meta_aug:
yield (inverse[mu][0], inverse[mv][0])
def weighted_one_edge_augmentation(G, avail, weight=None, partial=False):
"""Finds the minimum weight set of edges to connect G if one exists.
This is a variant of the weighted MST problem.
Parameters
----------
G : NetworkX graph
An undirected graph.
avail : dict or a set of 2 or 3 tuples
For more details, see :func:`k_edge_augmentation`.
weight : string
key to use to find weights if ``avail`` is a set of 3-tuples.
For more details, see :func:`k_edge_augmentation`.
partial : boolean
If partial is True and no feasible k-edge-augmentation exists, then the
augmenting edges minimize the number of connected components.
Yields
------
edge : tuple
Edges in the subset of avail chosen to connect G.
See Also
--------
:func:`one_edge_augmentation`
:func:`k_edge_augmentation`
Example
-------
>>> G = nx.Graph([(1, 2), (2, 3), (4, 5)])
>>> G.add_nodes_from([6, 7, 8])
>>> # any edge not in avail has an implicit weight of infinity
>>> avail = [(1, 3), (1, 5), (4, 7), (4, 8), (6, 1), (8, 1), (8, 2)]
>>> sorted(weighted_one_edge_augmentation(G, avail))
[(1, 5), (4, 7), (6, 1), (8, 1)]
>>> # find another solution by giving large weights to edges in the
>>> # previous solution (note some of the old edges must be used)
>>> avail = [(1, 3), (1, 5, 99), (4, 7, 9), (6, 1, 99), (8, 1, 99), (8, 2)]
>>> sorted(weighted_one_edge_augmentation(G, avail))
[(1, 5), (4, 7), (6, 1), (8, 2)]
"""
avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G)
# Collapse CCs in the original graph into nodes in a metagraph
# Then find an MST of the metagraph instead of the original graph
C = collapse(G, nx.connected_components(G))
mapping = C.graph['mapping']
# Assign each available edge to an edge in the metagraph
candidate_mapping = _lightest_meta_edges(mapping, avail_uv, avail_w)
# nx.set_edge_attributes(C, name='weight', values=0)
C.add_edges_from(
(mu, mv, {'weight': w, 'generator': uv})
for (mu, mv), uv, w in candidate_mapping
)
# Find MST of the meta graph
meta_mst = nx.minimum_spanning_tree(C)
if not partial and not nx.is_connected(meta_mst):
raise nx.NetworkXUnfeasible(
'Not possible to connect G with available edges')
# Yield the edge that generated the meta-edge
for mu, mv, d in meta_mst.edges(data=True):
if 'generator' in d:
edge = d['generator']
yield edge
def unconstrained_bridge_augmentation(G):
"""Finds an optimal 2-edge-augmentation of G using the fewest edges.
This is an implementation of the algorithm detailed in [1]_.
The basic idea is to construct a meta-graph of bridge-ccs, connect leaf
nodes of the trees to connect the entire graph, and finally connect the
leafs of the tree in dfs-preorder to bridge connect the entire graph.
Parameters
----------
G : NetworkX graph
An undirected graph.
Yields
------
edge : tuple
Edges in the bridge augmentation of G
Notes
-----
Input: a graph G.
First find the bridge components of G and collapse each bridge-cc into a
node of a metagraph graph C, which is guaranteed to be a forest of trees.
C contains p "leafs" --- nodes with exactly one incident edge.
C contains q "isolated nodes" --- nodes with no incident edges.
Theorem: If p + q > 1, then at least :math:`ceil(p / 2) + q` edges are
needed to bridge connect C. This algorithm achieves this min number.
The method first adds enough edges to make G into a tree and then pairs
leafs in a simple fashion.
Let n be the number of trees in C. Let v(i) be an isolated vertex in the
    i-th tree if one exists, otherwise it is a pair of distinct leaf nodes
    in the i-th tree. Alternating edges from these sets (i.e. adding edges
    A1 = [(v(i)[0], v(i + 1)[1]), (v(i + 1)[0], v(i + 2)[1]), ...]) connects C
    into a tree T. This tree has p' = p + 2q - 2(n - 1) leafs and no isolated
vertices. A1 has n - 1 edges. The next step finds ceil(p' / 2) edges to
biconnect any tree with p' leafs.
Convert T into an arborescence T' by picking an arbitrary root node with
degree >= 2 and directing all edges away from the root. Note the
implementation implicitly constructs T'.
The leafs of T are the nodes with no existing edges in T'.
    Order the leafs of T' by DFS preorder. Then break this list in half
and add the zipped pairs to A2.
The set A = A1 + A2 is the minimum augmentation in the metagraph.
To convert this to edges in the original graph
References
----------
.. [1] Eswaran, Kapali P., and R. Endre Tarjan. (1975) Augmentation problems.
http://epubs.siam.org/doi/abs/10.1137/0205044
See Also
--------
:func:`bridge_augmentation`
:func:`k_edge_augmentation`
Example
-------
>>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7))
>>> sorted(unconstrained_bridge_augmentation(G))
[(1, 7)]
>>> G = nx.path_graph((1, 2, 3, 2, 4, 5, 6, 7))
>>> sorted(unconstrained_bridge_augmentation(G))
[(1, 3), (3, 7)]
>>> G = nx.Graph([(0, 1), (0, 2), (1, 2)])
>>> G.add_node(4)
>>> sorted(unconstrained_bridge_augmentation(G))
[(1, 4), (4, 0)]
"""
# -----
# Mapping of terms from (Eswaran and Tarjan):
# G = G_0 - the input graph
# C = G_0' - the bridge condensation of G. (This is a forest of trees)
# A1 = A_1 - the edges to connect the forest into a tree
# leaf = pendant - a node with degree of 1
# alpha(v) = maps the node v in G to its meta-node in C
# beta(x) = maps the meta-node x in C to any node in the bridge
# component of G corresponding to x.
# find the 2-edge-connected components of G
bridge_ccs = list(nx.connectivity.bridge_components(G))
    # condense G into a forest C
C = collapse(G, bridge_ccs)
# Choose pairs of distinct leaf nodes in each tree. If this is not
# possible then make a pair using the single isolated node in the tree.
vset1 = [
tuple(cc) * 2 # case1: an isolated node
if len(cc) == 1 else
sorted(cc, key=C.degree)[0:2] # case2: pair of leaf nodes
for cc in nx.connected_components(C)
]
if len(vset1) > 1:
# Use this set to construct edges that connect C into a tree.
nodes1 = [vs[0] for vs in vset1]
nodes2 = [vs[1] for vs in vset1]
A1 = list(zip(nodes1[1:], nodes2))
else:
A1 = []
# Connect each tree in the forest to construct an arborescence
T = C.copy()
T.add_edges_from(A1)
# If there are only two leaf nodes, we simply connect them.
leafs = [n for n, d in T.degree() if d == 1]
if len(leafs) == 1:
A2 = []
if len(leafs) == 2:
A2 = [tuple(leafs)]
else:
# Choose an arbitrary non-leaf root
try:
root = next(n for n, d in T.degree() if d > 1)
except StopIteration: # no nodes found with degree > 1
return
# order the leaves of C by (induced directed) preorder
v2 = [n for n in nx.dfs_preorder_nodes(T, root) if T.degree(n) == 1]
        # connecting the first half of the leafs in pre-order to the second
# half will bridge connect the tree with the fewest edges.
half = int(math.ceil(len(v2) / 2.0))
A2 = list(zip(v2[:half], v2[-half:]))
# collect the edges used to augment the original forest
aug_tree_edges = A1 + A2
# Construct the mapping (beta) from meta-nodes to regular nodes
inverse = defaultdict(list)
for k, v in C.graph['mapping'].items():
inverse[v].append(k)
# sort so we choose minimum degree nodes first
inverse = {mu: sorted(mapped, key=lambda u: (G.degree(u), u))
for mu, mapped in inverse.items()}
# For each meta-edge, map back to an arbitrary pair in the original graph
G2 = G.copy()
for mu, mv in aug_tree_edges:
# Find the first available edge that doesn't exist and return it
for u, v in it.product(inverse[mu], inverse[mv]):
if not G2.has_edge(u, v):
G2.add_edge(u, v)
yield u, v
break
def weighted_bridge_augmentation(G, avail, weight=None):
"""Finds an approximate min-weight 2-edge-augmentation of G.
This is an implementation of the approximation algorithm detailed in [1]_.
It chooses a set of edges from avail to add to G that renders it
2-edge-connected if such a subset exists. This is done by finding a
minimum spanning arborescence of a specially constructed metagraph.
Parameters
----------
G : NetworkX graph
An undirected graph.
avail : set of 2 or 3 tuples.
candidate edges (with optional weights) to choose from
weight : string
key to use to find weights if avail is a set of 3-tuples where the
third item in each tuple is a dictionary.
Yields
------
edge : tuple
Edges in the subset of avail chosen to bridge augment G.
Notes
-----
Finding a weighted 2-edge-augmentation is NP-hard.
Any edge not in ``avail`` is considered to have a weight of infinity.
The approximation factor is 2 if ``G`` is connected and 3 if it is not.
Runs in :math:`O(m + n log(n))` time
References
----------
.. [1] Khuller, Samir, and Ramakrishna Thurimella. (1993) Approximation
algorithms for graph augmentation.
http://www.sciencedirect.com/science/article/pii/S0196677483710102
See Also
--------
:func:`bridge_augmentation`
:func:`k_edge_augmentation`
Example
-------
>>> G = nx.path_graph((1, 2, 3, 4))
>>> # When the weights are equal, (1, 4) is the best
>>> avail = [(1, 4, 1), (1, 3, 1), (2, 4, 1)]
>>> sorted(weighted_bridge_augmentation(G, avail))
[(1, 4)]
>>> # Giving (1, 4) a high weight makes the two edge solution the best.
>>> avail = [(1, 4, 1000), (1, 3, 1), (2, 4, 1)]
>>> sorted(weighted_bridge_augmentation(G, avail))
[(1, 3), (2, 4)]
>>> #------
>>> G = nx.path_graph((1, 2, 3, 4))
>>> G.add_node(5)
>>> avail = [(1, 5, 11), (2, 5, 10), (4, 3, 1), (4, 5, 1)]
>>> sorted(weighted_bridge_augmentation(G, avail=avail))
[(1, 5), (4, 5)]
>>> avail = [(1, 5, 11), (2, 5, 10), (4, 3, 1), (4, 5, 51)]
>>> sorted(weighted_bridge_augmentation(G, avail=avail))
[(1, 5), (2, 5), (4, 5)]
"""
if weight is None:
weight = 'weight'
# If input G is not connected the approximation factor increases to 3
if not nx.is_connected(G):
H = G.copy()
connectors = list(one_edge_augmentation(H, avail=avail, weight=weight))
H.add_edges_from(connectors)
for edge in connectors:
yield edge
else:
connectors = []
H = G
if len(avail) == 0:
if nx.has_bridges(H):
raise nx.NetworkXUnfeasible('no augmentation possible')
avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=H)
# Collapse input into a metagraph. Meta nodes are bridge-ccs
bridge_ccs = nx.connectivity.bridge_components(H)
C = collapse(H, bridge_ccs)
# Use the meta graph to shrink avail to a small feasible subset
mapping = C.graph['mapping']
# Choose the minimum weight feasible edge in each group
meta_to_wuv = {
(mu, mv): (w, uv)
for (mu, mv), uv, w in _lightest_meta_edges(mapping, avail_uv, avail_w)
}
# Mapping of terms from (Khuller and Thurimella):
# C : G_0 = (V, E^0)
# This is the metagraph where each node is a 2-edge-cc in G.
# The edges in C represent bridges in the original graph.
# (mu, mv) : E - E^0 # they group both avail and given edges in E
# T : \Gamma
# D : G^D = (V, E_D)
# The paper uses ancestor because children point to parents, which is
# contrary to networkx standards. So, we actually need to run
# nx.least_common_ancestor on the reversed Tree.
# Pick an arbitrary leaf from C as the root
try:
root = next(n for n, d in C.degree() if d == 1)
except StopIteration: # no nodes found with degree == 1
return
# Root C into a tree TR by directing all edges away from the root
# Note in their paper T directs edges towards the root
TR = nx.dfs_tree(C, root)
# Add to D the directed edges of T and set their weight to zero
# This indicates that it costs nothing to use edges that were given.
D = nx.reverse(TR).copy()
nx.set_edge_attributes(D, name='weight', values=0)
# The LCA of mu and mv in T is the shared ancestor of mu and mv that is
# located farthest from the root.
lca_gen = nx.tree_all_pairs_lowest_common_ancestor(
TR, root=root, pairs=meta_to_wuv.keys())
for (mu, mv), lca in lca_gen:
w, uv = meta_to_wuv[(mu, mv)]
if lca == mu:
# If u is an ancestor of v in TR, then add edge u->v to D
D.add_edge(lca, mv, weight=w, generator=uv)
elif lca == mv:
# If v is an ancestor of u in TR, then add edge v->u to D
D.add_edge(lca, mu, weight=w, generator=uv)
else:
            # If neither u nor v is an ancestor of the other in TR
# let t = lca(TR, u, v) and add edges t->u and t->v
# Track the original edge that GENERATED these edges.
D.add_edge(lca, mu, weight=w, generator=uv)
D.add_edge(lca, mv, weight=w, generator=uv)
# Then compute a minimum rooted branching
try:
# Note the original edges must be directed towards to root for the
# branching to give us a bridge-augmentation.
A = _minimum_rooted_branching(D, root)
except nx.NetworkXException:
# If there is no branching then augmentation is not possible
raise nx.NetworkXUnfeasible('no 2-edge-augmentation possible')
    # For each edge e in the branching that did not belong to the directed
    # tree T, add the corresponding edge that **GENERATED** it (this is not
    # necessarily e itself!)
# ensure the third case does not generate edges twice
bridge_connectors = set()
for mu, mv in A.edges():
data = D.get_edge_data(mu, mv)
if 'generator' in data:
# Add the avail edge that generated the branching edge.
edge = data['generator']
bridge_connectors.add(edge)
for edge in bridge_connectors:
yield edge
def _minimum_rooted_branching(D, root):
"""Helper function to compute a minimum rooted branching (aka rooted
arborescence)
Before the branching can be computed, the directed graph must be rooted by
removing the predecessors of root.
A branching / arborescence of rooted graph G is a subgraph that contains a
directed path from the root to every other vertex. It is the directed
analog of the minimum spanning tree problem.
References
----------
[1] Khuller, Samir (2002) Advanced Algorithms Lecture 24 Notes.
https://www.cs.umd.edu/class/spring2011/cmsc651/lec07.pdf
"""
rooted = D.copy()
# root the graph by removing all predecessors to `root`.
rooted.remove_edges_from([(u, root) for u in D.predecessors(root)])
# Then compute the branching / arborescence.
A = nx.minimum_spanning_arborescence(rooted)
return A
def collapse(G, grouped_nodes):
"""Collapses each group of nodes into a single node.
This is similar to condensation, but works on undirected graphs.
Parameters
----------
G : NetworkX Graph
grouped_nodes: list or generator
Grouping of nodes to collapse. The grouping must be disjoint.
If grouped_nodes are strongly_connected_components then this is
equivalent to :func:`condensation`.
Returns
-------
C : NetworkX Graph
The collapsed graph C of G with respect to the node grouping. The node
labels are integers corresponding to the index of the component in the
list of grouped_nodes. C has a graph attribute named 'mapping' with a
dictionary mapping the original nodes to the nodes in C to which they
belong. Each node in C also has a node attribute 'members' with the set
of original nodes in G that form the group that the node in C
represents.
Examples
--------
    >>> # Collapses a graph using disjoint groups, but not necessarily connected
>>> G = nx.Graph([(1, 0), (2, 3), (3, 1), (3, 4), (4, 5), (5, 6), (5, 7)])
>>> G.add_node('A')
>>> grouped_nodes = [{0, 1, 2, 3}, {5, 6, 7}]
>>> C = collapse(G, grouped_nodes)
>>> members = nx.get_node_attributes(C, 'members')
>>> sorted(members.keys())
[0, 1, 2, 3]
>>> member_values = set(map(frozenset, members.values()))
>>> assert {0, 1, 2, 3} in member_values
>>> assert {4} in member_values
>>> assert {5, 6, 7} in member_values
>>> assert {'A'} in member_values
"""
mapping = {}
members = {}
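    # The collapsed graph uses the same class as G (Graph, DiGraph, ...).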
C = G.__class__()
i = 0 # required if G is empty
remaining = set(G.nodes())
for i, group in enumerate(grouped_nodes):
group = set(group)
assert remaining.issuperset(group), (
'grouped nodes must exist in G and be disjoint')
remaining.difference_update(group)
members[i] = group
mapping.update((n, i) for n in group)
# remaining nodes are in their own group
for i, node in enumerate(remaining, start=i + 1):
group = set([node])
members[i] = group
mapping.update((n, i) for n in group)
number_of_groups = i + 1
C.add_nodes_from(range(number_of_groups))
C.add_edges_from((mapping[u], mapping[v]) for u, v in G.edges()
if mapping[u] != mapping[v])
# Add a list of members (ie original nodes) to each node (ie scc) in C.
nx.set_node_attributes(C, name='members', values=members)
# Add mapping dict as graph attribute
C.graph['mapping'] = mapping
return C
def complement_edges(G):
"""Returns only the edges in the complement of G
Parameters
----------
G : NetworkX Graph
Yields
------
edge : tuple
Edges in the complement of G
Example
-------
>>> G = nx.path_graph((1, 2, 3, 4))
>>> sorted(complement_edges(G))
[(1, 3), (1, 4), (2, 4)]
>>> G = nx.path_graph((1, 2, 3, 4), nx.DiGraph())
>>> sorted(complement_edges(G))
[(1, 3), (1, 4), (2, 1), (2, 4), (3, 1), (3, 2), (4, 1), (4, 2), (4, 3)]
>>> G = nx.complete_graph(1000)
>>> sorted(complement_edges(G))
[]
"""
if G.is_directed():
for u, v in it.combinations(G.nodes(), 2):
if v not in G.adj[u]:
yield (u, v)
if u not in G.adj[v]:
yield (v, u)
else:
for u, v in it.combinations(G.nodes(), 2):
if v not in G.adj[u]:
yield (u, v)
if sys.version_info[0] == 2:
def _compat_shuffle(rng, input):
"""
python2 workaround so shuffle works the same as python3
References
----------
https://stackoverflow.com/questions/38943038/diff-shuffle-py2-py3
"""
def _randbelow(n):
"Return a random int in the range [0,n). Raises ValueError if n==0."
getrandbits = rng.getrandbits
k = n.bit_length() # don't use (n-1) here because n can be 1
r = getrandbits(k) # 0 <= r < 2**k
while r >= n:
r = getrandbits(k)
return r
for i in range(len(input) - 1, 0, -1):
# pick an element in input[:i+1] with which to exchange input[i]
j = _randbelow(i + 1)
input[i], input[j] = input[j], input[i]
else:
def _compat_shuffle(rng, input):
"""wrapper around rng.shuffle for python 2 compatibility reasons"""
rng.shuffle(input)
@py_random_state(4)
@not_implemented_for('multigraph')
@not_implemented_for('directed')
def greedy_k_edge_augmentation(G, k, avail=None, weight=None, seed=None):
"""Greedy algorithm for finding a k-edge-augmentation
Parameters
----------
G : NetworkX graph
An undirected graph.
k : integer
Desired edge connectivity
avail : dict or a set of 2 or 3 tuples
For more details, see :func:`k_edge_augmentation`.
weight : string
key to use to find weights if ``avail`` is a set of 3-tuples.
For more details, see :func:`k_edge_augmentation`.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Yields
------
edge : tuple
Edges in the greedy augmentation of G
Notes
-----
The algorithm is simple. Edges are incrementally added between parts of the
    graph that are not yet locally k-edge-connected. Then edges from the
    augmenting set are pruned as long as local-edge-connectivity is not broken.
This algorithm is greedy and does not provide optimality guarantees. It
exists only to provide :func:`k_edge_augmentation` with the ability to
generate a feasible solution for arbitrary k.
See Also
--------
:func:`k_edge_augmentation`
Example
-------
>>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7))
>>> sorted(greedy_k_edge_augmentation(G, k=2))
[(1, 7)]
>>> sorted(greedy_k_edge_augmentation(G, k=1, avail=[]))
[]
>>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7))
>>> avail = {(u, v): 1 for (u, v) in complement_edges(G)}
>>> # randomized pruning process can produce different solutions
>>> sorted(greedy_k_edge_augmentation(G, k=4, avail=avail, seed=2))
[(1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (2, 4), (2, 6), (3, 7), (5, 7)]
>>> sorted(greedy_k_edge_augmentation(G, k=4, avail=avail, seed=3))
[(1, 3), (1, 5), (1, 6), (2, 4), (2, 6), (3, 7), (4, 7), (5, 7)]
"""
# Result set
aug_edges = []
done = is_k_edge_connected(G, k)
if done:
return
if avail is None:
# all edges are available
avail_uv = list(complement_edges(G))
avail_w = [1] * len(avail_uv)
else:
# Get the unique set of unweighted edges
avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G)
# Greedy: order lightest edges. Use degree sum to tie-break
tiebreaker = [sum(map(G.degree, uv)) for uv in avail_uv]
avail_wduv = sorted(zip(avail_w, tiebreaker, avail_uv))
avail_uv = [uv for w, d, uv in avail_wduv]
# Incrementally add edges in until we are k-connected
H = G.copy()
for (u, v) in avail_uv:
done = False
if not is_locally_k_edge_connected(H, u, v, k=k):
# Only add edges in parts that are not yet locally k-edge-connected
aug_edges.append((u, v))
H.add_edge(u, v)
# Did adding this edge help?
if H.degree(u) >= k and H.degree(v) >= k:
done = is_k_edge_connected(H, k)
if done:
break
# Check for feasibility
if not done:
raise nx.NetworkXUnfeasible(
'not able to k-edge-connect with available edges')
# Randomized attempt to reduce the size of the solution
_compat_shuffle(seed, aug_edges)
for (u, v) in list(aug_edges):
# Don't remove if we know it would break connectivity
if H.degree(u) <= k or H.degree(v) <= k:
continue
H.remove_edge(u, v)
aug_edges.remove((u, v))
if not is_k_edge_connected(H, k=k):
# If removing this edge breaks feasibility, undo
H.add_edge(u, v)
aug_edges.append((u, v))
# Generate results
for edge in aug_edges:
yield edge
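A minimal usage sketch of the greedy routine above (assuming `greedy_k_edge_augmentation` and `complement_edges` from this module are in scope, as in the doctests): augment a path graph to 3-edge-connectivity and verify the result. The exact edge set returned depends on the seed.
import networkx as nx

G = nx.path_graph(7)
# offer every non-edge with unit weight, mirroring the avail construction in the doctest
avail = {(u, v): 1 for (u, v) in complement_edges(G)}
aug = list(greedy_k_edge_augmentation(G, k=3, avail=avail, seed=0))
H = G.copy()
H.add_edges_from(aug)
assert nx.edge_connectivity(H) >= 3  # the augmented graph is now 3-edge-connected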
|
9cbd0c5526e3319c81647104695fe0b54125698c
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/HLTriggerOffline/Egamma/test/testEmDQM_cfg.py
|
90db91bc81ddf0165851a2fb530db0979bbe6661
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 3,172
|
py
|
testEmDQM_cfg.py
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
process = cms.Process("emdqm")
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = 'START72_V1::All'
process.load("FWCore.MessageService.MessageLogger_cfi")
# suppress printout of error messages on every event when a collection is missing in the event
process.MessageLogger.cerr.EmDQMInvalidRefs = cms.untracked.PSet(limit = cms.untracked.int32(5))
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
#process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(2) )
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'file:../../../HLTrigger/Configuration/test/outputAForPP.root',
# '/store/relval/CMSSW_7_1_0_pre7/RelValH130GGgluonfusion_13/GEN-SIM-DIGI-RAW-HLTDEBUG/PRE_LS171_V7-v1/00000/C87CDC3A-B1D0-E311-8890-02163E00E6DE.root',
# '/store/relval/CMSSW_7_1_0_pre7/RelValWE_13/GEN-SIM-DIGI-RAW-HLTDEBUG/PRE_LS171_V7-v1/00000/665BE840-B4D0-E311-BBA6-02163E00E694.root',
# '/store/relval/CMSSW_7_1_0_pre7/RelValPhotonJets_Pt_10_13/GEN-SIM-DIGI-RAW-HLTDEBUG/PRE_LS171_V7-v1/00000/C0AB31B9-A2D0-E311-A15D-02163E00E725.root',
)
)
process.load("HLTriggerOffline.Egamma.EgammaValidationAutoConf_cff")
# set output to verbose = all
process.emdqm.verbosity = cms.untracked.uint32(3)
# switch to select between only MC matched histograms or all histograms
process.emdqm.mcMatchedOnly = cms.untracked.bool(False)
# switch for phi plots
process.emdqm.noPhiPlots = cms.untracked.bool(False)
# switch for 2D isolation plots
process.emdqm.noIsolationPlots = cms.untracked.bool(False)
# which trigger object and process should we run on?
#process.emdqm.triggerobject = cms.InputTag("hltTriggerSummaryRAW","","HLTTEST")
process.p = cms.Path(
# require generated particles in fiducial volume
process.egammaSelectors *
process.egammaValidationSequence
)
#----------------------------------------
process.post=DQMEDHarvester("EmDQMPostProcessor",
subDir = cms.untracked.string("HLT/HLTEgammaValidation"),
dataSet = cms.untracked.string("unknown"),
noPhiPlots = cms.untracked.bool(False),
ignoreEmpty = cms.untracked.bool(False),
)
#process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True))
#----------------------------------------
# DQM service
#----------------------------------------
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.dqmSaver.convention = 'Offline'
process.dqmSaver.workflow = '/RelVal/HLTriggerOffline/Egamma'
process.dqmSaver.saveByRun = cms.untracked.int32(-1)
process.dqmSaver.saveAtJobEnd = cms.untracked.bool(True)
process.ppost = cms.EndPath(process.post+process.dqmSaver)
#----------------------------------------
# End of original testEmDQM_cfg.py
#----------------------------------------
|
b26457033e7b40855c77b3e4d44e7e680fc58722
|
0fad22883f6464f168e2c5046af3b0a48cdfa6c3
|
/datasets/wflw.py
|
8d9a9cd89f96b74ca175b2e0c0d9c5ee5bd16a9e
|
[
"MIT"
] |
permissive
|
browatbn2/3FabRec
|
ca10cbe2f8efebb26afc1b0fcc74625b2d06cffd
|
c7668c2ffc39d74f41e6bff6e4ec05c687a0c888
|
refs/heads/master
| 2023-08-08T20:34:00.099589
| 2022-06-22T10:47:28
| 2022-06-22T10:47:28
| 251,210,646
| 131
| 20
|
MIT
| 2023-07-23T10:18:01
| 2020-03-30T05:32:09
|
Python
|
UTF-8
|
Python
| false
| false
| 4,314
|
py
|
wflw.py
|
import os
import numpy as np
import torch.utils.data as td
import pandas as pd
import config
from csl_common.utils import geometry
from datasets.facedataset import FaceDataset
SUBSETS = ['pose', 'illumination', 'expression', 'make-up', 'occlusion', 'blur']
class WFLW(FaceDataset):
NUM_LANDMARKS = 98
ALL_LANDMARKS = list(range(NUM_LANDMARKS))
LANDMARKS_NO_OUTLINE = list(range(33,NUM_LANDMARKS))
LANDMARKS_ONLY_OUTLINE = list(range(33))
def __init__(self, root, cache_root=None, return_landmark_heatmaps=True, **kwargs):
fullsize_img_dir=os.path.join(root, 'WFLW_images')
super().__init__(root=root, cache_root=cache_root, fullsize_img_dir=fullsize_img_dir,
return_landmark_heatmaps=return_landmark_heatmaps, **kwargs)
def _init(self):
if not self.train:
if self.test_split in SUBSETS:
self.filter_labels({self.test_split:1})
def parse_groundtruth_txt(self, gt_txt_file):
num_lm_cols = self.NUM_LANDMARKS * 2
columns_names = [
'x',
'y',
'x2' ,
'y2',
'pose',
'expression',
'illumination',
'make-up',
'occlusion',
'blur',
'fname'
]
ann = pd.read_csv(gt_txt_file,
header=None,
sep=' ',
usecols=range(num_lm_cols, num_lm_cols+11),
names=columns_names)
ann['w'] = ann['x2'] - ann['x']
ann['h'] = ann['y2'] - ann['y']
landmarks = pd.read_csv(gt_txt_file,
header=None,
sep=' ',
usecols=range(0, num_lm_cols)).values
ann['landmarks'] = [i for i in landmarks.reshape((-1, num_lm_cols//2, 2))]
return ann
def _load_annotations(self, split_name):
split_name = 'train' if self.train else 'test'
annotation_filename = os.path.join(self.cache_root, '{}_{}.pkl'.format(self.name, split_name))
if os.path.isfile(annotation_filename):
ann = pd.read_pickle(annotation_filename)
else:
print('Reading txt file...')
gt_txt_file = os.path.join(self.root,
'WFLW_annotations',
'list_98pt_rect_attr_train_test',
'list_98pt_rect_attr_'+split_name+'.txt')
ann = self.parse_groundtruth_txt(gt_txt_file)
ann.to_pickle(annotation_filename)
print('done.')
return ann
def __getitem__(self, idx):
sample = self.annotations.iloc[idx]
bb = [sample.x, sample.y, sample.x+sample.w, sample.y+sample.h]
bb = geometry.extend_bbox(bb, db=0.1)
face_id = int(sample.name)
landmarks_for_crop = sample.landmarks.astype(np.float32) if self.crop_source == 'lm_ground_truth' else None
return self.get_sample(sample.fname, landmarks_for_crop=landmarks_for_crop, bb=bb, id=face_id,
landmarks_to_return=sample.landmarks.astype(np.float32))
config.register_dataset(WFLW)
if __name__ == '__main__':
from csl_common.utils.nn import Batch
from csl_common.utils.common import init_random
from csl_common.utils.ds_utils import build_transform
from csl_common.vis import vis
import config
init_random(3)
path = config.get_dataset_paths('wflw')[0]
ds = WFLW(root=path, train=False, deterministic=True, use_cache=False, daug=0, image_size=256,
transform=build_transform(deterministic=False, daug=0))
ds.filter_labels({'pose': 1, 'occlusion':0, 'make-up':1})
dl = td.DataLoader(ds, batch_size=10, shuffle=False, num_workers=0)
print(ds)
for data in dl:
batch = Batch(data, gpu=False)
images = vis.to_disp_images(batch.images, denorm=True)
# lms = lmutils.convert_landmarks(to_numpy(batch.landmarks), lmutils.LM98_TO_LM68)
lms = batch.landmarks
images = vis.add_landmarks_to_images(images, lms, draw_wireframe=False, color=(0,255,0), radius=3)
vis.vis_square(images, nCols=10, fx=1., fy=1., normalize=False)
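As a side note on the annotation format parsed above: the first 196 columns of each row in the WFLW txt file are x/y pairs for 98 landmarks, which parse_groundtruth_txt reshapes into (98, 2) arrays. A tiny, self-contained illustration with hypothetical values:
import numpy as np

flat = np.arange(196, dtype=np.float32)    # stand-in for the first 196 columns of one row
landmarks = flat.reshape(98, 2)            # same reshape as in parse_groundtruth_txt
assert landmarks.shape == (98, 2)
assert (landmarks[0] == [0.0, 1.0]).all()  # first landmark is (x0, y0)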
|
ca5ba18fd52f1ca72b58e91e188275f18b6c5d5c
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/api/dataflow/flow/tasks/deploy/job/job_handler.py
|
cabc8497634546540e3a863150f086fa9981de9d
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 15,385
|
py
|
job_handler.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import random
import time
from common.exceptions import ApiRequestError
from common.local import get_request_username
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from dataflow.component.exceptions.comp_execptions import JobNotExistsException
from dataflow.flow import exceptions as Errors
from dataflow.flow.api_models import BatchJob, ModelAppJob, StreamJob
from dataflow.flow.utils.language import Bilingual
from dataflow.modeling.job.job_config_controller import ModelingJobConfigController
from dataflow.modeling.model.model_controller import ModelController
from dataflow.models import ProcessingJobInfo
from dataflow.pizza_settings import BATCH_MODULE_NAME, MODEL_APP_MODULE_NAME, STREAM_MODULE_NAME
from dataflow.shared.batch.batch_helper import BatchHelper
from dataflow.shared.jobnavi.jobnavi_helper import JobNaviHelper
from dataflow.shared.log import flow_logger as logger
from dataflow.shared.meta.tag.tag_helper import TagHelper
from dataflow.shared.modeling.modeling_helper import ModelingHelper
from dataflow.shared.stream.stream_helper import StreamHelper
from dataflow.stream.handlers.job_handler import JobHandler as StreamJobHandler
from dataflow.udf.debug.debug_driver import generate_job_config as udf_generate_job_config
class JobHandler(object):
def __init__(self, job_id):
self.job_id = job_id
self._job_info = None
@classmethod
def build_job_params(
cls,
module,
component_type,
project_id,
cluster_group,
code_version,
processings,
job_config,
deploy_config,
):
geog_area_code = TagHelper.get_geog_area_code_by_project(project_id)
jobserver_config = {"geog_area_code": geog_area_code, "cluster_id": JobNaviHelper.get_jobnavi_cluster(module)}
params = {}
if module == STREAM_MODULE_NAME:
params = {
"project_id": project_id,
"code_version": code_version,
"cluster_group": cluster_group,
"job_config": job_config,
"deploy_config": deploy_config,
"processings": processings,
"jobserver_config": jobserver_config,
"component_type": component_type,
}
elif module == BATCH_MODULE_NAME:
params = {
"processing_id": processings[0],
"code_version": code_version,
"cluster_group": cluster_group,
"cluster_name": "",
"deploy_mode": "yarn",
"deploy_config": json.dumps(deploy_config),
"job_config": job_config,
"project_id": project_id,
"jobserver_config": jobserver_config,
}
elif module == MODEL_APP_MODULE_NAME:
params = {
"processing_id": processings[0],
"code_version": code_version,
"cluster_group": cluster_group,
"cluster_name": "",
"deploy_mode": "yarn",
"deploy_config": json.dumps(deploy_config),
"job_config": job_config,
"project_id": project_id,
"jobserver_config": jobserver_config,
}
return params
@classmethod
def create_job(
cls,
module,
component_type,
project_id,
cluster_group,
code_version,
processings,
job_config,
deploy_config,
):
params = JobHandler.build_job_params(
module,
component_type,
project_id,
cluster_group,
code_version,
processings,
job_config,
deploy_config,
)
if module == STREAM_MODULE_NAME:
o_job = StreamJob.create(params, get_request_username())
elif module == BATCH_MODULE_NAME:
o_job = BatchJob.create(params, get_request_username())
elif module == MODEL_APP_MODULE_NAME:
o_job = ModelAppJob.create(params, get_request_username())
return {"job_id": o_job.job_id}
def update_job(
self,
module,
component_type,
project_id,
cluster_group,
code_version,
processings,
job_config,
deploy_config,
):
params = JobHandler.build_job_params(
module,
component_type,
project_id,
cluster_group,
code_version,
processings,
job_config,
deploy_config,
)
if module == STREAM_MODULE_NAME:
o_job = StreamJob(job_id=self.job_id)
o_job.update(params, get_request_username())
elif module == BATCH_MODULE_NAME:
o_job = BatchJob(job_id=self.job_id)
o_job.update(params, get_request_username())
elif module == MODEL_APP_MODULE_NAME:
o_job = ModelAppJob(job_id=self.job_id)
o_job.update(params, get_request_username())
return {"job_id": self.job_id}
def start(self, module, params):
if module == STREAM_MODULE_NAME:
# get code version
self.log(Bilingual(ugettext_noop("获取作业代码版本")))
jar_name = StreamHelper.get_code_version(self.job_id)["jar_name"]
# register
self.log(Bilingual(ugettext_noop("开始注册作业拓扑")))
api_params = {
"job_id": self.job_id,
"jar_name": jar_name,
"geog_area_code": params["tags"][0],
}
conf = StreamHelper.register_job(**api_params)
# submit
api_params = {"conf": conf, "job_id": self.job_id}
self.log(Bilingual(ugettext_noop("开始将作业提交至集群")))
return StreamHelper.submit_job(**api_params)
elif module == BATCH_MODULE_NAME:
index = 1
max_times = 3
while True:
try:
params["job_id"] = self.job_id
BatchHelper.start_job(**params)
break
except ApiRequestError as e:
self.log(
Bilingual(
ugettext_noop("第{n}次尝试 batch.start_job 失败,error={err}".format(n=index, err=e.message))
)
)
index += 1
if index > max_times:
raise Errors.FlowTaskError(
Bilingual(ugettext_noop("连续{n}次尝试 batch.start_job 均失败,流程中止".format(n=index)))
)
time.sleep(random.uniform(1, 2))
return {}
elif module == MODEL_APP_MODULE_NAME:
index = 1
max_times = 3
while True:
try:
params["job_id"] = self.job_id
ModelingHelper.start_job(**params)
break
except ApiRequestError as e:
self.log(
Bilingual(
ugettext_noop("第{n}次尝试 model_app.start_job 失败,error={err}".format(n=index, err=e.message))
)
)
index += 1
if index > max_times:
raise Errors.FlowTaskError(
Bilingual(ugettext_noop("连续{n}次尝试 model_app.start_job 均失败,流程中止".format(n=index)))
)
time.sleep(random.uniform(1, 2))
return {}
def sync_status_inner(self, operate_info, operate):
success_status = "ACTIVE" if operate != "stop" else None
for i in range(36):
api_params = {"job_id": self.job_id, "operate_info": operate_info}
data = StreamHelper.sync_status(**api_params)
if data is None:
raise Errors.FlowTaskError(_("同步 JOB 状态失败,返回状态信息为空"))
job_status = data.get(self.job_id)
if job_status == success_status:
self.log(Bilingual(ugettext_noop("确认完毕")))
return True
time.sleep(5)
def sync_status(self, operate_info):
_operate_info = json.loads(operate_info)
operate = _operate_info["operate"]
if operate == "stop":
self.log(Bilingual(ugettext_noop("确认 JOB 状态是否已清除")))
if not self.sync_status_inner(operate_info, operate):
raise Errors.FlowTaskError(_("多次轮询,JOB 依旧处于激活状态"))
status = "INACTIVE"
else:
self.log(Bilingual(ugettext_noop("确认 JOB 是否处于激活状态")))
if not self.sync_status_inner(operate_info, operate):
# Sync failed: start force-killing the task
res_data = False
count = 0
delta = 2
kill_count = 3
while not res_data and count < kill_count:
try:
res_data = StreamHelper.force_kill(self.context["job_id"], 50)
except Exception as e:
raise Errors.FlowTaskError(_("多次轮询,未检测到 JOB 激活状态,强制停止作业失败,明细({})").format(str(e)))
if not res_data:
time.sleep(delta)
count = count + 1
if count >= kill_count:
raise Errors.FlowTaskError(_("启动超时,强制停止作业失败,请联系管理员"))
raise Errors.FlowTaskError(_("多次轮询,未检测到 JOB 激活状态"))
status = "ACTIVE"
return {self.job_id: status}
def stop(self, module):
if module == STREAM_MODULE_NAME:
api_params = {"job_id": self.job_id}
self.log(Bilingual(ugettext_noop("开始停止作业")))
data = StreamHelper.cancel_job(**api_params)
if not data:
raise Errors.FlowTaskError(_("停止作业失败,返回配置为空"))
self.log(Bilingual(ugettext_noop("停止成功")))
return data
elif module == BATCH_MODULE_NAME:
self.log(Bilingual(ugettext_noop("停止离线调度进程")))
api_param = {"job_id": self.job_id}
BatchHelper.stop_job(**api_param)
return {}
elif module == MODEL_APP_MODULE_NAME:
self.log(Bilingual(ugettext_noop("停止离线调度进程")))
api_param = {"job_id": self.job_id}
ModelingHelper.stop_job(**api_param)
return {}
def force_kill(self, timeout=180):
try:
job_info = ProcessingJobInfo.objects.get(job_id=self.job_id)
except ProcessingJobInfo.DoesNotExist:
return None
if job_info.component_type not in ["flink", "spark_structured_streaming"]:
raise Errors.ValidError(_("当前任务非 flink 任务,不支持 force_kill"))
return StreamHelper.force_kill(self.job_id, timeout)
@property
def job_info(self):
if not self._job_info:
self._job_info = ProcessingJobInfo.objects.get(job_id=self.job_id)
return self._job_info
def delete(self):
if self.job_info.processing_type == STREAM_MODULE_NAME:
StreamHelper.delete_job(self.job_id)
elif self.job_info.processing_type == BATCH_MODULE_NAME:
BatchHelper.delete_job(self.job_id)
elif self.job_info.processing_type == MODEL_APP_MODULE_NAME:
ModelingHelper.delete_job(self.job_id)
else:
raise Errors.FlowTaskError(_("删除作业失败,不支持的 module 类型: %s" % self.job_info.processing_type))
def log(self, msg):
# TODO: merge into one with the method in node_task
logger.info(msg)
def generate_job_config(self, run_mode, job_type):
if job_type == "flink":
if run_mode == "product":
# flink-code, flink-sql
return StreamJobHandler(self.job_id, False).generate_job_config()
elif run_mode == "debug":
# flink-code-debug, flink-sql-debug
return StreamJobHandler(self.job_id, True).generate_job_config()
elif run_mode == "udf_debug":
# flink-sql-udf-debug
return udf_generate_job_config(self.job_id, job_type)
elif job_type == "spark_structured_streaming":
return StreamJobHandler(self.job_id, False).generate_job_config()
elif job_type == "spark_mllib":
if run_mode == "product":
return ModelingJobConfigController(self.job_id, job_type, run_mode, False).generate_job_config()
elif run_mode == "debug":
return ModelingJobConfigController(self.job_id, job_type, run_mode, True).generate_debug_config()
elif run_mode == "release_debug":
return ModelController().generate_release_debug_config(self.job_id)
elif job_type == "tensorflow":
if run_mode == "product":
return ModelingJobConfigController(self.job_id, job_type, run_mode, False).generate_job_config()
else:
raise Exception("暂不支持tensorflow运行模式:{}".format(run_mode))
elif job_type in ["spark_sql", "hive", "spark_streaming"]:
raise Exception("暂不支持获取 %s 任务类型配置" % job_type)
raise JobNotExistsException(
"作业信息配置不存在,job_type: {job_type}, {run_mode}".format(job_type=job_type, run_mode=run_mode)
)
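The start method above retries the batch/model-app submission up to three times with a short random sleep between attempts. A generic, self-contained sketch of that retry pattern (illustrative only; start_job is a hypothetical callable, not the BK-BASE helper API):
import random
import time

def start_with_retry(start_job, params, max_times=3):
    """Retry a submission call a few times before giving up (sketch of the pattern used above)."""
    attempt = 1
    while True:
        try:
            return start_job(**params)
        except RuntimeError as err:              # stand-in for ApiRequestError
            if attempt >= max_times:
                raise RuntimeError("start_job failed %d times, aborting" % attempt) from err
            attempt += 1
            time.sleep(random.uniform(1, 2))     # small jitter before the next attempt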
|
6c11c1eb3e42819c5835e83f39d17c1db0b61051
|
263170e7dca79883314273bb35aef1449e018361
|
/src/pynguin/analyses/__init__.py
|
1493993821b2d34004fb726beec27663265c47f9
|
[
"CC-BY-4.0",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
se2p/pynguin
|
029cfd9c43c08a2f687a816749828054e409646e
|
cc083252c7054824bfaf200533a8b7ad45f7c4fb
|
refs/heads/main
| 2023-08-23T16:58:04.568755
| 2023-08-18T13:11:44
| 2023-08-18T13:11:44
| 282,944,472
| 1,223
| 65
|
MIT
| 2023-08-18T13:12:29
| 2020-07-27T15:50:19
|
Python
|
UTF-8
|
Python
| false
| false
| 183
|
py
|
__init__.py
|
# This file is part of Pynguin.
#
# SPDX-FileCopyrightText: 2019-2023 Pynguin Contributors
#
# SPDX-License-Identifier: MIT
#
"""Provides analyses on the SUT needed by Pynguin."""
|
87612311b829922f4905603cf86284daf6a35012
|
4caa087dcb95a6a7dbe8cc49fde383e9f2aa4426
|
/mmtrack/datasets/reid_dataset.py
|
5ab20fb733d20a641521bcbb2dce25a9cbaf0569
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmtracking
|
1e55c69cc1a264b3c9546c19332a38e9621430ed
|
e79491ec8f0b8c86fda947fbaaa824c66ab2a991
|
refs/heads/master
| 2023-09-01T15:41:04.322684
| 2023-04-25T13:25:18
| 2023-04-25T13:25:18
| 291,213,368
| 3,263
| 604
|
Apache-2.0
| 2023-08-26T04:05:00
| 2020-08-29T06:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 7,604
|
py
|
reid_dataset.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from collections import defaultdict
import numpy as np
import torch
from mmcls.datasets import BaseDataset
from mmdet.datasets import DATASETS
from mmdet.datasets.pipelines import Compose
@DATASETS.register_module()
class ReIDDataset(BaseDataset):
"""Dataset for ReID Dataset.
Args:
pipeline (list): a list of dict, where each element represents
an operation defined in `mmtrack.datasets.pipelines`
triplet_sampler (dict): The sampler for hard mining triplet loss.
"""
def __init__(self, pipeline, triplet_sampler=None, *args, **kwargs):
super().__init__(pipeline=[], *args, **kwargs)
self.triplet_sampler = triplet_sampler
self.pipeline = Compose(pipeline)
# for DistributedGroupSampler and GroupSampler
self.flag = np.zeros(len(self), dtype=np.uint8)
def load_annotations(self):
"""Load annotations from ImageNet style annotation file.
Returns:
list[dict]: Annotation information from ReID api.
"""
assert isinstance(self.ann_file, str)
data_infos = []
with open(self.ann_file) as f:
samples = [x.strip().split(' ') for x in f.readlines()]
for filename, gt_label in samples:
info = dict(img_prefix=self.data_prefix)
info['img_info'] = dict(filename=filename)
info['gt_label'] = np.array(gt_label, dtype=np.int64)
data_infos.append(info)
self._parse_ann_info(data_infos)
return data_infos
def _parse_ann_info(self, data_infos):
"""Parse person id annotations."""
index_tmp_dic = defaultdict(list)
self.index_dic = dict()
for idx, info in enumerate(data_infos):
pid = info['gt_label']
index_tmp_dic[int(pid)].append(idx)
for pid, idxs in index_tmp_dic.items():
self.index_dic[pid] = np.asarray(idxs, dtype=np.int64)
self.pids = np.asarray(list(self.index_dic.keys()), dtype=np.int64)
def triplet_sampling(self, pos_pid, num_ids=8, ins_per_id=4):
"""Triplet sampler for hard mining triplet loss. First, for one
pos_pid, random sample ins_per_id images with same person id.
Then, random sample num_ids - 1 negative ids.
Finally, random sample ins_per_id images for each negative id.
Args:
pos_pid (ndarray): The person id of the anchor.
num_ids (int): The number of person ids.
ins_per_id (int): The number of image for each person.
Returns:
List: Annotation information of num_ids X ins_per_id images.
"""
assert len(self.pids) >= num_ids, \
'The number of person ids in the training set must ' \
'be greater than the number of person ids in the sample.'
pos_idxs = self.index_dic[int(pos_pid)]
idxs_list = []
# select positive samplers
idxs_list.extend(pos_idxs[np.random.choice(
pos_idxs.shape[0], ins_per_id, replace=True)])
# select negative ids
neg_pids = np.random.choice(
[i for i, _ in enumerate(self.pids) if i != pos_pid],
num_ids - 1,
replace=False)
# select negative samplers for each negative id
for neg_pid in neg_pids:
neg_idxs = self.index_dic[neg_pid]
idxs_list.extend(neg_idxs[np.random.choice(
neg_idxs.shape[0], ins_per_id, replace=True)])
triplet_img_infos = []
for idx in idxs_list:
triplet_img_infos.append(copy.deepcopy(self.data_infos[idx]))
return triplet_img_infos
def prepare_data(self, idx):
"""Prepare results for image (e.g. the annotation information, ...)."""
data_info = self.data_infos[idx]
if self.triplet_sampler is not None:
img_infos = self.triplet_sampling(data_info['gt_label'],
**self.triplet_sampler)
results = copy.deepcopy(img_infos)
else:
results = copy.deepcopy(data_info)
return self.pipeline(results)
def evaluate(self,
results,
metric='mAP',
metric_options=None,
logger=None):
"""Evaluate the ReID dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
Default value is `mAP`.
metric_options: (dict, optional): Options for calculating metrics.
Allowed keys are 'rank_list' and 'max_rank'. Defaults to None.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Defaults to None.
Returns:
dict: evaluation results
"""
if metric_options is None:
metric_options = dict(rank_list=[1, 5, 10, 20], max_rank=20)
for rank in metric_options['rank_list']:
assert rank >= 1 and rank <= metric_options['max_rank']
if isinstance(metric, list):
metrics = metric
elif isinstance(metric, str):
metrics = [metric]
else:
raise TypeError('metric must be a list or a str.')
allowed_metrics = ['mAP', 'CMC']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported.')
# distance
results = [result.data.cpu() for result in results]
features = torch.stack(results)
n, c = features.size()
mat = torch.pow(features, 2).sum(dim=1, keepdim=True).expand(n, n)
distmat = mat + mat.t()
distmat.addmm_(features, features.t(), beta=1, alpha=-2)
distmat = distmat.numpy()
pids = self.get_gt_labels()
indices = np.argsort(distmat, axis=1)
matches = (pids[indices] == pids[:, np.newaxis]).astype(np.int32)
all_cmc = []
all_AP = []
num_valid_q = 0.
for q_idx in range(n):
# remove self
raw_cmc = matches[q_idx][1:]
if not np.any(raw_cmc):
# this condition is true when query identity
# does not appear in gallery
continue
cmc = raw_cmc.cumsum()
cmc[cmc > 1] = 1
all_cmc.append(cmc[:metric_options['max_rank']])
num_valid_q += 1.
# compute average precision
# reference:
# https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision
num_rel = raw_cmc.sum()
tmp_cmc = raw_cmc.cumsum()
tmp_cmc = [x / (i + 1.) for i, x in enumerate(tmp_cmc)]
tmp_cmc = np.asarray(tmp_cmc) * raw_cmc
AP = tmp_cmc.sum() / num_rel
all_AP.append(AP)
assert num_valid_q > 0, \
'Error: all query identities do not appear in gallery'
all_cmc = np.asarray(all_cmc).astype(np.float32)
all_cmc = all_cmc.sum(0) / num_valid_q
mAP = np.mean(all_AP)
eval_results = dict()
if 'mAP' in metrics:
eval_results['mAP'] = np.around(mAP, decimals=3)
if 'CMC' in metrics:
for rank in metric_options['rank_list']:
eval_results[f'R{rank}'] = np.around(
all_cmc[rank - 1], decimals=3)
return eval_results
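The pairwise distance matrix in evaluate relies on the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, computed in place with addmm_. A small, self-contained check of that identity on random embeddings (illustrative only; shapes are hypothetical):
import torch

features = torch.randn(5, 8)                               # hypothetical (n, c) embeddings
n = features.size(0)
mat = features.pow(2).sum(dim=1, keepdim=True).expand(n, n)
distmat = mat + mat.t()
distmat.addmm_(features, features.t(), beta=1, alpha=-2)   # ||a||^2 + ||b||^2 - 2 a.b
reference = torch.cdist(features, features).pow(2)
assert torch.allclose(distmat, reference, atol=1e-4)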
|
822d74047dc5eaf176a3a714894515d08ffbc018
|
94d3ef554f7931d2aad799eb6bcfa18104dc9bed
|
/ndlib/models/opinions/MajorityRuleModel.py
|
fa56098cad5ea21a48e805ab4ede036683544e2b
|
[
"BSD-2-Clause"
] |
permissive
|
GiulioRossetti/ndlib
|
76b5a86a2b521cd68197218aea235e20bcb8d1f4
|
900cb3727795c97a73e59fdb736aa736c4d17157
|
refs/heads/master
| 2023-09-03T17:09:00.069129
| 2023-07-21T13:57:29
| 2023-07-21T13:57:29
| 59,556,819
| 265
| 82
|
BSD-2-Clause
| 2023-08-17T03:54:33
| 2016-05-24T08:53:47
|
Python
|
UTF-8
|
Python
| false
| false
| 3,802
|
py
|
MajorityRuleModel.py
|
from ..DiffusionModel import DiffusionModel
import numpy as np
__author__ = "Alina Sirbu"
__email__ = "alina.sirbu@unipi.it"
class MajorityRuleModel(DiffusionModel):
""" """
def __init__(self, graph, seed=None):
"""
Model Constructor
:param graph: A networkx graph object
"""
super(self.__class__, self).__init__(graph, seed)
self.available_statuses = {
"Susceptible": 0,
"Infected": 1,
}
self.parameters = {
"model": {
"q": {
"descr": "Number of randomly chosen voters",
"range": [0, len(self.graph.nodes)],
"optional": False,
}
},
"nodes": {},
"edges": {},
}
self.name = "Majority Rule"
def iteration(self, node_status=True):
"""
Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status)
"""
# One iteration changes the opinion of at most q voters using the following procedure:
# - select randomly q voters
# - compute majority opinion
# - if tie all agents take opinion +1
# - if not tie, all agents take majority opinion
self.clean_initial_status(self.available_statuses.values())
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(self.status)
if node_status:
return {
"iteration": 0,
"status": self.status.copy(),
"node_count": node_count.copy(),
"status_delta": status_delta.copy(),
}
else:
return {
"iteration": 0,
"status": {},
"node_count": node_count.copy(),
"status_delta": status_delta.copy(),
}
# select q random nodes
discussion_group = [
list(self.graph.nodes)[i]
for i in np.random.randint(
0, self.graph.number_of_nodes(), self.params["model"]["q"]
)
]
# compute majority
majority_vote = 1
vote_sum = sum([self.status[node] for node in discussion_group])
if vote_sum < (self.params["model"]["q"] / 2.0):
majority_vote = 0 # in case of tie, majority_vote remains 1
# update status of nodes in discussion group
delta = {}
status_delta = {st: 0 for st in self.available_statuses.values()}
for listener in discussion_group:
if majority_vote != self.status[listener]:
delta[listener] = majority_vote
status_delta[self.status[listener]] += 1
for x in self.available_statuses.values():
if x != self.status[listener]:
status_delta[x] -= 1
self.status[listener] = majority_vote
# fix
node_count = {
st: len([n for n in self.status if self.status[n] == st])
for st in self.available_statuses.values()
}
self.actual_iteration += 1
if node_status:
return {
"iteration": self.actual_iteration - 1,
"status": delta.copy(),
"node_count": node_count.copy(),
"status_delta": status_delta.copy(),
}
else:
return {
"iteration": self.actual_iteration - 1,
"status": {},
"node_count": node_count.copy(),
"status_delta": status_delta.copy(),
}
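A short usage sketch, following the pattern of other ndlib opinion models (parameter names as documented for ndlib; graph size and q are arbitrary here):
import networkx as nx
import ndlib.models.ModelConfig as mc
from ndlib.models.opinions.MajorityRuleModel import MajorityRuleModel

g = nx.erdos_renyi_graph(1000, 0.1)
model = MajorityRuleModel(g)
config = mc.Configuration()
config.add_model_parameter("q", 5)                    # size of the randomly chosen discussion group
config.add_model_parameter("fraction_infected", 0.3)  # initial share of nodes holding opinion 1
model.set_initial_status(config)
iterations = model.iteration_bunch(50)                # run 50 iterations and collect the deltas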
|
763ef31a254c3bda17b307fbcfae7468326ccfb6
|
daf4e74748a639c53b6ecb2415aa155f85f70cbc
|
/examples/benchmarks/chunking_benchmark.py
|
18bd62b3c3c22b7ad690a7e9e25289f4d09664f8
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
alexandrainst/danlp
|
e3f10f2947a23ff8b08db559265a144d2daf3fa4
|
a1e9fa70fc5a3ae7ff78877062da3a8a8da80758
|
refs/heads/master
| 2023-06-23T08:29:18.552687
| 2022-02-22T09:34:16
| 2022-02-22T09:34:16
| 193,449,903
| 205
| 43
|
BSD-3-Clause
| 2023-06-12T21:50:50
| 2019-06-24T06:47:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,032
|
py
|
chunking_benchmark.py
|
import time
import os
from danlp.datasets import DDT
from danlp.models import load_spacy_chunking_model, get_noun_chunks
from spacy.tokens.doc import Doc
from spacy.gold import read_json_object
from utils import print_speed_performance, f1_report
chunker = load_spacy_chunking_model()
def load_test_with_spacy(ddt):
from spacy.cli.converters import conllu2json
conll_path = os.path.join(ddt.dataset_dir, '{}.{}{}'.format(ddt.dataset_name, 'test', ddt.file_extension))
file_as_json = {}
with open(conll_path, 'r') as file:
file_as_string = file.read()
file_as_string = file_as_string.replace("name=", "").replace("|SpaceAfter=No", "")
file_as_json = conllu2json(file_as_string)
return read_json_object(file_as_json)
# load the data :
# * convert to spaCy Docs format
# * convert dependencies to (BIO) noun chunks
ddt = DDT()
corpus = load_test_with_spacy(ddt)
nlp = chunker.model
sentences_tokens = []
chks_true = []
for jobj in corpus:
for sentence in jobj[1]:
sentence = sentence[0]
tokens = sentence[1]
sentences_tokens.append(tokens)
doc = Doc(nlp.vocab, words=tokens)
for i, t in enumerate(doc):
t.head = doc[sentence[3][i]]
t.pos = nlp.vocab.strings.add(sentence[2][i])
t.dep = nlp.vocab.strings.add(sentence[4][i])
bio_chks = get_noun_chunks(doc, bio=True)
chks_true.append(bio_chks)
num_sentences = len(sentences_tokens)
num_tokens = sum([len(s) for s in sentences_tokens])
def benchmark_spacy_mdl():
start = time.time()
chks_pred = []
for sent in sentences_tokens:
bio_chunks = chunker.predict(sent)
chks_pred.append(bio_chunks)
print('**Spacy model**')
print_speed_performance(start, num_sentences, num_tokens)
assert len(chks_pred)==num_sentences
assert sum([len(s) for s in chks_pred])==num_tokens
print(f1_report(chks_true, chks_pred, bio=True))
if __name__ == '__main__':
benchmark_spacy_mdl()
|
e6001ab48036f4ebf314f9fb7711e4fa98e10f93
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/tools/grit/PRESUBMIT.py
|
91cb8a26e7eaa09020dc3474a459574e823b58b4
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
PRESUBMIT.py
|
# Copyright 2012 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""grit unittests presubmit script.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
def RunUnittests(input_api, output_api):
presubmit_path = input_api.PresubmitLocalPath()
return input_api.canned_checks.RunUnitTests(input_api, output_api, [
input_api.os_path.join('grit', 'test_suite_all.py'),
input_api.os_path.join(input_api.PresubmitLocalPath(),
'preprocess_if_expr_test.py')
])
def CheckChangeOnUpload(input_api, output_api):
return RunUnittests(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return RunUnittests(input_api, output_api)
|
8eb5c25d9b2b3e6cc253d19eceb923cd42703c90
|
3750387e046dfd287d02decc846860fae874bf3e
|
/tools/configen/tests/test_modules/__init__.py
|
0de1ec2dee1c8bf205b77d4c1cabcc4c9e43de8e
|
[
"MIT"
] |
permissive
|
facebookresearch/hydra
|
baf152caa30cd1d8a7e76ba2111fb9a49ecbe18c
|
b5ff66134f268164a20712d18b1230f4dd737444
|
refs/heads/main
| 2023-08-28T02:33:18.063795
| 2023-07-25T17:58:43
| 2023-07-25T17:58:43
| 191,632,914
| 7,667
| 692
|
MIT
| 2023-09-11T15:38:52
| 2019-06-12T19:33:15
|
Python
|
UTF-8
|
Python
| false
| false
| 5,324
|
py
|
__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Tuple, Union
from omegaconf import MISSING
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
@dataclass
class User:
name: str = MISSING
age: int = MISSING
class LibraryClass:
"""
Some class from a user library that is incompatible with OmegaConf config
"""
def __init__(self):
pass
def __eq__(self, other):
return isinstance(other, type(self))
class Empty:
def __init__(self):
...
def __eq__(self, other):
return isinstance(other, type(self))
class UntypedArg:
def __init__(self, param):
self.param = param
def __eq__(self, other):
return isinstance(other, type(self)) and other.param == self.param
class IntArg:
def __init__(self, param: int):
self.param = param
def __eq__(self, other):
return isinstance(other, type(self)) and other.param == self.param
class UnionArg:
# Union is not currently supported by OmegaConf, it will be typed as Any
def __init__(self, param: Union[int, float]):
self.param = param
def __eq__(self, other):
return isinstance(other, type(self)) and other.param == self.param
class WithLibraryClassArg:
def __init__(self, num: int, param: LibraryClass):
self.num = num
self.param = param
def __eq__(self, other):
return (
isinstance(other, type(self))
and other.num == self.num
and other.param == self.param
)
@dataclass
class IncompatibleDataclass:
library: LibraryClass = field(default_factory=LibraryClass)
def __eq__(self, other):
return isinstance(other, type(self)) and other.library == self.library
class IncompatibleDataclassArg:
def __init__(self, num: int, incompat: IncompatibleDataclass):
self.num = num
self.incompat = incompat
def __eq__(self, other):
return (
isinstance(other, type(self))
and self.num == other.num
and self.incompat == other.incompat
)
class WithStringDefault:
def __init__(
self,
no_default: str,
default_str: str = "Bond, James Bond",
none_str: Optional[str] = None,
):
self.no_default = no_default
self.default_str = default_str
self.none_str = none_str
def __eq__(self, other):
return (
isinstance(other, type(self))
and self.no_default == other.no_default
and self.default_str == other.default_str
and self.none_str == other.none_str
)
class WithUntypedStringDefault:
def __init__(
self,
default_str="Bond, James Bond",
):
self.default_str = default_str
def __eq__(self, other):
return isinstance(other, type(self)) and self.default_str == other.default_str
class ListValues:
def __init__(
self,
lst: List[str],
enum_lst: List[Color],
passthrough_list: List[LibraryClass],
dataclass_val: List[User],
def_value: List[str] = [],
):
self.lst = lst
self.enum_lst = enum_lst
self.passthrough_list = passthrough_list
self.dataclass_val = dataclass_val
self.def_value = def_value
def __eq__(self, other):
return (
isinstance(other, type(self))
and self.lst == other.lst
and self.enum_lst == other.enum_lst
and self.passthrough_list == other.passthrough_list
and self.dataclass_val == other.dataclass_val
and self.def_value == other.def_value
)
class DictValues:
def __init__(
self,
dct: Dict[str, str],
enum_key: Dict[Color, str],
dataclass_val: Dict[str, User],
passthrough_dict: Dict[str, LibraryClass],
def_value: Dict[str, str] = {},
):
self.dct = dct
self.enum_key = enum_key
self.dataclass_val = dataclass_val
self.passthrough_dict = passthrough_dict
self.def_value = def_value
def __eq__(self, other):
return (
isinstance(other, type(self))
and self.dct == other.dct
and self.enum_key == other.enum_key
and self.dataclass_val == other.dataclass_val
and self.passthrough_dict == other.passthrough_dict
and self.def_value == other.def_value
)
class PeskySentinel:
def __repr__(self):
return "<I am a pesky sentinel>"
pesky = PeskySentinel()
class PeskySentinelUsage:
def __init__(self, foo=pesky):
self.foo = foo
def __eq__(self, other):
return isinstance(other, type(self)) and self.foo == other.foo
class Tuples:
def __init__(
self,
t1: Tuple[float, float],
t2=(1, 2, 3),
t3: Tuple[float, ...] = (0.1, 0.2, 0.3),
):
self.t1 = t1
self.t2 = t2
self.t3 = t3
def __eq__(self, other):
return (
isinstance(other, type(self))
and self.t1 == other.t1
and self.t2 == other.t2
and self.t3 == other.t3
)
|
ab833ed5d96b2e4bbf41bca2d4552b6b729b7f8f
|
e98c36ecaa7d1d4ece30143ae58dbbf020f8072a
|
/samples/Qiskit/ch02_02_random_byte.py
|
c96131d0b2fbd9ba57bd696efc56c8e0e5e411b0
|
[] |
no_license
|
oreilly-qc/oreilly-qc.github.io
|
a01e7652b1ff5635d550844c58b3703dee4c059d
|
ade464819dc08d56630a597fe20f9b1bcab4883c
|
refs/heads/master
| 2022-11-30T16:05:12.382414
| 2022-11-26T18:17:45
| 2022-11-26T18:17:45
| 166,602,053
| 125
| 66
| null | 2022-10-05T16:38:10
| 2019-01-19T22:54:04
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
ch02_02_random_byte.py
|
## Programming Quantum Computers
## by Eric Johnston, Nic Harrigan and Mercedes Gimeno-Segovia
## O'Reilly Media
##
## More samples like this can be found at http://oreilly-qc.github.io
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, execute, Aer, IBMQ, BasicAer
import math
## Uncomment the next line to see diagrams when running in a notebook
#%matplotlib inline
## Example 2-2: Random byte
# Set up the program
reg = QuantumRegister(8, name='reg')
reg_c = ClassicalRegister(8, name='regc')
qc = QuantumCircuit(reg, reg_c)
qc.reset(reg) # write the value 0
qc.h(reg) # put it into a superposition of 0 and 1
qc.measure(reg, reg_c) # read the result as a digital bit
backend = BasicAer.get_backend('statevector_simulator')
job = execute(qc, backend)
result = job.result()
# Convert the result into a random number
counts = result.get_counts(qc)
print('counts:',counts)
for key,val in counts.items():
n = sum([(int(x) << i) for i,x in enumerate(key)])
print('Random number:', n)
#outputstate = result.get_statevector(qc, decimals=3)
#print(outputstate)
qc.draw() # draw the circuit
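The final loop turns a measured bit string into an integer by shifting each character; a standalone illustration in plain Python (no Qiskit needed). It enumerates from the left of the key, i.e. the leftmost character becomes bit 0, which is fine here because a uniformly random byte stays uniform under bit reversal.
key = '10110010'                                  # hypothetical counts key
n = sum(int(x) << i for i, x in enumerate(key))
assert n == 0b01001101                            # the key read with its leftmost char as bit 0
print(n)                                          # 77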
|
7152bc94577bbed2e179e6eabfacd79aa54d99aa
|
32ef8d7b826185e15fa28fe198f35fe1ebb2e0bc
|
/sendgrid/helpers/mail/content_id.py
|
0fff301070a93dbded20881cca2bc2187e9910e5
|
[
"MIT"
] |
permissive
|
sendgrid/sendgrid-python
|
2165eaa7c0b02bb69143f049252e1303c5752a2c
|
2fe145956a1ee50355f5da8deab401e1e118c736
|
refs/heads/main
| 2023-08-30T18:42:42.884191
| 2023-04-17T08:48:43
| 2023-04-17T08:48:43
| 3,546,794
| 1,470
| 826
|
MIT
| 2023-08-22T23:45:51
| 2012-02-25T19:10:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,718
|
py
|
content_id.py
|
class ContentId(object):
"""The ContentId of an Attachment."""
def __init__(self, content_id=None):
"""Create a ContentId object
:param content_id: The content id for the attachment.
This is used when the Disposition is set to "inline"
and the attachment is an image, allowing the file to
be displayed within the email body.
:type content_id: string, optional
"""
self._content_id = None
if content_id is not None:
self.content_id = content_id
@property
def content_id(self):
"""The content id for the attachment.
This is used when the Disposition is set to "inline" and the
attachment is an image, allowing the file to be displayed within
the email body.
:rtype: string
"""
return self._content_id
@content_id.setter
def content_id(self, value):
"""The content id for the attachment.
This is used when the Disposition is set to "inline" and the
attachment is an image, allowing the file to be displayed within
the email body.
:param value: The content id for the attachment.
This is used when the Disposition is set to "inline" and the attachment
is an image, allowing the file to be displayed within the email body.
:type value: string
"""
self._content_id = value
def get(self):
"""
Get a JSON-ready representation of this ContentId.
:returns: This ContentId, ready for use in a request body.
:rtype: string
"""
return self.content_id
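A brief usage sketch of an inline attachment that references this ContentId from the HTML body via <img src="cid:banner"> (helper class names as exposed by sendgrid-python's mail helpers; the image bytes are placeholders):
import base64
from sendgrid.helpers.mail import (Attachment, ContentId, Disposition,
                                   FileContent, FileName, FileType)

encoded = base64.b64encode(b'...raw png bytes...').decode()   # placeholder payload
attachment = Attachment(
    file_content=FileContent(encoded),
    file_name=FileName('banner.png'),
    file_type=FileType('image/png'),
    disposition=Disposition('inline'),
    content_id=ContentId('banner'),
)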
|
f382a7110c35ec529e72844847334f18e7d80c96
|
6c8305ea1df9687df1c0d2b0ace56733516c6322
|
/readthedocs/builds/migrations/0027_add_privacy_level_automation_rules.py
|
b9816f7e80ef14a5c024c2911f0bb56b13c2c6c9
|
[
"MIT"
] |
permissive
|
readthedocs/readthedocs.org
|
9806083aa744c2308267919480a692e1e003e45d
|
bf88ce6d1085d922322a5fadce63a22c5544c830
|
refs/heads/main
| 2023-09-05T20:22:34.281891
| 2023-09-05T12:41:52
| 2023-09-05T12:41:52
| 841,835
| 2,894
| 1,509
|
MIT
| 2023-09-14T20:36:00
| 2010-08-16T19:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 920
|
py
|
0027_add_privacy_level_automation_rules.py
|
# Generated by Django 2.2.12 on 2020-07-08 23:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("builds", "0026_add_hide_version_automation_rule"),
]
operations = [
migrations.AlterField(
model_name="versionautomationrule",
name="action",
field=models.CharField(
choices=[
("activate-version", "Activate version"),
("hide-version", "Hide version"),
("make-version-public", "Make version public"),
("make-version-private", "Make version private"),
("set-default-version", "Set version as default"),
],
help_text="Action to apply to matching versions",
max_length=32,
verbose_name="Action",
),
),
]
|
15682d2f3ef088a4a6c26ef2f9c0c957e097d97f
|
ad1d46b4ec75ef1f00520ff246d0706c6bb7770e
|
/content/tutorials/kerning-heat-map/kerningHeatMap.py
|
055677bff4aacb1e0f9b43e19b1f34bfa3ee7b64
|
[] |
no_license
|
roberto-arista/PythonForDesigners
|
036f69bae73095b6f49254255fc473a8ab7ee7bb
|
1a781ea7c7ee21e9c64771ba3bf5634ad550692c
|
refs/heads/master
| 2022-02-24T15:28:04.167558
| 2021-09-07T10:37:01
| 2021-09-07T10:37:01
| 168,937,263
| 103
| 37
| null | 2022-02-11T02:24:01
| 2019-02-03T11:17:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,338
|
py
|
kerningHeatMap.py
|
### Modules
# from project folder
from flatKerningDefault import flatKerning
# dependencies
from drawBot import font, fill, stroke
from drawBot import savedState, translate
from drawBot import text, rect, textSize, newPage
from fontParts.world import OpenFont
# from std library
from itertools import product
from string import ascii_uppercase
### Constants
CELL_SIZE = 30
WHITE = 1, 1, 1
BLACK = 0, 0, 0
RED = 1, 0, 0
GREEN = 0, 1, 0
### Functions
def lerp(aa, bb, factor):
return aa + (bb-aa) * factor
def lerpRGB(colorOne, colorTwo, factor):
rr = lerp(colorOne[0], colorTwo[0], factor)
gg = lerp(colorOne[1], colorTwo[1], factor)
bb = lerp(colorOne[2], colorTwo[2], factor)
return rr, gg, bb
def typeQualities(clr=BLACK):
font('Obviously-NarwSemi', 18)
shapeQualities(clr)
def shapeQualities(clr=BLACK):
fill(*clr)
stroke(None)
def kerningHeatMap(kerning, glyphNames, isFirstVertical):
corrections = [kerning[pair]
for pair in product(glyphNames, repeat=2)]
corrections.sort()
minCorrection, maxCorrection = abs(corrections[0]), abs(corrections[-1])
if minCorrection < maxCorrection:
reference = maxCorrection
else:
reference = minCorrection
for jj, glyphY in enumerate(glyphNames):
# vertical captions
with savedState():
translate(-CELL_SIZE, jj*CELL_SIZE)
typeQualities()
text(f'{glyphY}', (CELL_SIZE*.5, CELL_SIZE*.2),
align='center')
# drawing the row
for ii, glyphX in enumerate(glyphNames):
pair = (glyphY, glyphX) if isFirstVertical else (glyphX, glyphY)
correction = kerning[pair]
with savedState():
translate(ii*CELL_SIZE, jj*CELL_SIZE)
# horizontal captions
if jj == 0:
typeQualities()
text(f'{glyphX}', (CELL_SIZE*.5, -CELL_SIZE*.8),
align='center')
# draw the cells
factor = .5 + .5 * abs(correction)/reference
if correction == 0:
rectClr = BLACK
typeClr = WHITE
elif correction < 0:
rectClr = lerpRGB(WHITE, RED, factor)
typeClr = WHITE
else:
rectClr = lerpRGB(WHITE, GREEN, factor)
typeClr = BLACK
shapeQualities(rectClr)
rect(0, 0, CELL_SIZE, CELL_SIZE)
if correction != 0:
corrStr = f'{abs(correction)}'
# just a check for body size
if textSize(corrStr)[0] > CELL_SIZE:
print(f'[WARNING] {pair} text is too big!')
typeQualities(clr=typeClr)
text(corrStr, (CELL_SIZE*.5, CELL_SIZE*.2), align='center')
if __name__ == '__main__':
### Variables
fontName = 'Source Serif Pro Regular.ufo'
glyphNames = ascii_uppercase
### Instructions
thisFont = OpenFont(fontName)
flat = flatKerning(thisFont)
canvasSize = (len(glyphNames)+4)*CELL_SIZE
newPage(canvasSize, canvasSize)
translate(CELL_SIZE*2, CELL_SIZE*2)
kerningHeatMap(flat, glyphNames, isFirstVertical=True)
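A quick standalone check of the interpolation helpers defined above (assuming lerp and lerpRGB are in scope); halfway between white and red lands on a 50% tint:
assert lerp(0, 10, 0.5) == 5.0
assert lerpRGB((1, 1, 1), (1, 0, 0), 0.5) == (1.0, 0.5, 0.5)   # 50% white-to-red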
|
fb52f12e9adfe84ae2b39ed1bfb6c6be28c52406
|
68fc65f2d27495ef251629c351018dfb9c67d2c5
|
/examples/usage_with_zipimport.py
|
7e37da1f9c3ac1320aa78ac3c8735920994aaa85
|
[
"NAIST-2003",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mocobeta/janome
|
5f1b7ec2046d3916069ebeefce35ce98102c4205
|
9d82248e0c0815e367b9604d83ef0de198e017bc
|
refs/heads/master
| 2023-07-12T22:48:47.528908
| 2023-07-01T11:31:23
| 2023-07-01T11:31:23
| 30,792,770
| 837
| 69
|
Apache-2.0
| 2023-07-01T11:25:28
| 2015-02-14T09:47:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
usage_with_zipimport.py
|
"""
This was contributed by @roy-freee.
How to load directly from a zip archive
Preparation: build a wheel (you can also zip the package manually)
$ pip wheel . --no-deps --no-binary
or:
$ python setup.py bdist_wheel --universal
Use the resulting .whl file
[Limitation] Only valid when mmap=False is specified. (zip import cannot be applied to the NEologd-bundled janome.)
How to import the zip archive
You first build a wheel via `pip` command or `setup.py bdist_wheel`:
$ pip wheel . --no-deps --no-binary
$ python setup.py bdist_wheel --universal
You can also create a zip archived package by yourself.
[Limitation] only supported on mmap=False.
"""
import janome.tokenizer
from janome.tokenizer import Tokenizer
import sys
import glob
ARCHIVE_NAME = 'Janome-*.whl'
archive_path = glob.glob(ARCHIVE_NAME)[0]
# avoiding conflict to existing package
sys.path.insert(0, archive_path)
# mmap option should be set to False
t = Tokenizer(mmap=False)
for token in t.tokenize('すもももももももものうち'):
print(token)
print(janome.tokenizer.__file__)
# => Like './Janome-xxx.whl/janome/tokenizer.py'
|
34d18806c61f8c45c06ea5606a679a4171739735
|
5e9576c368e98927e2965bd2fb23bd35d9993d69
|
/featuretools/primitives/standard/transform/full_name_to_last_name.py
|
aa2b2bbaa7ebde292168ec87151bea413ea7323f
|
[
"BSD-3-Clause"
] |
permissive
|
alteryx/featuretools
|
c6e319e063e8e84e7684bf232376f95dc5272160
|
c284c2d27a95b81e0bae913ac90df2b02c8f3b37
|
refs/heads/main
| 2023-08-25T12:21:33.945418
| 2023-08-23T16:30:25
| 2023-08-23T16:30:25
| 102,908,804
| 1,783
| 201
|
BSD-3-Clause
| 2023-09-07T18:53:19
| 2017-09-08T22:15:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,803
|
py
|
full_name_to_last_name.py
|
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical, PersonFullName
from featuretools.primitives.base import TransformPrimitive
class FullNameToLastName(TransformPrimitive):
"""Determines the first name from a person's name.
Description:
Given a list of names, determines the last name. If
only a single name is provided, assume this is a first name, and
return `nan`. This assumes all titles will be followed by a period.
Examples:
>>> full_name_to_last_name = FullNameToLastName()
>>> names = ['Woolf Spector', 'Oliva y Ocana, Dona. Fermina',
... 'Ware, Mr. Frederick', 'Peter, Michael J', 'Mr. Brown']
>>> full_name_to_last_name(names).to_list()
['Spector', 'Oliva y Ocana', 'Ware', 'Peter', 'Brown']
"""
name = "full_name_to_last_name"
input_types = [ColumnSchema(logical_type=PersonFullName)]
return_type = ColumnSchema(logical_type=Categorical, semantic_tags={"category"})
def get_function(self):
def full_name_to_last_name(x):
titles_pattern = r"([A-Z][a-z]+)\. "
df = pd.DataFrame({"names": x})
# extract initial names
pattern = r"(^.+?,|^[A-Z][a-z]+\. [A-Z][a-z]+$| [A-Z][a-z]+$| [A-Z][a-z]+[/-][A-Z][a-z]+$)"
df["last_name"] = df["names"].str.extract(pattern)
# remove titles
df["last_name"] = df["last_name"].str.replace(
titles_pattern,
"",
regex=True,
)
# clean up white space and leftover commas
df["last_name"] = df["last_name"].str.replace(",", "").str.strip()
return df["last_name"]
return full_name_to_last_name
|
257b3a2ead4bbf916b884845cf0138ab8aaa11f9
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/mindspore/python/mindspore/profiler/parser/framework_struct.py
|
370068f5e58eaa36e0dece06a34c02aedf40c08e
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 2,266
|
py
|
framework_struct.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Thr parser for parsing framework files."""
from mindspore.profiler.common.struct_type import StructType
# Note: All keys should be named with lower camel case, which are the same as those in C++.
TASK_DESC_STRUCT = dict(
magicNumber=StructType.UINT16,
dataTag=StructType.UINT16,
taskType=StructType.UINT32,
opName=[StructType.UINT64] * 16, # opName is a mix data
opType=[StructType.UINT64] * 8, # opType is a mix data
curIterNum=StructType.UINT64,
timeStamp=StructType.UINT64,
shapeType=StructType.UINT32,
blockDims=StructType.UINT32,
modelId=StructType.UINT32,
streamId=StructType.UINT32,
taskId=StructType.UINT32,
threadId=StructType.UINT32,
reserve=[StructType.UINT8] * 16
)
STEP_INFO_STRUCT = dict(
magicNumber=StructType.UINT16,
dataTag=StructType.UINT16,
modelId=StructType.UINT32,
streamId=StructType.UINT32,
taskId=StructType.UINT32,
timeStamp=StructType.UINT64,
curIterNum=StructType.UINT64,
threadId=StructType.UINT32,
tag=StructType.UINT8,
reserve=[StructType.UINT8] * 27
)
TENSOR_DATA_STRUCT = dict(
magicNumber=StructType.UINT16,
dataTag=StructType.UINT16,
modelId=StructType.UINT32,
curIterNum=StructType.UINT64,
streamId=StructType.UINT32,
taskId=StructType.UINT32,
tensorNum=[StructType.UINT8] * 4, # Note: Here the memory is aligned. The actual memory usage is 1, 3 is padding.
tensorData=[[StructType.UINT32] * 11] * 5,
reserve=[StructType.UINT8] * 8 # Note: Here the memory is aligned. The actual memory usage is 4, 4 is padding.
)
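To make the fixed-size layouts above concrete, here is an illustrative mapping of STEP_INFO_STRUCT onto Python's standard struct module (field order and sizes taken from the declaration above; this is not the mindspore parser, and the little-endian assumption is mine):
import struct

# H=UINT16, I=UINT32, Q=UINT64, B=UINT8, 27s=the 27-byte reserve field
STEP_INFO_FMT = '<HHIIIQQIB27s'
assert struct.calcsize(STEP_INFO_FMT) == 64          # sum of the declared field sizes

record = struct.unpack(STEP_INFO_FMT, b'\x00' * 64)  # hypothetical zeroed 64-byte record
magic_number, data_tag, model_id = record[0], record[1], record[2]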
|