| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
import logging
import numpy as np
import pandas as pd
from advantages import compute_advantages
from augmenter import augment_with_advantages
from tools.metadata import get_last_patch
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def _dataset_to_features(dataset_df, advantages=None):
""" Transforms a mined pandas DataFrame into a feature matrix. This method assumes the following
format of a DataFrame:
columns: [match_id,radiant_win,radiant_team,dire_team,avg_mmr,num_mmr,game_mode,lobby_type]
Args:
dataset_df: pandas DataFrame to be transformed
        advantages: if given, the [synergy_matrix, counter_matrix] pair is used to compute a
            synergy and a counter rating for every game, which are appended to the feature matrix
    Returns:
        [X, y], where X is the feature matrix and y is the label vector (1 if Radiant won)
"""
last_patch_info = get_last_patch()
heroes_released = last_patch_info['heroes_released']
synergy_matrix, counter_matrix = None, None
if advantages:
x_matrix = np.zeros((dataset_df.shape[0], 2 * heroes_released + 3))
[synergy_matrix, counter_matrix] = advantages
else:
x_matrix = np.zeros((dataset_df.shape[0], 2 * heroes_released))
y_matrix = np.zeros(dataset_df.shape[0])
dataset_np = dataset_df.values
for i, row in enumerate(dataset_np):
radiant_win = row[1]
        radiant_heroes = list(map(int, row[2].split(',')))
        dire_heroes = list(map(int, row[3].split(',')))
for j in range(5):
x_matrix[i, radiant_heroes[j] - 1] = 1
x_matrix[i, dire_heroes[j] - 1 + heroes_released] = 1
if advantages:
x_matrix[i, -3:] = augment_with_advantages(synergy_matrix,
counter_matrix,
radiant_heroes,
dire_heroes)
y_matrix[i] = 1 if radiant_win else 0
return [x_matrix, y_matrix]
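# Layout of one feature row built above (descriptive only; N is heroes_released):
#   columns 0 .. N-1    -> 1 for the Radiant hero with id (column_index + 1)
#   columns N .. 2N-1   -> 1 for the Dire hero with id (column_index - N + 1)
#   last 3 columns      -> synergy/counter ratings from augment_with_advantages,
#                          present only when `advantages` is given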
def read_dataset(csv_path,
low_mmr=None,
high_mmr=None,
advantages=False):
""" Reads pandas DataFrame from csv_path, filters games between low_mmr and high_mmr if given
and appends synergy and counter features
Args:
csv_path: path to read pandas DataFrame from
low_mmr: lower MMR bound
high_mmr: higher MMR bound
        advantages: if True, the synergy and counter matrices are recomputed from the dataset and
            saved to files; otherwise they are read from the pretrained files
    Returns:
        [[X, y], [synergy_matrix, counter_matrix]]
"""
global logger
dataset_df = pd.read_csv(csv_path)
if low_mmr:
dataset_df = dataset_df[dataset_df.avg_mmr > low_mmr]
if high_mmr:
dataset_df = dataset_df[dataset_df.avg_mmr < high_mmr]
logger.info("The dataset contains %d games", len(dataset_df))
if advantages:
logger.info("Computing advantages...")
advantages_list = compute_advantages(dataset_df)
else:
logger.info("Loading advantages from files...")
synergies = np.loadtxt('pretrained/synergies_all.csv')
counters = np.loadtxt('pretrained/counters_all.csv')
advantages_list = [synergies, counters]
logger.info("Transforming dataframe in feature map...")
feature_map = _dataset_to_features(dataset_df, advantages=advantages_list)
return [feature_map, advantages_list]
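# Example usage, as a minimal sketch: the CSV path and MMR bounds below are
# placeholders; the call signature and returned shapes come from read_dataset above.
if __name__ == '__main__':
    feature_map, advantages_list = read_dataset('datasets/games.csv',
                                                low_mmr=2000,
                                                high_mmr=5000,
                                                advantages=False)
    x_matrix, y_matrix = feature_map
    logger.info("Feature matrix shape: %s, labels shape: %s",
                x_matrix.shape, y_matrix.shape)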
|
{
"content_hash": "395d8211414d2b95472a95e6240d3cb2",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 100,
"avg_line_length": 35.6530612244898,
"alnum_prop": 0.6202060675443618,
"repo_name": "andreiapostoae/dota2-predictor",
"id": "6f0c9d62d9bf96424f49109103c90605b97ccdec",
"size": "3494",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "preprocessing/dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5579"
},
{
"name": "HTML",
"bytes": "9291"
},
{
"name": "JavaScript",
"bytes": "23814"
},
{
"name": "Python",
"bytes": "58047"
}
],
"symlink_target": ""
}
|
"""This example gets all child ad units of the effective root ad unit."""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate services.
network_service = client.GetService('NetworkService', version='v201805')
inventory_service = client.GetService('InventoryService', version='v201805')
# Set the parent ad unit's ID for all children ad units to be fetched from.
current_network = network_service.getCurrentNetwork()
parent_ad_unit_id = current_network['effectiveRootAdUnitId']
# Create a statement to select ad units under the parent ad unit.
statement = (ad_manager.StatementBuilder(version='v201805')
.Where('parentId = :parentId')
.OrderBy('id', ascending=True)
.WithBindVariable('parentId', parent_ad_unit_id))
  # Retrieve a small number of ad units at a time, paging
  # through until all ad units have been retrieved.
while True:
response = inventory_service.getAdUnitsByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
for ad_unit in response['results']:
# Print out some information for each ad unit.
print('Ad unit with ID "%s" and name "%s" was found.\n' %
(ad_unit['id'], ad_unit['name']))
statement.offset += statement.limit
else:
break
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
|
{
"content_hash": "d042df8cf80f9ee84c8afdcc8f721751",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 79,
"avg_line_length": 38.69047619047619,
"alnum_prop": 0.6923076923076923,
"repo_name": "Aloomaio/googleads-python-lib",
"id": "37f23fea29e5dea24ea635e8b8eb2354de35737a",
"size": "2247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ad_manager/v201805/inventory_service/get_top_level_ad_units.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "491015"
}
],
"symlink_target": ""
}
|
"""Accesses the google.monitoring.v3 AlertPolicyService API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.cloud.monitoring_v3.gapic import alert_policy_service_client_config
from google.cloud.monitoring_v3.gapic import enums
from google.cloud.monitoring_v3.gapic.transports import alert_policy_service_grpc_transport
from google.cloud.monitoring_v3.proto import alert_pb2
from google.cloud.monitoring_v3.proto import alert_service_pb2
from google.cloud.monitoring_v3.proto import alert_service_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-cloud-monitoring', ).version
class AlertPolicyServiceClient(object):
"""
The AlertPolicyService API is used to manage (list, create, delete,
edit) alert policies in Stackdriver Monitoring. An alerting policy is
a description of the conditions under which some aspect of your
system is considered to be \"unhealthy\" and the ways to notify
people or services about this state. In addition to using this API, alert
policies can also be managed through
`Stackdriver Monitoring <https://cloud.google.com/monitoring/docs/>`_,
which can be reached by clicking the \"Monitoring\" tab in
`Cloud Console <https://console.cloud.google.com/>`_.
"""
SERVICE_ADDRESS = 'monitoring.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.monitoring.v3.AlertPolicyService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AlertPolicyServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
'projects/{project}',
project=project,
)
@classmethod
def alert_policy_path(cls, project, alert_policy):
"""Return a fully-qualified alert_policy string."""
return google.api_core.path_template.expand(
'projects/{project}/alertPolicies/{alert_policy}',
project=project,
alert_policy=alert_policy,
)
@classmethod
def alert_policy_condition_path(cls, project, alert_policy, condition):
"""Return a fully-qualified alert_policy_condition string."""
return google.api_core.path_template.expand(
'projects/{project}/alertPolicies/{alert_policy}/conditions/{condition}',
project=project,
alert_policy=alert_policy,
condition=condition,
)
def __init__(self,
transport=None,
channel=None,
credentials=None,
client_config=alert_policy_service_client_config.config,
client_info=None):
"""Constructor.
Args:
transport (Union[~.AlertPolicyServiceGrpcTransport,
Callable[[~.Credentials, type], ~.AlertPolicyServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config:
warnings.warn('The `client_config` argument is deprecated.',
PendingDeprecationWarning)
if channel:
warnings.warn(
'The `channel` argument is deprecated; use '
'`transport` instead.', PendingDeprecationWarning)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=alert_policy_service_grpc_transport.
AlertPolicyServiceGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.')
self.transport = transport
        else:
            self.transport = alert_policy_service_grpc_transport.AlertPolicyServiceGrpcTransport(
                address=self.SERVICE_ADDRESS,
                channel=channel,
                credentials=credentials,
            )
if client_info is None:
client_info = (
google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO)
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def list_alert_policies(self,
name,
filter_=None,
order_by=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists the existing alerting policies for the project.
Example:
>>> from google.cloud import monitoring_v3
>>>
>>> client = monitoring_v3.AlertPolicyServiceClient()
>>>
>>> name = client.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in client.list_alert_policies(name):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_alert_policies(name, options=CallOptions(page_token=INITIAL_PAGE)):
... for element in page:
... # process element
... pass
Args:
name (str): The project whose alert policies are to be listed. The format is
projects/[PROJECT_ID]
Note that this field names the parent container in which the alerting
policies to be listed are stored. To retrieve a single alerting policy
by name, use the
``GetAlertPolicy``
operation, instead.
filter_ (str): If provided, this field specifies the criteria that must be met by
alert policies to be included in the response.
For more details, see [sorting and
filtering](/monitoring/api/v3/sorting-and-filtering).
order_by (str): A comma-separated list of fields by which to sort the result. Supports
the same set of field references as the ``filter`` field. Entries can be
prefixed with a minus sign to sort by the field in descending order.
For more details, see [sorting and
filtering](/monitoring/api/v3/sorting-and-filtering).
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.monitoring_v3.types.AlertPolicy` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
# Wrap the transport method to add retry and timeout logic.
if 'list_alert_policies' not in self._inner_api_calls:
self._inner_api_calls[
'list_alert_policies'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_alert_policies,
default_retry=self._method_configs[
'ListAlertPolicies'].retry,
default_timeout=self._method_configs['ListAlertPolicies']
.timeout,
client_info=self._client_info,
)
request = alert_service_pb2.ListAlertPoliciesRequest(
name=name,
filter=filter_,
order_by=order_by,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_alert_policies'],
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='alert_policies',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def get_alert_policy(self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Gets a single alerting policy.
Example:
>>> from google.cloud import monitoring_v3
>>>
>>> client = monitoring_v3.AlertPolicyServiceClient()
>>>
>>> name = client.alert_policy_path('[PROJECT]', '[ALERT_POLICY]')
>>>
>>> response = client.get_alert_policy(name)
Args:
name (str): The alerting policy to retrieve. The format is
projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.monitoring_v3.types.AlertPolicy` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
# Wrap the transport method to add retry and timeout logic.
if 'get_alert_policy' not in self._inner_api_calls:
self._inner_api_calls[
'get_alert_policy'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_alert_policy,
default_retry=self._method_configs['GetAlertPolicy'].retry,
default_timeout=self._method_configs['GetAlertPolicy']
.timeout,
client_info=self._client_info,
)
request = alert_service_pb2.GetAlertPolicyRequest(name=name, )
return self._inner_api_calls['get_alert_policy'](
request, retry=retry, timeout=timeout, metadata=metadata)
def create_alert_policy(self,
name,
alert_policy,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates a new alerting policy.
Example:
>>> from google.cloud import monitoring_v3
>>>
>>> client = monitoring_v3.AlertPolicyServiceClient()
>>>
>>> name = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize ``alert_policy``:
>>> alert_policy = {}
>>>
>>> response = client.create_alert_policy(name, alert_policy)
Args:
name (str): The project in which to create the alerting policy. The format is
``projects/[PROJECT_ID]``.
Note that this field names the parent container in which the alerting
policy will be written, not the name of the created policy. The alerting
policy that is returned will have a name that contains a normalized
representation of this name as a prefix but adds a suffix of the form
``/alertPolicies/[POLICY_ID]``, identifying the policy in the container.
alert_policy (Union[dict, ~google.cloud.monitoring_v3.types.AlertPolicy]): The requested alerting policy. You should omit the ``name`` field in this
policy. The name will be returned in the new policy, including
a new [ALERT_POLICY_ID] value.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.monitoring_v3.types.AlertPolicy`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.monitoring_v3.types.AlertPolicy` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
# Wrap the transport method to add retry and timeout logic.
if 'create_alert_policy' not in self._inner_api_calls:
self._inner_api_calls[
'create_alert_policy'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_alert_policy,
default_retry=self._method_configs[
'CreateAlertPolicy'].retry,
default_timeout=self._method_configs['CreateAlertPolicy']
.timeout,
client_info=self._client_info,
)
request = alert_service_pb2.CreateAlertPolicyRequest(
name=name,
alert_policy=alert_policy,
)
return self._inner_api_calls['create_alert_policy'](
request, retry=retry, timeout=timeout, metadata=metadata)
def delete_alert_policy(self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes an alerting policy.
Example:
>>> from google.cloud import monitoring_v3
>>>
>>> client = monitoring_v3.AlertPolicyServiceClient()
>>>
>>> name = client.alert_policy_path('[PROJECT]', '[ALERT_POLICY]')
>>>
>>> client.delete_alert_policy(name)
Args:
name (str): The alerting policy to delete. The format is:
projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]
For more information, see ``AlertPolicy``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
# Wrap the transport method to add retry and timeout logic.
if 'delete_alert_policy' not in self._inner_api_calls:
self._inner_api_calls[
'delete_alert_policy'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_alert_policy,
default_retry=self._method_configs[
'DeleteAlertPolicy'].retry,
default_timeout=self._method_configs['DeleteAlertPolicy']
.timeout,
client_info=self._client_info,
)
request = alert_service_pb2.DeleteAlertPolicyRequest(name=name, )
self._inner_api_calls['delete_alert_policy'](
request, retry=retry, timeout=timeout, metadata=metadata)
def update_alert_policy(self,
alert_policy,
update_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Updates an alerting policy. You can either replace the entire policy with
a new one or replace only certain fields in the current alerting policy by
specifying the fields to be updated via ``updateMask``. Returns the
updated alerting policy.
Example:
>>> from google.cloud import monitoring_v3
>>>
>>> client = monitoring_v3.AlertPolicyServiceClient()
>>>
>>> # TODO: Initialize ``alert_policy``:
>>> alert_policy = {}
>>>
>>> response = client.update_alert_policy(alert_policy)
Args:
alert_policy (Union[dict, ~google.cloud.monitoring_v3.types.AlertPolicy]): Required. The updated alerting policy or the updated values for the
fields listed in ``update_mask``.
If ``update_mask`` is not empty, any fields in this policy that are
not in ``update_mask`` are ignored.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.monitoring_v3.types.AlertPolicy`
update_mask (Union[dict, ~google.cloud.monitoring_v3.types.FieldMask]): Optional. A list of alerting policy field names. If this field is not
empty, each listed field in the existing alerting policy is set to the
value of the corresponding field in the supplied policy (``alert_policy``),
or to the field's default value if the field is not in the supplied
alerting policy. Fields not listed retain their previous value.
Examples of valid field masks include ``display_name``, ``documentation``,
``documentation.content``, ``documentation.mime_type``, ``user_labels``,
``user_label.nameofkey``, ``enabled``, ``conditions``, ``combiner``, etc.
If this field is empty, then the supplied alerting policy replaces the
existing policy. It is the same as deleting the existing policy and
adding the supplied policy, except for the following:
+ The new policy will have the same ``[ALERT_POLICY_ID]`` as the former
policy. This gives you continuity with the former policy in your
notifications and incidents.
+ Conditions in the new policy will keep their former ``[CONDITION_ID]`` if
the supplied condition includes the `name` field with that
`[CONDITION_ID]`. If the supplied condition omits the `name` field,
then a new `[CONDITION_ID]` is created.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.monitoring_v3.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.monitoring_v3.types.AlertPolicy` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
# Wrap the transport method to add retry and timeout logic.
if 'update_alert_policy' not in self._inner_api_calls:
self._inner_api_calls[
'update_alert_policy'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_alert_policy,
default_retry=self._method_configs[
'UpdateAlertPolicy'].retry,
default_timeout=self._method_configs['UpdateAlertPolicy']
.timeout,
client_info=self._client_info,
)
request = alert_service_pb2.UpdateAlertPolicyRequest(
alert_policy=alert_policy,
update_mask=update_mask,
)
return self._inner_api_calls['update_alert_policy'](
request, retry=retry, timeout=timeout, metadata=metadata)
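# Minimal usage sketch, mirroring the docstring examples above; the project id
# is a placeholder and credentials are resolved from the environment.
if __name__ == '__main__':
    from google.cloud import monitoring_v3

    example_client = monitoring_v3.AlertPolicyServiceClient()
    parent = example_client.project_path('my-project')
    # list_alert_policies returns a page iterator of AlertPolicy messages.
    for policy in example_client.list_alert_policies(parent):
        print(policy.display_name)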
|
{
"content_hash": "afd3c9f2f8e88cce87889523b20986aa",
"timestamp": "",
"source": "github",
"line_count": 583,
"max_line_length": 160,
"avg_line_length": 46.945111492281306,
"alnum_prop": 0.5872702692827652,
"repo_name": "tseaver/gcloud-python",
"id": "5c73e8e88a19fa7d1732d4a9274a82656a7b7f14",
"size": "27944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monitoring/google/cloud/monitoring_v3/gapic/alert_policy_service_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "93642"
},
{
"name": "Python",
"bytes": "2874989"
},
{
"name": "Shell",
"bytes": "4436"
}
],
"symlink_target": ""
}
|
import datetime
from django.test import TestCase
from haystack import connections
from haystack.inputs import Exact
from haystack.models import SearchResult
from haystack.query import SQ
from core.models import MockModel, AnotherMockModel
class ElasticsearchSearchQueryTestCase(TestCase):
def setUp(self):
super(ElasticsearchSearchQueryTestCase, self).setUp()
self.sq = connections['default'].get_query()
def test_build_query_all(self):
self.assertEqual(self.sq.build_query(), '*:*')
def test_build_query_single_word(self):
self.sq.add_filter(SQ(content='hello'))
self.assertEqual(self.sq.build_query(), '(hello)')
def test_build_query_boolean(self):
self.sq.add_filter(SQ(content=True))
self.assertEqual(self.sq.build_query(), '(True)')
def test_regression_slash_search(self):
self.sq.add_filter(SQ(content='hello/'))
self.assertEqual(self.sq.build_query(), '(hello\\/)')
def test_build_query_datetime(self):
self.sq.add_filter(SQ(content=datetime.datetime(2009, 5, 8, 11, 28)))
self.assertEqual(self.sq.build_query(), '(2009-05-08T11:28:00)')
def test_build_query_multiple_words_and(self):
self.sq.add_filter(SQ(content='hello'))
self.sq.add_filter(SQ(content='world'))
self.assertEqual(self.sq.build_query(), '((hello) AND (world))')
def test_build_query_multiple_words_not(self):
self.sq.add_filter(~SQ(content='hello'))
self.sq.add_filter(~SQ(content='world'))
self.assertEqual(self.sq.build_query(), '(NOT ((hello)) AND NOT ((world)))')
def test_build_query_multiple_words_or(self):
self.sq.add_filter(~SQ(content='hello'))
self.sq.add_filter(SQ(content='hello'), use_or=True)
self.assertEqual(self.sq.build_query(), '(NOT ((hello)) OR (hello))')
def test_build_query_multiple_words_mixed(self):
self.sq.add_filter(SQ(content='why'))
self.sq.add_filter(SQ(content='hello'), use_or=True)
self.sq.add_filter(~SQ(content='world'))
self.assertEqual(self.sq.build_query(), u'(((why) OR (hello)) AND NOT ((world)))')
def test_build_query_phrase(self):
self.sq.add_filter(SQ(content='hello world'))
self.assertEqual(self.sq.build_query(), '(hello AND world)')
self.sq.add_filter(SQ(content__exact='hello world'))
self.assertEqual(self.sq.build_query(), u'((hello AND world) AND ("hello world"))')
def test_build_query_boost(self):
self.sq.add_filter(SQ(content='hello'))
self.sq.add_boost('world', 5)
self.assertEqual(self.sq.build_query(), "(hello) world^5")
def test_build_query_multiple_filter_types(self):
self.sq.add_filter(SQ(content='why'))
self.sq.add_filter(SQ(pub_date__lte=Exact('2009-02-10 01:59:00')))
self.sq.add_filter(SQ(author__gt='daniel'))
self.sq.add_filter(SQ(created__lt=Exact('2009-02-12 12:13:00')))
self.sq.add_filter(SQ(title__gte='B'))
self.sq.add_filter(SQ(id__in=[1, 2, 3]))
self.sq.add_filter(SQ(rating__range=[3, 5]))
self.assertEqual(self.sq.build_query(), u'((why) AND pub_date:([* TO "2009-02-10 01:59:00"]) AND author:({"daniel" TO *}) AND created:({* TO "2009-02-12 12:13:00"}) AND title:(["B" TO *]) AND id:("1" OR "2" OR "3") AND rating:(["3" TO "5"]))')
def test_build_query_multiple_filter_types_with_datetimes(self):
self.sq.add_filter(SQ(content='why'))
self.sq.add_filter(SQ(pub_date__lte=datetime.datetime(2009, 2, 10, 1, 59, 0)))
self.sq.add_filter(SQ(author__gt='daniel'))
self.sq.add_filter(SQ(created__lt=datetime.datetime(2009, 2, 12, 12, 13, 0)))
self.sq.add_filter(SQ(title__gte='B'))
self.sq.add_filter(SQ(id__in=[1, 2, 3]))
self.sq.add_filter(SQ(rating__range=[3, 5]))
self.assertEqual(self.sq.build_query(), u'((why) AND pub_date:([* TO "2009-02-10T01:59:00"]) AND author:({"daniel" TO *}) AND created:({* TO "2009-02-12T12:13:00"}) AND title:(["B" TO *]) AND id:("1" OR "2" OR "3") AND rating:(["3" TO "5"]))')
def test_build_query_in_filter_multiple_words(self):
self.sq.add_filter(SQ(content='why'))
self.sq.add_filter(SQ(title__in=["A Famous Paper", "An Infamous Article"]))
self.assertEqual(self.sq.build_query(), u'((why) AND title:("A Famous Paper" OR "An Infamous Article"))')
def test_build_query_in_filter_datetime(self):
self.sq.add_filter(SQ(content='why'))
self.sq.add_filter(SQ(pub_date__in=[datetime.datetime(2009, 7, 6, 1, 56, 21)]))
self.assertEqual(self.sq.build_query(), u'((why) AND pub_date:("2009-07-06T01:56:21"))')
def test_build_query_in_with_set(self):
self.sq.add_filter(SQ(content='why'))
self.sq.add_filter(SQ(title__in=set(["A Famous Paper", "An Infamous Article"])))
self.assertTrue('((why) AND title:(' in self.sq.build_query())
self.assertTrue('"A Famous Paper"' in self.sq.build_query())
self.assertTrue('"An Infamous Article"' in self.sq.build_query())
def test_build_query_wildcard_filter_types(self):
self.sq.add_filter(SQ(content='why'))
self.sq.add_filter(SQ(title__startswith='haystack'))
self.assertEqual(self.sq.build_query(), u'((why) AND title:(haystack*))')
def test_clean(self):
self.assertEqual(self.sq.clean('hello world'), 'hello world')
self.assertEqual(self.sq.clean('hello AND world'), 'hello and world')
self.assertEqual(self.sq.clean('hello AND OR NOT TO + - && || ! ( ) { } [ ] ^ " ~ * ? : \ / world'), 'hello and or not to \\+ \\- \\&& \\|| \\! \\( \\) \\{ \\} \\[ \\] \\^ \\" \\~ \\* \\? \\: \\\\ \\/ world')
self.assertEqual(self.sq.clean('so please NOTe i am in a bAND and bORed'), 'so please NOTe i am in a bAND and bORed')
def test_build_query_with_models(self):
self.sq.add_filter(SQ(content='hello'))
self.sq.add_model(MockModel)
self.assertEqual(self.sq.build_query(), '(hello)')
self.sq.add_model(AnotherMockModel)
self.assertEqual(self.sq.build_query(), u'(hello)')
def test_set_result_class(self):
# Assert that we're defaulting to ``SearchResult``.
self.assertTrue(issubclass(self.sq.result_class, SearchResult))
# Custom class.
class IttyBittyResult(object):
pass
self.sq.set_result_class(IttyBittyResult)
self.assertTrue(issubclass(self.sq.result_class, IttyBittyResult))
# Reset to default.
self.sq.set_result_class(None)
self.assertTrue(issubclass(self.sq.result_class, SearchResult))
def test_in_filter_values_list(self):
self.sq.add_filter(SQ(content='why'))
self.sq.add_filter(SQ(title__in=MockModel.objects.values_list('id', flat=True)))
self.assertEqual(self.sq.build_query(), u'((why) AND title:("1" OR "2" OR "3"))')
|
{
"content_hash": "cd9f8355ecda066f9b59af66803c2bed",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 251,
"avg_line_length": 49.170212765957444,
"alnum_prop": 0.6245492571758258,
"repo_name": "zhangtianyi1234/django-haystack",
"id": "e44ed674614ffcf78dad532eaa492980fe2905db",
"size": "6933",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/elasticsearch_tests/tests/elasticsearch_query.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
disthelpers
-----------
The ``guidata.disthelpers`` module provides helper functions for Python
package distribution on Microsoft Windows platforms with ``py2exe`` or on
all platforms thanks to ``cx_Freeze``.
"""
import sys
import os
import os.path as osp
import shutil
import traceback
import atexit
import imp
from subprocess import Popen, PIPE
# Local imports
# from guidata.configtools import get_module_path
def get_module_path(modname):
"""Return module *modname* base path"""
module = sys.modules.get(modname, __import__(modname))
return osp.abspath(osp.dirname(module.__file__))
#==============================================================================
# Dependency management
#==============================================================================
def get_changeset(path, rev=None):
"""Return Mercurial repository *path* revision number"""
args = ['hg', 'parent']
if rev is not None:
args += ['--rev', str(rev)]
process = Popen(args, stdout=PIPE, stderr=PIPE, cwd=path, shell=True)
try:
return process.stdout.read().splitlines()[0].split()[1]
except IndexError:
raise RuntimeError(process.stderr.read())
def prepend_module_to_path(module_path):
"""
Prepend to sys.path module located in *module_path*
Return string with module infos: name, revision, changeset
Use this function:
1) In your application to import local frozen copies of internal libraries
2) In your py2exe distributed package to add a text file containing the returned string
"""
if not osp.isdir(module_path):
# Assuming py2exe distribution
return
sys.path.insert(0, osp.abspath(module_path))
changeset = get_changeset(module_path)
name = osp.basename(module_path)
prefix = "Prepending module to sys.path"
message = prefix + ("%s [revision %s]" % (name, changeset)
).rjust(80 - len(prefix), ".")
print >> sys.stderr, message
return message
def prepend_modules_to_path(module_base_path):
"""Prepend to sys.path all modules located in *module_base_path*"""
if not osp.isdir(module_base_path):
# Assuming py2exe distribution
return
fnames = [osp.join(module_base_path, name)
for name in os.listdir(module_base_path)]
messages = [prepend_module_to_path(dirname)
for dirname in fnames if osp.isdir(dirname)]
return os.linesep.join(messages)
#==============================================================================
# Distribution helpers
#==============================================================================
def _remove_later(fname):
"""Try to remove file later (at exit)"""
def try_to_remove(fname):
if osp.exists(fname):
os.remove(fname)
atexit.register(try_to_remove, osp.abspath(fname))
def get_visual_studio_dlls(architecture=None, python_version=None):
"""Get the list of Microsoft Visual C++ 2008 DLLs associated to
architecture and Python version, create the manifest file.
architecture: integer (32 or 64) -- if None, take the Python build arch
python_version: X.Y"""
if python_version is None:
python_version = '2.7'
print >>sys.stderr, "Warning/disthelpers: assuming Python 2.7 target"
if python_version in ('2.6', '2.7'):
# Python 2.6-2.7 were built with Visual Studio 9.0.21022.8
# (i.e. Visual Studio 2008, not Visual Studio 2008 SP1!)
version = "9.0.21022.8"
key = "1fc8b3b9a1e18e3b"
#TODO: add here the future version of Python (including Python 3)
else:
raise RuntimeError,\
"Unsupported Python version %s" % python_version
if architecture is None:
architecture = 64 if sys.maxsize > 2**32 else 32
atype = "" if architecture == 64 else "win32"
arch = "amd64" if architecture == 64 else "x86"
manifest = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<!-- Copyright (c) Microsoft Corporation. All rights reserved. -->
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<noInheritable/>
<assemblyIdentity
type="%(atype)s"
name="Microsoft.VC90.CRT"
version="%(version)s"
processorArchitecture="%(arch)s"
publicKeyToken="%(key)s"
/>
<file name="msvcr90.dll" />
<file name="msvcp90.dll" />
<file name="msvcm90.dll" />
</assembly>
""" % dict(version=version, key=key, atype=atype, arch=arch)
vc90man = "Microsoft.VC90.CRT.manifest"
file(vc90man, 'w').write(manifest)
_remove_later(vc90man)
filelist = [vc90man]
vc_str = '%s_Microsoft.VC90.CRT_%s_%s' % (arch, key, version)
winsxs = osp.join(os.environ['windir'], 'WinSxS')
for fname in os.listdir(winsxs):
path = osp.join(winsxs, fname)
if osp.isdir(path) and fname.lower().startswith(vc_str.lower()):
for dllname in os.listdir(path):
filelist.append(osp.join(path, dllname))
break
else:
raise RuntimeError, "Microsoft Visual C++ DLLs version %s "\
"were not found" % version
return filelist
def create_vs2008_data_files(architecture=None, python_version=None,
verbose=False):
"""Including Microsoft Visual C++ 2008 DLLs"""
filelist = get_visual_studio_dlls(architecture=architecture,
python_version=python_version)
print create_vs2008_data_files.__doc__
if verbose:
for name in filelist:
print " ", name
return [("Microsoft.VC90.CRT", filelist),]
def to_include_files(data_files):
"""Convert data_files list to include_files list
data_files:
* this is the ``py2exe`` data files format
* list of tuples (dest_dirname, (src_fname1, src_fname2, ...))
include_files:
* this is the ``cx_Freeze`` data files format
* list of tuples ((src_fname1, dst_fname1),
(src_fname2, dst_fname2), ...))
"""
include_files = []
for dest_dir, fnames in data_files:
for source_fname in fnames:
dest_fname = osp.join(dest_dir, osp.basename(source_fname))
include_files.append((source_fname, dest_fname))
return include_files
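# Illustration of the conversion above (paths are made up):
#   data_files    = [("images", ("logo.png", "icons/app.svg"))]
#   include_files = [("logo.png", "images/logo.png"),
#                    ("icons/app.svg", "images/app.svg")]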
def strip_version(version):
"""Return version number with digits only
(Windows does not support strings in version numbers)"""
return version.split('beta')[0].split('alpha'
)[0].split('rc')[0].split('dev')[0]
def remove_dir(dirname):
"""Remove directory *dirname* and all its contents
Print details about the operation (progress, success/failure)"""
print "Removing directory '%s'..." % dirname,
try:
shutil.rmtree(dirname, ignore_errors=True)
print "OK"
except Exception:
print "Failed!"
traceback.print_exc()
class Distribution(object):
"""Distribution object
Help creating an executable using ``py2exe`` or ``cx_Freeze``
"""
DEFAULT_EXCLUDES = ['Tkconstants', 'Tkinter', 'tcl', 'tk', 'wx',
'_imagingtk', 'curses', 'PIL._imagingtk', 'ImageTk',
'PIL.ImageTk', 'FixTk', 'bsddb', 'email',
'pywin.debugger', 'pywin.debugger.dbgcon',
'matplotlib']
DEFAULT_INCLUDES = []
DEFAULT_BIN_EXCLUDES = ['MSVCP90.dll', 'w9xpopen.exe',
'MSVCP80.dll', 'MSVCR80.dll']
DEFAULT_BIN_INCLUDES = []
DEFAULT_BIN_PATH_INCLUDES = []
DEFAULT_BIN_PATH_EXCLUDES = []
def __init__(self):
self.name = None
self.version = None
self.description = None
self.target_name = None
self._target_dir = None
self.icon = None
self.data_files = []
self.includes = self.DEFAULT_INCLUDES
self.excludes = self.DEFAULT_EXCLUDES
self.bin_includes = self.DEFAULT_BIN_INCLUDES
self.bin_excludes = self.DEFAULT_BIN_EXCLUDES
self.bin_path_includes = self.DEFAULT_BIN_PATH_INCLUDES
self.bin_path_excludes = self.DEFAULT_BIN_PATH_EXCLUDES
self.vs2008 = os.name == 'nt'
self._py2exe_is_loaded = False
        self._qframer_qt_added = False
self._pyside_added = False
# Attributes relative to cx_Freeze:
self.executables = []
@property
def target_dir(self):
"""Return target directory (default: 'dist')"""
dirname = self._target_dir
if dirname is None:
return 'dist'
else:
return dirname
@target_dir.setter # analysis:ignore
def target_dir(self, value):
self._target_dir = value
def setup(self, name, version, description, script,
target_name=None, target_dir=None, icon=None,
data_files=None, includes=None, excludes=None,
bin_includes=None, bin_excludes=None,
bin_path_includes=None, bin_path_excludes=None, vs2008=None):
"""Setup distribution object
Notes:
* bin_path_excludes is specific to cx_Freeze (ignored if it's None)
* if vs2008 is None, it's set to True by default on Windows
platforms, False on non-Windows platforms
"""
self.name = name
self.version = strip_version(version) if os.name == 'nt' else version
self.description = description
assert osp.isfile(script)
self.script = script
self.target_name = target_name
self.target_dir = target_dir
self.icon = icon
if data_files is not None:
self.data_files += data_files
if includes is not None:
self.includes += includes
if excludes is not None:
self.excludes += excludes
if bin_includes is not None:
self.bin_includes += bin_includes
if bin_excludes is not None:
self.bin_excludes += bin_excludes
if bin_path_includes is not None:
self.bin_path_includes += bin_path_includes
if bin_path_excludes is not None:
self.bin_path_excludes += bin_path_excludes
if vs2008 is not None:
self.vs2008 = vs2008
if self.vs2008:
try:
self.data_files += create_vs2008_data_files()
except IOError:
print >>sys.stderr, "Setting the vs2008 option to False "\
"will avoid this error"
raise
# cx_Freeze:
self.add_executable(self.script, self.target_name, icon=self.icon)
def add_text_data_file(self, filename, contents):
"""Create temporary data file *filename* with *contents*
and add it to *data_files*"""
file(filename, 'wb').write(contents)
self.data_files += [("", (filename, ))]
_remove_later(filename)
def add_data_file(self, filename, destdir=''):
self.data_files += [(destdir, (filename, ))]
#------ Adding packages
    def add_qframer_qt(self):
        """Include the qframer.qt module in the distribution"""
        if self._qframer_qt_added:
            return
        self._qframer_qt_added = True
        self.includes += ['sip', 'qframer.qt.Qt', 'qframer.qt.QtSvg', 'qframer.qt.QtNetwork']
import qframer.qt
pyqt_path = osp.dirname(qframer.qt.__file__)
# Configuring qframer.qt
conf = os.linesep.join(["[Paths]", "Prefix = .", "Binaries = ."])
self.add_text_data_file('qt.conf', conf)
# Including plugins (.svg icons support, QtDesigner support, ...)
if self.vs2008:
vc90man = "Microsoft.VC90.CRT.manifest"
pyqt_tmp = 'pyqt_tmp'
if osp.isdir(pyqt_tmp):
shutil.rmtree(pyqt_tmp)
os.mkdir(pyqt_tmp)
vc90man_pyqt = osp.join(pyqt_tmp, vc90man)
man = file(vc90man, "r").read().replace('<file name="',
'<file name="Microsoft.VC90.CRT\\')
file(vc90man_pyqt, 'w').write(man)
for dirpath, _, filenames in os.walk(osp.join(pyqt_path,
"plugins")):
filelist = [osp.join(dirpath, f) for f in filenames
if osp.splitext(f)[1] in ('.dll', '.py')]
if self.vs2008 and [f for f in filelist
if osp.splitext(f)[1] == '.dll']:
                # Where there is a DLL built with Microsoft Visual C++ 2008,
# there must be a manifest file as well...
# ...congrats to Microsoft for this great simplification!
filelist.append(vc90man_pyqt)
self.data_files.append( (dirpath[len(pyqt_path)+len(os.pathsep):],
filelist) )
if self.vs2008:
atexit.register(remove_dir, pyqt_tmp)
# Including french translation
fr_trans = osp.join(pyqt_path, "translations", "qt_fr.qm")
if osp.exists(fr_trans):
self.data_files.append(('translations', (fr_trans, )))
def add_pyside(self):
"""Include module PySide to the distribution"""
if self._pyside_added:
return
self._pyside_added = True
self.includes += ['PySide.QtDeclarative', 'PySide.QtHelp',
'PySide.QtMultimedia', 'PySide.QtNetwork',
'PySide.QtOpenGL', 'PySide.QtScript',
'PySide.QtScriptTools', 'PySide.QtSql',
'PySide.QtSvg', 'PySide.QtTest',
'PySide.QtUiTools', 'PySide.QtWebKit',
'PySide.QtXml', 'PySide.QtXmlPatterns']
import PySide
pyside_path = osp.dirname(PySide.__file__)
# Configuring PySide
conf = os.linesep.join(["[Paths]", "Prefix = .", "Binaries = ."])
self.add_text_data_file('qt.conf', conf)
# Including plugins (.svg icons support, QtDesigner support, ...)
if self.vs2008:
vc90man = "Microsoft.VC90.CRT.manifest"
os.mkdir('pyside_tmp')
vc90man_pyside = osp.join('pyside_tmp', vc90man)
man = file(vc90man, "r").read().replace('<file name="',
'<file name="Microsoft.VC90.CRT\\')
file(vc90man_pyside, 'w').write(man)
for dirpath, _, filenames in os.walk(osp.join(pyside_path, "plugins")):
filelist = [osp.join(dirpath, f) for f in filenames
if osp.splitext(f)[1] in ('.dll', '.py')]
if self.vs2008 and [f for f in filelist
if osp.splitext(f)[1] == '.dll']:
                # Where there is a DLL built with Microsoft Visual C++ 2008,
# there must be a manifest file as well...
# ...congrats to Microsoft for this great simplification!
filelist.append(vc90man_pyside)
self.data_files.append(
(dirpath[len(pyside_path)+len(os.pathsep):], filelist) )
# Replacing dlls found by cx_Freeze by the real PySide Qt dlls:
# (http://qt-project.org/wiki/Packaging_PySide_applications_on_Windows)
dlls = [osp.join(pyside_path, fname)
for fname in os.listdir(pyside_path)
if osp.splitext(fname)[1] == '.dll']
self.data_files.append( ('', dlls) )
if self.vs2008:
atexit.register(remove_dir, 'pyside_tmp')
# Including french translation
fr_trans = osp.join(pyside_path, "translations", "qt_fr.qm")
if osp.exists(fr_trans):
self.data_files.append(('translations', (fr_trans, )))
def add_qt_bindings(self):
"""Include Qt bindings, i.e. qframer.qt or PySide"""
try:
imp.find_module('qframer.qt')
self.add_modules('qframer.qt')
except ImportError:
self.add_modules('PySide')
def add_matplotlib(self):
"""Include module Matplotlib to the distribution"""
if 'matplotlib' in self.excludes:
self.excludes.pop(self.excludes.index('matplotlib'))
try:
import matplotlib.numerix # analysis:ignore
self.includes += ['matplotlib.numerix.ma',
'matplotlib.numerix.fft',
'matplotlib.numerix.linear_algebra',
'matplotlib.numerix.mlab',
'matplotlib.numerix.random_array']
except ImportError:
pass
self.add_module_data_files('matplotlib', ('mpl-data', ),
('.conf', '.glade', '', '.png', '.svg',
'.xpm', '.ppm', '.npy', '.afm', '.ttf'))
def add_modules(self, *module_names):
"""Include module *module_name*"""
for module_name in module_names:
print "Configuring module '%s'" % module_name
if module_name == 'qframer.qt':
                self.add_qframer_qt()
elif module_name == 'PySide':
self.add_pyside()
elif module_name == 'scipy.io':
self.includes += ['scipy.io.matlab.streams']
elif module_name == 'matplotlib':
self.add_matplotlib()
elif module_name == 'h5py':
import h5py
for attr in ['_stub', '_sync', 'utils', '_conv', '_proxy',
'defs']:
if hasattr(h5py, attr):
self.includes.append('h5py.%s' % attr)
if self.bin_path_excludes is not None and os.name == 'nt':
# Specific to cx_Freeze on Windows: avoid including a zlib dll
# built with another version of Microsoft Visual Studio
self.bin_path_excludes += [r'C:\Program Files',
r'C:\Program Files (x86)']
self.data_files.append( # necessary for cx_Freeze only
('', (osp.join(get_module_path('h5py'), 'zlib1.dll'), ))
)
elif module_name in ('docutils', 'rst2pdf', 'sphinx'):
self.includes += ['docutils.writers.null',
'docutils.languages.en',
'docutils.languages.fr']
if module_name == 'rst2pdf':
self.add_module_data_files("rst2pdf", ("styles", ),
('.json', '.style'),
copy_to_root=True)
if module_name == 'sphinx':
import sphinx.ext
for fname in os.listdir(osp.dirname(sphinx.ext.__file__)):
if osp.splitext(fname)[1] == '.py':
modname = 'sphinx.ext.%s' % osp.splitext(fname)[0]
self.includes.append(modname)
elif module_name == 'guidata':
self.add_module_data_files('guidata', ("images", ),
('.png', '.svg'), copy_to_root=False)
try:
imp.find_module('qframer.qt')
                    self.add_qframer_qt()
except ImportError:
self.add_pyside()
elif module_name == 'guiqwt':
self.add_module_data_files('guiqwt', ("images", ),
('.png', '.svg'), copy_to_root=False)
if os.name == 'nt':
# Specific to cx_Freeze: including manually MinGW DLLs
self.bin_includes += ['libgcc_s_dw2-1.dll',
'libstdc++-6.dll']
else:
try:
# Modules based on the same scheme as guidata and guiqwt
self.add_module_data_files(module_name, ("images", ),
('.png', '.svg'), copy_to_root=False)
except IOError:
raise RuntimeError("Module not supported: %s" % module_name)
def add_module_data_dir(self, module_name, data_dir_name, extensions,
copy_to_root=True, verbose=False,
exclude_dirs=[]):
"""
Collect data files in *data_dir_name* for module *module_name*
and add them to *data_files*
*extensions*: list of file extensions, e.g. ('.png', '.svg')
"""
module_dir = get_module_path(module_name)
nstrip = len(module_dir) + len(osp.sep)
data_dir = osp.join(module_dir, data_dir_name)
if not osp.isdir(data_dir):
raise IOError, "Directory not found: %s" % data_dir
for dirpath, _dirnames, filenames in os.walk(data_dir):
dirname = dirpath[nstrip:]
if osp.basename(dirpath) in exclude_dirs:
continue
if not copy_to_root:
dirname = osp.join(module_name, dirname)
pathlist = [osp.join(dirpath, f) for f in filenames
if osp.splitext(f)[1].lower() in extensions]
self.data_files.append( (dirname, pathlist) )
if verbose:
for name in pathlist:
print " ", name
def add_module_data_files(self, module_name, data_dir_names, extensions,
copy_to_root=True, verbose=False,
exclude_dirs=[]):
"""
Collect data files for module *module_name* and add them to *data_files*
*data_dir_names*: list of dirnames, e.g. ('images', )
*extensions*: list of file extensions, e.g. ('.png', '.svg')
"""
print "Adding module '%s' data files in %s (%s)"\
% (module_name, ", ".join(data_dir_names), ", ".join(extensions))
module_dir = get_module_path(module_name)
for data_dir_name in data_dir_names:
self.add_module_data_dir(module_name, data_dir_name, extensions,
copy_to_root, verbose, exclude_dirs)
translation_file = osp.join(module_dir, "locale", "fr", "LC_MESSAGES",
"%s.mo" % module_name)
if osp.isfile(translation_file):
self.data_files.append((osp.join(module_name, "locale", "fr",
"LC_MESSAGES"), (translation_file, )))
print "Adding module '%s' translation file: %s" % (module_name,
osp.basename(translation_file))
def build(self, library, cleanup=True, create_archive=None):
"""Build executable with given library.
library:
* 'py2exe': deploy using the `py2exe` library
* 'cx_Freeze': deploy using the `cx_Freeze` library
cleanup: remove 'build/dist' directories before building distribution
create_archive (requires the executable `zip`):
* None or False: do nothing
* 'add': add target directory to a ZIP archive
* 'move': move target directory to a ZIP archive
"""
if library == 'py2exe':
self.build_py2exe(cleanup=cleanup,
create_archive=create_archive)
elif library == 'cx_Freeze':
self.build_cx_freeze(cleanup=cleanup,
create_archive=create_archive)
else:
raise RuntimeError, "Unsupported library %s" % library
def __cleanup(self):
"""Remove old build and dist directories"""
remove_dir("build")
if osp.isdir("dist"):
remove_dir("dist")
remove_dir(self.target_dir)
def __create_archive(self, option):
"""Create a ZIP archive
option:
* 'add': add target directory to a ZIP archive
* 'move': move target directory to a ZIP archive
"""
name = self.target_dir
os.system('zip "%s.zip" -r "%s"' % (name, name))
if option == 'move':
shutil.rmtree(name)
def build_py2exe(self, cleanup=True, compressed=2, optimize=2,
company_name=None, copyright=None, create_archive=None, bundle_files=3, zipfile=None):
"""Build executable with py2exe
cleanup: remove 'build/dist' directories before building distribution
create_archive (requires the executable `zip`):
* None or False: do nothing
* 'add': add target directory to a ZIP archive
* 'move': move target directory to a ZIP archive
"""
from distutils.core import setup
import py2exe # Patching distutils -- analysis:ignore
self._py2exe_is_loaded = True
if cleanup:
self.__cleanup()
sys.argv += ["py2exe"]
options = dict(compressed=compressed, optimize=optimize,
includes=self.includes, excludes=self.excludes,
dll_excludes=self.bin_excludes,
dist_dir=self.target_dir,
bundle_files=bundle_files)
windows = dict(name=self.name, description=self.description,
script=self.script, icon_resources=[(0, self.icon)],
bitmap_resources=[], other_resources=[],
dest_base=osp.splitext(self.target_name)[0],
version=self.version,
company_name=company_name, copyright=copyright)
setup(data_files=self.data_files, windows=[windows,],
options=dict(py2exe=options))
if create_archive:
self.__create_archive(create_archive)
def add_executable(self, script, target_name, icon=None):
"""Add executable to the cx_Freeze distribution
Not supported for py2exe"""
from cx_Freeze import Executable
base = None
if script.endswith('.pyw') and os.name == 'nt':
base = 'win32gui'
        self.executables += [Executable(script, base=base, icon=icon,
                                        targetName=target_name)]
def build_cx_freeze(self, cleanup=True, create_archive=None):
"""Build executable with cx_Freeze
cleanup: remove 'build/dist' directories before building distribution
create_archive (requires the executable `zip`):
* None or False: do nothing
* 'add': add target directory to a ZIP archive
* 'move': move target directory to a ZIP archive
"""
assert not self._py2exe_is_loaded, \
"cx_Freeze can't be executed after py2exe"
from cx_Freeze import setup
if cleanup:
self.__cleanup()
sys.argv += ["build"]
build_exe = dict(include_files=to_include_files(self.data_files),
includes=self.includes, excludes=self.excludes,
bin_excludes=self.bin_excludes,
bin_includes=self.bin_includes,
bin_path_includes=self.bin_path_includes,
bin_path_excludes=self.bin_path_excludes,
build_exe=self.target_dir)
setup(name=self.name, version=self.version,
description=self.description, executables=self.executables,
options=dict(build_exe=build_exe))
if create_archive:
self.__create_archive(create_archive)
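# Typical usage, as a minimal sketch: the application name, script and target
# name are placeholders; setup(), add_modules() and build() are the methods
# defined above.
if __name__ == '__main__':
    dist = Distribution()
    dist.setup(name="MyApp", version="1.0.2",
               description="Example frozen application",
               script="myapp.pyw", target_name="myapp.exe")
    dist.add_modules('guidata')
    dist.build('cx_Freeze')  # 'py2exe' is also accepted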
|
{
"content_hash": "d48bbeeac01963e7c147f21b9675a416",
"timestamp": "",
"source": "github",
"line_count": 652,
"max_line_length": 107,
"avg_line_length": 42.69631901840491,
"alnum_prop": 0.5429628565270493,
"repo_name": "dragondjf/QMarkdowner",
"id": "806999fbdfdb4e9474c92249372003910c6f5627",
"size": "28025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distribution/disthelpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "298457"
},
{
"name": "HTML",
"bytes": "28594"
},
{
"name": "JavaScript",
"bytes": "466716"
},
{
"name": "Python",
"bytes": "13167142"
}
],
"symlink_target": ""
}
|
import ipaddress
import pyroute2
from octavia.common import constants as consts
from octavia.common import exceptions
def _find_interface(ip_address, rtnl_api, normalized_addr):
"""Find the interface using a routing netlink API.
:param ip_address: The IP address to search with.
    :param rtnl_api: A pyroute2 rtnl_api instance. (IPRoute, NetNS, etc.)
    :param normalized_addr: The compressed (normalized) form of ip_address.
:returns: The interface name if found, None if not found.
:raises exceptions.InvalidIPAddress: Invalid IP address provided.
"""
for addr in rtnl_api.get_addr(address=ip_address):
# Save the interface index as IPv6 records don't list a textual
# interface
interface_idx = addr['index']
# Search through the attributes of each address record
for attr in addr['attrs']:
# Look for the attribute name/value pair for the address
if attr[0] == 'IFA_ADDRESS':
# Compare the normalized address with the address we are
# looking for. Since we have matched the name above, attr[1]
# is the address value
if normalized_addr == ipaddress.ip_address(attr[1]).compressed:
# Lookup the matching interface name by getting the
# interface with the index we found in the above address
# search
lookup_int = rtnl_api.get_links(interface_idx)
# Search through the attributes of the matching interface
# record
for int_attr in lookup_int[0]['attrs']:
# Look for the attribute name/value pair that includes
# the interface name
if int_attr[0] == consts.IFLA_IFNAME:
# Return the matching interface name that is in
# int_attr[1] for the matching interface attribute
# name
return int_attr[1]
# We didn't find an interface with that IP address.
return None
def get_interface_name(ip_address, net_ns=None):
"""Gets the interface name from an IP address.
:param ip_address: The IP address to lookup.
:param net_ns: The network namespace to find the interface in.
:returns: The interface name.
:raises exceptions.InvalidIPAddress: Invalid IP address provided.
:raises octavia.common.exceptions.NotFound: No interface was found.
"""
# We need to normalize the address as IPv6 has multiple representations
# fe80:0000:0000:0000:f816:3eff:fef2:2058 == fe80::f816:3eff:fef2:2058
try:
normalized_addr = ipaddress.ip_address(ip_address).compressed
except ValueError as e:
raise exceptions.InvalidIPAddress(ip_addr=ip_address) from e
if net_ns:
with pyroute2.NetNS(net_ns) as rtnl_api:
interface = _find_interface(ip_address, rtnl_api, normalized_addr)
else:
with pyroute2.IPRoute() as rtnl_api:
interface = _find_interface(ip_address, rtnl_api, normalized_addr)
if interface is not None:
return interface
raise exceptions.NotFound(resource='IP address', id=ip_address)
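# Hedged usage sketch (added for illustration; not part of the original
# octavia module). The address below is a documentation-range placeholder;
# on a real amphora you would pass an address that is actually configured
# on a local interface, optionally scoped to a network namespace.
if __name__ == "__main__":
    try:
        print(get_interface_name("203.0.113.10"))
    except exceptions.NotFound:
        print("no local interface carries 203.0.113.10")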
|
{
"content_hash": "d5aeeb9f5b14f5244a1df9aefec7cb30",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 45.16901408450704,
"alnum_prop": 0.6245712503897723,
"repo_name": "openstack/octavia",
"id": "6311b695e694150f7cc5b7b540b4f5b9f2beb6ea",
"size": "3801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "octavia/amphorae/backends/utils/network_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "60600"
},
{
"name": "Mako",
"bytes": "922"
},
{
"name": "Python",
"bytes": "6651664"
},
{
"name": "Ruby",
"bytes": "531"
},
{
"name": "Shell",
"bytes": "117966"
}
],
"symlink_target": ""
}
|
"""
tinykit
-------
A toolkit for TinyDB with additional features (transaction, model, etc).
"""
import codecs
import os
import re
from setuptools import setup
from setuptools import find_packages
def find_version(*file_paths):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, *file_paths), 'r') as f:
version_file = f.read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name="tinykit",
version=find_version("tinykit", "__init__.py"),
url="http://github.com/iromli/tinykit",
license="MIT",
author="Isman Firmansyah",
author_email="isman.firmansyah@gmail.com",
description="",
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
install_requires=[
"jsonmodels",
"six",
"tinydb",
"tinyrecord",
],
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
],
include_package_data=True,
)
|
{
"content_hash": "23f3e189cbd931b4650d54766ad0380a",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 72,
"avg_line_length": 28.37735849056604,
"alnum_prop": 0.5957446808510638,
"repo_name": "iromli/timo",
"id": "7a8be24256fb383c139297b9cc74866e1c2f59f7",
"size": "1504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5632"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, **options):
self.stdout.write('Working...')
self.stdout.flush()
self.stdout.write('OK')
|
{
"content_hash": "2b02220b419cec4b990777ecb6e252fa",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 51,
"avg_line_length": 26.875,
"alnum_prop": 0.6604651162790698,
"repo_name": "elena/django",
"id": "bafc30d1287825d215b151cfb0f9c3299771c1f3",
"size": "215",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "tests/user_commands/management/commands/outputwrapper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "43253"
},
{
"name": "HTML",
"bytes": "171768"
},
{
"name": "JavaScript",
"bytes": "105066"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11016010"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
import numpy as np
from rl_coach.core_types import ObservationType
from rl_coach.filters.observation.observation_filter import ObservationFilter
from rl_coach.spaces import ObservationSpace
class ObservationClippingFilter(ObservationFilter):
"""
Clips the observation values to a given range of values.
For example, if the observation consists of measurements in an arbitrary range,
and we want to control the minimum and maximum values of these observations,
we can define a range and clip the values of the measurements.
"""
def __init__(self, clipping_low: float=-np.inf, clipping_high: float=np.inf):
"""
        :param clipping_low: The minimum value to allow in the observation (lower values are clipped)
        :param clipping_high: The maximum value to allow in the observation (higher values are clipped)
"""
super().__init__()
self.clip_min = clipping_low
self.clip_max = clipping_high
def filter(self, observation: ObservationType, update_internal_state: bool=True) -> ObservationType:
observation = np.clip(observation, self.clip_min, self.clip_max)
return observation
def get_filtered_observation_space(self, input_observation_space: ObservationSpace) -> ObservationSpace:
return input_observation_space
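# Hedged usage sketch (added for illustration; not part of the original
# rl_coach module): clip a raw observation vector into the range [-1.0, 1.0].
if __name__ == "__main__":
    demo_filter = ObservationClippingFilter(clipping_low=-1.0, clipping_high=1.0)
    raw_observation = np.array([-3.5, 0.2, 7.0])
    print(demo_filter.filter(raw_observation))  # -> [-1.   0.2  1. ]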
|
{
"content_hash": "e7eecc4cda0bbb4af4fafe522220f5db",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 108,
"avg_line_length": 43.3,
"alnum_prop": 0.7259430331023865,
"repo_name": "NervanaSystems/coach",
"id": "8c5c0ae44f82d130ceee9228dd797bb6d8669bfb",
"size": "1891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rl_coach/filters/observation/observation_clipping_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "791"
},
{
"name": "CSS",
"bytes": "6493"
},
{
"name": "Dockerfile",
"bytes": "1118"
},
{
"name": "HTML",
"bytes": "161"
},
{
"name": "Jupyter Notebook",
"bytes": "91174"
},
{
"name": "Makefile",
"bytes": "5036"
},
{
"name": "Python",
"bytes": "1926733"
},
{
"name": "Shell",
"bytes": "428"
}
],
"symlink_target": ""
}
|
from __future__ import division
import copy
import itertools as it
import numpy as np
import networkx as nx
from pgmpy.extern.six.moves import filter, range
from pgmpy.inference import Inference
from pgmpy.models import MarkovModel
from pgmpy.factors.discrete import DiscreteFactor
class Mplp(Inference):
"""
Class for performing approximate inference using Max-Product Linear Programming method.
We derive message passing updates that result in monotone decrease of the dual of the
MAP LP Relaxation.
Parameters
----------
model: MarkovModel for which inference is to be performed.
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors import DiscreteFactor
>>> import numpy as np
>>> student = MarkovModel()
>>> student.add_edges_from([('A', 'B'), ('B', 'C'), ('C', 'D'), ('E', 'F')])
    >>> factor_a = DiscreteFactor(['A'], cardinality=[2], values=np.array([0.54577, 1.8323]))
    >>> factor_b = DiscreteFactor(['B'], cardinality=[2], values=np.array([0.93894, 1.065]))
    >>> factor_c = DiscreteFactor(['C'], cardinality=[2], values=np.array([0.89205, 1.121]))
    >>> factor_d = DiscreteFactor(['D'], cardinality=[2], values=np.array([0.56292, 1.7765]))
    >>> factor_e = DiscreteFactor(['E'], cardinality=[2], values=np.array([0.47117, 2.1224]))
    >>> factor_f = DiscreteFactor(['F'], cardinality=[2], values=np.array([1.5093, 0.66257]))
    >>> factor_a_b = DiscreteFactor(['A', 'B'], cardinality=[2, 2], values=np.array([1.3207, 0.75717, 0.75717, 1.3207]))
    >>> factor_b_c = DiscreteFactor(['B', 'C'], cardinality=[2, 2], values=np.array([0.00024189, 4134.2, 4134.2, 0.00024189]))
    >>> factor_c_d = DiscreteFactor(['C', 'D'], cardinality=[2, 2], values=np.array([0.0043227, 231.34, 231.34, 0.0043227]))
    >>> factor_d_e = DiscreteFactor(['E', 'F'], cardinality=[2, 2], values=np.array([31.228, 0.032023, 0.032023, 31.228]))
>>> student.add_factors(factor_a, factor_b, factor_c, factor_d, factor_e, factor_f, factor_a_b,
... factor_b_c, factor_c_d, factor_d_e)
>>> mplp = Mplp(student)
"""
def __init__(self, model):
if not isinstance(model, MarkovModel):
raise TypeError('Only MarkovModel is supported')
super(Mplp, self).__init__(model)
self.model = model
# S = \{c \cap c^{'} : c, c^{'} \in C, c \cap c^{'} \neq \emptyset\}
self.intersection_set_variables = set()
# We generate the Intersections of all the pairwise edges taken one at a time to form S
for edge_pair in it.combinations(model.edges(), 2):
self.intersection_set_variables.add(frozenset(edge_pair[0]) & frozenset(edge_pair[1]))
# The corresponding optimization problem = \min_{\delta}{dual_lp(\delta)} where:
        # dual_lp(\delta) = \sum_{i \in V}{max_{x_i}(Objective[nodes])} + \sum_{f \in F}{max_{x_f}(Objective[factors])}
# Objective[nodes] = \theta_i(x_i) + \sum_{f \mid i \in f}{\delta_{fi}(x_i)}
# Objective[factors] = \theta_f(x_f) - \sum_{i \in f}{\delta_{fi}(x_i)}
# In a way Objective stores the corresponding optimization problem for all the nodes and the factors.
# Form Objective and cluster_set in the form of a dictionary.
self.objective = {}
self.cluster_set = {}
for factor in model.get_factors():
scope = frozenset(factor.scope())
self.objective[scope] = factor
            # For every factor consisting of more than a single node, we initialize a cluster.
if len(scope) > 1:
self.cluster_set[scope] = self.Cluster(self.intersection_set_variables, factor)
# dual_lp(\delta) is the dual linear program
self.dual_lp = sum([np.amax(self.objective[obj].values) for obj in self.objective])
# Best integral value of the primal objective is stored here
self.best_int_objective = 0
# Assignment of the nodes that results in the "maximum" integral value of the primal objective
self.best_assignment = {}
# Results of the "maximum" integral value of the primal objective.
self.best_decoded_result = {}
# This sets the minimum width between the dual objective decrements. Default value = 0.0002. This can be
# changed in the map_query() method.
self.dual_threshold = 0.0002
# This sets the threshold for the integrality gap below which we say that the solution is satisfactory.
# Default value = 0.0002. This can be changed in the map_query() method.
self.integrality_gap_threshold = 0.0002
class Cluster(object):
"""
Inner class for representing a cluster.
A cluster is a subset of variables.
Parameters
----------
set_of_variables: tuple
This is the set of variables that form the cluster.
intersection_set_variables: set containing frozensets.
collection of intersection of all pairs of cluster variables.
For eg: \{\{C_1 \cap C_2\}, \{C_2 \cap C_3\}, \{C_3 \cap C_1\} \} for clusters C_1, C_2 & C_3.
cluster_potential: DiscreteFactor
Each cluster has a initial probability distribution provided beforehand.
"""
def __init__(self, intersection_set_variables, cluster_potential):
"""
Initialization of the current cluster
"""
# The variables with which the cluster is made of.
self.cluster_variables = frozenset(cluster_potential.scope())
# The cluster potentials must be specified before only.
self.cluster_potential = copy.deepcopy(cluster_potential)
# Generate intersection sets for this cluster; S(c)
self.intersection_sets_for_cluster_c = [intersect.intersection(self.cluster_variables)
for intersect in intersection_set_variables
if intersect.intersection(self.cluster_variables)]
# Initialize messages from this cluster to its respective intersection sets
            # \lambda_{c \rightarrow s} = 0
self.message_from_cluster = {}
for intersection in self.intersection_sets_for_cluster_c:
# Present variable. It can be a node or an edge too. (that is ['A'] or ['A', 'C'] too)
present_variables = list(intersection)
# Present variables cardinality
present_variables_card = cluster_potential.get_cardinality(present_variables)
present_variables_card = [present_variables_card[var] for var in present_variables]
# We need to create a new factor whose messages are blank
self.message_from_cluster[intersection] = \
DiscreteFactor(present_variables, present_variables_card, np.zeros(np.prod(present_variables_card)))
def _update_message(self, sending_cluster):
"""
This is the message-update method.
Parameters
----------
sending_cluster: The resulting messages are lambda_{c-->s} from the given
cluster 'c' to all of its intersection_sets 's'.
Here 's' are the elements of intersection_sets_for_cluster_c.
Reference
---------
Fixing Max-Product: Convergent Message-Passing Algorithms for MAP LP Relaxations
by Amir Globerson and Tommi Jaakkola.
Section 6, Page: 5; Beyond pairwise potentials: Generalized MPLP
Later Modified by Sontag in "Introduction to Dual decomposition for Inference" Pg: 7 & 17
"""
# The new updates will take place for the intersection_sets of this cluster.
# The new updates are:
# \delta_{f \rightarrow i}(x_i) = - \delta_i^{-f} +
# 1/{\| f \|} max_{x_{f-i}}\left[{\theta_f(x_f) + \sum_{i' in f}{\delta_{i'}^{-f}}(x_i')} \right ]
# Step. 1) Calculate {\theta_f(x_f) + \sum_{i' in f}{\delta_{i'}^{-f}}(x_i')}
objective_cluster = self.objective[sending_cluster.cluster_variables]
for current_intersect in sending_cluster.intersection_sets_for_cluster_c:
objective_cluster += self.objective[current_intersect]
updated_results = []
objective = []
for current_intersect in sending_cluster.intersection_sets_for_cluster_c:
# Step. 2) Maximize step.1 result wrt variables present in the cluster but not in the current intersect.
phi = objective_cluster.maximize(list(sending_cluster.cluster_variables - current_intersect),
inplace=False)
# Step. 3) Multiply 1/{\| f \|}
intersection_length = len(sending_cluster.intersection_sets_for_cluster_c)
phi *= (1 / intersection_length)
objective.append(phi)
# Step. 4) Subtract \delta_i^{-f}
# These are the messages not emanating from the sending cluster but going into the current intersect.
# which is = Objective[current_intersect_node] - messages from the cluster to the current intersect node.
updated_results.append(phi + -1 * (self.objective[current_intersect] + -1 * sending_cluster.
message_from_cluster[current_intersect]))
# This loop is primarily for simultaneous updating:
# 1. This cluster's message to each of the intersects.
# 2. The value of the Objective for intersection_nodes.
index = -1
cluster_potential = copy.deepcopy(sending_cluster.cluster_potential)
for current_intersect in sending_cluster.intersection_sets_for_cluster_c:
index += 1
sending_cluster.message_from_cluster[current_intersect] = updated_results[index]
self.objective[current_intersect] = objective[index]
cluster_potential += (-1) * updated_results[index]
# Here we update the Objective for the current factor.
self.objective[sending_cluster.cluster_variables] = cluster_potential
def _local_decode(self):
"""
Finds the index of the maximum values for all the single node dual objectives.
Reference:
code presented by Sontag in 2012 here: http://cs.nyu.edu/~dsontag/code/README_v2.html
"""
# The current assignment of the single node factors is stored in the form of a dictionary
decoded_result_assignment = {node: np.argmax(self.objective[node].values)
for node in self.objective if len(node) == 1}
# Use the original cluster_potentials of each factor to find the primal integral value.
# 1. For single node factors
integer_value = sum([self.factors[variable][0].values[decoded_result_assignment[frozenset([variable])]]
for variable in self.variables])
# 2. For clusters
for cluster_key in self.cluster_set:
cluster = self.cluster_set[cluster_key]
index = [tuple([variable, decoded_result_assignment[frozenset([variable])]])
for variable in cluster.cluster_variables]
integer_value += cluster.cluster_potential.reduce(index, inplace=False).values
# Check if this is the best assignment till now
if self.best_int_objective < integer_value:
self.best_int_objective = integer_value
self.best_assignment = decoded_result_assignment
def _is_converged(self, dual_threshold=None, integrality_gap_threshold=None):
"""
This method checks the integrality gap to ensure either:
* we have found a near to exact solution or
* stuck on a local minima.
Parameters
----------
dual_threshold: double
            This sets the minimum width between the dual objective decrements. If the decrement is smaller
            than the threshold, we assume we are stuck in a local minimum.
integrality_gap_threshold: double
This sets the threshold for the integrality gap below which we say that the solution
is satisfactory.
References
----------
code presented by Sontag in 2012 here: http://cs.nyu.edu/~dsontag/code/README_v2.html
"""
# Find the new objective after the message updates
new_dual_lp = sum([np.amax(self.objective[obj].values) for obj in self.objective])
# Update the dual_gap as the difference between the dual objective of the previous and the current iteration.
self.dual_gap = abs(self.dual_lp - new_dual_lp)
# Update the integrality_gap as the difference between our best result vs the dual objective of the lp.
self.integrality_gap = abs(self.dual_lp - self.best_int_objective)
        # As the decrement of the dual_lp gets very low, we assume that we might be stuck in a local minimum.
if dual_threshold and self.dual_gap < dual_threshold:
return True
# Check the threshold for the integrality gap
elif integrality_gap_threshold and self.integrality_gap < integrality_gap_threshold:
return True
else:
self.dual_lp = new_dual_lp
return False
def find_triangles(self):
"""
Finds all the triangles present in the given model
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors import DiscreteFactor
>>> from pgmpy.inference import Mplp
>>> mm = MarkovModel()
>>> mm.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
>>> mm.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'),
... ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'),
... ('x4', 'x7'), ('x5', 'x7')])
>>> phi = [DiscreteFactor(edge, [2, 2], np.random.rand(4)) for edge in mm.edges()]
>>> mm.add_factors(*phi)
>>> mplp = Mplp(mm)
>>> mplp.find_triangles()
"""
return list(filter(lambda x: len(x) == 3, nx.find_cliques(self.model)))
def _update_triangles(self, triangles_list):
"""
From a set of variables forming a triangle in the model, we form the corresponding Clusters.
These clusters are then appended to the code.
Parameters
----------
triangle_list : list
The list of variables forming the triangles to be updated. It is of the form of
[['var_5', 'var_8', 'var_7'], ['var_4', 'var_5', 'var_7']]
"""
new_intersection_set = []
for triangle_vars in triangles_list:
cardinalities = [self.cardinality[variable] for variable in triangle_vars]
current_intersection_set = [frozenset(intersect) for intersect in it.combinations(triangle_vars, 2)]
current_factor = DiscreteFactor(triangle_vars, cardinalities, np.zeros(np.prod(cardinalities)))
self.cluster_set[frozenset(triangle_vars)] = self.Cluster(current_intersection_set, current_factor)
# add new factors
self.model.factors.append(current_factor)
# add new intersection sets
new_intersection_set.extend(current_intersection_set)
# add new factors in objective
self.objective[frozenset(triangle_vars)] = current_factor
def _get_triplet_scores(self, triangles_list):
"""
Returns the score of each of the triplets found in the current model
Parameters
---------
triangles_list: list
The list of variables forming the triangles to be updated. It is of the form of
[['var_5', 'var_8', 'var_7'], ['var_4', 'var_5', 'var_7']]
Return: {frozenset({'var_8', 'var_5', 'var_7'}): 5.024, frozenset({'var_5', 'var_4', 'var_7'}): 10.23}
"""
triplet_scores = {}
for triplet in triangles_list:
# Find the intersection sets of the current triplet
triplet_intersections = [intersect for intersect in it.combinations(triplet, 2)]
# Independent maximization
ind_max = sum([np.amax(self.objective[frozenset(intersect)].values) for intersect in triplet_intersections])
# Joint maximization
joint_max = self.objective[frozenset(triplet_intersections[0])]
for intersect in triplet_intersections[1:]:
joint_max += self.objective[frozenset(intersect)]
joint_max = np.amax(joint_max.values)
# score = Independent maximization solution - Joint maximization solution
score = ind_max - joint_max
triplet_scores[frozenset(triplet)] = score
return triplet_scores
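    # Hedged illustration (comment added for clarity; not in the original
    # source): for a triplet such as {x1, x2, x4}, the three pairwise
    # objectives might individually peak at, say, 2.0, 1.5 and 1.0
    # (ind_max = 4.5), while the best single joint assignment of the triplet
    # only reaches 3.8 (joint_max). The resulting score of 0.7 estimates how
    # much adding this triplet cluster could tighten the dual bound, which is
    # why triplets are later added in decreasing order of this score.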
def _run_mplp(self, no_iterations):
"""
        Updates messages until MPLP converges; if it does not converge, it halts after no_iterations iterations.
Parameters
--------
no_iterations: integer
            Maximum number of iterations to run MPLP for.
"""
for niter in range(no_iterations):
# We take the clusters in the order they were added in the model and update messages for all factors whose
# scope is greater than 1
for factor in self.model.get_factors():
if len(factor.scope()) > 1:
self._update_message(self.cluster_set[frozenset(factor.scope())])
# Find an integral solution by locally maximizing the single node beliefs
self._local_decode()
# If mplp converges to a global/local optima, we break.
if self._is_converged(self.dual_threshold, self.integrality_gap_threshold) and niter >= 16:
break
def _tighten_triplet(self, max_iterations, later_iter, max_triplets, prolong):
"""
        This method finds all the eligible triplets and adds them iteratively, at most max_triplets at a time.
Parameters
----------
max_iterations: integer
Maximum number of times we tighten the relaxation
        later_iter: integer
            Maximum number of iterations to run MPLP for after each tightening step. This is smaller than
            the initial number of iterations.
        max_triplets: integer
            Maximum number of triplets that can be added in one iteration.
prolong: bool
It sets the continuation of tightening after all the triplets are exhausted
"""
# Find all the triplets that are possible in the present model
triangles = self.find_triangles()
# Evaluate scores for each of the triplets found above
triplet_scores = self._get_triplet_scores(triangles)
# Arrange the keys on the basis of increasing order of the values of the dict. triplet_scores
sorted_scores = sorted(triplet_scores, key=triplet_scores.get)
for niter in range(max_iterations):
if self._is_converged(integrality_gap_threshold=self.integrality_gap_threshold):
break
# add triplets that are yet not added.
add_triplets = []
for triplet_number in (range(len(sorted_scores))):
                # We can add at most max_triplets triplets at once
if triplet_number >= max_triplets:
break
add_triplets.append(sorted_scores.pop())
# Break from the tighten triplets loop if there are no triplets to add if the prolong is set to False
if not add_triplets and prolong is False:
break
# Update the eligible triplets to tighten the relaxation
self._update_triangles(add_triplets)
# Run MPLP for a maximum of later_iter times.
self._run_mplp(later_iter)
def get_integrality_gap(self):
"""
        Returns the integrality gap of the current state of the Mplp algorithm. The smaller it is, the closer we are
        to the exact solution.
Example:
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors import DiscreteFactor
>>> from pgmpy.inference import Mplp
>>> mm = MarkovModel()
>>> mm.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
>>> mm.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'),
... ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'),
... ('x4', 'x7'), ('x5', 'x7')])
>>> phi = [DiscreteFactor(edge, [2, 2], np.random.rand(4)) for edge in mm.edges()]
>>> mm.add_factors(*phi)
>>> mplp = Mplp(mm)
>>> mplp.map_query()
>>> int_gap = mplp.get_integrality_gap()
"""
return self.integrality_gap
def query(self):
raise NotImplementedError("map_query() is the only query method available.")
def map_query(self, init_iter=1000, later_iter=20, dual_threshold=0.0002, integrality_gap_threshold=0.0002,
tighten_triplet=True, max_triplets=5, max_iterations=100, prolong=False):
"""
MAP query method using Max Product LP method.
This returns the best assignment of the nodes in the form of a dictionary.
Parameters
----------
        init_iter: integer
            Maximum number of iterations to run MPLP for in the initial run.
        later_iter: integer
            Maximum number of iterations to run MPLP for in later runs.
dual_threshold: double
            This sets the minimum width between the dual objective decrements. If the decrement is smaller
            than the threshold, we assume we are stuck in a local minimum.
integrality_gap_threshold: double
This sets the threshold for the integrality gap below which we say that the solution
is satisfactory.
tighten_triplet: bool
set whether to use triplets as clusters or not.
max_triplets: integer
Set the maximum number of triplets that can be added at once.
max_iterations: integer
Maximum number of times we tighten the relaxation. Used only when tighten_triplet is set True.
prolong: bool
If set False: The moment we exhaust of all the triplets the tightening stops.
If set True: The tightening will be performed max_iterations number of times irrespective of the
triplets.
Reference:
Section 3.3: The Dual Algorithm; Tightening LP Relaxation for MAP using Message Passing (2008)
By Sontag Et al.
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors import DiscreteFactor
>>> import numpy as np
>>> student = MarkovModel()
>>> student.add_edges_from([('A', 'B'), ('B', 'C'), ('C', 'D'), ('E', 'F')])
        >>> factor_a = DiscreteFactor(['A'], cardinality=[2], values=np.array([0.54577, 1.8323]))
        >>> factor_b = DiscreteFactor(['B'], cardinality=[2], values=np.array([0.93894, 1.065]))
        >>> factor_c = DiscreteFactor(['C'], cardinality=[2], values=np.array([0.89205, 1.121]))
        >>> factor_d = DiscreteFactor(['D'], cardinality=[2], values=np.array([0.56292, 1.7765]))
        >>> factor_e = DiscreteFactor(['E'], cardinality=[2], values=np.array([0.47117, 2.1224]))
        >>> factor_f = DiscreteFactor(['F'], cardinality=[2], values=np.array([1.5093, 0.66257]))
        >>> factor_a_b = DiscreteFactor(['A', 'B'], cardinality=[2, 2], values=np.array([1.3207, 0.75717, 0.75717, 1.3207]))
        >>> factor_b_c = DiscreteFactor(['B', 'C'], cardinality=[2, 2], values=np.array([0.00024189, 4134.2, 4134.2, 0.00024189]))
        >>> factor_c_d = DiscreteFactor(['C', 'D'], cardinality=[2, 2], values=np.array([0.0043227, 231.34, 231.34, 0.0043227]))
        >>> factor_d_e = DiscreteFactor(['E', 'F'], cardinality=[2, 2], values=np.array([31.228, 0.032023, 0.032023, 31.228]))
>>> student.add_factors(factor_a, factor_b, factor_c, factor_d, factor_e, factor_f, factor_a_b,
... factor_b_c, factor_c_d, factor_d_e)
>>> mplp = Mplp(student)
>>> result = mplp.map_query()
Return: {'B': 0.93894, 'C': 1.121, 'A': 1.8323, 'F': 1.5093, 'D': 1.7765, 'E': 2.12239}
"""
self.dual_threshold = dual_threshold
self.integrality_gap_threshold = integrality_gap_threshold
# Run MPLP initially for a maximum of init_iter times.
self._run_mplp(init_iter)
# If triplets are to be used for the tightening, we proceed as follows
if tighten_triplet:
self._tighten_triplet(max_iterations, later_iter, max_triplets, prolong)
# Get the best result from the best assignment
self.best_decoded_result = {factor.scope()[0]: factor.values[self.best_assignment[frozenset(factor.scope())]]
for factor in self.model.factors if len(factor.scope()) == 1}
return self.best_decoded_result
|
{
"content_hash": "fad2ee855de4d8c4bbf090ef5db6f8f0",
"timestamp": "",
"source": "github",
"line_count": 507,
"max_line_length": 128,
"avg_line_length": 50.34516765285996,
"alnum_prop": 0.5990989226248775,
"repo_name": "abinashpanda/pgmpy",
"id": "1a8b776e43823c62433e3e6acc50ce74193b46fe",
"size": "25525",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pgmpy/inference/mplp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1273659"
},
{
"name": "Shell",
"bytes": "1026"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, url
from django.urls import path
from django.contrib import admin
admin.autodiscover()
import hello.views
# Examples:
# url(r'^$', 'gettingstarted.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
urlpatterns = [
url(r'^$', hello.views.index, name='index'),
url(r'^db', hello.views.db, name='db'),
path('admin/', admin.site.urls),
]
|
{
"content_hash": "5cd9f4b7108602acadb3c2cf3e357cf8",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 55,
"avg_line_length": 23.764705882352942,
"alnum_prop": 0.6683168316831684,
"repo_name": "joshfriend/heroku-buildpack-python",
"id": "36492ed3cfea168237908825cb6ac0b8b4207d42",
"size": "404",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "test/fixtures/collectstatic/gettingstarted/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1491"
},
{
"name": "Shell",
"bytes": "85143"
}
],
"symlink_target": ""
}
|
class Solution:
# @param {integer[][]} matrix
# @param {integer} target
# @return {boolean}
def searchMatrix(self, matrix, target):
nr = len(matrix)
nc = len(matrix[0])
for row in matrix:
lo = 0
hi = nc-1
if target < row[lo]:
break
elif target > row[hi]:
continue
            while lo <= hi:
                mid = (lo + hi) // 2
if target == row[mid]:
return True
elif target > row[mid]:
lo = mid + 1
else:
hi = mid-1
return False
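# Hedged usage sketch (added for illustration; not part of the original file).
if __name__ == "__main__":
    demo = [[1, 3, 5, 7],
            [10, 11, 16, 20],
            [23, 30, 34, 50]]
    print(Solution().searchMatrix(demo, 3))   # True
    print(Solution().searchMatrix(demo, 13))  # False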
|
{
"content_hash": "497652882fe252898b9a8ddc66f61d67",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 43,
"avg_line_length": 28.782608695652176,
"alnum_prop": 0.3972809667673716,
"repo_name": "saai/LeetcodePythonSolutions",
"id": "ea4eafe2ebab7f99a3e0a0ecddb1f5edcd55aa31",
"size": "662",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "array/searchMatrix1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "533"
},
{
"name": "Python",
"bytes": "160609"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyramid_assetviews'
copyright = u'2011, Charlie Choiniere'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0a1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyramid_assetviewsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyramid_assetviews.tex', u'pyramid\\_assetviews Documentation',
u'Charlie Choiniere', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyramid_assetviews', u'pyramid_assetviews Documentation',
[u'Charlie Choiniere'], 1)
]
|
{
"content_hash": "9f7a38575d579aebf3c3a3bcc8e912c9",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 80,
"avg_line_length": 32.80788177339902,
"alnum_prop": 0.7079579579579579,
"repo_name": "nek4life/pyramid_assetviews",
"id": "80712ea98c4b9a6f0f0d6e8494d770ec95f91867",
"size": "7089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13438"
},
{
"name": "Shell",
"bytes": "4535"
}
],
"symlink_target": ""
}
|
import numpy as np
# Local imports
from simple_rl.mdp.StateClass import State
''' GymStateClass.py: Contains a State class for Gym. '''
class GymState(State):
''' Gym State class '''
def __init__(self, data=[], is_terminal=False):
self.data = data
State.__init__(self, data=data, is_terminal=is_terminal)
def to_rgb(self, x_dim, y_dim):
# 3 by x_length by y_length array with values 0 (0) --> 1 (255)
board = np.zeros(shape=[3, x_dim, y_dim])
# print self.data, self.data.shape, x_dim, y_dim
return self.data
|
{
"content_hash": "68115689cb2cde42c497e3a3736e9010",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 71,
"avg_line_length": 30.210526315789473,
"alnum_prop": 0.6114982578397212,
"repo_name": "david-abel/simple_rl",
"id": "638a508c070cceb8da5deef69d6cd5f4102f633a",
"size": "591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_rl/tasks/gym/GymStateClass.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "196326"
},
{
"name": "Python",
"bytes": "433150"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class YcalendarValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="ycalendar", parent_name="mesh3d", **kwargs):
super(YcalendarValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop(
"values",
[
"gregorian",
"chinese",
"coptic",
"discworld",
"ethiopian",
"hebrew",
"islamic",
"julian",
"mayan",
"nanakshahi",
"nepali",
"persian",
"jalali",
"taiwan",
"thai",
"ummalqura",
],
),
**kwargs
)
|
{
"content_hash": "b11dc6fd7b90c86a9242319178913d77",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 80,
"avg_line_length": 31.96969696969697,
"alnum_prop": 0.3886255924170616,
"repo_name": "plotly/python-api",
"id": "e30ff3acc112c028bcbb77600a4107d206ace312",
"size": "1055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/mesh3d/_ycalendar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 5, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 12);
|
{
"content_hash": "d5c6f962deaa0b6838033f496946a78f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 165,
"avg_line_length": 37.857142857142854,
"alnum_prop": 0.7056603773584905,
"repo_name": "antoinecarme/pyaf",
"id": "5a97c3d42354bad7c4e25206ba695365c00c2d40",
"size": "265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Difference/trend_Lag1Trend/cycle_5/ar_12/test_artificial_32_Difference_Lag1Trend_5_12_20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
"""OAuth2 implementations for Toon."""
from __future__ import annotations
from typing import Any, cast
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from . import config_flow
def register_oauth2_implementations(
hass: HomeAssistant, client_id: str, client_secret: str
) -> None:
"""Register Toon OAuth2 implementations."""
config_flow.ToonFlowHandler.async_register_implementation(
hass,
ToonLocalOAuth2Implementation(
hass,
client_id=client_id,
client_secret=client_secret,
name="Eneco Toon",
tenant_id="eneco",
issuer="identity.toon.eu",
),
)
config_flow.ToonFlowHandler.async_register_implementation(
hass,
ToonLocalOAuth2Implementation(
hass,
client_id=client_id,
client_secret=client_secret,
name="Engie Electrabel Boxx",
tenant_id="electrabel",
issuer="identity.toon.eu",
),
)
config_flow.ToonFlowHandler.async_register_implementation(
hass,
ToonLocalOAuth2Implementation(
hass,
client_id=client_id,
client_secret=client_secret,
name="Viesgo",
tenant_id="viesgo",
),
)
class ToonLocalOAuth2Implementation(config_entry_oauth2_flow.LocalOAuth2Implementation):
"""Local OAuth2 implementation for Toon."""
def __init__(
self,
hass: HomeAssistant,
client_id: str,
client_secret: str,
name: str,
tenant_id: str,
issuer: str | None = None,
) -> None:
"""Local Toon Oauth Implementation."""
self._name = name
self.tenant_id = tenant_id
self.issuer = issuer
super().__init__(
hass=hass,
domain=tenant_id,
client_id=client_id,
client_secret=client_secret,
authorize_url="https://api.toon.eu/authorize",
token_url="https://api.toon.eu/token",
)
@property
def name(self) -> str:
"""Name of the implementation."""
return f"{self._name} via Configuration.yaml"
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
data = {"tenant_id": self.tenant_id}
if self.issuer is not None:
data["issuer"] = self.issuer
return data
async def async_resolve_external_data(self, external_data: Any) -> dict:
"""Initialize local Toon auth implementation."""
data = {
"grant_type": "authorization_code",
"code": external_data["code"],
"redirect_uri": external_data["state"]["redirect_uri"],
"tenant_id": self.tenant_id,
}
if self.issuer is not None:
data["issuer"] = self.issuer
return await self._token_request(data)
async def _async_refresh_token(self, token: dict) -> dict:
"""Refresh tokens."""
data = {
"grant_type": "refresh_token",
"client_id": self.client_id,
"refresh_token": token["refresh_token"],
"tenant_id": self.tenant_id,
}
new_token = await self._token_request(data)
return {**token, **new_token}
async def _token_request(self, data: dict) -> dict:
"""Make a token request."""
session = async_get_clientsession(self.hass)
headers = {}
data["client_id"] = self.client_id
data["tenant_id"] = self.tenant_id
if self.client_secret is not None:
data["client_secret"] = self.client_secret
if self.issuer is not None:
data["issuer"] = self.issuer
headers["issuer"] = self.issuer
resp = await session.post(self.token_url, data=data, headers=headers)
resp.raise_for_status()
resp_json = cast(dict, await resp.json())
# The Toon API returns "expires_in" as a string for some tenants.
# This is not according to OAuth specifications.
resp_json["expires_in"] = float(resp_json["expires_in"])
return resp_json
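# Hedged usage sketch (added for illustration; not part of the original
# Home Assistant component). The client id and secret are placeholders:
#
#   register_oauth2_implementations(hass, "my-client-id", "my-client-secret")
#
# This registers one OAuth2 implementation per supported tenant (Eneco Toon,
# Engie Electrabel Boxx and Viesgo) on the Toon config flow handler.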
|
{
"content_hash": "9332a93d7d294f0db717f67590fcb581",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 88,
"avg_line_length": 31.115107913669064,
"alnum_prop": 0.5824277456647399,
"repo_name": "w1ll1am23/home-assistant",
"id": "95cde38621501c5f88101497cd7862375252d257",
"size": "4325",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/toon/oauth2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
from time import time as now, sleep
#apetools
from apetools.parsers import oatbran
from basepollster import BasePollster, CSV_JOIN, ZERO
class RssiPollerEnum(object):
"""
A Holder of rssi-poller constants
"""
__slots__ = ()
rssi = 'RSSI'
rssipoller = 'rssipoller'
# end class RssiPollerEnum
class RssiPoller(BasePollster):
"""
A DevicePoller for RSSI
"""
def __init__(self, *args, **kwargs):
super(RssiPoller, self).__init__(*args, **kwargs)
return
@property
def name(self):
"""
:return: the name to look for in the logs
"""
if self._name is None:
self._name = RssiPollerEnum.rssipoller
return self._name
@property
def expression(self):
"""
:return: uncompiled expression to match RSSI
"""
if self._expression is None:
self._expression = (oatbran.NAMED(n=RssiPollerEnum.rssi,
e=oatbran.INTEGER))
return self._expression
def run(self):
"""
:postcondition: the poller is sending rssi values to the output
"""
interval = self.interval
if self.use_header:
self.output.write("timestamp,rssi\n")
while True:
if self.event is not None:
self.event.wait()
start_time = now()
datum = CSV_JOIN.format(self.timestamp(), self.device.rssi)
self.output.writeline(datum)
self.logger.debug(datum)
sleep(max(interval - now() + start_time, ZERO))
return
# end RssiPoller
|
{
"content_hash": "83d49ed04f0de3a4d962d7f9a7dd130d",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 71,
"avg_line_length": 26.918032786885245,
"alnum_prop": 0.5627283800243605,
"repo_name": "rsnakamura/oldape",
"id": "8225b55ee2580632c90813754d4d8ab2a71c91eb",
"size": "1668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apetools/watchers/rssipoller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "5832"
},
{
"name": "Python",
"bytes": "1076570"
},
{
"name": "Shell",
"bytes": "47671"
}
],
"symlink_target": ""
}
|
from braintree.account_updater_daily_report import AccountUpdaterDailyReport
from braintree.configuration import Configuration
from braintree.connected_merchant_paypal_status_changed import ConnectedMerchantPayPalStatusChanged
from braintree.connected_merchant_status_transitioned import ConnectedMerchantStatusTransitioned
from braintree.disbursement import Disbursement
from braintree.dispute import Dispute
from braintree.error_result import ErrorResult
from braintree.granted_payment_instrument_update import GrantedPaymentInstrumentUpdate
from braintree.local_payment_completed import LocalPaymentCompleted
from braintree.local_payment_expired import LocalPaymentExpired
from braintree.local_payment_funded import LocalPaymentFunded
from braintree.local_payment_reversed import LocalPaymentReversed
from braintree.merchant_account import MerchantAccount
from braintree.oauth_access_revocation import OAuthAccessRevocation
from braintree.partner_merchant import PartnerMerchant
from braintree.payment_method_customer_data_updated_metadata import PaymentMethodCustomerDataUpdatedMetadata
from braintree.resource import Resource
from braintree.revoked_payment_method_metadata import RevokedPaymentMethodMetadata
from braintree.subscription import Subscription
from braintree.transaction import Transaction
from braintree.transaction_review import TransactionReview
from braintree.validation_error_collection import ValidationErrorCollection
class WebhookNotification(Resource):
class Kind(object):
AccountUpdaterDailyReport = "account_updater_daily_report"
Check = "check"
ConnectedMerchantPayPalStatusChanged = "connected_merchant_paypal_status_changed"
ConnectedMerchantStatusTransitioned = "connected_merchant_status_transitioned"
Disbursement = "disbursement"
DisbursementException = "disbursement_exception"
DisputeAccepted = "dispute_accepted"
DisputeDisputed = "dispute_disputed"
DisputeExpired = "dispute_expired"
DisputeLost = "dispute_lost"
DisputeOpened = "dispute_opened"
DisputeWon = "dispute_won"
GrantedPaymentMethodRevoked = "granted_payment_method_revoked"
GrantorUpdatedGrantedPaymentMethod = "grantor_updated_granted_payment_method"
LocalPaymentCompleted = "local_payment_completed"
LocalPaymentExpired = "local_payment_expired"
LocalPaymentFunded = "local_payment_funded"
LocalPaymentReversed = "local_payment_reversed"
OAuthAccessRevoked = "oauth_access_revoked"
PartnerMerchantConnected = "partner_merchant_connected"
PartnerMerchantDeclined = "partner_merchant_declined"
PartnerMerchantDisconnected = "partner_merchant_disconnected"
PaymentMethodCustomerDataUpdated = "payment_method_customer_data_updated"
PaymentMethodRevokedByCustomer = "payment_method_revoked_by_customer"
RecipientUpdatedGrantedPaymentMethod = "recipient_updated_granted_payment_method"
SubMerchantAccountApproved = "sub_merchant_account_approved"
SubMerchantAccountDeclined = "sub_merchant_account_declined"
SubscriptionCanceled = "subscription_canceled"
SubscriptionChargedSuccessfully = "subscription_charged_successfully"
SubscriptionChargedUnsuccessfully = "subscription_charged_unsuccessfully"
SubscriptionExpired = "subscription_expired"
SubscriptionTrialEnded = "subscription_trial_ended"
SubscriptionWentActive = "subscription_went_active"
SubscriptionWentPastDue = "subscription_went_past_due"
TransactionDisbursed = "transaction_disbursed"
TransactionReviewed = "transaction_reviewed"
TransactionSettled = "transaction_settled"
TransactionSettlementDeclined = "transaction_settlement_declined"
@staticmethod
def parse(signature, payload):
return Configuration.gateway().webhook_notification.parse(signature, payload)
@staticmethod
def verify(challenge):
return Configuration.gateway().webhook_notification.verify(challenge)
def __init__(self, gateway, attributes):
Resource.__init__(self, gateway, attributes)
if "source_merchant_id" not in attributes:
self.source_merchant_id = None
if "api_error_response" in attributes["subject"]:
node_wrapper = attributes["subject"]["api_error_response"]
else:
node_wrapper = attributes["subject"]
if "subscription" in node_wrapper:
self.subscription = Subscription(gateway, node_wrapper['subscription'])
elif "merchant_account" in node_wrapper:
self.merchant_account = MerchantAccount(gateway, node_wrapper['merchant_account'])
elif "transaction" in node_wrapper:
self.transaction = Transaction(gateway, node_wrapper['transaction'])
elif "transaction_review" in node_wrapper:
self.transaction_review = TransactionReview(node_wrapper['transaction_review'])
elif "connected_merchant_status_transitioned" in node_wrapper:
self.connected_merchant_status_transitioned = ConnectedMerchantStatusTransitioned(gateway, node_wrapper['connected_merchant_status_transitioned'])
elif "connected_merchant_paypal_status_changed" in node_wrapper:
self.connected_merchant_paypal_status_changed = ConnectedMerchantPayPalStatusChanged(gateway, node_wrapper['connected_merchant_paypal_status_changed'])
elif "partner_merchant" in node_wrapper:
self.partner_merchant = PartnerMerchant(gateway, node_wrapper['partner_merchant'])
elif "oauth_application_revocation" in node_wrapper:
self.oauth_access_revocation = OAuthAccessRevocation(node_wrapper["oauth_application_revocation"])
elif "disbursement" in node_wrapper:
self.disbursement = Disbursement(gateway, node_wrapper['disbursement'])
elif "dispute" in node_wrapper:
self.dispute = Dispute(node_wrapper['dispute'])
elif "account_updater_daily_report" in node_wrapper:
self.account_updater_daily_report = AccountUpdaterDailyReport(gateway, node_wrapper['account_updater_daily_report'])
elif "granted_payment_instrument_update" in node_wrapper:
self.granted_payment_instrument_update = GrantedPaymentInstrumentUpdate(gateway, node_wrapper["granted_payment_instrument_update"])
elif attributes["kind"] in [WebhookNotification.Kind.GrantedPaymentMethodRevoked, WebhookNotification.Kind.PaymentMethodRevokedByCustomer]:
self.revoked_payment_method_metadata = RevokedPaymentMethodMetadata(gateway, node_wrapper)
elif "local_payment" in node_wrapper and attributes["kind"] == WebhookNotification.Kind.LocalPaymentCompleted:
self.local_payment_completed = LocalPaymentCompleted(gateway, node_wrapper["local_payment"])
elif "local_payment_expired" in node_wrapper and attributes["kind"] == WebhookNotification.Kind.LocalPaymentExpired:
self.local_payment_expired = LocalPaymentExpired(gateway, node_wrapper["local_payment_expired"])
elif "local_payment_funded" in node_wrapper and attributes["kind"] == WebhookNotification.Kind.LocalPaymentFunded:
self.local_payment_funded = LocalPaymentFunded(gateway, node_wrapper["local_payment_funded"])
elif "local_payment_reversed" in node_wrapper and attributes["kind"] == WebhookNotification.Kind.LocalPaymentReversed:
self.local_payment_reversed = LocalPaymentReversed(gateway, node_wrapper["local_payment_reversed"])
elif "payment_method_customer_data_updated_metadata" in node_wrapper and attributes["kind"] == WebhookNotification.Kind.PaymentMethodCustomerDataUpdated:
self.payment_method_customer_data_updated_metadata = PaymentMethodCustomerDataUpdatedMetadata(gateway, node_wrapper["payment_method_customer_data_updated_metadata"])
if "errors" in node_wrapper:
self.errors = ValidationErrorCollection(node_wrapper['errors'])
self.message = node_wrapper['message']
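# Hedged usage sketch (added for illustration; not part of the original
# library file). `bt_signature` and `bt_payload` are assumed to come from an
# incoming Braintree webhook request, and `handle_past_due` is a hypothetical
# application callback:
#
#   notification = WebhookNotification.parse(bt_signature, bt_payload)
#   if notification.kind == WebhookNotification.Kind.SubscriptionWentPastDue:
#       handle_past_due(notification.subscription)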
|
{
"content_hash": "5c243b3cee7b23b03f06ba0276809842",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 177,
"avg_line_length": 65.82113821138212,
"alnum_prop": 0.7565464426877471,
"repo_name": "braintree/braintree_python",
"id": "85fa191eff7d4e8b5ecae935f4fa99f3aba52865",
"size": "8096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "braintree/webhook_notification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "252"
},
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "1338636"
},
{
"name": "Ruby",
"bytes": "2099"
},
{
"name": "Shell",
"bytes": "193"
}
],
"symlink_target": ""
}
|
"""
Sense-T data platform v2 API client library
"""
from __future__ import absolute_import, unicode_literals, print_function
from .api import API
__version__ = '2.0.4'
__author__ = 'Ionata Digital'
__license__ = 'MIT'
api = API()
|
{
"content_hash": "519415fbc038fc02a16ff9b11b68bf23",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 72,
"avg_line_length": 21.09090909090909,
"alnum_prop": 0.6681034482758621,
"repo_name": "ionata/senset-data-portal",
"id": "e27dd17444e898b003c5e8f57dad12d9eedc68ed",
"size": "338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sensetdp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74147"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shopifier_admin', '0011_user_permissions'),
]
operations = [
migrations.AddField(
model_name='userlog',
name='user_agent',
field=models.CharField(blank=True, max_length=254),
),
]
|
{
"content_hash": "6a5f979e8a92106502c17ded78d59b55",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 63,
"avg_line_length": 22.22222222222222,
"alnum_prop": 0.6025,
"repo_name": "HiDevLab/shopifier",
"id": "d642fccd9fa03d8d54b254ff009be779a05e8f08",
"size": "472",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "shopifier/admin/migrations/0012_userlog_user_agent.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "178833"
},
{
"name": "HTML",
"bytes": "119308"
},
{
"name": "JavaScript",
"bytes": "2480828"
},
{
"name": "Python",
"bytes": "233338"
}
],
"symlink_target": ""
}
|
import importlib
import os.path
import pkgutil
import sys
import time
import types
import warnings
from collections import deque, Hashable
__all__ = ['crawl_pythonpath', 'scrape_docstrings']
def crawl_pythonpath(verbose=False):
"""Returns a list of all accessible modules, as module objects.
Includes builtin modules, stdlib modules, and anything else on sys.path."""
mod_names = set(sys.builtin_module_names)
blacklist = set(['this','antigravity']) # Don't import joke modules.
error_cb = lambda name: blacklist.add(name)
tic = time.time()
with warnings.catch_warnings():
warnings.simplefilter('ignore')
for mod_finder, name, ispkg in pkgutil.walk_packages(onerror=error_cb):
if not ispkg and '.' in name:
continue
mod_names.add(name)
mod_names -= blacklist
if verbose:
print 'Found %d modules in %g secs.' % (len(mod_names), time.time() - tic)
tic = time.time()
mods = []
for m in sorted(mod_names):
try:
mods.append(importlib.import_module(m))
except ImportError:
pass
if verbose:
print 'Imported %d modules in %g secs.' % (len(mods), time.time() - tic)
return mods
def scrape_docstrings(*root_modules, **kwargs):
"""Given a python module, produces a sequence of (name, docstring) pairs.
Submodules are visited in breadth-first order,
which produces names with the least nesting.
Note: submodules not imported by the root will not be visited!
"""
verbose = kwargs.get('verbose', False)
docs = {}
seen = set()
for i, root in enumerate(root_modules, start=1):
if verbose:
sys.stdout.write('\rScraping %d of %d...' % (i, len(root_modules)))
sys.stdout.flush()
_scrape_one(root, seen, docs)
if verbose:
print '\rFinished %d of %d. ' % (i, len(root_modules))
return docs
def _scrape_one(root, seen, docs):
if hasattr(root, '__file__'):
root_dir = os.path.dirname(root.__file__)
else:
root_dir = ''
queue = deque([(root, root.__name__)])
while queue:
mod, name = queue.popleft()
# have we seen it before?
try:
if mod in seen:
continue
except:
continue # Unhashable type. Don't bother with it.
# We haven't seen it before.
seen.add(mod)
# Does it have a docstring?
if hasattr(mod, '__doc__'):
d = mod.__doc__
if d and isinstance(d, basestring):
docs[name] = d
# Does it have sub-fields?
if (not hasattr(mod, '__dict__') or
not isinstance(mod.__dict__, types.DictType)):
continue
# iterate over sub-fields
for k,v in mod.__dict__.iteritems():
if k.startswith('__') or not dir(v) or not isinstance(v, Hashable):
continue
if isinstance(v, types.ModuleType):
# Make sure it's a submodule of the root module.
if not hasattr(v, '__file__') or not v.__file__.startswith(root_dir):
continue
field_name = name + '.' + k
queue.append((v, field_name))
if __name__ == '__main__':
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument('module', nargs='*', help='Module(s) to scrape.')
args = ap.parse_args()
if args.module:
mods = map(importlib.import_module, args.module)
else:
mods = crawl_pythonpath(verbose=True)
for k, v in sorted(scrape_docstrings(*mods).iteritems()):
print k, len(v)
|
{
"content_hash": "645f779cbb5207883b75c9ea19d6b0d7",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 78,
"avg_line_length": 30.10810810810811,
"alnum_prop": 0.6361460203470976,
"repo_name": "perimosocordiae/modsearch",
"id": "36382eff715d3eea95f547fe0dff05dcfe03bc49",
"size": "3342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6213"
}
],
"symlink_target": ""
}
|
import datetime
import hashlib
import json
import sys
from oslo_config import cfg
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy import event
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from mistral.db.sqlalchemy import model_base as mb
from mistral.db.sqlalchemy import types as st
from mistral import exceptions as exc
from mistral.services import security
from mistral_lib import utils
# Definition objects.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _get_hash_function_by(column_name):
def calc_hash(context):
val = context.current_parameters[column_name] or {}
if isinstance(val, dict):
# If the value is a dictionary we need to make sure to have
# keys in the same order in a string representation.
hash_base = json.dumps(sorted(val.items()))
else:
hash_base = str(val)
return hashlib.sha256(hash_base.encode('utf-8')).hexdigest()
return calc_hash
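# Illustrative only -- not part of the original module. A minimal sketch of
# what the default callable returned by _get_hash_function_by() computes,
# using a hypothetical stand-in for the SQLAlchemy execution context. Because
# dict values are serialized from sorted items, two dicts with the same
# content always hash to the same digest.
def _example_hash_default():
    calc = _get_hash_function_by('workflow_input')
    class _FakeContext(object):
        current_parameters = {'workflow_input': {'b': 2, 'a': 1}}
    # Same digest as would be produced for {'a': 1, 'b': 2}.
    return calc(_FakeContext())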
def validate_long_type_length(cls, field_name, value):
"""Makes sure the value does not exceeds the maximum size."""
if value:
# Get the configured limit.
size_limit_kb = cfg.CONF.engine.execution_field_size_limit_kb
# If the size is unlimited.
if size_limit_kb < 0:
return
size_kb = int(sys.getsizeof(str(value)) / 1024)
if size_kb > size_limit_kb:
msg = (
"Field size limit exceeded"
" [class={}, field={}, size={}KB, limit={}KB]"
).format(
cls.__name__,
field_name,
size_kb,
size_limit_kb
)
LOG.error(msg)
raise exc.SizeLimitExceededException(msg)
def validate_name_has_no_spaces(name):
"""Makes sure name does not contain spaces."""
if name:
if " " in name:
msg = (
"Name '{}' must not contain spaces"
).format(name)
LOG.error(msg)
raise exc.InvalidModelException(msg)
def register_length_validator(attr_name):
"""Register an event listener on the attribute.
This event listener will validate the size every
time a 'set' occurs.
"""
for cls in utils.iter_subclasses(Execution):
if hasattr(cls, attr_name):
event.listen(
getattr(cls, attr_name),
'set',
                # Bind 'cls' as a default argument so each listener validates
                # against the class it was registered for (avoids late
                # binding of the loop variable).
                lambda t, v, o, i, cls=cls: validate_long_type_length(
                    cls, attr_name, v)
)
def register_name_validator():
"""Register an event listener on the attribute.
This event listener will validate that name of object does not
contains spaces every time a 'set' occurs.
"""
for cls in utils.iter_subclasses(Definition):
event.listen(
getattr(cls, "name"),
'set',
lambda t, v, o, i: validate_name_has_no_spaces(v)
)
class Definition(mb.MistralSecureModelBase):
__abstract__ = True
id = mb.id_column()
name = sa.Column(sa.String(255))
namespace = sa.Column(sa.String(255), nullable=True)
definition = sa.Column(st.MediumText(), nullable=True)
spec = sa.Column(st.JsonMediumDictType())
tags = sa.Column(st.JsonListType())
is_system = sa.Column(sa.Boolean())
# There's no WorkbookExecution so we safely omit "Definition" in the name.
class Workbook(Definition):
"""Contains info about workbook (including definition in Mistral DSL)."""
__tablename__ = 'workbooks_v2'
__table_args__ = (
sa.UniqueConstraint(
'name',
'namespace',
'project_id'
),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
)
class WorkflowDefinition(Definition):
"""Contains info about workflow (including definition in Mistral DSL)."""
__tablename__ = 'workflow_definitions_v2'
__table_args__ = (
sa.UniqueConstraint(
'name',
'namespace',
'project_id'
),
sa.Index('%s_is_system' % __tablename__, 'is_system'),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
)
workbook_name = sa.Column(sa.String(255))
class ActionDefinition(Definition):
"""Contains info about registered Actions."""
__tablename__ = 'action_definitions_v2'
__table_args__ = (
sa.UniqueConstraint(
'name',
'namespace',
'project_id'),
sa.Index('%s_is_system' % __tablename__, 'is_system'),
sa.Index('%s_action_class' % __tablename__, 'action_class'),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
)
workbook_name = sa.Column(sa.String(255))
# Main properties.
description = sa.Column(sa.Text())
input = sa.Column(sa.Text())
# Service properties.
action_class = sa.Column(sa.String(200))
attributes = sa.Column(st.JsonDictType())
class CodeSource(mb.MistralSecureModelBase):
"""Contains info about registered CodeSources."""
__tablename__ = 'code_sources'
__table_args__ = (
sa.UniqueConstraint(
'name',
'namespace',
'project_id'
),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
)
# Main properties.
id = mb.id_column()
name = sa.Column(sa.String(255))
content = sa.Column(sa.Text())
version = sa.Column(sa.Integer())
namespace = sa.Column(sa.String(255), nullable=True)
tags = sa.Column(st.JsonListType())
class DynamicActionDefinition(mb.MistralSecureModelBase):
"""Contains info about registered Dynamic Actions."""
__tablename__ = 'dynamic_action_definitions'
__table_args__ = (
sa.UniqueConstraint(
'name',
'namespace',
'project_id'
),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
)
# Main properties.
id = mb.id_column()
name = sa.Column(sa.String(255))
namespace = sa.Column(sa.String(255), nullable=True)
class_name = sa.Column(sa.String(255))
code_source_name = sa.Column(sa.String(255))
DynamicActionDefinition.code_source_id = sa.Column(
sa.String(36),
sa.ForeignKey(CodeSource.id, ondelete='CASCADE'),
nullable=False
)
DynamicActionDefinition.code_source = relationship(
CodeSource,
remote_side=CodeSource.id,
lazy='select'
)
# Execution objects.
class Execution(mb.MistralSecureModelBase):
__abstract__ = True
# Common properties.
id = mb.id_column()
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255), nullable=True)
workflow_name = sa.Column(sa.String(255))
workflow_namespace = sa.Column(sa.String(255))
workflow_id = sa.Column(sa.String(80))
state = sa.Column(sa.String(20))
state_info = sa.Column(sa.Text(), nullable=True)
tags = sa.Column(st.JsonListType())
# Internal properties which can be used by engine.
runtime_context = sa.Column(st.JsonLongDictType())
class ActionExecution(Execution):
"""Contains action execution information."""
__tablename__ = 'action_executions_v2'
__table_args__ = (
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
sa.Index('%s_state' % __tablename__, 'state'),
sa.Index('%s_updated_at' % __tablename__, 'updated_at')
)
# Main properties.
spec = sa.Column(st.JsonMediumDictType())
accepted = sa.Column(sa.Boolean(), default=False)
input = sa.Column(st.JsonLongDictType(), nullable=True)
output = sa.orm.deferred(sa.Column(st.JsonLongDictType(), nullable=True))
last_heartbeat = sa.Column(
sa.DateTime,
default=lambda: utils.utc_now_sec() + datetime.timedelta(
seconds=CONF.action_heartbeat.first_heartbeat_timeout
)
)
is_sync = sa.Column(sa.Boolean(), default=None, nullable=True)
class WorkflowExecution(Execution):
"""Contains workflow execution information."""
__tablename__ = 'workflow_executions_v2'
__table_args__ = (
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
sa.Index('%s_state' % __tablename__, 'state'),
sa.Index('%s_updated_at' % __tablename__, 'updated_at'),
)
# Main properties.
spec = sa.orm.deferred(sa.Column(st.JsonMediumDictType()))
accepted = sa.Column(sa.Boolean(), default=False)
input = sa.orm.deferred(sa.Column(st.JsonLongDictType(), nullable=True))
output = sa.orm.deferred(sa.Column(st.JsonLongDictType(), nullable=True))
params = sa.orm.deferred(sa.Column(st.JsonLongDictType()))
# Initial workflow context containing workflow variables, environment,
# openstack security context etc.
# NOTES:
# * Data stored in this structure should not be copied into inbound
# contexts of tasks. No need to duplicate it.
# * This structure does not contain workflow input.
context = sa.orm.deferred(sa.Column(st.JsonLongDictType()))
class TaskExecution(Execution):
"""Contains task runtime information."""
__tablename__ = 'task_executions_v2'
__table_args__ = (
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
sa.Index('%s_state' % __tablename__, 'state'),
sa.Index('%s_updated_at' % __tablename__, 'updated_at'),
sa.UniqueConstraint('unique_key')
)
# Main properties.
spec = sa.orm.deferred(sa.Column(st.JsonMediumDictType()))
action_spec = sa.Column(st.JsonLongDictType())
unique_key = sa.Column(sa.String(255), nullable=True)
type = sa.Column(sa.String(10))
started_at = sa.Column(sa.DateTime, nullable=True)
finished_at = sa.Column(sa.DateTime, nullable=True)
    # Whether the task is fully processed (publishing and calculating commands
    # after it). It allows workflow controller implementations to be
    # simplified significantly.
processed = sa.Column(sa.BOOLEAN, default=False)
# Set to True if the completion of the task led to starting new
# tasks.
# The value of this property should be ignored if the task
# is not completed.
has_next_tasks = sa.Column(sa.Boolean, default=False)
# The names of the next tasks.
# [(task_name, event)]
next_tasks = sa.Column(st.JsonListType())
# Set to True if the task finished with an error and the error
# is handled (e.g. with 'on-error' clause for direct workflows)
# so that the error shouldn't bubble up to the workflow level.
# The value of this property should be ignored if the task
# is not completed.
error_handled = sa.Column(sa.Boolean, default=False)
# Data Flow properties.
in_context = sa.Column(st.JsonLongDictType())
published = sa.Column(st.JsonLongDictType())
@property
def executions(self):
return (
self.action_executions
if not self.spec.get('workflow')
else self.workflow_executions
)
def to_dict(self):
d = super(TaskExecution, self).to_dict()
utils.datetime_to_str_in_dict(d, 'started_at')
utils.datetime_to_str_in_dict(d, 'finished_at')
return d
for cls in utils.iter_subclasses(Execution):
event.listen(
# Catch and trim Execution.state_info to always fit allocated size.
# Note that the limit is 65500 which is less than 65535 (2^16 -1).
# The reason is that utils.cut() is not exactly accurate in case if
# the value is not a string, but, for example, a dictionary. If we
# limit it exactly to 65535 then once in a while it may go slightly
# beyond the allowed maximum size. It may depend on the order of
# keys in a string representation and other things that are hidden
# inside utils.cut_dict() method.
cls.state_info,
'set',
lambda t, v, o, i: utils.cut(v, 65500),
retval=True
)
# Many-to-one for 'ActionExecution' and 'TaskExecution'.
ActionExecution.task_execution_id = sa.Column(
sa.String(36),
sa.ForeignKey(TaskExecution.id, ondelete='CASCADE'),
nullable=True
)
TaskExecution.action_executions = relationship(
ActionExecution,
backref=backref('task_execution', remote_side=[TaskExecution.id]),
cascade='all, delete-orphan',
foreign_keys=ActionExecution.task_execution_id,
lazy='select',
passive_deletes=True
)
sa.Index(
'%s_task_execution_id' % ActionExecution.__tablename__,
'task_execution_id'
)
# Many-to-one for 'WorkflowExecution' and 'TaskExecution'.
WorkflowExecution.task_execution_id = sa.Column(
sa.String(36),
sa.ForeignKey(TaskExecution.id, ondelete='CASCADE'),
nullable=True
)
TaskExecution.workflow_executions = relationship(
WorkflowExecution,
backref=backref('task_execution', remote_side=[TaskExecution.id]),
cascade='all, delete-orphan',
foreign_keys=WorkflowExecution.task_execution_id,
lazy='select',
passive_deletes=True
)
sa.Index(
'%s_task_execution_id' % WorkflowExecution.__tablename__,
'task_execution_id'
)
# Many-to-one for 'WorkflowExecution' and 'WorkflowExecution'
WorkflowExecution.root_execution_id = sa.Column(
sa.String(36),
sa.ForeignKey(WorkflowExecution.id, ondelete='SET NULL'),
nullable=True
)
WorkflowExecution.root_execution = relationship(
WorkflowExecution,
remote_side=WorkflowExecution.id,
lazy='select'
)
# Many-to-one for 'TaskExecution' and 'WorkflowExecution'.
TaskExecution.workflow_execution_id = sa.Column(
sa.String(36),
sa.ForeignKey(WorkflowExecution.id, ondelete='CASCADE')
)
WorkflowExecution.task_executions = relationship(
TaskExecution,
backref=backref('workflow_execution', remote_side=[WorkflowExecution.id]),
cascade='all, delete-orphan',
foreign_keys=TaskExecution.workflow_execution_id,
lazy='select',
passive_deletes=True
)
sa.Index(
'%s_workflow_execution_id' % TaskExecution.__tablename__,
TaskExecution.workflow_execution_id
)
sa.Index(
'%s_workflow_execution_id_name' % TaskExecution.__tablename__,
TaskExecution.workflow_execution_id, TaskExecution.name
)
# Other objects.
class DelayedCall(mb.MistralModelBase):
"""Contains info about delayed calls."""
__tablename__ = 'delayed_calls_v2'
id = mb.id_column()
factory_method_path = sa.Column(sa.String(200), nullable=True)
target_method_name = sa.Column(sa.String(80), nullable=False)
method_arguments = sa.Column(st.JsonDictType())
serializers = sa.Column(st.JsonDictType())
key = sa.Column(sa.String(250), nullable=True)
auth_context = sa.Column(st.JsonMediumDictType())
execution_time = sa.Column(sa.DateTime, nullable=False)
processing = sa.Column(sa.Boolean, default=False, nullable=False)
sa.Index(
'%s_execution_time' % DelayedCall.__tablename__,
DelayedCall.execution_time
)
class ScheduledJob(mb.MistralModelBase):
"""Contains info about scheduled jobs."""
__tablename__ = 'scheduled_jobs_v2'
id = mb.id_column()
run_after = sa.Column(sa.Integer)
# The full name of the factory function that returns/builds a Python
# (target) object whose method should be called. Optional.
target_factory_func_name = sa.Column(sa.String(200), nullable=True)
# May take two different forms:
# 1. Full path of a target function that should be called. For example,
# "mistral.utils.random_sleep".
# 2. Name of a method to call on a target object, if
# "target_factory_func_name" is specified.
func_name = sa.Column(sa.String(80), nullable=False)
func_args = sa.Column(st.JsonDictType())
func_arg_serializers = sa.Column(st.JsonDictType())
auth_ctx = sa.Column(st.JsonDictType())
execute_at = sa.Column(sa.DateTime, nullable=False)
captured_at = sa.Column(sa.DateTime, nullable=True)
key = sa.Column(sa.String(250), nullable=True)
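    # Illustrative only -- hypothetical constructor calls showing the two
    # addressing forms described above (field values are placeholders):
    #   1. Full path of a target function:
    #      ScheduledJob(func_name='mistral.utils.random_sleep', run_after=10)
    #   2. Method on an object built by a factory:
    #      ScheduledJob(target_factory_func_name='path.to.some_factory',
    #                   func_name='run', run_after=10)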
class Environment(mb.MistralSecureModelBase):
"""Contains environment variables for workflow execution."""
__tablename__ = 'environments_v2'
__table_args__ = (
sa.UniqueConstraint('name', 'project_id'),
sa.Index('%s_name' % __tablename__, 'name'),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
)
# Main properties.
id = mb.id_column()
name = sa.Column(sa.String(200))
description = sa.Column(sa.Text())
variables = sa.Column(st.JsonLongDictType())
class CronTrigger(mb.MistralSecureModelBase):
"""Contains info about cron triggers."""
__tablename__ = 'cron_triggers_v2'
__table_args__ = (
sa.UniqueConstraint('name', 'project_id'),
sa.UniqueConstraint(
'workflow_input_hash', 'workflow_name', 'pattern', 'project_id',
'workflow_params_hash', 'remaining_executions',
'first_execution_time'
),
sa.Index(
'%s_next_execution_time' % __tablename__,
'next_execution_time'
),
sa.Index('%s_project_id' % __tablename__, 'project_id'),
sa.Index('%s_scope' % __tablename__, 'scope'),
sa.Index('%s_workflow_name' % __tablename__, 'workflow_name'),
)
id = mb.id_column()
name = sa.Column(sa.String(200))
pattern = sa.Column(
sa.String(100),
nullable=True,
default='0 0 30 2 0' # Set default to 'never'.
)
first_execution_time = sa.Column(sa.DateTime, nullable=True)
next_execution_time = sa.Column(sa.DateTime, nullable=False)
workflow_name = sa.Column(sa.String(255))
remaining_executions = sa.Column(sa.Integer)
workflow_id = sa.Column(
sa.String(36),
sa.ForeignKey(WorkflowDefinition.id)
)
workflow = relationship('WorkflowDefinition', lazy='joined')
workflow_params = sa.Column(st.JsonDictType())
workflow_params_hash = sa.Column(
sa.CHAR(64),
default=_get_hash_function_by('workflow_params')
)
workflow_input = sa.Column(st.JsonDictType())
workflow_input_hash = sa.Column(
sa.CHAR(64),
default=_get_hash_function_by('workflow_input')
)
trust_id = sa.Column(sa.String(80))
def to_dict(self):
d = super(CronTrigger, self).to_dict()
utils.datetime_to_str_in_dict(d, 'first_execution_time')
utils.datetime_to_str_in_dict(d, 'next_execution_time')
return d
# Register all hooks related to secure models.
mb.register_secure_model_hooks()
# TODO(rakhmerov): This is a bad solution. It's hard to find in the code,
# configure flexibly etc. Fix it.
# Register an event listener to verify that the size of all the long columns
# affected by the user do not exceed the limit configuration.
for attr_name in ['input', 'output', 'params', 'published']:
register_length_validator(attr_name)
register_name_validator()
class ResourceMember(mb.MistralModelBase):
"""Contains info about resource members."""
__tablename__ = 'resource_members_v2'
__table_args__ = (
sa.UniqueConstraint(
'resource_id',
'resource_type',
'member_id'
),
)
id = mb.id_column()
resource_id = sa.Column(sa.String(80), nullable=False)
resource_type = sa.Column(
sa.String(50),
nullable=False,
default='workflow'
)
project_id = sa.Column(sa.String(80), default=security.get_project_id)
member_id = sa.Column(sa.String(80), nullable=False)
status = sa.Column(sa.String(20), nullable=False, default="pending")
class EventTrigger(mb.MistralSecureModelBase):
"""Contains info about event triggers."""
__tablename__ = 'event_triggers_v2'
__table_args__ = (
sa.UniqueConstraint('exchange', 'topic', 'event', 'workflow_id',
'project_id'),
sa.Index('%s_project_id_workflow_id' % __tablename__, 'project_id',
'workflow_id'),
)
id = mb.id_column()
name = sa.Column(sa.String(200))
workflow_id = sa.Column(
sa.String(36),
sa.ForeignKey(WorkflowDefinition.id)
)
workflow = relationship('WorkflowDefinition', lazy='joined')
workflow_params = sa.Column(st.JsonDictType())
workflow_input = sa.Column(st.JsonDictType())
exchange = sa.Column(sa.String(80), nullable=False)
topic = sa.Column(sa.String(80), nullable=False)
event = sa.Column(sa.String(80), nullable=False)
trust_id = sa.Column(sa.String(80))
class NamedLock(mb.MistralModelBase):
"""Contains info about named locks.
    Named locks rely on the properties of READ COMMITTED transactions in
    the most commonly used SQL databases such as PostgreSQL, MySQL,
    Oracle etc.
    The locking scenario is as follows:
    1. Transaction A (TX-A) inserts a row with a unique 'id' and a value
    identifying the locked object stored in 'name'.
    2. Transaction B (TX-B), and any subsequent transaction, tries to
    insert a row with a unique 'id' and the same value of the 'name'
    field, and waits until TX-A completes due to the transactional
    properties of READ COMMITTED.
    3. If TX-A then immediately deletes the record and commits, TX-B (or
    one of the subsequent transactions) is released and its 'insert'
    completes.
    4. The scenario then repeats from step #2, with the transaction that
    just completed its insert playing the role of TX-A.
    In practice, this table should never contain any committed rows; its
    only purpose is this interplay with transactional storage.
"""
__tablename__ = 'named_locks'
sa.UniqueConstraint('name')
id = mb.id_column()
name = sa.Column(sa.String(255))
sa.UniqueConstraint(NamedLock.name)
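# Illustrative only -- not part of the original module. A minimal sketch of
# the locking scenario described in the NamedLock docstring, assuming a
# SQLAlchemy `session` is passed in, that the declarative constructor accepts
# column keyword arguments, and that mb.id_column() supplies a default id.
def _example_acquire_named_lock(session, lock_name):
    lock = NamedLock(name=lock_name)
    session.add(lock)
    # Flushing the INSERT is the point where a concurrent transaction
    # inserting the same name would block until this one commits/rolls back.
    session.flush()
    return lock
def _example_release_named_lock(session, lock):
    # Deleting the row and committing releases any waiting transaction.
    session.delete(lock)
    session.commit()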
|
{
"content_hash": "bce9086b5dd4ff993ab6606f7370fe32",
"timestamp": "",
"source": "github",
"line_count": 713,
"max_line_length": 79,
"avg_line_length": 31.13884992987377,
"alnum_prop": 0.63728492928565,
"repo_name": "openstack/mistral",
"id": "2d24045d14301c3bd34fbfa358470991a8c7ea52",
"size": "22879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistral/db/v2/sqlalchemy/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2091"
},
{
"name": "Mako",
"bytes": "951"
},
{
"name": "Python",
"bytes": "2617595"
},
{
"name": "Shell",
"bytes": "26731"
}
],
"symlink_target": ""
}
|
import os
import re
import subprocess
import unittest
import sys
import selenium_utilities
import selenium_constants
class PDiffTest(unittest.TestCase):
"""A perceptual diff test class, for running perceptual diffs on any
number of screenshots."""
def __init__(self, name, num_screenshots, screenshot_name, pdiff_path,
gen_dir, ref_dir, options):
unittest.TestCase.__init__(self, name)
self.name = name
self.num_screenshots = num_screenshots
self.screenshot_name = screenshot_name
self.pdiff_path = pdiff_path
self.gen_dir = gen_dir
self.ref_dir = ref_dir
self.options = options
def shortDescription(self):
"""override unittest.TestCase shortDescription for our own descriptions."""
return "Screenshot comparison for: " + self.name
def PDiffTest(self):
"""Runs a generic Perceptual Diff test."""
# Get arguments for perceptual diff.
pixel_threshold = "10"
alpha_threshold = "1.0"
use_colorfactor = False
use_downsample = False
use_edge = True
edge_threshold = "5"
for opt in self.options:
if opt.startswith("pdiff_threshold"):
pixel_threshold = selenium_utilities.GetArgument(opt)
elif (opt.startswith("pdiff_threshold_mac") and
sys.platform == "darwin"):
pixel_threshold = selenium_utilities.GetArgument(opt)
elif (opt.startswith("pdiff_threshold_win") and
sys.platform == 'win32' or sys.platform == "cygwin"):
pixel_threshold = selenium_utilities.GetArgument(opt)
elif (opt.startswith("pdiff_threshold_linux") and
sys.platform[:5] == "linux"):
pixel_threshold = selenium_utilities.GetArgument(opt)
elif (opt.startswith("colorfactor")):
colorfactor = selenium_utilities.GetArgument(opt)
use_colorfactor = True
elif (opt.startswith("downsample")):
downsample_factor = selenium_utilities.GetArgument(opt)
use_downsample = True
elif (opt.startswith("pdiff_edge_ignore_off")):
use_edge = False
elif (opt.startswith("pdiff_edge_threshold")):
edge_threshold = selenium_utilities.GetArgument(opt)
results = []
# Loop over number of screenshots.
for screenshot_no in range(self.num_screenshots):
# Find reference image.
shotname = self.screenshot_name + str(screenshot_no + 1)
J = os.path.join
platform_img_path = J(self.ref_dir,
selenium_constants.PLATFORM_SCREENSHOT_DIR,
shotname + '_reference.png')
reg_img_path = J(self.ref_dir,
selenium_constants.DEFAULT_SCREENSHOT_DIR,
shotname + '_reference.png')
if os.path.exists(platform_img_path):
ref_img_path = platform_img_path
elif os.path.exists(reg_img_path):
ref_img_path = reg_img_path
else:
self.fail('Reference image for ' + shotname + ' not found.')
# Find generated image.
gen_img_path = J(self.gen_dir, shotname + '.png')
diff_img_path = J(self.gen_dir, 'cmp_' + shotname + '.png')
self.assertTrue(os.path.exists(gen_img_path),
'Generated screenshot for ' + shotname + ' not found.\n')
# Run perceptual diff
arguments = [self.pdiff_path,
ref_img_path,
gen_img_path,
"-output", diff_img_path,
"-fov", "45",
"-alphaThreshold", alpha_threshold,
                   # Turn on verbose output for the perceptual diff so we
# can see how far off we are on the threshold.
"-verbose",
# Set the threshold to zero so we can get a count
# of the different pixels. This causes the program
# to return failure for most images, but we can compare
# the values ourselves below.
"-threshold", "0"]
if use_colorfactor:
arguments += ["-colorfactor", colorfactor]
if use_downsample:
arguments += ["-downsample", downsample_factor]
if use_edge:
arguments += ["-ignoreEdges", edge_threshold]
pdiff_pipe = subprocess.Popen(arguments,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(pdiff_stdout, pdiff_stderr) = pdiff_pipe.communicate()
result = pdiff_pipe.returncode
# Find out how many pixels were different by looking at the output.
pixel_re = re.compile("(\d+) pixels are different", re.DOTALL)
pixel_match = pixel_re.search(pdiff_stdout)
different_pixels = "0"
if pixel_match:
different_pixels = pixel_match.group(1)
results += [(shotname, int(different_pixels))]
all_tests_passed = True
msg = "Pixel threshold is %s. Failing screenshots:\n" % pixel_threshold
for name, pixels in results:
if pixels >= int(pixel_threshold):
all_tests_passed = False
msg += " %s, differing by %s\n" % (name, str(pixels))
self.assertTrue(all_tests_passed, msg)
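# Illustrative only -- not part of the original harness. A minimal sketch of
# wiring PDiffTest into a unittest suite; the paths, screenshot name and the
# empty options list are placeholder assumptions.
def _example_pdiff_suite():
  suite = unittest.TestSuite()
  suite.addTest(PDiffTest('PDiffTest', 2, 'sample_shot',
                          '/usr/local/bin/perceptualdiff',
                          '/tmp/generated', '/tmp/reference', []))
  return suite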
|
{
"content_hash": "e58b340c5e296f6159d442d1b1c8389e",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 79,
"avg_line_length": 39.10606060606061,
"alnum_prop": 0.6030608291359938,
"repo_name": "rwatson/chromium-capsicum",
"id": "0a91f55c90eefd7b11345823512ed3e1e9ed7642",
"size": "5162",
"binary": false,
"copies": "1",
"ref": "refs/heads/chromium-capsicum",
"path": "o3d/tests/selenium/pdiff_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from hadoop.io import SequenceFile, Text
from hadoop.io.SequenceFile import CompressionType
import msgpack
import re
import itertools
writer = SequenceFile.createWriter ('pg10.seq', Text, Text, compression_type=CompressionType.BLOCK)
key = Text()
value = Text()
with open ('pg10.txt', 'r') as f:
for pos, line in enumerate(iter (f.readline, '')):
line = line.strip()
pos = msgpack.packb (pos)
line = msgpack.packb (line)
key._bytes = pos
key._length = len(pos)
value._bytes = line
value._length = len(line)
writer.append (key, value)
writer.close()
|
{
"content_hash": "2adf93106a0f200ab2247051be015773",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 99,
"avg_line_length": 23.444444444444443,
"alnum_prop": 0.6382306477093207,
"repo_name": "kmatzen/HadoopPythonMR",
"id": "c5df39758152129cbc19407bfd28c28e379c3f76",
"size": "656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SequenceFileConverter.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42624"
},
{
"name": "C++",
"bytes": "587"
},
{
"name": "Java",
"bytes": "8785"
},
{
"name": "Python",
"bytes": "3472"
},
{
"name": "Shell",
"bytes": "995"
}
],
"symlink_target": ""
}
|
import pprint
import requests
import sklearn
from sklearn import tree
# Attribute Information of the Pima Indians Diabetes Data Set:
# Ref: http://archive.ics.uci.edu/ml/datasets/Pima+Indians+Diabetes
# 1. Number of times pregnant
# 2. Plasma glucose concentration at 2 hours in an oral glucose tolerance test
# 3. Diastolic blood pressure (mm Hg)
# 4. Triceps skin fold thickness (mm)
# 5. 2-Hour serum insulin (mu U/ml)
# 6. Body mass index (weight in kg/(height in m)^2)
# 7. Diabetes pedigree function
# 8. Age (years)
# 9. Class variable (0 or 1)
dataset = requests.get('http://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data')
content = dataset.content
multivariate_array = []
label_arr = []
for row in content.split("\n"):
attrib_arr = []
for attrib in row.split(","):
attrib_arr.append(attrib)
label_arr.append(attrib_arr[-1])
attrib_arr.pop()
multivariate_array.append(attrib_arr)
# Clean blank entities
multivariate_array.pop()
label_arr.pop()
pp = pprint.PrettyPrinter(indent=4)
clf = tree.DecisionTreeClassifier()
clf = clf.fit(multivariate_array, label_arr)
patient_prediction = clf.predict([[11, 175, 60, 48, 0, 40.6, 0.289, 24]])
print patient_prediction
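# Illustrative only -- not part of the original script. A minimal sketch of
# evaluating the classifier on a held-out split instead of a single
# hand-written record; assumes a scikit-learn version that provides
# sklearn.model_selection.
def _example_holdout_accuracy(features, labels):
    from sklearn.model_selection import train_test_split
    x_train, x_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, random_state=0)
    model = tree.DecisionTreeClassifier()
    model.fit(x_train, y_train)
    return model.score(x_test, y_test)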
|
{
"content_hash": "c7922df0a0f792825baa060ba63dc3e6",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 131,
"avg_line_length": 27.57777777777778,
"alnum_prop": 0.7316680096696213,
"repo_name": "creativcoder/AlgorithmicProblems",
"id": "9884b0f30196d75110a4d7d420c8aac9aa701b8d",
"size": "1242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/machine_learning/binary_classify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "25234"
},
{
"name": "C++",
"bytes": "15735"
},
{
"name": "HTML",
"bytes": "1194"
},
{
"name": "Makefile",
"bytes": "916"
},
{
"name": "Python",
"bytes": "14882"
}
],
"symlink_target": ""
}
|
import requests
from ricecooker.config import LOGGER
# DEFAULT_STUDIO_URL = 'https://develop.studio.learningequality.org'
# DEFAULT_STUDIO_URL = 'http://127.0.0.1:8080'
DEFAULT_STUDIO_URL = "https://studio.learningequality.org"
# TODO https://studio.learningequality.org/api/get_node_path/ca8f380/18932/41b2549
# TODO https://studio.learningequality.org/api/language
# TODO `api/get_total_size/(?P<ids>[^/]*)` where ids are split by commas or run this script:
class StudioApi(object):
"""
    Helper class whose methods allow access to Studio API endpoints for reports,
corrections, and other automation.
"""
def __init__(
self, token, username=None, password=None, studio_url=DEFAULT_STUDIO_URL
):
self.studio_url = studio_url.rstrip("/")
self.token = token
self.licenses_by_id = self.get_licenses()
if username and password:
self.session = self._create_logged_in_session(username, password)
else:
self.session = None
def _create_logged_in_session(self, username, password):
LOGIN_ENDPOINT = self.studio_url + "/accounts/login/"
session = requests.session()
session.headers.update({"referer": self.studio_url})
session.headers.update({"User-Agent": "Mozilla/5.0 Firefox/63.0"})
session.get(LOGIN_ENDPOINT)
csrftoken = session.cookies.get("csrftoken")
session.headers.update({"csrftoken": csrftoken})
session.headers.update({"referer": LOGIN_ENDPOINT})
post_data = {
"csrfmiddlewaretoken": csrftoken,
"username": username,
"password": password,
}
response2 = session.post(LOGIN_ENDPOINT, data=post_data)
assert response2.status_code == 200, "Login POST failed"
return session
def get_channel(self, channel_id):
"""
Calls the /api/channel/{{channel_id}} endpoint to get the channel info.
Returns a dictionary of useful information like:
- `name` and `description`
- `main_tree` {"id": studio_id} where `studio_id` is the root of the channel's main tree
- `staging_tree`: {"id": studio_id} for the root of the staging tree
- `trash_tree`: tree where deleted nodes go
        - `ricecooker_version`: string that indicates what version of ricecooker
          created this channel. If `null`, this means it's a manually uploaded
          channel or a derivative channel
"""
CHANNEL_ENDPOINT = self.studio_url + "/api/channel/"
        # TODO: add TokenAuth to this endpoint so it can be used without session login
# headers = {"Authorization": "Token {0}".format(self.token)}
url = CHANNEL_ENDPOINT + channel_id
LOGGER.info(" GET " + url)
response = self.session.get(url)
channel_data = response.json()
return channel_data
def get_channel_root_studio_id(self, channel_id, tree="main"):
"""
Return the `studio_id` for the root of the tree `tree` for `channel_id`.
"""
channel_data = self.get_channel(channel_id)
tree_key = tree + "_tree"
tree_data = channel_data[tree_key]
return tree_data["id"]
def get_licenses(self):
LICENSES_LIST_ENDPOINT = self.studio_url + "/api/license"
headers = {"Authorization": "Token {0}".format(self.token)}
response = requests.get(LICENSES_LIST_ENDPOINT, headers=headers)
licenses_list = response.json()
licenses_dict = {}
for license in licenses_list:
licenses_dict[license["id"]] = license
return licenses_dict
def get_nodes_by_ids_complete(self, studio_id):
"""
Get the complete JSON representation of a content node from the Studio API.
"""
NODES_ENDPOINT = self.studio_url + "/api/get_nodes_by_ids_complete/"
headers = {"Authorization": "Token {0}".format(self.token)}
url = NODES_ENDPOINT + studio_id
LOGGER.info(" GET " + url)
response = requests.get(url, headers=headers)
studio_node = response.json()[0]
return studio_node
def get_nodes_by_ids_bulk(self, studio_ids):
"""
        A more efficient version of `get_nodes_by_ids_complete` that GETs tree
        content node data in chunks of CHUNK_SIZE nodes from the Studio API.
"""
CHUNK_SIZE = 25
NODES_ENDPOINT = self.studio_url + "/api/get_nodes_by_ids_complete/"
headers = {"Authorization": "Token {0}".format(self.token)}
studio_nodes = []
studio_ids_chunks = [
studio_ids[i : i + CHUNK_SIZE]
for i in range(0, len(studio_ids), CHUNK_SIZE)
]
for studio_ids_chunk in studio_ids_chunks:
studio_ids_csv = ",".join(studio_ids_chunk)
url = NODES_ENDPOINT + studio_ids_csv
LOGGER.info(" GET " + url)
response = requests.get(url, headers=headers)
chunk_nodes = response.json()
for chunk_node in chunk_nodes:
if "children" in chunk_node:
child_nodes = self.get_nodes_by_ids_bulk(chunk_node["children"])
chunk_node["children"] = child_nodes
studio_nodes.extend(chunk_nodes)
return studio_nodes
def get_tree_for_studio_id(self, studio_id):
"""
        Returns the full json tree (recursive calls to /api/get_nodes_by_ids_complete)
"""
channel_root = self.get_nodes_by_ids_complete(studio_id)
if "children" in channel_root:
children_refs = channel_root["children"]
studio_nodes = self.get_nodes_by_ids_bulk(children_refs)
channel_root["children"] = studio_nodes
return channel_root
def get_contentnode(self, studio_id):
"""
        Return the complete JSON representation of the content node `studio_id`.
"""
return self.get_nodes_by_ids_complete(studio_id)
def put_contentnode(self, data):
"""
        Send a PUT request to /api/contentnode to update a Studio node with `data`.
"""
CONTENTNODE_ENDPOINT = self.studio_url + "/api/contentnode"
REQUIRED_FIELDS = ["id", "tags", "prerequisite", "parent"]
assert data_has_required_keys(
data, REQUIRED_FIELDS
), "missing necessary attributes"
# studio_id = data['id']
url = CONTENTNODE_ENDPOINT
# print(' semantic PATCH using PUT ' + url)
csrftoken = self.session.cookies.get("csrftoken")
self.session.headers.update({"x-csrftoken": csrftoken})
response = self.session.put(url, json=[data])
node_data = response.json()
return node_data
def delete_contentnode(self, data, channel_id, trash_studio_id=None):
"""
        Send a POST request to /api/move_nodes/ to delete the Studio node
        specified in `data` in the channel specified by `channel_id`. For
        efficiency, you can provide `trash_studio_id`, the studio id of the
        channel's trash tree.
"""
MOVE_NODES_ENDPOINT = self.studio_url + "/api/move_nodes/"
REQUIRED_FIELDS = ["id"]
assert data_has_required_keys(
data, REQUIRED_FIELDS
), "missing necessary attributes"
if trash_studio_id is None:
channel_data = self.get_channel(channel_id)
trash_studio_id = channel_data["trash_tree"]["id"]
post_data = {
"nodes": [data],
"target_parent": trash_studio_id,
"channel_id": channel_id,
}
url = MOVE_NODES_ENDPOINT
# print(' semantic DELETE using POST to ' + url)
csrftoken = self.session.cookies.get("csrftoken")
self.session.headers.update({"x-csrftoken": csrftoken})
response = self.session.post(url, json=post_data)
deleted_datas = response.json()
return deleted_datas
def copy_contentnode(self, data, target_parent, channel_id):
"""
        Send a POST request to /api/duplicate_nodes/ to copy node `data`
to the target parent folder `target_parent` in channel `channel_id`.
"""
DUPLICATE_NODE_INLINE_ENDPOINT = self.studio_url + "/api/duplicate_nodes/"
REQUIRED_FIELDS = ["id"]
assert data_has_required_keys(data, REQUIRED_FIELDS), "no studio_id in data"
post_data = {
"node_ids": [data["id"]],
"target_parent": target_parent,
"channel_id": channel_id,
}
url = DUPLICATE_NODE_INLINE_ENDPOINT
# print(' semantic COPY using POST to ' + url)
csrftoken = self.session.cookies.get("csrftoken")
self.session.headers.update({"x-csrftoken": csrftoken})
response = self.session.post(url, json=post_data)
copied_data_list = response.json()
return copied_data_list
def data_has_required_keys(data, required_keys):
verdict = True
for key in required_keys:
if key not in data:
verdict = False
return verdict
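# Illustrative only -- not part of the original module. A minimal sketch of
# how this helper might be used; the token, credentials and channel id are
# placeholders, and a logged-in session is required because get_channel()
# uses session authentication.
def _example_count_root_children(token, username, password, channel_id):
    api = StudioApi(token, username=username, password=password)
    channel = api.get_channel(channel_id)
    root_studio_id = channel['main_tree']['id']
    tree = api.get_tree_for_studio_id(root_studio_id)
    return len(tree.get('children', []))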
|
{
"content_hash": "1c8a0984d0accef91ecab1b63d417d9d",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 98,
"avg_line_length": 41.44292237442922,
"alnum_prop": 0.6066549140590568,
"repo_name": "learningequality/ricecooker",
"id": "fd5c77876461caefed0196bbd3580aca2275c201",
"size": "9076",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ricecooker/utils/libstudio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1901"
},
{
"name": "HTML",
"bytes": "1065"
},
{
"name": "JavaScript",
"bytes": "60"
},
{
"name": "Makefile",
"bytes": "2800"
},
{
"name": "Python",
"bytes": "704340"
},
{
"name": "Shell",
"bytes": "1713"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, redirect
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.contrib.auth.decorators import login_required, permission_required
from django.views.decorators.http import require_http_methods
from django.contrib import messages
from tracker.models import *
#from tracker.forms import *
#import tracker.graph as flot
# Create your views here.
@login_required
def tracker_page(request):
user = User.objects.get(username=request.user)
pad = user.pad.name
notifications = user.events.order_by('-date')[:6]
todos = user.todo.filter(done=False)
daily = user.daily_events()
tomorrow = user.tomorrow_events()
later = user.later_events()
c = {
'pad': pad,
'notifications': notifications,
'todos': todos,
'daily': daily,
'tomorrow': tomorrow,
'later': later,
}
return render(request, 'tracker_page.html', c)
@login_required
def door_opener(request):
#if connected
if True:
#send message to electronic card to open
messages.success(request, 'Door opened')
else:
messages.warning(request, 'Opening failed')
return redirect('tracker-page')
|
{
"content_hash": "4de742c82416dadf704e0e817df564ab",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 78,
"avg_line_length": 29.348837209302324,
"alnum_prop": 0.6735340729001584,
"repo_name": "KenN7/LifeTracker",
"id": "0e19975087d0e00f404aac3bdb739e9919d2cd51",
"size": "1262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lifetracker/tracker/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1804"
},
{
"name": "HTML",
"bytes": "181109"
},
{
"name": "JavaScript",
"bytes": "335820"
},
{
"name": "Makefile",
"bytes": "285"
},
{
"name": "Python",
"bytes": "23733"
},
{
"name": "Shell",
"bytes": "680"
}
],
"symlink_target": ""
}
|
import gevent
import gevent.queue
import gevent.wsgi
import os
import sys
import logging
import pdb
import json
import uuid
from pprint import pprint
import functools
import socket
import time
import errno
import re
import copy
from lxml import etree
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import pycassa
import Queue
from collections import deque
import kombu
import kazoo
from kazoo.client import KazooState
from copy import deepcopy
from datetime import datetime
from pycassa.util import *
from vnc_api import vnc_api
from novaclient import exceptions as nc_exc
def stub(*args, **kwargs):
pass
class FakeApiConfigLog(object):
_all_logs = []
send = stub
def __init__(self, *args, **kwargs):
FakeApiConfigLog._all_logs.append(kwargs['api_log'])
@classmethod
def _print(cls):
for log in cls._all_logs:
x = copy.deepcopy(log.__dict__)
#body = x.pop('body')
#pprint(json.loads(body))
pprint(x)
print "\n"
# class FakeApiConfigLog
class FakeWSGIHandler(gevent.wsgi.WSGIHandler):
logger = logging.getLogger('FakeWSGIHandler')
logger.addHandler(logging.FileHandler('api_server.log'))
def __init__(self, socket, address, server):
super(FakeWSGIHandler, self).__init__(socket, address, server)
#server.log = open('api_server.log', 'a')
class LoggerWriter(object):
def write(self, message):
FakeWSGIHandler.logger.log(logging.INFO, message)
server.log = LoggerWriter()
class CassandraCFs(object):
_all_cfs = {}
@classmethod
def add_cf(cls, name, cf):
CassandraCFs._all_cfs[name] = cf
# end add_cf
@classmethod
def get_cf(cls, name):
return CassandraCFs._all_cfs[name]
# end get_cf
# end CassandraCFs
class FakeCF(object):
def __init__(*args, **kwargs):
self = args[0]
self._name = args[3]
self._rows = OrderedDict({})
self.column_validators = {}
CassandraCFs.add_cf(self._name, self)
# end __init__
def get_range(self, *args, **kwargs):
for key in self._rows:
yield (key, self.get(key))
# end get_range
def get(
self, key, columns=None, column_start=None, column_finish=None,
column_count=0, include_timestamp=False):
if not key in self._rows:
raise pycassa.NotFoundException
if columns:
col_dict = {}
for col_name in columns:
col_value = self._rows[key][col_name][0]
if include_timestamp:
col_tstamp = self._rows[key][col_name][1]
col_dict[col_name] = (col_value, col_tstamp)
else:
col_dict[col_name] = col_value
else:
col_dict = {}
for col_name in self._rows[key].keys():
if column_start and column_start not in col_name:
continue
col_value = self._rows[key][col_name][0]
if include_timestamp:
col_tstamp = self._rows[key][col_name][1]
col_dict[col_name] = (col_value, col_tstamp)
else:
col_dict[col_name] = col_value
return col_dict
# end get
def multiget(
self, keys, columns=None, column_start=None, column_finish=None,
column_count=0, include_timestamp=False):
result = {}
for key in keys:
try:
result[key] = {}
for col_name in self._rows[key]:
if column_start and column_start not in col_name:
continue
result[key][col_name] = copy.deepcopy(self._rows[key][col_name])
except KeyError:
pass
return result
# end multiget
def insert(self, key, col_dict):
if key not in self._rows:
self._rows[key] = {}
tstamp = datetime.now()
for col_name in col_dict.keys():
self._rows[key][col_name] = (col_dict[col_name], tstamp)
# end insert
def remove(self, key, columns=None):
try:
if columns:
# for each entry in col_name delete each that element
for col_name in columns:
del self._rows[key][col_name]
else:
del self._rows[key]
except KeyError:
# pycassa remove ignores non-existing keys
pass
# end remove
def xget(self, key, column_start=None, column_finish=None,
include_timestamp=False):
col_names = []
if key in self._rows:
col_names = self._rows[key].keys()
for col_name in col_names:
if column_start and column_start not in col_name:
continue
col_value = self._rows[key][col_name][0]
if include_timestamp:
col_tstamp = self._rows[key][col_name][1]
yield (col_name, (col_value, col_tstamp))
else:
yield (col_name, col_value)
# end xget
def batch(self):
return self
# end batch
def send(self):
pass
# end send
# end class FakeCF
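# Illustrative only -- not part of the original test fakes. A minimal sketch
# of exercising FakeCF: the constructor ignores its first two positional
# arguments and treats the third one as the column-family name (args[3] when
# counting self), mirroring how the code under test instantiates pycassa
# column families.
def _example_fake_cf_roundtrip():
    cf = FakeCF(None, None, 'example_cf')
    cf.insert('row-1', {'col-a': 'value-a', 'col-b': 'value-b'})
    # get() returns plain values; with include_timestamp=True it would
    # return (value, timestamp) tuples instead.
    assert cf.get('row-1', columns=['col-a']) == {'col-a': 'value-a'}
    return CassandraCFs.get_cf('example_cf') is cf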
class FakeNovaClient(object):
@staticmethod
def initialize(*args, **kwargs):
return FakeNovaClient
class flavors:
@staticmethod
def find(ram):
return None
# end class flavors
class images:
@staticmethod
def find(name):
return None
# end class images
class servers:
@staticmethod
def create(name, image, flavor, nics, *args, **kwargs):
vm = vnc_api.VirtualMachine(name)
FakeNovaClient.vnc_lib.virtual_machine_create(vm)
for network in nics:
if 'nic-id' in network:
vn = FakeNovaClient.vnc_lib.virtual_network_read(
id=network['net-id'])
vmi = vnc_api.VirtualMachineInterface(vn.name, parent_obj=vm)
vmi.set_virtual_network(vn)
FakeNovaClient.vnc_lib.virtual_machine_interface_create(vmi)
ip_address = FakeNovaClient.vnc_lib.virtual_network_ip_alloc(
vn, count=1)[0]
ip_obj = vnc_api.InstanceIp(ip_address, ip_address)
ip_obj.add_virtual_network(vn)
ip_obj.add_virtual_machine_interface(vmi)
FakeNovaClient.vnc_lib.instance_ip_create(ip_obj)
elif 'port-id' in network:
vmi = FakeNovaClient.vnc_lib.virtual_machine_interface_read(id=network['port-id'])
vmi.add_virtual_machine(vm)
FakeNovaClient.vnc_lib.virtual_machine_interface_update(vmi)
# end for network
vm.id = vm.uuid
vm.delete = FakeNovaClient.delete_vm.__get__(
vm, vnc_api.VirtualMachine)
vm.get = stub
return vm
# end create
@staticmethod
def find(id):
try:
vm = FakeNovaClient.vnc_lib.virtual_machine_read(id=id)
except vnc_api.NoIdError:
raise nc_exc.NotFound(404, "")
vm.delete = FakeNovaClient.delete_vm.__get__(
vm, vnc_api.VirtualMachine)
vm.status = 'OK'
return vm
# end find
# end class servers
@staticmethod
def delete_vm(vm):
        for if_ref in (vm.get_virtual_machine_interfaces() or
                       vm.get_virtual_machine_interface_back_refs() or []):
intf = FakeNovaClient.vnc_lib.virtual_machine_interface_read(
id=if_ref['uuid'])
for ip_ref in intf.get_instance_ip_back_refs() or []:
FakeNovaClient.vnc_lib.instance_ip_delete(id=ip_ref['uuid'])
FakeNovaClient.vnc_lib.virtual_machine_interface_delete(
id=if_ref['uuid'])
FakeNovaClient.vnc_lib.virtual_machine_delete(id=vm.uuid)
# end delete_vm
# end class FakeNovaClient
class FakeIfmapClient(object):
# _graph is dict of ident_names where val for each key is
# dict with keys 'ident' and 'links'
# 'ident' has ident xml element
# 'links' is a dict with keys of concat(<meta-name>' '<ident-name>')
# and vals of dict with 'meta' which has meta xml element and
# 'other' which has other ident xml element
# eg. cls._graph['contrail:network-ipam:default-domain:default-project:
# ipam2'] =
# 'ident': <Element identity at 0x2b3e280>,
# 'links': {'contrail:id-perms': {'meta': <Element metadata at 0x2b3eb40>},
# 'contrail:project-network-ipam
# contrail:project:default-domain:default-project':
# {'other': <Element identity at 0x2b3eaa0>,
# 'meta': <Element metadata at 0x2b3ea50>},
# 'contrail:virtual-network-network-ipam contrail:
# virtual-network:default-domain:default-project:vn2':
# {'other': <Element identity at 0x2b3ee10>,
# 'meta': <Element metadata at 0x2b3e410>}}}
_graph = {}
_published_messages = [] # all messages published so far
_subscribe_lists = [] # list of all subscribers indexed by session-id
_PUBLISH_ENVELOPE = \
"""<?xml version="1.0" encoding="UTF-8"?> """\
"""<env:Envelope xmlns:"""\
"""env="http://www.w3.org/2003/05/soap-envelope" xmlns:"""\
"""ifmap="http://www.trustedcomputinggroup.org/2010/IFMAP/2" """\
"""xmlns:contrail="http://www.contrailsystems.com/"""\
"""vnc_cfg.xsd" """\
"""xmlns:meta="http://www.trustedcomputinggroup.org"""\
"""/2010/IFMAP-METADATA/2"> """\
"""<env:Body> %(body)s </env:Body> </env:Envelope>"""
_RSP_ENVELOPE = \
"""<?xml version="1.0" encoding="UTF-8" standalone="yes"?> """\
"""<env:Envelope xmlns:ifmap="http://www.trustedcomputinggroup.org"""\
"""/2010/IFMAP/2" """\
"""xmlns:env="http://www.w3.org/2003/05/soap-envelope" """\
"""xmlns:meta="http://www.trustedcomputinggroup.org"""\
"""/2010/IFMAP-METADATA/2" """\
"""xmlns:contrail="http://www.contrailsystems.com/vnc_cfg.xsd"> """\
"""<env:Body><ifmap:response> %(result)s """\
"""</ifmap:response></env:Body></env:Envelope>"""
@classmethod
def reset(cls):
cls._graph = {}
cls._published_messages = [] # all messages published so far
cls._subscribe_lists = [] # list of all subscribers indexed by session-id
# end reset
@staticmethod
def initialize(*args, **kwargs):
pass
# end initialize
@classmethod
def _update_publish(cls, upd_root):
subscribe_item = etree.Element('resultItem')
subscribe_item.extend(deepcopy(upd_root))
from_name = upd_root[0].attrib['name']
if not from_name in cls._graph:
cls._graph[from_name] = {'ident': upd_root[0], 'links': {}}
if len(upd_root) == 2:
meta_name = re.sub("{.*}", "contrail:", upd_root[1][0].tag)
link_key = meta_name
link_info = {'meta': upd_root[1]}
cls._graph[from_name]['links'][link_key] = link_info
elif len(upd_root) == 3:
meta_name = re.sub("{.*}", "contrail:", upd_root[2][0].tag)
to_name = upd_root[1].attrib['name']
link_key = '%s %s' % (meta_name, to_name)
link_info = {'meta': upd_root[2], 'other': upd_root[1]}
cls._graph[from_name]['links'][link_key] = link_info
# reverse mapping only for strong refs
# currently refs from same type to each other is weak ref
from_type = from_name.split(':')[1]
to_type = to_name.split(':')[1]
if not to_name in cls._graph:
cls._graph[to_name] = {'ident': upd_root[1], 'links': {}}
link_key = '%s %s' % (meta_name, from_name)
link_info = {'meta': upd_root[2], 'other': upd_root[0]}
cls._graph[to_name]['links'][link_key] = link_info
else:
raise Exception("Unknown ifmap update: %s" %
(etree.tostring(upd_root)))
subscribe_result = etree.Element('updateResult')
subscribe_result.append(subscribe_item)
return subscribe_result
# end _update_publish
@classmethod
def _delete_publish(cls, del_root):
from_name = del_root[0].attrib['name']
if 'filter' in del_root.attrib:
meta_name = del_root.attrib['filter']
if len(del_root) == 1:
link_key = meta_name
elif len(del_root) == 2:
to_name = del_root[1].attrib['name']
link_key = '%s %s' % (meta_name, to_name)
else:
raise Exception("Unknown ifmap delete: %s" %
(etree.tostring(del_root)))
link_keys = [link_key]
else: # delete all metadata on this ident or between pair of idents
if len(del_root) == 1:
link_keys = cls._graph[from_name]['links'].keys()
elif len(del_root) == 2:
to_name = del_root[1].attrib['name']
link_keys = []
if from_name in cls._graph:
all_link_keys = cls._graph[from_name]['links'].keys()
for link_key in all_link_keys:
link_info = cls._graph[from_name]['links'][link_key]
if 'other' in link_info:
if link_key.split()[1] == to_name:
link_keys.append(link_key)
else:
raise Exception("Unknown ifmap delete: %s" %
(etree.tostring(del_root)))
subscribe_result = etree.Element('deleteResult')
for link_key in link_keys:
subscribe_item = etree.Element('resultItem')
subscribe_item.extend(deepcopy(del_root))
link_info = cls._graph[from_name]['links'][link_key]
# generate id1, id2, meta for poll for the case where
# del of ident for all metas requested but we have a
# ref meta to another ident
if len(del_root) == 1 and 'other' in link_info:
to_ident_elem = link_info['other']
subscribe_item.append(to_ident_elem)
subscribe_item.append(deepcopy(link_info['meta']))
subscribe_result.append(subscribe_item)
if 'other' in link_info:
other_name = link_info['other'].attrib['name']
meta_name = re.sub(
"{.*}", "contrail:", link_info['meta'][0].tag)
rev_link_key = '%s %s' % (meta_name, from_name)
from_type = from_name.split(':')[1]
other_type = other_name.split(':')[1]
if other_name in cls._graph:
del cls._graph[other_name]['links'][rev_link_key]
if not cls._graph[other_name]['links']:
del cls._graph[other_name]
del cls._graph[from_name]['links'][link_key]
# delete ident if no links left
if from_name in cls._graph and not cls._graph[from_name]['links']:
del cls._graph[from_name]
if len(subscribe_result) == 0:
subscribe_item = etree.Element('resultItem')
subscribe_item.extend(deepcopy(del_root))
subscribe_result.append(subscribe_item)
return subscribe_result
# end _delete_publish
@staticmethod
def call(method, body):
cls = FakeIfmapClient
if method == 'publish':
pub_env = cls._PUBLISH_ENVELOPE % {
'body': body._PublishRequest__operations}
env_root = etree.fromstring(pub_env)
poll_result = etree.Element('pollResult')
for pub_root in env_root[0]:
# pub_root = env_root[0][0]
if pub_root.tag == 'update':
subscribe_result = cls._update_publish(pub_root)
elif pub_root.tag == 'delete':
subscribe_result = cls._delete_publish(pub_root)
else:
raise Exception(
"Unknown ifmap publish: %s"
% (etree.tostring(pub_root)))
poll_result.append(subscribe_result)
cls._published_messages.append(poll_result)
for sl in cls._subscribe_lists:
if sl is not None:
sl.put(poll_result)
result = etree.Element('publishReceived')
result_env = cls._RSP_ENVELOPE % {'result': etree.tostring(result)}
return result_env
elif method == 'search':
# grab ident string; lookup graph with match meta and return
srch_id_str = body._SearchRequest__identifier
mch = re.match('<identity name="(.*)" type', srch_id_str)
start_name = mch.group(1)
match_links = body._SearchRequest__parameters['match-links']
all_link_keys = set()
for match_link in match_links.split(' or '):
link_keys = set(
[link_key for link_key in cls._graph[start_name]
['links'].keys()
if re.match(match_link, link_key)])
all_link_keys |= link_keys
result_items = []
for link_key in all_link_keys:
r_item = etree.Element('resultItem')
link_info = cls._graph[start_name]['links'][link_key]
if 'other' in link_info:
r_item.append(cls._graph[start_name]['ident'])
r_item.append(link_info['other'])
r_item.append(link_info['meta'])
else:
r_item.append(cls._graph[start_name]['ident'])
r_item.append(link_info['meta'])
result_items.append(copy.deepcopy(r_item))
search_result = etree.Element('searchResult')
search_result.extend(result_items)
search_str = etree.tostring(search_result)
search_env = cls._RSP_ENVELOPE % {'result': search_str}
return search_env
#ifmap_cf = CassandraCFs.get_cf('ifmap_id_table')
#srch_uuid = ifmap_cf.get(ifmap_id)['uuid']
#uuid_cf = CassandraCFs.get_cf('uuid_table')
#obj_json = uuid_cf.get(srch_uuid)['obj_json']
elif method == 'poll':
session_id = int(body._PollRequest__session_id)
item = cls._subscribe_lists[session_id].get(True)
poll_str = etree.tostring(item)
poll_env = cls._RSP_ENVELOPE % {'result': poll_str}
return poll_env
elif method == 'newSession':
result = etree.Element('newSessionResult')
result.set("session-id", str(len(cls._subscribe_lists)))
result.set("ifmap-publisher-id", "111")
result.set("max-poll-result-size", "7500000")
result_env = cls._RSP_ENVELOPE % {'result': etree.tostring(result)}
cls._subscribe_lists.append(None)
return result_env
elif method == 'subscribe':
session_id = int(body._SubscribeRequest__session_id)
subscriber_queue = Queue.Queue()
for msg in cls._published_messages:
subscriber_queue.put(msg)
cls._subscribe_lists[session_id] = subscriber_queue
result = etree.Element('subscribeReceived')
result_env = cls._RSP_ENVELOPE % {'result': etree.tostring(result)}
return result_env
else:
print method
# end call
@staticmethod
def call_async_result(method, body):
return FakeIfmapClient.call(method, body)
# end class FakeIfmapClient
class FakeKombu(object):
_queues = {}
class Exchange(object):
def __init__(self, *args, **kwargs):
pass
# end __init__
# end Exchange
class Queue(object):
_sync_q = gevent.queue.Queue()
class Message(object):
def __init__(self, msg_dict):
self.payload = msg_dict
# end __init__
def ack(self, *args, **kwargs):
pass
# end ack
# end class Message
def __init__(self, entity, q_name, q_exchange):
self._name = q_name
self._exchange = q_exchange
FakeKombu._queues[q_name] = self
# end __init__
def __call__(self, *args):
class BoundQueue(object):
def delete(self):
pass
# end delete
return BoundQueue()
# end __call__
def put(self, msg_dict, serializer):
msg_obj = self.Message(msg_dict)
self._sync_q.put(msg_obj)
# end put
def get(self):
rv = self._sync_q.get()
            # In real systems, RabbitMQ is a little slow, so add a small wait to mimic it.
gevent.sleep(0.001)
return rv
# end get
# end class Queue
class Connection(object):
class SimpleQueue(object):
_simple_queues = {}
def __init__(self, q_obj):
if q_obj._name in self._simple_queues:
self._q_obj = self._simple_queues[q_obj._name]._q_obj
else:
self._simple_queues[q_obj._name] = self
self._q_obj = q_obj
# end __init__
def put(self, *args, **kwargs):
self._q_obj.put(*args, **kwargs)
# end put
def get(self, *args, **kwargs):
return self._q_obj.get()
# end get
def __enter__(self):
return self
# end __enter__
def __exit__(self, *args, **kwargs):
pass
# end __exit__
# end class SimpleQueue
def __init__(self, *args, **kwargs):
pass
# end __init__
def channel(self):
pass
# end channel
# end class FakeKombu
class FakeRedis(object):
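    # Minimal in-memory stand-in for a redis client: values are deep-copied on
    # set/get so callers cannot mutate the stored state in place.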
class Pubsub(object):
def __init__(self, *args, **kwargs):
self._event = gevent.event.Event()
# end __init__
def subscribe(self, *args, **kwargs):
pass
# end subscribe
def listen(self, *args, **kwargs):
self._event.wait()
# end listen
    # end Pubsub
def __init__(self, *args, **kwargs):
self._kv_store = {}
# end __init__
def pubsub(self, *args, **kwargs):
return FakeRedis.Pubsub()
# end pubsub
def publish(self, *args, **kwargs):
pass
# end publish
def set(self, key, value):
self._kv_store[key] = deepcopy(value)
# end set
def get(self, key):
return deepcopy(self._kv_store[key])
# end get
def delete(self, keys):
for key in keys:
try:
del self._kv_store[key]
except KeyError:
pass
# end delete
def setnx(self, key, value):
self.set(key, deepcopy(value))
return True
# end setnx
def hexists(self, key, hkey):
if key in self._kv_store:
if hkey in self._kv_store[key]:
return True
return False
# end hexists
def hset(self, key, hkey, value):
if key not in self._kv_store:
self._kv_store[key] = {}
self._kv_store[key][hkey] = deepcopy(value)
# end hset
def hget(self, key, hkey):
if key not in self._kv_store:
return json.dumps(None)
if hkey not in self._kv_store[key]:
return json.dumps(None)
return deepcopy(self._kv_store[key][hkey])
# end hget
def hgetall(self, key):
return deepcopy(self._kv_store[key])
# end hgetall
def hdel(self, key, hkey):
del self._kv_store[key][hkey]
# end hdel
# end FakeRedis
class FakeExtensionManager(object):
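    # Stands in for an entry-point based extension manager (e.g. stevedore):
    # entry-point names are mapped to classes via _entry_pt_to_classes instead of
    # being discovered from installed packages.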
_entry_pt_to_classes = {}
class FakeExtObj(object):
def __init__(self, cls, *args, **kwargs):
self.obj = cls(*args, **kwargs)
def __init__(self, child, ep_name, **kwargs):
if ep_name not in self._entry_pt_to_classes:
return
classes = self._entry_pt_to_classes[ep_name]
self._ep_name = ep_name
self._ext_objs = []
for cls in classes:
ext_obj = FakeExtensionManager.FakeExtObj(cls, **kwargs)
self._ext_objs.append(ext_obj)
# end __init__
def map(self, cb):
for ext_obj in self._ext_objs:
cb(ext_obj)
def map_method(self, method_name, *args, **kwargs):
for ext_obj in self._ext_objs:
method = getattr(ext_obj.obj, method_name, None)
if not method:
continue
method(*args, **kwargs)
# end class FakeExtensionManager
class FakeKeystoneClient(object):
class Tenants(object):
_tenants = {}
def add_tenant(self, id, name):
self.id = id
self.name = name
self._tenants[id] = self
def list(self):
return self._tenants.values()
def get(self, id):
return self._tenants[str(uuid.UUID(id))]
def __init__(self, *args, **kwargs):
self.tenants = FakeKeystoneClient.Tenants()
pass
# end class FakeKeystoneClient
fake_keystone_client = FakeKeystoneClient()
def get_keystone_client(*args, **kwargs):
return fake_keystone_client
def get_free_port():
tmp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tmp_sock.bind(('', 0))
free_port = tmp_sock.getsockname()[1]
tmp_sock.close()
return free_port
# end get_free_port
def block_till_port_listened(server_ip, server_port):
svr_running = False
while not svr_running:
try:
s = socket.create_connection((server_ip, server_port))
s.close()
svr_running = True
except Exception as err:
if err.errno == errno.ECONNREFUSED:
print "port %s not up, retrying in 2 secs" % (server_port)
gevent.sleep(2)
# end block_till_port_listened
def Fake_uuid_to_time(time_uuid_in_db):
ts = time.mktime(time_uuid_in_db.timetuple())
return ts
# end of Fake_uuid_to_time
class FakeKazooClient(object):
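    # In-memory replacement for kazoo.client.KazooClient: znode paths and values
    # live in a plain dict, and elections invoke their callback immediately.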
class Election(object):
__init__ = stub
def run(self, cb, func, *args, **kwargs):
cb(func, *args, **kwargs)
def __init__(self, *args, **kwargs):
self.add_listener = stub
self.start = stub
self._values = {}
self.state = KazooState.CONNECTED
# end __init__
def create(self, path, value='', *args, **kwargs):
self._values[path] = value
# end create
def delete(self, path, recursive=False):
if not recursive:
try:
del self._values[path]
except KeyError:
raise kazoo.exceptions.NoNodeError()
else:
for path_key in self._values.keys():
if path in path_key:
del self._values[path_key]
# end delete
class ZookeeperClientMock(object):
def __init__(self, *args, **kwargs):
self._count = 0
self._values = {}
# end __init__
def is_connected(self):
return True
def alloc_from(self, path, max_id):
self._count = self._count + 1
return self._count
# end alloc_from
def alloc_from_str(self, path, value=''):
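        # Mimic a ZooKeeper sequential node: return a 10-digit, zero-padded counter
        # and remember the value under path + counter.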
self._count = self._count + 1
zk_val = "%(#)010d" % {'#': self._count}
self._values[path + zk_val] = value
return zk_val
# end alloc_from_str
def delete(self, path):
try:
del self._values[path]
except KeyError:
raise kazoo.exceptions.NoNodeError()
# end delete
def read(self, path):
try:
return self._values[path]
except Exception as err:
raise pycassa.NotFoundException
# end read
def get_children(self, path):
return []
# end get_children
def read_node(self, path):
try:
return self.read(path)
except pycassa.NotFoundException:
return None
# end read_node
def create_node(self, path, value=''):
self._values[path] = value
# end create_node
def delete_node(self, path, recursive=False):
if not recursive:
try:
del self._values[path]
except KeyError:
raise kazoo.exceptions.NoNodeError()
else:
for path_key in self._values.keys():
if path in path_key:
del self._values[path_key]
# end delete_node
def master_election(self, path, pid, func, *args, **kwargs):
func(*args, **kwargs)
# end master_election
# end Class ZookeeperClientMock
|
{
"content_hash": "df23d968bbe8d3dd4271c5881c377577",
"timestamp": "",
"source": "github",
"line_count": 894,
"max_line_length": 106,
"avg_line_length": 32.9407158836689,
"alnum_prop": 0.5355020543991307,
"repo_name": "Juniper/contrail-dev-controller",
"id": "f917509d83de343c2974acdb5366e31bcf688c61",
"size": "29518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/config/common/tests/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "242661"
},
{
"name": "C++",
"bytes": "12643864"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "Java",
"bytes": "143864"
},
{
"name": "Lua",
"bytes": "6835"
},
{
"name": "Objective-C",
"bytes": "28773"
},
{
"name": "Python",
"bytes": "2243464"
},
{
"name": "Shell",
"bytes": "37954"
}
],
"symlink_target": ""
}
|
"""Utility methods for package db."""
import yaml
def yaml_load(path):
with open(path, 'r', encoding='utf-8') as f:
# data_yaml = yaml.load(f, Loader=yaml.FullLoader)
data_yaml = yaml.load(f, Loader=yaml.FullLoader)
return data_yaml
|
{
"content_hash": "189898aa5d2a0ac763615435bbcd5d50",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 58,
"avg_line_length": 26,
"alnum_prop": 0.6423076923076924,
"repo_name": "verejnedigital/verejne.digital",
"id": "41ce334cfeb5d3ccf20f267f1effb96a5a3c361c",
"size": "260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/db/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "19661"
},
{
"name": "JavaScript",
"bytes": "256025"
},
{
"name": "PHP",
"bytes": "21109"
},
{
"name": "Python",
"bytes": "293787"
},
{
"name": "SCSS",
"bytes": "34305"
},
{
"name": "Shell",
"bytes": "6726"
},
{
"name": "Smarty",
"bytes": "5018"
}
],
"symlink_target": ""
}
|
"""Core module. Provides the basic operations needed in sympy.
"""
from sympify import sympify
from cache import cacheit
from basic import Basic, Atom, C, preorder_traversal
from singleton import S
from expr import Expr, AtomicExpr
from symbol import Symbol, Wild, Dummy, symbols, var
from numbers import Number, Float, Rational, Integer, NumberSymbol,\
RealNumber, Real, igcd, ilcm, seterr, E, I, nan, oo, pi, zoo
from power import Pow, integer_nthroot
from mul import Mul, prod
from add import Add
from mod import Mod
from relational import ( Rel, Eq, Ne, Lt, Le, Gt, Ge,
Equality, GreaterThan, LessThan, Unequality, StrictGreaterThan,
StrictLessThan )
from multidimensional import vectorize
from function import Lambda, WildFunction, Derivative, diff, FunctionClass, \
Function, Subs, expand, PoleError, count_ops, \
expand_mul, expand_log, expand_func,\
expand_trig, expand_complex, expand_multinomial, nfloat, \
expand_power_base, expand_power_exp
from sets import (Set, Interval, Union, EmptySet, FiniteSet, ProductSet,
Intersection)
from evalf import PrecisionExhausted, N
from containers import Tuple, Dict
from exprtools import gcd_terms, factor_terms, factor_nc
# expose singletons
Catalan = S.Catalan
EulerGamma = S.EulerGamma
GoldenRatio = S.GoldenRatio
|
{
"content_hash": "95d6a0ac1511173d6583a2696bccc12d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 77,
"avg_line_length": 38.470588235294116,
"alnum_prop": 0.7622324159021406,
"repo_name": "srjoglekar246/sympy",
"id": "8b5d3c4f060d8974f698b843a4e51c3f2c7cb5b3",
"size": "1308",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy/core/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10283965"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "TeX",
"bytes": "8789"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
from server.modules import (accounts, groups)
import pickle
import time
"""
Manages and stores messages between users and in groups.
"""
class MessageManager:
def __init__(self):
self.callbacks = {}
try:
with open('messages.pickle', 'rb') as f:
self.messages = pickle.load(f)
except FileNotFoundError:
self.messages = []
"""
Gets all messages between two users.
"""
def get_all_with_users(self, username1, username2):
messages = []
for message in self.messages:
if message["sender"] == username1 and message["receiver"]["type"] == "user" and message["receiver"]["username"] == username2:
messages.append(message)
elif message["sender"] == username2 and message["receiver"]["type"] == "user" and message["receiver"]["username"] == username1:
messages.append(message)
return messages
"""
Gets all messages in a group.
"""
def get_all_in_group(self, group):
groups.manager.validate_group(group)
messages = []
for message in self.messages:
if message["receiver"]["type"] == "group" and message["receiver"]["id"] == group:
messages.append(message)
return messages
"""
Sends a message.
"""
def send(self, sender, receiver_type, text, username=None, group=None):
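        # receiver_type is either "user" (username required) or "group" (group id
        # required); anything else raises an exception below.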
message = {
"id": len(self.messages),
"sender": sender,
"receiver": {
"type": receiver_type
},
"timestamp": time.time(),
"text": text
}
if receiver_type == "user":
accounts.manager.validate_user(username)
message["receiver"]["username"] = username
elif receiver_type == "group":
groups.manager.validate_group(group)
message["receiver"]["id"] = group
else:
raise Exception("Invalid recipient type")
self.messages.append(message)
# If a callback was set to send a message immediately, invoke it now.
if receiver_type == "user":
self.call_callback(sender, message)
self.call_callback(username, message)
elif receiver_type == "group":
for user in groups.manager.get_group(group)["users"]:
self.call_callback(user, message)
self.save()
"""
Sets a callback to be invoked when a given user gets a message.
"""
def set_callback(self, username, callback):
self.callbacks[username] = callback
"""
Removes a callback.
"""
def remove_callback(self, username):
del self.callbacks[username]
"""
Invokes a message callback.
"""
def call_callback(self, username, message):
try:
if username in self.callbacks:
return self.callbacks[username](message)
except:
pass
"""
Saves the message database.
"""
def save(self):
with open('messages.pickle', 'wb') as f:
pickle.dump(self.messages, f)
manager = MessageManager()
|
{
"content_hash": "1e1c9121b4d0f5ba14fa96bc8fd4de16",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 139,
"avg_line_length": 29.542056074766354,
"alnum_prop": 0.5631129389433723,
"repo_name": "cs460-group1/chat-server",
"id": "9bfe4feaf04f0a314213a4899a330e4751d94200",
"size": "3161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/modules/messages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25616"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
readme = open('README.rst').read()
def get_version():
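  # Use the current Mercurial tag ('hg id -t') as the version; fall back to 'dev'
  # when the tag cannot be read.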
try:
import subprocess
p = subprocess.Popen('hg id -t', shell=True, stdout=subprocess.PIPE)
tag = p.stdout.read()[1:].strip()
return tag
except:
return 'dev'
setup(
name='eops',
version=get_version(),
description="""""",
long_description=readme,
author='Haltu',
packages=find_packages(),
include_package_data=True,
license="MIT",
zip_safe=False,
keywords='',
install_requires=[
],
)
# vim: tabstop=2 expandtab shiftwidth=2 softtabstop=2
|
{
"content_hash": "abb144db2ebdc06eb89c3bd6e7c42016",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 76,
"avg_line_length": 21.896551724137932,
"alnum_prop": 0.6125984251968504,
"repo_name": "haltu/eca-eops",
"id": "9e2eb97c65e734bf41518f4035f342760932f18e",
"size": "682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/eops/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10800"
},
{
"name": "HTML",
"bytes": "16540"
},
{
"name": "JavaScript",
"bytes": "51606"
},
{
"name": "Python",
"bytes": "19139"
},
{
"name": "Shell",
"bytes": "298"
}
],
"symlink_target": ""
}
|
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: opendj_backendprop
short_description: Will update the backend configuration of OpenDJ via the dsconfig set-backend-prop command.
description:
- This module will update settings for OpenDJ with the command set-backend-prop.
    - It will first check via the get-backend-prop command whether the configuration needs to be applied.
version_added: "2.2"
author:
- Werner Dijkerman
options:
opendj_bindir:
description:
- The path to the bin directory of OpenDJ.
required: false
default: /opt/opendj/bin
hostname:
description:
- The hostname of the OpenDJ server.
required: true
port:
description:
- The Admin port on which the OpenDJ instance is available.
required: true
username:
description:
- The username to connect to.
required: false
default: cn=Directory Manager
password:
description:
- The password for the cn=Directory Manager user.
- Either password or passwordfile is needed.
required: false
passwordfile:
description:
- Location to the password file which holds the password for the cn=Directory Manager user.
- Either password or passwordfile is needed.
required: false
backend:
description:
- The name of the backend on which the property needs to be updated.
required: true
name:
description:
- The configuration setting to update.
required: true
value:
description:
- The value for the configuration item.
required: true
state:
description:
- If configuration needs to be added/updated
required: false
default: "present"
'''
EXAMPLES = '''
- name: "Add or update OpenDJ backend properties"
action: opendj_backendprop
hostname=localhost
port=4444
username="cn=Directory Manager"
password=password
backend=userRoot
name=index-entry-limit
value=5000
'''
RETURN = '''
'''
import subprocess
class BackendProp(object):
def __init__(self, module):
self._module = module
def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name):
my_command = [
opendj_bindir + '/dsconfig',
'get-backend-prop',
'-h', hostname,
'--port', str(port),
'--bindDN', username,
'--backend-name', backend_name,
'-n', '-X', '-s'
] + password_method
process = subprocess.Popen(my_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode == 0:
return stdout
else:
self._module.fail_json(msg="Error message: " + str(stderr))
    def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value):
my_command = [
opendj_bindir + '/dsconfig',
'set-backend-prop',
'-h', hostname,
'--port', str(port),
'--bindDN', username,
'--backend-name', backend_name,
'--set', name + ":" + value,
'-n', '-X'
] + password_method
process = subprocess.Popen(my_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode == 0:
return True
else:
self._module.fail_json(msg="Error message: " + stderr)
def validate_data(self, data=None, name=None, value=None):
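        # Parse the 'name value' lines returned by get-backend-prop and report
        # whether the property already holds the desired value.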
for config_line in data.split('\n'):
if config_line:
split_line = config_line.split()
if split_line[0] == name:
if split_line[1] == value:
return True
return False
def main():
module = AnsibleModule(
argument_spec=dict(
opendj_bindir=dict(default="/opt/opendj/bin", type="path"),
hostname=dict(required=True),
port=dict(required=True),
username=dict(default="cn=Directory Manager", required=False),
password=dict(required=False, no_log=True),
passwordfile=dict(required=False, type="path"),
backend=dict(required=True),
name=dict(required=True),
value=dict(required=True),
state=dict(default="present"),
),
supports_check_mode=True
)
opendj_bindir = module.params['opendj_bindir']
hostname = module.params['hostname']
port = module.params['port']
username = module.params['username']
password = module.params['password']
passwordfile = module.params['passwordfile']
backend_name = module.params['backend']
name = module.params['name']
value = module.params['value']
state = module.params['state']
if module.params["password"] is not None:
password_method = ['-w', password]
elif module.params["passwordfile"] is not None:
password_method = ['-j', passwordfile]
else:
module.fail_json(msg="No credentials are given. Use either 'password' or 'passwordfile'")
if module.params["passwordfile"] and module.params["password"]:
module.fail_json(msg="only one of 'password' or 'passwordfile' can be set")
opendj = BackendProp(module)
validate = opendj.get_property(opendj_bindir=opendj_bindir,
hostname=hostname,
port=port,
username=username,
password_method=password_method,
backend_name=backend_name)
if validate:
if not opendj.validate_data(data=validate, name=name, value=value):
if module.check_mode:
module.exit_json(changed=True)
if opendj.set_property(opendj_bindir=opendj_bindir,
hostname=hostname,
port=port,
username=username,
password_method=password_method,
backend_name=backend_name,
name=name,
value=value):
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
else:
module.exit_json(changed=False)
else:
module.exit_json(changed=False)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
{
"content_hash": "60b1a024efbb9210de6adf4b914f353e",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 111,
"avg_line_length": 34.605,
"alnum_prop": 0.5597457014882242,
"repo_name": "nwiizo/workspace_2017",
"id": "893bbfdd47d243379eab8996c624fa16550e2e22",
"size": "7676",
"binary": false,
"copies": "47",
"ref": "refs/heads/master",
"path": "ansible-modules-extras/identity/opendj/opendj_backendprop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "173"
},
{
"name": "C++",
"bytes": "7105"
},
{
"name": "CSS",
"bytes": "50021"
},
{
"name": "Go",
"bytes": "112005"
},
{
"name": "HTML",
"bytes": "66435"
},
{
"name": "JavaScript",
"bytes": "73266"
},
{
"name": "Makefile",
"bytes": "1227"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "PowerShell",
"bytes": "277598"
},
{
"name": "Python",
"bytes": "11925958"
},
{
"name": "Ruby",
"bytes": "3779"
},
{
"name": "Rust",
"bytes": "1484076"
},
{
"name": "Shell",
"bytes": "86558"
}
],
"symlink_target": ""
}
|
import os
import pickle
import evolve_interactive as evolve
import neat
evolve.W = 1000
evolve.H = 1000
with open("genome-4432-586.bin", "rb") as f:
g = pickle.load(f)
print(g)
node_names = {0: 'x', 1: 'y', 2: 'gray'}
# visualize.draw_net(g, view=True, filename="picture-net.gv",
# show_disabled=False, prune_unused=True, node_names=node_names)
# Determine path to configuration file.
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'interactive_config_gray')
# Note that we provide the custom stagnation class to the Config constructor.
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, evolve.InteractiveStagnation,
config_path)
net = neat.nn.FeedForwardNetwork.create(g, config)
# pb = evolve.PictureBreeder(128, 128, 1500, 1500, 1280, 1024, 'color', 4)
# pb.make_high_resolution(g, config)
names = {-1: 'x0', -2: 'y0', 0: 'gray'}
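# Print every node of the evaluated network as a readable expression:
# node = activation(bias + response * aggregation(weighted inputs))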
for node, act_func, agg_func, bias, response, links in net.node_evals:
node_inputs = []
for i, w in links:
input_name = names[i] if i in names else 'node%d' % i
node_inputs.append('%s * %.3f' % (input_name, w))
s = '%s(%s)' % (agg_func.__name__, ', '.join(node_inputs))
node_name = names[node] if node in names else 'node%d' % node
print('%s = %s(%.3f + %.3f * %s)' % (node_name, act_func.__name__, bias, response, s))
|
{
"content_hash": "e60fbdfbab3b0e9db0fb28481da79c36",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 94,
"avg_line_length": 39.02564102564103,
"alnum_prop": 0.5992115637319316,
"repo_name": "CodeReclaimers/neat-python",
"id": "8c77f1f224db15d788b722406f08c7c9e4dd4558",
"size": "1522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/picture2d/render.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "246529"
}
],
"symlink_target": ""
}
|
import re
import sys
import os
from pathlib import Path
from docutils import nodes
from docutils.statemachine import ViewList
from docutils.transforms import Transform
from docutils.parsers.rst import Directive, directives
from sphinx import addnodes
from sphinx.util.nodes import set_source_info, process_index_entry
sys.path.append(str(Path(__file__).parent))
ON_RTD = os.environ.get('READTHEDOCS', None) == 'True'
needs_sphinx = '1.3'
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
'sphinx.ext.todo',
# Domain for Emacs Lisp
'elisp',
# Cross-references to info nodes
'info'
]
# Project metadata
project = 'Flycheck'
copyright = '© 2014-2016, Sebastian Wiesner and Flycheck contributors'
author = 'Sebastian Wiesner'
def read_version():
"""Extract version number from ``flycheck.el`` and return it as string."""
version_pattern = re.compile(r'^;;\s*Version:\s+(\d.+)$', re.MULTILINE)
flycheck = Path(__file__).resolve().parent.parent.joinpath('flycheck.el')
with flycheck.open(encoding='utf-8') as source:
match = version_pattern.search(source.read())
if match:
return match.group(1)
else:
raise ValueError('Failed to parse Flycheck version from '
'Version: of flycheck.el')
def read_minimum_emacs_version():
"""Extract minimum Emacs version from ``flycheck.el``."""
version_pattern = re.compile(
r'^;; Package-Requires:.*\(emacs\s*"([^"]+)"\).*$', re.MULTILINE)
flycheck = Path(__file__).resolve().parent.parent.joinpath('flycheck.el')
with flycheck.open(encoding='utf-8') as source:
match = version_pattern.search(source.read())
if match:
return match.group(1)
else:
        raise ValueError('Failed to parse minimum Emacs version from '
'Package-Requires of flycheck.el!')
release = read_version()
version = '.'.join(release.split('.')[:2])
# Source settings
source_suffix = '.rst'
master_doc = 'index'
rst_prolog = """\
.. role:: elisp(code)
:language: elisp
.. |min-emacs| replace:: {emacs_version}
""".format(emacs_version=read_minimum_emacs_version())
# Build settings
exclude_patterns = ['_build']
default_role = 'any'
primary_domain = 'el'
templates_path = ['_templates']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Warn about all undefined references, but exclude references to built-in
# symbols which we don't document here.
# TODO: Resolve built-in symbols to the Emacs Lisp references?
nitpicky = True
nitpick_ignore = [
('any', 'default-directory'),
('any', 'package-initialize'),
('any', 'package-archives'),
('any', 'user-init-file'),
('any', 'user-emacs-directory'),
]
# HTML settings
html_theme = 'alabaster'
html_theme_options = {
'logo': 'logo.png',
'logo_name': False,
'description': 'Syntax checking for GNU Emacs',
'github_user': 'flycheck',
'github_repo': 'flycheck',
'github_type': 'star',
'github_banner': True,
'travis_button': False,
# Google Analytics ID for our documentation. On ReadTheDocs it's set via
# the Admin interface so we'll skip it here.
'analytics_id': 'UA-71100672-2' if not ON_RTD else None,
}
html_sidebars = {
'**': [
'about.html',
'tables.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
html_static_path = ['_static']
html_favicon = '_static/favicon.ico'
# Ignore localhost when checking links
linkcheck_ignore = [r'http://localhost:\d+/?']
# Cross-reference remote Sphinx sites
intersphinx_mapping = {
'python': ('https://docs.python.org/3.5', None)
}
extlinks = {
'gh': ('https://github.com/%s', ''),
'flyc': ('https://github.com/flycheck/%s', '')
}
# While we still have work to do :)
# FIXME: Remove when the old Texinfo manual is completely ported
todo_include_todos = True
class SupportedLanguage(Directive):
required_arguments = 1
final_argument_whitespace = True
has_content = True
option_spec = {
'index_as': directives.unchanged
}
def run(self):
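        # Build an index entry, an explicit target and a titled section for the
        # language, then parse the directive body into that section.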
language = self.arguments[0]
indexed_languages = self.options.get('index_as') or language
index_specs = ['pair: {}; language'.format(l)
for l in indexed_languages.splitlines()]
name = nodes.fully_normalize_name(language)
target = 'language-{}'.format(name)
targetnode = nodes.target('', '', ids=[target])
self.state.document.note_explicit_target(targetnode)
indexnode = addnodes.index()
indexnode['entries'] = []
indexnode['inline'] = False
set_source_info(self, indexnode)
for spec in index_specs:
indexnode['entries'].extend(process_index_entry(spec, target))
sectionnode = nodes.section()
sectionnode['names'].append(name)
title, messages = self.state.inline_text(language, self.lineno)
titlenode = nodes.title(language, '', *title)
sectionnode += titlenode
sectionnode += messages
self.state.document.note_implicit_target(sectionnode, sectionnode)
self.state.nested_parse(self.content, self.content_offset, sectionnode)
return [indexnode, targetnode, sectionnode]
class SyntaxCheckerConfigurationFile(Directive):
required_arguments = 1
final_argument_whitespace = True
def run(self):
option = self.arguments[0]
wrapper = nodes.paragraph()
docname = self.state.document.settings.env.docname
template = ViewList("""\
.. index:: single: Configuration file; {0}
.. el:defcustom:: {0}
Configuration file for this syntax checker. See
:ref:`flycheck-checker-config-files`.
""".format(option).splitlines(), docname)
self.state.nested_parse(template, self.content_offset, wrapper)
return wrapper.children.copy()
class IssueReferences(Transform):
ISSUE_PATTERN = re.compile(r'\[GH-(\d+)\]')
ISSUE_URL_TEMPLATE = 'https://github.com/flycheck/flycheck/issues/{}'
default_priority = 999
def apply(self):
docname = self.document.settings.env.docname
if docname != 'changes':
            # Only transform issue references in the changelog
return
for node in self.document.traverse(nodes.Text):
parent = node.parent
new_nodes = []
last_issue_ref_end = 0
text = str(node)
for match in self.ISSUE_PATTERN.finditer(text):
# Extract the text between the last issue reference and the
# current issue reference and put it into a new text node
head = text[last_issue_ref_end:match.start()]
if head:
new_nodes.append(nodes.Text(head))
# Adjust the position of the last issue reference in the
# text
last_issue_ref_end = match.end()
                # Extract the issue text and the issue number
issuetext = match.group(0)
issue_id = match.group(1)
# Turn the issue into a proper reference
refnode = nodes.reference()
refnode['refuri'] = self.ISSUE_URL_TEMPLATE.format(issue_id)
refnode.append(nodes.inline(
issuetext, issuetext, classes=['xref', 'issue']))
new_nodes.append(refnode)
# No issue references were found, move on to the next node
if not new_nodes:
continue
# Extract the remaining text after the last issue reference
tail = text[last_issue_ref_end:]
if tail:
new_nodes.append(nodes.Text(tail))
parent.replace(node, new_nodes)
def build_offline_html(app):
from sphinx.builders.html import StandaloneHTMLBuilder
build_standalone = isinstance(app.builder, StandaloneHTMLBuilder)
if app.config.flycheck_offline_html and build_standalone:
app.info('Building offline documentation without external resources!')
app.builder.theme_options['github_banner'] = 'false'
app.builder.theme_options['github_button'] = 'false'
app.builder.theme_options['analytics_id'] = None
def add_offline_to_context(app, _pagename, _templatename, context, _doctree):
# Expose offline setting in HTML context
context['flycheck_offline_html'] = app.config.flycheck_offline_html
def setup(app):
app.add_object_type('syntax-checker', 'checker',
'pair: %s; Syntax checker')
app.add_directive('supported-language', SupportedLanguage)
app.add_directive('syntax-checker-config-file',
SyntaxCheckerConfigurationFile)
app.add_transform(IssueReferences)
# Build offline HTML that loads no external resources, for use in 3rd party
# packages, see https://github.com/flycheck/flycheck/issues/999
app.add_config_value('flycheck_offline_html', False, 'html')
app.connect('builder-inited', build_offline_html)
app.connect('html-page-context', add_offline_to_context)
|
{
"content_hash": "5de021759eb1f5b0f303fe5abc1d6b49",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 79,
"avg_line_length": 32.98566308243728,
"alnum_prop": 0.6349016625013583,
"repo_name": "sysint64/emacs-config",
"id": "358a8ec759ef019f8c978891964bda5074f0b41b",
"size": "9941",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "flycheck/doc/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "385"
},
{
"name": "C",
"bytes": "211"
},
{
"name": "C++",
"bytes": "380"
},
{
"name": "CSS",
"bytes": "5467"
},
{
"name": "Clojure",
"bytes": "1"
},
{
"name": "CoffeeScript",
"bytes": "165"
},
{
"name": "Coq",
"bytes": "316"
},
{
"name": "D",
"bytes": "3230"
},
{
"name": "Elixir",
"bytes": "18"
},
{
"name": "Emacs Lisp",
"bytes": "2622362"
},
{
"name": "Erlang",
"bytes": "320"
},
{
"name": "Fortran",
"bytes": "408"
},
{
"name": "Go",
"bytes": "833"
},
{
"name": "Groovy",
"bytes": "30"
},
{
"name": "HTML",
"bytes": "2301"
},
{
"name": "Haskell",
"bytes": "1244"
},
{
"name": "JavaScript",
"bytes": "236"
},
{
"name": "Lua",
"bytes": "175"
},
{
"name": "Makefile",
"bytes": "11374"
},
{
"name": "PHP",
"bytes": "672"
},
{
"name": "Perl",
"bytes": "88"
},
{
"name": "PowerShell",
"bytes": "153"
},
{
"name": "Processing",
"bytes": "69"
},
{
"name": "Protocol Buffer",
"bytes": "43"
},
{
"name": "Puppet",
"bytes": "484"
},
{
"name": "Python",
"bytes": "39778"
},
{
"name": "R",
"bytes": "137"
},
{
"name": "Racket",
"bytes": "189"
},
{
"name": "Ruby",
"bytes": "4042"
},
{
"name": "Rust",
"bytes": "829"
},
{
"name": "Scala",
"bytes": "235"
},
{
"name": "Scheme",
"bytes": "79"
},
{
"name": "Shell",
"bytes": "2494"
},
{
"name": "TeX",
"bytes": "114"
},
{
"name": "TypeScript",
"bytes": "78"
},
{
"name": "Verilog",
"bytes": "177"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from sentry.plugins import plugins
from sentry.web.frontend.base import ProjectView
class ProjectPluginsView(ProjectView):
required_scope = 'project:write'
def handle(self, request, organization, project):
if request.POST:
enabled = set(request.POST.getlist('plugin'))
for plugin in plugins.configurable_for_project(project, version=None):
if plugin.slug in enabled:
plugin.enable(project)
else:
plugin.disable(project)
messages.add_message(
request, messages.SUCCESS, _('Your settings were saved successfully.')
)
return self.redirect(request.path)
|
{
"content_hash": "c4dda1a3df61c9bbcaba0e016f74843d",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 86,
"avg_line_length": 32.76923076923077,
"alnum_prop": 0.6431924882629108,
"repo_name": "looker/sentry",
"id": "0eb33ddea61c54134142ad35a6f96e86f4279b3a",
"size": "852",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/web/frontend/project_plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "289931"
},
{
"name": "HTML",
"bytes": "241322"
},
{
"name": "JavaScript",
"bytes": "3112298"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "36341504"
},
{
"name": "Ruby",
"bytes": "204"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
}
|
from django.urls import path
from . import views
urlpatterns = [
# Homepage
path("", views.homepage, name="pontoon.homepage"),
]
|
{
"content_hash": "9239694729d73526e1c4f430403205ca",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 54,
"avg_line_length": 17.375,
"alnum_prop": 0.6762589928057554,
"repo_name": "jotes/pontoon",
"id": "2d8ca036aed26c378b769645a8c1d93734cdf1fd",
"size": "139",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pontoon/homepage/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226580"
},
{
"name": "Dockerfile",
"bytes": "2640"
},
{
"name": "FreeMarker",
"bytes": "35248"
},
{
"name": "HTML",
"bytes": "151639"
},
{
"name": "JavaScript",
"bytes": "1332848"
},
{
"name": "Makefile",
"bytes": "3551"
},
{
"name": "Python",
"bytes": "1391398"
},
{
"name": "Shell",
"bytes": "3676"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import unittest
from six.moves import xrange
import pydocumentdb.document_client as document_client
import pydocumentdb.documents as documents
import test.test_config as test_config
from pydocumentdb.errors import HTTPFailure
class _config:
host = test_config._test_config.host
master_key = test_config._test_config.masterKey
PARTITION_KEY = 'key'
UNIQUE_PARTITION_KEY = 'uniquePartitionKey'
FIELD = 'field'
TEST_DATABASE_NAME = 'testDb'
DOCUMENTS_COUNT = 400
DOCS_WITH_SAME_PARTITION_KEY = 200
docs_with_numeric_id = 0
sum = 0
class _helper:
@classmethod
def clean_up_database(cls):
client = document_client.DocumentClient(_config.host,
{'masterKey': _config.master_key})
query_iterable = client.QueryDatabases(
'SELECT * FROM root r WHERE r.id=\'{}\''.format(_config.TEST_DATABASE_NAME))
it = iter(query_iterable)
test_db = next(it, None)
if test_db is not None:
client.DeleteDatabase(test_db['_self'])
class AggregateQueryTestSequenceMeta(type):
def __new__(mcs, name, bases, dict):
def _run_one(query, expected_result):
def test(self):
self._execute_query_and_validate_results(mcs.client, mcs.collection_link, query, expected_result)
return test
def _setup():
if (not _config.master_key or not _config.host):
raise Exception(
"You must specify your Azure DocumentDB account values for "
"'masterKey' and 'host' at the top of this class to run the "
"tests.")
_helper.clean_up_database()
mcs.client = document_client.DocumentClient(_config.host,
{'masterKey': _config.master_key})
created_db = mcs.client.CreateDatabase({'id': _config.TEST_DATABASE_NAME})
created_collection = _create_collection(mcs.client, created_db)
mcs.collection_link = _get_collection_link(created_db, created_collection)
# test documents
document_definitions = []
values = [None, False, True, "abc", "cdfg", "opqrs", "ttttttt", "xyz", "oo", "ppp"]
for value in values:
d = {_config.PARTITION_KEY: value}
document_definitions.append(d)
for i in xrange(_config.DOCS_WITH_SAME_PARTITION_KEY):
d = {_config.PARTITION_KEY: _config.UNIQUE_PARTITION_KEY,
'resourceId': i,
_config.FIELD: i + 1}
document_definitions.append(d)
_config.docs_with_numeric_id = \
_config.DOCUMENTS_COUNT - len(values) - _config.DOCS_WITH_SAME_PARTITION_KEY
for i in xrange(_config.docs_with_numeric_id):
d = {_config.PARTITION_KEY: i + 1}
document_definitions.append(d)
_config.sum = _config.docs_with_numeric_id \
* (_config.docs_with_numeric_id + 1) / 2.0
_insert_doc(mcs.collection_link, document_definitions, mcs.client)
def _generate_test_configs():
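      # Build (name, query, expected) triples: cross-partition aggregates with and
      # without ORDER BY, plus single-partition aggregates where the plain SELECT
      # (without VALUE) variant is expected to fail.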
aggregate_query_format = 'SELECT VALUE {}(r.{}) FROM r WHERE {}'
aggregate_orderby_query_format = 'SELECT VALUE {}(r.{}) FROM r WHERE {} ORDER BY r.{}'
aggregate_configs = [
['AVG', _config.sum / _config.docs_with_numeric_id,
'IS_NUMBER(r.{})'.format(_config.PARTITION_KEY)],
['AVG', None, 'true'],
['COUNT', _config.DOCUMENTS_COUNT, 'true'],
['MAX', 'xyz', 'true'],
['MIN', None, 'true'],
['SUM', _config.sum, 'IS_NUMBER(r.{})'.format(_config.PARTITION_KEY)],
['SUM', None, 'true']
]
for operator, expected, condition in aggregate_configs:
_all_tests.append([
'{} {}'.format(operator, condition),
aggregate_query_format.format(operator, _config.PARTITION_KEY, condition),
expected])
_all_tests.append([
'{} {} OrderBy'.format(operator, condition),
aggregate_orderby_query_format.format(operator, _config.PARTITION_KEY, condition,
_config.PARTITION_KEY),
expected])
aggregate_single_partition_format = 'SELECT VALUE {}(r.{}) FROM r WHERE r.{} = \'{}\''
aggregate_orderby_single_partition_format = 'SELECT {}(r.{}) FROM r WHERE r.{} = \'{}\''
      same_partition_sum = _config.DOCS_WITH_SAME_PARTITION_KEY * (_config.DOCS_WITH_SAME_PARTITION_KEY + 1) / 2.0
      aggregate_single_partition_configs = [
        ['AVG', same_partition_sum / _config.DOCS_WITH_SAME_PARTITION_KEY],
        ['COUNT', _config.DOCS_WITH_SAME_PARTITION_KEY],
        ['MAX', _config.DOCS_WITH_SAME_PARTITION_KEY],
        ['MIN', 1],
        ['SUM', same_partition_sum]
]
for operator, expected in aggregate_single_partition_configs:
_all_tests.append([
'{} SinglePartition {}'.format(operator, 'SELECT VALUE'),
aggregate_single_partition_format.format(
operator, _config.FIELD, _config.PARTITION_KEY, _config.UNIQUE_PARTITION_KEY), expected])
_all_tests.append([
'{} SinglePartition {}'.format(operator, 'SELECT'),
aggregate_orderby_single_partition_format.format(
operator, _config.FIELD, _config.PARTITION_KEY, _config.UNIQUE_PARTITION_KEY),
Exception()])
def _run_all():
for test_name, query, expected_result in _all_tests:
test_name = "test_%s" % test_name
dict[test_name] = _run_one(query, expected_result)
def _create_collection(client, created_db):
collection_definition = {
'id': 'sample collection',
'indexingPolicy': {
'includedPaths': [
{
'path': '/',
'indexes': [
{
'kind': 'Range',
'dataType': 'Number'
},
{
'kind': 'Range',
'dataType': 'String'
}
]
}
]
},
'partitionKey': {
'paths': [
'/{}'.format(_config.PARTITION_KEY)
],
'kind': documents.PartitionKind.Hash
}
}
collection_options = {'offerThroughput': 10100}
created_collection = client.CreateCollection(_get_database_link(created_db),
collection_definition,
collection_options)
return created_collection
def _insert_doc(collection_link, document_definitions, client):
created_docs = []
for d in document_definitions:
created_doc = client.CreateDocument(collection_link, d)
created_docs.append(created_doc)
return created_docs
def _get_database_link(database, is_name_based=True):
if is_name_based:
return 'dbs/' + database['id']
else:
return database['_self']
def _get_collection_link(database, document_collection, is_name_based=True):
if is_name_based:
return _get_database_link(database) + '/colls/' + document_collection['id']
else:
return document_collection['_self']
_all_tests = []
_setup()
_generate_test_configs()
_run_all()
return type.__new__(mcs, name, bases, dict)
class AggregationQueryTest(unittest.TestCase):
@classmethod
def tearDownClass(cls):
_helper.clean_up_database()
def _execute_query_and_validate_results(self, client, collection_link, query, expected):
print('Running test with query: ' + query)
# executes the query and validates the results against the expected results
options = {'enableCrossPartitionQuery': 'true'}
result_iterable = client.QueryDocuments(collection_link, query, options)
def _verify_result():
######################################
# test next() behavior
######################################
it = result_iterable.__iter__()
def invokeNext():
return next(it)
            # validate that successive invocations of next() produce the expected results
item = invokeNext()
self.assertEqual(item, expected)
# after the result set is exhausted, invoking next must raise a StopIteration exception
self.assertRaises(StopIteration, invokeNext)
######################################
# test fetch_next_block() behavior
######################################
fetched_res = result_iterable.fetch_next_block()
fetched_size = len(fetched_res)
self.assertEqual(fetched_size, 1)
self.assertEqual(fetched_res[0], expected)
# no more results will be returned
self.assertEqual(result_iterable.fetch_next_block(), [])
if isinstance(expected, Exception):
self.assertRaises(HTTPFailure, _verify_result)
else:
_verify_result()
__metaclass__ = AggregateQueryTestSequenceMeta
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "46d0800ab1e421d4d7093facc4d732e2",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 119,
"avg_line_length": 40.62948207171315,
"alnum_prop": 0.5118650715826633,
"repo_name": "rnagpal/azure-documentdb-python",
"id": "c7c033eaeaf00de11e32d64ce248b54c3909347e",
"size": "11320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/aggregate_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "609226"
}
],
"symlink_target": ""
}
|
from .render import CodePygmentsRenderer # noqa
|
{
"content_hash": "d61fb09c3cc9038ccb90b27f3ee25345",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 48,
"avg_line_length": 49,
"alnum_prop": 0.8163265306122449,
"repo_name": "AddisonSchiller/modular-file-renderer",
"id": "5a0dd791973766b3acf33aac3afeb8a0a1b99977",
"size": "49",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "mfr/extensions/codepygments/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93955"
},
{
"name": "HTML",
"bytes": "28280"
},
{
"name": "Java",
"bytes": "835161"
},
{
"name": "JavaScript",
"bytes": "1238281"
},
{
"name": "Jupyter Notebook",
"bytes": "1202318"
},
{
"name": "Mako",
"bytes": "35815"
},
{
"name": "Python",
"bytes": "233412"
}
],
"symlink_target": ""
}
|
import os
import shutil
import sys
xwalk_dir = os.path.dirname(os.path.abspath(__file__))
def CopyToPathWithName(root, name, final_path, rename):
if name == '':
return False
origin_path = os.path.join(root, name)
if not os.path.exists(origin_path):
print('Error: \'' + origin_path + '\' not found.')
sys.exit(6)
if not os.path.exists(final_path):
os.makedirs(final_path)
# Get the extension.
# Need to take care of special case, such as 'img.9.png'
name_components = name.split('.')
name_components[0] = rename
new_name = '.'.join(name_components)
final_path_with_name = os.path.join(final_path, new_name)
shutil.copyfile(origin_path, final_path_with_name)
return True
def CopyDrawables(image_dict, orientation, sanitized_name, name, app_root):
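  # Map the manifest's density-scaled images (0.75x/1x/1.5x/2x) onto Android's
  # drawable-ldpi/mdpi/hdpi/xhdpi directories for the requested orientation.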
drawable = os.path.join(xwalk_dir, sanitized_name, 'res', 'drawable')
if orientation == 'landscape':
drawable = drawable + '-land'
elif orientation == 'portrait':
drawable = drawable + '-port'
drawable_ldpi = drawable + '-ldpi'
drawable_mdpi = drawable + '-mdpi'
drawable_hdpi = drawable + '-hdpi'
drawable_xhdpi = drawable + '-xhdpi'
image_075x = image_dict.get('0.75x', '')
image_1x = image_dict.get('1x', '')
image_15x = image_dict.get('1.5x', '')
image_2x = image_dict.get('2x', '')
# Copy all supported images: 0.75x, 1x, 1.5x, 2x.
has_image = False
if image_075x:
if CopyToPathWithName(app_root, image_075x, drawable_ldpi, name):
has_image = True
if image_1x:
if CopyToPathWithName(app_root, image_1x, drawable_mdpi, name):
has_image = True
if image_15x:
if CopyToPathWithName(app_root, image_15x, drawable_hdpi, name):
has_image = True
if image_2x:
if CopyToPathWithName(app_root, image_2x, drawable_xhdpi, name):
has_image = True
# If no supported images found, find the closest one as 1x.
if not has_image:
closest = ''
delta = sys.maxsize
    for (k, v) in image_dict.items():
items = k.split('x')
if len(items) == 2:
float_value = sys.maxsize
try:
float_value = float(items[0])
except ValueError:
continue
if abs(float_value - 1) < delta:
closest = v
if CopyToPathWithName(app_root, closest, drawable_mdpi, name):
delta = float_value
def CustomizeDrawable(image, orientation, sanitized_name, app_root, name):
# Parse the image.
# The format of image: 'image-1x.png [1x], image-75x.png 0.75x,
# image-15x.png 1.5x, image-2x.png 2x'
image_list = image.split(',')
# The first image: 'image-1x.png', the density is not provided.
image_pair_1 = image_list[0].strip()
items = image_pair_1.split(' ')
image_1x = ''
if len(items) == 1:
image_1x = items[0]
image_list.pop(0)
# The dictionary which contains the image pair.
image_dict = {'1x': image_1x}
for image_pair in image_list:
items = image_pair.strip().split(' ')
if len(items) >= 2:
x = items[len(items) - 1]
image_item = items[0]
image_dict[x] = image_item
CopyDrawables(image_dict, orientation, sanitized_name, name, app_root)
def CustomizeForeground(image, orientation, sanitized_name, app_root):
CustomizeDrawable(image, orientation, sanitized_name,
app_root, "launchscreen_img")
def CustomizeBackground(background_color,
background_image,
orientation,
sanitized_name,
app_root):
background_path = os.path.join(xwalk_dir, sanitized_name, 'res',
'drawable', 'launchscreen_bg.xml')
if not os.path.isfile(background_path):
print('Error: launchscreen_bg.xml is missing in the build tool.')
sys.exit(6)
has_background = False
background_file = open(background_path, 'r')
content = background_file.read()
background_file.close()
# Fill the background_color.
if background_color:
content = content.replace('#000000', background_color, 1)
has_background = True
# Fill the background_image.
if background_image:
CustomizeDrawable(background_image, orientation, sanitized_name,
app_root, "launchscreen_bg_img")
# Only set Background Image once for each orientation.
tmp = '<item>\n' \
' <bitmap\n' \
' android:src=\"@drawable/launchscreen_bg_img\"\n' \
' android:tileMode=\"repeat\" />\n' \
'</item>\n'
content = content.replace('<!-- Background Image -->', tmp, 1)
has_background = True
if has_background:
background_file = open(background_path, 'w')
background_file.write(content)
background_file.close()
return has_background
def CustomizeByOrientation(manifest, orientation, sanitized_name, app_root):
background_color = manifest.GetLaunchScreenBackgroundColor(orientation)
background_image = manifest.GetLaunchScreenBackgroundImage(orientation)
image = manifest.GetLaunchScreenImage(orientation)
# Customize background: background_color, background_image.
has_background = CustomizeBackground(background_color, background_image,
orientation, sanitized_name, app_root)
# Customize foreground: image.
CustomizeForeground(image, orientation, sanitized_name, app_root)
return has_background
def CustomizeLaunchScreen(manifest, sanitized_name):
if manifest is None:
return False
app_root = os.path.dirname(manifest.input_path)
default = CustomizeByOrientation(manifest, 'default',
sanitized_name, app_root)
portrait = CustomizeByOrientation(manifest, 'portrait',
sanitized_name, app_root)
landscape = CustomizeByOrientation(manifest, 'landscape',
sanitized_name, app_root)
return default or portrait or landscape
|
{
"content_hash": "33d21beaa9f32d3254fcd836cfdf811e",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 77,
"avg_line_length": 35.63253012048193,
"alnum_prop": 0.6417582417582418,
"repo_name": "huningxin/crosswalk",
"id": "719d02fe4495fa915475f9af7d01c7ba5aa4dd29",
"size": "6102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/tools/android/customize_launch_screen.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41897"
},
{
"name": "C++",
"bytes": "1728091"
},
{
"name": "CSS",
"bytes": "485"
},
{
"name": "Java",
"bytes": "948712"
},
{
"name": "JavaScript",
"bytes": "64943"
},
{
"name": "Objective-C",
"bytes": "688"
},
{
"name": "Objective-C++",
"bytes": "16628"
},
{
"name": "Python",
"bytes": "241234"
},
{
"name": "Shell",
"bytes": "6329"
}
],
"symlink_target": ""
}
|
import time
import datetime
class FeedTime(object):
'''
    Represents a time of day at which a feed should happen.
Attributes:
hour_of_the_day: hour to be fed in 24 hour clock
minute_of_the_day: minute of the hour to be fed
'''
def __init__(self, hour_of_the_day, minute_of_the_day):
self.hour_of_the_day = hour_of_the_day
self.minute_of_the_day = minute_of_the_day
def to_epoch_time(self):
'''
Return time of feed as seconds since epoch
'''
return time.mktime(self.to_datetime().timetuple())
def to_datetime(self):
'''
Return the feed time as a datetime
Note that this will use the current date
'''
feed_time = datetime.time(self.hour_of_the_day, self.minute_of_the_day, 0)
return datetime.datetime.combine(datetime.datetime.today(), feed_time)
def __eq__(self, other):
'''
Override equality check to test against 'time.struct_time'
TODO: fix this type problem here
'''
if type(other) is type(self):
return self.hour_of_the_day == other.hour_of_the_day and self.minute_of_the_day == other.minute_of_the_day
# TODO check for/coerce time/datetime type
        # for now assume it's a time tuple https://docs.python.org/2/library/time.html#time.struct_time
if self.hour_of_the_day == other[3] and self.minute_of_the_day == other[4]:
return True
else:
return False
def to_display(self):
return "<FeedTime: hour={:d} minute={:d}>".format(self.hour_of_the_day, self.minute_of_the_day)
|
{
"content_hash": "1b3db9df20965d337bf31e479549af4c",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 112,
"avg_line_length": 31.76086956521739,
"alnum_prop": 0.6577686516084873,
"repo_name": "rob-murray/cat-bot",
"id": "51bace7c14b846082c964b41fdd6eca0b6c79cf8",
"size": "1461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cat_bot/models/feed_time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22893"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
}
|
import base64
from writers import adml_writer
from writers.admx_writer import AdmxElementType
def GetWriter(config):
'''Factory method for creating ADMLWriter objects for the Chrome OS platform.
See the constructor of TemplateWriter for description of arguments.
'''
return ChromeOSADMLWriter(['chrome_os'], config)
class ChromeOSADMLWriter(adml_writer.ADMLWriter):
''' Class for generating Chrome OS ADML policy templates. It is used by the
PolicyTemplateGenerator to write the ADML file.
'''
# Overridden.
# These ADML files are used to generate GPO for Active Directory managed
# Chrome OS devices.
def IsPolicySupported(self, policy):
return self.IsCrOSManagementSupported(policy, 'active_directory') and \
super(ChromeOSADMLWriter, self).IsPolicySupported(policy)
# Overridden.
def _GetAdmxElementType(self, policy):
return AdmxElementType.GetType(policy, allow_multi_strings=True)
|
{
"content_hash": "62aab1046582fa60c54118d1a6a0c044",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 79,
"avg_line_length": 33.535714285714285,
"alnum_prop": 0.7657082002129926,
"repo_name": "chromium/chromium",
"id": "24b30e8e8833a615b65062afb950733f9b882c54",
"size": "1103",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "components/policy/tools/template_writers/writers/chromeos_adml_writer.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
Tests of neo.io.blackrockio
"""
# needed for python 3 compatibility
from __future__ import absolute_import
import os
import sys
import re
import warnings
try:
import unittest2 as unittest
except ImportError:
import unittest
import numpy as np
import quantities as pq
from neo import NeuralynxIO, AnalogSignal, SpikeTrain, Event
from neo.test.iotest.common_io_test import BaseTestIO
from neo.core import Segment
class CommonTests(BaseTestIO):
ioclass = NeuralynxIO
files_to_test = []
files_to_download = [
'Cheetah_v5.5.1/original_data/CheetahLogFile.txt',
'Cheetah_v5.5.1/original_data/CheetahLostADRecords.txt',
'Cheetah_v5.5.1/original_data/Events.nev',
'Cheetah_v5.5.1/original_data/STet3a.nse',
'Cheetah_v5.5.1/original_data/STet3b.nse',
'Cheetah_v5.5.1/original_data/Tet3a.ncs',
'Cheetah_v5.5.1/original_data/Tet3b.ncs',
'Cheetah_v5.5.1/plain_data/STet3a.txt',
'Cheetah_v5.5.1/plain_data/STet3b.txt',
'Cheetah_v5.5.1/plain_data/Tet3a.txt',
'Cheetah_v5.5.1/plain_data/Tet3b.txt',
'Cheetah_v5.5.1/plain_data/Events.txt',
'Cheetah_v5.5.1/README.txt',
'Cheetah_v5.7.4/original_data/CSC1.ncs',
'Cheetah_v5.7.4/original_data/CSC2.ncs',
'Cheetah_v5.7.4/original_data/CSC3.ncs',
'Cheetah_v5.7.4/original_data/CSC4.ncs',
'Cheetah_v5.7.4/original_data/CSC5.ncs',
'Cheetah_v5.7.4/original_data/Events.nev',
'Cheetah_v5.7.4/plain_data/CSC1.txt',
'Cheetah_v5.7.4/plain_data/CSC2.txt',
'Cheetah_v5.7.4/plain_data/CSC3.txt',
'Cheetah_v5.7.4/plain_data/CSC4.txt',
'Cheetah_v5.7.4/plain_data/CSC5.txt',
'Cheetah_v5.7.4/plain_data/Events.txt',
'Cheetah_v5.7.4/README.txt']
def setUp(self):
super(CommonTests, self).setUp()
data_dir = os.path.join(self.local_test_dir,
'Cheetah_v{}'.format(self.cheetah_version))
self.sn = os.path.join(data_dir, 'original_data')
self.pd = os.path.join(data_dir, 'plain_data')
if not os.path.exists(self.sn):
raise unittest.SkipTest('data file does not exist:' + self.sn)
class TestCheetah_v551(CommonTests, unittest.TestCase):
cheetah_version = '5.5.1'
def test_read_block(self):
"""Read data in a certain time range into one block"""
t_start, t_stop = 3 * pq.s, 4 * pq.s
nio = NeuralynxIO(self.sn, use_cache='never')
block = nio.read_block(t_starts=[t_start], t_stops=[t_stop])
self.assertEqual(len(nio.parameters_ncs), 2)
self.assertTrue(
{'event_id': 11, 'name': 'Starting Recording', 'nttl': 0} in
nio.parameters_nev['Events.nev']['event_types'])
# Everything put in one segment
self.assertEqual(len(block.segments), 1)
seg = block.segments[0]
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(seg.analogsignals[0].shape[-1], 2)
self.assertEqual(seg.analogsignals[0].sampling_rate.units,
pq.CompoundUnit('32*kHz'))
self.assertEqual(seg.analogsignals[0].t_start, t_start)
self.assertEqual(seg.analogsignals[0].t_stop, t_stop)
self.assertEqual(len(seg.spiketrains), 2)
# Testing different parameter combinations
block = nio.read_block(lazy=True)
self.assertEqual(len(block.segments[0].analogsignals[0]), 0)
self.assertEqual(len(block.segments[0].spiketrains[0]), 0)
block = nio.read_block(cascade=False)
self.assertEqual(len(block.segments), 0)
block = nio.read_block(electrode_list=[0])
self.assertEqual(len(block.segments[0].analogsignals), 1)
self.assertEqual(len(block.channel_indexes[-1].units), 1)
block = nio.read_block(t_starts=None, t_stops=None, events=True,
waveforms=True)
self.assertEqual(len(block.segments[0].analogsignals), 1)
self.assertEqual(len(block.segments[0].spiketrains), 2)
self.assertEqual(len(block.segments[0].spiketrains[0].waveforms),
len(block.segments[0].spiketrains[0]))
self.assertGreater(len(block.segments[0].events), 0)
self.assertEqual(len(block.channel_indexes[-1].units), 2)
block = nio.read_block(t_starts=[t_start], t_stops=[t_stop],
unit_list=[0], electrode_list=[0])
self.assertEqual(len(block.channel_indexes[-1].units), 1)
block = nio.read_block(t_starts=[t_start], t_stops=[t_stop],
unit_list=False)
self.assertEqual(len(block.channel_indexes[-1].units), 0)
def test_read_segment(self):
"""Read data in a certain time range into one block"""
nio = NeuralynxIO(self.sn, use_cache='never')
seg = nio.read_segment(t_start=None, t_stop=None)
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(seg.analogsignals[0].shape[-1], 2)
self.assertEqual(seg.analogsignals[0].sampling_rate.units,
pq.CompoundUnit('32*kHz'))
self.assertEqual(len(seg.spiketrains), 2)
# Testing different parameter combinations
seg = nio.read_segment(lazy=True)
self.assertEqual(len(seg.analogsignals[0]), 0)
self.assertEqual(len(seg.spiketrains[0]), 0)
seg = nio.read_segment(cascade=False)
self.assertEqual(len(seg.analogsignals), 0)
self.assertEqual(len(seg.spiketrains), 0)
seg = nio.read_segment(electrode_list=[0])
self.assertEqual(len(seg.analogsignals), 1)
seg = nio.read_segment(t_start=None, t_stop=None, events=True,
waveforms=True)
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(len(seg.spiketrains), 2)
self.assertTrue(len(seg.spiketrains[0].waveforms) > 0)
self.assertTrue(len(seg.events) > 0)
def test_read_ncs_data(self):
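        # Decode each .ncs channel and compare the samples against the plain-text
        # export of the same recording, scaled by ADBitVolts.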
t_start, t_stop = 0, 500 * 512 # in samples
nio = NeuralynxIO(self.sn, use_cache='never')
seg = Segment('testsegment')
for el_id, el_dict in nio.parameters_ncs.iteritems():
filepath = nio.parameters_ncs[el_id]['recording_file_name']
filename = filepath.split('/')[-1].split('\\')[-1].split('.')[0]
nio.read_ncs(filename, seg, t_start=t_start, t_stop=t_stop)
anasig = seg.filter({'electrode_id': el_id},
objects=AnalogSignal)[0]
target_data = np.zeros((16679, 512))
with open(self.pd + '/%s.txt' % filename) as datafile:
for i, line in enumerate(datafile):
line = line.strip('\xef\xbb\xbf')
entries = line.split()
target_data[i, :] = np.asarray(entries[4:])
target_data = target_data.reshape((-1, 1)) * el_dict['ADBitVolts']
np.testing.assert_array_equal(target_data[:len(anasig)],
anasig.magnitude)
def test_read_nse_data(self):
t_start, t_stop = None, None # in samples
nio = NeuralynxIO(self.sn, use_cache='never')
seg = Segment('testsegment')
for el_id, el_dict in nio.parameters_nse.iteritems():
filepath = nio.parameters_nse[el_id]['recording_file_name']
filename = filepath.split('/')[-1].split('\\')[-1].split('.')[0]
nio.read_nse(filename, seg, t_start=t_start, t_stop=t_stop,
waveforms=True)
spiketrain = seg.filter({'electrode_id': el_id},
objects=SpikeTrain)[0]
# target_data = np.zeros((500, 32))
# timestamps = np.zeros(500)
entries = []
with open(self.pd + '/%s.txt' % filename) as datafile:
for i, line in enumerate(datafile):
line = line.strip('\xef\xbb\xbf')
entries.append(line.split())
entries = np.asarray(entries, dtype=float)
target_data = entries[:-1, 11:]
timestamps = entries[:-1, 0]
timestamps = (timestamps * pq.microsecond -
nio.parameters_global['t_start'])
np.testing.assert_array_equal(timestamps.magnitude,
spiketrain.magnitude)
np.testing.assert_array_equal(target_data,
spiketrain.waveforms)
def test_read_nev_data(self):
t_start, t_stop = 0 * pq.s, 1000 * pq.s
nio = NeuralynxIO(self.sn, use_cache='never')
seg = Segment('testsegment')
filename = 'Events'
nio.read_nev(filename + '.nev', seg, t_start=t_start, t_stop=t_stop)
timestamps = []
nttls = []
names = []
event_ids = []
with open(self.pd + '/%s.txt' % filename) as datafile:
for i, line in enumerate(datafile):
line = line.strip('\xef\xbb\xbf')
entries = line.split('\t')
nttls.append(int(entries[5]))
timestamps.append(int(entries[3]))
names.append(entries[10].rstrip('\r\n'))
event_ids.append(int(entries[4]))
timestamps = (np.array(timestamps) * pq.microsecond -
nio.parameters_global['t_start'])
# masking only requested spikes
mask = np.where(timestamps < t_stop)[0]
# return if no event fits criteria
if len(mask) == 0:
return
timestamps = timestamps[mask]
nttls = np.asarray(nttls)[mask]
names = np.asarray(names)[mask]
event_ids = np.asarray(event_ids)[mask]
for i in range(len(timestamps)):
events = seg.filter({'nttl': nttls[i]}, objects=Event)
events = [e for e in events
if (e.annotations['marker_id'] == event_ids[i] and
e.labels == names[i])]
self.assertTrue(len(events) == 1)
self.assertTrue(timestamps[i] in events[0].times)
def test_read_ntt_data(self):
pass
# TODO: Implement test_read_ntt_data once ntt files are available
class TestCheetah_v574(TestCheetah_v551, CommonTests, unittest.TestCase):
cheetah_version = '5.7.4'
def test_read_block(self):
"""Read data in a certain time range into one block"""
t_start, t_stop = 3 * pq.s, 4 * pq.s
nio = NeuralynxIO(self.sn, use_cache='never')
block = nio.read_block(t_starts=[t_start], t_stops=[t_stop])
self.assertEqual(len(nio.parameters_ncs), 5)
self.assertTrue(
{'event_id': 19, 'name': 'Starting Recording', 'nttl': 0} in
nio.parameters_nev['Events.nev']['event_types'])
self.assertTrue(
{'event_id': 19, 'name': 'Stopping Recording', 'nttl': 0} in
nio.parameters_nev['Events.nev']['event_types'])
# Everything put in one segment
self.assertEqual(len(block.segments), 1)
seg = block.segments[0]
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(seg.analogsignals[0].shape[-1], 5)
self.assertEqual(seg.analogsignals[0].sampling_rate.units,
pq.CompoundUnit('32*kHz'))
self.assertAlmostEqual(seg.analogsignals[0].t_start, t_start, places=4)
self.assertAlmostEqual(seg.analogsignals[0].t_stop, t_stop, places=4)
self.assertEqual(len(seg.spiketrains), 0) # no nse files available
# Testing different parameter combinations
block = nio.read_block(lazy=True)
self.assertEqual(len(block.segments[0].analogsignals[0]), 0)
block = nio.read_block(cascade=False)
self.assertEqual(len(block.segments), 0)
block = nio.read_block(electrode_list=[0])
self.assertEqual(len(block.segments[0].analogsignals), 1)
block = nio.read_block(t_starts=None, t_stops=None, events=True,
waveforms=True)
self.assertEqual(len(block.segments[0].analogsignals), 1)
self.assertEqual(len(block.segments[0].spiketrains), 0)
self.assertGreater(len(block.segments[0].events), 0)
self.assertEqual(len(block.channel_indexes), 5)
def test_read_segment(self):
"""Read data in a certain time range into one block"""
nio = NeuralynxIO(self.sn, use_cache='never')
seg = nio.read_segment(t_start=None, t_stop=None)
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(seg.analogsignals[0].shape[-1], 5)
self.assertEqual(seg.analogsignals[0].sampling_rate.units,
pq.CompoundUnit('32*kHz'))
self.assertEqual(len(seg.spiketrains), 0)
# Testing different parameter combinations
seg = nio.read_segment(lazy=True)
self.assertEqual(len(seg.analogsignals[0]), 0)
self.assertEqual(len(seg.spiketrains), 0)
seg = nio.read_segment(cascade=False)
self.assertEqual(len(seg.analogsignals), 0)
self.assertEqual(len(seg.spiketrains), 0)
seg = nio.read_segment(electrode_list=[0])
self.assertEqual(len(seg.analogsignals), 1)
seg = nio.read_segment(t_start=None, t_stop=None, events=True,
waveforms=True)
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(len(seg.spiketrains), 0)
self.assertTrue(len(seg.events) > 0)
class TestGaps(CommonTests, unittest.TestCase):
cheetah_version = '5.5.1'
def test_gap_handling(self):
nio = NeuralynxIO(self.sn, use_cache='never')
block = nio.read_block(t_starts=None, t_stops=None)
# known gap values
n_gaps = 1
self.assertEqual(len(block.segments), n_gaps + 1)
        # one channel index holding the analogsignals of each segment and
        # one for the spiketrains
self.assertEqual(len(block.channel_indexes), len(block.segments) + 1)
self.assertEqual(len(block.channel_indexes[-1].units), 2)
for unit in block.channel_indexes[-1].units:
self.assertEqual(len(unit.spiketrains), n_gaps + 1)
anasig_channels = [i for i in block.channel_indexes
if 'analogsignal' in i.name]
self.assertEqual(len(anasig_channels), n_gaps + 1)
def test_gap_warning(self):
nio = NeuralynxIO(self.sn, use_cache='never')
with reset_warning_registry():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
nio.read_block(t_starts=None, t_stops=None)
self.assertGreater(len(w), 0)
self.assertTrue(issubclass(w[0].category, UserWarning))
self.assertEqual('Substituted t_starts and t_stops in order to'
' skip gap in recording session.',
str(w[0].message))
def test_analogsignal_shortening_warning(self):
nio = NeuralynxIO(self.sn, use_cache='never')
with reset_warning_registry():
with warnings.catch_warnings(record=True) as w:
seg = Segment('testsegment')
nio.read_ncs(os.path.join(self.sn, 'Tet3a.ncs'), seg)
self.assertGreater(len(w), 0)
self.assertTrue(issubclass(w[0].category, UserWarning))
self.assertTrue('Analogsignalarray was shortened due to gap in'
' recorded data of file'
in str(w[0].message))
# This class is copied from
# 'http://bugs.python.org/file40031/reset_warning_registry.py' by Eli Collins
# and is related to http://bugs.python.org/issue21724 (affects Python < 3.4)
class reset_warning_registry(object):
"""
context manager which archives & clears warning registry for duration of
context.
:param pattern:
optional regex pattern, causes manager to only reset modules whose
names match this pattern. defaults to ``".*"``.
"""
#: regexp for filtering which modules are reset
_pattern = None
#: dict mapping module name -> old registry contents
_backup = None
def __init__(self, pattern=None):
self._pattern = re.compile(pattern or ".*")
def __enter__(self):
# archive and clear the __warningregistry__ key for all modules
# that match the 'reset' pattern.
pattern = self._pattern
backup = self._backup = {}
for name, mod in list(sys.modules.items()):
if pattern.match(name):
reg = getattr(mod, "__warningregistry__", None)
if reg:
backup[name] = reg.copy()
reg.clear()
return self
def __exit__(self, *exc_info):
# restore warning registry from backup
modules = sys.modules
backup = self._backup
for name, content in backup.items():
mod = modules.get(name)
if mod is None:
continue
reg = getattr(mod, "__warningregistry__", None)
if reg is None:
setattr(mod, "__warningregistry__", content)
else:
reg.clear()
reg.update(content)
# clear all registry entries that we didn't archive
pattern = self._pattern
for name, mod in list(modules.items()):
if pattern.match(name) and name not in backup:
reg = getattr(mod, "__warningregistry__", None)
if reg:
reg.clear()
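# Illustrative usage sketch (hypothetical values, mirroring test_gap_warning
# above): wrap a warnings.catch_warnings block so that warnings already
# emitted earlier in the process are reported again:
#
#     with reset_warning_registry():
#         with warnings.catch_warnings(record=True) as w:
#             warnings.simplefilter('always')
#             warnings.warn('gap detected', UserWarning)
#             assert issubclass(w[0].category, UserWarning)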
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "7d3a1f50caa396f6642b2b5e1838db45",
"timestamp": "",
"source": "github",
"line_count": 455,
"max_line_length": 79,
"avg_line_length": 39.2,
"alnum_prop": 0.5830343126261494,
"repo_name": "CINPLA/python-neo",
"id": "ecc54430b013c9f69a8f72c49043f48cacdff239",
"size": "17860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neo/test/iotest/test_neuralynxio.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1685331"
}
],
"symlink_target": ""
}
|
import datetime
from psoc_4m_base_network import *
class PSOC_4M_MOISTURE_UNIT(PSOC_BASE_4M):
def __init__(self,instrument):
self.system_id = 0x201
PSOC_BASE_4M.__init__( self, instrument, self.system_id)
        # additional write address definitions
self.check_one_wire_presence_addr = 27
self.make_soil_temperature_addr = 28
self.make_air_temp_humidity_addr = 29
self.force_moisture_reading_addr = 30
self.update_moisture_sensor_configuration_addr = 31
self.update_flash_addr = 33
self.clear_moisture_flag_addr = 34
self.sensor_length = 16
self.new_measurement_flag_start = 20
self.new_measurement_flag_list = [ "NEW_MOISTURE_DATA_FLAG"]
# status
self.status_start = 13
self.status_list = [
"ONE_WIRE_DEVICE_FOUND",
"NEW_MOISTURE_DATA_FLAG"
]
self.moisture_control_start = 15
self.moisture_control_list = [
"AIR_HUMIDITY_FLOAT" ,
"AIR_TEMP_FLOAT",
"MOISTURE_SOIL_TEMP_FLOAT",
"RESISTOR_FLOAT",
]
self.capacitance_mask_start = 23
self.capacitance_mask_list = [ "CAPACITANCE_MASK"]
# Moisture Data
self.moisture_data_start = 30
self.moisture_data_number = 16
self.moisture_data_resistive_start = 70
self.moisture_resistive_configuration_number = 16
# Moisture Configuration Data
self.moisture_configuration_start = 110
self.moisture_configuration_number = 16
#
#
# Read Variables
#
#
def check_status( self, address):
return_value = {}
data = self.instrument.read_registers( address, self.status_start, len(self.status_list) )
for i in range(0,len(self.status_list)):
return_value[ self.status_list[i] ] = data[i]
return return_value
def read_moisture_control(self, address ):
return_value = {}
data = self.instrument.read_floats( address, self.moisture_control_start, len(self.moisture_control_list) )
for i in range(0,len(self.moisture_control_list)):
return_value[ self.moisture_control_list[i] ] = data[i]
return return_value
def read_moisture_data( self ,address ):
return_value = {}
        data = self.instrument.read_floats( address, self.moisture_data_start ,self.moisture_data_number )
return data
def read_moisture_resistive_data( self ,address ):
return_value = {}
data = self.instrument.read_floats( address, self.moisture_data_resistive_start ,self.moisture_resistive_configuration_number )
return data
def read_moisture_configuration( self, address ):
return_value = {}
data = self.instrument.read_registers( address,self.moisture_configuration_start,self.moisture_configuration_number )
return data
def check_one_wire_presence ( self, address): #sampling rate is in minutes
self.instrument.write_registers(address, self.check_one_wire_presence_addr, [0] )
def make_soil_temperature ( self, address): #sampling rate is in minutes
self.instrument.write_registers(address, self.make_soil_temperature_addr, [0] )
def make_air_temp_humidity( self, address): #sampling rate is in minutes
self.instrument.write_registers(address, self.make_air_temp_humidity_addr, [0] )
def clear_new_moisture_data_flag( self, address):
self.instrument.write_registers( address, self.clear_moisture_flag_addr, [0] )
def force_moisture_reading ( self, address): #sampling rate is in minutes
self.instrument.write_registers(address, self.force_moisture_reading_addr, [0] )
def update_moisture_sensor_configuration ( self,address, sensor_data ): # sensor data consisting of 0,1,2
if len( sensor_data) != self.sensor_length :
            raise ValueError("sensor_data must contain %d entries" % self.sensor_length)
valid_data = set([0,1,2])
for i in sensor_data:
if i not in valid_data:
                raise ValueError("sensor_data entries must be 0, 1 or 2")
self.instrument.write_registers( address, self.update_moisture_sensor_configuration_addr ,sensor_data )
if __name__ == "__main__":
import new_instrument_network
import time
new_instrument = new_instrument_network.new_instrument_network()
new_instrument.set_ip(ip= "192.168.1.82", port = 5005)
psoc_moisture = PSOC_4M_MOISTURE_UNIT( new_instrument )
#psoc_moisture.update_current_time( 40 )
print psoc_moisture.clear_new_moisture_data_flag(40)
print psoc_moisture.check_status(40)
print psoc_moisture.check_one_wire_presence(40)
time.sleep(.3)
print psoc_moisture.make_soil_temperature(40)
time.sleep(.3)
print psoc_moisture.make_air_temp_humidity(40)
time.sleep(.3)
print psoc_moisture.make_air_temp_humidity(40)
time.sleep(.3)
# test read functions first
print psoc_moisture.check_status(40)
print psoc_moisture.read_moisture_control(40)
#psoc_moisture. update_moisture_sensor_configuration ( 40,[ 2,1,1,0,1,2,1,0,0,0,0,0,0,0,0,0] )
print psoc_moisture.read_moisture_configuration( 40 )
print psoc_moisture.force_moisture_reading(40)
time.sleep(1.)
print psoc_moisture.read_moisture_data(40)
print psoc_moisture.read_moisture_resistive_data(40)
quit()
#print psoc_moisture.force_moisture_reading(40)
time.sleep(1)
print psoc_moisture.read_moisture_resistive_data( 40 )
print psoc_moisture.read_moisture_data(40)
print psoc_moisture.check_status(40)
'''
# test directed actions
#psoc_moisture.check_one_wire_presence(40)
#psoc_moisture.make_soil_temperature(40)
psoc_moisture.force_moisture_reading(40)
'''
'''
print "new_data_flag",psoc_moisture.check_new_data_flag( 40)
print "capacitance_mask", psoc_moisture.read_capacitor_mask(40)
print psoc_moisture.read_moisture_control( 40 )
print psoc_moisture.read_moisture_configuration( 40 )
psoc_moisture.change_capacitance_sensor_mask( 40, 0xf)
psoc_moisture. update_moisture_sensor_configuration ( 40,[ 2,1,1,1,1,0,0,0] )
psoc_moisture.update_flash(40)
print "capacitance_mask", psoc_moisture.read_capacitor_mask(40)
print psoc_moisture.read_moisture_control( 40 )
print psoc_moisture.read_moisture_configuration( 40 )
print "force moisture measurement", psoc_moisture.force_moisture_reading(40)
quit()
print psoc_moisture.read_moisture_data(40)
'''
|
{
"content_hash": "7fd9ae2be0370541c121a52edaccdfd8",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 138,
"avg_line_length": 34.85,
"alnum_prop": 0.5620190426503195,
"repo_name": "glenn-edgar/local_controller_3",
"id": "a4aea303404910b4c00c09233ae8c2af36e5cafc",
"size": "7669",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "io_control_py3/moisture/psoc_4m_moisture_sensor_network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2510"
},
{
"name": "CSS",
"bytes": "4575415"
},
{
"name": "HTML",
"bytes": "2215958"
},
{
"name": "JavaScript",
"bytes": "9981211"
},
{
"name": "Makefile",
"bytes": "5136"
},
{
"name": "PHP",
"bytes": "124476"
},
{
"name": "Python",
"bytes": "4396570"
},
{
"name": "Shell",
"bytes": "569"
},
{
"name": "Smalltalk",
"bytes": "252"
},
{
"name": "TeX",
"bytes": "3153"
},
{
"name": "TypeScript",
"bytes": "11006"
}
],
"symlink_target": ""
}
|
import sys
import tkinter as tk
import tkplot
import threading
from queue import Queue, Empty
import serial
import struct
from collections import namedtuple
import time
import csv
import math
def execute_delayed(root, generator):
"""For each yielded value wait the given amount of time (in seconds)
without pausing the Tkinter main loop.
See 'slowmotion' in http://effbot.org/zone/tkinter-generator-polyline.htm
"""
try:
        root.after(int(next(generator) * 1000), execute_delayed, root, generator)
except StopIteration:
pass
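# Illustrative sketch of how execute_delayed() is typically driven (the names
# below are hypothetical): the generator yields the pause, in seconds, before
# its next step, and execute_delayed() reschedules itself via root.after() so
# the Tk main loop never blocks.
#
#     def blink(label):
#         for _ in range(5):
#             label.config(bg='red')
#             yield 0.5          # pause half a second without freezing the UI
#             label.config(bg='white')
#             yield 0.5
#
#     root = tk.Tk()
#     label = tk.Label(root, text='hello')
#     label.pack()
#     execute_delayed(root, blink(label))
#     root.mainloop()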
class Status(namedtuple('Status', ['local_time', 'weight_reading'])):
@property
def temp(self):
return self.weight_reading
START_TIME = time.time()
def local_time():
return time.time() - START_TIME
class Arduino:
def __init__(self, port, filename):
self.filename = filename
self.port = port
self.status = Queue()
self.command = Queue()
self.thread = threading.Thread(target=self.interact, daemon=True)
self.started = threading.Event()
self.last_status = None
self._power = None
def line_status(self, line):
weight_reading = float(line.strip())
return Status(local_time(), weight_reading)
def interact(self):
with open(self.filename, 'wb') as f:
self.serial = serial.Serial(self.port, 9600)
try:
self.started.set()
while True:
try:
while True:
self.serial.write(self.command.get_nowait())
except Empty:
pass
line = self.serial.readline()
try:
status = self.line_status(line)
except ValueError:
continue
f.write(line)
f.flush()
self.status.put_nowait(status)
self.last_status = status
finally:
self.started.clear()
def iter_status(self):
assert(self.started.is_set())
try:
while True:
status = self.status.get_nowait()
yield status
except Empty:
pass
def __str__(self):
return "<{} {}>".format(self.__class__.__name__, self.last_status if self.started.is_set() else '(stopped)')
@property
def calibrationWeight(self):
assert(self.started.is_set())
return self._power
@calibrationWeight.setter
def calibrationWeight(self, calibrationWeight):
assert(self.started.is_set())
assert(0 <= calibrationWeight <= 2**24)
        command = struct.pack('4sc', str.encode(str(int(calibrationWeight))), b'\n')
        self.command.put(command)
        self._power = calibrationWeight  # keep the getter in sync with the last value sent
def start(self):
self.thread.start()
self.started.wait()
class HeatPlot(tkplot.TkPlot):
def __init__(self, root):
tkplot.TkPlot.__init__(self, root, (9, 6))
self.plot = self.figure.add_subplot(111)
self.plot.set_xlabel("Time (s)")
self.plot.set_ylabel("Weight (g)")
self.plot.set_xlim(0, 1)
self.plot.set_ylim(0, 110)
self.weight_reading_line, = self.plot.plot([], [], label="Weight, g", linewidth=3.0)
self.plot.legend(handles=[self.weight_reading_line], bbox_to_anchor=(0.3, 1))
self.figure.tight_layout()
def update(self, status):
time = [s.local_time for s in status]
weight_reading = [s.weight_reading for s in status]
time = time[-200:]
weight_reading = weight_reading[-200:]
if time:
self.plot.set_xlim(min(time), max(time))
self.plot.set_ylim(0, max(110, round(max(weight_reading) / 50.0 + 0.5) * 50 + 10))
self.weight_reading_line.set_xdata(time)
self.weight_reading_line.set_ydata(weight_reading)
self.figure.canvas.draw()
class Krosnis:
def __init__(self, root, port, experiment):
self.root = root
self.root.title("Scales - {}".format(experiment))
self.experiment = experiment
self.update_period = 1.0
self.plot = HeatPlot(self.root)
self.plot.pack(fill=tk.BOTH, expand=1)
self.toolbar = tk.Frame(self.root)
self.toolbar.pack(fill=tk.X)
self.label = tk.Label(self.toolbar)
self.label.pack(side=tk.RIGHT, fill=tk.BOTH, expand=1)
self.power_val = tk.StringVar()
self.power_val.set('0')
self.power = tk.Entry(self.toolbar, textvariable=self.power_val)
self.power.bind('<Return>', self.set_calibrationWeight)
self.power.pack(side=tk.LEFT)
self.power.focus_set()
self.set_power = tk.Button(self.toolbar, text='Set weight', command=self.set_calibrationWeight)
self.set_power.pack(side=tk.LEFT)
# self.setpoint_val = tk.StringVar()
# self.setpoint_val.set('0.0')
# self.setpoint = tk.Entry(self.toolbar, textvariable=self.setpoint_val)
# self.setpoint.bind('<Return>', self.set_setpoint)
# self.setpoint.pack(side=tk.LEFT)
# self.setpoint.focus_set()
# self.set_setpoint = tk.Button(self.toolbar, text='Set temperature', command=self.set_setpoint)
# self.set_setpoint.pack(side=tk.LEFT)
self.arduino = Arduino(port, "experiments/{}_raw.csv".format(experiment))
self.every_status = []
        self.th0 = None  # first reading, captured lazily in sample()
def set_status(self, status):
self.label.config(text=status)
def set_calibrationWeight(self, event=None):
self.arduino.calibrationWeight = float(self.power_val.get())
# def set_setpoint(self, event=None):
# self.arduino.setpoint = float(self.setpoint_val.get())
def start(self):
_self = self
def shell():
self = _self
threading.Thread(target=shell, daemon=True).start()
execute_delayed(self.root, self.sample())
def time_deviation(self):
if self.every_status:
t0 = self.every_status[0].time
t0_local = self.every_status[0].local_time
t_sum = 0
for s in self.every_status:
t_sum += (s.time - t0) - (s.local_time - t0_local)
return t_sum / len(self.every_status)
else:
return 0
def control(self):
pass
def sample(self):
self.arduino.start()
with open("experiments/{}.csv".format(self.experiment), 'w') as f:
csvf = csv.writer(f)
csvf.writerow(Status._fields)
while True:
try:
for s in self.arduino.iter_status():
if self.th0 is None:
self.th0 = s.temp
csvf.writerow(s)
self.set_status(str(s))
self.every_status.append(s)
f.flush()
self.plot.update(self.every_status)
self.control()
yield self.update_period
except Exception as e:
print(e)
def run(port, experiment):
root = tk.Tk()
root.geometry("1000x700")
win = Krosnis(root, port, experiment)
win.start()
tk.mainloop()
if __name__ == "__main__":
run(sys.argv[1], sys.argv[2])
|
{
"content_hash": "1c623990f05ed2f63d220381f3b64cbc",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 116,
"avg_line_length": 32.334782608695654,
"alnum_prop": 0.5600376495898884,
"repo_name": "Miceuz/ScaleHack",
"id": "1482a620e189d6da0b23ac05c42a99e15c432560",
"size": "7461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graph/weight-graph.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3962"
},
{
"name": "Python",
"bytes": "9253"
}
],
"symlink_target": ""
}
|
import tkinter as tk
def callback(ev):
print(ev.x, ev.y)
root = tk.Tk()
frame = tk.Frame(root, width=320, height=240)
frame.bind('<Motion>', callback)
frame.pack()
root.mainloop()
|
{
"content_hash": "ac1c16a93693c2a04f22217747dc74e8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 45,
"avg_line_length": 14.538461538461538,
"alnum_prop": 0.6666666666666666,
"repo_name": "Yuvv/LearnTestDemoTempMini",
"id": "099028d6546f75234aa2aa1719d2e77ec90b9efd",
"size": "189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py-tkinter/iTkinter/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "154280"
},
{
"name": "C++",
"bytes": "15779"
},
{
"name": "CSS",
"bytes": "13766"
},
{
"name": "HTML",
"bytes": "22273"
},
{
"name": "JavaScript",
"bytes": "17383"
},
{
"name": "PLpgSQL",
"bytes": "1493"
},
{
"name": "Python",
"bytes": "198537"
},
{
"name": "SQLPL",
"bytes": "1960"
},
{
"name": "Shell",
"bytes": "5487"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Post'
db.create_table('blog_post', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=255)),
('content', self.gf('markitup.fields.MarkupField')(no_rendered_field=True)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, blank=True)),
('is_published', self.gf('django.db.models.fields.BooleanField')(default=True)),
('_content_rendered', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('blog', ['Post'])
def backwards(self, orm):
# Deleting model 'Post'
db.delete_table('blog_post')
models = {
'blog.post': {
'Meta': {'ordering': "('-date_created',)", 'object_name': 'Post'},
'_content_rendered': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content': ('markitup.fields.MarkupField', [], {'no_rendered_field': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['blog']
|
{
"content_hash": "bee486ef226fa7466d6665cdd4cd4d9a",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 138,
"avg_line_length": 50.18604651162791,
"alnum_prop": 0.5885078776645042,
"repo_name": "fatiherikli/dbpatterns",
"id": "18e5694f9f8aa424c909bf2a75b29e11396cd90c",
"size": "2182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/dbpatterns/blog/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "50678"
},
{
"name": "Elixir",
"bytes": "489"
},
{
"name": "Gherkin",
"bytes": "6360"
},
{
"name": "HTML",
"bytes": "49402"
},
{
"name": "JavaScript",
"bytes": "735986"
},
{
"name": "Python",
"bytes": "106434"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
}
|
import optimus.project_specific
|
{
"content_hash": "c799b6554b18fcbbda1caf7f7618b74a",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 32,
"alnum_prop": 0.875,
"repo_name": "PFWhite/optimus",
"id": "781e2472c21d470b1cf28a15b1f177be223bee1b",
"size": "32",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "optimus/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21396"
}
],
"symlink_target": ""
}
|
import datetime
import logging
import os
import random
import sys
import unittest
import mock
import test_env_handlers
import webapp2
import webtest
from google.appengine.api import app_identity
from google.appengine.api import memcache
from google.appengine.ext import ndb
from google.protobuf import struct_pb2
from google.protobuf import timestamp_pb2
from google.protobuf import duration_pb2
from test_support import test_case
from components import auth
from components import utils
from components import prpc
from components.prpc import encoding
from proto.api import swarming_pb2 # pylint: disable=no-name-in-module
from proto.api.internal.bb import backend_pb2
from proto.api.internal.bb import common_pb2
from proto.api.internal.bb import launcher_pb2
from proto.config import config_pb2
from server import config
from server import task_queues
from server import task_request
from server import task_result
from server import task_scheduler
from server import pools_config
from server import realms
from server import service_accounts
import handlers_bot
import handlers_prpc
def _decode(raw, dst):
  # Strip the ")]}'\n" prefix that pRPC prepends to JSON response bodies.
assert raw[:5] == ')]}\'\n', raw[:5]
return encoding.get_decoder(encoding.Encoding.JSON)(raw[5:], dst)
def _encode(d):
  # The JSON encoder emits the same ")]}'\n" prefix; drop it before POSTing.
raw = encoding.get_encoder(encoding.Encoding.JSON)(d)
assert raw[:5] == ')]}\'\n', raw[:5]
return raw[5:]
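# Note on the wire format (illustrative values): pRPC JSON bodies carry a
# 5-byte ")]}'\n" prefix, so a raw response looks roughly like
#
#     )]}'\n{"tasks": [{"id": {"id": "5cfcee8008811"}, "status": "SUCCESS"}]}
#
# _decode() strips that prefix before parsing into a proto message, and
# _encode() drops it from the payload a test POSTs to a handler.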
class TaskBackendAPIServiceTest(test_env_handlers.AppTestBase):
  # These tests fail with 'Unknown bot ID, not in config'
# Need to run in sequential_test_runner.py
no_run = 1
def setUp(self):
super(TaskBackendAPIServiceTest, self).setUp()
s = prpc.Server()
s.add_interceptor(auth.prpc_interceptor)
s.add_service(handlers_prpc.TaskBackendAPIService())
# TODO(crbug/1236848) call handlers_prpc.get_routes() when
# the Backend is ready and added.
routes = s.get_routes() + handlers_bot.get_routes()
self.app = webtest.TestApp(
        webapp2.WSGIApplication(routes, debug=True),
extra_environ={
'REMOTE_ADDR': self.source_ip,
'SERVER_SOFTWARE': os.environ['SERVER_SOFTWARE'],
},
)
self._headers = {
'Content-Type': encoding.Encoding.JSON[1],
'Accept': encoding.Encoding.JSON[1],
}
now = datetime.datetime(2019, 1, 2, 3)
test_case.mock_now(self, now, 0)
self.mock_tq_tasks()
# Test helpers.
def _req_dim_prpc(self, key, value, exp_secs=None):
# type: (str, str, Optional[int]) -> common_pb2.RequestedDimension
dim = common_pb2.RequestedDimension(key=key, value=value)
if exp_secs is not None:
dim.expiration.seconds = exp_secs
return dim
def _basic_run_task_request(self):
return backend_pb2.RunTaskRequest(
secrets=launcher_pb2.BuildSecrets(build_token='tok'),
realm='some:realm',
build_id='42423',
agent_args=['-fantasia', 'pegasus'],
backend_config=struct_pb2.Struct(
fields={
'priority':
struct_pb2.Value(number_value=1),
'wait_for_capacity':
struct_pb2.Value(bool_value=True),
'bot_ping_tolerance':
struct_pb2.Value(number_value=70),
'service_account':
struct_pb2.Value(string_value='who@serviceaccount.com'),
'agent_binary_cipd_filename':
struct_pb2.Value(string_value='agent'),
'agent_binary_cipd_pkg':
struct_pb2.Value(string_value='agent/package/${platform}'),
'agent_binary_cipd_vers':
struct_pb2.Value(string_value='latest'),
}),
grace_period=duration_pb2.Duration(seconds=60),
execution_timeout=duration_pb2.Duration(seconds=60),
start_deadline=timestamp_pb2.Timestamp(
seconds=int(utils.time_time() + 120)),
dimensions=[self._req_dim_prpc('pool', 'default')],
backend_token='token-token-token',
buildbucket_host='cow-buildbucket.appspot.com',
)
# Tests
def test_run_task(self):
self.set_as_user()
# Mocks for process_task_requests()
# adds pool configs for which user is a `scheduling_user`
self.mock_default_pool_acl([])
self.mock(realms, 'check_tasks_create_in_realm', lambda *_: True)
self.mock(realms, 'check_pools_create_task', lambda *_: True)
self.mock(realms, 'check_tasks_act_as', lambda *_: True)
self.mock(service_accounts, 'has_token_server', lambda: True)
request = self._basic_run_task_request()
request_id = 'cf60878f-8f2a-4f1e-b1f5-8b5ec88813a9'
request.request_id = request_id
self.app.post('/prpc/swarming.backend.TaskBackend/RunTask',
_encode(request), self._headers)
self.assertEqual(1, task_request.TaskRequest.query().count())
self.assertEqual(1, task_request.BuildToken.query().count())
self.assertEqual(1, task_request.SecretBytes.query().count())
request_idempotency_key = 'request_id/%s/%s' % (
request_id, auth.get_current_identity().to_bytes())
self.assertIsNotNone(
memcache.get(request_idempotency_key, namespace='backend_run_task'))
# Test requests are correctly deduped if `request_id` matches.
self.app.post('/prpc/swarming.backend.TaskBackend/RunTask',
_encode(request), self._headers)
self.assertEqual(1, task_request.TaskRequest.query().count())
self.assertEqual(1, task_request.BuildToken.query().count())
self.assertEqual(1, task_request.SecretBytes.query().count())
# Test tasks with different `request_id`s are not deduped.
request.request_id = 'cf60878f-8f2a-4f1e-b1f5-8b5ec88813a8'
self.app.post('/prpc/swarming.backend.TaskBackend/RunTask',
_encode(request), self._headers)
self.assertEqual(2, task_request.TaskRequest.query().count())
self.assertEqual(2, task_request.BuildToken.query().count())
self.assertEqual(2, task_request.SecretBytes.query().count())
def test_run_task_exceptions_bad_conversion(self):
self.set_as_user()
request = backend_pb2.RunTaskRequest()
raw_resp = self.app.post(
'/prpc/swarming.backend.TaskBackend/RunTask',
_encode(request),
self._headers,
expect_errors=True)
self.assertEqual(raw_resp.status, '400 Bad Request')
self.assertIn(('X-Prpc-Grpc-Code', '3'), raw_resp._headerlist)
self.assertIn('must be a valid package', raw_resp.body)
def test_run_task_exceptions_schedule_request_error(self):
self.set_as_user()
# Mocks for process_task_requests()
# adds pool configs for which user is a `scheduling_user`
self.mock_default_pool_acl([])
self.mock(realms, 'check_tasks_create_in_realm', lambda *_: True)
self.mock(realms, 'check_pools_create_task', lambda *_: True)
self.mock(realms, 'check_tasks_act_as', lambda *_: True)
self.mock(service_accounts, 'has_token_server', lambda: True)
# pylint: disable=unused-argument
def mocked_schedule_request(_,
start_time=0,
secret_bytes=None,
build_token=None):
raise TypeError('chicken')
self.mock(task_scheduler, 'schedule_request', mocked_schedule_request)
request = self._basic_run_task_request()
raw_resp = self.app.post(
'/prpc/swarming.backend.TaskBackend/RunTask',
_encode(request),
self._headers,
expect_errors=True)
self.assertEqual(raw_resp.status, '400 Bad Request')
self.assertIn(('X-Prpc-Grpc-Code', '3'), raw_resp._headerlist)
self.assertEqual(raw_resp.body, 'chicken')
def test_run_task_exceptions_auth_error(self):
request = self._basic_run_task_request()
raw_resp = self.app.post(
'/prpc/swarming.backend.TaskBackend/RunTask',
_encode(request),
self._headers,
expect_errors=True)
self.assertEqual(raw_resp.status, '403 Forbidden')
self.assertIn(('X-Prpc-Grpc-Code', '7'), raw_resp._headerlist)
self.assertEqual(raw_resp.body, 'User cannot create tasks.')
@mock.patch('components.utils.enqueue_task')
def test_cancel_tasks(self, mocked_enqueue_task):
self.mock_default_pool_acl([])
mocked_enqueue_task.return_value = True
# Create bot
self.set_as_bot()
self.bot_poll()
# Create two tasks, one COMPLETED, one PENDING
# first request
self.set_as_user()
_, first_id = self.client_create_task_raw(
name='first',
tags=['project:yay', 'commit:post'],
properties=dict(idempotent=True))
self.set_as_bot()
self.bot_run_task()
# second request
self.set_as_user()
_, second_id = self.client_create_task_raw(
name='second',
user='jack@localhost',
tags=['project:yay', 'commit:pre'])
request = backend_pb2.CancelTasksRequest(task_ids=[
backend_pb2.TaskID(id=str(first_id)),
backend_pb2.TaskID(id=str(second_id)),
backend_pb2.TaskID(id='1d69b9f088008810'), # Does not exist.
])
target = 'swarming://%s' % app_identity.get_application_id()
expected_response = backend_pb2.CancelTasksResponse(tasks=[
backend_pb2.Task(
id=backend_pb2.TaskID(target=target, id=first_id),
status=common_pb2.SUCCESS),
# Task to cancel this should be enqueued
backend_pb2.Task(
id=backend_pb2.TaskID(target=target, id=second_id),
status=common_pb2.SCHEDULED),
backend_pb2.Task(
id=backend_pb2.TaskID(target=target, id='1d69b9f088008810'),
summary_html='Swarming task 1d69b9f088008810 not found',
status=common_pb2.INFRA_FAILURE),
])
self.mock_auth_db([auth.Permission('swarming.pools.cancelTask')])
raw_resp = self.app.post(
'/prpc/swarming.backend.TaskBackend/CancelTasks',
_encode(request),
self._headers,
expect_errors=False)
resp = backend_pb2.CancelTasksResponse()
_decode(raw_resp.body, resp)
self.assertEqual(resp, expected_response)
utils.enqueue_task.assert_has_calls([
mock.call(
'/internal/taskqueue/important/tasks/cancel-children-tasks',
'cancel-children-tasks',
payload=mock.ANY),
mock.call(
'/internal/taskqueue/important/tasks/cancel',
'cancel-tasks',
payload='{"kill_running": true, "tasks": ["%s"]}' % second_id),
])
def test_cancel_tasks_permission_denied(self):
self.mock_default_pool_acl([])
# Create task
self.set_as_user()
_, first_id = self.client_create_task_raw(
name='first',
tags=['project:yay', 'commit:post'],
properties=dict(idempotent=True))
request = backend_pb2.CancelTasksRequest(task_ids=[
backend_pb2.TaskID(id=str(first_id)),
])
self.mock_auth_db([])
raw_resp = self.app.post(
'/prpc/swarming.backend.TaskBackend/CancelTasks',
_encode(request),
self._headers,
expect_errors=True)
self.assertEqual(raw_resp.status, '403 Forbidden')
self.assertIn(('X-Prpc-Grpc-Code', '7'), raw_resp._headerlist)
self.assertEqual(raw_resp.body, ('user "user@example.com" does not have '
'permission "swarming.pools.cancelTask"'))
@mock.patch('handlers_prpc._CANCEL_TASKS_LIMIT', 2)
def test_cancel_tasks_too_many(self):
self.set_as_user()
request = backend_pb2.CancelTasksRequest(task_ids=[
backend_pb2.TaskID(id=str("1")),
backend_pb2.TaskID(id=str("2")),
backend_pb2.TaskID(id=str("3")),
])
raw_resp = self.app.post(
'/prpc/swarming.backend.TaskBackend/CancelTasks',
_encode(request),
self._headers,
expect_errors=True)
self.assertEqual(raw_resp.status, '400 Bad Request')
self.assertIn(('X-Prpc-Grpc-Code', '3'), raw_resp._headerlist)
self.assertEqual(raw_resp.body, ('Requesting 3 tasks for cancellation '
'when the allowed max is 2.'))
def test_fetch_tasks(self):
self.mock_default_pool_acl([])
# Create bot
self.set_as_bot()
self.bot_poll()
# Create two tasks, one COMPLETED, one PENDING
self.set_as_user()
# first request
_, first_id = self.client_create_task_raw(
name='first',
tags=['project:yay', 'commit:post'],
properties=dict(idempotent=True))
self.set_as_bot()
self.bot_run_task()
# Clear cache to test fetching from datastore path.
ndb.get_context().clear_cache()
memcache.flush_all()
# second request
self.set_as_user()
_, second_id = self.client_create_task_raw(
name='second',
user='jack@localhost',
tags=['project:yay', 'commit:pre'])
request = backend_pb2.FetchTasksRequest(task_ids=[
backend_pb2.TaskID(id=str(first_id)),
backend_pb2.TaskID(id=str(second_id)),
backend_pb2.TaskID(id='1d69b9f088008810'), # Does not exist.
])
target = 'swarming://%s' % app_identity.get_application_id()
expected_response = backend_pb2.FetchTasksResponse(tasks=[
backend_pb2.Task(
id=backend_pb2.TaskID(target=target, id=first_id),
status=common_pb2.SUCCESS),
backend_pb2.Task(
id=backend_pb2.TaskID(target=target, id=second_id),
status=common_pb2.SCHEDULED),
backend_pb2.Task(
id=backend_pb2.TaskID(target=target, id='1d69b9f088008810'),
summary_html='Swarming task 1d69b9f088008810 not found',
status=common_pb2.INFRA_FAILURE),
])
self.mock_auth_db([auth.Permission('swarming.pools.listTasks')])
raw_resp = self.app.post('/prpc/swarming.backend.TaskBackend/FetchTasks',
_encode(request), self._headers)
resp = backend_pb2.FetchTasksResponse()
_decode(raw_resp.body, resp)
self.assertEqual(resp, expected_response)
def test_fetch_tasks_forbidden(self):
self.mock_default_pool_acl([])
# Create task
self.set_as_user()
# first request
_, first_id = self.client_create_task_raw(
name='first',
tags=['project:yay', 'commit:post'],
properties=dict(idempotent=True))
request = backend_pb2.FetchTasksRequest(task_ids=[
backend_pb2.TaskID(id=str(first_id)),
])
self.mock_auth_db([])
raw_resp = self.app.post(
'/prpc/swarming.backend.TaskBackend/FetchTasks',
_encode(request),
self._headers,
expect_errors=True)
self.assertEqual(raw_resp.status, '403 Forbidden')
self.assertIn(('X-Prpc-Grpc-Code', '7'), raw_resp._headerlist)
self.assertEqual(raw_resp.body, ('user "user@example.com" does not have '
'permission "swarming.pools.listTasks"'))
@mock.patch('handlers_prpc._FETCH_TASKS_LIMIT', 2)
def test_fetch_tasks_too_many(self):
self.set_as_user()
request = backend_pb2.FetchTasksRequest(task_ids=[
backend_pb2.TaskID(id='1'),
backend_pb2.TaskID(id='2'),
backend_pb2.TaskID(id='3'),
])
self.mock_auth_db([])
raw_resp = self.app.post(
'/prpc/swarming.backend.TaskBackend/FetchTasks',
_encode(request),
self._headers,
expect_errors=True)
self.assertEqual(raw_resp.status, '400 Bad Request')
self.assertIn(('X-Prpc-Grpc-Code', '3'), raw_resp._headerlist)
self.assertEqual(raw_resp.body,
'Requesting 3 tasks when the allowed max is 2.')
def test_validate_configs(self):
request = backend_pb2.ValidateConfigsRequest(
configs=[
backend_pb2.ValidateConfigsRequest.ConfigContext(
target='the-one-with-bad-values',
config_json=struct_pb2.Struct(fields={
'priority':
struct_pb2.Value(
number_value=task_request.MAXIMUM_PRIORITY + 1),
'bot_ping_tolerance':
struct_pb2.Value(
number_value=\
task_request._MAX_BOT_PING_TOLERANCE_SECS),
'service_account':
struct_pb2.Value(string_value='bot'),
'agent_binary_cipd_filename':
struct_pb2.Value(string_value='agent'),
'agent_binary_cipd_pkg':
struct_pb2.Value(
string_value='agent/package/${platform}'),
'agent_binary_cipd_vers':
struct_pb2.Value(string_value='3'),
})),
])
raw_resp = self.app.post(
'/prpc/swarming.backend.TaskBackend/ValidateConfigs',
_encode(request), self._headers)
resp = backend_pb2.ValidateConfigsResponse()
_decode(raw_resp.body, resp)
self.assertEqual(
resp,
backend_pb2.ValidateConfigsResponse(
config_errors=[
backend_pb2.ValidateConfigsResponse.ErrorDetail(
index=0,
error="priority (256) must be between 0 and 255 (inclusive)")]
))
class PRPCTest(test_env_handlers.AppTestBase):
  """Tests the pRPC handlers."""
  # These tests fail with 'Unknown bot ID, not in config'
  # Need to run in sequential_test_runner.py
  no_run = 1
def setUp(self):
super(PRPCTest, self).setUp()
# handlers_bot is necessary to run fake tasks.
routes = handlers_prpc.get_routes() + handlers_bot.get_routes()
self.app = webtest.TestApp(
webapp2.WSGIApplication(routes, debug=True),
extra_environ={
'REMOTE_ADDR': self.source_ip,
'SERVER_SOFTWARE': os.environ['SERVER_SOFTWARE'],
},
)
self._headers = {
'Content-Type': encoding.Encoding.JSON[1],
'Accept': encoding.Encoding.JSON[1],
}
self.now = datetime.datetime(2010, 1, 2, 3, 4, 5)
self.mock_now(self.now)
self.mock_default_pool_acl([])
self.mock_tq_tasks()
def _test_bot_events_simple(self, request):
self.set_as_bot()
self.do_handshake()
self.set_as_user()
raw_resp = self.app.post(
'/prpc/swarming.v1.BotAPI/Events', _encode(request), self._headers)
expected = swarming_pb2.BotEventsResponse(
events=[
swarming_pb2.BotEvent(
event_time=timestamp_pb2.Timestamp(seconds=1262401445),
bot=swarming_pb2.Bot(
bot_id='bot1',
pools=['default'],
info=swarming_pb2.BotInfo(
supplemental=struct_pb2.Struct(
fields={
'running_time': struct_pb2.Value(number_value=1234.0),
'sleep_streak': struct_pb2.Value(number_value=0),
'started_ts': struct_pb2.Value(number_value=1410990411.11),
}),
external_ip='192.168.2.2',
authenticated_as='bot:whitelisted-ip',
version='123',
),
dimensions=[
swarming_pb2.StringListPair(key='id', values=['bot1']),
swarming_pb2.StringListPair(key='os', values=['Amiga']),
swarming_pb2.StringListPair(key='pool', values=['default']),
]),
event=swarming_pb2.BOT_NEW_SESSION,
),
])
resp = swarming_pb2.BotEventsResponse()
_decode(raw_resp.body, resp)
self.assertEqual(unicode(expected), unicode(resp))
def test_botevents_empty(self):
# Minimum request, all optional fields left out.
self._test_bot_events_simple(swarming_pb2.BotEventsRequest(bot_id=u'bot1'))
def test_botevents_empty_time(self):
msg = swarming_pb2.BotEventsRequest(bot_id=u'bot1')
msg.start_time.FromDatetime(self.now)
msg.end_time.FromDatetime(self.now + datetime.timedelta(seconds=1))
self._test_bot_events_simple(msg)
def test_botevents_missing(self):
# No such bot.
msg = swarming_pb2.BotEventsRequest(bot_id=u'unknown')
raw_resp = self.app.post(
'/prpc/swarming.v1.BotAPI/Events', _encode(msg), self._headers,
expect_errors=True)
self.assertEqual(raw_resp.status, '404 Not Found')
self.assertEqual(raw_resp.body, 'Bot does not exist')
def test_botevents_invalid_page_size(self):
msg = swarming_pb2.BotEventsRequest(bot_id=u'bot1', page_size=-1)
raw_resp = self.app.post(
'/prpc/swarming.v1.BotAPI/Events', _encode(msg), self._headers,
expect_errors=True)
self.assertEqual(raw_resp.status, '400 Bad Request')
self.assertEqual(raw_resp.body, 'page_size must be positive')
def test_botevents_invalid_bot_id(self):
# Missing bot_id
msg = swarming_pb2.BotEventsRequest()
raw_resp = self.app.post(
'/prpc/swarming.v1.BotAPI/Events', _encode(msg), self._headers,
expect_errors=True)
self.assertEqual(raw_resp.status, '400 Bad Request')
self.assertEqual(raw_resp.body, 'specify bot_id')
def test_botevents_start_end(self):
msg = swarming_pb2.BotEventsRequest(bot_id=u'bot1')
msg.start_time.FromDatetime(self.now)
msg.end_time.FromDatetime(self.now)
raw_resp = self.app.post(
'/prpc/swarming.v1.BotAPI/Events', _encode(msg), self._headers,
expect_errors=True)
self.assertEqual(raw_resp.status, '400 Bad Request')
self.assertEqual(raw_resp.body, 'start_time must be before end_time')
def test_botevents(self):
# Run one task.
self.mock(random, 'getrandbits', lambda _: 0x88)
self.set_as_bot()
self.mock_now(self.now, 0)
params = self.do_handshake()
self.mock_now(self.now, 30)
self.bot_poll(params=params)
self.set_as_user()
now_60 = self.mock_now(self.now, 60)
self.client_create_task_raw()
self.set_as_bot()
self.mock_now(self.now, 120)
res = self.bot_poll(params=params)
now_180 = self.mock_now(self.now, 180)
response = self.bot_complete_task(task_id=res['manifest']['task_id'])
self.assertEqual({u'must_stop': False, u'ok': True}, response)
self.mock_now(self.now, 240)
params['event'] = 'bot_rebooting'
params['message'] = 'for the best'
# TODO(maruel): https://crbug.com/913953
response = self.post_json('/swarming/api/v1/bot/event', params)
self.assertEqual({}, response)
# Do not filter by time.
self.set_as_privileged_user()
msg = swarming_pb2.BotEventsRequest(bot_id=u'bot1', page_size=1001)
raw_resp = self.app.post(
'/prpc/swarming.v1.BotAPI/Events', _encode(msg), self._headers)
resp = swarming_pb2.BotEventsResponse()
_decode(raw_resp.body, resp)
dimensions = [
swarming_pb2.StringListPair(key='id', values=['bot1']),
swarming_pb2.StringListPair(key='os', values=['Amiga']),
swarming_pb2.StringListPair(key='pool', values=['default']),
]
common_info = swarming_pb2.BotInfo(
supplemental=struct_pb2.Struct(
fields={
'bot_group_cfg_version': struct_pb2.Value(string_value='default'),
'running_time': struct_pb2.Value(number_value=1234.0),
'sleep_streak': struct_pb2.Value(number_value=0),
'started_ts': struct_pb2.Value(number_value=1410990411.11),
}),
external_ip='192.168.2.2',
authenticated_as='bot:whitelisted-ip',
version=self.bot_version,
)
events = [
swarming_pb2.BotEvent(
event_time=timestamp_pb2.Timestamp(seconds=1262401685),
bot=swarming_pb2.Bot(bot_id='bot1',
pools=[u'default'],
info=common_info,
dimensions=dimensions),
event=swarming_pb2.BOT_REBOOTING_HOST,
event_msg='for the best',
),
swarming_pb2.BotEvent(
event_time=timestamp_pb2.Timestamp(seconds=1262401625),
bot=swarming_pb2.Bot(bot_id='bot1',
pools=[u'default'],
status=swarming_pb2.BUSY,
current_task_id='5cfcee8008811',
info=common_info,
dimensions=dimensions),
event=swarming_pb2.TASK_COMPLETED,
),
swarming_pb2.BotEvent(
event_time=timestamp_pb2.Timestamp(seconds=1262401565),
bot=swarming_pb2.Bot(bot_id='bot1',
pools=[u'default'],
current_task_id='5cfcee8008811',
status=swarming_pb2.BUSY,
info=common_info,
dimensions=dimensions),
event=swarming_pb2.INSTRUCT_START_TASK,
),
swarming_pb2.BotEvent(
event_time=timestamp_pb2.Timestamp(seconds=1262401475),
bot=swarming_pb2.Bot(bot_id='bot1',
pools=[u'default'],
status=swarming_pb2.IDLE,
info=common_info,
dimensions=dimensions),
event=swarming_pb2.INSTRUCT_IDLE,
),
swarming_pb2.BotEvent(
event_time=timestamp_pb2.Timestamp(seconds=1262401445),
bot=swarming_pb2.Bot(
bot_id='bot1',
pools=[u'default'],
info=swarming_pb2.BotInfo(
supplemental=struct_pb2.Struct(
fields={
'running_time':
struct_pb2.Value(number_value=1234.0),
'sleep_streak':
struct_pb2.Value(number_value=0),
'started_ts':
struct_pb2.Value(number_value=1410990411.11),
}),
external_ip='192.168.2.2',
authenticated_as='bot:whitelisted-ip',
version='123',
),
dimensions=dimensions),
event=swarming_pb2.BOT_NEW_SESSION,
),
]
self.assertEqual(len(events), len(resp.events))
for i, event in enumerate(events):
self.assertEqual(unicode(event), unicode(resp.events[i]))
# Now test with a subset. It will retrieve events 1 and 2.
msg = swarming_pb2.BotEventsRequest(bot_id=u'bot1')
msg.start_time.FromDatetime(now_60)
msg.end_time.FromDatetime(now_180 + datetime.timedelta(seconds=1))
raw_resp = self.app.post(
'/prpc/swarming.v1.BotAPI/Events', _encode(msg), self._headers)
resp = swarming_pb2.BotEventsResponse()
_decode(raw_resp.body, resp)
self.assertEqual(
unicode(swarming_pb2.BotEventsResponse(events=events[1:3])),
unicode(resp))
if __name__ == '__main__':
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.FATAL)
unittest.main()
|
{
"content_hash": "1b1c956413be37e2c6f4a50071bdf747",
"timestamp": "",
"source": "github",
"line_count": 724,
"max_line_length": 80,
"avg_line_length": 37.38259668508287,
"alnum_prop": 0.6094956585996675,
"repo_name": "luci/luci-py",
"id": "c450636a3262325331a4fd43249c578b9cefd299",
"size": "27262",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "appengine/swarming/handlers_prpc_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5576"
},
{
"name": "HTML",
"bytes": "1900972"
},
{
"name": "JavaScript",
"bytes": "113046"
},
{
"name": "Makefile",
"bytes": "11718"
},
{
"name": "Python",
"bytes": "5885612"
},
{
"name": "Shell",
"bytes": "5183"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('marketing', '0009_vehicle_ad_type'),
]
operations = [
migrations.AddField(
model_name='lead',
name='flight',
field=models.ForeignKey(default=True, blank=True, to='marketing.Flight'),
),
]
|
{
"content_hash": "c9c3225b2188d9632ce5b3c079fd370a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 85,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.6004901960784313,
"repo_name": "edilio/dental",
"id": "c8be52b9caab476b12c4a75129d9f54ca9d75022",
"size": "432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marketing/migrations/0010_lead_flight.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15255"
},
{
"name": "Python",
"bytes": "37248"
}
],
"symlink_target": ""
}
|
"""The tests for the REST binary sensor platform."""
import asyncio
from os import path
from unittest.mock import patch
import httpx
import respx
from homeassistant import config as hass_config
import homeassistant.components.binary_sensor as binary_sensor
from homeassistant.const import (
ATTR_ENTITY_ID,
CONTENT_TYPE_JSON,
SERVICE_RELOAD,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.setup import async_setup_component
async def test_setup_missing_basic_config(hass):
"""Test setup with configuration missing required entries."""
assert await async_setup_component(
hass, binary_sensor.DOMAIN, {"binary_sensor": {"platform": "rest"}}
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0
async def test_setup_missing_config(hass):
"""Test setup with configuration missing required entries."""
assert await async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "rest",
"resource": "localhost",
"method": "GET",
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0
@respx.mock
async def test_setup_failed_connect(hass):
"""Test setup when connection error occurs."""
respx.get("http://localhost").mock(side_effect=httpx.RequestError)
assert await async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "rest",
"resource": "http://localhost",
"method": "GET",
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0
@respx.mock
async def test_setup_timeout(hass):
"""Test setup when connection timeout occurs."""
respx.get("http://localhost").mock(side_effect=asyncio.TimeoutError())
assert await async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "rest",
"resource": "localhost",
"method": "GET",
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0
@respx.mock
async def test_setup_minimum(hass):
"""Test setup with minimum configuration."""
respx.get("http://localhost") % 200
assert await async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "rest",
"resource": "http://localhost",
"method": "GET",
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
@respx.mock
async def test_setup_minimum_resource_template(hass):
"""Test setup with minimum configuration (resource_template)."""
respx.get("http://localhost") % 200
assert await async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "rest",
"resource_template": "{% set url = 'http://localhost' %}{{ url }}",
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
@respx.mock
async def test_setup_duplicate_resource_template(hass):
"""Test setup with duplicate resources."""
respx.get("http://localhost") % 200
assert await async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "rest",
"resource": "http://localhost",
"resource_template": "http://localhost",
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0
@respx.mock
async def test_setup_get(hass):
"""Test setup with valid configuration."""
respx.get("http://localhost").respond(status_code=200, json={})
assert await async_setup_component(
hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "rest",
"resource": "http://localhost",
"method": "GET",
"value_template": "{{ value_json.key }}",
"name": "foo",
"verify_ssl": "true",
"timeout": 30,
"authentication": "basic",
"username": "my username",
"password": "my password",
"headers": {"Accept": CONTENT_TYPE_JSON},
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
@respx.mock
async def test_setup_get_digest_auth(hass):
"""Test setup with valid configuration."""
respx.get("http://localhost").respond(status_code=200, json={})
assert await async_setup_component(
hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "rest",
"resource": "http://localhost",
"method": "GET",
"value_template": "{{ value_json.key }}",
"name": "foo",
"verify_ssl": "true",
"timeout": 30,
"authentication": "digest",
"username": "my username",
"password": "my password",
"headers": {"Accept": CONTENT_TYPE_JSON},
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
@respx.mock
async def test_setup_post(hass):
"""Test setup with valid configuration."""
respx.post("http://localhost").respond(status_code=200, json={})
assert await async_setup_component(
hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "rest",
"resource": "http://localhost",
"method": "POST",
"value_template": "{{ value_json.key }}",
"payload": '{ "device": "toaster"}',
"name": "foo",
"verify_ssl": "true",
"timeout": 30,
"authentication": "basic",
"username": "my username",
"password": "my password",
"headers": {"Accept": CONTENT_TYPE_JSON},
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
@respx.mock
async def test_setup_get_off(hass):
"""Test setup with valid off configuration."""
respx.get("http://localhost").respond(
status_code=200,
headers={"content-type": "text/json"},
json={"dog": False},
)
assert await async_setup_component(
hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "rest",
"resource": "http://localhost",
"method": "GET",
"value_template": "{{ value_json.dog }}",
"name": "foo",
"verify_ssl": "true",
"timeout": 30,
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
state = hass.states.get("binary_sensor.foo")
assert state.state == STATE_OFF
@respx.mock
async def test_setup_get_on(hass):
"""Test setup with valid on configuration."""
respx.get("http://localhost").respond(
status_code=200,
headers={"content-type": "text/json"},
json={"dog": True},
)
assert await async_setup_component(
hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "rest",
"resource": "http://localhost",
"method": "GET",
"value_template": "{{ value_json.dog }}",
"name": "foo",
"verify_ssl": "true",
"timeout": 30,
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
state = hass.states.get("binary_sensor.foo")
assert state.state == STATE_ON
@respx.mock
async def test_setup_with_exception(hass):
"""Test setup with exception."""
respx.get("http://localhost").respond(status_code=200, json={})
assert await async_setup_component(
hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "rest",
"resource": "http://localhost",
"method": "GET",
"value_template": "{{ value_json.dog }}",
"name": "foo",
"verify_ssl": "true",
"timeout": 30,
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
state = hass.states.get("binary_sensor.foo")
assert state.state == STATE_OFF
await async_setup_component(hass, "homeassistant", {})
await hass.async_block_till_done()
respx.clear()
respx.get("http://localhost").mock(side_effect=httpx.RequestError)
await hass.services.async_call(
"homeassistant",
"update_entity",
{ATTR_ENTITY_ID: ["binary_sensor.foo"]},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.foo")
assert state.state == STATE_UNAVAILABLE
@respx.mock
async def test_reload(hass):
"""Verify we can reload reset sensors."""
respx.get("http://localhost") % 200
await async_setup_component(
hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "rest",
"method": "GET",
"name": "mockrest",
"resource": "http://localhost",
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert hass.states.get("binary_sensor.mockrest")
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"rest/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
"rest",
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.mockreset") is None
assert hass.states.get("binary_sensor.rollout")
@respx.mock
async def test_setup_query_params(hass):
"""Test setup with query params."""
respx.get("http://localhost", params={"search": "something"}) % 200
assert await async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "rest",
"resource": "http://localhost",
"method": "GET",
"params": {"search": "something"},
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
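# --- Illustrative addition (not part of the original test module) -----------------
# A minimal configuration sketch distilled from the test cases above; every value is
# illustrative and mirrors what the tests pass to async_setup_component. A truthy
# template result maps to STATE_ON and a falsy one to STATE_OFF.
EXAMPLE_REST_BINARY_SENSOR_CONFIG = {
    "binary_sensor": {
        "platform": "rest",
        "resource": "http://localhost",
        "method": "GET",  # the tests above also exercise "POST" with a "payload"
        "value_template": "{{ value_json.dog }}",
        "name": "foo",
        "verify_ssl": "true",
        "timeout": 30,
    }
}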
|
{
"content_hash": "9208234a603deaa2f2930a430bb6a1a6",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 83,
"avg_line_length": 28.573604060913706,
"alnum_prop": 0.5280689287617694,
"repo_name": "partofthething/home-assistant",
"id": "9adb04ea40c2e6e37c7bfe391f3878c7d0d7af91",
"size": "11258",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/rest/test_binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
}
|
"""Main test class for binreconfiguration"""
|
{
"content_hash": "cdb66125fb0826008417ccb45bfecd11",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 44,
"avg_line_length": 44,
"alnum_prop": 0.7727272727272727,
"repo_name": "vialette/binreconfiguration",
"id": "e74509d6b781fb64b06f3faaf3c1c5a6f22c0167",
"size": "44",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_bin_reconfiguration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "7466"
},
{
"name": "Python",
"bytes": "79670"
}
],
"symlink_target": ""
}
|
from __future__ import division
from bisect import bisect_left, bisect_right
import statistics
from .utils import cached_property
class Stats(object):
fields = (
"min", "max", "mean", "stddev", "rounds", "median", "iqr", "q1", "q3", "iqr_outliers", "stddev_outliers",
"outliers", "ld15iqr", "hd15iqr"
)
def __init__(self):
self.data = []
def __bool__(self):
return bool(self.data)
def __nonzero__(self):
return bool(self.data)
@cached_property
def as_dict(self):
return dict(
(field, getattr(self, field))
for field in self.fields
)
def update(self, duration):
self.data.append(duration)
@cached_property
def sorted_data(self):
return sorted(self.data)
@cached_property
def total(self):
return sum(self.data)
@cached_property
def min(self):
return min(self.data)
@cached_property
def max(self):
return max(self.data)
@cached_property
def mean(self):
return statistics.mean(self.data)
@cached_property
def stddev(self):
if len(self.data) > 1:
return statistics.stdev(self.data)
else:
return 0
@property
def stddev_outliers(self):
"""
        Count of StdDev outliers: what's beyond (Mean - StdDev, Mean + StdDev)
"""
count = 0
q0 = self.mean - self.stddev
q4 = self.mean + self.stddev
for val in self.data:
if val < q0 or val > q4:
count += 1
return count
@cached_property
def rounds(self):
return len(self.data)
@cached_property
def median(self):
return statistics.median(self.data)
@cached_property
def ld15iqr(self):
"""
Tukey-style Lowest Datum within 1.5 IQR under Q1.
"""
if len(self.data) == 1:
return self.data[0]
else:
return self.sorted_data[bisect_left(self.sorted_data, self.q1 - 1.5 * self.iqr)]
@cached_property
def hd15iqr(self):
"""
Tukey-style Highest Datum within 1.5 IQR over Q3.
"""
if len(self.data) == 1:
return self.data[0]
else:
pos = bisect_right(self.sorted_data, self.q3 + 1.5 * self.iqr)
if pos == len(self.data):
return self.sorted_data[-1]
else:
return self.sorted_data[pos]
@cached_property
def q1(self):
rounds = self.rounds
data = self.sorted_data
# See: https://en.wikipedia.org/wiki/Quartile#Computing_methods
if rounds == 1:
return data[0]
elif rounds % 2: # Method 3
n, q = rounds // 4, rounds % 4
if q == 1:
return 0.25 * data[n - 1] + 0.75 * data[n]
else:
return 0.75 * data[n] + 0.25 * data[n + 1]
else: # Method 2
return statistics.median(data[:rounds // 2])
@cached_property
def q3(self):
rounds = self.rounds
data = self.sorted_data
# See: https://en.wikipedia.org/wiki/Quartile#Computing_methods
if rounds == 1:
return data[0]
elif rounds % 2: # Method 3
n, q = rounds // 4, rounds % 4
if q == 1:
return 0.75 * data[3 * n] + 0.25 * data[3 * n + 1]
else:
return 0.25 * data[3 * n + 1] + 0.75 * data[3 * n + 2]
else: # Method 2
return statistics.median(data[rounds // 2:])
@cached_property
def iqr(self):
return self.q3 - self.q1
@property
def iqr_outliers(self):
"""
Count of Tukey outliers: what's beyond (Q1 - 1.5IQR, Q3 + 1.5IQR)
"""
count = 0
q0 = self.q1 - 1.5 * self.iqr
q4 = self.q3 + 1.5 * self.iqr
for val in self.data:
if val < q0 or val > q4:
count += 1
return count
@cached_property
def outliers(self):
return "%s;%s" % (self.stddev_outliers, self.iqr_outliers)
|
{
"content_hash": "727549f30062df87ec0a04c6e66d968b",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 113,
"avg_line_length": 26.08176100628931,
"alnum_prop": 0.5196527610320714,
"repo_name": "aldanor/pytest-benchmark",
"id": "de242a5604c79bb755202fe974451675ae1b7abd",
"size": "4147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pytest_benchmark/stats.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1489"
},
{
"name": "PowerShell",
"bytes": "3053"
},
{
"name": "Python",
"bytes": "148307"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from fancypages.utils import FP_NODE_MODEL
from fancypages.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(SchemaMigration):
def forwards(self, orm):
db.create_table(u'fancypages_htmlblock', (
(u'contentblock_ptr',
self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True,
primary_key=True)),
('source', self.gf('django.db.models.fields.CharField')(max_length=10000, default='Your html goes here.')),
))
db.send_create_signal('fancypages', ['HtmlBlock'])
def backwards(self, orm):
db.delete_table(u'fancypages_htmlblock')
models = {
u'assets.imageasset': {
'Meta': {'object_name': 'ImageAsset'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['{}']".format(AUTH_USER_MODEL)}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'height': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'uuid': (
'shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')",
'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': (
'django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
AUTH_USER_MODEL: {
'Meta': {'object_name': AUTH_USER_MODEL_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': (
'django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'fancypages.carouselblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'CarouselBlock',
'_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'image_1': ('fancypages.assets.fields.AssetKey', [],
{'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_10': ('fancypages.assets.fields.AssetKey', [],
{'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_2': ('fancypages.assets.fields.AssetKey', [],
{'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_3': ('fancypages.assets.fields.AssetKey', [],
{'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_4': ('fancypages.assets.fields.AssetKey', [],
{'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_5': ('fancypages.assets.fields.AssetKey', [],
{'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_6': ('fancypages.assets.fields.AssetKey', [],
{'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_7': ('fancypages.assets.fields.AssetKey', [],
{'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_8': ('fancypages.assets.fields.AssetKey', [],
{'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_9': ('fancypages.assets.fields.AssetKey', [],
{'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'link_url_1': (
'django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_10': (
'django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_2': (
'django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_3': (
'django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_4': (
'django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_5': (
'django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_6': (
'django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_7': (
'django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_8': (
'django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_9': (
'django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
'fancypages.container': {
'Meta': {'unique_together': "(('name', 'content_type', 'object_id', 'language_code'),)",
'object_name': 'Container'},
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "'en-us'", 'max_length': '7'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'uuid': (
'shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'})
},
'fancypages.contentblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'ContentBlock'},
'container': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'blocks'", 'to': "orm['fancypages.Container']"}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uuid': (
'shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'})
},
'fancypages.fancypage': {
'Meta': {'object_name': 'FancyPage'},
'date_visible_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_visible_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'related_name': "'pages'", 'symmetrical': 'False', 'to': "orm['fancypages.PageGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'node': ('django.db.models.fields.related.OneToOneField', [],
{'related_name': "'page'", 'unique': 'True', 'null': 'True',
'to': "orm['{}']".format(FP_NODE_MODEL)}),
'page_type': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'pages'", 'null': 'True',
'to': "orm['fancypages.PageType']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'uuid': (
'shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'})
},
'fancypages.fourcolumnlayoutblock': {
'Meta': {'object_name': 'FourColumnLayoutBlock'},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.horizontalseparatorblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'HorizontalSeparatorBlock',
'_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.imageandtextblock': {
'Meta': {'object_name': 'ImageAndTextBlock', '_ormbases': ['fancypages.ContentBlock']},
'alt_text': (
'django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'image_asset': ('fancypages.assets.fields.AssetKey', [],
{'blank': 'True', 'related_name': "'image_text_blocks'", 'null': 'True',
'to': u"orm['assets.ImageAsset']"}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'default': "u'Your text goes here.'"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'fancypages.imageblock': {
'Meta': {'object_name': 'ImageBlock', '_ormbases': ['fancypages.ContentBlock']},
'alt_text': (
'django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'image_asset': ('fancypages.assets.fields.AssetKey', [],
{'blank': 'True', 'related_name': "'image_blocks'", 'null': 'True',
'to': u"orm['assets.ImageAsset']"}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'fancypages.orderedcontainer': {
'Meta': {'object_name': 'OrderedContainer', '_ormbases': ['fancypages.Container']},
u'container_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['fancypages.Container']", 'unique': 'True', 'primary_key': 'True'}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'fancypages.pagegroup': {
'Meta': {'object_name': 'PageGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'uuid': (
'shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'})
},
'fancypages.pagenavigationblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'PageNavigationBlock',
'_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2'}),
'is_relative': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
FP_NODE_MODEL: {
'Meta': {'object_name': FP_NODE_MODEL.split('.')[1]},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': (
'django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
'fancypages.pagetype': {
'Meta': {'object_name': 'PageType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': (
'shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'})
},
'fancypages.tabblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TabBlock',
'_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.textblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TextBlock',
'_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'default': "'Your text goes here.'"})
},
'fancypages.htmlblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'HtmlBlock',
'_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'default': "'Your html goes here.'"})
},
'fancypages.threecolumnlayoutblock': {
'Meta': {'object_name': 'ThreeColumnLayoutBlock'},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.titletextblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TitleTextBlock',
'_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'default': "'Your text goes here.'"}),
'title': (
'django.db.models.fields.CharField', [], {'default': "'Your title goes here.'", 'max_length': '100'})
},
'fancypages.twitterblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TwitterBlock',
'_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'max_tweets': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'fancypages.twocolumnlayoutblock': {
'Meta': {'object_name': 'TwoColumnLayoutBlock'},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'left_width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '6', 'max_length': '3'})
},
'fancypages.videoblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'VideoBlock',
'_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'video_code': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['fancypages']
|
{
"content_hash": "ea21c2724da18a7078928bd69aafdd19",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 125,
"avg_line_length": 71.86520376175548,
"alnum_prop": 0.5163794983642311,
"repo_name": "socradev/django-fancypages",
"id": "fd4c4a5c1f28aa13562290a877066b47bdf15421",
"size": "22949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fancypages/migrations/0013_add_html_block.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "187646"
},
{
"name": "HTML",
"bytes": "64967"
},
{
"name": "JavaScript",
"bytes": "561457"
},
{
"name": "Makefile",
"bytes": "1282"
},
{
"name": "Python",
"bytes": "526740"
}
],
"symlink_target": ""
}
|
class NoSkypeConversationException(Exception):
pass
class NoSkypeFriendException(Exception):
pass
|
{
"content_hash": "31134bd40b515f5678d060d9edca4e44",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 46,
"avg_line_length": 21.2,
"alnum_prop": 0.8113207547169812,
"repo_name": "nickpack/Igor",
"id": "262f2bce8dd4bddd4ecd60a5021bba4e85f035ea",
"size": "107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "igorexceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75413"
},
{
"name": "Shell",
"bytes": "5396"
}
],
"symlink_target": ""
}
|
"""Implements a servo in MPF."""
import asyncio
from typing import Optional
from mpf.platforms.interfaces.stepper_platform_interface import StepperPlatformInterface
from mpf.core.delays import DelayManager
from mpf.core.device_monitor import DeviceMonitor
from mpf.core.events import event_handler
from mpf.core.system_wide_device import SystemWideDevice
from mpf.core.utility_functions import Util
@DeviceMonitor(_current_position="position", _target_position="target_position", _is_homed="is_homed")
class Stepper(SystemWideDevice):
"""Represents an stepper motor based axis in a pinball machine.
Args: Same as the Device parent class.
"""
config_section = 'steppers'
collection = 'steppers'
class_label = 'stepper'
__slots__ = ["hw_stepper", "platform", "_target_position", "_current_position", "_ball_search_started",
"_ball_search_old_target", "_is_homed", "_is_moving", "_move_task", "delay"]
def __init__(self, machine, name):
"""Initialise stepper."""
self.hw_stepper = None # type: Optional[StepperPlatformInterface]
self.platform = None # type: Optional[Stepper]
self._target_position = 0 # in user units
self._current_position = 0 # in user units
self._ball_search_started = False
self._ball_search_old_target = 0
self._is_homed = False
self._is_moving = asyncio.Event()
self._move_task = None # type: Optional[asyncio.Task]
self.delay = DelayManager(machine)
super().__init__(machine, name)
async def _initialize(self):
await super()._initialize()
self.platform = self.machine.get_platform_sections('stepper_controllers', self.config['platform'])
self.platform.assert_has_feature("steppers")
# first target is the reset position but we might get an early target during startup via events
self._target_position = self.config['reset_position']
for position in self.config['named_positions']:
self.machine.events.add_handler(self.config['named_positions'][position],
self.event_move_to_position,
position=position)
if not self.platform.features['allow_empty_numbers'] and self.config['number'] is None:
self.raise_config_error("Stepper must have a number.", 2)
self.hw_stepper = await self.platform.configure_stepper(self.config['number'],
self.config['platform_settings'])
if self.config['include_in_ball_search']:
self.machine.events.add_handler("ball_search_started",
self._ball_search_start)
self.machine.events.add_handler("ball_search_stopped",
self._ball_search_stop)
if self.config['homing_mode'] == "switch" and not self.config['homing_switch']:
self.raise_config_error("Cannot use homing_mode switch without a homing_switch. Please add homing_switch or"
" use homing_mode hardware.", 1)
self._move_task = self.machine.clock.loop.create_task(self._run())
self._move_task.add_done_callback(Util.raise_exceptions)
def validate_and_parse_config(self, config, is_mode_config, debug_prefix: str = None):
"""Validate stepper config."""
config = super().validate_and_parse_config(config, is_mode_config, debug_prefix)
platform = self.machine.get_platform_sections(
'stepper_controllers', getattr(config, "platform", None))
config['platform_settings'] = platform.validate_stepper_section(
self, config.get('platform_settings', None))
self._configure_device_logging(config)
return config
async def _run(self):
# wait for switches to be initialised
await self.machine.events.wait_for_event("init_phase_3")
# first home the stepper
self.debug_log("Homing stepper")
await self._home()
# run the loop at least once
self._is_moving.set()
while True:
# wait until we should be moving
await self._is_moving.wait()
self._is_moving.clear()
# store target position in local variable since it may change in the meantime
target_position = self._target_position
delta = target_position - self._current_position
if delta != 0:
self.debug_log("Got move command. Current position: %s Target position: %s Delta: %s",
self._current_position, target_position, delta)
# move stepper
self.hw_stepper.move_rel_pos(delta)
# wait for the move to complete
await self.hw_stepper.wait_for_move_completed()
else:
self.debug_log("Got move command. Stepper already at target. Not moving.")
# set current position
self._current_position = target_position
# post ready event
self._post_ready_event()
self.debug_log("Move completed")
def _move_to_absolute_position(self, position):
"""Move servo to position."""
self.debug_log("Moving to position %s", position)
if self.config['pos_min'] <= position <= self.config['pos_max']:
self._target_position = position
self._is_moving.set()
else:
raise ValueError("_move_to_absolute_position: position argument beyond limits")
async def _home(self):
"""Home an axis, resetting 0 position."""
self._is_homed = False
self._is_moving.set()
if self.config['homing_mode'] == "hardware":
self.info_log("Homing stepper using hardware homing.")
self.hw_stepper.home(self.config['homing_direction'])
await self.hw_stepper.wait_for_move_completed()
else:
self.info_log("Homing stepper using switch homing with switch %s.", self.config["homing_switch"])
# move the stepper manually
if self.config['homing_direction'] == "clockwise":
self.hw_stepper.move_vel_mode(1)
else:
self.hw_stepper.move_vel_mode(-1)
# wait until home switch becomes active
await self.machine.switch_controller.wait_for_switch(self.config['homing_switch'],
only_on_change=False)
self.hw_stepper.stop()
self.hw_stepper.set_home_position()
self.info_log("Stepper reached home.")
self._is_homed = True
self._is_moving.clear()
# home position is 0
self._current_position = 0
def _post_ready_event(self):
if not self._ball_search_started:
self.machine.events.post('stepper_' + self.name + "_ready", position=self._current_position)
'''event: stepper_(name)_ready'''
def stop_device(self):
"""Stop motor."""
self.hw_stepper.stop()
self._is_moving.clear()
if self._move_task:
self._move_task.cancel()
self._move_task = None
@event_handler(1)
def event_reset(self, **kwargs):
"""Event handler for reset event."""
del kwargs
self.reset()
def reset(self):
"""Move to reset position."""
self._move_to_absolute_position(self.config['reset_position'])
@event_handler(5)
def event_move_to_position(self, position=None, **kwargs):
"""Event handler for move_to_position event."""
del kwargs
if position is None:
raise AssertionError("move_to_position event is missing a position.")
self.move_to_position(position)
def move_to_position(self, position):
"""Move stepper to a position."""
self._target_position = position
if self._ball_search_started:
return
self._move_to_absolute_position(position)
def _ball_search_start(self, **kwargs):
del kwargs
# we do not touch self._position during ball search so we can reset to
# it later
self._ball_search_old_target = self._target_position
self._ball_search_started = True
self._ball_search_go_to_min()
def _ball_search_go_to_min(self):
self._move_to_absolute_position(self.config['ball_search_min'])
self.delay.add(name="ball_search", callback=self._ball_search_go_to_max, ms=self.config['ball_search_wait'])
def _ball_search_go_to_max(self):
self._move_to_absolute_position(self.config['ball_search_max'])
self.delay.add(name="ball_search", callback=self._ball_search_go_to_min, ms=self.config['ball_search_wait'])
def _ball_search_stop(self, **kwargs):
del kwargs
# stop delay
self.delay.remove("ball_search")
self._ball_search_started = False
# move to last position
self._target_position = self._ball_search_old_target
self._move_to_absolute_position(self._target_position)
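# --- Illustrative addition (not part of MPF) ---------------------------------------
# A hedged sketch of the relative-move arithmetic used in Stepper._run() above: the
# loop always asks the hardware for the delta between current and target position (in
# user units) and, once the move completes, records the target as the new current
# position. The helper name and demo values below are purely illustrative.
def _example_move_delta(current_position, target_position):
    """Return the relative distance a hardware stepper would be asked to travel."""
    return target_position - current_position
if __name__ == "__main__":
    assert _example_move_delta(0, 250) == 250     # forward move after homing at 0
    assert _example_move_delta(250, 100) == -150  # reverse move
    assert _example_move_delta(100, 100) == 0     # already at target; nothing to do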
|
{
"content_hash": "2841cebcdd0cd7a1c29a0f8f40ac8b4d",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 120,
"avg_line_length": 41.89592760180995,
"alnum_prop": 0.6004968139107895,
"repo_name": "missionpinball/mpf",
"id": "9352f2ebbf047a0eadd24135afa70a49baaff779",
"size": "9259",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "mpf/devices/stepper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "640"
},
{
"name": "C++",
"bytes": "4019"
},
{
"name": "Makefile",
"bytes": "382"
},
{
"name": "Python",
"bytes": "4532953"
}
],
"symlink_target": ""
}
|
from sys import argv
script,filename = argv
print "We're going to erase %r." % filename
print "If you don't want that, hit CTRL-C(^C)."
print "If you do want that, hit RETURN."
raw_input("?")
print "Opening the file..."
target = open(filename, 'w')
print "Truncating the file. Goodbye!"
target.truncate()  # truncate the file contents
print "Now I'm going to ask you for three lines."
line1 = raw_input("line 1: ")
line2 = raw_input("line 2: ")
line3 = raw_input("line 3: ")
print "I'm going to write these to the file."
target.write(line1)
target.write("\n")
target.write(line2)
target.write("\n")
target.write(line3)
target.write("\n")
print "And finally, we close it."
target.close()
|
{
"content_hash": "e5085e8fe2902b81fc2b4e5b63b83a14",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 49,
"avg_line_length": 21.96875,
"alnum_prop": 0.6543385490753911,
"repo_name": "Jumpers/MysoftAutoTest",
"id": "3f0275a6249795fad79e5f9f29ed2e32f02f7505",
"size": "726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Step1-PythonBasic/Practices/dengt/p16_DT.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "95779"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions import get_unique_id_and_date
from resource_management.core.resources import File
from resource_management.core.resources import Execute
from resource_management.libraries.script import Script
from resource_management.core.source import StaticFile
from ambari_commons import OSCheck, OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
class ServiceCheck(Script):
pass
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class ServiceCheckWindows(ServiceCheck):
def service_check(self, env):
import params
env.set_params(params)
smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
service = "STORM"
Execute(format("cmd /C {smoke_cmd} {service}", smoke_cmd=smoke_cmd, service=service), user=params.storm_user, logoutput=True)
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class ServiceCheckDefault(ServiceCheck):
def service_check(self, env):
import params
env.set_params(params)
unique = get_unique_id_and_date()
File("/tmp/wordCount.jar",
content=StaticFile("wordCount.jar"),
owner=params.storm_user
)
cmd = ""
if params.nimbus_seeds_supported:
# Because this command is guaranteed to run on one of the hosts with storm client, there is no need
# to specify "-c nimbus.seeds={nimbus_seeds}"
cmd = format("storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount{unique}")
elif params.nimbus_host is not None:
cmd = format("storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount{unique} -c nimbus.host={nimbus_host}")
Execute(cmd,
logoutput=True,
path=params.storm_bin_dir,
user=params.storm_user
)
Execute(format("storm kill WordCount{unique}"),
path=params.storm_bin_dir,
user=params.storm_user
)
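# --- Illustrative addition (not part of Ambari) -------------------------------------
# A hedged sketch of the command-building branch in ServiceCheckDefault.service_check
# above: stacks that support nimbus.seeds need no extra flag, while older stacks fall
# back to "-c nimbus.host=...". The helper name and demo values are illustrative only.
def _example_smoke_command(unique, nimbus_seeds_supported, nimbus_host=None):
    base = "storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount" + unique
    if nimbus_seeds_supported:
        return base
    if nimbus_host is not None:
        return base + " -c nimbus.host=" + nimbus_host
    return ""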
if __name__ == "__main__":
ServiceCheck().execute()
|
{
"content_hash": "0092c6c8b5e05f6644cf1d9afda35266",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 129,
"avg_line_length": 35.6025641025641,
"alnum_prop": 0.7385667987036371,
"repo_name": "radicalbit/ambari",
"id": "80ea0f5bee03cfd99ef1fe5cc6ac975162e78037",
"size": "2799",
"binary": false,
"copies": "4",
"ref": "refs/heads/trunk",
"path": "ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/service_check.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "42212"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "182799"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "1287531"
},
{
"name": "CoffeeScript",
"bytes": "4323"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "88056"
},
{
"name": "HTML",
"bytes": "5098825"
},
{
"name": "Java",
"bytes": "29006663"
},
{
"name": "JavaScript",
"bytes": "17274453"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLSQL",
"bytes": "2160"
},
{
"name": "PLpgSQL",
"bytes": "314333"
},
{
"name": "PowerShell",
"bytes": "2087991"
},
{
"name": "Python",
"bytes": "14584206"
},
{
"name": "R",
"bytes": "1457"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "14478"
},
{
"name": "SQLPL",
"bytes": "2117"
},
{
"name": "Shell",
"bytes": "741459"
},
{
"name": "Vim script",
"bytes": "5813"
}
],
"symlink_target": ""
}
|
import logging
import os
import types
import weakref
from collections import defaultdict
import archinfo
import cle
from cle.address_translator import AT
l = logging.getLogger("angr.project")
# This holds the default execution engine for a given CLE loader backend.
# All the builtins right now use SimEngineVEX. This may not hold for long.
def global_default(): return {'any': SimEngineVEX}
default_engines = defaultdict(global_default)
def register_default_engine(loader_backend, engine, arch='any'):
"""
Register the default execution engine to be used with a given CLE backend.
Usually this is the SimEngineVEX, but if you're operating on something that isn't
going to be lifted to VEX, you'll need to make sure the desired engine is registered here.
:param loader_backend: The loader backend (a type)
:param type engine: The engine to use for the loader backend (a type)
:param arch: The architecture to associate with this engine. Optional.
:return:
"""
if not isinstance(loader_backend, type):
raise TypeError("loader_backend must be a type")
if not isinstance(engine, type):
raise TypeError("engine must be a type")
default_engines[loader_backend][arch] = engine
def get_default_engine(loader_backend, arch='any'):
"""
Get some sort of sane default for a given loader and/or arch.
Can be set with register_default_engine()
:param loader_backend:
:param arch:
:return:
"""
matches = default_engines[loader_backend]
for k,v in matches.items():
if k == arch or k == 'any':
return v
return None
projects = weakref.WeakValueDictionary()
def fake_project_unpickler(name):
if name not in projects:
raise AngrError("Project %s has not been opened." % name)
return projects[name]
fake_project_unpickler.__safe_for_unpickling__ = True
class Project(object):
"""
This is the main class of the angr module. It is meant to contain a set of binaries and the relationships between
them, and perform analyses on them.
:param thing: The path to the main executable object to analyze, or a CLE Loader object.
The following parameters are optional.
:param default_analysis_mode: The mode of analysis to use by default. Defaults to 'symbolic'.
:param ignore_functions: A list of function names that, when imported from shared libraries, should
never be stepped into in analysis (calls will return an unconstrained value).
:param use_sim_procedures: Whether to replace resolved dependencies for which simprocedures are
available with said simprocedures.
:param exclude_sim_procedures_func: A function that, when passed a function name, returns whether or not to wrap
it with a simprocedure.
:param exclude_sim_procedures_list: A list of functions to *not* wrap with simprocedures.
:param arch: The target architecture (auto-detected otherwise).
:param simos: a SimOS class to use for this project.
:param bool translation_cache: If True, cache translated basic blocks rather than re-translating them.
:param support_selfmodifying_code: Whether we aggressively support self-modifying code. When enabled, emulation
will try to read code from the current state instead of the original memory,
regardless of the current memory protections.
:type support_selfmodifying_code: bool
Any additional keyword arguments passed will be passed onto ``cle.Loader``.
:ivar analyses: The available analyses.
:type analyses: angr.analysis.Analyses
:ivar entry: The program entrypoint.
:ivar factory: Provides access to important analysis elements such as path groups and symbolic execution results.
:type factory: AngrObjectFactory
:ivar filename: The filename of the executable.
:ivar loader: The program loader.
:type loader: cle.Loader
:ivar surveyors: The available surveyors.
:type surveyors: angr.surveyors.surveyor.Surveyors
"""
def __init__(self, thing,
default_analysis_mode=None,
ignore_functions=None,
use_sim_procedures=True,
exclude_sim_procedures_func=None,
exclude_sim_procedures_list=(),
arch=None, simos=None,
load_options=None,
translation_cache=True,
support_selfmodifying_code=False,
**kwargs):
# Step 1: Load the binary
if load_options is None: load_options = {}
load_options.update(kwargs)
if isinstance(thing, cle.Loader):
if load_options:
l.warning("You provided CLE options to angr but you also provided a completed cle.Loader object!")
self.loader = thing
self.filename = self.loader.main_object.binary
elif hasattr(thing, 'read') and hasattr(thing, 'seek'):
l.info("Loading binary from stream")
self.filename = None
self.loader = cle.Loader(thing, **load_options)
elif not isinstance(thing, (unicode, str)) or not os.path.exists(thing) or not os.path.isfile(thing):
raise Exception("Not a valid binary file: %s" % repr(thing))
else:
# use angr's loader, provided by cle
l.info("Loading binary %s", thing)
self.filename = thing
self.loader = cle.Loader(self.filename, **load_options)
# Step 2: determine its CPU architecture, ideally falling back to CLE's guess
if isinstance(arch, str):
self.arch = archinfo.arch_from_id(arch) # may raise ArchError, let the user see this
elif isinstance(arch, archinfo.Arch):
self.arch = arch
elif arch is None:
self.arch = self.loader.main_object.arch
else:
raise ValueError("Invalid arch specification.")
# Step 3: Set some defaults and set the public and private properties
if not default_analysis_mode:
default_analysis_mode = 'symbolic'
if not ignore_functions:
ignore_functions = []
if isinstance(exclude_sim_procedures_func, types.LambdaType):
l.warning("Passing a lambda type as the exclude_sim_procedures_func argument to Project causes the resulting object to be un-serializable.")
self._sim_procedures = {}
self._default_analysis_mode = default_analysis_mode
self._exclude_sim_procedures_func = exclude_sim_procedures_func
self._exclude_sim_procedures_list = exclude_sim_procedures_list
self._should_use_sim_procedures = use_sim_procedures
self._support_selfmodifying_code = support_selfmodifying_code
self._ignore_functions = ignore_functions
self._executing = False # this is a flag for the convenience API, exec() and terminate_execution() below
if self._support_selfmodifying_code:
if translation_cache is True:
translation_cache = False
l.warning("Disabling IRSB translation cache because support for self-modifying code is enabled.")
# Look up the default engine.
engine_cls = get_default_engine(type(self.loader.main_object))
if not engine_cls:
raise AngrError("No engine associated with loader %s" % str(type(self.loader.main_object)))
engine = engine_cls(
stop_points=self._sim_procedures,
use_cache=translation_cache,
support_selfmodifying_code=support_selfmodifying_code)
procedure_engine = SimEngineProcedure()
hook_engine = SimEngineHook(self)
failure_engine = SimEngineFailure(self)
syscall_engine = SimEngineSyscall(self)
unicorn_engine = SimEngineUnicorn(self._sim_procedures)
self.entry = self.loader.main_object.entry
self.factory = AngrObjectFactory(
self,
engine,
procedure_engine,
[failure_engine, syscall_engine, hook_engine, unicorn_engine, engine])
self.analyses = Analyses(self)
self.surveyors = Surveyors(self)
self.kb = KnowledgeBase(self, self.loader.main_object)
if self.filename is not None:
projects[self.filename] = self
# Step 4: determine the guest OS
if isinstance(simos, type) and issubclass(simos, SimOS):
self._simos = simos(self) #pylint:disable=invalid-name
elif simos is None:
self._simos = os_mapping[self.loader.main_object.os](self)
else:
raise ValueError("Invalid OS specification or non-matching architecture.")
# Step 5: Register simprocedures as appropriate for library functions
for obj in self.loader.initial_load_objects:
self._register_object(obj)
# Step 6: Run OS-specific configuration
self._simos.configure_project()
def _register_object(self, obj):
"""
This scans through an objects imports and hooks them with simprocedures from our library whenever possible
"""
# Step 1: get the set of libraries we are allowed to use to resolve unresolved symbols
missing_libs = []
for lib_name in self.loader.missing_dependencies:
try:
missing_libs.append(SIM_LIBRARIES[lib_name])
except KeyError:
l.info("There are no simprocedures for missing library %s :(", lib_name)
# Step 2: Categorize every "import" symbol in each object.
# If it's IGNORED, mark it for stubbing
# If it's blacklisted, don't process it
# If it matches a simprocedure we have, replace it
for reloc in obj.imports.itervalues():
# Step 2.1: Quick filter on symbols we really don't care about
func = reloc.symbol
if func is None:
continue
if not func.is_function:
continue
if not reloc.resolved:
l.debug("Ignoring unresolved import '%s' from %s ...?", func.name, reloc.owner_obj)
continue
if self.is_hooked(reloc.symbol.resolvedby.rebased_addr):
l.debug("Already hooked %s (%s)", func.name, reloc.owner_obj)
continue
# Step 2.2: If this function has been resolved by a static dependency,
# check if we actually can and want to replace it with a SimProcedure.
# We opt out of this step if it is blacklisted by ignore_functions, which
# will cause it to be replaced by ReturnUnconstrained later.
if func.resolved and func.resolvedby.owner_obj is not self.loader.extern_object and \
func.name not in self._ignore_functions:
if self._check_user_blacklists(func.name):
continue
owner_name = func.resolvedby.owner_obj.provides
if isinstance(self.loader.main_object, cle.backends.pe.PE):
owner_name = owner_name.lower()
if owner_name not in SIM_LIBRARIES:
continue
sim_lib = SIM_LIBRARIES[owner_name]
if not sim_lib.has_implementation(func.name):
continue
l.info("Using builtin SimProcedure for %s from %s", func.name, sim_lib.name)
self.hook_symbol(func.name, sim_lib.get(func.name, self.arch))
# Step 2.3: If 2.2 didn't work, check if the symbol wants to be resolved
# by a library we already know something about. Resolve it appropriately.
# Note that _check_user_blacklists also includes _ignore_functions.
# An important consideration is that even if we're stubbing a function out,
# we still want to try as hard as we can to figure out where it comes from
# so we can get the calling convention as close to right as possible.
elif reloc.resolvewith is not None and reloc.resolvewith in SIM_LIBRARIES:
sim_lib = SIM_LIBRARIES[reloc.resolvewith]
if self._check_user_blacklists(func.name):
if not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s from %s", func.name, sim_lib.name)
self.hook_symbol(func.name, sim_lib.get_stub(func.name, self.arch))
else:
l.info("Using builtin SimProcedure for unresolved %s from %s", func.name, sim_lib.name)
self.hook_symbol(func.name, sim_lib.get(func.name, self.arch))
# Step 2.4: If 2.3 didn't work (the symbol didn't request a provider we know of), try
# looking through each of the SimLibraries we're using to resolve unresolved
# functions. If any of them know anything specifically about this function,
# resolve it with that. As a final fallback, just ask any old SimLibrary
# to resolve it.
elif missing_libs:
for sim_lib in missing_libs:
if sim_lib.has_metadata(func.name):
if self._check_user_blacklists(func.name):
if not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s from %s", func.name, sim_lib.name)
self.hook_symbol(func.name, sim_lib.get_stub(func.name, self.arch))
else:
l.info("Using builtin SimProcedure for unresolved %s from %s", func.name, sim_lib.name)
self.hook_symbol(func.name, sim_lib.get(func.name, self.arch))
break
else:
if not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s", func.name)
self.hook_symbol(func.name, missing_libs[0].get(func.name, self.arch))
# Step 2.5: If 2.4 didn't work (we have NO SimLibraries to work with), just
# use the vanilla ReturnUnconstrained, assuming that this isn't a weak func
elif not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s", func.name)
self.hook_symbol(func.name, SIM_PROCEDURES['stubs']['ReturnUnconstrained']())
def _check_user_blacklists(self, f):
"""
Has symbol name `f` been marked for exclusion by any of the user
parameters?
"""
return not self._should_use_sim_procedures or \
f in self._exclude_sim_procedures_list or \
f in self._ignore_functions or \
(self._exclude_sim_procedures_func is not None and self._exclude_sim_procedures_func(f))
#
# Public methods
# They're all related to hooking!
#
def hook(self, addr, hook=None, length=0, kwargs=None):
"""
Hook a section of code with a custom function. This is used internally to provide symbolic
summaries of library functions, and can be used to instrument execution or to modify
control flow.
When hook is not specified, it returns a function decorator that allows easy hooking.
Usage::
# Assuming proj is an instance of angr.Project, we will add a custom hook at the entry
# point of the project.
@proj.hook(proj.entry)
def my_hook(state):
print "Welcome to execution!"
:param addr: The address to hook.
:param hook: A :class:`angr.project.Hook` describing a procedure to run at the
given address. You may also pass in a SimProcedure class or a function
directly and it will be wrapped in a Hook object for you.
:param length: If you provide a function for the hook, this is the number of bytes
that will be skipped by executing the hook by default.
:param kwargs: If you provide a SimProcedure for the hook, these are the keyword
arguments that will be passed to the procedure's `run` method
eventually.
"""
if hook is None:
# if we haven't been passed a thing to hook with, assume we're being used as a decorator
return self._hook_decorator(addr, length=length, kwargs=kwargs)
if kwargs is None: kwargs = {}
l.debug('hooking %#x with %s', addr, hook)
if self.is_hooked(addr):
l.warning("Address is already hooked [hook(%#x, %s)]. Not re-hooking.", addr, hook)
return
if isinstance(hook, type):
if once("hook_instance_warning"):
l.critical("Hooking with a SimProcedure instance is deprecated! Please hook with an instance.")
hook = hook(**kwargs)
if callable(hook):
hook = SIM_PROCEDURES['stubs']['UserHook'](user_func=hook, length=length, **kwargs)
self._sim_procedures[addr] = hook
def is_hooked(self, addr):
"""
Returns True if `addr` is hooked.
:param addr: An address.
:returns: True if addr is hooked, False otherwise.
"""
return addr in self._sim_procedures
def hooked_by(self, addr):
"""
Returns the current hook for `addr`.
:param addr: An address.
:returns: None if the address is not hooked.
"""
if not self.is_hooked(addr):
l.warning("Address %#x is not hooked", addr)
return None
return self._sim_procedures[addr]
def unhook(self, addr):
"""
Remove a hook.
:param addr: The address of the hook.
"""
if not self.is_hooked(addr):
l.warning("Address %#x not hooked", addr)
return
del self._sim_procedures[addr]
def hook_symbol(self, symbol_name, obj, kwargs=None):
"""
Resolve a dependency in a binary. Uses the "externs object" (project.loader.extern_object) to
allocate an address for a new symbol in the binary, and then tells the loader to re-perform
the relocation process, taking into account the new symbol.
:param symbol_name: The name of the dependency to resolve.
:param obj: The thing with which to satisfy the dependency.
:param kwargs: If you provide a SimProcedure for the hook, these are the keyword
arguments that will be passed to the procedure's `run` method
eventually.
:returns: The address of the new symbol.
:rtype: int
"""
if type(obj) in (int, long):
# this is pretty intensely sketchy
l.info("Instructing the loader to re-point symbol %s at address %#x", symbol_name, obj)
self.loader.provide_symbol(self.loader.extern_object, symbol_name, AT.from_mva(obj, self.loader.extern_object).to_rva())
return obj
sym = self.loader.find_symbol(symbol_name)
if sym is None:
l.error("Could not find symbol %s", symbol_name)
return None
hook_addr, _ = self._simos.prepare_function_symbol(symbol_name, basic_addr=sym.rebased_addr)
if self.is_hooked(hook_addr):
l.warning("Re-hooking symbol %s", symbol_name)
self.unhook(hook_addr)
self.hook(hook_addr, obj, kwargs=kwargs)
return hook_addr
def hook_symbol_batch(self, hooks):
"""
Hook many symbols at once.
:param dict hooks: A mapping from symbol name to hook
"""
if once("hook_symbol_batch warning"):
l.critical("Due to advances in technology, hook_symbol_batch is no longer necessary for performance. Please use hook_symbol several times.")
for x in hooks:
self.hook_symbol(x, hooks[x])
def is_symbol_hooked(self, symbol_name):
"""
Check if a symbol is already hooked.
:param str symbol_name: Name of the symbol.
:return: True if the symbol can be resolved and is hooked, False otherwise.
:rtype: bool
"""
sym = self.loader.find_symbol(symbol_name)
if sym is None:
l.warning("Could not find symbol %s", symbol_name)
return False
hook_addr, _ = self._simos.prepare_function_symbol(symbol_name, basic_addr=sym.rebased_addr)
return self.is_hooked(hook_addr)
#
# A convenience API (in the style of triton and manticore) for symbolic execution.
#
def execute(self, *args, **kwargs):
"""
This function is a symbolic execution helper in the simple style
supported by triton and manticore. It designed to be run after
setting up hooks (see Project.hook), in which the symbolic state
can be checked.
This function can be run in three different ways:
- When run with no parameters, this function begins symbolic execution
from the entrypoint.
- It can also be run with a "state" parameter specifying a SimState to
begin symbolic execution from.
- Finally, it can accept any arbitrary keyword arguments, which are all
passed to project.factory.full_init_state.
If symbolic execution finishes, this function returns the resulting
simulation manager.
"""
if args:
state = args[0]
else:
state = self.factory.full_init_state(**kwargs)
pg = self.factory.simgr(state)
self._executing = True
return pg.step(until=lambda lpg: not self._executing)
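    # Illustrative usage only (added as a sketch, not part of the original module); the three call
    # styles described in the docstring above, assuming a hypothetical `proj = angr.Project(...)`
    # and that `full_init_state` accepts an `args` keyword:
    #
    #     simgr = proj.execute()                              # start from the entry point
    #     simgr = proj.execute(proj.factory.entry_state())    # start from an explicit state
    #     simgr = proj.execute(args=['./a.out', 'input'])     # kwargs forwarded to full_init_state
    #
    # Any hook may call proj.terminate_execution() to stop stepping early.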
def terminate_execution(self):
"""
Terminates a symbolic execution that was started with Project.execute().
"""
self._executing = False
#
# Private methods related to hooking
#
def _hook_decorator(self, addr, length=0, kwargs=None):
"""
Return a function decorator that allows easy hooking. Please refer to hook() for its usage.
:return: The function decorator.
"""
        def hook_decorator(func):
            self.hook(addr, func, length=length, kwargs=kwargs)
            return func  # hand back the original function so the decorated name stays bound to it
        return hook_decorator
#
# Pickling
#
def __getstate__(self):
try:
analyses, surveyors = self.analyses, self.surveyors
self.analyses, self.surveyors = None, None
return dict(self.__dict__)
finally:
self.analyses, self.surveyors = analyses, surveyors
def __setstate__(self, s):
self.__dict__.update(s)
self.analyses = Analyses(self)
self.surveyors = Surveyors(self)
from .errors import AngrError
from .factory import AngrObjectFactory
from .simos import SimOS, os_mapping
from .analyses.analysis import Analyses
from .surveyors import Surveyors
from .knowledge_base import KnowledgeBase
from .engines import SimEngineFailure, SimEngineSyscall, SimEngineProcedure, SimEngineVEX, SimEngineUnicorn, SimEngineHook
from .misc.ux import once
from .procedures import SIM_PROCEDURES, SIM_LIBRARIES
|
{
"content_hash": "d0f6113356a8a65244ac5f9e8cbf1c54",
"timestamp": "",
"source": "github",
"line_count": 544,
"max_line_length": 152,
"avg_line_length": 43.41360294117647,
"alnum_prop": 0.6130753270948893,
"repo_name": "Ruide/angr-dev",
"id": "b411f1a7c258a46a12975dc4545a4962de602b8d",
"size": "23617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/angr/project.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "2962"
},
{
"name": "Batchfile",
"bytes": "4542"
},
{
"name": "C",
"bytes": "18511978"
},
{
"name": "C++",
"bytes": "295194"
},
{
"name": "Haskell",
"bytes": "2192"
},
{
"name": "Makefile",
"bytes": "12558"
},
{
"name": "OpenEdge ABL",
"bytes": "2415"
},
{
"name": "Perl",
"bytes": "9974"
},
{
"name": "Python",
"bytes": "5611416"
},
{
"name": "Shell",
"bytes": "41791"
}
],
"symlink_target": ""
}
|
import argparse
import errno
import hashlib
import os
import shutil
import subprocess
import sys
import tempfile
from io import StringIO
from lib.config import PLATFORM, get_target_arch, get_chromedriver_version, \
get_env_var, s3_config, get_zip_name
from lib.util import electron_gyp, execute, get_electron_version, \
parse_version, scoped_cwd, s3put
from lib.github import GitHub
ELECTRON_REPO = 'brave/electron'
ELECTRON_VERSION = get_electron_version()
PROJECT_NAME = electron_gyp()['project_name%']
PRODUCT_NAME = electron_gyp()['product_name%']
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
DIST_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION)
SYMBOLS_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION, 'symbols')
DSYM_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION, 'dsym')
PDB_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION, 'pdb')
def main():
args = parse_args()
if not args.publish_release:
if not dist_newer_than_head():
run_python_script('create-dist.py')
build_version = get_electron_build_version()
if not ELECTRON_VERSION.startswith(build_version):
error = 'Tag name ({0}) should match build version ({1})\n'.format(
ELECTRON_VERSION, build_version)
sys.stderr.write(error)
sys.stderr.flush()
return 1
github = GitHub(auth_token())
releases = github.repos(ELECTRON_REPO).releases.get()
tag_exists = False
for release in releases:
if not release['draft'] and release['tag_name'] == args.version:
tag_exists = True
break
release = create_or_get_release_draft(github, releases, args.version,
tag_exists)
if args.publish_release:
# Upload the Node SHASUMS*.txt.
run_python_script('upload-node-checksums.py', '-v', ELECTRON_VERSION)
# Upload the index.json.
run_python_script('upload-index-json.py')
# Create and upload the Electron SHASUMS*.txt
release_electron_checksums(github, release)
# Press the publish button.
publish_release(github, release['id'])
# Do not upload other files when passed "-p".
return
# Upload Electron with GitHub Releases API.
upload_electron(github, release, os.path.join(DIST_DIR, DIST_NAME))
upload_electron(github, release, os.path.join(DIST_DIR, SYMBOLS_NAME))
if PLATFORM == 'darwin':
upload_electron(github, release, os.path.join(DIST_DIR, DSYM_NAME))
elif PLATFORM == 'win32':
upload_electron(github, release, os.path.join(DIST_DIR, PDB_NAME))
# Upload free version of ffmpeg.
ffmpeg = get_zip_name('ffmpeg', ELECTRON_VERSION)
upload_electron(github, release, os.path.join(DIST_DIR, ffmpeg))
# Upload chromedriver and mksnapshot for minor version update.
if parse_version(args.version)[2] == '0':
chromedriver = get_zip_name('chromedriver', get_chromedriver_version())
upload_electron(github, release, os.path.join(DIST_DIR, chromedriver))
mksnapshot = get_zip_name('mksnapshot', ELECTRON_VERSION)
upload_electron(github, release, os.path.join(DIST_DIR, mksnapshot))
if PLATFORM == 'win32' and not tag_exists:
# Upload PDBs to Windows symbol server.
run_python_script('upload-windows-pdb.py')
# Upload node headers.
run_python_script('upload-node-headers.py', '-v', args.version)
def parse_args():
parser = argparse.ArgumentParser(description='upload distribution file')
parser.add_argument('-v', '--version', help='Specify the version',
default=ELECTRON_VERSION)
parser.add_argument('-p', '--publish-release',
help='Publish the release',
action='store_true')
return parser.parse_args()
def run_python_script(script, *args):
script_path = os.path.join(SOURCE_ROOT, 'script', script)
return execute([sys.executable, script_path] + list(args))
def get_electron_build_version():
if get_target_arch() == 'arm' or os.environ.has_key('CI'):
# In CI we just build as told.
return ELECTRON_VERSION
if PLATFORM == 'darwin':
electron = os.path.join(SOURCE_ROOT, 'out', 'R',
'{0}.app'.format(PRODUCT_NAME), 'Contents',
'MacOS', PRODUCT_NAME)
elif PLATFORM == 'win32':
electron = os.path.join(SOURCE_ROOT, 'out', 'R',
'{0}.exe'.format(PROJECT_NAME))
else:
electron = os.path.join(SOURCE_ROOT, 'out', 'R', PROJECT_NAME)
return subprocess.check_output([electron, '--version']).strip()
def dist_newer_than_head():
with scoped_cwd(SOURCE_ROOT):
try:
head_time = subprocess.check_output(['git', 'log', '--pretty=format:%at',
'-n', '1']).strip()
dist_time = os.path.getmtime(os.path.join(DIST_DIR, DIST_NAME))
except OSError as e:
if e.errno != errno.ENOENT:
raise
return False
return dist_time > int(head_time)
def get_text_with_editor(name):
editor = os.environ.get('EDITOR', 'nano')
initial_message = '\n# Please enter the body of your release note for %s.' \
% name
t = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False)
t.write(initial_message)
t.close()
subprocess.call([editor, t.name])
text = ''
for line in open(t.name, 'r'):
if len(line) == 0 or line[0] != '#':
text += line
os.unlink(t.name)
return text
def create_or_get_release_draft(github, releases, tag, tag_exists):
# Search for existing draft.
for release in releases:
if release['draft']:
return release
if tag_exists:
tag = 'do-not-publish-me'
return create_release_draft(github, tag)
def create_release_draft(github, tag):
name = '{0} {1}'.format(PROJECT_NAME, tag)
if os.environ.has_key('CI'):
body = '(placeholder)'
else:
body = get_text_with_editor(name)
if body == '':
sys.stderr.write('Quit due to empty release note.\n')
sys.exit(0)
data = dict(tag_name=tag, name=name, body=body, draft=True)
r = github.repos(ELECTRON_REPO).releases.post(data=data)
return r
def release_electron_checksums(github, release):
checksums = run_python_script('merge-electron-checksums.py',
'-v', ELECTRON_VERSION)
upload_io_to_github(github, release, 'SHASUMS256.txt',
StringIO(checksums.decode('utf-8')), 'text/plain')
def upload_electron(github, release, file_path):
# Delete the original file before uploading in CI.
filename = os.path.basename(file_path)
if os.environ.has_key('CI'):
try:
for asset in release['assets']:
if asset['name'] == filename:
github.repos(ELECTRON_REPO).releases.assets(asset['id']).delete()
except Exception:
pass
# Upload the file.
with open(file_path, 'rb') as f:
upload_io_to_github(github, release, filename, f, 'application/zip')
# Upload the checksum file.
upload_sha256_checksum(release['tag_name'], file_path)
# Upload ARM assets without the v7l suffix for backwards compatibility
# TODO Remove for 2.0
if 'armv7l' in filename:
arm_filename = filename.replace('armv7l', 'arm')
arm_file_path = os.path.join(os.path.dirname(file_path), arm_filename)
shutil.copy2(file_path, arm_file_path)
upload_electron(github, release, arm_file_path)
def upload_io_to_github(github, release, name, io, content_type):
params = {'name': name}
headers = {'Content-Type': content_type}
github.repos(ELECTRON_REPO).releases(release['id']).assets.post(
params=params, headers=headers, data=io, verify=False)
def upload_sha256_checksum(version, file_path):
bucket, access_key, secret_key = s3_config()
checksum_path = '{}.sha256sum'.format(file_path)
sha256 = hashlib.sha256()
with open(file_path, 'rb') as f:
sha256.update(f.read())
filename = os.path.basename(file_path)
with open(checksum_path, 'w') as checksum:
checksum.write('{} *{}'.format(sha256.hexdigest(), filename))
s3put(bucket, access_key, secret_key, os.path.dirname(checksum_path),
'atom-shell/tmp/{0}'.format(version), [checksum_path])
def publish_release(github, release_id):
data = dict(draft=False)
github.repos(ELECTRON_REPO).releases(release_id).patch(data=data)
def auth_token():
token = get_env_var('GITHUB_TOKEN')
message = ('Error: Please set the $ELECTRON_GITHUB_TOKEN '
'environment variable, which is your personal token')
assert token, message
return token
if __name__ == '__main__':
import sys
sys.exit(main())
|
{
"content_hash": "3946a13276d28f7b63800f3f0faf9d33",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 79,
"avg_line_length": 32.90874524714829,
"alnum_prop": 0.6577700751010976,
"repo_name": "posix4e/electron",
"id": "00b44c2429e82ad302fd55dcc7caa27e92f8db85",
"size": "8678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script/upload.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6658"
},
{
"name": "C++",
"bytes": "2237308"
},
{
"name": "HTML",
"bytes": "2862"
},
{
"name": "JavaScript",
"bytes": "488614"
},
{
"name": "Objective-C",
"bytes": "12487"
},
{
"name": "Objective-C++",
"bytes": "147105"
},
{
"name": "PowerShell",
"bytes": "99"
},
{
"name": "Python",
"bytes": "76046"
},
{
"name": "Shell",
"bytes": "2593"
}
],
"symlink_target": ""
}
|
from ....datasources.meta import dicts, filters
from ....dependencies import DependentSet
from ....features.meta import aggregators
class Revision(DependentSet):
def __init__(self, name, revision_datasources):
super().__init__(name)
self.datasources = revision_datasources
self.dict_words = aggregators.len(self.datasources.dict_words)
"`int` : A count of the number of dictionary words in the revision"
self.non_dict_words = \
aggregators.len(self.datasources.non_dict_words)
"`int` : A count of the number of non-dictionary words in the revision"
if hasattr(self.datasources, 'parent'):
self.parent = Revision(name + ".parent", self.datasources.parent)
"""
:class:`~revscoring.languages.features.dictionary.Revision` : The
parent revision
"""
if hasattr(self.datasources, 'diff'):
self.diff = Diff(name + ".diff", self.datasources.diff)
"""
:class:`~revscoring.languages.features.dictionary.Diff` : The
diff between the parent and current revision.
"""
class Diff(DependentSet):
def __init__(self, name, diff_datasources):
super().__init__(name)
self.datasources = diff_datasources
# Simple counts (based on wikitext.edit.diff)
self.dict_words_added = \
aggregators.len(self.datasources.dict_words_added)
"`int` : A count of the number of dictionary words added"
self.dict_words_removed = \
aggregators.len(self.datasources.dict_words_removed)
"`int` : A count of the number of dictionary words removed"
self.non_dict_words_added = \
aggregators.len(self.datasources.non_dict_words_added)
"`int` : A count of the number of non-dictionary words added"
self.non_dict_words_removed = \
aggregators.len(self.datasources.non_dict_words_removed)
"`int` : A count of the number of non-dictionary words removed"
# Word frequency deltas
dict_word_delta_values = dicts.values(self.datasources.dict_word_delta)
self.dict_word_delta_sum = aggregators.sum(
dict_word_delta_values,
name=name + ".dict_word_delta_sum",
returns=int
)
"`int` : The sum of word frequency deltas for dictionary words"
self.dict_word_delta_increase = aggregators.sum(
filters.positive(dict_word_delta_values),
name=name + ".dict_word_delta_increase",
returns=int
)
"""
`int` : The sum of word frequency delta increases for dictionary words
"""
self.dict_word_delta_decrease = aggregators.sum(
filters.negative(dict_word_delta_values),
name=name + ".dict_word_delta_decrease",
returns=int
)
"""
`int` : The sum of word frequency delta decreases for dictionary
words
"""
non_dict_word_delta_values = \
dicts.values(self.datasources.non_dict_word_delta)
self.non_dict_word_delta_sum = aggregators.sum(
non_dict_word_delta_values,
name=name + ".non_dict_word_delta_sum",
returns=int
)
"`int` : The sum of word frequency deltas for non-dictionary words"
self.non_dict_word_delta_increase = aggregators.sum(
filters.positive(non_dict_word_delta_values),
name=name + ".non_dict_word_delta_increase",
returns=int
)
"""
`int` : The sum of word frequency delta increases for non-dictionary
words
"""
self.non_dict_word_delta_decrease = aggregators.sum(
filters.negative(non_dict_word_delta_values),
name=name + ".non_dict_word_delta_decrease",
returns=int
)
"""
`int` : The sum of word frequency delta decreases for non-dictionary
words
"""
# Proportional word frequency deltas
dict_word_prop_delta_values = \
dicts.values(self.datasources.dict_word_prop_delta)
self.dict_word_prop_delta_sum = aggregators.sum(
dict_word_prop_delta_values,
name=name + ".dict_word_prop_delta_sum"
)
"""
`float` : The sum of word frequency proportional delta for
dictionary words
"""
self.dict_word_prop_delta_increase = aggregators.sum(
filters.positive(dict_word_prop_delta_values),
name=name + ".dict_word_prop_delta_increase"
)
"""
`float` : The sum of word frequency proportional delta increases for
dictionary words
"""
self.dict_word_prop_delta_decrease = aggregators.sum(
filters.negative(dict_word_prop_delta_values),
name=name + ".dict_word_prop_delta_decrease"
)
"""
`float` : The sum of word frequency proportional delta decreases for
dictionary words
"""
non_dict_word_prop_delta_values = \
dicts.values(self.datasources.non_dict_word_prop_delta)
self.non_dict_word_prop_delta_sum = aggregators.sum(
non_dict_word_prop_delta_values,
name=name + ".non_dict_word_prop_delta_sum"
)
"""
`float` : The sum of word frequency proportional delta for
non-dictionary words
"""
self.non_dict_word_prop_delta_increase = aggregators.sum(
filters.positive(non_dict_word_prop_delta_values),
name=name + ".non_dict_word_prop_delta_increase"
)
"""
`float` : The sum of word frequency proportional delta increase for
non-dictionary words
"""
self.non_dict_word_prop_delta_decrease = aggregators.sum(
filters.negative(non_dict_word_prop_delta_values),
name=name + ".non_dict_word_prop_delta_decrease"
)
"""
`float` : The sum of word frequency proportional delta decrease for
non-dictionary words
"""
|
{
"content_hash": "79b07a2020b540935662540b509624cf",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 79,
"avg_line_length": 38.91139240506329,
"alnum_prop": 0.5936890045543266,
"repo_name": "yafeunteun/wikipedia-spam-classifier",
"id": "f12f37b3aef8af4eda127fb5254af544c5f5c44f",
"size": "6148",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "revscoring/revscoring/languages/features/dictionary/features.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7262"
},
{
"name": "Jupyter Notebook",
"bytes": "971575"
},
{
"name": "Makefile",
"bytes": "7446"
},
{
"name": "Python",
"bytes": "796831"
},
{
"name": "Shell",
"bytes": "132"
}
],
"symlink_target": ""
}
|
from typing import Any, Optional
import pandas as pd
from pandas_profiling.report.presentation.core.item_renderer import ItemRenderer
class Sample(ItemRenderer):
def __init__(
self, name: str, sample: pd.DataFrame, caption: Optional[str] = None, **kwargs
):
super().__init__(
"sample", {"sample": sample, "caption": caption}, name=name, **kwargs
)
def __repr__(self) -> str:
return "Sample"
def render(self) -> Any:
raise NotImplementedError()
|
{
"content_hash": "aa5cb05ad242bd28605dde410249dd14",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 86,
"avg_line_length": 25.9,
"alnum_prop": 0.6196911196911197,
"repo_name": "JosPolfliet/pandas-profiling",
"id": "597c7d24801a5ef2121c9b2327542be9e7d3cc3a",
"size": "518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pandas_profiling/report/presentation/core/sample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "24527"
},
{
"name": "PowerShell",
"bytes": "3111"
},
{
"name": "Python",
"bytes": "61100"
}
],
"symlink_target": ""
}
|
import copy
from oslo.config import cfg
cfg.CONF.import_opt('max_resources_per_stack', 'heat.common.config')
from heat.common import exception
from heat.common import template_format
from heat.common import urlfetch
from heat.db import api as db_api
from heat.engine import parser
from heat.engine import resource
from heat.engine import scheduler
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
from heat.tests.common import HeatTestCase
class NestedStackTest(HeatTestCase):
test_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: https://server.test/the.template
Parameters:
KeyName: foo
'''
nested_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
KeyName:
Type: String
Outputs:
Foo:
Value: bar
'''
update_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
KeyName:
Type: String
Outputs:
Bar:
Value: foo
'''
def setUp(self):
super(NestedStackTest, self).setUp()
self.m.StubOutWithMock(urlfetch, 'get')
utils.setup_dummy_db()
def create_stack(self, template):
t = template_format.parse(template)
stack = self.parse_stack(t)
stack.create()
self.assertEqual(stack.state, (stack.CREATE, stack.COMPLETE))
return stack
def parse_stack(self, t):
ctx = utils.dummy_context('test_username', 'aaaa', 'password')
stack_name = 'test_stack'
tmpl = parser.Template(t)
stack = parser.Stack(ctx, stack_name, tmpl)
stack.store()
return stack
def test_nested_stack_create(self):
urlfetch.get('https://server.test/the.template').MultipleTimes().\
AndReturn(self.nested_template)
self.m.ReplayAll()
stack = self.create_stack(self.test_template)
rsrc = stack['the_nested']
nested_name = utils.PhysName(stack.name, 'the_nested')
self.assertEqual(nested_name, rsrc.physical_resource_name())
arn_prefix = ('arn:openstack:heat::aaaa:stacks/%s/' %
rsrc.physical_resource_name())
self.assertTrue(rsrc.FnGetRefId().startswith(arn_prefix))
self.assertEqual('bar', rsrc.FnGetAtt('Outputs.Foo'))
self.assertRaises(
exception.InvalidTemplateAttribute, rsrc.FnGetAtt, 'Foo')
self.assertRaises(
exception.InvalidTemplateAttribute, rsrc.FnGetAtt, 'Outputs.Bar')
self.assertRaises(
exception.InvalidTemplateAttribute, rsrc.FnGetAtt, 'Bar')
rsrc.delete()
self.assertTrue(rsrc.FnGetRefId().startswith(arn_prefix))
self.m.VerifyAll()
def test_nested_stack_create_exceeds_resource_limit(self):
cfg.CONF.set_override('max_resources_per_stack', 1)
resource._register_class('GenericResource',
generic_rsrc.GenericResource)
urlfetch.get('https://server.test/the.template').MultipleTimes().\
AndReturn('''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
KeyName:
Type: String
Resources:
NestedResource:
Type: GenericResource
Outputs:
Foo:
Value: bar
''')
self.m.ReplayAll()
t = template_format.parse(self.test_template)
stack = self.parse_stack(t)
stack.create()
self.assertEquals(stack.state, (stack.CREATE, stack.FAILED))
self.assertIn('Maximum resources per stack exceeded',
stack.status_reason)
self.m.VerifyAll()
def test_nested_stack_create_equals_resource_limit(self):
cfg.CONF.set_override('max_resources_per_stack', 2)
resource._register_class('GenericResource',
generic_rsrc.GenericResource)
urlfetch.get('https://server.test/the.template').MultipleTimes().\
AndReturn('''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
KeyName:
Type: String
Resources:
NestedResource:
Type: GenericResource
Outputs:
Foo:
Value: bar
''')
self.m.ReplayAll()
t = template_format.parse(self.test_template)
stack = self.parse_stack(t)
stack.create()
self.assertEquals(stack.state, (stack.CREATE, stack.COMPLETE))
self.assertIn('NestedResource',
stack.resources['the_nested'].nested().resources)
self.m.VerifyAll()
def test_nested_stack_update(self):
urlfetch.get('https://server.test/the.template').MultipleTimes().\
AndReturn(self.nested_template)
urlfetch.get('https://server.test/new.template').MultipleTimes().\
AndReturn(self.update_template)
self.m.ReplayAll()
stack = self.create_stack(self.test_template)
rsrc = stack['the_nested']
original_nested_id = rsrc.resource_id
t = template_format.parse(self.test_template)
new_res = copy.deepcopy(t['Resources']['the_nested'])
new_res['Properties']['TemplateURL'] = (
'https://server.test/new.template')
prop_diff = {'TemplateURL': 'https://server.test/new.template'}
updater = rsrc.handle_update(new_res, {}, prop_diff)
updater.run_to_completion()
self.assertEqual(True, rsrc.check_update_complete(updater))
# Expect the physical resource name staying the same after update,
# so that the nested was actually updated instead of replaced.
self.assertEqual(original_nested_id, rsrc.resource_id)
db_nested = db_api.stack_get(stack.context,
rsrc.resource_id)
# Owner_id should be preserved during the update process.
self.assertEqual(stack.id, db_nested.owner_id)
self.assertEqual('foo', rsrc.FnGetAtt('Outputs.Bar'))
self.assertRaises(
exception.InvalidTemplateAttribute, rsrc.FnGetAtt, 'Foo')
self.assertRaises(
exception.InvalidTemplateAttribute, rsrc.FnGetAtt, 'Outputs.Foo')
self.assertRaises(
exception.InvalidTemplateAttribute, rsrc.FnGetAtt, 'Bar')
rsrc.delete()
self.m.VerifyAll()
def test_nested_stack_update_equals_resource_limit(self):
resource._register_class('GenericResource',
generic_rsrc.GenericResource)
urlfetch.get('https://server.test/the.template').MultipleTimes().\
AndReturn(self.nested_template)
urlfetch.get('https://server.test/new.template').MultipleTimes().\
AndReturn('''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
KeyName:
Type: String
Resources:
NestedResource:
Type: GenericResource
Outputs:
Bar:
Value: foo
''')
self.m.ReplayAll()
stack = self.create_stack(self.test_template)
cfg.CONF.set_override('max_resources_per_stack', 2)
rsrc = stack['the_nested']
original_nested_id = rsrc.resource_id
t = template_format.parse(self.test_template)
new_res = copy.deepcopy(t['Resources']['the_nested'])
new_res['Properties']['TemplateURL'] = (
'https://server.test/new.template')
prop_diff = {'TemplateURL': 'https://server.test/new.template'}
updater = rsrc.handle_update(new_res, {}, prop_diff)
updater.run_to_completion()
self.assertEqual(True, rsrc.check_update_complete(updater))
self.assertEqual('foo', rsrc.FnGetAtt('Outputs.Bar'))
rsrc.delete()
self.m.VerifyAll()
def test_nested_stack_update_exceeds_limit(self):
resource._register_class('GenericResource',
generic_rsrc.GenericResource)
urlfetch.get('https://server.test/the.template').MultipleTimes().\
AndReturn(self.nested_template)
urlfetch.get('https://server.test/new.template').MultipleTimes().\
AndReturn('''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
KeyName:
Type: String
Resources:
NestedResource:
Type: GenericResource
Outputs:
Bar:
Value: foo
''')
self.m.ReplayAll()
stack = self.create_stack(self.test_template)
cfg.CONF.set_override('max_resources_per_stack', 1)
rsrc = stack['the_nested']
original_nested_id = rsrc.resource_id
t = template_format.parse(self.test_template)
new_res = copy.deepcopy(t['Resources']['the_nested'])
new_res['Properties']['TemplateURL'] = (
'https://server.test/new.template')
prop_diff = {'TemplateURL': 'https://server.test/new.template'}
ex = self.assertRaises(exception.RequestLimitExceeded,
rsrc.handle_update, new_res, {}, prop_diff)
self.assertIn(exception.StackResourceLimitExceeded.message,
str(ex))
rsrc.delete()
self.m.VerifyAll()
def test_nested_stack_suspend_resume(self):
urlfetch.get('https://server.test/the.template').AndReturn(
self.nested_template)
self.m.ReplayAll()
stack = self.create_stack(self.test_template)
rsrc = stack['the_nested']
scheduler.TaskRunner(rsrc.suspend)()
self.assertEqual(rsrc.state, (rsrc.SUSPEND, rsrc.COMPLETE))
scheduler.TaskRunner(rsrc.resume)()
self.assertEqual(rsrc.state, (rsrc.RESUME, rsrc.COMPLETE))
rsrc.delete()
self.m.VerifyAll()
def test_nested_stack_three_deep(self):
root_template = '''
HeatTemplateFormat: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth1.template'
'''
depth1_template = '''
HeatTemplateFormat: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth2.template'
'''
depth2_template = '''
HeatTemplateFormat: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth3.template'
Parameters:
KeyName: foo
'''
urlfetch.get(
'https://server.test/depth1.template').AndReturn(
depth1_template)
urlfetch.get(
'https://server.test/depth2.template').AndReturn(
depth2_template)
urlfetch.get(
'https://server.test/depth3.template').AndReturn(
self.nested_template)
self.m.ReplayAll()
self.create_stack(root_template)
self.m.VerifyAll()
def test_nested_stack_four_deep(self):
root_template = '''
HeatTemplateFormat: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth1.template'
'''
depth1_template = '''
HeatTemplateFormat: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth2.template'
'''
depth2_template = '''
HeatTemplateFormat: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth3.template'
'''
depth3_template = '''
HeatTemplateFormat: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth4.template'
Parameters:
KeyName: foo
'''
urlfetch.get(
'https://server.test/depth1.template').AndReturn(
depth1_template)
urlfetch.get(
'https://server.test/depth2.template').AndReturn(
depth2_template)
urlfetch.get(
'https://server.test/depth3.template').AndReturn(
depth3_template)
urlfetch.get(
'https://server.test/depth4.template').AndReturn(
self.nested_template)
self.m.ReplayAll()
t = template_format.parse(root_template)
stack = self.parse_stack(t)
stack.create()
self.assertEqual((stack.CREATE, stack.FAILED), stack.state)
self.assertIn('Recursion depth exceeds', stack.status_reason)
self.m.VerifyAll()
def test_nested_stack_four_wide(self):
root_template = '''
HeatTemplateFormat: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth1.template'
Parameters:
KeyName: foo
Nested2:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth2.template'
Parameters:
KeyName: foo
Nested3:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth3.template'
Parameters:
KeyName: foo
Nested4:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth4.template'
Parameters:
KeyName: foo
'''
urlfetch.get(
'https://server.test/depth1.template').InAnyOrder().AndReturn(
self.nested_template)
urlfetch.get(
'https://server.test/depth2.template').InAnyOrder().AndReturn(
self.nested_template)
urlfetch.get(
'https://server.test/depth3.template').InAnyOrder().AndReturn(
self.nested_template)
urlfetch.get(
'https://server.test/depth4.template').InAnyOrder().AndReturn(
self.nested_template)
self.m.ReplayAll()
self.create_stack(root_template)
self.m.VerifyAll()
def test_nested_stack_infinite_recursion(self):
template = '''
HeatTemplateFormat: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/the.template'
'''
urlfetch.get(
'https://server.test/the.template').MultipleTimes().AndReturn(
template)
self.m.ReplayAll()
t = template_format.parse(template)
stack = self.parse_stack(t)
stack.create()
self.assertEqual(stack.state, (stack.CREATE, stack.FAILED))
self.assertIn('Recursion depth exceeds', stack.status_reason)
self.m.VerifyAll()
class ResDataResource(generic_rsrc.GenericResource):
def handle_create(self):
db_api.resource_data_set(self, "test", 'A secret value', True)
class ResDataNestedStackTest(NestedStackTest):
nested_template = '''
HeatTemplateFormatVersion: "2012-12-12"
Parameters:
KeyName:
Type: String
Resources:
nested_res:
Type: "res.data.resource"
Outputs:
Foo:
Value: bar
'''
def setUp(self):
resource._register_class("res.data.resource", ResDataResource)
super(ResDataNestedStackTest, self).setUp()
def test_res_data_delete(self):
urlfetch.get('https://server.test/the.template').AndReturn(
self.nested_template)
self.m.ReplayAll()
stack = self.create_stack(self.test_template)
res = stack['the_nested'].nested()['nested_res']
stack.delete()
self.assertEqual(stack.state, (stack.DELETE, stack.COMPLETE))
self.assertRaises(exception.NotFound, db_api.resource_data_get, res,
'test')
|
{
"content_hash": "1809d2ffe19198d4cea11d94d99f3d4c",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 77,
"avg_line_length": 32.0041067761807,
"alnum_prop": 0.619273707173104,
"repo_name": "JioCloud/heat",
"id": "b81f30d93bea5213eec538c106e122bb8435a9b0",
"size": "16205",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "heat/tests/test_nested_stack.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2807748"
},
{
"name": "Shell",
"bytes": "21618"
}
],
"symlink_target": ""
}
|
from exceptions import NotImplementedError, ValueError
from numpy import add, multiply, divide
from numpy.linalg import norm
from AbstractFunction import AbstractFunction
class AbstractFeasibleDirectionsDescender:
"""
Abstract class for feasible direction descend optimization method.
"""
def __init__(self, function = None):
"""
        Initialize descender with a differentiable function R^n -> R
and constraints.
"""
if not isinstance(function, AbstractFunction):
raise ValueError("Parameter {param} should be instance of {classname}," +
" but it has type {actual_type}".format(
param="function", classname="AbstractFunction",
actual_type=type(function)))
self.function = function
def _normalize(self, vector):
"""
Normalize vector: divide it by its norm.
"""
return divide(vector, norm(vector))
def _point_fits_constraints(self, point):
"""
        Check whether the point fits the set constraints.
"""
return self.constraints.point_fits_constraints(point)
def get_start_point(self):
"""
Get start point for descend algorithm.
"""
raise NotImplementedError
def get_descent_direction(self, current_x):
"""
        Determine a feasible descent search direction from the point current_x.
        A direction is feasible if there is a scalar step > 0 such that
        x + t * direction fits the constraints for all 0 < t <= step.
"""
raise NotImplementedError
def get_step_length(self, x, direction):
"""
        Determine the step length for the next step of the descent algorithm such that
        f(x + step * direction) < f(x) and
        self._point_fits_constraints(x + step * direction) is True
"""
raise NotImplementedError
def termination_criterion(self, x):
"""
        Check whether the algorithm should terminate.
"""
raise NotImplementedError
def find_infimum(self):
"""
        Find the infimum.
        Uses the feasible directions descent algorithm.
"""
# Step 0 (Initialization)
x = [self.get_start_point()]
k = 0
current_x = x[0]
# Step 1 (Termination check)
while not self.termination_criterion(x):
# Step 2 (Direction determination)
direction = self.get_descent_direction(current_x)
# Step 3 (Step length determination)
step_length = self.get_step_length(current_x, direction)
# Step 4 (Update)
current_x = add(x[-1], multiply(direction, step_length)).tolist()
x.append(current_x)
k += 1
# Output
return x
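# Illustrative usage only (added as a sketch, not part of the original module); a minimal concrete
# subclass with entirely hypothetical choices: it assumes `self.function` exposes a `gradient`
# method, uses a fixed step length, and stops after a fixed number of iterations.
#
#     class SteepestDescender(AbstractFeasibleDirectionsDescender):
#         def get_start_point(self):
#             return [0.0, 0.0]
#         def get_descent_direction(self, current_x):
#             # steepest descent: move against the (assumed) gradient
#             return self._normalize(multiply(self.function.gradient(current_x), -1))
#         def get_step_length(self, x, direction):
#             return 0.1
#         def termination_criterion(self, x):
#             return len(x) > 100
#
#     # find_infimum() then returns the list of iterates x[0], x[1], ..., x[k]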
|
{
"content_hash": "7757a1334bd6d230b956bf108e198f81",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 30.11764705882353,
"alnum_prop": 0.653515625,
"repo_name": "char-lie/feasible_directions",
"id": "07223b5f0d45bb603d6411ff17caa7591929a2b9",
"size": "2560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/AbstractFeasibleDirectionsDescender.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13477"
},
{
"name": "Shell",
"bytes": "155"
}
],
"symlink_target": ""
}
|
import sys
import os
from win32com.shell import shellcon, shell
# Should be programdata files folder
ROOT_FOLDER = os.path.join(
shell.SHGetFolderPath(0, shellcon.CSIDL_COMMON_APPDATA, None, 0), "ope")
TMP_FOLDER = os.path.join(ROOT_FOLDER, "tmp")
LOG_FOLDER = os.path.join(TMP_FOLDER, "log")
SCREEN_SHOTS_FOLDER = os.path.join(TMP_FOLDER, "screen_shots")
GIT_FOLDER = os.path.join(ROOT_FOLDER, "ope_laptop_binaries")
BINARIES_FOLDER = os.path.join(ROOT_FOLDER, "Services")
STUDENT_DATA_FOLDER = os.path.join(ROOT_FOLDER, "student_data")
LOCK_SCREEN_WIDGET_FOLDER = os.path.join(TMP_FOLDER, "lock_screen_widget")
# The base function called
CMD_FUNCTION = ""
global APP_FOLDER
APP_FOLDER = None
def get_app_folder():
global APP_FOLDER
ret = ""
# Adjusted to save APP_FOLDER - issue #6 - app_folder not returning the same folder later in the app?
if APP_FOLDER is None:
# return the folder this app is running in.
# Logger.info("Application: get_app_folder called...")
if getattr(sys, 'frozen', False):
# Running in pyinstaller bundle
ret = sys._MEIPASS
# Logger.info("Application: sys._MEIPASS " + sys._MEIPASS)
# Adjust to use sys.executable to deal with issue #6 - path different if cwd done
# ret = os.path.dirname(sys.executable)
# Logger.info("AppPath: sys.executable " + ret)
else:
ret = os.path.dirname(os.path.abspath(__file__))
# Logger.info("AppPath: __file__ " + ret)
APP_FOLDER = ret
# Add this folder to the os path so that resources can be found more reliably
#text_dir = os.path.join(APP_FOLDER, "kivy\\core\\text")
#os.environ["PATH"] = os.environ["PATH"] + ";" + ret + ";" + text_dir
#print("-- ADJUSTING SYS PATH -- " + os.environ["PATH"])
else:
ret = APP_FOLDER
return ret
get_app_folder()
def get_dict_value(source_dict, key_name, default=""):
ret = default
if key_name in source_dict:
ret = source_dict[key_name]
return ret
def pop_force_flag(only_for=None):
# See if the -f is present in the params and remove it if it is.
ret = False
global CMD_FUNCTION
# only_for - just return param if it is the root call, otherwise False
if not only_for is None:
if CMD_FUNCTION != only_for:
return ret
for i in range(len(sys.argv)):
p = sys.argv[i]
if p.lower() == "-f":
#print("Found force flag!")
ret = True
sys.argv.remove(p)
break
return ret
def get_param(param_index=1, default_value="", only_for=None):
# Get the requested parameter or default value if non existent
ret = default_value
global CMD_FUNCTION
# only_for - just return param if it is the root call
if not only_for is None:
if CMD_FUNCTION != only_for:
return ret
    if len(sys.argv) >= param_index + 1:
ret = sys.argv[param_index]
return ret
def test_params():
force_on = pop_force_flag()
print("Force: " + str(force_on))
print("Params:")
for i in range(len(sys.argv)):
print("Param " + str(i) + "=>" + str(sys.argv[i]))
print(len(sys.argv))
return
if __name__ == "__main__":
# Test the params
test_params()
|
{
"content_hash": "748d79210d89213dd83d1f483c27ce26",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 105,
"avg_line_length": 31.5188679245283,
"alnum_prop": 0.6114935648009578,
"repo_name": "operepo/ope",
"id": "4502d89c240d4e410ad0c98985f0d8e184fe62b4",
"size": "3341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client_tools/svc/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AL",
"bytes": "40379"
},
{
"name": "Awk",
"bytes": "22377"
},
{
"name": "Batchfile",
"bytes": "81725"
},
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "200907"
},
{
"name": "CMake",
"bytes": "8149"
},
{
"name": "CSS",
"bytes": "103747"
},
{
"name": "Dockerfile",
"bytes": "47152"
},
{
"name": "Emacs Lisp",
"bytes": "90665"
},
{
"name": "HTML",
"bytes": "37373861"
},
{
"name": "Java",
"bytes": "916104"
},
{
"name": "JavaScript",
"bytes": "9115492"
},
{
"name": "Makefile",
"bytes": "7428"
},
{
"name": "NewLisp",
"bytes": "111955"
},
{
"name": "PHP",
"bytes": "5053"
},
{
"name": "Perl",
"bytes": "45839826"
},
{
"name": "PostScript",
"bytes": "192210"
},
{
"name": "PowerShell",
"bytes": "2870"
},
{
"name": "Procfile",
"bytes": "114"
},
{
"name": "Prolog",
"bytes": "248055"
},
{
"name": "Python",
"bytes": "9037346"
},
{
"name": "QML",
"bytes": "125647"
},
{
"name": "QMake",
"bytes": "7566"
},
{
"name": "Raku",
"bytes": "7174577"
},
{
"name": "Roff",
"bytes": "25148"
},
{
"name": "Ruby",
"bytes": "162111"
},
{
"name": "Shell",
"bytes": "2574077"
},
{
"name": "Smalltalk",
"bytes": "77031"
},
{
"name": "SystemVerilog",
"bytes": "83394"
},
{
"name": "Tcl",
"bytes": "7061959"
},
{
"name": "Vim script",
"bytes": "27705984"
},
{
"name": "kvlang",
"bytes": "60630"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0007_auto_20180624_1435'),
]
operations = [
migrations.AddField(
model_name='user',
name='reviewer',
field=models.EmailField(blank=True, max_length=254, null=True),
),
]
|
{
"content_hash": "4e02d5bc817410376dd08fc9ddfb7f21",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 75,
"avg_line_length": 22.5625,
"alnum_prop": 0.5817174515235457,
"repo_name": "pytexas/PyTexasBackend",
"id": "f221016099dc6fa7dea6f3dcd6359e75e47ba0a0",
"size": "411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conference/profiles/migrations/0008_user_reviewer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1680"
},
{
"name": "HTML",
"bytes": "4339"
},
{
"name": "JavaScript",
"bytes": "12017"
},
{
"name": "Python",
"bytes": "16982"
},
{
"name": "Shell",
"bytes": "273"
},
{
"name": "Vue",
"bytes": "10974"
}
],
"symlink_target": ""
}
|
from redis_store import redis_db
import tornadoredis
redis_pub = tornadoredis.Client()
redis_pub.connect()
class GameChannel(object):
@classmethod
def gen_key(cls):
counter = redis_db.incr('channel:count', 1)
counter = int(counter) + 1
channel_key = 'channel:' + str(counter)
return channel_key
@classmethod
def new_channel(cls, *args):
# TODO: Verify that player is not already in the list
# If exists, reject the call
channel_key = cls.gen_key()
for arg in args:
# subscribe to the channel
cls.subscribe(channel_key, arg)
return channel_key
@classmethod
def send_message(cls, channel_key, data, sender=None):
redis_db.publish(channel_key, data)
# for subscriber in redis_db.smembers(channel_key):
# if sender is None or subscriber != sender:
# ClientManager.send_message(subscriber, data)
@classmethod
def subscribe(cls, channel_key, subscriber_key):
redis_db.sadd(channel_key, subscriber_key)
@classmethod
def unsubscribe(cls, channel_key, subscriber_key):
redis_db.srem(channel_key, subscriber_key)
@classmethod
def close_channel(cls, channel_key):
redis_db.delete(channel_key)
|
{
"content_hash": "b9ab5aee87f1854ac212feccd6b53632",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 62,
"avg_line_length": 29.613636363636363,
"alnum_prop": 0.6362240982348427,
"repo_name": "sampathweb/game-server",
"id": "007340b78681d8ab9522701c43a432deff8d3a8b",
"size": "1346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ref/channel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "57"
},
{
"name": "Python",
"bytes": "38149"
}
],
"symlink_target": ""
}
|
from .kao_module import KaoModule
from .namespaced_class import NamespacedClass
|
{
"content_hash": "8698c09a7f5b93fcc2cd7d63372708f3",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 45,
"avg_line_length": 40,
"alnum_prop": 0.85,
"repo_name": "cloew/KaoModules",
"id": "cfbdce08076682a518696e4af189caa172ab5c44",
"size": "80",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kao_modules/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2238"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys
import os
from getopt import getopt
try:
import czipfile as zipfile
except ImportError:
import zipfile
import reststore
from reststore import config
def command_web():
from reststore import webapp
webapp.run()
return 0
def command_get(FilesClass, hexdigest):
fs = FilesClass()
try:
print(fs[hexdigest])
except KeyError:
print("Could not find a file for %s..." % hexdigest, file=sys.stderr)
return -1
return 0
def command_read(FilesClass, hexdigest, outfile=sys.stdout):
fs = FilesClass()
try:
with open(fs[hexdigest], 'rb') as f:
outfile.write(f.read())
except KeyError:
print("Could not find a file for %s..." % hexdigest, file=sys.stderr)
return -1
return 0
def command_put(FilesClass, filepaths):
for filepath in filepaths:
try:
with open(filepath, 'rb') as f:
data = f.read()
except Exception as exc:
print("Failed to read file %s - %s" % (filepath, exc),
file=sys.stderr)
return -1
fs = FilesClass()
hexdigest = fs.put(data)
print("%s: %s" % (hexdigest, filepath))
return 0
def command_unzip(FilesClass, filepath, password=None, flush_every=1000):
"""Add files from the zip file at filepath"""
if not zipfile.is_zipfile(filepath):
raise TypeError("Not a zipfile %s" % filepath)
fs = FilesClass()
zf = zipfile.ZipFile(filepath)
if password is not None:
zf.setpassword(password)
datalen = 0
for i, name in enumerate(zf.namelist()):
data = zf.read(name, pwd=password)
datalen += len(data)
hexdigest = fs.bulk_put(data)
print("%s: %s" % (hexdigest, name))
if i % flush_every == 0:
print("flush %s bytes of data..." % datalen)
txdatalen = fs.bulk_flush()
print("sent %s bytes of compressed data" % txdatalen)
print("flush ...")
fs.bulk_flush()
def command_list(FilesClass, select_from=0, select_to=-1):
fs = FilesClass()
for hexdigest in fs.select(select_from, select_to):
print(hexdigest)
return 0
def command_len(FilesClass):
fs = FilesClass()
print(len(fs))
return 0
defaults = {}
for interface, kwargs in config.values.items():
c = {"%s_%s" % (interface, key) : value for key, value in kwargs.items()}
defaults.update(c)
__help__ = """
NAME reststore - control over the reststore
SYNOPSIS
reststore [COMMAND]
Commands:
get [FILE-OPTIONS] [HEXDIGEST]
Return a filepath to the data behind hexdigest.
arguments
HEXDIGEST of the data to lookup in reststore.
read [FILE-OPTIONS] [HEXDIGEST] > stdout
        Attempt to retrieve a file and write it out to stdout. A check is
        made in the local reststore first; if the file is not available
        there, an attempt to read the file from the web reststore is made.
arguments
HEXDIGEST of the data to lookup in reststore.
put [FILE-OPTIONS] FILEPATH(s)
Put a file into the reststore.
arguments
Path(s) of files to be loaded into the reststore.
unzip [OPTIONS FILE-OPTIONS] ZIPFILE
Extra files from a zipfile straight into the reststore.
arguments
A path to the zip file to extract into the reststore.
options
--password=
Define a password for unzipping the zip file.
--flush=1000
Number of files to read into memory before flushing through
to the reststore.
list [OPTIONS FILE-OPTIONS]
list out hexdigests found in the reststore.
options
--select=[A:B]
List all of the hashes between A:B. Hashes are stored
chronologically. 0 is the first file inserted, -1 is the last
file inserted. i.e. select the last 1000 hexdigests -1001:-1
len [FILE-OPTIONS]
print out the number of files stored in the reststore.
web [OPTIONS FILE-OPTIONS] [[HOST:][PORT]]
Run the RESTful web app.
arguments
HOST:PORT defaults to %(webapp_host)s:%(webapp_port)s
options
--server=%(webapp_server)s
Choose the server adapter to use.
--debug=%(webapp_debug)s
Run in debug mode.
--quiet=%(webapp_quiet)s
                Run in quiet mode.
--proxy_requests=%(webapp_proxy_requests)s
If True, this web app will proxy requests through to
the authoritative server defined by the client uri.
File options:
--name=%(files_name)s
Set the default reststore name (i.e. domain or realm)
--hash_function=%(files_hash_function)s
Set the hash function to be used
--tune_size=%(files_tune_size)s
Set the approximate size the reststore may grow up to.
--root=%(files_root)s
Set the root for the reststore.
--assert_data_ok=%(files_assert_data_ok)s
Do extra checks when reading and writing data.
--weboff
This flag forces access to a local repository only.
--uri=%(client_uri)s
The uri to the upstream reststore web server.
""" % defaults
def main(args):
if not args:
print("No arguments provided" , file=sys.stderr)
return -1
if '-h' in args or '--help' in args:
print(__help__)
return 0
command = args.pop(0)
try:
opts, args = getopt(args, '', [
'server=', 'debug=', 'quiet=', 'proxy_requests=',
'name=', 'hash_function=', 'tune_size=', 'root=', 'assert_data_ok=',
'uri=',
'password=', 'flush=',
'select=',
'weboff',
])
except Exception as exc:
print("Getopt error: %s" % (exc), file=sys.stderr)
return -1
webapp_config = config.values['webapp']
files_config = config.values['files']
client_config = config.values['client']
list_command = dict()
unzip_command = dict()
FilesClass = reststore.FilesClient
for opt, arg in opts:
if opt in ['--server']:
webapp_config['server'] = arg
elif opt in ['--quiet']:
            webapp_config['quiet'] = arg.lower() != 'false'
elif opt in ['--debug']:
webapp_config['debug'] = arg.lower() != 'false'
elif opt in ['--proxy_requests']:
webapp_config['proxy_requests'] = arg.lower() != 'false'
elif opt in ['--name']:
files_config['name'] = arg
elif opt in ['--hash_function']:
files_config['hash_function'] = arg
elif opt in ['--tune_size']:
try:
files_config['tune_size'] = int(arg)
except ValueError:
print("%s is not a valid int" % arg, file=sys.stderr)
return -1
elif opt in ['--root']:
files_config['root'] = arg
elif opt in ['--assert_data_ok']:
files_config['assert_data_ok'] = arg.lower() != 'false'
elif opt in ['--password']:
unzip_command['password'] = arg
elif opt in ['--flush']:
try:
unzip_command['flush_every'] = int(arg)
except ValueError as err:
print("Failed to convert int for %s" % arg, file=sys.stderr)
return -1
elif opt in ['--select']:
try:
a, b = arg.split(':')
if not a:
a = 0
if not b:
b = -1
except Exception as err:
print("Failed to split select range %s" % arg, file=sys.stderr)
return -1
try:
list_command = dict(select_from = int(a),
select_to = int(b))
except ValueError as err:
print("Failed to convert int for %s" % arg, file=sys.stderr)
return -1
elif opt in ['--uri']:
client_config['uri'] = arg
elif opt in ['--weboff']:
FilesClass = reststore.Files
if command == 'web':
if args:
hostport = args[0]
host = webapp_config['host']
port = webapp_config['port']
if ':' in hostport:
host, p = hostport.split(':')
# may not have a port value
if p:
port = p
else:
port = hostport
try:
port = int(port)
except ValueError:
print("failed to convert port to int (%s)" % port)
return -1
webapp_config['host'] = host
webapp_config['port'] = port
return command_web()
elif command == 'get':
hexdigest = args[0]
return command_get(FilesClass, hexdigest)
elif command == 'read':
hexdigest = args[0]
return command_read(FilesClass, hexdigest)
elif command == 'put':
filepaths = args
return command_put(FilesClass, filepaths)
elif command == 'unzip':
filepath = args[0]
return command_unzip(FilesClass, filepath, **unzip_command)
elif command == 'list':
return command_list(FilesClass, **list_command)
elif command == 'len':
return command_len(FilesClass)
else:
print("%s is not a valid command " % command, file=sys.stderr)
return -1
entry = lambda :main(sys.argv[1:])
if __name__ == "__main__":
sys.exit(entry())
|
{
"content_hash": "72b4b1e0436c785cd54244c5b0c73419",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 80,
"avg_line_length": 31.12063492063492,
"alnum_prop": 0.5459553198000612,
"repo_name": "provoke-vagueness/reststore",
"id": "e8e242392d10e9d6126e26b1ea80aa9ee510e987",
"size": "9803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reststore/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37779"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, unicode_literals, division
import pygame, pygame.gfxdraw, padlib, copy, programs, demomap as Map, player as Player, misc, config
from pygame.locals import *
class main(object):
def __init__(self):
pygame.init()
pygame.display.gl_set_attribute(GL_MULTISAMPLEBUFFERS,1)
pygame.display.gl_set_attribute(GL_MULTISAMPLESAMPLES,4)
pygame.display.set_caption('Haxxit! The h4x0r game!')
config.display = pygame.display.set_mode((640, 480))
config.display.fill(pygame.color.THECOLORS['black'])
config.map = Map.map()
config.player = Player.Programs()
config.player.gen()
config.game = misc.game()
config.draw = misc.draw()
while not config.game.state.Quit in config.game.states:
##### Draw the game #####
config.display.fill(pygame.color.THECOLORS['black'])
# This generates the squares of the board or takes the squares from an array (if already generated)
config.map.genBoard()
# This executes before the level has started, ie when selecting what programs you're going to use
if config.game.state.Map.turn.Init in config.game.states:
config.map.genSpawns()
else: # This executes after the player has started the level, ie when actually engaging in battle
config.map.genGame()
##### Generate the sidebar #####
config.draw.sidebar()
##### Draw the attack, if attacking #####
config.draw.attack()
##### End drawing, update screen #####
pygame.display.update()
##### Run Events #####
config.player.events()
if __name__=='__main__':
main()
|
{
"content_hash": "e972b71ccdc3df423cb5584bb93c6bdc",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 111,
"avg_line_length": 44.875,
"alnum_prop": 0.6050139275766017,
"repo_name": "smartboyathome/Haxxit-Old",
"id": "344a94a3d119eff45c92b24bcd00cd28e05f6809",
"size": "1814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "57387"
}
],
"symlink_target": ""
}
|
COMMAND_STATEMENT = """
"""
TEMPLATE = """
{{ Paste to Kontakt Script Editor }}
on init
{{ primitive variables }}
declare $i
declare %ia[ 2 ]
declare ~r
declare ?ra[ 2 ]
declare @s
declare !sa[ 2 ]
{{ ui variable }}
declare ui_button $button
declare ui_knob $knob( 0, 1000, 1 )
declare ui_file_selector $selector
declare ui_label $label(1,1)
declare ui_level_meter $level_meter
declare ui_menu $menu
declare ui_slider $slider (0,100)
declare ui_switch $switch
declare ui_table %table[10](2,2,100)
declare ui_text_edit @text_edit
declare ui_value_edit $value_edit(0,100,$VALUE_EDIT_MODE_NOTE_NAMES)
declare ui_waveform $waveform(6,6)
declare ui_xy ?xy[4]
{{
ui variable names
$button
$knob
$selector
$label
$level_meter
$menu
$slider
$switch
%table[10]
@text_edit
$value_edit
$waveform
?xy[4]
}}
{testcode}
end on
on async_complete
{testcode}
end on
on controller
{testcode}
end on
on listener
{testcode}
end on
on note
{testcode}
end on
on persistence_changed
{testcode}
end on
on pgs_changed
{testcode}
end on
on poly_at
{testcode}
end on
on release
{testcode}
end on
on rpn
{testcode}
end on
on nrpn
{testcode}
end on
on ui_control( $knob )
{testcode}
end on
on ui_update
{testcode}
end on
"""
print( TEMPLATE.format(
testcode = COMMAND_STATEMENT
))
|
{
"content_hash": "58e202c564c39e6549494214ea45e48a",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 68,
"avg_line_length": 11.654545454545454,
"alnum_prop": 0.7082683307332294,
"repo_name": "r-koubou/KSPSyntaxParser",
"id": "39e77059445ddc4e3d57fb896c145b51c6bda3e9",
"size": "1378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/tools/GenCommandTestCode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "533945"
},
{
"name": "Python",
"bytes": "1763"
},
{
"name": "Shell",
"bytes": "367"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import time
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.fc_net import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver
# get_ipython().magic('matplotlib inline')
# plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
# plt.rcParams['image.interpolation'] = 'nearest'
# plt.rcParams['image.cmap'] = 'gray'
#
# # for auto-reloading external modules
# # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# get_ipython().magic('load_ext autoreload')
# get_ipython().magic('autoreload 2')
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# In[4]:
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in data.items():
print('%s: ' % k, v.shape)
# ## Batch normalization: Forward
# In the file `cs231n/layers.py`, implement the batch normalization forward pass in the function `batchnorm_forward`. Once you have done so, run the following to test your implementation.
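# A minimal sketch (added here for illustration, not the assignment solution) of what the
# train-time forward pass computes; `eps` and `momentum` are assumed to come from bn_param,
# and all names below are illustrative only:
#
# sample_mean = x.mean(axis=0)                           # per-feature mean, shape (D,)
# sample_var = x.var(axis=0)                             # per-feature variance, shape (D,)
# x_hat = (x - sample_mean) / np.sqrt(sample_var + eps)  # normalize each feature
# out = gamma * x_hat + beta                             # learnable scale and shift
# running_mean = momentum * running_mean + (1 - momentum) * sample_mean
# running_var = momentum * running_var + (1 - momentum) * sample_var
# # at test time, normalize with running_mean / running_var instead of batch statistics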
# In[11]:
# Check the training-time forward pass by checking means and variances
# of features both before and after batch normalization
# Simulate the forward pass for a two-layer network
# np.random.seed(231)
# N, D1, D2, D3 = 200, 50, 60, 3
# X = np.random.randn(N, D1)
# W1 = np.random.randn(D1, D2)
# W2 = np.random.randn(D2, D3)
# a = np.maximum(0, X.dot(W1)).dot(W2)
#
# print('Before batch normalization:')
# print(' means: ', a.mean(axis=0))
# print(' stds: ', a.std(axis=0))
#
# # Means should be close to zero and stds close to one
# print('After batch normalization (gamma=1, beta=0)')
# a_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})
# print(' mean: ', a_norm.mean(axis=0))
# print(' std: ', a_norm.std(axis=0))
#
# # Now means should be close to beta and stds close to gamma
# gamma = np.asarray([1.0, 2.0, 3.0])
# beta = np.asarray([11.0, 12.0, 13.0])
# a_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})
# print('After batch normalization (nontrivial gamma, beta)')
# print(' means: ', a_norm.mean(axis=0))
# print(' stds: ', a_norm.std(axis=0))
#
#
# # In[12]:
#
# # Check the test-time forward pass by running the training-time
# # forward pass many times to warm up the running averages, and then
# # checking the means and variances of activations after a test-time
# # forward pass.
# np.random.seed(231)
# N, D1, D2, D3 = 200, 50, 60, 3
# W1 = np.random.randn(D1, D2)
# W2 = np.random.randn(D2, D3)
#
# bn_param = {'mode': 'train'}
# gamma = np.ones(D3)
# beta = np.zeros(D3)
# for t in range(50):
# X = np.random.randn(N, D1)
# a = np.maximum(0, X.dot(W1)).dot(W2)
# batchnorm_forward(a, gamma, beta, bn_param)
# bn_param['mode'] = 'test'
# X = np.random.randn(N, D1)
# a = np.maximum(0, X.dot(W1)).dot(W2)
# a_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)
#
# # Means should be close to zero and stds close to one, but will be
# # noisier than training-time forward passes.
# print('After batch normalization (test-time):')
# print(' means: ', a_norm.mean(axis=0))
# print(' stds: ', a_norm.std(axis=0))
#
#
# # ## Batch Normalization: backward
# # Now implement the backward pass for batch normalization in the function `batchnorm_backward`.
# #
# # To derive the backward pass you should write out the computation graph for batch normalization and backprop through each of the intermediate nodes. Some intermediates may have multiple outgoing branches; make sure to sum gradients across these branches in the backward pass.
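# A minimal sketch (added here for illustration, not the assignment solution) of the graph-based
# backward pass, assuming `cache` holds (x, x_hat, sample_mean, sample_var, gamma, eps) from the
# forward pass; names are illustrative only:
#
# N = dout.shape[0]
# dbeta = dout.sum(axis=0)
# dgamma = (dout * x_hat).sum(axis=0)
# dx_hat = dout * gamma
# dvar = (dx_hat * (x - sample_mean) * -0.5 * (sample_var + eps) ** -1.5).sum(axis=0)
# dmean = (dx_hat * -1 / np.sqrt(sample_var + eps)).sum(axis=0) + dvar * (-2.0 / N) * (x - sample_mean).sum(axis=0)
# dx = dx_hat / np.sqrt(sample_var + eps) + dvar * 2.0 * (x - sample_mean) / N + dmean / N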
# #
# # Once you have finished, run the following to numerically check your backward pass.
#
# # In[29]:
#
# # Gradient check batchnorm backward pass
# np.random.seed(231)
# N, D = 4, 5
# x = 5 * np.random.randn(N, D) + 12
# gamma = np.random.randn(D)
# beta = np.random.randn(D)
# dout = np.random.randn(N, D)
#
# bn_param = {'mode': 'train'}
# fx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]
# fg = lambda a: batchnorm_forward(x, a, beta, bn_param)[0]
# fb = lambda b: batchnorm_forward(x, gamma, b, bn_param)[0]
#
# dx_num = eval_numerical_gradient_array(fx, x, dout)
# da_num = eval_numerical_gradient_array(fg, gamma.copy(), dout)
# db_num = eval_numerical_gradient_array(fb, beta.copy(), dout)
#
# _, cache = batchnorm_forward(x, gamma, beta, bn_param)
# dx, dgamma, dbeta = batchnorm_backward(dout, cache)
#
# print('dx error: ', rel_error(dx_num, dx))
# print('dgamma error: ', rel_error(da_num, dgamma))
# print('dbeta error: ', rel_error(db_num, dbeta))
#
#
# # ## Batch Normalization: alternative backward (OPTIONAL, +3 points extra credit)
# # In class we talked about two different implementations for the sigmoid backward pass. One strategy is to write out a computation graph composed of simple operations and backprop through all intermediate values. Another strategy is to work out the derivatives on paper. For the sigmoid function, it turns out that you can derive a very simple formula for the backward pass by simplifying gradients on paper.
# #
# # Surprisingly, it turns out that you can also derive a simple expression for the batch normalization backward pass if you work out derivatives on paper and simplify. After doing so, implement the simplified batch normalization backward pass in the function `batchnorm_backward_alt` and compare the two implementations by running the following. Your two implementations should compute nearly identical results, but the alternative implementation should be a bit faster.
# #
# # NOTE: This part of the assignment is entirely optional, but we will reward 3 points of extra credit if you can complete it.
#
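# An illustrative sketch (not the reference solution) of the collapsed
# expression one can reach by simplifying the backward pass on paper:
#   dx = gamma / (N * sqrt(var + eps)) * (N * dout - sum(dout) - x_hat * sum(dout * x_hat))
# The cache layout again follows `_batchnorm_forward_sketch` above, which is an
# assumption of this sketch rather than the assignment's cache format.
def _batchnorm_backward_alt_sketch(dout, cache):
    x, x_hat, mu, var, gamma, eps = cache
    N = dout.shape[0]
    dbeta = dout.sum(axis=0)
    dgamma = (dout * x_hat).sum(axis=0)
    dx = gamma / (N * np.sqrt(var + eps)) * (N * dout - dbeta - x_hat * dgamma)
    return dx, dgamma, dbeta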
# # In[ ]:
#
# np.random.seed(231)
# N, D = 100, 500
# x = 5 * np.random.randn(N, D) + 12
# gamma = np.random.randn(D)
# beta = np.random.randn(D)
# dout = np.random.randn(N, D)
#
# bn_param = {'mode': 'train'}
# out, cache = batchnorm_forward(x, gamma, beta, bn_param)
#
# t1 = time.time()
# dx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)
# t2 = time.time()
# dx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)
# t3 = time.time()
#
# print('dx difference: ', rel_error(dx1, dx2))
# print('dgamma difference: ', rel_error(dgamma1, dgamma2))
# print('dbeta difference: ', rel_error(dbeta1, dbeta2))
# print('speedup: %.2fx' % ((t2 - t1) / (t3 - t2)))
# ## Fully Connected Nets with Batch Normalization
# Now that you have a working implementation for batch normalization, go back to your `FullyConnectedNet` in the file `cs231n/classifiers/fc_net.py`. Modify your implementation to add batch normalization.
#
# Concretely, when the flag `use_batchnorm` is `True` in the constructor, you should insert a batch normalization layer before each ReLU nonlinearity. The outputs from the last layer of the network should not be normalized. Once you are done, run the following to gradient-check your implementation.
#
# HINT: You might find it useful to define an additional helper layer similar to those in the file `cs231n/layer_utils.py`. If you decide to do so, do it in the file `cs231n/classifiers/fc_net.py`.
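# A minimal sketch of the helper layer the hint suggests: an affine transform
# followed by batch normalization and a ReLU. It assumes the affine_*/relu_*
# functions from cs231n/layers.py and cs231n/layer_utils.py are importable in
# this namespace; the `_sketch` names are invented here so they do not clash
# with whatever helper you actually add to cs231n/classifiers/fc_net.py.
def affine_bn_relu_forward_sketch(x, w, b, gamma, beta, bn_param):
    a, fc_cache = affine_forward(x, w, b)
    a_bn, bn_cache = batchnorm_forward(a, gamma, beta, bn_param)
    out, relu_cache = relu_forward(a_bn)
    return out, (fc_cache, bn_cache, relu_cache)
def affine_bn_relu_backward_sketch(dout, cache):
    fc_cache, bn_cache, relu_cache = cache
    da_bn = relu_backward(dout, relu_cache)
    da, dgamma, dbeta = batchnorm_backward(da_bn, bn_cache)
    dx, dw, db = affine_backward(da, fc_cache)
    return dx, dw, db, dgamma, dbeta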
# In[33]:
np.random.seed(231)
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
print('Running check with reg = ', reg)
model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
reg=reg, weight_scale=5e-2, dtype=np.float64,
use_batchnorm=True)
loss, grads = model.loss(X, y)
print('Initial loss: ', loss)
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))
if reg == 0: print()
# # Batchnorm for deep networks
# Run the following to train a six-layer network on a subset of 1000 training examples both with and without batch normalization.
# In[30]:
np.random.seed(231)
# Try training a very deep net with batchnorm
hidden_dims = [100, 100, 100, 100, 100]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
weight_scale = 2e-2
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
bn_solver.train()
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
solver.train()
# Run the following to visualize the results from two networks trained above. You should find that using batch normalization helps the network to converge much faster.
# In[ ]:
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label='baseline')
plt.plot(bn_solver.loss_history, 'o', label='batchnorm')
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label='baseline')
plt.plot(bn_solver.train_acc_history, '-o', label='batchnorm')
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label='baseline')
plt.plot(bn_solver.val_acc_history, '-o', label='batchnorm')
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
# # Batch normalization and initialization
# We will now run a small experiment to study the interaction of batch normalization and weight initialization.
#
# The first cell will train 8-layer networks both with and without batch normalization using different scales for weight initialization. The second cell will plot training accuracy, validation set accuracy, and training loss as a function of the weight initialization scale.
# In[ ]:
np.random.seed(231)
# Try training a very deep net with batchnorm
hidden_dims = [50, 50, 50, 50, 50, 50, 50]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
bn_solvers = {}
solvers = {}
weight_scales = np.logspace(-4, 0, num=20)
for i, weight_scale in enumerate(weight_scales):
print('Running weight scale %d / %d' % (i + 1, len(weight_scales)))
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
bn_solver.train()
bn_solvers[weight_scale] = bn_solver
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
solver.train()
solvers[weight_scale] = solver
# In[ ]:
# Plot results of weight scale experiment
best_train_accs, bn_best_train_accs = [], []
best_val_accs, bn_best_val_accs = [], []
final_train_loss, bn_final_train_loss = [], []
for ws in weight_scales:
best_train_accs.append(max(solvers[ws].train_acc_history))
bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history))
best_val_accs.append(max(solvers[ws].val_acc_history))
bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history))
final_train_loss.append(np.mean(solvers[ws].loss_history[-100:]))
bn_final_train_loss.append(np.mean(bn_solvers[ws].loss_history[-100:]))
plt.subplot(3, 1, 1)
plt.title('Best val accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best val accuracy')
plt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')
plt.legend(ncol=2, loc='lower right')
plt.subplot(3, 1, 2)
plt.title('Best train accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best training accuracy')
plt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')
plt.legend()
plt.subplot(3, 1, 3)
plt.title('Final training loss vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Final training loss')
plt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')
plt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')
plt.legend()
plt.gca().set_ylim(1.0, 3.5)
plt.gcf().set_size_inches(10, 15)
plt.show()
# # Question:
# Describe the results of this experiment, and try to give a reason why the experiment gave the results that it did.
# # Answer:
#
|
{
"content_hash": "77cc9a54939f7d1ed20f03cfc089e36e",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 471,
"avg_line_length": 37.247956403269754,
"alnum_prop": 0.6788588149231894,
"repo_name": "DavidQiuChao/CS231nHomeWorks",
"id": "a21802818e27bc06874afc790de4e71827b02e5f",
"size": "16020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assignment2/BatchNormalization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2182712"
},
{
"name": "Python",
"bytes": "235723"
},
{
"name": "Shell",
"bytes": "3033"
}
],
"symlink_target": ""
}
|
from django import forms
from compactapp.models import Area
import xlrd
class SimpleAnalysis(forms.Form):
""""Provides the user with the Simple Analysis form."""
areas = Area.objects.values('name')
area_name = list(entry['name'] for entry in areas)
#Gets all the US States from the database and puts them in a list.
area_name.insert(0, 'US')
#Appends the value "US" as this is not in the database, but is a valid input.
area_number = list(range(0, len(area_name)))
area_list = zip(area_number, area_name)
#Zips all state names into a list of tuples, which is required to populate the dropdown menu.
industry_file_location = "FinalNAICStoIOMatch.xlsx"
workbook = xlrd.open_workbook(industry_file_location)
sheet = workbook.sheet_by_name('NAICS to Sector')
sector_ID = list(item for item in range(0,66))
sector_description = list(item for item in sheet.col_values(1))
sector_NAICS = list(item for item in sheet.col_values(3))
#Gets NAICS and industry names from the excel sheet provided by Eric.
industries = ["{} | NAICS Code {}".format(desc, int(NAICS)) for desc, NAICS in zip(sector_description, sector_NAICS)]
industries = zip(sector_ID, industries)
#Formats the menu item values for Industries and zips them into a list of tuples, which is required to populate the dropdown menu.
year_choices = [2015]
year_id = list(range(0, len(year_choices)))
# This is so unpythonic, but is more scalable for when we add more years of data. Again, required to populate the dropdown menu.
industry = forms.ChoiceField(choices=industries)
area = forms.ChoiceField(choices=area_list)
year = forms.ChoiceField(choices=zip(year_id, year_choices))
class AdvancedAnalysis(forms.Form):
areas = Area.objects.values('name')
area_name = list(entry['name'] for entry in areas)
area_name.insert(0, 'US')
area_number = list(range(0, len(area_name)))
area_list = zip(area_number, area_name)
    # Repetition of the area code above: the state list will not render if the zipped choices from SimpleAnalysis are reused, most likely because zip() returns a single-use iterator in Python 3 that is exhausted after the first render.
year_choices = [2015]
year_id = list(range(0, len(year_choices)))
naics = forms.CharField(min_length=3, max_length=6, initial='111')
area = forms.ChoiceField(choices=area_list)
year = forms.ChoiceField(choices=zip(year_id, year_choices))
revenue = forms.IntegerField(min_value=0, max_value=999999999999, initial=0)
employees = forms.IntegerField(min_value=0, max_value=999999, initial=0)
wages_annual = forms.IntegerField(min_value=0, max_value=999999999999, initial=0)
emp_based_lc = forms.BooleanField(required=False)
wage_based_lc = forms.BooleanField(required=False)
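# Illustrative sketch, not part of the original forms: materialising the
# zipped pairs with list() gives a reusable list of (value, label) tuples,
# which is what ChoiceField expects and avoids the single-use-iterator
# problem noted above. The helper name `_build_area_choices` is invented here.
def _build_area_choices():
    names = ['US'] + [entry['name'] for entry in Area.objects.values('name')]
    return list(enumerate(names))  # e.g. [(0, 'US'), (1, <first area name>), ...]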
|
{
"content_hash": "94b45418935165a2c7ecc72b312cca22",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 151,
"avg_line_length": 48.421052631578945,
"alnum_prop": 0.7134057971014492,
"repo_name": "justbreakingstuff/compact",
"id": "5e45424982c8be833e604f5f6bf0060a1fc14b2a",
"size": "2760",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "compact/compactapp/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3593"
},
{
"name": "HTML",
"bytes": "20393"
},
{
"name": "Python",
"bytes": "124324"
}
],
"symlink_target": ""
}
|
import itertools
import sys
import types
import warnings
import weakref
from collections import OrderedDict, defaultdict
from collections.abc import Mapping
from copy import deepcopy
import numpy as np
from numpy import ma
from astropy import log
from astropy.io.registry import UnifiedReadWriteMethod
from astropy.units import Quantity, QuantityInfo
from astropy.utils import ShapedLikeNDArray, isiterable
from astropy.utils.console import color_print
from astropy.utils.data_info import BaseColumnInfo, DataInfo, MixinInfo
from astropy.utils.decorators import format_doc
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.masked import Masked
from astropy.utils.metadata import MetaAttribute, MetaData
from . import conf, groups
from .column import (
BaseColumn,
Column,
FalseArray,
MaskedColumn,
_auto_names,
_convert_sequence_data_to_array,
col_copy,
)
from .connect import TableRead, TableWrite
from .index import (
Index,
SlicedIndex,
TableILoc,
TableIndices,
TableLoc,
TableLocIndices,
_IndexModeContext,
get_index,
)
from .info import TableInfo
from .mixins.registry import get_mixin_handler
from .ndarray_mixin import NdarrayMixin # noqa: F401
from .pprint import TableFormatter
from .row import Row
_implementation_notes = """
This string has informal notes concerning Table implementation for developers.
Things to remember:
- Table has customizable attributes ColumnClass, Column, MaskedColumn.
Table.Column is normally just column.Column (same w/ MaskedColumn)
but in theory they can be different. Table.ColumnClass is the default
class used to create new non-mixin columns, and this is a function of
the Table.masked attribute. Column creation / manipulation in a Table
needs to respect these.
- Column objects that get inserted into the Table.columns attribute must
have the info.parent_table attribute set correctly. Beware just dropping
an object into the columns dict since an existing column may
be part of another Table and have parent_table set to point at that
table. Dropping that column into `columns` of this Table will cause
a problem for the old one so the column object needs to be copied (but
not necessarily the data).
Currently replace_column is always making a copy of both object and
data if parent_table is set. This could be improved but requires a
generic way to copy a mixin object but not the data.
- Be aware of column objects that have indices set.
- `cls.ColumnClass` is a property that effectively uses the `masked` attribute
to choose either `cls.Column` or `cls.MaskedColumn`.
"""
__doctest_skip__ = [
"Table.read",
"Table.write",
"Table._read",
"Table.convert_bytestring_to_unicode",
"Table.convert_unicode_to_bytestring",
]
__doctest_requires__ = {"*pandas": ["pandas>=1.1"]}
_pprint_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of lines in table output.
max_width : int or None
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
_pformat_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is True.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or None
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
"""
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
col_dtype = "O" if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, "shape") else ()
return (col.info.name, col_dtype, col_shape)
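# Illustrative sketch (not part of upstream astropy) showing what ``descr``
# returns; the helper name is invented here and the exact integer dtype
# depends on the platform default.
def _descr_example():
    col = Column(name='a', data=np.arange(6).reshape(2, 3))
    # -> ('a', dtype('int64'), (3,)): the column name, its dtype, and the
    # per-row shape (everything after the length axis).
    return descr(col)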
def has_info_class(obj, cls):
"""Check if the object's info is an instance of cls."""
# We check info on the class of the instance, since on the instance
# itself accessing 'info' has side effects in that it sets
# obj.__dict__['info'] if it does not exist already.
return isinstance(getattr(obj.__class__, "info", None), cls)
def _get_names_from_list_of_dict(rows):
"""Return list of column names if ``rows`` is a list of dict that
defines table data.
If rows is not a list of dict then return None.
"""
if rows is None:
return None
names = set()
for row in rows:
if not isinstance(row, Mapping):
return None
names.update(row)
return list(names)
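# Illustrative sketch (not part of upstream astropy): for a list of dicts the
# column names are the union of the row keys, collected via a set, so their
# order is not guaranteed. The helper name below is invented here.
def _get_names_from_list_of_dict_example():
    rows = [{'a': 1, 'b': 2}, {'b': 3, 'c': 4}]
    return _get_names_from_list_of_dict(rows)  # e.g. ['a', 'b', 'c'] in some order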
# Note to future maintainers: when transitioning this to dict
# be sure to change the OrderedDict ref(s) in Row and in __len__().
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super().__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, str):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return list(self.values())[item]
elif (
isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == "i"
):
return list(self.values())[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError(
"Illegal key or index value for {} object".format(
self.__class__.__name__
)
)
def __setitem__(self, item, value, validated=False):
"""
Set item in this dict instance, but do not allow directly replacing an
existing column unless it is already validated (and thus is certain to
not corrupt the table).
NOTE: it is easily possible to corrupt a table by directly *adding* a new
key to the TableColumns attribute of a Table, e.g.
``t.columns['jane'] = 'doe'``.
"""
if item in self and not validated:
raise ValueError(
"Cannot replace column '{}'. Use Table.replace_column() instead.".format(
item
)
)
super().__setitem__(item, value)
def __repr__(self):
names = (f"'{x}'" for x in self.keys())
return f"<{self.__class__.__name__} names=({','.join(names)})>"
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError(f"Column {new_name} already exists")
# Rename column names in pprint include/exclude attributes as needed
parent_table = self[name].info.parent_table
if parent_table is not None:
parent_table.pprint_exclude_names._rename(name, new_name)
parent_table.pprint_include_names._rename(name, new_name)
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(self.values())
self.clear()
self.update(list(zip(new_names, cols)))
def __delitem__(self, name):
# Remove column names from pprint include/exclude attributes as needed.
# __delitem__ also gets called for pop() and popitem().
parent_table = self[name].info.parent_table
if parent_table is not None:
# _remove() method does not require that `name` is in the attribute
parent_table.pprint_exclude_names._remove(name)
parent_table.pprint_include_names._remove(name)
return super().__delitem__(name)
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are instances of given classes.
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
class TableAttribute(MetaAttribute):
"""
Descriptor to define a custom attribute for a Table subclass.
The value of the ``TableAttribute`` will be stored in a dict named
``__attributes__`` that is stored in the table ``meta``. The attribute
can be accessed and set in the usual way, and it can be provided when
creating the object.
Defining an attribute by this mechanism ensures that it will persist if
the table is sliced or serialized, for example as a pickle or ECSV file.
See the `~astropy.utils.metadata.MetaAttribute` documentation for additional
details.
Parameters
----------
default : object
Default value for attribute
Examples
--------
>>> from astropy.table import Table, TableAttribute
>>> class MyTable(Table):
... identifier = TableAttribute(default=1)
>>> t = MyTable(identifier=10)
>>> t.identifier
10
>>> t.meta
OrderedDict([('__attributes__', {'identifier': 10})])
"""
class PprintIncludeExclude(TableAttribute):
"""Maintain tuple that controls table column visibility for print output.
This is a descriptor that inherits from MetaAttribute so that the attribute
value is stored in the table meta['__attributes__'].
This gets used for the ``pprint_include_names`` and ``pprint_exclude_names`` Table
attributes.
"""
def __get__(self, instance, owner_cls):
"""Get the attribute.
This normally returns an instance of this class which is stored on the
owner object.
"""
# For getting from class not an instance
if instance is None:
return self
# If not already stored on `instance`, make a copy of the class
# descriptor object and put it onto the instance.
value = instance.__dict__.get(self.name)
if value is None:
value = deepcopy(self)
instance.__dict__[self.name] = value
# We set _instance_ref on every call, since if one makes copies of
# instances, this attribute will be copied as well, which will lose the
# reference.
value._instance_ref = weakref.ref(instance)
return value
def __set__(self, instance, names):
"""Set value of ``instance`` attribute to ``names``.
Parameters
----------
instance : object
Instance that owns the attribute
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
if isinstance(names, str):
names = [names]
if names is None:
# Remove attribute value from the meta['__attributes__'] dict.
# Subsequent access will just return None.
delattr(instance, self.name)
else:
# This stores names into instance.meta['__attributes__'] as tuple
return super().__set__(instance, tuple(names))
def __call__(self):
"""Get the value of the attribute.
Returns
-------
names : None, tuple
Include/exclude names
"""
# Get the value from instance.meta['__attributes__']
instance = self._instance_ref()
return super().__get__(instance, instance.__class__)
def __repr__(self):
if hasattr(self, "_instance_ref"):
out = f"<{self.__class__.__name__} name={self.name} value={self()}>"
else:
out = super().__repr__()
return out
def _add_remove_setup(self, names):
"""Common setup for add and remove.
- Coerce attribute value to a list
- Coerce names into a list
- Get the parent table instance
"""
names = [names] if isinstance(names, str) else list(names)
# Get the value. This is the same as self() but we need `instance` here.
instance = self._instance_ref()
value = super().__get__(instance, instance.__class__)
value = [] if value is None else list(value)
return instance, names, value
def add(self, names):
"""Add ``names`` to the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to add
"""
instance, names, value = self._add_remove_setup(names)
value.extend(name for name in names if name not in value)
super().__set__(instance, tuple(value))
def remove(self, names):
"""Remove ``names`` from the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to remove
"""
self._remove(names, raise_exc=True)
def _remove(self, names, raise_exc=False):
"""Remove ``names`` with optional checking if they exist"""
instance, names, value = self._add_remove_setup(names)
# Return now if there are no attributes and thus no action to be taken.
if not raise_exc and "__attributes__" not in instance.meta:
return
# Remove one by one, optionally raising an exception if name is missing.
for name in names:
if name in value:
value.remove(name) # Using the list.remove method
elif raise_exc:
raise ValueError(f"{name} not in {self.name}")
# Change to either None or a tuple for storing back to attribute
value = None if value == [] else tuple(value)
self.__set__(instance, value)
def _rename(self, name, new_name):
"""Rename ``name`` to ``new_name`` if ``name`` is in the list"""
names = self() or ()
if name in names:
new_names = list(names)
new_names[new_names.index(name)] = new_name
self.set(new_names)
def set(self, names):
"""Set value of include/exclude attribute to ``names``.
Parameters
----------
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
class _Context:
def __init__(self, descriptor_self):
self.descriptor_self = descriptor_self
self.names_orig = descriptor_self()
def __enter__(self):
pass
def __exit__(self, type, value, tb):
descriptor_self = self.descriptor_self
instance = descriptor_self._instance_ref()
descriptor_self.__set__(instance, self.names_orig)
def __repr__(self):
return repr(self.descriptor_self)
ctx = _Context(descriptor_self=self)
instance = self._instance_ref()
self.__set__(instance, names)
return ctx
class Table:
"""A class to represent tables of heterogeneous data.
`~astropy.table.Table` provides a class for heterogeneous tabular data.
A key enhancement provided by the `~astropy.table.Table` class over
e.g. a `numpy` structured array is the ability to easily modify the
structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`~astropy.table.Table` differs from `~astropy.nddata.NDData` by the
assumption that the input data consists of columns of homogeneous data,
where each column has a unique identifier and may contain additional
metadata such as the data unit, format, and description.
See also: https://docs.astropy.org/en/stable/table/
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. If the input is a Table the ``meta`` is always
copied regardless of the ``copy`` parameter.
Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
units : list, dict, optional
List or dict of units to apply to columns.
descriptions : list, dict, optional
List or dict of descriptions to apply to columns.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
meta = MetaData(copy=False)
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
# Unified I/O read and write methods from .connect
read = UnifiedReadWriteMethod(TableRead)
write = UnifiedReadWriteMethod(TableWrite)
pprint_exclude_names = PprintIncludeExclude()
pprint_include_names = PprintIncludeExclude()
def as_array(self, keep_byteorder=False, names=None):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
        names : list, optional
List of column names to include for returned structured array.
Default is to include all table columns.
Returns
-------
table_array : array or `~numpy.ma.MaskedArray`
Copy of table as a numpy structured array.
ndarray for unmasked or `~numpy.ma.MaskedArray` for masked.
"""
masked = self.masked or self.has_masked_columns or self.has_masked_values
empty_init = ma.empty if masked else np.empty
if len(self.columns) == 0:
return empty_init(0, dtype=None)
dtype = []
cols = self.columns.values()
if names is not None:
cols = [col for col in cols if col.info.name in names]
for col in cols:
col_descr = descr(col)
if not (col.info.dtype.isnative or keep_byteorder):
new_dt = np.dtype(col_descr[1]).newbyteorder("=")
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
# For masked out, masked mixin columns need to set output mask attribute.
if masked and has_info_class(col, MixinInfo) and hasattr(col, "mask"):
data[col.info.name].mask = col.mask
return data
def __init__(
self,
data=None,
masked=False,
names=None,
dtype=None,
meta=None,
copy=True,
rows=None,
copy_indices=True,
units=None,
descriptions=None,
**kwargs,
):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype are changing
if not copy and dtype is not None:
raise ValueError("Cannot specify dtype when copy=False")
# Specifies list of names found for the case of initializing table with
# a list of dict. If data are not list of dict then this is None.
names_from_list_of_dict = None
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
if rows is not None:
if data is not None:
raise ValueError("Cannot supply both `data` and `rows` values")
if isinstance(rows, types.GeneratorType):
# Without this then the all(..) test below uses up the generator
rows = list(rows)
# Get column names if `rows` is a list of dict, otherwise this is None
names_from_list_of_dict = _get_names_from_list_of_dict(rows)
if names_from_list_of_dict:
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
data = list(zip(*rows))
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
# Handle custom (subclass) table attributes that are stored in meta.
# These are defined as class attributes using the TableAttribute
# descriptor. Any such attributes get removed from kwargs here and
# stored for use after the table is otherwise initialized. Any values
# provided via kwargs will have precedence over existing values from
# meta (e.g. from data as a Table or meta via kwargs).
meta_table_attrs = {}
if kwargs:
for attr in list(kwargs):
descr = getattr(self.__class__, attr, None)
if isinstance(descr, TableAttribute):
meta_table_attrs[attr] = kwargs.pop(attr)
if hasattr(data, "__astropy_table__"):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied.
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError(
"__init__() got unexpected keyword argument {!r}".format(
list(kwargs.keys())[0]
)
)
if isinstance(data, np.ndarray) and data.shape == (0,) and not data.dtype.names:
data = None
if isinstance(data, self.Row):
data = data._table[data._index : data._index + 1]
if isinstance(data, (list, tuple)):
# Get column names from `data` if it is a list of dict, otherwise this is None.
# This might be previously defined if `rows` was supplied as an init arg.
names_from_list_of_dict = (
names_from_list_of_dict or _get_names_from_list_of_dict(data)
)
if names_from_list_of_dict:
init_func = self._init_from_list_of_dicts
n_cols = len(names_from_list_of_dict)
else:
init_func = self._init_from_list
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError("Can not initialize a Table with a scalar")
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
# If user-input meta is None then use data.meta (if non-trivial)
if meta is None and data.meta:
# At this point do NOT deepcopy data.meta as this will happen after
# table init_func() is called. But for table input the table meta
# gets a key copy here if copy=False because later a direct object ref
# is used.
meta = data.meta if copy else data.meta.copy()
# Handle indices on input table. Copy primary key and don't copy indices
# if the input Table is in non-copy mode.
self.primary_key = data.primary_key
self._init_indices = self._init_indices and data._copy_indices
# Extract default names, n_cols, and then overwrite ``data`` to be the
# table columns so we can use _init_from_list.
default_names = data.colnames
n_cols = len(default_names)
data = list(data.columns.values())
init_func = self._init_from_list
elif data is None:
if names is None:
if dtype is None:
# Table was initialized as `t = Table()`. Set up for empty
# table with names=[], data=[], and n_cols=0.
# self._init_from_list() will simply return, giving the
# expected empty table.
names = []
else:
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError(
"dtype was specified but could not be "
"parsed for column names"
)
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError(f"Data type {type(data)} not allowed to init Table")
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if dtype is None:
dtype = [None] * n_cols
elif isinstance(dtype, np.dtype):
if default_names is None:
default_names = dtype.names
# Convert a numpy dtype input to a list of dtypes for later use.
dtype = [dtype[name] for name in dtype.names]
if names is None:
names = default_names or [None] * n_cols
names = [None if name is None else str(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Set table meta. If copy=True then deepcopy meta otherwise use the
# user-supplied meta directly.
if meta is not None:
self.meta = deepcopy(meta) if copy else meta
# Update meta with TableAttributes supplied as kwargs in Table init.
# This takes precedence over previously-defined meta.
if meta_table_attrs:
for attr, value in meta_table_attrs.items():
setattr(self, attr, value)
# Whatever happens above, the masked property should be set to a boolean
if self.masked not in (None, True, False):
raise TypeError("masked property must be None, True or False")
self._set_column_attribute("unit", units)
self._set_column_attribute("description", descriptions)
def _set_column_attribute(self, attr, values):
"""Set ``attr`` for columns to ``values``, which can be either a dict (keyed by column
name) or a dict of name: value pairs. This is used for handling the ``units`` and
``descriptions`` kwargs to ``__init__``.
"""
if not values:
return
if isinstance(values, Row):
# For a Row object transform to an equivalent dict.
values = {name: values[name] for name in values.colnames}
if not isinstance(values, Mapping):
# If not a dict map, assume iterable and map to dict if the right length
if len(values) != len(self.columns):
raise ValueError(
f"sequence of {attr} values must match number of columns"
)
values = dict(zip(self.colnames, values))
for name, value in values.items():
if name not in self.columns:
raise ValueError(
f"invalid column name {name} for setting {attr} attribute"
)
# Special case: ignore unit if it is an empty or blank string
if attr == "unit" and isinstance(value, str):
if value.strip() == "":
value = None
if value not in (np.ma.masked, None):
setattr(self[name].info, attr, value)
def __getstate__(self):
columns = OrderedDict(
(key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items()
)
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked or self.has_masked_columns or self.has_masked_values:
mask_table = Table(
[
getattr(col, "mask", FalseArray(col.shape))
for col in self.itercols()
],
names=self.colnames,
copy=False,
)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property."""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
fill_value : str
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : `~astropy.table.Table`
New table with masked values filled
"""
if self.masked or self.has_masked_columns or self.has_masked_values:
# Get new columns with masked values filled, then create Table with those
# new cols (copy=False) but deepcopy the meta.
data = [
col.filled(fill_value) if hasattr(col, "filled") else col
for col in self.itercols()
]
return self.__class__(data, meta=deepcopy(self.meta), copy=False)
else:
# Return copy of the original object.
return self.copy()
@property
def indices(self):
"""
Return the indices associated with columns of the table
as a TableIndices object.
"""
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum(index is x for x in lst) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
"""
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
"""
return TableLoc(self)
@property
def loc_indices(self):
"""
Return a TableLocIndices object that can be used for retrieving
the row indices corresponding to given table index key value or values.
"""
return TableLocIndices(self)
@property
def iloc(self):
"""
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
"""
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
"""
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, either `~astropy.table.SortedArray`,
`~astropy.table.BST`, or `~astropy.table.SCEngine`. If the supplied
argument is None (by default), use `~astropy.table.SortedArray`.
unique : bool
Whether the values of the index must be unique. Default is False.
"""
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, "_supports_indexing", False):
raise ValueError(
'Cannot create an index on column "{}", of type "{}"'.format(
col.info.name, type(col)
)
)
is_primary = not self.indices
index = Index(columns, engine=engine, unique=unique)
sliced_index = SlicedIndex(index, slice(0, 0, None), original=True)
if is_primary:
self.primary_key = colnames
for col in columns:
col.info.indices.append(sliced_index)
def remove_indices(self, colname):
"""
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
"""
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
"""
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
"""
return _IndexModeContext(self, mode)
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
if np.dtype(dtype) != object:
raise ValueError("Datatype coercion is not allowed")
out = np.array(None, dtype=object)
out[()] = self
return out
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
out = self.as_array()
return out.data if isinstance(out, np.ma.MaskedArray) else out
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, "dtype"), (names, "names")):
if not isiterable(inp_list):
raise ValueError(f"{inp_str} must be a list or None")
if len(names) != n_cols or len(dtype) != n_cols:
raise ValueError(
'Arguments "names" and "dtype" must match number of columns'
)
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of dictionaries representing rows."""
# Define placeholder for missing values as a unique object that cannot
        # ever occur in user data.
MISSING = object()
# Gather column names that exist in the input `data`.
names_from_data = set()
for row in data:
names_from_data.update(row)
if set(data[0].keys()) == names_from_data:
names_from_data = list(data[0].keys())
else:
names_from_data = sorted(names_from_data)
# Note: if set(data[0].keys()) != names_from_data, this will give an
# exception later, so NO need to catch here.
# Convert list of dict into dict of list (cols), keep track of missing
# indexes and put in MISSING placeholders in the `cols` lists.
cols = {}
missing_indexes = defaultdict(list)
for name in names_from_data:
cols[name] = []
for ii, row in enumerate(data):
try:
val = row[name]
except KeyError:
missing_indexes[name].append(ii)
val = MISSING
cols[name].append(val)
# Fill the missing entries with first values
if missing_indexes:
for name, indexes in missing_indexes.items():
col = cols[name]
first_val = next(val for val in col if val is not MISSING)
for index in indexes:
col[index] = first_val
# prepare initialization
if all(name is None for name in names):
names = names_from_data
self._init_from_dict(cols, names, dtype, n_cols, copy)
# Mask the missing values if necessary, converting columns to MaskedColumn
# as needed.
if missing_indexes:
for name, indexes in missing_indexes.items():
col = self[name]
# Ensure that any Column subclasses with MISSING values can support
# setting masked values. As of astropy 4.0 the test condition below is
# always True since _init_from_dict cannot result in mixin columns.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
self[name] = self.MaskedColumn(col, copy=False)
# Finally do the masking in a mixin-safe way.
self[name][indexes] = np.ma.masked
return
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of column data. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
# Special case of initializing an empty table like `t = Table()`. No
# action required at this point.
if n_cols == 0:
return
cols = []
default_names = _auto_names(n_cols)
for col, name, default_name, dtype in zip(data, names, default_names, dtype):
col = self._convert_data_to_col(col, copy, default_name, dtype, name)
cols.append(col)
self._init_from_cols(cols)
def _convert_data_to_col(
self, data, copy=True, default_name=None, dtype=None, name=None
):
"""
Convert any allowed sequence data ``col`` to a column object that can be used
directly in the self.columns dict. This could be a Column, MaskedColumn,
or mixin column.
The final column name is determined by::
            name or data.info.name or default_name
        If ``data`` has no ``info`` then ``name = name or default_name``.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
data : object (column-like sequence)
Input column data
copy : bool
Make a copy
default_name : str
Default name
dtype : np.dtype or None
Data dtype
name : str or None
Column name
Returns
-------
col : Column, MaskedColumn, mixin-column type
Object that can be used as a column in self
"""
data_is_mixin = self._is_mixin_for_table(data)
masked_col_cls = (
self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn
)
try:
data0_is_mixin = self._is_mixin_for_table(data[0])
except Exception:
# Need broad exception, cannot predict what data[0] raises for arbitrary data
data0_is_mixin = False
# If the data is not an instance of Column or a mixin class, we can
# check the registry of mixin 'handlers' to see if the column can be
# converted to a mixin class
if (handler := get_mixin_handler(data)) is not None:
original_data = data
data = handler(data)
if not (data_is_mixin := self._is_mixin_for_table(data)):
fully_qualified_name = (
original_data.__class__.__module__
+ "."
+ original_data.__class__.__name__
)
raise TypeError(
"Mixin handler for object of type "
f"{fully_qualified_name} "
"did not return a valid mixin column"
)
# Get the final column name using precedence. Some objects may not
# have an info attribute. Also avoid creating info as a side effect.
if not name:
if isinstance(data, Column):
name = data.name or default_name
elif "info" in getattr(data, "__dict__", ()):
name = data.info.name or default_name
else:
name = default_name
if isinstance(data, Column):
# If self.ColumnClass is a subclass of col, then "upgrade" to ColumnClass,
# otherwise just use the original class. The most common case is a
# table with masked=True and ColumnClass=MaskedColumn. Then a Column
# gets upgraded to MaskedColumn, but the converse (pre-4.0) behavior
# of downgrading from MaskedColumn to Column (for non-masked table)
# does not happen.
col_cls = self._get_col_cls_for_table(data)
elif data_is_mixin:
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute. If not copying, take a slice
# to ensure we get a new instance and we do not share metadata
# like info.
col = col_copy(data, copy_indices=self._init_indices) if copy else data[:]
col.info.name = name
return col
elif data0_is_mixin:
# Handle case of a sequence of a mixin, e.g. [1*u.m, 2*u.m].
try:
col = data[0].__class__(data)
col.info.name = name
return col
except Exception:
# If that didn't work for some reason, just turn it into np.array of object
data = np.array(data, dtype=object)
col_cls = self.ColumnClass
elif isinstance(data, (np.ma.MaskedArray, Masked)):
# Require that col_cls be a subclass of MaskedColumn, remembering
# that ColumnClass could be a user-defined subclass (though more-likely
# could be MaskedColumn).
col_cls = masked_col_cls
elif data is None:
# Special case for data passed as the None object (for broadcasting
# to an object column). Need to turn data into numpy `None` scalar
# object, otherwise `Column` interprets data=None as no data instead
# of a object column of `None`.
data = np.array(None)
col_cls = self.ColumnClass
elif not hasattr(data, "dtype"):
# `data` is none of the above, convert to numpy array or MaskedArray
# assuming only that it is a scalar or sequence or N-d nested
# sequence. This function is relatively intricate and tries to
# maintain performance for common cases while handling things like
# list input with embedded np.ma.masked entries. If `data` is a
# scalar then it gets returned unchanged so the original object gets
# passed to `Column` later.
data = _convert_sequence_data_to_array(data, dtype)
copy = False # Already made a copy above
col_cls = (
masked_col_cls
if isinstance(data, np.ma.MaskedArray)
else self.ColumnClass
)
else:
col_cls = self.ColumnClass
try:
col = col_cls(
name=name,
data=data,
dtype=dtype,
copy=copy,
copy_indices=self._init_indices,
)
except Exception:
# Broad exception class since we don't know what might go wrong
raise ValueError("unable to convert data to Column for Table")
col = self._convert_col_for_table(col)
return col
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array"""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = (
[data[name] for name in data_names]
if struct
else [data[:, i] for i in range(n_cols)]
)
self._init_from_list(cols, names, dtype, n_cols, copy)
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns"""
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _get_col_cls_for_table(self, col):
"""Get the correct column class to use for upgrading any Column-like object.
For a masked table, ensure any Column-like object is a subclass
of the table MaskedColumn.
For unmasked table, ensure any MaskedColumn-like object is a subclass
of the table MaskedColumn. If not a MaskedColumn, then ensure that any
Column-like object is a subclass of the table Column.
"""
col_cls = col.__class__
if self.masked:
if isinstance(col, Column) and not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
else:
if isinstance(col, MaskedColumn):
if not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
elif isinstance(col, Column) and not isinstance(col, self.Column):
col_cls = self.Column
return col_cls
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct base class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col_cls = self._get_col_cls_for_table(col)
if col_cls is not col.__class__:
col = col_cls(col, copy=False)
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects"""
lengths = {len(col) for col in cols}
if len(lengths) > 1:
raise ValueError(f"Inconsistent data column lengths: {lengths}")
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
if self.meta:
table.meta = self.meta.copy() # Shallow copy for slice
table.primary_key = self.primary_key
newcols = []
for col in self.columns.values():
newcol = col[slice_]
# Note in line below, use direct attribute access to col.indices for Column
# instances instead of the generic col.info.indices. This saves about 4 usec
# per column.
if (col if isinstance(col, Column) else col.info).indices:
# TODO : as far as I can tell the only purpose of setting _copy_indices
# here is to communicate that to the initial test in `slice_indices`.
# Why isn't that just sent as an arg to the function?
col.info._copy_indices = self._copy_indices
newcol = col.info.slice_indices(newcol, slice_, len(col))
# Don't understand why this is forcing a value on the original column.
# Normally col.info does not even have a _copy_indices attribute. Tests
# still pass if this line is deleted. (Each col.info attribute access
# is expensive).
col.info._copy_indices = True
newcols.append(newcol)
self._make_table_from_cols(
table, newcols, verify=False, names=self.columns.keys()
)
return table
@staticmethod
def _make_table_from_cols(table, cols, verify=True, names=None):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
if names is None:
names = [col.info.name for col in cols]
# Note: we do not test for len(names) == len(cols) if names is not None. In that
        # case the function is being called from a "trusted" source (e.g. right above here)
# that is assumed to provide valid inputs. In that case verify=False.
if verify:
if None in names:
raise TypeError("Cannot have None for column name")
if len(set(names)) != len(names):
raise ValueError("Duplicate column names")
table.columns = table.TableColumns(
(name, col) for name, col in zip(names, cols)
)
for col in cols:
table._set_col_parent_table_and_mask(col)
def _set_col_parent_table_and_mask(self, col):
"""
Set ``col.parent_table = self`` and force ``col`` to have ``mask``
attribute if the table is masked and ``col.mask`` does not exist.
"""
# For Column instances it is much faster to do direct attribute access
# instead of going through .info
col_info = col if isinstance(col, Column) else col.info
col_info.parent_table = self
# Legacy behavior for masked table
if self.masked and not hasattr(col, "mask"):
col.mask = FalseArray(col.shape)
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(
self,
html=False,
descr_vals=None,
max_width=None,
tableid=None,
show_dtype=True,
max_lines=None,
tableclass=None,
):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append("masked=True")
descr_vals.append(f"length={len(self)}")
descr = " ".join(descr_vals)
if html:
from astropy.utils.xml.writer import xml_escape
descr = f"<i>{xml_escape(descr)}</i>\n"
else:
descr = f"<{descr}>\n"
if tableid is None:
tableid = f"table{id(self)}"
data_lines, outs = self.formatter._pformat_table(
self,
tableid=tableid,
html=html,
max_width=max_width,
show_name=True,
show_unit=None,
show_dtype=show_dtype,
max_lines=max_lines,
tableclass=tableclass,
)
out = descr + "\n".join(data_lines)
return out
def _repr_html_(self):
out = self._base_repr_(
html=True, max_width=-1, tableclass=conf.default_notebook_table_class
)
# Wrap <table> in <div>. This follows the pattern in pandas and allows
# table to be scrollable horizontally in VS Code notebook display.
out = f"<div>{out}</div>"
return out
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __str__(self):
return "\n".join(self.pformat())
def __bytes__(self):
return str(self).encode("utf-8")
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
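        Examples
        --------
        A brief sketch; the Time column below is a mixin column while the plain
        integer column is not (values are purely illustrative)::

            >>> from astropy.time import Time
            >>> t = Table({'a': [1, 2], 't': Time(['2020-01-01', '2020-01-02'])})
            >>> t.has_mixin_columns
            True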
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
@property
def has_masked_columns(self):
"""True if table has any ``MaskedColumn`` columns.
This does not check for mixin columns that may have masked values, use the
``has_masked_values`` property in that case.
"""
return any(isinstance(col, MaskedColumn) for col in self.itercols())
@property
def has_masked_values(self):
"""True if column in the table has values which are masked.
This may be relatively slow for large tables as it requires checking the mask
values of each column.
"""
        for col in self.itercols():
            if hasattr(col, "mask") and np.any(col.mask):
                return True
        return False
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
# Is it a mixin but not [Masked]Quantity (which gets converted to
# [Masked]Column with unit set).
return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
@format_doc(_pprint_docs)
def pprint(
self,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
align=None,
):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
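        Examples
        --------
        A brief sketch of typical calls with illustrative data (printed output
        is not shown here)::

            >>> t = Table({'a': range(1000)})
            >>> t.pprint(max_lines=10)   # doctest: +SKIP
            >>> t.pprint(max_lines=-1)   # doctest: +SKIP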
"""
lines, outs = self.formatter._pformat_table(
self,
max_lines,
max_width,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
align=align,
)
if outs["show_length"]:
lines.append(f"Length = {len(self)} rows")
n_header = outs["n_header"]
for i, line in enumerate(lines):
if i < n_header:
color_print(line, "red")
else:
print(line)
@format_doc(_pprint_docs)
def pprint_all(
self,
max_lines=-1,
max_width=-1,
show_name=True,
show_unit=None,
show_dtype=False,
align=None,
):
"""Print a formatted string representation of the entire table.
This method is the same as `astropy.table.Table.pprint` except that
the default ``max_lines`` and ``max_width`` are both -1 so that by
default the entire table is printed instead of restricting to the size
of the screen terminal.
"""
return self.pprint(
max_lines, max_width, show_name, show_unit, show_dtype, align
)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + list(self.columns.values()), copy=False)
else:
return self
def show_in_notebook(
self,
tableid=None,
css=None,
display_length=50,
table_class="astropy-default",
show_row_index="idx",
):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or None
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or None
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <https://getbootstrap.com/css/#tables>`_
for the list of classes.
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
            Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
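        Examples
        --------
        A minimal sketch of typical usage in a Jupyter notebook (requires
        IPython, so it is not executed here)::

            >>> t = Table({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
            >>> t.show_in_notebook(display_length=10)  # doctest: +SKIP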
"""
from IPython.display import HTML
from .jsviewer import JSViewer
if tableid is None:
tableid = f"table{id(self)}-{np.random.randint(1, 1e6)}"
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == "astropy-default":
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(
html=True,
max_width=-1,
tableid=tableid,
max_lines=-1,
show_dtype=False,
tableclass=table_class,
)
columns = display_table.columns.values()
sortable_columns = [
i for i, col in enumerate(columns) if col.info.dtype.kind in "iufc"
]
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
def show_in_browser(
self,
max_lines=5000,
jsviewer=False,
browser="default",
jskwargs={"use_local_files": True},
tableid=None,
table_class="display compact",
css=None,
show_row_index="idx",
):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or None
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or None
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in https://www.datatables.net/manual/styling/classes
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import tempfile
import webbrowser
from urllib.parse import urljoin
from urllib.request import pathname2url
from .jsviewer import DEFAULT_CSS
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, "table.html")
with open(path, "w") as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(
tmp,
format="jsviewer",
css=css,
max_lines=max_lines,
jskwargs=jskwargs,
table_id=tableid,
table_class=table_class,
)
else:
self.write(tmp, format="html")
try:
br = webbrowser.get(None if browser == "default" else browser)
except webbrowser.Error:
log.error(f"Browser '{browser}' not found.")
else:
br.open(urljoin("file:", pathname2url(path)))
@format_doc(_pformat_docs, id="{id}")
def pformat(
self,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
html=False,
tableid=None,
align=None,
tableclass=None,
):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
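        Examples
        --------
        A brief sketch with illustrative data; the returned lines can be joined
        with newlines for display::

            >>> t = Table({'a': [1, 2]})
            >>> lines = t.pformat()
            >>> isinstance(lines, list)
            True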
"""
lines, outs = self.formatter._pformat_table(
self,
max_lines,
max_width,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
html=html,
tableid=tableid,
tableclass=tableclass,
align=align,
)
if outs["show_length"]:
lines.append(f"Length = {len(self)} rows")
return lines
@format_doc(_pformat_docs, id="{id}")
def pformat_all(
self,
max_lines=-1,
max_width=-1,
show_name=True,
show_unit=None,
show_dtype=False,
html=False,
tableid=None,
align=None,
tableclass=None,
):
"""Return a list of lines for the formatted string representation of
the entire table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
return self.pformat(
max_lines,
max_width,
show_name,
show_unit,
show_dtype,
html,
tableid,
align,
tableclass,
)
def more(
self,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
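        Examples
        --------
        A sketch of typical usage (interactive paging, so it is not executed
        here)::

            >>> t = Table({'a': range(100)})
            >>> t.more(max_lines=20)  # doctest: +SKIP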
"""
self.formatter._more_tabcol(
self,
max_lines,
max_width,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
)
def __getitem__(self, item):
if isinstance(item, str):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (
isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == "i"
):
return self.Row(self, item.item())
elif self._is_list_or_tuple_of_str(item):
out = self.__class__(
[self[x] for x in item], copy_indices=self._copy_indices
)
out._groups = groups.TableGroups(
out, indices=self.groups._indices, keys=self.groups._keys
)
out.meta = self.meta.copy() # Shallow copy for meta
return out
elif (isinstance(item, np.ndarray) and item.size == 0) or (
isinstance(item, (tuple, list)) and not item
):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
        elif (
            isinstance(item, slice)
            or isinstance(item, np.ndarray)
            or isinstance(item, list)
            or (
                isinstance(item, tuple)
                and all(isinstance(x, np.ndarray) for x in item)
            )
        ):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError(f"Illegal type {type(item)} for table item access")
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, str) and item not in self.colnames:
self.add_column(value, name=item, copy=True)
else:
n_cols = len(self.columns)
if isinstance(item, str):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (
not getattr(self, "_setitem_inplace", False)
and not conf.replace_inplace
):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
self._set_row(idx=item, colnames=self.colnames, vals=value)
elif (
isinstance(item, slice)
or isinstance(item, np.ndarray)
or isinstance(item, list)
or (
isinstance(item, tuple) # output from np.where
and all(isinstance(x, np.ndarray) for x in item)
)
):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError(
"Right side value needs {} elements (one for each column)".format(
n_cols
)
)
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError(f"Illegal type {type(item)} for table item access")
def __delitem__(self, item):
if isinstance(item, str):
self.remove_column(item)
elif isinstance(item, (int, np.integer)):
self.remove_row(item)
elif isinstance(item, (list, tuple, np.ndarray)) and all(
isinstance(x, str) for x in item
):
self.remove_columns(item)
elif (
isinstance(item, (list, np.ndarray)) and np.asarray(item).dtype.kind == "i"
):
self.remove_rows(item)
elif isinstance(item, slice):
self.remove_rows(item)
else:
raise IndexError("illegal key or index value")
def _ipython_key_completions_(self):
return self.colnames
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception(
"Masked attribute is read-only (use t = Table(t, masked=True)"
" to convert to a masked table)"
)
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
self._column_class = self.MaskedColumn if self._masked else self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
@staticmethod
def _is_list_or_tuple_of_str(names):
"""Check that ``names`` is a tuple or list of strings"""
return (
isinstance(names, (tuple, list))
and names
and all(isinstance(x, str) for x in names)
)
def keys(self):
return list(self.columns.keys())
def values(self):
return self.columns.values()
def items(self):
return self.columns.items()
def __len__(self):
# For performance reasons (esp. in Row) cache the first column name
        # and use that subsequently for the table length. It might not be
# available yet or the column might be gone now, in which case
# try again in the except block.
try:
return len(OrderedDict.__getitem__(self.columns, self._first_colname))
except (AttributeError, KeyError):
if len(self.columns) == 0:
return 0
# Get the first column name
self._first_colname = next(iter(self.columns))
return len(self.columns[self._first_colname])
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError(f"Column {name} does not exist")
def add_column(
self,
col,
index=None,
name=None,
rename_duplicate=False,
copy=True,
default_name=None,
):
"""
Add a new column to the table using ``col`` as input. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
The ``col`` input can be any data object which is acceptable as a
`~astropy.table.Table` column object or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
To add several columns at once use ``add_columns()`` or simply call
``add_column()`` for each one. There is very little performance difference
        between the two approaches.
Parameters
----------
col : object
Data object for the new column
index : int or None
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
            Uniquify column name if it already exists. Default is False.
copy : bool
Make a copy of the new column. Default is True.
default_name : str or None
Name to use if both ``name`` and ``col.info.name`` are not available.
Defaults to ``col{number_of_columns}``.
Examples
--------
Create a table with two columns 'a' and 'b', then create a third column 'c'
and append it to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> t.add_column(['a', 'b'], name='d', index=1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(1.1, name='b', rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.1
Add an unnamed column or mixin object in the table using a default name
or by specifying an explicit name with ``name``. Name can also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(['a', 'b'])
>>> t.add_column(col_c, name='d')
>>> print(t)
a b col2 d
--- --- ---- ---
1 0.1 a x
2 0.2 b y
"""
if default_name is None:
default_name = f"col{len(self.columns)}"
# Convert col data to acceptable object for insertion into self.columns.
# Note that along with the lines above and below, this allows broadcasting
# of scalars to the correct shape for adding to table.
col = self._convert_data_to_col(
col, name=name, copy=copy, default_name=default_name
)
# Assigning a scalar column to an empty table should result in an
# exception (see #3811).
if col.shape == () and len(self) == 0:
raise TypeError("Empty table cannot have column set to scalar value")
# Make col data shape correct for scalars. The second test is to allow
# broadcasting an N-d element to a column, e.g. t['new'] = [[1, 2]].
elif (col.shape == () or col.shape[0] == 1) and len(self) > 0:
new_shape = (len(self),) + getattr(col, "shape", ())[1:]
if isinstance(col, np.ndarray):
col = np.broadcast_to(col, shape=new_shape, subok=True)
elif isinstance(col, ShapedLikeNDArray):
col = col._apply(np.broadcast_to, shape=new_shape, subok=True)
# broadcast_to() results in a read-only array. Apparently it only changes
# the view to look like the broadcasted array. So copy.
col = col_copy(col)
name = col.info.name
# Ensure that new column is the right length
if len(self.columns) > 0 and len(col) != len(self):
raise ValueError("Inconsistent data column lengths")
if rename_duplicate:
orig_name = name
i = 1
while name in self.columns:
# Iterate until a unique name is found
name = orig_name + "_" + str(i)
i += 1
col.info.name = name
# Set col parent_table weakref and ensure col has mask attribute if table.masked
self._set_col_parent_table_and_mask(col)
# Add new column as last column
self.columns[name] = col
if index is not None:
# Move the other cols to the right of the new one
move_names = self.colnames[index:-1]
for move_name in move_names:
self.columns.move_to_end(move_name, last=True)
def add_columns(
self, cols, indexes=None, names=None, copy=True, rename_duplicate=False
):
"""
        Add a list of new columns to the table using ``cols`` data objects. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
The ``cols`` input can include any data objects which are acceptable as
`~astropy.table.Table` column objects or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
From a performance perspective there is little difference between calling
this method once or looping over the new columns and calling ``add_column()``
for each column.
Parameters
----------
cols : list of object
List of data objects for the new columns
indexes : list of int or None
Insert column before this position or at end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
See Also
--------
astropy.table.hstack, update, replace_column
Examples
--------
Create a table with two columns 'a' and 'b', then create columns 'c' and 'd'
and append them to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> col_d = Column(name='d', data=['u', 'v'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([['x', 'y'], ['u', 'v']], names=['c', 'd'],
... indexes=[0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([[1.1, 1.2], ['x', 'y']], names=('b', 'c'),
... rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
Add unnamed columns or mixin objects in the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([[1, 2], col_b])
>>> t.add_columns([[3, 4], col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
1 u 3 u
2 v 4 v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError("Number of indexes must match number of cols")
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError("Number of names must match number of cols")
default_names = [f"col{ii + len(self.columns)}" for ii in range(len(cols))]
for ii in reversed(np.argsort(indexes, kind="stable")):
self.add_column(
cols[ii],
index=indexes[ii],
name=names[ii],
default_name=default_names[ii],
rename_duplicate=rename_duplicate,
copy=copy,
)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
refcount = None
old_col = None
if "refcount" in warns and name in self.colnames:
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if "always" in warns:
warnings.warn(
f"replaced column '{name}'", TableReplaceWarning, stacklevel=3
)
if "slice" in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = (
"replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name)
)
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
if "refcount" in warns:
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = (
"replaced column '{}' and the number of references "
"to the column changed.".format(name)
)
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if "attributes" in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = "replaced column '{}' and column attributes {} changed.".format(
name, changed_attrs
)
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col, copy=True):
"""
Replace column ``name`` with the new ``col`` object.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
name : str
Name of column to replace
col : `~astropy.table.Column` or `~numpy.ndarray` or sequence
New column object to replace the existing column.
copy : bool
Make copy of the input ``col``, default=True
See Also
--------
add_columns, astropy.table.hstack, update
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError(f"column name {name} is not in the table")
if self[name].info.indices:
raise ValueError("cannot replace a table index column")
col = self._convert_data_to_col(col, name=name, copy=copy)
self._set_col_parent_table_and_mask(col)
# Ensure that new column is the right length, unless it is the only column
# in which case re-sizing is allowed.
if len(self.columns) > 1 and len(col) != len(self[name]):
raise ValueError("length of new column must match table length")
self.columns.__setitem__(name, col, validated=True)
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (int, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice or int or array of int
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, "_groups"):
del self._groups
def iterrows(self, *names):
"""
Iterate over rows of table returning a tuple of values for each row.
This method is especially useful when only a subset of columns are needed.
The ``iterrows`` method can be substantially faster than using the standard
Table row iteration (e.g. ``for row in tbl:``), since that returns a new
``~astropy.table.Row`` object for each row and accessing a column in that
row (e.g. ``row['col0']``) is slower than tuple access.
Parameters
----------
names : list
List of column names (default to all columns if no names provided)
Returns
-------
rows : iterable
Iterator returns tuples of row values
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table({'a': [1, 2, 3],
... 'b': [1.0, 2.5, 3.0],
... 'c': ['x', 'y', 'z']})
To iterate row-wise using column names::
>>> for a, c in t.iterrows('a', 'c'):
... print(a, c)
1 x
2 y
3 z
"""
if len(names) == 0:
names = self.colnames
else:
for name in names:
if name not in self.colnames:
raise ValueError(f"{name} is not a valid column name")
cols = (self[name] for name in names)
out = zip(*cols)
return out
def _set_of_names_in_colnames(self, names):
"""Return ``names`` as a set if valid, or raise a `KeyError`.
``names`` is valid if all elements in it are in ``self.colnames``.
If ``names`` is a string then it is interpreted as a single column
name.
"""
names = {names} if isinstance(names, str) else set(names)
invalid_names = names.difference(self.colnames)
if len(invalid_names) == 1:
raise KeyError(f'column "{invalid_names.pop()}" does not exist')
elif len(invalid_names) > 1:
raise KeyError(f"columns {invalid_names} do not exist")
return names
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
"""
Remove several columns from the table.
Parameters
----------
names : str or iterable of str
Names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same as using remove_column.
"""
for name in self._set_of_names_in_colnames(names):
del self.columns[name]
def _convert_string_dtype(self, in_kind, out_kind, encode_decode_func):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
"""
for col in self.itercols():
if col.dtype.kind == in_kind:
try:
# This requires ASCII and is faster by a factor of up to ~8, so
# try that first.
newcol = col.__class__(col, dtype=out_kind)
except (UnicodeEncodeError, UnicodeDecodeError):
newcol = col.__class__(encode_decode_func(col, "utf-8"))
# Quasi-manually copy info attributes. Unfortunately
# DataInfo.__set__ does not do the right thing in this case
# so newcol.info = col.info does not get the old info attributes.
for attr in (
col.info.attr_names - col.info._attrs_no_copy - {"dtype"}
):
value = deepcopy(getattr(col.info, attr))
setattr(newcol.info, attr, value)
self[col.name] = newcol
def convert_bytestring_to_unicode(self):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U')
using UTF-8 encoding.
Internally this changes string columns to represent each character
in the string with a 4-byte UCS-4 equivalent, so it is inefficient
for memory but allows scripts to manipulate string arrays with
natural syntax.
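        Examples
        --------
        A brief sketch with illustrative values::

            >>> t = Table({'a': [b'abc', b'de']})
            >>> t['a'].dtype.kind
            'S'
            >>> t.convert_bytestring_to_unicode()
            >>> t['a'].dtype.kind
            'U'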
"""
self._convert_string_dtype("S", "U", np.char.decode)
def convert_unicode_to_bytestring(self):
"""
Convert unicode columns (dtype.kind='U') to bytestring (dtype.kind='S')
using UTF-8 encoding.
When exporting a unicode string array to a file, it may be desirable
to encode unicode columns as bytestrings.
"""
self._convert_string_dtype("U", "S", np.char.encode)
def keep_columns(self, names):
"""
Keep only the columns specified (remove the others).
Parameters
----------
names : str or iterable of str
The columns to keep. All other columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
"""
names = self._set_of_names_in_colnames(names)
for colname in self.colnames:
if colname not in names:
del self.columns[colname]
def rename_column(self, name, new_name):
"""
Rename a column.
        This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
"""
if name not in self.keys():
raise KeyError(f"Column {name} does not exist")
self.columns[name].info.name = new_name
def rename_columns(self, names, new_names):
"""
Rename multiple columns.
Parameters
----------
names : list, tuple
A list or tuple of existing column names.
new_names : list, tuple
A list or tuple of new column names.
Examples
--------
Create a table with three columns 'a', 'b', 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming columns 'a' to 'aa' and 'b' to 'bb'::
>>> names = ('a','b')
>>> new_names = ('aa','bb')
>>> t.rename_columns(names, new_names)
>>> print(t)
aa bb c
--- --- ---
1 3 5
2 4 6
"""
if not self._is_list_or_tuple_of_str(names):
raise TypeError("input 'names' must be a tuple or a list of column names")
if not self._is_list_or_tuple_of_str(new_names):
raise TypeError(
"input 'new_names' must be a tuple or a list of column names"
)
if len(names) != len(new_names):
raise ValueError(
"input 'names' and 'new_names' list arguments must be the same length"
)
for name, new_name in zip(names, new_names):
self.rename_column(name, new_name)
def _set_row(self, idx, colnames, vals):
        # Validate explicitly rather than with ``assert`` (which is skipped
        # when Python runs with -O); ``vals`` may not support len() at all.
        try:
            n_vals = len(vals)
        except Exception:
            n_vals = None
        if n_vals != len(colnames):
            raise ValueError(
                "right hand side must be a sequence of values with "
                "the same length as the number of selected columns"
            )
# Keep track of original values before setting each column so that
# setting row can be transactional.
orig_vals = []
cols = self.columns
try:
for name, val in zip(colnames, vals):
orig_vals.append(cols[name][idx])
cols[name][idx] = val
except Exception:
# If anything went wrong first revert the row update then raise
for name, val in zip(colnames, orig_vals[:-1]):
cols[name][idx] = val
raise
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
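        Examples
        --------
        A brief sketch with illustrative values; the new row is inserted before
        index 1::

            >>> t = Table({'a': [1, 3], 'b': [10.0, 30.0]})
            >>> t.insert_row(1, {'a': 2, 'b': 20.0})
            >>> t['a'].tolist()
            [1, 2, 3]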
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError(
f"Index {index} is out of bounds for table with length {N}"
)
if index < 0:
index += N
if isinstance(vals, Mapping) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not isinstance(mask, Mapping):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError("keys in mask should match keys in vals")
if vals and any(name not in colnames for name in vals):
raise ValueError("Keys in vals must all be valid column names")
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, "dtype"):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError(f"Value must be supplied for column '{name}'")
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or isinstance(mask, Mapping)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError("Mismatch between number of vals and columns")
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError("Mismatch between number of masks and columns")
else:
mask = [False] * len(self.columns)
else:
raise TypeError("Vals must be an iterable or mapping or None")
# Insert val at index for each column
columns = self.TableColumns()
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
try:
# If new val is masked and the existing column does not support masking
# then upgrade the column to a mask-enabled type: either the table-level
# default ColumnClass or else MaskedColumn.
if (
mask_
and isinstance(col, Column)
and not isinstance(col, MaskedColumn)
):
col_cls = (
self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn
)
col = col_cls(col, copy=False)
newcol = col.insert(index, val, axis=0)
if len(newcol) != N + 1:
raise ValueError(
"Incorrect length for column {} after inserting {}"
" (expected {}, got {})".format(name, val, len(newcol), N + 1)
)
newcol.info.parent_table = self
# Set mask if needed and possible
if mask_:
if hasattr(newcol, "mask"):
newcol[index] = np.ma.masked
else:
raise TypeError(
"mask was supplied for column '{}' but it does not "
"support masked values".format(col.info.name)
)
columns[name] = newcol
except Exception as err:
raise ValueError(
"Unable to insert row because of exception in column '{}':\n{}".format(
name, err
)
) from err
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, "_groups"):
del self._groups
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def update(self, other, copy=True):
"""
Perform a dictionary-style update and merge metadata.
The argument ``other`` must be a |Table|, or something that can be used
to initialize a table. Columns from (possibly converted) ``other`` are
added to this table. In case of matching column names the column from
this table is replaced with the one from ``other``.
Parameters
----------
other : table-like
Data to update this table with.
copy : bool
Whether the updated columns should be copies of or references to
the originals.
See Also
--------
add_columns, astropy.table.hstack, replace_column
Examples
--------
Update a table with another table::
>>> t1 = Table({'a': ['foo', 'bar'], 'b': [0., 0.]}, meta={'i': 0})
>>> t2 = Table({'b': [1., 2.], 'c': [7., 11.]}, meta={'n': 2})
>>> t1.update(t2)
>>> t1
<Table length=2>
a b c
str3 float64 float64
---- ------- -------
foo 1.0 7.0
bar 2.0 11.0
>>> t1.meta
{'i': 0, 'n': 2}
Update a table with a dictionary::
>>> t = Table({'a': ['foo', 'bar'], 'b': [0., 0.]})
>>> t.update({'b': [1., 2.]})
>>> t
<Table length=2>
a b
str3 float64
---- -------
foo 1.0
bar 2.0
"""
from .operations import _merge_table_meta
if not isinstance(other, Table):
other = self.__class__(other, copy=copy)
common_cols = set(self.colnames).intersection(other.colnames)
for name, col in other.items():
if name in common_cols:
self.replace_column(name, col, copy=copy)
else:
self.add_column(col, name=name, copy=copy)
_merge_table_meta(self, [self, other], metadata_conflicts="silent")
def argsort(self, keys=None, kind=None, reverse=False):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
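        Examples
        --------
        A brief sketch with illustrative values::

            >>> t = Table({'a': [3, 1, 2]})
            >>> t.argsort('a').tolist()
            [1, 2, 0]
            >>> t.argsort('a', reverse=True).tolist()
            [0, 2, 1]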
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, names=keys)
if index is not None:
idx = np.asarray(index.sorted_data())
return idx[::-1] if reverse else idx
kwargs = {}
if keys:
# For multiple keys return a structured array which gets sorted,
# while for a single key return a single ndarray. Sorting a
# one-column structured array is slower than ndarray (e.g. a
# factor of ~6 for a 10 million long random array), and much slower
# for in principle sortable columns like Time, which get stored as
# object arrays.
if len(keys) > 1:
kwargs["order"] = keys
data = self.as_array(names=keys)
else:
data = self[keys[0]]
else:
# No keys provided so sort on all columns.
data = self.as_array()
if kind:
kwargs["kind"] = kind
# np.argsort will look for a possible .argsort method (e.g., for Time),
# and if that fails cast to an array and try sorting that way.
idx = np.argsort(data, **kwargs)
return idx[::-1] if reverse else idx
def sort(self, keys=None, *, kind=None, reverse=False):
"""
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'],
... [12, 15, 18]], names=('firstname', 'name', 'tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name', 'firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
Sorting according to standard sorting rules, first 'firstname' then 'tel',
in reverse order::
>>> t.sort(['firstname', 'tel'], reverse=True)
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
John Jackson 18
Jo Miller 15
"""
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys, kind=kind, reverse=reverse)
with self.index_mode("freeze"):
for name, col in self.columns.items():
# Make a new sorted column. This requires that take() also copies
# relevant info attributes for mixin columns.
new_col = col.take(indexes, axis=0)
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9553 and #9536 for discussion.
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
def reverse(self):
"""
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
"""
for col in self.columns.values():
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9836, #9553, and #9536 for discussion.
new_col = col[::-1]
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
for index in self.indices:
index.reverse()
def round(self, decimals=0):
"""
Round numeric columns in-place to the specified number of decimals.
Non-numeric columns will be ignored.
Examples
--------
Create three columns with different types:
>>> t = Table([[1, 4, 5], [-25.55, 12.123, 85],
... ['a', 'b', 'c']], names=('a', 'b', 'c'))
>>> print(t)
a b c
--- ------ ---
1 -25.55 a
4 12.123 b
5 85.0 c
Round them all to 0:
>>> t.round(0)
>>> print(t)
a b c
--- ----- ---
1 -26.0 a
4 12.0 b
5 85.0 c
Round column 'a' to -1 decimal:
>>> t.round({'a':-1})
>>> print(t)
a b c
--- ----- ---
0 -26.0 a
0 12.0 b
0 85.0 c
Parameters
----------
        decimals : int, dict
Number of decimals to round the columns to. If a dict is given,
the columns will be rounded to the number specified as the value.
If a certain column is not in the dict given, it will remain the
same.
"""
if isinstance(decimals, Mapping):
decimal_values = decimals.values()
column_names = decimals.keys()
elif isinstance(decimals, int):
decimal_values = itertools.repeat(decimals)
column_names = self.colnames
else:
raise ValueError("'decimals' argument must be an int or a dict")
for colname, decimal in zip(column_names, decimal_values):
col = self.columns[colname]
if np.issubdtype(col.info.dtype, np.number):
try:
np.around(col, decimals=decimal, out=col)
except TypeError:
# Bug in numpy see https://github.com/numpy/numpy/issues/15438
col[()] = np.around(col, decimals=decimal)
def copy(self, copy_data=True):
"""
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array. The ``meta`` is always
deepcopied regardless of the value for ``copy_data``.
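        Examples
        --------
        A brief sketch with illustrative values; modifying the copy leaves the
        original unchanged because the data were copied::

            >>> t = Table({'a': [1, 2]})
            >>> t2 = t.copy()
            >>> t2['a'][0] = 100
            >>> print(t['a'][0], t2['a'][0])
            1 100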
"""
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, "_groups"):
out._groups = groups.TableGroups(
out, indices=self._groups._indices, keys=self._groups._keys
)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
return super().__lt__(other)
def __gt__(self, other):
return super().__gt__(other)
def __le__(self, other):
return super().__le__(other)
def __ge__(self, other):
return super().__ge__(other)
def __eq__(self, other):
return self._rows_equal(other)
def __ne__(self, other):
return ~self.__eq__(other)
def _rows_equal(self, other):
"""
Row-wise comparison of table with any other object.
        This is the actual implementation for ``__eq__``.
Returns a 1-D boolean numpy array showing result of row-wise comparison.
This is the same as the ``==`` comparison for tables.
Parameters
----------
other : Table or DataFrame or ndarray
An object to compare with table
Examples
--------
        Comparing one Table with another::
>>> t1 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t2 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t1._rows_equal(t2)
array([ True, True])
"""
if isinstance(other, Table):
other = other.as_array()
if self.has_masked_columns:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def values_equal(self, other):
"""
Element-wise comparison of table with another table, list, or scalar.
Returns a ``Table`` with the same columns containing boolean values
showing result of comparison.
Parameters
----------
other : table-like object or list or scalar
Object to compare with table
Examples
--------
        Compare one Table with another::
>>> t1 = Table([[1, 2], [4, 5], [-7, 8]], names=('a', 'b', 'c'))
>>> t2 = Table([[1, 2], [-4, 5], [7, 8]], names=('a', 'b', 'c'))
>>> t1.values_equal(t2)
<Table length=2>
a b c
bool bool bool
---- ----- -----
True False False
True True True
"""
if isinstance(other, Table):
names = other.colnames
else:
try:
other = Table(other, copy=False)
names = other.colnames
except Exception:
# Broadcast other into a dict, so e.g. other = 2 will turn into
# other = {'a': 2, 'b': 2} and then equality does a
# column-by-column broadcasting.
names = self.colnames
other = {name: other for name in names}
# Require column names match but do not require same column order
if set(self.colnames) != set(names):
raise ValueError("cannot compare tables with different column names")
eqs = []
for name in names:
try:
np.broadcast(self[name], other[name]) # Check if broadcast-able
# Catch the numpy FutureWarning related to equality checking,
# "elementwise comparison failed; returning scalar instead, but
# in the future will perform elementwise comparison". Turn this
# into an exception since the scalar answer is not what we want.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
eq = self[name] == other[name]
if (
warns
and issubclass(warns[-1].category, FutureWarning)
and "elementwise comparison failed" in str(warns[-1].message)
):
raise FutureWarning(warns[-1].message)
except Exception as err:
raise ValueError(f"unable to compare column {name}") from err
# Be strict about the result from the comparison. E.g. SkyCoord __eq__ is just
# broken and completely ignores that it should return an array.
if not (
isinstance(eq, np.ndarray)
and eq.dtype is np.dtype("bool")
and len(eq) == len(self)
):
raise TypeError(
f"comparison for column {name} returned {eq} "
"instead of the expected boolean ndarray"
)
eqs.append(eq)
out = Table(eqs, names=names)
return out
@property
def groups(self):
if not hasattr(self, "_groups"):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``
This effectively splits the table into groups which correspond to unique
values of the ``keys`` grouping object. The output is a new
`~astropy.table.TableGroups` which contains a copy of this table but
sorted by row according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `~astropy.table.Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `~astropy.table.Table`
Key grouping object
Returns
-------
out : `~astropy.table.Table`
New table with groups set
"""
return groups.table_group_by(self, keys)
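        # Illustrative usage (a minimal sketch; column names and values are
        # hypothetical):
        #   >>> t = Table({'a': [1, 1, 2], 'b': [10, 20, 30]})
        #   >>> grouped = t.group_by('a')
        #   >>> len(grouped.groups)
        #   2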
def to_pandas(self, index=None, use_nullable_int=True):
"""
Return a :class:`pandas.DataFrame` instance
The index of the created DataFrame is controlled by the ``index``
argument. For ``index=True`` or the default ``None``, an index will be
specified for the DataFrame if there is a primary key index on the
Table *and* if it corresponds to a single column. If ``index=False``
then no DataFrame index will be specified. If ``index`` is the name of
a column in the table then that will be the DataFrame index.
In addition to vanilla columns or masked columns, this supports Table
mixin columns like Quantity, Time, or SkyCoord. In many cases these
        objects have no analog in pandas and will be converted to an "encoded"
representation using only Column or MaskedColumn. The exception is
Time or TimeDelta columns, which will be converted to the corresponding
representation in pandas using ``np.datetime64`` or ``np.timedelta64``.
See the example below.
Parameters
----------
index : None, bool, str
Specify DataFrame index mode
use_nullable_int : bool, default=True
Convert integer MaskedColumn to pandas nullable integer type.
If ``use_nullable_int=False`` or the pandas version does not support
nullable integer types (version < 0.24), then the column is converted
to float with NaN for missing elements and a warning is issued.
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table has multi-dimensional columns
Examples
--------
Here we convert a table with a few mixins to a
:class:`pandas.DataFrame` instance.
>>> import pandas as pd
>>> from astropy.table import QTable
>>> import astropy.units as u
>>> from astropy.time import Time, TimeDelta
>>> from astropy.coordinates import SkyCoord
>>> q = [1, 2] * u.m
>>> tm = Time([1998, 2002], format='jyear')
>>> sc = SkyCoord([5, 6], [7, 8], unit='deg')
>>> dt = TimeDelta([3, 200] * u.s)
>>> t = QTable([q, tm, sc, dt], names=['q', 'tm', 'sc', 'dt'])
>>> df = t.to_pandas(index='tm')
>>> with pd.option_context('display.max_columns', 20):
... print(df)
q sc.ra sc.dec dt
tm
1998-01-01 1.0 5.0 7.0 0 days 00:00:03
2002-01-01 2.0 6.0 8.0 0 days 00:03:20
"""
from pandas import DataFrame, Series
if index is not False:
if index in (None, True):
# Default is to use the table primary key if available and a single column
if self.primary_key and len(self.primary_key) == 1:
index = self.primary_key[0]
else:
index = False
else:
if index not in self.colnames:
raise ValueError(
"index must be None, False, True or a table column name"
)
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from astropy.time import TimeBase, TimeDelta
from . import serialize
# Convert any Time or TimeDelta columns and pay attention to masking
time_cols = [col for col in tbl.itercols() if isinstance(col, TimeBase)]
if time_cols:
# Make a light copy of table and clear any indices
new_cols = []
for col in tbl.itercols():
new_col = (
col_copy(col, copy_indices=False) if col.info.indices else col
)
new_cols.append(new_col)
tbl = tbl.__class__(new_cols, copy=False)
# Certain subclasses (e.g. TimeSeries) may generate new indices on
# table creation, so make sure there are no indices on the table.
for col in tbl.itercols():
col.info.indices.clear()
for col in time_cols:
if isinstance(col, TimeDelta):
# Convert to nanoseconds (matches astropy datetime64 support)
new_col = (col.sec * 1e9).astype("timedelta64[ns]")
nat = np.timedelta64("NaT")
else:
new_col = col.datetime64.copy()
nat = np.datetime64("NaT")
if col.masked:
new_col[col.mask] = nat
tbl[col.info.name] = new_col
# Convert the table to one with no mixins, only Column objects.
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl
tbl = _encode_mixins(self)
badcols = [name for name, col in self.columns.items() if len(col.shape) > 1]
if badcols:
# fmt: off
raise ValueError(
f'Cannot convert a table with multidimensional columns to a '
f'pandas DataFrame. Offending columns are: {badcols}\n'
f'One can filter out such columns using:\n'
f'names = [name for name in tbl.colnames if len(tbl[name].shape) <= 1]\n'
f'tbl[names].to_pandas(...)'
)
# fmt: on
out = OrderedDict()
for name, column in tbl.columns.items():
if getattr(column.dtype, "isnative", True):
out[name] = column
else:
out[name] = column.data.byteswap().newbyteorder("=")
if isinstance(column, MaskedColumn) and np.any(column.mask):
if column.dtype.kind in ["i", "u"]:
pd_dtype = column.dtype.name
if use_nullable_int:
# Convert int64 to Int64, uint32 to UInt32, etc for nullable types
pd_dtype = pd_dtype.replace("i", "I").replace("u", "U")
out[name] = Series(out[name], dtype=pd_dtype)
# If pandas is older than 0.24 the type may have turned to float
if column.dtype.kind != out[name].dtype.kind:
warnings.warn(
f"converted column '{name}' from {column.dtype} to"
f" {out[name].dtype}",
TableReplaceWarning,
stacklevel=3,
)
elif column.dtype.kind not in ["f", "c"]:
out[name] = column.astype(object).filled(np.nan)
kwargs = {}
if index:
idx = out.pop(index)
kwargs["index"] = idx
# We add the table index to Series inputs (MaskedColumn with int values) to override
# its default RangeIndex, see #11432
for v in out.values():
if isinstance(v, Series):
v.index = idx
df = DataFrame(out, **kwargs)
if index:
# Explicitly set the pandas DataFrame index to the original table
# index name.
df.index.name = idx.info.name
return df
@classmethod
def from_pandas(cls, dataframe, index=False, units=None):
"""
Create a `~astropy.table.Table` from a :class:`pandas.DataFrame` instance
In addition to converting generic numeric or string columns, this supports
conversion of pandas Date and Time delta columns to `~astropy.time.Time`
and `~astropy.time.TimeDelta` columns, respectively.
Parameters
----------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
index : bool
Include the index column in the returned table (default=False)
        units : dict
            A dict mapping column names to a `~astropy.units.Unit`.
The columns will have the specified unit in the Table.
Returns
-------
table : `~astropy.table.Table`
A `~astropy.table.Table` (or subclass) instance
Raises
------
ImportError
If pandas is not installed
Examples
--------
Here we convert a :class:`pandas.DataFrame` instance
to a `~astropy.table.QTable`.
>>> import numpy as np
>>> import pandas as pd
>>> from astropy.table import QTable
>>> time = pd.Series(['1998-01-01', '2002-01-01'], dtype='datetime64[ns]')
>>> dt = pd.Series(np.array([1, 300], dtype='timedelta64[s]'))
>>> df = pd.DataFrame({'time': time})
>>> df['dt'] = dt
>>> df['x'] = [3., 4.]
>>> with pd.option_context('display.max_columns', 20):
... print(df)
time dt x
0 1998-01-01 0 days 00:00:01 3.0
1 2002-01-01 0 days 00:05:00 4.0
>>> QTable.from_pandas(df)
<QTable length=2>
time dt x
Time TimeDelta float64
----------------------- --------- -------
1998-01-01T00:00:00.000 1.0 3.0
2002-01-01T00:00:00.000 300.0 4.0
"""
out = OrderedDict()
names = list(dataframe.columns)
columns = [dataframe[name] for name in names]
datas = [np.array(column) for column in columns]
masks = [np.array(column.isnull()) for column in columns]
if index:
index_name = dataframe.index.name or "index"
while index_name in names:
index_name = "_" + index_name + "_"
names.insert(0, index_name)
columns.insert(0, dataframe.index)
datas.insert(0, np.array(dataframe.index))
masks.insert(0, np.zeros(len(dataframe), dtype=bool))
if units is None:
units = [None] * len(names)
else:
if not isinstance(units, Mapping):
raise TypeError('Expected a Mapping "column-name" -> "unit"')
not_found = set(units.keys()) - set(names)
if not_found:
warnings.warn(f"`units` contains additional columns: {not_found}")
units = [units.get(name) for name in names]
for name, column, data, mask, unit in zip(names, columns, datas, masks, units):
if column.dtype.kind in ["u", "i"] and np.any(mask):
# Special-case support for pandas nullable int
np_dtype = str(column.dtype).lower()
data = np.zeros(shape=column.shape, dtype=np_dtype)
data[~mask] = column[~mask]
out[name] = MaskedColumn(
data=data, name=name, mask=mask, unit=unit, copy=False
)
continue
if data.dtype.kind == "O":
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = (str, bytes)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b""
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
# Numpy datetime64
if data.dtype.kind == "M":
from astropy.time import Time
out[name] = Time(data, format="datetime64")
if np.any(mask):
out[name][mask] = np.ma.masked
out[name].format = "isot"
# Numpy timedelta64
elif data.dtype.kind == "m":
from astropy.time import TimeDelta
data_sec = data.astype("timedelta64[ns]").astype(np.float64) / 1e9
out[name] = TimeDelta(data_sec, format="sec")
if np.any(mask):
out[name][mask] = np.ma.masked
else:
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit)
else:
out[name] = Column(data=data, name=name, unit=unit)
return cls(out)
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`~astropy.table.QTable` provides a class for heterogeneous tabular data
which can be easily modified, for instance adding columns or new rows.
The `~astropy.table.QTable` class is identical to `~astropy.table.Table`
except that columns with an associated ``unit`` attribute are converted to
`~astropy.units.Quantity` objects.
See also:
- https://docs.astropy.org/en/stable/table/
- https://docs.astropy.org/en/stable/table/mixin_columns.html
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if isinstance(col, Column) and getattr(col, "unit", None) is not None:
# We need to turn the column into a quantity; use subok=True to allow
# Quantity subclasses identified in the unit (such as u.mag()).
q_cls = Masked(Quantity) if isinstance(col, MaskedColumn) else Quantity
try:
qcol = q_cls(col.data, col.unit, copy=False, subok=True)
except Exception as exc:
warnings.warn(
f"column {col.info.name} has a unit but is kept as "
f"a {col.__class__.__name__} as an attempt to "
f"convert it to Quantity failed with:\n{exc!r}",
AstropyUserWarning,
)
else:
qcol.info = col.info
qcol.info.indices = col.info.indices
col = qcol
else:
col = super()._convert_col_for_table(col)
return col
|
{
"content_hash": "d06bce745a2038fd37447a90fe1c1652",
"timestamp": "",
"source": "github",
"line_count": 4246,
"max_line_length": 96,
"avg_line_length": 36.0032972209138,
"alnum_prop": 0.5402106364885196,
"repo_name": "astropy/astropy",
"id": "5d39b7b01c2722548f0dbef524f8a02911807a8d",
"size": "152934",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "astropy/table/table.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11039709"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "79917"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12402561"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
from ginga.cairow.ImageViewCanvasTypesCairo import *
#END
|
{
"content_hash": "432794a2f57fe0d8dc420ebc61f85001",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 52,
"avg_line_length": 15,
"alnum_prop": 0.8166666666666667,
"repo_name": "Rbeaty88/ginga",
"id": "38a4963dea131d6ac0f801d438bbeb913e76aafb",
"size": "383",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ginga/gtkw/ImageViewCanvasTypesGtk.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2102613"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
class QuestionViewTests(TestCase):
def test_revenue(self):
try:
from revenue.models import Revenue
except ImportError:
self.fail('No such revenue model')
|
{
"content_hash": "7b4b6bcc41e2868877d26b68747107aa",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 46,
"avg_line_length": 29,
"alnum_prop": 0.6551724137931034,
"repo_name": "bahadir/packathon2016",
"id": "5a812b261b441db50ef53c4b3b8eadf8ed7129e6",
"size": "232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packathon/revenue/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3402"
}
],
"symlink_target": ""
}
|
"""
r4 - a wrapper around p4 adding new, custom functionality.
Prerequisites:
* P4 python bindings. I used the version at
ftp://ftp.perforce.com/perforce/r10.1/bin.tools/p4python.tgz
(The python bindings have their own installation instructions, but
basically you need to first install the Perforce C++ API.)
"""
from __future__ import with_statement
import P4
import os
import fnmatch
import sys
import pprint
import itertools
# I usually prefer optparse, but this is a special case where we don't
# care about all the extra stuff optparse does for us since we're not
# exactly parsing arguments to an executable program, but to a command
# in an executable.
import getopt
# --------------------
# Maps command names to our custom implementations.
# --------------------
g_command_table = {}
def def_r4_command(name, commandobj):
g_command_table[name] = commandobj
def get_r4_command(name):
return g_command_table.get(name, None)
def all_r4_commands():
return sorted(g_command_table.keys())
# --------------------
# Build maps that lets us translate between paths in depot syntax,
# client syntax and local syntax, like //depot/users/wiseman ->
# /home/wiseman/work/wiseman and vice versa.
# --------------------
g_cached_depot_to_local_map = None
g_translation_map = None
def add_translation_map(fromm, to, map):
global g_translation_map
g_translation_map[fromm, to] = map
g_translation_map[to, fromm] = map.reverse()
def get_translation_map(fromm, to):
global g_translation_map
if not g_translation_map:
ensure_translation_map()
return g_translation_map[fromm, to]
# I got the inspiration for this from the "P4::Map class" section of
# <http://www.perforce.com/perforce/conferences/us/2009/Presentations/Knop-AdventuresinScriptingLand-paper.pdf>.
# Just note that he's trying to map from //depot/... to the local
# syntax of the file *on the depot server*, whereas I'm mapping
# between depot syntax, client syntax and local syntax on the *client
# machine*.
def ensure_translation_map(p4=None):
"""If we haven't done so already, builds the following maps for
translating between different syntaxes:
depot <--> client
client <--> local
depot <--> local
"""
global g_translation_map
if not g_translation_map:
g_translation_map = {}
if not p4:
p4 = get_p4_connection()
client_info = p4.run_client('-o')[0]
client_name = client_info['Client']
client_root = client_info['Root']
client_views = client_info['View']
depot_to_client_map = P4.Map()
depot_to_client_map.insert(client_views)
add_translation_map('depot', 'client', depot_to_client_map)
client_to_local_map = P4.Map()
client_to_local_map.insert('//%s/...' % (client_name,), os.path.join(client_root, '...'))
add_translation_map('client', 'local', client_to_local_map)
add_translation_map('depot', 'local', P4.Map.join(depot_to_client_map, client_to_local_map))
def translate_local_to_depot(path):
    return get_translation_map('local', 'depot').translate(path)
def translate_depot_to_local(path):
return get_translation_map('depot', 'local').translate(path)
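# Illustrative translation, reusing the hypothetical mapping from the comment
# above (//depot/users/wiseman -> /home/wiseman/work/wiseman):
#   translate_depot_to_local('//depot/users/wiseman/foo.c')
#     -> '/home/wiseman/work/wiseman/foo.c'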
# --------------------
# .r4ignore and processing ignores.
# --------------------
IGNORE_FILE = '.r4ignore'
g_home_ignore_patterns = None
def ignore_patterns_for(path):
# Cache the contents of ~/.r4ignore.
global g_home_ignore_patterns
if g_home_ignore_patterns is None:
g_home_ignore_patterns = try_load_ignore_patterns(os.path.join(os.path.expanduser('~'), IGNORE_FILE))
patterns = g_home_ignore_patterns
patterns += try_load_ignore_patterns(os.path.join(path, IGNORE_FILE))
return patterns
def try_load_ignore_patterns(path):
patterns = []
if os.path.exists(path):
try:
with open(path, 'r') as f:
patterns = f.readlines()
# Strip the trailing newline from each line.
patterns = [p[:-1] for p in patterns]
patterns = [p for p in patterns if len(p) > 0 and p[0] != '#']
except IOError:
# Ignore errors.
pass
return patterns
def is_ignored(ignore_patterns, filename):
"Checks whether a filename matches any ignore pattern."
for pattern in ignore_patterns:
if fnmatch.fnmatch(filename, pattern):
return True
return False
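# Illustrative behaviour (the patterns are hypothetical):
#   is_ignored(['*.pyc', '.git'], 'module.pyc')  -> True
#   is_ignored(['*.pyc', '.git'], 'module.py')   -> False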
# --------------------
# Process commands
# --------------------
def handle_command(command, args, p4=None):
# If we have a custom implementation, use it, otherwise fall back
# to regular p4.
handler = get_r4_command(command)
if handler:
return handler.run_command(command, args, p4=p4)
else:
run_standard_p4_command(command, args)
def run_standard_p4_command(command, args):
"""Runs a standard p4 command. Uses exec, so this will be the
last function you ever call. Used to transfer control to stock p4
for non-custom commands.
"""
if command:
args = [command] + args
os.execvp('p4', ['p4'] + args)
class MissingOrWrongArguments(Exception):
pass
# --------------------
# Custom commands
# --------------------
# Base class for all custom commands.
class R4Command:
def __init__(self):
pass
def short_description(self):
raise NotImplementedError
def long_description(self):
raise NotImplementedError
def usage(self):
raise NotImplementedError
def run(self, p4, command, args):
raise NotImplementedError
def run_command(self, command, args, p4=None):
try:
if not p4:
p4 = get_p4_connection()
return self.run(p4, command, args)
except MissingOrWrongArguments:
self.print_usage(stream=sys.stderr)
sys.stderr.write('Missing/wrong number of arguments.\n')
except getopt.GetoptError, e:
self.print_usage(stream=sys.stderr)
sys.stderr.write('%s\n' % (e,))
def print_usage(self, stream=sys.stdout):
# Replace '%prog' in usage text with the name of the program.
stream.write('Usage: %s\n' % (self.usage().replace('prog', sys.argv[0])))
class R4Status(R4Command):
def short_description(self):
return 'Print the status of working copy files and directories'
def usage(self):
return 'status [ --no-ignore ] [ path ... ]'
def long_description(self):
return """
status -- %s
r4 %s
Lists all locally modified files under the specified paths (if no
paths are supplied the current working directory is used).
The --no-ignore flag forces files that are ignored because they
matched a pattern in a .r4ignore file to be printed with an 'I'
prefix.
The first column in the output is one character wide, and
indicates the file's status:
'?' Item is not under version control
'A' Added
'D' Deleted
'M' Modified
'O' Opened for editing--may be unchanged, branched or integrated
'I' Ignored (only with --no-ignore)
""" % (self.short_description(), self.usage())
def run(self, p4, command, args):
no_ignores = False
optlist, args = getopt.getopt(args, '', ['no-ignore'])
for opt, value in optlist:
if opt == '--no-ignore':
no_ignores = True
# Was a path specified or should we just use the current
# directory?
if len(args) == 0:
dirs = ['.']
else:
dirs = args
for dir in dirs:
# Build a list of files that are in p4 under the directory
# we're checking.
info = p4.run('have', os.path.join(dir, '...'))
have_paths = set([i['path'] for i in info])
# Build a list of opened files that have been modified
# under the directory we're checking.
diff_info = p4.run_diff('-sa', os.path.join(dir, '...'))
# run_diff returns a list that contains dictionaries
# interspersed with strings containing diff output. The
# dicts have the filenames, so here we extract the
# filenames from dicts. The 'clientFile' keys gives us a
# local pathname.
modified_files = [i['clientFile'] for i in diff_info if isinstance(i, dict)]
# Build a list of files that have been marked for addition
# or deletion, but are not yet committed.
opened_info = p4.run_opened(os.path.join(dir, '...'))
# opened's output's 'depotFile' is a depot path.
added_files = [translate_depot_to_local(i['depotFile']) for i in opened_info if i['action'] == 'add']
deleted_files = [translate_depot_to_local(i['depotFile']) for i in opened_info if i['action'] == 'delete']
opened_files = [translate_depot_to_local(i['depotFile']) for i in opened_info]
for dirpath, dirnames, filenames in os.walk(dir, topdown=True):
# Get rid of the ignored files.
ignore_patterns = ignore_patterns_for(dirpath)
if not no_ignores:
filenames = [f for f in filenames if not is_ignored(ignore_patterns, f)]
for dirname in dirnames[:]:
if is_ignored(ignore_patterns, dirname):
dirnames.remove(dirname)
if no_ignores:
print 'I %s' % (os.path.normpath(os.path.join(dirpath, dirname)),)
dirnames.sort()
# If there are any files marked for delete in this
# directory, add them to the list of files to print
# status info for.
dirpath = os.path.normpath(dirpath)
for f in deleted_files:
if os.path.dirname(f) == os.path.abspath(dirpath):
filenames += [os.path.basename(f)]
# Print the status info for each file in this
# directory.
for f in sorted(filenames):
full_path = os.path.abspath(os.path.join(dirpath, f))
print_path = os.path.normpath(os.path.join(dirpath, f))
if no_ignores and is_ignored(ignore_patterns, f):
print 'I %s' % (print_path,)
elif full_path in added_files:
print 'A %s' % (print_path,)
elif full_path in deleted_files:
print 'D %s' % (print_path,)
elif full_path in modified_files:
print 'M %s' % (print_path,)
elif full_path in opened_files:
print 'O %s' % (print_path,)
elif not full_path in have_paths:
print '? %s' % (print_path,)
class R4Blame(R4Command):
def short_description(self):
return 'Show what revision and author last modified each line of a file--TBD'
def usage(self):
return 'blame'
def long_description(self):
return """
blame -- %s
r4 %s
Annotates each line in the given file with information on the
revision which last modified the line.
""" % (self.short_description(), self.usage())
def run(self, p4, command, args):
print args
annotate_info = p4.run_annotate('-i', args[0])
filelog_info = p4.run('filelog', '-i', args[0])
pprint.pprint(annotate_info)
pprint.pprint(filelog_info)
class R4Bisect(R4Command):
def short_description(self):
return 'Efficiently finds the change that introduced a bug--TBD'
def usage(self):
return 'bisect <subcommand> <options>'
def long_description(self):
return """
bisect -- %s
r4 %s
An implementation of git's bisect command for Perforce.
""" % (self.short_description(), self.usage())
class R4Grep(R4Command):
def short_description(self):
return 'Search across revisions of files for lines matching a pattern'
def usage(self):
return 'grep [ -i ] [ -l ] [ -v ] pattern file[revRange]...'
def long_description(self):
return """
grep -- %s
r4 %s
Searches the named files for lines containing a match to the given
pattern. By default, grep prints the matching lines.
The pattern can be a Perl-style regular expression.
If a file is specified without a revision, then all revisions of
the file are searched.
Example:
$ r4 grep ALL Makefile
//depot/project/Makefile#1: ALL := tools
//depot/project/Makefile#2: ALL := tools scripts
//depot/project/Makefile#3: ALL := tools scripts tests
//depot/project/Makefile#5: ALL := tools scripts tests
You can use revision specifiers and revision ranges to control
which revisions of a file will be searched.
Examples:
r4 grep pattern file#head
r4 grep pattern file#4
r4 grep pattern file#12,20
r4 grep pattern file@release_4
Note that p4 wildcards can be used, giving the ability to do
recursive greps.
Examples:
r4 grep pattern ./...
r4 grep pattern ./.../file
The -i/--ignore-case flag causes the matching to be done while
ignoring case distinctions.
The -l/--files-with-matches flag suppresses normal output and
instead just prints the names of each file from which output would
normally have been printed. File names are printed with revision
specifiers or revision ranges indicating which revisions of the
file contain matches.
Example:
$ r4 grep -l ALL Makefile
//depot/project/Makefile#1,9
//depot/project/Makefile#11
The -v/--invert-match flag inverts the sense of matching, to
select non-matching lines.
""" % (self.short_description(), self.usage())
def run(self, p4, command, args):
import re
# Process options.
case_sensitive = True
just_list_filenames = False
invert_matches = False
options, args = getopt.getopt(args, 'ilv', ['ignore-case', 'files-with-matches',
'invert-match'])
for option, value in options:
if option in ['-i', '--ignore-case']:
case_sensitive = False
elif option in ['-l', '--files-with-matches']:
just_list_filenames = True
elif option in ['-v', '--invert-match']:
invert_matches = True
if len(args) < 2:
raise MissingOrWrongArguments('Missing/wrong number of arguments.')
regex = args[0]
files = args[1:]
# Build our regex.
re_flags = 0
if not case_sensitive:
re_flags |= re.IGNORECASE
regexp = re.compile(regex, re_flags)
got_match = False
match_ranges = []
path = None
# Search through files.
for file in files:
annotate_info = p4.run_annotate('-a', file)
# Each element in annotate_info is a dictionary, and there are two types:
#
            # 1. Info on the file whose data follows. Contains
# 'depotFile' members and others.
#
# 2. Info on a line in the file. Contains 'upper',
# 'lower' and 'data' members.
for line in annotate_info:
if 'depotFile' in line:
# Finish up the previous file, if there was one.
if path:
if just_list_filenames and got_match:
for lower, upper in coalesce_revision_ranges(match_ranges):
print '%s%s' % (path, (canonicalize_revision_range(lower, upper)))
# Prepare to handle the new file.
got_match = False
match_ranges = []
path = strip_revision_specifiers(line['depotFile'])
else:
re_matches = regexp.search(line['data'])
if invert_matches: re_matches = not re_matches
if re_matches:
if just_list_filenames:
got_match = True
match_ranges.append((line['lower'], line['upper']))
else:
revisions = canonicalize_revision_range(line['lower'], line['upper'])
sys.stdout.write('%s%s: %s' % (path, revisions, line['data']))
# Finish up the previous file, if there was one.
if path:
if just_list_filenames and got_match:
for lower, upper in coalesce_revision_ranges(match_ranges):
print '%s%s' % (path, (canonicalize_revision_range(lower, upper)))
def canonicalize_revision_range(lower, upper):
"""Given a revision range, returns a string containing a canonical
revision specifier for that range. Collapses "degenerate" ranges
to a single revision number.
"""
if upper == lower:
return '#%s' % (lower,)
else:
return '#%s,%s' % (lower, upper)
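# Illustrative examples:
#   canonicalize_revision_range(7, 7)  -> '#7'
#   canonicalize_revision_range(1, 9)  -> '#1,9'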
def strip_revision_specifiers(path):
"""Given a file specification that may contain a revision
specifier, returns just the file specification without a revision
specifier.
"""
at_pos = path.rfind('@')
if at_pos != -1:
return path[0:at_pos]
else:
hash_pos = path.rfind('#')
if hash_pos != -1:
return path[0:hash_pos]
return path
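# Illustrative examples (paths are hypothetical):
#   strip_revision_specifiers('//depot/project/Makefile#4')         -> '//depot/project/Makefile'
#   strip_revision_specifiers('//depot/project/Makefile@release_4') -> '//depot/project/Makefile'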
class R4Help(R4Command):
"""Custom hooks for 'p4 help ...' so we can display information on
our custom commands.
"""
def run(self, p4, command, args):
# User did "r4 help commands", so we want to list our custom
# commands too.
if len(args) == 1 and args[0] == 'commands':
help_output = p4.run('help', 'commands')
for h in help_output:
print h.replace('p4', 'r4')
print ' Custom commands:\n'
for command in all_r4_commands():
try:
print '\t%-11s %s' % (command, get_r4_command(command).short_description())
except NotImplementedError:
pass
# User did "r4 help <custom command>" so we want to display
# the custom command's help text. "r4 help help" is a special
# case where we want to default to p4 behavior.
elif len(args) == 1 and args[0] in all_r4_commands() and args[0] != 'help':
help_text = get_r4_command(args[0]).long_description()
print help_text
else:
help_output = p4.run('help', *args)
for h in help_output:
print h.replace('p4', 'r4')
def get_p4_connection():
p4 = P4.P4()
p4.exception_level = p4.RAISE_ERROR
p4.connect()
return p4
def coalesce_revision_ranges(ranges):
"""Returns the smallest set of revision ranges that cover the
ranges passed in. E.g., ((1, 10), (11, 13)) -> (1, 13)
"""
    # Keep it simple and just brute force this.
# Build a bitmap.
max_r = None
for l, r in ranges:
if not max_r or int(r) > max_r:
max_r = int(r)
revisions = [False] * (max_r + 1)
for l, r in ranges:
for i in range(int(l), int(r) + 1):
revisions[i] = True
# Scan the bitmap.
new_ranges = []
start_l = None
pos = 0
while pos < len(revisions):
state = revisions[pos]
if start_l:
if state == False:
new_ranges.append((start_l, pos - 1))
start_l = None
if not start_l:
if state == True:
start_l = pos
pos += 1
if start_l:
new_ranges.append((start_l, pos - 1))
return new_ranges
# Hook our custom implementations to the command names.
def_r4_command('status', R4Status())
def_r4_command('help', R4Help())
def_r4_command('grep', R4Grep())
def_r4_command('bisect', R4Bisect())
def_r4_command('blame', R4Blame())
if __name__ == '__main__':
try:
p4 = get_p4_connection()
if len(sys.argv) > 1:
command = sys.argv[1]
args = sys.argv[2:]
sys.exit(handle_command(command, args, p4=p4))
else:
run_standard_p4_command(None, [])
except P4.P4Exception, e:
sys.stderr.write('%s\n' % (e,))
sys.exit(1)
|
{
"content_hash": "d535015359d384015cda3b2b14315a95",
"timestamp": "",
"source": "github",
"line_count": 625,
"max_line_length": 118,
"avg_line_length": 33.312,
"alnum_prop": 0.5743996157540826,
"repo_name": "wiseman/r4",
"id": "f02c829e92207485d6c0a170184a86c168fc1dc0",
"size": "20875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "r4.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20875"
}
],
"symlink_target": ""
}
|
__author__ = "maggie.sun@intel.com, ryan.lei@intel.com"
import os
import xlsxwriter
import xlrd
import re
from Config import QPs, DnScaleRatio, QualityList, VbaBinFile, CvxH_WtRows,\
CvxH_WtLastCol, LoggerName, CalcBDRateInExcel, CvxH_WtCols, CvxHDataRows, CvxHDataStartCol
from Utils import GetShortContentName, CalcRowsClassAndContentDict
from CalcBDRate import BD_RATE
import logging
subloggername = "PostAnalysisSummary"
loggername = LoggerName + '.' + '%s' % subloggername
logger = logging.getLogger(loggername)
# Given all paths, including each content's convex hull result file (only one
# file per content), generate a summary file for all contents in the input path.
# Assume all contents' results use the same test settings.
################################################################################
### Helper Functions ###########################################################
def GetRDSummaryFileName(encMethod, codecName, preset, path):
filetype = 'xlsm' if CalcBDRateInExcel else 'xlsx'
name = 'ConvexHullRDSummary_ScaleAlgosNum_%d_%s_%s_%s.%s'\
% (len(DnScaleRatio), encMethod, codecName, preset, filetype)
return os.path.join(path, name)
def GetConvexHullDataSummaryFileName(encMethod, codecName, preset, path):
name = 'ConvexHullData_ScaleAlgosNum_%d_%s_%s_%s.xlsx'\
% (len(DnScaleRatio), encMethod, codecName, preset)
return os.path.join(path, name)
def SweepScalingAlgosInOneResultFile(resultfiles):
dnscls = []
upscls = []
    # Here we assume all result files include the same combinations of down- and
    # up-scaling algos, i.e. the same number of sheets with the same sheet names.
file = resultfiles[0]
if os.path.isfile(file):
rdwb = xlrd.open_workbook(file)
else:
return dnscls, upscls
if rdwb is not None:
shtnms = rdwb.sheet_names()
for shtname in shtnms:
item = re.findall(r"(.+)\-\-(.+)", shtname)
dnsl = item[0][0]
upsl = item[0][1]
dnscls.append(dnsl)
upscls.append(upsl)
return dnscls, upscls
def CopyResultDataToSummaryFile_Onesheet(sht, wt_cols, resultfiles):
rdrows = CvxH_WtRows
rd_endcol = CvxH_WtLastCol
shtname = sht.get_name()
sht.write(1, 0, 'Content Class')
sht.write(1, 1, 'Content Name')
sht.write(1, 2, 'QP')
for residx, col in zip(range(len(DnScaleRatio)), wt_cols):
sht.write(0, col, 'Scaling Ratio = %.2f' % (DnScaleRatio[residx]))
sht.write(1, col, 'Bitrate(kbps)')
qtynames = ['%s' % qty for qty in QualityList]
sht.write_row(1, col + 1, qtynames)
# copy the results data from each content's result file to corresponding
# location in summary excel file
for (cls, clip_list), row_class in zip(ClipDict.items(), Rows_Class):
sht.write(row_class, 0, cls)
rows_content = [i * len(QPs['AS']) for i in range(len(clip_list))]
for clip, row_cont in zip(clip_list, rows_content):
key = GetShortContentName(clip.file_name)
sht.write(row_class + row_cont, 1, key)
rdwb = None
for resfile in resultfiles:
if key in resfile:
rdwb = xlrd.open_workbook(resfile)
rdsht = rdwb.sheet_by_name(shtname)
for i, rdrow in zip(range(len(QPs['AS'])), rdrows):
data = rdsht.row_values(rdrow, 0, rd_endcol + 1)
sht.write_row(row_class + row_cont + i, 2, data)
break
            if rdwb is None:
                logger.warning("could not find convex hull result file for content: %s"
                               % clip.file_name)
            assert rdwb is not None
def CalBDRateWithExcel_OneSheet(sht, cols, cols_bdmtrs, cellformat):
row_refst = 0
bdstep = len(QPs['AS']) - 1
for cols_bd, residx in zip(cols_bdmtrs, range(1, len(DnScaleRatio))):
sht.write(0, cols_bd, 'BD-Rate %.2f vs. %.2f' % (DnScaleRatio[residx],
DnScaleRatio[0]))
sht.write_row(1, cols_bd, QualityList)
for (cls, clip_list), row_class in zip(ClipDict.items(), Rows_Class):
rows_content = [i * len(QPs['AS']) for i in range(len(clip_list))]
for row_cont in rows_content:
for y in range(len(QualityList)):
refbr_b = xlrd.cellnameabs(row_class + row_cont + row_refst,
cols[0])
refbr_e = xlrd.cellnameabs(row_class + row_cont + row_refst
+ bdstep, cols[0])
refq_b = xlrd.cellnameabs(row_class + row_cont + row_refst,
cols[0] + 1 + y)
refq_e = xlrd.cellnameabs(row_class + row_cont + row_refst
+ bdstep, cols[0] + 1 + y)
testbr_b = xlrd.cellnameabs(row_class + row_cont + row_refst,
cols[residx])
testbr_e = xlrd.cellnameabs(row_class + row_cont + row_refst
+ bdstep, cols[residx])
testq_b = xlrd.cellnameabs(row_class + row_cont + row_refst,
cols[residx] + 1 + y)
testq_e = xlrd.cellnameabs(row_class + row_cont + row_refst
+ bdstep, cols[residx] + 1 + y)
#formula = '=bdrate(%s:%s,%s:%s,%s:%s,%s:%s)' % (
#refbr_b, refbr_e, refq_b, refq_e, testbr_b, testbr_e,
# testq_b, testq_e)
formula = '=bdRateExtend(%s:%s,%s:%s,%s:%s,%s:%s)'\
% (refbr_b, refbr_e, refq_b, refq_e, testbr_b,
testbr_e, testq_b, testq_e)
sht.write_formula(row_class + row_cont, cols_bd + y, formula,
cellformat)
def CalBDRateWithPython_OneSheet(sht, cols_bdmtrs, resultfiles, cellformat):
row_refst = 0
bdstep = len(QPs['AS']) - 1
assert row_refst + bdstep < len(CvxH_WtRows)
shtname = sht.get_name()
rdrows = CvxH_WtRows
rdcols = CvxH_WtCols
for cols_bd, residx in zip(cols_bdmtrs, range(1, len(DnScaleRatio))):
sht.write(0, cols_bd, 'BD-Rate %.2f vs. %.2f' % (DnScaleRatio[residx],
DnScaleRatio[0]))
sht.write_row(1, cols_bd, QualityList)
for (cls, clip_list), row_class in zip(ClipDict.items(), Rows_Class):
rows_content = [i * len(QPs['AS']) for i in range(len(clip_list))]
for row_cont, clip in zip(rows_content, clip_list):
key = GetShortContentName(clip.file_name)
for resfile in resultfiles:
if key in resfile:
rdwb = xlrd.open_workbook(resfile)
rdsht = rdwb.sheet_by_name(shtname)
break
for y in range(len(QualityList)):
refbrs = rdsht.col_values(rdcols[0], rdrows[row_refst], rdrows[row_refst + bdstep] + 1)
refqtys = rdsht.col_values(rdcols[0] + 1 + y, rdrows[row_refst], rdrows[row_refst + bdstep] + 1)
testbrs = rdsht.col_values(rdcols[residx], rdrows[row_refst], rdrows[row_refst + bdstep] + 1)
testqtys = rdsht.col_values(rdcols[residx] + 1 + y, rdrows[row_refst], rdrows[row_refst + bdstep] + 1)
bdrate = BD_RATE(refbrs, refqtys, testbrs, testqtys)
if (bdrate != 'Error'):
bdrate /= 100.0
sht.write(row_class + row_cont, cols_bd + y, bdrate, cellformat)
else:
sht.write(row_class + row_cont, cols_bd + y, bdrate)
def GenerateFormula_SumRows(shtname, rows, col):
cells = ''
for row in rows:
location = xlrd.cellnameabs(row, col)
cells = cells + '\'%s\'!%s,' % (shtname, location)
cells = cells[:-1] # remove the last ,
formula = '=SUM(%s)/%d' % (cells, len(rows))
return formula
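# Illustrative result (the sheet name and cell positions are hypothetical;
# xlrd.cellnameabs produces 1-based absolute cell names such as '$C$4'):
#   GenerateFormula_SumRows('down--up', [3, 8], 2)
#     -> "=SUM('down--up'!$C$4,'down--up'!$C$9)/2"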
def GenerateFormula_SumRows_Weighted(rows, col, weight_rows, weight_col, num):
cells = ''
for row, wtrow in zip(rows, weight_rows):
location = xlrd.cellnameabs(row, col)
weight = xlrd.cellnameabs(wtrow, weight_col)
cells = cells + '%s * %s,' % (location, weight)
cells = cells[:-1] # remove the last ,
formula = '=SUM(%s)/%d' % (cells, num)
return formula
def WriteBitrateQtyAverageSheet(wb, rdshts, rdcols):
avg_sht = wb.add_worksheet('Average')
avg_sht.write(2, 0, 'Content Class')
avg_sht.write(2, 1, 'Content Number')
avg_sht.write(2, 2, 'QP')
colstart = 3
cols_res = [colstart]
step = len(QualityList) + 1 + 1 # 1 for bitrate, 1 for interval
colres_2nd_start = colstart + step
step = len(upScalAlgos) * (len(QualityList) + 1) + 1 # + 1 for interval
cols_res += [step * i + colres_2nd_start for i in range(len(DnScaleRatio) - 1)]
step = len(QualityList) + 1 # + 1 for bitrate
cols_upscl = [step * i for i in range(len(upScalAlgos))]
for residx, col_res in zip(range(len(DnScaleRatio)), cols_res):
avg_sht.write(0, col_res, 'ScalingRatio = %.2f' % (DnScaleRatio[residx]))
if residx == 0:
avg_sht.write(1, col_res + 1, 'None')
avg_sht.write(2, col_res, 'Bitrate(kbps)')
avg_sht.write_row(2, col_res + 1, QualityList)
else:
for dnsc, upsc, col_upscl in zip(dnScalAlgos, upScalAlgos, cols_upscl):
avg_sht.write(1, col_res + col_upscl + 1, '%s--%s' % (dnsc, upsc))
avg_sht.write(2, col_res + col_upscl, 'Bitrate(kbps)')
avg_sht.write_row(2, col_res + col_upscl + 1, QualityList)
startrow = 3
step = len(QPs['AS'])
rows_class_avg = [startrow + step * i for i in range(len(ClipDict))]
totalnum_content = 0
for (cls, clip_list), row_class, rdclassrow in zip(ClipDict.items(),
rows_class_avg,
Rows_Class):
avg_sht.write(row_class, 0, cls)
totalnum_content = totalnum_content + len(clip_list)
avg_sht.write(row_class, 1, len(clip_list))
avg_sht.write_column(row_class, 2, QPs['AS'])
rows_content = [i * len(QPs['AS']) for i in range(len(clip_list))]
for rdcol, col_res, residx in zip(rdcols, cols_res, range(len(DnScaleRatio))):
for i in range(len(QPs['AS'])):
sum_rows = [rdclassrow + row_cont + i for row_cont in rows_content]
for col_upscl, sht in zip(cols_upscl, rdshts):
shtname = sht.get_name()
# write bitrate average formula.
formula = GenerateFormula_SumRows(shtname, sum_rows, rdcol)
avg_sht.write_formula(row_class + i, col_res + col_upscl,
formula)
# write quality average formula
for j in range(len(QualityList)):
formula = GenerateFormula_SumRows(shtname, sum_rows,
rdcol + 1 + j)
avg_sht.write_formula(row_class + i,
col_res + col_upscl + 1 + j,
formula)
# for first resolution, no down and up scaling. only need
# one set of bitrate/quality data
if residx == 0:
break
# write total average
last_class_row = rows_class_avg[-1] + len(QPs['AS']) + 1 # 1 for 1 row of interval
avg_sht.write(last_class_row, 0, 'Total')
avg_sht.write(last_class_row, 1, totalnum_content)
avg_sht.write_column(last_class_row, 2, QPs['AS'])
weight_rows = [row_class for row_class in rows_class_avg]
for col_res, residx in zip(cols_res, range(len(DnScaleRatio))):
for i in range(len(QPs['AS'])):
sum_rows = [row_class + i for row_class in rows_class_avg]
for col_upscl in cols_upscl:
# bitrate average
formula = GenerateFormula_SumRows_Weighted(sum_rows,
col_res + col_upscl,
weight_rows, 1,
totalnum_content)
avg_sht.write_formula(last_class_row + i, col_res + col_upscl,
formula)
# quality average
for j in range(len(QualityList)):
formula = GenerateFormula_SumRows_Weighted(sum_rows,
col_res +
col_upscl + 1 + j,
weight_rows, 1,
totalnum_content)
avg_sht.write_formula(last_class_row + i,
col_res + col_upscl + 1 + j, formula)
# for first resolution, no down and up scaling. only need one
# set of bitrate/quality data
if residx == 0:
break
def WriteBDRateAverageSheet(wb, rdshts, rd_cols_bdmtrs, cellformat):
# write bdrate average sheet
bdavg_sht = wb.add_worksheet('Average_BDRate')
bdavg_sht.write(2, 0, 'Content Class')
bdavg_sht.write(2, 1, 'Content Number')
startcol = 2
startrow = 3
colintval_scalalgo = 1
colintval_dnscalres = 1
step_upscl = len(QualityList) + colintval_scalalgo
cols_upscl_bd = [step_upscl * i for i in range(len(upScalAlgos))]
step_res = len(upScalAlgos) * step_upscl + colintval_dnscalres
cols_res_bd = [step_res * i + startcol for i in range(len(DnScaleRatio) - 1)]
rows_class_rdavg = [startrow + i for i in range(len(ClipDict))]
for residx, col_res_bd in zip(range(1, len(DnScaleRatio)), cols_res_bd):
bdavg_sht.write(0, col_res_bd, 'BD-Rate %.2f vs. %.2f'
% (DnScaleRatio[residx], DnScaleRatio[0]))
for dnsc, upsc, col_upscl_bd in zip(dnScalAlgos, upScalAlgos, cols_upscl_bd):
bdavg_sht.write(1, col_res_bd + col_upscl_bd, '%s--%s' % (dnsc, upsc))
bdavg_sht.write_row(2, col_res_bd + col_upscl_bd, QualityList)
totalnum_content = 0
for (cls, clip_list), row_class, rdclassrow in zip(ClipDict.items(),
rows_class_rdavg,
Rows_Class):
bdavg_sht.write(row_class, 0, cls)
totalnum_content = totalnum_content + len(clip_list)
bdavg_sht.write(row_class, 1, len(clip_list))
rows_content = [i * len(QPs['AS']) for i in range(len(clip_list))]
sum_rows = [rdclassrow + row_cont for row_cont in rows_content]
for rdcol, col_res in zip(rd_cols_bdmtrs, cols_res_bd):
# write average bd rate
for col_upscl, sht in zip(cols_upscl_bd, rdshts):
shtname = sht.get_name()
for j in range(len(QualityList)):
formula = GenerateFormula_SumRows(shtname, sum_rows, rdcol + j)
bdavg_sht.write_formula(row_class, col_res + col_upscl + j,
formula, cellformat)
# write total average
last_row = rows_class_rdavg[-1] + 1
bdavg_sht.write(last_row, 0, 'Total')
bdavg_sht.write(last_row, 1, totalnum_content)
sum_rows = [row_class for row_class in rows_class_rdavg]
for col_res in cols_res_bd:
for col_upscl in cols_upscl_bd:
for j in range(len(QualityList)):
formula = GenerateFormula_SumRows_Weighted(sum_rows,
col_res + col_upscl + j,
sum_rows, 1,
totalnum_content)
bdavg_sht.write_formula(last_row, col_res + col_upscl + j,
formula, cellformat)
#######################################################################
#######################################################################
# GenerateSumRDExcelFile is to
# 1. summarize all contents convexhull results into one file
# 2. calculate average of bitrate and quality metrics for each content class
# 3. calculate BD rate across different scaling ratios for all scaling
# algorithms in convex hull test
# 4. calculate average BD rate for each content class
# Arguments description:
# content_paths is where the test contents are located; they are used for
# generating the convex hull results.
# resultfiles is a list of all convex hull RD result files generated by
# running '-f convexhull'
# summary_outpath is the folder where output summary file will be
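# A minimal usage sketch (the encoder/codec/preset strings and output path are
# hypothetical; resultfiles is the list of per-content convex hull RD files):
#   summary_file = GenerateSumRDExcelFile('ffmpeg', 'hevc', 'medium',
#                                         './analysis_summary', resultfiles,
#                                         clip_list)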
def GenerateSumRDExcelFile(encMethod, codecName, preset, summary_outpath,
resultfiles, clip_list):
global dnScalAlgos, upScalAlgos
# find all scaling algos tested in results file,
# IMPORTANT: expect up and down scaling algos are the same for every content
dnScalAlgos, upScalAlgos = SweepScalingAlgosInOneResultFile(resultfiles)
if not os.path.exists(summary_outpath):
os.makedirs(summary_outpath)
smfile = GetRDSummaryFileName(encMethod, codecName, preset, summary_outpath)
wb = xlsxwriter.Workbook(smfile)
# shts is for all scaling algorithms' convex hull test results
shts = []
for dnsc, upsc in zip(dnScalAlgos, upScalAlgos):
shtname = dnsc + '--' + upsc
sht = wb.add_worksheet(shtname)
shts.append(sht)
    # The variables below define the summary file data layout format.
    # If you change them, modify CopyResultDataToSummaryFile_Onesheet() and
    # CalcRowsClassAndContentDict() accordingly.
colstart = 3
colInterval = 2
rowstart = 2
    # Generate the starting row number of each class: Rows_Class
global ClipDict, Rows_Class
ClipDict, Rows_Class = CalcRowsClassAndContentDict(rowstart, clip_list,
len(QPs['AS']))
    # sum_wtcols are the starting columns of each scaling ratio's result block
step = colInterval + 1 + len(QualityList) # 1 is for bitrate
sum_wtcols = [step * i + colstart for i in range(len(DnScaleRatio))]
if CalcBDRateInExcel:
wb.add_vba_project(VbaBinFile)
cellformat = wb.add_format()
cellformat.set_num_format('0.00%')
    # cols_bdmtrs are the columns where the BD-rate data is written
step = len(QualityList) + 1
start_col_bd = sum_wtcols[-1] + step + 1
cols_bdmtrs = [start_col_bd + i * step for i in range(len(DnScaleRatio) - 1)]
# -1 because first resolution is used as reference
for sht in shts:
CopyResultDataToSummaryFile_Onesheet(sht, sum_wtcols, resultfiles)
# calculate bd rate in each scaling sheet
if CalcBDRateInExcel:
CalBDRateWithExcel_OneSheet(sht, sum_wtcols, cols_bdmtrs, cellformat)
else:
CalBDRateWithPython_OneSheet(sht, cols_bdmtrs, resultfiles, cellformat)
# calculate average bitrate and quality metrics for each category and
# write to "average" sheet
WriteBitrateQtyAverageSheet(wb, shts, sum_wtcols)
# calculate average bd metrics and write to a new sheet
WriteBDRateAverageSheet(wb, shts, cols_bdmtrs, cellformat)
wb.close()
return smfile
def GenerateSumCvxHullExcelFile(encMethod, codecName, preset, summary_outpath,
resultfiles, EnablePreInterpolation = False):
if not os.path.exists(summary_outpath):
os.makedirs(summary_outpath)
smfile = GetConvexHullDataSummaryFileName(encMethod, codecName, preset,
summary_outpath)
wb = xlsxwriter.Workbook(smfile)
cvx_cols = 4
if EnablePreInterpolation:
cvx_cols = 6
# shts is for all scaling algorithms' convex hull test results
shts = []
cols = [3 + i * cvx_cols for i in range(len(QualityList))]
for dnsc, upsc in zip(dnScalAlgos, upScalAlgos):
shtname = dnsc + '--' + upsc
sht = wb.add_worksheet(shtname)
shts.append(sht)
# write headers
sht.write(0, 0, 'Content Class')
sht.write(0, 1, 'Content Name')
sht.write(0, 2, 'Num RD Points')
for qty, col in zip(QualityList, cols):
sht.write(0, col, 'Resolution')
sht.write(0, col + 1, 'QP')
sht.write(0, col + 2, 'Bitrate(kbps)')
sht.write(0, col + 3, qty)
if EnablePreInterpolation:
sht.write(0, col + 4, 'Int_Bitrate(kbps)')
sht.write(0, col + 5, 'Int_' + qty)
# copy convexhull data from each content's result file to corresponding
# location in summary excel file
row = 1
rdcolstart = CvxHDataStartCol + 1
for (cls, clip_list) in ClipDict.items():
sht.write(row, 0, cls)
for clip in clip_list:
key = GetShortContentName(clip.file_name)
sht.write(row, 1, key)
for resfile in resultfiles:
if key in resfile:
rdwb = xlrd.open_workbook(resfile)
rdsht = rdwb.sheet_by_name(shtname)
maxNumQty = 0; maxNumIntQty = 0
for rdrow, col in zip(CvxHDataRows, cols):
qtys = []; brs = []; qps = []; ress = []
int_qtys = []; int_brs = []
numQty = 0
for qty in rdsht.row_values(rdrow)[rdcolstart:]:
if qty == '':
break
else:
qtys.append(qty)
numQty = numQty + 1
maxNumQty = max(maxNumQty, numQty)
for br in rdsht.row_values(rdrow + 1)[rdcolstart:]:
if br == '':
break
else:
brs.append(br)
for qp in rdsht.row_values(rdrow + 2)[rdcolstart:]:
if qp == '':
break
else:
qps.append(qp)
for res in rdsht.row_values(rdrow + 3)[rdcolstart:]:
if res == '':
break
else:
ress.append(res)
if EnablePreInterpolation:
numQty = 0
for qty in rdsht.row_values(rdrow + 4)[
rdcolstart:]:
if qty == '':
break
else:
int_qtys.append(qty)
numQty = numQty + 1
maxNumIntQty = max(maxNumIntQty, numQty)
for br in rdsht.row_values(rdrow + 5)[rdcolstart:]:
if br == '':
break
else:
int_brs.append(br)
sht.write_column(row, col, ress)
sht.write_column(row, col + 1, qps)
sht.write_column(row, col + 2, brs)
sht.write_column(row, col + 3, qtys)
if EnablePreInterpolation:
sht.write_column(row, col + 4, int_brs)
sht.write_column(row, col + 5, int_qtys)
sht.write(row, 2, max(maxNumQty, maxNumIntQty))
row = row + max(maxNumQty, maxNumIntQty)
break
wb.close()
return smfile
|
{
"content_hash": "fac4de1ea8f40d507429344c5b306896",
"timestamp": "",
"source": "github",
"line_count": 510,
"max_line_length": 122,
"avg_line_length": 48.59411764705882,
"alnum_prop": 0.5168462252350402,
"repo_name": "tdaede/awcy",
"id": "ad987c63e4ad59334cbc1863a2a3976528a8ef17",
"size": "25330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "convexhull_framework/src/PostAnalysis_Summary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "25"
},
{
"name": "CSS",
"bytes": "9661"
},
{
"name": "Dockerfile",
"bytes": "6487"
},
{
"name": "HTML",
"bytes": "9044"
},
{
"name": "JavaScript",
"bytes": "40783"
},
{
"name": "MATLAB",
"bytes": "1967"
},
{
"name": "Python",
"bytes": "20645"
},
{
"name": "Shell",
"bytes": "13066"
},
{
"name": "TypeScript",
"bytes": "132282"
}
],
"symlink_target": ""
}
|
"""
Mongodb-based Trials Object
===========================
Components involved:
- mongo
e.g. mongod ...
- driver
e.g. hyperopt-mongo-search mongo://address bandit_json bandit_algo_json
- worker
e.g. hyperopt-mongo-worker --loop mongo://address
Mongo
=====
Mongo (daemon process mongod) is used for IPC between the driver and worker.
Configure it as you like, so that hyperopt-mongo-search can communicate with it.
I think there is some support in this file for an ssh+mongo connection type.
The experiment uses the following collections for IPC:
* jobs - documents of a standard form used to store suggested trials and their
results. These documents have keys:
* spec : subdocument returned by bandit_algo.suggest
* exp_key: an identifier of which driver suggested this trial
* cmd: a tuple (protocol, ...) identifying bandit.evaluate
* state: 0, 1, 2, 3 for job state (new, running, ok, fail)
* owner: None for new jobs, (hostname, pid) for started jobs
* book_time: time a job was reserved
* refresh_time: last time the process running the job checked in
* result: the subdocument returned by bandit.evaluate
* error: for jobs of state 3, a reason for failure.
* logs: a dict of sequences of strings received by ctrl object
* info: info messages
* warn: warning messages
* error: error messages
* fs - a gridfs storage collection (used for pickling)
* drivers - documents describing drivers. These are used to prevent two drivers
from using the same exp_key simultaneously, and to attach saved states.
* exp_key
* workdir: [optional] path where workers should chdir to
Attachments:
* pkl: [optional] saved state of experiment class
* bandit_args_kwargs: [optional] pickled (clsname, args, kwargs) to
reconstruct bandit in worker processes
The MongoJobs and CtrlObj classes, as well as the main_worker method, form
the abstraction barrier around this database layout.
Worker
======
A worker looks up a job in a mongo database, maps that job document to a
runnable python object, calls that object, and writes the return value back to
the database.
A worker *reserves* a job by atomically identifying a document in the jobs
collection whose owner is None and whose state is 0, and setting the state to
1. If it fails to identify such a job, it loops with a random sleep interval
of a few seconds and polls the database.
If hyperopt-mongo-worker is called with a --loop argument then it goes back to
the database after finishing a job to identify and perform another one.
CtrlObj
-------
The worker allocates a CtrlObj and passes it to bandit.evaluate in addition to
the subdocument found at job['spec']. A bandit can use ctrl.info, ctrl.warn,
ctrl.error and so on like logger methods, and those messages will be written
to the mongo database (to job['logs']). They are not written synchronously
though, they are written when the bandit.evaluate function calls
ctrl.checkpoint().
Ctrl.checkpoint does several things:
* flushes logging messages to the database
* updates the refresh_time
* optionally updates the result subdocument
The main_worker routine calls Ctrl.checkpoint(rval) once after the
bandit.evaluate function has returned before setting the state to 2 or 3 to
finalize the job in the database.
"""
__authors__ = ["James Bergstra", "Dan Yamins"]
__license__ = "3-clause BSD License"
__contact__ = "github.com/jaberg/hyperopt"
import copy
try:
import dill as cPickle
except ImportError:
import cPickle
import hashlib
import logging
import optparse
import os
import shutil
import signal
import socket
import subprocess
import sys
import time
import urlparse
import warnings
import numpy
import pymongo
import gridfs
from bson import SON
logger = logging.getLogger(__name__)
from .base import JOB_STATES
from .base import (JOB_STATE_NEW, JOB_STATE_RUNNING, JOB_STATE_DONE,
JOB_STATE_ERROR)
from .base import Trials
from .base import trials_from_docs
from .base import InvalidTrial
from .base import Ctrl
from .base import SONify
from .base import spec_from_misc
from .utils import coarse_utcnow
from .utils import fast_isin
from .utils import get_most_recent_inds
from .utils import json_call
import plotting
class OperationFailure(Exception):
"""Proxy that could be factored out if we also want to use CouchDB and
JobmanDB classes with this interface
"""
class Shutdown(Exception):
"""
Exception for telling mongo_worker loop to quit
"""
class WaitQuit(Exception):
"""
Exception for telling mongo_worker loop to quit
"""
class InvalidMongoTrial(InvalidTrial):
pass
class BanditSwapError(Exception):
"""Raised when the search program tries to change the bandit attached to
an experiment.
"""
class ReserveTimeout(Exception):
"""No job was reserved in the alotted time
"""
def read_pw():
username = 'hyperopt'
password = open(os.path.join(os.getenv('HOME'), ".hyperopt")).read()[:-1]
return dict(
username=username,
password=password)
def authenticate_for_db(db):
d = read_pw()
db.authenticate(d['username'], d['password'])
def parse_url(url, pwfile=None):
"""Unpacks a url of the form
protocol://[username[:pw]]@hostname[:port]/db/collection
:rtype: tuple of strings
:returns: protocol, username, password, hostname, port, dbname, collection
:note:
If the password is not given in the url but the username is, then
this function will read the password from file by calling
``open(pwfile).read()[:-1]``
"""
protocol=url[:url.find(':')]
ftp_url='ftp'+url[url.find(':'):]
# -- parse the string as if it were an ftp address
tmp = urlparse.urlparse(ftp_url)
logger.info( 'PROTOCOL %s'% protocol)
logger.info( 'USERNAME %s'% tmp.username)
logger.info( 'HOSTNAME %s'% tmp.hostname)
logger.info( 'PORT %s'% tmp.port)
logger.info( 'PATH %s'% tmp.path)
try:
_, dbname, collection = tmp.path.split('/')
except:
print >> sys.stderr, "Failed to parse '%s'"%(str(tmp.path))
raise
logger.info( 'DB %s'% dbname)
logger.info( 'COLLECTION %s'% collection)
if tmp.password is None:
if (tmp.username is not None) and pwfile:
password = open(pwfile).read()[:-1]
else:
password = None
else:
password = tmp.password
logger.info( 'PASS %s'% password)
return (protocol, tmp.username, password, tmp.hostname, tmp.port, dbname,
collection)
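# -- Illustrative sketch (not part of the original module): how a connection
#    string of the documented form maps onto the tuple returned by parse_url.
#    The hostname, database and collection names below are placeholders.
def _demo_parse_url():
    """
    >>> _demo_parse_url()  # doctest: +SKIP
    ('mongo', 'hyperopt', None, 'mongohost', 27017, 'dbname', 'jobs')
    """
    return parse_url('mongo://hyperopt@mongohost:27017/dbname/jobs')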
def connection_with_tunnel(host='localhost',
auth_dbname='admin', port=27017,
ssh=False, user='hyperopt', pw=None):
if ssh:
local_port=numpy.random.randint(low=27500, high=28000)
# -- forward from local to remote machine
ssh_tunnel = subprocess.Popen(
['ssh', '-NTf', '-L',
'%i:%s:%i'%(local_port, '127.0.0.1', port),
host],
#stdin=subprocess.PIPE,
#stdout=subprocess.PIPE,
#stderr=subprocess.PIPE,
)
# -- give the subprocess time to set up
time.sleep(.5)
connection = pymongo.Connection('127.0.0.1', local_port,
document_class=SON)
else:
connection = pymongo.Connection(host, port, document_class=SON)
if user:
if user == 'hyperopt':
authenticate_for_db(connection[auth_dbname])
else:
raise NotImplementedError()
ssh_tunnel=None
return connection, ssh_tunnel
def connection_from_string(s):
protocol, user, pw, host, port, db, collection = parse_url(s)
if protocol == 'mongo':
ssh=False
elif protocol in ('mongo+ssh', 'ssh+mongo'):
ssh=True
else:
raise ValueError('unrecognized protocol for MongoJobs', protocol)
connection, tunnel = connection_with_tunnel(
ssh=ssh,
user=user,
pw=pw,
host=host,
port=port,
)
return connection, tunnel, connection[db], connection[db][collection]
class MongoJobs(object):
"""
# Interface to a Jobs database structured like this
#
# Collections:
#
# db.jobs - structured {config_name, 'cmd', 'owner', 'book_time',
# 'refresh_time', 'state', 'exp_key', 'owner', 'result'}
# This is the collection that the worker nodes write to
#
# db.gfs - file storage via gridFS for all collections
#
"""
    def __init__(self, db, jobs, gfs, conn, tunnel, config_name='spec'):
self.db = db
self.jobs = jobs
self.gfs = gfs
self.conn=conn
self.tunnel=tunnel
self.config_name = config_name
# TODO: rename jobs -> coll throughout
coll = property(lambda s : s.jobs)
@classmethod
def alloc(cls, dbname, host='localhost',
auth_dbname='admin', port=27017,
jobs_coll='jobs', gfs_coll='fs', ssh=False, user=None, pw=None):
connection, tunnel = connection_with_tunnel(
host, auth_dbname, port, ssh, user, pw)
db = connection[dbname]
gfs = gridfs.GridFS(db, collection=gfs_coll)
return cls(db, db[jobs_coll], gfs, connection, tunnel)
@classmethod
def new_from_connection_str(cls, conn_str, gfs_coll='fs', config_name='spec'):
connection, tunnel, db, coll = connection_from_string(conn_str)
gfs = gridfs.GridFS(db, collection=gfs_coll)
return cls(db, coll, gfs, connection, tunnel, config_name)
def __iter__(self):
return self.jobs.find()
def __len__(self):
try:
return self.jobs.count()
except:
return 0
def create_jobs_indexes(self):
jobs = self.db.jobs
for k in ['exp_key', 'result.loss', 'book_time']:
jobs.create_index(k)
def create_drivers_indexes(self):
drivers = self.db.drivers
drivers.create_index('exp_key', unique=True)
def create_indexes(self):
self.create_jobs_indexes()
self.create_drivers_indexes()
def jobs_complete(self, cursor=False):
c = self.jobs.find(spec=dict(state=JOB_STATE_DONE))
return c if cursor else list(c)
def jobs_error(self, cursor=False):
c = self.jobs.find(spec=dict(state=JOB_STATE_ERROR))
return c if cursor else list(c)
def jobs_running(self, cursor=False):
if cursor:
raise NotImplementedError()
rval = list(self.jobs.find(spec=dict(state=JOB_STATE_RUNNING)))
#TODO: mark some as MIA
rval = [r for r in rval if not r.get('MIA', False)]
return rval
def jobs_dead(self, cursor=False):
if cursor:
raise NotImplementedError()
rval = list(self.jobs.find(spec=dict(state=JOB_STATE_RUNNING)))
#TODO: mark some as MIA
rval = [r for r in rval if r.get('MIA', False)]
return rval
def jobs_queued(self, cursor=False):
c = self.jobs.find(spec=dict(state=JOB_STATE_NEW))
return c if cursor else list(c)
def insert(self, job, safe=True):
"""Return a job dictionary by inserting the job dict into the database"""
try:
cpy = copy.deepcopy(job)
# this call adds an _id field to cpy
_id = self.jobs.insert(cpy, safe=safe, check_keys=True)
# so now we return the dict with the _id field
assert _id == cpy['_id']
return cpy
except pymongo.errors.OperationFailure, e:
raise OperationFailure(e)
def delete(self, job, safe=True):
"""Delete job[s]"""
try:
self.jobs.remove(job, safe=safe)
except pymongo.errors.OperationFailure, e:
raise OperationFailure(e)
def delete_all(self, cond={}, safe=True):
"""Delete all jobs and attachments"""
try:
for d in self.jobs.find(spec=cond, fields=['_id', '_attachments']):
logger.info('deleting job %s' % d['_id'])
for name, file_id in d.get('_attachments', []):
try:
self.gfs.delete(file_id)
except gridfs.errors.NoFile:
logger.error('failed to remove attachment %s:%s' % (
name, file_id))
self.jobs.remove(d, safe=safe)
except pymongo.errors.OperationFailure, e:
raise OperationFailure(e)
def delete_all_error_jobs(self, safe=True):
return self.delete_all(cond={'state': JOB_STATE_ERROR}, safe=safe)
def reserve(self, host_id, cond=None, exp_key=None):
now = coarse_utcnow()
if cond is None:
cond = {}
else:
cond = copy.copy(cond) #copy is important, will be modified, but only the top-level
if exp_key is not None:
cond['exp_key'] = exp_key
#having an owner of None implies state==JOB_STATE_NEW, so this effectively
#acts as a filter to make sure that only new jobs get reserved.
if cond.get('owner') is not None:
raise ValueError('refusing to reserve owned job')
else:
cond['owner'] = None
cond['state'] = JOB_STATE_NEW #theoretically this is redundant, theoretically
try:
rval = self.jobs.find_and_modify(
cond,
{'$set':
{'owner': host_id,
'book_time': now,
'state': JOB_STATE_RUNNING,
'refresh_time': now,
}
},
new=True,
safe=True,
upsert=False)
except pymongo.errors.OperationFailure, e:
logger.error('Error during reserve_job: %s'%str(e))
rval = None
return rval
def refresh(self, doc, safe=False):
        self.update(doc, dict(refresh_time=coarse_utcnow()), safe=safe)
def update(self, doc, dct, safe=True, collection=None):
"""Return union of doc and dct, after making sure that dct has been
added to doc in `collection`.
This function does not modify either `doc` or `dct`.
safe=True means error-checking is done. safe=False means this function will succeed
regardless of what happens with the db.
"""
if collection is None:
collection = self.coll
dct = copy.deepcopy(dct)
if '_id' not in doc:
raise ValueError('doc must have an "_id" key to be updated')
if '_id' in dct:
if dct['_id'] != doc['_id']:
raise ValueError('cannot update the _id field')
del dct['_id']
if 'version' in dct:
if dct['version'] != doc['version']:
warnings.warn('Ignoring "version" field in update dictionary')
if 'version' in doc:
doc_query = dict(_id=doc['_id'], version=doc['version'])
dct['version'] = doc['version']+1
else:
doc_query = dict(_id=doc['_id'])
dct['version'] = 1
try:
# warning - if doc matches nothing then this function succeeds
# N.B. this matches *at most* one entry, and possibly zero
collection.update(
doc_query,
{'$set': dct},
safe=True,
upsert=False,
multi=False,)
except pymongo.errors.OperationFailure, e:
# translate pymongo failure into generic failure
raise OperationFailure(e)
# update doc in-place to match what happened on the server side
doc.update(dct)
if safe:
server_doc = collection.find_one(
dict(_id=doc['_id'], version=doc['version']))
if server_doc is None:
raise OperationFailure('updated doc not found : %s'
% str(doc))
elif server_doc != doc:
if 0:# This is all commented out because it is tripping on the fact that
# str('a') != unicode('a').
# TODO: eliminate false alarms and catch real ones
mismatching_keys = []
for k, v in server_doc.items():
if k in doc:
if doc[k] != v:
mismatching_keys.append((k, v, doc[k]))
else:
mismatching_keys.append((k, v, '<missing>'))
for k,v in doc.items():
if k not in server_doc:
mismatching_keys.append((k, '<missing>', v))
raise OperationFailure('local and server doc documents are out of sync: %s'%
repr((doc, server_doc, mismatching_keys)))
return doc
def attachment_names(self, doc):
def as_str(name_id):
assert isinstance(name_id[0], basestring), name_id
return str(name_id[0])
return map(as_str, doc.get('_attachments', []))
def set_attachment(self, doc, blob, name, collection=None):
"""Attach potentially large data string `blob` to `doc` by name `name`
blob must be a string
doc must have been saved in some collection (must have an _id), but not
necessarily the jobs collection.
name must be a string
Returns None
"""
# If there is already a file with the given name for this doc, then we will delete it
# after writing the new file
attachments = doc.get('_attachments', [])
name_matches = [a for a in attachments if a[0] == name]
# the filename is set to something so that fs.list() will display the file
new_file_id = self.gfs.put(blob, filename='%s_%s' % (doc['_id'], name))
logger.info('stored blob of %i bytes with id=%s and filename %s_%s' % (
len(blob), str(new_file_id), doc['_id'], name))
new_attachments = ([a for a in attachments if a[0] != name]
+ [(name, new_file_id)])
try:
ii = 0
doc = self.update(doc, {'_attachments': new_attachments},
collection=collection)
# there is a database leak until we actually delete the files that
# are no longer pointed to by new_attachments
while ii < len(name_matches):
self.gfs.delete(name_matches[ii][1])
ii += 1
except:
while ii < len(name_matches):
logger.warning("Leak during set_attachment: old_file_id=%s" % (
name_matches[ii][1]))
ii += 1
raise
assert len([n for n in self.attachment_names(doc) if n == name]) == 1
#return new_file_id
def get_attachment(self, doc, name):
"""Retrieve data attached to `doc` by `attach_blob`.
Raises OperationFailure if `name` does not correspond to an attached blob.
Returns the blob as a string.
"""
attachments = doc.get('_attachments', [])
file_ids = [a[1] for a in attachments if a[0] == name]
if not file_ids:
raise OperationFailure('Attachment not found: %s' % name)
if len(file_ids) > 1:
raise OperationFailure('multiple name matches', (name, file_ids))
return self.gfs.get(file_ids[0]).read()
def delete_attachment(self, doc, name, collection=None):
attachments = doc.get('_attachments', [])
file_id = None
for i,a in enumerate(attachments):
if a[0] == name:
file_id = a[1]
break
if file_id is None:
raise OperationFailure('Attachment not found: %s' % name)
#print "Deleting", file_id
del attachments[i]
self.update(doc, {'_attachments':attachments}, collection=collection)
self.gfs.delete(file_id)
class MongoTrials(Trials):
"""Trials maps on to an entire mongo collection. It's basically a wrapper
around MongoJobs for now.
As a concession to performance, this object permits trial filtering based
on the exp_key, but I feel that's a hack. The case of `cmd` is similar--
the exp_key and cmd are semantically coupled.
WRITING TO THE DATABASE
-----------------------
The trials object is meant for *reading* a trials database. Writing
to a database is different enough from writing to an in-memory
collection that no attempt has been made to abstract away that
difference. If you want to update the documents within
a MongoTrials collection, then retrieve the `.handle` attribute (a
MongoJobs instance) and use lower-level methods, or pymongo's
interface directly. When you are done writing, call refresh() or
refresh_tids() to bring the MongoTrials up to date.
"""
async = True
def __init__(self, arg, exp_key=None, cmd=None, workdir=None,
refresh=True):
if isinstance(arg, MongoJobs):
self.handle = arg
else:
connection_string = arg
self.handle = MongoJobs.new_from_connection_str(connection_string)
self.handle.create_indexes()
self._exp_key = exp_key
self.cmd = cmd
self.workdir = workdir
if refresh:
self.refresh()
def view(self, exp_key=None, cmd=None, workdir=None, refresh=True):
rval = self.__class__(self.handle,
exp_key=self._exp_key if exp_key is None else exp_key,
cmd=self.cmd if cmd is None else cmd,
workdir=self.workdir if workdir is None else workdir,
refresh=refresh)
return rval
def refresh_tids(self, tids):
""" Sync documents with `['tid']` in the list of `tids` from the
database (not *to* the database).
Local trial documents whose tid is not in `tids` are not
affected by this call. Local trial documents whose tid is in `tids` may
be:
* *deleted* (if db no longer has corresponding document), or
* *updated* (if db has an updated document) or,
* *left alone* (if db document matches local one).
Additionally, if the db has a matching document, but there is no
local trial with a matching tid, then the db document will be
*inserted* into the local collection.
"""
exp_key = self._exp_key
if exp_key != None:
query = {'exp_key' : exp_key}
else:
query = {}
t0 = time.time()
query['state'] = {'$ne': JOB_STATE_ERROR}
if tids is not None:
query['tid'] = {'$in': list(tids)}
orig_trials = getattr(self, '_trials', [])
_trials = orig_trials[:] #copy to make sure it doesn't get screwed up
if _trials:
db_data = list(self.handle.jobs.find(query,
fields=['_id', 'version']))
# -- pull down a fresh list of ids from mongo
if db_data:
#make numpy data arrays
db_data = numpy.rec.array([(x['_id'], int(x['version']))
for x in db_data],
names=['_id', 'version'])
db_data.sort(order=['_id', 'version'])
db_data = db_data[get_most_recent_inds(db_data)]
existing_data = numpy.rec.array([(x['_id'],
int(x['version'])) for x in _trials],
names=['_id', 'version'])
existing_data.sort(order=['_id', 'version'])
#which records are in db but not in existing, and vice versa
db_in_existing = fast_isin(db_data['_id'], existing_data['_id'])
existing_in_db = fast_isin(existing_data['_id'], db_data['_id'])
#filtering out out-of-date records
_trials = [_trials[_ind] for _ind in existing_in_db.nonzero()[0]]
#new data is what's in db that's not in existing
new_data = db_data[numpy.invert(db_in_existing)]
                #having removed the new and out-of-date data,
#concentrating on data in db and existing for state changes
db_data = db_data[db_in_existing]
existing_data = existing_data[existing_in_db]
try:
assert len(db_data) == len(existing_data)
assert (existing_data['_id'] == db_data['_id']).all()
assert (existing_data['version'] <= db_data['version']).all()
except:
reportpath = os.path.join(os.getcwd(),
'hyperopt_refresh_crash_report_' + \
str(numpy.random.randint(1e8)) + '.pkl')
logger.error('HYPEROPT REFRESH ERROR: writing error file to %s' % reportpath)
_file = open(reportpath, 'w')
cPickle.dump({'db_data': db_data,
'existing_data': existing_data},
_file)
_file.close()
raise
same_version = existing_data['version'] == db_data['version']
_trials = [_trials[_ind] for _ind in same_version.nonzero()[0]]
version_changes = existing_data[numpy.invert(same_version)]
#actually get the updated records
update_ids = new_data['_id'].tolist() + version_changes['_id'].tolist()
num_new = len(update_ids)
update_query = copy.deepcopy(query)
update_query['_id'] = {'$in': update_ids}
updated_trials = list(self.handle.jobs.find(update_query))
_trials.extend(updated_trials)
else:
num_new = 0
_trials = []
else:
#this case is for performance, though should be able to be removed
#without breaking correctness.
_trials = list(self.handle.jobs.find(query))
if _trials:
_trials = [_trials[_i] for _i in get_most_recent_inds(_trials)]
num_new = len(_trials)
logger.debug('Refresh data download took %f seconds for %d ids' %
(time.time() - t0, num_new))
if tids is not None:
# -- If tids were given, then _trials only contains
# documents with matching tids. Here we augment these
# fresh matching documents, with our current ones whose
# tids don't match.
new_trials = _trials
tids_set = set(tids)
assert all(t['tid'] in tids_set for t in new_trials)
old_trials = [t for t in orig_trials if t['tid'] not in tids_set]
_trials = new_trials + old_trials
# -- reassign new trials to self, in order of increasing tid
jarray = numpy.array([j['_id'] for j in _trials])
jobsort = jarray.argsort()
self._trials = [_trials[_idx] for _idx in jobsort]
self._specs = [_trials[_idx]['spec'] for _idx in jobsort]
self._results = [_trials[_idx]['result'] for _idx in jobsort]
self._miscs = [_trials[_idx]['misc'] for _idx in jobsort]
def refresh(self):
self.refresh_tids(None)
def _insert_trial_docs(self, docs):
rval = []
for doc in docs:
rval.append(self.handle.jobs.insert(doc, safe=True))
return rval
def count_by_state_unsynced(self, arg):
exp_key = self._exp_key
# TODO: consider searching by SON rather than dict
if isinstance(arg, int):
if arg not in JOB_STATES:
raise ValueError('invalid state', arg)
query = dict(state=arg)
else:
assert hasattr(arg, '__iter__')
states = list(arg)
assert all([x in JOB_STATES for x in states])
query = dict(state={'$in': states})
if exp_key != None:
query['exp_key'] = exp_key
rval = self.handle.jobs.find(query).count()
return rval
def delete_all(self, cond=None):
if cond is None:
cond = {}
else:
cond = dict(cond)
if self._exp_key:
cond['exp_key'] = self._exp_key
# -- remove all documents matching condition
self.handle.delete_all(cond)
gfs = self.handle.gfs
for filename in gfs.list():
try:
fdoc = gfs.get_last_version(filename=filename, **cond)
except gridfs.errors.NoFile:
continue
gfs.delete(fdoc._id)
self.refresh()
def new_trial_ids(self, N):
db = self.handle.db
# N.B. that the exp key is *not* used here. It was once, but it caused
# a nasty bug: tids were generated by a global experiment
# with exp_key=None, running a BanditAlgo that introduced sub-experiments
# with exp_keys, which ran jobs that did result injection. The tids of
# injected jobs were sometimes unique within an experiment, and
# sometimes not. Hilarious!
#
# Solution: tids are generated to be unique across the db, not just
# within an exp_key.
#
# -- mongo docs say you can't upsert an empty document
query = {'a': 0}
doc = None
while doc is None:
doc = db.job_ids.find_and_modify(
query,
{'$inc' : {'last_id': N}},
upsert=True,
safe=True)
if doc is None:
logger.warning('no last_id found, re-trying')
time.sleep(1.0)
lid = doc.get('last_id', 0)
return range(lid, lid + N)
def trial_attachments(self, trial):
"""
Attachments to a single trial (e.g. learned weights)
Returns a dictionary interface to the attachments.
"""
# don't offer more here than in MongoCtrl
class Attachments(object):
def __contains__(_self, name):
return name in self.handle.attachment_names(doc=trial)
def __len__(_self):
return len(self.handle.attachment_names(doc=trial))
def __iter__(_self):
return iter(self.handle.attachment_names(doc=trial))
def __getitem__(_self, name):
try:
return self.handle.get_attachment(
doc=trial,
name=name)
except OperationFailure:
raise KeyError(name)
def __setitem__(_self, name, value):
self.handle.set_attachment(
doc=trial,
blob=value,
name=name,
collection=self.handle.db.jobs)
def __delitem__(_self, name):
raise NotImplementedError('delete trial_attachment')
def keys(self):
return [k for k in self]
def values(self):
return [self[k] for k in self]
def items(self):
return [(k, self[k]) for k in self]
return Attachments()
@property
def attachments(self):
"""
Attachments to a Trials set (such as bandit args).
Support syntax for load: self.attachments[name]
Support syntax for store: self.attachments[name] = value
"""
gfs = self.handle.gfs
query = {}
if self._exp_key:
query['exp_key'] = self._exp_key
class Attachments(object):
def __iter__(_self):
if query:
# -- gfs.list does not accept query kwargs
# (at least, as of pymongo 2.4)
filenames = [fname
for fname in gfs.list()
if fname in _self]
else:
filenames = gfs.list()
return iter(filenames)
def __contains__(_self, name):
return gfs.exists(filename=name, **query)
def __getitem__(_self, name):
try:
rval = gfs.get_version(filename=name, **query).read()
return rval
except gridfs.NoFile:
raise KeyError(name)
def __setitem__(_self, name, value):
if gfs.exists(filename=name, **query):
gout = gfs.get_last_version(filename=name, **query)
gfs.delete(gout._id)
gfs.put(value, filename=name, **query)
def __delitem__(_self, name):
gout = gfs.get_last_version(filename=name, **query)
gfs.delete(gout._id)
return Attachments()
class MongoWorker(object):
poll_interval = 3.0 # -- seconds
workdir = None
def __init__(self, mj,
poll_interval=poll_interval,
workdir=workdir,
exp_key=None,
logfilename='logfile.txt',
):
"""
mj - MongoJobs interface to jobs collection
poll_interval - seconds
workdir - string
exp_key - restrict reservations to this key
"""
self.mj = mj
self.poll_interval = poll_interval
self.workdir = workdir
self.exp_key = exp_key
self.logfilename = logfilename
def make_log_handler(self):
self.log_handler = logging.FileHandler(self.logfilename)
self.log_handler.setFormatter(
logging.Formatter(
fmt='%(levelname)s (%(name)s): %(message)s'))
self.log_handler.setLevel(logging.INFO)
def run_one(self,
host_id=None,
reserve_timeout=None,
erase_created_workdir=False,
):
        if host_id is None:
            host_id = '%s:%i' % (socket.gethostname(), os.getpid())
job = None
start_time = time.time()
mj = self.mj
while job is None:
if (reserve_timeout
and (time.time() - start_time) > reserve_timeout):
raise ReserveTimeout()
job = mj.reserve(host_id, exp_key=self.exp_key)
if not job:
interval = (1 +
numpy.random.rand()
* (float(self.poll_interval) - 1.0))
logger.info('no job found, sleeping for %.1fs' % interval)
time.sleep(interval)
logger.debug('job found: %s' % str(job))
# -- don't let the cmd mess up our trial object
spec = spec_from_misc(job['misc'])
ctrl = MongoCtrl(
trials=MongoTrials(mj, exp_key=job['exp_key'], refresh=False),
read_only=False,
current_trial=job)
if self.workdir is None:
workdir = job['misc'].get('workdir', os.getcwd())
if workdir is None:
workdir = ''
workdir = os.path.join(workdir, str(job['_id']))
else:
workdir = self.workdir
workdir = os.path.abspath(os.path.expanduser(workdir))
cwd = os.getcwd()
sentinal = None
if not os.path.isdir(workdir):
# -- figure out the closest point to the workdir in the filesystem
closest_dir = ''
for wdi in os.path.split(workdir):
if os.path.isdir(os.path.join(closest_dir, wdi)):
closest_dir = os.path.join(closest_dir, wdi)
else:
break
assert closest_dir != workdir
# -- touch a sentinal file so that recursive directory
# removal stops at the right place
sentinal = os.path.join(closest_dir, wdi + '.inuse')
logger.debug("touching sentinal file: %s" % sentinal)
open(sentinal, 'w').close()
# -- now just make the rest of the folders
logger.debug("making workdir: %s" % workdir)
os.makedirs(workdir)
try:
root_logger = logging.getLogger()
if self.logfilename:
self.make_log_handler()
root_logger.addHandler(self.log_handler)
cmd = job['misc']['cmd']
cmd_protocol = cmd[0]
try:
if cmd_protocol == 'cpickled fn':
worker_fn = cPickle.loads(cmd[1])
elif cmd_protocol == 'call evaluate':
bandit = cPickle.loads(cmd[1])
worker_fn = bandit.evaluate
elif cmd_protocol == 'token_load':
cmd_toks = cmd[1].split('.')
cmd_module = '.'.join(cmd_toks[:-1])
worker_fn = exec_import(cmd_module, cmd[1])
elif cmd_protocol == 'bandit_json evaluate':
bandit = json_call(cmd[1])
worker_fn = bandit.evaluate
elif cmd_protocol == 'driver_attachment':
#name = 'driver_attachment_%s' % job['exp_key']
blob = ctrl.trials.attachments[cmd[1]]
bandit_name, bandit_args, bandit_kwargs = cPickle.loads(blob)
worker_fn = json_call(bandit_name,
args=bandit_args,
kwargs=bandit_kwargs).evaluate
elif cmd_protocol == 'domain_attachment':
blob = ctrl.trials.attachments[cmd[1]]
try:
domain = cPickle.loads(blob)
except BaseException, e:
logger.info('Error while unpickling. Try installing dill via "pip install dill" for enhanced pickling support.')
raise
worker_fn = domain.evaluate
else:
raise ValueError('Unrecognized cmd protocol', cmd_protocol)
result = worker_fn(spec, ctrl)
result = SONify(result)
except BaseException, e:
#XXX: save exception to database, but if this fails, then
# at least raise the original traceback properly
logger.info('job exception: %s' % str(e))
ctrl.checkpoint()
mj.update(job,
{'state': JOB_STATE_ERROR,
'error': (str(type(e)), str(e))},
safe=True)
raise
finally:
if self.logfilename:
root_logger.removeHandler(self.log_handler)
os.chdir(cwd)
logger.info('job finished: %s' % str(job['_id']))
attachments = result.pop('attachments', {})
for aname, aval in attachments.items():
logger.info(
'mongoexp: saving attachment name=%s (%i bytes)' % (
aname, len(aval)))
ctrl.attachments[aname] = aval
ctrl.checkpoint(result)
mj.update(job, {'state': JOB_STATE_DONE}, safe=True)
if sentinal:
if erase_created_workdir:
logger.debug('MongoWorker.run_one: rmtree %s' % workdir)
shutil.rmtree(workdir)
# -- put it back so that recursive removedirs works
os.mkdir(workdir)
# -- recursive backtrack to sentinal
logger.debug('MongoWorker.run_one: removedirs %s'
% workdir)
os.removedirs(workdir)
# -- remove sentinal
logger.debug('MongoWorker.run_one: rm %s' % sentinal)
os.remove(sentinal)
class MongoCtrl(Ctrl):
"""
Attributes:
current_trial - current job document
jobs - MongoJobs object in which current_trial resides
read_only - True means don't change the db
"""
def __init__(self, trials, current_trial, read_only):
self.trials = trials
self.current_trial = current_trial
self.read_only = read_only
def debug(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.debug(*args, **kwargs)
def info(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.info(*args, **kwargs)
def warn(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.warn(*args, **kwargs)
def error(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.error(*args, **kwargs)
def checkpoint(self, result=None):
if not self.read_only:
handle = self.trials.handle
handle.refresh(self.current_trial)
if result is not None:
return handle.update(self.current_trial, dict(result=result))
@property
def attachments(self):
"""
Support syntax for load: self.attachments[name]
Support syntax for store: self.attachments[name] = value
"""
return self.trials.trial_attachments(trial=self.current_trial)
@property
def set_attachment(self):
# XXX: Is there a better deprecation error?
raise RuntimeError(
'set_attachment deprecated. Use `self.attachments[name] = value`')
def exec_import(cmd_module, cmd):
worker_fn = None
exec('import %s; worker_fn = %s' % (cmd_module, cmd))
return worker_fn
def as_mongo_str(s):
if s.startswith('mongo://'):
return s
else:
return 'mongo://%s' % s
def main_worker_helper(options, args):
N = int(options.max_jobs)
if options.last_job_timeout is not None:
last_job_timeout = time.time() + float(options.last_job_timeout)
else:
last_job_timeout = None
def sighandler_shutdown(signum, frame):
logger.info('Caught signal %i, shutting down.' % signum)
raise Shutdown(signum)
def sighandler_wait_quit(signum, frame):
logger.info('Caught signal %i, shutting down.' % signum)
raise WaitQuit(signum)
signal.signal(signal.SIGINT, sighandler_shutdown)
signal.signal(signal.SIGHUP, sighandler_shutdown)
signal.signal(signal.SIGTERM, sighandler_shutdown)
signal.signal(signal.SIGUSR1, sighandler_wait_quit)
if N > 1:
proc = None
cons_errs = 0
if last_job_timeout and time.time() > last_job_timeout:
logger.info("Exiting due to last_job_timeout")
return
while N and cons_errs < int(options.max_consecutive_failures):
try:
# recursive Popen, dropping N from the argv
# By using another process to run this job
# we protect ourselves from memory leaks, bad cleanup
# and other annoying details.
# The tradeoff is that a large dataset must be reloaded once for
# each subprocess.
sub_argv = [sys.argv[0],
'--poll-interval=%s' % options.poll_interval,
'--max-jobs=1',
'--mongo=%s' % options.mongo,
'--reserve-timeout=%s' % options.reserve_timeout]
if options.workdir is not None:
sub_argv.append('--workdir=%s' % options.workdir)
if options.exp_key is not None:
sub_argv.append('--exp-key=%s' % options.exp_key)
proc = subprocess.Popen(sub_argv)
retcode = proc.wait()
proc = None
except Shutdown:
#this is the normal way to stop the infinite loop (if originally N=-1)
if proc:
#proc.terminate() is only available as of 2.6
os.kill(proc.pid, signal.SIGTERM)
return proc.wait()
else:
return 0
except WaitQuit:
# -- sending SIGUSR1 to a looping process will cause it to
# break out of the loop after the current subprocess finishes
# normally.
if proc:
return proc.wait()
else:
return 0
if retcode != 0:
cons_errs += 1
else:
cons_errs = 0
N -= 1
logger.info("exiting with N=%i after %i consecutive exceptions" %(
N, cons_errs))
elif N == 1:
# XXX: the name of the jobs collection is a parameter elsewhere,
# so '/jobs' should not be hard-coded here
mj = MongoJobs.new_from_connection_str(
as_mongo_str(options.mongo) + '/jobs')
mworker = MongoWorker(mj,
float(options.poll_interval),
workdir=options.workdir,
exp_key=options.exp_key)
mworker.run_one(reserve_timeout=float(options.reserve_timeout))
else:
raise ValueError("N <= 0")
def main_worker():
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--exp-key",
dest='exp_key',
default = None,
metavar='str',
help="identifier for this workers's jobs")
parser.add_option("--last-job-timeout",
dest='last_job_timeout',
metavar='T',
default=None,
help="Do not reserve a job after T seconds have passed")
parser.add_option("--max-consecutive-failures",
dest="max_consecutive_failures",
metavar='N',
default=4,
help="stop if N consecutive jobs fail (default: 4)")
parser.add_option("--max-jobs",
dest='max_jobs',
default=sys.maxint,
help="stop after running this many jobs (default: inf)")
parser.add_option("--mongo",
dest='mongo',
default='localhost/hyperopt',
help="<host>[:port]/<db> for IPC and job storage")
parser.add_option("--poll-interval",
dest='poll_interval',
metavar='N',
default=5,
help="check work queue every 1 < T < N seconds (default: 5")
parser.add_option("--reserve-timeout",
dest='reserve_timeout',
metavar='T',
default=120.0,
help="poll database for up to T seconds to reserve a job")
parser.add_option("--workdir",
dest="workdir",
default=None,
help="root workdir (default: load from mongo)",
metavar="DIR")
(options, args) = parser.parse_args()
if args:
parser.print_help()
return -1
return main_worker_helper(options, args)
|
{
"content_hash": "059f66c8a6ad7d3c41a4dc69846351ec",
"timestamp": "",
"source": "github",
"line_count": 1300,
"max_line_length": 136,
"avg_line_length": 36.44692307692308,
"alnum_prop": 0.5511280893185032,
"repo_name": "jaberg/hyperopt",
"id": "9dbf9542d1f0024f9cd2b6285081d8de8f16ca58",
"size": "47381",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "hyperopt/mongoexp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "360635"
}
],
"symlink_target": ""
}
|
import pytest
from apostello.mail import ApostelloEmailBackend
from site_config.models import SiteConfiguration
@pytest.mark.django_db
def test_apostello_mail_backend():
"""Test email backend pulling from settings and db."""
# test migration pulled env var
mail_backend = ApostelloEmailBackend()
assert mail_backend.host == "smtp.test.apostello"
# test Siteconfiguration change
s = SiteConfiguration.get_solo()
s.get_solo()
s.email_host = "smtp.test2.apostello"
s.save()
mail_backend = ApostelloEmailBackend()
assert mail_backend.host == "smtp.test2.apostello"
|
{
"content_hash": "fb4f80f850c8596f0a24afa494f5a10e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 58,
"avg_line_length": 32,
"alnum_prop": 0.7286184210526315,
"repo_name": "monty5811/apostello",
"id": "4e42e1f226d24f736a846102bcba52650eb9d6b4",
"size": "608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_apostello_email_backend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18413"
},
{
"name": "Elm",
"bytes": "484874"
},
{
"name": "HTML",
"bytes": "21141"
},
{
"name": "JavaScript",
"bytes": "31346"
},
{
"name": "Makefile",
"bytes": "640"
},
{
"name": "Python",
"bytes": "372217"
},
{
"name": "Shell",
"bytes": "3175"
}
],
"symlink_target": ""
}
|
from datetime import datetime, timezone
from time import mktime
import parsedatetime
from recurrent.event_parser import RecurringEvent
from dateutil import rrule
class InvalidDatetimeException(Exception):
pass
class InvalidRecurringDateException(Exception):
pass
def strftime(timestamp_string, strftime_format):
dt = datetime.now()
if timestamp_string and timestamp_string != '':
if isinstance(timestamp_string, int): # Unix time
dt = datetime.utcfromtimestamp(timestamp_string)
else:
parsed = parsedatetime.Calendar().parse(timestamp_string)
if len(parsed) > 1:
dt = datetime.fromtimestamp(mktime(parsed[0]))
else:
dt = datetime.fromtimestamp(mktime(parsed))
return dt.strftime(strftime_format)
def utc_strftime(timestamp_string, strftime_format):
dt = datetime.now(timezone.utc)
if timestamp_string and timestamp_string != '':
if isinstance(timestamp_string, int): # Unix time
dt = datetime.utcfromtimestamp(timestamp_string)
else:
parsed = parsedatetime.Calendar().parse(timestamp_string)
if len(parsed) > 1:
dt = datetime.fromtimestamp(mktime(parsed[0]))
else:
dt = datetime.fromtimestamp(mktime(parsed))
return dt.strftime(strftime_format)
def recurring_date(event, now_date=None, strftime_format='%Y-%m-%d'):
if now_date != None:
time_struct, parse_status = parsedatetime.Calendar().parse(now_date)
if not parse_status:
raise InvalidDatetimeException('Failed to parse "%s"' % (now_date))
now_date = datetime(*time_struct[:6])
else:
now_date = datetime.today()
actually_now = datetime.strptime(datetime.today().strftime(strftime_format),
strftime_format)
r = RecurringEvent(now_date=now_date)
if not r.parse(event):
raise InvalidRecurringDateException('Invalid recurring date "%s"' %
(event))
rr = rrule.rrulestr(r.get_RFC_rrule())
res = rr.after(actually_now, inc=True)
return res.strftime(strftime_format)
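# Illustrative usage sketch of the filters above (the Unix timestamp is a
# placeholder; recurring_date resolves the next occurrence relative to the
# actual current date, so only the shape of its output is indicated):
#
# >>> strftime(1609559040, '%Y-%m-%d')    # doctest: +SKIP
# '2021-01-02'
# >>> recurring_date('every monday')      # doctest: +SKIP
# '2023-05-08'  # i.e. the next Monday, formatted with the default %Y-%m-%d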
|
{
"content_hash": "c28032dd55c63d032750c962c58b836d",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 80,
"avg_line_length": 34.65625,
"alnum_prop": 0.6366095581605049,
"repo_name": "GoogleCloudPlatform/professional-services",
"id": "224b051ef63621f8813446585a05f3dce323085a",
"size": "2812",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/pubsub2inbox/filters/date.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "117994"
},
{
"name": "C++",
"bytes": "174"
},
{
"name": "CSS",
"bytes": "13405"
},
{
"name": "Component Pascal",
"bytes": "798"
},
{
"name": "Dockerfile",
"bytes": "15093"
},
{
"name": "Go",
"bytes": "352968"
},
{
"name": "HCL",
"bytes": "204776"
},
{
"name": "HTML",
"bytes": "1229668"
},
{
"name": "Java",
"bytes": "338810"
},
{
"name": "JavaScript",
"bytes": "59905"
},
{
"name": "Jinja",
"bytes": "60083"
},
{
"name": "Makefile",
"bytes": "14129"
},
{
"name": "Python",
"bytes": "2250081"
},
{
"name": "Scala",
"bytes": "978327"
},
{
"name": "Shell",
"bytes": "109299"
},
{
"name": "Smarty",
"bytes": "19839"
},
{
"name": "TypeScript",
"bytes": "147194"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division, absolute_import
"""
When a SymPy expression needs to be evaluated it is,
for performance reasons, preferable that the
numerics are performed in a compiled language.
The codeexport module provides classes enabling
templates to be used as blueprints for generating, compiling
and importing a binary which performs the computations.
C, C++ and Fortran are all considered, but since
the code generation uses templates, one can easily extend
the functionality to other languages.
The approach taken here differs from sympy.utilities.codegen.codegen
through the use of Mako templates and classes which control
how they are rendered. Which method is best depends on
the problem at hand (personal opinion).
"""
# stdlib imports
import tempfile
import shutil
import re
import os
from collections import namedtuple
from functools import partial
# External imports
import sympy
from pycompilation.util import (
import_module_from_file, copy, make_dirs
)
from pycompilation.compilation import (
FortranCompilerRunner, CCompilerRunner,
CppCompilerRunner, link_py_so, compile_sources
)
# Intrapackage imports
from .util import render_mako_template_to, download_files, defaultnamedtuple
Loop = namedtuple('Loop', ('counter', 'bounds', 'body'))
# DummyGroup instances are used in transformation from sympy expression
# into code. It is used to protect symbols from being operated upon.
DummyGroup = namedtuple('DummyGroup', 'basename symbols')
# ArrayifyGroup instances defines what expressions should be
# arrayified and what offset should be used
# arg `dim` is for broadcasting (Fortran)
ArrayifyGroup = defaultnamedtuple(
'ArrayifyGroup', 'basename code_tok offset dim', [None, 0])
def _dummify_expr(expr, basename, symbs):
"""
    Substitute the symbols in `symbs` with numbered dummy symbols named after
    `basename`, making `expr` robust prior to e.g. regexp substitution of
    code strings.
"""
dummies = sympy.symbols(basename+':'+str(len(symbs)))
for i, s in enumerate(symbs):
expr = expr.subs({s: dummies[i]})
return expr
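# Illustrative sketch (not part of the original API; the symbols below are
# chosen for demonstration only): the call returns the expression with x and
# y replaced by the numbered dummies d0 and d1, i.e. str(...) == 'd0 + 2*d1'.
def _demo_dummify_expr():
    x, y = sympy.symbols('x y')
    return str(_dummify_expr(x + 2*y, 'd', (x, y)))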
def syntaxify_getitem(syntax, scode, basename, token, offset=None,
dim=0, match_regex=r'(\d+)'):
r"""
Parameters
----------
syntax : str
Either 'C' or 'F' for C or Fortran respectively
scode: str
Code string to transformed.
basename : str
Name of (array) variable in scode.
    token : str
        Name of (array) variable in the transformed (returned) code.
Examples
--------
>>> syntaxify_getitem('C', 'y_i = x_i+i;', 'y', 'yout',
... offset='CONST', match_regex=r'_(\w)')
'yout[i+CONST] = x_i+i;'
>>> syntaxify_getitem('F', 'y7 = x7+i;', 'y', 'yout',
... offset=-3, dim=-1)
'yout(7-3,:) = x7+i;'
"""
if syntax == 'C':
assert dim == 0 # C does not support broadcasting
if isinstance(offset, int):
offset_str = '{0:+d}'.format(offset)
elif offset is None:
offset_str = ''
else:
offset_str = '+'+str(offset)
c_tgt = token+r'[\1'+offset_str+']'
if dim > 0:
f_tgt = token+'('+':,'*dim+r'\1'+offset_str+')' # slow!
else:
f_tgt = token+'('+r'\1'+offset_str+',:'*-dim+')' # fast!
tgt = {'C': c_tgt, 'F': f_tgt}.get(syntax)
return re.sub(basename+match_regex, tgt, scode)
class Interceptor(object):
"""
This is a wrapper for dynamically loaded extension modules
which share the same name (they end up overwriting the same
python object).
"""
def __init__(self, binary_path):
self._binary_path = binary_path
self._binary_mod = import_module_from_file(self._binary_path)
def __getattr__(self, key):
if key == '__file__':
return self._binary_mod.__file__
if self._binary_mod.__file__ != self._binary_path:
# Avoid singleton behaviour. (Python changes binary path
# inplace without changing id of Python object)
self._binary_mod = import_module_from_file(self._binary_path)
return getattr(self._binary_mod, key)
class Generic_Code(object):
""" Base class representing code generating object.
Attributes
----------
syntax : str
Any of the supported syntaxes ('C' or 'F').
    tempdir_basename : str
        Basename of tempdirs created in e.g. /tmp/.
    basedir : str
        The path to the directory to which (source) paths are relative.
Notes
-----
Regarding syntax:
- C99 is assumed for 'C'
- Fortran 2008 (free form) is assumed for 'F'
"""
CompilerRunner = None # Set to compilation.CompilerRunner subclass
syntax = None
fort = False # a form of fortran code? (decisive for linking)
tempdir_basename = 'generic_code'
basedir = None
_cached_files = None
build_files = None
source_files = None
templates = None
obj_files = None
extension_name = 'generic_extension'
so_file = None
compile_kwargs = None # kwargs passed to CompilerRunner
list_attributes = (
'_written_files', # Track what files are written
'build_files', # Files to be copied prior to compilation
'source_files',
'templates',
'obj_files',
'_cached_files', # Files to be removed between compilations
)
def __init__(self, tempdir=None, save_temp=False, logger=None):
"""
Arguments:
- `tempdir`: Optional path to dir to write code files
- `save_temp`: Save generated code files when garbage
collected? (Default: False)
- `logger`: optional logging.Logger instance.
"""
if self.syntax == 'C':
self.wcode = partial(sympy.ccode, contract=False)
elif self.syntax == 'F':
self.wcode = partial(
sympy.fcode, source_format='free', contract=False)
self.basedir = self.basedir or "."
# setting basedir to:
# os.path.dirname(sys.modules[self.__class__.__module__].__file__)
# gives problems when using e.g. pudb.
if tempdir:
self._tempdir = tempdir
self._remove_tempdir_on_clean = False
else:
self._tempdir = tempfile.mkdtemp(self.tempdir_basename)
self._remove_tempdir_on_clean = True
self._save_temp = save_temp
self.logger = logger
if not os.path.isdir(self._tempdir):
os.makedirs(self._tempdir)
self._remove_tempdir_on_clean = True
# Initialize lists
for lstattr in self.list_attributes:
setattr(self, lstattr, getattr(
self, lstattr, None) or [])
self.compile_kwargs = self.compile_kwargs or {}
# If .pyx files in self.templates, add .c file to _cached_files
self._cached_files += [x.replace('_template', '').replace(
'.pyx', '.c') for x in self.templates if x.endswith('.pyx')]
self.write_code()
def variables(self):
"""
        Returns dictionary of variables for substitution
        suitable for use in the templates (formatted according
        to the syntax of the language).
"""
# To be overloaded
return {}
def as_arrayified_code(self, expr, dummy_groups=(),
arrayify_groups=(), **kwargs):
for basename, symbols in dummy_groups:
expr = _dummify_expr(expr, basename, symbols)
scode = self.wcode(expr, **kwargs)
for basename, code_tok, offset, dim in arrayify_groups:
scode = syntaxify_getitem(
self.syntax, scode, basename, code_tok, offset, dim)
return scode
def get_cse_code(self, exprs, basename=None,
dummy_groups=(), arrayify_groups=()):
""" Get arrayified code for common subexpression.
Parameters
----------
exprs : list of sympy expressions
basename : str
Stem of variable names (default: cse).
dummy_groups : tuples
"""
if basename is None:
basename = 'cse'
cse_defs, cse_exprs = sympy.cse(
exprs, symbols=sympy.numbered_symbols(basename))
# Let's convert the new expressions into (arrayified) code
cse_defs_code = [
(vname, self.as_arrayified_code(
vexpr, dummy_groups, arrayify_groups))
for vname, vexpr in cse_defs
]
cse_exprs_code = [self.as_arrayified_code(
x, dummy_groups, arrayify_groups) for x in cse_exprs]
return cse_defs_code, cse_exprs_code
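    # -- Hedged sketch of what get_cse_code produces (`code` is a hypothetical
    #    C_Code subclass instance; x and y are sympy symbols):
    #
    # >>> code.get_cse_code([(x + y)**2, (x + y)**3])   # doctest: +SKIP
    # ([(cse0, 'x + y')], ['pow(cse0, 2)', 'pow(cse0, 3)'])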
def write_code(self):
for path in self._cached_files:
# Make sure we start in a clean state
rel_path = os.path.join(self._tempdir, path)
if os.path.exists(rel_path):
os.unlink(rel_path)
for path in self.build_files:
# Copy files
srcpath = os.path.join(self.basedir, path)
dstpath = os.path.join(self._tempdir, os.path.basename(path))
copy(srcpath, dstpath)
self._written_files.append(dstpath)
subs = self.variables()
for path in self.templates:
# Render templates
srcpath = os.path.join(self.basedir, path)
outpath = os.path.join(
self._tempdir,
os.path.basename(path).replace('_template', ''))
render_mako_template_to(srcpath, outpath, subs)
self._written_files.append(outpath)
_mod = None
@property
def mod(self):
""" Cached compiled binary of the Generic_Code class.
To clear cache invoke :meth:`clear_mod_cache`.
"""
if self._mod is None:
self._mod = self.compile_and_import_binary()
return self._mod
def clear_mod_cache(self):
self._mod = None
def compile_and_import_binary(self):
"""
        Returns a module instance of the extension module.
Consider using the `mod` property instead.
Do ::
>>> mod = codeinstnc.compile_and_import_binary() # doctest: +SKIP
>>> x = mod.cb('foo') # doctest: +SKIP
Don't ::
>>> cb = codeinstnc.compile_and_import_binary().cb # doctest: +SKIP
        Since that circumvents the measures taken to avoid singleton
behaviour when multiple different versions of the same
extension module have been compiled. (the shared object
has the same name and identifier)
"""
self._compile()
return Interceptor(self.binary_path)
@property
def binary_path(self):
return os.path.join(self._tempdir, self.so_file)
def clean(self):
""" Delete temp dir if not save_temp set at __init__ """
if not self._save_temp:
if hasattr(self, '_written_files'):
map(os.unlink, self._written_files)
if getattr(self, '_remove_tempdir_on_clean', False):
shutil.rmtree(self._tempdir)
def __del__(self):
"""
When Generic_Code object is collected by GC
self._tempdir is (possibly) deleted
"""
self.clean()
def _compile(self):
self._compile_obj()
self._compile_so()
def _compile_obj(self, sources=None):
sources = sources or self.source_files
compile_sources(sources, self.CompilerRunner,
cwd=self._tempdir,
logger=self.logger,
**self.compile_kwargs)
def _compile_so(self):
so_file = link_py_so(self.obj_files,
so_file=self.so_file,
cwd=self._tempdir,
fort=self.fort,
logger=self.logger,
**self.compile_kwargs)
self.so_file = self.so_file or so_file
class Cython_Code(Generic_Code):
"""
Uses Cython's build_ext and distutils
to simplify compilation
Could be rewritten to use pyx2obj
"""
def _compile(self):
from Cython.Distutils import build_ext
from setuptools import setup
from setuptools import Extension
sources = [os.path.join(
self._tempdir, os.path.basename(x).replace(
'_template', '')) for x in self.source_files]
setup(
script_name='DUMMY_SCRIPT_NAME',
script_args=['build_ext', '--build-lib', self._tempdir],
include_dirs=self._include_dirs,
cmdclass={'build_ext': build_ext},
ext_modules=[
Extension(
self.extension_name,
sources,
libraries=self._libraries,
library_dirs=self._library_dirs,
include_dirs=self._include_dirs),
]
)
class C_Code(Generic_Code):
"""
C code class
"""
default_integer = 'int'
default_real = 'double'
syntax = 'C'
CompilerRunner = CCompilerRunner
class Cpp_Code(C_Code):
CompilerRunner = CppCompilerRunner
class F90_Code(Generic_Code):
"""
Fortran 90 code class
"""
fort = True
# Assume `use iso_c_binding`
default_integer = 'integer(c_int)'
default_real = 'real(c_double)'
syntax = 'F'
CompilerRunner = FortranCompilerRunner
def __init__(self, *args, **kwargs):
self._cached_files = self._cached_files or []
# self._cached_files += [
# x+'.mod' for x in self._get_module_files(self.source_files)]
self._cached_files += [
x+'.mod' for x in self._get_module_files(self.templates)]
super(F90_Code, self).__init__(*args, **kwargs)
def _get_module_files(self, files):
names = []
for f in files:
with open(os.path.join(self.basedir, f), 'rt') as fh:
for line in fh:
stripped_lower = line.strip().lower()
if stripped_lower.startswith('module'):
names.append(
stripped_lower.split('module')[1].strip())
return names
def make_PCEExtension_for_prebuilding_Code(
name, Code, prebuild_sources, srcdir,
downloads=None, **kwargs):
"""
    If a subclass of codeexport.Generic_Code needs to have some of its
sources compiled to objects and cached in a `prebuilt/` directory
at invocation of `setup.py build_ext` this convenience function
makes setting up a PCEExtension easier. Use together with
cmdclass = {'build_ext': pce_build_ext}.
    Files called ".metadata*" will be added to dist_files.
"""
import glob
from .dist import PCEExtension
build_files = []
dist_files = [(os.path.join(srcdir, x[0]), x[1]) for
x in getattr(Code, 'dist_files', [])]
for attr in ('build_files', 'templates'):
for cf in getattr(Code, attr, []) or []:
if not cf.startswith('prebuilt'):
build_files.append(os.path.join(srcdir, cf))
dist_files.append((os.path.join(srcdir, cf), None))
def prebuilder(build_temp, ext_fullpath, ext,
src_paths, **prebuilder_kwargs):
build_temp = os.path.abspath(build_temp)
if not os.path.isdir(build_temp):
make_dirs(build_temp)
if downloads:
websrc, src_md5 = downloads
download_dir = os.path.join(build_temp, srcdir)
if not os.path.isdir(download_dir):
make_dirs(download_dir)
download_files(websrc, src_md5.keys(), src_md5,
cwd=download_dir, logger=ext.logger)
for p in src_paths:
if p not in build_files:
copy(os.path.join(srcdir, p),
os.path.join(build_temp, srcdir),
dest_is_dir=True, create_dest_dirs=True,
only_update=ext.only_update,
logger=ext.logger)
dst = os.path.abspath(os.path.join(
os.path.dirname(ext_fullpath), 'prebuilt/'))
make_dirs(dst, logger=ext.logger)
objs = compile_sources(
[os.path.join(srcdir, x) for x in src_paths], destdir=dst,
cwd=build_temp, metadir=dst, only_update=True,
logger=ext.logger, **prebuilder_kwargs)
glb = os.path.join(ext_fullpath, '.metadata*')
dist_files.extend(glob.glob(glb))
for obj in objs:
            # Copy prebuilt objects into lib for distribution
copy(os.path.join(build_temp, obj),
dst, dest_is_dir=True, create_dest_dirs=True,
only_update=ext.only_update,
logger=ext.logger)
return objs
compile_kwargs = Code.compile_kwargs.copy()
logger = kwargs.pop('logger', True)
compile_kwargs.update(kwargs)
return PCEExtension(
name,
[],
build_files=build_files,
dist_files=dist_files,
build_callbacks=[
(
prebuilder,
(prebuild_sources,), compile_kwargs
),
],
logger=logger,
link_ext=False
)
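# Hedged setup.py sketch (the package, Code subclass and file names below are
# placeholders; pce_build_ext is assumed to live in pycodeexport.dist, as the
# docstring above suggests):
#
# >>> from pycodeexport.dist import pce_build_ext             # doctest: +SKIP
# >>> ext = make_PCEExtension_for_prebuilding_Code(
# ...     'mypkg._impl', MyCode, ['impl.f90'], 'mypkg/src')   # doctest: +SKIP
# >>> setup(name='mypkg', ext_modules=[ext],
# ...       cmdclass={'build_ext': pce_build_ext})            # doctest: +SKIP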
|
{
"content_hash": "8dd96183fa177bc48f7ab06a41c243a2",
"timestamp": "",
"source": "github",
"line_count": 532,
"max_line_length": 76,
"avg_line_length": 32.31954887218045,
"alnum_prop": 0.5810166337094336,
"repo_name": "bjodah/pycodeexport",
"id": "2b0cfdddbdea269cedb8708b0bab58d19c1761c7",
"size": "17218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycodeexport/codeexport.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "30796"
},
{
"name": "Shell",
"bytes": "8982"
}
],
"symlink_target": ""
}
|
from godot import exposed, rpcsync
from godot.bindings import Area2D, Vector2
DEFAULT_SPEED = 80
@exposed
class Ball(Area2D):
@rpcsync
def _reset_ball(self, for_left):
print('RESET BALL', for_left)
self.position = self.screen_size / 2
if for_left:
self.direction = Vector2(-1, 0)
else:
self.direction = Vector2(1, 0)
self.ball_speed = DEFAULT_SPEED
@rpcsync
def stop(self):
self.stopped = True
def _process(self, delta):
# ball will move normally for both players
# even if it's sightly out of sync between them
# so each player sees the motion as smooth and not jerky
if not self.stopped:
self.translate(self.direction * self.ball_speed * delta)
# check screen bounds to make ball bounce
if ((self.position.y < 0 and self.direction.y < 0) or
(self.position.y > self.screen_size.y and self.direction.y > 0)):
self.direction.y = -self.direction.y
if (self.is_network_master()):
# only master will decide when the ball is out in the left side (it's own side)
# this makes the game playable even if latency is high and ball is going fast
# otherwise ball might be out in the other player's screen but not this one
if self.position.x < 0:
self.get_parent().rpc("update_score", False)
self.rpc("_reset_ball", False)
else:
# only the slave will decide when the ball is out in the right side (it's own side)
# this makes the game playable even if latency is high and ball is going fast
# otherwise ball might be out in the other player's screen but not this one
if (self.position.x > self.screen_size.x):
self.get_parent().rpc("update_score", True)
self.rpc("_reset_ball", True)
@rpcsync
def bounce(self, left, random):
print('===================================>BOUNCE', left, random)
# using sync because both players can make it bounce
        if (left):
self.direction.x = abs(self.direction.x)
else:
self.direction.x = -abs(self.direction.x)
self.ball_speed *= 1.1
self.direction.y = random * 2.0 - 1
self.direction = self.direction.normalized()
def _ready(self):
self.direction = Vector2(1, 0)
self.ball_speed = DEFAULT_SPEED
self.stopped = False
self.screen_size = self.get_viewport_rect().size
self.set_process(True) # REMOVE ME
|
{
"content_hash": "02dd1b16e7114972da1827b3bffaaa20",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 95,
"avg_line_length": 39.13235294117647,
"alnum_prop": 0.5888763622698234,
"repo_name": "razvanc-r/godot-python",
"id": "d30d3a215902e727837c53ba96419e2a7864ef69",
"size": "2661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/pong_multiplayer/ball.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2543"
},
{
"name": "C++",
"bytes": "61189"
},
{
"name": "GDScript",
"bytes": "1454"
},
{
"name": "Makefile",
"bytes": "4174"
},
{
"name": "Python",
"bytes": "1015802"
}
],
"symlink_target": ""
}
|
import bisect
import logbook
import datetime
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
from zipline.data.loader import load_market_data
from zipline.utils import tradingcalendar
from zipline.assets import AssetFinder
from zipline.assets.asset_writer import (
AssetDBWriterFromList,
AssetDBWriterFromDictionary,
AssetDBWriterFromDataFrame)
from zipline.errors import (
NoFurtherDataError
)
log = logbook.Logger('Trading')
# The financial simulations in zipline depend on information
# about the benchmark index and the risk free rates of return.
# The benchmark index defines the benchmark returns used in
# the calculation of performance metrics such as alpha/beta. Many
# components, including risk, performance, transforms, and
# batch_transforms, need access to a calendar of trading days and
# market hours. The TradingEnvironment maintains two time keeping
# facilities:
# - a DatetimeIndex of trading days for calendar calculations
# - a timezone name, which should be local to the exchange
# hosting the benchmark index. All dates are normalized to UTC
# for serialization and storage, and the timezone is used to
# ensure proper rollover through daylight savings and so on.
#
# User code will not normally need to use TradingEnvironment
# directly. If you are extending zipline's core financial
# components and need to use the environment, you must import the module and
# build a new TradingEnvironment object, then pass that TradingEnvironment as
# the 'env' arg to your TradingAlgorithm.
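# A hedged sketch of that usage (TradingAlgorithm and its callbacks are
# assumptions about zipline's public API, not defined in this module):
#
# >>> from zipline.algorithm import TradingAlgorithm           # doctest: +SKIP
# >>> env = TradingEnvironment(bm_symbol='^GSPC',
# ...                          exchange_tz='US/Eastern')       # doctest: +SKIP
# >>> algo = TradingAlgorithm(initialize=initialize,
# ...                         handle_data=handle_data,
# ...                         env=env)                         # doctest: +SKIP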
class TradingEnvironment(object):
# Token used as a substitute for pickling objects that contain a
# reference to a TradingEnvironment
PERSISTENT_TOKEN = "<TradingEnvironment>"
def __init__(
self,
load=None,
bm_symbol='^GSPC',
exchange_tz="US/Eastern",
max_date=None,
env_trading_calendar=tradingcalendar,
asset_db_path=':memory:'
):
"""
        @load is a function that returns benchmark_returns and treasury_curves.
The treasury_curves are expected to be a DataFrame with an index of
dates and columns of the curve names, e.g. '10year', '1month', etc.
"""
self.trading_day = env_trading_calendar.trading_day.copy()
# `tc_td` is short for "trading calendar trading days"
tc_td = env_trading_calendar.trading_days
if max_date:
self.trading_days = tc_td[tc_td <= max_date].copy()
else:
self.trading_days = tc_td.copy()
self.first_trading_day = self.trading_days[0]
self.last_trading_day = self.trading_days[-1]
self.early_closes = env_trading_calendar.get_early_closes(
self.first_trading_day, self.last_trading_day)
self.open_and_closes = env_trading_calendar.open_and_closes.loc[
self.trading_days]
self.prev_environment = self
self.bm_symbol = bm_symbol
if not load:
load = load_market_data
self.benchmark_returns, self.treasury_curves = \
load(self.trading_day, self.trading_days, self.bm_symbol)
if max_date:
tr_c = self.treasury_curves
# Mask the treasury curves down to the current date.
# In the case of live trading, the last date in the treasury
# curves would be the day before the date considered to be
# 'today'.
self.treasury_curves = tr_c[tr_c.index <= max_date]
self.exchange_tz = exchange_tz
self.engine = engine = create_engine('sqlite:///%s' % asset_db_path)
AssetDBWriterFromDictionary().init_db(engine)
self.asset_finder = AssetFinder(engine)
def write_data(self,
engine=None,
equities_data=None,
futures_data=None,
exchanges_data=None,
root_symbols_data=None,
equities_df=None,
futures_df=None,
exchanges_df=None,
root_symbols_df=None,
equities_identifiers=None,
futures_identifiers=None,
exchanges_identifiers=None,
root_symbols_identifiers=None,
allow_sid_assignment=True):
""" Write the supplied data to the database.
Parameters
----------
equities_data: dict, optional
A dictionary of equity metadata
futures_data: dict, optional
A dictionary of futures metadata
exchanges_data: dict, optional
A dictionary of exchanges metadata
root_symbols_data: dict, optional
A dictionary of root symbols metadata
equities_df: pandas.DataFrame, optional
A pandas.DataFrame of equity metadata
futures_df: pandas.DataFrame, optional
A pandas.DataFrame of futures metadata
exchanges_df: pandas.DataFrame, optional
A pandas.DataFrame of exchanges metadata
root_symbols_df: pandas.DataFrame, optional
A pandas.DataFrame of root symbols metadata
equities_identifiers: list, optional
A list of equities identifiers (sids, symbols, Assets)
futures_identifiers: list, optional
A list of futures identifiers (sids, symbols, Assets)
exchanges_identifiers: list, optional
A list of exchanges identifiers (ids or names)
root_symbols_identifiers: list, optional
A list of root symbols identifiers (ids or symbols)
"""
if engine:
self.engine = engine
# If any pandas.DataFrame data has been provided,
# write it to the database.
if (equities_df is not None or futures_df is not None or
exchanges_df is not None or root_symbols_df is not None):
self._write_data_dataframes(equities_df, futures_df,
exchanges_df, root_symbols_df)
if (equities_data is not None or futures_data is not None or
exchanges_data is not None or root_symbols_data is not None):
self._write_data_dicts(equities_data, futures_data,
exchanges_data, root_symbols_data)
# These could be lists or other iterables such as a pandas.Index.
# For simplicity, don't check whether data has been provided.
self._write_data_lists(equities_identifiers,
futures_identifiers,
exchanges_identifiers,
root_symbols_identifiers,
allow_sid_assignment=allow_sid_assignment)
def _write_data_lists(self, equities=None, futures=None, exchanges=None,
root_symbols=None, allow_sid_assignment=True):
AssetDBWriterFromList(equities, futures, exchanges, root_symbols)\
.write_all(self.engine, allow_sid_assignment=allow_sid_assignment)
def _write_data_dicts(self, equities=None, futures=None, exchanges=None,
root_symbols=None, allow_sid_assignment=True):
AssetDBWriterFromDictionary(equities, futures, exchanges, root_symbols)\
.write_all(self.engine)
def _write_data_dataframes(self, equities=None, futures=None,
exchanges=None, root_symbols=None):
AssetDBWriterFromDataFrame(equities, futures, exchanges, root_symbols)\
.write_all(self.engine)
def normalize_date(self, test_date):
test_date = pd.Timestamp(test_date, tz='UTC')
return pd.tseries.tools.normalize_date(test_date)
def utc_dt_in_exchange(self, dt):
return pd.Timestamp(dt).tz_convert(self.exchange_tz)
def exchange_dt_in_utc(self, dt):
return pd.Timestamp(dt, tz=self.exchange_tz).tz_convert('UTC')
def is_market_hours(self, test_date):
if not self.is_trading_day(test_date):
return False
mkt_open, mkt_close = self.get_open_and_close(test_date)
return test_date >= mkt_open and test_date <= mkt_close
def is_trading_day(self, test_date):
dt = self.normalize_date(test_date)
return (dt in self.trading_days)
def next_trading_day(self, test_date):
dt = self.normalize_date(test_date)
delta = datetime.timedelta(days=1)
while dt <= self.last_trading_day:
dt += delta
if dt in self.trading_days:
return dt
return None
def previous_trading_day(self, test_date):
dt = self.normalize_date(test_date)
delta = datetime.timedelta(days=-1)
while self.first_trading_day < dt:
dt += delta
if dt in self.trading_days:
return dt
return None
def add_trading_days(self, n, date):
"""
Adds n trading days to date. If this would fall outside of the
trading calendar, a NoFurtherDataError is raised.
:Arguments:
n : int
                The number of days to add to date; this can be positive or
negative.
date : datetime
The date to add to.
:Returns:
new_date : datetime
n trading days added to date.
"""
if n == 1:
return self.next_trading_day(date)
if n == -1:
return self.previous_trading_day(date)
idx = self.get_index(date) + n
if idx < 0 or idx >= len(self.trading_days):
raise NoFurtherDataError(
msg='Cannot add %d days to %s' % (n, date)
)
return self.trading_days[idx]
def days_in_range(self, start, end):
mask = ((self.trading_days >= start) &
(self.trading_days <= end))
return self.trading_days[mask]
def opens_in_range(self, start, end):
return self.open_and_closes.market_open.loc[start:end]
def closes_in_range(self, start, end):
return self.open_and_closes.market_close.loc[start:end]
def minutes_for_days_in_range(self, start, end):
"""
Get all market minutes for the days between start and end, inclusive.
"""
start_date = self.normalize_date(start)
end_date = self.normalize_date(end)
all_minutes = []
for day in self.days_in_range(start_date, end_date):
day_minutes = self.market_minutes_for_day(day)
all_minutes.append(day_minutes)
# Concatenate all minutes and truncate minutes before start/after end.
return pd.DatetimeIndex(
np.concatenate(all_minutes), copy=False, tz='UTC',
)
def next_open_and_close(self, start_date):
"""
Given the start_date, returns the next open and close of
the market.
"""
next_open = self.next_trading_day(start_date)
if next_open is None:
raise NoFurtherDataError(
msg=("Attempt to backtest beyond available history. "
"Last known date: %s" % self.last_trading_day)
)
return self.get_open_and_close(next_open)
def previous_open_and_close(self, start_date):
"""
Given the start_date, returns the previous open and close of the
market.
"""
previous = self.previous_trading_day(start_date)
if previous is None:
raise NoFurtherDataError(
msg=("Attempt to backtest beyond available history. "
"First known date: %s" % self.first_trading_day)
)
return self.get_open_and_close(previous)
def next_market_minute(self, start):
"""
Get the next market minute after @start. This is either the immediate
next minute, or the open of the next market day after start.
"""
next_minute = start + datetime.timedelta(minutes=1)
if self.is_market_hours(next_minute):
return next_minute
return self.next_open_and_close(start)[0]
def previous_market_minute(self, start):
"""
Get the next market minute before @start. This is either the immediate
previous minute, or the close of the market day before start.
"""
prev_minute = start - datetime.timedelta(minutes=1)
if self.is_market_hours(prev_minute):
return prev_minute
return self.previous_open_and_close(start)[1]
def get_open_and_close(self, day):
index = self.open_and_closes.index.get_loc(day.date())
todays_minutes = self.open_and_closes.values[index]
return todays_minutes[0], todays_minutes[1]
def market_minutes_for_day(self, stamp):
market_open, market_close = self.get_open_and_close(stamp)
return pd.date_range(market_open, market_close, freq='T')
def open_close_window(self, start, count, offset=0, step=1):
"""
Return a DataFrame containing `count` market opens and closes,
        beginning with `start` + `offset` days and continuing `step` days at
        a time.
"""
# TODO: Correctly handle end of data.
start_idx = self.get_index(start) + offset
stop_idx = start_idx + (count * step)
index = np.arange(start_idx, stop_idx, step)
return self.open_and_closes.iloc[index]
def market_minute_window(self, start, count, step=1):
"""
Return a DatetimeIndex containing `count` market minutes, starting with
`start` and continuing `step` minutes at a time.
"""
if not self.is_market_hours(start):
raise ValueError("market_minute_window starting at "
"non-market time {minute}".format(minute=start))
all_minutes = []
current_day_minutes = self.market_minutes_for_day(start)
first_minute_idx = current_day_minutes.searchsorted(start)
minutes_in_range = current_day_minutes[first_minute_idx::step]
# Build up list of lists of days' market minutes until we have count
# minutes stored altogether.
while True:
if len(minutes_in_range) >= count:
# Truncate off extra minutes
minutes_in_range = minutes_in_range[:count]
all_minutes.append(minutes_in_range)
count -= len(minutes_in_range)
if count <= 0:
break
if step > 0:
start, _ = self.next_open_and_close(start)
current_day_minutes = self.market_minutes_for_day(start)
else:
_, start = self.previous_open_and_close(start)
current_day_minutes = self.market_minutes_for_day(start)
minutes_in_range = current_day_minutes[::step]
# Concatenate all the accumulated minutes.
return pd.DatetimeIndex(
np.concatenate(all_minutes), copy=False, tz='UTC',
)
def trading_day_distance(self, first_date, second_date):
first_date = self.normalize_date(first_date)
second_date = self.normalize_date(second_date)
# TODO: May be able to replace the following with searchsorted.
# Find leftmost item greater than or equal to day
i = bisect.bisect_left(self.trading_days, first_date)
if i == len(self.trading_days): # nothing found
return None
j = bisect.bisect_left(self.trading_days, second_date)
if j == len(self.trading_days):
return None
return j - i
def get_index(self, dt):
"""
Return the index of the given @dt, or the index of the preceding
trading day if the given dt is not in the trading calendar.
"""
ndt = self.normalize_date(dt)
if ndt in self.trading_days:
return self.trading_days.searchsorted(ndt)
else:
return self.trading_days.searchsorted(ndt) - 1
class SimulationParameters(object):
def __init__(self, period_start, period_end,
capital_base=10e3,
emission_rate='daily',
data_frequency='daily',
env=None):
self.period_start = period_start
self.period_end = period_end
self.capital_base = capital_base
self.emission_rate = emission_rate
self.data_frequency = data_frequency
# copied to algorithm's environment for runtime access
self.arena = 'backtest'
if env is not None:
self.update_internal_from_env(env=env)
def update_internal_from_env(self, env):
assert self.period_start <= self.period_end, \
"Period start falls after period end."
assert self.period_start <= env.last_trading_day, \
"Period start falls after the last known trading day."
assert self.period_end >= env.first_trading_day, \
"Period end falls before the first known trading day."
self.first_open = self._calculate_first_open(env)
self.last_close = self._calculate_last_close(env)
start_index = env.get_index(self.first_open)
end_index = env.get_index(self.last_close)
# take an inclusive slice of the environment's
# trading_days.
self.trading_days = env.trading_days[start_index:end_index + 1]
def _calculate_first_open(self, env):
"""
Finds the first trading day on or after self.period_start.
"""
first_open = self.period_start
one_day = datetime.timedelta(days=1)
while not env.is_trading_day(first_open):
first_open = first_open + one_day
mkt_open, _ = env.get_open_and_close(first_open)
return mkt_open
def _calculate_last_close(self, env):
"""
Finds the last trading day on or before self.period_end
"""
last_close = self.period_end
one_day = datetime.timedelta(days=1)
while not env.is_trading_day(last_close):
last_close = last_close - one_day
_, mkt_close = env.get_open_and_close(last_close)
return mkt_close
@property
def days_in_period(self):
"""return the number of trading days within the period [start, end)"""
return len(self.trading_days)
def __repr__(self):
return """
{class_name}(
period_start={period_start},
period_end={period_end},
capital_base={capital_base},
data_frequency={data_frequency},
emission_rate={emission_rate},
first_open={first_open},
last_close={last_close})\
""".format(class_name=self.__class__.__name__,
period_start=self.period_start,
period_end=self.period_end,
capital_base=self.capital_base,
data_frequency=self.data_frequency,
emission_rate=self.emission_rate,
first_open=self.first_open,
last_close=self.last_close)
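# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how user code that
# extends zipline's core components might build a TradingEnvironment and a
# SimulationParameters object, as described in the module-level comment above
# the TradingEnvironment class. The dates and capital base are hypothetical,
# and running this requires the benchmark/treasury data to be loadable.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    env = TradingEnvironment(bm_symbol='^GSPC', exchange_tz='US/Eastern')
    sim_params = SimulationParameters(
        period_start=pd.Timestamp('2014-01-02', tz='UTC'),
        period_end=pd.Timestamp('2014-12-31', tz='UTC'),
        capital_base=100000,
        env=env,
    )
    print(env.next_trading_day(pd.Timestamp('2014-07-04', tz='UTC')))
    print(repr(sim_params))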
|
{
"content_hash": "8260d50a7f29c1f3965b7e3b4de3df12",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 80,
"avg_line_length": 36.90503875968992,
"alnum_prop": 0.608465052775298,
"repo_name": "michaeljohnbennett/zipline",
"id": "35817162598c0e4b6535189496e057a374d71cd9",
"size": "19626",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zipline/finance/trading.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "564"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Python",
"bytes": "1354050"
},
{
"name": "Shell",
"bytes": "4065"
}
],
"symlink_target": ""
}
|
import pytest
import torch
import numpy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules import GatedSum
class TestGatedSum(AllenNlpTestCase):
def test_gated_sum_can_run_forward(self):
a = torch.FloatTensor([1, 2, 3, 4, 5])
b = -a + 0.1
weight_value = 2
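        # With every gate weight equal to weight_value and a zero bias (set below),
        # the gate's pre-activation is weight_value * sum([a; b]) = 2 * (15 - 14.5) = 1,
        # which is why sigmoid(1) is the expected gate value -- assuming GatedSum's
        # gate is a single linear layer over the concatenation of the two inputs.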
gate_value = torch.sigmoid(torch.FloatTensor([1]))
expected = gate_value * a + (1 - gate_value) * b
with torch.no_grad(): # because we want to change the weight
gated_sum = GatedSum(a.size(-1))
gated_sum._gate.weight *= 0
gated_sum._gate.weight += weight_value
gated_sum._gate.bias *= 0
out = gated_sum(a, b)
numpy.testing.assert_almost_equal(expected.data.numpy(), out.data.numpy(), decimal=5)
with pytest.raises(ValueError):
GatedSum(a.size(-1))(a, b.unsqueeze(0))
with pytest.raises(ValueError):
GatedSum(100)(a, b)
def test_input_output_dim(self):
dim = 77
gated_sum = GatedSum(dim)
numpy.testing.assert_equal(gated_sum.get_input_dim(), dim)
numpy.testing.assert_equal(gated_sum.get_output_dim(), dim)
|
{
"content_hash": "31ae470c1ba8920e1f15be59d48b1244",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 97,
"avg_line_length": 32.32432432432432,
"alnum_prop": 0.6070234113712375,
"repo_name": "allenai/allennlp",
"id": "c1de01c1ec04f8847c262b76f6ec5aeaa4d301c9",
"size": "1196",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/modules/gated_sum_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39870"
},
{
"name": "Dockerfile",
"bytes": "1190"
},
{
"name": "Jsonnet",
"bytes": "4469"
},
{
"name": "Makefile",
"bytes": "5306"
},
{
"name": "Perl",
"bytes": "101"
},
{
"name": "Python",
"bytes": "3575059"
},
{
"name": "Scilab",
"bytes": "4085"
},
{
"name": "Shell",
"bytes": "2092"
}
],
"symlink_target": ""
}
|
from pymoku import InvalidOperationException
def stream_init(moku, uname, api_key, str_id1, str_id2, npoints=100, mode='lines', line={}):
line = ';'.join([ '='.join(i) for i in list(line.items())])
settings = [
('plotly.uname', uname),
('plotly.api_key', api_key),
('plotly.strid1', str_id1),
('plotly.strid2', str_id2),
('plotly.displaysize', str(npoints)),
('plotly.mode', mode),
('plotly.line', line),
]
moku._set_properties(settings)
def stream_url(moku):
return moku._get_property_single('plotly.url')
def plot_frame(dataframe, uname=None, api_key=None, mode='lines', line={}):
try:
import plotly.plotly as ply
import plotly.tools as ptls
from plotly.graph_objs import Scatter, Layout, Data, Figure
except ImportError:
raise InvalidOperationException("Please install the Python plotly bindings")
if uname and api_key:
ply.sign_in(uname, api_key)
c1 = dataframe.ch1
c2 = dataframe.ch2
x = list(range(len(c1)))
t1 = Scatter(x=x, y=c1, mode=mode, line=line)
t2 = Scatter(x=x, y=c2, mode=mode, line=line)
layout = Layout(title="Moku:Lab Frame Grab")
data = Data([t1, t2])
fig = Figure(data=data, layout=layout)
return ply.plot(fig)
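# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): plot_frame expects a
# frame object exposing `ch1` and `ch2` sequences. The stand-in class and the
# plotly credentials below are hypothetical placeholders.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class _FakeFrame(object):
        ch1 = [0.0, 1.0, 0.5, -0.5, 0.25]
        ch2 = [1.0, 0.5, 0.0, -1.0, -0.25]

    # Uncomment and supply real plotly credentials to publish the plot:
    # plot_frame(_FakeFrame(), uname='your_plotly_user', api_key='your_api_key')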
|
{
"content_hash": "c7a264f95fdd1640a7bf7744b1508ab9",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 92,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.6818565400843882,
"repo_name": "benizl/pymoku",
"id": "f823c6094a403ab6a62faccb2e76b2e2b2d997a0",
"size": "1282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymoku/plotly_support.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "219652"
}
],
"symlink_target": ""
}
|
from gcloud import storage
from oauth2client.service_account import ServiceAccountCredentials
import requests
from io import BytesIO
from PIL import Image
import os
from backend.api_keys import cloud_client_id, cloud_client_email, cloud_private_key_id, cloud_private_key, \
cloud_bucket_name
from backend.log_util import log
from backend.firebase_db_util import get_id_of_feed
max_image_size = 4000000
size = 512, 512
image_path = "image.jpg"
credentials_dict = {
'type': 'service_account',
'client_id': cloud_client_id,
'client_email': cloud_client_email,
'private_key_id': cloud_private_key_id,
'private_key': cloud_private_key,
}
credentials = ServiceAccountCredentials.from_json_keyfile_dict(credentials_dict)
client = storage.Client(credentials=credentials, project=cloud_bucket_name)
def store_image_in_gcloud(url, feed):
try:
# Get image with timeout
r = requests.get(url, stream=True, timeout=10)
if r.status_code == 200:
# Check that the file is not bigger than max size
content = r.raw.read(max_image_size + 1, decode_content=True)
if len(content) > max_image_size:
raise ValueError('File is too big (maybe it\'s the video instead of the thumb), not downloading!')
else:
raise ValueError("Wrong status code at download")
# Resize the image to save bandwidth and storage
im = Image.open(BytesIO(content))
im.thumbnail(size)
im.save(image_path, format="JPEG")
# Upload the image to the firebase storage
bucket = client.get_bucket(cloud_bucket_name)
blob = bucket.blob(get_storage_path(feed))
blob.upload_from_filename(image_path, content_type="image/jpg")
        # Clean up resources
log("Debug", "Uploaded image")
del content
del im
os.remove(image_path)
# Return the download url of the image
return "https://storage.googleapis.com/" + cloud_bucket_name + "/" + get_storage_path(feed)
except Exception as e:
log("Warning", "Couldn't up- / download image" + format(e))
return None
def remove_image_from_gcloud(feed):
try:
# Delete the image from the firebase storage
bucket = client.get_bucket(cloud_bucket_name)
blob = bucket.blob(get_storage_path(feed))
blob.delete()
log("Debug", "Image deleted")
except Exception as e:
log("Warning", "Deleting image failed: " + format(e))
def get_storage_path(feed):
return "thumbs/" + feed.scope + "/" + get_id_of_feed(feed) + ".jpg"
|
{
"content_hash": "65183f1df55188664b30179241ec4a4b",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 114,
"avg_line_length": 34.82666666666667,
"alnum_prop": 0.6573506891271057,
"repo_name": "l3d00m/pietsmiet_xposter",
"id": "988a9c9916fe960474a16c63729b8ae439b1f5e3",
"size": "2612",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "backend/cloud_storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20588"
}
],
"symlink_target": ""
}
|
import a
import b
import c
import d
import e
import f
# Colours
if platform.system() == 'Windows':
col_default = 0x07
col_red = 0x0C
col_green = 0x0A
else:
col_default = '\033[0m'
col_red = '\033[91m'
col_green = '\033[92m'
col_current = None
def set_text_colour(col):
global col_current
if col_current is None or col_current != col:
if not sys.stdout.isatty():
pass # not on a terminal (e.g. output is being piped to file)
elif (platform.system() == 'Windows'):
# set the text colour using the Win32 API
handle = ctypes.windll.kernel32.GetStdHandle(-11) # STD_OUTPUT_HANDLE
ctypes.windll.kernel32.SetConsoleTextAttribute(handle, col)
else:
# set the text colour using a character code
sys.stdout.write(col)
col_current = col
def report(text, col = col_default):
set_text_colour(col)
print(text)
set_text_colour(col_default)
|
{
"content_hash": "8cccb2fbc62533237f08b5906e186df8",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 81,
"avg_line_length": 26.405405405405407,
"alnum_prop": 0.6202661207778914,
"repo_name": "github/codeql",
"id": "e6fa77a005f2af2a284f0870d64f50559706ed35",
"size": "977",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/ql/test/query-tests/Metrics/duplicate/with_import2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP.NET",
"bytes": "3739"
},
{
"name": "Batchfile",
"bytes": "3534"
},
{
"name": "C",
"bytes": "410440"
},
{
"name": "C#",
"bytes": "21146000"
},
{
"name": "C++",
"bytes": "1352639"
},
{
"name": "CMake",
"bytes": "1809"
},
{
"name": "CodeQL",
"bytes": "32583145"
},
{
"name": "Dockerfile",
"bytes": "496"
},
{
"name": "EJS",
"bytes": "1478"
},
{
"name": "Emacs Lisp",
"bytes": "3445"
},
{
"name": "Go",
"bytes": "697562"
},
{
"name": "HTML",
"bytes": "58008"
},
{
"name": "Handlebars",
"bytes": "1000"
},
{
"name": "Java",
"bytes": "5417683"
},
{
"name": "JavaScript",
"bytes": "2432320"
},
{
"name": "Kotlin",
"bytes": "12163740"
},
{
"name": "Lua",
"bytes": "13113"
},
{
"name": "Makefile",
"bytes": "8631"
},
{
"name": "Mustache",
"bytes": "17025"
},
{
"name": "Nunjucks",
"bytes": "923"
},
{
"name": "Perl",
"bytes": "1941"
},
{
"name": "PowerShell",
"bytes": "1295"
},
{
"name": "Python",
"bytes": "1649035"
},
{
"name": "RAML",
"bytes": "2825"
},
{
"name": "Ruby",
"bytes": "299268"
},
{
"name": "Rust",
"bytes": "234024"
},
{
"name": "Shell",
"bytes": "23973"
},
{
"name": "Smalltalk",
"bytes": "23"
},
{
"name": "Starlark",
"bytes": "27062"
},
{
"name": "Swift",
"bytes": "204309"
},
{
"name": "Thrift",
"bytes": "3020"
},
{
"name": "TypeScript",
"bytes": "219623"
},
{
"name": "Vim Script",
"bytes": "1949"
},
{
"name": "Vue",
"bytes": "2881"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trekking', '0037_auto_20220204_1533'),
]
operations = [
migrations.AlterField(
model_name='trek',
name='accessibility_infrastructure',
field=models.TextField(blank=True, help_text='Any specific accessibility infrastructure', verbose_name='Accessibility infrastructure'),
),
]
|
{
"content_hash": "9fea169f7328592f762f0a467173e141",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 147,
"avg_line_length": 28.4375,
"alnum_prop": 0.6461538461538462,
"repo_name": "GeotrekCE/Geotrek-admin",
"id": "39018b470e672b03c9cecf4ae85d71662e7db9ab",
"size": "505",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "geotrek/trekking/migrations/0038_auto_20220204_1537.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "46138"
},
{
"name": "Dockerfile",
"bytes": "1816"
},
{
"name": "HTML",
"bytes": "274524"
},
{
"name": "JavaScript",
"bytes": "231326"
},
{
"name": "Makefile",
"bytes": "1909"
},
{
"name": "PLpgSQL",
"bytes": "78024"
},
{
"name": "Python",
"bytes": "3456569"
},
{
"name": "SCSS",
"bytes": "7179"
},
{
"name": "Shell",
"bytes": "14369"
}
],
"symlink_target": ""
}
|
from src.utils.google_api import get_service
def _get_time_str(datetime_):
"""
    Convert a datetime into the string format that the Google Calendar API expects
    :param datetime datetime_: the datetime to convert
"""
    # Drop the microseconds
datetime_ = datetime_.replace(microsecond=0)
datetime_str = datetime_.isoformat()
if datetime_.tzinfo is None:
datetime_str += "+09:00" # タイムゾーンの文字列を加える
return datetime_str
def get_events(calendar_id, time_min, time_max):
"""
    Return the list of events in the given range of the specified calendar
* https://developers.google.com/google-apps/calendar/v3/reference/events/list
* https://developers.google.com/resources/api-libraries/documentation/calendar/v3/python/latest/calendar_v3.events.html#list # NOQA
    :param str calendar_id: calendar ID
    :param datetime time_min: fetch events whose end time is at or after this value
    :param datetime time_max: fetch events whose start time is at or before this value
"""
    # Connect to the Calendar API
service = get_service("calendar", "v3")
    # Fetch the events in the given range from the specified calendar
event_results = (
service.events()
.list(
calendarId=calendar_id,
            timeMin=_get_time_str(time_min),  # convert the datetime to a string
timeMax=_get_time_str(time_max),
singleEvents=True,
orderBy="startTime",
)
.execute()
)
events = event_results.get("items", [])
print(events)
return events
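# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): fetch the next seven
# days of events. The calendar id is a hypothetical placeholder and the call
# only works with the Google API credentials that get_service() is set up for.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import datetime

    now = datetime.datetime.now()
    get_events("primary", now, now + datetime.timedelta(days=7))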
|
{
"content_hash": "89e13b1fee767fa952a3a21c4b901145",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 135,
"avg_line_length": 27.458333333333332,
"alnum_prop": 0.6441578148710166,
"repo_name": "beproud/bp-cron",
"id": "aef9605fde4466bbf936f08297f2162d38eb029e",
"size": "1626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/utils/google_calendar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1172"
},
{
"name": "Python",
"bytes": "23022"
}
],
"symlink_target": ""
}
|
import os
import operator
import sys
import contextlib
import itertools
import unittest
from distutils.errors import DistutilsError, DistutilsOptionError
from distutils import log
from unittest import TestLoader
from pkg_resources import (
resource_listdir,
resource_exists,
normalize_path,
working_set,
evaluate_marker,
add_activation_listener,
require,
EntryPoint,
)
from setuptools import Command
from setuptools.extern.more_itertools import unique_everseen
class ScanningLoader(TestLoader):
def __init__(self):
TestLoader.__init__(self)
self._visited = set()
def loadTestsFromModule(self, module, pattern=None):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
if module in self._visited:
return None
self._visited.add(module)
tests = []
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__ + '.' + file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
# adapted from jaraco.classes.properties:NonDataProperty
class NonDataProperty:
def __init__(self, fget):
self.fget = fget
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self.fget(obj)
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build (deprecated)"
user_options = [
('test-module=', 'm', "Run 'test_suite' in specified module"),
(
'test-suite=',
's',
"Run single test, case or suite (e.g. 'module.test_suite')",
),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite and self.test_module:
msg = "You may specify a module or a suite, but not both"
raise DistutilsOptionError(msg)
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
@NonDataProperty
def test_args(self):
return list(self._test_args())
def _test_args(self):
if not self.test_suite and sys.version_info >= (2, 7):
yield 'discover'
if self.verbose:
yield '--verbose'
if self.test_suite:
yield self.test_suite
def with_project_on_sys_path(self, func):
"""
Backward compatibility for project_on_sys_path context.
"""
with self.project_on_sys_path():
func()
@contextlib.contextmanager
def project_on_sys_path(self, include_dists=[]):
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
project_path = normalize_path(ei_cmd.egg_base)
sys.path.insert(0, project_path)
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
with self.paths_on_pythonpath([project_path]):
yield
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
@staticmethod
@contextlib.contextmanager
def paths_on_pythonpath(paths):
"""
Add the indicated paths to the head of the PYTHONPATH environment
variable so that subprocesses will also see the packages at
these paths.
Do this in a context that restores the value on exit.
"""
nothing = object()
orig_pythonpath = os.environ.get('PYTHONPATH', nothing)
current_pythonpath = os.environ.get('PYTHONPATH', '')
try:
prefix = os.pathsep.join(unique_everseen(paths))
to_join = filter(None, [prefix, current_pythonpath])
new_path = os.pathsep.join(to_join)
if new_path:
os.environ['PYTHONPATH'] = new_path
yield
finally:
if orig_pythonpath is nothing:
os.environ.pop('PYTHONPATH', None)
else:
os.environ['PYTHONPATH'] = orig_pythonpath
@staticmethod
def install_dists(dist):
"""
Install the requirements indicated by self.distribution and
return an iterable of the dists that were built.
"""
ir_d = dist.fetch_build_eggs(dist.install_requires)
tr_d = dist.fetch_build_eggs(dist.tests_require or [])
er_d = dist.fetch_build_eggs(
v
for k, v in dist.extras_require.items()
if k.startswith(':') and evaluate_marker(k[1:])
)
return itertools.chain(ir_d, tr_d, er_d)
def run(self):
self.announce(
"WARNING: Testing via this command is deprecated and will be "
"removed in a future version. Users looking for a generic test "
"entry point independent of test runner are encouraged to use "
"tox.",
log.WARN,
)
installed_dists = self.install_dists(self.distribution)
cmd = ' '.join(self._argv)
if self.dry_run:
self.announce('skipping "%s" (dry run)' % cmd)
return
self.announce('running "%s"' % cmd)
paths = map(operator.attrgetter('location'), installed_dists)
with self.paths_on_pythonpath(paths):
with self.project_on_sys_path():
self.run_tests()
def run_tests(self):
test = unittest.main(
None,
None,
self._argv,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
exit=False,
)
if not test.result.wasSuccessful():
msg = 'Test failed: %s' % test.result
self.announce(msg, log.ERROR)
raise DistutilsError(msg)
@property
def _argv(self):
return ['unittest'] + self.test_args
@staticmethod
def _resolve_as_ep(val):
"""
        Load the indicated attribute value, called, as if it were
specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.resolve()()
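# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original setuptools source): because
# paths_on_pythonpath is a staticmethod it can be exercised on its own. The
# path below is a hypothetical placeholder; the original value of PYTHONPATH
# is restored (or removed) when the context exits.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    with test.paths_on_pythonpath(['/tmp/example-project']):
        print(os.environ.get('PYTHONPATH'))
    print(os.environ.get('PYTHONPATH'))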
|
{
"content_hash": "2c6ef65df65517c17e497d37cba1d209",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 79,
"avg_line_length": 32.095238095238095,
"alnum_prop": 0.5729475766567754,
"repo_name": "martbhell/wasthereannhlgamelastnight",
"id": "4a389e4d071d3753f14a8b74338def21f6b54299",
"size": "8088",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/lib/setuptools/command/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "730"
},
{
"name": "HTML",
"bytes": "8959"
},
{
"name": "JavaScript",
"bytes": "3318"
},
{
"name": "Python",
"bytes": "5989638"
}
],
"symlink_target": ""
}
|
"""QR decomposition functions."""
from __future__ import division, print_function, absolute_import
import numpy
# Local imports
from .lapack import get_lapack_funcs
from .misc import _datacopied
__all__ = ['qr', 'qr_multiply', 'rq']
def safecall(f, name, *args, **kwargs):
"""Call a LAPACK routine, determining lwork automatically and handling
error return values"""
lwork = kwargs.get("lwork", None)
if lwork in (None, -1):
kwargs['lwork'] = -1
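        # lwork=-1 is the LAPACK workspace-size query convention: the routine
        # does no real work and reports the optimal lwork in its work output,
        # which is read back below for the actual call.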
ret = f(*args, **kwargs)
kwargs['lwork'] = ret[-2][0].real.astype(numpy.int)
ret = f(*args, **kwargs)
if ret[-1] < 0:
raise ValueError("illegal value in %d-th argument of internal %s"
% (-ret[-1], name))
return ret[:-2]
def qr(a, overwrite_a=False, lwork=None, mode='full', pivoting=False,
check_finite=True):
"""
Compute QR decomposition of a matrix.
Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal
and R upper triangular.
Parameters
----------
a : (M, N) array_like
Matrix to be decomposed
overwrite_a : bool, optional
Whether data in a is overwritten (may improve performance)
lwork : int, optional
Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
is computed.
mode : {'full', 'r', 'economic', 'raw'}, optional
Determines what information is to be returned: either both Q and R
('full', default), only R ('r') or both Q and R but computed in
economy-size ('economic', see Notes). The final option 'raw'
(added in SciPy 0.11) makes the function return two matrices
(Q, TAU) in the internal format used by LAPACK.
pivoting : bool, optional
Whether or not factorization should include pivoting for rank-revealing
qr decomposition. If pivoting, compute the decomposition
``A P = Q R`` as above, but where P is chosen such that the diagonal
of R is non-increasing.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
Q : float or complex ndarray
Of shape (M, M), or (M, K) for ``mode='economic'``. Not returned
if ``mode='r'``.
R : float or complex ndarray
Of shape (M, N), or (K, N) for ``mode='economic'``. ``K = min(M, N)``.
P : int ndarray
Of shape (N,) for ``pivoting=True``. Not returned if
``pivoting=False``.
Raises
------
LinAlgError
Raised if decomposition fails
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, zungqr, dgeqp3, and zgeqp3.
If ``mode=economic``, the shapes of Q and R are (M, K) and (K, N) instead
of (M,M) and (M,N), with ``K=min(M,N)``.
Examples
--------
>>> from scipy import linalg
>>> a = np.random.randn(9, 6)
>>> q, r = linalg.qr(a)
>>> np.allclose(a, np.dot(q, r))
True
>>> q.shape, r.shape
((9, 9), (9, 6))
>>> r2 = linalg.qr(a, mode='r')
>>> np.allclose(r, r2)
True
>>> q3, r3 = linalg.qr(a, mode='economic')
>>> q3.shape, r3.shape
((9, 6), (6, 6))
>>> q4, r4, p4 = linalg.qr(a, pivoting=True)
>>> d = np.abs(np.diag(r4))
>>> np.all(d[1:] <= d[:-1])
True
>>> np.allclose(a[:, p4], np.dot(q4, r4))
True
>>> q4.shape, r4.shape, p4.shape
((9, 9), (9, 6), (6,))
>>> q5, r5, p5 = linalg.qr(a, mode='economic', pivoting=True)
>>> q5.shape, r5.shape, p5.shape
((9, 6), (6, 6), (6,))
"""
# 'qr' was the old default, equivalent to 'full'. Neither 'full' nor
# 'qr' are used below.
# 'raw' is used internally by qr_multiply
if mode not in ['full', 'qr', 'r', 'economic', 'raw']:
raise ValueError("Mode argument should be one of ['full', 'r',"
"'economic', 'raw']")
if check_finite:
a1 = numpy.asarray_chkfinite(a)
else:
a1 = numpy.asarray(a)
if len(a1.shape) != 2:
raise ValueError("expected 2D array")
M, N = a1.shape
overwrite_a = overwrite_a or (_datacopied(a1, a))
if pivoting:
geqp3, = get_lapack_funcs(('geqp3',), (a1,))
qr, jpvt, tau = safecall(geqp3, "geqp3", a1, overwrite_a=overwrite_a)
jpvt -= 1 # geqp3 returns a 1-based index array, so subtract 1
else:
geqrf, = get_lapack_funcs(('geqrf',), (a1,))
qr, tau = safecall(geqrf, "geqrf", a1, lwork=lwork,
overwrite_a=overwrite_a)
if mode not in ['economic', 'raw'] or M < N:
R = numpy.triu(qr)
else:
R = numpy.triu(qr[:N, :])
if pivoting:
Rj = R, jpvt
else:
Rj = R,
if mode == 'r':
return Rj
elif mode == 'raw':
return ((qr, tau),) + Rj
gor_un_gqr, = get_lapack_funcs(('orgqr',), (qr,))
if M < N:
Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr[:, :M], tau,
lwork=lwork, overwrite_a=1)
elif mode == 'economic':
Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr, tau, lwork=lwork,
overwrite_a=1)
else:
t = qr.dtype.char
qqr = numpy.empty((M, M), dtype=t)
qqr[:, :N] = qr
Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qqr, tau, lwork=lwork,
overwrite_a=1)
return (Q,) + Rj
def qr_multiply(a, c, mode='right', pivoting=False, conjugate=False,
overwrite_a=False, overwrite_c=False):
"""
Calculate the QR decomposition and multiply Q with a matrix.
Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal
and R upper triangular. Multiply Q with a vector or a matrix c.
Parameters
----------
a : (M, N), array_like
Input array
c : array_like
Input array to be multiplied by ``q``.
mode : {'left', 'right'}, optional
``Q @ c`` is returned if mode is 'left', ``c @ Q`` is returned if
mode is 'right'.
The shape of c must be appropriate for the matrix multiplications,
if mode is 'left', ``min(a.shape) == c.shape[0]``,
if mode is 'right', ``a.shape[0] == c.shape[1]``.
pivoting : bool, optional
Whether or not factorization should include pivoting for rank-revealing
qr decomposition, see the documentation of qr.
conjugate : bool, optional
Whether Q should be complex-conjugated. This might be faster
than explicit conjugation.
overwrite_a : bool, optional
Whether data in a is overwritten (may improve performance)
overwrite_c : bool, optional
Whether data in c is overwritten (may improve performance).
If this is used, c must be big enough to keep the result,
i.e. ``c.shape[0]`` = ``a.shape[0]`` if mode is 'left'.
Returns
-------
CQ : ndarray
The product of ``Q`` and ``c``.
R : (K, N), ndarray
R array of the resulting QR factorization where ``K = min(M, N)``.
P : (N,) ndarray
Integer pivot array. Only returned when ``pivoting=True``.
Raises
------
LinAlgError
Raised if QR decomposition fails.
Notes
-----
This is an interface to the LAPACK routines ``?GEQRF``, ``?ORMQR``,
``?UNMQR``, and ``?GEQP3``.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.linalg import qr_multiply, qr
>>> A = np.array([[1, 3, 3], [2, 3, 2], [2, 3, 3], [1, 3, 2]])
>>> qc, r1, piv1 = qr_multiply(A, 2*np.eye(4), pivoting=1)
>>> qc
array([[-1., 1., -1.],
[-1., -1., 1.],
[-1., -1., -1.],
[-1., 1., 1.]])
>>> r1
array([[-6., -3., -5. ],
[ 0., -1., -1.11022302e-16],
[ 0., 0., -1. ]])
>>> piv1
array([1, 0, 2], dtype=int32)
>>> q2, r2, piv2 = qr(A, mode='economic', pivoting=1)
>>> np.allclose(2*q2 - qc, np.zeros((4, 3)))
True
"""
if mode not in ['left', 'right']:
raise ValueError("Mode argument can only be 'left' or 'right' but "
"not '{}'".format(mode))
c = numpy.asarray_chkfinite(c)
if c.ndim < 2:
onedim = True
c = numpy.atleast_2d(c)
if mode == "left":
c = c.T
else:
onedim = False
a = numpy.atleast_2d(numpy.asarray(a)) # chkfinite done in qr
M, N = a.shape
if mode == 'left':
if c.shape[0] != min(M, N + overwrite_c*(M-N)):
raise ValueError('Array shapes are not compatible for Q @ c'
' operation: {} vs {}'.format(a.shape, c.shape))
else:
if M != c.shape[1]:
raise ValueError('Array shapes are not compatible for c @ Q'
' operation: {} vs {}'.format(c.shape, a.shape))
raw = qr(a, overwrite_a, None, "raw", pivoting)
Q, tau = raw[0]
gor_un_mqr, = get_lapack_funcs(('ormqr',), (Q,))
if gor_un_mqr.typecode in ('s', 'd'):
trans = "T"
else:
trans = "C"
Q = Q[:, :min(M, N)]
if M > N and mode == "left" and not overwrite_c:
if conjugate:
cc = numpy.zeros((c.shape[1], M), dtype=c.dtype, order="F")
cc[:, :N] = c.T
else:
cc = numpy.zeros((M, c.shape[1]), dtype=c.dtype, order="F")
cc[:N, :] = c
trans = "N"
if conjugate:
lr = "R"
else:
lr = "L"
overwrite_c = True
elif c.flags["C_CONTIGUOUS"] and trans == "T" or conjugate:
cc = c.T
if mode == "left":
lr = "R"
else:
lr = "L"
else:
trans = "N"
cc = c
if mode == "left":
lr = "L"
else:
lr = "R"
cQ, = safecall(gor_un_mqr, "gormqr/gunmqr", lr, trans, Q, tau, cc,
overwrite_c=overwrite_c)
if trans != "N":
cQ = cQ.T
if mode == "right":
cQ = cQ[:, :min(M, N)]
if onedim:
cQ = cQ.ravel()
return (cQ,) + raw[1:]
def rq(a, overwrite_a=False, lwork=None, mode='full', check_finite=True):
"""
Compute RQ decomposition of a matrix.
Calculate the decomposition ``A = R Q`` where Q is unitary/orthogonal
and R upper triangular.
Parameters
----------
a : (M, N) array_like
Matrix to be decomposed
overwrite_a : bool, optional
Whether data in a is overwritten (may improve performance)
lwork : int, optional
Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
is computed.
mode : {'full', 'r', 'economic'}, optional
Determines what information is to be returned: either both Q and R
('full', default), only R ('r') or both Q and R but computed in
economy-size ('economic', see Notes).
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
R : float or complex ndarray
Of shape (M, N) or (M, K) for ``mode='economic'``. ``K = min(M, N)``.
Q : float or complex ndarray
Of shape (N, N) or (K, N) for ``mode='economic'``. Not returned
if ``mode='r'``.
Raises
------
LinAlgError
If decomposition fails.
Notes
-----
This is an interface to the LAPACK routines sgerqf, dgerqf, cgerqf, zgerqf,
sorgrq, dorgrq, cungrq and zungrq.
If ``mode=economic``, the shapes of Q and R are (K, N) and (M, K) instead
of (N,N) and (M,N), with ``K=min(M,N)``.
Examples
--------
>>> from scipy import linalg
>>> a = np.random.randn(6, 9)
>>> r, q = linalg.rq(a)
>>> np.allclose(a, r @ q)
True
>>> r.shape, q.shape
((6, 9), (9, 9))
>>> r2 = linalg.rq(a, mode='r')
>>> np.allclose(r, r2)
True
>>> r3, q3 = linalg.rq(a, mode='economic')
>>> r3.shape, q3.shape
((6, 6), (6, 9))
"""
if mode not in ['full', 'r', 'economic']:
raise ValueError(
"Mode argument should be one of ['full', 'r', 'economic']")
if check_finite:
a1 = numpy.asarray_chkfinite(a)
else:
a1 = numpy.asarray(a)
if len(a1.shape) != 2:
raise ValueError('expected matrix')
M, N = a1.shape
overwrite_a = overwrite_a or (_datacopied(a1, a))
gerqf, = get_lapack_funcs(('gerqf',), (a1,))
rq, tau = safecall(gerqf, 'gerqf', a1, lwork=lwork,
overwrite_a=overwrite_a)
if not mode == 'economic' or N < M:
R = numpy.triu(rq, N-M)
else:
R = numpy.triu(rq[-M:, -M:])
if mode == 'r':
return R
gor_un_grq, = get_lapack_funcs(('orgrq',), (rq,))
if N < M:
Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq[-N:], tau, lwork=lwork,
overwrite_a=1)
elif mode == 'economic':
Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq, tau, lwork=lwork,
overwrite_a=1)
else:
rq1 = numpy.empty((N, N), dtype=rq.dtype)
rq1[-M:] = rq
Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq1, tau, lwork=lwork,
overwrite_a=1)
return R, Q
|
{
"content_hash": "97ae402938aaffa1776d2e3ff62282ab",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 79,
"avg_line_length": 31.83254716981132,
"alnum_prop": 0.5345632362747277,
"repo_name": "lhilt/scipy",
"id": "32782f630032b17c1e1a3facf6bad4192b8ec9e8",
"size": "13497",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scipy/linalg/decomp_qr.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4396416"
},
{
"name": "C++",
"bytes": "643592"
},
{
"name": "Fortran",
"bytes": "5368331"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12378541"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
import re, random, string, sys, math, os, datetime
BASE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../", "../")
sys.path.append(BASE_DIR)
import simplejson as json
import datetime
# ONE-TIME SCRIPT: create ExperimentThing records for intervention comments
# that were logged as ExperimentActions but never given a matching thing
ENV = os.environ['CS_ENV']
with open(os.path.join(BASE_DIR, "config") + "/{env}.json".format(env=ENV), "r") as config:
DBCONFIG = json.loads(config.read())
### LOAD SQLALCHEMY SESSION
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import and_, or_
from app.models import *
from utils.common import *
db_engine = create_engine("mysql://{user}:{password}@{host}/{database}".format(
host = DBCONFIG['host'],
user = DBCONFIG['user'],
password = DBCONFIG['password'],
database = DBCONFIG['database']))
Base.metadata.bind = db_engine
DBSession = sessionmaker(bind=db_engine)
db_session = DBSession()
print("Found {0} actions for possible conversion...".format(db_session.query(ExperimentAction).filter(and_(ExperimentAction.action=="Intervention",ExperimentAction.action_subject_id!=None)).count()))
added_comment_things = []
for action in db_session.query(ExperimentAction).filter(and_(ExperimentAction.action=="Intervention",ExperimentAction.action_subject_id!=None)).all():
thing_exists = db_session.query(ExperimentThing).filter(ExperimentThing.id == action.action_subject_id).count() > 0
    if not thing_exists:
comment_thing = ExperimentThing(
id = action.action_subject_id,
experiment_id = action.experiment_id,
object_type = ThingType.COMMENT.value,
object_created = datetime.datetime.fromtimestamp(json.loads(action.metadata_json)['action_object_created_utc']),
metadata_json = json.dumps({"group":"treatment", "submission_id":action.action_object_id})
)
added_comment_things.append(comment_thing)
db_session.add(comment_thing)
print("Updating dates for {0} posts".format(len(added_comment_things)))
db_session.commit()
|
{
"content_hash": "ffdcc3587fb94d8c7d3687c921e1c6d4",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 199,
"avg_line_length": 44.54347826086956,
"alnum_prop": 0.7227916056612982,
"repo_name": "c4fcm/CivilServant",
"id": "f515535db3d2f5827d1fb6a6bdff9a31490fd277",
"size": "2049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/data_migrations/08.10.2016.add_experiment_things_for_comments.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "923"
},
{
"name": "Python",
"bytes": "209127"
}
],
"symlink_target": ""
}
|
import json
import uuid
import mock
import netaddr
from quark.drivers import unmanaged
from quark import network_strategy
from quark.tests import test_base
class TestUnmanagedDriver(test_base.TestBase):
def setUp(self):
super(TestUnmanagedDriver, self).setUp()
self.strategy = {"public_network": {"bridge": "xenbr0",
"subnets": {"4": "public"}}}
strategy_json = json.dumps(self.strategy)
self.driver = unmanaged.UnmanagedDriver()
unmanaged.STRATEGY = network_strategy.JSONStrategy(strategy_json)
def test_load_config(self):
self.driver.load_config()
def test_get_name(self):
self.assertEqual(self.driver.get_name(), "UNMANAGED")
def test_get_connection(self):
self.driver.get_connection()
def test_select_ipam_strategy(self):
strategy = self.driver.select_ipam_strategy(1, "ANY")
self.assertEqual(strategy, "ANY")
def test_create_network(self):
self.driver.create_network(context=self.context,
network_name="testwork")
def test_delete_network(self):
self.driver.delete_network(context=self.context, network_id=1)
def test_diag_network(self):
self.assertEqual(self.driver.diag_network(context=self.context,
network_id=2), {})
def test_diag_port(self):
self.assertEqual(self.driver.diag_port(context=self.context,
network_id=2), {})
def test_create_port(self):
self.driver.create_port(context=self.context,
network_id="public_network", port_id=2)
def test_update_port(self):
self.driver.update_port(context=self.context,
network_id="public_network", port_id=2)
@mock.patch("quark.cache.security_groups_client.SecurityGroupsClient")
def test_update_port_with_security_groups_removal(self, redis_cli):
mock_client = mock.MagicMock()
redis_cli.return_value = mock_client
port_id = str(uuid.uuid4())
device_id = str(uuid.uuid4())
mac_address = netaddr.EUI("AA:BB:CC:DD:EE:FF").value
security_groups = []
self.driver.update_port(
context=self.context, network_id="public_network", port_id=port_id,
device_id=device_id, mac_address=mac_address,
security_groups=security_groups)
self.assertEqual(mock_client.serialize_groups.call_count, 0)
mock_client.delete_vif_rules.assert_called_once_with(
device_id, mac_address)
@mock.patch("quark.cache.security_groups_client.SecurityGroupsClient")
def test_update_port_with_security_groups(self, redis_cli):
mock_client = mock.MagicMock()
redis_cli.return_value = mock_client
port_id = str(uuid.uuid4())
device_id = str(uuid.uuid4())
mac_address = netaddr.EUI("AA:BB:CC:DD:EE:FF").value
security_groups = [str(uuid.uuid4())]
payload = {}
mock_client.serialize_groups.return_value = payload
self.driver.update_port(
context=self.context, network_id="public_network", port_id=port_id,
device_id=device_id, mac_address=mac_address,
security_groups=security_groups)
mock_client.serialize_groups.assert_called_once_with(security_groups)
mock_client.apply_rules.assert_called_once_with(
device_id, mac_address, payload)
@mock.patch("quark.cache.security_groups_client.SecurityGroupsClient")
def test_delete_port(self, sg_cli):
device_id = str(uuid.uuid4())
mac_address = netaddr.EUI("AA:BB:CC:DD:EE:FF").value
mock_client = mock.MagicMock()
sg_cli.return_value = mock_client
self.driver.delete_port(context=self.context, port_id=2,
mac_address=mac_address, device_id=device_id)
mock_client.delete_vif.assert_called_once_with(
device_id, mac_address)
@mock.patch("quark.cache.security_groups_client.SecurityGroupsClient")
def test_delete_port_redis_is_dead(self, sg_cli):
device_id = str(uuid.uuid4())
mac_address = netaddr.EUI("AA:BB:CC:DD:EE:FF").value
mock_client = mock.MagicMock()
sg_cli.return_value = mock_client
mock_client.delete_vif.side_effect = Exception
try:
self.driver.delete_port(context=self.context, port_id=2,
mac_address=mac_address,
device_id=device_id)
mock_client.delete_vif.assert_called_once_with(
device_id, mac_address)
except Exception:
# This test fails without the exception handling in
# _delete_port_security_groups
self.fail("This shouldn't have raised")
def test_create_security_group(self):
self.driver.create_security_group(context=self.context,
group_name="mygroup")
def test_delete_security_group(self):
self.driver.delete_security_group(context=self.context,
group_id=3)
def test_update_security_group(self):
self.driver.update_security_group(context=self.context,
group_id=3)
def test_create_security_group_rule(self):
rule = {'ethertype': 'IPv4', 'direction': 'ingress'}
self.driver.create_security_group_rule(context=self.context,
group_id=3,
rule=rule)
def test_delete_security_group_rule(self):
rule = {'ethertype': 'IPv4', 'direction': 'ingress'}
self.driver.delete_security_group_rule(context=self.context,
group_id=3,
rule=rule)
|
{
"content_hash": "0495e10360a47b8d62c01a7637cde395",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 79,
"avg_line_length": 41.46896551724138,
"alnum_prop": 0.5915516381174123,
"repo_name": "lmaycotte/quark",
"id": "e1b77273e9e7b33670ee8d20b39240f69c8cf8a4",
"size": "6622",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "quark/tests/test_unmanaged_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1207653"
},
{
"name": "Shell",
"bytes": "861"
}
],
"symlink_target": ""
}
|
from app import cache
from flask import Blueprint, jsonify, render_template, current_app
from app.models.example import ExampleModel
Example = Blueprint('exampleController', __name__, template_folder='templates', static_folder='static')
@Example.route('/')
def example_index():
return render_template('example/example.html')
@Example.route('/data', methods=['GET'])
@cache.cached(timeout=60)  # cached sits below the route decorator so Flask registers the cached wrapper
def example_data():
a = ExampleModel()
graph = a.get_some_graph()
current_app.logger.info("[INFO] Downloaded some graph! yay!")
return jsonify(graph.serialize)
@Example.route('/reset')
def example_reset():
a = ExampleModel()
a.reset_cars()
return render_template('example/example.html', reset=True)
|
{
"content_hash": "788b99c7185dd2e33420925f71828f6b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 103,
"avg_line_length": 23.677419354838708,
"alnum_prop": 0.7084468664850136,
"repo_name": "mobile2015/neoPyth",
"id": "eeef62545af6cfef58f4000ed7f214b1937f7ccc",
"size": "734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/controllers/example.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "203"
},
{
"name": "HTML",
"bytes": "21008"
},
{
"name": "JavaScript",
"bytes": "2451"
},
{
"name": "Python",
"bytes": "35683"
}
],
"symlink_target": ""
}
|
class Singleton(object):
#
# Manage instance
#
# @var object
__Instance = None
#
    # Name of the singleton
#
# @var string
__Name = 'Singleton'
#
    # Create the single instance on the first call; return the existing one afterwards
#
def __new__(self,*args):
if (not self.__Instance):
self.__Instance = super(Singleton, self).__new__(self,*args)
else:
self.__init__ = self.__doNothing
return self.__Instance
def __doNothing(self,*args):
'''
        This method does nothing. It replaces __init__ once an instance
        exists, so that values declared by the first call to __init__ are
        not re-declared on later instantiations.
'''
pass
#
    # Sets the name of the singleton
#
def setName(self, value = 'Singleton'):
self.__Name = value
#
    # Returns the name of the singleton
#
# @return string
def getName(self):
return self.__Name
#
# Returns Id of the object
#
# @return integer
def getId(self):
"""
Returns singleton Id
"""
return id(self)
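# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): both constructions
# below return the same object, and state set through the first reference is
# visible through the second.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    first = Singleton()
    first.setName('shared-name')
    second = Singleton()
    print(first.getId() == second.getId())  # True: one shared instance
    print(second.getName())                 # 'shared-name'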
|
{
"content_hash": "b0ebaaaa974d2b26a8812dcde18905ab",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 66,
"avg_line_length": 16.454545454545453,
"alnum_prop": 0.6187845303867403,
"repo_name": "dksr/REMIND",
"id": "7380070f72dbcc96a7874d2f22728cefa293e553",
"size": "2080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/base/utils/Singleton.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "99620"
},
{
"name": "C++",
"bytes": "884"
},
{
"name": "Java",
"bytes": "41062"
},
{
"name": "Perl",
"bytes": "127558"
},
{
"name": "Prolog",
"bytes": "6322"
},
{
"name": "Python",
"bytes": "1338870"
},
{
"name": "Shell",
"bytes": "1278"
}
],
"symlink_target": ""
}
|
import IECore
import Gaffer
import GafferUI
class _BaseStatePlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plug, **kw ) :
menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ) )
menuButton = GafferUI.MenuButton( menu=menu, image = "drawingStyles.png", hasFrame=False )
GafferUI.PlugValueWidget.__init__( self, menuButton, plug, **kw )
def hasLabel( self ) :
return True
def _updateFromPlug( self ) :
pass
def __menuDefinition( self ) :
m = IECore.MenuDefinition()
for n in [ "solid", "wireframe", "points" ] :
plug = self.getPlug()[n]["enabled"]
m.append(
"/" + n.capitalize(),
{
"command" : IECore.curry( _BaseStatePlugValueWidget.__togglePlug, plug=plug ),
"checkBox" : plug.getValue(),
}
)
return m
@staticmethod
def __togglePlug( menu, plug ) :
plug.setValue( not plug.getValue() )
GafferUI.PlugValueWidget.registerCreator( GafferUI.View3D.staticTypeId(), "baseState", _BaseStatePlugValueWidget )
|
{
"content_hash": "a2f5da9d5f4e105756f6d0a148688d8a",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 114,
"avg_line_length": 22.68888888888889,
"alnum_prop": 0.66307541625857,
"repo_name": "paulondc/gaffer",
"id": "a0189a1a1c1fdcff61b0fa8b5c311accfd55b8c6",
"size": "2838",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/GafferUI/View3DToolbar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "15447"
},
{
"name": "C++",
"bytes": "2630344"
},
{
"name": "COBOL",
"bytes": "64449"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "Objective-C",
"bytes": "107529"
},
{
"name": "Python",
"bytes": "2745422"
},
{
"name": "Shell",
"bytes": "6943"
},
{
"name": "Slash",
"bytes": "32856"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.contrib import admin
from django.db.models import signals
class Volunteer(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
email = models.EmailField(max_length=254, unique=True)
is_group = models.BooleanField(default=False, verbose_name="Group")
organization = models.CharField(null=True, max_length=50, blank=True)
group_count = models.IntegerField(null=True, blank=True)
times_clocked_out = models.IntegerField(default=0, editable=False)
date_added = models.DateTimeField(auto_now_add=True, verbose_name="Date Added")
date_modified = models.DateTimeField(auto_now=True, verbose_name="Date Modified")
active = models.BooleanField(default=True)
def __str__(self):
return "{}".format(self.last_name) + ", {}".format(self.first_name)
def delete(self, *args, **kwargs):
self.active = False
self.save()
class Site(models.Model):
name = models.CharField(max_length=50, unique=True)
date_added = models.DateTimeField(auto_now_add=True, verbose_name="Date Added")
date_modified = models.DateTimeField(auto_now=True, verbose_name="Date Modified")
active = models.BooleanField(default=True)
def __str__(self):
return "{}".format(self.name)
def delete(self, *args, **kwargs):
self.active = False
self.save()
class Activity(models.Model):
class Meta:
verbose_name_plural = "activities"
name = models.CharField(max_length=100, unique=True)
date_added = models.DateTimeField(auto_now_add=True, verbose_name="Date Added")
date_modified = models.DateTimeField(auto_now=True, verbose_name="Date Modified")
active = models.BooleanField(default=True)
def __str__(self):
return "{}".format(self.name)
def delete(self, *args, **kwargs):
self.active = False
self.save()
def print_out(sender, instance, created, **kwargs):
    if instance.end is not None:
        volunteer = instance.volunteer
        if volunteer.is_group:
            hours = ((instance.end - instance.start).total_seconds() / 3600) * volunteer.group_count
        else:
            hours = (instance.end - instance.start).total_seconds() / 3600
        if instance.hours != hours:
            instance.hours = hours
            instance.save()
            volunteer.times_clocked_out += 1
            volunteer.save()
class TimeEntry(models.Model):
    class Meta:
        verbose_name_plural = "Time Entries"

    volunteer = models.ForeignKey(Volunteer, on_delete=models.CASCADE)
    start = models.DateTimeField(verbose_name="Start Time")
    end = models.DateTimeField(null=True, blank=True, verbose_name="End Time")
    site = models.ForeignKey(Site, on_delete=models.CASCADE)
    activity = models.ForeignKey(Activity, on_delete=models.CASCADE)
    date_added = models.DateTimeField(auto_now_add=True, verbose_name="Date Added")
    date_modified = models.DateTimeField(auto_now=True, verbose_name="Date Modified")
    active = models.BooleanField(default=True)
    hours = models.DecimalField(null=True, blank=True, decimal_places=2, max_digits=12)

    def __str__(self):
        return "{} {} {}".format(self.id, self.volunteer, self.start.strftime('%m/%d/%Y'))

    def delete(self, *args, **kwargs):
        self.active = False
        self.save()
signals.post_save.connect(print_out, sender=TimeEntry)
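# Usage sketch (hypothetical shell session, variable names assumed): clocking a
# volunteer out is just a second save of the same entry; the post_save hook
# above fills in the hours.
#
#     entry = TimeEntry.objects.create(volunteer=v, site=s, activity=a,
#                                      start=timezone.now())
#     ...
#     entry.end = timezone.now()
#     entry.save()   # print_out() computes entry.hours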
|
{
"content_hash": "86e0890f8cefdbb2ce78c1c78b75570a",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 114,
"avg_line_length": 37.5,
"alnum_prop": 0.670354609929078,
"repo_name": "jantaylor/road-home-time-tracker",
"id": "a964077a42f0f560150fb336e6aff7b882f16c97",
"size": "3525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "timetracker/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "48002"
},
{
"name": "HTML",
"bytes": "47396"
},
{
"name": "JavaScript",
"bytes": "234901"
},
{
"name": "Python",
"bytes": "74218"
}
],
"symlink_target": ""
}
|
"""Generate ast module from specification
This script generates the ast module from a simple specification,
which makes it easy to accomodate changes in the grammar. This
approach would be quite reasonable if the grammar changed often.
Instead, it is rather complex to generate the appropriate code. And
the Node interface has changed more often than the grammar.
"""
import fileinput
import re
import sys
from StringIO import StringIO
SPEC = "ast.txt"
COMMA = ", "
def load_boilerplate(file):
    f = open(file)
    buf = f.read()
    f.close()
    i = buf.find('### ''PROLOGUE')
    pro = buf[i+12:].strip()
    return pro, ''
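# Note on load_boilerplate(): the marker is written as two adjacent string
# literals ('### ' 'PROLOGUE') so that the search line itself never matches
# when the script scans its own source for the prologue section at the bottom
# of this file.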
def strip_default(arg):
    """Return the argname from an 'arg = default' string"""
    i = arg.find('=')
    if i == -1:
        return arg
    t = arg[:i].strip()
    return t
P_NODE = 1
P_OTHER = 2
P_NESTED = 3
P_NONE = 4
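# For reference: these levels record how a spec argument was annotated --
# no suffix -> P_NODE (a single child node), '*' -> P_OTHER, '!' -> P_NESTED
# (a list of child nodes), '&' -> P_NONE. In this generator only P_NESTED
# changes the emitted walkChildren(), which iterates over the list instead of
# visiting the argument once.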
class NodeInfo:
    """Each instance describes a specific AST node"""

    def __init__(self, name, args):
        self.name = name
        self.args = args.strip()
        self.argnames = self.get_argnames()
        self.argprops = self.get_argprops()
        self.nargs = len(self.argnames)
        self.init = []
    def get_argnames(self):
        if '(' in self.args:
            i = self.args.find('(')
            j = self.args.rfind(')')
            args = self.args[i+1:j]
        else:
            args = self.args
        return [strip_default(arg.strip())
                for arg in args.split(',') if arg]
    def get_argprops(self):
        """Each argument can have a property like '*' or '!'

        XXX This method modifies the argnames in place!
        """
        d = {}
        hardest_arg = P_NODE
        for i in range(len(self.argnames)):
            arg = self.argnames[i]
            if arg.endswith('*'):
                arg = self.argnames[i] = arg[:-1]
                d[arg] = P_OTHER
                hardest_arg = max(hardest_arg, P_OTHER)
            elif arg.endswith('!'):
                arg = self.argnames[i] = arg[:-1]
                d[arg] = P_NESTED
                hardest_arg = max(hardest_arg, P_NESTED)
            elif arg.endswith('&'):
                arg = self.argnames[i] = arg[:-1]
                d[arg] = P_NONE
                hardest_arg = max(hardest_arg, P_NONE)
            else:
                d[arg] = P_NODE
        self.hardest_arg = hardest_arg

        if hardest_arg > P_NODE:
            self.args = self.args.replace('*', '')
            self.args = self.args.replace('!', '')
            self.args = self.args.replace('&', '')

        return d
    def gen_source(self):
        buf = StringIO()
        self._gen_init(buf)
        print >> buf
        self._gen_walkChildren(buf)
        print >> buf
        bufAux = StringIO()
        self._gen_repr(bufAux)
        buf.seek(0, 0)
        bufAux.seek(0, 0)
        return buf.read(), bufAux.read()
def _gen_init(self, buf):
print >> buf, "# --------------------------------------------------------"
print >> buf, "class %s:\n" % self.name
if self.args:
print >> buf, " def __init__ (self, %s, lineno):\n" % (self.args)
else:
print >> buf, " def __init__(self, lineno):\n"
print >>buf, " self.nodeName = \"%s\";" % self.name
if self.argnames:
for name in self.argnames:
print >> buf, " self.%s = %s;" % (name, name)
print >> buf, " self.lineno = lineno;"
# Copy the lines in self.init, indented four spaces. The rstrip()
# business is to get rid of the four spaces if line happens to be
# empty, so that reindent.py is happy with the output.
for line in self.init:
print >> buf, line.rstrip()
    def _gen_walkChildren(self, buf):
        print >> buf, " def walkChildren(self, handler, args):"
        if len(self.argnames) == 0:
            print >> buf, " return;"
        else:
            if self.hardest_arg < P_NESTED:
                for c in self.argnames:
                    print >>buf, " ret = handler.visit(self.%s, args);" % c
                    print >>buf, " if ret: self.%s = ret" % c
            else:
                for name in self.argnames:
                    if self.argprops[name] == P_NESTED:
                        print >> buf, " for i_%(name)s in range(len(self.%(name)s)):\n" % {'name':name}
                        print >> buf, " ret = handler.visit(self.%(name)s[i_%(name)s], args);" % {'name': name}
                        print >> buf, " if ret: self.%(name)s[i_%(name)s] = ret\n" % {'name': name}
                    else:
                        print >> buf, " ret = handler.visit(self.%s, args);" % name
                        print >> buf, " if ret: self.%s = ret" % name
    def _gen_repr(self, buf):
        # can't use actual type, or extend prototype because it's inside the
        # non-debug no-symbol-leaking big function
        print >> buf, "if (node.nodeName=== '%s'):" % self.name
        if self.argnames:
            fmts = []
            for name in self.argnames:
                fmts.append(name + "=%s")
            fmt = COMMA.join(fmts)
            vals = ["astDump(node.%s)" % name for name in self.argnames]
            vals = COMMA.join(vals)
            print >> buf, ' return sprintf("%s(%s)", %s)' % \
                (self.name, fmt, vals)
        else:
            print >> buf, ' return "%s()"' % self.name
        print >> buf, "}"
rx_init = re.compile('init\((.*)\):')
def parse_spec(file):
    classes = {}
    cur = None
    for line in fileinput.input(file):
        if line.strip().startswith('#'):
            continue
        mo = rx_init.search(line)
        if mo is None:
            if cur is None:
                # a normal entry
                try:
                    name, args = line.split(':')
                except ValueError:
                    continue
                classes[name] = NodeInfo(name, args)
                cur = None
            else:
                # some code for the __init__ method
                cur.init.append(line)
        else:
            # some extra code for a Node's __init__ method
            name = mo.group(1)
            cur = classes[name]
    return sorted(classes.values(), key=lambda n: n.name)
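# Illustration (hypothetical spec fragment, mirroring the format of CPython's
# Tools/compiler astgen): extra constructor code can be given after the node
# entries, e.g.
#
#     Dict: items!
#     init(Dict):
#         self.quoted = 0
#
# parse_spec() attaches the indented lines following "init(Dict):" to
# NodeInfo.init, and _gen_init() copies them into the generated __init__
# for that node.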
def main():
    prologue, epilogue = load_boilerplate(sys.argv[0])
    mainf = open(sys.argv[1], "w")
    auxf = open(sys.argv[2], "w")
    print >>mainf, prologue
    print >>mainf
    print >>auxf, """// This file is automatically generated by pgen/astgen.py
function astDump(node)
{
if node is None: return "None";
if isinstance(node, str): return Node
if isinstance(node, bool): return Node
if isinstance(node, int): return Node
if (Object.prototype.toString.apply(node) === '[object Array]')
ret = ''
for i in range(len(node)):
ret += astDump(node[i]);
if i != len(node) - 1:
ret += ",";
return ret
"""
    classes = parse_spec(SPEC)
    for info in classes:
        a, b = info.gen_source()
        print >>mainf, a
        print >>auxf, b
    print >>mainf, epilogue
    print >>auxf, "}\n"
    mainf.close()
    auxf.close()

if __name__ == "__main__":
    main()
    sys.exit(0)
"""
### PROLOGUE
# abstract syntax node definitions
#
# This file is automatically generated by pgen/astgen.py
OP_ASSIGN = 'OP_ASSIGN';
OP_DELETE = 'OP_DELETE';
OP_APPLY = 'OP_APPLY';
SC_LOCAL = 1;
SC_GLOBAL = 2;
SC_FREE = 3;
SC_CELL = 4;
SC_UNKNOWN = 5;
CO_OPTIMIZED = 0x0001;
CO_NEWLOCALS = 0x0002;
CO_VARARGS = 0x0004;
CO_VARKEYWORDS = 0x0008;
CO_NESTED = 0x0010;
CO_GENERATOR = 0x0020;
CO_GENERATOR_ALLOWED = 0;
CO_FUTURE_DIVISION = 0x2000;
CO_FUTURE_ABSIMPORT = 0x4000;
CO_FUTURE_WITH_STATEMENT = 0x8000;
CO_FUTURE_PRINT_FUNCTION = 0x10000;
def flatten(seq):
    l = []
    for i in range(len(seq)):
        if len(seq[i]) > 0:
            subf = flatten(seq[i])
            l += subf
        else:
            l.append(seq[i])
    return l
#"""
|
{
"content_hash": "59ea2d2f38e8e3e9a955ad469bd08f9b",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 122,
"avg_line_length": 31.234615384615385,
"alnum_prop": 0.5084349218076591,
"repo_name": "spaceone/pyjs",
"id": "a3fdb324131093e0cc6d0d62caf7e29fbaaf7471",
"size": "8121",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pgen/astgen.skult.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4640"
},
{
"name": "Groff",
"bytes": "6633"
},
{
"name": "HTML",
"bytes": "10106"
},
{
"name": "JavaScript",
"bytes": "63385"
},
{
"name": "Makefile",
"bytes": "453"
},
{
"name": "Python",
"bytes": "5515374"
},
{
"name": "Shell",
"bytes": "4264"
}
],
"symlink_target": ""
}
|