| text (string, length 4 to 1.02M) | meta (dict) |
|---|---|
"""empty message
Revision ID: 83b13b4b5717
Revises: e8f30efa9a81
Create Date: 2017-08-02 18:12:38.316902
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '83b13b4b5717'
down_revision = 'e8f30efa9a81'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('site_settings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('key', sa.String(length=80), nullable=False),
sa.Column('value', sa.String(length=80), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('key')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('site_settings')
# ### end Alembic commands ###
|
{
"content_hash": "9497ceb3212a58cd5ad4092529c5c436",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 65,
"avg_line_length": 25.058823529411764,
"alnum_prop": 0.676056338028169,
"repo_name": "massgo/league",
"id": "8f9db77e60fc8c00aa3e963be15daecf4b87f651",
"size": "852",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "app/migrations/versions/83b13b4b5717_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1240"
},
{
"name": "HTML",
"bytes": "49851"
},
{
"name": "JavaScript",
"bytes": "143"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "107289"
},
{
"name": "Shell",
"bytes": "1400"
}
],
"symlink_target": ""
}
|
import datetime
import json
from cba import get_request
from django.utils.encoding import force_unicode
from django.utils.functional import Promise
class LazyEncoder(json.JSONEncoder):
"""Encodes django's lazy i18n strings.
"""
def default(self, obj):
if isinstance(obj, Promise):
return force_unicode(obj)
return obj
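# Usage sketch (assumes Django's lazy translation helpers, e.g. ugettext_lazy):
#     json.dumps({"label": ugettext_lazy("Save")}, cls=LazyEncoder)
# Without the encoder, json.dumps raises TypeError on lazy Promise objects.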
def time_it(func, logger, log_message, func_args=None):
start = datetime.datetime.now()
if func_args:
result = func(*func_args)
else:
result = func()
end = datetime.datetime.now()
logger.debug("{}: {}".format(log_message, end - start))
return result
def get_from_session(key, default=None):
"""Gets a value from the session.
key
The key under which the value has been saved. When the key
doesn't exist the method returns ``default``.
default
The value the method returns if the key does not exist.
"""
request = get_request()
return request.session.get("cba", {}).get(key, default)
def set_to_session(key, value):
"""Saves a value to the session.
key
The key under which the value is saved.
value
The value to be saved.
"""
request = get_request()
request.session.setdefault("cba", {})[key] = value
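# Usage sketch (assumes an active cba request; "draft" is a hypothetical key):
#     set_to_session("draft", {"title": "Hello"})
#     draft = get_from_session("draft", default={})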
def display_components(root):
for component in root.components:
print(component)
display_components(component)
|
{
"content_hash": "188900f66292b3965c9ed909ab3135c6",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 70,
"avg_line_length": 24.796610169491526,
"alnum_prop": 0.6288448393711552,
"repo_name": "diefenbach/django-cba",
"id": "256556a5b41fdbac7631e53b5f31751c3fc48230",
"size": "1463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cba/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3021200"
},
{
"name": "HTML",
"bytes": "24065"
},
{
"name": "JavaScript",
"bytes": "3163620"
},
{
"name": "Python",
"bytes": "41129"
}
],
"symlink_target": ""
}
|
import argparse
import os
import sys
import semantic_version
from requests.exceptions import ConnectionError
from tsuru_unit_agent import heartbeat, tasks
from tsuru_unit_agent.client import Client
TEMP_ENV_FILE = "/tmp/app_envs"
def run_action(args):
client = Client(args.url, args.token)
envs = None
try:
envs, supported_tsuru = client.register_unit(args.app_name)
save_apprc_file(envs, supported_tsuru)
except ConnectionError:
envs = tasks.parse_apprc_file()
yaml_data = tasks.load_app_yaml()
tasks.write_circus_conf(envs=envs)
tasks.run_restart_hooks('before', yaml_data, envs=envs)
tasks.execute_start_script(args.start_cmd, envs=envs, with_shell=False)
tasks.run_restart_hooks('after', yaml_data, envs=envs)
remove_temp_env_file()
def deploy_action(args):
heartbeat.StderrHeartbeat().start()
client = Client(args.url, args.token)
envs, supported_tsuru = client.register_unit(args.app_name)
save_apprc_file(envs, supported_tsuru)
tasks.execute_start_script(args.start_cmd)
yaml_data = tasks.load_app_yaml()
client.post_app_yaml(args.app_name, yaml_data)
tasks.run_build_hooks(yaml_data, envs=envs)
remove_temp_env_file()
yaml_data["procfile"] = tasks.load_procfile()
yaml_data["processes"] = tasks.parse_procfile()
client.register_unit(args.app_name, yaml_data)
tasks.write_circus_conf(envs=envs)
def save_apprc_file(envs, supported_tsuru):
no_apprc_version = semantic_version.Version("0.17.0")
supported_version = semantic_version.Version(supported_tsuru)
port_envs = {"port": "8888", "PORT": "8888"}
if supported_version < no_apprc_version:
tasks.save_apprc_file(envs)
else:
tasks.save_apprc_file(port_envs)
tasks.save_apprc_file(envs, file_path=TEMP_ENV_FILE)
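# i.e. tsuru older than 0.17.0 gets the full env set in the default apprc file,
# while 0.17.0+ gets only the port envs there and the full set in TEMP_ENV_FILE.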
def remove_temp_env_file():
try:
os.unlink(TEMP_ENV_FILE)
except OSError:
pass
actions = {
'run': run_action,
'deploy': deploy_action
}
def parse_args(args=None):
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description='Runs tsuru-unit-agent.')
parser.add_argument('url', help='URL for tsuru API server')
parser.add_argument('token', help='Authentication token for tsuru API server')
parser.add_argument('app_name', help='The app name')
parser.add_argument('start_cmd', help='Command to run after notifying tsuru API server')
parser.add_argument('action', default='run', nargs='?', choices=actions.keys(), help='Action being executed')
return parser.parse_args(args)
def main():
args = parse_args()
actions[args.action](args)
if __name__ == '__main__':
main()
|
{
"content_hash": "3ef79aff0335527bd8dba5d358d3ed58",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 113,
"avg_line_length": 30.53932584269663,
"alnum_prop": 0.6817512877115526,
"repo_name": "tsuru/tsuru-unit-agent",
"id": "995d0be41eae8181d403545c324d52c536b30967",
"size": "2718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tsuru_unit_agent/main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "165"
},
{
"name": "Python",
"bytes": "77279"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce
version_added: "1.4"
short_description: create or terminate GCE instances
description:
- Creates or terminates Google Compute Engine (GCE) instances. See
U(https://cloud.google.com/compute) for an overview.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
deprecated:
removed_in: "2.12"
why: Updated modules released with increased functionality
alternative: Use M(gcp_compute_instance) instead.
options:
image:
description:
- image string to use for the instance (default will follow latest
stable debian image)
default: "debian-8"
image_family:
description:
- image family from which to select the image. The most recent
non-deprecated image in the family will be used.
version_added: "2.4"
external_projects:
description:
- A list of other projects (accessible with the provisioning credentials)
to be searched for the image.
version_added: "2.4"
instance_names:
description:
- a comma-separated list of instance names to create or destroy
machine_type:
description:
- machine type to use for the instance; 'n1-standard-1' is used by default
default: "n1-standard-1"
metadata:
description:
- a hash/dictionary of custom data for the instance;
'{"key":"value", ...}'
service_account_email:
version_added: "1.5.1"
description:
- service account email
service_account_permissions:
version_added: "2.0"
description:
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
"monitoring", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
pem_file:
version_added: "1.5.1"
description:
- path to the pem file associated with the service account email.
This option is deprecated. Use 'credentials_file'.
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
project_id:
version_added: "1.5.1"
description:
- your GCE project ID
name:
description:
- either the name of a single instance or, when used with 'num_instances',
the base name of a cluster of nodes
aliases: ['base_name']
num_instances:
description:
- can be used with 'name', specifies
the number of nodes to provision using 'name'
as a base name
version_added: "2.3"
network:
description:
- name of the network; 'default' will be used if not specified
default: "default"
subnetwork:
description:
- name of the subnetwork in which the instance should be created
version_added: "2.2"
persistent_boot_disk:
description:
- if set, create the instance with a persistent boot disk
type: bool
default: 'no'
disks:
description:
- a list of persistent disks to attach to the instance; a string value
gives the name of the disk; alternatively, a dictionary value can
define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
will be the boot disk (which must be READ_WRITE).
version_added: "1.7"
state:
description:
- desired state of the resource
default: "present"
choices: ["active", "present", "absent", "deleted", "started", "stopped", "terminated"]
tags:
description:
- a comma-separated list of tags to associate with the instance
zone:
description:
- the GCE zone to use. The list of available zones is at U(https://cloud.google.com/compute/docs/regions-zones/regions-zones#available).
required: true
default: "us-central1-a"
ip_forward:
version_added: "1.9"
description:
- set to C(yes) if the instance can forward ip packets (useful for
gateways)
type: bool
default: 'no'
external_ip:
version_added: "1.9"
description:
- type of external ip, ephemeral by default; alternatively, a fixed gce ip or ip name can be given. Specify 'none' if no external ip is desired.
default: "ephemeral"
disk_auto_delete:
version_added: "1.9"
description:
- if set, the boot disk will be removed after instance destruction
type: bool
default: 'yes'
preemptible:
version_added: "2.1"
description:
- if set to C(yes), instances will be preemptible and time-limited.
(requires libcloud >= 0.20.0)
type: bool
default: 'no'
disk_size:
description:
- The size of the boot disk created for this instance (in GB)
default: 10
version_added: "2.3"
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
>= 0.20.0 if using preemptible option"
notes:
- Either I(instance_names) or I(name) is required.
- JSON credentials strongly preferred.
author:
- Eric Johnson (@erjohnso) <erjohnso@google.com>
- Tom Melendez (@supertom) <supertom@google.com>
'''
EXAMPLES = '''
# Basic provisioning example. Create a single Debian 8 instance in the
# us-central1-a Zone of the n1-standard-1 machine type.
# Create multiple instances by specifying multiple names, separated by
# commas in the instance_names field
# (e.g. my-test-instance1,my-test-instance2)
- gce:
instance_names: my-test-instance1
zone: us-central1-a
machine_type: n1-standard-1
image: debian-8
state: present
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
disk_size: 32
# Create a single instance of an image from the "my-base-image" image family
# in the us-central1-a Zone of the n1-standard-1 machine type.
# This image family is in the "my-other-project" GCP project.
- gce:
instance_names: my-test-instance1
zone: us-central1-a
machine_type: n1-standard-1
image_family: my-base-image
external_projects:
- my-other-project
state: present
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
disk_size: 32
# Create a single Debian 8 instance in the us-central1-a Zone
# Use existing disks, custom network/subnetwork, set service account permissions
# add tags and metadata.
- gce:
instance_names: my-test-instance
zone: us-central1-a
machine_type: n1-standard-1
state: present
metadata: '{"db":"postgres", "group":"qa", "id":500}'
tags:
- http-server
- my-other-tag
disks:
- name: disk-2
mode: READ_WRITE
- name: disk-3
mode: READ_ONLY
disk_auto_delete: false
network: foobar-network
subnetwork: foobar-subnetwork-1
preemptible: true
ip_forward: true
service_account_permissions:
- storage-full
- taskqueue
- bigquery
- https://www.googleapis.com/auth/ndev.clouddns.readwrite
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
---
# Example Playbook
- name: Compute Engine Instance Examples
hosts: localhost
vars:
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
tasks:
- name: create multiple instances
# Basic provisioning example. Create multiple Debian 8 instances in the
# us-central1-a Zone of n1-standard-1 machine type.
gce:
instance_names: test1,test2,test3
zone: us-central1-a
machine_type: n1-standard-1
image: debian-8
state: present
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
metadata : '{ "startup-script" : "apt-get update" }'
register: gce
- name: Save host data
add_host:
hostname: "{{ item.public_ip }}"
groupname: gce_instances_ips
with_items: "{{ gce.instance_data }}"
- name: Wait for SSH for instances
wait_for:
delay: 1
host: "{{ item.public_ip }}"
port: 22
state: started
timeout: 30
with_items: "{{ gce.instance_data }}"
- name: Configure Hosts
hosts: gce_instances_ips
become: yes
become_method: sudo
roles:
- my-role-one
- my-role-two
tags:
- config
- name: delete test-instances
# Basic termination of instance.
gce:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
instance_names: "{{ gce.instance_names }}"
zone: us-central1-a
state: absent
tags:
- delete
'''
import socket
import logging
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
from libcloud.compute.drivers.gce import GCEAddress
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect, unexpected_error_msg
from ansible.module_utils.gcp import get_valid_location
from ansible.module_utils.six.moves import reduce
def get_instance_info(inst):
"""Retrieves instance information from an instance object and returns it
as a dictionary.
"""
metadata = {}
if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
for md in inst.extra['metadata']['items']:
metadata[md['key']] = md['value']
try:
netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
except Exception:
netname = None
try:
subnetname = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
except Exception:
subnetname = None
if 'disks' in inst.extra:
disk_names = [disk_info['source'].split('/')[-1]
for disk_info
in sorted(inst.extra['disks'],
key=lambda disk_info: disk_info['index'])]
else:
disk_names = []
if len(inst.public_ips) == 0:
public_ip = None
else:
public_ip = inst.public_ips[0]
return ({
'image': inst.image is not None and inst.image.split('/')[-1] or None,
'disks': disk_names,
'machine_type': inst.size,
'metadata': metadata,
'name': inst.name,
'network': netname,
'subnetwork': subnetname,
'private_ip': inst.private_ips[0],
'public_ip': public_ip,
'status': ('status' in inst.extra) and inst.extra['status'] or None,
'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
})
def create_instances(module, gce, instance_names, number, lc_zone):
"""Creates new instances. Attributes other than instance_names are picked
up from 'module'
module : AnsibleModule object
gce: authenticated GCE libcloud driver
instance_names: python list of instance names to create
number: number of instances to create
lc_zone: GCEZone object
Returns:
A list of dictionaries with instance information
about the instances that were launched.
"""
image = module.params.get('image')
image_family = module.params.get('image_family')
external_projects = module.params.get('external_projects')
machine_type = module.params.get('machine_type')
metadata = module.params.get('metadata')
network = module.params.get('network')
subnetwork = module.params.get('subnetwork')
persistent_boot_disk = module.params.get('persistent_boot_disk')
disks = module.params.get('disks')
tags = module.params.get('tags')
ip_forward = module.params.get('ip_forward')
external_ip = module.params.get('external_ip')
disk_auto_delete = module.params.get('disk_auto_delete')
preemptible = module.params.get('preemptible')
disk_size = module.params.get('disk_size')
service_account_permissions = module.params.get('service_account_permissions')
if external_ip == "none":
instance_external_ip = None
elif external_ip != "ephemeral":
instance_external_ip = external_ip
try:
# check if instance_external_ip is an ip or a name
try:
socket.inet_aton(instance_external_ip)
instance_external_ip = GCEAddress(id='unknown', name='unknown', address=instance_external_ip, region='unknown', driver=gce)
except socket.error:
instance_external_ip = gce.ex_get_address(instance_external_ip)
except GoogleBaseError as e:
module.fail_json(msg='Unexpected error attempting to get a static ip %s, error: %s' % (external_ip, e.value))
else:
instance_external_ip = external_ip
new_instances = []
changed = False
lc_disks = []
disk_modes = []
for i, disk in enumerate(disks or []):
if isinstance(disk, dict):
lc_disks.append(gce.ex_get_volume(disk['name'], lc_zone))
disk_modes.append(disk['mode'])
else:
lc_disks.append(gce.ex_get_volume(disk, lc_zone))
# boot disk is implicitly READ_WRITE
disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
lc_network = gce.ex_get_network(network)
lc_machine_type = gce.ex_get_size(machine_type, lc_zone)
# Try to convert the user's metadata value into the format expected
# by GCE. First try to ensure user has proper quoting of a
# dictionary-like syntax using 'literal_eval', then convert the python
# dict into a python list of 'key' / 'value' dicts. Should end up
# with:
# [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
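# For instance, metadata '{"db":"postgres", "group":"qa"}' becomes
# {'items': [{'key': 'db', 'value': 'postgres'}, {'key': 'group', 'value': 'qa'}]}
# on libcloud < 0.15; newer libcloud versions accept the plain dict unchanged.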
if metadata:
if isinstance(metadata, dict):
md = metadata
else:
try:
md = literal_eval(str(metadata))
if not isinstance(md, dict):
raise ValueError('metadata must be a dict')
except ValueError as e:
module.fail_json(msg='bad metadata: %s' % str(e))
except SyntaxError as e:
module.fail_json(msg='bad metadata syntax')
if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
items = []
for k, v in md.items():
items.append({"key": k, "value": v})
metadata = {'items': items}
else:
metadata = md
lc_image = LazyDiskImage(module, gce, image, lc_disks, family=image_family, projects=external_projects)
ex_sa_perms = []
bad_perms = []
if service_account_permissions:
for perm in service_account_permissions:
if perm not in gce.SA_SCOPES_MAP and not perm.startswith('https://www.googleapis.com/auth'):
bad_perms.append(perm)
if len(bad_perms) > 0:
module.fail_json(msg='bad permissions: %s' % str(bad_perms))
ex_sa_perms.append({'email': "default"})
ex_sa_perms[0]['scopes'] = service_account_permissions
# These variables all have default values but check just in case
if not lc_network or not lc_machine_type or not lc_zone:
module.fail_json(msg='Missing required create instance variable',
changed=False)
gce_args = dict(
location=lc_zone,
ex_network=network, ex_tags=tags, ex_metadata=metadata,
ex_can_ip_forward=ip_forward,
external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete,
ex_service_accounts=ex_sa_perms
)
if preemptible is not None:
gce_args['ex_preemptible'] = preemptible
if subnetwork is not None:
gce_args['ex_subnetwork'] = subnetwork
if isinstance(instance_names, str) and not number:
instance_names = [instance_names]
if isinstance(instance_names, str) and number:
instance_responses = gce.ex_create_multiple_nodes(instance_names, lc_machine_type,
lc_image(), number, **gce_args)
for resp in instance_responses:
n = resp
if isinstance(resp, libcloud.compute.drivers.gce.GCEFailedNode):
try:
n = gce.ex_get_node(n.name, lc_zone)
except ResourceNotFoundError:
pass
else:
# Assure that at least one node has been created to set changed=True
changed = True
new_instances.append(n)
else:
for instance in instance_names:
pd = None
if lc_disks:
pd = lc_disks[0]
elif persistent_boot_disk:
try:
pd = gce.ex_get_volume("%s" % instance, lc_zone)
except ResourceNotFoundError:
pd = gce.create_volume(disk_size, "%s" % instance, image=lc_image())
gce_args['ex_boot_disk'] = pd
inst = None
try:
inst = gce.ex_get_node(instance, lc_zone)
except ResourceNotFoundError:
inst = gce.create_node(
instance, lc_machine_type, lc_image(), **gce_args
)
changed = True
except GoogleBaseError as e:
module.fail_json(msg='Unexpected error attempting to create ' +
'instance %s, error: %s' % (instance, e.value))
if inst:
new_instances.append(inst)
for inst in new_instances:
for i, lc_disk in enumerate(lc_disks):
# Check whether the disk is already attached
if (len(inst.extra['disks']) > i):
attached_disk = inst.extra['disks'][i]
if attached_disk['source'] != lc_disk.extra['selfLink']:
module.fail_json(
msg=("Disk at index %d does not match: requested=%s found=%s" % (
i, lc_disk.extra['selfLink'], attached_disk['source'])))
elif attached_disk['mode'] != disk_modes[i]:
module.fail_json(
msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
i, disk_modes[i], attached_disk['mode'])))
else:
continue
gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
# Work around libcloud bug: attached volumes don't get added
# to the instance metadata. get_instance_info() only cares about
# source and index.
if len(inst.extra['disks']) != i + 1:
inst.extra['disks'].append(
{'source': lc_disk.extra['selfLink'], 'index': i})
instance_names = []
instance_json_data = []
for inst in new_instances:
d = get_instance_info(inst)
instance_names.append(d['name'])
instance_json_data.append(d)
return (changed, instance_json_data, instance_names)
def change_instance_state(module, gce, instance_names, number, zone, state):
"""Changes the state of a list of instances. For example,
change from started to stopped, or started to absent.
module: Ansible module object
gce: authenticated GCE connection object
instance_names: a list of instance names to terminate
zone: GCEZone object where the instances reside prior to termination
state: 'state' parameter passed into module as argument
Returns a dictionary of instance names that were changed.
"""
changed = False
nodes = []
state_instance_names = []
if isinstance(instance_names, str) and number:
node_names = ['%s-%03d' % (instance_names, i) for i in range(number)]
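# e.g. a base name 'web' with number=3 expands to ['web-000', 'web-001', 'web-002']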
elif isinstance(instance_names, str) and not number:
node_names = [instance_names]
else:
node_names = instance_names
for name in node_names:
inst = None
try:
inst = gce.ex_get_node(name, zone)
except ResourceNotFoundError:
state_instance_names.append(name)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
else:
nodes.append(inst)
state_instance_names.append(name)
if state in ['absent', 'deleted'] and number:
changed_nodes = gce.ex_destroy_multiple_nodes(nodes) or [False]
changed = reduce(lambda x, y: x or y, changed_nodes)
else:
for node in nodes:
if state in ['absent', 'deleted']:
gce.destroy_node(node)
changed = True
elif state == 'started' and node.state == libcloud.compute.types.NodeState.STOPPED:
gce.ex_start_node(node)
changed = True
elif state in ['stopped', 'terminated'] and node.state == libcloud.compute.types.NodeState.RUNNING:
gce.ex_stop_node(node)
changed = True
return (changed, state_instance_names)
def main():
module = AnsibleModule(
argument_spec=dict(
image=dict(default='debian-8'),
image_family=dict(),
external_projects=dict(type='list'),
instance_names=dict(),
machine_type=dict(default='n1-standard-1'),
metadata=dict(),
name=dict(aliases=['base_name']),
num_instances=dict(type='int'),
network=dict(default='default'),
subnetwork=dict(),
persistent_boot_disk=dict(type='bool', default=False),
disks=dict(type='list'),
state=dict(choices=['active', 'present', 'absent', 'deleted',
'started', 'stopped', 'terminated'],
default='present'),
tags=dict(type='list'),
zone=dict(default='us-central1-a'),
service_account_email=dict(),
service_account_permissions=dict(type='list'),
pem_file=dict(type='path'),
credentials_file=dict(type='path'),
project_id=dict(),
ip_forward=dict(type='bool', default=False),
external_ip=dict(default='ephemeral'),
disk_auto_delete=dict(type='bool', default=True),
disk_size=dict(type='int', default=10),
preemptible=dict(type='bool', default=None),
),
mutually_exclusive=[('instance_names', 'name')]
)
if not HAS_PYTHON26:
module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
gce = gce_connect(module)
image = module.params.get('image')
image_family = module.params.get('image_family')
external_projects = module.params.get('external_projects')
instance_names = module.params.get('instance_names')
name = module.params.get('name')
number = module.params.get('num_instances')
subnetwork = module.params.get('subnetwork')
state = module.params.get('state')
zone = module.params.get('zone')
preemptible = module.params.get('preemptible')
changed = False
inames = None
if isinstance(instance_names, list):
inames = instance_names
elif isinstance(instance_names, str):
inames = instance_names.split(',')
if name:
inames = name
if not inames:
module.fail_json(msg='Must specify a "name" or "instance_names"',
changed=False)
if not zone:
module.fail_json(msg='Must specify a "zone"', changed=False)
lc_zone = get_valid_location(module, gce, zone)
if preemptible is not None and hasattr(libcloud, '__version__') and libcloud.__version__ < '0.20':
module.fail_json(msg="Apache Libcloud 0.20.0+ is required to use 'preemptible' option",
changed=False)
if subnetwork is not None and not hasattr(gce, 'ex_get_subnetwork'):
module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'subnetwork' option",
changed=False)
json_output = {'zone': zone}
if state in ['absent', 'deleted', 'started', 'stopped', 'terminated']:
json_output['state'] = state
(changed, state_instance_names) = change_instance_state(
module, gce, inames, number, lc_zone, state)
# based on what user specified, return the same variable, although
# value could be different if an instance could not be destroyed
if instance_names or name and number:
json_output['instance_names'] = state_instance_names
elif name:
json_output['name'] = name
elif state in ['active', 'present']:
json_output['state'] = 'present'
(changed, instance_data, instance_name_list) = create_instances(
module, gce, inames, number, lc_zone)
json_output['instance_data'] = instance_data
if instance_names:
json_output['instance_names'] = instance_name_list
elif name:
json_output['name'] = name
json_output['changed'] = changed
module.exit_json(**json_output)
class LazyDiskImage:
"""
Object for lazy instantiation of disk image
gce.ex_get_image is a very expensive call, so we want to avoid calling it as much as possible.
"""
def __init__(self, module, gce, name, has_pd, family=None, projects=None):
self.image = None
self.was_called = False
self.gce = gce
self.name = name
self.has_pd = has_pd
self.module = module
self.family = family
self.projects = projects
def __call__(self):
if not self.was_called:
self.was_called = True
if not self.has_pd:
if self.family:
self.image = self.gce.ex_get_image_from_family(self.family, ex_project_list=self.projects)
else:
self.image = self.gce.ex_get_image(self.name, ex_project_list=self.projects)
if not self.image:
self.module.fail_json(msg='image or disks missing for create instance', changed=False)
return self.image
if __name__ == '__main__':
main()
|
{
"content_hash": "d3d00d3484917e32a4225263db8ae83e",
"timestamp": "",
"source": "github",
"line_count": 750,
"max_line_length": 150,
"avg_line_length": 36.757333333333335,
"alnum_prop": 0.6088218224027858,
"repo_name": "SergeyCherepanov/ansible",
"id": "d5bd6abf28008704c878f5221d0b9f2e5c28d216",
"size": "27709",
"binary": false,
"copies": "42",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/cloud/google/_gce.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
import pickle
import sys
import os
if __name__ == '__main__':
from peyotl.ott import OTT
multimapping = set()
picklefn = sys.argv[1]
if os.path.exists(picklefn):
sys.exit('{} already exists'.format(picklefn))
ott = OTT()
ncbi2ott = {}
for ott_id, info in ott.ott_id_to_info.items():
ncbi = info.get('ncbi')
if ncbi is not None:
if ncbi in ncbi2ott:
prev = ncbi2ott[ncbi]
if isinstance(prev, list):
prev.append(ott_id)
else:
ncbi2ott[ncbi] = [prev, ott_id]
multimapping.add(ncbi)
else:
ncbi2ott[ncbi] = ott_id
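# ncbi2ott values are a single OTT id for 1:1 mappings, or a list of OTT ids
# when one NCBI id maps to several (those NCBI ids are tracked in multimapping).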
with open(picklefn, 'wb') as fo:
pickle.dump(ncbi2ott, fo)
if multimapping:
sys.stderr.write('{i:d} ncbi IDs mapped to multiple OTT IDs\n'.format(i=len(multimapping)))
|
{
"content_hash": "8e97db1a52aea7c14172fe7b43ba3566",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 99,
"avg_line_length": 33.370370370370374,
"alnum_prop": 0.5338512763596004,
"repo_name": "rvosa/peyotl",
"id": "2e0f44ba5ccf660f96b7a3a48d5185368951a67d",
"size": "923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/process-ott/create-ncbi-to-ott.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "842998"
},
{
"name": "Shell",
"bytes": "13517"
},
{
"name": "XSLT",
"bytes": "573"
}
],
"symlink_target": ""
}
|
import subprocess
import inspect
import os
import urllib3
import re
import threading
import ipaddress
import argparse
import time
import getpass
TDVT_SDK_NAME = "connector-plugin-sdk"
TDVT_SDK_REPO = "https://github.com/tableau/" + TDVT_SDK_NAME
TDVT_SDK_BRANCH = "tdvt-2.1.9"
TDVT_ES_SCHEME = "simple_lower"
TDVT_RUN_DIR = "run"
TIMEOUTS = {"_default_": 5, "checkout_tdvt_sdk": 60, "setup_workspace": 10, "add_data_source": 300, "run_tdvt": 1800}
TDVT_LAUNCHER = os.path.join(TDVT_SDK_NAME, "tdvt", "tdvt_launcher.py")
#
# configs
TDS_SRC_DIR = "tds"
TACO_SRC_DIR = "C:\\Users\\" + getpass.getuser() + "\\Documents\\My Tableau Repository\\Connectors"
TACO_SIGNED = True
ES_URL = "http://elastic-admin:elastic-password@127.0.0.1:9200"
def interact(proc, interactive):
# no straightforward non-blocking read on Windows -> char reader in its own thread
def read_stdout(buff, condition):
c = " "
while c != "":
c = proc.stdout.read(1)
condition.acquire()
buff.append('\0' if c == "" else c)
condition.notify()
condition.release()
buff = [' ']
condition = threading.Condition()
reader = threading.Thread(target=read_stdout, args=(buff, condition))
reader.start()
interactive.reverse()
while len(interactive) > 0 and reader.is_alive():
token, answer = interactive.pop()
condition.acquire()
while buff[-1] != '\0':
output = "".join(buff)
if token not in output:
condition.wait()
else:
condition.release()
proc.stdin.write(answer + '\n')
proc.stdin.flush()
break
reader.join()
def exe(args, interactive = None, raise_on_retcode = True):
with subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True) as proc:
if interactive is not None:
interact(proc, interactive)
try:
caller = inspect.stack()[1][3]
proc.wait(TIMEOUTS[caller] if caller in TIMEOUTS.keys() else TIMEOUTS["_default_"])
except subprocess.TimeoutExpired as e:
proc.kill()
stdout, stderr = proc.communicate()
if proc.returncode != 0 and raise_on_retcode:
print("command stdout: \n" + stdout)
print("command stderr: \n" + stderr)
raise Exception("Command exited with code %s: '%s' !" % (proc.returncode, args))
return (proc.returncode, stdout, stderr)
def checkout_tdvt_sdk():
git_args = ["git", "clone", "--depth", "1", "--branch", TDVT_SDK_BRANCH, TDVT_SDK_REPO]
exe(git_args)
def setup_workspace():
tdvt_args = ["py", "-3", TDVT_LAUNCHER, "action", "--setup"]
exe(tdvt_args)
def install_tds_files(tds_src_dir, elastic_url):
TDVT_TDS_DIR = "tds"
def dbname(es_host):
try:
ipaddress.ip_address(es_host)
return es_host
except ValueError as ve:
pos = es_host.find(".")
return es_host if pos <= 0 else es_host[:pos]
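# e.g. dbname("es01.example.com") -> "es01", dbname("127.0.0.1") -> "127.0.0.1"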
es_url = urllib3.util.parse_url(elastic_url)
if ':' in es_url.auth:
(es_user, es_pass) = es_url.auth.split(':')
else:
(es_user, es_pass) = es_url.auth, ""
for (dirpath, dirnames, filenames) in os.walk(tds_src_dir, topdown=True, followlinks=False):
if dirpath != tds_src_dir:
break
for filename in filenames:
if not (filename.endswith(".tds") or filename.endswith(".password")):
continue
with open(os.path.join(tds_src_dir, filename)) as src:
content = src.read()
if filename.endswith(".tds"):
content = content.replace("caption='127.0.0.1'", "caption='" + es_url.host + "'")
content = content.replace("dbname='elasticsearch'", "dbname='" + dbname(es_url.host) + "'")
content = content.replace("server='127.0.0.1'", "server='" + es_url.host + "'")
content = content.replace("port='9200'", "port='" + str(es_url.port) + "'")
if es_user != "elastic":
content = content.replace("username='elastic'", "username='" + es_user + "'")
if es_url.scheme.lower() == "https":
content = content.replace("sslmode=''", "sslmode='require'")
elif filename.endswith(".password"):
content = content.replace("<REDACTED>", es_pass)
else:
continue # shouldn't happen
with open(os.path.join(TDVT_TDS_DIR, filename), "w") as dest:
dest.write(content)
def latest_tabquery():
TABLEAU_INSTALL_FOLDER = os.path.join("C:\\", "Program Files", "Tableau")
TABQUERY_UNDERPATH = os.path.join("bin", "tabquerytool.exe")
latest = ""
for (dirpath, dirnames, filenames) in os.walk(TABLEAU_INSTALL_FOLDER, topdown=True):
if dirpath != TABLEAU_INSTALL_FOLDER:
pass #break
for dirname in dirnames:
if re.match("^Tableau 202[0-9]\.[0-9]$", dirname):
if dirname > latest:
latest = dirname
tabquery_path = os.path.join(TABLEAU_INSTALL_FOLDER, latest, TABQUERY_UNDERPATH)
os.stat(tabquery_path) # check if the executable's there
return tabquery_path
def config_tdvt_override_ini():
TDVT_INI_PATH = os.path.join("config", "tdvt", "tdvt_override.ini")
tabquery_path = latest_tabquery()
tabquery_path_line = "TAB_CLI_EXE_X64 = " + tabquery_path
updated_lines = []
with open(TDVT_INI_PATH) as ini:
for line in ini.readlines():
l = line if not line.startswith("TAB_CLI_EXE_X64") else tabquery_path_line
l += '\n'
updated_lines.append(l)
if len(updated_lines) <= 0:
print("WARNING: empty ini file under: " + TDVT_INI_PATH)
updated_lines.append("[DEFAULT]\n")
updated_lines.append(tabquery_path_line + '\n')
with open(TDVT_INI_PATH, "w") as ini:
ini.writelines(updated_lines)
def add_data_source():
tdvt_args = ["py", "-3", TDVT_LAUNCHER, "action", "--add_ds", "elastic"]
interactive = [("connection per tds (standard).", "n"), ("to skip selecting one now:", TDVT_ES_SCHEME),
("Overwrite existing ini file?(y/n)", "y")]
exe(tdvt_args, interactive)
def config_elastic_ini():
ELASTIC_INI = os.path.join("config", "elastic.ini")
cmdline_override = "CommandLineOverride = -DConnectPluginsPath=%s -DDisableVerifyConnectorPluginSignature=%s" % \
(TACO_SRC_DIR, TACO_SIGNED)
updated_lines = []
with open(ELASTIC_INI) as ini:
for line in ini.readlines():
updated_lines.append(line)
if line.startswith("LogicalQueryFormat"):
updated_lines.append(cmdline_override)
with open(ELASTIC_INI, "w") as ini:
ini.writelines(updated_lines)
def run_tdvt():
tdvt_args = ["py", "-3", TDVT_LAUNCHER, "run", "elastic"]
_, stdout, __ = exe(tdvt_args, raise_on_retcode = False)
print(stdout)
def parse_args():
parser = argparse.ArgumentParser(description="TDVT runner of the Tableau connector for Elasticsearch.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-t", "--taco-dir", help="Directory containing the connector file.",
default=TACO_SRC_DIR)
parser.add_argument("-s", "--signed", help="Is the .taco signed?", action="store_true", default=TACO_SIGNED)
parser.add_argument("-r", "--run-dir", help="Directory to run the testing under.",
default=TDVT_RUN_DIR)
parser.add_argument("-u", "--url", help="Elasticsearch URL.", type=str, default=ES_URL)
parser.add_argument("-c", "--clean", help="Clean-up run directory", action="store_true", default=False)
return parser.parse_args()
def main():
started_at = time.time()
args = parse_args()
cwd = os.getcwd()
if args.clean:
import shutil # dependency!
shutil.rmtree(args.run_dir)
if not os.path.isdir(args.run_dir):
os.makedirs(args.run_dir)
os.chdir(args.run_dir)
if not os.path.isdir(TDVT_SDK_NAME):  # check for the cloned directory, not the repo URL
checkout_tdvt_sdk()
setup_workspace()
tds_src = TDS_SRC_DIR if os.path.isabs(TDS_SRC_DIR) else os.path.join(cwd, TDS_SRC_DIR)
install_tds_files(tds_src, args.url)
config_tdvt_override_ini()
add_data_source()
if args.taco_dir != TACO_SRC_DIR and args.signed != TACO_SIGNED:
config_elastic_ini()
run_tdvt()
print("Test run took %.2f seconds." % (time.time() - started_at))
if __name__ == "__main__":
main()
# vim: set noet fenc=utf-8 ff=dos sts=0 sw=4 ts=4 tw=118 expandtab :
|
{
"content_hash": "5ac8791e5f31aaa30e0ec037ebc0c590",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 117,
"avg_line_length": 36.54132231404959,
"alnum_prop": 0.5933506728485808,
"repo_name": "nknize/elasticsearch",
"id": "3135e11765bd0d8c208d6d5099456603c79d32f8",
"size": "9099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "x-pack/plugin/sql/connectors/tableau/tdvt/tdvt_run.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "12298"
},
{
"name": "Batchfile",
"bytes": "16353"
},
{
"name": "Emacs Lisp",
"bytes": "3341"
},
{
"name": "FreeMarker",
"bytes": "45"
},
{
"name": "Groovy",
"bytes": "251795"
},
{
"name": "HTML",
"bytes": "5348"
},
{
"name": "Java",
"bytes": "36849935"
},
{
"name": "Perl",
"bytes": "7116"
},
{
"name": "Python",
"bytes": "76127"
},
{
"name": "Shell",
"bytes": "102829"
}
],
"symlink_target": ""
}
|
'''
Project: Farnsworth
Author: Karandeep Singh Nagra
'''
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
from utils.funcs import convert_to_url
from base.models import UserProfile
class Manager(models.Model):
'''
The Manager model. Contains title, incumbent, and duties.
'''
title = models.CharField(
unique=True,
blank=False,
null=False,
max_length=255,
help_text="The title of this management position.",
)
url_title = models.CharField(
blank=False,
null=False,
max_length=255,
help_text="The unique URL key for this manager. Autogenerated from custom interface.",
)
incumbent = models.ForeignKey(
UserProfile,
blank=True,
null=True,
on_delete=models.SET_NULL,
help_text="The incumbent for this position.",
)
semester_hours = models.DecimalField(
max_digits=5,
decimal_places=2,
default=settings.DEFAULT_WORKSHIFT_HOURS,
help_text="Number of hours this manager receives during the fall and spring.",
)
summer_hours = models.DecimalField(
max_digits=5,
decimal_places=2,
default=settings.DEFAULT_WORKSHIFT_HOURS,
help_text="Number of hours this manager receives during the summer.",
)
compensation = models.TextField(
blank=True,
null=True,
help_text="The compensation for this manager.",
)
duties = models.TextField(
blank=True,
null=True,
help_text="The duties of this manager.",
)
email = models.EmailField(
blank=True,
null=True,
max_length=255,
help_text="The e-mail address of this manager.",
)
president = models.BooleanField(
default=False,
help_text="Whether this manager has president privileges (edit managers, bylaws, etc.).",
)
workshift_manager = models.BooleanField(
default=False,
help_text="Whether this manager has workshift manager privileges (assign workshifts, etc.).",
)
active = models.BooleanField(
default=True,
help_text="Whether this is an active manager position (visible in directory, etc.).",
)
def __unicode__(self):
return self.title
def is_manager(self):
return True
def get_view_url(self):
return reverse("managers:view_manager", kwargs={"managerTitle": self.url_title})
def get_edit_url(self):
return reverse("managers:edit_manager", kwargs={"managerTitle": self.url_title})
def __init__(self, *args, **kwargs):
if "title" in kwargs:
kwargs.setdefault("url_title", convert_to_url(kwargs["title"]))
super(Manager, self).__init__(*args, **kwargs)
class Meta:
ordering = ['title']
class RequestType(models.Model):
'''
A request type to specify relevant managers and name.
'''
name = models.CharField(
max_length=255,
unique=True,
blank=False,
null=False,
help_text="Name of the request type.",
)
url_name = models.CharField(
max_length=255,
unique=True,
blank=False,
null=False,
help_text="Unique URL key for this manager. Autogenerated from custom interface.",
)
managers = models.ManyToManyField(
Manager,
help_text="Managers to whom this type of request is made.",
)
enabled = models.BooleanField(
default=True,
help_text="Whether this type of request is currently accepted. Toggle this to off to temporarily disable accepting this type of request.",
)
glyphicon = models.CharField(
max_length=100,
blank=True,
null=True,
help_text="Glyphicon for this request type (e.g., cutlery). Check Bootstrap documentation for more info.",
)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
def is_requesttype(self):
return True
def __init__(self, *args, **kwargs):
if "name" in kwargs:
kwargs.setdefault("url_name", convert_to_url(kwargs["name"]))
super(RequestType, self).__init__(*args, **kwargs)
def get_view_url(self):
return reverse("managers:requests", kwargs={"requestType": self.url_name})
class Request(models.Model):
'''
The Request model. Contains an owner, body, post_date, change_date, and relevant
manager.
'''
owner = models.ForeignKey(
UserProfile,
blank=False,
null=False,
help_text="The user who made this request.",
)
body = models.TextField(
blank=False,
null=False,
help_text="The body of this request.",
)
post_date = models.DateTimeField(
auto_now_add=True,
help_text="The date this request was posted.",
)
change_date = models.DateTimeField(
auto_now=True,
help_text="The last time this request was modified.",
)
request_type = models.ForeignKey(
RequestType,
blank=False,
null=False,
help_text="The type of request this is.",
)
OPEN = 'O'
CLOSED = 'C'
FILLED = 'F'
EXPIRED = 'E'
STATUS_CHOICES = (
(OPEN, "Open"),
(CLOSED, "Closed"),
(FILLED, "Filled"),
(EXPIRED, "Expired"),
)
status = models.CharField(
max_length=1,
choices=STATUS_CHOICES,
default=OPEN,
help_text="Status of this request."
)
number_of_responses = models.PositiveSmallIntegerField(
default=0,
help_text="The number of responses to this request.",
)
upvotes = models.ManyToManyField(
UserProfile,
blank=True,
help_text="Up votes for this request.",
related_name="up_votes",
)
followers = models.ManyToManyField(
User,
blank=True,
help_text="Users following this request.",
related_name="request_followers",
)
private = models.BooleanField(
default=False,
help_text="Only show this request to the manager, other members cannot view it.",
)
def __unicode__(self):
return "{0.name} request by {1.owner}".format(self.request_type, self)
@property
def filled(self):
return self.status == self.FILLED
@property
def open(self):
return self.status == self.OPEN
@property
def closed(self):
return self.status == self.CLOSED
@property
def expired(self):
return self.status == self.EXPIRED
class Meta:
ordering = ['-post_date']
def is_request(self):
return True
def get_view_url(self):
return reverse("managers:view_request", kwargs={"request_pk": self.pk})
class Response(models.Model):
'''
The Response model. A response to a request. Very similar to Request.
'''
owner = models.ForeignKey(
UserProfile,
blank=False,
null=False,
help_text="The user who posted this response."
)
body = models.TextField(
blank=False,
null=False,
help_text="The body of this response."
)
post_date = models.DateTimeField(
auto_now_add=True,
help_text="The date this response was posted."
)
request = models.ForeignKey(
Request,
blank=False,
null=False,
help_text="The request to which this is a response."
)
manager = models.BooleanField(
default=False,
help_text="Whether this is a relevant manager response."
)
CLOSED = 'C'
REOPENED = 'O'
FILLED = 'F'
EXPIRED = 'E'
NONE = 'N'
ACTION_CHOICES = (
(NONE, "None"),
(CLOSED, "Mark closed (won't fill)"),
(REOPENED, "Mark open"),
(FILLED, "Mark filled"),
(EXPIRED, "Mark expired"),
)
action = models.CharField(
max_length=1,
choices=ACTION_CHOICES,
default=NONE,
help_text="A mark action (e.g., 'Marked closed'), if any."
)
def __unicode__(self):
return self.owner.user.get_full_name()
def display_action(self):
if self.action != self.NONE:
return '<div class="text-center"><hr style="width: 75%; margin-top: 0; margin-bottom: 0;" /></div><div class="field_wrapper text-info">Action: {0}</div>'.format(
self.get_action_display()
)
return ""
class Meta:
ordering = ['post_date']
def is_response(self):
return True
class Announcement(models.Model):
'''
Model for manager announcements.
'''
manager = models.ForeignKey(
Manager,
blank=False,
null=False,
help_text="The manager who made this announcement.",
)
incumbent = models.ForeignKey(
UserProfile,
blank=False,
null=False,
help_text="The incumbent who made this announcement.",
)
body = models.TextField(
blank=False,
null=False,
help_text="The body of the announcement.",
)
post_date = models.DateTimeField(
auto_now_add=True,
help_text="The date this announcement was posted.",
)
pinned = models.BooleanField(
default=False,
help_text="Whether this announcment should be pinned permanently.",
)
change_date = models.DateTimeField(
auto_now_add=True,
help_text="The last time this request was modified.",
)
def __unicode__(self):
return self.incumbent.user.get_full_name()
class Meta:
ordering = ['-post_date']
def is_announcement(self):
return True
def update_request(sender, instance, **kwargs):
instance.number_of_responses = instance.response_set.count()
def update_response(sender, instance, created, **kwargs):
response = instance
if created:
actions = {
Response.CLOSED: Request.CLOSED,
Response.REOPENED: Request.OPEN,
Response.FILLED: Request.FILLED,
Response.EXPIRED: Request.EXPIRED,
}
response.request.status = actions.get(
response.action, response.request.status,
)
response.request.save()
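# e.g. saving a new Response with action=Response.FILLED flips its Request's
# status to Request.FILLED via the post_save signal connected below.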
models.signals.pre_save.connect(update_request, sender=Request)
models.signals.post_save.connect(update_response, sender=Response)
|
{
"content_hash": "cff51da41f1556efb8f41caedbebf0fc",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 173,
"avg_line_length": 28.746630727762803,
"alnum_prop": 0.5918424753867791,
"repo_name": "knagra/farnsworth",
"id": "0105db7c90be08852dc98cb74589b4e6fe323ef5",
"size": "10665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "managers/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "189846"
},
{
"name": "HTML",
"bytes": "3022838"
},
{
"name": "JavaScript",
"bytes": "936810"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "742542"
}
],
"symlink_target": ""
}
|
"""Tests for calculator example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# import google3
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from google.protobuf import text_format
from tensorflow_fold.loom.calculator_example import calculator
from tensorflow_fold.loom.calculator_example import calculator_pb2
def evaluate_expression(string):
return calculator.evaluate_expression(
text_format.Parse(string, calculator_pb2.CalculatorExpression()))
class CalculatorTest(tf.test.TestCase):
def test_generated_expression_depth(self):
random.seed(0xdeadbeef) # Make RandomExpression deterministic.
for _ in xrange(1000):
expression = calculator.random_expression(5)
calculator.validate_expression(expression)
self.assertTrue(calculator.expression_depth(expression) <= 5)
def test_eval(self):
self.assertEqual(0, evaluate_expression(
"""op: DIV left<number: 3> right<number: 0>"""))
# Division by zero defaults to zero.
for n in xrange(10):
self.assertEqual(n, evaluate_expression(
"""number: {n}""".format(n=n)))
self.assertEqual(3 + n, evaluate_expression(
"""op: PLUS left<number: 3> right<number: {n}>""".format(n=n)))
self.assertEqual(2 * n, evaluate_expression(
"""op: PLUS left<number: {n}> right<number: {n}>""".format(n=n)))
self.assertEqual(0, evaluate_expression(
"""op: MINUS left<number: {n}> right<number: {n}>""".format(n=n)))
self.assertEqual(n * n * n, evaluate_expression(
"""op: TIMES
left<number: {n}>
right<op: TIMES left<number: {n}> right<number: {n}>>
""".format(n=n)))
self.assertEqual(n, evaluate_expression(
"""op: DIV left<number: {x}> right<number: 5>""".format(
x=5 * n + 3)))
if __name__ == '__main__':
tf.test.main()
|
{
"content_hash": "90c7226442ccd073ebe44e6b06dc739f",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 76,
"avg_line_length": 36.7962962962963,
"alnum_prop": 0.6552591847005536,
"repo_name": "pklfz/fold",
"id": "8fc85456fd709312e9757a5f718a1b9f7231ff15",
"size": "2584",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow_fold/loom/calculator_example/calculator_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "863"
},
{
"name": "C++",
"bytes": "79760"
},
{
"name": "Jupyter Notebook",
"bytes": "62090"
},
{
"name": "Protocol Buffer",
"bytes": "7198"
},
{
"name": "Python",
"bytes": "519629"
},
{
"name": "Shell",
"bytes": "10812"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
class TestElementWiseAddOp(unittest.TestCase):
def __assert_close(self, tensor, np_array, msg, atol=1e-4):
self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
def check_forward_backward(self):
def test_with_place(place):
out_grad = np.random.random_sample(self.x.shape).astype(np.float32)
x_grad = out_grad
sum_axis = list(range(0, len(self.x.shape)))
del sum_axis[self.axis]
y_grad = np.sum(out_grad, axis=tuple(sum_axis))
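# y is broadcast along every axis except self.axis, so its gradient is the
# upstream gradient summed over those axes; e.g. x of shape (4, 32, 220, 220)
# with axis=1 reduces out_grad over axes (0, 2, 3) to a (32,) gradient for y.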
var_dict = locals()
var_dict['y'] = self.y
var_dict['x'] = self.x
var_dict['out'] = self.out
var_dict['y@GRAD'] = y_grad
var_dict['x@GRAD'] = x_grad
var_dict['out@GRAD'] = out_grad
var_names = ['x', 'y', 'out', 'y@GRAD', 'x@GRAD', 'out@GRAD']
ground_truth = {name: var_dict[name] for name in var_names}
program = fluid.Program()
with fluid.program_guard(program):
block = program.global_block()
for name in ground_truth:
block.create_var(
name=name,
dtype='float32',
shape=ground_truth[name].shape)
elementwise_add_op = block.append_op(
type="elementwise_add",
inputs={
"X": block.var('x'),
"Y": block.var('y'),
},
outputs={"Out": block.var('out'), },
attrs={"axis": self.axis, })
# generate backward op_desc
grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
elementwise_add_op.desc, set(), [])
grad_op_desc = grad_op_desc_list[0]
new_op_desc = block.desc.append_op()
new_op_desc.copy_from(grad_op_desc)
for var_name in grad_op_desc.output_arg_names():
block.desc.var(var_name.encode("ascii"))
grad_op_desc.infer_var_type(block.desc)
grad_op_desc.infer_shape(block.desc)
for arg in grad_op_desc.output_arg_names():
grad_var = block.desc.find_var(arg.encode("ascii"))
grad_var.set_dtype(core.VarDesc.VarType.FP32)
exe = fluid.Executor(place)
out = exe.run(program,
feed={
name: var_dict[name]
for name in ['x', 'y', 'out@GRAD']
},
fetch_list=['x@GRAD', 'y@GRAD'])
self.__assert_close(x_grad, out[0], "x@GRAD")
self.__assert_close(y_grad, out[1], "y@GRAD", atol=1.4)
places = [core.CPUPlace()]
if core.is_compiled_with_cuda() and core.op_support_gpu(
"elementwise_add"):
places.append(core.CUDAPlace(0))
for place in places:
test_with_place(place)
def test_check_forward_backward_with_scale_and_bias(self):
np.random.seed(123)
self.x = np.random.random((4, 32, 220, 220)).astype(np.float32)
self.y = np.random.random((32)).astype(np.float32)
self.out = self.x + self.y.reshape(1, 32, 1, 1)
self.axis = 1
self.check_forward_backward()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "d8c2f5846af1dbf32d168957ac8bb467",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 80,
"avg_line_length": 40.08791208791209,
"alnum_prop": 0.4925986842105263,
"repo_name": "QiJune/Paddle",
"id": "9f452ffde74ee18d14f155fb5ed53fee57f12f49",
"size": "4261",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "274815"
},
{
"name": "C++",
"bytes": "8855056"
},
{
"name": "CMake",
"bytes": "304904"
},
{
"name": "Cuda",
"bytes": "1181169"
},
{
"name": "Dockerfile",
"bytes": "8142"
},
{
"name": "Go",
"bytes": "109508"
},
{
"name": "Perl",
"bytes": "11456"
},
{
"name": "Python",
"bytes": "4527408"
},
{
"name": "Shell",
"bytes": "165381"
}
],
"symlink_target": ""
}
|
'''
Entry point. Initialize logging, initialize the component factory,
start the asyncio event loop, and manage the component lifecycle.
'''
import logging
import yaml
import pytoml
import json
import sys
from .loadtest import LoadTest
LOG = logging.getLogger(__name__)
def init_logging(debug=False, filename='bfg.log'):
''' Configure logging: verbose or not '''
default_formatter = logging.Formatter(
"%(asctime)s [%(levelname)s] %(name)s: %(message)s", "%H:%M:%S")
dbg_formatter = logging.Formatter(
"%(asctime)s [%(levelname)s] %(name)s: %(message)s")
dbg_handler = logging.FileHandler(filename)
dbg_handler.setLevel(debug)
dbg_handler.setFormatter(dbg_formatter)
cmd_handler = logging.StreamHandler(sys.stdout)
cmd_handler.setLevel(logging.DEBUG if debug else logging.INFO)
cmd_handler.setFormatter(dbg_formatter if debug else default_formatter)
warn_handler = logging.StreamHandler(sys.stdout)
warn_handler.setLevel(logging.WARN)
warn_handler.setFormatter(dbg_formatter)
logger = logging.getLogger("hyper")
logger.setLevel(logging.WARNING)
logger = logging.getLogger("") # configure root logger
logger.setLevel(logging.DEBUG)
logger.addHandler(cmd_handler)
logger.addHandler(dbg_handler)
logging.getLogger().addHandler(dbg_handler)
def main():
''' Run test '''
config_filename = "load.yaml"
if len(sys.argv) > 1:
config_filename = sys.argv[1]
filename_components = config_filename.split('.')
if len(filename_components) > 1:
extension = filename_components[-1]
with open(config_filename, 'rb') as fin:
if extension == 'toml':
config = pytoml.load(fin)
elif extension in ['yaml', 'yml']:
config = yaml.load(fin)
elif extension == 'json':
config = json.load(fin)
else:
print("Config file has unsupported format: %s" % extension)
else:
print(
"Config file should have one of the following extensions:"
" .toml, .json, .yaml")
return 1
init_logging()
lt = LoadTest(config)
lt.run_test()
if __name__ == '__main__':
main()
|
{
"content_hash": "3ab7bbe32b45119f39155c2293672bbc",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 75,
"avg_line_length": 29.276315789473685,
"alnum_prop": 0.6368539325842697,
"repo_name": "direvius/bfg",
"id": "6eb729a873c5f0279e5ee022a6e3c3618e3e3968",
"size": "2225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bfg/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50193"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import sys
from os.path import abspath, dirname, join
# Make sure we get the version of this copy of Django
sys.path.insert(1, dirname(dirname(abspath(__file__))))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(abspath(join(dirname(__file__), "_ext")))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"djangodocs",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"ticket_role",
]
# Spelling check needs an additional module that is not installed by default.
# Add it only if spelling check is requested so docs can be generated without it.
if 'spelling' in sys.argv:
extensions.append("sphinxcontrib.spelling")
# Spelling language.
spelling_lang = 'en_US'
# Location of word list.
spelling_word_list_filename = 'spelling_wordlist'
# Add any paths that contain templates here, relative to this directory.
# templates_path = []
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'Django'
copyright = 'Django Software Foundation and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.8'
# The full version, including alpha/beta/rc tags.
try:
from django import VERSION, get_version
except ImportError:
release = version
else:
def django_release():
pep386ver = get_version()
if VERSION[3:5] == ('alpha', 0) and 'dev' not in pep386ver:
return pep386ver + '.dev'
return pep386ver
release = django_release()
# The "development version" of Django
django_next_version = '1.8'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# Location for .po/.mo translation files used when language is set
locale_dirs = ['locale/']
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
# Links to Python's docs should reference the most recent version of the 3.x
# branch, which is located at this URL.
intersphinx_mapping = {
'python': ('http://docs.python.org/3/', None),
'sphinx': ('http://sphinx-doc.org/', None),
'six': ('http://pythonhosted.org/six/', None),
}
# Python's docs don't change every week.
intersphinx_cache_limit = 90 # days
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "djangodocs"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_theme"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# HTML translator class for the builder
html_translator_class = "djangodocs.DjangoHTMLTranslator"
# Content template for the index page.
#html_index = ''
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Djangodoc'
modindex_common_prefix = ["django."]
# Appended to every page
rst_epilog = """
.. |django-users| replace:: :ref:`django-users <django-users-mailing-list>`
.. |django-core-mentorship| replace:: :ref:`django-core-mentorship <django-core-mentorship-mailing-list>`
.. |django-developers| replace:: :ref:`django-developers <django-developers-mailing-list>`
.. |django-announce| replace:: :ref:`django-announce <django-announce-mailing-list>`
.. |django-updates| replace:: :ref:`django-updates <django-updates-mailing-list>`
"""
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'preamble': ('\\DeclareUnicodeCharacter{2264}{\\ensuremath{\\le}}'
'\\DeclareUnicodeCharacter{2265}{\\ensuremath{\\ge}}')
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
#latex_documents = []
latex_documents = [
('contents', 'django.tex', 'Django Documentation',
'Django Software Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('contents', 'django', 'Django Documentation', ['Django Software Foundation'], 1)
]
# -- Options for Texinfo output ------------------------------------------------
# List of tuples (startdocname, targetname, title, author, dir_entry,
# description, category, toctree_only)
texinfo_documents = [(
master_doc, "django", "", "", "Django",
"Documentation of the Django framework", "Web development", False
)]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = 'Django Software Foundation'
epub_publisher = 'Django Software Foundation'
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'Django'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = 'djangodocs-epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
epub_cover = ('', 'epub-cover.html')
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# -- ticket options ------------------------------------------------------------
ticket_url = 'https://code.djangoproject.com/ticket/%s'
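# Example (illustrative): with the "ticket_role" extension listed in
# `extensions` above, a docs source file can reference a Trac ticket with
#   :ticket:`12345`
# which is rendered as a link built from ticket_url, i.e.
# https://code.djangoproject.com/ticket/12345.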
|
{
"content_hash": "30c883df8820eb54b8563489277ab327",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 105,
"avg_line_length": 32.504347826086956,
"alnum_prop": 0.699215266630997,
"repo_name": "nielsvanoch/django",
"id": "e6ced398e67772e4ebb5b5e39d577364fa27d3fa",
"size": "11700",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53429"
},
{
"name": "JavaScript",
"bytes": "103687"
},
{
"name": "Python",
"bytes": "10145425"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
}
|
import fieldmaptrack
import fieldmaptrack.beam as beam
import fieldmaptrack.track as track
import fieldmaptrack.dipole_analysis as analysis
import fieldmaptrack.multipoles as multipoles
import math
import numpy as np
beam_energy = 3 # [GeV]
hard_edge_length = 1000 # [mm]
peak_field = -0.6 # [T]
quad_K = -0.78 # [1/m^2]
min_rz = 600 # [mm]
s_step = 1.0 # [mm]
init_rx = 0.0 # [mm]
init_ry = 0.0 # [mm]
init_rz = 0.0 # [mm]
perp_grid = np.linspace(-5,5,11)
fit_monomials = [0,1,2,3,4,5]
gradient = - quad_K * beam.Beam.calc_brho(energy = beam_energy)[0] / 1000.0 # [T/mm]
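# Sanity check of the line above (assuming calc_brho returns the magnetic
# rigidity in T.m): for a 3 GeV electron beam Brho ~ 10.007 T.m, so
# gradient = 0.78 * 10.007 / 1000 ~ 7.8e-3 T/mm, i.e. about 7.8 T/m.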
def dipole(rx, ry, rz):
if rz >= -hard_edge_length/2.0 and rz <= hard_edge_length/2.0:
return (0,peak_field,0)
else:
return (0,0,0)
def dipole_with_gradient(rx, ry, rz):
bx = gradient * ry
by = peak_field + gradient * rx
if rz >= -hard_edge_length/2.0 and rz <= hard_edge_length/2.0:
return (0,by,0)
else:
return (0,0,0)
ebeam = beam.Beam(energy = beam_energy)
fieldmap = fieldmaptrack.FieldMap(field_function = dipole_with_gradient, rotation = 0.0)
ref_traj = track.Trajectory(beam = ebeam, fieldmap = fieldmap)
new_traj = track.Trajectory(beam = ebeam, fieldmap = fieldmap)
ref_traj.calc_trajectory(min_rz = min_rz, s_step = s_step,
init_rz = init_rz, init_rx = init_rx, init_ry = init_ry)
new_traj.calc_trajectory(min_rz = min_rz, s_step = s_step,
init_rz = init_rz, init_rx = init_rx + 10, init_ry = init_ry)
import matplotlib.pyplot as plt
plt.plot(ref_traj.rz, ref_traj.rx)
plt.plot(new_traj.rz, new_traj.rx)
plt.show()
sf = track.SerretFrenetCoordSystem(ref_traj, point_idx = 500);
idx, alpha, dx = sf.find_intersection(new_traj)
new_traj.px
multipoles = multipoles.Multipoles(trajectory = ref_traj, perpendicular_grid=perp_grid, normal_field_fitting_monomials=fit_monomials)
multipoles.calc_multipoles(is_ref_trajectory_flag = False)
import matplotlib.pyplot as plt
plt.plot(ref_traj.s, multipoles.normal_multipoles[1])
#plt.plot(ref_traj.s, ref_traj.by)
plt.show()
print('ok')
#
#
# config = analysis.Config()
#
# # raw-field analysis
# config.config_label = 'bc_model2_controlgap_50mm_modelo_mecanico_inclinado_450urad' # identification label
# config.fmap_filename = '/home/fac_files/data/sirius/si/magnet_modelling/bc/fieldmaps/2014-10-07_Dipolo_Anel_BC_Modelo2_gap_lateral_50mm_modelo_mecanico_-50_50mm_-2000_2000mm.txt'
# config.beam_energy = 3.0 # [GeV] electron beam energy
# config.model_hardedge_length = 828.08 # [mm] model hard-edge length of the magnet
# config.model_nominal_angle = 4.10351 # 2.76654 # [deg] model nominal deflection angle of the magnet
# config.traj_rk_s_step = 0.1 # [mm] step in s for the 4th-order RK integration
# config.traj_center_sagitta_flag = False # centers trajectory sagitta in good field region of the magnet
# config.traj_force_midplane_flag = True # forces trajectory on midplane (setting ry = py = 0)
# config.traj_init_rx = 0.0 # initial rx
# config.traj_load_filename = None
#
# config.multipoles_main_monomials = [0,1]
# config.multipoles_perpendicular_grid = np.linspace(-10,10,41) # grid of points on perpendicular line to ref trajectory [mm]
# config.multipoles_normal_field_fitting_monomials = (0,1,2,3,4,5,6,7,8,9,10) # monomials to include in the polynomial fit of multipoles
# config.multipoles_r0 = 11.7 # [mm] horizontal position at which polynomial fields are calculated relative to the principal multipole
#
# config.model_segmentation = 6 * [828.08/6]
# #config.model_segmentation = (106.01, 106.01, 53.005, 53.005, 53.005, 53.005) #828.08,)
#
# if __name__ == "__main__":
#
# print('DIPOLE ANALYSIS')
# print('===============')
#
# print('{0:<35s} {1}'.format('label:', config.config_label))
#
# config = analysis.raw_fieldmap_analysis(config)
# config = analysis.trajectory_analysis(config)
# #config = analysis.multipoles_analysis(config)
# #config = analysis.model_analysis(config)
#
# #print()
# #print()
# #import math
# #brho = 10.00692271077752
# #theta_x = (180.0/math.pi) * math.atan(config.traj.px[-1]/config.traj.pz[-1])
# #print('deflection [deg]: ' + str(theta_x * 2))
# #print('K [1/m^2] : ' + str(-config.multipoles.normal_multipoles_integral[1]*2/0.82808/brho))
|
{
"content_hash": "59248bf18012a7c7ba2f3fdb8ebc0e37",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 188,
"avg_line_length": 40.92857142857143,
"alnum_prop": 0.6400523560209425,
"repo_name": "lnls-fac/fieldmaptrack",
"id": "651f2d41cd4d70a1bba4e31897a854b13e3e94a8",
"size": "4609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/example_test_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1037228"
},
{
"name": "Makefile",
"bytes": "473"
},
{
"name": "Python",
"bytes": "200576"
}
],
"symlink_target": ""
}
|
import logging
import mock
import unittest
from mock import patch
from svc_monitor.svc_monitor import SvcMonitor
from pysandesh.sandesh_base import Sandesh
from vnc_api.vnc_api import ServiceInstance, ServiceInstanceType
from vnc_api.vnc_api import ServiceScaleOutType
from vnc_api.vnc_api import ServiceTemplate, ServiceTemplateType
class Arguments(object):
def __init__(self):
self.disc_server_ip = None
self.disc_server_port = None
self.collectors = None
self.http_server_port = 0
self.log_local = None
self.log_category = None
self.log_level = None
self.log_file = '/var/log/contrail/svc_monitor.log'
self.use_syslog = False
self.syslog_facility = Sandesh._DEFAULT_SYSLOG_FACILITY
self.cluster_id = None
self.si_netns_scheduler_driver = \
'svc_monitor.scheduler.vrouter_scheduler.RandomScheduler'
self.analytics_server_ip = '127.0.0.1'
self.analytics_server_port = '8081'
class SvcMonitorInitTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@patch('svc_monitor.svc_monitor.UveSvcInstanceConfigTrace')
@patch.object(SvcMonitor, '_cassandra_init')
    def test_vm_instance(self, cassandra_init_mock, uve_mock):
logging.debug("init")
self._api_client = mock.Mock()
arguments = Arguments()
with patch.object(logging.handlers, 'RotatingFileHandler'):
self._svc_monitor = SvcMonitor(self._api_client, arguments)
self._svc_monitor._svc_si_cf = mock.Mock()
self._svc_monitor._svc_vm_cf = mock.Mock()
self._svc_monitor._novaclient_get = mock.Mock()
identities = {
'service-template': 'default-domain:test:template1',
'service-instance': 'default-domain:test:service1'
}
tmpl_attr = ServiceTemplateType()
tmpl_attr.service_mode = 'in-network-nat'
tmpl_attr.service_type = 'firewall'
tmpl_attr.image_name = 'test-template'
tmpl_attr.service_virtualization_type = 'virtual-machine'
template = ServiceTemplate(service_template_properties=tmpl_attr)
svc_attr = ServiceInstanceType()
svc_attr.left_virtual_network = 'default-project:demo:test'
svc_attr.right_virtual_network = 'default-project:admin:public'
svc_attr.scale_out = ServiceScaleOutType()
service = ServiceInstance('test-instance',
service_instance_properties=svc_attr)
self._api_client.service_template_read.return_value = template
self._api_client.service_instance_read.return_value = service
self._svc_monitor.\
_addmsg_service_instance_service_template(identities)
expected = {
'instance_name': 'test-instance_1',
'si_fq_str': 'default-domain:default-project:test-instance',
'instance_type': 'virtual-machine'
}
self._svc_monitor._svc_vm_cf.insert.assert_called_with(
mock.ANY, expected)
|
{
"content_hash": "a83a0a5221086006b2599bc335d6f3fe",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 77,
"avg_line_length": 40.43037974683544,
"alnum_prop": 0.6255479023168441,
"repo_name": "JioCloud/contrail-controller",
"id": "5a0463af65854c0cf9cff9ed1e936f942f6f312a",
"size": "3194",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/config/svc-monitor/svc_monitor/tests/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
Script that trains Weave models on Tox21 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
# Load Tox21 dataset
tox21_tasks, tox21_datasets, transformers = dc.molnet.load_tox21(
featurizer='Weave')
train_dataset, valid_dataset, test_dataset = tox21_datasets
# Fit models
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
max_atoms_train = max([mol.get_num_atoms() for mol in train_dataset.X])
max_atoms_valid = max([mol.get_num_atoms() for mol in valid_dataset.X])
max_atoms_test = max([mol.get_num_atoms() for mol in test_dataset.X])
max_atoms = max([max_atoms_train, max_atoms_valid, max_atoms_test])
n_atom_feat = 75
n_pair_feat = 14
# Batch size of models
batch_size = 64
n_feat = 128
graph = dc.nn.SequentialWeaveGraph(
max_atoms=max_atoms, n_atom_feat=n_atom_feat, n_pair_feat=n_pair_feat)
graph.add(dc.nn.WeaveLayer(max_atoms, 75, 14))
#graph.add(dc.nn.WeaveLayer(max_atoms, 50, 50))
graph.add(dc.nn.WeaveConcat(batch_size, n_output=n_feat))
graph.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph.add(dc.nn.WeaveGather(batch_size, n_input=n_feat, gaussian_expand=False))
model = dc.models.MultitaskGraphClassifier(
graph,
len(tox21_tasks),
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(train_dataset, nb_epoch=20, log_every_N_batches=5)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
|
{
"content_hash": "a641baa9db4c8eec7ba6a634e91a01ed",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 79,
"avg_line_length": 29.65625,
"alnum_prop": 0.7276080084299262,
"repo_name": "joegomes/deepchem",
"id": "9d2fe0955f31451e47849dcb760372da1520506f",
"size": "1898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/tox21/tox21_weave.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1424453"
},
{
"name": "Shell",
"bytes": "4837"
}
],
"symlink_target": ""
}
|
"""Contains functions for evaluation and summarization of metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
def _get_or_create_eval_step():
"""Gets or creates the eval step `Tensor`.
Returns:
A `Tensor` representing a counter for the evaluation step.
Raises:
ValueError: If multiple `Tensors` have been added to the
`tf.GraphKeys.EVAL_STEP` collection.
"""
graph = ops.get_default_graph()
eval_steps = graph.get_collection(ops.GraphKeys.EVAL_STEP)
if len(eval_steps) == 1:
return eval_steps[0]
elif len(eval_steps) > 1:
raise ValueError('Multiple tensors added to tf.GraphKeys.EVAL_STEP')
else:
counter = variable_scope.get_variable(
'eval_step',
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.EVAL_STEP])
return counter
def _get_latest_eval_step_value(update_ops):
"""Gets the eval step `Tensor` value after running `update_ops`.
Args:
update_ops: A list of `Tensors` or a dictionary of names to `Tensors`,
which are run before reading the eval step value.
Returns:
A `Tensor` representing the value for the evaluation step.
"""
if isinstance(update_ops, dict):
update_ops = list(update_ops.values())
with ops.control_dependencies(update_ops):
return array_ops.identity(_get_or_create_eval_step().read_value())
class _MultiStepStopAfterNEvalsHook(session_run_hook.SessionRunHook):
"""Run hook used by the evaluation routines to run the `eval_ops` N times."""
def __init__(self, num_evals, steps_per_run=1):
"""Constructs the run hook.
Args:
      num_evals: The number of evaluations to run for. If set to None, will
iterate the dataset until all inputs are exhausted.
steps_per_run: Number of steps executed per run call.
"""
self._num_evals = num_evals
self._evals_completed = None
self._steps_per_run_initial_value = steps_per_run
def _set_evals_completed_tensor(self, updated_eval_step):
self._evals_completed = updated_eval_step
def begin(self):
self._steps_per_run_variable = \
basic_session_run_hooks.get_or_create_steps_per_run_variable()
def after_create_session(self, session, coord):
# Update number of steps to run in the first run call
if self._num_evals is None:
steps = self._steps_per_run_initial_value
else:
steps = min(self._steps_per_run_initial_value, self._num_evals)
self._steps_per_run_variable.load(steps, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs({
'evals_completed': self._evals_completed
})
def after_run(self, run_context, run_values):
evals_completed = run_values.results['evals_completed']
# Update number of steps to run in the next iteration
if self._num_evals is None:
steps = self._steps_per_run_initial_value
else:
steps = min(self._num_evals - evals_completed,
self._steps_per_run_initial_value)
self._steps_per_run_variable.load(steps, session=run_context.session)
if self._num_evals is None:
logging.info('Evaluation [%d]', evals_completed)
else:
logging.info('Evaluation [%d/%d]', evals_completed, self._num_evals)
if self._num_evals is not None and evals_completed >= self._num_evals:
run_context.request_stop()
class _StopAfterNEvalsHook(session_run_hook.SessionRunHook):
"""Run hook used by the evaluation routines to run the `eval_ops` N times."""
def __init__(self, num_evals, log_progress=True):
"""Constructs the run hook.
Args:
      num_evals: The number of evaluations to run for. If set to None, will
iterate the dataset until all inputs are exhausted.
log_progress: Whether to log evaluation progress, defaults to True.
"""
# The number of evals to run for.
self._num_evals = num_evals
self._evals_completed = None
self._log_progress = log_progress
# Reduce logging frequency if there are 20 or more evaluations.
self._log_frequency = (1 if (num_evals is None or num_evals < 20)
else math.floor(num_evals / 10.))
def _set_evals_completed_tensor(self, updated_eval_step):
self._evals_completed = updated_eval_step
def before_run(self, run_context):
return session_run_hook.SessionRunArgs({
'evals_completed': self._evals_completed
})
def after_run(self, run_context, run_values):
evals_completed = run_values.results['evals_completed']
if self._log_progress:
if self._num_evals is None:
logging.info('Evaluation [%d]', evals_completed)
else:
if ((evals_completed % self._log_frequency) == 0 or
(self._num_evals == evals_completed)):
logging.info('Evaluation [%d/%d]', evals_completed, self._num_evals)
if self._num_evals is not None and evals_completed >= self._num_evals:
run_context.request_stop()
def _evaluate_once(checkpoint_path,
master='',
scaffold=None,
eval_ops=None,
feed_dict=None,
final_ops=None,
final_ops_feed_dict=None,
hooks=None,
config=None):
"""Evaluates the model at the given checkpoint path.
During a single evaluation, the `eval_ops` is run until the session is
interrupted or requested to finish. This is typically requested via a
`tf.contrib.training.StopAfterNEvalsHook` which results in `eval_ops` running
the requested number of times.
Optionally, a user can pass in `final_ops`, a single `Tensor`, a list of
`Tensors` or a dictionary from names to `Tensors`. The `final_ops` is
evaluated a single time after `eval_ops` has finished running and the fetched
values of `final_ops` are returned. If `final_ops` is left as `None`, then
`None` is returned.
One may also consider using a `tf.contrib.training.SummaryAtEndHook` to record
summaries after the `eval_ops` have run. If `eval_ops` is `None`, the
summaries run immediately after the model checkpoint has been restored.
Note that `evaluate_once` creates a local variable used to track the number of
evaluations run via `tf.contrib.training.get_or_create_eval_step`.
Consequently, if a custom local init op is provided via a `scaffold`, the
caller should ensure that the local init op also initializes the eval step.
Args:
checkpoint_path: The path to a checkpoint to use for evaluation.
master: The BNS address of the TensorFlow master.
scaffold: An tf.train.Scaffold instance for initializing variables and
restoring variables. Note that `scaffold.init_fn` is used by the function
to restore the checkpoint. If you supply a custom init_fn, then it must
also take care of restoring the model from its checkpoint.
eval_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`, which is run until the session is requested to stop,
commonly done by a `tf.contrib.training.StopAfterNEvalsHook`.
feed_dict: The feed dictionary to use when executing the `eval_ops`.
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when evaluating `final_ops`.
hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the
evaluation loop.
config: An instance of `tf.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used.
Returns:
The fetched values of `final_ops` or `None` if `final_ops` is `None`.
"""
eval_step = _get_or_create_eval_step()
# Prepare the run hooks.
hooks = list(hooks or [])
if eval_ops is not None:
if any(isinstance(h, _MultiStepStopAfterNEvalsHook) for h in hooks):
steps_per_run_variable = \
basic_session_run_hooks.get_or_create_steps_per_run_variable()
update_eval_step = state_ops.assign_add(
eval_step,
math_ops.cast(steps_per_run_variable, dtype=eval_step.dtype),
use_locking=True)
else:
update_eval_step = state_ops.assign_add(eval_step, 1, use_locking=True)
if isinstance(eval_ops, dict):
eval_ops['update_eval_step'] = update_eval_step
elif isinstance(eval_ops, (tuple, list)):
eval_ops = list(eval_ops) + [update_eval_step]
else:
eval_ops = [eval_ops, update_eval_step]
eval_step_value = _get_latest_eval_step_value(eval_ops)
for h in hooks:
if isinstance(h, (_StopAfterNEvalsHook, _MultiStepStopAfterNEvalsHook)):
h._set_evals_completed_tensor(eval_step_value) # pylint: disable=protected-access
logging.info('Starting evaluation at ' +
time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime()))
# Prepare the session creator.
session_creator = monitored_session.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_filename_with_path=checkpoint_path,
master=master,
config=config)
final_ops_hook = basic_session_run_hooks.FinalOpsHook(
final_ops, final_ops_feed_dict)
hooks.append(final_ops_hook)
with monitored_session.MonitoredSession(
session_creator=session_creator, hooks=hooks) as session:
if eval_ops is not None:
while not session.should_stop():
session.run(eval_ops, feed_dict)
logging.info('Finished evaluation at ' +
time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()))
return final_ops_hook.final_ops_values
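# Illustrative usage sketch (comments only; the metric names and checkpoint
# path below are assumptions, not part of this module). A single evaluation
# pass runs `eval_ops` until a _StopAfterNEvalsHook requests a stop, then
# fetches `final_ops` once:
#
#   value_op, update_op = some_streaming_metric(labels, predictions)
#   results = _evaluate_once(
#       '/tmp/model.ckpt-1000',
#       eval_ops=update_op,
#       final_ops={'metric': value_op},
#       hooks=[_StopAfterNEvalsHook(num_evals=100)])
#   # results['metric'] holds the fetched value of `value_op`.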
|
{
"content_hash": "c402ba99f4f25353b11bb45a23db774b",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 90,
"avg_line_length": 38.95075757575758,
"alnum_prop": 0.685694836137314,
"repo_name": "jbedorf/tensorflow",
"id": "35f0b6e26492912c2c80799de2a99d29bf737793",
"size": "10968",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/evaluation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "647467"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59799751"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1508512"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908330"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94633"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15108"
},
{
"name": "Pascal",
"bytes": "770"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46379626"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "480235"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
}
|
""" Testing predictions uploading from stdin
"""
from bigmler.tests.world import (world, common_setup_module,
common_teardown_module, teardown_class)
import bigmler.tests.basic_tst_prediction_steps as test_pred
import bigmler.tests.stdin_input_steps as stdin
def setup_module():
"""Setup for the module
"""
common_setup_module()
def teardown_module():
"""Teardown for the module
"""
common_teardown_module()
class TestStdin(object):
def teardown(self):
"""Calling generic teardown for every method
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
teardown_class()
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully building a model from data streamed to stdin:
Given I create a BigML source from stdin using train "<data>" file and logging in "<output_dir>"
Then I check that the source has been created
Examples:
| data | output_dir |
| ../data/iris.csv | ./scenario_st_1 |
"""
print(self.test_scenario1.__doc__)
examples = [
['data/iris.csv', 'scenario_st_1']]
for example in examples:
print("\nTesting with:\n", example)
stdin.i_create_source_from_stdin(self, data=example[0], output_dir=example[1])
test_pred.i_check_create_source(self)
def test_scenario2(self):
"""
Scenario: Successfully building predictions for data streamed to stdin:
Given I create BigML resources uploading train "<data>" file to test "<test>" read from stdin with name "<name>" and log predictions in "<output>"
And I check that the source has been created
And I check that the dataset has been created
And I check that the model has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
| data | test | output |predictions_file | name |
| ../data/iris.csv | ../data/test_iris.csv | ./scenario_st_2/predictions.csv | ./check_files/predictions_iris.csv | Source name: áéí |
"""
print(self.test_scenario2.__doc__)
examples = [
['data/iris.csv', 'data/test_iris.csv', 'scenario_st_2/predictions.csv', 'check_files/predictions_iris.csv', 'Source name: áéí']]
for example in examples:
print("\nTesting with:\n", example)
stdin.i_create_all_resources_to_test_from_stdin(self, data=example[0], test=example[1], name=example[4], output=example[2])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_model(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[3])
|
{
"content_hash": "2ced2a937845bc1a9f5c93aeb248e09e",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 162,
"avg_line_length": 38.833333333333336,
"alnum_prop": 0.5631514408338443,
"repo_name": "bigmlcom/bigmler",
"id": "e611724e532c7151d1de00cec4fd5ffd8a9a91b3",
"size": "3892",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bigmler/tests/test_08_stdin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "26465"
},
{
"name": "JavaScript",
"bytes": "73784"
},
{
"name": "Jupyter Notebook",
"bytes": "802"
},
{
"name": "Python",
"bytes": "2081730"
},
{
"name": "R",
"bytes": "71763"
}
],
"symlink_target": ""
}
|
'''
Create A Progress Bar for Downloads
Create a progress bar for applications that can keep track of a download in progress. The progress bar will be on a separate thread and will communicate with the main thread using delegates.
'''
import threading
import sys
import urllib.request
import os
class Download(object):
def __init__(self, obj):
self.obj = obj
def file(self, link):
headers = { 'User-Agent' : 'Mozilla/5.0' }
save_to = "add path"
file_name = link.split("/")[-1]
with urllib.request.urlopen(urllib.request.Request(link, None, headers)) as url:
meta = url.info()
file_size = meta.get("Content-length")
progress_dl = 0
block_size = 1024
with open(os.path.join(save_to, file_name), 'wb') as f:
while float(file_size) != float(progress_dl):
buffer = url.read(block_size)
if not buffer:
break
progress_dl += len(buffer)
f.write(buffer)
#calling DownloadProgress class
arr = [progress_dl, file_size]
t1 = threading.Thread(target = self.obj.run, args = [arr,])
t1.start()
t1.join()
class DownloadProgress(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self, arr):
progress_dl = arr[0]
file_size = arr[1]
p = float(progress_dl) / float(file_size)
status = r"{0} [{1:.2%}]".format(progress_dl, p)
status = status + chr(8)*(len(status)+1)
sys.stdout.write(status)
url = "add url"
t1 = DownloadProgress()
Download(t1).file(url)
|
{
"content_hash": "ed4ba284ad8b17ba7f9b52eedaf5c2c5",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 190,
"avg_line_length": 29.516666666666666,
"alnum_prop": 0.5499717673630717,
"repo_name": "pragalakis/100-python-projects",
"id": "2e4008e2af1c41f627f571ab4d7669b3bd14b21c",
"size": "1771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "threading/progress-bar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65618"
}
],
"symlink_target": ""
}
|
import struct
import time
import io
import cherrypy
from cherrypy._cpcompat import text_or_bytes
from cherrypy.lib import file_generator
from cherrypy.lib import is_closable_iterator
from cherrypy.lib import set_vary_header
def decode(encoding=None, default_encoding='utf-8'):
"""Replace or extend the list of charsets used to decode a request entity.
Either argument may be a single string or a list of strings.
encoding
If not None, restricts the set of charsets attempted while decoding
a request entity to the given set (even if a different charset is
given in the Content-Type request header).
default_encoding
Only in effect if the 'encoding' argument is not given.
If given, the set of charsets attempted while decoding a request
entity is *extended* with the given value(s).
"""
body = cherrypy.request.body
if encoding is not None:
if not isinstance(encoding, list):
encoding = [encoding]
body.attempt_charsets = encoding
elif default_encoding:
if not isinstance(default_encoding, list):
default_encoding = [default_encoding]
body.attempt_charsets = body.attempt_charsets + default_encoding
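# Example (a minimal sketch of typical tool configuration; the application
# layout is an assumption, not part of this module):
#
#   config = {'/': {'tools.decode.on': True,
#                   'tools.decode.default_encoding': 'utf-8'}}
#
# With this in place, request entities are decoded before the handler runs,
# using the charset from the request's Content-Type header when present and
# falling back to the default_encoding given here.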
class UTF8StreamEncoder:
def __init__(self, iterator):
self._iterator = iterator
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
res = next(self._iterator)
if isinstance(res, str):
res = res.encode('utf-8')
return res
def close(self):
if is_closable_iterator(self._iterator):
self._iterator.close()
def __getattr__(self, attr):
if attr.startswith('__'):
raise AttributeError(self, attr)
return getattr(self._iterator, attr)
class ResponseEncoder:
default_encoding = 'utf-8'
failmsg = 'Response body could not be encoded with %r.'
encoding = None
errors = 'strict'
text_only = True
add_charset = True
debug = False
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
self.attempted_charsets = set()
request = cherrypy.serving.request
if request.handler is not None:
# Replace request.handler with self
if self.debug:
cherrypy.log('Replacing request.handler', 'TOOLS.ENCODE')
self.oldhandler = request.handler
request.handler = self
def encode_stream(self, encoding):
"""Encode a streaming response body.
Use a generator wrapper, and just pray it works as the stream is
being written out.
"""
if encoding in self.attempted_charsets:
return False
self.attempted_charsets.add(encoding)
def encoder(body):
for chunk in body:
if isinstance(chunk, str):
chunk = chunk.encode(encoding, self.errors)
yield chunk
self.body = encoder(self.body)
return True
def encode_string(self, encoding):
"""Encode a buffered response body."""
if encoding in self.attempted_charsets:
return False
self.attempted_charsets.add(encoding)
body = []
for chunk in self.body:
if isinstance(chunk, str):
try:
chunk = chunk.encode(encoding, self.errors)
except (LookupError, UnicodeError):
return False
body.append(chunk)
self.body = body
return True
def find_acceptable_charset(self):
request = cherrypy.serving.request
response = cherrypy.serving.response
if self.debug:
cherrypy.log('response.stream %r' %
response.stream, 'TOOLS.ENCODE')
if response.stream:
encoder = self.encode_stream
else:
encoder = self.encode_string
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
# Encoded strings may be of different lengths from their
# unicode equivalents, and even from each other. For example:
# >>> t = u"\u7007\u3040"
# >>> len(t)
# 2
# >>> len(t.encode("UTF-8"))
# 6
# >>> len(t.encode("utf7"))
# 8
del response.headers['Content-Length']
# Parse the Accept-Charset request header, and try to provide one
# of the requested charsets (in order of user preference).
encs = request.headers.elements('Accept-Charset')
charsets = [enc.value.lower() for enc in encs]
if self.debug:
cherrypy.log('charsets %s' % repr(charsets), 'TOOLS.ENCODE')
if self.encoding is not None:
# If specified, force this encoding to be used, or fail.
encoding = self.encoding.lower()
if self.debug:
cherrypy.log('Specified encoding %r' %
encoding, 'TOOLS.ENCODE')
if (not charsets) or '*' in charsets or encoding in charsets:
if self.debug:
cherrypy.log('Attempting encoding %r' %
encoding, 'TOOLS.ENCODE')
if encoder(encoding):
return encoding
else:
if not encs:
if self.debug:
cherrypy.log('Attempting default encoding %r' %
self.default_encoding, 'TOOLS.ENCODE')
# Any character-set is acceptable.
if encoder(self.default_encoding):
return self.default_encoding
else:
raise cherrypy.HTTPError(500, self.failmsg %
self.default_encoding)
else:
for element in encs:
if element.qvalue > 0:
if element.value == '*':
# Matches any charset. Try our default.
if self.debug:
cherrypy.log('Attempting default encoding due '
'to %r' % element, 'TOOLS.ENCODE')
if encoder(self.default_encoding):
return self.default_encoding
else:
encoding = element.value
if self.debug:
cherrypy.log('Attempting encoding %s (qvalue >'
'0)' % element, 'TOOLS.ENCODE')
if encoder(encoding):
return encoding
if '*' not in charsets:
# If no "*" is present in an Accept-Charset field, then all
# character sets not explicitly mentioned get a quality
# value of 0, except for ISO-8859-1, which gets a quality
# value of 1 if not explicitly mentioned.
iso = 'iso-8859-1'
if iso not in charsets:
if self.debug:
cherrypy.log('Attempting ISO-8859-1 encoding',
'TOOLS.ENCODE')
if encoder(iso):
return iso
# No suitable encoding found.
ac = request.headers.get('Accept-Charset')
if ac is None:
msg = 'Your client did not send an Accept-Charset header.'
else:
msg = 'Your client sent this Accept-Charset header: %s.' % ac
_charsets = ', '.join(sorted(self.attempted_charsets))
msg += ' We tried these charsets: %s.' % (_charsets,)
raise cherrypy.HTTPError(406, msg)
def __call__(self, *args, **kwargs):
response = cherrypy.serving.response
self.body = self.oldhandler(*args, **kwargs)
self.body = prepare_iter(self.body)
ct = response.headers.elements('Content-Type')
if self.debug:
cherrypy.log('Content-Type: %r' % [str(h)
for h in ct], 'TOOLS.ENCODE')
if ct and self.add_charset:
ct = ct[0]
if self.text_only:
if ct.value.lower().startswith('text/'):
if self.debug:
cherrypy.log(
'Content-Type %s starts with "text/"' % ct,
'TOOLS.ENCODE')
do_find = True
else:
if self.debug:
cherrypy.log('Not finding because Content-Type %s '
'does not start with "text/"' % ct,
'TOOLS.ENCODE')
do_find = False
else:
if self.debug:
cherrypy.log('Finding because not text_only',
'TOOLS.ENCODE')
do_find = True
if do_find:
# Set "charset=..." param on response Content-Type header
ct.params['charset'] = self.find_acceptable_charset()
if self.debug:
cherrypy.log('Setting Content-Type %s' % ct,
'TOOLS.ENCODE')
response.headers['Content-Type'] = str(ct)
return self.body
def prepare_iter(value):
"""
Ensure response body is iterable and resolves to False when empty.
"""
if isinstance(value, text_or_bytes):
# strings get wrapped in a list because iterating over a single
# item list is much faster than iterating over every character
# in a long string.
if value:
value = [value]
else:
# [''] doesn't evaluate to False, so replace it with [].
value = []
# Don't use isinstance here; io.IOBase which has an ABC takes
# 1000 times as long as, say, isinstance(value, str)
elif hasattr(value, 'read'):
value = file_generator(value)
elif value is None:
value = []
return value
# GZIP
def compress(body, compress_level):
"""Compress 'body' at the given compress_level."""
import zlib
# See http://www.gzip.org/zlib/rfc-gzip.html
yield b'\x1f\x8b' # ID1 and ID2: gzip marker
yield b'\x08' # CM: compression method
yield b'\x00' # FLG: none set
# MTIME: 4 bytes
yield struct.pack('<L', int(time.time()) & int('FFFFFFFF', 16))
yield b'\x02' # XFL: max compression, slowest algo
yield b'\xff' # OS: unknown
crc = zlib.crc32(b'')
size = 0
zobj = zlib.compressobj(compress_level,
zlib.DEFLATED, -zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL, 0)
for line in body:
size += len(line)
crc = zlib.crc32(line, crc)
yield zobj.compress(line)
yield zobj.flush()
# CRC32: 4 bytes
yield struct.pack('<L', crc & int('FFFFFFFF', 16))
# ISIZE: 4 bytes
yield struct.pack('<L', size & int('FFFFFFFF', 16))
def decompress(body):
import gzip
zbuf = io.BytesIO()
zbuf.write(body)
zbuf.seek(0)
zfile = gzip.GzipFile(mode='rb', fileobj=zbuf)
data = zfile.read()
zfile.close()
return data
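# Round-trip sanity check (illustrative, not executed here): compress() emits a
# complete gzip member, so decompress() recovers the original bytes.
#
#   data = b''.join(compress([b'hello world'], 5))
#   assert decompress(data) == b'hello world'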
def gzip(compress_level=5, mime_types=['text/html', 'text/plain'],
debug=False):
"""Try to gzip the response body if Content-Type in mime_types.
cherrypy.response.headers['Content-Type'] must be set to one of the
values in the mime_types arg before calling this function.
The provided list of mime-types must be of one of the following form:
* `type/subtype`
* `type/*`
* `type/*+subtype`
No compression is performed if any of the following hold:
* The client sends no Accept-Encoding request header
* No 'gzip' or 'x-gzip' is present in the Accept-Encoding header
* No 'gzip' or 'x-gzip' with a qvalue > 0 is present
* The 'identity' value is given with a qvalue > 0.
"""
request = cherrypy.serving.request
response = cherrypy.serving.response
set_vary_header(response, 'Accept-Encoding')
if not response.body:
# Response body is empty (might be a 304 for instance)
if debug:
cherrypy.log('No response body', context='TOOLS.GZIP')
return
# If returning cached content (which should already have been gzipped),
# don't re-zip.
if getattr(request, 'cached', False):
if debug:
cherrypy.log('Not gzipping cached response', context='TOOLS.GZIP')
return
acceptable = request.headers.elements('Accept-Encoding')
if not acceptable:
# If no Accept-Encoding field is present in a request,
# the server MAY assume that the client will accept any
# content coding. In this case, if "identity" is one of
# the available content-codings, then the server SHOULD use
# the "identity" content-coding, unless it has additional
# information that a different content-coding is meaningful
# to the client.
if debug:
cherrypy.log('No Accept-Encoding', context='TOOLS.GZIP')
return
ct = response.headers.get('Content-Type', '').split(';')[0]
for coding in acceptable:
if coding.value == 'identity' and coding.qvalue != 0:
if debug:
cherrypy.log('Non-zero identity qvalue: %s' % coding,
context='TOOLS.GZIP')
return
if coding.value in ('gzip', 'x-gzip'):
if coding.qvalue == 0:
if debug:
cherrypy.log('Zero gzip qvalue: %s' % coding,
context='TOOLS.GZIP')
return
if ct not in mime_types:
# If the list of provided mime-types contains tokens
# such as 'text/*' or 'application/*+xml',
# we go through them and find the most appropriate one
# based on the given content-type.
# The pattern matching is only caring about the most
# common cases, as stated above, and doesn't support
# for extra parameters.
found = False
if '/' in ct:
ct_media_type, ct_sub_type = ct.split('/')
for mime_type in mime_types:
if '/' in mime_type:
media_type, sub_type = mime_type.split('/')
if ct_media_type == media_type:
if sub_type == '*':
found = True
break
elif '+' in sub_type and '+' in ct_sub_type:
ct_left, ct_right = ct_sub_type.split('+')
left, right = sub_type.split('+')
if left == '*' and ct_right == right:
found = True
break
if not found:
if debug:
cherrypy.log('Content-Type %s not in mime_types %r' %
(ct, mime_types), context='TOOLS.GZIP')
return
if debug:
cherrypy.log('Gzipping', context='TOOLS.GZIP')
# Return a generator that compresses the page
response.headers['Content-Encoding'] = 'gzip'
response.body = compress(response.body, compress_level)
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers['Content-Length']
return
if debug:
cherrypy.log('No acceptable encoding found.', context='GZIP')
cherrypy.HTTPError(406, 'identity, gzip').set_response()
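# Example (a minimal sketch of typical tool configuration; paths and mime
# types are illustrative assumptions):
#
#   config = {'/': {'tools.gzip.on': True,
#                   'tools.gzip.compress_level': 5,
#                   'tools.gzip.mime_types': ['text/html', 'text/plain']}}
#
# Responses whose Content-Type matches mime_types are then gzip-encoded when
# the client's Accept-Encoding header allows it, per the rules documented in
# gzip() above.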
|
{
"content_hash": "f4a2fcd7af59a2c466b3f4172f1a9f4e",
"timestamp": "",
"source": "github",
"line_count": 434,
"max_line_length": 79,
"avg_line_length": 37.73271889400922,
"alnum_prop": 0.5244870542256962,
"repo_name": "Safihre/cherrypy",
"id": "54a7a8a8b1365a722b3bef4f0d6ef0ecd0fd0157",
"size": "16376",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cherrypy/lib/encoding.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17"
},
{
"name": "HTML",
"bytes": "510"
},
{
"name": "Python",
"bytes": "1029257"
}
],
"symlink_target": ""
}
|
def makeList(stop, increment):
i = 0
numbers = []
while i < stop:
numbers.append(i)
i += increment
return numbers
def alt(stop, increment):
return range(0, stop, increment)
print makeList(9, 2)
print alt(9, 2)
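# Both calls above print the same list: [0, 2, 4, 6, 8].
# A quick self-check (a small illustrative addition; it only uses the two
# functions defined above):
assert makeList(9, 2) == alt(9, 2)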
|
{
"content_hash": "a41466160ef0c8b6228df4c7a8c6b811",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 33,
"avg_line_length": 15.785714285714286,
"alnum_prop": 0.6742081447963801,
"repo_name": "vanonselenp/Learning",
"id": "a12a49b26059b59dc1479e8c656e593719a5b355",
"size": "221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/LPTHW/ex33.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Clojure",
"bytes": "33637"
},
{
"name": "HTML",
"bytes": "11006"
},
{
"name": "JavaScript",
"bytes": "53"
},
{
"name": "Python",
"bytes": "26164"
},
{
"name": "Racket",
"bytes": "9279"
}
],
"symlink_target": ""
}
|
import functools
import threading
from dragonflow.common import exceptions as df_exceptions
from dragonflow.tests import base
class TestDbApi(base.BaseTestCase):
def test_simple_create_get(self):
self.driver.create_table('test_table')
self.addCleanup(self.driver.delete_table, 'test_table')
self.driver.create_key('test_table', 'k1', 'v1')
self.assertEqual('v1', self.driver.get_key('test_table', 'k1'))
def test_get_not_found(self):
self.driver.create_table('test_table')
self.addCleanup(self.driver.delete_table, 'test_table')
self.assertRaises(df_exceptions.DBKeyNotFound,
functools.partial(self.driver.get_key,
'test_table', 'k1'))
def test_delete_key(self):
self.driver.create_table('test_table')
self.addCleanup(self.driver.delete_table, 'test_table')
self.driver.create_key('test_table', 'k1', 'v1')
self.driver.create_key('test_table', 'k2', 'v2')
self.driver.delete_key('test_table', 'k1')
self.assertRaises(df_exceptions.DBKeyNotFound,
functools.partial(self.driver.get_key,
'test_table', 'k1'))
self.assertEqual('v2', self.driver.get_key('test_table', 'k2'))
def test_delete_table(self):
self.driver.create_table('test_table')
self.driver.create_key('test_table', 'k1', 'v1')
self.driver.delete_table('test_table')
self.assertRaises(df_exceptions.DBKeyNotFound,
functools.partial(self.driver.get_key,
'test_table', 'k1'))
def test_set_key(self):
self.driver.create_table('test_table')
self.addCleanup(self.driver.delete_table, 'test_table')
self.driver.create_key('test_table', 'k1', 'v1')
self.driver.create_key('test_table', 'k2', 'v2')
self.assertEqual('v1', self.driver.get_key('test_table', 'k1'))
self.driver.set_key('test_table', 'k1', 'v1_2')
self.assertEqual('v1_2', self.driver.get_key('test_table', 'k1'))
self.assertEqual('v2', self.driver.get_key('test_table', 'k2'))
def test_get_all_entries(self):
self.assertEqual([], self.driver.get_all_entries('test_table'))
self.driver.create_table('test_table')
self.addCleanup(self.driver.delete_table, 'test_table')
self.assertEqual([], self.driver.get_all_entries('test_table'))
self.driver.create_key('test_table', 'k1', 'v1')
self.driver.create_key('test_table', 'k2', 'v2')
self.assertItemsEqual(['v1', 'v2'],
self.driver.get_all_entries('test_table'))
def test_get_all_keys(self):
self.assertEqual([], self.driver.get_all_keys('test_table'))
self.driver.create_table('test_table')
self.addCleanup(self.driver.delete_table, 'test_table')
self.assertEqual([], self.driver.get_all_keys('test_table'))
self.driver.create_key('test_table', 'k1', 'v1')
self.driver.create_key('test_table', 'k2', 'v2')
self.assertItemsEqual(['k1', 'k2'],
self.driver.get_all_keys('test_table'))
def test_allocate_unique_key(self):
unique_keys = [0, 0]
def get_unique_key(idx):
unique_keys[idx] = self.driver.allocate_unique_key('test_table')
thread1 = threading.Thread(target=functools.partial(get_unique_key, 0))
thread2 = threading.Thread(target=functools.partial(get_unique_key, 1))
thread1.start()
thread2.start()
thread1.join(5)
thread2.join(5)
self.assertNotEqual(unique_keys[0], unique_keys[1])
self.assertFalse(thread1.is_alive())
self.assertFalse(thread2.is_alive())
|
{
"content_hash": "3878c198d2325bfadb202cd99e193bc2",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 45.92857142857143,
"alnum_prop": 0.5977190254017626,
"repo_name": "openstack/dragonflow",
"id": "8658e26be0627c05764a80bfb006576bce2a687a",
"size": "4431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dragonflow/tests/database/test_db_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2386"
},
{
"name": "Dockerfile",
"bytes": "690"
},
{
"name": "Mako",
"bytes": "1053"
},
{
"name": "Python",
"bytes": "1740942"
},
{
"name": "Ruby",
"bytes": "4449"
},
{
"name": "Shell",
"bytes": "70410"
}
],
"symlink_target": ""
}
|
import json
from django.core.urlresolvers import reverse
from tests.models import Foo
from tests.serializers import FooSerializer
from tests.views import FooViewSet
import pytest
pytestmark = pytest.mark.django_db
def test_empty_list(client):
result = {
'prev': '',
'next': '',
'count': 0,
'results': []
}
resp = client.get(reverse('foo-list'))
    res = json.loads(resp.content)
for k in result.iterkeys():
assert k in res
assert res[k] == result[k]
def test_paginated_list(client):
Foo.objects.bulk_create([Foo(name=str(i)) for i in xrange(20)])
result = {
'prev': '',
'next': reverse('foo-list') + '?page_number=2',
'count': '21',
'results': [{'name': str(i), 'id': i+1} for i in xrange(9)]
}
resp = client.get(reverse('foo-list'))
    res = json.loads(resp.content)
    for k in result.iterkeys():
        assert k in res
        assert res[k] == result[k]
|
{
"content_hash": "cd0d29ed8c68a74eb04f0aae2783c05a",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 67,
"avg_line_length": 20.770833333333332,
"alnum_prop": 0.5797392176529589,
"repo_name": "WorkHorseIndustries/drf-raw-pagination",
"id": "59438f24d3d51c524724649a7265d1f43cb34de7",
"size": "997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7626"
}
],
"symlink_target": ""
}
|
'''
Copyright(C) 2016 Engineering Department, University of Cambridge, UK.
License
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author
Gilles Degottex <gad27@cam.ac.uk>
'''
from analysis import *
from synthesis import *
|
{
"content_hash": "6990282dc5e7b60df976131533b36747",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 75,
"avg_line_length": 33.22727272727273,
"alnum_prop": 0.7523939808481532,
"repo_name": "etosha/pulsemodel",
"id": "20a80be14fb781406101d76f212f944217e52c25",
"size": "731",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "4725"
},
{
"name": "Python",
"bytes": "27763"
}
],
"symlink_target": ""
}
|
from antlr4.IntervalSet import IntervalSet
from antlr4.Token import Token
class ATN(object):
INVALID_ALT_NUMBER = 0
# Used for runtime deserialization of ATNs from strings#/
def __init__(self, grammarType , maxTokenType ):
# The type of the ATN.
self.grammarType = grammarType
# The maximum value for any symbol recognized by a transition in the ATN.
self.maxTokenType = maxTokenType
self.states = []
# Each subrule/rule is a decision point and we must track them so we
# can go back later and build DFA predictors for them. This includes
# all the rules, subrules, optional blocks, ()+, ()* etc...
self.decisionToState = []
# Maps from rule index to starting state number.
self.ruleToStartState = []
# Maps from rule index to stop state number.
self.ruleToStopState = None
self.modeNameToStartState = dict()
# For lexer ATNs, this maps the rule index to the resulting token type.
# For parser ATNs, this maps the rule index to the generated bypass token
# type if the
# {@link ATNDeserializationOptions#isGenerateRuleBypassTransitions}
# deserialization option was specified; otherwise, this is {@code null}.
self.ruleToTokenType = None
# For lexer ATNs, this is an array of {@link LexerAction} objects which may
# be referenced by action transitions in the ATN.
self.lexerActions = None
self.modeToStartState = []
# Compute the set of valid tokens that can occur starting in state {@code s}.
# If {@code ctx} is null, the set of tokens will not include what can follow
# the rule surrounding {@code s}. In other words, the set will be
# restricted to tokens reachable staying within {@code s}'s rule.
def nextTokensInContext(self, s, ctx):
from antlr4.LL1Analyzer import LL1Analyzer
anal = LL1Analyzer(self)
return anal.LOOK(s, ctx=ctx)
# Compute the set of valid tokens that can occur starting in {@code s} and
# staying in same rule. {@link Token#EPSILON} is in set if we reach end of
# rule.
def nextTokensNoContext(self, s):
if s.nextTokenWithinRule is not None:
return s.nextTokenWithinRule
s.nextTokenWithinRule = self.nextTokensInContext(s, None)
s.nextTokenWithinRule.readonly = True
return s.nextTokenWithinRule
def nextTokens(self, s, ctx = None):
if ctx==None:
return self.nextTokensNoContext(s)
else:
return self.nextTokensInContext(s, ctx)
def addState(self, state):
if state is not None:
state.atn = self
state.stateNumber = len(self.states)
self.states.append(state)
def removeState(self, state):
self.states[state.stateNumber] = None # just free mem, don't shift states in list
def defineDecisionState(self, s):
self.decisionToState.append(s)
s.decision = len(self.decisionToState)-1
return s.decision
def getDecisionState(self, decision):
if len(self.decisionToState)==0:
return None
else:
return self.decisionToState[decision]
# Computes the set of input symbols which could follow ATN state number
# {@code stateNumber} in the specified full {@code context}. This method
# considers the complete parser context, but does not evaluate semantic
# predicates (i.e. all predicates encountered during the calculation are
# assumed true). If a path in the ATN exists from the starting state to the
# {@link RuleStopState} of the outermost context without matching any
# symbols, {@link Token#EOF} is added to the returned set.
#
# <p>If {@code context} is {@code null}, it is treated as
# {@link ParserRuleContext#EMPTY}.</p>
#
# @param stateNumber the ATN state number
# @param context the full parse context
# @return The set of potentially valid input symbols which could follow the
# specified state in the specified context.
# @throws IllegalArgumentException if the ATN does not contain a state with
# number {@code stateNumber}
#/
def getExpectedTokens(self, stateNumber, ctx ):
if stateNumber < 0 or stateNumber >= len(self.states):
raise Exception("Invalid state number.")
s = self.states[stateNumber]
following = self.nextTokens(s)
if Token.EPSILON not in following:
return following
expected = IntervalSet()
expected.addSet(following)
expected.removeOne(Token.EPSILON)
while (ctx != None and ctx.invokingState >= 0 and Token.EPSILON in following):
invokingState = self.states[ctx.invokingState]
rt = invokingState.transitions[0]
following = self.nextTokens(rt.followState)
expected.addSet(following)
expected.removeOne(Token.EPSILON)
ctx = ctx.parentCtx
if Token.EPSILON in following:
expected.addOne(Token.EOF)
return expected
|
{
"content_hash": "fed6ab3abcaa3e2cc69e0238af9d0ec7",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 89,
"avg_line_length": 43.347457627118644,
"alnum_prop": 0.6596285434995113,
"repo_name": "wjkohnen/antlr4",
"id": "8dad40990c7cfec937205e4144d8d2c6ef8f9062",
"size": "5308",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "runtime/Python2/src/antlr4/atn/ATN.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ANTLR",
"bytes": "168278"
},
{
"name": "Batchfile",
"bytes": "2846"
},
{
"name": "C",
"bytes": "9558"
},
{
"name": "C#",
"bytes": "1118402"
},
{
"name": "C++",
"bytes": "991092"
},
{
"name": "CMake",
"bytes": "18680"
},
{
"name": "GAP",
"bytes": "110866"
},
{
"name": "Go",
"bytes": "365856"
},
{
"name": "Java",
"bytes": "2736104"
},
{
"name": "JavaScript",
"bytes": "421304"
},
{
"name": "Makefile",
"bytes": "1513"
},
{
"name": "Objective-C",
"bytes": "408"
},
{
"name": "Objective-C++",
"bytes": "27915"
},
{
"name": "PowerShell",
"bytes": "6138"
},
{
"name": "Python",
"bytes": "1351882"
},
{
"name": "Shell",
"bytes": "9122"
},
{
"name": "Swift",
"bytes": "949011"
}
],
"symlink_target": ""
}
|
import os
from .. import env
class RequirementsSpec(object):
'''
    Reads dependencies from a requirements.txt file
and returns an Environment object from it.
'''
msg = None
extensions = set(['.txt', ])
def __init__(self, filename=None, name=None, **kwargs):
self.filename = filename
self.name = name
self.msg = None
def _valid_file(self):
if os.path.exists(self.filename):
return True
else:
self.msg = "There is no requirements.txt"
return False
def _valid_name(self):
if self.name is None:
self.msg = "Environment with requirements.txt file needs a name"
return False
else:
return True
def can_handle(self):
return self._valid_file() and self._valid_name()
@property
def environment(self):
dependencies = []
with open(self.filename) as reqfile:
for line in reqfile:
line = line.strip()
if not line or line.startswith('#'):
continue
dependencies.append(line)
return env.Environment(
name=self.name,
dependencies=dependencies
)
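# Illustrative usage sketch (the file path and environment name below are
# examples only, not part of this module):
#
#     spec = RequirementsSpec(filename='requirements.txt', name='myenv')
#     if spec.can_handle():
#         environment = spec.environment  # env.Environment built from the file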
|
{
"content_hash": "0fcd4cef5a4acf90dbfab8f39c7c9f66",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 76,
"avg_line_length": 26.083333333333332,
"alnum_prop": 0.5511182108626198,
"repo_name": "zooba/PTVS",
"id": "cf56aacd0fa0a76c048db80a7971cd0ee4fd7288",
"size": "1351",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda_env/specs/requirements.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12390821"
},
{
"name": "C++",
"bytes": "209386"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "25220"
},
{
"name": "Python",
"bytes": "888412"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
}
|
"""
Base classes for Messages, provides NodeMessage (base class for message), and Attribute - message attribute descriptor
"""
from foolscap.api import Copyable, RemoteCopy
import sys
import simplejson
class Attribute(object):
"""
    Helper class to define Message attributes, to make it easy for foolscap to build Copyable objects
"""
def __init__(self, name, value=None):
try:
f = sys._getframe(1)
msg = f.f_locals['self']
msg.attrs[name] = self
except KeyError, e:
pass
self.name = name
self.value = value
def __ne__(self, obj):
if not isinstance(obj, Attribute):
value = obj
else:
value = obj.value
return self.value != value
def __eq__(self, obj):
if not isinstance(obj, Attribute):
value = obj
else:
value = obj.value
return self.value == value
def __float__(self):
return float(self.value)
def __repr__(self):
return repr(self.value)
def __getitem__(self, name):
return self.value[name]
def __str__(self):
return str(self.value)
def getValue(self):
"""
Return value of Attribute
"""
return self.value
class _BaseMessage(object):
"""
Base class for message: _Message and NodeMessage
"""
def __init__(self):
        # NodeMessage will call __setattr__ during __init__; assign via __dict__ so the self.attrs lookup does not raise KeyError
self.__dict__['attrs'] = {}
def set(self, name, value):
self.attrs[name].value = value
def setAttribute(self, name, value):
Attribute(name, value)
def toJson(self):
d = {}
for k,v in self.attrs.items():
# ignore all hidden fields
if not k.startswith('_'):
d[k] = v.getValue()
return simplejson.dumps(d)
def fromJson(self, json):
d = simplejson.loads(json)
for k,v in d.items():
Attribute(k, v)
class _Message(_BaseMessage, Copyable, RemoteCopy):
"""
Foolscap message description, we're trying to wrap any NodeSet message into this one
"""
typeToCopy = copytype = 'node-message-0xdeadbeaf'
def __getattr__(self, name):
if self.attrs.has_key(name):
return self.attrs[name]
elif self.__dict__.has_key(name):
return self.__dict__[name]
else:
raise KeyError("getattr() - Class %s has no property %s" % (self, name))
def getStateToCopy(self):
d = {}
for k, v in self.attrs.items():
d[str(v.name)] = v.value
return d
def setCopyableState(self, state):
for k,v in state.items():
item = Attribute(k)
item.value = v
def __eq__(self, obj):
for k,v in self.attrs.items():
try:
i = getattr(obj, k)
if i != v:
return False
except KeyError, e:
return False
return True
class NodeMessage(_BaseMessage):
"""
Base class for NodeSet messages
"""
"""
@ivar _attrs: dict of message attributes
"""
#attrs = {'_delivery_mode': Attribute('_delivery_mode', 'all')}
def __init__(self):
_BaseMessage.__init__(self)
Attribute('_delivery_mode', 'all')
def __str__(self):
return str(self.__class__)
def __repr__(self):
return repr(self.__class__)
def __getattr__(self, name):
if self.__dict__.has_key('attrs') and self.attrs.has_key(name):
return self.attrs[name]
elif self.__dict__.has_key(name):
return self.__dict__[name]
else:
raise KeyError("getattr() - Class %s has no property %s" % (self, name))
def __setattr__(self, name, value):
if self.__dict__.has_key('attrs') and self.attrs.has_key(name):
Attribute(name, value)
elif self.__dict__.has_key(name):
self.__dict__[name] = value
else:
raise KeyError("setattr() - Class %s has no property %s" % (self, name))
|
{
"content_hash": "601eb225101166ff22453ee279a5f6fd",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 118,
"avg_line_length": 26.24852071005917,
"alnum_prop": 0.5047339945897205,
"repo_name": "selfsk/nodeset.core",
"id": "47e5bc12b37da2c5f9ac079e8629d686569cdbae",
"size": "4436",
"binary": false,
"copies": "1",
"ref": "refs/heads/core",
"path": "src/nodeset/core/message.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "145871"
}
],
"symlink_target": ""
}
|
class Machine(object):
"""
.. module:: useful_1
:platform: Unix, Windows
:synopsis: A useful module indeed.
.. moduleauthor:: Andrew Carter <andrew@invalid.com>
"""
def __init__(self):
pass
# def __repr__(self):
# pass
#
# Properties
#
# These should be only public constants, e.g.
# - Accepted inputs for language codes
#
# Methods
#
def test_docstring():
"""
.. function:: format_exception(etype, value, tb[, limit=None])
Format the exception with a traceback.
:param etype: exception type
:param value: exception value
:param tb: traceback object
:param limit: maximum number of stack frames to show
:type limit: integer or None
:rtype: list of strings
"""
|
{
"content_hash": "d42d06a26c0c7e36b28aaa10432d96b1",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 70,
"avg_line_length": 20.547619047619047,
"alnum_prop": 0.5457705677867902,
"repo_name": "nathanleiby/python-web-hooks",
"id": "0c1842458e9dd0006be215646aed623d7e8eda24",
"size": "863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_hooks/Machine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "54458"
},
{
"name": "Python",
"bytes": "10738"
},
{
"name": "Shell",
"bytes": "6518"
}
],
"symlink_target": ""
}
|
from OpenGLCffi.GLES2 import params
@params(api='gles2', prms=['srcName', 'srcTarget', 'srcLevel', 'srcX', 'srcY', 'srcZ', 'dstName', 'dstTarget', 'dstLevel', 'dstX', 'dstY', 'dstZ', 'srcWidth', 'srcHeight', 'srcDepth'])
def glCopyImageSubDataEXT(srcName, srcTarget, srcLevel, srcX, srcY, srcZ, dstName, dstTarget, dstLevel, dstX, dstY, dstZ, srcWidth, srcHeight, srcDepth):
pass
|
{
"content_hash": "81ad84dcee4ebd430ea571d2c966aa59",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 184,
"avg_line_length": 63.833333333333336,
"alnum_prop": 0.7075718015665796,
"repo_name": "cydenix/OpenGLCffi",
"id": "d93ff236be53398b26e61ed628e2d80c59723a5a",
"size": "383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OpenGLCffi/GLES2/EXT/EXT/copy_image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1646"
},
{
"name": "C++",
"bytes": "188"
},
{
"name": "Python",
"bytes": "1853617"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='post_title',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
]
|
{
"content_hash": "08960933477620ada9521b9b162efcef",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 63,
"avg_line_length": 21.736842105263158,
"alnum_prop": 0.576271186440678,
"repo_name": "judgegrubb/python-blogging",
"id": "33c72f843ff06f62080bf56551c3b635346901e2",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/migrations/0002_post_post_title.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1374"
},
{
"name": "HTML",
"bytes": "5371"
},
{
"name": "Python",
"bytes": "10755"
}
],
"symlink_target": ""
}
|
"""Support for SolarEdge-local Monitoring API."""
from __future__ import annotations
from contextlib import suppress
from copy import copy
from dataclasses import dataclass
from datetime import timedelta
import logging
import statistics
from requests.exceptions import ConnectTimeout, HTTPError
from solaredge_local import SolarEdge
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.const import (
CONF_IP_ADDRESS,
CONF_NAME,
ELECTRIC_CURRENT_AMPERE,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_WATT_HOUR,
FREQUENCY_HERTZ,
POWER_WATT,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util import Throttle
DOMAIN = "solaredge_local"
UPDATE_DELAY = timedelta(seconds=10)
INVERTER_MODES = (
"SHUTTING_DOWN",
"ERROR",
"STANDBY",
"PAIRING",
"POWER_PRODUCTION",
"AC_CHARGING",
"NOT_PAIRED",
"NIGHT_MODE",
"GRID_MONITORING",
"IDLE",
)
@dataclass
class SolarEdgeLocalSensorEntityDescription(SensorEntityDescription):
"""Describes SolarEdge-local sensor entity."""
extra_attribute: str | None = None
SENSOR_TYPES: tuple[SolarEdgeLocalSensorEntityDescription, ...] = (
SolarEdgeLocalSensorEntityDescription(
key="gridvoltage",
name="Grid Voltage",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
icon="mdi:current-ac",
),
SolarEdgeLocalSensorEntityDescription(
key="dcvoltage",
name="DC Voltage",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
icon="mdi:current-dc",
),
SolarEdgeLocalSensorEntityDescription(
key="gridfrequency",
name="Grid Frequency",
native_unit_of_measurement=FREQUENCY_HERTZ,
icon="mdi:current-ac",
),
SolarEdgeLocalSensorEntityDescription(
key="currentPower",
name="Current Power",
native_unit_of_measurement=POWER_WATT,
icon="mdi:solar-power",
),
SolarEdgeLocalSensorEntityDescription(
key="energyThisMonth",
name="Energy This Month",
native_unit_of_measurement=ENERGY_WATT_HOUR,
icon="mdi:solar-power",
),
SolarEdgeLocalSensorEntityDescription(
key="energyThisYear",
name="Energy This Year",
native_unit_of_measurement=ENERGY_WATT_HOUR,
icon="mdi:solar-power",
),
SolarEdgeLocalSensorEntityDescription(
key="energyToday",
name="Energy Today",
native_unit_of_measurement=ENERGY_WATT_HOUR,
icon="mdi:solar-power",
),
SolarEdgeLocalSensorEntityDescription(
key="energyTotal",
name="Lifetime Energy",
native_unit_of_measurement=ENERGY_WATT_HOUR,
icon="mdi:solar-power",
),
SolarEdgeLocalSensorEntityDescription(
key="optimizers",
name="Optimizers Online",
native_unit_of_measurement="optimizers",
icon="mdi:solar-panel",
extra_attribute="optimizers_connected",
),
SolarEdgeLocalSensorEntityDescription(
key="optimizercurrent",
name="Average Optimizer Current",
native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
icon="mdi:solar-panel",
),
SolarEdgeLocalSensorEntityDescription(
key="optimizerpower",
name="Average Optimizer Power",
native_unit_of_measurement=POWER_WATT,
icon="mdi:solar-panel",
),
SolarEdgeLocalSensorEntityDescription(
key="optimizertemperature",
name="Average Optimizer Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
icon="mdi:solar-panel",
device_class=SensorDeviceClass.TEMPERATURE,
),
SolarEdgeLocalSensorEntityDescription(
key="optimizervoltage",
name="Average Optimizer Voltage",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
icon="mdi:solar-panel",
),
)
SENSOR_TYPE_INVERTER_TEMPERATURE = SolarEdgeLocalSensorEntityDescription(
key="invertertemperature",
name="Inverter Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
extra_attribute="operating_mode",
device_class=SensorDeviceClass.TEMPERATURE,
)
SENSOR_TYPES_ENERGY_IMPORT: tuple[SolarEdgeLocalSensorEntityDescription, ...] = (
SolarEdgeLocalSensorEntityDescription(
key="currentPowerimport",
name="current import Power",
native_unit_of_measurement=POWER_WATT,
icon="mdi:arrow-collapse-down",
),
SolarEdgeLocalSensorEntityDescription(
key="totalEnergyimport",
name="total import Energy",
native_unit_of_measurement=ENERGY_WATT_HOUR,
icon="mdi:counter",
),
)
SENSOR_TYPES_ENERGY_EXPORT: tuple[SolarEdgeLocalSensorEntityDescription, ...] = (
SolarEdgeLocalSensorEntityDescription(
key="currentPowerexport",
name="current export Power",
native_unit_of_measurement=POWER_WATT,
icon="mdi:arrow-expand-up",
),
SolarEdgeLocalSensorEntityDescription(
key="totalEnergyexport",
name="total export Energy",
native_unit_of_measurement=ENERGY_WATT_HOUR,
icon="mdi:counter",
),
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default="SolarEdge"): cv.string,
}
)
_LOGGER = logging.getLogger(__name__)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Create the SolarEdge Monitoring API sensor."""
ip_address = config[CONF_IP_ADDRESS]
platform_name = config[CONF_NAME]
# Create new SolarEdge object to retrieve data.
api = SolarEdge(f"http://{ip_address}/")
# Check if api can be reached and site is active.
try:
status = api.get_status()
_LOGGER.debug("Credentials correct and site is active")
except AttributeError:
_LOGGER.error("Missing details data in solaredge status")
return
except (ConnectTimeout, HTTPError):
_LOGGER.error("Could not retrieve details from SolarEdge API")
return
# Create solaredge data service which will retrieve and update the data.
data = SolarEdgeData(hass, api)
# Changing inverter temperature unit.
inverter_temp_description = copy(SENSOR_TYPE_INVERTER_TEMPERATURE)
if status.inverters.primary.temperature.units.farenheit:
inverter_temp_description.native_unit_of_measurement = TEMP_FAHRENHEIT
# Create entities
entities = [
SolarEdgeSensor(platform_name, data, description)
for description in (*SENSOR_TYPES, inverter_temp_description)
]
try:
if status.metersList[0]:
entities.extend(
[
SolarEdgeSensor(platform_name, data, description)
for description in SENSOR_TYPES_ENERGY_IMPORT
]
)
except IndexError:
_LOGGER.debug("Import meter sensors are not created")
try:
if status.metersList[1]:
entities.extend(
[
SolarEdgeSensor(platform_name, data, description)
for description in SENSOR_TYPES_ENERGY_EXPORT
]
)
except IndexError:
_LOGGER.debug("Export meter sensors are not created")
add_entities(entities, True)
class SolarEdgeSensor(SensorEntity):
"""Representation of an SolarEdge Monitoring API sensor."""
entity_description: SolarEdgeLocalSensorEntityDescription
def __init__(
self,
platform_name,
data,
description: SolarEdgeLocalSensorEntityDescription,
):
"""Initialize the sensor."""
self.entity_description = description
self._platform_name = platform_name
self._data = data
self._attr_name = f"{platform_name} ({description.name})"
@property
def extra_state_attributes(self):
"""Return the state attributes."""
if extra_attr := self.entity_description.extra_attribute:
try:
return {extra_attr: self._data.info[self.entity_description.key]}
except KeyError:
pass
return None
def update(self):
"""Get the latest data from the sensor and update the state."""
self._data.update()
self._attr_native_value = self._data.data[self.entity_description.key]
class SolarEdgeData:
"""Get and update the latest data."""
def __init__(self, hass, api):
"""Initialize the data object."""
self.hass = hass
self.api = api
self.data = {}
self.info = {}
@Throttle(UPDATE_DELAY)
def update(self):
"""Update the data from the SolarEdge Monitoring API."""
try:
status = self.api.get_status()
_LOGGER.debug("Status from SolarEdge: %s", status)
except ConnectTimeout:
_LOGGER.error("Connection timeout, skipping update")
return
except HTTPError:
_LOGGER.error("Could not retrieve status, skipping update")
return
try:
maintenance = self.api.get_maintenance()
_LOGGER.debug("Maintenance from SolarEdge: %s", maintenance)
except ConnectTimeout:
_LOGGER.error("Connection timeout, skipping update")
return
except HTTPError:
_LOGGER.error("Could not retrieve maintenance, skipping update")
return
temperature = []
voltage = []
current = []
power = 0
for optimizer in maintenance.diagnostics.inverters.primary.optimizer:
if not optimizer.online:
continue
temperature.append(optimizer.temperature.value)
voltage.append(optimizer.inputV)
current.append(optimizer.inputC)
if not voltage:
temperature.append(0)
voltage.append(0)
current.append(0)
else:
power = statistics.mean(voltage) * statistics.mean(current)
if status.sn:
self.data["energyTotal"] = round(status.energy.total, 2)
self.data["energyThisYear"] = round(status.energy.thisYear, 2)
self.data["energyThisMonth"] = round(status.energy.thisMonth, 2)
self.data["energyToday"] = round(status.energy.today, 2)
self.data["currentPower"] = round(status.powerWatt, 2)
self.data["invertertemperature"] = round(
status.inverters.primary.temperature.value, 2
)
self.data["dcvoltage"] = round(status.inverters.primary.voltage, 2)
self.data["gridfrequency"] = round(status.frequencyHz, 2)
self.data["gridvoltage"] = round(status.voltage, 2)
self.data["optimizers"] = status.optimizersStatus.online
self.info["optimizers"] = status.optimizersStatus.total
self.info["invertertemperature"] = INVERTER_MODES[status.status]
with suppress(IndexError):
if status.metersList[1]:
self.data["currentPowerimport"] = status.metersList[1].currentPower
self.data["totalEnergyimport"] = status.metersList[1].totalEnergy
with suppress(IndexError):
if status.metersList[0]:
self.data["currentPowerexport"] = status.metersList[0].currentPower
self.data["totalEnergyexport"] = status.metersList[0].totalEnergy
if maintenance.system.name:
self.data["optimizertemperature"] = round(statistics.mean(temperature), 2)
self.data["optimizervoltage"] = round(statistics.mean(voltage), 2)
self.data["optimizercurrent"] = round(statistics.mean(current), 2)
self.data["optimizerpower"] = round(power, 2)
|
{
"content_hash": "52073708425947d97cd2fa96a21c2197",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 87,
"avg_line_length": 32.905913978494624,
"alnum_prop": 0.6435748713340413,
"repo_name": "toddeye/home-assistant",
"id": "d07a95683eb5365ba01ccc9884489456e2c36fa1",
"size": "12241",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/solaredge_local/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth import login
class BasketInSessionMiddlwareTestCases(TestCase):
"""
'ccbasket.middleware.BasketInSessionMiddleware' must always create a
basket object in the session.
The name of this object defaults to 'basket' but it can be overridden
by settings.CCBASKET_SESSION_KEY_NAME
"""
def test_basket_present(self):
"""There is always a basket object in the session"""
# client has no basket in session
self.assertFalse(self.client.session.get('basket'))
# make a request
r = self.client.get(reverse('ccbasket:basket'))
# response was 200
self.assertEqual(200, r.status_code)
        # now there is a basket in the session
self.assertTrue(self.client.session.get('basket'))
def test_basket_present_using_ccbasket_session_key_name(self):
""" Test the basket name can be set to anything using the
        CCBASKET_SESSION_KEY_NAME setting
"""
settings.CCBASKET_SESSION_KEY_NAME = 'roderick'
# make a request
r = self.client.get(reverse('ccbasket:basket'))
        # now there is a basket in the session called roderick
self.assertTrue(self.client.session.get('roderick'))
# clean up
del(settings.CCBASKET_SESSION_KEY_NAME)
def test_basket_session_key_is_changed_after_login(self):
"""Session keys change when a user logs in"""
# make a user
u = User()
u.username = 'test'
u.set_password('test')
u.save()
# now get a session_id
r = self.client.get(reverse('ccbasket:basket'))
sid_pre_login = self.client.session.session_key
# it is the same as the basket_session_key
self.assertEqual(sid_pre_login,
self.client.session['basket'].session_key)
# now login
self.client.login(username='test', password='test')
# get the new session id
r = self.client.get(reverse('ccbasket:basket'))
sid_post_login = self.client.session.session_key
# it is the same as the basket_session_key
self.assertEqual(sid_post_login,
self.client.session['basket'].session_key)
# the session id has changed
self.failIfEqual(sid_pre_login, sid_post_login)
|
{
"content_hash": "6728d748d770916167fda7a9bcb28aee",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 73,
"avg_line_length": 39.317460317460316,
"alnum_prop": 0.6467501009285426,
"repo_name": "designcc/django-ccbasket",
"id": "fd1b3892842d0e37d2af3da56dfac809be58faa1",
"size": "2501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ccbasket/tests/test_middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "132367"
},
{
"name": "Python",
"bytes": "125514"
}
],
"symlink_target": ""
}
|
"""Convert USDA data to CSV like format.
Copyright 2017 Joshua Powers <mrpowersj@gmail.com>
"""
import argparse
import os
def convert_to_csv(line):
"""Converts a single line of USDA output."""
# Replace the NULLs first
line = line.replace('~~', 'NULL')
line = line.replace('^^', 'NULL')
# Quote the strings
line = line.replace('~', '"')
# Add commas
line = line.replace('^', ',')
return line
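# Worked example of the substitutions above (input value is hypothetical):
#     '~A~^~~^1.5' -> '~A~^NULL^1.5' -> '"A"^NULL^1.5' -> '"A",NULL,1.5'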
def convert(filename):
"""Convert USDA format to CSV format."""
with open(filename, 'r') as data_file:
raw_data = data_file.readlines()
csv_filename = '%s/%s.csv' % (os.path.dirname(filename),
os.path.basename(filename).split('.')[0])
with open(csv_filename, 'w') as csv_file:
for line in raw_data:
csv_file.write(convert_to_csv(line))
if __name__ == '__main__':
PARSER = argparse.ArgumentParser()
PARSER.add_argument('filename',
help='path of filename to convert')
ARGS = PARSER.parse_args()
convert(ARGS.filename)
|
{
"content_hash": "4e0e64470a184a05174e2be93481da37",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 75,
"avg_line_length": 26.8,
"alnum_prop": 0.5867537313432836,
"repo_name": "powersj/usda-nnd",
"id": "2acc24d5aa87f46c5bb4ed8746f8caf9709ff5f4",
"size": "1095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/convert2csv.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1588"
}
],
"symlink_target": ""
}
|
"""
================================================
ABElectronics IO Pi 32-Channel Port Expander
Version 1.0 Created 29/02/2015
Requires python 3 smbus to be installed with: sudo apt-get install python-smbus
================================================
Each MCP23017 chip is split into two 8-bit ports. port 0 controls
pins 1 to 8 while port 1 controls pins 9 to 16.
When writing to or reading from a port the least significant bit represents
the lowest numbered pin on the selected port.
"""
class IoPi(object):
# Define registers values from datasheet
IODIRA = 0x00 # IO direction A - 1= input 0 = output
IODIRB = 0x01 # IO direction B - 1= input 0 = output
# Input polarity A - If a bit is set, the corresponding GPIO register bit
# will reflect the inverted value on the pin.
IPOLA = 0x02
# Input polarity B - If a bit is set, the corresponding GPIO register bit
# will reflect the inverted value on the pin.
IPOLB = 0x03
# The GPINTEN register controls the interrupt-onchange feature for each
# pin on port A.
GPINTENA = 0x04
# The GPINTEN register controls the interrupt-onchange feature for each
# pin on port B.
GPINTENB = 0x05
# Default value for port A - These bits set the compare value for pins
# configured for interrupt-on-change. If the associated pin level is the
# opposite from the register bit, an interrupt occurs.
DEFVALA = 0x06
# Default value for port B - These bits set the compare value for pins
# configured for interrupt-on-change. If the associated pin level is the
# opposite from the register bit, an interrupt occurs.
DEFVALB = 0x07
# Interrupt control register for port A. If 1 interrupt is fired when the
# pin matches the default value, if 0 the interrupt is fired on state
# change
INTCONA = 0x08
# Interrupt control register for port B. If 1 interrupt is fired when the
# pin matches the default value, if 0 the interrupt is fired on state
# change
INTCONB = 0x09
IOCON = 0x0A # see datasheet for configuration register
GPPUA = 0x0C # pull-up resistors for port A
GPPUB = 0x0D # pull-up resistors for port B
# The INTF register reflects the interrupt condition on the port A pins of
# any pin that is enabled for interrupts. A set bit indicates that the
# associated pin caused the interrupt.
INTFA = 0x0E
# The INTF register reflects the interrupt condition on the port B pins of
# any pin that is enabled for interrupts. A set bit indicates that the
# associated pin caused the interrupt.
INTFB = 0x0F
# The INTCAP register captures the GPIO port A value at the time the
# interrupt occurred.
INTCAPA = 0x10
# The INTCAP register captures the GPIO port B value at the time the
# interrupt occurred.
INTCAPB = 0x11
GPIOA = 0x12 # data port A
GPIOB = 0x13 # data port B
OLATA = 0x14 # output latches A
OLATB = 0x15 # output latches B
# variables
address = 0x20 # I2C address
port_a_dir = 0x00 # port a direction
port_b_dir = 0x00 # port b direction
portaval = 0x00 # port a value
portbval = 0x00 # port b value
porta_pullup = 0x00 # port a pull-up resistors
    portb_pullup = 0x00 # port b pull-up resistors
porta_polarity = 0x00 # input polarity for port a
portb_polarity = 0x00 # input polarity for port b
intA = 0x00 # interrupt control for port a
    intB = 0x00 # interrupt control for port b
# initial configuration - see IOCON page in the MCP23017 datasheet for
# more information.
config = 0x22
global _bus
def __init__(self, bus, address=0x20):
"""
init object with smbus object, i2c address, default is 0x20, 0x21 for
IOPi board,
Load default configuration, all pins are inputs with pull-ups disabled
"""
self._bus = bus
self.address = address
self._bus.write_byte_data(self.address, self.IOCON, self.config)
self.portaval = self._bus.read_byte_data(self.address, self.GPIOA)
self.portbval = self._bus.read_byte_data(self.address, self.GPIOB)
self._bus.write_byte_data(self.address, self.IODIRA, 0xFF)
self._bus.write_byte_data(self.address, self.IODIRB, 0xFF)
self.set_port_pullups(0,0x00)
self.set_port_pullups(1,0x00)
self.invert_port(0, 0x00)
self.invert_port(1, 0x00)
return
# local methods
def __updatebyte(self, byte, bit, value):
"""
internal method for setting the value of a single bit within a byte
"""
if value == 0:
return byte & ~(1 << bit)
elif value == 1:
return byte | (1 << bit)
def __checkbit(self, byte, bit):
"""
internal method for reading the value of a single bit within a byte
"""
if byte & (1 << bit):
return 1
else:
return 0
# public methods
def set_pin_direction(self, pin, direction):
"""
set IO direction for an individual pin
pins 1 to 16
direction 1 = input, 0 = output
"""
pin = pin - 1
if pin < 8:
self.port_a_dir = self.__updatebyte(self.port_a_dir, pin, direction)
self._bus.write_byte_data(self.address, self.IODIRA, self.port_a_dir)
else:
self.port_b_dir = self.__updatebyte(self.port_b_dir, pin - 8, direction)
self._bus.write_byte_data(self.address, self.IODIRB, self.port_b_dir)
return
def set_port_direction(self, port, direction):
"""
set direction for an IO port
        port 0 = pins 1 to 8, port 1 = pins 9 to 16
1 = input, 0 = output
"""
if port == 1:
self._bus.write_byte_data(self.address, self.IODIRB, direction)
self.port_b_dir = direction
else:
self._bus.write_byte_data(self.address, self.IODIRA, direction)
self.port_a_dir = direction
return
def set_pin_pullup(self, pinval, value):
"""
set the internal 100K pull-up resistors for an individual pin
pins 1 to 16
value 1 = enabled, 0 = disabled
"""
pin = pinval - 1
if pin < 8:
self.porta_pullup = self.__updatebyte(self.porta_pullup, pin, value)
self._bus.write_byte_data(self.address, self.GPPUA, self.porta_pullup)
else:
self.portb_pullup = self.__updatebyte(self.portb_pullup,pin - 8,value)
self._bus.write_byte_data(self.address, self.GPPUB, self.portb_pullup)
return
def set_port_pullups(self, port, value):
"""
set the internal 100K pull-up resistors for the selected IO port
"""
if port == 1:
self.portb_pullup = value
self._bus.write_byte_data(self.address, self.GPPUB, value)
else:
self.porta_pullup = value
self._bus.write_byte_data(self.address, self.GPPUA, value)
return
def write_pin(self, pin, value):
"""
write to an individual pin 1 - 16
"""
pin = pin - 1
if pin < 8:
self.portaval = self.__updatebyte(self.portaval, pin, value)
self._bus.write_byte_data(self.address, self.GPIOA, self.portaval)
else:
self.portbval = self.__updatebyte(self.portbval, pin - 8, value)
self._bus.write_byte_data(self.address, self.GPIOB, self.portbval)
return
def write_port(self, port, value):
"""
write to all pins on the selected port
        port 0 = pins 1 to 8, port 1 = pins 9 to 16
value = number between 0 and 255 or 0x00 and 0xFF
"""
if port == 1:
self._bus.write_byte_data(self.address, self.GPIOB, value)
self.portbval = value
else:
self._bus.write_byte_data(self.address, self.GPIOA, value)
self.portaval = value
return
def read_pin(self, pinval):
"""
read the value of an individual pin 1 - 16
returns 0 = logic level low, 1 = logic level high
"""
pin = pinval - 1
if pin < 8:
self.portaval = self._bus.read_byte_data(self.address, self.GPIOA)
return self.__checkbit(self.portaval, pin)
else:
pin = pin - 8
self.portbval = self._bus.read_byte_data(self.address, self.GPIOB)
return self.__checkbit(self.portbval, pin)
def read_port(self, port):
"""
read all pins on the selected port
        port 0 = pins 1 to 8, port 1 = pins 9 to 16
returns number between 0 and 255 or 0x00 and 0xFF
"""
if port == 1:
self.portbval = self._bus.read_byte_data(self.address, self.GPIOB)
return self.portbval
else:
self.portaval = self._bus.read_byte_data(self.address, self.GPIOA)
return self.portaval
def invert_port(self, port, polarity):
"""
invert the polarity of the pins on a selected port
        port 0 = pins 1 to 8, port 1 = pins 9 to 16
polarity 0 = same logic state of the input pin, 1 = inverted logic
state of the input pin
"""
if port == 1:
self._bus.write_byte_data(self.address, self.IPOLB, polarity)
self.portb_polarity = polarity
else:
self._bus.write_byte_data(self.address, self.IPOLA, polarity)
self.porta_polarity = polarity
return
def invert_pin(self, pin, polarity):
"""
invert the polarity of the selected pin
pins 1 to 16
polarity 0 = same logic state of the input pin, 1 = inverted logic
state of the input pin
"""
pin = pin - 1
if pin < 8:
self.porta_polarity = self.__updatebyte(
self.portaval,
pin,
polarity)
self._bus.write_byte_data(self.address, self.IPOLA, self.porta_polarity)
else:
self.portb_polarity = self.__updatebyte(
self.portbval,
pin -
8,
polarity)
self._bus.write_byte_data(self.address, self.IPOLB, self.portb_polarity)
return
def mirror_interrupts(self, value):
"""
1 = The INT pins are internally connected, 0 = The INT pins are not
connected. INTA is associated with PortA and INTB is associated with
PortB
"""
if value == 0:
self.config = self.__updatebyte(self.config, 6, 0)
self._bus.write_byte_data(self.address, self.IOCON, self.config)
if value == 1:
self.config = self.__updatebyte(self.config, 6, 1)
self._bus.write_byte_data(self.address, self.IOCON, self.config)
return
def set_interrupt_polarity(self, value):
"""
This sets the polarity of the INT output pins - 1 = Active-high.
0 = Active-low.
"""
if value == 0:
self.config = self.__updatebyte(self.config, 1, 0)
self._bus.write_byte_data(self.address, self.IOCON, self.config)
if value == 1:
self.config = self.__updatebyte(self.config, 1, 1)
self._bus.write_byte_data(self.address, self.IOCON, self.config)
        return
def set_interrupt_type(self, port, value):
"""
Sets the type of interrupt for each pin on the selected port
1 = interrupt is fired when the pin matches the default value
0 = the interrupt is fired on state change
"""
if port == 0:
self._bus.write_byte_data(self.address, self.INTCONA, value)
else:
self._bus.write_byte_data(self.address, self.INTCONB, value)
return
def set_interrupt_defaults(self, port, value):
"""
These bits set the compare value for pins configured for
interrupt-on-change on the selected port.
If the associated pin level is the opposite from the register bit, an
interrupt occurs.
"""
if port == 0:
self._bus.write_byte_data(self.address, self.DEFVALA, value)
else:
self._bus.write_byte_data(self.address, self.DEFVALB, value)
return
def set_interrupt_on_port(self, port, value):
"""
Enable interrupts for the pins on the selected port
        port 0 = pins 1 to 8, port 1 = pins 9 to 16
value = number between 0 and 255 or 0x00 and 0xFF
"""
if port == 0:
self._bus.write_byte_data(self.address, self.GPINTENA, value)
self.intA = value
else:
self._bus.write_byte_data(self.address, self.GPINTENB, value)
self.intB = value
return
def set_interrupt_on_pin(self, pin, value):
"""
Enable interrupts for the selected pin
Pin = 1 to 16
Value 0 = interrupt disabled, 1 = interrupt enabled
"""
pin = pin - 1
if pin < 8:
self.intA = self.__updatebyte(self.intA, pin, value)
self._bus.write_byte_data(self.address, self.GPINTENA, self.intA)
else:
self.intB = self.__updatebyte(self.intB, pin - 8, value)
self._bus.write_byte_data(self.address, self.GPINTENB, self.intB)
return
def read_interrupt_status(self, port):
"""
read the interrupt status for the pins on the selected port
        port 0 = pins 1 to 8, port 1 = pins 9 to 16
"""
if port == 0:
return self._bus.read_byte_data(self.address, self.INTFA)
else:
return self._bus.read_byte_data(self.address, self.INTFB)
def read_interrupt_capture(self, port):
"""
read the value from the selected port at the time of the last
interrupt trigger
        port 0 = pins 1 to 8, port 1 = pins 9 to 16
"""
if port == 0:
return self._bus.read_byte_data(self.address, self.INTCAPA)
else:
return self._bus.read_byte_data(self.address, self.INTCAPB)
def reset_interrupts(self):
"""
set the interrupts A and B to 0
"""
self.read_interrupt_capture(0)
self.read_interrupt_capture(1)
return
|
{
"content_hash": "3e2143a0c3d9ce438d62e798ff2d768e",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 85,
"avg_line_length": 37.43523316062176,
"alnum_prop": 0.5904498269896193,
"repo_name": "GentlemanBrewing/ADCLibraries-MCP3424",
"id": "7e8855f3668d464797b95dc82c4148872482ee0a",
"size": "14470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IOPi/ABE_IoPi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "17962"
},
{
"name": "Python",
"bytes": "116618"
}
],
"symlink_target": ""
}
|
"""DialogFlow API Detect Intent Python sample with text inputs.
Examples:
python detect_intent_texts.py -h
python detect_intent_texts.py --project-id PROJECT_ID \
--session-id SESSION_ID \
"hello" "book a meeting room" "Mountain View"
python detect_intent_texts.py --project-id PROJECT_ID \
--session-id SESSION_ID \
"tomorrow" "10 AM" "2 hours" "10 people" "A" "yes"
"""
import argparse
import uuid
# [START dialogflow_es_detect_intent_text]
def detect_intent_texts(project_id, session_id, texts, language_code):
"""Returns the result of detect intent with texts as inputs.
Using the same `session_id` between requests allows continuation
of the conversation."""
from google.cloud import dialogflow
session_client = dialogflow.SessionsClient()
session = session_client.session_path(project_id, session_id)
print("Session path: {}\n".format(session))
for text in texts:
text_input = dialogflow.TextInput(text=text, language_code=language_code)
query_input = dialogflow.QueryInput(text=text_input)
response = session_client.detect_intent(
request={"session": session, "query_input": query_input}
)
print("=" * 20)
print("Query text: {}".format(response.query_result.query_text))
print(
"Detected intent: {} (confidence: {})\n".format(
response.query_result.intent.display_name,
response.query_result.intent_detection_confidence,
)
)
print("Fulfillment text: {}\n".format(response.query_result.fulfillment_text))
# [END dialogflow_es_detect_intent_text]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--project-id", help="Project/agent id. Required.", required=True
)
parser.add_argument(
"--session-id",
help="Identifier of the DetectIntent session. " "Defaults to a random UUID.",
default=str(uuid.uuid4()),
)
parser.add_argument(
"--language-code",
help='Language code of the query. Defaults to "en-US".',
default="en-US",
)
parser.add_argument("texts", nargs="+", type=str, help="Text inputs.")
args = parser.parse_args()
detect_intent_texts(
args.project_id, args.session_id, args.texts, args.language_code
)
|
{
"content_hash": "a0c0e3fddc0ebe9c21f0242f6e8da3b5",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 86,
"avg_line_length": 32.21052631578947,
"alnum_prop": 0.6474673202614379,
"repo_name": "googleapis/python-dialogflow",
"id": "01cc2b95d0566f10b895690d7dfa026c12db71dd",
"size": "3047",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/detect_intent_texts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "11184005"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
}
|
from openstack.network.v2 import (qos_minimum_bandwidth_rule as
_qos_minimum_bandwidth_rule)
from openstack.tests.functional import base
class TestQoSMinimumBandwidthRule(base.BaseFunctionalTest):
QOS_POLICY_ID = None
QOS_IS_SHARED = False
QOS_POLICY_DESCRIPTION = "QoS policy description"
RULE_ID = None
RULE_MIN_KBPS = 1200
RULE_MIN_KBPS_NEW = 1800
RULE_DIRECTION = 'egress'
def setUp(self):
super(TestQoSMinimumBandwidthRule, self).setUp()
self.QOS_POLICY_NAME = self.getUniqueString()
qos_policy = self.conn.network.create_qos_policy(
description=self.QOS_POLICY_DESCRIPTION,
name=self.QOS_POLICY_NAME,
shared=self.QOS_IS_SHARED,
)
self.QOS_POLICY_ID = qos_policy.id
qos_min_bw_rule = self.conn.network.create_qos_minimum_bandwidth_rule(
self.QOS_POLICY_ID, direction=self.RULE_DIRECTION,
min_kbps=self.RULE_MIN_KBPS,
)
assert isinstance(qos_min_bw_rule,
_qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule)
self.assertEqual(self.RULE_MIN_KBPS, qos_min_bw_rule.min_kbps)
self.assertEqual(self.RULE_DIRECTION, qos_min_bw_rule.direction)
self.RULE_ID = qos_min_bw_rule.id
def tearDown(self):
rule = self.conn.network.delete_qos_minimum_bandwidth_rule(
self.RULE_ID,
self.QOS_POLICY_ID)
qos_policy = self.conn.network.delete_qos_policy(self.QOS_POLICY_ID)
self.assertIsNone(rule)
self.assertIsNone(qos_policy)
super(TestQoSMinimumBandwidthRule, self).tearDown()
def test_find(self):
sot = self.conn.network.find_qos_minimum_bandwidth_rule(
self.RULE_ID,
self.QOS_POLICY_ID)
self.assertEqual(self.RULE_ID, sot.id)
self.assertEqual(self.RULE_DIRECTION, sot.direction)
self.assertEqual(self.RULE_MIN_KBPS, sot.min_kbps)
def test_get(self):
sot = self.conn.network.get_qos_minimum_bandwidth_rule(
self.RULE_ID,
self.QOS_POLICY_ID)
self.assertEqual(self.RULE_ID, sot.id)
self.assertEqual(self.QOS_POLICY_ID, sot.qos_policy_id)
self.assertEqual(self.RULE_DIRECTION, sot.direction)
self.assertEqual(self.RULE_MIN_KBPS, sot.min_kbps)
def test_list(self):
rule_ids = [o.id for o in
self.conn.network.qos_minimum_bandwidth_rules(
self.QOS_POLICY_ID)]
self.assertIn(self.RULE_ID, rule_ids)
def test_update(self):
sot = self.conn.network.update_qos_minimum_bandwidth_rule(
self.RULE_ID,
self.QOS_POLICY_ID,
min_kbps=self.RULE_MIN_KBPS_NEW)
self.assertEqual(self.RULE_MIN_KBPS_NEW, sot.min_kbps)
|
{
"content_hash": "3f4b8381a945e5bf9268e1728f9870cc",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 39.56944444444444,
"alnum_prop": 0.6293436293436293,
"repo_name": "stackforge/python-openstacksdk",
"id": "57138c22fa6e3d574c516a9b3c2524a68f2fe014",
"size": "3396",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "openstack/tests/functional/network/v2/test_qos_minimum_bandwidth_rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1138292"
},
{
"name": "Shell",
"bytes": "1792"
}
],
"symlink_target": ""
}
|
class Candidate:
def __init__(self, name):
self.name = name
self.education = []
self.experience = []
self.projects = {}
self.other_experience = []
self.skills = []
def add_education(self, *args):
for arg in args:
self.education.append(arg)
def add_experience(self, position):
self.experience.append(position)
def add_projects(self, projects_dict):
self.projects.update(projects_dict)
def add_other_experience(self, *args):
for arg in args:
self.other_experience.append(arg)
def add_skills(self, skills_list):
self.skills.extend(skills_list)
class Experience:
def __init__(self, position, location, duration):
self.position = position
self.location = location
self.duration = duration
self.responsibilities = []
def add_responsibilities(self, responsibilities_list):
self.responsibilities.extend(responsibilities_list)
|
{
"content_hash": "e157dee35aa283d6720fc19a685d7e2f",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 59,
"avg_line_length": 26.63157894736842,
"alnum_prop": 0.6175889328063241,
"repo_name": "joemarchese/resume",
"id": "74db37d7e74f546dc9ae6c92968de92c6346536b",
"size": "1012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "candidate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "190"
},
{
"name": "HTML",
"bytes": "16734"
},
{
"name": "Python",
"bytes": "5797"
}
],
"symlink_target": ""
}
|
import os
from django.conf import settings
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils.translation import ugettext_lazy as _
from reviewboard.attachments.mimetypes import MIMETYPE_ICON_ALIASES
class FileAttachment(models.Model):
"""A file associated with a review request.
Like diffs, a file can have comments associated with it.
These comments are of type :model:`reviews.FileComment`.
"""
caption = models.CharField(_("caption"), max_length=256, blank=True)
draft_caption = models.CharField(_("draft caption"),
max_length=256, blank=True)
file = models.FileField(_("file"),
upload_to=os.path.join('uploaded', 'files',
'%Y', '%m', '%d'))
mimetype = models.CharField(_('mimetype'), max_length=256, blank=True)
@property
def filename(self):
"""Returns the filename for display purposes."""
return os.path.basename(self.file.name)
@property
def icon_url(self):
"""Returns the icon URL for this file."""
if self.mimetype in MIMETYPE_ICON_ALIASES:
name = MIMETYPE_ICON_ALIASES[self.mimetype]
else:
category = self.mimetype.split('/')[0]
name = self.mimetype.replace('/', '-')
mimetypes_dir = os.path.join(settings.STATIC_ROOT, 'rb', 'images',
'mimetypes')
if not os.path.exists(os.path.join(mimetypes_dir, name + '.png')):
name = category + '-x-generic'
if not os.path.exists(os.path.join(mimetypes_dir,
name + '.png')):
# We'll just use this as our fallback.
name = 'text-x-generic'
return static('rb/images/mimetypes/%s.png' % name)
def __unicode__(self):
return self.caption
def get_review_request(self):
try:
return self.review_request.all()[0]
except IndexError:
try:
return self.inactive_review_request.all()[0]
except IndexError:
# Maybe it's on a draft.
try:
draft = self.drafts.get()
except ObjectDoesNotExist:
draft = self.inactive_drafts.get()
return draft.review_request
def get_absolute_url(self):
return self.file.url
|
{
"content_hash": "0bfd4f5539bffc465c92c17f92a038ed",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 36.263888888888886,
"alnum_prop": 0.5614707008808886,
"repo_name": "atagar/ReviewBoard",
"id": "dc592a240b53f599da079cc05d0d9bda36761720",
"size": "2611",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "reviewboard/attachments/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "685"
},
{
"name": "C#",
"bytes": "340"
},
{
"name": "Java",
"bytes": "340"
},
{
"name": "JavaScript",
"bytes": "313642"
},
{
"name": "Objective-C",
"bytes": "288"
},
{
"name": "PHP",
"bytes": "225"
},
{
"name": "Perl",
"bytes": "103"
},
{
"name": "Python",
"bytes": "1736555"
},
{
"name": "Ruby",
"bytes": "172"
},
{
"name": "Shell",
"bytes": "829"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, print_function
import sys
from .yugen_core import list_api_keys, get_lambdas, delete_api, \
export_to_swagger, create_api_key, list_apis, \
deploy_custom_domain, delete_api_key, deploy_api
from . import utils
from .gcdt_cmd_dispatcher import cmd
from . import gcdt_lifecycle
# creating docopt parameters and usage help
DOC = '''Usage:
yugen deploy [-v]
yugen delete -f [-v]
yugen export [-v]
yugen list [-v]
yugen apikey-create <keyname> [-v]
yugen apikey-list [-v]
yugen apikey-delete [-v]
yugen custom-domain-create [-v]
yugen version
-h --help show this
-v --verbose show debug messages
'''
# TODO support changing API keys
# TODO investigate base path problem
@cmd(spec=['version'])
def version_cmd():
utils.version()
@cmd(spec=['list'])
def list_cmd(**tooldata):
context = tooldata.get('context')
awsclient = context.get('_awsclient')
return list_apis(awsclient)
@cmd(spec=['deploy'])
def deploy_cmd(**tooldata):
context = tooldata.get('context')
config = tooldata.get('config')
awsclient = context.get('_awsclient')
api_name = config['api'].get('name')
api_description = config['api'].get('description')
target_stage = config['api'].get('targetStage')
api_key = config['api'].get('apiKey')
lambdas = get_lambdas(awsclient, config, add_arn=True)
cache_cluster_enabled = config['api'].get('cacheClusterEnabled', False)
cache_cluster_size = config['api'].get('cacheClusterSize', False)
method_settings = config['api'].get('methodSettings', {})
exit_code = deploy_api(
awsclient=awsclient,
api_name=api_name,
api_description=api_description,
stage_name=target_stage,
api_key=api_key,
lambdas=lambdas,
cache_cluster_enabled=cache_cluster_enabled,
cache_cluster_size=cache_cluster_size,
method_settings=method_settings
)
if 'customDomain' in config:
domain_name = config['customDomain'].get('domainName')
route_53_record = config['customDomain'].get('route53Record')
#ssl_cert = {
# 'name': config['customDomain'].get('certificateName'),
# 'body': config['customDomain'].get('certificateBody'),
# 'private_key': config['customDomain'].get('certificatePrivateKey'),
# 'chain': config['customDomain'].get('certificateChain')
#}
cert_name = config['customDomain'].get('certificateName')
cert_arn = config['customDomain'].get('certificateArn')
hosted_zone_id = config['customDomain'].get('hostedDomainZoneId')
api_base_path = config['customDomain'].get('basePath')
ensure_cname = config['customDomain'].get('ensureCname', True)
deploy_custom_domain(
awsclient=awsclient,
api_name=api_name,
api_target_stage=target_stage,
api_base_path=api_base_path,
domain_name=domain_name,
route_53_record=route_53_record,
cert_name=cert_name,
cert_arn=cert_arn,
hosted_zone_id=hosted_zone_id,
ensure_cname=ensure_cname,
)
return exit_code
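# Illustrative sketch of the config structure deploy_cmd reads above (all
# values are hypothetical placeholders; only the keys used above are shown):
#
#     config = {
#         'api': {
#             'name': 'my-api',
#             'description': 'demo API',
#             'targetStage': 'dev',
#             'apiKey': 'my-api-key',
#         },
#         'customDomain': {
#             'domainName': 'api.example.com',
#             'route53Record': 'api.example.com',
#             'certificateName': 'example-cert',
#             'certificateArn': 'arn:aws:acm:eu-west-1:123456789012:certificate/example',
#             'hostedDomainZoneId': 'Z1EXAMPLE',
#             'basePath': 'v1',
#         },
#     }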
@cmd(spec=['delete', '-f'])
def delete_cmd(force, **tooldata):
context = tooldata.get('context')
config = tooldata.get('config')
awsclient = context.get('_awsclient')
exit_code = delete_api(
awsclient=awsclient,
api_name=config['api'].get('name')
)
return exit_code
@cmd(spec=['export'])
def export_cmd(**tooldata):
context = tooldata.get('context')
config = tooldata.get('config')
awsclient = context.get('_awsclient')
api_name = config['api'].get('name')
target_stage = config['api'].get('targetStage')
api_description = config['api'].get('description')
lambdas = get_lambdas(awsclient, config, add_arn=True)
return export_to_swagger(
awsclient=awsclient,
api_name=api_name,
stage_name=target_stage,
api_description=api_description,
lambdas=lambdas,
custom_hostname=(config['customDomain'].get('domainName')
if 'customDomain' in config else False),
custom_base_path=(config['customDomain'].get('basePath')
if 'customDomain' in config else False)
)
@cmd(spec=['apikey-create', '<keyname>'])
def apikey_create_cmd(keyname, **tooldata):
context = tooldata.get('context')
config = tooldata.get('config')
awsclient = context.get('_awsclient')
api_name = config['api'].get('name')
create_api_key(awsclient, api_name, keyname)
@cmd(spec=['apikey-delete'])
def apikey_delete_cmd(**tooldata):
context = tooldata.get('context')
config = tooldata.get('config')
awsclient = context.get('_awsclient')
api_key = config['api'].get('apiKey')
delete_api_key(awsclient, api_key)
@cmd(spec=['apikey-list'])
def apikey_list_cmd(**tooldata):
context = tooldata.get('context')
awsclient = context.get('_awsclient')
list_api_keys(awsclient)
@cmd(spec=['custom-domain-create'])
def custom_domain_create_cmd(**tooldata):
context = tooldata.get('context')
config = tooldata.get('config')
awsclient = context.get('_awsclient')
api_name = config['api'].get('name')
api_target_stage = config['api'].get('targetStage')
domain_name = config['customDomain'].get('domainName')
route_53_record = config['customDomain'].get('route53Record')
api_base_path = config['customDomain'].get('basePath')
#ssl_cert = {
# 'name': config['customDomain'].get('certificateName'),
# 'body': config['customDomain'].get('certificateBody'),
# 'private_key': config['customDomain'].get('certificatePrivateKey'),
# 'chain': config['customDomain'].get('certificateChain')
#}
cert_name = config['customDomain'].get('certificateName')
cert_arn = config['customDomain'].get('certificateArn')
hosted_zone_id = config['customDomain'].get('hostedDomainZoneId')
ensure_cname = config['customDomain'].get('ensureCname', True)
return deploy_custom_domain(
awsclient=awsclient,
api_name=api_name,
api_target_stage=api_target_stage,
api_base_path=api_base_path,
domain_name=domain_name,
route_53_record=route_53_record,
cert_name=cert_name,
cert_arn=cert_arn,
hosted_zone_id=hosted_zone_id,
ensure_cname=ensure_cname,
)
def main():
sys.exit(gcdt_lifecycle.main(
DOC, 'yugen', dispatch_only=['version', 'clean']))
if __name__ == '__main__':
main()
|
{
"content_hash": "cd975e1c27a5b99162703122ccc9cce5",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 80,
"avg_line_length": 33.15841584158416,
"alnum_prop": 0.6295909226634816,
"repo_name": "glomex/gcdt",
"id": "b0c4b01806ae814a4b9b9fc6a8651ca585719fc5",
"size": "6744",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "gcdt/yugen_main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "368"
},
{
"name": "Groovy",
"bytes": "7494"
},
{
"name": "HTML",
"bytes": "102"
},
{
"name": "JavaScript",
"bytes": "1723"
},
{
"name": "Python",
"bytes": "509123"
},
{
"name": "Shell",
"bytes": "10192"
},
{
"name": "Smarty",
"bytes": "271"
}
],
"symlink_target": ""
}
|
from rally.benchmark import context
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils as rutils
from rally import consts
from rally import osclients
from rally.plugins.openstack.context.cleanup import manager as resource_manager
from rally.plugins.openstack.scenarios.glance import utils as glance_utils
LOG = logging.getLogger(__name__)
@context.context(name="images", order=410)
class ImageGenerator(context.Context):
"""Context class for adding images to each user for benchmarks."""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"image_url": {
"type": "string",
},
"image_type": {
"enum": ["qcow2", "raw", "vhd", "vmdk", "vdi", "iso", "aki",
"ari", "ami"],
},
"image_container": {
"type": "string",
},
"image_name": {
"type": "string",
},
"min_ram": { # megabytes
"type": "integer",
"minimum": 0
},
"min_disk": { # gigabytes
"type": "integer",
"minimum": 0
},
"images_per_tenant": {
"type": "integer",
"minimum": 1
},
},
"required": ["image_url", "image_type", "image_container",
"images_per_tenant"],
"additionalProperties": False
}
def __init__(self, ctx):
super(ImageGenerator, self).__init__(ctx)
@rutils.log_task_wrapper(LOG.info, _("Enter context: `Images`"))
def setup(self):
image_url = self.config["image_url"]
image_type = self.config["image_type"]
image_container = self.config["image_container"]
images_per_tenant = self.config["images_per_tenant"]
image_name = self.config.get("image_name")
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
current_images = []
clients = osclients.Clients(user["endpoint"])
glance_scenario = glance_utils.GlanceScenario(
clients=clients)
for i in range(images_per_tenant):
if image_name and i > 0:
cur_name = image_name + str(i)
elif image_name:
cur_name = image_name
else:
cur_name = None
image = glance_scenario._create_image(
image_container, image_url, image_type,
name=cur_name, prefix="rally_ctx_image_",
min_ram=self.config.get("min_ram", 0),
min_disk=self.config.get("min_disk", 0))
current_images.append(image.id)
self.context["tenants"][tenant_id]["images"] = current_images
@rutils.log_task_wrapper(LOG.info, _("Exit context: `Images`"))
def cleanup(self):
# TODO(boris-42): Delete only resources created by this context
resource_manager.cleanup(names=["glance.images"],
users=self.context.get("users", []))
|
{
"content_hash": "0687b81aef037c1e1175963fbc5eec6d",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 35.91208791208791,
"alnum_prop": 0.5180538555691554,
"repo_name": "vponomaryov/rally",
"id": "90a18f9582827c040af29b12631d9ab977ef6f6b",
"size": "3834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rally/plugins/openstack/context/images.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "46737"
},
{
"name": "Python",
"bytes": "2367891"
},
{
"name": "Shell",
"bytes": "35878"
}
],
"symlink_target": ""
}
|
import cffi
ffi = cffi.FFI()
ffi.set_source('orcsome._ev', "#include <ev.h>", libraries=['ev'])
ffi.cdef("""
#define EVBACKEND_SELECT ...
#define EV_READ ...
#define EV_WRITE ...
#define EVBREAK_ALL ...
typedef ... ev_loop;
struct ev_loop *ev_loop_new (unsigned int flags);
void ev_loop_destroy (struct ev_loop*);
void ev_break (struct ev_loop*, int);
int ev_run (struct ev_loop*, int);
typedef struct { ...; } ev_io;
typedef void (*io_cb) (struct ev_loop*, ev_io*, int);
void ev_io_init(ev_io*, io_cb, int, int);
void ev_io_start(struct ev_loop*, ev_io*);
void ev_io_stop(struct ev_loop*, ev_io*);
typedef struct { ...; } ev_signal;
typedef void (*signal_cb) (struct ev_loop*, ev_signal*, int);
void ev_signal_init(ev_signal*, signal_cb, int);
void ev_signal_start(struct ev_loop*, ev_signal*);
void ev_signal_stop(struct ev_loop*, ev_signal*);
typedef double ev_tstamp;
typedef struct { ...; } ev_timer;
typedef void (*timer_cb) (struct ev_loop*, ev_timer*, int);
void ev_timer_init(ev_timer*, timer_cb, ev_tstamp, ev_tstamp);
void ev_timer_set(ev_timer*, ev_tstamp, ev_tstamp);
void ev_timer_start(struct ev_loop*, ev_timer*);
void ev_timer_again(struct ev_loop*, ev_timer*);
void ev_timer_stop(struct ev_loop*, ev_timer*);
ev_tstamp ev_timer_remaining(struct ev_loop*, ev_timer*);
""")
if __name__ == "__main__":
ffi.compile(verbose=True)
|
{
"content_hash": "973de36c00e72f64a7002b429a2fccc6",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 66,
"avg_line_length": 30.818181818181817,
"alnum_prop": 0.6674041297935103,
"repo_name": "baverman/orcsome",
"id": "e006c8fe8e508c290f4b0c3c37fb22b3b9da2125",
"size": "1356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcsome/ev_build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60293"
},
{
"name": "Shell",
"bytes": "1502"
}
],
"symlink_target": ""
}
|
class SkipRope:
def partners(self, candidates, height):
first = (-1, 200)
second = (-1, 200)
for idx, person in enumerate(candidates):
diff = abs(person - height)
if diff < abs(first[1] - height):
first = (idx, person)
elif diff == abs(first[1] - height) and person > first[1]:
first = (idx, person)
for idx, person in enumerate(candidates):
if(idx != first[0]):
diff = abs(person - height)
old_diff = abs(second[1] - height)
if diff < old_diff:
second = (idx, person)
elif diff == abs(second[1] - height) and person > second[1]:
second = (idx, person)
closest = (first[1], second[1])
closest = sorted(closest)
return closest
|
{
"content_hash": "c9b4fefd76349911e66cdb8227849505",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 76,
"avg_line_length": 31.4,
"alnum_prop": 0.45010615711252655,
"repo_name": "mikefeneley/topcoder",
"id": "88e5f9182f5164898c411583002e87c9c95f8d37",
"size": "943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/SRM-172/skip_rope.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53468"
}
],
"symlink_target": ""
}
|
import asyncio
import asyncio.events
import functools
import inspect
import os
import re
import sys
import threading
from contextlib import contextmanager
from glob import has_magic
from .callbacks import _DEFAULT_CALLBACK
from .exceptions import FSTimeoutError
from .spec import AbstractFileSystem
from .utils import PY36, is_exception, other_paths
private = re.compile("_[^_]")
async def _runner(event, coro, result, timeout=None):
timeout = timeout if timeout else None # convert 0 or 0.0 to None
if timeout is not None:
coro = asyncio.wait_for(coro, timeout=timeout)
try:
result[0] = await coro
except Exception as ex:
result[0] = ex
finally:
event.set()
if PY36:
grl = asyncio.events._get_running_loop
else:
grl = asyncio.events.get_running_loop
def sync(loop, func, *args, timeout=None, **kwargs):
"""
    Make the given loop run the coroutine until it returns. The loop runs in another thread.
"""
timeout = timeout if timeout else None # convert 0 or 0.0 to None
# NB: if the loop is not running *yet*, it is OK to submit work
# and we will wait for it
if loop is None or loop.is_closed():
raise RuntimeError("Loop is not running")
try:
loop0 = grl()
if loop0 is loop:
raise NotImplementedError("Calling sync() from within a running loop")
except RuntimeError:
pass
coro = func(*args, **kwargs)
result = [None]
event = threading.Event()
asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
while True:
        # this loop allows the thread to be interrupted
if event.wait(1):
break
if timeout is not None:
timeout -= 1
if timeout < 0:
raise FSTimeoutError
return_result = result[0]
if isinstance(return_result, asyncio.TimeoutError):
# suppress asyncio.TimeoutError, raise FSTimeoutError
raise FSTimeoutError from return_result
elif isinstance(return_result, BaseException):
raise return_result
else:
return return_result
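# Illustrative usage sketch (``fetch_bytes`` is a hypothetical coroutine function,
# not part of this module). ``sync`` blocks the calling thread until the coroutine
# has finished on the dedicated IO loop:
#
#     loop = get_loop()
#     data = sync(loop, fetch_bytes, "http://example.com/file", timeout=10)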
iothread = [None] # dedicated fsspec IO thread
loop = [None] # global event loop for any non-async instance
lock = threading.Lock() # for setting exactly one thread
def sync_wrapper(func, obj=None):
"""Given a function, make so can be called in async or blocking contexts
Leave obj=None if defining within a class. Pass the instance if attaching
as an attribute of the instance.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = obj or args[0]
return sync(self.loop, func, *args, **kwargs)
return wrapper
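# Illustrative usage sketch (hypothetical class): build a blocking ``cat_file``
# from an async ``_cat_file`` coroutine on a per-instance basis:
#
#     class MyFS:
#         def __init__(self):
#             self.loop = get_loop()
#             self.cat_file = sync_wrapper(self._cat_file, obj=self)
#
#         async def _cat_file(self, path):
#             ...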
@contextmanager
def _selector_policy():
original_policy = asyncio.get_event_loop_policy()
try:
if (
sys.version_info >= (3, 8)
and os.name == "nt"
and hasattr(asyncio, "WindowsSelectorEventLoopPolicy")
):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
yield
finally:
asyncio.set_event_loop_policy(original_policy)
def get_running_loop():
if hasattr(asyncio, "get_running_loop"):
return asyncio.get_running_loop()
else:
loop = asyncio._get_running_loop()
if loop is None:
raise RuntimeError("no running event loop")
else:
return loop
def get_loop():
"""Create or return the default fsspec IO loop
The loop will be running on a separate thread.
"""
if loop[0] is None:
with lock:
# repeat the check just in case the loop got filled between the
# previous two calls from another thread
if loop[0] is None:
with _selector_policy():
loop[0] = asyncio.new_event_loop()
th = threading.Thread(target=loop[0].run_forever, name="fsspecIO")
th.daemon = True
th.start()
iothread[0] = th
return loop[0]
@contextmanager
def fsspec_loop():
"""Temporarily switch the current event loop to the fsspec's
own loop, and then revert it back after the context gets
terinated.
"""
try:
original_loop = get_running_loop()
except RuntimeError:
original_loop = None
fsspec_loop = get_loop()
try:
asyncio._set_running_loop(fsspec_loop)
yield fsspec_loop
finally:
asyncio._set_running_loop(original_loop)
try:
import resource
except ImportError:
resource = None
ResourceError = OSError
else:
    ResourceError = resource.error
_DEFAULT_BATCH_SIZE = 128
def _get_batch_size():
from fsspec.config import conf
if "gather_batch_size" in conf:
return conf["gather_batch_size"]
if resource is None:
return _DEFAULT_BATCH_SIZE
try:
soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
except (ImportError, ValueError, ResourceError):
return _DEFAULT_BATCH_SIZE
if soft_limit == resource.RLIM_INFINITY:
return -1
else:
return soft_limit // 8
async def _run_coros_in_chunks(
coros, batch_size=None, callback=_DEFAULT_CALLBACK, timeout=None
):
"""Run the given coroutines in smaller chunks to
not crossing the file descriptor limit.
If batch_size parameter is -1, then it will not be any throttling. If
it is none, it will be inferred from the process resources (soft limit divided
by 8) and fallback to 128 if the system doesn't support it."""
if batch_size is None:
batch_size = _get_batch_size()
if batch_size == -1:
batch_size = len(coros)
assert batch_size > 0
results = []
for start in range(0, len(coros), batch_size):
chunk = coros[start : start + batch_size]
for coro in asyncio.as_completed(chunk, timeout=timeout):
results.append(await coro)
callback.call("relative_update", 1)
return results
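# Illustrative only: the batch size used by _run_coros_in_chunks can be pinned
# globally through fsspec's configuration instead of being derived from
# RLIMIT_NOFILE (the value 64 below is a hypothetical choice):
#
#     import fsspec.config
#     fsspec.config.conf["gather_batch_size"] = 64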
# these methods should be implemented as async by any async-able backend
async_methods = [
"_ls",
"_cat_file",
"_get_file",
"_put_file",
"_rm_file",
"_cp_file",
"_pipe_file",
"_expand_path",
"_info",
"_isfile",
"_isdir",
"_exists",
"_walk",
"_glob",
"_find",
"_du",
"_size",
"_mkdir",
"_makedirs",
]
class AsyncFileSystem(AbstractFileSystem):
"""Async file operations, default implementations
Passes bulk operations to asyncio.gather for concurrent operation.
Implementations that have concurrent batch operations and/or async methods
should inherit from this class instead of AbstractFileSystem. Docstrings are
copied from the un-underscored method in AbstractFileSystem, if not given.
"""
# note that methods do not have docstring here; they will be copied
# for _* methods and inferred for overridden methods.
async_impl = True
disable_throttling = False
def __init__(self, *args, asynchronous=False, loop=None, **kwargs):
self.asynchronous = asynchronous
self._pid = os.getpid()
if not asynchronous:
self._loop = loop or get_loop()
else:
self._loop = None
self.batch_size = kwargs.pop("batch_size", None)
super().__init__(*args, **kwargs)
@property
def loop(self):
if self._pid != os.getpid():
raise RuntimeError("This class is not fork-safe")
return self._loop
async def _rm_file(self, path, **kwargs):
raise NotImplementedError
async def _rm(self, path, recursive=False, **kwargs):
# TODO: implement on_error
path = await self._expand_path(path, recursive=recursive)
await asyncio.gather(*[self._rm_file(p, **kwargs) for p in path])
async def _copy(
self, path1, path2, recursive=False, on_error=None, maxdepth=None, **kwargs
):
if on_error is None and recursive:
on_error = "ignore"
elif on_error is None:
on_error = "raise"
paths = await self._expand_path(path1, maxdepth=maxdepth, recursive=recursive)
path2 = other_paths(paths, path2)
result = await asyncio.gather(
*[self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths, path2)],
return_exceptions=True,
)
for ex in filter(is_exception, result):
if on_error == "ignore" and isinstance(ex, FileNotFoundError):
continue
raise ex
async def _pipe(self, path, value=None, **kwargs):
if isinstance(path, str):
path = {path: value}
await asyncio.gather(
*[self._pipe_file(k, v, **kwargs) for k, v in path.items()]
)
async def _process_limits(self, url, start, end):
"""Helper for "Range"-based _cat_file"""
size = None
suff = False
if start is not None and start < 0:
# if start is negative and end None, end is the "suffix length"
if end is None:
end = -start
start = ""
suff = True
else:
size = size or (await self._info(url))["size"]
start = size + start
elif start is None:
start = 0
if not suff:
if end is not None and end < 0:
if start is not None:
size = size or (await self._info(url))["size"]
end = size + end
elif end is None:
end = ""
if isinstance(end, int):
end -= 1 # bytes range is inclusive
return "bytes=%s-%s" % (start, end)
async def _cat_file(self, path, start=None, end=None, **kwargs):
raise NotImplementedError
async def _cat(self, path, recursive=False, on_error="raise", **kwargs):
paths = await self._expand_path(path, recursive=recursive)
out = await asyncio.gather(
*[self._cat_file(path, **kwargs) for path in paths],
return_exceptions=True,
)
if on_error == "raise":
ex = next(filter(is_exception, out), False)
if ex:
raise ex
if (
len(paths) > 1
or isinstance(path, list)
or paths[0] != self._strip_protocol(path)
):
return {
k: v
for k, v in zip(paths, out)
if on_error != "omit" or not is_exception(v)
}
else:
return out[0]
async def _cat_ranges(self, paths, starts, ends, max_gap=None, **kwargs):
# TODO: on_error
if max_gap is not None:
# to be implemented in utils
raise NotImplementedError
if not isinstance(paths, list):
raise TypeError
if not isinstance(starts, list):
starts = [starts] * len(paths)
if not isinstance(ends, list):
            ends = [ends] * len(paths)
if len(starts) != len(paths) or len(ends) != len(paths):
raise ValueError
return await asyncio.gather(
*[
self._cat_file(p, start=s, end=e, **kwargs)
for p, s, e in zip(paths, starts, ends)
]
)
async def _put(
self, lpath, rpath, recursive=False, callback=_DEFAULT_CALLBACK, **kwargs
):
"""Copy file(s) from local.
Copies a specific file or tree of files (if recursive=True). If rpath
ends with a "/", it will be assumed to be a directory, and target files
will go within.
The put_file method will be called concurrently on a batch of files. The
batch_size option can configure the amount of futures that can be executed
at the same time. If it is -1, then all the files will be uploaded concurrently.
The default can be set for this instance by passing "batch_size" in the
constructor, or for all instances by setting the "gather_batch_size" key
        in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
"""
from .implementations.local import LocalFileSystem, make_path_posix
rpath = self._strip_protocol(rpath)
if isinstance(lpath, str):
lpath = make_path_posix(lpath)
fs = LocalFileSystem()
lpaths = fs.expand_path(lpath, recursive=recursive)
rpaths = other_paths(
lpaths, rpath, exists=isinstance(rpath, str) and await self._isdir(rpath)
)
is_dir = {l: os.path.isdir(l) for l in lpaths}
rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]]
file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]]
await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs])
batch_size = kwargs.pop("batch_size", self.batch_size)
coros = []
callback.call("set_size", len(file_pairs))
for lfile, rfile in file_pairs:
callback.branch(lfile, rfile, kwargs)
coros.append(self._put_file(lfile, rfile, **kwargs))
return await _run_coros_in_chunks(
coros, batch_size=batch_size, callback=callback
)
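    # Illustrative usage sketch (not part of this module; ``fs`` is a hypothetical
    # AsyncFileSystem instance and the paths are made up): the per-call batch size
    # read via ``kwargs.pop("batch_size", ...)`` above can be overridden like so:
    #
    #     await fs._put("/tmp/data", "bucket/data/", recursive=True, batch_size=16)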
async def _get_file(self, rpath, lpath, **kwargs):
raise NotImplementedError
async def _get(
self, rpath, lpath, recursive=False, callback=_DEFAULT_CALLBACK, **kwargs
):
"""Copy file(s) to local.
Copies a specific file or tree of files (if recursive=True). If lpath
ends with a "/", it will be assumed to be a directory, and target files
will go within. Can submit a list of paths, which may be glob-patterns
and will be expanded.
The get_file method will be called concurrently on a batch of files. The
batch_size option can configure the amount of futures that can be executed
at the same time. If it is -1, then all the files will be uploaded concurrently.
The default can be set for this instance by passing "batch_size" in the
constructor, or for all instances by setting the "gather_batch_size" key
in ``fsspec.config.conf``, falling back to 1/8th of the system limit .
"""
from fsspec.implementations.local import make_path_posix
rpath = self._strip_protocol(rpath)
lpath = make_path_posix(lpath)
rpaths = await self._expand_path(rpath, recursive=recursive)
lpaths = other_paths(rpaths, lpath)
[os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths]
batch_size = kwargs.pop("batch_size", self.batch_size)
coros = []
callback.lazy_call("set_size", len, lpaths)
for lpath, rpath in zip(lpaths, rpaths):
callback.branch(rpath, lpath, kwargs)
coros.append(self._get_file(rpath, lpath, **kwargs))
return await _run_coros_in_chunks(
coros, batch_size=batch_size, callback=callback
)
async def _isfile(self, path):
try:
return (await self._info(path))["type"] == "file"
except: # noqa: E722
return False
async def _isdir(self, path):
try:
return (await self._info(path))["type"] == "directory"
except IOError:
return False
async def _size(self, path):
return (await self._info(path)).get("size", None)
async def _sizes(self, paths):
return await asyncio.gather(*[self._size(p) for p in paths])
async def _exists(self, path):
try:
await self._info(path)
return True
except FileNotFoundError:
return False
async def _info(self, path, **kwargs):
raise NotImplementedError
async def _ls(self, path, **kwargs):
raise NotImplementedError
async def _walk(self, path, maxdepth=None, **kwargs):
path = self._strip_protocol(path)
full_dirs = {}
dirs = {}
files = {}
detail = kwargs.pop("detail", False)
try:
listing = await self._ls(path, detail=True, **kwargs)
except (FileNotFoundError, IOError):
if detail:
yield path, {}, {}
else:
yield path, [], []
return
for info in listing:
            # each info name must be at least [path]/part, but here
# we check also for names like [path]/part/
pathname = info["name"].rstrip("/")
name = pathname.rsplit("/", 1)[-1]
if info["type"] == "directory" and pathname != path:
# do not include "self" path
full_dirs[pathname] = info
dirs[name] = info
elif pathname == path:
                # file-like with same name as given path
files[""] = info
else:
files[name] = info
if detail:
yield path, dirs, files
else:
yield path, list(dirs), list(files)
if maxdepth is not None:
maxdepth -= 1
if maxdepth < 1:
return
for d in full_dirs:
async for _ in self._walk(d, maxdepth=maxdepth, detail=detail, **kwargs):
yield _
async def _glob(self, path, **kwargs):
import re
ends = path.endswith("/")
path = self._strip_protocol(path)
indstar = path.find("*") if path.find("*") >= 0 else len(path)
indques = path.find("?") if path.find("?") >= 0 else len(path)
indbrace = path.find("[") if path.find("[") >= 0 else len(path)
ind = min(indstar, indques, indbrace)
detail = kwargs.pop("detail", False)
if not has_magic(path):
root = path
depth = 1
if ends:
path += "/*"
elif await self._exists(path):
if not detail:
return [path]
else:
return {path: await self._info(path)}
else:
if not detail:
return [] # glob of non-existent returns empty
else:
return {}
elif "/" in path[:ind]:
ind2 = path[:ind].rindex("/")
root = path[: ind2 + 1]
depth = None if "**" in path else path[ind2 + 1 :].count("/") + 1
else:
root = ""
depth = None if "**" in path else path[ind + 1 :].count("/") + 1
allpaths = await self._find(
root, maxdepth=depth, withdirs=True, detail=True, **kwargs
)
# Escape characters special to python regex, leaving our supported
# special characters in place.
# See https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html
# for shell globbing details.
pattern = (
"^"
+ (
path.replace("\\", r"\\")
.replace(".", r"\.")
.replace("+", r"\+")
.replace("//", "/")
.replace("(", r"\(")
.replace(")", r"\)")
.replace("|", r"\|")
.replace("^", r"\^")
.replace("$", r"\$")
.replace("{", r"\{")
.replace("}", r"\}")
.rstrip("/")
.replace("?", ".")
)
+ "$"
)
pattern = re.sub("[*]{2}", "=PLACEHOLDER=", pattern)
pattern = re.sub("[*]", "[^/]*", pattern)
pattern = re.compile(pattern.replace("=PLACEHOLDER=", ".*"))
out = {
p: allpaths[p]
for p in sorted(allpaths)
if pattern.match(p.replace("//", "/").rstrip("/"))
}
if detail:
return out
else:
return list(out)
async def _du(self, path, total=True, maxdepth=None, **kwargs):
sizes = {}
# async for?
for f in await self._find(path, maxdepth=maxdepth, **kwargs):
info = await self._info(f)
sizes[info["name"]] = info["size"]
if total:
return sum(sizes.values())
else:
return sizes
async def _find(self, path, maxdepth=None, withdirs=False, **kwargs):
path = self._strip_protocol(path)
out = dict()
detail = kwargs.pop("detail", False)
# async for?
async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs):
if withdirs:
files.update(dirs)
out.update({info["name"]: info for name, info in files.items()})
if not out and (await self._isfile(path)):
# walk works on directories, but find should also return [path]
# when path happens to be a file
out[path] = {}
names = sorted(out)
if not detail:
return names
else:
return {name: out[name] for name in names}
async def _expand_path(self, path, recursive=False, maxdepth=None):
if isinstance(path, str):
out = await self._expand_path([path], recursive, maxdepth)
else:
# reduce depth on each recursion level unless None or 0
maxdepth = maxdepth if not maxdepth else maxdepth - 1
out = set()
path = [self._strip_protocol(p) for p in path]
for p in path: # can gather here
if has_magic(p):
bit = set(await self._glob(p))
out |= bit
if recursive:
out |= set(
await self._expand_path(
list(bit), recursive=recursive, maxdepth=maxdepth
)
)
continue
elif recursive:
rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True))
out |= rec
if p not in out and (recursive is False or (await self._exists(p))):
# should only check once, for the root
out.add(p)
if not out:
raise FileNotFoundError(path)
return list(sorted(out))
async def _mkdir(self, path, create_parents=True, **kwargs):
pass # not necessary to implement, may not have directories
async def _makedirs(self, path, exist_ok=False):
pass # not necessary to implement, may not have directories
def mirror_sync_methods(obj):
"""Populate sync and async methods for obj
    For each method, this will create a sync version if the name refers to an async method
    (coroutine) and there is no override in the child class; it will create an async
    method for the corresponding sync method if there is no implementation.
Uses the methods specified in
- async_methods: the set that an implementation is expected to provide
- default_async_methods: that can be derived from their sync version in
AbstractFileSystem
- AsyncFileSystem: async-specific default coroutines
"""
from fsspec import AbstractFileSystem
for method in async_methods + dir(AsyncFileSystem):
if not method.startswith("_"):
continue
smethod = method[1:]
if private.match(method):
isco = inspect.iscoroutinefunction(getattr(obj, method, None))
unsync = getattr(getattr(obj, smethod, False), "__func__", None)
is_default = unsync is getattr(AbstractFileSystem, smethod, "")
if isco and is_default:
mth = sync_wrapper(getattr(obj, method), obj=obj)
setattr(obj, smethod, mth)
if not mth.__doc__:
mth.__doc__ = getattr(
getattr(AbstractFileSystem, smethod, None), "__doc__", ""
)
class FSSpecCoroutineCancel(Exception):
pass
def _dump_running_tasks(
printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False
):
import traceback
if PY36:
raise NotImplementedError("Do not call this on Py 3.6")
tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()]
if printout:
[task.print_stack() for task in tasks]
out = [
{
"locals": task._coro.cr_frame.f_locals,
"file": task._coro.cr_frame.f_code.co_filename,
"firstline": task._coro.cr_frame.f_code.co_firstlineno,
"linelo": task._coro.cr_frame.f_lineno,
"stack": traceback.format_stack(task._coro.cr_frame),
"task": task if with_task else None,
}
for task in tasks
]
if cancel:
for t in tasks:
cbs = t._callbacks
t.cancel()
asyncio.futures.Future.set_exception(t, exc)
asyncio.futures.Future.cancel(t)
[cb[0](t) for cb in cbs] # cancels any dependent concurrent.futures
try:
t._coro.throw(exc) # exits coro, unless explicitly handled
except exc:
pass
return out
|
{
"content_hash": "76874b66c93bb9495c4f7aeebbc4e957",
"timestamp": "",
"source": "github",
"line_count": 749,
"max_line_length": 88,
"avg_line_length": 33.58077436582109,
"alnum_prop": 0.563295165394402,
"repo_name": "intake/filesystem_spec",
"id": "01c949eeddefab7ee05c796295854515e78ef86d",
"size": "25152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fsspec/asyn.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "526138"
}
],
"symlink_target": ""
}
|
import sys
import os
import utils
def init():
path = os.getcwd()
text = utils.openFile(path + '/' + sys.argv[1])
if text is not None:
text = utils.stripWhiteSpace(text)
if len(sys.argv) > 2:
subLength = int(sys.argv[2])
else:
subLength = None
result = getTwinIndex(text, subLength)
print "Double Count: %s\nText Length: %s\nTwin Index: %s" % (result['dbl'], result['len'], result['ti'])
def getTwinIndex(text, subLength=None):
'''Calculate the Twin Index by counting double letters in the text.
    Pass a length integer as the second parameter to work only with a substring.
'''
if subLength is not None:
text = text[:subLength]
# Copy the text string and shift forward by 1
text2 = text[1:]
textLength = len(text2)
doubles = 0
# Compare the copies at the same index to find doubles
for i, c in enumerate(text2):
if text[i] == c:
doubles += 1
# Cast all results to floats
twinIndex = float(doubles) / float(textLength) * 26.0
return {'len': textLength, 'dbl': doubles, 'ti': twinIndex}
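# Worked example (illustrative): for text = 'aabb' the shifted copy is 'abb',
# so 2 doubles are found over a compared length of 3 and the Twin Index is
# 2 / 3 * 26, roughly 17.33.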
if __name__ == "__main__":
init()
|
{
"content_hash": "73ef48f457dc9f3ae13b539e1501f5d6",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 112,
"avg_line_length": 30.307692307692307,
"alnum_prop": 0.6057529610829103,
"repo_name": "appsol/crypto",
"id": "73d6eb2b78e22266b82f47364adb772a3f4f9b46",
"size": "1201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/twins.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7579"
}
],
"symlink_target": ""
}
|
from django.core.management import call_command
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from api.councils import CouncilViewSet, CouncilCSVViewSet
from councils.tests.factories import CouncilFactory
class CouncilsTest(TestCase):
@classmethod
def setUpTestData(cls):
CouncilFactory(
council_id="ABC",
name="ABC Council",
electoral_services_email="",
electoral_services_phone_numbers=[""],
electoral_services_website="",
electoral_services_postcode="",
electoral_services_address="",
identifiers=["X01000001", "E06000001"],
geography__geography="MULTIPOLYGON (((-2.83447265625 53.64203274279828,1.549072265625 53.64203274279828,1.549072265625 52.52691653862567,-2.83447265625 52.52691653862567,-2.83447265625 53.64203274279828)))",
)
CouncilFactory(
council_id="DEF",
identifiers=["X01000002"],
geography__geography=None,
)
CouncilFactory(
council_id="GHI",
identifiers=["X01000002"],
geography__geography=None,
)
call_command( # Hack to avoid converting all fixtures to factories
"loaddata",
"polling_stations/apps/api/fixtures/test_api_pollingdistricts_stations.json",
verbosity=0,
)
def setUp(self):
factory = APIRequestFactory()
self.request = factory.get("/foo", format="json")
def test_list(self):
response = CouncilViewSet.as_view({"get": "list"})(self.request)
self.assertEqual(200, response.status_code)
self.assertEqual(3, len(response.data))
def test_valid_council(self):
response = CouncilViewSet.as_view({"get": "retrieve"})(self.request, pk="ABC")
self.assertEqual(200, response.status_code)
self.assertEqual("ABC", response.data["council_id"])
self.assertEqual("England", response.data["nation"])
def test_bad_council(self):
response = CouncilViewSet.as_view({"get": "retrieve"})(self.request, pk="FOO")
# should return 404 if council does not exist
self.assertEqual(404, response.status_code)
def test_geo(self):
geo_response = CouncilViewSet.as_view({"get": "geo"})(self.request, pk="ABC")
response = CouncilViewSet.as_view({"get": "retrieve"})(self.request, pk="ABC")
# geo_response should contain geometry
self.assertEqual(True, ("geometry" in geo_response.data))
self.assertEqual("MultiPolygon", geo_response.data["geometry"]["type"])
# (non-geo) response should not contain geometry
self.assertEqual(True, ("geometry" not in response.data))
self.assertEqual(response.data["name"], geo_response.data["properties"]["name"])
def test_null_area(self):
response = CouncilViewSet.as_view({"get": "geo"})(self.request, pk="DEF")
self.assertEqual(None, response.data["geometry"])
def test_redirect_from_identifier(self):
"""
Check that a non-PK ID that is a valid identifier is redirected to
the canonical URL for that instance.
:return:
"""
response = CouncilViewSet.as_view({"get": "retrieve"})(
self.request, pk="X01000001"
)
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "http://testserver/api/beta/councils/ABC/")
def test_identifiers_in_api_response(self):
response = CouncilViewSet.as_view({"get": "retrieve"})(self.request, pk="ABC")
self.assertDictEqual(
response.data,
{
"address": "",
"council_id": "ABC",
"email": "",
"identifiers": ["X01000001", "E06000001"],
"name": "ABC Council",
"nation": "England",
"phone": "",
"postcode": "",
"url": "http://testserver/api/beta/councils/ABC/",
"website": "",
"registration_contacts": None,
"electoral_services_contacts": {
"email": "",
"phone_numbers": [""],
"address": "",
"postcode": "",
"website": "",
},
},
)
def test_council_csv_endpoint(self):
with self.assertNumQueries(1):
response = CouncilCSVViewSet.as_view({"get": "list"})(
APIRequestFactory().get("/api/beta/council_csv/", format="csv")
)
self.assertEqual(response.status_code, 200)
response.render()
self.assertEqual(
response.content.decode(),
"council_id,name,station_count\r\n"
"ABC,ABC Council,2\r\n"
"DEF,,1\r\n"
"GHI,,0\r\n",
)
|
{
"content_hash": "2a7d8052d62212dcc543306bcb8c4b57",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 219,
"avg_line_length": 39.52,
"alnum_prop": 0.5759109311740891,
"repo_name": "DemocracyClub/UK-Polling-Stations",
"id": "695943b0fbab5ccafe6368344e64f30cbb5453cd",
"size": "4940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polling_stations/apps/api/tests/test_councils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "32"
},
{
"name": "HTML",
"bytes": "85540"
},
{
"name": "JavaScript",
"bytes": "3399"
},
{
"name": "Procfile",
"bytes": "49"
},
{
"name": "Python",
"bytes": "1111337"
},
{
"name": "SCSS",
"bytes": "5742"
}
],
"symlink_target": ""
}
|
print 'Hello world'
print 'The quick brown fox', 'jumps over', 'the lazy dog'
print '1 + 1 =', 1 + 1
print 'what\'s your name?'
name = raw_input()
print 'Hello', name
|
{
"content_hash": "8a206f91e5099c86ac3c6c47d5b8af02",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 57,
"avg_line_length": 18.88888888888889,
"alnum_prop": 0.6352941176470588,
"repo_name": "WellerQu/LearnPython",
"id": "7dba9ab98ba7841a036f51e13fdfcabc821603bb",
"size": "193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lesson1/main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3642"
},
{
"name": "Python",
"bytes": "12418"
}
],
"symlink_target": ""
}
|
"""
This module is a functional test of the game_server and game_client
interactions. A game server is spun up in a separate process space and bound
to localhost on port 5000. The game client is then used to interact with
the game server.
"""
import unittest
from multiprocessing.process import Process
from clueless.client import errors
from clueless.client.game_play import GameClient
from clueless import log
from clueless.model import game_state
from clueless.server.app import start_server
import time
_LOG = log.get_logger(__name__)
class WhenFunctionalTestingGameClient(unittest.TestCase):
def setUp(self):
        # set up the game server to run in a separate process
self.game_server = Process(target=start_server)
self.game_server.start()
#create the game client
self.client = GameClient(host="127.0.0.1", port="5000")
self.player_one = "Arthur"
self.player_one_suspect = game_state.PEACOCK
self.player_two = "Steven"
self.player_two_suspect = game_state.PLUM
def test_game_client(self):
try:
#give the game server process a chance to start
time.sleep(3)
#test registering players and choosing suspects
self.client.register_player(self.player_one)
self.client.choose_suspect(
self.player_one, self.player_one_suspect)
self.client.register_player(
self.player_two)
self.client.choose_suspect(
self.player_two, self.player_two_suspect)
            #retrieve the registered players with the client and validate the
#return values
players = self.client.get_players()
for player in players:
self.assertIsInstance(player, game_state.Player)
self.assertTrue(
self.player_one in [player.username
for player in players])
self.assertTrue(
self.player_two in [player.username
for player in players])
self.assertTrue(
self.player_one_suspect in [player.suspect
for player in players])
self.assertTrue(
self.player_two_suspect in [player.suspect
for player in players])
#start a new game with the client and validate a GameState object
#is returned
game = self.client.start_new_game()
            self.assertIsInstance(game, game_state.GameState)
game = self.client.get_game_state(game.game_id)
            self.assertIsInstance(game, game_state.GameState)
#move player 1 from start space to hallway
player = game.current_player
player_1_current_space = game.game_board[player.suspect]
move_space = player_1_current_space.connected_spaces[0]
game = self.client.move_player(
player.username, player.suspect, move_space)
self.assertEqual(
game.turn_status, game_state.AWAITING_ACCUSATION_OR_END_TURN)
game = self.client.end_turn(player.username)
player_1_current_space = game.game_board[move_space]
self.assertEqual(game.turn_status, game_state.AWAITING_MOVE)
#move player 2 from start space to hallway
player = game.current_player
player_2_current_space = game.game_board[player.suspect]
move_space = player_2_current_space.connected_spaces[0]
game = self.client.move_player(
player.username, player.suspect, move_space)
self.assertEqual(
game.turn_status, game_state.AWAITING_ACCUSATION_OR_END_TURN)
game = self.client.end_turn(player.username)
player_2_current_space = game.game_board[move_space]
self.assertEqual(game.turn_status, game_state.AWAITING_MOVE)
#move player 1 from hallway to room
player = game.current_player
move_space = player_1_current_space.connected_spaces[0]
game = self.client.move_player(
player.username, player.suspect, move_space)
self.assertEqual(
game.turn_status, game_state.AWAITING_SUGGESTION)
#make suggestion based on room player is currently in
game = self.client.make_suggestion(
player.username, game_state.MUSTARD,
game_state.REVOLVER,
move_space
)
#if there is a player that can prove the suggestion false
#then test the suggestion response
if game.suggestion_response_player:
with self.assertRaises(errors.GameClientException):
game = self.client.move_player(
player.username, player.suspect, move_space)
self.assertEqual(
game.turn_status, game_state.AWAITING_SUGGESTION_RESPONSE)
response_player = game.suggestion_response_player
suggestion = game.current_suggestion
gamecard_item = list(
{suggestion.weapon, suggestion.room, suggestion.suspect}
&
set(card.item for card in response_player.game_cards))[0]
game = self.client.make_suggestion_response(
response_player.username, gamecard_item)
self.assertEqual(
game.turn_status, game_state.AWAITING_ACCUSATION_OR_END_TURN)
game = self.client.end_turn(player.username)
self.assertEqual(game.turn_status, game_state.AWAITING_MOVE)
last_player = player
player = game.current_player
self.assertNotEqual(player.username, last_player.username)
#test accusation
suspect = [
card.item for card in game.case_file
if card.type == game_state.SUSPECT
][0]
weapon = [
card.item for card in game.case_file
if card.type == game_state.WEAPON
][0]
room = [
card.item for card in game.case_file
if card.type == game_state.ROOM
][0]
game = self.client.make_accusation(
player.username, suspect, weapon, room)
for message in game.player_messages:
print message
self.client.destroy_game(game.game_id)
finally:
self.game_server.terminate()
|
{
"content_hash": "093e808015b2ad964ddc683be464f0b7",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 78,
"avg_line_length": 39.734939759036145,
"alnum_prop": 0.5933899332929048,
"repo_name": "PyBro-JHU/Clue-Less",
"id": "4adc9e7bbf0c01d07970011ab5904c39a2424bc8",
"size": "6596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clueless/tests/client/game_play_functest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "101989"
}
],
"symlink_target": ""
}
|
'''Define URL patterns for users.'''
from django.conf.urls import url
from django.contrib.auth.views import login
from . import views
urlpatterns = [
url(r'^login/$', login,
{'template_name': 'users/login.html'}, name='login'),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^register/$', views.register, name='register'),
]
|
{
"content_hash": "bacc219d07da2b151b7a53fd89d58e0b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 61,
"avg_line_length": 27.692307692307693,
"alnum_prop": 0.6583333333333333,
"repo_name": "DoWhatILove/turtle",
"id": "2bac16198a370693fa6107a318d33761044cd96d",
"size": "360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "programming/python/web/learning_log/users/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "6651"
},
{
"name": "C",
"bytes": "51179"
},
{
"name": "C#",
"bytes": "2573153"
},
{
"name": "CSS",
"bytes": "15296"
},
{
"name": "Dockerfile",
"bytes": "512"
},
{
"name": "HTML",
"bytes": "19499"
},
{
"name": "JavaScript",
"bytes": "10918"
},
{
"name": "Jupyter Notebook",
"bytes": "1174432"
},
{
"name": "Makefile",
"bytes": "1309"
},
{
"name": "PowerShell",
"bytes": "32263"
},
{
"name": "Python",
"bytes": "310753"
},
{
"name": "Shell",
"bytes": "10285"
},
{
"name": "Vim script",
"bytes": "1757"
}
],
"symlink_target": ""
}
|
"""Declare exceptions and warnings that are specific to PyTables."""
from __future__ import absolute_import
import six
__docformat__ = 'reStructuredText'
"""The format of documentation strings in this module."""
import os
import warnings
import traceback
class HDF5ExtError(RuntimeError):
"""A low level HDF5 operation failed.
    This exception is raised by the low level PyTables components used for
accessing HDF5 files. It usually signals that something is not
going well in the HDF5 library or even at the Input/Output level.
Errors in the HDF5 C library may be accompanied by an extensive
HDF5 back trace on standard error (see also
:func:`tables.silence_hdf5_messages`).
.. versionchanged:: 2.4
Parameters
----------
message
error message
h5bt
This parameter (keyword only) controls the HDF5 back trace
handling. Any keyword arguments other than h5bt is ignored.
* if set to False the HDF5 back trace is ignored and the
:attr:`HDF5ExtError.h5backtrace` attribute is set to None
* if set to True the back trace is retrieved from the HDF5
library and stored in the :attr:`HDF5ExtError.h5backtrace`
attribute as a list of tuples
* if set to "VERBOSE" (default) the HDF5 back trace is
stored in the :attr:`HDF5ExtError.h5backtrace` attribute
and also included in the string representation of the
exception
* if not set (or set to None) the default policy is used
(see :attr:`HDF5ExtError.DEFAULT_H5_BACKTRACE_POLICY`)
"""
# NOTE: in order to avoid circular dependencies between modules the
# _dump_h5_backtrace method is set at initialization time in
    # the utilsExtension.
_dump_h5_backtrace = None
DEFAULT_H5_BACKTRACE_POLICY = "VERBOSE"
"""Default policy for HDF5 backtrace handling
* if set to False the HDF5 back trace is ignored and the
:attr:`HDF5ExtError.h5backtrace` attribute is set to None
* if set to True the back trace is retrieved from the HDF5
library and stored in the :attr:`HDF5ExtError.h5backtrace`
attribute as a list of tuples
* if set to "VERBOSE" (default) the HDF5 back trace is
stored in the :attr:`HDF5ExtError.h5backtrace` attribute
and also included in the string representation of the
exception
This parameter can be set using the
:envvar:`PT_DEFAULT_H5_BACKTRACE_POLICY` environment variable.
Allowed values are "IGNORE" (or "FALSE"), "SAVE" (or "TRUE") and
"VERBOSE" to set the policy to False, True and "VERBOSE"
respectively. The special value "DEFAULT" can be used to reset
the policy to the default value
.. versionadded:: 2.4
"""
@classmethod
def set_policy_from_env(cls):
envmap = {
"IGNORE": False,
"FALSE": False,
"SAVE": True,
"TRUE": True,
"VERBOSE": "VERBOSE",
"DEFAULT": "VERBOSE",
}
oldvalue = cls.DEFAULT_H5_BACKTRACE_POLICY
envvalue = os.environ.get("PT_DEFAULT_H5_BACKTRACE_POLICY", "DEFAULT")
try:
newvalue = envmap[envvalue.upper()]
except KeyError:
warnings.warn("Invalid value for the environment variable "
"'PT_DEFAULT_H5_BACKTRACE_POLICY'. The default "
"policy for HDF5 back trace management in PyTables "
"will be: '%s'" % oldvalue)
else:
cls.DEFAULT_H5_BACKTRACE_POLICY = newvalue
return oldvalue
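    # Illustrative sketch (hypothetical values): the environment variable read by
    # set_policy_from_env() selects the class-wide policy, e.g.
    #
    #     os.environ["PT_DEFAULT_H5_BACKTRACE_POLICY"] = "SAVE"
    #     HDF5ExtError.set_policy_from_env()  # DEFAULT_H5_BACKTRACE_POLICY becomes True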
def __init__(self, *args, **kargs):
super(HDF5ExtError, self).__init__(*args)
self._h5bt_policy = kargs.get('h5bt', self.DEFAULT_H5_BACKTRACE_POLICY)
if self._h5bt_policy and self._dump_h5_backtrace is not None:
self.h5backtrace = self._dump_h5_backtrace()
"""HDF5 back trace.
Contains the HDF5 back trace as a (possibly empty) list of
tuples. Each tuple has the following format::
(filename, line number, function name, text)
Depending on the value of the *h5bt* parameter passed to the
initializer the h5backtrace attribute can be set to None.
This means that the HDF5 back trace has been simply ignored
(not retrieved from the HDF5 C library error stack) or that
there has been an error (silently ignored) during the HDF5 back
trace retrieval.
.. versionadded:: 2.4
See Also
--------
traceback.format_list : :func:`traceback.format_list`
"""
# XXX: check _dump_h5_backtrace failures
else:
self.h5backtrace = None
def __str__(self):
"""Returns a sting representation of the exception.
The actual result depends on policy set in the initializer
:meth:`HDF5ExtError.__init__`.
.. versionadded:: 2.4
"""
verbose = bool(self._h5bt_policy in ('VERBOSE', 'verbose'))
if verbose and self.h5backtrace:
bt = "\n".join([
"HDF5 error back trace\n",
self.format_h5_backtrace(),
"End of HDF5 error back trace"
])
if len(self.args) == 1 and isinstance(self.args[0], six.string_types):
msg = super(HDF5ExtError, self).__str__()
msg = "%s\n\n%s" % (bt, msg)
elif self.h5backtrace[-1][-1]:
msg = "%s\n\n%s" % (bt, self.h5backtrace[-1][-1])
else:
msg = bt
else:
msg = super(HDF5ExtError, self).__str__()
return msg
def format_h5_backtrace(self, backtrace=None):
"""Convert the HDF5 trace back represented as a list of tuples.
(see :attr:`HDF5ExtError.h5backtrace`) into a string.
.. versionadded:: 2.4
"""
if backtrace is None:
backtrace = self.h5backtrace
if backtrace is None:
return 'No HDF5 back trace available'
else:
return ''.join(traceback.format_list(backtrace))
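# Illustrative sketch of the per-instance ``h5bt`` switch documented above
# (the error message is hypothetical):
#
#     try:
#         raise HDF5ExtError("low level HDF5 failure", h5bt=False)
#     except HDF5ExtError as exc:
#         assert exc.h5backtrace is None  # back trace ignored for this instance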
# Initialize the policy for HDF5 back trace handling
HDF5ExtError.set_policy_from_env()
# The following exceptions are concretions of the ``ValueError`` exceptions
# raised by ``file`` objects on certain operations.
class ClosedNodeError(ValueError):
"""The operation can not be completed because the node is closed.
For instance, listing the children of a closed group is not allowed.
"""
pass
class ClosedFileError(ValueError):
"""The operation can not be completed because the hosting file is closed.
For instance, getting an existing node from a closed file is not
allowed.
"""
pass
class FileModeError(ValueError):
"""The operation can not be carried out because the mode in which the
hosting file is opened is not adequate.
For instance, removing an existing leaf from a read-only file is not
allowed.
"""
pass
class NodeError(AttributeError, LookupError):
"""Invalid hierarchy manipulation operation requested.
This exception is raised when the user requests an operation on the
hierarchy which can not be run because of the current layout of the
tree. This includes accessing nonexistent nodes, moving or copying
or creating over an existing node, non-recursively removing groups
with children, and other similarly invalid operations.
A node in a PyTables database cannot be simply overwritten by
    replacing it. Instead, the old node must be removed explicitly
    before another one can take its place. This is done to protect
    interactive users from inadvertently deleting whole trees of data by
a single erroneous command.
"""
pass
class NoSuchNodeError(NodeError):
"""An operation was requested on a node that does not exist.
This exception is raised when an operation gets a path name or a
``(where, name)`` pair leading to a nonexistent node.
"""
pass
class UndoRedoError(Exception):
"""Problems with doing/redoing actions with Undo/Redo feature.
This exception indicates a problem related to the Undo/Redo
mechanism, such as trying to undo or redo actions with this
mechanism disabled, or going to a nonexistent mark.
"""
pass
class UndoRedoWarning(Warning):
"""Issued when an action not supporting Undo/Redo is run.
This warning is only shown when the Undo/Redo mechanism is enabled.
"""
pass
class NaturalNameWarning(Warning):
"""Issued when a non-pythonic name is given for a node.
This is not an error and may even be very useful in certain
contexts, but one should be aware that such nodes cannot be
accessed using natural naming (instead, ``getattr()`` must be
used explicitly).
"""
pass
class PerformanceWarning(Warning):
"""Warning for operations which may cause a performance drop.
This warning is issued when an operation is made on the database
which may cause it to slow down on future operations (i.e. making
the node tree grow too much).
"""
pass
class FlavorError(ValueError):
"""Unsupported or unavailable flavor or flavor conversion.
This exception is raised when an unsupported or unavailable flavor
is given to a dataset, or when a conversion of data between two
given flavors is not supported nor available.
"""
pass
class FlavorWarning(Warning):
"""Unsupported or unavailable flavor conversion.
This warning is issued when a conversion of data between two given
flavors is not supported nor available, and raising an error would
render the data inaccessible (e.g. on a dataset of an unavailable
flavor in a read-only file).
See the `FlavorError` class for more information.
"""
pass
class FiltersWarning(Warning):
"""Unavailable filters.
This warning is issued when a valid filter is specified but it is
not available in the system. It may mean that an available default
filter is to be used instead.
"""
pass
class OldIndexWarning(Warning):
"""Unsupported index format.
This warning is issued when an index in an unsupported format is
found. The index will be marked as invalid and will behave as if
doesn't exist.
"""
pass
class DataTypeWarning(Warning):
"""Unsupported data type.
This warning is issued when an unsupported HDF5 data type is found
(normally in a file created with other tool than PyTables).
"""
pass
class ExperimentalFeatureWarning(Warning):
"""Generic warning for experimental features.
This warning is issued when using a functionality that is still
experimental and that users have to use with care.
"""
pass
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
|
{
"content_hash": "33b6e886aa7b4b66fcc700ec8ed835c3",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 82,
"avg_line_length": 29.26259946949602,
"alnum_prop": 0.6528281363306744,
"repo_name": "gdementen/PyTables",
"id": "9306b6af8266358f5bd4e1eee2b99b7e9baf8b6c",
"size": "11309",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "tables/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "896101"
},
{
"name": "C++",
"bytes": "97380"
},
{
"name": "CMake",
"bytes": "21598"
},
{
"name": "Gnuplot",
"bytes": "2104"
},
{
"name": "Makefile",
"bytes": "4159"
},
{
"name": "Objective-C",
"bytes": "1404"
},
{
"name": "Python",
"bytes": "3325733"
},
{
"name": "Shell",
"bytes": "16985"
}
],
"symlink_target": ""
}
|
from CIM15.IEC61968.Common.Document import Document
class ErpReceiveDelivery(Document):
"""Transaction for an Organisation receiving goods or services that may be used to indicate receipt of goods in conjunction with a purchase order. A receivable is an open (unpaid) item in the Accounts Receivable ledger.Transaction for an Organisation receiving goods or services that may be used to indicate receipt of goods in conjunction with a purchase order. A receivable is an open (unpaid) item in the Accounts Receivable ledger.
"""
def __init__(self, ErpRecDelvLineItems=None, *args, **kw_args):
"""Initialises a new 'ErpReceiveDelivery' instance.
@param ErpRecDelvLineItems:
"""
self._ErpRecDelvLineItems = []
self.ErpRecDelvLineItems = [] if ErpRecDelvLineItems is None else ErpRecDelvLineItems
super(ErpReceiveDelivery, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["ErpRecDelvLineItems"]
_many_refs = ["ErpRecDelvLineItems"]
def getErpRecDelvLineItems(self):
return self._ErpRecDelvLineItems
def setErpRecDelvLineItems(self, value):
for x in self._ErpRecDelvLineItems:
x.ErpReceiveDelivery = None
for y in value:
y._ErpReceiveDelivery = self
self._ErpRecDelvLineItems = value
ErpRecDelvLineItems = property(getErpRecDelvLineItems, setErpRecDelvLineItems)
def addErpRecDelvLineItems(self, *ErpRecDelvLineItems):
for obj in ErpRecDelvLineItems:
obj.ErpReceiveDelivery = self
def removeErpRecDelvLineItems(self, *ErpRecDelvLineItems):
for obj in ErpRecDelvLineItems:
obj.ErpReceiveDelivery = None
|
{
"content_hash": "ebbb37f03e25d2e45b2514ac9bce3c3e",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 439,
"avg_line_length": 40.13636363636363,
"alnum_prop": 0.6993204983012458,
"repo_name": "rwl/PyCIM",
"id": "bf53e5684e3eaccc227803c2b0b9b5bef0746414",
"size": "2866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM15/IEC61970/Informative/InfERPSupport/ErpReceiveDelivery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
}
|
import codecs
from setuptools import setup
with codecs.open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(
name="shadowsocks",
version="2.8.2.1",
license='http://www.apache.org/licenses/LICENSE-2.0',
description="A fast tunnel proxy that help you get through firewalls",
author='clowwindy',
author_email='clowwindy42@gmail.com',
url='https://github.com/shadowsocks/shadowsocks',
packages=['shadowsocks', 'shadowsocks.crypto'],
package_data={
'shadowsocks': ['README.rst', 'LICENSE']
},
install_requires=[],
entry_points="""
[console_scripts]
sslocal = shadowsocks.local:main
ssserver = shadowsocks.server:main
""",
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: Proxy Servers',
],
long_description=long_description,
)
|
{
"content_hash": "6e5b90d8126d64c93a22ed092a597526",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 74,
"avg_line_length": 33.92307692307692,
"alnum_prop": 0.6281179138321995,
"repo_name": "gitchs/shadowsocks",
"id": "bc220355e3b5dec017d20595baa48b6fd6ba5705",
"size": "1323",
"binary": false,
"copies": "1",
"ref": "refs/heads/v2.8.2-patch",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "158479"
},
{
"name": "Shell",
"bytes": "16316"
}
],
"symlink_target": ""
}
|
"""The Hangouts Bot."""
import logging
from homeassistant.helpers import dispatcher, intent
from .const import (
ATTR_MESSAGE, ATTR_TARGET, CONF_CONVERSATIONS, DOMAIN,
EVENT_HANGOUTS_CONNECTED, EVENT_HANGOUTS_CONVERSATIONS_CHANGED,
EVENT_HANGOUTS_DISCONNECTED, EVENT_HANGOUTS_MESSAGE_RECEIVED,
CONF_MATCHERS, CONF_CONVERSATION_ID,
CONF_CONVERSATION_NAME, EVENT_HANGOUTS_CONVERSATIONS_RESOLVED, INTENT_HELP)
_LOGGER = logging.getLogger(__name__)
class HangoutsBot:
"""The Hangouts Bot."""
def __init__(self, hass, refresh_token, intents,
default_convs, error_suppressed_convs):
"""Set up the client."""
self.hass = hass
self._connected = False
self._refresh_token = refresh_token
self._intents = intents
self._conversation_intents = None
self._client = None
self._user_list = None
self._conversation_list = None
self._default_convs = default_convs
self._default_conv_ids = None
self._error_suppressed_convs = error_suppressed_convs
self._error_suppressed_conv_ids = None
dispatcher.async_dispatcher_connect(
self.hass, EVENT_HANGOUTS_MESSAGE_RECEIVED,
self._async_handle_conversation_message)
def _resolve_conversation_id(self, obj):
if CONF_CONVERSATION_ID in obj:
return obj[CONF_CONVERSATION_ID]
if CONF_CONVERSATION_NAME in obj:
conv = self._resolve_conversation_name(obj[CONF_CONVERSATION_NAME])
if conv is not None:
return conv.id_
return None
def _resolve_conversation_name(self, name):
for conv in self._conversation_list.get_all():
if conv.name == name:
return conv
return None
def async_update_conversation_commands(self):
"""Refresh the commands for every conversation."""
self._conversation_intents = {}
for intent_type, data in self._intents.items():
if data.get(CONF_CONVERSATIONS):
conversations = []
for conversation in data.get(CONF_CONVERSATIONS):
conv_id = self._resolve_conversation_id(conversation)
if conv_id is not None:
conversations.append(conv_id)
data['_' + CONF_CONVERSATIONS] = conversations
elif self._default_conv_ids:
data['_' + CONF_CONVERSATIONS] = self._default_conv_ids
else:
data['_' + CONF_CONVERSATIONS] = \
[conv.id_ for conv in self._conversation_list.get_all()]
for conv_id in data['_' + CONF_CONVERSATIONS]:
if conv_id not in self._conversation_intents:
self._conversation_intents[conv_id] = {}
self._conversation_intents[conv_id][intent_type] = data
try:
self._conversation_list.on_event.remove_observer(
self._async_handle_conversation_event)
except ValueError:
pass
self._conversation_list.on_event.add_observer(
self._async_handle_conversation_event)
def async_resolve_conversations(self, _):
"""Resolve the list of default and error suppressed conversations."""
self._default_conv_ids = []
self._error_suppressed_conv_ids = []
for conversation in self._default_convs:
conv_id = self._resolve_conversation_id(conversation)
if conv_id is not None:
self._default_conv_ids.append(conv_id)
for conversation in self._error_suppressed_convs:
conv_id = self._resolve_conversation_id(conversation)
if conv_id is not None:
self._error_suppressed_conv_ids.append(conv_id)
dispatcher.async_dispatcher_send(self.hass,
EVENT_HANGOUTS_CONVERSATIONS_RESOLVED)
async def _async_handle_conversation_event(self, event):
from hangups import ChatMessageEvent
if isinstance(event, ChatMessageEvent):
dispatcher.async_dispatcher_send(self.hass,
EVENT_HANGOUTS_MESSAGE_RECEIVED,
event.conversation_id,
event.user_id, event)
async def _async_handle_conversation_message(self,
conv_id, user_id, event):
"""Handle a message sent to a conversation."""
user = self._user_list.get_user(user_id)
if user.is_self:
return
message = event.text
_LOGGER.debug("Handling message '%s' from %s",
message, user.full_name)
intents = self._conversation_intents.get(conv_id)
if intents is not None:
is_error = False
try:
intent_result = await self._async_process(intents, message,
conv_id)
except (intent.UnknownIntent, intent.IntentHandleError) as err:
is_error = True
intent_result = intent.IntentResponse()
intent_result.async_set_speech(str(err))
if intent_result is None:
is_error = True
intent_result = intent.IntentResponse()
intent_result.async_set_speech(
"Sorry, I didn't understand that")
message = intent_result.as_dict().get('speech', {})\
.get('plain', {}).get('speech')
if (message is not None) and not (
is_error and conv_id in self._error_suppressed_conv_ids):
await self._async_send_message(
[{'text': message, 'parse_str': True}],
[{CONF_CONVERSATION_ID: conv_id}])
async def _async_process(self, intents, text, conv_id):
"""Detect a matching intent."""
for intent_type, data in intents.items():
for matcher in data.get(CONF_MATCHERS, []):
match = matcher.match(text)
if not match:
continue
if intent_type == INTENT_HELP:
return await self.hass.helpers.intent.async_handle(
DOMAIN, intent_type,
{'conv_id': {'value': conv_id}}, text)
return await self.hass.helpers.intent.async_handle(
DOMAIN, intent_type,
{key: {'value': value}
for key, value in match.groupdict().items()}, text)
async def async_connect(self):
"""Login to the Google Hangouts."""
from .hangups_utils import HangoutsRefreshToken, HangoutsCredentials
from hangups import Client
from hangups import get_auth
session = await self.hass.async_add_executor_job(
get_auth, HangoutsCredentials(None, None, None),
HangoutsRefreshToken(self._refresh_token))
self._client = Client(session)
self._client.on_connect.add_observer(self._on_connect)
self._client.on_disconnect.add_observer(self._on_disconnect)
self.hass.loop.create_task(self._client.connect())
def _on_connect(self):
_LOGGER.debug('Connected!')
self._connected = True
dispatcher.async_dispatcher_send(self.hass, EVENT_HANGOUTS_CONNECTED)
def _on_disconnect(self):
"""Handle disconnecting."""
_LOGGER.debug('Connection lost!')
self._connected = False
dispatcher.async_dispatcher_send(self.hass,
EVENT_HANGOUTS_DISCONNECTED)
async def async_disconnect(self):
"""Disconnect the client if it is connected."""
if self._connected:
await self._client.disconnect()
async def async_handle_hass_stop(self, _):
"""Run once when Home Assistant stops."""
await self.async_disconnect()
async def _async_send_message(self, message, targets):
conversations = []
for target in targets:
conversation = None
if CONF_CONVERSATION_ID in target:
conversation = self._conversation_list.get(
target[CONF_CONVERSATION_ID])
elif CONF_CONVERSATION_NAME in target:
conversation = self._resolve_conversation_name(
target[CONF_CONVERSATION_NAME])
if conversation is not None:
conversations.append(conversation)
if not conversations:
return False
from hangups import ChatMessageSegment, hangouts_pb2
messages = []
for segment in message:
if messages:
messages.append(ChatMessageSegment('',
segment_type=hangouts_pb2.
SEGMENT_TYPE_LINE_BREAK))
if 'parse_str' in segment and segment['parse_str']:
messages.extend(ChatMessageSegment.from_str(segment['text']))
else:
if 'parse_str' in segment:
del segment['parse_str']
messages.append(ChatMessageSegment(**segment))
if not messages:
return False
for conv in conversations:
await conv.send_message(messages)
async def _async_list_conversations(self):
import hangups
self._user_list, self._conversation_list = \
(await hangups.build_user_conversation_list(self._client))
conversations = {}
for i, conv in enumerate(self._conversation_list.get_all()):
users_in_conversation = []
for user in conv.users:
users_in_conversation.append(user.full_name)
conversations[str(i)] = {CONF_CONVERSATION_ID: str(conv.id_),
CONF_CONVERSATION_NAME: conv.name,
'users': users_in_conversation}
self.hass.states.async_set("{}.conversations".format(DOMAIN),
len(self._conversation_list.get_all()),
attributes=conversations)
dispatcher.async_dispatcher_send(self.hass,
EVENT_HANGOUTS_CONVERSATIONS_CHANGED,
conversations)
async def async_handle_send_message(self, service):
"""Handle the send_message service."""
await self._async_send_message(service.data[ATTR_MESSAGE],
service.data[ATTR_TARGET])
async def async_handle_update_users_and_conversations(self, _=None):
"""Handle the update_users_and_conversations service."""
await self._async_list_conversations()
def get_intents(self, conv_id):
"""Return the intents for a specific conversation."""
return self._conversation_intents.get(conv_id)
|
{
"content_hash": "80abeed190efcda3e66fd48026c558b7",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 79,
"avg_line_length": 40.8014705882353,
"alnum_prop": 0.5633447468012255,
"repo_name": "persandstrom/home-assistant",
"id": "7edc8898c8cf813c37c8b4bb189b8668729c91da",
"size": "11098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/hangouts/hangouts_bot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
}
|
import asyncio
import json
import random
from ctypes import cdll
from time import sleep
import platform
import logging
from demo_utils import file_ext
from vcx.api.connection import Connection
from vcx.api.credential_def import CredentialDef
from vcx.api.issuer_credential import IssuerCredential
from vcx.api.proof import Proof
from vcx.api.schema import Schema
from vcx.api.utils import vcx_agent_provision
from vcx.api.vcx_init import vcx_init_with_config
from vcx.state import State, ProofState
# logging.basicConfig(level=logging.DEBUG) uncomment to get logs
# 'agency_url': URL of the agency
# 'agency_did': public DID of the agency
# 'agency_verkey': public verkey of the agency
# 'wallet_name': name for newly created encrypted wallet
# 'wallet_key': encryption key for encoding wallet
# 'payment_method': method that will be used for payments
provisionConfig = {
'agency_url':'http://localhost:8080',
'agency_did':'VsKV7grR1BUE29mG2Fm2kX',
'agency_verkey':'Hezce2UWMZ3wUhVkh2LfKSs8nDzWwzs2Win7EzNN3YaR',
'wallet_name':'faber_wallet',
'wallet_key':'123',
'payment_method': 'null',
'enterprise_seed':'000000000000000000000000Trustee1'
}
async def main():
payment_plugin = cdll.LoadLibrary('libnullpay' + file_ext())
payment_plugin.nullpay_init()
print("#1 Provision an agent and wallet, get back configuration details")
config = await vcx_agent_provision(json.dumps(provisionConfig))
config = json.loads(config)
# Set some additional configuration options specific to faber
config['institution_name'] = 'Faber'
config['institution_logo_url'] = 'http://robohash.org/234'
config['genesis_path'] = 'docker.txn'
print("#2 Initialize libvcx with new configuration")
await vcx_init_with_config(json.dumps(config))
print("#3 Create a new schema on the ledger")
    version = "%d.%d.%d" % (random.randint(1, 101), random.randint(1, 101), random.randint(1, 101))
schema = await Schema.create('schema_uuid', 'degree schema', version, ['name', 'date', 'degree'], 0)
schema_id = await schema.get_schema_id()
print("#4 Create a new credential definition on the ledger")
cred_def = await CredentialDef.create('credef_uuid', 'degree', schema_id, 0)
cred_def_handle = cred_def.handle
cred_def_id = await cred_def.get_cred_def_id()
print("#5 Create a connection to alice and print out the invite details")
connection_to_alice = await Connection.create('alice')
await connection_to_alice.connect('{"use_public_did": true}')
await connection_to_alice.update_state()
details = await connection_to_alice.invite_details(False)
print("**invite details**")
print(json.dumps(details))
print("******************")
print("#6 Poll agency and wait for alice to accept the invitation (start alice.py now)")
connection_state = await connection_to_alice.get_state()
while connection_state != State.Accepted:
sleep(2)
await connection_to_alice.update_state()
connection_state = await connection_to_alice.get_state()
schema_attrs = {
'name': 'alice',
'date': '05-2018',
'degree': 'maths',
}
print("#12 Create an IssuerCredential object using the schema and credential definition")
credential = await IssuerCredential.create('alice_degree', schema_attrs, cred_def_handle, 'cred', '0')
print("#13 Issue credential offer to alice")
await credential.send_offer(connection_to_alice)
await credential.update_state()
print("#14 Poll agency and wait for alice to send a credential request")
credential_state = await credential.get_state()
while credential_state != State.RequestReceived:
sleep(2)
await credential.update_state()
credential_state = await credential.get_state()
print("#17 Issue credential to alice")
await credential.send_credential(connection_to_alice)
print("#18 Wait for alice to accept credential")
await credential.update_state()
credential_state = await credential.get_state()
while credential_state != State.Accepted:
sleep(2)
await credential.update_state()
credential_state = await credential.get_state()
proof_attrs = [
{'name': 'name', 'restrictions': [{'issuer_did': config['institution_did']}]},
{'name': 'date', 'restrictions': [{'issuer_did': config['institution_did']}]},
{'name': 'degree', 'restrictions': [{'issuer_did': config['institution_did']}]}
]
print("#19 Create a Proof object")
proof = await Proof.create('proof_uuid', 'proof_from_alice', proof_attrs, {})
print("#20 Request proof of degree from alice")
await proof.request_proof(connection_to_alice)
print("#21 Poll agency and wait for alice to provide proof")
proof_state = await proof.get_state()
while proof_state != State.Accepted:
sleep(2)
await proof.update_state()
proof_state = await proof.get_state()
print("#27 Process the proof provided by alice")
await proof.get_proof(connection_to_alice)
print("#28 Check if proof is valid")
if proof.proof_state == ProofState.Verified:
print("proof is verified!!")
else:
print("could not verify proof :(")
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
{
"content_hash": "a29a6d1ec3b2cef7a512ac71fe1a17b0",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 107,
"avg_line_length": 37.49650349650349,
"alnum_prop": 0.6876165609847072,
"repo_name": "anastasia-tarasova/indy-sdk",
"id": "fad8209e59d18a3f051be4ea7447411adf5350ff",
"size": "5362",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vcx/wrappers/python3/demo/faber.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "207870"
},
{
"name": "C#",
"bytes": "842011"
},
{
"name": "C++",
"bytes": "229233"
},
{
"name": "CSS",
"bytes": "137079"
},
{
"name": "Dockerfile",
"bytes": "23945"
},
{
"name": "Groovy",
"bytes": "102863"
},
{
"name": "HTML",
"bytes": "897750"
},
{
"name": "Java",
"bytes": "882162"
},
{
"name": "JavaScript",
"bytes": "185247"
},
{
"name": "Makefile",
"bytes": "328"
},
{
"name": "Objective-C",
"bytes": "584121"
},
{
"name": "Objective-C++",
"bytes": "706749"
},
{
"name": "Perl",
"bytes": "8271"
},
{
"name": "Python",
"bytes": "750776"
},
{
"name": "Ruby",
"bytes": "80525"
},
{
"name": "Rust",
"bytes": "5872898"
},
{
"name": "Shell",
"bytes": "251160"
},
{
"name": "Swift",
"bytes": "1114"
},
{
"name": "TypeScript",
"bytes": "197439"
}
],
"symlink_target": ""
}
|
"""
With these settings, tests run faster.
"""
from .base import * # noqa
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = "!!!SET DJANGO_SECRET_KEY!!!"
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": "",}}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
(
"django.template.loaders.cached.Loader",
["django.template.loaders.filesystem.Loader", "django.template.loaders.app_directories.Loader",],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
|
{
"content_hash": "0f1393aba045f3dfca400739dd0b8bec",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 105,
"avg_line_length": 38.891891891891895,
"alnum_prop": 0.5163307852675469,
"repo_name": "rohithpr/bookmark-manager",
"id": "ef7e68cec1143e3466c5c19d42fd69f6e32cec69",
"size": "1439",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "homepage/settings/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "33136"
},
{
"name": "Python",
"bytes": "36657"
}
],
"symlink_target": ""
}
|
from .Error import *
|
{
"content_hash": "0917c8887a927d812c9bf507bc0563b7",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 20,
"avg_line_length": 21,
"alnum_prop": 0.7142857142857143,
"repo_name": "dhxkgozj/DirEngine",
"id": "22380e80915bb73491f7b2cdd5e6d15205d3015e",
"size": "21",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DirEngine/error/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "2243"
},
{
"name": "C",
"bytes": "14072157"
},
{
"name": "C#",
"bytes": "552697"
},
{
"name": "C++",
"bytes": "7111622"
},
{
"name": "CMake",
"bytes": "12244"
},
{
"name": "Haskell",
"bytes": "2192"
},
{
"name": "Java",
"bytes": "394503"
},
{
"name": "Makefile",
"bytes": "37485"
},
{
"name": "OCaml",
"bytes": "228692"
},
{
"name": "Objective-C",
"bytes": "52968"
},
{
"name": "POV-Ray SDL",
"bytes": "8872570"
},
{
"name": "Perl",
"bytes": "9974"
},
{
"name": "Python",
"bytes": "1352255"
},
{
"name": "Ruby",
"bytes": "973"
},
{
"name": "Shell",
"bytes": "8201"
},
{
"name": "Smalltalk",
"bytes": "302401"
},
{
"name": "Tcl",
"bytes": "1339"
}
],
"symlink_target": ""
}
|
"""
Test byteflow.py specific issues
"""
import unittest
from numba.tests.support import TestCase
from numba.core.compiler import run_frontend
class TestByteFlowIssues(TestCase):
def test_issue_5087(self):
        # This is an odd issue. The exact number of print statements below is
        # necessary to trigger it. Too many or too few will alter the behavior.
# Also note that the function below will not be executed. The problem
# occurs at compilation. The definition below is invalid for execution.
# The problem occurs in the bytecode analysis.
def udt():
print
print
print
for i in range:
print
print
print
print
print
print
print
print
print
print
print
print
print
print
print
print
print
print
for j in range:
print
print
print
print
print
print
print
for k in range:
for l in range:
print
print
print
print
print
print
print
print
print
print
if print:
for n in range:
print
else:
print
run_frontend(udt)
def test_issue_5097(self):
# Inspired by https://github.com/numba/numba/issues/5097
def udt():
for i in range(0):
if i > 0:
pass
a = None # noqa: F841
run_frontend(udt)
def test_issue_5680(self):
# From https://github.com/numba/numba/issues/5680#issuecomment-625351336
def udt():
for k in range(0):
if 1 == 1:
...
if 'a' == 'a':
...
run_frontend(udt)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "af4006efcb0453460311dbce00907728",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 80,
"avg_line_length": 25.80851063829787,
"alnum_prop": 0.39530090684253916,
"repo_name": "numba/numba",
"id": "041e1d65b20f7fcdc49857903748727cecc29a16",
"size": "2426",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "numba/tests/test_byteflow.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3522"
},
{
"name": "C",
"bytes": "574888"
},
{
"name": "C++",
"bytes": "166526"
},
{
"name": "Cuda",
"bytes": "2063"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "9400448"
},
{
"name": "Shell",
"bytes": "13621"
}
],
"symlink_target": ""
}
|
from yaml import safe_load
from logging import config
import os
def setup_logger(settings):
"""
Settings object
:param settings: Settings
:type settings: dict
:return:
"""
with open(os.path.join("logging.yaml")) as logging_config:
        log_config = safe_load(logging_config)
configure_logs(settings, log_config)
def configure_logs(settings, cfg):
"""
Configure logs
:param settings: Settings
:type settings: dict
:param cfg: Logging config
:type cfg: dict
:return:
"""
cfg = cfg.copy()
log_path = os.path.join(settings['log_path'])
if not os.path.isdir(log_path):
os.mkdir(log_path)
try:
handlers = cfg['handlers']
except KeyError:
pass
else:
for handler in handlers.values():
try:
filename = handler['filename']
except KeyError:
pass
else:
handler['filename'] = os.path.join(log_path, filename)
config.dictConfig(cfg)
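# Illustrative usage sketch (not part of the original module): setup_logger
# expects a settings dict carrying a 'log_path' entry; configure_logs then
# rewrites the 'filename' of every file handler declared in logging.yaml so
# it lands under that directory. The path below is an example only.
#
#   setup_logger({'log_path': 'logs'})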
|
{
"content_hash": "05fc1ab752a715fde875793e80c0a076",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 70,
"avg_line_length": 20.62,
"alnum_prop": 0.5809893307468477,
"repo_name": "kollad/turbo-ninja",
"id": "79886baeaa3843cc9fd2360f03b754462d9668ce",
"size": "1031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "413"
},
{
"name": "Python",
"bytes": "170897"
}
],
"symlink_target": ""
}
|
from flask import Flask
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
from flask_admin import Admin
from config import config
db = SQLAlchemy()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'manager.login'
admin = Admin()
class ReBlogerException(Exception):
code = 0
def to_json(self):
return dict(code=self.code, msg=self.msg)
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
db.init_app(app)
login_manager.init_app(app)
admin.init_app(app)
from general import general as general_blueprint
from manager import manager as manager_blueprint
app.register_blueprint(general_blueprint)
app.register_blueprint(manager_blueprint, url_prefix='/manager')
return app
|
{
"content_hash": "4b09e5ca71a79a75788b9253f834cb9f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 68,
"avg_line_length": 24.8,
"alnum_prop": 0.7338709677419355,
"repo_name": "realityone/ReBlogger",
"id": "cf91f66ab3d836e1c70c58990c749219c6fbf17a",
"size": "868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ReBloger/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "108316"
},
{
"name": "HTML",
"bytes": "32691"
},
{
"name": "JavaScript",
"bytes": "1260"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "23790"
}
],
"symlink_target": ""
}
|
import argparse
import configparser
import os
import re
import stat
class TemplateFragment:
def __init__(self, tpl_global, context, verbatim, expr):
self.context = context
self.verbatim = verbatim
self.expr = expr
self.tpl_global = tpl_global
self.subst = ""
def __append_subst(self, subst):
self.subst += "%s\n" % subst
def value_of(self, k):
value = self.context.get(k)
if value:
self.__append_subst(value)
def on_value(self, k, v, ontrue, onfalse):
value = self.context.get(k.lower())
if value and value == v:
self.__append_subst(ontrue)
else:
self.__append_subst(onfalse)
def on_set(self, k, ontrue, onfalse):
if self.context.get(k.lower()):
self.__append_subst(ontrue)
else:
self.__append_subst(onfalse)
def println(self, ln):
self.__append_subst(ln)
def include(self, template):
dir_path = os.path.dirname(self.tpl_global["root_tpl"])
path = os.path.join(dir_path, template)
try:
f = open(path)
except:
print("Could not open include file: %s" % path)
return
content = run_template(f.read(), self.tpl_global, self.context, self)
self.__append_subst(content)
def parse_template(raw, tpl_global, context):
result = []
curr = prev = ""
expr = False
for ch in raw:
if ch == "{" and prev == "{":
if curr:
fragment = TemplateFragment(tpl_global, context, curr, expr)
result.append(fragment)
curr = ""
expr = True
elif ch == "}" and prev == "}":
if curr:
fragment = TemplateFragment(tpl_global, context, curr[:len(curr) - 1], expr)
result.append(fragment)
curr = ""
expr = False
else:
curr += ch
prev = ch
if curr:
fragment = TemplateFragment(tpl_global, context, curr, expr)
result.append(fragment)
return result
def load_context(files):
result = {}
for f in files:
lines = f.read()
content = "[context]\n%s" % lines
handle = configparser.ConfigParser(delimiters=('=','?=',':='))
handle.read_string(content)
dc = handle["context"]
result = dict(list(result.items()) + list(dc.items()))
# also consider env vars in the context
for k,v in os.environ.items():
result[k.lower()] = v
return result
def try_subst(verbatim):
    p = re.compile(r"^\@.*\@$")
m = p.match(verbatim)
if not m:
return None
return verbatim.replace("@","")
def run_template(raw, tpl_global, context, nested=None):
fragments = parse_template(raw, tpl_global, context)
for frag in fragments:
if frag.expr:
subst = try_subst(frag.verbatim)
if subst:
subst = context.get(subst.lower(), "")
subst = subst.replace("\"","")
frag.subst = subst
else:
if nested:
tpl_global["st"] = nested
else:
tpl_global["st"] = frag
tpl_global["context"] = context
exec(frag.verbatim, tpl_global)
raw = raw.replace("{{%s}}" % frag.verbatim, frag.subst)
return raw
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--context-files",
help=("The context files path. A context file"
"is a file containing key=value pairs, like"
"the kconfig's .config file"),
type=argparse.FileType("r"), nargs="+",
required=True)
parser.add_argument("--template", help="The template file path",
type=argparse.FileType("r"), required=True)
parser.add_argument("--output", help="The template file path",
type=argparse.FileType("w"), required=True)
args = parser.parse_args()
tpl_global = {"root_tpl": os.path.realpath(args.template.name)}
context = load_context(args.context_files)
output = run_template(args.template.read(), tpl_global, context)
args.output.write(output)
st = os.fstat(args.template.fileno())
os.fchmod(args.output.fileno(), st.st_mode)
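# Illustrative sketch (not part of the original script): given a context file
# containing, for example,
#     BOARD=quark
#     USE_GPIO=y
# a template can substitute a value directly with the @KEY@ form,
#     board is {{@BOARD@}}
# or run a small Python fragment against the `st` helper defined above,
#     {{ st.on_set("use_gpio", "gpio enabled", "gpio disabled") }}
# The key names and values here are made up; lookups are effectively
# case-insensitive because load_context lowercases keys and the helper
# methods lowercase their arguments.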
|
{
"content_hash": "4a02167c2847b7954fdc0b9b37284e21",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 92,
"avg_line_length": 31.083333333333332,
"alnum_prop": 0.5368632707774799,
"repo_name": "edersondisouza/soletta",
"id": "ea26bd5e86bf738f06469e1e474cfacf01d57e97",
"size": "5158",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "data/scripts/template.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "3347"
},
{
"name": "C",
"bytes": "5917647"
},
{
"name": "C++",
"bytes": "176079"
},
{
"name": "CSS",
"bytes": "3953"
},
{
"name": "HTML",
"bytes": "1623"
},
{
"name": "JavaScript",
"bytes": "120410"
},
{
"name": "Makefile",
"bytes": "66060"
},
{
"name": "NSIS",
"bytes": "1390"
},
{
"name": "Objective-C",
"bytes": "959"
},
{
"name": "Python",
"bytes": "237605"
},
{
"name": "Shell",
"bytes": "8015"
},
{
"name": "Smarty",
"bytes": "1160"
},
{
"name": "VimL",
"bytes": "748"
}
],
"symlink_target": ""
}
|
import pytest
@pytest.mark.plugins
def test_plugin_installed(Command):
assert 'cloud-aws' in Command.check_output(
'sudo /usr/share/elasticsearch/bin/plugin list')
@pytest.mark.plugins
def test_plugin_config(File):
assert File('/etc/elasticsearch/elasticsearch.yml').contains('region')
|
{
"content_hash": "775db965a17c8e3f7e17fbf5b45f69e3",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 74,
"avg_line_length": 25.5,
"alnum_prop": 0.738562091503268,
"repo_name": "mitodl/elasticsearch-formula",
"id": "04f45292f2591a7bf25136c315aa541c2eec5ea5",
"size": "306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_elasticsearch_plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4109"
},
{
"name": "Python",
"bytes": "4382"
},
{
"name": "SaltStack",
"bytes": "21724"
},
{
"name": "Shell",
"bytes": "2247"
}
],
"symlink_target": ""
}
|
import requests
BASE_URL = "http://fizzbuzzaas.herokuapp.com"
def fizzbuzz(params):
url = BASE_URL + "/fizzbuzz"
response = requests.get(url, params=params)
response_json = response.json()
return response_json["properties"]["value"]
for number in range(1, 101):
print fizzbuzz({"number": number })
|
{
"content_hash": "b016c9d3455970e6398c7e42f931c639",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 47,
"avg_line_length": 26.75,
"alnum_prop": 0.6822429906542056,
"repo_name": "smizell/fizzbuzz-hypermedia-client",
"id": "2d9af54eb52467cb3da534d5cf054056901010ad",
"size": "321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fizzbuzz-non-hypermedia1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5694"
}
],
"symlink_target": ""
}
|
import json
import django_filters
from django.contrib.auth.models import User, Group
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from rest_framework import status
from rest_framework import viewsets, mixins
from rest_framework import filters
from rest_framework.response import Response
from rest_framework.decorators import detail_route
from api.authentication import CsrfExemptSessionAuthentication, BasicAuthentication
from api.pagination import LargeResultsSetPagination
from api.serializers import RegisterSerializer
from api.models import BannedDomain
class RegisterViewSet(mixins.UpdateModelMixin, viewsets.GenericViewSet):
"""
A viewset for viewing and editing user instances.
"""
serializer_class = RegisterSerializer
queryset = User.objects.all()
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
@detail_route(methods=['post',])
def registration(self, request, pk=None):
"""
Function will fetch the login credentials and either log the user in
or return error. To call this function, use this URL:
--------------------------------------------
/api/registers/0/registration/?format=json
--------------------------------------------
"""
# ALGORITHM: Attempt to decode the data the way iOS encoded it
# else we have to just attempt to decode the data raw.
try:
for data in request.data:
json_arr = json.loads( data )
serializer = RegisterSerializer(data=json_arr) # iOS Version
except Exception as e:
serializer = RegisterSerializer(data=request.data) # Mobile Version
if serializer.is_valid():
username = serializer.data['username'].lower()
email = serializer.data['email']
password = serializer.data['password']
first_name = serializer.data['first_name']
last_name = serializer.data['last_name']
# Validate to ensure the user is not using an email which is banned in
# our system for whatever reason.
banned_domains = BannedDomain.objects.all()
for banned_domain in banned_domains:
if email.count(banned_domain.name) > 0:
return Response({
'status' : 'failure',
                        'errors' : {'email':'this email domain is not accepted by our system'}
})
# Validate to ensure the email has not been taken by another user.
try:
user = User.objects.get(email=email)
return Response({
'status': 'failed',
'errors': {'email':'has already been taken by another user.'}
})
except User.DoesNotExist:
pass
# Create our new user
try:
user = User.objects.create_user(
username,
email,
password,
)
user.first_name = serializer.data['first_name']
user.last_name = serializer.data['last_name']
# user.is_active = False; # Need email verification to change status.
user.save()
return Response({ # Return success message.
'status': 'success',
'errors': [],
'user_id': user.id,
})
except Exception as e:
return Response({
'status' : 'failure',
'errors' : {'Unknown':'An unknown error occured, failed registering user.'}
})
else:
return Response({
'status': 'failed',
'errors': str(serializer.errors),
})
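# Illustrative client-side sketch (not part of this view): the endpoint named
# in the docstring above accepts a JSON body carrying the fields this method
# reads from the serializer. Host path and values below are examples only.
#
#   POST /api/registers/0/registration/?format=json
#   {"username": "jdoe", "email": "jdoe@example.com", "password": "secret",
#    "first_name": "Jane", "last_name": "Doe"}
#
# A successful call answers {"status": "success", "errors": [], "user_id": <id>}.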
|
{
"content_hash": "a292951468ed8eb9740db92bfb1b682e",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 95,
"avg_line_length": 42.09473684210526,
"alnum_prop": 0.5591397849462365,
"repo_name": "Oinweb/fly-django",
"id": "3158e8ea4aea31c31b7467f2a8002d610996a6c1",
"size": "3999",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "api/views/register.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "188049"
},
{
"name": "HTML",
"bytes": "334706"
},
{
"name": "JavaScript",
"bytes": "136630"
},
{
"name": "Python",
"bytes": "218526"
},
{
"name": "Shell",
"bytes": "280"
}
],
"symlink_target": ""
}
|
"""reST directive for syntax-highlighting ipython interactive sessions.
XXX - See what improvements can be made based on the new (as of Sept 2009)
'pycon' lexer for the python console. At the very least it will give better
highlighted tracebacks.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
#-----------------------------------------------------------------------------
# Needed modules
# Standard library
import re
# Third party
from pygments.lexer import Lexer, do_insertions
from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer,
PythonTracebackLexer)
from pygments.token import Comment, Generic
from sphinx import highlighting
import matplotlib
matplotlib.cbook.warn_deprecated("1.4", """
The Sphinx extension ipython_console_highlighting has moved from
matplotlib to IPython, and its use in matplotlib is deprecated.
Change your import from 'matplotlib.sphinxext.ipython_directive' to
'IPython.sphinxext.ipython_directive.""")
#-----------------------------------------------------------------------------
# Global constants
line_re = re.compile('.*?\n')
#-----------------------------------------------------------------------------
# Code begins - classes and functions
class IPythonConsoleLexer(Lexer):
"""
For IPython console output or doctests, such as:
.. sourcecode:: ipython
In [1]: a = 'foo'
In [2]: a
Out[2]: 'foo'
In [3]: print a
foo
In [4]: 1 / 0
Notes:
- Tracebacks are not currently supported.
- It assumes the default IPython prompts, not customized ones.
"""
name = 'IPython console session'
aliases = ['ipython']
mimetypes = ['text/x-ipython-console']
input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)")
output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)")
continue_prompt = re.compile(" \.\.\.+:")
tb_start = re.compile("\-+")
def get_tokens_unprocessed(self, text):
pylexer = PythonLexer(**self.options)
tblexer = PythonTracebackLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
input_prompt = self.input_prompt.match(line)
continue_prompt = self.continue_prompt.match(line.rstrip())
output_prompt = self.output_prompt.match(line)
if line.startswith("#"):
insertions.append((len(curcode),
[(0, Comment, line)]))
elif input_prompt is not None:
insertions.append((len(curcode),
[(0, Generic.Prompt, input_prompt.group())]))
curcode += line[input_prompt.end():]
elif continue_prompt is not None:
insertions.append((len(curcode),
[(0, Generic.Prompt, continue_prompt.group())]))
curcode += line[continue_prompt.end():]
elif output_prompt is not None:
# Use the 'error' token for output. We should probably make
                # our own token, but error is typically in a bright color like
# red, so it works fine for our output prompts.
insertions.append((len(curcode),
[(0, Generic.Error, output_prompt.group())]))
curcode += line[output_prompt.end():]
else:
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
def setup(app):
"""Setup as a sphinx extension."""
# This is only a lexer, so adding it below to pygments appears sufficient.
# But if somebody knows that the right API usage should be to do that via
# sphinx, by all means fix it here. At least having this setup.py
# suppresses the sphinx warning we'd get without it.
pass
#-----------------------------------------------------------------------------
# Register the extension as a valid pygments lexer
highlighting.lexers['ipython'] = IPythonConsoleLexer()
|
{
"content_hash": "24bba1e337b8e5e8ebe9fe33b0164114",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 87,
"avg_line_length": 36.808,
"alnum_prop": 0.5431427950445555,
"repo_name": "daodaoliang/neural-network-animation",
"id": "0ba9cab40307bc8b04b413d68a95cf80666520b9",
"size": "4601",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "matplotlib/sphinxext/ipython_console_highlighting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "HTML",
"bytes": "4000"
},
{
"name": "JavaScript",
"bytes": "24260"
},
{
"name": "Python",
"bytes": "4443606"
}
],
"symlink_target": ""
}
|
"""Add node with given data to linked list at a desired position."""
class Node(object):
"""Implement a node class."""
def __init__(self, data, next_node=None):
"""Give node data and next attributes on initialization."""
self.data = data
self.next = next_node
class LinkedList(object):
"""Building LinkedList class."""
def __init__(self, data=None):
"""Init singly linked list, set head to None and iterate through data
if provided as an argument."""
self.head = None
if data is not None:
try:
for item in data:
self.push(item)
except TypeError:
raise TypeError('Please enter an object that is iterable.')
def push(self, data):
"""Insert data at the head of the list."""
new_node = Node(data)
        new_node.next = self.head
self.head = new_node
def insert_nth(head, data, position):
"""Insert a node with data at a given position, return head."""
new_node = Node(data)
if head is None or position == 0:
new_node.next = head
head = new_node
else:
counter = 0
curr = head
while counter != position - 1:
if curr.next:
curr = curr.next
counter += 1
new_node.next = curr.next
curr.next = new_node
return head
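# Illustrative usage sketch (not part of the original kata): build a small
# list with LinkedList.push, then splice a value in at an arbitrary position
# with insert_nth. The values below are examples only.
if __name__ == '__main__':
    linked = LinkedList([1, 2, 3])                 # pushes to the head: 3 -> 2 -> 1
    linked.head = insert_nth(linked.head, 99, 1)   # insert 99 at position 1
    node = linked.head
    while node:
        print(node.data)                           # expected: 3, 99, 2, 1
        node = node.next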
|
{
"content_hash": "5bca26ab3ce0ca223cbec5a9950a2319",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 77,
"avg_line_length": 29.4375,
"alnum_prop": 0.5576786978060864,
"repo_name": "vbenavente/coffee_katas",
"id": "e63343e98d6aff4310f50a3a205c5f8e5d31df4b",
"size": "1413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/insertnth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4213"
}
],
"symlink_target": ""
}
|
"""
This file contains filters for the extracted data, mainly images.
"""
from __future__ import division
import logging
logger = logging.getLogger(__name__)
import config, request
from contextlib import closing
from pkg_resources import resource_filename
import re, PIL
from cStringIO import StringIO
clsn = lambda e: e.__class__.__name__
class Image(object):
"Used by the filter classes in this module."
def __init__(self, url, size=None, format=None): # raw_image=None
self.url = url
self.size = size
self.format = format
# self.raw = raw_image # for MonoImageFilter
def __repr__(self):
return self.url # this is important
class AdblockURLFilterMeta(type):
"""
Lazy loading Adblock rules.
First try to download easylist.txt, or load file from package.
The same for extralist.txt.
"""
def load_raw_rules(cls, url):
"Load raw rules from url or package file."
raw_rules = []
filename = url.split('/')[-1] # e.g.: easylist.txt
try:
with closing(request.get(url, stream=True)) as file:
file.raise_for_status()
# lines = 0 # to be removed
for rule in file.iter_lines():
raw_rules.append(rule.strip())
# lines += 1 # tbr
# if lines == 2500: break # tbr, only for windoze with no re2
logger.info("Adblock online %s: %d", filename, len(raw_rules))
except: # file server down or bad url
with open(resource_filename('summary', filename), 'r') as file:
for rule in file:
raw_rules.append(rule.strip())
logger.info("Adblock offline %s: %d", filename, len(raw_rules))
return raw_rules
def get_all_rules(cls):
"Load all available Adblock rules."
from adblockparser import AdblockRules
raw_rules = []
for url in [
config.ADBLOCK_EASYLIST_URL, config.ADBLOCK_EXTRALIST_URL]:
raw_rules.extend(cls.load_raw_rules(url))
rules = AdblockRules(raw_rules)
return rules
@property
def rules(cls):
if getattr(cls, '_rules', None) is None:
rules = cls.get_all_rules()
cls._rules = rules
return cls._rules
class AdblockURLFilter(object): # Filter
"""
Uses adblockparser (https://github.com/scrapinghub/adblockparser) and
returns `None` if it `should_block` the URL.
Hats off to Mikhail Korobov (https://github.com/kmike) for the awesome work.
It gives a lot of value to this mashup repo.
"""
__metaclass__ = AdblockURLFilterMeta
def __call__(self, url):
if AdblockURLFilter.rules.should_block(url):
logger.debug("Bad image (%s): %s", clsn(self), url)
return None
return url
class NoImageFilter(object): # AdblockURLFilter
"""
Retrieves actual image file, and returns `None` if it fails.
Otherwise it returns an instance of the `filters.Image` class containing
the URL, together with the size and format of the actual image.
Basically it hydrates this instance which is passed to following filters.
Worth mentioning again that it only gets first few chunks of the image file
until the PIL parser gets the size and format of the image.
"""
class MaxBytesException(Exception):
pass
class ZeroBytesException(Exception):
pass
class NoImageException(Exception):
pass
@classmethod
def get_image(cls, url):
"""
Returned Image instance has response url.
This might be different than the url param because of redirects.
"""
from PIL.ImageFile import Parser as PILParser
length = 0
raw_image = None
with closing(request.get(url, stream=True)) as response:
response.raise_for_status()
response_url = response.url
parser = PILParser()
for chunk in response.iter_content(config.CHUNK_SIZE):
length += len(chunk)
if length > config.IMAGE_MAX_BYTESIZE:
del parser
raise cls.MaxBytesException
parser.feed(chunk)
# comment this to get the whole file
if parser.image and parser.image.size:
raw_image = parser.image
del parser # free some memory
break
# or this to get just the size and format
# raw_image = parser.close()
if length == 0:
raise cls.ZeroBytesException
if not raw_image:
raise cls.NoImageException
image = Image(response_url, raw_image.size, raw_image.format)
return image
def __call__(self, url):
# url = super(NoImageFilter, self).__call__(url)
try:
image = NoImageFilter.get_image(url)
return image
except Exception, e:
if url.startswith('data'): # data URI
url = url[:url.find(';')]
logger.debug("Bad image (%s): %s", clsn(e), url)
pass
return None
class SizeImageFilter(object): # NoImageFilter
"""
Checks the `filters.Image` instance to have proper size.
This can raise following exceptions based on defined limits:
`TinyImageException`, `HugeImageException`, or `RatioImageException`.
If any of these happens it returns `None`.
"""
class TinyImageException(Exception):
pass
class HugeImageException(Exception):
pass
class RatioImageException(Exception):
pass
@classmethod
def check_size(cls, image):
if image.size[0] < config.IMAGE_MIN_IMGSIZE[0] or \
image.size[1] < config.IMAGE_MIN_IMGSIZE[1]:
raise cls.TinyImageException
if image.size[0] > config.IMAGE_MAX_IMGSIZE[0] or \
image.size[1] > config.IMAGE_MAX_IMGSIZE[1]:
raise cls.HugeImageException
ratio = image.size[0] / image.size[1]
if ratio < 1:
ratio = 1 / ratio
if ratio > config.IMAGE_LIMIT_RATIO:
raise cls.RatioImageException
def __call__(self, image):
# image = super(SizeImageFilter, self).__call__(image)
try:
SizeImageFilter.check_size(image)
return image
except Exception, e:
logger.debug("Bad image (%s): %s", clsn(e), image.url)
pass
return None
class MonoImageFilterMeta(type):
"Lazy load regex (former IMAGE_MONO_REGEX)."
@property
def regex(cls):
if getattr(cls, '_regex', None) is None:
regex = re.compile(config.IMAGE_MONO_RULE, re.IGNORECASE) # improve this
cls._regex = regex
return cls._regex
class MonoImageFilter(object): # SizeImageFilter
"""
Checks whether the image is plain white and returns `None`.
This filter retrieves the whole image file so it has an extra regex check
before. E.g.: rules out these URLs:
- http://wordpress.com/i/blank.jpg?m=1383295312g
- http://images.inc.com/leftnavmenu/inc-logo-white.png
"""
__metaclass__ = MonoImageFilterMeta
class MonoImageException(Exception):
pass
@classmethod
def check_color(cls, raw_image):
"""
Just check if raw_image is completely white.
http://stackoverflow.com/questions/14041562/python-pil-detect-if-an-image-is-completely-black-or-white
"""
# sum(img.convert("L").getextrema()) in (0, 2)
extrema = raw_image.convert("L").getextrema()
if extrema == (255, 255): # all white
raise cls.MonoImageException
def __call__(self, image):
# image = super(MonoImageFilter, self).__call__(image)
try:
if MonoImageFilter.regex.search(image.url):
content = request.get(image.url).content
pic = StringIO(content)
raw_image = PIL.Image.open(pic)
MonoImageFilter.check_color(raw_image)
del raw_image # more cleaning maybe
logger.debug("Good image (%s): %s", clsn(self), image.url)
            # URLs that don't match the suspicious-looking regex pass straight through
            return image
except Exception, e:
logger.debug("Bad image (%s): %s", clsn(e), image.url)
pass
return None
class FormatImageFilter(object): # MonoImageFilter
"""
Rules out animated gif images for the moment.
This can be extended to exclude other image formats based on file contents.
"""
class AnimatedImageException(Exception):
pass
@classmethod
def check_animated(cls, raw_image):
"Checks whether the gif is animated."
        try:
            raw_image.seek(1)
        except EOFError:
            # seeking past the first frame fails for a still gif: not animated
            pass
        else:
            raise cls.AnimatedImageException
def __call__(self, image):
# image = super(FormatImageFilter, self).__call__(image)
try:
if image.format.lower() == "gif":
content = request.get(image.url).content
pic = StringIO(content)
raw_image = PIL.Image.open(pic)
FormatImageFilter.check_animated(raw_image)
del raw_image
logger.debug("Good image (%s): %s", clsn(self), image.url)
            # non-gif and still-gif images pass straight through
            return image
except Exception, e:
logger.debug("Bad image (%s): %s", clsn(e), image.url)
pass
return None
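# Illustrative sketch (not from the original module): how these callables are
# meant to compose. Each filter returns None to drop a candidate or hands an
# Image instance (or URL, for the first two) to the next stage; the real
# pipeline wiring lives elsewhere in the package. The URL is a placeholder.
if __name__ == '__main__':
    candidate = 'http://example.com/some-image.jpg'  # placeholder URL
    pipeline = [AdblockURLFilter(), NoImageFilter(), SizeImageFilter(),
                MonoImageFilter(), FormatImageFilter()]
    result = candidate
    for step in pipeline:
        if result is None:
            break
        result = step(result)
    if result:
        print "kept: %s" % result
    else:
        print "dropped"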
|
{
"content_hash": "5868fc5adcc331087cbf9774c14aa2d3",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 110,
"avg_line_length": 35.01766784452297,
"alnum_prop": 0.5710393541876893,
"repo_name": "svven/summary",
"id": "18220df907bb4f14cfd63b51c8617b2e6115d228",
"size": "9910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "summary/filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2576"
},
{
"name": "HTML",
"bytes": "14954"
},
{
"name": "JavaScript",
"bytes": "2244"
},
{
"name": "Python",
"bytes": "43535"
}
],
"symlink_target": ""
}
|
from pyqtgraph.Qt import QtCore, QtGui
if not hasattr(QtCore, 'Signal'):
QtCore.Signal = QtCore.pyqtSignal
import weakref
class CanvasManager(QtCore.QObject):
SINGLETON = None
sigCanvasListChanged = QtCore.Signal()
def __init__(self):
if CanvasManager.SINGLETON is not None:
raise Exception("Can only create one canvas manager.")
CanvasManager.SINGLETON = self
QtCore.QObject.__init__(self)
self.canvases = weakref.WeakValueDictionary()
@classmethod
def instance(cls):
return CanvasManager.SINGLETON
def registerCanvas(self, canvas, name):
n2 = name
i = 0
while n2 in self.canvases:
n2 = "%s_%03d" % (name, i)
i += 1
self.canvases[n2] = canvas
self.sigCanvasListChanged.emit()
return n2
def unregisterCanvas(self, name):
c = self.canvases[name]
del self.canvases[name]
self.sigCanvasListChanged.emit()
def listCanvases(self):
return list(self.canvases.keys())
def getCanvas(self, name):
return self.canvases[name]
manager = CanvasManager()
class CanvasCombo(QtGui.QComboBox):
def __init__(self, parent=None):
QtGui.QComboBox.__init__(self, parent)
man = CanvasManager.instance()
man.sigCanvasListChanged.connect(self.updateCanvasList)
self.hostName = None
self.updateCanvasList()
def updateCanvasList(self):
canvases = CanvasManager.instance().listCanvases()
canvases.insert(0, "")
if self.hostName in canvases:
canvases.remove(self.hostName)
sel = self.currentText()
if sel in canvases:
self.blockSignals(True) ## change does not affect current selection; block signals during update
self.clear()
for i in canvases:
self.addItem(i)
if i == sel:
self.setCurrentIndex(self.count())
self.blockSignals(False)
def setHostName(self, name):
self.hostName = name
self.updateCanvasList()
|
{
"content_hash": "14935c84677de402112c2e037213dabf",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 109,
"avg_line_length": 29.2,
"alnum_prop": 0.5949771689497717,
"repo_name": "ZhuangER/robot_path_planning",
"id": "e89ec00f9bb8262bb511d68d3ab74c303eb38171",
"size": "2214",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gui/pyqtgraph/canvas/CanvasManager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "1666"
},
{
"name": "Python",
"bytes": "2095512"
}
],
"symlink_target": ""
}
|
from widgets import ImageWithHighlightWidget
from django import forms
"""
Code largely inspired by: http://blog.elsdoerfer.name/2008/01/08/fuzzydates-or-one-django-model-field-multiple-database-columns/
"""
class ImageWithHighlightFormField(forms.ImageField):
"""
    An image form field rendered with the ImageWithHighlightWidget.
"""
widget = ImageWithHighlightWidget
|
{
"content_hash": "5ea789a92b3db6da802363c6b2bd7475",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 128,
"avg_line_length": 28.53846153846154,
"alnum_prop": 0.7654986522911051,
"repo_name": "gregplaysguitar/glamkit",
"id": "2568a4c521b62ad5f08c15941a785a7b3a6bf6fd",
"size": "371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glamkit/incubated/imageutil/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "25519"
},
{
"name": "Python",
"bytes": "111853"
}
],
"symlink_target": ""
}
|
"""
tcp_message Inline Script Hook API Demonstration
------------------------------------------------
* modifies packets containing "foo" to "bar"
* prints various details for each packet.
example cmdline invocation:
mitmdump --rawtcp --tcp-host ".*" -s examples/complex/tcp_message.py
"""
from mitmproxy.utils import strutils
from mitmproxy import ctx
from mitmproxy import tcp
def tcp_message(flow: tcp.TCPFlow):
message = flow.messages[-1]
old_content = message.content
message.content = old_content.replace(b"foo", b"bar")
ctx.log.info(
"[tcp_message{}] from {} to {}:\n{}".format(
" (modified)" if message.content != old_content else "",
"client" if message.from_client else "server",
"server" if message.from_client else "client",
strutils.bytes_to_escaped_str(message.content))
)
|
{
"content_hash": "bef37548c19eca77b65fcb27866fc485",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 68,
"avg_line_length": 32.148148148148145,
"alnum_prop": 0.630184331797235,
"repo_name": "ujjwal96/mitmproxy",
"id": "b1311d08ef54f651d8ccb73e1a63e7ab49ee598f",
"size": "868",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/complex/tcp_message.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20955"
},
{
"name": "Dockerfile",
"bytes": "984"
},
{
"name": "HTML",
"bytes": "14747"
},
{
"name": "JavaScript",
"bytes": "276327"
},
{
"name": "PowerShell",
"bytes": "495"
},
{
"name": "Python",
"bytes": "1780651"
},
{
"name": "Shell",
"bytes": "4711"
}
],
"symlink_target": ""
}
|
import sys
import signal
import time
from threading import Thread
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
class Handler(BaseHTTPRequestHandler):
#
# POST request handler
#
def do_POST(self):
# Display headers
print "POST request received headers:"
print self.headers
# Did we get content-length? That means we have data to upload
if ('Content-Length' in self.headers):
# Get content length
length = int(self.headers['Content-Length'])
# Read upload data and store in content
print "START read of:",
print length,
print " bytes."
content = self.rfile.read(length)
print "END read."
# Send response back to requester, we build response string
# FIRST so we can compute the content-length header in the
# response.
#
# NOTE: The server must compute a content-length and send this
# in the response otherwise the client will not know when the
# response is complete. This is because the server and client
# keep the connection open via keep-alive. One alternative to
# using content-length is to have the server close the connection
# but this isn't as clean as it doesn't allow the client to send
# further requests unless the client robustly handles an unexpected
# closed connection.
# response = "<html><head><title>POST RESPONSE</title></head>"
# response += "<body><p>The file was uploaded.</p>"
# response += "</body></html>"
response = "{\"uid\":\"020110\",\"companies\":[\"Apple\",\"Google\",\"Facebook\"]}"
# Send response
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', str(len(response)))
self.end_headers()
self.wfile.write(response)
# Error missing content-length
else:
print "Handler:do_Post(): Error content-length missing from headers."
self.send_response(411)
self.send_header('Content-Length', 0)
self.end_headers()
return
#
# Main Program
#
if __name__ == "__main__":
# Specify server name OR ip address
# DO NOT USE localhost or 127.0.0.1
SERVERID = '10.155.111.97'
# Specify port.
PORT = 42001
# Start server
print time.asctime(), "Server STARTS with id: %s port: %i. Use <ctrl-c> to stop." % (SERVERID, PORT)
server_address = (SERVERID, PORT)
Handler.protocol_version='HTTP/1.1'
httpd = HTTPServer(server_address, Handler)
httpd.serve_forever()
    # Stop server; if we got here it's because the user typed <ctrl-c>.
print time.asctime(), "Server STOPS"
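# Illustrative usage sketch (not part of the original script): with the server
# running, the handler can be exercised from another shell, for example
#   curl --data "hello" http://10.155.111.97:42001/
# curl sends the POST body with a Content-Length header, so the upload branch
# runs and the small JSON response above comes back; the 411 branch only fires
# for clients that omit Content-Length entirely.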
|
{
"content_hash": "0bbf2cbb7dfb0226a25521733b780c81",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 105,
"avg_line_length": 35.864197530864196,
"alnum_prop": 0.6013769363166953,
"repo_name": "nishabe/Refresher-OBJC",
"id": "176626010bedf6baae87eb4db9939098718fec8c",
"size": "3199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MasterRefresher/postserver.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1486"
},
{
"name": "Objective-C",
"bytes": "223826"
},
{
"name": "Python",
"bytes": "3199"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Announcement'
db.create_table('announcements_announcement', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('text', self.gf('django.db.models.fields.TextField')(max_length=750)),
('image', self.gf('sorl.thumbnail.fields.ImageField')(default='', max_length=100, blank=True)),
('publish_from', self.gf('django.db.models.fields.DateTimeField')()),
('publish_until', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('announcements', ['Announcement'])
def backwards(self, orm):
# Deleting model 'Announcement'
db.delete_table('announcements_announcement')
models = {
'announcements.announcement': {
'Meta': {'ordering': "['-publish_from']", 'object_name': 'Announcement'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'publish_from': ('django.db.models.fields.DateTimeField', [], {}),
'publish_until': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '750'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['announcements']
|
{
"content_hash": "86dff35bf3e4525603c6128cb34d8bdc",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 119,
"avg_line_length": 50.63636363636363,
"alnum_prop": 0.5947037701974865,
"repo_name": "hoosteeno/mozillians",
"id": "cb00699b185e165f8ea6bf991287f530e338b4f8",
"size": "2246",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mozillians/announcements/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1986"
},
{
"name": "CSS",
"bytes": "205336"
},
{
"name": "HTML",
"bytes": "160325"
},
{
"name": "JavaScript",
"bytes": "90367"
},
{
"name": "Makefile",
"bytes": "478"
},
{
"name": "Python",
"bytes": "8289816"
},
{
"name": "Shell",
"bytes": "7758"
}
],
"symlink_target": ""
}
|
from keras import backend as K
from overrides import overrides
from .masked_layer import MaskedLayer
class VectorMatrixSplit(MaskedLayer):
"""
This Layer takes a tensor with K modes and splits it into a tensor with K - 1 modes and a
tensor with K modes, but one less row in one of the dimensions. We call this a vector-matrix
split to evoke the notion of taking a row- (or column-) vector off of a matrix and returning
both the vector and the remaining matrix, but this will also work with higher-order tensors.
For example, if you have a sentence that has a combined (word + characters) representation of
the tokens in the sentence, you'd have a tensor of shape
(batch_size, sentence_length, word_length + 1). You could split that using this Layer into a
tensor of shape (batch_size, sentence_length) for the word tokens in the sentence, and a tensor
of shape (batch_size, sentence_length, word_length) for the character for each word token.
This layer supports masking - we will split the mask the same way that we split the inputs.
This Layer is essentially the opposite of a VectorMatrixMerge.
"""
def __init__(self,
split_axis: int,
mask_split_axis: int=None,
propagate_mask: bool=True,
**kwargs):
self.split_axis = split_axis
self.mask_split_axis = mask_split_axis if mask_split_axis is not None else split_axis
self.propagate_mask = propagate_mask
super(VectorMatrixSplit, self).__init__(**kwargs)
@overrides
def call(self, inputs, mask=None):
return self._split_tensor(inputs, self.split_axis)
@overrides
def compute_output_shape(self, input_shape):
vector_shape = list(input_shape)
del vector_shape[self.split_axis]
matrix_shape = list(input_shape)
if matrix_shape[self.split_axis] is not None:
matrix_shape[self.split_axis] -= 1
return [tuple(vector_shape), tuple(matrix_shape)]
@overrides
def compute_mask(self, inputs, input_mask=None): # pylint: disable=unused-argument
if input_mask is None or not self.propagate_mask:
return [None, None]
return self._split_tensor(input_mask, self.mask_split_axis)
@staticmethod
def _split_tensor(tensor, split_axis: int):
modes = K.ndim(tensor)
if split_axis < 0:
split_axis = modes + split_axis
vector_slice = []
matrix_slice = []
for mode in range(modes):
if mode == split_axis:
vector_slice.append(0)
matrix_slice.append(slice(1, None, None))
else:
vector_slice.append(slice(None, None, None))
matrix_slice.append(slice(None, None, None))
return [tensor[vector_slice], tensor[matrix_slice]]
@overrides
def get_config(self):
base_config = super(VectorMatrixSplit, self).get_config()
config = {
'split_axis': self.split_axis,
'mask_split_axis': self.mask_split_axis,
'propagate_mask': self.propagate_mask,
}
config.update(base_config)
return config
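# Illustrative usage sketch (added for clarity; not part of the original module --
# the Input shape below is an assumption):
#
#   from keras.layers import Input
#   combined = Input(shape=(sentence_length, word_length + 1), dtype='int32')
#   words, characters = VectorMatrixSplit(split_axis=2)(combined)
#   # words      -> (batch_size, sentence_length)
#   # characters -> (batch_size, sentence_length, word_length)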
|
{
"content_hash": "f0e15bc4160b09a9462e8fa79ce1e8b6",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 99,
"avg_line_length": 41.55128205128205,
"alnum_prop": 0.6349892008639308,
"repo_name": "allenai/deep_qa",
"id": "825a5923f0ae4396fad6d9451016a6c535351a5f",
"size": "3241",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "deep_qa/layers/vector_matrix_split.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "792559"
},
{
"name": "Shell",
"bytes": "4730"
}
],
"symlink_target": ""
}
|
"""
Based on an example provided by Ricardo Reis.
https://github.com/ricardo-reis-1970/colorlog-YAML
This configures the `logging` module from a YAML file, and provides specific
configuration for the loggers named 'application' and 'example'.
"""
import logging.config
import pathlib
import yaml
def config():
"""
Configure `logging` from a YAML file. You might adjust this function to
provide the configuration path as an argument.
"""
path = pathlib.Path(__file__).with_suffix(".yaml")
logging.config.dictConfig(yaml.safe_load(path.read_text()))
if __name__ == "__main__":
config()
root = logging.getLogger()
root.debug("Root logs debug example")
root.info("Root logs written to console without colours")
root.warning("Root logs warning")
root.error("Root logs error")
root.critical("Root logs critical")
unknown = logging.getLogger("unknown")
unknown.debug("Unknown logs debug example")
unknown.info("Unknown logs propagated to root logger")
unknown.warning("Unknown logs warning")
unknown.error("Unknown logs error")
unknown.critical("Unknown logs critical")
application = logging.getLogger("application")
application.debug("Application logs debug filtered by log level")
application.info("Application logs written to console and file")
application.warning("Application logs not propagated to the root logger")
application.error("Application logs error example")
application.critical("Application logs critical example")
example = logging.getLogger("example")
example.debug("Example logs debug filtered by log level")
example.info("Example logs configured to write to file")
example.warning("Example logs propagated to the root logger")
example.error("Example logs error example")
example.critical("Example logs critical example")
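# A hedged sketch of what the companion ``yaml_example.yaml`` might contain; the
# real file ships alongside this example and may differ:
#
#   version: 1
#   formatters:
#     colored:
#       (): colorlog.ColoredFormatter
#       format: "%(log_color)s%(levelname)s%(reset)s %(message)s"
#   handlers:
#     console:
#       class: logging.StreamHandler
#       formatter: colored
#   root:
#     level: DEBUG
#     handlers: [console]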
|
{
"content_hash": "e6fd42467482381ef480c7b1b1474b21",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 35.283018867924525,
"alnum_prop": 0.7240641711229947,
"repo_name": "borntyping/python-colorlog",
"id": "ab1f7059f89bd4904481c2e044244b5856469894",
"size": "1870",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "doc/yaml_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25602"
}
],
"symlink_target": ""
}
|
from qingcloud.cli.qs_client.actions import key
from qingcloud.cli.qs_client.actions import bucket
from qingcloud.cli.qs_client.actions import service
from qingcloud.cli.qs_client.actions import multipart
class ActionManager(object):
@classmethod
def get_action(cls, action):
for item in cls.action_table:
if item[0] == action:
return item[1]
@classmethod
def get_valid_actions(cls):
return [item[0] for item in cls.action_table]
action_table = [
## service ##
('list-buckets', service.ListBucketsAction),
## bucket ##
('create-bucket', bucket.CreateBucketAction),
('delete-bucket', bucket.DeleteBucketAction),
('head-bucket', bucket.HeadBucketAction),
('stats-bucket', bucket.StatsBucketAction),
('list-objects', bucket.ListObjectsAction),
('get-bucket-acl', bucket.GetBucketAclAction),
('set-bucket-acl', bucket.SetBucketAclAction),
('list-multipart-uploads', bucket.ListMultipartUploadsAction),
## object ##
('create-object', key.CreateObjectAction),
('get-object', key.GetObjectAction),
('delete-object', key.DeleteObjectAction),
('head-object', key.HeadObjectAction),
## multipart ##
('initiate-multipart', multipart.InitiateMultipartAction),
('upload-multipart', multipart.UploadMultipartAction),
('list-multipart', multipart.ListMultipartAction),
('complete-multipart', multipart.CompleteMultipartAction),
('abort-multipart', multipart.AbortMultipartAction),
]
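# Illustrative lookup (not part of the original module):
#   ActionManager.get_action('create-bucket')   -> bucket.CreateBucketAction
#   ActionManager.get_valid_actions()           -> every name registered in action_table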
|
{
"content_hash": "f5601b6437e8f6814342b91f1809b977",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 70,
"avg_line_length": 39.41463414634146,
"alnum_prop": 0.656559405940594,
"repo_name": "yunify/qingcloud-cli",
"id": "0b86379e7c567d25fef7ab25c9d6438698946532",
"size": "2449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qingcloud/cli/qs_client/actions/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "852"
},
{
"name": "Python",
"bytes": "607642"
}
],
"symlink_target": ""
}
|
""" Defines the backend connection class for MySQL databases. """
from . import statements
from .mysqlconnection import MySQLConnection
|
{
"content_hash": "0ebd0b7051a8106b6edc0ed3b59f92f1",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 65,
"avg_line_length": 34,
"alnum_prop": 0.8014705882352942,
"repo_name": "orb-framework/orb",
"id": "c379627142bf9ee1c2f376087111dd5024613551",
"size": "136",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "orb/core/connection_types/sql/mysql/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "593294"
}
],
"symlink_target": ""
}
|
from test_dep_pack_old.mod import func
|
{
"content_hash": "b7fbc1a803b2f6c0eb4e2692f02b8a2a",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 38,
"avg_line_length": 39,
"alnum_prop": 0.7948717948717948,
"repo_name": "westernx/metatools",
"id": "422fbcdfa8a813117ee786d2e64bde1d7b33e521",
"size": "39",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_deprecate_sandbox/test_dep_pack_use.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "677"
},
{
"name": "Python",
"bytes": "114302"
},
{
"name": "Shell",
"bytes": "1239"
}
],
"symlink_target": ""
}
|
import math
from datetime import datetime
from bottle import jinja2_template as template, request, redirect
from models.cmsmodels import Posts
import admin.session as withsession
app = withsession.app
@withsession.app.app.get('/posts/<page:int>')
@withsession.app.app.get('/posts')
@withsession.issessionactive()
def posts(page=1):
try:
returned_posts = Posts.objects.order_by('-date').skip((int(page) - 1) * 10).limit(10)
postcount = Posts.objects().count()
data = {
"posts": returned_posts,
"count": postcount,
"ceil": math.ceil(postcount / 10),
"currentPage": page
}
except:
return template('admin/views/login.jinja2', {'errorMessage': 'DB error'})
return template('admin/views/posts.jinja2', data)
@withsession.app.app.route('/posts/addpost', method=['GET', 'POST'])
@withsession.issessionactive()
def savepost():
if request.method == 'GET':
return template('admin/views/addoreditpost.jinja2')
else:
try:
title = request.forms.getunicode('title')
content = request.forms.getunicode('content')
p_id = request.forms.get('isediting')
if p_id:
post = Posts.objects.get(id=p_id)
else:
post = Posts()
post.title = title
post.content = content
post.date = datetime.now()
post.save()
except:
return template('admin/views/login.jinja2', {'errorMessage': 'DB error'})
redirect('/admin/posts')
@withsession.app.app.get('/posts/editpost')
@withsession.issessionactive()
def editpost():
post_id = request.params.get('id')
try:
post = Posts.objects.get(id=post_id)
if post:
return template('admin/views/addoreditpost.jinja2', {'post': post})
else:
raise Exception()
except:
redirect('/admin/posts')
@withsession.app.app.route('/posts/delete', method=['POST'])
def deletepost():
try:
Posts.objects(id=request.forms.get('id')).delete()
return 'ok'
except:
return 'failed'
def initialize():
print('post controller initialized')
|
{
"content_hash": "12aa8f903f8b261a15752496132c5d1b",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 93,
"avg_line_length": 28.525641025641026,
"alnum_prop": 0.6013483146067415,
"repo_name": "unixxxx/simplecms",
"id": "01f54009122a8bea4674399d43b5e0debfb65d49",
"size": "2225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admin/controllers/postcontroller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "832"
},
{
"name": "Python",
"bytes": "17926"
}
],
"symlink_target": ""
}
|
from enum import Enum
from typing import List, Set
from russ.syllables import get_syllables
class Stress:
"""
    Word stress (accent).
"""
class Type(Enum):
ANY = -1
PRIMARY = 0
SECONDARY = 1
def __init__(self, position: int, stress_type: Type=Type.PRIMARY) -> None:
self.position = position
self.type = stress_type
def __hash__(self):
return hash(self.position)
def __eq__(self, other: 'Stress'):
return self.position == other.position and self.type == other.type
def __str__(self):
return str(self.position) + "\t" + str(self.type)
def __repr__(self):
return self.__str__()
class StressedWord:
"""
    A word and its stresses.
"""
def __init__(self, text: str, stresses: Set[Stress]) -> None:
self.stresses = stresses
self.text = text
self.syllables = get_syllables(text)
self.__accent_syllables()
def get_primary_stresses(self) -> List[int]:
return [stress.position for stress in self.stresses if stress.type == Stress.Type.PRIMARY]
def get_secondary_stresses(self) -> List[int]:
return [stress.position for stress in self.stresses if stress.type == Stress.Type.SECONDARY]
def add_stress(self, position: int, stress_type: Stress.Type=Stress.Type.PRIMARY) -> None:
self.stresses.add(Stress(position, stress_type))
self.__accent_syllables()
def add_stresses(self, stresses: List[Stress]) -> None:
self.stresses = set(self.stresses).union(set(stresses))
self.__accent_syllables()
def __accent_syllables(self):
for syllable in self.syllables:
if Stress(syllable.vowel()) in self.stresses:
syllable.stress = syllable.vowel()
else:
syllable.stress = -1
def __str__(self):
return self.text + "\t" + ",".join([str(i) for i in self.get_primary_stresses()])+ \
"\t" + ",".join([str(i) for i in self.get_secondary_stresses()])
def __repr__(self):
return self.__str__()
def __hash__(self):
return hash(self.text)
def __eq__(self, other: 'StressedWord'):
return self.text == other.text
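# Illustrative usage (the example word and stress positions are assumptions):
#
#   word = StressedWord(u"молоко", {Stress(5)})
#   word.get_primary_stresses()                  # -> [5]
#   word.add_stress(1, Stress.Type.SECONDARY)    # mark a secondary stress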
|
{
"content_hash": "ec4ce9a6f304e05adecd708138b181f5",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 100,
"avg_line_length": 29.263157894736842,
"alnum_prop": 0.5908273381294964,
"repo_name": "IlyaGusev/rupo",
"id": "8f1b95ea0c47ad6f208731f9d91c0c210fdd71f9",
"size": "2373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rupo/stress/word.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "139385"
},
{
"name": "Shell",
"bytes": "692"
}
],
"symlink_target": ""
}
|
"""Loads run-time extensions
These loads components are considered extensions as they extend the underlying
AWS instances to add feature support and state maintenance. This composition
avoids excessively large AWS instance classes as external objects can augment
the AWS instances as needed to retain their information.
"""
import json
import os
import time
import urllib.parse
from datetime import date
from string import Template
from typing import Dict, Optional
import paramiko.client as sshclient
from influxdb import InfluxDBClient
from tornado import gen
from loadsbroker import logger
from loadsbroker.aws import EC2Collection, EC2Instance
from loadsbroker.dockerctrl import DOCKER_RETRY_EXC, DockerDaemon
from loadsbroker.options import InfluxDBOptions
from loadsbroker.ssh import makedirs
from loadsbroker.util import join_host_port, retry
SUPPORT_DIR = os.path.join(os.path.dirname(__file__), "support")
with open(os.path.join(SUPPORT_DIR, "telegraf.conf"), "r") as f:
TELEGRAF_CONF = f.read()
MONITOR_DASHBOARD_FN = "monitor-dashboard.json"
with open(os.path.join(SUPPORT_DIR, MONITOR_DASHBOARD_FN), "r") as f:
MONITOR_DASHBOARD_JSON = f.read()
UPLOAD2S3_PATH = os.path.join(SUPPORT_DIR, "upload2s3.sh")
class SSH:
"""SSH client to communicate with instances."""
def __init__(self, ssh_keyfile):
self._ssh_keyfile = ssh_keyfile
def connect(self, instance):
"""Opens an SSH connection to this instance."""
client = sshclient.SSHClient()
client.set_missing_host_key_policy(sshclient.AutoAddPolicy())
client.connect(instance.ip_address, username="core",
key_filename=self._ssh_keyfile)
return client
def _send_file(self, sftp, local_obj, remote_file):
# Ensure the base directory for the remote file exists
base_dir = os.path.dirname(remote_file)
makedirs(sftp, base_dir)
# Copy the local file to the remote location.
sftp.putfo(local_obj, remote_file)
def upload_file(self, instance, local_obj, remote_file):
"""Upload a file to an instance. Blocks."""
client = self.connect(instance)
try:
sftp = client.open_sftp()
try:
self._send_file(sftp, local_obj, remote_file)
finally:
sftp.close()
finally:
client.close()
async def reload_sysctl(self, collection):
def _reload(inst):
client = self.connect(inst.instance)
try:
stdin, stdout, stderr = client.exec_command(
"sudo sysctl -p /etc/sysctl.conf")
output = stdout.channel.recv(4096)
stdin.close()
stdout.close()
stderr.close()
return output
finally:
client.close()
await collection.map(_reload)
class Docker:
"""Docker commands for AWS instances using :class:`DockerDaemon`"""
def __init__(self, ssh):
self.sshclient = ssh
async def setup_collection(self, collection):
def setup_docker(ec2_instance):
instance = ec2_instance.instance
state = ec2_instance.state
if instance.ip_address is None:
docker_host = 'tcp://0.0.0.0:7890'
else:
docker_host = "tcp://%s:2375" % instance.ip_address
if not hasattr(state, "docker"):
state.docker = DockerDaemon(host=docker_host)
await collection.map(setup_docker)
@staticmethod
def not_responding_instances(collection):
return [x for x in collection.instances
if not x.state.docker.responded]
async def wait(self, collection, interval=60, timeout=600):
"""Waits till docker is available on every instance in the
collection."""
end = time.time() + timeout
not_responded = self.not_responding_instances(collection)
def get_container(inst):
try:
inst.state.docker.get_containers()
inst.state.docker.responded = True
except DOCKER_RETRY_EXC:
logger.debug("Docker not ready yet on %s",
str(inst.instance.id))
except Exception as exc:
logger.debug("Got exception on %s: %r",
str(inst.instance.id), exc)
# Attempt to fetch until they've all responded
while not_responded and time.time() < end:
await gen.multi([collection.execute(get_container, x)
for x in not_responded])
# Update the not_responded
not_responded = self.not_responding_instances(collection)
if not_responded:
await collection.wait(interval)
# Prune the non-responding
logger.debug("Pruning %d non-responding instances.",
len(not_responded))
await collection.remove_instances(not_responded)
async def is_running(self, collection, container_name, prune=True):
"""Checks running instances in a collection to see if the provided
container_name is running on the instance."""
def has_container(instance):
try:
all_containers = instance.state.docker.get_containers()
except:
if prune:
msg = ("Lost contact with a container on %s, "
"marking dead.")
logger.debug(msg % instance.instance.id)
instance.state.nonresponsive = True
return not prune
return any(container_name in cont["Image"]
for cont in all_containers.values())
results = await gen.multi([collection.execute(has_container, x)
for x in collection.running_instances()])
return any(results)
async def load_containers(self, collection, container_name, container_url):
"""Loads's a container of the provided name to the instance."""
@retry(on_result=lambda res: not res)
def image_loaded(docker, container_name):
return docker.has_image(container_name)
def load(instance):
def debug(msg):
logger.debug("[%s] %s" % (instance.instance.id, msg))
docker = instance.state.docker
has_container = docker.has_image(container_name)
if has_container and "latest" not in container_name:
return
if container_url:
debug("Importing %s" % container_url)
with self.sshclient.connect(instance.instance) as client:
output = docker.import_container(client, container_url)
if output:
logger.debug(output)
else:
debug("Pulling %r" % container_name)
output = docker.pull_container(container_name)
if not image_loaded(docker, container_name):
debug("Docker does not have %s" % container_name)
return False
return output
await collection.map(load)
async def run_containers(self,
collection: EC2Collection,
name: str,
command: Optional[str] = None,
env: Optional[Dict[str, str]] = None,
volumes={},
ports={},
local_dns=None,
delay=0,
pid_mode=None):
"""Run a container of the provided name with the env/command
args supplied."""
if env is None:
env = {}
if local_dns is not None:
local_dns = collection.local_dns
if isinstance(ports, str):
port_list = [x.split(":") for x in ports.split(",")]
ports = {x[0]: x[1] for x in port_list if x and len(x) == 2}
if isinstance(volumes, str):
volume_list = [x.split(":") for x in volumes.split(",")]
volumes = {x[1]: {"bind": x[0], "ro": len(x) < 3 or x[2] == "ro"}
for x in volume_list if x and len(x) >= 2}
def run(instance, tries=0):
dns = getattr(instance.state, "dns_server", None)
dns = [dns] if dns else []
docker = instance.state.docker
rinstance = instance.instance
extra = [
("HOST_IP", rinstance.ip_address),
("PRIVATE_IP", rinstance.private_ip_address),
("STATSD_HOST", rinstance.private_ip_address),
("STATSD_PORT", "8125")]
extra_env = env.copy()
extra_env.update(extra)
_env = {self.substitute_names(k, extra_env):
self.substitute_names(v, extra_env)
for k, v in extra_env.items()}
if command is None:
_command = None
else:
_command = self.substitute_names(command, _env)
_volumes = {}
for host, volume in volumes.items():
binding = volume.copy()
binding["bind"] = self.substitute_names(
binding.get("bind", host), _env)
_volumes[self.substitute_names(host, _env)] = binding
try:
return docker.safe_run_container(
name,
_command,
env=_env,
volumes=_volumes,
ports=ports,
dns=dns,
pid_mode=pid_mode
)
except Exception:
return False
results = await collection.map(run, delay=delay)
return results
async def kill_containers(self, collection, container_name):
"""Kill the container with the provided name."""
def kill(instance):
try:
instance.state.docker.kill_container(container_name)
except Exception:
logger.debug("Lost contact with a container, marking dead.",
exc_info=True)
instance.state.nonresponsive = True
await collection.map(kill)
async def stop_containers(self,
collection,
container_name,
timeout=15,
capture_stream=None):
"""Gracefully stops the container with the provided name and
timeout."""
def stop(instance):
try:
instance.state.docker.stop_container(
container_name,
timeout,
capture_stream)
except Exception:
logger.debug("Lost contact with a container, marking dead.",
exc_info=True)
instance.state.nonresponsive = True
await collection.map(stop)
@staticmethod
def substitute_names(tmpl_string, dct):
"""Given a template string, sub in values from the dct"""
return Template(tmpl_string).substitute(dct)
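# Note (illustrative, not part of the original module): substitute_names fills
# $NAME placeholders via string.Template, e.g.
#   Docker.substitute_names("$STATSD_HOST:$STATSD_PORT",
#                           {"STATSD_HOST": "10.0.0.1", "STATSD_PORT": "8125"})
# returns "10.0.0.1:8125".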
class DNSMasq:
"""Manages DNSMasq on AWS instances."""
def __init__(self, info, docker):
self.info = info
self.docker = docker
async def start(self, collection, hostmap):
"""Starts dnsmasq on a host with a given host mapping.
Host mapping is a dict of "Hostname" -> ["IP"].
"""
records = []
tmpl = Template("--host-record=$name,$ip")
for name, ips in hostmap.items():
for ip in ips:
records.append(tmpl.substitute(name=name, ip=ip))
cmd = "--user=root " + " ".join(records)
ports = {(53, "udp"): 53}
results = await self.docker.run_containers(
collection, self.info.name, cmd, ports=ports, local_dns=False)
# Add the dns info to the instances
for inst, response in zip(collection.instances, results):
state = inst.state
if hasattr(state, "dns_server"):
continue
dns_ip = response["NetworkSettings"]["IPAddress"]
state.dns_server = dns_ip
async def stop(self, collection):
await self.docker.stop_containers(collection, self.info.name)
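# Illustrative hostmap argument for DNSMasq.start (names and addresses are
# placeholders): {"db.internal": ["10.0.0.5"], "api.internal": ["10.0.0.6", "10.0.0.7"]}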
class Watcher:
"""Watcher additions to AWS instances"""
def __init__(self, info, options=None):
self.info = info
self.options = options
async def start(self, collection, docker):
"""Launches Heka containers on all instances."""
if not self.options:
logger.debug("Watcher not configured")
return
bind = {'bind': '/var/run/docker.sock', 'ro': False}
volumes = {'/var/run/docker.sock': bind}
ports = {}
env = {'AWS_ACCESS_KEY_ID': self.options['AWS_ACCESS_KEY_ID'] or "",
'AWS_SECRET_ACCESS_KEY':
self.options['AWS_SECRET_ACCESS_KEY'] or ""}
logger.debug("Launching Watcher...")
await docker.run_containers(collection, self.info.name,
"python ./watch.py", env=env,
volumes=volumes, ports=ports,
pid_mode="host")
async def stop(self, collection, docker):
await docker.stop_containers(collection, self.info.name)
class InfluxDB:
"""A Run's managed InfluxDB"""
def __init__(self, info, ssh: SSH, aws_creds: Dict[str, str]) -> None:
self.info = info
self.sshclient = ssh
self.aws_creds = aws_creds
async def start(self, collection: EC2Collection, options: InfluxDBOptions):
await collection.map(self._setup_influxdb, 0, options)
def _setup_influxdb(self, instance: EC2Instance, options: InfluxDBOptions):
"""With an already running InfluxDB, upload the backup script
and create a Run db.
"""
with open(UPLOAD2S3_PATH) as fp:
self.sshclient.upload_file(
instance.instance, fp, "/home/core/upload2s3.sh")
args = options.client_args
args['host'] = instance.instance.ip_address
database = args.pop('database')
client = InfluxDBClient(**args)
logger.debug("Creating InfluxDB: %s", options.database_url)
client.create_database(database)
async def stop(self,
collection: EC2Collection,
options: InfluxDBOptions,
env: Dict[str, str],
project: str,
plan: str):
"""Backup the InfluxDB to s3."""
if not (self.aws_creds.get('AWS_ACCESS_KEY_ID') or
self.aws_creds.get('AWS_SECRET_ACCESS_KEY')):
logger.error("Unable to upload2s3: No AWS credentials defined")
return
bucket = env.get('INFLUXDB_S3_BUCKET')
if not bucket:
logger.error("Unable to upload2s3: No INFLUXDB_S3_BUCKET defined")
return
db = options.database
backup = "{:%Y-%m-%d}-{}-influxdb".format(date.today(), db)
archive = backup + ".tar.bz2"
cmd = """\
influxd backup -database {db} {destdir}/{backup} && \
tar cjvf {destdir}/{archive} -C {destdir} {backup} \
""".format(
db=db,
destdir="/influxdb-backup",
backup=backup,
archive=archive
)
# wrap in a shell to chain commands in docker exec
cmd = "sh -c '{}'".format(cmd)
await collection.map(self._container_exec, 0, self.info.name, cmd)
        # upload2s3 is run from the host (vs. the lightweight
        # influxdb-alpine container) because it requires openssl/curl
destdir = os.path.join(project, plan)
cmd = """\
export AWS_ACCESS_KEY_ID={AWS_ACCESS_KEY_ID} && \
export AWS_SECRET_ACCESS_KEY={AWS_SECRET_ACCESS_KEY} && \
sh /home/core/upload2s3.sh {archive} {bucket} "{destdir}" \
""".format(
archive=os.path.join("/home/core/influxdb/backup", archive),
bucket=bucket,
destdir=destdir,
**self.aws_creds
)
exits = await collection.map(self._ssh_exec, 0, cmd)
url = "https://{}.s3.amazonaws.com/{}/{}".format(
bucket,
urllib.parse.quote(destdir),
archive)
if any(exits):
logger.error("InfluxDB upload2s3 failed: %s (%s)", exits, url)
else:
logger.debug("InfluxDB upload2s3 succeeded (%s)", url)
def _container_exec(self,
instance: EC2Instance,
container_name: str,
cmd: str) -> bytes:
conts = list(instance.state.docker.containers_by_name(container_name))
if not conts:
return None
cont = conts[0] # assume 1
return instance.state.docker.exec_run(cont['Id'], cmd)
def _ssh_exec(self, instance: EC2Instance, cmd: str) -> int:
with self.sshclient.connect(instance.instance) as client:
stdin, stdout, stderr = client.exec_command(cmd)
stdin.close()
status = stdout.channel.recv_exit_status()
if status:
logger.error("ssh cmd failed:\n%s", stderr.read())
return status
class Grafana:
"""Grafana monitor Dashboard for AWS instances"""
data_source_defaults = dict(
type='influxdb',
access='proxy',
isDefault=True,
basicAuth=False
)
def __init__(self, info) -> None:
self.info = info
async def start(self,
collection: EC2Collection,
run_id: str,
options: InfluxDBOptions):
data_source = self.data_source_defaults.copy()
data_source.update(
name="loads-broker InfluxDB Monitor (run_id: {})".format(run_id),
url="http://" + join_host_port(options.host, options.port),
database=options.database,
)
port = 8080
ports = {3000: port}
cmd = """\
apt-get update -qq && \
apt-get install -qq -y --no-install-recommends curl && \
/etc/init.d/grafana-server start && \
until curl "${__LOADS_GRAFANA_URL__}" \
-X POST \
-H "Accept: application/json" \
-H "Content-Type: application/json" \
--data-binary "${__LOADS_GRAFANA_DS_PAYLOAD__}"; do
sleep 1
done && \
/etc/init.d/grafana-server stop && \
mkdir "${GF_DASHBOARDS_JSON_PATH}" && \
echo "${__LOADS_GRAFANA_DASHBOARD__}" >> \
"${GF_DASHBOARDS_JSON_PATH}/monitor-dashboard.json" && \
./run.sh
"""
cmd = "sh -c '{}'".format(cmd)
# Avoid docker.run_container: it munges our special env
def run(instance, tries=0):
docker = instance.state.docker
url = "http://admin:admin@localhost:3000/api/datasources"
env = {
'GF_DEFAULT_INSTANCE_NAME': instance.instance.id,
'GF_DASHBOARDS_JSON_ENABLED': "true",
'GF_DASHBOARDS_JSON_PATH': "/var/lib/grafana/dashboards",
'__LOADS_GRAFANA_URL__': url,
'__LOADS_GRAFANA_DS_PAYLOAD__': json.dumps(data_source),
'__LOADS_GRAFANA_DASHBOARD__': MONITOR_DASHBOARD_JSON,
}
try:
docker.safe_run_container(
self.info.name,
entrypoint=cmd,
env=env,
ports=ports,
)
except Exception:
return False
# XXX: not immediately available
logger.info("Setting up Dashboard: http://%s:%s/dashboard/file/%s",
instance.instance.ip_address,
port,
MONITOR_DASHBOARD_FN)
await collection.map(run)
async def stop(self, collection, docker):
await docker.stop_containers(collection, self.info.name)
class Telegraf:
"""Telegraf monitor for AWS instances"""
def __init__(self, info) -> None:
self.info = info
async def start(self,
collection: EC2Collection,
_: Docker,
options: InfluxDBOptions,
step: str,
type_: Optional[str] = None):
ports = {(8125, "udp"): 8125}
cmd = """\
echo "${__LOADS_TELEGRAF_CONF__}" > /etc/telegraf/telegraf.conf && \
telegraf \
"""
cmd = "sh -c '{}'".format(cmd)
# Avoid docker.run_container: it munges our special env
def run(instance, tries=0):
docker = instance.state.docker
env = {
'__LOADS_TELEGRAF_CONF__': TELEGRAF_CONF,
'__LOADS_INFLUX_ADDR__':
join_host_port(options.host, options.port),
'__LOADS_INFLUX_DB__': options.database,
'__LOADS_TELEGRAF_HOST__': instance.instance.id,
'__LOADS_TELEGRAF_STEP__': step
}
if type_:
env['__LOADS_TELEGRAF_TYPE__'] = type_
try:
return docker.safe_run_container(
self.info.name,
cmd,
env=env,
ports=ports,
)
except Exception:
return False
await collection.map(run)
async def stop(self, collection, docker):
await docker.stop_containers(collection, self.info.name)
|
{
"content_hash": "71507684c252289e7cdd5b38c9e807c5",
"timestamp": "",
"source": "github",
"line_count": 597,
"max_line_length": 79,
"avg_line_length": 36.74036850921273,
"alnum_prop": 0.5402571350414881,
"repo_name": "loads/loads-broker",
"id": "04e239daf0911b3be2449aa21f321cb0b1b5936d",
"size": "21934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "loadsbroker/extensions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "73285"
},
{
"name": "JavaScript",
"bytes": "7555"
},
{
"name": "Makefile",
"bytes": "774"
},
{
"name": "Python",
"bytes": "210043"
},
{
"name": "Shell",
"bytes": "1407"
}
],
"symlink_target": ""
}
|
"""Tornado forms: simple form validation.
Form fields.
"""
import re
import datetime
import decimal
from tornforms.requirements import *
from tornforms.utils import FormError, ErrorList, decapitalize
class BaseField(object):
"""Abstract base class for form fields.
"""
def __init__(self, required=False, in_list=False, not_in_list=False, regex=False, messages={}):
self.reqs = []
if required:
req = Required(message=messages.get('required'))
self.reqs.append(req)
if in_list:
req = InList(in_list, message=messages.get('in_list'))
self.reqs.append(req)
if not_in_list:
req = NotInList(not_in_list, message=messages.get('not_in_list'))
self.reqs.append(req)
if regex:
req = Regex(regex, message=messages.get('regex'))
self.reqs.append(req)
def to_python(self, val):
"""Returns str."""
# Tornado gives us lists
if isinstance(val, list):
val = val[-1]
try:
return val.decode('utf-8')
except AttributeError as e:
return val
def to_dict(self):
"""Return field requirements as dict.
"""
obj = dict()
for req in self.reqs:
name = decapitalize(req.__class__.__name__)
obj[name] = req.to_dict()
return obj
def validate(self, val):
"""Check value against field requirements.
"""
errors = ErrorList()
for req in self.reqs:
try:
req.test(val)
except FormError as e:
errors.append(e)
return errors
def __repr__(self):
return "<{0} {1}>".format(self.__class__.__name__,
' '.join([repr(req) for req in self.reqs]))
class TextField(BaseField):
"""Text field handler.
Keyword args:
required - required field boolean
in_list - check for value included in list
not_in_list - check for value excluded from list
regex - check for regex match
min_length - check for minimum value length int
max_length - check for maximum value length int
messages - custom messages dict
"""
def __init__(self, required=False, in_list=False, not_in_list=False, regex=False,
min_length=False, max_length=False, messages={}):
super(TextField, self).__init__(required=required, in_list=in_list,
not_in_list=not_in_list, regex=regex, messages=messages)
if min_length:
req = MinLength(min_length, message=messages.get('min_length'))
self.reqs.append(req)
if max_length:
req = MaxLength(max_length, message=messages.get('max_length'))
self.reqs.append(req)
def to_python(self, val):
"""Returns None or str."""
val = super(TextField, self).to_python(val)
if not val:
return None
else:
return val
class EmailField(TextField):
"""Email field handler.
Text field handler that includes a basic regex check for email formatting.
Keyword args:
required - required field boolean
in_list - check for value included in list
not_in_list - check for value excluded from list
regex - check for regex match
min_length - check for minimum value length int
max_length - check for maximum value length int
messages - custom messages dict
"""
EMAIL_VALIDATOR = re.compile(r"[^@]+@[^@]+\.[^@]+")
def __init__(self, required=False, in_list=False, not_in_list=False, regex=False,
                 min_length=False, max_length=False, messages={}):
        super(EmailField, self).__init__(required=required, in_list=in_list,
            not_in_list=not_in_list, regex=regex, min_length=min_length,
            max_length=max_length, messages=messages)
req = Regex(self.EMAIL_VALIDATOR, message=messages.get('regex'))
self.reqs.append(req)
class IntField(BaseField):
"""Int field handler.
Keyword args:
required - required field boolean
in_list - check for value included in list
not_in_list - check for value excluded from list
regex - check for regex match
min_value - check for minimum value int
max_value - check for maximum value int
messages - custom messages dict
"""
def __init__(self, required=False, in_list=False, not_in_list=False, regex=False,
min_value=False, max_value=False, messages={}):
super(IntField, self).__init__(required=required, in_list=in_list,
not_in_list=not_in_list, regex=regex, messages=messages)
if min_value:
req = MinValue(min_value, message=messages.get('min_value'))
self.reqs.append(req)
if max_value:
req = MaxValue(max_value, message=messages.get('max_value'))
self.reqs.append(req)
def to_python(self, val):
"""Returns int."""
val = super(IntField, self).to_python(val)
if val in ('', None):
return None
else:
return int(val, base=10)
class DecimalField(IntField):
"""Decimal field handler.
Keyword args:
required - required field boolean
in_list - check for value included in list
not_in_list - check for value excluded from list
regex - check for regex match
min_value - check for minimum value int
max_value - check for maximum value int
messages - custom messages dict
"""
def to_python(self, val):
"""Returns decimal."""
val = super(DecimalField, self).to_python(val)
if val in ('', None):
return None
else:
return decimal.Decimal(val)
class DateField(BaseField):
"""Date field handler.
Keyword args:
required - required field boolean
in_list - check for value included in list
not_in_list - check for value excluded from list
regex - check for regex match
messages - custom messages dict
"""
def to_python(self, val):
"""Returns date."""
val = super(DateField, self).to_python(val)
if val in ('', None):
return None
else:
            return datetime.datetime.strptime(val, '%Y-%m-%d').date()
class TimeField(BaseField):
"""Time field handler.
Keyword args:
required - required field boolean
in_list - check for value included in list
not_in_list - check for value excluded from list
regex - check for regex match
messages - custom messages dict
"""
def to_python(self, val):
"""Returns time."""
val = super(TimeField, self).to_python(val)
if val in ('', None):
return None
else:
formats = ('%I:%M:%S %p', '%I:%M %p', '%I %p', '%I%p', '%H:%M:%S', '%H:%M', '%H',)
err = None
for format in formats:
try:
time = datetime.datetime.strptime(val, format)
except ValueError as e:
err = e
else:
return time
return err
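# Illustrative usage sketch (field arguments are assumptions, not part of the
# original module):
#
#   field = TextField(required=True, max_length=5)
#   field.validate(field.to_python("too long"))   # -> ErrorList with one error
#   field.validate(field.to_python("ok"))         # -> empty ErrorList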
|
{
"content_hash": "6ac8a05517bca5241ab9a6119d09e220",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 99,
"avg_line_length": 32.44444444444444,
"alnum_prop": 0.5694520547945205,
"repo_name": "cole/tornforms",
"id": "9b47d1168a9f4791a9b8b7b4c75ef7f9ec7c1253",
"size": "7356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tornforms/fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22870"
}
],
"symlink_target": ""
}
|
"""
Python program for cloning VMs on a host on which hostd is running
"""
from __future__ import print_function
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim, vmodl
import argparse
import atexit
import getpass
import sys
def GetArgs():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(description='Process args for powering on a Virtual Machine')
parser.add_argument('-s', '--host', required=True, action='store', help='Remote host to connect to')
parser.add_argument('-o', '--port', type=int, default=443, action='store', help='Port to connect on')
parser.add_argument('-u', '--user', required=True, action='store', help='User name to use when connecting to host')
parser.add_argument('-p', '--password', required=False, action='store', help='Password to use when connecting to host')
parser.add_argument('-v', '--vmname', required=True, action='append', help='Names of the Virtual Machines to power on')
parser.add_argument('-f', '--foldername', required=True, action='append', help='Names of the Virtual Machines Folder to clone')
args = parser.parse_args()
return args
def WaitForTasks(tasks, si):
"""
Given the service instance si and tasks, it returns after all the
tasks are complete
"""
pc = si.content.propertyCollector
taskList = [str(task) for task in tasks]
# Create filter
objSpecs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task)
for task in tasks]
propSpec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task,
pathSet=[], all=True)
filterSpec = vmodl.query.PropertyCollector.FilterSpec()
filterSpec.objectSet = objSpecs
filterSpec.propSet = [propSpec]
filter = pc.CreateFilter(filterSpec, True)
try:
version, state = None, None
# Loop looking for updates till the state moves to a completed state.
while len(taskList):
update = pc.WaitForUpdates(version)
for filterSet in update.filterSet:
for objSet in filterSet.objectSet:
task = objSet.obj
for change in objSet.changeSet:
if change.name == 'info':
state = change.val.state
elif change.name == 'info.state':
state = change.val
else:
continue
if not str(task) in taskList:
continue
if state == vim.TaskInfo.State.success:
# Remove task from taskList
taskList.remove(str(task))
elif state == vim.TaskInfo.State.error:
raise task.info.error
# Move to next version
version = update.version
finally:
if filter:
filter.Destroy()
# Start program
def main():
"""
    Simple command-line program for cloning virtual machines on a system.
"""
args = GetArgs()
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and user %s: ' % (args.host,args.user))
try:
vmnames = args.vmname
if not len(vmnames):
print("No virtual machine specified for poweron")
sys.exit()
foldernames = args.foldername
si = None
try:
si = SmartConnect(host=args.host,
user=args.user,
pwd=password,
port=int(args.port))
except IOError:
pass
if not si:
print("Cannot connect to specified host using specified username and password")
sys.exit()
        # atexit registers Disconnect so the server session is closed when the program exits
atexit.register(Disconnect, si)
        # Retrieve the list of Virtual Machines from the inventory objects
        # under the rootFolder
content = si.content
objView = content.viewManager.CreateContainerView(content.rootFolder,
[vim.VirtualMachine],
True)
vmList = objView.view
objView.Destroy()
objView = content.viewManager.CreateContainerView(content.rootFolder,
[vim.Folder],
True)
folderList = objView.view
def clone(vm):
def cloneto(folder):
print("Clone VM %s to folder %s" % (vm, folder))
relocateSpec = vim.vm.RelocateSpec()
cloneSpec = vim.vm.CloneSpec()
cloneSpec.location = relocateSpec
return vm.Clone(folder, "Clone Test VM", cloneSpec)
return [cloneto(folder) for folder in folderList if folder.name in foldernames]
        # Find the requested vms and clone them into the matching folders
        tasks = sum([clone(vm) for vm in vmList if vm.name in vmnames], [])
        # Wait for the clone tasks to complete
        WaitForTasks(tasks, si)
        print("Virtual Machine(s) have been cloned successfully")
except vmodl.MethodFault as e:
print("Caught vmodl fault : " + e.msg)
except Exception as e:
print("Caught Exception : " + str(e))
print(sys.exc_traceback.tb_lineno)
# Start program
if __name__ == "__main__":
main()
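# Example invocation (host, credentials and object names are placeholders):
#   python clonevm.py -s vcenter.example.com -u administrator -v source-vm -f target-folder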
|
{
"content_hash": "cbb9763866430362c4a42f2be98ff6b0",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 130,
"avg_line_length": 34.449367088607595,
"alnum_prop": 0.5812970788168289,
"repo_name": "mat-vmware/vsphere-api-guide-python",
"id": "262b3155208e271c6c4d0738e1161b79ede85c10",
"size": "6102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clonevm.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24773"
}
],
"symlink_target": ""
}
|
"""
desispec.io.fluxcalibration
===========================
IO routines for flux calibration.
"""
from __future__ import absolute_import
import os
from astropy.io import fits
import numpy,scipy
from .util import fitsheader, native_endian, makepath
def write_stdstar_model(norm_modelfile,normalizedFlux,wave,fibers,data,header=None):
"""Writes the normalized flux for the best model.
"""
hdr = fitsheader(header)
hdr['EXTNAME'] = ('FLUX', 'erg/s/cm2/A')
hdr['BUNIT'] = ('erg/s/cm2/A', 'Flux units')
hdu1=fits.PrimaryHDU(normalizedFlux,header=hdr)
#fits.writeto(norm_modelfile,normalizedFlux,header=hdr, clobber=True)
hdr['EXTNAME'] = ('WAVE', '[Angstroms]')
hdr['BUNIT'] = ('Angstrom', 'Wavelength units')
hdu2 = fits.ImageHDU(wave, header=hdr)
hdr['EXTNAME'] = ('FIBERS', 'no dimension')
hdu3 = fits.ImageHDU(fibers, header=hdr)
hdr['EXTNAME'] = ('METADATA', 'no dimension')
from astropy.io.fits import Column
BESTMODELINDEX=Column(name='BESTMODELINDEX',format='K',array=data['BESTMODEL'])
TEMPLATEID=Column(name='TEMPLATEID',format='K',array=data['TEMPLATEID'])
CHI2DOF=Column(name='CHI2DOF',format='D',array=data['CHI2DOF'])
cols=fits.ColDefs([BESTMODELINDEX,TEMPLATEID,CHI2DOF])
tbhdu=fits.BinTableHDU.from_columns(cols,header=hdr)
hdulist=fits.HDUList([hdu1,hdu2,hdu3,tbhdu])
hdulist.writeto(norm_modelfile,clobber=True)
#fits.append(norm_modelfile,cols,header=tbhdu.header)
def read_stdstar_models(filename):
"""Read stdstar models from filename.
Args:
filename (str): File containing standard star models.
Returns:
read_stdstar_models (tuple): flux[nspec, nwave], wave[nwave], fibers[nspec]
"""
flux = native_endian(fits.getdata(filename, 0))
wave = native_endian(fits.getdata(filename, 1))
fibers = native_endian(fits.getdata(filename, 2))
return flux,wave,fibers
def write_flux_calibration(outfile, fluxcalib, header=None):
"""Writes flux calibration.
"""
hdr = fitsheader(header)
hdr['EXTNAME'] = ('FLUXCALIB', 'CHECK UNIT')
fits.writeto(outfile,fluxcalib.calib,header=hdr, clobber=True)
hdr['EXTNAME'] = ('IVAR', 'CHECK UNIT')
hdu = fits.ImageHDU(fluxcalib.ivar, header=hdr)
fits.append(outfile, hdu.data, header=hdu.header)
hdr['EXTNAME'] = ('MASK', 'no dimension')
hdu = fits.ImageHDU(fluxcalib.mask, header=hdr)
fits.append(outfile, hdu.data, header=hdu.header)
hdr['EXTNAME'] = ('WAVELENGTH', '[Angstroms]')
hdu = fits.ImageHDU(fluxcalib.wave, header=hdr)
fits.append(outfile, hdu.data, header=hdu.header)
def read_flux_calibration(filename):
"""Read flux calibration.
"""
# Avoid a circular import conflict at package install/build_sphinx time.
from ..fluxcalibration import FluxCalib
calib=native_endian(fits.getdata(filename, 0))
ivar=native_endian(fits.getdata(filename, "IVAR"))
mask=native_endian(fits.getdata(filename, "MASK", uint=True))
wave=native_endian(fits.getdata(filename, "WAVELENGTH"))
fluxcalib = FluxCalib(wave, calib, ivar, mask)
fluxcalib.header = fits.getheader(filename, 0)
return fluxcalib
def read_stdstar_templates(stellarmodelfile):
"""No documentation yet.
"""
phdu=fits.open(stellarmodelfile)
hdr0=phdu[0].header
crpix1=hdr0['CRPIX1']
crval1=hdr0['CRVAL1']
cdelt1=hdr0['CDELT1']
if hdr0["LOGLAM"]==1: #log bins
wavebins=10**(crval1+cdelt1*numpy.arange(len(phdu[0].data[0])))
else: #lin bins
model_wave_step = cdelt1
model_wave_offset = (crval1-cdelt1*(crpix1-1))
        n_model_wave = len(phdu[0].data[0])
        wavebins=model_wave_step*numpy.arange(n_model_wave) + model_wave_offset
paramData=phdu[1].data
templateid=paramData["TEMPLATEID"]
fluxData=phdu[0].data
phdu.close()
return wavebins,fluxData,templateid
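# Illustrative round trip (file names are placeholders):
#   fluxcalib = read_flux_calibration("fluxcalib-b0-00000001.fits")
#   write_flux_calibration("fluxcalib-copy.fits", fluxcalib)
#   flux, wave, fibers = read_stdstar_models("stdstars-0-00000001.fits")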
|
{
"content_hash": "88327b20ec477361e9a19101a4d10dae",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 84,
"avg_line_length": 35.03636363636364,
"alnum_prop": 0.68188894654904,
"repo_name": "profxj/desispec",
"id": "650f79b53d6b500c3892716649f110aa165bd7da",
"size": "3854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/desispec/io/fluxcalibration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "236131"
},
{
"name": "Shell",
"bytes": "1591"
}
],
"symlink_target": ""
}
|
from apps.auth import helpers
NAME = 'instagram'
FIELDS = {
helpers.provider_field_name(NAME, 'consumer_key'): 'Client ID',
helpers.provider_field_name(NAME, 'consumer_secret'): 'Client Secret',
}
OAUTH = {
'base_url': 'https://api.instagram.com/v1',
'request_token_url': None,
'access_token_url': 'https://api.instagram.com/oauth/access_token',
'access_token_method': 'POST',
'authorize_url': 'https://instagram.com/oauth/authorize/',
}
CONFIG = helpers.make_provider_config(NAME, FIELDS, OAUTH)
|
{
"content_hash": "e27fa19df38b29a41fd93beb849fd1f1",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 74,
"avg_line_length": 28.105263157894736,
"alnum_prop": 0.6704119850187266,
"repo_name": "gmist/3dhero2",
"id": "94452184470450a391f7c63b5dfae424645816a9",
"size": "550",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "main/apps/auth/providers/instagram/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6787"
},
{
"name": "CoffeeScript",
"bytes": "10634"
},
{
"name": "Python",
"bytes": "110054"
}
],
"symlink_target": ""
}
|
"""Utility routines for workflow graphs
"""
from copy import deepcopy
from glob import glob
from collections import defaultdict
import os
import pickle
import pwd
import re
from uuid import uuid1
import numpy as np
from nipype.utils.misc import package_check
package_check('networkx', '1.3')
import json
from socket import gethostname, getfqdn
import networkx as nx
from ..utils.filemanip import (fname_presuffix, FileNotFoundError,
filename_to_list)
from ..utils.misc import create_function_from_source, str2bool
from ..interfaces.base import CommandLine, isdefined, Undefined, Bunch
from ..interfaces.base import pm as prov, safe_encode
from ..interfaces.utility import IdentityInterface
from .. import __version__ as nipype_version
from .. import get_info
from .. import logging, config
logger = logging.getLogger('workflow')
def dfs_preorder_function():
"""Return the networkx dfs_preoder function
If networkx 1.4 dev is installed, then the preorder function
is dfs_preorder_nodes, otherwise the preorder function is
dfs_preorder."""
if hasattr(dfs_preorder_function, 'dfs_preorder_function'):
return dfs_preorder_function.dfs_preorder_function
try:
dfs_preorder = nx.dfs_preorder
except AttributeError:
dfs_preorder = nx.dfs_preorder_nodes
logger.debug('networkx 1.4 dev or higher detected')
dfs_preorder_function.dfs_preorder_function = dfs_preorder
return dfs_preorder
try:
from os.path import relpath
except ImportError:
import os.path as op
def relpath(path, start=None):
"""Return a relative version of a path"""
if start is None:
start = os.curdir
if not path:
raise ValueError("no path specified")
start_list = op.abspath(start).split(op.sep)
path_list = op.abspath(path).split(op.sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = op.splitunc(path)
unc_start, rest = op.splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError(("Cannot mix UNC and non-UNC paths "
"(%s and %s)") % (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [op.pardir] * (len(start_list) - i) + path_list[i:]
if not rel_list:
return os.curdir
return op.join(*rel_list)
def modify_paths(object, relative=True, basedir=None):
"""Convert paths in data structure to either full paths or relative paths
Supports combinations of lists, dicts, tuples, strs
Parameters
----------
relative : boolean indicating whether paths should be set relative to the
current directory
basedir : default os.getcwd()
what base directory to use as default
"""
if not basedir:
basedir = os.getcwd()
if isinstance(object, dict):
out = {}
for key, val in sorted(object.items()):
if isdefined(val):
out[key] = modify_paths(val, relative=relative,
basedir=basedir)
elif isinstance(object, (list, tuple)):
out = []
for val in object:
if isdefined(val):
out.append(modify_paths(val, relative=relative,
basedir=basedir))
if isinstance(object, tuple):
out = tuple(out)
else:
if isdefined(object):
if isinstance(object, str) and os.path.isfile(object):
if relative:
if config.getboolean('execution', 'use_relative_paths'):
out = relpath(object, start=basedir)
else:
out = object
else:
out = os.path.abspath(os.path.join(basedir, object))
if not os.path.exists(out):
raise FileNotFoundError('File %s not found' % out)
else:
out = object
return out
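# Illustrative (path is a placeholder): modify_paths({'in_file': 'sub01/T1.nii'},
# relative=False) returns the dict with existing file paths expanded to absolute
# paths; values that are not existing file paths pass through unchanged.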
def get_print_name(node, simple_form=True):
"""Get the name of the node
For example, a node containing an instance of interfaces.fsl.BET
would be called nodename.BET.fsl
"""
name = node.fullname
if hasattr(node, '_interface'):
pkglist = node._interface.__class__.__module__.split('.')
interface = node._interface.__class__.__name__
destclass = ''
if len(pkglist) > 2:
destclass = '.%s' % pkglist[2]
if simple_form:
name = node.fullname + destclass
else:
name = '.'.join([node.fullname, interface]) + destclass
if simple_form:
parts = name.split('.')
if len(parts) > 2:
return ' ('.join(parts[1:])+')'
elif len(parts) == 2:
return parts[1]
return name
def _create_dot_graph(graph, show_connectinfo=False, simple_form=True):
"""Create a graph that can be pickled.
Ensures that edge info is pickleable.
"""
logger.debug('creating dot graph')
pklgraph = nx.DiGraph()
for edge in graph.edges():
data = graph.get_edge_data(*edge)
srcname = get_print_name(edge[0], simple_form=simple_form)
destname = get_print_name(edge[1], simple_form=simple_form)
if show_connectinfo:
pklgraph.add_edge(srcname, destname, l=str(data['connect']))
else:
pklgraph.add_edge(srcname, destname)
return pklgraph
def _write_detailed_dot(graph, dotfilename):
"""Create a dot file with connection info
digraph structs {
node [shape=record];
struct1 [label="<f0> left|<f1> mid\ dle|<f2> right"];
struct2 [label="<f0> one|<f1> two"];
struct3 [label="hello\nworld |{ b |{c|<here> d|e}| f}| g | h"];
struct1:f1 -> struct2:f0;
struct1:f0 -> struct2:f1;
struct1:f2 -> struct3:here;
}
"""
text = ['digraph structs {', 'node [shape=record];']
# write nodes
edges = []
replacefunk = lambda x: x.replace('_', '').replace('.', ''). \
replace('@', '').replace('-', '')
for n in nx.topological_sort(graph):
nodename = str(n)
inports = []
for u, v, d in graph.in_edges_iter(nbunch=n, data=True):
for cd in d['connect']:
if isinstance(cd[0], str):
outport = cd[0]
else:
outport = cd[0][0]
inport = cd[1]
ipstrip = 'in' + replacefunk(inport)
opstrip = 'out' + replacefunk(outport)
edges.append('%s:%s:e -> %s:%s:w;' % (str(u).replace('.', ''),
opstrip,
str(v).replace('.', ''),
ipstrip))
if inport not in inports:
inports.append(inport)
inputstr = '{IN'
for ip in sorted(inports):
inputstr += '|<in%s> %s' % (replacefunk(ip), ip)
inputstr += '}'
outports = []
for u, v, d in graph.out_edges_iter(nbunch=n, data=True):
for cd in d['connect']:
if isinstance(cd[0], str):
outport = cd[0]
else:
outport = cd[0][0]
if outport not in outports:
outports.append(outport)
outputstr = '{OUT'
for op in sorted(outports):
outputstr += '|<out%s> %s' % (replacefunk(op), op)
outputstr += '}'
srcpackage = ''
if hasattr(n, '_interface'):
pkglist = n._interface.__class__.__module__.split('.')
if len(pkglist) > 2:
srcpackage = pkglist[2]
srchierarchy = '.'.join(nodename.split('.')[1:-1])
nodenamestr = '{ %s | %s | %s }' % (nodename.split('.')[-1],
srcpackage,
srchierarchy)
text += ['%s [label="%s|%s|%s"];' % (nodename.replace('.', ''),
inputstr,
nodenamestr,
outputstr)]
# write edges
for edge in sorted(edges):
text.append(edge)
text.append('}')
filep = open(dotfilename, 'wt')
filep.write('\n'.join(text))
filep.close()
return text
# Graph manipulations for iterable expansion
def _get_valid_pathstr(pathstr):
"""Remove disallowed characters from path
Removes: [][ (){}?:<>#!|"';]
Replaces: ',' -> '.'
"""
pathstr = pathstr.replace(os.sep, '..')
pathstr = re.sub(r'''[][ (){}?:<>#!|"';]''', '', pathstr)
pathstr = pathstr.replace(',', '.')
return pathstr
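# Illustrative: _get_valid_pathstr("subject:01,run 2") returns "subject01.run2".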
def expand_iterables(iterables, synchronize=False):
if synchronize:
return synchronize_iterables(iterables)
else:
return list(walk(iterables.items()))
def count_iterables(iterables, synchronize=False):
"""Return the number of iterable expansion nodes.
If synchronize is True, then the count is the maximum number
of iterables value lists.
Otherwise, the count is the product of the iterables value
list sizes.
"""
if synchronize:
op = max
else:
op = lambda x,y: x*y
return reduce(op, [len(func()) for _, func in iterables.iteritems()])
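# Illustrative: with iterables = {'a': lambda: [1, 2], 'b': lambda: [3, 4, 5]},
# count_iterables(iterables) is 6 (product of list sizes) and
# count_iterables(iterables, synchronize=True) is 3 (the longest list).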
def walk(children, level=0, path=None, usename=True):
"""Generate all the full paths in a tree, as a dict.
Examples
--------
>>> from nipype.pipeline.utils import walk
>>> iterables = [('a', lambda: [1, 2]), ('b', lambda: [3, 4])]
>>> list(walk(iterables))
[{'a': 1, 'b': 3}, {'a': 1, 'b': 4}, {'a': 2, 'b': 3}, {'a': 2, 'b': 4}]
"""
# Entry point
if level == 0:
path = {}
# Exit condition
if not children:
yield path.copy()
return
# Tree recursion
head, tail = children[0], children[1:]
name, func = head
for child in func():
# We can use the arg name or the tree level as a key
if usename:
path[name] = child
else:
path[level] = child
# Recurse into the next level
for child_paths in walk(tail, level + 1, path, usename):
yield child_paths
def synchronize_iterables(iterables):
"""Synchronize the given iterables in item-wise order.
Return: the {field: value} dictionary list
Examples
--------
>>> from nipype.pipeline.utils import synchronize_iterables
    >>> iterables = dict(a=lambda: [1, 2], b=lambda: [3, 4])
    >>> synchronize_iterables(iterables)
    [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}]
    >>> iterables = dict(a=lambda: [1, 2], b=lambda: [3], c=lambda: [4, 5, 6])
    >>> synchronize_iterables(iterables)
    [{'a': 1, 'b': 3, 'c': 4}, {'a': 2, 'c': 5}, {'c': 6}]
"""
# Convert the (field, function) tuples into (field, value) lists
pair_lists = [[(field, value) for value in func()]
for field, func in iterables.iteritems()]
# A factory to make a dictionary from the mapped (field, value)
# key-value pairs. The filter removes any unmapped None items.
factory = lambda *pairs: dict(filter(None, pairs))
# Make a dictionary for each of the correlated (field, value) items
return map(factory, *pair_lists)
def evaluate_connect_function(function_source, args, first_arg):
func = create_function_from_source(function_source)
try:
output_value = func(first_arg,
*list(args))
except NameError as e:
if e.args[0].startswith("global name") and \
e.args[0].endswith("is not defined"):
e.args = (e.args[0],
("Due to engine constraints all imports have to be done "
"inside each function definition"))
raise e
return output_value
def get_levels(G):
levels = {}
for n in nx.topological_sort(G):
levels[n] = 0
for pred in G.predecessors_iter(n):
levels[n] = max(levels[n], levels[pred] + 1)
return levels
def _merge_graphs(supergraph, nodes, subgraph, nodeid, iterables,
prefix, synchronize=False):
"""Merges two graphs that share a subset of nodes.
If the subgraph needs to be replicated for multiple iterables, the
merge happens with every copy of the subgraph. Assumes that edges
between nodes of supergraph and subgraph contain data.
Parameters
----------
supergraph : networkx graph
Parent graph from which subgraph was selected
nodes : networkx nodes
Nodes of the parent graph from which the subgraph was initially
constructed.
subgraph : networkx graph
A subgraph that contains as a subset nodes from the supergraph.
These nodes connect the subgraph to the supergraph
nodeid : string
Identifier of a node for which parameterization has been sought
iterables : dict of functions
see `pipeline.NodeWrapper` for iterable requirements
Returns
-------
Returns a merged graph containing copies of the subgraph with
appropriate edge connections to the supergraph.
"""
# Retrieve edge information connecting nodes of the subgraph to other
# nodes of the supergraph.
supernodes = supergraph.nodes()
ids = ['.'.join((n._hierarchy, n._id)) for n in supernodes]
if len(np.unique(ids)) != len(ids):
# This should trap the problem of miswiring when multiple iterables are
# used at the same level. The use of the template below for naming
# updates to nodes is the general solution.
raise Exception(("Execution graph does not have a unique set of node "
"names. Please rerun the workflow"))
edgeinfo = {}
for n in subgraph.nodes():
nid = '.'.join((n._hierarchy, n._id))
nidx = ids.index(nid)
for edge in supergraph.in_edges_iter(supernodes[nidx]):
# make sure the edge is not part of the subgraph
if edge[0] not in subgraph.nodes():
if nid not in edgeinfo.keys():
edgeinfo[nid] = []
edgeinfo[nid].append((edge[0],
supergraph.get_edge_data(*edge)))
supergraph.remove_nodes_from(nodes)
# Add copies of the subgraph depending on the number of iterables
iterable_params = expand_iterables(iterables, synchronize)
# If there are no iterable subgraphs, then return
if not iterable_params:
return supergraph
# Make an iterable subgraph node id template
count = len(iterable_params)
template = '.%s%%0%dd' % (prefix, np.ceil(np.log10(count)))
# Copy the iterable subgraphs
for i, params in enumerate(iterable_params):
Gc = deepcopy(subgraph)
ids = ['.'.join((n._hierarchy, n._id)) for n in Gc.nodes()]
nodeidx = ids.index(nodeid)
rootnode = Gc.nodes()[nodeidx]
paramstr = ''
for key, val in sorted(params.items()):
paramstr = '_'.join((paramstr, _get_valid_pathstr(key),
_get_valid_pathstr(str(val))))
rootnode.set_input(key, val)
levels = get_levels(Gc)
for n in Gc.nodes():
"""
update parameterization of the node to reflect the location of
the output directory. For example, if the iterables along a
path of the directed graph consisted of the variables 'a' and
'b', then every node in the path including and after the node
with iterable 'b' will be placed in a directory
_a_aval/_b_bval/.
"""
path_length = levels[n]
# enter as negative numbers so that earlier iterables with longer
# path lengths get precedence in a sort
paramlist = [(-path_length, paramstr)]
if n.parameterization:
n.parameterization = paramlist + n.parameterization
else:
n.parameterization = paramlist
supergraph.add_nodes_from(Gc.nodes())
supergraph.add_edges_from(Gc.edges(data=True))
for node in Gc.nodes():
nid = '.'.join((node._hierarchy, node._id))
if nid in edgeinfo.keys():
for info in edgeinfo[nid]:
supergraph.add_edges_from([(info[0], node, info[1])])
node._id += template % i
logger.debug('Expanded %d iterables in node %s.' % (count, nodeid))
return supergraph
def _connect_nodes(graph, srcnode, destnode, connection_info):
"""Add a connection between two nodes
"""
data = graph.get_edge_data(srcnode, destnode, default=None)
if not data:
data = {'connect': connection_info}
graph.add_edges_from([(srcnode, destnode, data)])
else:
data['connect'].extend(connection_info)
def _prune_identity_nodes(graph, keep_iterables=False):
"""Remove identity nodes from the given graph, with the following
exceptions:
* join nodes are retained
* an iterable connect source or target is retained
* iterable nodes are retained if and only if the keep_iterables
flag is set to True
"""
# if keep_iterables is False, then include the iterable
# and join nodes in the nodes to delete
for node in _identity_nodes(graph, not keep_iterables):
if not hasattr(node, 'joinsource') and not node.iterconnect:
_remove_identity_node(graph, node)
return graph
def _identity_nodes(graph, include_iterables):
"""Return the IdentityInterface nodes in the graph
The nodes are in topological sort order. The iterable nodes
are included if and only if the include_iterables flag is set
to True.
"""
return [node for node in nx.topological_sort(graph)
if (isinstance(node._interface, IdentityInterface) and
(include_iterables or getattr(node, 'iterables') is None))]
def _remove_identity_node(graph, node):
"""Remove identity nodes from an execution graph
"""
portinputs, portoutputs = _node_ports(graph, node)
for field, connections in portoutputs.items():
if portinputs:
_propagate_internal_output(graph, node, field, connections,
portinputs)
else:
_propagate_root_output(graph, node, field, connections)
graph.remove_nodes_from([node])
logger.debug("Removed the identity node %s from the graph." % node)
def _node_ports(graph, node):
"""Return the given node's input and output ports
The return value is the (inputs, outputs) dictionaries.
The inputs is a {destination field: (source node, source field)}
dictionary.
The outputs is a {source field: destination items} dictionary,
where each destination item is a
(destination node, destination field, source field) tuple.
"""
portinputs = {}
portoutputs = {}
for u, _, d in graph.in_edges_iter(node, data=True):
for src, dest in d['connect']:
portinputs[dest] = (u, src)
for _, v, d in graph.out_edges_iter(node, data=True):
for src, dest in d['connect']:
if isinstance(src, tuple):
srcport = src[0]
else:
srcport = src
if srcport not in portoutputs:
portoutputs[srcport] = []
portoutputs[srcport].append((v, dest, src))
return (portinputs, portoutputs)
def _propagate_root_output(graph, node, field, connections):
"""Propagates the given graph root node output port
field connections to the out-edge destination nodes."""
for destnode, inport, src in connections:
value = getattr(node.inputs, field)
if isinstance(src, tuple):
value = evaluate_connect_function(src[1], src[2],
value)
destnode.set_input(inport, value)
def _propagate_internal_output(graph, node, field, connections, portinputs):
"""Propagates the given graph internal node output port
field connections to the out-edge source node and in-edge
destination nodes."""
for destnode, inport, src in connections:
if field in portinputs:
srcnode, srcport = portinputs[field]
if isinstance(srcport, tuple) and isinstance(src, tuple):
raise ValueError(("Does not support two inline functions "
"in series (\'%s\' and \'%s\'). "
"Please use a Function node") %
(srcport[1].split("\\n")[0][6:-1],
src[1].split("\\n")[0][6:-1]))
connect = graph.get_edge_data(srcnode, destnode,
default={'connect': []})
if isinstance(src, tuple):
connect['connect'].append(((srcport, src[1], src[2]), inport))
else:
connect = {'connect': [(srcport, inport)]}
old_connect = graph.get_edge_data(srcnode, destnode,
default={'connect': []})
old_connect['connect'] += connect['connect']
graph.add_edges_from([(srcnode, destnode, old_connect)])
else:
value = getattr(node.inputs, field)
if isinstance(src, tuple):
value = evaluate_connect_function(src[1], src[2], value)
destnode.set_input(inport, value)
def generate_expanded_graph(graph_in):
"""Generates an expanded graph based on node parameterization
Parameterization is controlled using the `iterables` field of the
pipeline elements. Thus if there are two nodes with iterables a=[1,2]
and b=[3,4] this procedure will generate a graph with sub-graphs
parameterized as (a=1,b=3), (a=1,b=4), (a=2,b=3) and (a=2,b=4).
"""
# the networkx preorder function
dfs_preorder = dfs_preorder_function()
logger.debug("PE: expanding iterables")
graph_in = _prune_identity_nodes(graph_in, keep_iterables=True)
# the iterable nodes
inodes = _iterable_nodes(graph_in)
logger.debug("Detected iterable nodes %s" % inodes)
# pre-process the iterable nodes as follows:
# * standardize the iterables as {(field, function)} dictionaries
# * validate that each itersource is iterable
for inode in inodes:
_standardize_iterables(inode)
if inode.itersource:
try:
src_name, _ = inode.itersource
next((n for n in inodes if n.name == src_name
and n.iterables))
except StopIteration:
raise ValueError("The node %s itersource %s iterables is not"
" set" % (inode.name, inode.itersource))
# validate that each joinsource has iterables
inode_names = {inode.name for inode in inodes}
for node in graph_in.nodes_iter():
if hasattr(node, 'joinsource'):
if node.joinsource not in inode_names:
raise Exception("The node '%s' joinsource '%s' was not"
" found in the iterable nodes %s." %
(node.name, node.joinsource, inode_names))
# while there is an iterable node, expand the iterable node's
# subgraphs
unexpanded_inodes = inodes
iter_src_filter = lambda node: any((node._id.startswith(inode._id)
for inode in inodes))
while unexpanded_inodes:
inode = unexpanded_inodes[0]
logger.debug("Expanding the iterable node %s..." % inode)
# the join successor nodes of the current iterable node
jnodes = [node for node in graph_in.nodes_iter()
if hasattr(node, 'joinsource')
and inode.name == node.joinsource
and nx.has_path(graph_in, inode, node)]
# excise the join in-edges. save the excised edges in a
# {jnode: {source name: (destination name, edge data)}}
# dictionary
jedge_dict = {}
for jnode in jnodes:
in_edges = jedge_dict[jnode] = {}
for src, dest, data in graph_in.in_edges_iter(jnode, True):
in_edges[src._id] = data
graph_in.remove_edge(src, dest)
logger.debug("Excised the %s -> %s join node in-edge."
% (src, dest))
if inode.itersource:
# the itersource is a (node name, fields) tuple
src_name, src_fields = inode.itersource
# convert a single field to a list
if isinstance(src_fields, str):
src_fields = [src_fields]
# find the unique iterable source node in the graph
iter_src = _find_ancestor(graph_in, inode, src_name, iter_src_filter)
if not iter_src:
raise ValueError("The node %s itersource %s was not found"
" among the ancestor nodes" %
(inode, src_name))
logger.debug("The node %s has iterable source node %s"
% (inode, iter_src))
# look up the iterables for this particular itersource descendant
# using the iterable source ancestor values as a key
iterables = {}
# the source node iterables values
src_values = [getattr(iter_src.inputs, field) for field in src_fields]
# if there is one source field, then the key is the source value,
# otherwise the key is the tuple of source values
if len(src_values) == 1:
key = src_values[0]
else:
key = tuple(src_values)
# The itersource iterables is a {field: lookup} dictionary, where the
# lookup is a {source key: iteration list} dictionary. Look up the
# current iterable value using the predecessor itersource input values.
iter_dict = {field: lookup[key] for field, lookup in inode.iterables
if key in lookup}
# convert the iterables to the standard {field: function} format
iter_items = map(lambda(field, value): (field, lambda: value),
iter_dict.iteritems())
iterables = dict(iter_items)
else:
iterables = inode.iterables.copy()
inode.iterables = None
# collect the subnodes to expand
logger.debug('node: %s iterables: %s' % (inode, iterables))
subnodes = list(dfs_preorder(graph_in, inode))
iterable_prefix = _next_iterables_expansion_prefix(subnodes)
# append a suffix to the iterable node id
inode._id += ('.' + iterable_prefix + 'I')
# merge the iterated subgraphs
subgraph = graph_in.subgraph(subnodes)
inid = '.'.join((inode._hierarchy, inode._id))
graph_in = _merge_graphs(graph_in, subnodes, subgraph, inid,
iterables, iterable_prefix, inode.synchronize)
# reconnect the join nodes
for jnode in jnodes:
# the {node id: edge data} dictionary for edges connecting
# to the join node in the unexpanded graph
old_edge_dict = jedge_dict[jnode]
# the edge source node replicates
expansions = defaultdict(list)
for node in graph_in.nodes_iter():
for src_id, edge_data in old_edge_dict.iteritems():
if node._id.startswith(src_id):
expansions[src_id].append(node)
for in_id, in_nodes in expansions.iteritems():
logger.debug("The join node %s input %s was expanded"
" to %d nodes." %(jnode, in_id, len(in_nodes)))
# preserve the node iteration order by sorting on the node id
for in_nodes in expansions.itervalues():
in_nodes.sort(key=lambda node: node._id)
# the number of join source replicates
iter_cnt = count_iterables(iterables, inode.synchronize)
# make new join node fields to connect to each replicated
# join in-edge source node
slot_dicts = [jnode._add_join_item_fields() for _ in range(iter_cnt)]
# for each join in-edge, connect every expanded source node
# which matches on the in-edge source name to the destination
# join node. Qualify each edge connect join field name by
# appending the next join slot index, e.g. the connect
# from two expanded nodes from field 'out_file' to join
# field 'in' are qualified as ('out_file', 'in1') and
# ('out_file', 'in2'), resp. This preserves connection port
# integrity.
for old_id, in_nodes in expansions.iteritems():
# validate the expansion count
if len(in_nodes) > iter_cnt:
raise Exception("The number of iterable node %s expansions"
" %d exceeds the number of iterables %d" %
(old_id, len(in_nodes), iter_cnt))
# reconnect each replication of the current join in-edge
# source
for in_idx, in_node in enumerate(in_nodes):
olddata = old_edge_dict[old_id]
newdata = deepcopy(olddata)
# the (source, destination) field tuples
connects = newdata['connect']
# the join fields connected to the source
join_fields = [field for _, field in connects
if field in jnode.joinfield]
# the {field: slot fields} maps assigned to the input
# node, e.g. {'image': 'imageJ3', 'mask': 'maskJ3'}
# for the third join source expansion replicate of a
# join node with join fields image and mask
slots = slot_dicts[in_idx]
for con_idx, connect in enumerate(connects):
src_field, dest_field = connect
# qualify a join destination field name
if dest_field in slots:
slot_field = slots[dest_field]
connects[con_idx] = (src_field, slot_field)
logger.debug("Qualified the %s -> %s join field"
" %s as %s." %
(in_node, jnode, dest_field, slot_field))
graph_in.add_edge(in_node, jnode, newdata)
logger.debug("Connected the join node %s subgraph to the"
" expanded join point %s" % (jnode, in_node))
#nx.write_dot(graph_in, '%s_post.dot' % node)
# the remaining iterable nodes
unexpanded_inodes = _iterable_nodes(graph_in)
for node in graph_in.nodes():
if node.parameterization:
node.parameterization = [param for _, param in
sorted(node.parameterization)]
logger.debug("PE: expanding iterables ... done")
return _prune_identity_nodes(graph_in)
def _find_ancestor(graph, node, name, filter=None):
"""Finds a node in the given graph which matches the
given search name, has a path to the search node and
matches the optional filter
"""
try:
return next((other for other in graph.nodes_iter()
if (name == other.name and
nx.has_path(graph, other, node) and
(not filter or filter(other)))))
except StopIteration:
pass
def _iterable_nodes(graph_in):
"""Returns the iterable nodes in the given graph and their join
dependencies.
The nodes are ordered as follows:
- nodes without an itersource precede nodes with an itersource
- nodes without an itersource are sorted in reverse topological order
- nodes with an itersource are sorted in topological order
This order implies the following:
- every iterable node without an itersource is expanded before any
node with an itersource
- every iterable node without an itersource is expanded before any
of its predecessor iterable nodes without an itersource
- every node with an itersource is expanded before any of its
successor nodes with an itersource
Return the iterable nodes list
"""
nodes = nx.topological_sort(graph_in)
inodes = [node for node in nodes if node.iterables is not None]
inodes_no_src = [node for node in inodes if not node.itersource]
inodes_src = [node for node in inodes if node.itersource]
inodes_no_src.reverse()
return inodes_no_src + inodes_src
def _standardize_iterables(node):
"""Converts the given iterables to a {field: function} dictionary,
if necessary, where the function returns a list."""
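# Illustration with hypothetical values (not from the original source): an
# iterables value of [('fwhm', [4, 8])] on a node without an itersource is
# rewritten here as {'fwhm': <function returning [4, 8]>}, the standard
# {field: function} form used by the expansion code above.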
# trivial case
if not node.iterables:
return
iterables = node.iterables
# The candidate iterable fields
fields = set(node.inputs.copyable_trait_names())
# Flag indicating whether the iterables are in the alternate
# synchronize form and are not converted to a standard format.
synchronize = False
# A synchronize iterables node without an itersource can be in
# [fields, value tuples] format rather than
# [(field, value list), (field, value list), ...]
if node.synchronize:
if len(iterables) == 2:
first, last = iterables
if all((isinstance(item, str) and item in fields
for item in first)):
iterables = _transpose_iterables(first, last)
# Convert a tuple to a list
if isinstance(iterables, tuple):
iterables = [iterables]
# Validate the standard [(field, values)] format
_validate_iterables(node, iterables, fields)
# Convert a list to a dictionary
if isinstance(iterables, list):
# Convert a values list to a function. This is a legacy
# Nipype requirement with unknown rationale.
if not node.itersource:
iter_items = map(lambda(field, value): (field, lambda: value),
iterables)
iterables = dict(iter_items)
node.iterables = iterables
def _validate_iterables(node, iterables, fields):
"""
Raise TypeError if an iterables member is not iterable.
Raise ValueError if an iterables member is not a (field, values) pair.
Raise ValueError if an iterable field is not in the inputs.
"""
# The iterables can be a {field: value list} dictionary.
if isinstance(iterables, dict):
iterables = iterables.items()
elif not isinstance(iterables, tuple) and not isinstance(iterables, list):
raise ValueError("The %s iterables type is not a list or a dictionary:"
" %s" % (node.name, iterables.__class__))
for item in iterables:
try:
if len(item) != 2:
raise ValueError("The %s iterables is not a [(field, values)]"
" list" % node.name)
except TypeError, e:
raise TypeError("A %s iterables member is not iterable: %s"
% (node.name, e))
field, _ = item
if field not in fields:
raise ValueError("The %s iterables field is unrecognized: %s"
% (node.name, field))
def _transpose_iterables(fields, values):
"""
Converts the given fields and tuple values into a standardized
iterables value.
If the input values is a synchronize iterables dictionary, then
the result is a (field, {key: values}) list.
Otherwise, the result is a list of (field: value list) pairs.
"""
if isinstance(values, dict):
transposed = {field: defaultdict(list) for field in fields}
for key, tuples in values.iteritems():
for kvals in tuples:
for idx, val in enumerate(kvals):
if val != None:
transposed[fields[idx]][key].append(val)
return transposed.items()
else:
return zip(fields, [filter(lambda(v): v != None, list(transpose))
for transpose in zip(*values)])
ITERABLES_PREFIX_PAT = re.compile('\.(.)I')
"""The pattern for matching an iterables node id prefix character."""
def _next_iterables_expansion_prefix(nodes):
"""Return the next available prefix character for the given nodes.
Parameters
----------
nodes: a node iterator
Exceptions
----------
ValueError: if there are too many iterables in the workflow
"""
# The iterables node id prefix characters already used
prior_prefixes = (ITERABLES_PREFIX_PAT.findall(s._id) for s in nodes)
# The largest prefix character ordinals
max_ords = [max(ord(c) for c in p) for p in prior_prefixes if p]
if not max_ords:
return 'a'
# The largest prefix character ordinal
max_ord = max(max_ords)
# The last prefix is 'z'
if max_ord == ord('z'):
raise ValueError('Too many iterables in the workflow')
# Return the next available prefix character
return chr(max_ord + 1)
def export_graph(graph_in, base_dir=None, show=False, use_execgraph=False,
show_connectinfo=False, dotfilename='graph.dot', format='png',
simple_form=True):
""" Displays the graph layout of the pipeline
This function requires that pygraphviz and matplotlib are available on
the system.
Parameters
----------
show : boolean
Indicate whether to generate pygraphviz output from
networkx. default [False]
use_execgraph : boolean
Indicates whether to use the specification graph or the
execution graph. default [False]
show_connectinfo : boolean
Indicates whether to show the edge data on the graph. This
makes the graph rather cluttered. default [False]
"""
graph = deepcopy(graph_in)
if use_execgraph:
graph = generate_expanded_graph(graph)
logger.debug('using execgraph')
else:
logger.debug('using input graph')
if base_dir is None:
base_dir = os.getcwd()
if not os.path.exists(base_dir):
os.makedirs(base_dir)
outfname = fname_presuffix(dotfilename,
suffix='_detailed.dot',
use_ext=False,
newpath=base_dir)
logger.info('Creating detailed dot file: %s' % outfname)
_write_detailed_dot(graph, outfname)
cmd = 'dot -T%s -O %s' % (format, outfname)
res = CommandLine(cmd, terminal_output='allatonce').run()
if res.runtime.returncode:
logger.warn('dot2png: %s', res.runtime.stderr)
pklgraph = _create_dot_graph(graph, show_connectinfo, simple_form)
outfname = fname_presuffix(dotfilename,
suffix='.dot',
use_ext=False,
newpath=base_dir)
nx.write_dot(pklgraph, outfname)
logger.info('Creating dot file: %s' % outfname)
cmd = 'dot -T%s -O %s' % (format, outfname)
res = CommandLine(cmd, terminal_output='allatonce').run()
if res.runtime.returncode:
logger.warn('dot2png: %s', res.runtime.stderr)
if show:
pos = nx.graphviz_layout(pklgraph, prog='dot')
nx.draw(pklgraph, pos)
if show_connectinfo:
nx.draw_networkx_edge_labels(pklgraph, pos)
def format_dot(dotfilename, format=None):
cmd = 'dot -T%s -O %s' % (format, dotfilename)
CommandLine(cmd).run()
logger.info('Converting dotfile: %s to %s format' % (dotfilename, format))
def make_output_dir(outdir):
"""Make the output_dir if it doesn't exist.
Parameters
----------
outdir : output directory to create
"""
if not os.path.exists(os.path.abspath(outdir)):
logger.debug("Creating %s" % outdir)
os.makedirs(outdir)
return outdir
def get_all_files(infile):
"""Return a list of files based on the given input file as follows:
* The input file is in the list
* If the input file has extension .img, then the corresponding .hdr
and .mat files in the same directory are in the list
* If the input file has extension .img.gz, then the corresponding
.hdr.gz file in the same directory is in the list
The result files are expanded with the absolute path.
"""
infile = os.path.abspath(infile)
files = [infile]
if infile.endswith(".img"):
files.append(infile[:-4] + ".hdr")
files.append(infile[:-4] + ".mat")
if infile.endswith(".img.gz"):
files.append(infile[:-7] + ".hdr.gz")
return files
def walk_outputs(object):
"""Extract every file and directory from a python structure
"""
out = []
if isinstance(object, dict):
for key, val in sorted(object.items()):
if isdefined(val):
out.extend(walk_outputs(val))
elif isinstance(object, (list, tuple)):
for val in object:
if isdefined(val):
out.extend(walk_outputs(val))
else:
if isdefined(object) and isinstance(object, basestring):
if os.path.islink(object) or os.path.isfile(object):
out = [(filename, 'f') for filename in get_all_files(object)]
elif os.path.isdir(object):
out = [(object, 'd')]
return out
def walk_files(cwd):
for path, _, files in os.walk(cwd):
for f in files:
yield os.path.join(path, f)
def clean_working_directory(outputs, cwd, inputs, needed_outputs, config,
files2keep=None, dirs2keep=None):
"""Removes all files not needed for further analysis from the directory
"""
if not outputs:
return
outputs_to_keep = outputs.get().keys()
if needed_outputs and \
str2bool(config['execution']['remove_unnecessary_outputs']):
outputs_to_keep = needed_outputs
# build a list of needed files
output_files = []
outputdict = outputs.get()
for output in outputs_to_keep:
output_files.extend(walk_outputs(outputdict[output]))
needed_files = [path for path, type in output_files if type == 'f']
if str2bool(config['execution']['keep_inputs']):
input_files = []
inputdict = inputs.get()
input_files.extend(walk_outputs(inputdict))
needed_files += [path for path, type in input_files if type == 'f']
for extra in ['_0x*.json', 'provenance.*', 'pyscript*.m',
'command.txt', 'result*.pklz', '_inputs.pklz', '_node.pklz']:
needed_files.extend(glob(os.path.join(cwd, extra)))
if files2keep:
keep_files = [os.path.abspath(f) for f in filename_to_list(files2keep)]
needed_files.extend(keep_files)
needed_dirs = [path for path, type in output_files if type == 'd']
if dirs2keep:
keep_dirs = [os.path.abspath(d) for d in filename_to_list(dirs2keep)]
needed_dirs.extend(keep_dirs)
for extra in ['_nipype', '_report']:
needed_dirs.extend(glob(os.path.join(cwd, extra)))
logger.debug('Needed files: %s' % (';'.join(needed_files)))
logger.debug('Needed dirs: %s' % (';'.join(needed_dirs)))
files2remove = []
if str2bool(config['execution']['remove_unnecessary_outputs']):
for f in walk_files(cwd):
if f not in needed_files:
if len(needed_dirs) == 0:
files2remove.append(f)
elif not any([f.startswith(dname) for dname in needed_dirs]):
files2remove.append(f)
else:
if not str2bool(config['execution']['keep_inputs']):
input_files = []
inputdict = inputs.get()
input_files.extend(walk_outputs(inputdict))
input_files = [path for path, type in input_files if type == 'f']
for f in walk_files(cwd):
if f in input_files and f not in needed_files:
files2remove.append(f)
logger.debug('Removing files: %s' % (';'.join(files2remove)))
for f in files2remove:
os.remove(f)
for key in outputs.copyable_trait_names():
if key not in outputs_to_keep:
setattr(outputs, key, Undefined)
return outputs
def merge_dict(d1, d2, merge=lambda x, y: y):
"""
Merges two dictionaries, non-destructively, combining
values on duplicate keys as defined by the optional merge
function. The default behavior replaces the values in d1
with corresponding values in d2. (There is no other generally
applicable merge strategy, but often you'll have homogeneous
types in your dicts, so specifying a merge technique can be
valuable.)
Examples:
>>> d1 = {'a': 1, 'c': 3, 'b': 2}
>>> merge_dict(d1, d1)
{'a': 1, 'c': 3, 'b': 2}
>>> merge_dict(d1, d1, lambda x,y: x+y)
{'a': 2, 'c': 6, 'b': 4}
"""
if not isinstance(d1, dict):
return merge(d1, d2)
result = dict(d1)
if d2 is None:
return result
for k, v in d2.iteritems():
if k in result:
result[k] = merge_dict(result[k], v, merge=merge)
else:
result[k] = v
return result
def write_prov(graph, filename=None, format='turtle'):
"""Write W3C PROV Model JSON file
"""
if not filename:
filename = os.path.join(os.getcwd(), 'workflow_provenance')
foaf = prov.Namespace("foaf", "http://xmlns.com/foaf/0.1/")
dcterms = prov.Namespace("dcterms", "http://purl.org/dc/terms/")
nipype = prov.Namespace("nipype", "http://nipy.org/nipype/terms/")
# create a provenance container
g = prov.ProvBundle()
# Set the default _namespace name
#g.set_default_namespace(nipype.get_uri())
g.add_namespace(foaf)
g.add_namespace(dcterms)
g.add_namespace(nipype)
get_id = lambda: nipype[uuid1().hex]
user_agent = g.agent(get_id(),
{prov.PROV["type"]: prov.PROV["Person"],
prov.PROV["label"]: pwd.getpwuid(os.geteuid()).pw_name,
foaf["name"]: safe_encode(pwd.getpwuid(os.geteuid()).pw_name)})
agent_attr = {prov.PROV["type"]: prov.PROV["SoftwareAgent"],
prov.PROV["label"]: "Nipype",
foaf["name"]: safe_encode("Nipype")}
for key, value in get_info().items():
agent_attr.update({nipype[key]: safe_encode(value)})
software_agent = g.agent(get_id(), agent_attr)
processes = []
nodes = graph.nodes()
for idx, node in enumerate(nodes):
result = node.result
classname = node._interface.__class__.__name__
_, hashval, _, _ = node.hash_exists()
if isinstance(result.runtime, list):
startTime = None
endTime = None
for runtime in result.runtime:
newStartTime = getattr(runtime, 'startTime')
if startTime:
if newStartTime < startTime:
startTime = newStartTime
else:
startTime = newStartTime
newEndTime = getattr(runtime, 'endTime')
if endTime:
if newEndTime > endTime:
endTime = newEndTime
else:
endTime = newEndTime
attrs = {foaf["host"]: gethostname(),
prov.PROV["type"]: nipype[classname],
prov.PROV["label"]: '_'.join((classname,
node.name)),
nipype['hashval']: hashval}
process = g.activity(uuid1().hex, startTime,
endTime, attrs)
process.add_extra_attributes({prov.PROV["type"]: nipype["MapNode"]})
# add info about sub processes
for runtime in result.runtime:
attrs = {foaf["host"]: runtime.hostname,
prov.PROV["type"]: nipype[classname],
prov.PROV["label"]: '_'.join((classname,
node.name)),
#nipype['hashval']: hashval,
nipype['duration']: runtime.duration,
nipype['working_directory']: runtime.cwd,
nipype['return_code']: runtime.returncode,
nipype['platform']: runtime.platform,
}
try:
attrs.update({nipype['command']: runtime.cmdline})
attrs.update({nipype['command_path']: runtime.command_path})
attrs.update({nipype['dependencies']: runtime.dependencies})
except AttributeError:
pass
process_sub = g.activity(uuid1().hex, runtime.startTime,
runtime.endTime, attrs)
process_sub.add_extra_attributes({prov.PROV["type"]: nipype["Node"]})
g.wasAssociatedWith(process_sub, user_agent, None, None,
{prov.PROV["Role"]: "LoggedInUser"})
g.wasAssociatedWith(process_sub, software_agent, None, None,
{prov.PROV["Role"]: prov.PROV["SoftwareAgent"]})
g.wasInformedBy(process_sub, process)
# environment
id = uuid1().hex
environ = g.entity(id)
environ.add_extra_attributes({prov.PROV['type']: nipype['environment'],
prov.PROV['label']: "environment",
nipype['environ_json']: json.dumps(runtime.environ)})
g.used(process_sub, id)
else:
runtime = result.runtime
attrs = {foaf["host"]: runtime.hostname,
prov.PROV["type"]: nipype[classname],
prov.PROV["label"]: '_'.join((classname,
node.name)),
nipype['hashval']: hashval,
nipype['duration']: runtime.duration,
nipype['working_directory']: runtime.cwd,
nipype['return_code']: runtime.returncode,
nipype['platform']: runtime.platform,
}
try:
attrs.update({nipype['command']: runtime.cmdline})
attrs.update({nipype['command_path']: runtime.command_path})
attrs.update({nipype['dependencies']: runtime.dependencies})
except AttributeError:
pass
process = g.activity(uuid1().hex, runtime.startTime,
runtime.endTime, attrs)
process.add_extra_attributes({prov.PROV["type"]: nipype["Node"]})
# environment
id = uuid1().hex
environ = g.entity(id)
environ.add_extra_attributes({prov.PROV['type']: nipype['environment'],
prov.PROV['label']: "environment",
nipype['environ_json']: json.dumps(runtime.environ)})
g.used(process, id)
processes.append(process)
g.wasAssociatedWith(process, user_agent, None, None,
{prov.PROV["Role"]: "LoggedInUser"})
g.wasAssociatedWith(process, software_agent, None, None,
{prov.PROV["Role"]: prov.PROV["SoftwareAgent"]})
for inidx, inputval in enumerate(sorted(node.inputs.get().items())):
if isdefined(inputval[1]):
inport = inputval[0]
used_ports = []
for _, _, d in graph.in_edges_iter([node], data=True):
for _, dest in d['connect']:
used_ports.append(dest)
if inport not in used_ports:
param = g.entity(uuid1().hex,
{prov.PROV["type"]: nipype['input'],
prov.PROV["label"]: inport,
nipype['port']: inport,
prov.PROV["value"]: str(inputval[1])
})
g.used(process, param)
# add dependencies (edges)
# add artifacts (files)
counter = 0
for idx, node in enumerate(nodes):
if node.result.outputs is None:
continue
if isinstance(node.result.outputs, Bunch):
outputs = node.result.outputs.dictcopy()
else:
outputs = node.result.outputs.get()
used_ports = {}
for _, v, d in graph.out_edges_iter([node], data=True):
for src, dest in d['connect']:
if isinstance(src, tuple):
srcname = src[0]
else:
srcname = src
if srcname not in used_ports:
used_ports[srcname] = []
used_ports[srcname].append((v, dest))
for outidx, nameval in enumerate(sorted(outputs.items())):
if not isdefined(nameval[1]):
continue
artifact = g.entity(uuid1().hex,
{prov.PROV["type"]: nipype['artifact'],
prov.PROV["label"]: nameval[0],
nipype['port']: nameval[0],
prov.PROV["value"]: str(nameval[1])
})
g.wasGeneratedBy(artifact, processes[idx])
if nameval[0] in used_ports:
for destnode, portname in used_ports[nameval[0]]:
counter += 1
# Used: Artifact->Process
attrs = {prov.PROV["label"]: portname}
g.used(processes[nodes.index(destnode)], artifact,
other_attributes=attrs)
# Process->Process
for idx, edgeinfo in enumerate(graph.in_edges_iter()):
g.wasStartedBy(processes[nodes.index(edgeinfo[1])],
starter=processes[nodes.index(edgeinfo[0])])
# write provenance
try:
if format in ['turtle', 'all']:
g.rdf().serialize(filename + '.ttl', format='turtle')
except (ImportError, NameError):
format = 'all'
finally:
if format in ['provn', 'all']:
with open(filename + '.provn', 'wt') as fp:
fp.writelines(g.get_provn())
if format in ['json', 'all']:
with open(filename + '.json', 'wt') as fp:
prov.json.dump(g, fp, cls=prov.ProvBundle.JSONEncoder)
return g
|
{
"content_hash": "87c14fdf3f74fb4f79e6bbf2937d2776",
"timestamp": "",
"source": "github",
"line_count": 1352,
"max_line_length": 99,
"avg_line_length": 41.14349112426036,
"alnum_prop": 0.563189875238198,
"repo_name": "FredLoney/nipype",
"id": "5ae82fe77a043d62291c57fbd49d0c0d6d50dd55",
"size": "55740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipype/pipeline/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2759"
},
{
"name": "Makefile",
"bytes": "1767"
},
{
"name": "Matlab",
"bytes": "5018"
},
{
"name": "Python",
"bytes": "2609042"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
import unittest
import sys
import StringIO
import IECore
import Gaffer
import GafferUI
import GafferUITest
class EventSignalCombinerTest( GafferUITest.TestCase ) :
def trueSlot( self, gadget, event ) :
self.trueSlotCalled = True
return True
def falseSlot( self, gadget, event ) :
self.falseSlotCalled = True
return False
def exceptionSlot( self, gadget, event ) :
self.exceptionSlotCalled = True
raise Exception( "oops" )
return False
def setUp( self ) :
self.falseSlotCalled = False
self.trueSlotCalled = False
self.exceptionSlotCalled = False
def testShortCutting( self ) :
s = GafferUI.Gadget.ButtonSignal()
c1 = s.connect( self.trueSlot )
c2 = s.connect( self.falseSlot )
self.assertEqual( self.trueSlotCalled, False )
self.assertEqual( self.falseSlotCalled, False )
self.assertEqual( s( None, GafferUI.ButtonEvent() ), True )
self.assertEqual( self.trueSlotCalled, True )
self.assertEqual( self.falseSlotCalled, False )
def testNoShortCutting( self ) :
s = GafferUI.Gadget.ButtonSignal()
c1 = s.connect( self.falseSlot )
c2 = s.connect( self.trueSlot )
self.assertEqual( self.trueSlotCalled, False )
self.assertEqual( self.falseSlotCalled, False )
self.assertEqual( s( None, GafferUI.ButtonEvent() ), True )
self.assertEqual( self.trueSlotCalled, True )
self.assertEqual( self.falseSlotCalled, True )
def testExceptionHandling( self ) :
# We don't want exceptions in one slot to prevent the
# invocation of other slots. But we do want the errors from
# those slots to be printed to stderr.
s = GafferUI.Gadget.ButtonSignal()
c1 = s.connect( self.exceptionSlot )
c2 = s.connect( self.trueSlot )
self.assertEqual( self.exceptionSlotCalled, False )
self.assertEqual( self.trueSlotCalled, False )
tmpStdErr = StringIO.StringIO()
sys.stderr = tmpStdErr
try :
self.assertEqual( s( None, GafferUI.ButtonEvent() ), True )
finally :
sys.stderr = sys.__stderr__
self.assert_( "oops" in tmpStdErr.getvalue() )
self.assertEqual( self.exceptionSlotCalled, True )
self.assertEqual( self.trueSlotCalled, True )
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "fcca96ad1319f3e776f5a3deade85e31",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 62,
"avg_line_length": 24.370786516853933,
"alnum_prop": 0.7224527431996312,
"repo_name": "cedriclaunay/gaffer",
"id": "4665eda475021fb91a283982bcbddd550ce7f168",
"size": "3960",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/GafferUITest/EventSignalCombinerTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8478"
},
{
"name": "C++",
"bytes": "3754297"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "Python",
"bytes": "3726245"
},
{
"name": "Shell",
"bytes": "7956"
},
{
"name": "Slash",
"bytes": "39241"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.utils import timezone
from django.db import IntegrityError
from django.core.exceptions import MultipleObjectsReturned
from core.tests.mommy_utils import make_recipe, make_user
from timer.models import Timer
class RunningTimerManagerTestCase(TestCase):
def test_query_set(self):
timer1 = make_recipe("timer.Timer", stopped=None)
make_recipe("timer.Timer", stopped=timezone.now())
timer3 = make_recipe("timer.Timer", stopped=None)
make_recipe("timer.Timer", cancelled=True)
timers = Timer.running_objects.all()
self.assertItemsEqual(timers, [timer1, timer3])
def test_get_by_user_fails_with_multiple_timers(self):
try:
user = make_user()
make_recipe("timer.Timer", stopped=None, created_by=user, _quantity=2)
Timer.running_objects.get_by_user(user)
except (MultipleObjectsReturned, IntegrityError):
pass
else:
self.assertTrue(False, "It should raise MultipleObjectsReturned or IntegrityError")
def test_get_by_user_fails_when_no_timer(self):
user = make_user()
make_recipe("timer.Timer", stopped=timezone.now(), created_by=user)
self.assertRaises(IndexError, Timer.running_objects.get_by_user, user)
def test_get_by_user_returns_timer(self):
user = make_user()
timer = make_recipe("timer.Timer", stopped=None, created_by=user)
self.assertEqual(Timer.running_objects.get_by_user(user), timer)
|
{
"content_hash": "3c291bafd32e1e67e6a7157aa145e9a1",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 95,
"avg_line_length": 36.714285714285715,
"alnum_prop": 0.682230869001297,
"repo_name": "ministryofjustice/cla_backend",
"id": "bb50ddc7c9fdf4690ca7e9bb60ec4a0df3dd0ba9",
"size": "1542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cla_backend/apps/timer/tests/test_managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45941"
},
{
"name": "Dockerfile",
"bytes": "1272"
},
{
"name": "HTML",
"bytes": "14794"
},
{
"name": "JavaScript",
"bytes": "2762"
},
{
"name": "Mustache",
"bytes": "3607"
},
{
"name": "Python",
"bytes": "1577558"
},
{
"name": "Shell",
"bytes": "11204"
},
{
"name": "Smarty",
"bytes": "283906"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
from .external import ExternalLink
|
{
"content_hash": "b292576d0e76343e56d9f5afd9d2d03c",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 38,
"avg_line_length": 27.5,
"alnum_prop": 0.7818181818181819,
"repo_name": "kawamon/hue",
"id": "df752945ddb5cf515b4b97e47b4b80fc4117d145",
"size": "110",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/openpyxl-2.6.4/openpyxl/workbook/external_link/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
class FeatureSparseToDense(ModelLayer):
def __init__(
self, model, input_record, input_specs, name="feature_sparse_to_dense", **kwargs
):
"""
`input_specs` follows the format of FeatureSpec from schema. To be more
precise it's a namedtuple that should have:
'feature_type', 'feature_names', 'feature_ids'
"""
super(FeatureSparseToDense, self).__init__(model, name, input_record, **kwargs)
self.input_specs = input_specs
outputs = []
for field, feature_specs in self.input_specs:
assert len(feature_specs.feature_names) == len(feature_specs.feature_ids)
if feature_specs.feature_type == "FLOAT":
outputs.append(
(
field,
schema.Scalar(
(np.float32, (len(feature_specs.feature_ids),)),
self.get_next_blob_reference(field + "_output"),
),
)
)
elif feature_specs.feature_type == "ID_LIST":
outputs.append(
(
field,
schema.Struct(
(
"ranges",
schema.Scalar(
(np.int32, (len(feature_specs.feature_ids), 2)),
self.get_next_blob_reference(field + "_ranges"),
),
),
(
"values",
schema.Scalar(
np.int64,
self.get_next_blob_reference(field + "_values"),
),
),
),
)
)
elif feature_specs.feature_type == "ID_SCORE_LIST":
outputs.append(
(
field,
schema.Struct(
(
"ranges",
schema.Scalar(
(np.int32, (len(feature_specs.feature_ids), 2)),
self.get_next_blob_reference(field + "_ranges"),
),
),
(
"ids",
schema.Scalar(
np.int64,
self.get_next_blob_reference(field + "_ids"),
),
),
(
"scores",
schema.Scalar(
np.float32,
self.get_next_blob_reference(field + "_scores"),
),
),
),
)
)
elif feature_specs.feature_type == "EMBEDDING":
# We don't know dimensions of embeddings in input data.
# Even though they should match dimensions from feature config,
# we keep ranges blob to check input data later.
outputs.append(
(
field,
schema.Struct(
(
"ranges",
schema.Scalar(
(np.int32, (len(feature_specs.feature_ids), 2)),
self.get_next_blob_reference(field + "_ranges"),
),
),
(
"values",
schema.Scalar(
np.float32,
self.get_next_blob_reference(field + "_values"),
),
),
),
)
)
elif feature_specs.feature_type == "GENERIC_FEATURE":
# We don't know dimensions of embeddings in input data.
# Even though they should match dimensions from feature config,
# we keep ranges blob to check input data later.
# Currently this schema with ranges and values is only for
# generic type enum 1. If new types are implemented, we need to
# modify the ParseGeneric operator, and this part accordingly
outputs.append(
(
field,
schema.Struct(
(
"ranges",
schema.Scalar(
(np.int32, (len(feature_specs.feature_ids), 2)),
self.get_next_blob_reference(field + "_ranges"),
),
),
(
"values",
schema.Scalar(
np.float32,
self.get_next_blob_reference(field + "_values"),
),
),
),
)
)
else:
raise TypeError(
"Unsupported input type: {0}".format(feature_specs.feature_type)
)
# TODO(amalevich): This schema is producing ranges. And thus if there is
# something using it it should support ranges as well. It might be
# confusing, if we don't add better support for ranges/have it as a
# first layer
self.output_schema = schema.Struct(*outputs)
# TODO(amalevich): Consider moving this data to schema, instead
# Structs don't support attaching metadata to them and cloning
# will break things badly, but this is the most elegant way to pass
# this info around. Should we change it, or will it be too much work and
# not worth it?
for field, feature_specs in input_specs:
schema.attach_metadata_to_scalars(
self.output_schema[field], schema.Metadata(feature_specs=feature_specs)
)
self.zero = model.global_constants["ZERO"]
self.zero_range = model.global_constants["ZERO_RANGE"]
# Add operators to all types that need to be densified
def add_ops(self, net):
record = self.input_record
for field, feature_specs in self.input_specs:
if feature_specs.feature_type == "FLOAT":
net.SparseToDenseMask(
[
record[field].keys(),
record[field].values(),
self.zero,
record[field].lengths(),
],
[self.output_schema[field]()],
mask=feature_specs.feature_ids,
)
elif feature_specs.feature_type == "ID_LIST":
id_list_ranges = net.LengthsToRanges(
record[field].values.lengths(), net.NextScopedBlob("id_list_ranges")
)
net.SparseToDenseMask(
[
record[field].keys(),
id_list_ranges,
self.zero_range,
record[field].lengths(),
],
self.output_schema[field].ranges(),
mask=feature_specs.feature_ids,
)
# Alias helps to enforce the fact that all SparseToDense calls
# produce new blobs.
# Reusing blob names might result in some weird consequences
# during the delivery time, when content of the blobs is
# generated based on the inputSpecs.
net.Alias(
record[field].values.items(), self.output_schema[field].values()
)
elif feature_specs.feature_type == "ID_SCORE_LIST":
# TODO: merge this to the case above?
id_list_ranges = net.LengthsToRanges(
record[field].values.lengths(),
net.NextScopedBlob("id_score_list_ranges"),
)
net.SparseToDenseMask(
[
record[field].keys(),
id_list_ranges,
self.zero_range,
record[field].lengths(),
],
self.output_schema[field].ranges(),
mask=feature_specs.feature_ids,
)
# Alias helps to enforce the fact that all SparseToDense calls
# produce new blobs.
# Reusing blob names might result in some weird consequences
# during the delivery time, when content of the blobs is
# generated based on the inputSpecs.
net.Alias(record[field].values.keys(), self.output_schema[field].ids())
net.Alias(
record[field].values.values(), self.output_schema[field].scores()
)
elif feature_specs.feature_type == "EMBEDDING":
ranges = net.LengthsToRanges(
record[field].values.lengths(),
net.NextScopedBlob("embeddings_ranges"),
)
net.SparseToDenseMask(
[
record[field].keys(),
ranges,
self.zero_range,
record[field].lengths(),
],
self.output_schema[field].ranges(),
mask=feature_specs.feature_ids,
)
# Alias helps to enforce the fact that all SparseToDense calls
# produce new blobs.
# Reusing blob names might result in some weird consequences
# during the delivery time, when content of the blobs is
# generated based on the inputSpecs.
net.Alias(
record[field].values.items(), self.output_schema[field].values()
)
elif feature_specs.feature_type == "GENERIC_FEATURE":
(
feature_lengths_blob,
feature_ids_blob,
value_lengths_blob,
value_values_blob,
) = net.ParseGeneric(
[record[field]()],
["feature_lengths", "feature_ids", "value_lengths", "value_values"],
feature_type_enum=1,
)
# Currently our implementation only supports
# generic type enum 1. If new types are implemented, we need to
# modify the ParseGeneric operator, the schema above,
# and this part accordingly to parse the generic feature strings
# into input_record
ranges = net.LengthsToRanges(
value_lengths_blob, net.NextScopedBlob("generics_ranges")
)
net.SparseToDenseMask(
[feature_ids_blob, ranges, self.zero_range, feature_lengths_blob],
self.output_schema[field].ranges(),
mask=feature_specs.feature_ids,
)
# Alias helps to enforce the fact that all SparseToDense calls
# produce new blobs.
# Reusing blob names might result in some weird consequences
# during the delivery time, when content of the blobs is
# generated based on the inputSpecs.
net.Alias(value_values_blob, self.output_schema[field].values())
def get_metadata(self):
metadata = []
for field, feature_specs in self.input_specs:
metadata.append(
(
{
"type": feature_specs.feature_type,
"names": feature_specs.feature_names,
"ids": feature_specs.feature_ids,
},
self.output_schema[field].field_blobs(),
self.output_schema[field].field_types(),
)
)
if feature_specs.feature_type == "FLOAT":
metadata[-1][0]["cardinality"] = 1
return metadata
|
{
"content_hash": "97fa14384501f4b71fe180464c633cdb",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 88,
"avg_line_length": 45.02040816326531,
"alnum_prop": 0.4262617104865518,
"repo_name": "ryfeus/lambda-packs",
"id": "8fd340b195a914b58dd7e14168ed231641b2a07a",
"size": "13309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytorch/source/caffe2/python/layers/feature_sparse_to_dense.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
from sseclient import SSEClient
from Queue import Queue
import requests
import json
import threading
import display
import socket
import sys
#DISPLAY_CLASS = display.BasicDisplay
DISPLAY_CLASS = display.CursesDisplay
URL = 'https://eventsource.firebaseio-demo.com/.json'
class ClosableSSEClient(SSEClient):
"""
Hack in some closing functionality on top of the SSEClient
"""
def __init__(self, *args, **kwargs):
self.should_connect = True
super(ClosableSSEClient, self).__init__(*args, **kwargs)
def _connect(self):
if self.should_connect:
super(ClosableSSEClient, self)._connect()
else:
raise StopIteration()
def close(self):
self.should_connect = False
self.retry = 0
# HACK: dig through the sseclient library to the requests library down to the underlying socket.
# then close that to raise an exception to get out of streaming. I should probably file an issue w/ the
# requests library to make this easier
self.resp.raw._fp.fp._sock.shutdown(socket.SHUT_RDWR)
self.resp.raw._fp.fp._sock.close()
class PostThread(threading.Thread):
def __init__(self, outbound_queue):
self.outbound_queue = outbound_queue
super(PostThread, self).__init__()
def run(self):
while True:
msg = self.outbound_queue.get()
if not msg:
break
to_post = json.dumps(msg)
requests.post(URL, data=to_post)
def close(self):
self.outbound_queue.put(False)
class RemoteThread(threading.Thread):
def __init__(self, message_queue):
self.message_queue = message_queue
super(RemoteThread, self).__init__()
def run(self):
try:
self.sse = ClosableSSEClient(URL)
for msg in self.sse:
msg_data = json.loads(msg.data)
if msg_data is None: # keep-alives
continue
path = msg_data['path']
data = msg_data['data']
if path == '/':
# initial update
if data:
keys = data.keys()
keys.sort()
for k in keys:
self.message_queue.put(data[k])
else:
# must be a push ID
self.message_queue.put(data)
except socket.error:
pass # this can happen when we close the stream
def close(self):
if self.sse:
self.sse.close()
if __name__ == '__main__':
args = sys.argv
client = args[1] if len(args) == 2 else 'python'
outbound_queue = Queue()
inbound_queue = Queue()
post_thread = PostThread(outbound_queue)
post_thread.start()
remote_thread = RemoteThread(inbound_queue)
remote_thread.start()
disp = DISPLAY_CLASS(outbound_queue, client, inbound_queue)
disp.run()
post_thread.join()
remote_thread.close()
remote_thread.join()
|
{
"content_hash": "5594eb447694c849a9feb310e422843f",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 111,
"avg_line_length": 29.864077669902912,
"alnum_prop": 0.5715214564369311,
"repo_name": "googlearchive/EventSource-Examples",
"id": "40eb97c96f58eaab478868e17eeb30f1978112a1",
"size": "3099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/chat.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7627"
},
{
"name": "Ruby",
"bytes": "12286"
}
],
"symlink_target": ""
}
|
import json
import os
import pipes
import shutil
import subprocess
import sys
script_dir = os.path.dirname(os.path.realpath(__file__))
nacl_dir = os.path.dirname(script_dir)
root_dir = os.path.dirname(nacl_dir)
sys.path.insert(1, os.path.join(root_dir, 'tools'))
sys.path.insert(0, os.path.join(root_dir, 'tools', 'gyp', 'pylib'))
json_data_file = os.path.join(script_dir, 'win_toolchain.json')
import gyp
def SetEnvironmentAndGetRuntimeDllDirs():
"""Sets up os.environ to use the depot_tools VS toolchain with gyp, and
returns the location of the VS runtime DLLs so they can be copied into
the output directory after gyp generation.
"""
vs2013_runtime_dll_dirs = None
depot_tools_win_toolchain = \
bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
if sys.platform in ('win32', 'cygwin') and depot_tools_win_toolchain:
with open(json_data_file, 'r') as tempf:
toolchain_data = json.load(tempf)
toolchain = toolchain_data['path']
version = toolchain_data['version']
version_is_pro = version[-1] != 'e'
win8sdk = toolchain_data['win8sdk']
wdk = toolchain_data['wdk']
# TODO(scottmg): The order unfortunately matters in these. They should be
# split into separate keys for x86 and x64. (See CopyVsRuntimeDlls call
# below). http://crbug.com/345992
vs2013_runtime_dll_dirs = toolchain_data['runtime_dirs']
os.environ['GYP_MSVS_OVERRIDE_PATH'] = toolchain
os.environ['GYP_MSVS_VERSION'] = version
# We need to make sure windows_sdk_path is set to the automated
# toolchain values in GYP_DEFINES, but don't want to override any
# other values there.
gyp_defines_dict = gyp.NameValueListToDict(gyp.ShlexEnv('GYP_DEFINES'))
gyp_defines_dict['windows_sdk_path'] = win8sdk
os.environ['GYP_DEFINES'] = ' '.join('%s=%s' % (k, pipes.quote(str(v)))
for k, v in gyp_defines_dict.iteritems())
os.environ['WINDOWSSDKDIR'] = win8sdk
os.environ['WDK_DIR'] = wdk
# Include the VS runtime in the PATH in case it's not machine-installed.
runtime_path = ';'.join(vs2013_runtime_dll_dirs)
os.environ['PATH'] = runtime_path + ';' + os.environ['PATH']
return vs2013_runtime_dll_dirs
def CopyVsRuntimeDlls(output_dir, runtime_dirs):
"""Copies the VS runtime DLLs from the given |runtime_dirs| to the output
directory so that even if not system-installed, built binaries are likely to
be able to run.
This needs to be run after gyp has been run so that the expected target
output directories are already created.
"""
assert sys.platform.startswith(('win32', 'cygwin'))
def copy_runtime(target_dir, source_dir, dll_pattern):
"""Copy both the msvcr and msvcp runtime DLLs, only if the target doesn't
exist, but the target directory does exist."""
for which in ('p', 'r'):
dll = dll_pattern % which
target = os.path.join(target_dir, dll)
source = os.path.join(source_dir, dll)
# If gyp generated to that output dir, and the runtime isn't already
# there, then copy it over.
if (os.path.isdir(target_dir) and
(not os.path.isfile(target) or
os.stat(target).st_mtime != os.stat(source).st_mtime)):
print 'Copying %s to %s...' % (source, target)
if os.path.exists(target):
os.unlink(target)
shutil.copy2(source, target)
x86, x64 = runtime_dirs
out_debug = os.path.join(output_dir, 'Debug')
out_debug_nacl64 = os.path.join(output_dir, 'Debug', 'x64')
out_release = os.path.join(output_dir, 'Release')
out_release_nacl64 = os.path.join(output_dir, 'Release', 'x64')
out_debug_x64 = os.path.join(output_dir, 'Debug_x64')
out_release_x64 = os.path.join(output_dir, 'Release_x64')
if os.path.exists(out_debug) and not os.path.exists(out_debug_nacl64):
os.makedirs(out_debug_nacl64)
if os.path.exists(out_release) and not os.path.exists(out_release_nacl64):
os.makedirs(out_release_nacl64)
copy_runtime(out_debug, x86, 'msvc%s120d.dll')
copy_runtime(out_release, x86, 'msvc%s120.dll')
copy_runtime(out_debug_x64, x64, 'msvc%s120d.dll')
copy_runtime(out_release_x64, x64, 'msvc%s120.dll')
copy_runtime(out_debug_nacl64, x64, 'msvc%s120d.dll')
copy_runtime(out_release_nacl64, x64, 'msvc%s120.dll')
def _GetDesiredVsToolchainHashes():
"""Load a list of SHA1s corresponding to the toolchains that we want installed
to build with."""
sha1path = os.path.join(script_dir, 'toolchain_vs2013.hash')
with open(sha1path, 'rb') as f:
return f.read().strip().splitlines()
def Update():
"""Requests an update of the toolchain to the specific hashes we have at
this revision. The update outputs a .json of the various configuration
information required to pass to gyp which we use in |GetToolchainDir()|.
"""
depot_tools_win_toolchain = \
bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
if sys.platform in ('win32', 'cygwin') and depot_tools_win_toolchain:
import find_depot_tools
depot_tools_path = find_depot_tools.add_depot_tools_to_path()
json_data_file = os.path.join(script_dir, 'win_toolchain.json')
get_toolchain_args = [
sys.executable,
os.path.join(depot_tools_path,
'win_toolchain',
'get_toolchain_if_necessary.py'),
'--output-json', json_data_file,
] + _GetDesiredVsToolchainHashes()
subprocess.check_call(get_toolchain_args)
return 0
def GetToolchainDir():
"""Gets location information about the current toolchain (must have been
previously updated by 'update'). This is used for the GN build."""
SetEnvironmentAndGetRuntimeDllDirs()
print '''vs_path = "%s"
sdk_path = "%s"
vs_version = "%s"
wdk_dir = "%s"
''' % (
os.environ['GYP_MSVS_OVERRIDE_PATH'],
os.environ['WINDOWSSDKDIR'],
os.environ['GYP_MSVS_VERSION'],
os.environ['WDK_DIR'])
def main():
if not sys.platform.startswith(('win32', 'cygwin')):
return 0
commands = {
'update': Update,
'get_toolchain_dir': GetToolchainDir,
# TODO(scottmg): Add copy_dlls for GN builds (gyp_chromium calls
# CopyVsRuntimeDlls via import, currently).
}
if len(sys.argv) < 2 or sys.argv[1] not in commands:
print >>sys.stderr, 'Expected one of: %s' % ', '.join(commands)
return 1
return commands[sys.argv[1]]()
if __name__ == '__main__':
sys.exit(main())
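# Usage sketch (not part of the original script; directory names below are
# illustrative):
#
#   python vs_toolchain.py update             # fetch the pinned VS toolchain
#   python vs_toolchain.py get_toolchain_dir  # print GN-style path variables
#
# From a gyp wrapper such as gyp_chromium the flow is roughly:
#
#   runtime_dirs = SetEnvironmentAndGetRuntimeDllDirs()
#   if runtime_dirs:
#     CopyVsRuntimeDlls(os.path.join(src_dir, 'out'), runtime_dirs)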
|
{
"content_hash": "218147d9e4392ddae9586792d7738aba",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 80,
"avg_line_length": 38.05917159763314,
"alnum_prop": 0.6714863184079602,
"repo_name": "CTSRD-SOAAP/chromium-42.0.2311.135",
"id": "feb8d740c6fce782abe8f003edcef67972634a8e",
"size": "6600",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "native_client/build/vs_toolchain.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "241154"
},
{
"name": "C",
"bytes": "12370053"
},
{
"name": "C++",
"bytes": "266788423"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "813488"
},
{
"name": "Emacs Lisp",
"bytes": "2360"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "20131029"
},
{
"name": "Java",
"bytes": "8495790"
},
{
"name": "JavaScript",
"bytes": "12980966"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "208709"
},
{
"name": "Objective-C",
"bytes": "1509363"
},
{
"name": "Objective-C++",
"bytes": "7960581"
},
{
"name": "PLpgSQL",
"bytes": "215882"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "432373"
},
{
"name": "Python",
"bytes": "11147426"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104923"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1207731"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
"""
Analytics service integration for Django
========================================
The django-analytical application integrates analytics services into a
Django_ project. See the ``docs`` directory for more information.
.. _Django: http://www.djangoproject.com/
"""
__author__ = "Joost Cassee"
__email__ = "joost@cassee.net"
__version__ = "0.22.0"
__copyright__ = "Copyright (C) 2011-2012 Joost Cassee and others"
__license__ = "MIT License"
|
{
"content_hash": "ca0de8bf8ece4a70ada5091bf10c59fc",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 70,
"avg_line_length": 29.933333333333334,
"alnum_prop": 0.6414253897550112,
"repo_name": "ChristosChristofidis/django-analytical",
"id": "9490ae98972bfdd11106228ba231de0273d6372f",
"size": "449",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "analytical/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "147560"
}
],
"symlink_target": ""
}
|
from keystone import config
from keystone import test
from keystone.common.sql import util as sql_util
from keystone.identity.backends import sql as identity_sql
from keystone.token.backends import sql as token_sql
import test_backend
import default_fixtures
CONF = config.CONF
class SqlIdentity(test.TestCase, test_backend.IdentityTests):
def setUp(self):
super(SqlIdentity, self).setUp()
CONF(config_files=[test.etcdir('keystone.conf'),
test.testsdir('test_overrides.conf'),
test.testsdir('backend_sql.conf')])
sql_util.setup_test_database()
self.identity_api = identity_sql.Identity()
self.load_fixtures(default_fixtures)
def test_delete_user_with_tenant_association(self):
user = {'id': 'fake',
'name': 'fakeuser',
'password': 'passwd'}
self.identity_api.create_user('fake', user)
self.identity_api.add_user_to_tenant(self.tenant_bar['id'],
user['id'])
self.identity_api.delete_user(user['id'])
tenants = self.identity_api.get_tenants_for_user(user['id'])
self.assertEquals(tenants, [])
class SqlToken(test.TestCase, test_backend.TokenTests):
def setUp(self):
super(SqlToken, self).setUp()
CONF(config_files=[test.etcdir('keystone.conf'),
test.testsdir('test_overrides.conf'),
test.testsdir('backend_sql.conf')])
sql_util.setup_test_database()
self.token_api = token_sql.Token()
#class SqlCatalog(test_backend_kvs.KvsCatalog):
# def setUp(self):
# super(SqlCatalog, self).setUp()
# self.catalog_api = sql.SqlCatalog()
# self._load_fixtures()
# def _load_fixtures(self):
# self.catalog_foobar = self.catalog_api._create_catalog(
# 'foo', 'bar',
# {'RegionFoo': {'service_bar': {'foo': 'bar'}}})
# def test_get_catalog_bad_user(self):
# catalog_ref = self.catalog_api.get_catalog('foo' + 'WRONG', 'bar')
# self.assert_(catalog_ref is None)
# def test_get_catalog_bad_tenant(self):
# catalog_ref = self.catalog_api.get_catalog('foo', 'bar' + 'WRONG')
# self.assert_(catalog_ref is None)
# def test_get_catalog(self):
# catalog_ref = self.catalog_api.get_catalog('foo', 'bar')
# self.assertDictEquals(catalog_ref, self.catalog_foobar)
|
{
"content_hash": "67d809da6ee390c136399b861448ed86",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 71,
"avg_line_length": 36.04477611940298,
"alnum_prop": 0.6227743271221532,
"repo_name": "sileht/deb-openstack-keystone",
"id": "4d1da37c34247484b2e9ebbd8e81a1172205ee5b",
"size": "3039",
"binary": false,
"copies": "1",
"ref": "refs/heads/debian/unstable",
"path": "tests/test_backend_sql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "D",
"bytes": "2659"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "565381"
},
{
"name": "Shell",
"bytes": "5273"
}
],
"symlink_target": ""
}
|
ten_things = "Apples Oranges Crows Telephone Light Sugar"
print("Wait there are not 10 things in that list. Let's fix that.")
stuff = ten_things.split(' ')
more_stuff = ["Day", "Night", "Song", "Frisbee", "Corn", "Banana", "Girl", "Boy"]
while len(stuff) != 10:
next_one = more_stuff.pop()
print("Adding:", next_one)
stuff.append(next_one)
print("There are %d items now." % len(stuff))
print("There we go: ", stuff)
print("Let's do some things with stuff.")
print(stuff[1])
print(stuff[-1]) #Whoa! fancy
print(stuff.pop())
print(' '.join(stuff)) # What? Cool!
print('#'.join(stuff[3:5])) # Super stellar!
|
{
"content_hash": "7df4af12d188568c78104ccf841cdf8d",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 81,
"avg_line_length": 26.791666666666668,
"alnum_prop": 0.6236391912908242,
"repo_name": "davvi/Hardway3",
"id": "4fddfcdd0d8f09866b37cdf5ee627b9636b9ad97",
"size": "666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex38.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24375"
}
],
"symlink_target": ""
}
|
import numpy as np
from PIL import Image
import random
# one-hot label encoding, e.g. [1, 0, 0] for label 1
class dNetData(object):
def __init__(self, batch_size, image_width, image_height):
self.batch_size = batch_size
self.seen = 0
self.fileNames = []
self.labels = []
self.image_height = image_height
self.image_width = image_width
def getAllFileNameAndLabel(self):
lineN = 0
for line in open("list.txt"):
ss = line.split(' ')
if len(ss) == 2:
self.fileNames.append(ss[0])
self.labels.append(int(ss[1]))
lineN += 1
self.allImgNum = lineN
def load_data(self):
self.getAllFileNameAndLabel()
self.randomData()
def randomData(self):
self.seen = 0
        self.list = list(range(self.allImgNum))  # list() so np.random.shuffle also works on Python 3
np.random.shuffle(self.list)
def loadImgAndLab(self, index):
fileName = self.fileNames[self.list[index]]
labelN = self.labels[self.list[index]]
im = Image.open(fileName)
image = np.array(im.getdata()).reshape(1, self.image_height, self.image_width, 3).astype(np.float32)
if labelN == 1:
label = [[1, 0, 0]]
if labelN == 2:
label = [[0, 1, 0]]
if labelN == 3:
label = [[0, 0, 1]]
return image, label
def prepare_data(self):
if self.seen + self.batch_size > self.allImgNum:
self.randomData()
for i in range(self.seen, self.seen + self.batch_size):
image, label = self.loadImgAndLab(i)
if i == self.seen:
imagesNow = image
labelsNow = label
else:
imagesNow = np.concatenate((imagesNow, image))
labelsNow = np.concatenate((labelsNow, label))
self.seen += self.batch_size
return imagesNow, labelsNow
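# Usage sketch (not part of the original module). It assumes a list.txt with
# "<image path> <label>" lines (labels 1-3) and RGB images of the given size;
# the sizes below are made up:
#   engine = dNetData(batch_size=8, image_width=64, image_height=64)
#   engine.load_data()
#   images, labels = engine.prepare_data()
#   # images: float32 array of shape (8, 64, 64, 3); labels: (8, 3) one-hot rows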
|
{
"content_hash": "86eee62acaa0cd8af57323bd985461f4",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 108,
"avg_line_length": 30.758064516129032,
"alnum_prop": 0.5448348190875721,
"repo_name": "huangshiyu13/funnyLittleProgram",
"id": "79ffe6652325f154c0b0d5cb5e2036feb1018b03",
"size": "1907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WhatKindofGirlYouLIke/data_engine.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "2873"
},
{
"name": "CSS",
"bytes": "335"
},
{
"name": "HTML",
"bytes": "444"
},
{
"name": "JavaScript",
"bytes": "62816"
},
{
"name": "Makefile",
"bytes": "452"
},
{
"name": "Matlab",
"bytes": "6988"
},
{
"name": "Python",
"bytes": "139596"
},
{
"name": "Shell",
"bytes": "725"
}
],
"symlink_target": ""
}
|
from functools import reduce  # required on Python 3, where reduce is no longer a builtin
def stray(arr):
    return reduce(lambda prev, curr: prev ^ curr, arr)  # XOR cancels paired values, leaving the stray
|
{
"content_hash": "e0c0e5d216dd2767cb1a2c75dc71d05c",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 54,
"avg_line_length": 35.5,
"alnum_prop": 0.676056338028169,
"repo_name": "the-zebulan/CodeWars",
"id": "2947c0a21e039c13a52c661a597ac229fed41e01",
"size": "71",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "katas/kyu_7/find_the_stray_number.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
}
|
"""Support for the OpenWeatherMap (OWM) service."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_API_KEY,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by OpenWeatherMap"
CONF_FORECAST = "forecast"
CONF_LANGUAGE = "language"
DEFAULT_NAME = "OWM"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=120)
SENSOR_TYPES = {
"weather": ["Condition", None],
"temperature": ["Temperature", None],
"wind_speed": ["Wind speed", "m/s"],
"wind_bearing": ["Wind bearing", "°"],
"humidity": ["Humidity", "%"],
"pressure": ["Pressure", "mbar"],
"clouds": ["Cloud coverage", "%"],
"rain": ["Rain", "mm"],
"snow": ["Snow", "mm"],
"weather_code": ["Weather code", None],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=[]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_FORECAST, default=False): cv.boolean,
vol.Optional(CONF_LANGUAGE): cv.string,
}
)
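# Example configuration sketch (not part of this file; the API key value is a
# placeholder). In configuration.yaml this platform is typically set up as:
#
#   sensor:
#     - platform: openweathermap
#       api_key: YOUR_OWM_API_KEY
#       forecast: true
#       language: en
#       monitored_conditions:
#         - weather
#         - temperature
#         - humidity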
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the OpenWeatherMap sensor."""
from pyowm import OWM
if None in (hass.config.latitude, hass.config.longitude):
_LOGGER.error("Latitude or longitude not set in Home Assistant config")
return
SENSOR_TYPES["temperature"][1] = hass.config.units.temperature_unit
name = config.get(CONF_NAME)
forecast = config.get(CONF_FORECAST)
language = config.get(CONF_LANGUAGE)
if isinstance(language, str):
language = language.lower()[:2]
owm = OWM(API_key=config.get(CONF_API_KEY), language=language)
if not owm:
_LOGGER.error("Unable to connect to OpenWeatherMap")
return
data = WeatherData(owm, forecast, hass.config.latitude, hass.config.longitude)
dev = []
for variable in config[CONF_MONITORED_CONDITIONS]:
dev.append(
OpenWeatherMapSensor(name, data, variable, SENSOR_TYPES[variable][1])
)
if forecast:
SENSOR_TYPES["forecast"] = ["Forecast", None]
dev.append(
OpenWeatherMapSensor(name, data, "forecast", SENSOR_TYPES["temperature"][1])
)
add_entities(dev, True)
class OpenWeatherMapSensor(Entity):
"""Implementation of an OpenWeatherMap sensor."""
def __init__(self, name, weather_data, sensor_type, temp_unit):
"""Initialize the sensor."""
self.client_name = name
self._name = SENSOR_TYPES[sensor_type][0]
self.owa_client = weather_data
self.temp_unit = temp_unit
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the sensor."""
return "{} {}".format(self.client_name, self._name)
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
def update(self):
"""Get the latest data from OWM and updates the states."""
from pyowm.exceptions.api_call_error import APICallError
try:
self.owa_client.update()
except APICallError:
_LOGGER.error("Error when calling API to update data")
return
data = self.owa_client.data
fc_data = self.owa_client.fc_data
if data is None:
return
try:
if self.type == "weather":
self._state = data.get_detailed_status()
elif self.type == "temperature":
if self.temp_unit == TEMP_CELSIUS:
self._state = round(data.get_temperature("celsius")["temp"], 1)
elif self.temp_unit == TEMP_FAHRENHEIT:
self._state = round(data.get_temperature("fahrenheit")["temp"], 1)
else:
self._state = round(data.get_temperature()["temp"], 1)
elif self.type == "wind_speed":
self._state = round(data.get_wind()["speed"], 1)
elif self.type == "wind_bearing":
self._state = round(data.get_wind()["deg"], 1)
elif self.type == "humidity":
self._state = round(data.get_humidity(), 1)
elif self.type == "pressure":
self._state = round(data.get_pressure()["press"], 0)
elif self.type == "clouds":
self._state = data.get_clouds()
elif self.type == "rain":
if data.get_rain():
self._state = round(data.get_rain()["3h"], 0)
self._unit_of_measurement = "mm"
else:
self._state = "not raining"
self._unit_of_measurement = ""
elif self.type == "snow":
if data.get_snow():
                    self._state = round(data.get_snow()["3h"], 0)
self._unit_of_measurement = "mm"
else:
self._state = "not snowing"
self._unit_of_measurement = ""
elif self.type == "forecast":
if fc_data is None:
return
self._state = fc_data.get_weathers()[0].get_detailed_status()
elif self.type == "weather_code":
self._state = data.get_weather_code()
except KeyError:
self._state = None
_LOGGER.warning("Condition is currently not available: %s", self.type)
class WeatherData:
"""Get the latest data from OpenWeatherMap."""
def __init__(self, owm, forecast, latitude, longitude):
"""Initialize the data object."""
self.owm = owm
self.forecast = forecast
self.latitude = latitude
self.longitude = longitude
self.data = None
self.fc_data = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from OpenWeatherMap."""
from pyowm.exceptions.api_call_error import APICallError
try:
obs = self.owm.weather_at_coords(self.latitude, self.longitude)
except (APICallError, TypeError):
_LOGGER.error("Error when calling API to get weather at coordinates")
obs = None
if obs is None:
_LOGGER.warning("Failed to fetch data")
return
self.data = obs.get_weather()
if self.forecast == 1:
try:
obs = self.owm.three_hours_forecast_at_coords(
self.latitude, self.longitude
)
self.fc_data = obs.get_forecast()
except (ConnectionResetError, TypeError):
_LOGGER.warning("Failed to fetch forecast")
|
{
"content_hash": "176272ef75f7e51b47a964c67595ebbe",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 88,
"avg_line_length": 33.50222222222222,
"alnum_prop": 0.5784027593526134,
"repo_name": "fbradyirl/home-assistant",
"id": "85bd1ccb2c6eb03233b4647a00a591d12a34f884",
"size": "7539",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/openweathermap/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
}
|
"""
FILE: chat_client_sample_async.py
DESCRIPTION:
    These samples demonstrate how to create a chat client, get a chat thread client,
    create a chat thread, get a chat thread by id, list chat threads, and delete
    a chat thread by id.
    You need to use the azure.communication.identity module to get a user access
    token and user identity before running this sample.
USAGE:
python chat_client_sample_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_COMMUNICATION_SERVICE_ENDPOINT - Communication Service endpoint url
2) TOKEN - the user access token, from token_response.token
3) USER_ID - the user id, from token_response.identity
"""
import os
import asyncio
class ChatClientSamplesAsync(object):
from azure.communication.identity import CommunicationIdentityClient
connection_string = os.environ.get("COMMUNICATION_SAMPLES_CONNECTION_STRING", None)
if not connection_string:
raise ValueError("Set COMMUNICATION_SAMPLES_CONNECTION_STRING env before run this sample.")
identity_client = CommunicationIdentityClient.from_connection_string(connection_string)
user = identity_client.create_user()
tokenresponse = identity_client.get_token(user, scopes=["chat"])
token = tokenresponse.token
endpoint = os.environ.get("AZURE_COMMUNICATION_SERVICE_ENDPOINT", None)
if not endpoint:
raise ValueError("Set AZURE_COMMUNICATION_SERVICE_ENDPOINT env before run this sample.")
_thread_id = None
def create_chat_client(self):
token = self.token
endpoint = self.endpoint
thread_id = self._thread_id
# [START create_chat_client]
from azure.communication.chat.aio import ChatClient, CommunicationTokenCredential
# set `endpoint` to an existing ACS endpoint
chat_client = ChatClient(endpoint, CommunicationTokenCredential(token))
# [END create_chat_client]
print("chat_client created")
async def create_thread_async(self):
token = self.token
endpoint = self.endpoint
thread_id = self._thread_id
# [START create_thread]
from datetime import datetime
from azure.communication.chat.aio import ChatClient, CommunicationTokenCredential
from azure.communication.chat import ChatParticipant
# set `endpoint` to an existing ACS endpoint
chat_client = ChatClient(endpoint, CommunicationTokenCredential(token))
async with chat_client:
topic = "test topic"
participants = [ChatParticipant(
identifier=self.user,
display_name='name',
share_history_time=datetime.utcnow()
)]
            # creates a new chat_thread every time
create_chat_thread_result = await chat_client.create_chat_thread(topic, thread_participants=participants)
# creates a new chat_thread if not exists
idempotency_token = 'b66d6031-fdcc-41df-8306-e524c9f226b8' # unique identifier
create_chat_thread_result_w_repeatability_id = await chat_client.create_chat_thread(
topic,
thread_participants=participants,
idempotency_token=idempotency_token)
# [END create_thread]
self._thread_id = create_chat_thread_result.chat_thread.id
print("thread created, id: " + self._thread_id)
def get_chat_thread_client(self):
token = self.token
endpoint = self.endpoint
thread_id = self._thread_id
# [START get_chat_thread_client]
from azure.communication.chat.aio import ChatClient, CommunicationTokenCredential
# set `endpoint` to an existing ACS endpoint
chat_client = ChatClient(endpoint, CommunicationTokenCredential(token))
# set `thread_id` to an existing chat thread id
chat_thread_client = chat_client.get_chat_thread_client(thread_id)
# [END get_chat_thread_client]
print("chat_thread_client created with thread id: ", chat_thread_client.thread_id)
async def list_threads_async(self):
token = self.token
endpoint = self.endpoint
thread_id = self._thread_id
# [START list_threads]
from azure.communication.chat.aio import ChatClient, CommunicationTokenCredential
# set `endpoint` to an existing ACS endpoint
chat_client = ChatClient(endpoint, CommunicationTokenCredential(token))
async with chat_client:
from datetime import datetime, timedelta
start_time = datetime.utcnow() - timedelta(days=2)
chat_threads = chat_client.list_chat_threads(results_per_page=5, start_time=start_time)
print("list_threads succeeded with results_per_page is 5, and were created since 2 days ago.")
async for chat_thread_item_page in chat_threads.by_page():
async for chat_thread_item in chat_thread_item_page:
print("thread id: ", chat_thread_item.id)
# [END list_threads]
async def delete_thread_async(self):
token = self.token
endpoint = self.endpoint
thread_id = self._thread_id
# [START delete_thread]
from azure.communication.chat.aio import ChatClient, CommunicationTokenCredential
# set `endpoint` to an existing ACS endpoint
chat_client = ChatClient(endpoint, CommunicationTokenCredential(token))
async with chat_client:
# set `thread_id` to an existing chat thread id
await chat_client.delete_chat_thread(thread_id)
# [END delete_thread]
print("delete_thread succeeded")
def clean_up(self):
print("cleaning up: deleting created user.")
self.identity_client.delete_user(self.user)
async def main():
sample = ChatClientSamplesAsync()
sample.create_chat_client()
await sample.create_thread_async()
sample.get_chat_thread_client()
await sample.list_threads_async()
await sample.delete_thread_async()
sample.clean_up()
if __name__ == '__main__':
asyncio.run(main())
|
{
"content_hash": "29cb9bfc3cef4ae8aff99fed8c47cbee",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 117,
"avg_line_length": 39.3525641025641,
"alnum_prop": 0.6709561817885649,
"repo_name": "Azure/azure-sdk-for-python",
"id": "3ff971aa1e46b3f65563c4256af182824198995e",
"size": "6451",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/communication/azure-communication-chat/samples/chat_client_sample_async.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import pandas as pd
import os, sys, time, random
import numpy as np
from scipy import stats
#NORMALIZATION FUNCTIONS
def LibSize_norm(df, normalization_probes=[]):
'''
Expects sample (rows) by gene (cols) pandas DataFrame
'''
if normalization_probes==[]:
normalization_probes = df.columns
return df.divide(df[normalization_probes].apply(np.sum, axis=1), axis=0) * 10000
def hkgene_norm(df, hk_gene):
'''
Housekeeping Gene Normalization
'''
return df.divide(df[hk_gene].apply(np.mean, axis=1), axis=0)
def MGMR_norm(df, normalization_probes=[], by_plate=False):
'''
Median Geometric Mean Ratio (MGMR)
Expects sample by probe count dataframe, no annotations
    Anders & Huber 2010: http://www.genomebiology.com/content/pdf/gb-2010-11-10-r106.pdf
scale factor for sample j = median (for each gene i: ( kij / geometric mean gene i ) ); where kij is sample j gene i
R code:
data = gene by sample matrix
lib.size <- colSums(x)
RLE = .calcFactorRLE(x)/lib.size
.calcFactorRLE <- function (data)
{
gm <- exp(rowMeans(log(data))) #geometric mean of log gene counts
apply(data, 2, function(u) median((u/gm)[gm > 0])) #divide each gene count by geometric mean for gene and take median value
}
'''
def gmean(array_vals):
return stats.gmean([i for i in array_vals if i > 0])
def get_median(array_vals):
return np.median([i for i in array_vals if i > 0])
if len(normalization_probes) == 0:
normalization_probes = df.columns
if by_plate:
return pd.concat([MGMR_norm(n, normalization_probes) for i,n in df.groupby(level=0)])
#Removing wells with zero counts
df_nonzero = df[df.apply(np.sum, axis=1)>100] #minimum of 100 reads
#df_sample_sums = df_sample_sums[df_sample_sums>100].index
#df_nonzero = df[df.index.isin(df_sample_sums)]
#calculating the pseudo-reference probe count using the geometric mean
gmean_genei = df_nonzero[normalization_probes].apply(gmean, axis=0) #geometric mean for each probe
#calculating ratio of probe count to pseudo-reference probe count (geo mean)
df_ratio = df_nonzero[normalization_probes].divide(gmean_genei, axis=1) #counts divide by geomean by probe
#calculating the scale factor by taking the median across probe ratios for each sample
size_factor = df_ratio[normalization_probes].apply(get_median, axis=1) #pick the median gene as the size factor for each sample
#returning the scaled counts
return df_nonzero.divide(size_factor, axis=0) #dividing counts by size_factor
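# Worked example (a sketch, not part of the original module): MGMR size factors
# for a toy 3-sample x 2-probe table; the probe names are made up.
#   toy = pd.DataFrame([[10, 100], [20, 200], [40, 400]],
#                      columns=['probeA', 'probeB'])
#   # geometric means per probe: 20 and 200
#   # per-sample ratios to those means: [0.5, 0.5], [1.0, 1.0], [2.0, 2.0]
#   # medians give size factors [0.5, 1.0, 2.0]; MGMR_norm(toy) divides each
#   # row by its factor, so every row normalizes to [20.0, 200.0]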
def quantile_norm(df):
df = df.copy()
df_rank = df.T.rank(method='min', axis=0)
data = []
for i in df.index:
df_sorted = df.ix[i].values
df_sorted.sort()
data.append(df_sorted)
quantile_values = pd.DataFrame(data).T.apply(np.mean, axis=1).to_dict()
quantile_df = df_rank.replace(quantile_values)
return quantile_df
#STANDARDIZATION FUNCTIONS
def standardize_df(df, by_plate=False):
if by_plate==False:
df_min = df.apply(np.min, axis=0)
df_max = df.apply(np.max, axis=0)
return df.subtract(df_min, axis=1).divide(df_max, axis=1)
return pd.concat([standardize_df(n) for i,n in df.groupby(level=0)])
def mean_center(values):
'''
input vector of values
'''
mean_val = np.mean(values)
return [i-mean_val for i in values]
def mean_center_by_plate(df):
'''
input df
'''
data = []
for i,n in df.groupby(level=0):
data.append(n.apply(mean_center, axis=0))
return pd.concat(data)
def std_scale(df, by_plate=False, level=0, with_mean=True, with_std=True):
'''
Convenience function to standard scale by pandas index level
'''
from sklearn.preprocessing import StandardScaler
if by_plate:
return pd.concat([pd.DataFrame(StandardScaler(with_mean=with_mean, with_std=with_std).fit_transform(n), columns=n.columns, index=n.index) for i,n in df.groupby(level=level)])
return pd.DataFrame(StandardScaler(with_mean=with_mean, with_std=with_std).fit_transform(df), columns=df.columns, index=df.index)
|
{
"content_hash": "afe2319720b4851ca44229c8cec0b18f",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 187,
"avg_line_length": 28.558441558441558,
"alnum_prop": 0.6382446566621192,
"repo_name": "erscott/RASLseqTools",
"id": "a85eb8901bae8284679866ec8cf4ea3c49004035",
"size": "4399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RASLseqTools/RASLseqNormalize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "114701"
}
],
"symlink_target": ""
}
|
"""
This paver file is intended to help with the release process, and build sdist,
documentation, release notes, and generate checksums for them.
For details on the release process, see
http://scipy.github.io/devdocs/dev/core-dev/index.html#making-a-scipy-release
Building changelog + notes
==========================
Assumes you have git and the binaries/tarballs in installers/::
paver write_release_and_log
This automatically puts the checksum into NOTES.txt and writes the Changelog,
which can be uploaded to Github Releases.
"""
import os
import sys
import subprocess
import re
import shutil
import warnings
from hashlib import md5
from hashlib import sha256
try:
from paver.tasks import VERSION as _PVER
if not _PVER >= '1.0':
raise RuntimeError("paver version >= 1.0 required (was %s)" % _PVER)
except ImportError as e:
raise RuntimeError("paver version >= 1.0 required") from e
import paver
import paver.doctools
import paver.path
from paver.easy import options, Bunch, task, needs, dry, sh, call_task, cmdopts
sys.path.insert(0, os.path.dirname(__file__))
try:
setup_py = __import__("setup")
FULLVERSION = setup_py.VERSION
# This is duplicated from setup.py
if os.path.exists('.git'):
GIT_REVISION = setup_py.git_version()
else:
GIT_REVISION = "Unknown"
if not setup_py.ISRELEASED:
if GIT_REVISION == "Unknown":
FULLVERSION += '.dev0+Unknown'
else:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
finally:
sys.path.pop(0)
try:
# Ensure sensible file permissions
os.umask(0o022)
except AttributeError:
# No umask on non-posix
pass
#-----------------------------------
# Things to be changed for a release
#-----------------------------------
# Source of the release notes
RELEASE = 'doc/release/1.8.0-notes.rst'
# Start/end of the log (from git)
LOG_START = 'v1.7.0'
LOG_END = 'master'
#-------------------------------------------------------
# Hardcoded build/install dirs, virtualenv options, etc.
#-------------------------------------------------------
# Default Python version
PYVER="3.9"
# Paver options object, holds all default dirs
options(bootstrap=Bunch(bootstrap_dir="bootstrap"),
virtualenv=Bunch(packages_to_install=["sphinx==1.8.5", "numpydoc"],
no_site_packages=False),
sphinx=Bunch(builddir="build", sourcedir="source", docroot='doc'),
superpack=Bunch(builddir="build-superpack",
bindir=os.path.join("build-superpack","binaries")),
installers=Bunch(releasedir="release",
installersdir=os.path.join("release", "installers")),
doc=Bunch(doc_root="doc",
sdir=os.path.join("doc", "source"),
bdir=os.path.join("doc", "build"),
bdir_latex=os.path.join("doc", "build", "latex"),
destdir_pdf=os.path.join("build_doc", "pdf")),
html=Bunch(builddir=os.path.join("build", "html")),
dmg=Bunch(python_version=PYVER),
bdist_wininst_simple=Bunch(python_version=PYVER),)
#--------------------
# Documentation tasks
#--------------------
@task
def html(options):
"""Build SciPy documentation and put it into build/docs"""
# Don't use paver html target because of scipy bootstrapping problems
subprocess.check_call(["make", "html"], cwd="doc")
builtdocs = paver.path.path("doc") / options.sphinx.builddir / "html"
options.html.builddir.rmtree()
builtdocs.copytree(options.html.builddir)
@task
def latex():
"""Build SciPy documentation in latex format."""
subprocess.check_call(["make", "latex"], cwd="doc")
@task
@needs('latex')
def pdf():
bdir_latex = options.doc.bdir_latex
destdir_pdf = options.doc.destdir_pdf
def build_pdf():
subprocess.check_call(["make", "all-pdf"], cwd=str(bdir_latex))
dry("Build pdf doc", build_pdf)
if os.path.exists(destdir_pdf):
shutil.rmtree(destdir_pdf)
os.makedirs(destdir_pdf)
ref = os.path.join(bdir_latex, "scipy-ref.pdf")
shutil.copy(ref, os.path.join(destdir_pdf, "reference.pdf"))
def tarball_name(type_name='gztar'):
root = 'scipy-%s' % FULLVERSION
if type_name == 'gztar':
return root + '.tar.gz'
elif type_name == 'xztar':
return root + '.tar.xz'
elif type_name == 'tar':
return root + '.tar'
elif type_name == 'zip':
return root + '.zip'
raise ValueError("Unknown type %s" % type_name)
@task
def sdist():
# First, clean the repo and update submodules (for up-to-date doc html theme
# and Sphinx extensions)
sh('git clean -xdf')
sh('git submodule init')
sh('git submodule update')
# Fix file permissions
sh('chmod -R a+rX *')
# To be sure to bypass paver when building sdist... paver + scipy.distutils
# do not play well together.
# Cython is run over all Cython files in setup.py, so generated C files
# will be included.
sh('python setup.py sdist --formats=gztar,zip')
sh('python setup.py sdist --formats=tar')
if os.path.exists(os.path.join('dist', tarball_name("xztar"))):
os.unlink(os.path.join('dist', tarball_name("xztar")))
sh('xz %s' % os.path.join('dist', tarball_name("tar")), ignore_error=True)
# Copy the sdists into installers dir
if not os.path.exists(options.installers.installersdir):
os.makedirs(options.installers.installersdir)
if not os.path.exists(os.path.join('dist', tarball_name("xztar"))):
warnings.warn("Could not create tar.xz! Do you have xz installed?")
else:
t = 'xztar'
source = os.path.join('dist', tarball_name(t))
target = os.path.join(options.installers.installersdir, tarball_name(t))
shutil.copy(source, target)
for t in ['gztar', 'zip']:
source = os.path.join('dist', tarball_name(t))
target = os.path.join(options.installers.installersdir, tarball_name(t))
shutil.copy(source, target)
@task
def release(options):
"""sdists, release notes and changelog. Docs and wheels are built in
separate steps (see doc/source/dev/releasing.rst).
"""
# Source tarballs
sdist()
# README (gpg signed) and Changelog
write_release_and_log()
#----------------------------
# Release notes and Changelog
#----------------------------
def compute_md5(idirs):
released = paver.path.path(idirs).listdir()
checksums = []
for fn in sorted(released):
with open(fn, 'rb') as f:
m = md5(f.read())
checksums.append('%s %s' % (m.hexdigest(), os.path.basename(fn)))
return checksums
def compute_sha256(idirs):
# better checksum so gpg signed README.txt containing the sums can be used
# to verify the binaries instead of signing all binaries
released = paver.path.path(idirs).listdir()
checksums = []
for fn in sorted(released):
with open(fn, 'rb') as f:
m = sha256(f.read())
checksums.append('%s %s' % (m.hexdigest(), os.path.basename(fn)))
return checksums
def write_release_task(options, filename='NOTES.txt'):
idirs = options.installers.installersdir
source = paver.path.path(RELEASE)
target = paver.path.path(filename)
if target.exists():
target.remove()
tmp_target = paver.path.path(filename + '.tmp')
source.copy(tmp_target)
with open(str(tmp_target), 'a') as ftarget:
ftarget.writelines("""
Checksums
=========
MD5
~~~
""")
ftarget.writelines(['%s\n' % c for c in compute_md5(idirs)])
ftarget.writelines("""
SHA256
~~~~~~
""")
ftarget.writelines(['%s\n' % c for c in compute_sha256(idirs)])
# Sign release; on some platforms gpg2 may actually
# be named gpg
cmd = ['gpg2', '--clearsign', '--armor']
if hasattr(options, 'gpg_key'):
cmd += ['--default-key', options.gpg_key]
cmd += ['--output', str(target), str(tmp_target)]
subprocess.check_call(cmd)
print("signed %s" % (target,))
tmp_target.remove()
def write_log_task(filename='Changelog'):
st = subprocess.Popen(
['git', 'log', '%s..%s' % (LOG_START, LOG_END)],
stdout=subprocess.PIPE)
out = st.communicate()[0].decode()
with open(filename, 'w') as a:
a.writelines(out)
@task
@cmdopts([('gpg_key=', 'g', 'GPG key to use for signing')])
def write_release(options):
write_release_task(options)
@task
def write_log():
write_log_task()
@task
@cmdopts([('gpg_key=', 'g', 'GPG key to use for signing')])
def write_release_and_log(options):
write_release_task(options, os.path.join(options.installers.releasedir, 'README'))
write_log_task(os.path.join(options.installers.releasedir, 'Changelog'))
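# Example invocation (a sketch; assumes paver, xz and gpg2 are available):
#
#   paver sdist                               # tarballs into release/installers
#   paver write_release_and_log -g <KEY_ID>   # signed README + Changelog in release/
#
# The -g/--gpg_key option is optional; without it gpg2 signs with its default key.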
|
{
"content_hash": "cc95806164469d3334f61ae06fa7d243",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 86,
"avg_line_length": 30.416666666666668,
"alnum_prop": 0.6190639269406393,
"repo_name": "matthew-brett/scipy",
"id": "e815a9a745ae8fb56834bb4d004a47a872bb7811",
"size": "8760",
"binary": false,
"copies": "3",
"ref": "refs/heads/polished-meson-windows",
"path": "pavement.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4818671"
},
{
"name": "C++",
"bytes": "3181034"
},
{
"name": "CMake",
"bytes": "29273"
},
{
"name": "Cython",
"bytes": "1035101"
},
{
"name": "Dockerfile",
"bytes": "9777"
},
{
"name": "Fortran",
"bytes": "5298461"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Meson",
"bytes": "133294"
},
{
"name": "PowerShell",
"bytes": "1554"
},
{
"name": "Python",
"bytes": "14259543"
},
{
"name": "Shell",
"bytes": "4415"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._private_link_resource_operations import build_get_request, build_list_by_batch_account_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkResourceOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.batch.aio.BatchManagementClient`'s
:attr:`private_link_resource` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_batch_account(
self, resource_group_name: str, account_name: str, maxresults: Optional[int] = None, **kwargs: Any
) -> AsyncIterable["_models.PrivateLinkResource"]:
"""Lists all of the private link resources in the specified account.
:param resource_group_name: The name of the resource group that contains the Batch account.
Required.
:type resource_group_name: str
:param account_name: The name of the Batch account. Required.
:type account_name: str
:param maxresults: The maximum number of items to return in the response. Default value is
None.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateLinkResource or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.batch.models.PrivateLinkResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-10-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ListPrivateLinkResourcesResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_batch_account_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
maxresults=maxresults,
api_version=api_version,
template_url=self.list_by_batch_account.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ListPrivateLinkResourcesResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_batch_account.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/privateLinkResources"} # type: ignore
@distributed_trace_async
async def get(
self, resource_group_name: str, account_name: str, private_link_resource_name: str, **kwargs: Any
) -> _models.PrivateLinkResource:
"""Gets information about the specified private link resource.
:param resource_group_name: The name of the resource group that contains the Batch account.
Required.
:type resource_group_name: str
:param account_name: The name of the Batch account. Required.
:type account_name: str
:param private_link_resource_name: The private link resource name. This must be unique within
the account. Required.
:type private_link_resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResource or the result of cls(response)
:rtype: ~azure.mgmt.batch.models.PrivateLinkResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-10-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PrivateLinkResource]
request = build_get_request(
resource_group_name=resource_group_name,
account_name=account_name,
private_link_resource_name=private_link_resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateLinkResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/privateLinkResources/{privateLinkResourceName}"} # type: ignore
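# Usage sketch (not part of the generated client; resource group, account name
# and credential are placeholders):
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.batch.aio import BatchManagementClient
#
#   async def list_private_link_resources():
#       async with BatchManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#           async for resource in client.private_link_resource.list_by_batch_account(
#               "<resource-group>", "<batch-account>"
#           ):
#               print(resource.name)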
|
{
"content_hash": "5c6346a475d1be174e00647c2a326da6",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 213,
"avg_line_length": 45.009433962264154,
"alnum_prop": 0.6435757702787676,
"repo_name": "Azure/azure-sdk-for-python",
"id": "2b417daeafa63b59050d945e5d1e217b428e2219",
"size": "10042",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/batch/azure-mgmt-batch/azure/mgmt/batch/aio/operations/_private_link_resource_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import datetime
from visbeer.services.service_helper import beginning_of_current_day, DEFAULT_CREDITS_PER_DAY
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
class DataService:
def __init__(self, flag_service):
self.flag_service = flag_service
def find_person(self, rfid):
return self.flag_service.find_person(rfid)
def is_developer(self, person):
val = self.flag_service.get_flag_value('coffee_beer', person, 'developer')
return val and len(val) >= 1
def _get_last(self, person):
raw = self.flag_service.get_flag_value('coffee_beer', person, 'last_consumption')
if not raw:
return None
return datetime.datetime.strptime(raw, DATETIME_FORMAT)
def set_last(self, person, last_time):
raw = last_time.strftime(DATETIME_FORMAT)
self.flag_service.set_flag_value('coffee_beer', person, 'last_consumption', raw)
def _get_credits_per_day(self, person):
return int(self.flag_service.get_flag_value('coffee_beer', person, 'credits_per_day'))
def _set_default_credits_per_day(self, person, default):
self.flag_service.set_default_flag_value('coffee_beer', person, 'credits_per_day', int(default))
def _get_credits(self, person):
return int(self.flag_service.get_flag_value('coffee_beer', person, 'credits'))
def set_credits(self, person, amount):
self.flag_service.set_flag_value('coffee_beer', person, 'credits', int(amount))
def has_consumed_today(self, person):
last_consumption = self._get_last(person)
if not last_consumption:
# this is the case if someone never consumed a beverage until now
return False
return last_consumption >= beginning_of_current_day()
def credits_per_day(self, person):
self._set_default_credits_per_day(person, DEFAULT_CREDITS_PER_DAY)
return self._get_credits_per_day(person)
def remaining_credits(self, person):
return self._get_credits(person)
|
{
"content_hash": "1dd4a751accdc4c5e3fbedd19438d21d",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 104,
"avg_line_length": 35.910714285714285,
"alnum_prop": 0.6618597712580806,
"repo_name": "lukaselmer/vis-beer",
"id": "2a49feae76557da57926fb9671c5608bcf29259b",
"size": "2011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visbeer/services/data_service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29759"
}
],
"symlink_target": ""
}
|
"""Computes rouge scores between two text blobs.
Implementation replicates the functionality in the original ROUGE package. See:
Lin, Chin-Yew. ROUGE: a Package for Automatic Evaluation of Summaries. In
Proceedings of the Workshop on Text Summarization Branches Out (WAS 2004),
Barcelona, Spain, July 25 - 26, 2004.
Default options are equivalent to running:
ROUGE-1.5.5.pl -e data -n 2 -a settings.xml
Or with use_stemmer=True:
ROUGE-1.5.5.pl -m -e data -n 2 -a settings.xml
In these examples settings.xml lists input files and formats.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from absl import logging
import nltk
import numpy as np
import six
from six.moves import map
from six.moves import range
from rouge import scoring
from rouge import tokenizers
class RougeScorer(scoring.BaseScorer):
"""Calculate rouges scores between two blobs of text.
Sample usage:
scorer = RougeScorer(['rouge1', 'rougeL'], use_stemmer=True)
scores = scorer.score('The quick brown fox jumps over the lazy dog',
'The quick brown dog jumps on the log.')
"""
def __init__(self, rouge_types, use_stemmer=False, split_summaries=False,
tokenizer=None):
"""Initializes a new RougeScorer.
Valid rouge types that can be computed are:
rougen (e.g. rouge1, rouge2): n-gram based scoring.
rougeL: Longest common subsequence based scoring.
Args:
rouge_types: A list of rouge types to calculate.
use_stemmer: Bool indicating whether Porter stemmer should be used to
strip word suffixes to improve matching. This arg is used in the
DefaultTokenizer, but other tokenizers might or might not choose to
use this.
split_summaries: whether to add newlines between sentences for rougeLsum
tokenizer: Tokenizer object which has a tokenize() method.
Returns:
A dict mapping rouge types to Score tuples.
"""
self.rouge_types = rouge_types
if tokenizer:
self._tokenizer = tokenizer
else:
self._tokenizer = tokenizers.DefaultTokenizer(use_stemmer)
logging.info("Using default tokenizer.")
self._split_summaries = split_summaries
def score_multi(self, targets, prediction):
"""Calculates rouge scores between targets and prediction.
The target with the maximum f-measure is used for the final score for
    each score type.
Args:
targets: list of texts containing the targets
prediction: Text containing the predicted text.
Returns:
A dict mapping each rouge type to a Score object.
Raises:
ValueError: If an invalid rouge type is encountered.
"""
score_dicts = [self.score(t, prediction) for t in targets]
max_score = {}
for k in self.rouge_types:
index = np.argmax([s[k].fmeasure for s in score_dicts])
max_score[k] = score_dicts[index][k]
return max_score
def score(self, target, prediction):
"""Calculates rouge scores between the target and prediction.
Args:
      target: Text containing the target (ground truth) text.
prediction: Text containing the predicted text.
Returns:
A dict mapping each rouge type to a Score object.
Raises:
ValueError: If an invalid rouge type is encountered.
"""
# Pre-compute target tokens and prediction tokens for use by different
# types, except if only "rougeLsum" is requested.
if len(self.rouge_types) == 1 and self.rouge_types[0] == "rougeLsum":
target_tokens = None
prediction_tokens = None
else:
target_tokens = self._tokenizer.tokenize(target)
prediction_tokens = self._tokenizer.tokenize(prediction)
result = {}
for rouge_type in self.rouge_types:
if rouge_type == "rougeL":
# Rouge from longest common subsequences.
scores = _score_lcs(target_tokens, prediction_tokens)
elif rouge_type == "rougeLsum":
# Note: Does not support multi-line text.
def get_sents(text):
if self._split_summaries:
sents = nltk.sent_tokenize(text)
else:
# Assume sentences are separated by newline.
sents = six.ensure_str(text).split("\n")
sents = [x for x in sents if len(x)]
return sents
target_tokens_list = [
self._tokenizer.tokenize(s) for s in get_sents(target)]
prediction_tokens_list = [
self._tokenizer.tokenize(s) for s in get_sents(prediction)]
scores = _summary_level_lcs(target_tokens_list,
prediction_tokens_list)
elif re.match(r"rouge[0-9]$", six.ensure_str(rouge_type)):
# Rouge from n-grams.
n = int(rouge_type[5:])
if n <= 0:
raise ValueError("rougen requires positive n: %s" % rouge_type)
target_ngrams = _create_ngrams(target_tokens, n)
prediction_ngrams = _create_ngrams(prediction_tokens, n)
scores = _score_ngrams(target_ngrams, prediction_ngrams)
else:
raise ValueError("Invalid rouge type: %s" % rouge_type)
result[rouge_type] = scores
return result
def _create_ngrams(tokens, n):
"""Creates ngrams from the given list of tokens.
Args:
tokens: A list of tokens from which ngrams are created.
n: Number of tokens to use, e.g. 2 for bigrams.
Returns:
    A dictionary mapping each ngram to the number of occurrences.
"""
ngrams = collections.Counter()
for ngram in (tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)):
ngrams[ngram] += 1
return ngrams
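# Illustrative note (not from the original file): _create_ngrams(["the",
# "quick", "brown"], 2) returns Counter({("the", "quick"): 1,
# ("quick", "brown"): 1}).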
def _score_lcs(target_tokens, prediction_tokens):
"""Computes LCS (Longest Common Subsequence) rouge scores.
Args:
target_tokens: Tokens from the target text.
prediction_tokens: Tokens from the predicted text.
Returns:
A Score object containing computed scores.
"""
if not target_tokens or not prediction_tokens:
return scoring.Score(precision=0, recall=0, fmeasure=0)
  # Compute length of LCS from the bottom up in a table (DP approach).
lcs_table = _lcs_table(target_tokens, prediction_tokens)
lcs_length = lcs_table[-1][-1]
precision = lcs_length / len(prediction_tokens)
recall = lcs_length / len(target_tokens)
fmeasure = scoring.fmeasure(precision, recall)
return scoring.Score(precision=precision, recall=recall, fmeasure=fmeasure)
def _lcs_table(ref, can):
"""Create 2-d LCS score table."""
rows = len(ref)
cols = len(can)
lcs_table = [[0] * (cols + 1) for _ in range(rows + 1)]
for i in range(1, rows + 1):
for j in range(1, cols + 1):
if ref[i - 1] == can[j - 1]:
lcs_table[i][j] = lcs_table[i - 1][j - 1] + 1
else:
lcs_table[i][j] = max(lcs_table[i - 1][j], lcs_table[i][j - 1])
return lcs_table
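# Worked example (not from the original file): for ref = ["a", "b", "c"] and
# can = ["a", "c"], the bottom-right cell of the table is 2 (LCS "a c"), so
# _score_lcs reports precision 2/2, recall 2/3 and the corresponding f-measure.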
def _backtrack_norec(t, ref, can):
"""Read out LCS."""
i = len(ref)
j = len(can)
lcs = []
while i > 0 and j > 0:
if ref[i - 1] == can[j - 1]:
lcs.insert(0, i-1)
i -= 1
j -= 1
elif t[i][j - 1] > t[i - 1][j]:
j -= 1
else:
i -= 1
return lcs
def _summary_level_lcs(ref_sent, can_sent):
"""ROUGE: Summary-level LCS, section 3.2 in ROUGE paper.
Args:
ref_sent: list of tokenized reference sentences
can_sent: list of tokenized candidate sentences
Returns:
summary level ROUGE score
"""
if not ref_sent or not can_sent:
return scoring.Score(precision=0, recall=0, fmeasure=0)
m = sum(map(len, ref_sent))
n = sum(map(len, can_sent))
if not n or not m:
return scoring.Score(precision=0, recall=0, fmeasure=0)
# get token counts to prevent double counting
token_cnts_r = collections.Counter()
token_cnts_c = collections.Counter()
for s in ref_sent:
# s is a list of tokens
token_cnts_r.update(s)
for s in can_sent:
token_cnts_c.update(s)
hits = 0
for r in ref_sent:
lcs = _union_lcs(r, can_sent)
# Prevent double-counting:
# The paper describes just computing hits += len(_union_lcs()),
# but the implementation prevents double counting. We also
# implement this as in version 1.5.5.
for t in lcs:
if token_cnts_c[t] > 0 and token_cnts_r[t] > 0:
hits += 1
token_cnts_c[t] -= 1
token_cnts_r[t] -= 1
recall = hits / m
precision = hits / n
fmeasure = scoring.fmeasure(precision, recall)
return scoring.Score(precision=precision, recall=recall, fmeasure=fmeasure)
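# Sketch of the union-LCS step above (not from the original file): with one
# reference sentence r = ["w1", "w2", "w3"] and candidate sentences
# c1 = ["w1"] and c2 = ["w2", "w3"], lcs_ind gives reference indices {0} and
# {1, 2}; their union covers all three reference tokens, so this sentence
# contributes 3 hits before the token-count clipping applied above.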
def _union_lcs(ref, c_list):
"""Find union LCS between a ref sentence and list of candidate sentences.
Args:
ref: list of tokens
    c_list: list of tokenized candidate sentences
Returns:
List of tokens in ref representing union LCS.
"""
lcs_list = [lcs_ind(ref, c) for c in c_list]
return [ref[i] for i in _find_union(lcs_list)]
def _find_union(lcs_list):
"""Finds union LCS given a list of LCS."""
return sorted(list(set().union(*lcs_list)))
def lcs_ind(ref, can):
"""Returns one of the longest lcs."""
t = _lcs_table(ref, can)
return _backtrack_norec(t, ref, can)
def _score_ngrams(target_ngrams, prediction_ngrams):
"""Compute n-gram based rouge scores.
Args:
target_ngrams: A Counter object mapping each ngram to number of
occurrences for the target text.
prediction_ngrams: A Counter object mapping each ngram to number of
occurrences for the prediction text.
Returns:
A Score object containing computed scores.
"""
intersection_ngrams_count = 0
for ngram in six.iterkeys(target_ngrams):
intersection_ngrams_count += min(target_ngrams[ngram],
prediction_ngrams[ngram])
target_ngrams_count = sum(target_ngrams.values())
prediction_ngrams_count = sum(prediction_ngrams.values())
precision = intersection_ngrams_count / max(prediction_ngrams_count, 1)
recall = intersection_ngrams_count / max(target_ngrams_count, 1)
fmeasure = scoring.fmeasure(precision, recall)
return scoring.Score(precision=precision, recall=recall, fmeasure=fmeasure)
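# End-to-end usage sketch (mirrors the class docstring; the texts are made up):
#
#   scorer = RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
#   single = scorer.score("the cat sat", "the cat sat down")
#   multi = scorer.score_multi(["the cat sat", "a cat was sitting"],
#                              "the cat sat down")
#   # each value is a scoring.Score(precision, recall, fmeasure) namedtuple;
#   # score_multi keeps, per rouge type, the target with the best f-measure.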
|
{
"content_hash": "43e91f50abd03d4f4a08ffe05e778fbc",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 79,
"avg_line_length": 31.58125,
"alnum_prop": 0.6611913714624975,
"repo_name": "google-research/google-research",
"id": "329be65e948ae9b8b2bd8a59954a9e99ea639b40",
"size": "10714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rouge/rouge_scorer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
    """ Set X and Y appropriately and check inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
    """ Set X and Y appropriately and check inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
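# Illustrative sketch (not part of the original module): a quick numerical check
# that the ||x||^2 - 2 x.y + ||y||^2 expansion used above matches a direct
# computation. The helper name _euclidean_expansion_demo is ours.
def _euclidean_expansion_demo():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    Y = rng.rand(5, 3)
    direct = np.sqrt(((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(-1))
    assert np.allclose(euclidean_distances(X, Y), direct)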
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None,
check_X_y=True):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
check_X_y : bool, default True
Whether or not to check X and y for shape, validity and dtype. Speed
improvements possible if set to False when called repeatedly.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
if check_X_y:
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
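# Illustrative sketch (not part of the original module): pairwise_distances_argmin_min
# agrees with the naive argmin/min over the full distance matrix. The helper name
# _argmin_min_demo is ours.
def _argmin_min_demo():
    rng = np.random.RandomState(0)
    X = rng.rand(6, 2)
    Y = rng.rand(4, 2)
    argmin, dist = pairwise_distances_argmin_min(X, Y)
    full = euclidean_distances(X, Y)
    assert np.array_equal(argmin, full.argmin(axis=1))
    assert np.allclose(dist, full.min(axis=1))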
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
                              batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
    ----------
X, Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
Returns
    -------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
    --------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return np.sqrt(((X - Y) ** 2).sum(axis=-1))
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return np.abs(X - Y).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
    -----
    The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
X_normalized = normalize(X, copy=True)
X_normalized -= normalize(Y, copy=True)
return .5 * (X_normalized ** 2).sum(axis=-1)
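# Illustrative sketch (not part of the original module): the note above claims the
# cosine distance equals half the squared euclidean distance between L2-normalized
# rows; this checks that numerically. The helper name _paired_cosine_demo is ours.
def _paired_cosine_demo():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    Y = rng.rand(5, 3)
    d = paired_cosine_distances(X, Y)
    expected = .5 * paired_euclidean_distances(normalize(X), normalize(Y)) ** 2
    assert np.allclose(d, expected)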
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances,
}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Parameters
----------
X, Y : ndarray (n_samples, n_features)
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
    degree : int, default 3
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
    coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
K **= degree
return K
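# Illustrative sketch (not part of the original module): the polynomial kernel above
# is just (gamma * <x, y> + coef0) ** degree applied entrywise to the Gram matrix.
# The helper name _polynomial_kernel_demo is ours.
def _polynomial_kernel_demo():
    rng = np.random.RandomState(0)
    X = rng.rand(3, 4)
    Y = rng.rand(2, 4)
    K = polynomial_kernel(X, Y, degree=2, gamma=0.5, coef0=1)
    expected = (0.5 * np.dot(X, Y.T) + 1) ** 2
    assert np.allclose(K, expected)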
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
    coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
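# Illustrative sketch (not part of the original module): the RBF kernel above is the
# elementwise exponential of -gamma times the squared euclidean distance. The helper
# name _rbf_kernel_demo is ours.
def _rbf_kernel_demo():
    rng = np.random.RandomState(0)
    X = rng.rand(3, 4)
    Y = rng.rand(2, 4)
    K = rbf_kernel(X, Y, gamma=0.1)
    expected = np.exp(-0.1 * euclidean_distances(X, Y, squared=True))
    assert np.allclose(K, expected)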
def cosine_similarity(X, Y=None):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = linear_kernel(X_normalized, Y_normalized)
return K
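# Illustrative sketch (not part of the original module): on L2-normalized rows the
# cosine similarity above reduces to a plain dot product (linear_kernel). The helper
# name _cosine_similarity_demo is ours.
def _cosine_similarity_demo():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    Xn = normalize(X)
    assert np.allclose(cosine_similarity(X), linear_kernel(Xn, Xn))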
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
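# Illustrative sketch (not part of the original module): chi2_kernel above is the
# elementwise exponential of gamma times the (negative-valued) additive chi-squared
# kernel. The helper name _chi2_kernel_demo is ours.
def _chi2_kernel_demo():
    rng = np.random.RandomState(0)
    X = rng.rand(3, 4)  # chi-squared kernels require non-negative features
    Y = rng.rand(2, 4)
    K = chi2_kernel(X, Y, gamma=0.5)
    expected = np.exp(0.5 * additive_chi2_kernel(X, Y))
    assert np.allclose(K, expected)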
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
ret = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(func)(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate distance for each element in X and Y.
# FIXME: can use n_jobs here too
# FIXME: np.zeros can be replaced by np.empty
D = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# distance assumed to be symmetric.
D[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
D[j][i] = D[i][j]
return D
else:
# Note: the distance module doesn't support sparse matrices!
if type(X) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
if Y is None:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
else:
if type(Y) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
return distance.cdist(X, Y, metric=metric, **kwds)
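# Illustrative sketch (not part of the original module): a few representative calls
# to pairwise_distances, including the callable-metric branch handled above. The
# helper name _pairwise_distances_demo is ours.
def _pairwise_distances_demo():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    Y = rng.rand(2, 3)
    # Built-in metric dispatched through PAIRWISE_DISTANCE_FUNCTIONS.
    D1 = pairwise_distances(X, Y, metric="manhattan")
    # Equivalent user-supplied callable; slower, but exercises the generic loop.
    D2 = pairwise_distances(X, Y, metric=lambda a, b: np.abs(a - b).sum())
    assert np.allclose(D1, D2)
    # With metric="precomputed" the input is returned unchanged.
    assert pairwise_distances(D1, metric="precomputed") is D1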
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
    "additive_chi2": (),
    "chi2": frozenset(["gamma"]),
    "cosine": (),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate kernel for each element in X and Y.
K = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# Kernel assumed to be symmetric.
K[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
K[j][i] = K[i][j]
return K
else:
raise ValueError("Unknown kernel %r" % metric)
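# Illustrative sketch (not part of the original module): pairwise_kernels dispatching
# by name, with filter_params dropping keyword arguments the chosen kernel does not
# accept. The helper name _pairwise_kernels_demo is ours.
def _pairwise_kernels_demo():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    K_linear = pairwise_kernels(X, metric="linear")
    assert np.allclose(K_linear, linear_kernel(X))
    # filter_params=True drops 'degree', which rbf_kernel does not accept.
    K_rbf = pairwise_kernels(X, metric="rbf", filter_params=True,
                             gamma=0.5, degree=3)
    assert np.allclose(K_rbf, rbf_kernel(X, gamma=0.5))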
|
{
"content_hash": "db5c44b1b4447c05517bea8162ba3359",
"timestamp": "",
"source": "github",
"line_count": 1215,
"max_line_length": 81,
"avg_line_length": 33.74074074074074,
"alnum_prop": 0.6097816806927674,
"repo_name": "0x0all/scikit-learn",
"id": "3543254b47ef02deb9646b04480f09f73aee5059",
"size": "41361",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sklearn/metrics/pairwise.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18557197"
},
{
"name": "C++",
"bytes": "1810938"
},
{
"name": "CSS",
"bytes": "1503"
},
{
"name": "JavaScript",
"bytes": "20564"
},
{
"name": "Makefile",
"bytes": "4897"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5663133"
},
{
"name": "Shell",
"bytes": "8730"
}
],
"symlink_target": ""
}
|