index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
3,200 | 7c004cb0c9eefa5e88f5085fb3b2878db98d2b20 | """
This class runs the RL Training
"""
from __future__ import division
import logging
import numpy as np
from data.data_provider import DataProvider
from episode.episode import Episode
from tracker import TrainingTracker
from tqdm import tqdm
class RLTrainer(object):
    """
    Runs RL training over a sequence of episodes and tracks the best one.

    Learned tables (q_ind, r_table, xi_matrix) are threaded from one episode
    into the next; a TrainingTracker records per-episode statistics.
    """
    def __init__(self, config_, grid_search=False):
        """
        Constructor
        :param config_: configuration dict; must contain an 'RL_parameters' section
        :param grid_search: True when driven by a grid-search harness
        :return:
        """
        self.config = config_
        self.grid_search = grid_search
        self.logger = logging.getLogger("cuda_logger")
        self.expt_name = self.config['RL_parameters']['experiment']
        self.objective = self.config['RL_parameters']['objective']
        self.city_states_filename = self.config['RL_parameters']['city_states_filename']
        # Create training tracker
        self.training_tracker = TrainingTracker(self.config)

    def run(self):
        """
        Creates and runs training episodes.
        :return: (best_episode_tracker, best_model dict, training_tracker)
        """
        data_provider = DataProvider(self.config)
        hex_attr_df = data_provider.read_hex_bin_attributes()
        hex_distance_df = data_provider.read_hex_bin_distances()
        city_states = data_provider.read_city_states(self.city_states_filename)
        neighborhood = data_provider.read_neighborhood_data()
        popular_bins = data_provider.read_popular_hex_bins()
        num_episodes = self.config['RL_parameters']['num_episodes']
        ind_episodes = self.config['RL_parameters']['ind_episodes']
        exp_decay_multiplier = self.config['RL_parameters']['exp_decay_multiplier']

        # Tables learned so far; carried from episode to episode.
        q_ind = None
        r_table = None
        xi_matrix = None

        best_episode = None
        best_model = {}
        # BUG FIX: this must be initialized *outside* the episode loop.
        # Previously it was reset to -1000000 every iteration, so every
        # episode trivially beat it and "best" always meant "latest".
        current_best = float('-inf')

        progress_bar = tqdm(xrange(num_episodes))
        for episode_id in progress_bar:
            progress_bar.set_description("Episode: {}".format(episode_id))
            # Exploration factor decays exponentially with the episode index.
            ind_exploration_factor = np.e ** (-1 * episode_id * exp_decay_multiplier / ind_episodes)

            # Create episode
            episode = Episode(self.config,
                              episode_id,
                              ind_exploration_factor,
                              hex_attr_df,
                              hex_distance_df,
                              city_states,
                              neighborhood,
                              popular_bins,
                              q_ind,
                              r_table,
                              xi_matrix)

            # Run episode and carry the learned tables forward.
            tables = episode.run()
            q_ind = tables['q_ind']
            r_table = tables['r_table']
            xi_matrix = tables['xi_matrix']
            episode_tracker = tables['episode_tracker']

            # Per-episode logging is intentionally disabled: when enabled for
            # every episode it produces an unusably large log. Re-enable only
            # for batch jobs, e.g.:
            #   self.logger.info("Expt: {} Episode: {} Earnings: {} ...")

            self.training_tracker.update_RL_tracker(
                episode_id, episode_tracker.gross_earnings,
                episode_tracker.successful_waits, episode_tracker.unsuccessful_waits,
                episode_tracker.unmet_demand, episode_tracker.relocation_rides,
                episode_tracker.DET, episode_tracker.DPRT, episode_tracker.DWT,
                episode_tracker.DRT, episode_tracker.DCT)

            # Score this episode according to the configured objective.
            if self.objective == 'revenue':
                score = episode_tracker.gross_earnings
            else:  # self.objective == 'pickups'
                score = episode_tracker.successful_waits

            # Keep track of the best episode, and snapshot the model that
            # produced it (BUG FIX: previously the model was overwritten on
            # every episode regardless of whether it was the best one).
            if score >= current_best:
                current_best = score
                best_episode = episode_tracker
                best_model['ind_exploration_factor'] = ind_exploration_factor
                best_model['config'] = self.config
                best_model['q_ind'] = q_ind
                best_model['r_table'] = r_table
                best_model['xi_matrix'] = xi_matrix
                best_model['training_tracker'] = self.training_tracker

        # After finishing training (guard against num_episodes == 0).
        if best_episode is not None:
            self.logger.info("Expt: {} Earnings: {} Met Demand: {} Unmet Demand: {}".format(self.expt_name,
                                                                                            best_episode.gross_earnings,
                                                                                            best_episode.successful_waits,
                                                                                            best_episode.unmet_demand))
        return best_episode, best_model, self.training_tracker
|
3,201 | 120021e44f6df9745db35ea2f38f25acecca9252 | # Copyright 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
import json
import netaddr
from time import sleep
import uuid
from proboscis import after_class
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_true
from proboscis.asserts import fail
from proboscis import before_class
from proboscis.decorators import time_out
from proboscis import SkipTest
from proboscis import test
from troveclient.compat import exceptions
from trove.common.utils import poll_until
from trove import tests
from trove.tests.api.instances import assert_unprocessable
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import InstanceTestInfo
from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE
from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE
from trove.tests.config import CONFIG
from trove.tests.util.check import AttrCheck
from trove.tests.util.check import CollectionCheck
from trove.tests.util.check import TypeCheck
from trove.tests.util import create_dbaas_client
from trove.tests.util.mysql import create_mysql_connection
from trove.tests.util.users import Requirements
# Name/description used for every configuration group created by these tests.
CONFIG_NAME = "test_configuration"
CONFIG_DESC = "configuration description"

# Cross-test globals, populated as the proboscis groups run in order.
configuration_default = None  # default config template fetched from an instance
configuration_info = None  # the configuration group created by the tests
configuration_href = None  # link to configuration_info, taken from instance view
configuration_instance = InstanceTestInfo()  # instance launched WITH a config
configuration_instance_id = None

# MySQL variables exercised by the tests; sql_variables[1] (connect_timeout)
# is deleted and later restored by the DeleteConfigurations group.
sql_variables = [
    'key_buffer_size',
    'connect_timeout',
    'join_buffer_size',
]
def _is_valid_timestamp(time_string):
try:
datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S")
except ValueError:
return False
return True
# helper methods to validate configuration is applied to instance
def _execute_query(host, user_name, password, query):
    """Run *query* on the MySQL server at *host* and return its result set."""
    print("Starting to query database, host: %s, user: %s, password: %s, "
          "query: %s" % (host, user_name, password, query))
    with create_mysql_connection(host, user_name, password) as connection:
        rows = connection.execute(query)
    return rows
def _get_address(instance_id):
    """Return the first IPv4 address of the instance, or fail the test."""
    shown = instance_info.dbaas_admin.mgmt.instances.show(instance_id)
    for candidate in shown.ip:
        if netaddr.valid_ipv4(candidate):
            return str(candidate)
    fail("No IPV4 ip found")
def _test_configuration_is_applied_to_instance(instance, configuration_id):
    """Verify via SQL that every value in the configuration group is actually
    set on the MySQL server backing *instance*.

    :param instance: test-info object with .id and .users
    :param configuration_id: id of the configuration expected on the instance
    :raises SkipTest: in fake mode, where no real MySQL server exists
    """
    if CONFIG.fake_mode:
        raise SkipTest("configuration from sql does not work in fake mode")
    instance_test = instance_info.dbaas.instances.get(instance.id)
    assert_equal(configuration_id, instance_test.configuration['id'])
    if configuration_id:
        testconfig_info = instance_info.dbaas.configurations.get(
            configuration_id)
    else:
        # No explicit config: fall back to the instance's default template.
        testconfig_info = instance_info.dbaas.instance.configuration(
            instance.id)
        testconfig_info['configuration']
    # The instance must be listed under the configuration group.
    conf_instances = instance_info.dbaas.configurations.instances(
        configuration_id)
    config_instance_ids = [inst.id for inst in conf_instances]
    assert_true(instance_test.id in config_instance_ids)
    cfg_names = testconfig_info.values.keys()
    host = _get_address(instance.id)
    # Query the live server as each defined user and compare variable values.
    for user in instance.users:
        username = user['name']
        password = user['password']
        concat_variables = "','".join(cfg_names)
        query = ("show variables where Variable_name "
                 "in ('%s');" % concat_variables)
        actual_values = _execute_query(host, username, password, query)
        print("actual_values %s" % actual_values)
        print("testconfig_info.values %s" % testconfig_info.values)
        assert_true(len(actual_values) == len(cfg_names))
        # check the configs exist
        attrcheck = AttrCheck()
        allowed_attrs = [actual_key for actual_key, actual_value in actual_values]
        attrcheck.contains_allowed_attrs(
            testconfig_info.values, allowed_attrs,
            msg="Configurations parameters")

        def _get_parameter_type(name):
            # Look up the declared type of a parameter via the API; used below
            # to decide whether string values must be cast to int.
            instance_info.dbaas.configuration_parameters.get_parameter(
                instance_info.dbaas_datastore,
                instance_info.dbaas_datastore_version,
                name)
            resp, body = instance_info.dbaas.client.last_response
            print(resp)
            print(body)
            return json.loads(body.decode())['type']

        # check the config values are correct
        for key, value in actual_values:
            key_type = _get_parameter_type(key)
            # mysql returns 'ON' and 'OFF' for True and False respectively
            if value == 'ON':
                converted_key_value = (str(key), 1)
            elif value == 'OFF':
                converted_key_value = (str(key), 0)
            else:
                if key_type == 'integer':
                    value = int(value)
                converted_key_value = (str(key), value)
            print("converted_key_value: %s" % str(converted_key_value))
            assert_true(converted_key_value in testconfig_info.values.items())
class ConfigurationsTestBase(object):
    """Shared helpers for the configuration-group test classes."""

    @staticmethod
    def expected_instance_datastore_configs(instance_id):
        """Given an instance retrieve the expected test configurations for
        instance's datastore.
        """
        inst = instance_info.dbaas.instances.get(instance_id)
        ds_type = inst.datastore['type']
        return CONFIG.get(ds_type, {}).get("configurations", {})

    @staticmethod
    def expected_default_datastore_configs():
        """Returns the expected test configurations for the default datastore
        defined in the Test Config as dbaas_datastore.
        """
        default_ds = CONFIG.get('dbaas_datastore', None)
        return CONFIG.get(default_ds, {}).get("configurations", {})
@test(depends_on_groups=[tests.DBAAS_API_BACKUPS],
      groups=[tests.DBAAS_API_CONFIGURATIONS])
class CreateConfigurations(ConfigurationsTestBase):
    """Exercises creation and validation of configuration groups.

    test_valid_configurations_create stores the created group in the module
    global ``configuration_info`` for the later test classes.
    """

    @test
    def test_expected_configurations_parameters(self):
        """Test get expected configurations parameters."""
        allowed_attrs = ["configuration-parameters"]
        instance_info.dbaas.configuration_parameters.parameters(
            instance_info.dbaas_datastore,
            instance_info.dbaas_datastore_version)
        resp, body = instance_info.dbaas.client.last_response
        attrcheck = AttrCheck()
        config_parameters_dict = json.loads(body.decode())
        attrcheck.contains_allowed_attrs(
            config_parameters_dict, allowed_attrs,
            msg="Configurations parameters")
        # sanity check that a few options are in the list
        config_params_list = config_parameters_dict['configuration-parameters']
        config_param_keys = []
        for param in config_params_list:
            config_param_keys.append(param['name'])
        expected_configs = self.expected_default_datastore_configs()
        expected_config_params = expected_configs.get('parameters_list')
        # check for duplicate configuration parameters
        msg = "check for duplicate configuration parameters"
        assert_equal(len(config_param_keys), len(set(config_param_keys)), msg)
        for expected_config_item in expected_config_params:
            assert_true(expected_config_item in config_param_keys)

    @test
    def test_expected_get_configuration_parameter(self):
        # tests get on a single parameter to verify it has expected attributes
        param_name = 'key_buffer_size'
        allowed_config_params = ['name', 'restart_required',
                                 'max', 'min', 'type',
                                 'deleted', 'deleted_at',
                                 'datastore_version_id']
        param = instance_info.dbaas.configuration_parameters.get_parameter(
            instance_info.dbaas_datastore,
            instance_info.dbaas_datastore_version,
            param_name)
        resp, body = instance_info.dbaas.client.last_response
        print("params: %s" % param)
        print("resp: %s" % resp)
        print("body: %s" % body)
        attrcheck = AttrCheck()
        config_parameter_dict = json.loads(body.decode())
        print("config_parameter_dict: %s" % config_parameter_dict)
        attrcheck.contains_allowed_attrs(
            config_parameter_dict,
            allowed_config_params,
            msg="Get Configuration parameter")
        assert_equal(param_name, config_parameter_dict['name'])
        with TypeCheck('ConfigurationParameter', param) as parameter:
            parameter.has_field('name', str)
            parameter.has_field('restart_required', bool)
            parameter.has_field('max', int)
            parameter.has_field('min', int)
            parameter.has_field('type', str)
            parameter.has_field('datastore_version_id', str)

    @test
    def test_configurations_create_invalid_values(self):
        """Test create configurations with invalid values."""
        # NOTE(review): if no exception is raised the test silently passes;
        # the server is expected to reject the unknown parameter with a 422.
        values = '{"this_is_invalid": 123}'
        try:
            instance_info.dbaas.configurations.create(
                CONFIG_NAME,
                values,
                CONFIG_DESC)
        except exceptions.UnprocessableEntity:
            resp, body = instance_info.dbaas.client.last_response
            assert_equal(resp.status, 422)

    @test
    def test_configurations_create_invalid_value_type(self):
        """Test create configuration with invalid value type."""
        values = '{"key_buffer_size": "this is a string not int"}'
        assert_unprocessable(instance_info.dbaas.configurations.create,
                             CONFIG_NAME, values, CONFIG_DESC)

    @test
    def test_configurations_create_value_out_of_bounds(self):
        """Test create configuration with value out of bounds."""
        expected_configs = self.expected_default_datastore_configs()
        values = json.dumps(expected_configs.get('out_of_bounds_over'))
        assert_unprocessable(instance_info.dbaas.configurations.create,
                             CONFIG_NAME, values, CONFIG_DESC)
        values = json.dumps(expected_configs.get('out_of_bounds_under'))
        assert_unprocessable(instance_info.dbaas.configurations.create,
                             CONFIG_NAME, values, CONFIG_DESC)

    @test
    def test_valid_configurations_create(self):
        """create a configuration with valid parameters from config."""
        expected_configs = self.expected_default_datastore_configs()
        values = json.dumps(expected_configs.get('valid_values'))
        expected_values = json.loads(values)
        result = instance_info.dbaas.configurations.create(
            CONFIG_NAME,
            values,
            CONFIG_DESC,
            datastore=instance_info.dbaas_datastore,
            datastore_version=instance_info.dbaas_datastore_version)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 200)
        with TypeCheck('Configuration', result) as configuration:
            configuration.has_field('name', str)
            configuration.has_field('description', str)
            configuration.has_field('values', dict)
            configuration.has_field('datastore_name', str)
            configuration.has_field('datastore_version_id', str)
            configuration.has_field('datastore_version_name', str)
        # Publish the created group for all subsequent test classes.
        global configuration_info
        configuration_info = result
        assert_equal(configuration_info.name, CONFIG_NAME)
        assert_equal(configuration_info.description, CONFIG_DESC)
        assert_equal(configuration_info.values, expected_values)

    @test(runs_after=[test_valid_configurations_create])
    def test_appending_to_existing_configuration(self):
        """test_appending_to_existing_configuration"""
        # test being able to update and insert new parameter name and values
        # to an existing configuration
        expected_configs = self.expected_default_datastore_configs()
        values = json.dumps(expected_configs.get('appending_values'))
        # ensure updated timestamp is different than created
        if not CONFIG.fake_mode:
            sleep(1)
        instance_info.dbaas.configurations.edit(configuration_info.id,
                                                values)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 200)
@test(depends_on_classes=[CreateConfigurations],
      groups=[tests.DBAAS_API_CONFIGURATIONS])
class AfterConfigurationsCreation(ConfigurationsTestBase):
    """Attaches the created configuration to the shared test instance and
    validates the attachment from both the instance and config sides."""

    @test
    def test_assign_configuration_to_invalid_instance(self):
        """test assigning to an instance that does not exist"""
        invalid_id = "invalid-inst-id"
        try:
            instance_info.dbaas.instances.modify(invalid_id,
                                                 configuration_info.id)
        except exceptions.NotFound:
            resp, body = instance_info.dbaas.client.last_response
            assert_equal(resp.status, 404)

    @test
    def test_assign_configuration_to_valid_instance(self):
        """test assigning a configuration to an instance"""
        print("instance_info.id: %s" % instance_info.id)
        print("configuration_info: %s" % configuration_info)
        print("configuration_info.id: %s" % configuration_info.id)
        config_id = configuration_info.id
        instance_info.dbaas.instances.modify(instance_info.id,
                                             configuration=config_id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 202)

    @test(depends_on=[test_assign_configuration_to_valid_instance])
    def test_assign_configuration_to_instance_with_config(self):
        """test assigning a configuration to an instance conflicts"""
        config_id = configuration_info.id
        assert_raises(exceptions.BadRequest,
                      instance_info.dbaas.instances.modify, instance_info.id,
                      configuration=config_id)

    @test(depends_on=[test_assign_configuration_to_valid_instance])
    @time_out(30)
    def test_get_configuration_details_from_instance_validation(self):
        """validate the configuration after attaching"""
        print("instance_info.id: %s" % instance_info.id)
        inst = instance_info.dbaas.instances.get(instance_info.id)
        configuration_id = inst.configuration['id']
        print("configuration_info: %s" % configuration_id)
        assert_not_equal(None, configuration_id)
        _test_configuration_is_applied_to_instance(instance_info,
                                                   configuration_id)

    @test(depends_on=[test_get_configuration_details_from_instance_validation])
    def test_configurations_get(self):
        """test that the instance shows up on the assigned configuration"""
        result = instance_info.dbaas.configurations.get(configuration_info.id)
        assert_equal(configuration_info.id, result.id)
        assert_equal(configuration_info.name, result.name)
        assert_equal(configuration_info.description, result.description)
        # check the result field types
        with TypeCheck("configuration", result) as check:
            check.has_field("id", str)
            check.has_field("name", str)
            check.has_field("description", str)
            check.has_field("values", dict)
            check.has_field("created", str)
            check.has_field("updated", str)
            check.has_field("instance_count", int)
        print(result.values)
        # check for valid timestamps
        assert_true(_is_valid_timestamp(result.created))
        assert_true(_is_valid_timestamp(result.updated))
        # check that created and updated timestamps differ, since
        # test_appending_to_existing_configuration should have changed the
        # updated timestamp
        if not CONFIG.fake_mode:
            assert_not_equal(result.created, result.updated)
        assert_equal(result.instance_count, 1)
        with CollectionCheck("configuration_values", result.values) as check:
            # check each item has the correct type according to the rules
            for (item_key, item_val) in result.values.items():
                print("item_key: %s" % item_key)
                print("item_val: %s" % item_val)
                dbaas = instance_info.dbaas
                param = dbaas.configuration_parameters.get_parameter(
                    instance_info.dbaas_datastore,
                    instance_info.dbaas_datastore_version,
                    item_key)
                if param.type == 'integer':
                    check.has_element(item_key, int)
                if param.type == 'string':
                    check.has_element(item_key, str)
                if param.type == 'boolean':
                    check.has_element(item_key, bool)
        # Test to make sure that another user is not able to GET this config
        reqs = Requirements(is_admin=False)
        test_auth_user = instance_info.user.auth_user
        other_user = CONFIG.users.find_user(reqs, black_list=[test_auth_user])
        other_user_tenant_id = other_user.tenant_id
        client_tenant_id = instance_info.user.tenant_id
        # If the chosen user shares our tenant, pick a different one so the
        # cross-tenant 404 check is meaningful.
        if other_user_tenant_id == client_tenant_id:
            other_user = CONFIG.users.find_user(
                reqs, black_list=[instance_info.user.auth_user,
                                  other_user])
        print(other_user)
        print(other_user.__dict__)
        other_client = create_dbaas_client(other_user)
        assert_raises(exceptions.NotFound, other_client.configurations.get,
                      configuration_info.id)
@test(depends_on_classes=[AfterConfigurationsCreation],
      groups=[tests.DBAAS_API_CONFIGURATIONS])
class ListConfigurations(ConfigurationsTestBase):
    """List/detail views of configurations, plus updating a non-dynamic
    parameter and restarting the instance to apply it.

    test_configurations_list_for_instance also captures the module global
    ``configuration_href`` used by StartInstanceWithConfiguration.
    """

    @test
    def test_configurations_list(self):
        # test listing configurations show up
        result = instance_info.dbaas.configurations.list()
        for conf in result:
            with TypeCheck("Configuration", conf) as check:
                check.has_field('id', str)
                check.has_field('name', str)
                check.has_field('description', str)
                check.has_field('datastore_version_id', str)
                check.has_field('datastore_version_name', str)
                check.has_field('datastore_name', str)
        exists = [config for config in result if
                  config.id == configuration_info.id]
        assert_equal(1, len(exists))
        configuration = exists[0]
        assert_equal(configuration.id, configuration_info.id)
        assert_equal(configuration.name, configuration_info.name)
        assert_equal(configuration.description, configuration_info.description)

    @test
    def test_configurations_list_for_instance(self):
        # test getting an instance shows the configuration assigned shows up
        instance = instance_info.dbaas.instances.get(instance_info.id)
        assert_equal(instance.configuration['id'], configuration_info.id)
        assert_equal(instance.configuration['name'], configuration_info.name)
        # expecting two things in links, href and bookmark
        assert_equal(2, len(instance.configuration['links']))
        link = instance.configuration['links'][0]
        global configuration_href
        configuration_href = link['href']

    @test
    def test_get_default_configuration_on_instance(self):
        # test the api call to get the default template of an instance exists
        result = instance_info.dbaas.instances.configuration(instance_info.id)
        global configuration_default
        configuration_default = result
        assert_not_equal(None, result.configuration)

    @test
    def test_changing_configuration_with_nondynamic_parameter(self):
        """test_changing_configuration_with_nondynamic_parameter"""
        expected_configs = self.expected_default_datastore_configs()
        values = json.dumps(expected_configs.get('nondynamic_parameter'))
        instance_info.dbaas.configurations.update(configuration_info.id,
                                                  values)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 202)
        instance_info.dbaas.configurations.get(configuration_info.id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 200)

    @test(depends_on=[test_changing_configuration_with_nondynamic_parameter])
    @time_out(20)
    def test_waiting_for_instance_in_restart_required(self):
        """test_waiting_for_instance_in_restart_required"""
        # A non-dynamic parameter change must push the instance out of the
        # running state into RESTART_REQUIRED.
        def result_is_not_active():
            instance = instance_info.dbaas.instances.get(
                instance_info.id)
            if instance.status in CONFIG.running_status:
                return False
            else:
                return True
        poll_until(result_is_not_active)
        instance = instance_info.dbaas.instances.get(instance_info.id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 200)
        assert_equal('RESTART_REQUIRED', instance.status)

    @test(depends_on=[test_waiting_for_instance_in_restart_required])
    def test_restart_service_should_return_active(self):
        """test_restart_service_should_return_active"""
        instance_info.dbaas.instances.restart(instance_info.id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 202)

        def result_is_active():
            instance = instance_info.dbaas.instances.get(
                instance_info.id)
            if instance.status in CONFIG.running_status:
                return True
            else:
                assert_true(instance.status in ['REBOOT', 'SHUTDOWN'])
                return False
        poll_until(result_is_active)

    @test(depends_on=[test_restart_service_should_return_active])
    @time_out(30)
    def test_get_configuration_details_from_instance_validation(self):
        """test_get_configuration_details_from_instance_validation"""
        inst = instance_info.dbaas.instances.get(instance_info.id)
        configuration_id = inst.configuration['id']
        assert_not_equal(None, inst.configuration['id'])
        _test_configuration_is_applied_to_instance(instance_info,
                                                   configuration_id)

    @test(depends_on=[test_configurations_list])
    def test_compare_list_and_details_timestamps(self):
        # compare config timestamps between list and details calls
        result = instance_info.dbaas.configurations.list()
        list_config = [config for config in result if
                       config.id == configuration_info.id]
        assert_equal(1, len(list_config))
        details_config = instance_info.dbaas.configurations.get(
            configuration_info.id)
        assert_equal(list_config[0].created, details_config.created)
        assert_equal(list_config[0].updated, details_config.updated)
@test(depends_on_classes=[ListConfigurations],
      groups=[tests.DBAAS_API_CONFIGURATIONS])
class StartInstanceWithConfiguration(ConfigurationsTestBase):
    """Launches a brand-new instance with the configuration attached at
    create time (via the ``configuration_href`` captured earlier)."""

    @test
    def test_start_instance_with_configuration(self):
        """test that a new instance will apply the configuration on create"""
        global configuration_instance
        databases = []
        databases.append({"name": "firstdbconfig", "character_set": "latin2",
                          "collate": "latin2_general_ci"})
        databases.append({"name": "db2"})
        configuration_instance.databases = databases
        users = []
        users.append({"name": "liteconf", "password": "liteconfpass",
                      "databases": [{"name": "firstdbconfig"}]})
        configuration_instance.users = users
        configuration_instance.name = "TEST_" + str(uuid.uuid4()) + "_config"
        # Mirror the shared test instance's flavor/volume/datastore so only
        # the attached configuration differs.
        flavor_href = instance_info.dbaas_flavor_href
        configuration_instance.dbaas_flavor_href = flavor_href
        configuration_instance.volume = instance_info.volume
        configuration_instance.dbaas_datastore = instance_info.dbaas_datastore
        configuration_instance.dbaas_datastore_version = \
            instance_info.dbaas_datastore_version
        configuration_instance.nics = instance_info.nics
        result = instance_info.dbaas.instances.create(
            configuration_instance.name,
            configuration_instance.dbaas_flavor_href,
            configuration_instance.volume,
            configuration_instance.databases,
            configuration_instance.users,
            nics=configuration_instance.nics,
            availability_zone="nova",
            datastore=configuration_instance.dbaas_datastore,
            datastore_version=configuration_instance.dbaas_datastore_version,
            configuration=configuration_href)
        assert_equal(200, instance_info.dbaas.last_http_code)
        assert_equal("BUILD", result.status)
        configuration_instance.id = result.id
@test(depends_on_classes=[StartInstanceWithConfiguration],
      groups=[tests.DBAAS_API_CONFIGURATIONS])
class WaitForConfigurationInstanceToFinish(ConfigurationsTestBase):
    """Waits for the configuration-created instance to go active, then
    verifies the configuration was applied during provisioning."""

    @test
    @time_out(TIMEOUT_INSTANCE_CREATE)
    def test_instance_with_configuration_active(self):
        """wait for the instance created with configuration"""
        def result_is_active():
            instance = instance_info.dbaas.instances.get(
                configuration_instance.id)
            if instance.status in CONFIG.running_status:
                return True
            else:
                # Anything other than BUILD while waiting is a failure.
                assert_equal("BUILD", instance.status)
                return False
        poll_until(result_is_active)

    @test(depends_on=[test_instance_with_configuration_active])
    @time_out(30)
    def test_get_configuration_details_from_instance_validation(self):
        """Test configuration is applied correctly to the instance."""
        inst = instance_info.dbaas.instances.get(configuration_instance.id)
        configuration_id = inst.configuration['id']
        assert_not_equal(None, configuration_id)
        _test_configuration_is_applied_to_instance(configuration_instance,
                                                   configuration_id)
@test(depends_on=[WaitForConfigurationInstanceToFinish],
groups=[tests.DBAAS_API_CONFIGURATIONS])
class DeleteConfigurations(ConfigurationsTestBase):
@before_class
def setUp(self):
# need to store the parameter details that will be deleted
config_param_name = sql_variables[1]
instance_info.dbaas.configuration_parameters.get_parameter(
instance_info.dbaas_datastore,
instance_info.dbaas_datastore_version,
config_param_name)
resp, body = instance_info.dbaas.client.last_response
print(resp)
print(body)
self.config_parameter_dict = json.loads(body.decode())
@after_class(always_run=True)
def tearDown(self):
# need to "undelete" the parameter that was deleted from the mgmt call
if instance_info.dbaas:
ds = instance_info.dbaas_datastore
ds_v = instance_info.dbaas_datastore_version
version = instance_info.dbaas.datastore_versions.get(
ds, ds_v)
client = instance_info.dbaas_admin.mgmt_configs
print(self.config_parameter_dict)
client.create(version.id,
self.config_parameter_dict['name'],
self.config_parameter_dict['restart_required'],
self.config_parameter_dict['type'],
self.config_parameter_dict['max'],
self.config_parameter_dict['min'])
@test
def test_delete_invalid_configuration_not_found(self):
# test deleting a configuration that does not exist throws exception
invalid_configuration_id = "invalid-config-id"
assert_raises(exceptions.NotFound,
instance_info.dbaas.configurations.delete,
invalid_configuration_id)
@test(depends_on=[test_delete_invalid_configuration_not_found])
def test_delete_configuration_parameter_with_mgmt_api(self):
# testing a param that is assigned to an instance can be deleted
# and doesn't affect an unassign later. So we delete a parameter
# that is used by a test (connect_timeout)
ds = instance_info.dbaas_datastore
ds_v = instance_info.dbaas_datastore_version
version = instance_info.dbaas.datastore_versions.get(
ds, ds_v)
client = instance_info.dbaas_admin.mgmt_configs
config_param_name = self.config_parameter_dict['name']
client.delete(version.id, config_param_name)
assert_raises(
exceptions.NotFound,
instance_info.dbaas.configuration_parameters.get_parameter,
ds,
ds_v,
config_param_name)
@test(depends_on=[test_delete_configuration_parameter_with_mgmt_api])
def test_unable_delete_instance_configurations(self):
# test deleting a configuration that is assigned to
# an instance is not allowed.
assert_raises(exceptions.BadRequest,
instance_info.dbaas.configurations.delete,
configuration_info.id)
@test(depends_on=[test_unable_delete_instance_configurations])
@time_out(30)
def test_unassign_configuration_from_instances(self):
"""test to unassign configuration from instance"""
instance_info.dbaas.instances.update(configuration_instance.id,
remove_configuration=True)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)
instance_info.dbaas.instances.update(instance_info.id,
remove_configuration=True)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)
instance_info.dbaas.instances.get(instance_info.id)
def result_has_no_configuration():
instance = instance_info.dbaas.instances.get(inst_info.id)
if hasattr(instance, 'configuration'):
return False
else:
return True
inst_info = instance_info
poll_until(result_has_no_configuration)
inst_info = configuration_instance
poll_until(result_has_no_configuration)
instance = instance_info.dbaas.instances.get(instance_info.id)
assert_equal('RESTART_REQUIRED', instance.status)
@test(depends_on=[test_unassign_configuration_from_instances])
def test_assign_in_wrong_state(self):
# test assigning a config to an instance in RESTART state
assert_raises(exceptions.BadRequest,
instance_info.dbaas.instances.modify,
configuration_instance.id,
configuration=configuration_info.id)
@test(depends_on=[test_assign_in_wrong_state])
def test_no_instances_on_configuration(self):
"""test_no_instances_on_configuration"""
result = instance_info.dbaas.configurations.get(configuration_info.id)
assert_equal(configuration_info.id, result.id)
assert_equal(configuration_info.name, result.name)
assert_equal(configuration_info.description, result.description)
assert_equal(result.instance_count, 0)
print(configuration_instance.id)
print(instance_info.id)
@test(depends_on=[test_unassign_configuration_from_instances])
@time_out(120)
def test_restart_service_should_return_active(self):
"""test that after restarting the instance it becomes active"""
instance_info.dbaas.instances.restart(instance_info.id)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)
def result_is_active():
instance = instance_info.dbaas.instances.get(
instance_info.id)
if instance.status in CONFIG.running_status:
return True
else:
assert_equal("REBOOT", instance.status)
return False
poll_until(result_is_active)
    @test(depends_on=[test_restart_service_should_return_active])
    def test_assign_config_and_name_to_instance_using_patch(self):
        """Assign a configuration and rename the instance in one PATCH.

        Verifies the combined update is accepted, the new name sticks, the
        instance then reports RESTART_REQUIRED, and that assigning a second
        configuration over an existing one is rejected with BadRequest.
        """
        new_name = 'new_name'
        report = CONFIG.get_report()
        report.log("instance_info.id: %s" % instance_info.id)
        report.log("configuration_info: %s" % configuration_info)
        report.log("configuration_info.id: %s" % configuration_info.id)
        report.log("instance name:%s" % instance_info.name)
        report.log("instance new name:%s" % new_name)
        saved_name = instance_info.name
        config_id = configuration_info.id
        instance_info.dbaas.instances.update(instance_info.id,
                                             configuration=config_id,
                                             name=new_name)
        assert_equal(202, instance_info.dbaas.last_http_code)
        check = instance_info.dbaas.instances.get(instance_info.id)
        assert_equal(200, instance_info.dbaas.last_http_code)
        assert_equal(check.name, new_name)
        # restore instance name
        instance_info.dbaas.instances.update(instance_info.id,
                                             name=saved_name)
        assert_equal(202, instance_info.dbaas.last_http_code)
        instance = instance_info.dbaas.instances.get(instance_info.id)
        # Attaching a configuration leaves the instance needing a restart.
        assert_equal('RESTART_REQUIRED', instance.status)
        # restart to be sure configuration is applied
        instance_info.dbaas.instances.restart(instance_info.id)
        assert_equal(202, instance_info.dbaas.last_http_code)
        sleep(2)
        # Poll predicate: True once the instance is running again.
        def result_is_active():
            instance = instance_info.dbaas.instances.get(
                instance_info.id)
            if instance.status in CONFIG.running_status:
                return True
            else:
                assert_equal("REBOOT", instance.status)
                return False
        poll_until(result_is_active)
        # test assigning a configuration to an instance that
        # already has an assigned configuration with patch
        config_id = configuration_info.id
        assert_raises(exceptions.BadRequest,
                      instance_info.dbaas.instances.update,
                      instance_info.id, configuration=config_id)
    @test(runs_after=[test_assign_config_and_name_to_instance_using_patch])
    def test_unassign_configuration_after_patch(self):
        """Remove the configuration from the instance"""
        instance_info.dbaas.instances.update(instance_info.id,
                                             remove_configuration=True)
        assert_equal(202, instance_info.dbaas.last_http_code)
        instance = instance_info.dbaas.instances.get(instance_info.id)
        # Detaching a configuration also requires a restart to take effect.
        assert_equal('RESTART_REQUIRED', instance.status)
        # restart to be sure configuration has been unassigned
        instance_info.dbaas.instances.restart(instance_info.id)
        assert_equal(202, instance_info.dbaas.last_http_code)
        sleep(2)
        # Poll predicate: True once the instance is running again.
        def result_is_active():
            instance = instance_info.dbaas.instances.get(
                instance_info.id)
            if instance.status in CONFIG.running_status:
                return True
            else:
                assert_equal("REBOOT", instance.status)
                return False
        poll_until(result_is_active)
        # The configuration group should no longer count this instance.
        result = instance_info.dbaas.configurations.get(configuration_info.id)
        assert_equal(result.instance_count, 0)
@test
def test_unassign_configuration_from_invalid_instance_using_patch(self):
# test unassign config group from an invalid instance
invalid_id = "invalid-inst-id"
try:
instance_info.dbaas.instances.update(invalid_id,
remove_configuration=True)
except exceptions.NotFound:
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 404)
    @test(runs_after=[test_unassign_configuration_after_patch])
    def test_delete_unassigned_configuration(self):
        """Deleting a configuration with no attached instances is accepted."""
        instance_info.dbaas.configurations.delete(configuration_info.id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 202)
    @test(depends_on=[test_delete_unassigned_configuration])
    @time_out(TIMEOUT_INSTANCE_DELETE)
    def test_delete_configuration_instance(self):
        """Delete the helper instance and wait until it is really gone."""
        instance_info.dbaas.instances.delete(configuration_instance.id)
        assert_equal(202, instance_info.dbaas.last_http_code)
        # Poll predicate: True once GET on the instance raises NotFound.
        def instance_is_gone():
            try:
                instance_info.dbaas.instances.get(configuration_instance.id)
                return False
            except exceptions.NotFound:
                return True
        poll_until(instance_is_gone)
        # Double-check the terminal state outside the poll loop.
        assert_raises(exceptions.NotFound, instance_info.dbaas.instances.get,
                      configuration_instance.id)
|
3,202 | 09aedd6cab0b8c6a05bbee5b336fcd38aea1f7b9 | # animation2.py
# multiple-shot cannonball animation
from math import sqrt, sin, cos, radians, degrees
from graphics import *
from projectile import Projectile
from button import Button
class Launcher:
    """Adjustable cannon drawn at the origin: a red base circle plus an
    arrow whose direction is the launch angle and whose length is the
    launch velocity."""
    def __init__(self, win):
        """Create initial launcher with angle 45 degrees and velocity 40

        win is the GraphWin to draw the launcher in.
        """
        # draw the base shot of the launcher
        base = Circle(Point(0,0), 3)
        base.setFill("red")
        base.setOutline("red")
        base.draw(win)
        # save the window and create initial angle and velocity
        self.win = win
        self.angle = radians(45.0)   # stored internally in radians
        self.vel = 40.0
        # create initial "dummy" arrow
        self.arrow = Line(Point(0,0), Point(0,0)).draw(win)
        # replace it with the correct arrow
        self.redraw()
    def redraw(self):
        """undraw the arrow and draw a new one for the
        current values of angle and velocity.
        """
        self.arrow.undraw()
        # Arrow tip: polar (vel, angle) converted to cartesian coordinates.
        pt2 = Point(self.vel*cos(self.angle), self.vel*sin(self.angle))
        self.arrow = Line(Point(0,0), pt2).draw(self.win)
        self.arrow.setArrow("last")
        self.arrow.setWidth(3)
    def adjAngle(self, amt):
        """ change angle by amt degrees """
        self.angle = self.angle+radians(amt)
        self.redraw()
    def adjVel(self, amt):
        """ change velocity by amt"""
        self.vel = self.vel + amt
        self.redraw()
    def fire(self):
        """Spawn a ShotTracker at ground level using the current settings
        (angle converted back to degrees for the Projectile API)."""
        return ShotTracker(self.win, degrees(self.angle), self.vel, 0.0)
class ShotTracker:
    """Animated red circle that follows a Projectile's flight in a GraphWin."""

    def __init__(self, win, angle, velocity, height):
        """win is the GraphWin to display the shot; angle, velocity and
        height are the initial projectile parameters.
        """
        self.proj = Projectile(angle, velocity, height)
        marker = Circle(Point(0, height), 3)
        marker.setFill("red")
        marker.setOutline("red")
        marker.draw(win)
        self.marker = marker

    def update(self, dt):
        """Move the shot dt seconds farther along its flight."""
        self.proj.update(dt)
        where = self.marker.getCenter()
        # Shift the marker by the delta between physics and drawing state.
        self.marker.move(self.proj.getX() - where.getX(),
                         self.proj.getY() - where.getY())

    def getX(self):
        """Current x coordinate of the shot's center."""
        return self.proj.getX()

    def getY(self):
        """Current y coordinate of the shot's center."""
        return self.proj.getY()

    def undraw(self):
        """Remove the shot's marker from the window."""
        self.marker.undraw()
class ProjectileApp:
    """Interactive multi-shot cannonball animation.

    Keys: Up/Down adjust the launch angle, Left/Right the velocity,
    'f' fires a new shot, 'q'/'Q' quits.
    """
    def __init__(self):
        self.win = GraphWin("Projectile Animation", 640, 480)
        # World coordinates: x in [-10, 210], y in [-10, 155].
        self.win.setCoords(-10, -10, 210, 155)
        # Ground line with labeled tick marks every 50 units.
        Line(Point(-10,0), Point(210,0)).draw(self.win)
        for x in range(0, 210, 50):
            Text(Point(x,-7), str(x)).draw(self.win)
            Line(Point(x,0), Point(x,2)).draw(self.win)
        self.launcher = Launcher(self.win)
        self.shots = []   # in-flight ShotTracker objects
    def updateShots(self, dt):
        """Advance every shot by dt; drop shots that hit the ground or
        leave the right edge of the window."""
        alive = []
        for shot in self.shots:
            shot.update(dt)
            if shot.getY() >= 0 and shot.getX() < 210:
                alive.append(shot)
            else:
                shot.undraw()
        self.shots = alive
    def run(self):
        # main event/animation loop
        while True:
            # One 1/30 s physics step (true division -- assumes Python 3).
            self.updateShots(1/30)
            key = self.win.checkKey()
            if key in ["q", "Q"]:
                break
            if key == "Up":
                self.launcher.adjAngle(5)
            elif key == "Down":
                self.launcher.adjAngle(-5)
            elif key == "Right":
                self.launcher.adjVel(5)
            elif key == "Left":
                self.launcher.adjVel(-5)
            elif key == "f":
                self.shots.append(self.launcher.fire())
            # presumably graphics.py's frame-rate limiter -- TODO confirm
            update(30)
        self.win.close()
# Launch the interactive animation when run as a script.
if __name__ == "__main__":
    ProjectileApp().run()
|
3,203 | 4fbe4d474e10e08eafee3bcc6173f8cd6b797dde | def swap(a,b):
print(a,b)
# Read two values, swap them via tuple unpacking, and report the result.
a = input("enter a value 1 : ")
b = input("enter b value 2 : ")
a, b = b, a
# Fixed typos in the user-facing messages ("vaalues", "swaping").
print("the values after swapping the variables are below:")
print("the value of a is : ", a)
print("the value of b is : ", b)
|
3,204 | 501614f9c7df3c862c9951ea343964b6ed47e74a | import requests
import json
import urllib.parse
base_url = f"https://api.telegram.org/bot"
def get_url(url):
    """GET *url* and return the response body decoded as UTF-8 text."""
    response = requests.get(url)
    return response.content.decode("utf8")
def get_json_from_url(url):
    """GET *url* and parse its body as JSON."""
    return json.loads(get_url(url))
def get_updates(TOKEN):
    """Call the Telegram getUpdates endpoint; abort on an invalid token."""
    payload = get_json_from_url(f'{base_url}{TOKEN}/getUpdates')
    if not payload['ok']:
        print('Error: Invalid telegram token!')
        exit(0)
    return payload
def get_last_chat_id_and_text(updates):
    """Return (text, timestamp, chat_id) of the newest update.

    Exits the process if the bot has never received a message.
    """
    results = updates["result"]
    if not results:
        print('Error: Please send a message to the bot to initialize!')
        exit(0)
    message = results[-1]["message"]
    return message["text"], message["date"], message["chat"]["id"]
def send_message(TOKEN, text, chat_id):
    """Send *text* to *chat_id* via the Bot API; abort if the chat is invalid.

    The text is URL-encoded so spaces, '&', '#', '?' etc. cannot corrupt the
    query string (the previous version interpolated it raw).
    """
    url = (f'{base_url}{TOKEN}/sendMessage'
           f'?text={urllib.parse.quote(text)}&chat_id={chat_id}')
    resp = json.loads(get_url(url))
    if not resp['ok']:
        print('Error: Invalid telegram chat_id! Please delete cached.')
        exit(0)
def initialize_bot(TOKEN):
    """Validate the token, resolve the bot's chat id (cached on disk in
    'cached'), send a greeting to confirm the chat id works, and return it.
    """
    get_updates(TOKEN)  # ensure token is valid
    # in case the bot doesn't have a recent incoming message, cached will prevent failing
    try:
        f_cached = open('cached', 'rt')
    except FileNotFoundError:
        # No cache yet: derive the chat id from the most recent update.
        _, _, chat_id = get_last_chat_id_and_text(get_updates(TOKEN))
        with open('cached', 'wt') as f_cached:
            json.dump({'chat_id': chat_id}, f_cached)
    else:
        # Bug fix: the cached file handle was previously never closed.
        with f_cached:
            chat_id = json.load(f_cached)['chat_id']
    send_message(TOKEN, 'Bot initialized.', chat_id)  # ensure chat_id is valid
    return chat_id
|
3,205 | 63182a8708729606f96794cddb163f707252ba61 | from people.models import Medium, Profile, Staff, Instructor, Student, Alumni, Donation, Address, Award, Reference, Experience, Skill, Education, ImporterUsers
from anonymizer import Anonymizer
# Database anonymizers for the `people` app. Each class pairs a model with an
# `attributes` list mapping column name -> replacement strategy:
#   "SKIP"        -- leave the value untouched (primary/foreign keys, amounts)
#   other tags    -- e.g. "varchar", "lorem", "email", "phonenumber", "date",
#                    "bool", "choice", "integer", "name", "city" name the fake
#                    generator used to scrub the column.
# NOTE(review): tag semantics come from the `anonymizer` package -- verify any
# new tag against its documentation before use.
class MediumAnonymizer(Anonymizer):
    model = Medium
    attributes = [
        ('medium_id', "integer"),
        ('description', "varchar"),
    ]
class ProfileAnonymizer(Anonymizer):
    model = Profile
    attributes = [
        ('user_id', "SKIP"),
        ('person_id', "SKIP"),
        ('datatel_avatar_url', "SKIP"),
        ('suffix', "choice"),
        ('salutation', "choice"),
        ('middle_name', "name"),
        ('title', "varchar"),
        ('about', "lorem"),
        ('email2', "email"),
        ('home_phone1', "phonenumber"),
        ('biz_phone1', "phonenumber"),
        ('mobile_phone1', "phonenumber"),
        ('fax', "phonenumber"),
        ('allow_contact', "bool"),
        ('show_name', "bool"),
        ('url_personal', "varchar"),
        ('url_org', "varchar"),
        ('accepted_terms', "bool"),
        ('email_on_follow', "bool"),
    ]
class StaffAnonymizer(Anonymizer):
    model = Staff
    attributes = [
        ('profile_id', "SKIP"),
        ('office_num', "varchar"),
        ('extension', "varchar"),
    ]
class InstructorAnonymizer(Anonymizer):
    model = Instructor
    attributes = [
        ('profile_id', "SKIP"),
        ('office_num', "varchar"),
        ('extension', "varchar"),
        ('bio_short', "lorem"),
        ('bio_long', "lorem"),
    ]
class StudentAnonymizer(Anonymizer):
    model = Student
    attributes = [
        ('profile_id', "SKIP"),
        ('grad_year', "choice"),
        ('funding_amount', "SKIP"),
        ('enrollment_date', "date"),
        ('program_length', "integer"),
        ('visiting_scholar', "bool"),
    ]
# Alumni carries the bulk of the personally identifying free-text fields;
# everything narrative is replaced with lorem text.
class AlumniAnonymizer(Anonymizer):
    model = Alumni
    attributes = [
        ('profile_id', "SKIP"),
        ('grad_year', "choice"),
        ('third_year', "bool"),
        ('j200_inst', "varchar"),
        ('funding_amount', "SKIP"),
        ('enrollment_date', "date"),
        ('program_length', "integer"),
        ('equipment_balance', "SKIP"),
        ('visiting_scholar', "bool"),
        ('employer', "varchar"),
        ('specialty', "varchar"),
        ('medium', "choice"),
        ('prev_emp1', "varchar"),
        ('prev_emp2', "varchar"),
        ('prev_emp3', "varchar"),
        ('notes_exclude', "bool"),
        ('notes', "lorem"),
        ('mod_date', "date"),
        ('pub_display', "bool"),
        ('freelance', "bool"),
        ('region', "choice"),
        ('prev_intern1', "varchar"),
        ('prev_intern2', "varchar"),
        ('prev_intern3', "varchar"),
        ('first_job', "varchar"),
        ('books', "lorem"),
        ('deceased_notes', "varchar"),
        ('mia', "bool"),
        ('mia_notes', "lorem"),
        ('interview', "bool"),
        ('interview_year', "choice"),
        ('interview_notes', "lorem"),
        ('agents_year', "choice"),
        ('agents_notes', "lorem"),
        ('event_attend_notes', "lorem"),
        ('famous_notes', "lorem"),
        ('volunteer_speak', "bool"),
        ('volunteer_committee', "bool"),
        ('volunteer_interview', "bool"),
        ('volunteer_mentor', "bool"),
        ('volunteer_agent', "bool"),
        ('maillist_class', "bool"),
        ('no_maillists', "bool"),
        ('no_reminder', "bool"),
        ('suggestions', "lorem"),
        ('committee_notes', "lorem"),
        ('inactive', "bool"),
        ('revision', "integer"),
    ]
class DonationAnonymizer(Anonymizer):
    model = Donation
    attributes = [
        ('id', "SKIP"),
        ('profile_id', "SKIP"),
        ('amount', "integer"),
        ('date', "date"),
        ('description', "varchar"),
        ('notes', "lorem"),
    ]
class AddressAnonymizer(Anonymizer):
    model = Address
    attributes = [
        ('id', "SKIP"),
        ('profile_id', "SKIP"),
        ('address_type', "choice"),
        ('street_1', "street_address"),
        ('street_2', "street_address"),
        ('street_3', "street_address"),
        ('city', "city"),
        ('state', "choice"),
        ('state_other', "varchar"),
        ('postal_code', "uk_postcode"),
        ('display', "bool"),
    ]
class AwardAnonymizer(Anonymizer):
    model = Award
    attributes = [
        ('id', "SKIP"),
        ('profile_id', "SKIP"),
        ('title', "varchar"),
        ('description', "lorem"),
        ('date_received', "date"),
        ('display', "bool"),
    ]
class ReferenceAnonymizer(Anonymizer):
    model = Reference
    attributes = [
        ('id', "SKIP"),
        ('profile_id', "SKIP"),
        ('body', "lorem"),
    ]
class ExperienceAnonymizer(Anonymizer):
    model = Experience
    attributes = [
        ('id', "SKIP"),
        ('profile_id', "SKIP"),
        ('experience_type', "choice"),
        ('title', "varchar"),
        ('description', "lorem"),
        ('company', "varchar"),
        ('city', "city"),
        ('state', "choice"),
        ('country', "varchar"),
        ('start_date', "date"),
        ('end_date', "date"),
        ('display', "bool"),
    ]
class SkillAnonymizer(Anonymizer):
    model = Skill
    attributes = [
        ('id', "SKIP"),
        ('profile_id', "SKIP"),
        ('summary', "lorem"),
        ('display', "bool"),
    ]
class EducationAnonymizer(Anonymizer):
    model = Education
    attributes = [
        ('id', "SKIP"),
        ('profile_id', "SKIP"),
        ('diploma', "choice"),
        ('school', "varchar"),
        ('description', "lorem"),
        ('start_date', "date"),
        ('end_date', "date"),
        ('display', "bool"),
    ]
# ImporterUsers rows are entirely skipped -- presumably scrubbed by another
# process or intentionally preserved; confirm before relying on this.
class ImporterUsersAnonymizer(Anonymizer):
    model = ImporterUsers
    attributes = [
        ('id', "SKIP"),
        ('action', "SKIP"),
        ('person_id', "SKIP"),
        ('section_id', "SKIP"),
        ('first_name', "SKIP"),
        ('last_name', "SKIP"),
        ('email', "SKIP"),
        ('photo_url', "SKIP"),
        ('person_type', "SKIP"),
    ]
|
3,206 | d2e8c95dc144aa83128cc815ad145982f64b1819 | #!/usr/bin/env python
from math import factorial
# Precompute 0!..9! once; digit lookups below are then O(1).
F = [factorial(i) for i in range(10)]
# F[9] * 8 = 2903040 has only 7 digits, so no 8-digit number can equal its
# digit-factorial sum; F[9] * 7 = 2540160 is therefore a safe upper bound.

def factorion_sum(limit=2540160):
    """Sum all numbers in [10, limit) equal to the sum of the factorials
    of their digits (Project Euler 34; 1 and 2 are excluded by starting
    at 10).

    The search bound is a parameter so callers can check small ranges
    quickly; the default covers the full search space. Ported from
    Python 2 (`xrange`, `print` statement) to Python 3.
    """
    return sum(i for i in range(10, limit)
               if sum(F[int(d)] for d in str(i)) == i)

if __name__ == "__main__":
    print(factorion_sum())
|
3,207 | 4193fa992d06890afb660c072842cf1b85a43774 | import glob
import logging
import os
import sqlite3
from aiogram import Bot, Dispatcher, executor, types
# NOTE(review): live bot token committed to source control -- rotate it and
# load it from an environment variable or config file instead.
TOKEN = '1772334389:AAE5wv8gssOFOgxQjQwKk7rUSKQHr6NTjus'
logging.basicConfig(level=logging.INFO)
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
path1 = 'C:\\Users\\const\\PycharmProjects\\t'
# Module-level SQLite connection/cursor shared by every handler below.
conn = sqlite3.connect('kustoge.db')
cur = conn.cursor()
chunksize = 10
# Directory holding passport scans named <passport_id>.jpg.
idfolder = "C:\\Users\\const\\PycharmProjects\\goskustoge\\data\\id"
@dp.message_handler(commands=['start', 'help'])
async def send_welcome(message: types.Message):
    # /start, /help: reply with the (Russian) usage manual for every command.
    await message.reply(
        "Портал Государственных услуг округа Кустоже\n\n Используете команду /id + номер_паспорта \n для "
        "получения информации о владельце.\n\n Так же можно использовать команду /fullname + имя + фамилия, "
        "для получения информации о гражданине, регистр и порядок не важен\n\n Для получения информации о гражданах по "
        "фамилии воспользуйтесь командой /lastname + Фамилия \n\n Для получения данных о гражданах по национальности "
        "используйте /get_scan_nat+ брасогорец\отовичанин \n\n Для добавления гражданина "
        "воспользуйтесь командой /add_person ИМЯ+ФАМИЛИЯ+НОМЕР_ПАСПОРТА+НАЦИОНАЛЬНОСТЬ+НОМЕР_ЛИЦЕНЗИИ_"
        "ОРУЖЕЙНОЙ+Преступление ( если нет лицензии и преступления пишем НЕТ \n\n Для удаления гражданина "
        "используйте /delete_person + номер паспорта \n\n Для добавления лицензии на оружение воспользуйтесь командой "
        "/add_gun_lic+id+номер_лицензии \n\n "
        "Для удалениея /delete_gun_lic + id \n\n Для добавления преступления гражданину используйте команду "
        "/add_crime +id + преступление \n Для удаления преступления воспользуйтесь командой /delete_crime + id")
@dp.message_handler(commands=['id'])
async def echo(message: types.Message):
    # /id <passport>: look up one citizen by passport id and send the profile
    # plus the passport scan (if a matching .jpg exists in idfolder).
    print("попросили данные по ID")
    arguments = message.get_args()
    print(arguments)
    # Parameterized query -- safe against SQL injection.
    cur.execute("select * from barsa where id=:id", {"id": arguments})
    res = cur.fetchone()
    # "Relatives" are approximated as rows sharing the same last name.
    cur.execute("select count(*) from barsa where lastname=:lastname",
                {"lastname": res[2]})
    res2 = cur.fetchone()
    print(res2)
    result = 'Имя:' + ' ' + res[1] + '\n' + 'Фамилия:' + ' ' + res[2] + '\n' \
        + 'Номер Паспорта:' + ' ' + res[3] + '\n' \
        + 'Национальность:' + ' ' + res[4] + "\n" \
        + "Родвественников:" + ' ' + str(res2[0]) + '\n' \
        + "Номер " "лицензии на " "оружие:" + " " + res[5] + '\n' \
        + "Преступление:" + " " + res[6]
    await bot.send_message(message.from_user.id, result)
    os.chdir(idfolder)
    # NOTE(review): the image file handle is never closed after sending.
    for file in glob.glob(res[3] + ".jpg"):
        img = open(file, "rb")
        await bot.send_photo(message.from_user.id, img)
@dp.message_handler(commands=['fullname'])
async def echo(message: types.Message):
    # /fullname <name> <lastname>: case-insensitive lookup, either word order.
    print("попросили данные по имени")
    arguments = message.get_args()
    print(arguments)
    s = arguments.lower()
    s = s.split()
    cur.execute("select * from barsa where name=:name and lastname=:lastname or name=:lastname and lastname=:name",
                {"name": s[0], "lastname": s[1]})
    res = cur.fetchone()
    # "Relatives" = rows sharing the last name (see the /id handler).
    cur.execute("select count(*) from barsa where lastname=:lastname",
                {"lastname": s[1]})
    res2 = cur.fetchone()
    print(res2[0])
    result = 'Имя:' + ' ' + res[1] + '\n' + 'Фамилия:' + ' ' + res[2] + '\n' \
        + 'Номер Паспорта:' + ' ' + res[3] + '\n' \
        + 'Национальность:' + ' ' + res[4] + "\n" \
        + "Родвественников:" + ' ' + str(res2[0]) + '\n' \
        + "Номер " "лицензии на " "оружие:" + " " + res[5] + '\n' \
        + "Преступление:" + " " + res[6]
    await bot.send_message(message.from_user.id, result)
    os.chdir(idfolder)
    # NOTE(review): the image file handle is never closed after sending.
    for file in glob.glob(res[3] + ".jpg"):
        img = open(file, "rb")
        await bot.send_photo(message.from_user.id, img)
@dp.message_handler(commands=['lastname'])
async def echo(message: types.Message):
    # /lastname <lastname>: send the profile + scan of every matching citizen.
    arguments = message.get_args()
    print("попросили данные по Фамилии:", arguments)
    s = arguments.lower()
    cur.execute("select * from barsa where lastname=:lastname ",
                {"lastname": s})
    res = cur.fetchall()
    os.chdir(idfolder)
    for f in res:
        print(f)
        # Re-query by the row's passport id (column 3) to format one profile.
        cur.execute("select * from barsa where id=:id", {"id": f[3]})
        res = cur.fetchone()
        req = 'Имя:' + ' ' + res[1] + '\n' + 'Фамилия:' + ' ' + res[2] + '\n' \
            + 'Номер Паспорта:' + ' ' + res[3] + '\n' \
            + 'Национальность:' + ' ' + res[4] + '\n' \
            + "Номер " "лицензии на " "оружие:" + " " + res[5] + '\n' \
            + "Преступление:" + " " + res[6]
        await bot.send_message(message.from_user.id, req)
        for file in glob.glob(res[3] + ".jpg"):
            img = open(file, "rb")
            await bot.send_photo(message.from_user.id, img)
@dp.message_handler(commands=['get_scan_nat'])
async def echo(message: types.Message):
    # /get_scan_nat <nationality>: send the passport scan of every citizen of
    # that nationality; a few hard-coded ids receive the profile text instead
    # (presumably because their scans are missing -- TODO confirm).
    print("попросили ВСЕ СКАНЫ по по национальности")
    arguments = message.get_args()
    print(arguments)
    s = arguments
    s = s.capitalize()
    cur.execute("select id from barsa where nat_=:nat_", {"nat_": s})
    res = cur.fetchall()
    # Flatten the list of 1-tuples and strip stray spaces from the ids.
    out = [item for t in res for item in t]
    out = [s.replace(" ", "") for s in out]
    os.chdir(idfolder)
    for f in out:
        print(f)
        if f == '472-641218' or f == '757-067985' or f == '642-741978' or f == '696-082959' or f == '442-446766' or f == '702-973965':
            cur.execute("select * from barsa where id=:id", {"id": f})
            res = cur.fetchone()
            req = 'Имя:' + ' ' + res[1] + '\n' + 'Фамилия:' + ' ' + res[2] + '\n' \
                + 'Номер Паспорта:' + ' ' + res[3] + '\n' \
                + 'Национальность:' + ' ' + res[4] + '\n' \
                + "Номер " "лицензии на " "оружие:" + " " + res[5] + '\n' \
                + "Преступление:" + " " + res[6]
            await bot.send_message(message.from_user.id, req)
        else:
            img = open(f + '.jpg', "rb")
            print('ok')
            await bot.send_photo(message.from_user.id, img)
@dp.message_handler(commands=['add_person'])
async def echo(message: types.Message):
    # /add_person NAME LASTNAME PASSPORT NATIONALITY GUN_LICENSE CRIME
    # ("НЕТ" for a missing license/crime), then echo the stored profile back.
    arguments = message.get_args()
    s = arguments.split()
    print(s)
    sqlite_insert_query = """INSERT INTO barsa
                          (name, lastname, id, nat_,gunlic,crime)
                          VALUES
                          (?,?,?,?,?,?)"""
    data_tuple = (s[0], s[1], s[2], s[3], s[4], s[5])
    cur.execute(sqlite_insert_query, data_tuple)
    conn.commit()
    print("Запись о гражданине успешно добавлена ")
    cur.execute("select * from barsa where id=:id", {"id": s[2]})
    res = cur.fetchone()
    req = 'Имя:' + ' ' + res[1] + '\n' + 'Фамилия:' + ' ' + res[2] + '\n' \
        + 'Номер Паспорта:' + ' ' + res[3] + '\n' \
        + 'Национальность:' + ' ' + res[4] + '\n' \
        + "Номер " "лицензии на " "оружие:" + " " + res[5] + '\n' \
        + "Преступление:" + " " + res[6]
    await bot.send_message(message.from_user.id, req)
@dp.message_handler(commands=['delete_person'])
async def echo(message: types.Message):
    # /delete_person <passport>: remove the row and confirm.
    arguments = message.get_args()
    print("попросили удалить гражаднина с ID:", arguments)
    s = arguments
    cur.execute("delete from barsa where id=:id", {"id": s})
    conn.commit()
    print("Гражданин с ID :", arguments, 'удален')
    # NOTE(review): `res` is a tuple, so the bot sends its repr rather than a
    # formatted sentence.
    res = "Гражданин с ID :", arguments, 'удален'
    await bot.send_message(message.from_user.id, res)
@dp.message_handler(commands=['add_gun_lic'])
async def echo(message: types.Message):
    # /add_gun_lic <passport> <license_no>: set the license, echo the profile.
    arguments = message.get_args()
    s = arguments.split()
    print(s[0], s[1])
    cur.execute(" update barsa set gunlic=:gunlic where id=:id", {"gunlic": s[1], "id": s[0]})
    conn.commit()
    print("Record Updated successfully ")
    cur.execute("select * from barsa where id=:id", {"id": s[0]})
    res = cur.fetchone()
    print(res)
    # This variant deliberately omits the crime line.
    req = 'Имя:' + ' ' + res[1] + '\n' + 'Фамилия:' + ' ' + res[2] + '\n' \
        + 'Номер Паспорта:' + ' ' + res[3] + '\n' \
        + 'Национальность:' + ' ' + res[4] + '\n' \
        + "Номер " "лицензии на " "оружие:" + " " + res[5]
    await bot.send_message(message.from_user.id, req)
@dp.message_handler(commands=['gun_lic'])
async def echo(message: types.Message):
    # /gun_lic <license_no>: reverse lookup of a citizen by license number.
    arguments = message.get_args()
    s = arguments
    print(s)
    cur.execute("select * from barsa where gunlic=:gunlic", {"gunlic": s})
    res = cur.fetchone()
    print(res)
    req = 'Имя:' + ' ' + res[1] + '\n' + 'Фамилия:' + ' ' + res[2] + '\n' \
        + 'Номер Паспорта:' + ' ' + res[3] + '\n' \
        + 'Национальность:' + ' ' + res[4] + '\n' \
        + "Номер " "лицензии на " "оружие:" + " " + res[5] + '\n' \
        + "Преступление:" + " " + res[6]
    await bot.send_message(message.from_user.id, req)
@dp.message_handler(commands=['delete_gun_lic'])
async def echo(message: types.Message):
    # /delete_gun_lic <passport>: reset the license field to 'нет'.
    arguments = message.get_args()
    s = arguments
    print(s)
    no = 'нет'
    cur.execute("update barsa set gunlic=:gunlic1 where id=:id", {"gunlic1": no, "id": s})
    # NOTE(review): fetchone() after an UPDATE always yields None, and `ans`
    # is a tuple, so the bot sends its repr.
    res1 = cur.fetchone()
    conn.commit()
    ans = "Оружейная лицения гражданина:",s, " удалена"
    await bot.send_message(message.from_user.id, ans)
@dp.message_handler(commands=['add_crime'])
async def echo(message: types.Message):
    # /add_crime <passport> <crime>: record a crime and echo the profile.
    # NOTE(review): split() means only the first word of the crime is stored.
    arguments = message.get_args()
    s = arguments.split()
    print(s[0], s[1])
    cur.execute(" update barsa set crime=:crime where id=:id", {"crime": s[1], "id": s[0]})
    conn.commit()
    print("Record Updated successfully ")
    cur.execute("select * from barsa where id=:id", {"id": s[0]})
    res = cur.fetchone()
    print(res)
    req = 'Имя:' + ' ' + res[1] + '\n' + 'Фамилия:' + ' ' + res[2] + '\n' \
        + 'Номер Паспорта:' + ' ' + res[3] + '\n' \
        + 'Национальность:' + ' ' + res[4] + '\n' \
        + "Номер " "лицензии на " "оружие:" + " " + res[5] + '\n' \
        + "Преступление:" + " " + res[6]
    await bot.send_message(message.from_user.id, req)
@dp.message_handler(commands=['delete_crime'])
async def echo(message: types.Message):
    # /delete_crime <passport>: reset the crime field to 'нет'.
    arguments = message.get_args()
    s = arguments
    print(s)
    no = 'нет'
    cur.execute("update barsa set crime=:crime where id=:id", {"crime": no, "id": s})
    # NOTE(review): fetchone() after an UPDATE always yields None.
    res1 = cur.fetchone()
    conn.commit()
    ans = "Преступление гражданина:",s, " удалено"
    await bot.send_message(message.from_user.id, ans)
if __name__ == '__main__':
    # Long-polling loop; skip_updates=False replays messages missed while down.
    executor.start_polling(dp, skip_updates=False)
|
3,208 | d04506e67071abf36d43a828d90fbe0f14230103 | # filename: cycle_break.py
# for i in range(1, 101):
# if i % 3 == 0 and i % 8 == 0:
# print(i)
# break
# Print the first number in 1..100 divisible by both 4 and 6, then stop.
for candidate in range(1, 101):
    if candidate % 4 == 0 and candidate % 6 == 0:
        print(candidate)
        break
class MedianFinder:
    """Running-median container built on two heaps.

    ``minheap`` holds the upper half of the numbers; ``maxheap`` holds the
    lower half as negated values. ``minheap`` may hold one extra element, so
    the median is its root (odd count) or the mean of both roots (even).
    """

    def __init__(self):
        """initialize your data structure here."""
        self.minheap = []
        self.maxheap = []

    def addNum(self, num: int) -> None:
        # Route num through the upper heap: its smallest element sinks into
        # the negated lower heap, then rebalance so the upper heap is never
        # shorter than the lower one.
        smallest_upper = heapq.heappushpop(self.minheap, num)
        heapq.heappush(self.maxheap, -smallest_upper)
        if len(self.maxheap) > len(self.minheap):
            heapq.heappush(self.minheap, -heapq.heappop(self.maxheap))

    def findMedian(self) -> float:
        upper, lower = self.minheap, self.maxheap
        if len(lower) == len(upper):
            return (upper[0] - lower[0]) / 2.
        return upper[0]
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian() |
3,210 | af7af5d1048d2b0968e831aad89d5baf30cab608 | '''
Copyright (c) 2021, Štěpán Beneš
The purpose of this script it to take the 5 BSE and 5 SE hand-picked prototype
images and turn them into the same shape and format as the rest of the data.
Prototype images are resized to 768x768, the info bar is cropped off. Afterwards
the images are normalized to float32 in range [0,1] and reshaped into Keras Input
shape of (len(images), width, height, 1). Finally they are saved for further use
during anomaly detection with siamese networks.
'''
import glob
import numpy as np
import cv2
from reshape_util import crop_reshape
from reshape_util import reshape_normalize
# Target geometry shared by all prototype images (info bar already cropped).
IMG_WIDTH = 768
IMG_HEIGHT = 768
# SE prototypes carry the *_3* filename tag, BSE prototypes the *_4* tag.
proto_images_se = glob.glob('Clonky-prototypy/*_3*')
proto_images_bse = glob.glob('Clonky-prototypy/*_4*')
# Crop off the info bar and resize each image (see reshape_util).
proto_images_se_list = crop_reshape(proto_images_se)
proto_images_bse_list = crop_reshape(proto_images_bse)
# Normalize to float32 in [0, 1] and reshape to (n, width, height, 1) for Keras.
proto_images_se_list = reshape_normalize(proto_images_se_list, IMG_WIDTH, IMG_HEIGHT)
proto_images_bse_list = reshape_normalize(proto_images_bse_list, IMG_WIDTH, IMG_HEIGHT)
# Sanity check -- presumably (5, 768, 768, 1) per the module docstring.
print(proto_images_se_list.shape)
print(proto_images_bse_list.shape)
# Persist for later use by the siamese-network anomaly detection.
np.save("Data/SE_prototypes.npy", proto_images_se_list)
np.save("Data/BSE_prototypes.npy", proto_images_bse_list)
|
3,211 | 007cce815f3ad4e47593ff00ff2e73d5d9961d9e | #!/usr/bin/env python
# Copyright (c) 2019, University of Stuttgart
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright
# notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
# Jim Mainprice on Wed January 22 2019
from demos_common_imports import *
from pyrieef.geometry.workspace import *
from pyrieef.geometry import heat_diffusion
from pyrieef.rendering.workspace_planar import WorkspaceDrawer
import matplotlib.pyplot as plt
# Figure layout: one row with two panels (computed field vs. analytic kernel).
ROWS = 1
COLS = 2
# Discretization / solver settings for the diffusion demo.
heat_diffusion.NB_POINTS = 101
heat_diffusion.TIME_FACTOR = 50
heat_diffusion.ALGORITHM = "forward"   # forward (explicit) time stepping
iterations = 10
workspace = Workspace()
source = [0, 0]   # heat source at the workspace origin
renderer = WorkspaceDrawer(workspace, rows=ROWS, cols=COLS)
# U collects the diffused field; its final state is compared against a kernel
# with the given parameter (presumably a variance matched to the diffusion
# time -- TODO confirm against the pyrieef documentation).
U = heat_diffusion.heat_diffusion(workspace, source, iterations)
U_e = heat_diffusion.compare_with_kernel(U[-1], 9.020E-03, workspace)
for i in range(2):
    # Panel 0: simulated diffusion; panel 1: closed-form kernel.
    renderer.set_drawing_axis(i)
    renderer.draw_ws_obstacles()
    renderer.draw_ws_point(source, color='k', shape='o')
    renderer.background_matrix_eval = False
    renderer.draw_ws_img(
        U[-1] if i == 0 else U_e,
        interpolate="none", color_style=plt.cm.gray)
renderer.show()
|
3,212 | 0a3e0eeda14e42bfff7797b3c42a0aebd9a72ade | import numpy as np
print(np.random.binomial(10,0.5,1)) |
3,213 | 80be5f49a179eebc4915bf734a8e362cc2f2ef7c | #
# @lc app=leetcode id=14 lang=python3
#
# [14] Longest Common Prefix
#
# https://leetcode.com/problems/longest-common-prefix/description/
#
# algorithms
# Easy (34.95%)
# Likes: 2372
# Dislikes: 1797
# Total Accepted: 718.5K
# Total Submissions: 2M
# Testcase Example: '["flower","flow","flight"]'
#
# Write a function to find the longest common prefix string amongst an array of
# strings.
#
# If there is no common prefix, return an empty string "".
#
# Example 1:
#
#
# Input: ["flower","flow","flight"]
# Output: "fl"
#
#
# Example 2:
#
#
# Input: ["dog","racecar","car"]
# Output: ""
# Explanation: There is no common prefix among the input strings.
#
#
# Note:
#
# All given inputs are in lowercase letters a-z.
#
#
# @lc code=start
class Solution:
    def longestCommonPrefix(self, strs: [str]) -> str:
        """Return the longest common prefix of all strings in *strs*.

        Scans column by column across the strings and stops at the first
        position where they disagree or the shortest string ends. Unlike
        the previous version, the caller's list is NOT mutated (no sort)
        and the prefix is never re-compared after it is established.
        """
        if not strs:
            return ''
        prefix = []
        # zip(*strs) yields one tuple per character column and stops at the
        # length of the shortest string.
        for column in zip(*strs):
            if len(set(column)) > 1:
                break
            prefix.append(column[0])
        return ''.join(prefix)
# @lc code=end
if __name__ == '__main__':
    # Smoke-run the three LeetCode examples (results are discarded).
    solver = Solution()
    for case in (["ca", "a"],
                 ["dog", "racecar", "car"],
                 ["flower", "flow", "flight"]):
        solver.longestCommonPrefix(case)
|
3,214 | f7f96b19bdc20f732566709a7801002fe49d49eb | '''
Character class
'''
import pygame
from time import sleep
class Character:
    """Movable block drawn on a grid of side_length-pixel cells separated by
    border_width-pixel gaps.

    NOTE(review): valid_points, keys and k_colour are accepted by the
    constructor but never stored or used by any method -- confirm whether
    they can be dropped from the signature.
    """
    def __init__(self, screen, side_length, border_width, valid_points, start_point, end_point, current_position, a_colour, na_colour,\
        keys=None, k_colour=None):
        self.screen = screen # pygame screen
        self.side_length = side_length # length of the grid unit
        self.border_width = border_width # border width of the grid unit
        self.start_point = start_point # starting point of character in maze stored as a tuple
        self.end_point = end_point # end point of character in maze (tuple)
        self.current_position = current_position # current position of character (tuple)
        self.a_colour = a_colour # active colour of the character (tuple of 3 elements) RGB colour
        self.na_colour = na_colour # inactive colour of the character (tuple of 3 elements) RGB colour
        # draw the initial position of the character
        self.draw_position()
    # draw the character
    def draw_position(self):
        # Grid cell (x, y) maps to pixel offset border + (side + border) * coord.
        pygame.draw.rect(self.screen, self.a_colour, [self.border_width+(self.side_length+self.border_width)*self.current_position[0],\
            self.border_width+(self.side_length+self.border_width)*self.current_position[1], self.side_length, self.side_length])
    # move the character to next position
    def move_character(self, next_position):
        # create a rectangle for the current position
        current_rect = [self.border_width+(self.side_length+self.border_width)*self.current_position[0],\
            self.border_width+(self.side_length+self.border_width)*self.current_position[1],\
            self.side_length, self.side_length]
        # create a rectangle for the next position
        next_rect = [self.border_width+(self.side_length+self.border_width)*next_position[0],\
            self.border_width+(self.side_length+self.border_width)*next_position[1],\
            self.side_length, self.side_length]
        # draw the previous position of the character as an inactive block
        pygame.draw.rect(self.screen, self.na_colour, current_rect)
        # update the screen at the current point
        pygame.display.update(current_rect)
        # draw the next position of the character
        pygame.draw.rect(self.screen, self.a_colour, next_rect)
        # update the screen at the next point
        pygame.display.update(next_rect)
        # update the current position of the character to the next position
        self.current_position = next_position
    # draw the intermediate steps when moving a character
    def move_character_smooth(self, next_position, steps):
        # Intermediate positions are float tuples (true division); the final
        # iteration (i == steps) lands exactly on next_position.
        # go right
        if next_position[0] != self.current_position[0]:
            # from i = 1 to steps
            for i in range(1,steps+1):
                # short delay between each intermediate step
                sleep(0.005)
                difference = (next_position[0]-self.current_position[0])*i/steps
                next_pos = (self.current_position[0]+difference, self.current_position[1])
                self.move_character(next_pos)
        else:
            # vertical move: interpolate along the y axis instead
            for i in range(1,steps+1):
                sleep(0.005)
                difference = (next_position[1]-self.current_position[1])*i/steps
                next_pos = (self.current_position[0], self.current_position[1]+difference)
                self.move_character(next_pos)
    # return the current position of the character
    def get_current_position(self):
        return self.current_position
    # end goal flag
    def reached_goal(self):
        # True once the character occupies the maze's end point.
        if self.current_position == self.end_point:
            return True
        else:
            return False
|
3,215 | b5949b40d731178bdbab776af8877921dcdfbf15 | class _ProtectedClass:
pass
# Lint/type-check fixture: these definitions exist to exercise analysis
# tooling (note the noqa codes). `variable` is intentionally annotated but
# never assigned, so importing this module for its behavior will fail at
# runtime -- leave the noqa markers and names exactly as they are.
class MyClass:
    pass
class OtherClass(MyClass):
    pass
def _protected_fun() -> MyClass:
    return variable # noqa: F821
def my_fun() -> MyClass:
    return variable # noqa: F821
def my_fun2() -> MyClass:
    return variable # noqa: F821
variable: MyClass
variable_with_value: MyClass = MyClass()
__all__ = [ # noqa: F822
    "OtherClass",
    "my_fun2",
    "variable",
]
|
3,216 | 4f19eed272c12be137df92bfd3c72e978408c974 | #!/usr/bin/python3
__author__ = "yang.dd"
"""
example 090
"""
# list
# 新建list
testList = [10086, "中国移动", [1, 2, 3, 4]]
# 访问列表长度
print("list len: ", len(testList))
# 切片
print("切片(slice):", testList[1:])
# 追加
print("追加一个元素")
testList.append("i'm new here!");
print("list len: ", len(testList))
print("last item :", testList[-1])
print("pop: ", testList.pop())
print("list len: ", len(testList))
print(testList)
matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
print(matrix)
print(matrix[1])
col2 = [x[1] for x in matrix]
print(col2)
col2even = [x[1] for x in matrix if x[1] % 2 == 0]
print(col2even) |
3,217 | daa287eeb967d47c9a8420beccf531d9c157e925 | from setuptools import setup
setup(
    name='discord-ext-menus',
    author='TierGamerpy',
    url='https://github.com/TierGamerpy/discord-ext-menus',
    # setuptools expects the version as a string; the previous bare float
    # (version=0.1) is rejected by modern setuptools metadata validation.
    version='0.1',
    packages=['discord.ext.menus'],
    description='An extension module to make reaction based menus with discord.py',
    install_requires=['discord.py>=1.2.5'],
    python_requires='>=3.5.3',
)
|
3,218 | 4c43c181dbba1680e036750a2a2ea1185bbe91da | from django.shortcuts import render
from rest_framework import generics
from rest_framework import mixins
from django.contrib.auth.models import User
from rest_framework import permissions
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.request import Request
from rest_framework.reverse import reverse
from rest_framework import renderers
from rest_framework import viewsets
# Create your views here.
from rest_framework.decorators import action
from community.csrfsession import CsrfExemptSessionAuthentication
from .serializers import InstitutionSerializer, UserSerializer
from .models import Institution
from rest_framework.exceptions import PermissionDenied
from community.permissions import isInstitutionAdmin, getUserInstitution, belongsToInstitution, canUpdateProfile
from community.filters import applyUserFilters, applyInstitutionFilters
from community.mappings import generateKeys
from django.db.models import Q
class InstitutionViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides `list`, `create`, `retrieve`,
    `update` and `destroy` actions.

    Access rules:
      * list: superusers get a filtered queryset; response keys are remapped.
      * retrieve: only members of the institution may view it.
      * update: only institution admins may modify it.
    """
    queryset = Institution.objects.all()
    serializer_class = InstitutionSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly, )
    authentication_classes = (CsrfExemptSessionAuthentication, )

    def list(self, request, *args, **kwargs):
        # NOTE(review): only superusers get the filter applied; everyone
        # else sees the unfiltered queryset -- confirm this is intended.
        if request.user.is_superuser:
            self.queryset = applyInstitutionFilters(request, Institution, *args, **kwargs)
        response = super(InstitutionViewSet, self).list(request, *args, **kwargs)
        response = generateKeys(response, self.serializer_class)
        return response

    def retrieve(self, request, *args, **kwargs):
        if not belongsToInstitution(request, self.get_object()):
            raise PermissionDenied(detail='User does not belong to the institution', code=None)
        return super(InstitutionViewSet, self).retrieve(request, *args, **kwargs)

    def update(self, request, *args, **kwargs):
        if not isInstitutionAdmin(request, self.get_object()):
            raise PermissionDenied(detail='User is not an admin_user', code=None)
        # BUG FIX: this previously delegated to retrieve(), so PUT/PATCH
        # requests passed the permission check but never saved anything.
        return super(InstitutionViewSet, self).update(request, *args, **kwargs)

    def get_permissions(self):
        """
        Instantiates and returns the list of permissions that this view requires.
        """
        from rest_framework.permissions import IsAuthenticated, IsAdminUser
        if self.action == 'retrieve' or self.action == 'update':
            permission_classes = [IsAuthenticated]
        else:
            permission_classes = [IsAdminUser]
        return [permission() for permission in permission_classes]
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """
    This viewset automatically provides `list` and `detail` actions.
    """
    # Read-only: no create/update/destroy routes are exposed for users.
    queryset = User.objects.all()
    serializer_class = UserSerializer
@api_view(['GET'])
def api_root(request, format=None):
    """API entry point listing the top-level collections (users, institutions)."""
    # BUG FIX: removed the dead local `authentication_classes = []` -- a
    # function-local assignment never configures DRF authentication.
    return Response({
        'users': reverse('user-list', request=request, format=format),
        'institutions': reverse('institution-list', request=request, format=format)
    })
3,219 | c8dc143c09aa7f677167a4942ae1c4a0fbf75128 | from django import forms
from .models import GetInTouch
class GetInTouchForm(forms.ModelForm):
    """ModelForm exposing every field of the GetInTouch model."""
    class Meta:
        model = GetInTouch
        fields = '__all__'
3,220 | 3eaced9609c7adfa5457d7dcad8b2dfaeb697b16 | ##############################################
# Binary Tree #
# by Vishal Nirmal #
# #
# A Binary Tree ADT implementation. #
##############################################
class BinaryTree:
    """A binary tree ADT whose nodes are ``BinaryTree`` instances.

    ``insert`` fills the tree in level (breadth-first) order, so inserting
    1..7 into an empty tree yields a complete tree.  An "empty" tree is a
    node whose ``data`` is still None.

    Fixes relative to the previous revision:
      * height()/level() no longer report 0 for skewed (one-child) trees.
      * search() returns False (not None) when the value is absent.
      * max()/min() no longer clamp the answer at 0.
      * allPaths() printed a stale slot past the end of the path and shared
        one mutable default list across calls.
      * delete() actually clears the tree instead of being a no-op.
    """

    def __init__(self, data=None):
        self.data = data    # value stored at this node
        self.left = None    # left child (BinaryTree) or None
        self.right = None   # right child (BinaryTree) or None

    def insert(self, data):
        """Insert *data* into the first free child slot in level order."""
        if self.data is None:
            self.data = data
            return
        queue = [self]
        while queue:
            node = queue.pop(0)
            if node.left is None:
                node.left = BinaryTree(data)
                return
            queue.append(node.left)
            if node.right is None:
                node.right = BinaryTree(data)
                return
            queue.append(node.right)

    def insertNodes(self, arr):
        """Insert every value in *arr*, in order."""
        for value in arr:
            self.insert(value)

    def preorder(self):
        """Print root -> left -> right, space separated (no newline)."""
        print(self.data, end=' ')
        if self.left:
            self.left.preorder()
        if self.right:
            self.right.preorder()

    def inorder(self):
        """Print left -> root -> right."""
        if self.left:
            self.left.inorder()
        print(self.data, end=' ')
        if self.right:
            self.right.inorder()

    def postorder(self):
        """Print left -> right -> root."""
        if self.left:
            self.left.postorder()
        if self.right:
            self.right.postorder()
        print(self.data, end=' ')

    def levelorder(self):
        """Print the tree breadth-first."""
        queue = [self]
        while queue:
            node = queue.pop(0)
            print(node.data, end=' ')
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)

    def height(self):
        """Height in edges; a leaf has height 0.

        Bug fix: previously returned 0 whenever either child was missing,
        which under-reported the height of any skewed tree.
        """
        left_h = self.left.height() if self.left else -1
        right_h = self.right.height() if self.right else -1
        return max(left_h, right_h) + 1

    def level(self):
        """Levels below this node, in edges (same quantity as height())."""
        return self.height()

    def search(self, data):
        """Return True if *data* occurs anywhere in the tree, else False.

        Bug fix: previously returned None (instead of False) when the value
        was absent and the right subtree was missing.
        """
        if self.data == data:
            return True
        if self.left and self.left.search(data):
            return True
        if self.right and self.right.search(data):
            return True
        return False

    def size(self):
        """Total number of nodes in the tree."""
        left_n = self.left.size() if self.left else 0
        right_n = self.right.size() if self.right else 0
        return left_n + right_n + 1

    def max(self):
        """Largest stored value.

        Bug fix: the accumulator used to start at 0, so an all-negative
        tree incorrectly reported 0.
        """
        best = self.data
        if self.left:
            best = max(best, self.left.max())
        if self.right:
            best = max(best, self.right.max())
        return best

    def min(self):
        """Smallest stored value (fix mirrors max(): no clamping at 0)."""
        best = self.data
        if self.left:
            best = min(best, self.left.min())
        if self.right:
            best = min(best, self.right.min())
        return best

    def deepest(self):
        """Value of the last node in level order (deepest, right-most)."""
        queue = [self]
        node = self
        while queue:
            node = queue.pop(0)
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        return node.data

    def leafNodes(self):
        """Number of leaf nodes."""
        if self.left is None and self.right is None:
            return 1
        count = self.left.leafNodes() if self.left else 0
        if self.right:
            count += self.right.leafNodes()
        return count

    def fullNodes(self):
        """Number of nodes having both children."""
        count = 1 if (self.left and self.right) else 0
        if self.left:
            count += self.left.fullNodes()
        if self.right:
            count += self.right.fullNodes()
        return count

    def halfNodes(self):
        """Number of nodes having exactly one child."""
        count = 1 if bool(self.left) != bool(self.right) else 0
        if self.left:
            count += self.left.halfNodes()
        if self.right:
            count += self.right.halfNodes()
        return count

    def allPaths(self, path=None, pathlen=0):
        """Print every root-to-leaf path as ``v1->v2->...->leaf``.

        Bug fixes: the final element printed used to be a stale slot one
        past the end of the path, and the path buffer was a shared mutable
        default argument.
        """
        if path is None:
            path = [0] * 1000
        path[pathlen] = self.data
        pathlen += 1
        if self.left is None and self.right is None:
            print('->'.join(str(path[i]) for i in range(pathlen)))
            return
        if self.left:
            self.left.allPaths(path, pathlen)
        if self.right:
            self.right.allPaths(path, pathlen)

    def sum(self):
        """Sum of all stored values."""
        total = self.data
        if self.left:
            total += self.left.sum()
        if self.right:
            total += self.right.sum()
        return total

    def delete(self):
        """Clear the whole tree rooted here.

        Bug fix: the previous version only rebound a local variable and
        therefore deleted nothing.
        """
        if self.left:
            self.left.delete()
        if self.right:
            self.right.delete()
        self.left = None
        self.right = None
        self.data = None
3,221 | 56c5c515de8490f2e3516563e037c375aba03667 | #!/usr/bin/python
# ~~~~~============== HOW TO RUN ==============~~~~~
# 1) Configure things in CONFIGURATION section
# 2) Change permissions: chmod +x bot.py
# 3) Run in loop: while true; do ./bot.py; sleep 1; done
from __future__ import print_function
import sys
import socket
import json
import time
# ~~~~~============== CONFIGURATION ==============~~~~~
# replace REPLACEME with your team name!
team_name="BULBASAUR"
# This variable dictates whether or not the bot is connecting to the prod
# or test exchange. Be careful with this switch!
test_mode = True
# This setting changes which test exchange is connected to.
# 0 is prod-like
# 1 is slower
# 2 is empty
test_exchange_index=0
prod_exchange_hostname="production"
# Test ports are offset by the exchange index; production uses 25000.
port=25000 + (test_exchange_index if test_mode else 0)
exchange_hostname = "test-exch-" + team_name if test_mode else prod_exchange_hostname
# ~~~~~============== NETWORKING CODE ==============~~~~~
def connect():
    """Open a TCP connection to the exchange and wrap it in a line-buffered
    read/write file object."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((exchange_hostname, port))
    return s.makefile('rw', 1)
def write_to_exchange(exchange, obj):
    """Serialize *obj* as one JSON line and write it to the exchange stream."""
    exchange.write(json.dumps(obj))
    exchange.write("\n")
def read_from_exchange(exchange):
    """Read one newline-terminated JSON message and return it decoded."""
    line = exchange.readline()
    return json.loads(line)
# ~~~~~============== MAIN LOOP ==============~~~~~
# Shared mutable state for the trading loop.
exchange = None  # file-like handle returned by connect()
orders_placed = 0  # monotonically increasing order id
pending_orders = []  # ids of every order ever placed
pending_buy_orders = {"BOND": 0, "VALBZ": 0, "VALE": 0, "XLF": 0}  # unfilled buy size per symbol
pending_sell_orders = {"BOND": 0, "VALBZ": 0, "VALE": 0, "XLF": 0}  # unfilled sell size per symbol
positions = {"BOND": 0, "VALBZ": 0, "VALE": 0, "XLF": 0}  # current holdings per symbol
vale_buy_pending_id = None  # id of the resting VALE buy order, if any
vale_sell_pending_id = None  # id of the resting VALE sell order, if any
vale_sell = 0  # best ask seen for VALE (0 until a book message arrives)
vale_buy = 0  # best bid seen for VALE
xlf_buy_pending_id = None  # id of the resting XLF buy order, if any
xlf_sell_pending_id = None  # id of the resting XLF sell order, if any
xlf_sell = 0  # best ask seen for XLF
xlf_buy = 0  # best bid seen for XLF
def main():
    """Connect, seed positions from the hello message, quote BOND, then
    process exchange messages forever."""
    global exchange
    exchange = connect()
    hello()
    hello_from_exchange = read_from_exchange(exchange)
    # A common mistake people make is to call write_to_exchange() > 1
    # time for every read_from_exchange() response.
    # Since many write messages generate marketdata, this will cause an
    # exponential explosion in pending messages. Please, don't do that!
    print("The exchange replied:", hello_from_exchange, file=sys.stderr)
    global positions
    # NOTE(review): positions are read by fixed index into the hello
    # message's symbol list -- assumes a fixed symbol ordering; confirm.
    positions["BOND"] = hello_from_exchange["symbols"][0]["position"]
    positions["VALE"] = hello_from_exchange["symbols"][5]["position"]
    positions["VALBZ"] = hello_from_exchange["symbols"][4]["position"]
    positions["XLF"] = hello_from_exchange["symbols"][7]["position"]
    # Quote BOND around fair value 1000, sized to the +/-100 position limit.
    add("BOND", "BUY", 999, 100 - positions["BOND"])
    add("BOND", "SELL", 1001, 100 + positions["BOND"])
    while (True):
        server_msg = read_from_exchange(exchange)
        buy_sell_vale()
        buy_sell_xlf()
        listen_for_fills(server_msg)
        listen_for_book(server_msg)
        listen_for_errors(server_msg)
def hello():
    """Identify the team to the exchange (required first message)."""
    write_to_exchange(exchange, {"type": "hello", "team": team_name.upper()})
def add(symbol, direction, price, size):
    """Place a limit order and update the pending-order bookkeeping.

    Side effects: bumps the global order id, appends it to pending_orders,
    adjusts pending_buy_orders/pending_sell_orders, writes the order to the
    exchange, and consumes one response line.
    """
    # Update order id to be order placed number
    global orders_placed
    orders_placed += 1
    # Add to pending orders list
    global pending_orders
    pending_orders.append(orders_placed)
    #if symbol == "VALE":
    print("Order Placed: " + str(orders_placed) + " Position: " + str(positions[symbol])+ " Size: " + str(size) + " Dir: " + direction + " Symbol: " + symbol + " Price: " + str(price) + "")
    # Increment Buy Orders If Necessary
    if (direction == "BUY"):
        global pending_buy_orders
        pending_buy_orders[symbol] += size
    elif (direction == "SELL"):
        global pending_sell_orders
        pending_sell_orders[symbol] += size
    # Add order to exchange
    write_to_exchange(exchange, {"type": "add", "order_id": orders_placed, "symbol": symbol,
                    "dir":direction, "price":price, "size": size })
    # NOTE(review): this swallows one message from the exchange; if it is
    # not the ack for this order, the main loop misses that message.
    read_from_exchange(exchange)
def cancel(order_id):
    """Ask the exchange to cancel the resting order *order_id*."""
    write_to_exchange(exchange, {"type": "cancel", "order_id": order_id})
def listen_for_fills(server_msg):
    """React to fill messages: requote BOND and track VALE/XLF positions."""
    if (server_msg["type"] == "fill"):
        # Get info of filled order
        order_num = server_msg["order_id"]
        symbol = server_msg["symbol"]
        size = server_msg["size"]
        direction = server_msg["dir"]
        global positions
        # Update bond order fill and buy/sell as necessary
        if (symbol == "BOND"):
            # print("Bond Order Partially Filled: " + str(order_num))
            if (direction == "BUY"):
                pending_buy_orders[symbol] -= size
                add("BOND", "SELL", 1001, size)
            elif (direction == "SELL"):
                pending_sell_orders[symbol] -= size
                add("BOND", "BUY", 999, size)
        # Update Vale Order fill and hedge as necessary
        if (symbol == "VALE"):
            print("Vale Order Filled: " + str(order_num) + " " + direction + " Size: " + str(size))
            if (direction == "BUY"):
                pending_buy_orders[symbol] -= size
                positions["VALE"] += size
            elif (direction == "SELL"):
                positions["VALE"] -= size
                pending_sell_orders[symbol] -= size
        if (symbol == "XLF"):
            print("XLF Order Filled: " + str(order_num) + " " + direction + " Size: " + str(size))
            if (direction == "BUY"):
                pending_buy_orders[symbol] -= size
                positions["XLF"] += size
            elif (direction == "SELL"):
                positions["XLF"] -= size
                pending_sell_orders[symbol] -= size
def listen_for_book(server_msg):
    """Track top-of-book prices for VALE and XLF from book messages."""
    if (server_msg["type"] == "book"):
        global vale_sell
        global vale_buy
        global xlf_sell
        global xlf_buy
        if (server_msg["symbol"] == "VALE"):
            if len(server_msg["sell"]) > 0:
                vale_sell = server_msg["sell"][0][0]  # best ask price
            if len(server_msg["buy"]) > 0:
                vale_buy = server_msg["buy"][0][0]  # best bid price
        if (server_msg["symbol"] == "XLF"):
            if len(server_msg["sell"]) > 0:
                xlf_sell = server_msg["sell"][0][0]
            if len(server_msg["buy"]) > 0:
                xlf_buy = server_msg["buy"][0][0]
def buy_sell_vale():
    """Maintain a single resting VALE order one tick inside the spread.

    Buys up toward a +10 position, otherwise sells down toward -10,
    cancelling any previous resting order first.
    """
    if vale_buy > 0 and vale_sell > 0:
        global pending_sell_orders
        global pending_buy_orders
        if ( pending_buy_orders["VALE"] + positions["VALE"] < 10):
            global vale_buy_pending_id
            if vale_buy_pending_id:
                cancel(vale_buy_pending_id)
                pending_buy_orders["VALE"] = 0
                vale_buy_pending_id = None
                print("Cancel VALE BUY Order: " + str(orders_placed))
                time.sleep(1)
            # Bid one tick above the best bid, sized up to the +10 limit.
            num_stock = 10 - positions["VALE"]
            add("VALE", "BUY", vale_buy + 1, num_stock)
            vale_buy_pending_id = orders_placed
        elif (positions["VALE"] - pending_sell_orders["VALE"] > -10):
            global vale_sell_pending_id
            if vale_sell_pending_id:
                print("Cancel VALE Sell Order: " + str(orders_placed))
                cancel(vale_sell_pending_id)
                pending_sell_orders["VALE"] = 0
                vale_sell_pending_id = None
                time.sleep(1)
            # BUG FIX: selling down to the -10 limit requires position + 10
            # shares; the old `10 - positions` copied the buy branch and
            # mis-sized the order (cf. buy_sell_xlf, which uses 100 + pos).
            num_stock = 10 + positions["VALE"]
            add("VALE", "SELL", vale_sell - 1, num_stock)
            vale_sell_pending_id = orders_placed
def buy_sell_xlf():
    """Maintain a single resting XLF order one tick inside the spread,
    trading toward the +/-100 position limit."""
    if xlf_buy > 0 and xlf_sell > 0:
        global pending_sell_orders
        global pending_buy_orders
        if ( pending_buy_orders["XLF"] + positions["XLF"] < 100):
            global xlf_buy_pending_id
            if xlf_buy_pending_id:
                cancel(xlf_buy_pending_id)
                pending_buy_orders["XLF"] = 0
                xlf_buy_pending_id = None
                print("Cancel XLF Order: " + str(orders_placed))
                time.sleep(1)
            # Bid one tick above best bid, sized up to the +100 limit.
            add("XLF", "BUY", xlf_buy + 1, 100 - positions["XLF"])
            xlf_buy_pending_id = orders_placed
        elif (positions["XLF"] - pending_sell_orders["XLF"] > -100):
            global xlf_sell_pending_id
            if xlf_sell_pending_id:
                print("Cancel XLF Order: " + str(orders_placed))
                cancel(xlf_sell_pending_id)
                pending_sell_orders["XLF"] = 0
                xlf_sell_pending_id = None
                time.sleep(1)
            # Offer one tick below best ask, sized down to the -100 limit.
            add("XLF", "SELL", xlf_sell - 1, 100 + positions["XLF"])
            xlf_sell_pending_id = orders_placed
def listen_for_errors(server_msg):
    """Log reject/error/ack/out messages from the exchange."""
    if (server_msg["type"] == "reject"):
        print("ERROR: ORDER FAILED, id: " + str(server_msg["order_id"]) + " " + server_msg["error"])
    if (server_msg["type"] == "error"):
        # BUG FIX: this previously printed the builtin function `id`
        # instead of the order id carried by the message.
        print("ERROR: ORDER FAILED, id: " + str(server_msg.get("order_id")) + " " + server_msg["error"])
    if (server_msg["type"] == "ack"):
        print("Order Completed: " + str(server_msg["order_id"]))
    if (server_msg["type"] == "out"):
        print("Order Successfully Canceled: " + str(server_msg["order_id"]))
#add("BOND", "BUY", 999, 100 - positions["BOND"])
#add("BOND", "SELL", 1001, 100 + positions["BOND"])
# Script entry point: connect to the exchange and trade until killed.
if __name__ == "__main__":
    main()
|
3,222 | 59596c69df6a2c453fd147a9c8a2c7d47ed79fb3 | # Generated by Django 2.1.2 on 2018-10-26 12:40
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the PlayerCards model to PlayerCard and drops the unused
    # Profile.cards relation.
    dependencies = [
        ('core', '0007_auto_20181010_0852'),
        ('accounts', '0004_playercards'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='PlayerCards',
            new_name='PlayerCard',
        ),
        migrations.RemoveField(
            model_name='profile',
            name='cards',
        ),
    ]
|
3,223 | 18689741a33e6d17e694ee0619a1f36d8d178cbb | from django.shortcuts import *
from shop.models import *
from django.db import transaction
from django.core.exceptions import *
@transaction.atomic
def computers(request):
    """Computer catalogue view.

    GET renders every computer; POST narrows the queryset with the
    submitted filters (substring match on id/cpu/graphics card, numeric
    ranges on memory/ssd/disk, brand match and ordering).
    """
    ctx = {}
    computer = Computer.objects.all()
    ctx['brand'] = Brand.objects.all()
    if request.method == 'POST':
        # Substring filters -- blank fields are skipped.
        if request.POST['computer_id'] != '':
            computer = computer.filter(computer_id__icontains=request.POST['computer_id'])
        if request.POST['cpu'] != '':
            computer = computer.filter(cpu__icontains=request.POST['cpu'])
        if request.POST['graphics_card'] != '':
            computer = computer.filter(graphics_card__icontains=request.POST['graphics_card'])
        try:
            # Numeric ranges: "min" keeps >= bound, "max" excludes >= bound.
            if request.POST['minMemory'] != '':
                computer = computer.filter(memory__gte=int(request.POST['minMemory']))
            if request.POST['maxMemory'] != '':
                computer = computer.exclude(memory__gte=int(request.POST['maxMemory']))
            if request.POST['minssd'] != '':
                computer = computer.filter(ssd_capacity__gte=int(request.POST['minssd']))
            if request.POST['maxssd'] != '':
                computer = computer.exclude(ssd_capacity__gte=int(request.POST['maxssd']))
            if request.POST['minDisk'] != '':
                computer = computer.filter(disk_capacity__gte=int(request.POST['minDisk']))
            if request.POST['maxDisk'] != '':
                computer = computer.exclude(disk_capacity__gte=int(request.POST['maxDisk']))
        except ValueError:
            # Non-integer bound entered; error page asks for an integer.
            return render(request, 'Dashio/error.html', {'error': "请输入整数"})
        if request.POST.get('brand', '') != '':
            # NOTE(review): leftover debug print.
            print(request.POST['brand'])
            computer = computer.filter(brand__name__icontains=request.POST['brand'])
        if request.POST['sort'] != '':
            # e.g. sortType '-' + sort 'price' -> order_by('-price')
            sortKey = request.POST['sortType'] + request.POST['sort']
            computer = computer.order_by(sortKey)
    ctx['computer'] = computer
    return render(request, "Dashio/computers.html", ctx)
@transaction.atomic
def details(request, computer_id):
    """Computer detail page: specs, marks, sellers, purchases and comments."""
    rtx = {}
    rtx['isUser'] = request.session['type'] == 'user'
    rtx['computer'] = get_object_or_404(Computer, pk=computer_id)
    rtx['markAmount'] = mark.objects.filter(computer_id__computer_id=computer_id).count()
    rtx['sell'] = Sell.objects.filter(computer_id__computer_id=computer_id)
    rtx['user_id'] = request.session['id']
    rtx['sellAmount'] = Buy.objects.filter(computer_id__computer_id=computer_id).count()
    rtx['comments'] = computer_comment.objects.filter(computer_id__computer_id=computer_id).order_by('-comment_date')
    rtx['buys'] = Buy.objects.filter(computer_id__computer_id=computer_id).order_by('-buy_time')[:5]
    if rtx['isUser']:
        # Button label toggles between mark ("收藏") and unmark ("取消收藏").
        rtx['mark'] = ('收藏' if mark.objects.filter(user_id__user_id=rtx['user_id'], computer_id=rtx['computer']).count() == 0 else '取消收藏')
    return render(request, 'Dashio/computer_detail.html', rtx)
@transaction.atomic
def post(request, user_id, computer_id):
    """Save a comment by *user_id* on *computer_id*, then redirect back to
    the computer's detail page."""
    if request.method == 'POST':
        computer = Computer.objects.get(pk=computer_id)
        user = User.objects.get(pk=user_id)
        computer_comment(computer_id=computer, user_id=user, content=request.POST['comment']).save()
    return HttpResponseRedirect(reverse('shop:computerDetail', args=(computer_id, )))
def makeMark(request, computer_id, user_id):
    """Toggle a bookmark ("mark") on *computer_id* for *user_id*."""
    try:
        # EAFP: remove an existing mark if there is one ...
        m = mark.objects.get(computer_id__computer_id=computer_id, user_id__user_id=user_id)
        m.delete()
    except ObjectDoesNotExist:
        # ... otherwise create it.
        computer = get_object_or_404(Computer, pk=computer_id)
        user = get_object_or_404(User, pk=user_id)
        mark(computer_id=computer, user_id=user).save()
    return HttpResponseRedirect(reverse('shop:computerDetail', args=(computer_id, )))
3,224 | 4989db28db0f823a54ff0942fbc40fc4640da38f | #!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds all assets under src/rawassets/, writing the results to assets/.
Finds the flatbuffer compiler and cwebp tool and then uses them to convert the
JSON files to flatbuffer binary files and the png files to webp files so that
they can be loaded by the game. This script also includes various 'make' style
rules. If you just want to build the flatbuffer binaries you can pass
'flatbuffer' as an argument, or if you want to just build the webp files you can
pass 'cwebp' as an argument. Additionally, if you would like to clean all
generated files, you can call this script with the argument 'clean'.
"""
import distutils.spawn
import glob
import os
import platform
import subprocess
import sys
# The project root directory, which is one level up from this script's
# directory.
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                            os.path.pardir))
# Root of the shared prebuilt binaries tree, several levels above the project.
PREBUILTS_ROOT = os.path.abspath(os.path.join(os.path.join(PROJECT_ROOT),
                                              os.path.pardir, os.path.pardir,
                                              os.path.pardir, os.path.pardir,
                                              'prebuilts'))
# Directories that may contains the FlatBuffers compiler.
FLATBUFFERS_PATHS = [
    os.path.join(PROJECT_ROOT, 'bin'),
    os.path.join(PROJECT_ROOT, 'bin', 'Release'),
    os.path.join(PROJECT_ROOT, 'bin', 'Debug'),
]
# Directory that contains the cwebp tool.
CWEBP_BINARY_IN_PATH = distutils.spawn.find_executable('cwebp')
CWEBP_PATHS = [
    os.path.join(PROJECT_ROOT, 'bin'),
    os.path.join(PROJECT_ROOT, 'bin', 'Release'),
    os.path.join(PROJECT_ROOT, 'bin', 'Debug'),
    os.path.join(PREBUILTS_ROOT, 'libwebp',
                 '%s-x86' % platform.system().lower(),
                 'libwebp-0.4.1-%s-x86-32' % platform.system().lower(), 'bin'),
    os.path.dirname(CWEBP_BINARY_IN_PATH) if CWEBP_BINARY_IN_PATH else '',
]
# Directory to place processed assets.
ASSETS_PATH = os.path.join(PROJECT_ROOT, 'assets')
# Directory where unprocessed assets can be found.
RAW_ASSETS_PATH = os.path.join(PROJECT_ROOT, 'src', 'rawassets')
# Directory where processed sound flatbuffer data can be found.
SOUND_PATH = os.path.join(ASSETS_PATH, 'sounds')
# Directory where unprocessed sound flatbuffer data can be found.
RAW_SOUND_PATH = os.path.join(RAW_ASSETS_PATH, 'sounds')
# Directory where processed material flatbuffer data can be found.
MATERIAL_PATH = os.path.join(ASSETS_PATH, 'materials')
# Directory where unprocessed material flatbuffer data can be found.
RAW_MATERIAL_PATH = os.path.join(RAW_ASSETS_PATH, 'materials')
# Directory where processed textures can be found.
TEXTURE_PATH = os.path.join(ASSETS_PATH, 'textures')
# Directory where unprocessed textures can be found.
RAW_TEXTURE_PATH = os.path.join(RAW_ASSETS_PATH, 'textures')
# Directory where unprocessed flatbuffer schemas can be found.
SCHEMA_PATH = os.path.join(PROJECT_ROOT, 'src', 'flatbufferschemas')
# Windows uses the .exe extension on executables.
EXECUTABLE_EXTENSION = '.exe' if platform.system() == 'Windows' else ''
# Name of the flatbuffer executable.
FLATC_EXECUTABLE_NAME = 'flatc' + EXECUTABLE_EXTENSION
# Name of the cwebp executable.
CWEBP_EXECUTABLE_NAME = 'cwebp' + EXECUTABLE_EXTENSION
# What level of quality we want to apply to the webp files.
# Ranges from 0 to 100.
WEBP_QUALITY = 90
def processed_json_dir(path):
"""Take the path to a raw json asset and convert it to target directory."""
return os.path.dirname(path.replace(RAW_ASSETS_PATH, ASSETS_PATH))
class FlatbuffersConversionData(object):
  """Holds data needed to convert a set of json files to flatbuffer binaries.
  Attributes:
    schema: The path to the flatbuffer schema file.
    input_files: A list of input files to convert.
    output_path: The path to the output directory where the converted files will
      be placed.
  """
  def __init__(self, schema, input_files, output_path):
    """Initializes this object's schema, input_files and output_path."""
    self.schema = schema              # .fbs schema applied to every input
    self.input_files = input_files    # list of .json files to convert
    self.output_path = output_path    # directory receiving the .bin output
# A list of json files and their schemas that will be converted to binary files
# by the flatbuffer compiler.
FLATBUFFERS_CONVERSION_DATA = [
    FlatbuffersConversionData(
        schema=os.path.join(SCHEMA_PATH, 'config.fbs'),
        input_files=[os.path.join(RAW_ASSETS_PATH, 'config.json')],
        output_path=ASSETS_PATH),
    FlatbuffersConversionData(
        schema=os.path.join(SCHEMA_PATH, 'buses.fbs'),
        input_files=[os.path.join(RAW_ASSETS_PATH, 'buses.json')],
        output_path=ASSETS_PATH),
    FlatbuffersConversionData(
        schema=os.path.join(SCHEMA_PATH, 'sound_assets.fbs'),
        input_files=[os.path.join(RAW_ASSETS_PATH, 'sound_assets.json')],
        output_path=ASSETS_PATH),
    FlatbuffersConversionData(
        schema=os.path.join(SCHEMA_PATH, 'character_state_machine_def.fbs'),
        input_files=[os.path.join(RAW_ASSETS_PATH,
                                  'character_state_machine_def.json')],
        output_path=ASSETS_PATH),
    # Sound and material conversions glob every json in their raw directory.
    FlatbuffersConversionData(
        schema=os.path.join(SCHEMA_PATH, 'sound_collection_def.fbs'),
        input_files=glob.glob(os.path.join(RAW_SOUND_PATH, '*.json')),
        output_path=SOUND_PATH),
    FlatbuffersConversionData(
        schema=os.path.join(SCHEMA_PATH, 'materials.fbs'),
        input_files=glob.glob(os.path.join(RAW_MATERIAL_PATH, '*.json')),
        output_path=MATERIAL_PATH)
]
def processed_texture_path(path):
  """Take the path to a raw png asset and convert it to target webp path.

  Only the '.png' suffix (including the dot) is rewritten, so 'png'
  appearing elsewhere in a directory or file name is left untouched.
  """
  # BUG FIX: previously replace('png', 'webp') rewrote every occurrence
  # of 'png' anywhere in the path, corrupting names containing 'png'.
  return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.png', '.webp')
# PNG files to convert to webp.
# NOTE: both globs are evaluated once, at import time.
PNG_TEXTURES = {
    'input_files': glob.glob(os.path.join(RAW_TEXTURE_PATH, '*.png')),
    'output_files': [processed_texture_path(png_path)
                     for png_path in glob.glob(os.path.join(RAW_TEXTURE_PATH,
                                                            '*.png'))]
}
def find_executable(name, paths):
  """Searches for a file with named `name` in the given paths and returns it."""
  for directory in paths:
    candidate = os.path.join(directory, name)
    if os.path.isfile(candidate):
      return candidate
  # Not found anywhere: fall back to the bare name and let the OS
  # resolve it through the PATH environment variable.
  return name
# Location of FlatBuffers compiler (resolved once at import time).
FLATC = find_executable(FLATC_EXECUTABLE_NAME, FLATBUFFERS_PATHS)
# Location of webp compression tool (resolved once at import time).
CWEBP = find_executable(CWEBP_EXECUTABLE_NAME, CWEBP_PATHS)
class BuildError(Exception):
  """Error indicating there was a problem building assets."""

  def __init__(self, argv, error_code):
    """Record the failing command line and its exit status."""
    super(BuildError, self).__init__()
    self.error_code = error_code
    self.argv = argv
def run_subprocess(argv):
  """Run `argv` as a subprocess, raising BuildError on a nonzero exit."""
  # subprocess.call is Popen(...).wait() in one step.
  returncode = subprocess.call(argv)
  if returncode:
    raise BuildError(argv, returncode)
def convert_json_to_flatbuffer_binary(json, schema, out_dir):
  """Run the flatbuffer compiler on the given json file and schema.
  Args:
    json: The path to the json file to convert to a flatbuffer binary.
    schema: The path to the schema to use in the conversion process.
    out_dir: The directory to write the flatbuffer binary.
  Raises:
    BuildError: Process return code was nonzero.
  """
  run_subprocess([FLATC, '-o', out_dir, '-b', schema, json])
def convert_png_image_to_webp(png, out, quality=80):
  """Run the webp converter on the given png file.
  Args:
    png: The path to the png file to convert into a webp file.
    out: The path of the webp to write to.
    quality: The quality of the processed image, where quality is between 0
      (poor) to 100 (very good). Typical value is around 80.
  Raises:
    BuildError: Process return code was nonzero.
  """
  run_subprocess([CWEBP, '-q', str(quality), png, '-o', out])
def needs_rebuild(source, target):
  """Checks if the source file needs to be rebuilt.
  Args:
    source: The source file to be compared.
    target: The target file which we may need to rebuild.
  Returns:
    True if the source file is newer than the target, or if the target file
    does not exist.
  """
  if not os.path.isfile(target):
    return True
  return os.path.getmtime(source) > os.path.getmtime(target)
def processed_json_path(path):
  """Take the path to a raw json asset and convert it to target bin path."""
  mapped = path.replace(RAW_ASSETS_PATH, ASSETS_PATH)
  return mapped.replace('.json', '.bin')
def generate_flatbuffer_binaries():
  """Run the flatbuffer compiler on the all of the flatbuffer json files."""
  for element in FLATBUFFERS_CONVERSION_DATA:
    schema = element.schema
    output_path = element.output_path
    if not os.path.exists(output_path):
      os.makedirs(output_path)
    for json in element.input_files:
      target = processed_json_path(json)
      # Rebuild when either the json source or its schema is newer than
      # the existing binary.
      if needs_rebuild(json, target) or needs_rebuild(schema, target):
        convert_json_to_flatbuffer_binary(
            json, schema, output_path)
def generate_webp_textures():
  """Run the webp converter on all of the out-of-date png files."""
  input_files = PNG_TEXTURES['input_files']
  output_files = PNG_TEXTURES['output_files']
  if not os.path.exists(TEXTURE_PATH):
    os.makedirs(TEXTURE_PATH)
  # input/output lists are parallel: zip pairs each png with its webp target.
  for png, out in zip(input_files, output_files):
    if needs_rebuild(png, out):
      convert_png_image_to_webp(png, out, WEBP_QUALITY)
def clean_webp_textures():
  """Delete all the processed webp textures."""
  for target in PNG_TEXTURES['output_files']:
    if os.path.isfile(target):
      os.remove(target)
def clean_flatbuffer_binaries():
  """Delete all the processed flatbuffer binaries."""
  for element in FLATBUFFERS_CONVERSION_DATA:
    for source in element.input_files:
      target = processed_json_path(source)
      if os.path.isfile(target):
        os.remove(target)
def clean():
  """Delete all the processed files (flatbuffer binaries, then webp textures)."""
  clean_flatbuffer_binaries()
  clean_webp_textures()
def handle_build_error(error):
  """Prints an error message to stderr for BuildErrors."""
  command = ' '.join(error.argv)
  sys.stderr.write('Error running command `%s`. Returned %s.\n'
                   % (command, str(error.error_code)))
def main(argv):
  """Builds or cleans the assets needed for the game.
  To build all assets, either call this script without any arguments. Or
  alternatively, call it with the argument 'all'. To just convert the
  flatbuffer json files, call it with 'flatbuffers'. Likewise to convert the
  png files to webp files, call it with 'webp'. To clean all converted files,
  call it with 'clean'.
  Args:
    argv: The command line argument containing which command to run.
  Returns:
    Returns 0 on success, 1 on build failure or an unknown target.
  """
  target = argv[1] if len(argv) >= 2 else 'all'
  if target not in ('all', 'flatbuffers', 'webp', 'clean'):
    sys.stderr.write('No rule to build target %s.\n' % target)
    # BUG FIX: previously fell through and returned 0 (success) even
    # though nothing was built; report failure for unknown targets.
    return 1
  if target in ('all', 'flatbuffers'):
    try:
      generate_flatbuffer_binaries()
    except BuildError as error:
      handle_build_error(error)
      return 1
  if target in ('all', 'webp'):
    try:
      generate_webp_textures()
    except BuildError as error:
      handle_build_error(error)
      return 1
  if target == 'clean':
    try:
      clean()
    except OSError as error:
      sys.stderr.write('Error cleaning: %s' % str(error))
      return 1
  return 0
# Allow use both as a module and as `python build_assets.py [target]`.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
|
3,225 | 098488fd10bcf81c4efa198a44d2ff87e4f8c130 | # 約分して、互いに素な(1,3) (3,1)のようなペアを作りカウントする
# Reduce each (a, b) by its gcd so equal directions collapse onto one coprime
# pair such as (1, 3) or (3, 1), and count occurrences of each pair.
# Pairs with positive slope and negative slope are tracked separately.
# When a positive-group pair has its perpendicular partner in the negative
# group, any subset may be taken from one of the two groups, but not both.
# Let P be the number of items that conflict with nobody: multiply by 2^P.
# Of the (0, 0) items, exactly one may be chosen, or none at all.
import sys
readline = sys.stdin.readline
N = int(readline())
import math
zeropair = 0    # count of (0, 0) inputs
zeroa = 0       # count of inputs with a == 0, b != 0
zerob = 0       # count of inputs with b == 0, a != 0
from collections import defaultdict
pluspair = defaultdict(int)     # reduced pairs with a * b > 0
minuspair = defaultdict(int)    # reduced pairs with a * b < 0
for i in range(N):
    a,b = map(int,readline().split())
    if a == 0 and b == 0:
        zeropair += 1
        continue
    if a == 0:
        zeroa += 1
        continue
    if b == 0:
        zerob += 1
        continue
    absa = abs(a)
    absb = abs(b)
    g = math.gcd(absa,absb)
    absa,absb = absa//g,absb//g
    if a * b > 0:
        pluspair[(absa,absb)] += 1
    else:
        minuspair[(absa,absb)] += 1
DIV = 1000000007
ans = 1
# The a==0 and b==0 groups conflict with each other: take any subset from
# one of them, or take nothing (the empty choice is counted exactly once).
ans *= (pow(2,zeroa,DIV) + pow(2,zerob,DIV) - 1) % DIV
ans %= DIV
# Number of items that can coexist with every other item.
allcnt = 0
# For each positive-group pair, look for its partner in the negative group.
for item in pluspair.items():
    a,b = item[0]
    cnt = item[1]
    if (b,a) in minuspair:
        # BUG FIX (performance): the second pow() lacked the modulus and
        # produced huge intermediate integers; the result is unchanged.
        ans *= (pow(2,cnt,DIV) + pow(2,minuspair[(b,a)],DIV) - 1) % DIV
        ans %= DIV
        del minuspair[(b,a)]
    else:
        allcnt += cnt
for val in minuspair.values():
    allcnt += val
ans = (ans * pow(2,allcnt,DIV)) % DIV
# Choosing a (0, 0) item excludes everything else, so the zeropair
# standalone options are added independently of the patterns above.
ans += zeropair
print((ans - 1) % DIV)
|
3,226 | 7d6196268b85861e76efaa53e14976f2eae09405 | import pandas as pd
# Concatenate three tweet CSV chunks row-wise into a single file.
df1 = pd.read_csv('Tweets1.csv', names=['tweet'])
df2 = pd.read_csv('Tweets2.csv', names=['tweet'])
df3 = pd.read_csv('Tweets3.csv', names=['tweet'])
# axis=0 stacks the frames; ignore_index=False keeps the original
# (duplicated) row indices, which are then dropped on write (index=None).
df = pd.concat([df1,df2,df3], axis=0, join='outer', ignore_index=False, keys=None,levels=None, names=None, verify_integrity=False, copy=True)
df.to_csv('Tweets.csv', index=None, header=None)
|
3,227 | 72ce7c48c9d1a7bcdbaead12648d03970663a11e | import tornado.web
import tornado.escape
from torcms.core.base_handler import BaseHandler
from owslib.csw import CatalogueServiceWeb
from owslib.fes import PropertyIsEqualTo, PropertyIsLike, BBox
class DirectorySearchHandler(BaseHandler):
    def initialize(self):
        """Standard Tornado hook; defers entirely to BaseHandler."""
        super(DirectorySearchHandler, self).initialize()
def get(self, url_str=''):
url_arr = self.parse_url(url_str)
if len(url_str) > 0:
url_arr = url_str.split('/')
# if url_str == '':
# self.render('metadata/meta_index.html')
if url_str == '':
self.list('')
elif url_arr[0] == 'search':
if len(url_arr[0]) >= 3:
self.search(url_arr[1], url_arr[2], url_arr[3], url_arr[4])
else:
self.search(url_arr[1], url_arr[2], '', 10)
elif url_arr[0] == 'view':
self.ajax_get(url_arr[1], url_arr[2])
# def post(self, *args, **kwargs):
# post_data = self.get_request_arguments()
# keyword = post_data.get('keyw9', '')
# isweb = post_data.get('isweb', '1')
# ldrt = post_data.get('ldrt', '')
# maxrecords = post_data.get('maxrecords', 20)
#
# self.redirect('/directory_search/search/{0}/{1}/{2}/{3}'.format(keyword, isweb, ldrt, maxrecords))
# def search(self, keyw):
# # print('====' * 40)
# # print(post_data)
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}
# &maximumRecords=5&startRecord=5&outputFormat=application/json'.format(
# keyw)
# r = requests.get(url)
# pprint.pprint(r.text)
# self.parseXML(r.text.encode(encoding='UTF-8'))
def list(self, keyw):
# print('====' * 40)
# print(post_data)
keyw = 'data'
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query_like], maxrecords=20)
print('-' * 20)
print(csw.results)
for rec in csw.results:
print(rec)
# out_dic = {}
# for rec in csw.records:
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}\
# maximumRecords=5&startRecord=5&outputFormat=application/json'.format(
# keyw)
# r = requests.get(url)
# pprint.pprint(r.text)
self.render('../torcms_dde/search/meta_index.html',
meta_results=csw.records,
userinfo=self.userinfo)
# self.parseXML(r.text.encode(encoding='UTF-8'))
def search(self, keyw, isweb, ldrt, max_num):
# print('=' * 40)
# print(ldrt)
post_data = self.get_request_arguments()
startnum = post_data.get('startnum', 0)
startposition = int(startnum) * int(max_num) +1
print("," * 50)
print(startnum)
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
# birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
if ldrt:
print('=' * 40)
print(type(ldrt))
print(ldrt)
print('=' * 40)
xx_ldrt = [float(x) for x in ldrt.split(',')]
xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]
print(xx_ldrt)
bbox_query = BBox(xx_ldrt)
if isweb == '1':
# birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[bbox_query], startposition=startposition,maxrecords=max_num)
else:
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query, bbox_query], maxrecords=max_num, startposition=startposition,
distributedsearch=True,
hopcount=2)
else:
if isweb == '1':
birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query], startposition=startposition,maxrecords=max_num)
else:
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query], maxrecords=max_num, startposition=startposition, distributedsearch=True,
hopcount=2)
print('-' * 20)
print(isweb)
print(csw.results)
for rec in csw.records:
print(rec)
# out_dic = {}
# for rec in csw.records:
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}&
# maximumRecords=5&startRecord=5&outputFormat=application/json'.format(
# keyw)
# r = requests.get(url)
# pprint.pprint(r.text)
self.render('../torcms_dde/search/show_result.html',
meta_results=csw.records,
userinfo=self.userinfo,
isweb=isweb,
startnum = startnum
)
# self.parseXML(r.text.encode(encoding='UTF-8'))
# def get_result(self, post_data):
# print('====' * 40)
# print(post_data)
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}
# &maximumRecords=5&startRecord=5'.format(
# post_data['keyw'][0])
# r = requests.get(url)
# pprint.pprint(r.text)
# self.parseXML(r.text.encode(encoding='UTF-8'))
# # data = urllib.request.Request(url)
def ajax_get(self, uuid, isweb):
print('=' * 20)
print(uuid)
# uuid = uuid.split(':')[-1]
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
# birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecordbyid(id=[uuid])
print('-' * 20)
print(csw.getrecordbyid(id=[uuid]))
if isweb == '1':
rec = csw.records.get(uuid)
else:
birds_query = PropertyIsLike('csw:AnyText', uuid)
csw.getrecords2(constraints=[birds_query], maxrecords=20, startposition=0, distributedsearch=True,
hopcount=2)
print(csw.results)
for key in csw.records:
rec = csw.records[key]
out_dict = {
'title': '',
'uid': '',
'sizhi': '',
}
self.render('../torcms_dde/search/show_rec.html',
kws=out_dict,
# meta_rec=csw.records.get(uuid),
meta_rec=rec,
unescape=tornado.escape.xhtml_unescape,
userinfo=self.userinfo
)
# #
# def parseXML(self, data):
#
# tree = etree.fromstring(data)
# # root = tree.getroot()
# uu = tree.findall('zs:record', tree.nsmap)
#
# meta_arr = []
# for x in uu:
# meta_arr.append(MyXML(x))
# # print(x.element('ows:LowerCorner'))
# # uu = etree.SubElement(x, "LowerCorner")
# # for sub_ele in x.iter():
# # print(sub_ele.tag)
# # if 'title' == sub_ele.tag.split('}')[1]:
# # print(sub_ele.text)
# # if 'LowerCorner' == sub_ele.tag.split('}')[1]:
# # print(sub_ele.text)
#
# self.render('metadata/show_result.html',
# meta_arr=meta_arr)
class MyXML():
    """Thin accessor around a CSW record XML element.

    Tags are matched on their local name (the part after the namespace's
    closing '}'), so the wrapped element is expected to use namespaced tags.
    """

    def __init__(self, in_ele):
        self.element = in_ele

    def _find_text(self, local_name):
        # Text of the first descendant whose namespace-stripped tag matches.
        for node in self.element.iter():
            if node.tag.split('}')[1] == local_name:
                return node.text

    def uid(self):
        """Record identifier, or None if absent."""
        return self._find_text('identifier')

    def recordPosition(self):
        """Record position within the result set (as text)."""
        return self._find_text('recordPosition')

    def sizhi(self):
        """Bounding box as [lower_x, upper_x, lower_y, upper_y] floats."""
        box = [0, 0, 0, 0]
        for node in self.element.iter():
            local = node.tag.split('}')[1]
            if local == 'LowerCorner':
                lo = node.text.split(' ')
                box[0] = float(lo[0])
                box[2] = float(lo[1])
            elif local == 'UpperCorner':
                hi = node.text.split(' ')
                box[1] = float(hi[0])
                box[3] = float(hi[1])
        return box

    def title(self):
        """Record title, or None if absent."""
        return self._find_text('title')
|
3,228 | bae4eb94d561f7aa810718840ff7c2de52cb0d6f | import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy
from app import create_app
from models import *
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
# auth tokens should be updated before running tests,
# make sure update the tokens in setup.sh
# read the README to know more details
CASTING_ASSISTANT_TOKEN = os.environ.get(
"CASTING_ASSISTANT_TOKEN",
"abc123abc1234"
)
CASTING_DIRECTOR_TOKEN = os.environ.get(
"CASTING_DIRECTOR_TOKEN",
"abc123abc1234"
)
EXECUTIVE_PRODUCER_TOKEN = os.environ.get(
"EXECUTIVE_PRODUCER_TOKEN",
"abc123abc1234"
)
class CastingAgencyTestCase(unittest.TestCase):
    """Endpoint and RBAC tests for the casting agency web app.

    BUG FIX: two movie tests were originally named
    ``test_delete_actors_401_failure_assistant`` and
    ``test_delete_actors_404_failure``, duplicating — and therefore silently
    shadowing — the earlier actor tests of the same names, so the actor
    versions never ran.  They are renamed ``test_delete_movies_*`` here.
    """

    def setUp(self):
        """Define test variables and initialize app."""
        self.app = create_app()
        self.client = self.app.test_client
        self.database_name = os.environ.get(
            "TEST_DATABASE_NAME",
            "abc123abc1234"
        )
        self.database_path = "postgres://postgres:postgres@{}/{}".format(
            "localhost:5432", self.database_name
        )
        setup_db(self.app, self.database_path)
        # drop db, create and populate with test data
        setup_db_for_test()

        self.casting_assistant_auth_header = {
            "Authorization": "Bearer " + CASTING_ASSISTANT_TOKEN
        }
        self.casting_director_auth_header = {
            "Authorization": "Bearer " + CASTING_DIRECTOR_TOKEN
        }
        self.executive_producer_auth_header = {
            "Authorization": "Bearer " + EXECUTIVE_PRODUCER_TOKEN
        }

        # request payloads shared by the tests below
        self.create_actor_success = {
            "name": "Chris Hemsworth",
            "age": 37,
            "gender": "Male",
        }
        self.create_actor_fail = {
            "name": "Chris Evans",
            "age": 39,
        }
        self.create_movie_success = {
            "title": "Captain America: Civil War",
            "release_date": "12/04/2016",
            "actors_ids": [1, 2, 3],
        }
        self.create_movie_fail_1 = {
            "title": "Avenger: Infinity War",
        }
        self.create_movie_fail_2 = {
            "title": "Avenger: Infinity War",
            "release_date": "27/04/2018",
            "actors_ids": [],
        }
        self.create_movie_fail_3 = {
            "title": "Avenger: Infinity War",
            "release_date": "27/04/2018",
            "actors_ids": [100],
        }

        # binds the app to the current context
        with self.app.app_context():
            self.db = SQLAlchemy()
            self.db.init_app(self.app)
            # create all tables
            self.db.create_all()

    # ------------------------------------------------------------------
    # helpers (underscore-prefixed so unittest does not collect them)
    # ------------------------------------------------------------------
    def _call(self, method, path, headers, **kwargs):
        """Issue an HTTP request; return (status_code, decoded JSON body)."""
        res = getattr(self.client(), method)(path, headers=headers, **kwargs)
        return res.status_code, json.loads(res.data)

    def _assert_ok(self, code, data, payload_key=None):
        """Assert a 200 success envelope, optionally with a non-empty payload."""
        self.assertEqual(code, 200)
        self.assertEqual(data["success"], True)
        if payload_key is not None:
            self.assertTrue(len(data[payload_key]))

    def _assert_err(self, code, data, status, message):
        """Assert an error envelope with the expected status and message."""
        self.assertEqual(code, status)
        self.assertEqual(data["success"], False)
        self.assertEqual(data["message"], message)

    # ----------------------------- actors -----------------------------
    def test_get_actors(self):
        code, data = self._call("get", "/actors", self.casting_assistant_auth_header)
        self._assert_ok(code, data, "actors")

    def test_create_actors_success_director(self):
        code, data = self._call("post", "/actors", self.casting_director_auth_header,
                                json=self.create_actor_success)
        self._assert_ok(code, data, "actor")

    def test_create_actors_success_producer(self):
        code, data = self._call("post", "/actors", self.executive_producer_auth_header,
                                json=self.create_actor_success)
        self._assert_ok(code, data, "actor")

    def test_create_actors_401_failure_assistant(self):
        # assistants may not create actors
        code, data = self._call("post", "/actors", self.casting_assistant_auth_header,
                                json=self.create_actor_success)
        self._assert_err(code, data, 401, "Permission missing.")

    def test_422_if_create_actor_fails(self):
        # incomplete payload (missing gender)
        code, data = self._call("post", "/actors", self.executive_producer_auth_header,
                                json=self.create_actor_fail)
        self._assert_err(code, data, 422, "unprocessable")

    def test_update_actors_success_producer(self):
        code, data = self._call("patch", "/actors/1", self.executive_producer_auth_header,
                                json=self.create_actor_success)
        self._assert_ok(code, data, "actor")

    def test_update_actors_success_director(self):
        code, data = self._call("patch", "/actors/1", self.casting_director_auth_header,
                                json=self.create_actor_success)
        self._assert_ok(code, data, "actor")

    def test_update_actors_401_failure_assistant(self):
        code, data = self._call("patch", "/actors/1", self.casting_assistant_auth_header,
                                json=self.create_actor_success)
        self._assert_err(code, data, 401, "Permission missing.")

    def test_update_actors_404_failure(self):
        # actor id 100 does not exist in the test data
        code, data = self._call("patch", "/actors/100", self.casting_director_auth_header,
                                json=self.create_actor_success)
        self._assert_err(code, data, 404, "resource not found")

    def test_delete_actors_success_producer(self):
        code, data = self._call("delete", "/actors/1", self.executive_producer_auth_header)
        self._assert_ok(code, data)
        self.assertEqual(data["actor_id"], 1)

    def test_delete_actors_success_director(self):
        code, data = self._call("delete", "/actors/1", self.casting_director_auth_header)
        self._assert_ok(code, data)
        self.assertEqual(data["actor_id"], 1)

    def test_delete_actors_401_failure_assistant(self):
        code, data = self._call("delete", "/actors/1", self.casting_assistant_auth_header)
        self._assert_err(code, data, 401, "Permission missing.")

    def test_delete_actors_404_failure(self):
        code, data = self._call("delete", "/actors/100", self.casting_director_auth_header)
        self._assert_err(code, data, 404, "resource not found")

    # ----------------------------- movies -----------------------------
    def test_get_movies(self):
        code, data = self._call("get", "/movies", self.casting_assistant_auth_header)
        self._assert_ok(code, data, "movies")

    def test_create_movies_401_failure_director(self):
        # only producers may create movies
        code, data = self._call("post", "/movies", self.casting_director_auth_header,
                                json=self.create_movie_success)
        self._assert_err(code, data, 401, "Permission missing.")

    def test_create_movies_success_producer(self):
        code, data = self._call("post", "/movies", self.executive_producer_auth_header,
                                json=self.create_movie_success)
        self._assert_ok(code, data, "movie")

    def test_create_movies_401_failure_assistant(self):
        code, data = self._call("post", "/movies", self.casting_assistant_auth_header,
                                json=self.create_movie_success)
        self._assert_err(code, data, 401, "Permission missing.")

    def test_422_create_movie_fails_incomplete_info(self):
        code, data = self._call("post", "/movies", self.executive_producer_auth_header,
                                json=self.create_movie_fail_1)
        self._assert_err(code, data, 422, "unprocessable")

    def test_422_create_movie_fails_no_actor_input_info(self):
        code, data = self._call("post", "/movies", self.executive_producer_auth_header,
                                json=self.create_movie_fail_2)
        self._assert_err(code, data, 422, "unprocessable")

    def test_404_create_movie_fails_wrong_actor_id(self):
        code, data = self._call("post", "/movies", self.executive_producer_auth_header,
                                json=self.create_movie_fail_3)
        self._assert_err(code, data, 404, "resource not found")

    def test_update_movies_success_producer(self):
        code, data = self._call("patch", "/movies/1", self.executive_producer_auth_header,
                                json=self.create_movie_success)
        self._assert_ok(code, data, "movie")

    def test_update_movies_success_director(self):
        code, data = self._call("patch", "/movies/1", self.casting_director_auth_header,
                                json=self.create_movie_success)
        self._assert_ok(code, data, "movie")

    def test_update_movies_401_failure_assistant(self):
        code, data = self._call("patch", "/movies/1", self.casting_assistant_auth_header,
                                json=self.create_movie_success)
        self._assert_err(code, data, 401, "Permission missing.")

    def test_update_movies_404_failure(self):
        code, data = self._call("patch", "/movies/100", self.casting_director_auth_header,
                                json=self.create_movie_success)
        self._assert_err(code, data, 404, "resource not found")

    def test_delete_movies_success_producer(self):
        code, data = self._call("delete", "/movies/1", self.executive_producer_auth_header)
        self._assert_ok(code, data)
        self.assertEqual(data["movie_id"], 1)

    def test_delete_movies_401_failure_director(self):
        code, data = self._call("delete", "/movies/1", self.casting_director_auth_header)
        self._assert_err(code, data, 401, "Permission missing.")

    # renamed from test_delete_actors_401_failure_assistant (duplicate name)
    def test_delete_movies_401_failure_assistant(self):
        code, data = self._call("delete", "/movies/1", self.casting_assistant_auth_header)
        self._assert_err(code, data, 401, "Permission missing.")

    # renamed from test_delete_actors_404_failure (duplicate name)
    def test_delete_movies_404_failure(self):
        code, data = self._call("delete", "/movies/100", self.executive_producer_auth_header)
        self._assert_err(code, data, 404, "resource not found")

    # -------------------------- relationships --------------------------
    def test_get_actors_by_movies(self):
        code, data = self._call("get", "/movies/1/actors", self.casting_assistant_auth_header)
        self._assert_ok(code, data, "actors")

    def test_404_get_actors_by_movies(self):
        code, data = self._call("get", "/movies/100/actors", self.casting_assistant_auth_header)
        self._assert_err(code, data, 404, "resource not found")

    def test_get_movies_by_actors(self):
        code, data = self._call("get", "/actors/1/movies", self.casting_assistant_auth_header)
        self._assert_ok(code, data, "movies")

    def test_404_get_movies_by_actors(self):
        code, data = self._call("get", "/actors/100/movies", self.casting_assistant_auth_header)
        self._assert_err(code, data, 404, "resource not found")
# Make the tests conveniently executable
if __name__ == "__main__":
unittest.main()
|
3,229 | 53cf6e97c3b71b1063d5b6bce5aa444933b69809 | from dagster import job, op
@op
def do_something():
    """Dagster op that returns the constant string "foo"."""
    return "foo"
@job
def do_it_all():
    """Dagster job graph: invokes the do_something op once."""
    do_something()
|
3,230 | 9cc64edc81ab39b0ab2cd47661c9809545b03ac6 | '''
Twitter settings
Input your app credentials below
https://apps.twitter.com
'''
# consumer key
CONSUMER_KEY = ''
# consumer secret
CONSUMER_SECRET = ''
'''
App settings
'''
# Where to save tokens (JSON)
TOKENS_PATH = '/tmp/twitter-tokens.json'
# Redirect-back to URL after authenticated (optional)
REDIRECT_TO = ''
# secret key for flask
# NOTE(review): this secret is committed to source control — it should be
# rotated and loaded from an environment variable instead.
FLASK_SECRET = 'S$2[ShC-=BKKOQ.Z-|fa 6f;,5 <[QngmG)}5,s%0vX>B}?o-0X9PM;.dN{jo7'
3,231 | 8bd5eff12e68f7145676f5e089b51376a82ab489 | _base_ = "../model.py"
# Base image-classification model config; backbone/head specifics are meant to
# be filled in by configs that inherit from this file (via _base_).
model = dict(
    type="ImageClassifier",
    task="classification",
    pretrained=None,  # no pretrained weights by default
    backbone=dict(),  # left empty; supplied by derived configs
    head=dict(in_channels=-1, loss=dict(type="CrossEntropyLoss", loss_weight=1.0), topk=(1, 5)),  # in_channels=-1: infer from backbone output
)
# checkpoint hook variant that also records validation results
checkpoint_config = dict(type="CheckpointHookWithValResults")
|
3,232 | 36257340ebbc6bd2c7fa5995511b2c859f58f8e5 | from keras.layers import Dense, Activation, Dropout
from keras.utils.visualize_util import plot
from keras.models import Sequential
from emotions import FER2013Dataset
# Registry mapping a short model name -> model class, filled by @deep_model.
_deep_models = {}


def deep_model(model_name):
    """Class decorator: register the decorated class under *model_name*."""
    def register(cls):
        _deep_models[model_name] = cls
        return cls
    return register
def get_model(model_name):
    """Return the model class registered under *model_name*.

    Raises ValueError listing the known names when the lookup fails.
    """
    if model_name in _deep_models:
        return _deep_models[model_name]
    available_models = ", ".join(_deep_models.keys())
    raise ValueError(
        "Model '%s' not found. Available models are: %s"
        % (model_name, available_models))
def init_model(name, *args, **kwargs):
    """Instantiate the model registered under *name* with the given args."""
    model_cls = get_model(name)
    return model_cls(*args, **kwargs)
class DeepModel:
    """Base class for FER2013 emotion classifiers built with Keras.

    Subclasses implement build() and store the compiled-ready Keras model
    in self.model.
    """
    # input images are 48x48 grayscale, flattened into one pixel vector
    image_size = 48
    n_pixels = image_size ** 2
    # one output class per emotion label in the dataset
    n_classes = len(FER2013Dataset.VERBOSE_EMOTION)
    def __init__(self, *args, **kwargs):
        # concrete Keras model; populated by build() in subclasses
        self.model = None
    @property
    def name(self):
        """Model name used for plot filenames (defaults to the class name)."""
        return self.__class__.__name__
    def build(self, **params):
        """Construct and return the Keras model; must be overridden."""
        raise NotImplementedError()
    def show_structure(self, filename=None):
        """Write a diagram of the model graph to *filename* ('<Name>.png')."""
        if not filename:
            filename = self.name + '.png'
        plot(self.model, to_file=filename)
@deep_model('trivial')
class DummyModel(DeepModel):
    """Minimal baseline: one relu hidden layer into a softmax output.

    Uses the Keras 1.x API ('init=' keyword on Dense).
    """
    def build(self, **params):
        # **params is accepted for interface compatibility but unused here
        model = Sequential()
        model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init='normal'))
        model.add(Activation('relu'))
        model.add(Dense(self.n_classes, input_dim=self.n_pixels, init='normal'))
        model.add(Activation('softmax'))
        self.model = model
        return model
@deep_model('simple')
class SimpleFeedforwardModel(DeepModel):
    """Two-hidden-layer feedforward net: n_pixels -> 2*n_pixels -> classes.

    NOTE(review): the *optimizer* argument is accepted but not used in this
    method — presumably consumed at compile time elsewhere; confirm.
    """
    def build(self, init='normal', optimizer='adam', activation='relu',
              output_activation='sigmoid'):
        model = Sequential()
        model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init=init))
        model.add(Activation(activation))
        model.add(Dense(self.n_pixels * 2, init=init))
        model.add(Activation(activation))
        model.add(Dense(self.n_classes, init=init))
        model.add(Activation(output_activation))
        self.model = model
        return model
@deep_model('dropout')
class DropoutFeedforwardModel(DeepModel):
    """Deeper feedforward net with dropout after the wider hidden layers.

    NOTE(review): the *optimizer* argument is accepted but not used in this
    method — presumably consumed at compile time elsewhere; confirm.
    """
    def build(self, init='normal', optimizer='adam', activation='relu',
              output_activation='sigmoid', dropout=0.2):
        model = Sequential()
        model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init=init))
        model.add(Activation(activation))
        model.add(Dense(self.n_pixels * 2, init=init))
        model.add(Activation(activation))
        model.add(Dropout(dropout))
        model.add(Dense(self.n_pixels * 4, init=init))
        model.add(Activation(activation))
        model.add(Dropout(dropout))
        model.add(Dense(self.n_classes, init=init))
        model.add(Activation(output_activation))
        self.model = model
        return model
|
3,233 | cc99811321083147540a00e8029b792c8afc2ada | import json
import requests
import random
import boto3
from email.parser import BytesParser, Parser
from email.policy import default
##################################
endpoint = 'https://5295t8jcs0.execute-api.us-east-1.amazonaws.com/Prod'
##################################
def get_msg_body(msg):
    """Return the text payload of an email message.

    For multipart messages the payload of the first text/* part is returned;
    for plain text messages the whole payload.  Returns None for any other
    content type (implicitly, as the original did).
    """
    maintype = msg.get_content_maintype()  # renamed: don't shadow builtin type
    if maintype == 'text':
        return msg.get_payload()
    if maintype == 'multipart':
        for part in msg.get_payload():
            if part.get_content_maintype() == 'text':
                return part.get_payload()
def lambda_handler(event, context):
    """S3-triggered entry point: fetch the stored email, classify it via the
    prediction endpoint, and mail the verdict back to the sender through SES.

    FIXES vs. original:
      * the reply promised a "240 character sample" but 250 characters were
        kept — now truncates to 240;
      * missing space in "...sent at <date>with the subject..." repaired.
    """
    s3_bucket = event['Records'][0]['s3']['bucket']['name']
    s3_key = event['Records'][0]['s3']['object']['key']

    client = boto3.client('s3')
    data = client.get_object(Bucket=s3_bucket, Key=s3_key)
    contents = data['Body'].read()

    msg = Parser(policy=default).parsestr(contents.decode('ascii'))
    frm = msg['from']
    time = msg['date']
    subject = msg['subject']
    body = get_msg_body(msg)
    body = " ".join(body.split()).strip()  # collapse all runs of whitespace
    print(time)

    # Ask the classification endpoint for a label and its confidence.
    r = requests.post(endpoint, data={'data': body},
                      headers={'Content-Type': 'application/x-www-form-urlencoded'})
    r = json.loads(r.text)
    print(r)
    label = 'SPAM' if int(float(r['predicted_label'])) == 1 else 'HAM'
    p = float(r['predicted_probability'])
    print(label, p)

    # keep the sample consistent with the message text below (240 chars)
    if len(body) > 240:
        body = body[0:240]
    # NOTE(review): p is reported with ' % confidence' — confirm the endpoint
    # already returns a percentage rather than a 0..1 probability.
    return_msg = 'We received your email sent at ' +\
        time + ' with the subject \'' + subject +\
        '\'.\n\nHere is a 240 character sample of the email body:\n\n' +\
        body + '\n\nThe email was categorized as ' + label +\
        ' with a ' + str(p) + ' % confidence.'

    client = boto3.client('ses')
    status = client.send_email(
        Source='hamspamreply@hw3tiz2102.xyz',
        Destination={
            'ToAddresses': [
                frm,
            ],
        },
        Message={
            'Subject': {
                'Data': 'Ham/Spam Analysis'
            },
            'Body': {
                'Text': {
                    'Data': return_msg,
                }
            }
        },
    )
    print(status)
    return {
        'statusCode': 200,
        'body': json.dumps('LF2 successfull!')
    }
|
3,234 | 8f311e15c15fe3309218dfaed5eefa4a8fc3f453 | # GERALDO AMELIO DE LIMA JUNIOR
# UNIFIP - Patos
# 05 de março de 2020
# Questão 08 - Escreva um programa que leia um valor inteiro e calcule o seu cubo.
# FIX: the exercise (Questão 08) asks for the CUBE of the number, but the
# original computed n * 3 (the triple) and reported "triplo".
n = int(input('Digite um numero:'))
t = n ** 3  # cube of n
print('O cubo de {} vale {}.'.format(n, t))
|
3,235 | d443e9054481984d5372343170254268dca8a3b1 | #!/usr/local/bin/python3
import time, json, os, sqlite3, uuid, json, base64, sys
import requests as http
import numpy as np
from os.path import isfile, join
from datetime import date, datetime
from argparse import ArgumentParser
PRINTER_IP = ""
FRAMERATE = 30
TIMELAPSE_DURATION = 60
TIMELAPSE_PATH = "timelapses"
DATABASE_PATH = "timelapses.db"
# Checks if a print is running
#
# @return boolean the status of the printer
def is_printing():
    """Return True while the printer reports an actively running print job.

    Any network or parsing failure is treated as "not printing" so the
    daemon loop survives an unreachable printer.
    """
    try:
        status = http.get("http://" + PRINTER_IP + "/api/v1/printer/status", timeout=1)
        if status.json() != "printing":
            return False
        state = http.get("http://" + PRINTER_IP + "/api/v1/print_job/state", timeout=1).json()
        # These states mean the job is over or waiting on the operator,
        # so no more frames should be captured.
        return state not in ('none', 'wait_cleanup', 'wait_user_action')
    except Exception:
        # Deliberately broad: an unreachable printer must not crash the daemon.
        return False
# Checks if a print is starting
#
# @return boolean the status of the calibration
def is_pre_printing():
    # 'pre_print' is the calibration/warm-up phase before actual printing.
    # NOTE(review): unlike is_printing(), network errors are NOT caught here —
    # an unreachable printer raises instead of returning False; confirm callers
    # are protected.
    state = http.get("http://" + PRINTER_IP + "/api/v1/print_job/state", timeout=1).json()
    return state == 'pre_print'
# Adds a pre-printing print in the database
#
# @returns int the id of the timelapse
def register_pre_printing():
    db = sqlite3.connect(DATABASE_PATH)
    db_cur = db.cursor()
    # print-job metadata fetched from the printer's REST API
    title = http.get("http://" + PRINTER_IP + "/api/v1/print_job/name", timeout=1).json()
    duration = http.get("http://" + PRINTER_IP + "/api/v1/print_job/time_total", timeout=1).json()
    status = "pre-printing"
    # id is NULL (AUTOINCREMENT); the preview BLOB is filled in later by store_preview()
    db_cur.execute("INSERT INTO 'timelapses' VALUES(NULL, ?, ?, ?, ?, NULL)", (title, status, duration, date.today()))
    db.commit()
    db.close()
    return db_cur.lastrowid
# Saves a preview image of the timelapse in the database
def store_preview(id):
    """Store tmp/preview.jpg as the BLOB preview of timelapse row *id*."""
    db = sqlite3.connect(DATABASE_PATH)
    db_cur = db.cursor()
    # FIX: context manager ensures the file handle is closed even if the
    # UPDATE raises (the original leaked the handle on error).
    with open("tmp/preview.jpg", "rb") as f:
        db_cur.execute("UPDATE timelapses SET preview = ? WHERE id = ?", (sqlite3.Binary(f.read()), id, ))
    db.commit()
    db.close()
# Updates a timelapse status
#
# @param id the id of the timelapse in the db
# @param status the status to be updated
def update_timelapse_status(id, status):
    """Persist a new status string for the timelapse row with this id."""
    connection = sqlite3.connect(DATABASE_PATH)
    cursor = connection.cursor()
    cursor.execute("""
    UPDATE timelapses SET status = ? WHERE id = ?
    """, (status, id, ))
    connection.commit()
    connection.close()
# Checks if timelapses are not too old or if files are not missing
def check_timelapses():
    """Reconcile DB rows with reality: mark stale/missing entries and purge
    timelapses older than 31 days (row and video file).

    Row layout (see CREATE TABLE below):
    [0]=id, [1]=title, [2]=status, [3]=duration, [4]=date, [5]=preview.
    """
    db = sqlite3.connect(DATABASE_PATH)
    db_cur = db.cursor()
    # idempotent schema bootstrap — safe to run on every daemon cycle
    db_cur.execute("""
    CREATE TABLE IF NOT EXISTS timelapses(
        id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
        title TEXT,
        status TEXT,
        duration INTEGER,
        date DATE,
        preview BLOB)
    """);
    db_cur.execute("SELECT * from timelapses")
    timelapses = db_cur.fetchall()
    # checks if timelapse files are not missing
    for timelapse in timelapses:
        filepath = get_filepath(timelapse[0])
        # a row still marked as in-progress while nothing is printing means
        # the daemon died mid-capture -> the timelapse failed
        if timelapse[2] == "pre-printing" and not is_printing():
            update_timelapse_status(timelapse[0], "failed")
        elif timelapse[2] == "printing" and not is_printing():
            update_timelapse_status(timelapse[0], "failed")
        elif timelapse[2] == "missing":
            # file reappeared (e.g. restored manually) -> usable again
            if os.path.isfile(filepath):
                update_timelapse_status(timelapse[0], "finished")
        elif timelapse[2] == "finished":
            if not os.path.isfile(filepath):
                update_timelapse_status(timelapse[0], "missing")
    # deletes a timelapse and its file if too old
    for timelapse in timelapses:
        # stored as ISO 'YYYY-MM-DD'; rebuild a date object to compare ages
        timelapseDate = timelapse[4].split("-")
        timelapseDate = date(int(timelapseDate[0]), int(timelapseDate[1]), int(timelapseDate[2]))
        currentDate = date.today()
        if (currentDate - timelapseDate).days > 31:
            filepath = get_filepath(timelapse[0])
            if os.path.isfile(filepath):
                os.remove(filepath)
            db_cur.execute("DELETE FROM timelapses WHERE id = ?", (timelapse[0], ))
    db.commit()
    db.close()
# Gets the filepath of a specific timelapse
#
# @param id the id of the timelapse
def get_filepath(id):
    """Build the .mp4 output path for a timelapse from its DB title and id."""
    connection = sqlite3.connect(DATABASE_PATH)
    cursor = connection.cursor()
    cursor.execute("SELECT title FROM timelapses WHERE id = ?", (id, ))
    title = cursor.fetchone()[0]
    connection.commit()
    connection.close()
    return os.path.join(TIMELAPSE_PATH, title + str(id) + ".mp4")
def start_timelapse_daemon():
    """Main capture loop: wait for a print, grab webcam frames into tmp/,
    then assemble an .mp4 with ffmpeg and store a preview in the DB.

    Frame pacing targets TIMELAPSE_DURATION seconds of output at FRAMERATE
    fps regardless of the print's real duration.
    """
    while True:
        check_timelapses()
        print("Waiting for print to start...")
        while not is_printing():
            time.sleep(5)
        print("Waiting for printer calibration...")
        current_print_id = register_pre_printing()
        while is_pre_printing():
            time.sleep(1)
        # print aborted during calibration -> row stays for check_timelapses()
        if not is_printing():
            continue
        print("Printing...")
        update_timelapse_status(current_print_id, "printing")
        # removes existing tmp folder
        if os.path.isdir("tmp"):
            for file in os.listdir("tmp"):
                file_path = os.path.join("tmp", file)
                if os.path.isfile(file_path):
                    os.remove(file_path)
        else:
            os.mkdir("tmp")
        duration = http.get("http://" + PRINTER_IP + "/api/v1/print_job/time_total").json();
        frame = 0
        while is_printing():
            frame += 1
            # one snapshot from the printer's mjpg-streamer camera feed
            res = http.get("http://" + PRINTER_IP + ":8080/?action=snapshot")
            filepath = "tmp/" + str(frame) + ".jpg"
            f = open(filepath, 'bw')
            f.write(res.content)
            f.close()
            # spread the frames evenly over the whole print duration
            time.sleep(duration / (FRAMERATE * TIMELAPSE_DURATION))
        update_timelapse_status(current_print_id, "finished")
        # generates the video
        filepath = get_filepath(current_print_id)
        if not os.path.isdir(TIMELAPSE_PATH):
            os.mkdir(TIMELAPSE_PATH)
        # NOTE(review): filepath is interpolated unquoted into a shell command;
        # titles containing spaces or shell metacharacters will break (or
        # worse) — consider subprocess.run with a list argument.
        os.system("ffmpeg -r " + str(FRAMERATE) + " -i 'tmp/%d.jpg' -qscale 7 -c:v libx264 " + filepath)
        # extracts a preview image
        os.system("ffmpeg -i " + filepath + " -vf \"select='eq(n," + str(5 * frame // 6) + ")'\" -vframes 1 tmp/preview.jpg")
        store_preview(current_print_id)
        # removes the tmp folder
        for file in os.listdir("tmp"):
            file_path = os.path.join("tmp", file)
            if os.path.isfile(file_path):
                os.remove(file_path)
        os.rmdir("tmp")
        print("Print done!")
# CLI entry point: requires the printer IP (-ip) and then runs the capture
# loop forever.
# NOTE(review): there is no `if __name__ == "__main__":` guard — importing
# this module starts the daemon as a side effect; confirm that is intended.
parser = ArgumentParser(description='Recover timelapses from the ultimaker s5 printer')
parser.add_argument('-ip', help='specifies the ip of the printer', required=True)
args = parser.parse_args()
PRINTER_IP = args.ip
start_timelapse_daemon()
|
3,236 | 33ec822f6149a57244edf6d8d99a5b3726600c2e | 'Attempts to use <http://countergram.com/software/pytidylib>.'
try:
    import tidylib
except ImportError:
    def tidy(html):
        """Fallback used when pytidylib is unavailable: return input as-is."""
        return html
else:
    def tidy(html):
        """Run *html* through HTML Tidy, forcing XHTML output."""
        fixed, errors = tidylib.tidy_document(html, options={'force-output': True,
            'output-xhtml': True, 'tidy-mark': False})
        return fixed
|
3,237 | 14761cc2593556f58a7dc4e499db71456d7c7048 | import numpy as np
import cv2
from matplotlib import pyplot as plt
from matplotlib import cm
import imageio
# # Backpack values
# fx = 7190.247 # lense focal length
# baseline = 174.945 # distance in mm between the two cameras (values from middlebury)
# units = 0.001 # depth units
# doffs=342.523 # x-difference of principal points, following https://vision.middlebury.edu/stereo/data/scenes2014/#description
# texture_threshold = 2000 # 10 by default
# Classroom values (Middlebury 2014 calibration for the Classroom1-perfect pair)
doffs=113.186
baseline=237.604
fx = 3920.793
# NOTE(review): duplicate assignment — doffs was already set to this value above.
doffs=113.186
disparities=0
block=23
# # Backpack images
# imgL = cv2.imread('images/im0_left.png', cv2.IMREAD_GRAYSCALE)
# imgR = cv2.imread('images/im0_right.png', cv2.IMREAD_GRAYSCALE)
# Classroom images
imgL = cv2.imread('images/Classroom1-perfect/im0.png', cv2.IMREAD_GRAYSCALE)
imgR = cv2.imread('images/Classroom1-perfect/im1.png', cv2.IMREAD_GRAYSCALE)
# Show the left input image.
plt.imshow(imgL, cmap="gray")
plt.axis('off')
plt.show()
# Block-matching stereo: disparity map from the rectified pair.
sbm = cv2.StereoBM_create(numDisparities=disparities,blockSize=block)
# sbm.setTextureThreshold(texture_threshold)
# calculate disparities
disparity = sbm.compute(imgL, imgR)
print(disparity)
# show disparity
plt.imshow(disparity)
plt.axis('off')
plt.show()
# Triangulate: depth = fx * baseline / (doffs + d), for valid (positive) disparities.
depth = np.zeros(shape=imgL.shape).astype(float)
depth[disparity > 0] = (fx * baseline) / (doffs + disparity[disparity > 0])
plt.imshow(depth)
plt.show()
# convert from pfm file equation?
pfm = imageio.imread('images/Classroom1-perfect/disp0.pfm')
pfm = np.asarray(pfm)
plt.imshow(pfm)
plt.show()
# Same depth formula applied to the ground-truth disparity map.
depth = np.zeros(shape=imgL.shape).astype(float)
depth[pfm > 0] = (fx * baseline) / (doffs + pfm[pfm > 0])
#print(depth)
plt.imshow(depth)
plt.axis('off')
plt.show()
3,238 | 4572e243f75ad92c04f5cdc0b454df7389183a6a | import urllib.request
def get_html(url):
    """
    Returns the html of url or None if status code is not 200
    """
    import urllib.error  # local import so this fix is self-contained

    req = urllib.request.Request(
        url,
        headers={
            'User-Agent': 'Python Learning Program',
            'From': 'hklee310@gmail.com'
        }
    )
    try:
        # urlopen raises HTTPError for non-2xx codes, so the original
        # `else: return None` branch was unreachable; map those errors to
        # None here. The context manager also closes the response.
        with urllib.request.urlopen(req) as resp:
            if resp.code == 200:
                return resp.read()  # returns the html document
            return None
    except urllib.error.HTTPError:
        return None
|
3,239 | f100757fcb1bef334f9f8eacae83af551d2bac5b | from chalicelib.utilities import *
def Error(app):
    """Register a POST /errors endpoint on the given Chalice app.

    The handler logs the JSON payload and echoes it back, so client-side
    errors can be captured server-side.
    """
    @app.route('/errors', cors=True, methods=['POST'])
    @printError
    def errors():
        payload = app.current_request.json_body
        print(payload)
        return payload
3,240 | 082e3350c5827ff2ca909084f2d6a206ae21a7e6 | #!/usr/bin/env python
# coding=utf-8
# Token tables for the C -> Python translator below.
# Known C operators (used by parseOperator/isOperator).
operators = ['-', '~', '++', '--', '*', '!', '/', '*', '%', '+', '-',
             '>', '>=', '<', '<=', '==', '!=', '&&', '||', '=']
# C type keywords; the trailing space distinguishes declarations from uses.
types = ['int ', 'double ', 'float ', 'char ']
# Tokens stripped entirely from translated lines.
toDelete = types + ['struct ']
# Literal C -> Python substitutions, applied in order by main1.
toRepleace = [('printf(', 'print('), ('++', ' += 1'), ('--', ' -= 1'),
              ('/*', "'''"), ('*/', "'''"), ('//','#'),
              ('&&', 'and'), ('||', 'or')]
def isDigit(c):
    """Return True if *c* is a decimal digit character.

    Fix: the original used strict comparisons (c > '0' and c < '9'),
    which wrongly rejected '0' and '9' themselves.
    """
    return '0' <= c <= '9'
def isChar(c):
    """Return True if *c* is an ASCII letter.

    Fix: the original used strict comparisons, which wrongly rejected
    'a', 'z', 'A' and 'Z' themselves.
    """
    return ('a' <= c <= 'z') or ('A' <= c <= 'Z')
def isOperator(c):
    """True when token *c* appears in the C operator table."""
    return any(op == c for op in operators)
def isDefun(line):
    """Heuristic: *line* is a C function definition (parens plus a type keyword)."""
    has_parens = '(' in line and ')' in line
    return has_parens and sum(tok in line for tok in toDelete)
def isDefStruct(line):
    """True for a struct definition header, e.g. 'struct name'."""
    words = line.split(' ')
    return 'struct ' in line and len(words) == 2
def isUseStruct(line):
    """True for a struct variable use, e.g. 'struct name var'."""
    words = line.split(' ')
    return 'struct ' in line and len(words) == 3
def isClarify(line):
    """True for a bare declaration: starts with a type and has no initializer."""
    starts_with_type = sum(line.startswith(t) for t in types)
    return starts_with_type and '=' not in line
def isPoint(line):
    """Heuristic: *line* declares or assigns through a C pointer (*name)."""
    star = line.index('*') if '*' in line else -1
    # A '*' directly followed by a letter, on a typed or assigning line.
    return (star != -1 and len(line) > (star + 1) and isChar(line[star + 1])
            and (sum(line.startswith(t) for t in types) or '=' in line))
def isList(line):
    """True for a C array declaration, e.g. 'int a[10]'."""
    return (sum(line.startswith(t) for t in types)
            and '[' in line and ']' in line)
def parseInt(s, start=0):
    """Scan *s* from *start* for the first run of digits.

    Returns (value, index_of_first_digit). Raises ValueError when no
    digit is found (int('') fails), matching the original behavior.
    """
    digits = ''
    pos = start
    while pos < len(s):
        ch = s[pos]
        if isDigit(ch):
            digits += ch
        elif digits:
            break
        pos += 1
    return int(digits), pos - len(digits)
def parseVar(s, start=0):
    """Scan *s* from *start* for the first run of letters.

    Returns (name, index_of_first_letter). Note: only a digit terminates
    the scan once letters have been collected; other characters are skipped.
    """
    name = ''
    pos = start
    while pos < len(s):
        ch = s[pos]
        if isChar(ch):
            name += ch
        elif isDigit(ch) and name:
            break
        pos += 1
    return name, pos - len(name)
def parseOperator(s, start=0):
    """Scan *s* from *start* for the first known operator token.

    Returns (operator, index_of_operator_start) for the first run of
    non-alphanumeric, non-space characters that matches the operator table.
    NOTE(review): if the scan reaches the end of *s* without a match, the
    function falls through and implicitly returns None, so callers that
    unpack the result would raise TypeError.
    """
    tmp = ''
    while start < len(s):
        if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':
            tmp += s[start]
        elif len(tmp) and isOperator(tmp):
            return tmp, start - len(tmp)
        else:
            # Reset on letters/digits/spaces that do not complete an operator.
            tmp = ''
        start += 1
def main1(filename, output=None):
    """Line-by-line C -> Python translation pass.

    Reads *filename*, writes the translated source to *output*
    (default: filename + '.py'). Tracks indentation via '{'/'}',
    rewrites structs as dicts, for-loops as while-loops, and applies
    the toRepleace/toDelete token tables at the end of each line.
    """
    with open(filename, 'r') as f:
        lines = f.readlines()
    if not output:
        output = filename + '.py'
    f = open(output, 'w')
    indent = ''
    instruct = False      # currently inside a struct body being emitted as a dict
    inFor = ''            # pending for-loop increment, emitted at the closing '}'
    for line in lines:
        line = line.lstrip(' ').rstrip(';\n')
        if line.startswith('#'):
            # skip C preprocessor directives
            continue
        if '{' in line:
            if instruct:
                f.write(indent + '{\n')
            indent += '    '
        elif '}' in line:
            if inFor:
                # flush the for-loop's increment before dedenting
                f.write('%s%s\n' % (indent, inFor))
                inFor = ''
            indent = indent[:-4]
            if instruct:
                instruct = False
                f.write(indent + '}\n')
                # indent = indent[:-4]
        else:
            # `s` is a template; '{}' is later filled with the (rewritten) line.
            s = indent
            if line.startswith('//'):
                s += '{}'
            elif isDefun(line):
                s += 'def {}:'
            elif isUseStruct(line):
                # 'struct T arr[N]' -> 'arr = [T.copy() for i in range(N)]'
                l = line.split(' ')[1:]
                s += ('{} = [{}.copy() for i in range({})]'
                      '').format(l[1][:l[1].index('[')],
                                 l[0], parseInt(l[1], l[1].index('['))[0])
                s += '{}'
                line = ''
            elif isDefStruct(line):
                # indent += '    '
                # s += 'class {}:\n' + indent + 'def __init__(self):'
                s += '{} = \\'
                instruct = True
            elif 'if' in line or 'while ' in line:
                s += '{}:'
            elif 'printf' in line and '%' in line:
                # printf("fmt", args) -> print("fmt" % (args))
                s += '{})'
                first_comma = line.index(',')
                line = line[:first_comma] + ' % (' + line[first_comma + 2:]
            elif 'for' in line:
                # split 'for(init; cond; inc)' and emit init + while(cond)
                line = line[3:].replace('(', '').replace(')', '').strip()
                line = [l.strip() for l in line.split(';')]
                if line[0] and line[1]:
                    s += '%s\n%swhile %s:{}' % (line[0], s, line[1])
                if not line[0] and line[1]:
                    s += 'while %s:{}' % (line[1])
                if line[0] and not line[1]:
                    s += '%s\n%swhile 1:{}' % (line[0], s)
                if not line[0] and not line[1]:
                    s += 'while 1:{}'
                inFor = line[2]
                line = ''
            elif instruct:
                # s += 'self.{} = None'
                s += '"{}": None,'
            elif isClarify(line):
                s += '# Clarify `{}` is skiped'
            else:
                s += '{}'
            if isPoint(line):
                # rename pointer vars: '*name' -> 'p_name'
                index = -1
                for i in range(line.count('*')):
                    index = line.index('*', index + 1)
                    if isChar(line[index + 1]):
                        line = line[:index] + 'p_' + line[index + 1:]
            s = s.format(line.strip())
            for i, j in toRepleace:
                while i in s:
                    s = s.replace(i, j)
            if not s.strip().startswith('#'):
                for i in toDelete:
                    while i in s:
                        s = s.replace(i, '')
            f.write(s + '\n')
    f.write('if __name__ == "__main__":\n    main()')
    f.close()
def main2(filename, output=None):
    """Skeleton for a second translation pass.

    NOTE(review): unfinished stub — it reads the input and opens the
    output file, but never writes anything (`rst` is unused).
    """
    with open(filename, 'r') as f:
        lines = f.readlines()
    if not output:
        output = filename + '.py'
    f = open(output, 'w')
    rst = []
    for line in lines:
        line = line.lstrip(' ').rstrip(';\n')
        if line.startswith('#'):
            continue
    f.close()
if __name__ == '__main__':
    # Translate test.c using the line-by-line pass; pass 2 is unfinished.
    main1('test.c', output='replace.py')
    # main2('test.c', output='list.py')
|
3,241 | f253816d08407950caad28f1ce630ac2b099aa70 | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Common methods shared by MNIST and ImageNet experiments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import errno
import getpass
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# mkdir -p in Python >2.5
def mkdir_p(path):
  """Create *path* and any missing parents; an existing directory is not an error."""
  try:
    os.makedirs(path, mode=0o755)
  except OSError as exc:  # Python >2.5
    already_there = exc.errno == errno.EEXIST and os.path.isdir(path)
    if not already_there:
      raise
# Returns path to postfix under user's Unix home directory.
def make_experiment_dir(postfix):
  """Create (if needed) and return the directory ~/<postfix>."""
  exp_dir = os.path.join(os.path.expanduser('~'), postfix)
  mkdir_p(exp_dir)
  return exp_dir
# appends .png to file name
def save_fig(folder, filename):
  """Save the current matplotlib figure to folder/filename.png.

  No-op when *folder* is None.
  Fix: the file is now opened in binary mode ('wb'); writing PNG data
  through a text-mode handle fails under Python 3.
  """
  if folder is None:
    return
  filename_out = os.path.join(folder, filename + '.png')
  print('saving {}'.format(filename_out))
  with open(filename_out, 'wb') as out_file:
    plt.savefig(out_file)
# appends .txt to file name
def save_array(x, folder, filename, formatting):
  """Write array *x* as text to folder/filename.txt (no-op if folder is None)."""
  if folder is None:
    return
  target = os.path.join(folder, filename + '.txt')
  print('saving {}'.format(target))
  with open(target, 'w') as out_file:
    np.savetxt(out_file, x, fmt=formatting)
def load_array(filename):
  """Read a whitespace-delimited numeric array from *filename*."""
  # np.loadtxt opens and closes the file itself.
  return np.loadtxt(filename)
# count parameters for svd truncation
def count_parameters_list(k_values, nrows, ncols):
  """Map count_parameters over every rank in *k_values*."""
  return [count_parameters(k, nrows, ncols) for k in k_values]
# number of parameters when nrows-by-ncols matrix is approximated
# with product of nrows-by-rank and rank-by-ncols matrix.
def count_parameters(rank, nrows, ncols):
  """Parameter count of a rank-*rank* factorization of an nrows x ncols matrix."""
  return rank * (nrows + ncols)
# Return one random rademacher matrix
def fully_random_rademacher_matrix(nrows, ncols):
  """nrows-by-ncols matrix of i.i.d. uniform +/-1 (float32) entries."""
  signs = np.array([-1, 1], dtype=np.float32)
  return np.random.choice(signs, (nrows, ncols))
# Return a rank-1 Rademacher matrix
def rank1_rademacher(nrows, ncols):
  """Outer product of two random +/-1 vectors (always rank 1)."""
  signs = np.array([-1, 1], dtype=np.float32)
  col = np.random.choice(signs, (nrows, 1))
  row = np.random.choice(signs, (1, ncols))
  # Broadcasting computes the outer product quicker than np.dot.
  return col * row
# Sketch matrix A with k random Rademacher components.
def sketch_matrix(A, sketch_type, k):
  """Approximate A as the average of k coefficient-weighted random matrices.

  sketch_type: 'arora' (fully random +/-1) or 'our_sketch' (rank-1 +/-1).
  Returns -1 on an unknown sketch_type.
  """
  tf.logging.info('sketch_matrix %s %d', sketch_type, k)
  nrows = A.shape[0]
  ncols = A.shape[1]
  # Numpy defaults to float64; a float32 accumulator is quicker.
  approx = np.zeros((nrows, ncols), dtype=np.float32)
  flat_A = np.ravel(A)  # hoisted: A does not change inside the loop
  for step in range(0, k):
    tf.logging.log_every_n(tf.logging.INFO, 'sketch_matrix %s iter %d/%d', 1000,
                           sketch_type, step, k)
    # generate random matrix
    if sketch_type == 'arora':
      rand_mat = fully_random_rademacher_matrix(nrows, ncols)
    elif sketch_type == 'our_sketch':
      rand_mat = rank1_rademacher(nrows, ncols)
    else:
      print('wrong sketch_type variable')
      return -1
    # projection coefficient <A, rand_mat>, then accumulate
    coefficient = np.dot(flat_A, np.ravel(rand_mat))
    approx += coefficient * rand_mat
  tf.logging.info('Done sketch_matrix %s %d', sketch_type, k)
  return (1.0 / k) * approx
# Return truncated svd of A, where only the top k components are used.
# Adding --copt=-mavx --copt=-mavx2 --copt=-mfma compiler flags
# speeds up svd by almost 2x. However it makes sketching, which is dominant,
# a tiny bit slower and hence it's not worth it.
def truncated_svd(A, k):
  """Rank-k SVD approximation of A."""
  tf.logging.info('Computing SVD ...')
  u, s, v = np.linalg.svd(A, full_matrices=False)
  # Keep only the k leading singular triplets.
  A_hat = np.dot(u[:, :k], np.dot(np.diag(s[:k]), v[:k, :]))
  tf.logging.info('Done computing SVD ...')
  return A_hat
# num_params is rank for SVD, number of coefficients for sketches.
def compress(A, compression_type, num_params):
  """Compress matrix A with the requested method.

  Raises:
    ValueError: for an unknown compression_type. (The original printed an
    error and then hit an UnboundLocalError returning the unset A_hat.)
  """
  if compression_type == 'svd':
    return truncated_svd(A, num_params)
  if compression_type in ('our_sketch', 'arora'):
    return sketch_matrix(A, compression_type, num_params)
  raise ValueError(
      'Error: wrong compression type. Must be svd, our_sketch, or arora.')
# return singular values of A sorted in descending order
def singular_values(A):
  """Return A's singular values, largest first."""
  _, s, _ = np.linalg.svd(A)
  return sorted(s, reverse=True)
def plot_and_save_singular_values(s, folder, fn, nrows, ncols):
  """Plot singular values *s* (descending) and save both figure and raw values."""
  xs = range(1, len(s) + 1)
  ys = sorted(s, reverse=True)
  plt.plot(xs, ys)
  plt.title('Singular values\ndim = (' + str(nrows) + 'x' + str(ncols) + ')')
  plt.tight_layout()
  save_fig(folder, fn)
  save_array(np.array(s), folder, fn + '_vals', '%.18e')
|
3,242 | 69721dca0f5d8396e330696cde52bfabad33c895 | from sikuli import *
import logging
import myTools
from datetime import date
import reports_Compare
#---------------------------------------------------#
def fSet_BillDate(pMonth):
#---------------------------------------------------#
    """Drive the Timeslips UI to set the bill date to <pMonth>/27/<dataYear>.

    pMonth: 1-12 (13 is clamped to 12). Navigates the date picker purely by
    keystrokes, so Timeslips must have focus; the sleeps are deliberate
    pacing for the GUI. Assumes Settings.dataYear is a string — TODO confirm.
    """
    if pMonth == 13:
        pMonth = 12
    logging.debug('- change bill date: ' + str(pMonth) + "/27/" + Settings.dataYear)
    time.sleep(1)
    # make sure timeslips has focus
    myTools.getFocus()
    # open revise date
    type("b",KeyModifier.ALT)
    type("d")
    time.sleep(2)
    # go to today
    type("t")
    #get to 01/01 of current year
    type(Key.HOME,KeyModifier.CTRL)
    # get to 01/01 of the data year
    thisYear = date.today().year
    for prevYear in range(int(Settings.dataYear),thisYear):
        type(Key.PAGE_UP,KeyModifier.CTRL)
        time.sleep(1)
    # get to 01/27 of the data year
    myTools.pressDOWN(4)
    myTools.pressLEFT(2)
    # advance month by month to the requested month
    for nextMonth in range(pMonth-1):
        type(Key.PAGE_DOWN)
        time.sleep(1)
    type(Key.ENTER)
    time.sleep(1)
#---------------------------------------------------#
def fRemove_Sort():
#---------------------------------------------------#
    """Clear the current sort in the slip list via the F6 options panel."""
    time.sleep(1)
    logging.debug('- remove sort')
    type(Key.F6)
    time.sleep(1)
    click(Pattern("remove_sort-1.png").similar(0.80))
    time.sleep(1)
    type(Key.F6)
    time.sleep(1)
#---------------------------------------------------#
def fPrint_BillRun(pMonth):
#---------------------------------------------------#
    """Print the month's bills to a text report, approve them, then diff
    the report against the stored baseline via reports_Compare."""
    reportName = "Bill-" + myTools.padZero(pMonth) + "-" + Settings.tsVersion + ".txt"
    logging.debug('fPrint_BillRun: ' + reportName)
    # open the bill run dialog
    type("b",KeyModifier.CTRL)
    time.sleep(1)
    fRemove_Sort()
    myTools.enterSlipFilter(pMonth,"n")
    # print bills to text
    logging.debug('-- print')
    type(Key.ENTER)
    time.sleep(1)
    # fill in path and name; press ENTER
    type(Settings.repFolder + "\\" + reportName)
    time.sleep(1)
    type(Key.ENTER)
    time.sleep(1)
    # confirm overwrite if the report already exists
    if exists("replace_msg.png"):
        type("y")
    # approve bills
    logging.debug('-- approve')
    wait(Pattern("approve_bills-1.png").targetOffset(-100,-8),FOREVER)
    click(Pattern("approve_bills-1.png").targetOffset(-100,-8))
    type(Key.ENTER)
    time.sleep(3)
    # newer versions show a modal "approving" message; older ones a status bar
    if int(Settings.tsVersion) > 2015:
        wait("approving_bills.png",FOREVER)
        while exists("approving_bills.png"):
            logging.debug('--- msg exists')
            time.sleep(2)
    else:
        waitVanish("approving_statusbar.png",FOREVER)
    time.sleep(1)
    # compare the report with baseline
    reports_Compare.Compare_OneReport(reportName)
    # close report entry / don't save
    logging.debug('-- close report window')
    click("report_generate_bills.png")
    type(Key.F4,KeyModifier.CTRL)
    time.sleep(2)
    type("n")
    time.sleep(1)
#---------------------------------------------------#
def fPrint_Bills(pMonth):
#---------------------------------------------------#
    """Timestamped wrapper: set the bill date for *pMonth*, then run the
    print/approve/compare sequence."""
    myTools.sectionStartTimeStamp("bills" + str(pMonth))
    logging.debug('Print_Bills: ' + str(pMonth))
    fSet_BillDate(pMonth)
    fPrint_BillRun(pMonth)
    myTools.sectionEndTimeStamp()
|
3,243 | 9c935e9ef298484d565256a420b867e800c3df55 | """ Contains different comparator classes for model output data structures.
"""
import copy
def tuple_to_string(tuptup):
    """ Converts a (possibly nested) list to its string representation.

    Depth levels are joined with different separators: ';' for the deepest
    level, then '/', then '|'. The input is deep-copied first, so it is
    never mutated.

    Parameters
    ----------
    tuptup : list
        Tuple to convert to its string representation.

    Returns
    -------
    str
        String representation of the input tuple.

    """

    def join_deepest(node, sep=';'):
        """ Recursively join the deepest list level of *node* with *sep*.

        Parameters
        ----------
        node : object
            Element to join if list or list of lists.
        sep : str, optional
            Separation character to join the list elements by.

        Returns
        -------
        object
            List containing joined string in max depth. Str if input depth = 1.

        """
        if not isinstance(node, list):
            return node
        if not isinstance(node[0], list):
            return sep.join(node)
        for pos, child in enumerate(node):
            node[pos] = join_deepest(child, sep)
        return node

    work = copy.deepcopy(tuptup)
    for sep in (';', '/', '|'):
        work = join_deepest(work, sep)
    return work
class Comparator():
    """ Comparator base class (interface for response comparators).
    """
    def compare(self, obj_a, obj_b):
        """ Base comparison method; subclasses must override this.

        Parameters
        ----------
        obj_a : object
            Object A for comparison.

        obj_b : object
            Object B for comparison.

        Returns
        -------
        object
            Comparison result.

        Raises
        ------
        NotImplementedError
            Always, in this base class.

        """
        raise NotImplementedError()
class EqualityComparator():
    """ Equality comparator. Checks if both responses are equal.
    """
    @staticmethod
    def compare(obj_a, obj_b):
        """ Compares two response objects based on equality of their
        string representations.

        Parameters
        ----------
        obj_a : tuple
            Response tuple A for comparison.

        obj_b : tuple
            Response tuple B for comparison.

        Returns
        -------
        bool
            True if both objects are equal, false otherwise.

        """
        repr_a = tuple_to_string(obj_a)
        repr_b = tuple_to_string(obj_b)
        return repr_a == repr_b
class NVCComparator():
    """ NVC response comparator. Performs the evaluation based on NVC and
    non-NVC classes.
    """
    @staticmethod
    def compare(obj_a, obj_b):
        """ Compares two response objects on their NVCness: True only when
        both respond NVC, or both respond something other than NVC.

        Parameters
        ----------
        obj_a : tuple
            Response tuple A for comparison.

        obj_b : tuple
            Response tuple B for comparison.

        Returns
        -------
        bool
            True only if both objects agree on whether the response is NVC or not.

        """
        a_is_nvc = tuple_to_string(obj_a) == 'NVC'
        b_is_nvc = tuple_to_string(obj_b) == 'NVC'
        return a_is_nvc == b_is_nvc
|
3,244 | a7a219e9ea5cdec004ef936958994ed1f5a96103 | import xlrd
def get_rosters_from_excel(django_file):
    """Parse all match rosters from an NCTTA 'Match_Rosters' worksheet.

    Each roster section starts at a row whose first cell is the header
    "NCTTA Team Match Player Selection Form". Rows header+11 .. header+18
    hold the eight players (cols 0-1) and the eight opponents (cols 6, 9).
    Returns a list of roster dicts; empty/unfilled sections are skipped.

    Fix: the sixteen copy-pasted append blocks are collapsed into one loop,
    and the unused `row` local was removed.
    """
    workbook = xlrd.open_workbook(file_contents=django_file.read())
    worksheet = workbook.sheet_by_name('Match_Rosters')
    num_rows = worksheet.nrows - 1
    cur_row = -1
    rosters = []
    while cur_row < num_rows:
        cur_row += 1
        if worksheet.cell_value(cur_row, 0) == "NCTTA Team Match Player Selection Form":
            roster = {
                "round_match" : worksheet.cell_value(cur_row + 2, 6), # consider adding the time in +2, 8
                "left_team_label" : worksheet.cell_value(cur_row + 4, 3),
                "right_team_label" : worksheet.cell_value(cur_row + 4, 8),
                "left_team_title" : worksheet.cell_value(cur_row + 5, 1),
                "right_team_title" : worksheet.cell_value(cur_row + 5, 6),
                "players" : [],
                "opponents" : [],
                # Don't forget about doubles!
            }
            # Rows header+11 .. header+18: one player and one opponent each.
            for row_idx in range(cur_row + 11, cur_row + 19):
                roster["players"].append({
                    "player_label" : worksheet.cell_value(row_idx, 0),
                    "player_name" : worksheet.cell_value(row_idx, 1),
                })
                roster["opponents"].append({
                    "player_name" : worksheet.cell_value(row_idx, 6),
                    "player_rating" : worksheet.cell_value(row_idx, 9),
                })
            # The label letter (e.g. 'A' from 'A1') identifies which side
            # of the sheet belongs to our team.
            label_letter = ''.join(i for i in roster["players"][0]["player_label"] if not i.isdigit())
            if label_letter == str(roster["left_team_label"].strip()):
                roster["active_team"] = "left"
            else:
                roster["active_team"] = "right"
            # Keep only sections that are actually filled in.
            if roster["opponents"][0]["player_name"] != "" and roster["players"][0]["player_name"] and roster["round_match"] != "":
                rosters.append(roster)
    return rosters
|
3,245 | e7060658ae1838b0870b2a3adb61c9f8d78c93c7 | #!/usr/bin/env python3
import sys
# The 26 offsets of a 3D Moore neighborhood (all but the origin).
all_neighbors_coord = [
    (dx, dy, dz)
    for dx in range(-1, 2)
    for dy in range(-1, 2)
    for dz in range(-1, 2)
    if (dx, dy, dz) != (0, 0, 0)
]

def add_coord(c1, c2):
    """Component-wise sum of two 3D coordinates."""
    return (c1[0] + c2[0], c1[1] + c2[1], c1[2] + c2[2])

class life:
    """Conway-style 3D cellular automaton over a sparse dict world.

    The world maps active coordinates (x, y, z) to '#'; inactive cells
    are simply absent.
    """
    def __init__(self, world):
        self.world = world

    def get_world_size(self):
        """Bounding box: ((min_x, min_y, min_z), (max_x, max_y, max_z))."""
        xs = [coord[0] for coord in self.world]
        ys = [coord[1] for coord in self.world]
        zs = [coord[2] for coord in self.world]
        return ((min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs)))

    def is_active(self, coord):
        """True if the cell at *coord* is active."""
        return coord in self.world

    def count_active_neighbors(self, coord):
        """Number of active cells among the 26 neighbors of *coord*."""
        return sum(
            1 for offset in all_neighbors_coord
            if self.is_active(add_coord(coord, offset))
        )

    def get_next_square_state(self, coord, next_world):
        """Write coord's next state into next_world (active cells only).

        Rules: an active cell survives with 2 or 3 active neighbors; an
        inactive cell becomes active with exactly 3.
        """
        active_count = self.count_active_neighbors(coord)
        if self.is_active(coord):
            if active_count in (2, 3):
                next_world[coord] = '#'
        elif active_count == 3:
            next_world[coord] = '#'

    def step(self):
        """Advance the world one generation (scan bounding box +/- 1)."""
        next_world = {}
        lo, hi = self.get_world_size()
        for x in range(lo[0] - 1, hi[0] + 2):
            for y in range(lo[1] - 1, hi[1] + 2):
                for z in range(lo[2] - 1, hi[2] + 2):
                    self.get_next_square_state((x, y, z), next_world)
        self.world = next_world

    def run(self, steps):
        """Run *steps* generations, printing the world after each one."""
        for _ in range(steps):
            self.step()
            self.print()

    def count_active(self):
        """Total number of active cells."""
        return len(self.world)

    def print(self):
        """Pretty-print every z-layer of the bounding box."""
        lo, hi = self.get_world_size()
        for z in range(lo[2], hi[2] + 1):
            print('z={}'.format(z))
            print()
            for y in range(lo[1], hi[1] + 1):
                row = ''.join(
                    '#' if self.is_active((x, y, z)) else '.'
                    for x in range(lo[0], hi[0] + 1)
                )
                print(row)
            print()
def parse_world(rows):
    """Build the initial sparse world from '#'/'.' text rows (z=0 plane)."""
    world = {}
    for y, row in enumerate(rows):
        for x, ch in enumerate(row):
            if ch == '#':
                world[(x, y, 0)] = '#'
    return world
# Default input file; an alternate path may be passed as argv[1].
inp = 'test.txt'
if len(sys.argv) == 2:
    inp = sys.argv[1]
world = parse_world([r.strip() for r in open(inp, 'r').readlines()])
l = life(world)
l.print()
# Six generations (Advent of Code 2020 day 17), then report active cells.
l.run(6)
print(l.count_active())
|
3,246 | e28cca2273e1c3ad4b8a955843e7dfb45c00694c | # -*- coding:utf-8 -*-
import math
# Read a radius from stdin and print the circle's area and circumference.
r = float(input())
print("{0:f} {1:f}".format(r*r*math.pi,2*r*math.pi))
3,247 | 1d314a04625cfadf574f122b95577c1e677a8b35 | #! /usr/bin/env python
from thor.tree import TreeNode
class Solution(object):
    def postorder_traversal(self, root: TreeNode):
        """Recursively collect node values in post-order: left, right, root."""
        if not root:
            return []
        left_vals = self.postorder_traversal(root.left)
        right_vals = self.postorder_traversal(root.right)
        return left_vals + right_vals + [root.val]
|
3,248 | f680503488a2780624b28e49b045aad75506d8c5 | class SensorReadings:
def __init__(self, sense_hat):
self.temprerature_humidity_sensor = sense_hat.get_temperature_from_humidity()
self.temperature_pressure_sensor = sense_hat.get_temperature_from_pressure()
self.humidity = sense_hat.get_humidity()
self.pressure = sense_hat.get_pressure()
def printReadings(self):
print("temperature from humidity sensor: {} C".format(self.temprerature_humidity_sensor))
print("temperature from pressure sensor: {} C".format(self.temperature_pressure_sensor))
print("humidity: {}".format(self.humidity))
print("pressure: {}".format(self.pressure))
def display(self, sense_hat):
sense_hat.show_message("T:{:.1f} C".format(self.temprerature_humidity_sensor), text_colour=red)
sense_hat.show_message("H:{:.1f}".format(self.humidity), text_colour=blue)
sense_hat.show_message("P:{:.2f}".format(self.pressure), text_colour=green)
def getAsMap(self):
return {"temperature_humidity_sensor": self.temprerature_humidity_sensor, "temperature_pressure_sensor": self.temperature_pressure_sensor, "humidity": self.humidity, "pressure": self.pressure} |
3,249 | b791afec1c9fb214d1f3b4ec0ec67f905d96aabf | # link https://deeplizard.com/learn/video/QK_PP_2KgGE
import gym
import numpy as np
import random
import time
from IPython.display import clear_output
# setup the env (deterministic 8x8 FrozenLake)
env = gym.make("FrozenLake8x8-v0", is_slippery=False)
observation = env.reset()
# setup the q-table: one row per state, one column per action
action_space_size = env.action_space.n
state_space_size = env.observation_space.n
q_table = np.zeros((state_space_size, action_space_size))
#print(q_table)
# instantiate hyper-parameters
num_episodes = 10000
steps_per_episodes = 100
learning_rate = 0.1
discount_rate = 0.99
exploration_rate = 1
max_exploration_rate = 1
min_exploration_rate = 0.01
exploration_decay_rate = 0.001
# empty list to hold our rewards over time
rewards_all_episodes = []
# main loops
for episode in range(num_episodes):
    state = env.reset()
    done = False
    rewards_current_episode = 0
    for step in range(steps_per_episodes):
        # exploration vs exploitation (epsilon-greedy)
        exploration_rate_threshold = random.uniform(0,1)
        if exploration_rate_threshold > exploration_rate:
            action = np.argmax(q_table[state,:])
        else:
            action = env.action_space.sample()
        next_state, reward, done, info = env.step(action)
        #print(next_state)
        #print(q_table.shape)
        # update q-table (Bellman update: blend old value with bootstrapped target)
        q_table[state, action] = q_table[state, action] * (1 - learning_rate) + learning_rate * (reward + discount_rate * np.max(q_table[next_state, :]))
        state = next_state
        rewards_current_episode += reward
        if done == True:
            break
    # Exploration rate decay (exponential toward min_exploration_rate)
    exploration_rate = min_exploration_rate + (max_exploration_rate - min_exploration_rate) * np.exp(-exploration_decay_rate*episode)
    rewards_all_episodes.append(rewards_current_episode)
# Calculate and print the average reward per thousand episodes
rewards_per_thousand_episodes = np.split(np.array(rewards_all_episodes),num_episodes/1000)
count = 1000
print("********Average reward per thousand episodes********\n")
for r in rewards_per_thousand_episodes:
    print(count, ": ", str(sum(r/1000)))
    count += 1000
# Print updated Q-table
print("\n\n********Q-table********\n")
print(q_table)
3,250 | d0a6bfb729a150863303621a136ae80e96ae32d0 | from tilBackend.celery import app
import smtplib
import email
import ssl
#librerias pruebas
from celery.task.schedules import crontab
from celery.decorators import periodic_task
from celery.utils.log import get_task_logger
from celery import Celery
@app.task
def correo():
    """Send a hard-coded test email through Outlook's SMTP server.

    Fixes: `conn.quit` was never actually called (missing parentheses), and
    the bare `except:` also swallowed KeyboardInterrupt/SystemExit — it now
    catches Exception only.
    NOTE(review): credentials are hard-coded in source; move them to
    environment variables or a secrets manager.
    """
    try:
        port = 587
        smtp_server = "smtp-mail.outlook.com"
        user = 'pythonzuluaga@outlook.com'
        password = "Python123"
        message ="""Subject: Asuntooooo\n
        Y este es el mensaje
        """
        conn = smtplib.SMTP(smtp_server, port)
        conn.ehlo()
        conn.starttls()  # upgrade to TLS before authenticating
        conn.login(user, password)
        #for x in range(0,10):
        conn.sendmail(user, 'lbzuluagag@eafit.edu.co', message)
        conn.quit()
    except Exception:
        print("Algo fallo")
def task_correo():
    """Send the email and log completion.

    Fix: `logger` was never defined (NameError); create it with the
    already-imported get_task_logger before use.
    """
    logger = get_task_logger(__name__)
    correo()
    logger.info("se envio el correo")
# NOTE(review): bare attribute access is a no-op — `update` is never called
# and no settings are passed. Probably intended `app.conf.update(...)`.
app.conf.update
|
3,251 | 8dae8a89d08bc522f9a5fdde8aeb9e322fafcbec | import pymongo
import os,sys
import re
from db_User import *
from db_Event import *
class ClassRoom:
    """MongoDB-backed classroom record keyed by room id (e.g. 'B1R101')."""
    # Connect to the local MongoDB client
    __myclient = pymongo.MongoClient("mongodb://localhost:27017")
    # Select/create the database
    __mydb = __myclient["MMKeyDB"]
    # Select/create the collection
    __mycol = __mydb["ClassRoom"]
    # Normalize input: accept either a room id or a display name and derive
    # the other. Returns (room_id, name) or False when neither matches.
    # NOTE(review): defined without `self` and always called unbound as
    # ClassRoom.Name2Id(...) — should be marked @staticmethod.
    def Name2Id(room_id,name):
        bool_n = bool(re.match("教\d{1}-\d{3}",name))
        bool_id = bool(re.match("B\d{1}R\d{3}",room_id))
        if not (bool_id or bool_n):
            return False
        elif bool_n:
            room_id = "B" + name[1] + "R" + name[3:6]
        else:
            name = "教" + room_id[1] + "-" + room_id[3:6]
        return room_id,name
    # NOTE(review): `event = []` is a shared mutable default argument.
    def __init__(self,
                 room_id = "",
                 name = "",
                 seats = 0,
                 key_id = "",
                 event = []):
        if not(ClassRoom.Name2Id(room_id,name)):
            # invalid id/name: flag the instance instead of raising
            self.WrongFlag = 1
        else:
            self.id,self.name = ClassRoom.Name2Id(room_id,name)
            self.seats = seats
            self.key_id = key_id
            self.event = event
            ClassRoom.PullClassroom(self)
    def PullClassroom(self):
        """Fill unset fields from the stored document; False if not found."""
        result = self.__mycol.find_one({ "_id": self.id })
        if result:
            # `or` keeps any value already supplied by the caller
            self.name = self.name or result['name']
            self.seats = self.seats or result['seats']
            self.key_id= self.key_id or result['key_id']
            self.event = self.event or result['event']
            return self
        else:
            return False
    def TurnDict(self):
        """Return this record as a MongoDB document dict."""
        mydict = {
            "_id" : self.id ,
            "name" : self.name,
            "seats" : self.seats,
            "key_id" : self.key_id,
            "event" : self.event}
        return mydict
    def PushClassroom(self):
        """Upsert this record; returns a status string."""
        mydict = self.TurnDict()
        if self.__mycol.find_one({ "_id": self.id }):
            myquery = {"_id" : self.id}
            self.__mycol.update(myquery,mydict)
            return "Acc_Updated"
        else:
            self.__mycol.insert_one(mydict) # insert a new document
            return "Acc_Created"
    def AllClassroom(self):
        """Return a cursor over every classroom document (False if falsy)."""
        cursor = self.__mycol.find()
        # __import__('ipdb').set_trace()
        if cursor:
            # index = []
            # for doc in cursor:
            # print(doc)
            # temp = [doc['_id'],doc['name'],doc['seats'],doc['event']]
            # index.append(temp)
            return cursor
        else:
            return False
    # Delete this classroom record
    # NOTE(review): this deletes from User.mycol, not this class's
    # collection — presumably should use self.__mycol; verify intent.
    def Delete(self):
        User.mycol.delete_one({"_id": self.id})
        return "Deleted"
if __name__ == '__main__':
    # Print every classroom document. ClassRoom() with no args sets
    # WrongFlag, but AllClassroom() only touches the class-level collection.
    index = ClassRoom().AllClassroom()
    for i in index:
        print(i)
|
3,252 | 049950bd4bbf7903218bb8fb3a4c91492d6af17b | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for grpc.framework.foundation.logging_pool."""
import threading
import unittest
from grpc.framework.foundation import logging_pool
_POOL_SIZE = 16
class _CallableObject(object):
def __init__(self):
self._lock = threading.Lock()
self._passed_values = []
def __call__(self, value):
with self._lock:
self._passed_values.append(value)
def passed_values(self):
with self._lock:
return tuple(self._passed_values)
class LoggingPoolTest(unittest.TestCase):
    """Exercises the basic contract of logging_pool.pool."""

    def testUpAndDown(self):
        # Both explicit shutdown and context-manager lifecycles must work.
        pool = logging_pool.pool(_POOL_SIZE)
        pool.shutdown(wait=True)

        with logging_pool.pool(_POOL_SIZE) as pool:
            self.assertIsNotNone(pool)

    def testTaskExecuted(self):
        sink = []

        with logging_pool.pool(_POOL_SIZE) as pool:
            pool.submit(lambda: sink.append(object())).result()

        self.assertTrue(sink)

    def testException(self):
        with logging_pool.pool(_POOL_SIZE) as pool:
            raised = pool.submit(lambda: 1 / 0).exception()

        self.assertIsNotNone(raised)

    def testCallableObjectExecuted(self):
        recorder = _CallableObject()
        payload = object()
        with logging_pool.pool(_POOL_SIZE) as pool:
            future = pool.submit(recorder, payload)
            self.assertIsNone(future.result())
            self.assertSequenceEqual((payload,), recorder.passed_values())
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main(verbosity=2)
|
3,253 | 55df8d13ddf28f7b0477329bee743471a0780f24 | import os
from app_web import sg
from sendgrid.helpers.mail import *
import pdfkit
from models.user import User
from models.expense import Expense
from models.statement import Statement
from models.category import Category
import tempfile
import subprocess
from .aws_uploader import upload_image_to_s3
import datetime
from peewee import fn
from flask import render_template
def create_statement(month=None):
    """Generate, upload and record a monthly expense-statement PDF per user.

    :param month: optional 'YYYY-MM' string; defaults to the current month.
    Users who already have a Statement row for the month are skipped.
    """

    def _get_pdfkit_config():
        # In production the wkhtmltopdf binary is installed by a buildpack,
        # so resolve its absolute path with `which`; locally rely on PATH.
        if os.getenv('FLASK_ENV') == 'production':
            wkhtmltopdf_cmd = subprocess.Popen(
                ['which', os.environ.get(
                    'WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')],
                stdout=subprocess.PIPE).communicate()[0].strip()
            return pdfkit.configuration(wkhtmltopdf=wkhtmltopdf_cmd)
        return pdfkit.configuration()

    def create_pdf(pdf_content, filename):
        # Render the HTML to PDF in memory and return a file-like object
        # (with filename/content_type attributes) for the S3 uploader.
        options = {
            'margin-top': '10mm',
            'margin-bottom': '10mm',
            'margin-left': '10mm',
            'margin-right': '10mm',
            'page-size': 'A4',
            'page-width': '210mm',
            'page-height': '296mm'
        }
        pdf = pdfkit.from_string(
            pdf_content, False, configuration=_get_pdfkit_config(), options=options)
        temp_file = tempfile.TemporaryFile()
        temp_file.filename = filename
        temp_file.content_type = "application/pdf"
        temp_file.write(pdf)
        temp_file.seek(0)
        return temp_file

    # Resolve the target month into a year plus short/full month labels.
    if month is None:  # BUG FIX: identity comparison instead of `== None`
        today = datetime.date.today()
        year = today.year
        full_month = today.strftime("%B %Y")   # e.g. 'December 2020'
        short_month = today.strftime("%b")     # e.g. 'Dec'
    else:
        year_str, month_str = month.split('-')  # '2020-12' -> ('2020', '12')
        year = int(year_str)
        first_day = datetime.datetime(year, int(month_str), 1)
        short_month = first_day.strftime("%b")
        full_month = first_day.strftime("%B %Y")

    for user in User.select():
        record = Statement.get_or_none(Statement.user == user.id, Statement.month == full_month)
        if record:
            print('already exist!')
            continue
        # BUG FIX: build a real SQL IN clause with .in_(); the original used
        # the Python `in` operator (`Expense.cat in user.categories`), which
        # is evaluated eagerly to a plain bool instead of producing a
        # peewee expression, so the category filter never applied.
        expenses = (Expense
                    .select()
                    .where(Expense.cat.in_(user.categories),
                           Expense.month == short_month,
                           Expense.created_at.year == year)
                    .order_by(Expense.created_at.asc()))
        total = sum(exp.amount for exp in expenses)
        html = render_template('expenses/statement.html', expenses=expenses,
                               total=total, month=str(full_month))
        pdf_name = user.username.replace(" ", "-").lower() + "-" + str(full_month).replace(" ", "-")
        temp_file = create_pdf(html, pdf_name)
        statement_url = upload_image_to_s3(user.id, temp_file)
        print(statement_url)
        Statement(user=user.id, exp_url=statement_url, month=full_month).save()
        # Statement email is intentionally disabled; re-enable with SendGrid:
        # message = Mail(from_email=..., to_emails=user.email,
        #                subject=f"{month} Expenses Statement",
        #                html_content=Content("text/html", ...))
        # sg.send(message)
|
3,254 | b09d0806dfc6f4badfd9f2ac9c3f6d17d3df8e8c | from features.steps.web.test_home_page import *
from features.steps.mobile.test_home_page import *
from features.steps.web.test_login_page import * |
3,255 | 4e98ebd040297cb9472368478452bc484e0aaa04 | water = 400
milk = 540
coffee = 120
cups = 9
money = 550
def buying():
    """Sell one drink: check stock, deduct ingredients, collect payment.

    Always returns to the main menu afterwards, even when a purchase fails
    or the input is unrecognised.
    """
    global water
    global coffee
    global cups
    global milk
    global money
    choice_coffee = input("What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino, back - to main menu:")
    # Recipe per drink code: (water ml, milk ml, coffee g, price $).
    # BUG FIX: the latte originally checked `coffee > 16` but deducted 20g,
    # allowing the bean count to go negative; all drinks also required
    # `cups > 1` (an off-by-one that refused to sell the last cup) and most
    # shortage branches silently fell out of the program.
    recipes = {"1": (250, 0, 16, 4),
               "2": (350, 75, 20, 7),
               "3": (200, 100, 12, 6)}
    if choice_coffee in recipes:
        need_water, need_milk, need_coffee, price = recipes[choice_coffee]
        if water < need_water:
            print("Sorry, not enough water!")
        elif milk < need_milk:
            print("Sorry, not enough milk!")
        elif coffee < need_coffee:
            print("Sorry, not enough coffee beans!")
        elif cups < 1:
            print("Sorry, not enough disposable cups!")
        else:
            print("I have enough resources, making you a coffee!")
            water -= need_water
            milk -= need_milk
            coffee -= need_coffee
            cups -= 1
            money += price
    # "back" (and any unknown input) simply returns to the main menu.
    coffee_machine()
def filling():
    """Prompt for refill amounts and add them to the machine's stock."""
    global water
    global coffee
    global cups
    global milk
    # Prompt order matters for the expected console transcript.
    water += int(input("Write how many ml of water do you want to add:"))
    milk += int(input("Write how many ml of milk do you want to add:"))
    coffee += int(input("Write how many grams of coffee beans do you want to add:"))
    cups += int(input("Write how many disposable cups of coffee do you want to add:"))
    coffee_machine()
def taking():
    """Hand over the collected cash and empty the till."""
    global money
    payout = money
    money = 0
    print("I gave you $" + str(payout))
    coffee_machine()
def stats_print():
    """Print the machine's current stock levels and cash on hand."""
    print("The coffee machine has:")
    for amount, label in ((water, " of water"),
                          (milk, " of milk"),
                          (coffee, " of coffee beans"),
                          (cups, " of disposable cups"),
                          (money, " of money")):
        print(str(amount) + label)
def coffee_machine():
    """Main menu: dispatch one user action, recursing until 'exit'.

    NOTE(review): the menu "loop" is implemented by mutual recursion (each
    action handler calls coffee_machine() again), so a very long session
    could hit Python's recursion limit. An unrecognised action falls
    through and terminates the program.
    """
    user_action = input("Write action (buy, fill, take, remaining, exit):")
    if user_action == "buy":
        buying()
    elif user_action == "fill":
        filling()
    elif user_action == "take":
        taking()
    elif user_action == "remaining":
        stats_print()
        coffee_machine()
    elif user_action == "exit":
        return

# Start the interactive session.
coffee_machine()
3,256 | 9543992e1b115f83640a07c4d4372be0fb465199 | # Reddit API feed
import praw
import sys
import os
def main():
    """Prompt for a subreddit and keyword, then dump its hot posts.

    NOTE: this module is Python 2 (print statements, raw_input).
    """
    # Require API credentials from the environment before doing anything.
    if os.getenv("REDDIT_CLIENT_ID") is None:
        print "Set your Reddit environment variables:"
        print "REDDIT_CLIENT_ID and REDDIT_CLIENT_SECRET"
        sys.exit()
    client_id = os.environ['REDDIT_CLIENT_ID']
    client_secret = os.environ['REDDIT_CLIENT_SECRET']
    try:
        reddit_api = praw.Reddit(client_id = client_id,
            client_secret = client_secret,
            user_agent = "sentiment")
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt; praw
        # typically validates credentials lazily, so this may never fire here.
        print "Reddit auth failed."
        sys.exit()
    sub = raw_input("Subreddit: ")
    keyword = raw_input("Keyword: ")
    get_posts(keyword, sub, reddit_api)
# currently only dumps top 10 posts from subreddit
# regardless of keyword
def get_posts(keyword, sub, reddit_api):
    """Print the titles of the 10 hottest posts in subreddit *sub*.

    TODO: `keyword` is currently ignored (see the comment above) — filtering
    by keyword is not implemented yet.
    """
    for post in reddit_api.subreddit(sub).hot(limit=10):
        print post.title

if __name__ == '__main__':
    main()
|
3,257 | 29dc940292a6805aabfa5bed22bb75d31140c83f | def check_bit4(input):
mas=0b1000
desired=input & mas
if desired>0:
return "om"
else :
return "off"
|
3,258 | 846a42a997539a45576d3ecbe0bd290e00b55935 | from output.models.sun_data.ctype.content_type.content_type00401m.content_type00401m_xsd.content_type00401m import (
A1,
A,
)
__all__ = [
"A1",
"A",
]
|
3,259 | 4c42bad4197b51be0e9d18307c7b954a29281fe1 | #Exercise 5
#Define with names stair1, stair2, and stair3 (from bottom up to top), and insert within the building model, the 3 stair models of the building. |
3,260 | 6829f7bcbc1b12500795eec19829ff077502e270 | import os
import math
def get_datas():
    """Prompt for a tab-separated data file and load it.

    Returns (headers, rows) where headers is the list of column names from
    the first non-empty line and rows is a list of dicts (one per data
    line), or None if reading the file fails.
    """
    while True:
        filename = input('Please enter filename:')
        if not filename.strip():
            print('Filename is empty!')
            continue
        if not os.path.exists(filename):
            print('File is not exists!')
            continue
        break
    try:
        with open(filename) as f:
            datas = []
            # BUG FIX: skip blank lines before the header row. The original
            # tested `if headers: break`, but str.split('\t') never returns
            # an empty list (a blank line yields ['']), so the check was
            # always true and blank leading lines became the header.
            while True:
                line = f.readline()
                if not line:
                    # Completely empty file: no header at all.
                    return [], datas
                if line.strip():
                    headers = line.strip().split('\t')
                    break
            for line in f.readlines():
                if line.strip():
                    row = line.strip().split('\t')
                    datas.append(dict(zip(headers, row)))
            return headers, datas
    except Exception as e:
        # Report and return None; the caller retries on a falsy result.
        print(e)
def display_all(headers, datas):
    """Page through all records, 10 per screen, until exhausted or 'q'."""
    if not datas:
        print('No datas!')
        return
    page_size = 10
    max_page = math.ceil(len(datas) / page_size)
    for page in range(max_page):
        # Header row, tab-separated.
        for header in headers:
            print(header, end='\t')
        print()
        # One screenful of records.
        for row in datas[page * page_size:(page + 1) * page_size]:
            for k in headers:
                print(row[k], end='\t')
            print()
        command = input('Continue(Enter) or Quit(Q)?')
        if command.strip().lower() == 'q':
            break
def query_from_id(headers, datas):
    """Prompt for a student ID and print every matching record."""
    while True:
        ID = input("Please input a students\'s ID:").strip()
        if ID:
            break
    found = False
    for data in datas:
        if data['ID'] == ID:
            found = True
            for header in headers:
                print(header, ':\t', data[header])
    if not found:
        print('No data was finded!')
def query_from_lastname(headers, datas):
    """Prompt for a last-name prefix and print case-insensitive matches."""
    name = ""
    while not name:
        name = input("Please input a students\'s name:").strip()
    prefix = name.lower()
    matched = False
    for data in datas:
        if data['Last'].lower().startswith(prefix):
            matched = True
            for header in headers:
                print(header, ':\t', data[header])
    if not matched:
        print('No data was finded!')
def query_from_some_field(headers, datas):
    """Prompt for a field name and value, print records whose field matches."""
    while True:
        print('All fields:', headers)
        field_name = input("Please input a students\'s field name:").strip()
        if field_name and field_name in headers:
            break
    value = ""
    while not value:
        value = input("Please input a students\'s value:").strip().lower()
    # Header row first, then every case-insensitive match.
    for header in headers:
        print(header, end='\t')
    print()
    for data in datas:
        if data[field_name].lower() == value:
            for header in headers:
                print(data[header], end='\t')
            print()
def display_grad_year(headers, datas):
    """Prompt for a graduation year and page through matching records."""
    grad_year = ""
    while not (grad_year and grad_year.isdigit()):
        grad_year = input("Please input a students\'s GradYear:").strip()
    # GradYear values are compared as strings, matching the file contents.
    filtered = [row for row in datas if row['GradYear'] == grad_year]
    display_all(headers, filtered)
def count_one_year(headers, datas, grad_year):
    """Print per-degree-program counts and percentages for one grad year.

    `headers` is accepted for signature compatibility with the other query
    handlers but is not used.
    """
    counts = {}
    for data in datas:
        if data['GradYear'] == grad_year:
            program = data['DegreeProgram']
            counts[program] = counts.get(program, 0) + 1
    if counts:
        totals = sum(counts.values())
        for program, count in counts.items():
            print(program, ':', count, 'Percent:', count / totals * 100)
    else:
        print('No datas!')
def count_from_grad_year(headers, datas):
    """Prompt for a graduation year, then print per-program counts either for
    exactly that year ('on') or for that year and every later year ('aft').
    """
    while True:
        grad_year = input("Please input a students\'s GradYear:").strip()
        if grad_year and grad_year.isdigit():
            break
    while True:
        on_after = input('Please Select On or After(On or Aft)? :').strip().lower()
        if on_after and on_after in ('on', 'aft'):
            break
    if on_after == 'on':
        count_one_year(headers, datas, grad_year)
    elif on_after == 'aft':
        # Latest graduation year present in the data.
        max_year = 0
        for data in datas:
            max_year = max(max_year, int(data['GradYear']))
        if max_year < int(grad_year):
            print('No datas')
        else:
            # BUG FIX: the original looped over range(grad_year, max_year)
            # but passed `grad_year` to count_one_year on every iteration
            # (re-counting the same year) and excluded max_year itself.
            for year in range(int(grad_year), max_year + 1):
                count_one_year(headers, datas, str(year))
def main():
    """Entry point: load the data file, then run the interactive query loop."""
    print('init from file ...')
    # Retry until get_datas() yields a truthy (headers, rows) pair.
    while True:
        datas = get_datas()
        if datas:
            break
    headers, studs = datas
    # Command name -> handler; every handler takes (headers, rows).
    commands = {'list':display_all,'qid':query_from_id,
                'qlst':query_from_lastname, 'qfd':query_from_some_field,
                'qcgy': count_from_grad_year, 'dgy':display_grad_year}
    while True:
        print()
        print('-------------------------------')
        print('List all:(list); Query ID:(Qid); Query Last(Qlst); Query field(Qfd);\
 Count GradYear(Qcgy); display_grad_year(Dgy); Quit(Q)')
        print('-------------------------------')
        command = input('Input your command:').lower()
        print()
        if command == 'q':
            break
        if not command or command not in commands.keys():
            print('Bad command!')
            continue
        else:
            commands[command](headers, studs)

if __name__ == '__main__':
    main()
|
3,261 | 50fa8852f74f4d2428fb238a86dd1feedb210877 | # Umut Cakan Computer Science S006742
# Fibonacci list. First and second terms are static.
fib_list = [0, 1]
# Current index.
CURRENT_INDEX = 2
# Function for the checking input is a Fibonacci number or not.
def check_fibonacci_number(number=None):
    """Report whether a number is a Fibonacci number.

    :param number: the value to test; defaults to the module-level
        NUMBER_TO_BE_CHECKED so existing zero-argument calls keep working.
    :returns: True if the value is a Fibonacci number, else False (the
        original printed only; the return value is a backward-compatible
        addition for programmatic use).
    """
    global CURRENT_INDEX
    if number is None:
        number = NUMBER_TO_BE_CHECKED
    # Extend the shared fib_list until its last term reaches `number`;
    # terms beyond that cannot equal it.
    while fib_list[CURRENT_INDEX - 1] < number:
        fib_list.append(fib_list[CURRENT_INDEX - 1] + fib_list[CURRENT_INDEX - 2])
        CURRENT_INDEX += 1
    if number not in fib_list:
        print("Your number is not a Fibonacci number.")
        return False
    print("Your number is a Fibonacci number.")
    return True
# Get number to be checked from user.
while True:
try:
NUMBER_TO_BE_CHECKED = int(input("Please enter the number to check: "))
# If it is not an integer throw an error and wait for another input.
except ValueError:
print("Your input is not an integer!")
continue
# If it is an integer, proceed.
else:
check_fibonacci_number()
break
|
3,262 | 8981d53641d22430efb2dd43401fab562b8a95ed | import socket
# Simple two-client relay: accept one connection on each port, then forward
# whatever each client sends to the other, 4 KiB at a time.
comms_socket1 = socket.socket()
comms_socket2 = socket.socket()
comms_socket1.bind(("120.79.26.97", 55000))
comms_socket2.bind(("120.79.26.97", 55001))
comms_socket1.listen()
user1, address1 = comms_socket1.accept()
comms_socket2.listen()
user2, address2 = comms_socket2.accept()
while True:
    # BUG FIX: the received text was stored in `send_date` but sent as
    # `send_data`, which raised NameError on the first message.
    send_data = user1.recv(4096).decode("UTF-8")
    user2.send(bytes(send_data, "UTF-8"))
    send_data = user2.recv(4096).decode("UTF-8")
    user1.send(bytes(send_data, "UTF-8"))
|
3,263 | 1c0f194bbdc6f7e3e4feb114e521aa958f11e83e | from typing import List
from pydantic import BaseModel
class BinBase(BaseModel):
    """Fields shared by all Bin schemas."""
    # NOTE(review): defaults of None suggest these should be Optional[str].
    name: str = None
    title: str = None
class BinCreate(BinBase):
    """Payload for creating a bin (write model; includes the password)."""
    owner_id: int
    password: str
class Bin(BinBase):
    """Bin as returned by the API (read model)."""
    id: int
    # TODO: token?

    class Config():
        # Allow pydantic to build this model directly from ORM objects.
        orm_mode = True
class UserBase(BaseModel):
    """Fields shared by all User schemas."""
    username: str
class UserCreate(UserBase):
    """Payload for creating a user (write model; includes the password)."""
    password: str
class User(UserBase):
    """User as returned by the API (read model); excludes the password."""
    id: int
    # TODO: password?
    # bins: List[Bin] = []

    class Config():
        # Allow pydantic to build this model directly from ORM objects.
        orm_mode = True
|
3,264 | 5261ae90a67e2df8dd1c679a8046ee3e0cbc6221 | '''
Module for handling configurable portions of tools
'''
from json import load
default_file_loc = 'config.json'
config = None
def loadConfiguration(fileloc):
    '''Load JSON configuration from *fileloc* into the module-level config.

    The first load sets the config dict; subsequent loads merge into it,
    with newly loaded keys overriding existing ones.
    '''
    global config
    with open(fileloc, 'r') as handle:
        new_settings = load(handle)
    if config is None:
        config = new_settings
    else:
        config.update(new_settings)
def get(key):
    '''Gets the configuration value for key '''
    # Raises KeyError for an unknown key, and TypeError if called before
    # loadConfiguration() has populated the module-level config.
    return config[key]
loadConfiguration(default_file_loc)
|
3,265 | 838279b4f8d9e656c2f90ff06eaff3bd9c12bbef | import pygame
from math import sqrt, sin, cos
from numpy import arctan
from os import path
# try these colors or create your own!
# each valid color is 3-tuple with values in range [0, 255]
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
WHITEGRAY = (192, 192, 192)
RED = (255, 0, 0)
MIDRED = (192, 0, 0)
DARKRED = (128, 0, 0)
MAROON = (80, 0, 0)
GREEN = (0, 255, 0)
DARKGREEN = (0, 128, 0)
GREYGREEN = (0, 128, 128)
MINT = (51,153,102)
JADE = (0, 250, 154)
BLUE = (0, 0, 255)
NAVY = (0, 102, 204)
DARKBLUE = (0, 0, 128)
MIDBLUE = (0, 0, 192)
PINK = (255, 0, 255)
YELLOW = (255, 255, 0)
MIDYELLOW = (192, 192, 0)
MODESTYELLOW = (128, 128, 0)
CYAN = (0, 255, 255)
ORANGE = (255, 102, 0)
MIDORANGE = (192, 79, 0)
PURPLE = (128, 0, 128)
MIDPURPLE = (192, 0, 192)
sunset = [MIDORANGE, MIDRED, DARKRED, DARKBLUE]
ocean = [GREEN, BLUE, CYAN, PINK]
carousel = [RED, YELLOW, GREEN, YELLOW] # trying running this with rotate
summer = [GREEN, YELLOW, GREEN, BLUE]
#--------------CONFIGURATIONS----------------
# various configurations change the way image is displayed
# feel free to play around and see how the image changes
aspect_ratio = 3840 / 2160 # set this to the aspect ratio of your screen
x = 1540 # width of the window
y = int(x / aspect_ratio) # height of your screen
size = [x, y]
# Try out preset colorschemes or try out new ones
colors = ocean
background = BLACK
squares = 800 # number of squares drawn in the window
shade = True # creates fading effect on the colors as spiral moves outward
gradient = 1.5 # recommend 1.05 for dark colors (128-192) and 1.4 for light colors (255)
rotate = False # rotates colors around the spiral
same_colors = False # use the same color for all sides of a each square in the spiral
curr_length = 4 # starting side length of the first square in the spiral
# determines how tightly spiral is wound - rate at which the side lengths grow linearly
# use carefully, may cause divide by zero error for certain increments
adder = 4
#--------------HELPER FUNCTIONS---------------
distance = lambda p1, p2: sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2) # get distance b/t two points
bound = lambda x: 0 if x < 0 else min(255, x) # keep color values within [0,255]

def next_point(p1, p2):
    """Return the next spiral vertex, `curr_length` pixels from p1.

    p1 and p2 are consecutive existing vertices; the new point extends the
    spiral using the triangle geometry below. NOTE(review): raises
    ZeroDivisionError when p1 and p2 share a y coordinate — this is the
    divide-by-zero risk the `adder` config comment warns about.
    """
    diff_x = p1[0] - p2[0]
    diff_y = p1[1] - p2[1]
    #calculate next point using triangle geometry
    angle = arctan(abs(diff_x) / abs(diff_y))
    new_diff_x = int(sin(angle) * curr_length)
    new_diff_y = int(cos(angle) * curr_length)
    # The step sign is chosen from the p2->p1 offset so successive sides
    # keep turning in the same rotational direction.
    new_x = p1[0] + new_diff_x if diff_x < 0 else p1[0] - new_diff_x
    new_y = p1[1] + new_diff_y if diff_y < 0 else p1[1] - new_diff_y
    return [new_x, new_y]
#--------------INITIALIZATION-----------------
pygame.init()
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Create a spiral drawing")
done = False  # becomes True when the user closes the window
clock = pygame.time.Clock()
screen.fill(background)
#-----------------ARTWORK---------------------
# Corners of the first (innermost) square, centred in the window.
p1 = [x//2 - curr_length//2, y//2 - curr_length//2]
p2 = [x//2 + curr_length//2, y//2 - curr_length//2]
p3 = [x//2 + curr_length//2, y//2 + curr_length//2]
p4 = [x//2 - curr_length//2, y//2 + curr_length//2]
points = [p1, p2, p3, p4]
#end of each line
curr_length += adder
p12 = [p1[0] + curr_length, p1[1]]
p22 = [p2[0], p2[1] + curr_length]
p32 = [p3[0] - curr_length, p3[1]]
p42 = [p4[0], p4[1] - curr_length]
new_points = [p12, p22, p32, p42]
# Draw the four sides of the seed square.
for p1, p2 in zip(points, new_points):
    pygame.draw.line(screen, colors[1], p1, p2)
# every iteration draws a new square and updates the points
for j in range(squares):
    curr_length += adder
    points = new_points
    new_points = [0, 0, 0, 0]
    if rotate and j % 40 == 0: # change colors every 10 squares
        # NOTE(review): the comment says every 10 squares but the code
        # rotates every 40 — confirm which is intended.
        old = colors
        colors = [old[3], old[0], old[1], old[2]] # shuffle colors to create rotating effect
    # every iteration calculates a new point and draws line from points[i] to new point
    for i in range(len(points)):
        p1 = points[i]
        p2 = points[(i+1)%4]
        p1_new = next_point(p1,p2)
        new_points[(i+1)%4] = p1_new
        col = colors[0] if same_colors else colors[i]
        fact = j * gradient if shade else 1 # with shade set, colors naturally fade to black
        new_col = (bound(col[0] - fact), bound(col[1] - fact), bound(col[2] - fact))
        pygame.draw.line(screen, new_col, p1, p1_new)
pygame.display.flip()
#----------------EVENT LOOP-------------------
while not done:
    clock.tick(10)  # idle at ~10 FPS while waiting for events
    for event in pygame.event.get():
        if event.type == pygame.QUIT: # If user clicked close
            done = True
            pygame.display.iconify()
            # Offer to save the finished drawing before quitting; the
            # "images" directory is assumed to exist.
            filename = input("Enter filename with no extension or 0 to quit: ")
            if filename != "0":
                pygame.image.save(screen, path.join("images", filename + ".jpeg"))
3,266 | 624027373f53f62ededc40bfc859f28b5a83ca04 | #!/use/bin/python
import os, sys
from io import BytesIO
from pathlib import Path
from flask_config import app
from flask import send_file
from PyPDF2 import PdfFileReader, PdfFileWriter
def rotate_pdf(working_dir, filename, rotation):
    """Rotate every page of `<filename>.pdf` in *working_dir* and return the
    result as a Flask file download named 'pages.pdf'.

    rotation codes: '1' = 90° clockwise, '2' = 180°, '3' = 90° counter-
    clockwise (see rotate_pdf_pages).
    NOTE(review): os.chdir mutates process-wide state and is not safe if
    requests are handled concurrently — consider absolute paths instead.
    """
    os.chdir(working_dir)
    output_name = 'pages'
    rotate_pdf_pages(filename, rotation, output_name)
    memory_file = BytesIO()  # NOTE(review): dead assignment, replaced below
    with open('{}.pdf'.format(output_name), 'rb') as fin:
        memory_file = BytesIO(fin.read())
    memory_file.seek(0)
    # Restore this module's directory before handing the file to Flask.
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    return send_file(memory_file, attachment_filename='{}.pdf'.format(output_name), as_attachment=True)
def rotate_pdf_pages(filename, rotation, output_name):
    """Write `<output_name>.pdf` with every page of `<filename>.pdf` rotated.

    rotation codes: '1' = 90° clockwise, '2' = 180°, '3' = 90° counter-
    clockwise; any other value leaves the pages unrotated.
    """
    pdf_reader = PdfFileReader('{}.pdf'.format(filename))
    pdf_writer = PdfFileWriter()
    for page in range(pdf_reader.getNumPages()):
        current_page = pdf_reader.getPage(page)
        # BUG FIX: with an unrecognised rotation code the original reused the
        # previous iteration's `rotated_page` (or raised NameError on the
        # first page); fall back to the unrotated page instead.
        if rotation == '1':
            rotated_page = current_page.rotateClockwise(90)
        elif rotation == '2':
            rotated_page = current_page.rotateClockwise(180)
        elif rotation == '3':
            rotated_page = current_page.rotateCounterClockwise(90)
        else:
            rotated_page = current_page
        pdf_writer.addPage(rotated_page)
    with open('{}.pdf'.format(output_name), 'wb') as out:
        pdf_writer.write(out)
|
3,267 | 1fc1d2e1a7d18b1ef8ee6396210afe47a63ab09f | import sys
import os
from pyparsing import *
import csv
def parse_cave_details(details):
    """Parse one Bretz-format cave description line into a pyparsing result.

    The returned ParseResults exposes named fields: name, aliquot, section,
    township (number/direction), range (number/direction), county, quad
    (is_on_map/alias/name) and description. Raises pyparsing.ParseException
    when *details* does not match the grammar below.
    """
    ##########################################################################
    # Define the Bretz Grammar.
    # Sample cave description:
    #   Boring Caverns SE1/4 NW1/4 sec. 16, T. 37 N., R. 10 W., Pulaski County Not shown on Waynesville Quadrangle map The mouth of this cave ...\n
    #   Another Cave S1/2 sec. 15, T. 36 N., R. 12 W., Pulaski County Not shown on Waynesville Quadrangle map There are two large caves...\n
    #   Something Bridge Sec. 15 or 22, T. 36 N., R. 13 W., Pulaski County Not shown on Richland Quadrangle map This cave is near Ozark...\n
    #
    # CAVE ::= CAVE_NAME [ALIQUOT_PART] SECTION, TOWNSHIP, RANGE, COUNTY QUAD_MAP DESCRIPTION
    # ALIQUOT_PART ::= (((NE|SE|SW|NW)1/4)|((N|E|S|W)1/2))*
    # SECTION ::= (S|s)ec. num+
    # TOWNSHIP ::= T. num+ TOWNSHIP_DIR.
    # TOWNSHIP_DIR ::= N|S
    # RANGE ::= R. num+ RANGE_DIR.
    # RANGE_DIR ::= E|W
    # COUNTY = WORD+ County
    # QUAD_MAP = (Not s|S)hown on QUAD Quadrangle map
    # QUAD = WORD+
    # DESCRIPTION = WORD+
    aliquotQuadrantID = Literal("NE") |\
        Literal("SE") |\
        Literal("SW") |\
        Literal("NW")
    aliquotQuadrantString = aliquotQuadrantID + Suppress("1/4")
    aliquotHalfString = oneOf("N E S W") + Suppress("1/2")
    # Join the aliquot tokens back into a single space-separated string.
    aliquotPart = Group(ZeroOrMore(aliquotQuadrantString | aliquotHalfString))\
        .setResultsName("aliquot")\
        .setParseAction(lambda kwd: " ".join(kwd[0]))
    sectionToken = Suppress(oneOf("S s") + Literal("ec") + Optional("."))
    sectionNumber = Word(nums)
    # A section may list alternatives: "Sec. 15 or 22".
    section = Group(
        sectionToken \
        + sectionNumber \
        + ZeroOrMore(Suppress("or") + sectionNumber)
    ).setResultsName("section")
    # The cave name is every word up to the first aliquot/section token.
    afterEndOfCaveName = aliquotHalfString | aliquotQuadrantString | sectionToken
    caveName = Group(OneOrMore(~afterEndOfCaveName + Word(printables)))\
        .setResultsName('name')\
        .setParseAction(lambda name: " ".join(name[0]))
    townshipDirection = oneOf("N S").setResultsName("direction")
    townshipNumber = Word(nums).setResultsName("number")
    township = Suppress("T.") \
        + Group(townshipNumber + townshipDirection).setResultsName("township")\
        + Suppress('.')
    rangeDirection = oneOf("E W").setResultsName("direction")
    rangeNumber = Word(nums).setResultsName("number")
    range_info = Suppress("R.") \
        + Group(rangeNumber + rangeDirection).setResultsName("range")\
        + Suppress('.')
    countyKeyword = Literal("County")
    countyName = Group(OneOrMore(~countyKeyword + Word(alphas+"-'.")))\
        .setResultsName("county")\
        .setParseAction(lambda c: " ".join(c[0]))
    county = countyName + Suppress("County")
    # "Not shown on ..." vs "Shown [as ALIAS] on ..." -> boolean is_on_map.
    notShownOnQuad = (Literal("Not") + Suppress("s"))\
        .setParseAction(lambda x: False)
    shownOnQuad = Literal("S").setParseAction(lambda x: True)
    onKeyword = Literal("on")
    mapAlias = Group(OneOrMore(~onKeyword + Word(printables)))\
        .setParseAction(lambda alias: " ".join(alias[0]))\
        .setResultsName("alias")
    quadrangleStatus = (shownOnQuad | notShownOnQuad).setResultsName("is_on_map")\
        + Suppress("hown") \
        + Optional(Suppress('as') + mapAlias)\
        + Suppress(onKeyword)
    quadrangleKeyword = Literal("Quadrangle") + Literal("map")
    quadrangleName = Group(OneOrMore(~quadrangleKeyword + Word(alphas+"-'.")))\
        .setResultsName("name")\
        .setParseAction(lambda name: " ".join(name[0]))
    quadrangle = Group(quadrangleStatus + quadrangleName).setResultsName("quad") \
        + Suppress(quadrangleKeyword)
    # Free-text description: everything after the quadrangle clause.
    description = Group(ZeroOrMore(Word(alphanums + printables)))\
        .setResultsName("description")\
        .setParseAction(lambda desc: " ".join(desc[0]))
    location = caveName \
        + aliquotPart \
        + section + Suppress(',') \
        + township + Suppress(',') \
        + range_info + Suppress(',')\
        + county \
        + quadrangle \
        + description
    return location.parseString(details)
if __name__ == "__main__":
    # Usage: python thisfile.py /path/to/file.txt
    if len(sys.argv) < 2:
        print("ERROR: pass in the filename as the second argument.")
        print(" $ python {0} /path/to/file.txt".format(sys.argv[0]))
        exit()

    filepath = sys.argv[1]
    with open(filepath) as f:
        raw_text = f.read()

    # One cave record per non-empty line.
    caves = []
    for raw_cave_text in raw_text.split("\n"):
        raw_cave_text = raw_cave_text.strip()
        if not raw_cave_text:
            continue
        try:
            cave = parse_cave_details(raw_cave_text)
            caves.append({
                'Cave name': cave.name,
                'Alias': cave.quad.alias,
                'On map': cave.quad.is_on_map,
                'Quad': cave.quad.name,
                'County': cave.county,
                'State': 'MO',
                'Principal Meridian Code': 5,
                'Township Number': cave.township.number,
                'Township Fraction': 0,
                'Township Direction': cave.township.direction,
                'Range Number': cave.range.number,
                'Range Fraction': 0,
                'Range Direction': cave.range.direction,
                'Section': cave.section[0],
                'Section Division': "".join(cave.aliquot),
                'Township Duplicate': 0,
                'Description': raw_cave_text,
            })
        except Exception:
            # BUG FIX: the original bare `except:` printed cave.name, but
            # `cave` is unbound (or stale from the previous record) when
            # parse_cave_details itself raises.
            print("="*80)
            print("ERROR: unexpected format for record:")
            print(raw_cave_text)
            import traceback
            print(traceback.format_exc())
            print("\t" + "\n\t".join([str(x) for x in sys.exc_info()]))
            print("Skipping this cave for the next one")

    if caves:
        output_path = os.path.basename(filepath).split(".")[0] + ".csv"
        print("#"*80)
        print("{0} caves processed! Saving to '{1}'.".format(len(caves), output_path))
        # NOTE(review): binary mode + DictWriter targets Python 2 (see the
        # writeheader fallback); on Python 3 use open(..., 'w', newline='').
        with open(output_path, 'wb') as f:
            cave_csv = csv.DictWriter(f, fieldnames=caves[0].keys())
            try:
                cave_csv.writeheader()
            except AttributeError:  # csv before Python 2.7 lacks writeheader()
                header = {}
                for k in caves[0].keys():
                    header[k] = k
                cave_csv.writerow(header)
            cave_csv.writerows(caves)
    else:
        # BUG FIX: guard against caves[0] IndexError when nothing parsed.
        print("No caves parsed; nothing to write.")
|
3,268 | 4524dd5f5cddd475ca39fea7ec94fa3c1df6bd2e | from sharpie import Sharpie
class SharpieSet():
    """A collection of sharpies with helpers to count and prune usable pens."""

    def __init__(self):
        # All sharpies currently in the set (usable or not).
        self.sharpies = []
        # Snapshot of usable sharpies from the last count_usable() call.
        self.usable_sharpies = []
        self.usable_sharpies_count = 0

    # NOTE: the annotation is a string so this module also loads where the
    # Sharpie class is unavailable; any object with an `ink_amount`
    # attribute works.
    def add_sharpie(self, sharpie: "Sharpie"):
        """Add *sharpie* to the set."""
        self.sharpies.append(sharpie)

    def count_usable(self):
        """Recompute the list and count of sharpies that still have ink.

        BUG FIX: the original appended to usable_sharpies without clearing
        it first, so calling this twice double-counted every pen.
        """
        self.usable_sharpies = [s for s in self.sharpies if s.ink_amount > 0]
        self.usable_sharpies_count = len(self.usable_sharpies)

    def remove_unusable(self):
        """Drop sharpies that are out of ink.

        BUG FIX: the original removed items from self.sharpies while
        iterating over it, which skips the element after each removal.
        """
        self.sharpies = [s for s in self.sharpies if s.ink_amount > 0]
|
3,269 | 846682072a125c76fc9ffa011109abce7c3bb5d7 | from bs4 import BeautifulSoup, CData
import requests,sys,csv,json,os, urllib.request, re
import json
url2 = "http://ufm.edu/Estudios"
def estudios(Minisoup):
    """Scrape ufm.edu/Estudios and print: the top menu items, all degree
    programs, the leftbar entries, the social-media links and the total
    count of <a> tags.

    NOTE(review): the `Minisoup` parameter is never used — the function
    re-fetches `url2` itself. Exits the process if the request fails.
    """
    print("2.Estudios")
    #now navigate to /Estudios (better if you obtain href from the DOM)
    try:
        html_content = requests.get(url2).text
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt; prefer
        # requests.RequestException.
        print(f"unable to get {url2}")
        sys.exit(1)
    soup = BeautifulSoup(html_content, "html.parser")
    #display all items from "topmenu" (8 in total)
    print("Display all items from topmenu:")
    b = 0
    tabla = soup.find("div", { "id" : "topmenu" })
    for datos in tabla.findAll("li"):
        # for datos in tabla.findAll("a",{"class":"external text"}):
        celda = datos.text
        b += 1
        print(b,"<",celda,">")
    print("-------------------------------------------------------------------------------------------------------")
    #display ALL "Estudios" (Doctorados/Maestrias/Posgrados/Licenciaturas/Baccalaureus)
    print("Display all Estudios:")
    tablas1 = soup.find("div",{"id":"mw-content-text"})
    for datos in tablas1.findAll("div",{"class":"estudios"}):
        celdas = datos.text
        print("-",celdas)
    print("-------------------------------------------------------------------------------------------------------")
    #display from "leftbar" all <li> items (4 in total)
    print("Display from leftbar all <li> items:")
    c=0
    tablas2 = soup.find("div",{"class":"leftbar"})
    for datos in tablas2.findAll("li"):
        #for datos in tablas2.findAll("a",{"class":"external text"}):
        celdas2 = datos.text
        c += 1
        #print(celdas2)
        print(c,"<",celdas2,">")
    print("-------------------------------------------------------------------------------------------------------")
    #get and display all available social media with its links (href) "class=social pull-right"
    print("Get and display all available social media with its links (href) class =social pull -right:")
    tablas3 = soup.find("div",{"class":"social pull-right"})
    for datos in tablas3.findAll('a'):
        celdas3 = datos.get('href')
        print("-<",celdas3,">")
    print("-------------------------------------------------------------------------------------------------------")
    #count all <a> (just display the count)
    d=0
    for datos in soup.find_all('a'):
        d += 1
    print("count all <a: <",d,">")
    print("-------------------------------------------------------------------------------------------------------")
    print("=======================================================================================================")
3,270 | 446c438b79f9957289fa85f21516c13d67e2cfaf | from mesa import Model
from mesa.space import SingleGrid
from mesa.time import BaseScheduler, RandomActivation, SimultaneousActivation
from pdpython_model.fixed_model.agents import PDAgent
from mesa.datacollection import DataCollector
class PDModel(Model):
    """Mesa model for a spatial iterated Prisoner's Dilemma on a torus grid."""

    # Maps a config string to the corresponding Mesa scheduler class.
    schedule_types = {"Sequential": BaseScheduler,
                      "Random": RandomActivation,
                      "Simultaneous": SimultaneousActivation}

    def __init__(self, height=8, width=8,
                 number_of_agents=2,
                 schedule_type="Simultaneous",
                 rounds=1,):
        """Build the grid, scheduler and agents.

        NOTE(review): the `rounds` argument is accepted but unused here;
        run length is controlled by run_model()'s own `rounds` parameter.
        """
        # Model Parameters
        self.height = height
        self.width = width
        self.number_of_agents = number_of_agents
        self.step_count = 0
        self.schedule_type = schedule_type
        # Payoff matrix keyed by (my move, opponent move): C=cooperate, D=defect.
        self.payoffs = {("C", "C"): 3,
                        ("C", "D"): 0,
                        ("D", "C"): 5,
                        ("D", "D"): 2}
        # Model Functions
        self.schedule = self.schedule_types[self.schedule_type](self)
        self.grid = SingleGrid(self.height, self.width, torus=True)
        # Find list of empty cells; agents are placed in this fixed order.
        self.coordinates = [(x, y) for x in range(self.width) for y in range(self.height)]
        self.agentIDs = list(range(1, (number_of_agents + 1)))
        self.make_agents()
        self.running = True

    def make_agents(self):
        """Create the agents and place them on consecutive grid cells."""
        for i in range(self.number_of_agents):
            x, y = self.coordinates.pop(0)
            # print("x, y:", x, y)
            # x, y = self.grid.find_empty()
            pdagent = PDAgent((x, y), self, True)
            self.grid.place_agent(pdagent, (x, y))
            self.schedule.add(pdagent)

    def step(self):
        """Advance the model by one scheduler tick."""
        self.schedule.step()
        self.step_count += 1

    def run_model(self, rounds=200):
        """Run the model for *rounds* steps."""
        for i in range(rounds):
            self.step()
|
3,271 | 2fb95fa2b7062085f31c6b1dbb8c1336c3871e93 | #
# PySNMP MIB module CISCO-L2NAT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-L2NAT-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:47:06 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
CiscoInetAddressMask, = mibBuilder.importSymbols("CISCO-TC", "CiscoInetAddressMask")
InetAddressType, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetAddress")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
Integer32, Bits, Counter64, Counter32, Unsigned32, NotificationType, IpAddress, MibIdentifier, ModuleIdentity, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, TimeTicks, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "Bits", "Counter64", "Counter32", "Unsigned32", "NotificationType", "IpAddress", "MibIdentifier", "ModuleIdentity", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "TimeTicks", "Gauge32")
DisplayString, TextualConvention, RowStatus, StorageType = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "RowStatus", "StorageType")
# --- Module identity ---------------------------------------------------
# 1.3.6.1.4.1.9.9.806 is the CISCO-L2NAT-MIB root under ciscoMgmt.
ciscoL2natMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 806))
ciscoL2natMIB.setRevisions(('2013-04-16 00:00',))
if mibBuilder.loadTexts: ciscoL2natMIB.setLastUpdated('201304160000Z')
if mibBuilder.loadTexts: ciscoL2natMIB.setOrganization('Cisco Systems, Inc.')
# Container node for all managed objects of this MIB.
ciscoL2natMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 806, 1))
# --- Device-wide L2NAT statistics: read-only scalars -------------------
cl2natTotalInstances = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natTotalInstances.setStatus('current')
cl2natTotalMatched = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natTotalMatched.setStatus('current')
cl2natTotalUnmatched = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natTotalUnmatched.setStatus('current')
cl2natTotalFixups = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natTotalFixups.setStatus('current')
cl2natTotalTranslationEntryConfigured = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natTotalTranslationEntryConfigured.setStatus('current')
cl2natTotalPacketTranslated = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natTotalPacketTranslated.setStatus('current')
cl2natInstConfigInstanceTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 7), )
if mibBuilder.loadTexts: cl2natInstConfigInstanceTable.setStatus('current')
cl2natInstConfigInstanceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 7, 1), ).setIndexNames((0, "CISCO-L2NAT-MIB", "cl2natInstConfigInstanceName"))
if mibBuilder.loadTexts: cl2natInstConfigInstanceEntry.setStatus('current')
cl2natInstConfigInstanceName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 7, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 64)))
if mibBuilder.loadTexts: cl2natInstConfigInstanceName.setStatus('current')
cl2natInstConfigPermitIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 7, 1, 2), Bits().clone(namedValues=NamedValues(("unmatched", 0), ("igmp", 1), ("multicast", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cl2natInstConfigPermitIn.setStatus('current')
cl2natInstConfigPermitOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 7, 1, 3), Bits().clone(namedValues=NamedValues(("unmatched", 0), ("igmp", 1), ("multicast", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cl2natInstConfigPermitOut.setStatus('current')
cl2natInstConfigFixup = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 7, 1, 4), Bits().clone(namedValues=NamedValues(("arp", 0), ("icmp", 1), ("profinet", 2), ("cip", 3), ("snmp", 4))).clone(namedValues=NamedValues(("arp", 0), ("icmp", 1), ("profinet", 2), ("cip", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cl2natInstConfigFixup.setStatus('current')
cl2natInstConfigStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 7, 1, 5), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natInstConfigStorageType.setStatus('current')
cl2natInstConfigInstanceRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 7, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cl2natInstConfigInstanceRowStatus.setStatus('current')
cl2natInstIpInstanceIpTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 8), )
if mibBuilder.loadTexts: cl2natInstIpInstanceIpTable.setStatus('current')
cl2natInstIpInstanceIpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 8, 1), ).setIndexNames((0, "CISCO-L2NAT-MIB", "cl2natInstConfigInstanceName"), (0, "CISCO-L2NAT-MIB", "cl2natInstIpDirection"), (0, "CISCO-L2NAT-MIB", "cl2natInstIpFromIpAddressType"), (0, "CISCO-L2NAT-MIB", "cl2natInstIpFromIpAddress"), (0, "CISCO-L2NAT-MIB", "cl2natInstIpAddressType"))
if mibBuilder.loadTexts: cl2natInstIpInstanceIpEntry.setStatus('current')
cl2natInstIpDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 8, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("inside", 1), ("outside", 2))))
if mibBuilder.loadTexts: cl2natInstIpDirection.setStatus('current')
cl2natInstIpAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 8, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("host", 1), ("range", 2), ("network", 3))))
if mibBuilder.loadTexts: cl2natInstIpAddressType.setStatus('current')
cl2natInstIpFromIpAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 8, 1, 3), InetAddressType())
if mibBuilder.loadTexts: cl2natInstIpFromIpAddressType.setStatus('current')
cl2natInstIpFromIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 8, 1, 4), InetAddress().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(4, 4), ValueSizeConstraint(16, 16), )))
if mibBuilder.loadTexts: cl2natInstIpFromIpAddress.setStatus('current')
cl2natInstIpToIpAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 8, 1, 5), InetAddressType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cl2natInstIpToIpAddressType.setStatus('current')
cl2natInstIpToIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 8, 1, 6), InetAddress().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(4, 4), ValueSizeConstraint(16, 16), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cl2natInstIpToIpAddress.setStatus('current')
cl2natInstIpAddressMask = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 8, 1, 7), CiscoInetAddressMask()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cl2natInstIpAddressMask.setStatus('current')
cl2natInstIpRange = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 8, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cl2natInstIpRange.setStatus('current')
cl2natInstStorageIpStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 8, 1, 9), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natInstStorageIpStorageType.setStatus('current')
cl2natInstIpRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 8, 1, 10), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cl2natInstIpRowStatus.setStatus('current')
cl2natInterfaceConfigTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 9), )
if mibBuilder.loadTexts: cl2natInterfaceConfigTable.setStatus('current')
cl2natInterfaceConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 9, 1), ).setIndexNames((0, "CISCO-L2NAT-MIB", "cl2natInterfaceConfigIfIndex"), (0, "CISCO-L2NAT-MIB", "cl2natInterfaceConfigVlanIndex"))
if mibBuilder.loadTexts: cl2natInterfaceConfigEntry.setStatus('current')
cl2natInterfaceConfigIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 9, 1, 1), Unsigned32())
if mibBuilder.loadTexts: cl2natInterfaceConfigIfIndex.setStatus('current')
cl2natInterfaceConfigVlanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 9, 1, 2), Unsigned32())
if mibBuilder.loadTexts: cl2natInterfaceConfigVlanIndex.setStatus('current')
cl2natInterfaceConfigInstanceName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 9, 1, 3), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natInterfaceConfigInstanceName.setStatus('current')
cl2natInterfaceConfigStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 9, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natInterfaceConfigStorageType.setStatus('current')
cl2natInterfaceConfigRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 9, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cl2natInterfaceConfigRowStatus.setStatus('current')
cl2natInterfaceStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10), )
if mibBuilder.loadTexts: cl2natInterfaceStatisticsTable.setStatus('current')
cl2natInterfaceStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1), ).setIndexNames((0, "CISCO-L2NAT-MIB", "cl2natInterfaceConfigIfIndex"), (0, "CISCO-L2NAT-MIB", "cl2natInterfaceConfigVlanIndex"))
if mibBuilder.loadTexts: cl2natInterfaceStatisticsEntry.setStatus('current')
cl2natFixupArpIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupArpIn.setStatus('current')
cl2natFixupIcmpIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupIcmpIn.setStatus('current')
cl2natFixupCipIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupCipIn.setStatus('current')
cl2natFixupProfinetIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupProfinetIn.setStatus('current')
cl2natFixupFtpIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupFtpIn.setStatus('current')
cl2natFixupSnmpIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupSnmpIn.setStatus('current')
cl2natFixupSipIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupSipIn.setStatus('current')
cl2natFixupSccpIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupSccpIn.setStatus('current')
cl2natUnmatchedIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natUnmatchedIn.setStatus('current')
cl2natTranslatedUnicastIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natTranslatedUnicastIn.setStatus('current')
cl2natDroppedUnicastIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natDroppedUnicastIn.setStatus('current')
cl2natDroppedMulticastIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natDroppedMulticastIn.setStatus('current')
cl2natPassThruUnicastIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natPassThruUnicastIn.setStatus('current')
cl2natPassThruMulticastIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natPassThruMulticastIn.setStatus('current')
cl2natPassThruIgmpIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natPassThruIgmpIn.setStatus('current')
cl2natDroppedIgmpIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 16), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natDroppedIgmpIn.setStatus('current')
cl2natFixupArpOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 17), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupArpOut.setStatus('current')
cl2natFixupIcmpOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 18), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupIcmpOut.setStatus('current')
cl2natFixupCipOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 19), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupCipOut.setStatus('current')
cl2natFixupProfinetOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 20), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupProfinetOut.setStatus('current')
cl2natFixupFtpOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 21), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupFtpOut.setStatus('current')
cl2natFixupSnmpOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 22), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupSnmpOut.setStatus('current')
cl2natFixupSipOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 23), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupSipOut.setStatus('current')
cl2natFixupSccpOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 24), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natFixupSccpOut.setStatus('current')
cl2natUnmatchedOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 25), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natUnmatchedOut.setStatus('current')
cl2natDroppedUnicastOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 26), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natDroppedUnicastOut.setStatus('current')
cl2natTranslatedUnicastOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 27), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natTranslatedUnicastOut.setStatus('current')
cl2natPassThruUnicastOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 28), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natPassThruUnicastOut.setStatus('current')
cl2natDroppedMulticastOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 29), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natDroppedMulticastOut.setStatus('current')
cl2natPassThruMulticastOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 30), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natPassThruMulticastOut.setStatus('current')
cl2natDroppedIgmpOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 31), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natDroppedIgmpOut.setStatus('current')
cl2natPassThruIgmpOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 10, 1, 32), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natPassThruIgmpOut.setStatus('current')
cl2natInterfaceIpStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 11), )
if mibBuilder.loadTexts: cl2natInterfaceIpStatisticsTable.setStatus('current')
cl2natInterfaceIpStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 11, 1), ).setIndexNames((0, "CISCO-L2NAT-MIB", "cl2natInterfaceConfigIfIndex"), (0, "CISCO-L2NAT-MIB", "cl2natInterfaceConfigVlanIndex"), (0, "CISCO-L2NAT-MIB", "cl2natInstIpDirection"), (0, "CISCO-L2NAT-MIB", "cl2natInstIpFromIpAddressType"), (0, "CISCO-L2NAT-MIB", "cl2natInstIpFromIpAddress"))
if mibBuilder.loadTexts: cl2natInterfaceIpStatisticsEntry.setStatus('current')
cl2natTranslatesIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 11, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natTranslatesIn.setStatus('current')
cl2natTranslatesOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 806, 1, 11, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cl2natTranslatesOut.setStatus('current')
ciscoL2natMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 806, 3))
ciscoL2natMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 806, 3, 1))
ciscoL2natMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 806, 3, 2))
ciscoL2natMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 806, 3, 1, 1)).setObjects(("CISCO-L2NAT-MIB", "cl2natGlobalStatisticsGroup"), ("CISCO-L2NAT-MIB", "cl2natInstanceConfigGroup"), ("CISCO-L2NAT-MIB", "cl2natInstanceStatisticsGroup"), ("CISCO-L2NAT-MIB", "cl2natInstanceTranslationStatisticsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoL2natMIBCompliance = ciscoL2natMIBCompliance.setStatus('current')
cl2natGlobalStatisticsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 806, 3, 2, 1)).setObjects(("CISCO-L2NAT-MIB", "cl2natTotalInstances"), ("CISCO-L2NAT-MIB", "cl2natTotalMatched"), ("CISCO-L2NAT-MIB", "cl2natTotalUnmatched"), ("CISCO-L2NAT-MIB", "cl2natTotalFixups"), ("CISCO-L2NAT-MIB", "cl2natTotalTranslationEntryConfigured"), ("CISCO-L2NAT-MIB", "cl2natTotalPacketTranslated"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cl2natGlobalStatisticsGroup = cl2natGlobalStatisticsGroup.setStatus('current')
cl2natInstanceConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 806, 3, 2, 2)).setObjects(("CISCO-L2NAT-MIB", "cl2natInstConfigPermitIn"), ("CISCO-L2NAT-MIB", "cl2natInstConfigPermitOut"), ("CISCO-L2NAT-MIB", "cl2natInstConfigFixup"), ("CISCO-L2NAT-MIB", "cl2natInstIpRange"), ("CISCO-L2NAT-MIB", "cl2natInstIpToIpAddress"), ("CISCO-L2NAT-MIB", "cl2natInstIpToIpAddressType"), ("CISCO-L2NAT-MIB", "cl2natInstConfigInstanceRowStatus"), ("CISCO-L2NAT-MIB", "cl2natInterfaceConfigRowStatus"), ("CISCO-L2NAT-MIB", "cl2natInstIpAddressMask"), ("CISCO-L2NAT-MIB", "cl2natInterfaceConfigInstanceName"), ("CISCO-L2NAT-MIB", "cl2natInstIpRowStatus"), ("CISCO-L2NAT-MIB", "cl2natInstConfigStorageType"), ("CISCO-L2NAT-MIB", "cl2natInstStorageIpStorageType"), ("CISCO-L2NAT-MIB", "cl2natInterfaceConfigStorageType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cl2natInstanceConfigGroup = cl2natInstanceConfigGroup.setStatus('current')
cl2natInstanceStatisticsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 806, 3, 2, 3)).setObjects(("CISCO-L2NAT-MIB", "cl2natUnmatchedIn"), ("CISCO-L2NAT-MIB", "cl2natDroppedUnicastIn"), ("CISCO-L2NAT-MIB", "cl2natTranslatedUnicastIn"), ("CISCO-L2NAT-MIB", "cl2natFixupArpIn"), ("CISCO-L2NAT-MIB", "cl2natFixupIcmpIn"), ("CISCO-L2NAT-MIB", "cl2natFixupCipIn"), ("CISCO-L2NAT-MIB", "cl2natFixupProfinetIn"), ("CISCO-L2NAT-MIB", "cl2natFixupFtpIn"), ("CISCO-L2NAT-MIB", "cl2natFixupSnmpIn"), ("CISCO-L2NAT-MIB", "cl2natFixupSipIn"), ("CISCO-L2NAT-MIB", "cl2natFixupSccpIn"), ("CISCO-L2NAT-MIB", "cl2natUnmatchedOut"), ("CISCO-L2NAT-MIB", "cl2natDroppedUnicastOut"), ("CISCO-L2NAT-MIB", "cl2natTranslatedUnicastOut"), ("CISCO-L2NAT-MIB", "cl2natFixupArpOut"), ("CISCO-L2NAT-MIB", "cl2natFixupIcmpOut"), ("CISCO-L2NAT-MIB", "cl2natFixupCipOut"), ("CISCO-L2NAT-MIB", "cl2natFixupProfinetOut"), ("CISCO-L2NAT-MIB", "cl2natFixupFtpOut"), ("CISCO-L2NAT-MIB", "cl2natFixupSnmpOut"), ("CISCO-L2NAT-MIB", "cl2natFixupSipOut"), ("CISCO-L2NAT-MIB", "cl2natFixupSccpOut"), ("CISCO-L2NAT-MIB", "cl2natPassThruUnicastIn"), ("CISCO-L2NAT-MIB", "cl2natPassThruUnicastOut"), ("CISCO-L2NAT-MIB", "cl2natDroppedMulticastIn"), ("CISCO-L2NAT-MIB", "cl2natDroppedMulticastOut"), ("CISCO-L2NAT-MIB", "cl2natPassThruMulticastIn"), ("CISCO-L2NAT-MIB", "cl2natPassThruMulticastOut"), ("CISCO-L2NAT-MIB", "cl2natDroppedIgmpIn"), ("CISCO-L2NAT-MIB", "cl2natDroppedIgmpOut"), ("CISCO-L2NAT-MIB", "cl2natPassThruIgmpIn"), ("CISCO-L2NAT-MIB", "cl2natPassThruIgmpOut"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cl2natInstanceStatisticsGroup = cl2natInstanceStatisticsGroup.setStatus('current')
cl2natInstanceTranslationStatisticsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 806, 3, 2, 4)).setObjects(("CISCO-L2NAT-MIB", "cl2natTranslatesIn"), ("CISCO-L2NAT-MIB", "cl2natTranslatesOut"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cl2natInstanceTranslationStatisticsGroup = cl2natInstanceTranslationStatisticsGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-L2NAT-MIB", cl2natTotalFixups=cl2natTotalFixups, cl2natInterfaceConfigVlanIndex=cl2natInterfaceConfigVlanIndex, ciscoL2natMIBObjects=ciscoL2natMIBObjects, cl2natFixupCipOut=cl2natFixupCipOut, cl2natInstConfigStorageType=cl2natInstConfigStorageType, cl2natPassThruMulticastOut=cl2natPassThruMulticastOut, cl2natUnmatchedOut=cl2natUnmatchedOut, ciscoL2natMIBGroups=ciscoL2natMIBGroups, cl2natInstIpRange=cl2natInstIpRange, cl2natInterfaceIpStatisticsEntry=cl2natInterfaceIpStatisticsEntry, cl2natInstanceConfigGroup=cl2natInstanceConfigGroup, cl2natInstIpInstanceIpTable=cl2natInstIpInstanceIpTable, cl2natInstIpToIpAddress=cl2natInstIpToIpAddress, cl2natInstanceStatisticsGroup=cl2natInstanceStatisticsGroup, cl2natDroppedMulticastIn=cl2natDroppedMulticastIn, cl2natInstConfigInstanceRowStatus=cl2natInstConfigInstanceRowStatus, cl2natInterfaceStatisticsEntry=cl2natInterfaceStatisticsEntry, cl2natInterfaceConfigStorageType=cl2natInterfaceConfigStorageType, cl2natDroppedIgmpIn=cl2natDroppedIgmpIn, cl2natUnmatchedIn=cl2natUnmatchedIn, cl2natTranslatesOut=cl2natTranslatesOut, cl2natFixupSnmpIn=cl2natFixupSnmpIn, ciscoL2natMIBCompliance=ciscoL2natMIBCompliance, cl2natInstConfigPermitOut=cl2natInstConfigPermitOut, ciscoL2natMIBConformance=ciscoL2natMIBConformance, cl2natPassThruIgmpOut=cl2natPassThruIgmpOut, cl2natInstIpFromIpAddressType=cl2natInstIpFromIpAddressType, cl2natFixupSipIn=cl2natFixupSipIn, cl2natInstConfigPermitIn=cl2natInstConfigPermitIn, cl2natInstIpAddressMask=cl2natInstIpAddressMask, cl2natInstIpDirection=cl2natInstIpDirection, cl2natInstIpAddressType=cl2natInstIpAddressType, cl2natTranslatedUnicastOut=cl2natTranslatedUnicastOut, cl2natGlobalStatisticsGroup=cl2natGlobalStatisticsGroup, cl2natInstConfigInstanceEntry=cl2natInstConfigInstanceEntry, cl2natInterfaceConfigInstanceName=cl2natInterfaceConfigInstanceName, cl2natDroppedMulticastOut=cl2natDroppedMulticastOut, cl2natInstIpFromIpAddress=cl2natInstIpFromIpAddress, 
cl2natTranslatesIn=cl2natTranslatesIn, cl2natFixupFtpIn=cl2natFixupFtpIn, cl2natFixupProfinetOut=cl2natFixupProfinetOut, cl2natInstIpRowStatus=cl2natInstIpRowStatus, cl2natTranslatedUnicastIn=cl2natTranslatedUnicastIn, cl2natInterfaceIpStatisticsTable=cl2natInterfaceIpStatisticsTable, cl2natFixupIcmpIn=cl2natFixupIcmpIn, cl2natInstanceTranslationStatisticsGroup=cl2natInstanceTranslationStatisticsGroup, cl2natInstStorageIpStorageType=cl2natInstStorageIpStorageType, cl2natFixupSnmpOut=cl2natFixupSnmpOut, cl2natPassThruUnicastOut=cl2natPassThruUnicastOut, cl2natFixupProfinetIn=cl2natFixupProfinetIn, ciscoL2natMIBCompliances=ciscoL2natMIBCompliances, cl2natPassThruUnicastIn=cl2natPassThruUnicastIn, cl2natTotalMatched=cl2natTotalMatched, cl2natFixupIcmpOut=cl2natFixupIcmpOut, cl2natFixupSipOut=cl2natFixupSipOut, cl2natTotalUnmatched=cl2natTotalUnmatched, cl2natInstIpToIpAddressType=cl2natInstIpToIpAddressType, cl2natTotalInstances=cl2natTotalInstances, cl2natTotalTranslationEntryConfigured=cl2natTotalTranslationEntryConfigured, cl2natDroppedIgmpOut=cl2natDroppedIgmpOut, cl2natPassThruIgmpIn=cl2natPassThruIgmpIn, cl2natPassThruMulticastIn=cl2natPassThruMulticastIn, cl2natFixupSccpIn=cl2natFixupSccpIn, cl2natInstConfigInstanceName=cl2natInstConfigInstanceName, cl2natInterfaceConfigIfIndex=cl2natInterfaceConfigIfIndex, cl2natFixupFtpOut=cl2natFixupFtpOut, cl2natInstConfigInstanceTable=cl2natInstConfigInstanceTable, cl2natInstIpInstanceIpEntry=cl2natInstIpInstanceIpEntry, cl2natDroppedUnicastOut=cl2natDroppedUnicastOut, cl2natInterfaceConfigTable=cl2natInterfaceConfigTable, cl2natFixupSccpOut=cl2natFixupSccpOut, cl2natFixupArpIn=cl2natFixupArpIn, PYSNMP_MODULE_ID=ciscoL2natMIB, cl2natDroppedUnicastIn=cl2natDroppedUnicastIn, cl2natInterfaceStatisticsTable=cl2natInterfaceStatisticsTable, cl2natTotalPacketTranslated=cl2natTotalPacketTranslated, ciscoL2natMIB=ciscoL2natMIB, cl2natFixupCipIn=cl2natFixupCipIn, cl2natFixupArpOut=cl2natFixupArpOut, 
cl2natInstConfigFixup=cl2natInstConfigFixup, cl2natInterfaceConfigRowStatus=cl2natInterfaceConfigRowStatus, cl2natInterfaceConfigEntry=cl2natInterfaceConfigEntry)
|
class Figure:
    """Base class for geometric figures.

    Instantiating ``Figure`` directly yields ``None`` (the overridden
    ``__new__`` refuses to build a bare base instance); only subclasses
    produce real objects.
    """

    # Default area; subclasses override with their own value.
    area = 0

    def __new__(cls, *args):
        # Evaluate to None for the abstract base, a real object otherwise.
        return None if cls is Figure else object.__new__(cls)

    def add_area(self, other):
        """Return the sum of this figure's area and *other*'s area."""
        if not isinstance(other, Figure):
            raise ValueError("Should pass Figure as parameter")
        return self.area + other.area
|
3,273 | 5a5b2d0ade5b66981218b4ecf15a2253b7d665f9 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Script for converting the new csv files to the desirable json format
'''
import codecs
import json
import re
def creeper():
    '''
    Convert the Creeper csv (no country-code prefix, with caption
    mappings applied) to Creeper.json.
    '''
    run(u'creeper.csv', u'Creeper.json', False,
        mappingFile=u'creeper-mappings.json')
def mediaCreeper():
    '''
    Convert the MediaCreeper csv (captions prefixed with the country
    code) to MediaCreeper.json.
    '''
    run(u'mediacreeper.csv', u'MediaCreeper.json', True)
def run(inFilename, outFilename, ccPrefix,
        mappingFile=None, source=u'http://b19.se/data/'):
    '''
    Convert one semicolon-separated csv file to the desired json layout.

    Each csv line is ``start;end;cc;caption;updated``.  Ranges are
    grouped by caption (optionally prefixed ``[CC] ``, optionally
    renamed via *mappingFile*), an ``@metadata`` entry records the
    oldest/newest update dates, and the json is compacted so each
    [start, end] pair sits on one line.
    '''
    # Optional caption-rename table.
    mappings = {}
    if mappingFile:
        with codecs.open(mappingFile, 'r', 'utf-8') as f:
            mappings = json.load(f)

    # Read the csv as raw lines.
    with codecs.open(inFilename, 'r', 'utf-8') as f:
        lines = f.read().split('\n')

    data = {}
    dates = []
    for line in lines:
        # Skip blanks and comment lines.
        if not line or line.startswith(u'#'):
            continue
        start, end, cc, caption, updated = line.split(';')
        if ccPrefix:
            caption = u'[%s] %s' % (cc, caption)
        caption = mappings.get(caption, caption)
        data.setdefault(caption, []).append([start, end])
        dates.append(updated)

    # Metadata entry with the span of update dates seen.
    dates = sorted(set(dates))
    data[u'@metadata'] = {
        'source': source,
        'oldest data': dates[0],
        'newest data': dates[-1]}

    # Pretty-print, then compactify [start, end] pairs onto single lines
    # without fully minimizing.
    txt = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)
    txt = re.sub(
        r'\[\n "([^"]*)", \n "([^"]*)"\n \]',
        r'["\1", "\2"]',
        txt)
    txt = txt.replace(u', \n [', u',\n [')
    with codecs.open(outFilename, 'w', 'utf-8') as f:
        f.write(txt)
if __name__ == '__main__':
    # Regenerate both JSON datasets when executed as a script.
    creeper()
    mediaCreeper()
|
def heapify(lst, index, heap_size):
    """Sift lst[index] down until the max-heap property holds within
    lst[:heap_size].  Mutates *lst* in place."""
    while True:
        top = index
        left = 2 * index + 1
        right = 2 * index + 2
        if left < heap_size and lst[left] > lst[top]:
            top = left
        if right < heap_size and lst[right] > lst[top]:
            top = right
        if top == index:
            return
        lst[index], lst[top] = lst[top], lst[index]
        index = top


def heap_sort(collection):
    """Pure implement of heap sort algorithm in Python

    :param collection: some mutable ordered collection with heterogeneous
    comparable items inside
    :return: the same collection ordered by ascending
    """
    size = len(collection)
    # Build a max-heap from the bottom-most internal node upward.
    for root in reversed(range(size // 2)):
        heapify(collection, root, size)
    # Repeatedly move the max to the end and re-heapify the prefix.
    for end in reversed(range(1, size)):
        collection[end], collection[0] = collection[0], collection[end]
        heapify(collection, 0, end)
    return collection
|
3,275 | 860908126d473e6c4ed070992a1b518683fd4c27 | ###############################################################
## File Name: 11_exercise.py
## File Type: Python File
## Author: surge55
## Course: Python 4 Everybody
## Chapter: Chapter 11 - Regular Expressions
## Exercise: n/a
## Description: Code walkthrough from book
## Other References: associated files in folder
###############################################################
#11.2 Extracting data using regular expressions
# import re
# s = 'A message from csev@umich.edu to cwen@iupui.edu about meeting @2PM'
# lst = re.findall('\S+@\S+', s)
# print(lst)
# We can use this regular expression in a program
# to read all the lines in a file and print out
# anything that looks like an email address:
# import re
# hand = open('mbox-short.txt')
# for line in hand:
# line = line.rstrip()
# x = re.findall('\S+@\S+', line)
# if len(x) > 0:
# print(x)
## Much Cleaner Version
# import re
# hand = open('mbox-short.txt')
# for line in hand:
# line = line.rstrip()
# x = re.findall('[a-zA-Z0-9]\S*@\S*[a-zA-Z]', line)
# if len(x) > 0:
# print(x)
# Search for lines that start with "X" followed by any
# non-whitespace characters and ':'
# followed by a space and any number
# the number can include a decimal
# import re
# hand = open('mbox-short.txt')
# # Returns a List
# # for line in hand:
# # line = line.rstrip()
# # x = re.findall('^X\S*: [0-9.]+', line)
# # if len(x) > 0:
# # print(x)
# # print(type(line))
# # Returns a String
# for line in hand:
# line = line.rstrip()
# if re.search('^X\S*: [0-9.]+', line):
# print(line)
# # print(type(line))
# Search for lines that start with 'X' followed by any
# non whitespace characters and ':' followed by a space
# and any number. The number can include a decimal
# Then print the number if it is greater than 0
# import re
# hand = open('mbox-short.txt')
# for line in hand:
# line = line.rstrip()
# x = re.findall('^X\S*: ([0-9.]+)', line)
# if len(x) > 0:
# print(x)
# Exercise 1
# Write a simple program to simulate the operation of the grep
# command on unix. Ask the user to enter a regular expression
# and count the number of lines that matched the regular expression:
# import re
# reg_inp = input("Enter a regular expression: ")
# count = 0
# hand = open('mbox.txt')
# for line in hand:
# line = line.rstrip()
# if re.search(reg_inp, line):
# count += 1
# print('mbox.txt had', count, 'lines that match', reg_inp)
# Exercise 2
# Write a program to look for lines of the form:
# 'New Revision: 39772'
# Extract the number from each of the lines using a regular expression
# and the findall() method. Compute the average of the numbers
# and print out the average as an integer.
# import re
# hand = open('mbox.txt')
# total = 0
# count = 0
# for line in hand:
# line = line.rstrip()
# x = re.findall('^New Revision: ([0-9]+)', line)
# if len(x) > 0:
# for i in x:
# total = total + float(i)
# count += 1
# print(int(total/count))
# FINDING NUMBERS IN A HAYSTACK
# In this assignment you will read through and parse a file with text and numbers
# You will extract all the numbers in the file and compute the sum
# of the numbers
# FINDING NUMBERS IN A HAYSTACK: extract every integer anywhere in the file
# and print the total.
import re

total = 0
# Fix: context manager guarantees the handle is closed; the unused `count`
# variable was dropped and the sum is accumulated as exact ints, not floats.
with open('regex_sum_act.txt') as hand:
    for line in hand:
        line = line.rstrip()
        # Every maximal run of digits on the line.
        for number in re.findall('[0-9]+', line):
            total += int(number)
print('sum is', total)
|
def non_dupulicates_lette(word):
    """Print the characters of *word*: first the full character list, then
    each character once per character in the word (len(word) passes)."""
    text = list(word)
    print(text)
    for _ in range(len(text)):
        for k in text:
            # Bug fix: the original printed the undefined name `c`, which
            # raised NameError on the first iteration.
            print(k)
def has_dupulicates(word):
    """Count character occurrences in *word*, print each character (alone if
    it appears once, with its count otherwise) and return the count dict."""
    counts = dict()
    for ch in word:
        counts[ch] = counts.get(ch, 0) + 1
    for ch in counts:
        if counts[ch] == 1:
            print(ch)
        else:
            print(ch, counts[ch])
    return counts
#count=0
#othercount=1
#sizeword=len(word)-1
#while count<sizeword:
#letter=word[count]
#while othercount<sizeword:
#if letter == word[othercount]:
#return True
#othercount= othercount+1
#count+=1
#return False
# Demo: run the letter printer on a sample string; the duplicate-count
# alternative below is left disabled.
A='bccata'#['a','b','b','c']
non_dupulicates_lette(A)
#result=has_dupulicates(A)
#print(result)
|
3,277 | dee7b12862d02837fbb0f2310b136dd768ca7bab | import time
import pickle
class BayesNetClassifier:
    """Naive-Bayes ("bag of words") tweet-location classifier.

    Estimates the prior P(city) and the conditionals P(word | city) from a
    labelled training file and pickles the resulting tables to ``out_file``.
    """

    def __init__(self, train_file, out_file):
        """
        :param train_file: path to labelled training data (city + words per line)
        :param out_file: destination path for the pickled probability tables
        """
        self.train_file = train_file
        self.out_file = out_file
        self.word_count_loc = {}   # city -> {word: count in that city's tweets}
        self.word_probs = {}       # city -> {word: P(word | city)}
        self.l_probs = {}          # city -> prior P(city)
        self.word_counts = {}      # word -> count over the whole corpus
        self.common_words = {}     # words appearing at least 5 times overall
        self.cities = []           # city labels seen in training
        self.total_words = 0       # number of distinct words in the corpus

    # Saves probabilities (plus model metadata) to a pickle file.
    def pickle_probs(self):
        all_probs = {'type': 'bayes', 'location': self.l_probs, 'words': self.word_probs, 'total': self.total_words, 'cities': self.cities}
        #source: https://stackoverflow.com/questions/11218477/how-can-i-use-pickle-to-save-a-dict
        with open(self.out_file, 'wb') as handle:
            pickle.dump(all_probs, handle, protocol=pickle.HIGHEST_PROTOCOL)

    # Loads training data: each line becomes a tuple (label, word1, word2, ...)
    # with every word lower-cased but the city label kept as-is.
    # Some ideas taken from label.py given by Dr. Crandall
    def read_data(self, fname):
        exemplars = []
        file = open(fname, 'r');
        for line in file:
            data = tuple([w if i == 0 else w.lower() for i, w in enumerate(line.split())])
            exemplars += [data]
        return exemplars

    # Source: https://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary
    # Fastest way to find max value in dict
    # NOTE(review): destructively pops the top `c` entries out of `d`.
    def max_val(self, d, c):
        top = {}
        for i in range(c):
            v=list(d.values())
            k=list(d.keys())
            max_key = k[v.index(max(v))]
            top[max_key] = max(v)
            del d[max_key]
        return top
    # Stop Copy

    # Pretty print: one row per location, each top word in a 20-char column.
    def print_values(self, d):
        for key in d.keys():
            print('{:<20}'.format(key.replace('_',' ')), end = '')
            for i, val in enumerate(d[key]):
                print('{:<20}'.format(val), end = '')
            print()

    # Fits the Bayes Net model: accumulates per-city word counts and location
    # priors, then normalises the counts into P(word | city).
    def fit(self):
        t0 = time.time()   # NOTE(review): start time recorded but never reported
        tweets = self.read_data(self.train_file)
        total_tweets = len(tweets)
        words_per_city = {}
        for twe in tweets:
            city = twe[0]
            # Prior accumulated one tweet at a time (1/total per tweet).
            if city in self.l_probs.keys():
                self.l_probs[city] += 1/total_tweets
            else:
                self.l_probs[city] = 1/total_tweets
                # NOTE(review): indentation reconstructed - assumed to run only
                # on first sight of a city; confirm against the original file.
                words_per_city[city] = 0
            for word in twe[1:]:
                if city in self.word_count_loc.keys():
                    if word in self.word_count_loc[city].keys():
                        self.word_count_loc[city][word] += 1
                        words_per_city[city] +=1
                    else:
                        self.word_count_loc[city][word] = 1
                        words_per_city[city] +=1
                else:
                    self.word_count_loc[city] = {}
                    self.word_count_loc[city][word] = 1
                    words_per_city[city] +=1
                if word in self.word_counts.keys():
                    self.word_counts[word] += 1
                else:
                    self.word_counts[word] = 1
        #source: https://stackoverflow.com/questions/17095163/remove-a-dictionary-key-that-has-a-certain-value
        self.common_words = {k:v for k,v in self.word_counts.items() if v >= 5}
        self.cities = list(self.word_count_loc.keys())
        self.total_words = len(self.word_counts.keys())
        #print(self.max_val(self.word_counts,30))
        # Normalise raw counts into conditional probabilities per city.
        for city in self.cities:
            self.word_probs[city] = {w: c/words_per_city[city] for w,c in self.word_count_loc[city].items()}

    # Find top 5 words by location (ratio of city count to corpus count,
    # restricted to words seen at least 5 times overall) and print them.
    def top_words(self):
        most_pop = {}
        top_five = {}
        for city in self.cities:
            self.word_count_loc[city] = {k: v for k, v in sorted(self.word_count_loc[city].items(), key=lambda x: x[1], reverse = True)}
            most_pop[city] = {k:v/self.common_words[k] for k,v in self.word_count_loc[city].items() if k in self.common_words.keys()}
            top_five[city] = self.max_val(most_pop[city], 5)
        print()
        print('Top 5 Words Per Location')
        print('-------------------------------------------------------------------------------------------------------------------')
        self.print_values(top_five)
|
3,278 | 86177dfa9b8bed5916703edcc16ea4d01cbabf84 | import tkinter as tk
import random
import numpy as np
import copy
import time
#################################################################################
#
# Données de partie
NbSimulation = 20000
Data = [ [1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,0,0,0,0,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1] ]
GInit = np.array(Data,dtype=np.int8)
GInit = np.flip(GInit,0).transpose()
LARGEUR = 13
HAUTEUR = 17
# container pour passer efficacement toutes les données de la partie
# Container passing all of the game's state around efficiently.
class Game:
    def __init__(self, Grille, PlayerX, PlayerY, Score=0):
        self.PlayerX = PlayerX   # player grid column
        self.PlayerY = PlayerY   # player grid row
        self.Score = Score       # moves survived so far
        self.Grille = Grille     # 2D array: 0 free, 1 wall, 2 bike trail

    def copy(self):
        # Deep copy so simulations can mutate the grid without side effects.
        return copy.deepcopy(self)
GameInit = Game(GInit,3,5)
##############################################################
#
# création de la fenetre principale - NE PAS TOUCHER
L = 20 # largeur d'une case du jeu en pixel
largeurPix = LARGEUR * L
hauteurPix = HAUTEUR * L
Window = tk.Tk()
Window.geometry(str(largeurPix)+"x"+str(hauteurPix)) # taille de la fenetre
Window.title("TRON")
# création de la frame principale stockant toutes les pages
F = tk.Frame(Window)
F.pack(side="top", fill="both", expand=True)
F.grid_rowconfigure(0, weight=1)
F.grid_columnconfigure(0, weight=1)
# gestion des différentes pages
ListePages = {}
PageActive = 0
# Creates a page (a Frame stacked inside the main frame F) and registers it by id.
def CreerUnePage(id):
    Frame = tk.Frame(F)
    ListePages[id] = Frame
    Frame.grid(row=0, column=0, sticky="nsew")
    return Frame

# Raises the page with the given id and remembers it as the active one.
def AfficherPage(id):
    global PageActive
    PageActive = id
    ListePages[id].tkraise()
Frame0 = CreerUnePage(0)
canvas = tk.Canvas(Frame0,width = largeurPix, height = hauteurPix, bg ="black" )
canvas.place(x=0,y=0)
# Dessine la grille de jeu - ne pas toucher
# Draws the game grid - do not modify
def Affiche(Game):
    canvas.delete("all")
    H = canvas.winfo_height()

    def DrawCase(x,y,coul):
        # Convert grid coordinates to pixels; the y axis is flipped so that
        # grid row 0 sits at the bottom of the canvas.
        x *= L
        y *= L
        canvas.create_rectangle(x,H-y,x+L,H-y-L,fill=coul)

    # Walls (1) are gray, bike trails (2) are cyan.
    for x in range (LARGEUR):
        for y in range (HAUTEUR):
            if Game.Grille[x,y] == 1 : DrawCase(x,y,"gray" )
            if Game.Grille[x,y] == 2 : DrawCase(x,y,"cyan" )
    # The bike itself.
    DrawCase(Game.PlayerX,Game.PlayerY,"red" )

# Overlays the current score near the top-left corner of the canvas.
def AfficheScore(Game):
    info = "SCORE : " + str(Game.Score)
    canvas.create_text(80, 13, font='Helvetica 12 bold', fill="yellow", text=info)
###########################################################
#
# gestion du joueur IA
# VOTRE CODE ICI
dx = np.array([0, -1, 0, 1, 0],dtype=np.int8)
dy = np.array([0, 0, 1, 0, -1],dtype=np.int8)
# scores associés à chaque déplacement
ds = np.array([0, 1, 1, 1, 1],dtype=np.int8)
def GetAllExectuableMove(Game):
    """Return the free (x, y) cells orthogonally adjacent to the player,
    in the fixed order up / down / right / left."""
    px, py = Game.PlayerX, Game.PlayerY
    offsets = ((0, 1), (0, -1), (1, 0), (-1, 0))
    return [(px + ox, py + oy)
            for ox, oy in offsets
            if Game.Grille[px + ox, py + oy] == 0]
def Simulate(Game):
    """Run NbSimulation random playouts from *Game*'s state in parallel
    (vectorised with numpy) and return the mean final score."""
    nb = NbSimulation
    # Copy the starting state once per simulated game.
    G = np.tile(Game.Grille,(nb,1,1)) # grid (x,y) for each game
    X = np.tile(Game.PlayerX,nb)     # player x for each game
    Y = np.tile(Game.PlayerY,nb)     # player y for each game
    S = np.tile(Game.Score,nb)       # score for each game
    I = np.arange(nb)                # 0,1,2,3,...,nb-1
    continuer = True
    while(continuer) :
        # Mark the bike's current cell as occupied (2) in every game.
        G[I, X, Y] = 2
        ### For each game, collect the indices of the possible moves.
        # One slot per direction, initialised to 0.
        LPossibles = np.zeros((nb, 4),dtype=np.int8)
        # Slot i holds i+1 if the neighbour in direction i+1 is free, else 0.
        for i in range(4):
            LPossibles[I,i] = np.where(G[I, X+dx[i+1], Y+dy[i+1]] == 0,i+1,0)
        # Sort each game's direction list in decreasing order (zeros last).
        LPossibles.sort(axis=1)
        LPossibles = np.fliplr(LPossibles)
        ### Count the number of possible moves per game.
        Indices = np.count_nonzero(LPossibles, axis=1)
        # Replace 0 by 1 so the modulo below never divides by zero.
        Indices[Indices == 0] = 1
        # Draw a random direction index for each game.
        R = np.random.randint(12,size=nb,dtype=np.int8)
        # Pick each game's move; stuck games get index 0 ("stay put").
        Position = LPossibles[I, R % Indices[I]]
        ### Apply the moves and the scoring.
        # Stop once every game is stuck (every selected move is 0).
        if(nb == np.count_nonzero(Position == 0)): continuer = False
        # Per-move score increment (0 for "stay", 1 for a real move).
        S[I] += ds[Position]
        # Move each player.
        X += dx[Position]
        Y += dy[Position]
    # Mean score over all simulated games.
    return np.mean(S)

# One Monte-Carlo evaluation = average score of NbSimulation random playouts.
def MonteCarlo(Game):
    return Simulate(Game)

# Try every legal move, evaluate each with Monte-Carlo, return the best (x, y)
# or (None, None) when the player is boxed in.
def MovePlayerWithIA(Game):
    executableMove = GetAllExectuableMove(Game)
    result = (None, None)
    maxi = 0
    if(len(executableMove)==0):
        return None, None
    for x,y in executableMove:
        # NOTE(review): Game.PlayerX/Y are mutated for evaluation and never
        # restored; callers are expected to overwrite them with the result.
        Game.PlayerX = x
        Game.PlayerY = y
        total = MonteCarlo(Game)
        if(total>maxi):
            result = (x,y)
            maxi = total
    return result
def Play(Game):
    """Advance the real game by one IA move; return True when the game ends."""
    x,y = Game.PlayerX, Game.PlayerY
    Game.Grille[x,y] = 2 # leave the bike's trail behind
    x,y = MovePlayerWithIA(Game)
    if x == None or y == None :
        # collision detected
        return True # game over
    else :
        Game.PlayerX = x # commit the move
        Game.PlayerY = y # commit the move
        Game.Score += 1
        return False # the game goes on

################################################################################

# The single live game instance driven by the UI loop below.
CurrentGame = GameInit.copy()

def Partie():
    """UI tick: play one move, redraw, and reschedule itself until game over."""
    Tstart = time.time()
    PartieTermine = Play(CurrentGame)
    print(time.time() - Tstart) # debug: time spent choosing this move
    if not PartieTermine :
        Affiche(CurrentGame)
        # Schedule the next tick in 1000 ms; meanwhile the OS repaints the UI.
        Window.after(1000,Partie)
    else :
        AfficheScore(CurrentGame)
#####################################################################################
#
# Mise en place de l'interface - ne pas toucher
AfficherPage(0)
Window.after(100,Partie)
Window.mainloop()
|
3,279 | 4b3664153940b064b424bd77de473a6409437f88 | import sys
'''
Given a string, does the string contain an equal number of uppercase and
lowercase letters? Ignore whitespace, numbers, and punctuation. Return the
string “true” if balanced or the string “false” if not balanced.
'''
# One verdict per input line: 'true' when the ASCII lowercase and uppercase
# counts match, 'false' otherwise.
for line in sys.stdin:
    lower = 0
    upper = 0
    # Tally lowercase and uppercase ASCII letters; whitespace, digits and
    # punctuation are ignored.
    for ch in line:
        if 'a' <= ch <= 'z':
            lower += 1
        elif 'A' <= ch <= 'Z':
            upper += 1
    print('true' if lower == upper else 'false')
|
3,280 | 8cec6778f530cb06e4f6cb2e6e9b6cb192d20f97 | # Generated by Django 3.2.6 on 2021-10-10 17:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
# Initial migration: creates the `order` table linked to the auth user model.
class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('reward', '0002_delete_user'),
    ]

    operations = [
        # `order`: UUID-keyed order row with points and a 1-5 green rating.
        migrations.CreateModel(
            name='order',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
                ('created_date', models.DateField(auto_now_add=True)),
                ('points', models.IntegerField(blank=True, default=0, null=True)),
                ('green_rating', models.CharField(choices=[('1', 'rating 1'), ('2', 'rating 2'), ('3', 'rating 3'), ('4', 'rating 4'), ('5', 'rating 5')], max_length=200)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
3,281 | bf8f7b51b685f0e9131cb4d8a0bfc16ee5ad1263 | import os
from flask import Flask,request
from flask_restful import Resource,Api,reqparse
from flask_jwt import JWT,jwt_required
from resources.Users import UserRegister
from security import authenticate,identity
from resources.items import Item, ItemList
from resources.stores import Store, StoreList
# Flask-RESTful store/item API with JWT authentication.
app = Flask(__name__)
# Fall back to a local SQLite file when DATABASE_URL is not set.
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL','sqlite:///data.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# NOTE(review): hard-coded secret key - should come from the environment.
app.secret_key = 'naveen'
api = Api(app)
# JWT wires the /auth endpoint using the callbacks from security.py.
jwt = JWT(app,authenticate,identity)

# REST routes.
api.add_resource(StoreList,"/stores")
api.add_resource(Store,"/store/<string:name>")
api.add_resource(ItemList,"/items")
api.add_resource(Item,"/item/<string:name>")
api.add_resource(UserRegister,"/register")

if __name__ =="__main__":
    # Imported here rather than at the top - presumably to avoid a circular
    # import with the resource modules; confirm before moving it.
    from db import db
    db.init_app(app)
    app.run(port=5000,debug=True)
3,282 | ae3198e68d9479605327b729c01fb15eae87ab98 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_visual_coding_2p_analysis
----------------------------------
Tests for `visual_coding_2p_analysis` module.
"""
import pytest
@pytest.fixture
def decorated_example():
    """Sample pytest fixture (yields nothing).

    See more at: http://doc.pytest.org/en/latest/fixture.html
    """

def test_example(decorated_example):
    """Sample pytest test function with the pytest fixture as an argument.

    Only checks that the package can be imported.
    """
    import visual_coding_2p_analysis
|
3,283 | 9dbadb2421b04961e8e813831d06abc1ff301566 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class JiayuanItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    person_id = scrapy.Field()  # unique person ID
    user_info = scrapy.Field()  # age and home city shown on the search page
    main_url = scrapy.Field()   # person's entry URL on the search page
    nick_name = scrapy.Field()  # nickname shown on the search page
    heigth = scrapy.Field()     # height shown on the search page (field name kept as-is)
# person_info table: per-person detail fields.
# Fix: dropped the redundant extra parentheses around the base class
# (`class PersonInfo((scrapy.Item))` -> `class PersonInfo(scrapy.Item)`).
class PersonInfo(scrapy.Item):
    person_id = scrapy.Field()  # unique person ID
    buy_car = scrapy.Field()    # car ownership
    address = scrapy.Field()    # address
class OtherItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    '''
    A second item type that can be defined separately
    '''
    user_info = scrapy.Field()  # age and home city shown on the search page
    main_url = scrapy.Field()   # person's entry URL on the search page
    nick_name = scrapy.Field()  # nickname shown on the search page
    heigth = scrapy.Field()     # height shown on the search page (field name kept as-is)
3,284 | 9a539fd3ce4e3ff75af82407150ab4b550b255c1 | class Solution(object):
def canWinNim(self, n):
"""
:type n: int
:rtype: bool
"""
if(n % 4 != 0):
return True;
return False;
"""main():
sol = Solution();
sol.canWinNim(4);
"""
|
3,285 | 39b07f1a515787e80a1fb822e67e19e2301b894a | import pynucastro as pyna
# Build a CNO-and-beyond hydrogen-burning network from ReacLib rates.
rl = pyna.ReacLibLibrary()
h_burn = rl.linking_nuclei(["h1", "he4",
                            "c12", "c13",
                            "n13", "n14", "n15",
                            "o14", "o15", "o16","o17","o18",
                            "f17", "f18","f19",
                            "ne18", "ne19", "ne20",
                            "mg22", "mg24"],
                           with_reverse=False)

# Emit the StarKiller C++ network sources, carrying fe56 as an inert species.
rc = pyna.StarKillerCxxNetwork(libraries=[h_burn], inert_nuclei=["fe56"])
rc.write_network()

# Solar-like composition used for the rate plots below.
comp = pyna.Composition(rc.get_nuclei())
comp.set_solar_like()

rc.plot(outfile="cno_extras.png", rho=1.e6, T=1.e8, comp=comp, Z_range=[1,13], N_range=[1,13])
# Second plot: rotated layout, positive-Q links highlighted, alpha links hidden.
rc.plot(outfile="cno_extras_hide_alpha.png", rho=1.e6, T=1.e8, comp=comp, Z_range=[1,13], N_range=[1,13],
        rotated=True, highlight_filter_function=lambda r: r.Q > 0,
        curved_edges=True, hide_xalpha=True)
|
3,286 | 682495fec200ddad5a68f06bb0ec24e59036e66b | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
#######################
# Iterative solution
#######################
class Solution:
    def reverseList(self, head: ListNode) -> ListNode:
        """Reverse by copying all values into a list, then rebuilding a new
        chain from the last value to the first (O(n) time, O(n) extra space).
        The input list is consumed but its nodes are not reused."""
        if head is None:
            return head
        val = []
        while (head):
            val.append(head.val)
            head = head.next
        # Rebuild: the last original value becomes the new head.
        new_head = ListNode(val[-1])
        pre = new_head
        for i in range(len(val)-2, -1, -1):
            pre.next = ListNode(val[i])
            pre = pre.next
        pre.next = None
        return new_head

#######################
# Recursive solution
#######################
class Solution:
    def reverseList(self, head: ListNode) -> ListNode:
        """Recursive variant: builds a reversed *copy* of the list."""
        if head is None:
            return head
        new_h, cur_nd = self.recursiveReverseList(head)
        cur_nd.next = None   # terminate the rebuilt chain
        return new_h

    def recursiveReverseList(self, node):
        """Return (new_head, current_tail) of the reversed copy of the chain
        starting at *node*; the caller appends in front of current_tail."""
        if node.next == None:
            new_head = cur = ListNode(node.val)
            return new_head, cur
        new_head, cur_node = self.recursiveReverseList(node.next)
        cur_node.next = ListNode(node.val)
        return new_head, cur_node.next
#######################
# Other's iterative solution
#######################
class Solution:
    # @param {ListNode} head
    # @return {ListNode}
    def reverseList(self, head):
        """Reverse the list in place by re-pointing every node at its
        predecessor; O(n) time, O(1) extra space."""
        reversed_head = None
        node = head
        while node:
            following = node.next     # remember the rest of the list
            node.next = reversed_head # hook this node onto the reversed part
            reversed_head = node
            node = following
        return reversed_head
#######################
# Other's recursive solution
#######################
class Solution:
    # @param {ListNode} head
    # @return {ListNode}
    def reverseList(self, head):
        """In-place recursive reversal; O(n) time, O(n) call stack."""
        return self._reverse(head)

    def _reverse(self, node, prev=None):
        # `prev` accumulates the already-reversed prefix; when the input is
        # exhausted, `prev` is the new head.
        if not node:
            return prev
        n = node.next
        node.next = prev
        return self._reverse(n, node)
|
3,287 | fbab5826f47163cf82b534d311eae572c5fcd128 | import re
import pandas as pd
import pandas.io.formats.excel
from configparser import ConfigParser
from datetime import datetime
from termcolor import cprint
import os
import shutil
from openpyxl import load_workbook
import numpy as np
class pairtron():
    def affiliation_cleaner(self, affiliation):
        """Normalise an affiliation string: coerce to str, trim ' ;' padding
        and collapse space/semicolon runs."""
        affiliation = str(affiliation)
        # NOTE(review): the replace() literals below appear whitespace-mangled
        # in this copy - they were presumably meant to collapse repeated
        # blanks; confirm against the original file.
        affiliation = affiliation.strip(" ;").replace(" ", " ").replace(" "," ")
        while ' ;' in affiliation:
            affiliation = affiliation.replace(" ;", ";")
        while ';;' in affiliation:
            affiliation = affiliation.replace(";;", ";")
        return affiliation

    def zeta0_creation(self, indexed_files_dir, merge_columns):
        """ Returns pandas dataframe which has latest record for each manual id after merging all "sheet_name"
            in the previously indexed_files which are present in "indexed_files_dir"
        """
        # Skip Excel lock/temp files ("~$...").
        indexed_files = [file for file in os.listdir(indexed_files_dir) if not file.startswith("~")]
        indexed_files_dict = {}
        indexed_files_dict.clear()
        dateList = []
        del dateList[:]
        for file in indexed_files:
            # File names end in _MMDDYYYY.<ext>; reorder to YYYYMMDD so that
            # the lexicographic sort below is chronological.
            dated = file.split('_')[-1].split('.')[0]
            dated = dated[4:] + dated[:4]
            dateList.append(dated)
            indexed_files_dict[dated] = file
        dataframes = {}
        for dated, file in indexed_files_dict.items():
            file_name = indexed_files_dir + '\\' + file   # Windows-style path join
            dataframes[dated] = pd.read_excel(file_name, sheet_name=0)
            dataframes[dated]['file_date'] = dated
            dataframes[dated]['mid'] = [int(elem.split('_')[-1]) for elem in dataframes[dated]['manual_id']]
        # Newest file first, so drop_duplicates keeps the latest record per id.
        merged_df = pd.concat([dataframes[dated] for dated in dateList], ignore_index=True)
        merged_df = merged_df.sort_values('file_date', ascending=False)
        zeta0 = merged_df.drop_duplicates(subset='manual_id', keep='first')
        pd.set_option('mode.chained_assignment', None)
        for col in zeta0.columns:
            zeta0[col] = zeta0[col].astype('str')
        zeta0 = zeta0.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
        zeta0 = zeta0.sort_values('mid', ascending=True)
        if "manual_id" not in merge_columns:
            merge_columns.append("manual_id")   # NOTE: mutates the caller's list
        zeta0 = zeta0[merge_columns]
        return zeta0

    def copy_larvol_xlsx(self, template, acronym):
        """Copy the template workbook to a dated, per-acronym file name and
        remember that name in self.dest_file for the later write steps."""
        date = datetime.now().date().strftime('%m%d%Y')
        self.dest_file = os.path.basename(template).replace('ACRONYM',acronym).replace('MMDDYYYY', date + '_Pairtron')
        shutil.copy2(template, self.dest_file)
def selectionAfterJoin(self, df, cols, common_cols):
for col in common_cols:
if col != 'manual_id':
df[col] = np.where(df['{}_left'.format(col)].isnull() | ((df['{}_right'.format(col)].notnull()) & (df['{}_right'.format(col)] != '') & (df['{}_left'.format(col)] != df['{}_right'.format(col)])), df['{}_right'.format(col)], df['{}_left'.format(col)])
drop_list = ['{}_left'.format(col) for col in common_cols if col != 'manual_id']
drop_list.extend(['{}_right'.format(col) for col in common_cols if col != 'manual_id'])
df.drop(drop_list, axis=1, inplace=True)
return df[cols]
def update_larvol_xlsx(self, src, acronym, sheets, columns, zeta0_df=None):
wb = load_workbook(filename=self.dest_file)
ws = wb[sheets[0]]
ws.title = sheets[0].replace('ACRONYM',acronym)
try:
curr_df = pd.read_excel(src)
except:
curr_df = pd.read_csv(src)
if zeta0_df is not None:
curr_jn_zeta = pd.merge(curr_df, zeta0_df, left_on='manual_id', right_on='manual_id', how='left', suffixes=('_left', '_right'))
common_columns = [col for col in curr_df.columns if col in zeta0_df.columns]
self.source_df = self.selectionAfterJoin(curr_jn_zeta, columns, common_columns)
else:
self.source_df = curr_df
session_list = self.source_df.fillna('').values.tolist()
for row_iter in range(len(session_list)):
print(row_iter)
for col_iter in range(len(session_list[row_iter])):
print(col_iter)
ws.cell(row=row_iter+2, column=col_iter+1).value = self.affiliation_cleaner(session_list[row_iter][col_iter])
wb.save(self.dest_file)
    def process_merger(self, source_file, acronym, merge, template, sheets, indexed_files_dir, columns, merge_columns):
        """Create the output workbook and populate it, merging against the
        previously indexed files when `merge` is "YES" (case-insensitive)."""
        self.copy_larvol_xlsx(template, acronym)
        if merge.upper() == 'YES':
            zeta0 = self.zeta0_creation(indexed_files_dir, merge_columns)
            self.update_larvol_xlsx(source_file, acronym, sheets, columns, zeta0)
        else:
            self.update_larvol_xlsx(source_file, acronym, sheets, columns)

    def separated_by_number(self, source_id, manual_id, authors_list, affiliations_list):
        """Pair authors with affiliations linked by numeric superscript
        markers (e.g. "J. Doe1,2" with "1 Univ A; 2 Univ B").

        Returns a list of [source_id, manual_id, author, affiliations] rows.
        """
        separated_authors_list = []
        affiliations_dict = {}
        prev_affiliation = None
        # Build index -> affiliation text; unnumbered fragments are appended
        # onto the previous numbered affiliation.
        for affiliation in affiliations_list:
            if affiliation != '':
                group = re.findall(r'\d+', affiliation)
                if group != []:
                    num = list(map(int, group))[0]
                    affiliations_dict[str(num)] = str(num).join(affiliation.split(str(num))[1:]).strip(',. ')
                    prev_affiliation = num
                elif prev_affiliation is not None:
                    num = prev_affiliation
                    affiliations_dict[str(num)] = affiliations_dict[str(num)] + '; ' + affiliation.strip(',. ')
                    prev_affiliation = num
        for author in authors_list:
            # Digits trailing the author's name are the affiliation indices.
            group = re.findall(r'\d+', author)
            num_list = list(map(int, group))
            if num_list != []:
                author_name = author.split(str(num_list[0]))[0].strip(',.-; ')
            else:
                author_name = author.strip(',.-; ')
            for num in num_list:
                try:
                    elem = affiliations_dict[str(num)]
                except:
                    # Missing index: record an empty affiliation and warn.
                    affiliations_dict[str(num)] = ''
                    cprint("Exception for manual_id: {} as affiliation index {} wasn't found".format(manual_id, str(num)), 'yellow', attrs=['bold'])
            affiliation_name = '; '.join([affiliations_dict[str(num)].strip(',.- ') for num in num_list])
            separated_authors_list.append([source_id, manual_id, author_name, affiliation_name])
        return separated_authors_list
def separated_by_semicolon(self, source_id, manual_id, authors_list, affiliations_list):
separated_authors_list = []
for iter in range(len(authors_list)):
author_name = authors_list[iter].strip(',.-; ')
try:
affiliation_name = affiliations_list[iter].strip(',.- ')
except:
affiliation_name = ''
separated_authors_list.append([source_id, manual_id, author_name, affiliation_name])
return separated_authors_list
def common_affiliation(self, source_id, manual_id, authors_list, affiliations_list):
separated_authors_list = []
for iter in range(len(authors_list)):
author_name = authors_list[iter].strip(',.-; ')
affiliation_name = affiliations_list[0].strip(',.- ')
print(affiliation_name)
separated_authors_list.append([source_id, manual_id, author_name, affiliation_name])
return separated_authors_list
    def process_pairtron(self, sheet):
        """Explode each record's author/affiliation strings into one row per
        author and write the rows to *sheet* of the destination workbook."""
        source_df = self.source_df
        source_df = source_df[source_df['authors'].notnull()]
        source_id_list = source_df['source_id'].fillna('').tolist()
        manual_id_list = source_df['manual_id'].fillna('').tolist()
        authors_list = source_df['authors'].tolist()
        affiliation_list = source_df['author_affiliation'].fillna('').tolist()
        pairtron_list = []
        for iter in range(len(authors_list)):
            author_tokens = [elem.strip() for elem in authors_list[iter].split(';')]
            affiliation_tokens = [elem.strip() for elem in affiliation_list[iter].split(';')]
            # Strategy selection: numbered markers > positional pairing >
            # single shared affiliation; numbered authors with no index "1"
            # in the affiliations are flagged as missing.
            try:
                if author_tokens[0][-1].isdigit() and '1' in affiliation_list[iter]:
                    pairtron_list.extend(self.separated_by_number(source_id_list[iter], manual_id_list[iter], author_tokens, affiliation_tokens))
                elif len(author_tokens) == len(affiliation_tokens):
                    pairtron_list.extend(self.separated_by_semicolon(source_id_list[iter], manual_id_list[iter], author_tokens, affiliation_tokens))
                elif author_tokens[0][-1].isdigit() and '1' not in affiliation_list[iter]:
                    cprint("ALERT: manual_id: {} has missing affiliations.".format(manual_id_list[iter]), 'red', attrs=['bold'])
                else:
                    pairtron_list.extend(self.common_affiliation(source_id_list[iter], manual_id_list[iter], author_tokens, affiliation_tokens))
            except:
                # NOTE(review): silently drops any record that raises -
                # consider at least logging the manual_id here.
                pass
        df = pd.DataFrame(pairtron_list, columns=['source_id', 'manual_id', 'authors', 'author_affiliation'])
        df.drop_duplicates(inplace = True)
        authorsInfo_list = df.values.tolist()
        wb = load_workbook(filename=self.dest_file)
        ws = wb[sheet]
        # Write starting at row 2 (row 1 holds the template's headers).
        for row_iter in range(len(authorsInfo_list)):
            for col_iter in range(len(authorsInfo_list[row_iter])):
                ws.cell(row=row_iter+2, column=col_iter+1).value = authorsInfo_list[row_iter][col_iter]
        wb.save(self.dest_file)

    def processData(self, source_file, acronym, merge, template, sheets, indexed_files_dir, columns, merge_columns):
        """Top-level entry: build the merged workbook, then the author-pairs sheet."""
        self.process_merger(source_file, acronym, merge, template, sheets, indexed_files_dir, columns, merge_columns)
        self.process_pairtron(sheets[1])


if __name__ == "__main__":
    start = datetime.now()
    print ("Script Start Time ",start)
    print ("Script Running.....\n")
    # All runtime settings come from pairtron_config.ini.
    parser = ConfigParser()
    parser.read('pairtron_config.ini')
    source_file = parser.get('dynamic_fields', 'source_file')
    acronym = parser.get('dynamic_fields', 'ACRONYM')
    merge = parser.get('dynamic_fields', 'merge')
    merge_columns = [elem.strip() for elem in parser.get('dynamic_fields', 'merge_columns').split(',')]
    template = parser.get('static_fields', 'template')
    sheets = parser.get('static_fields', 'sheets').split(',')
    indexed_files_dir = parser.get('static_fields', 'indexed_files_dir')
    columns = parser.get('static_fields', 'columns').split(',')
    obj = pairtron()
    obj.processData(source_file, acronym, merge, template, sheets, indexed_files_dir, columns, merge_columns)
    total_time = datetime.now() - start
    print ("\nScript End Time ",datetime.now())
    print ("Execution Time", total_time)
3,288 | 8bbc929e2ff2321b97195031fa675fbdab269fcb | """
SUMMARY
Auxiliary functions, provided here to avoid clutter
"""
"""
Transforms a point (P = [x, y]) using the x, y intervals (Δxy = [Δx, Δy]) into the corresponding discrete point (D = [xd, yd])
loc_min = [x_min, y_min]
"""
def discretize_location(P, loc_min, Δxy):
    """Map a continuous point P = [x, y] onto its discrete cell [xd, yd],
    given the domain minimum loc_min = [x_min, y_min] and the cell sizes
    Δxy = [Δx, Δy]."""
    xd = int((P[0] - loc_min[0]) // Δxy[0])
    yd = int((P[1] - loc_min[1]) // Δxy[1])
    return [xd, yd]
"""
Transforms a discretized point (PD = [xd, yd]) using the x, y intervals (Δxy = [Δx, Δy]) into the corresponding point (P = [x, d])
loc_min = [x_min, y_min]
"""
def continuous_location(PD, loc_min, Δxy):
    """Inverse of discretize_location: map the discrete cell PD = [xd, yd]
    back to the continuous point at its origin corner."""
    return [PD[0]*Δxy[0] + loc_min[0],
            PD[1]*Δxy[1] + loc_min[1]]
"""
Obtains the points in the border of a cell (starting at bottom left (BL = [x_bl, y_bl])), starting point not repeated
"""
def cell_borders(BL, Δxy):
    """Corner coordinates of the cell whose bottom-left corner is BL,
    listed counter-clockwise starting at BL (start not repeated).
    Returns [x_coords, y_coords]."""
    x0, y0 = BL
    x1 = x0 + Δxy[0]
    y1 = y0 + Δxy[1]
    return [[x0, x1, x1, x0], [y0, y0, y1, y1]]
"""
Appends the first element of the array to the end, useful when plotting
"""
def first_append_to_last(arr):
    """Return a new list equal to *arr* with its first element repeated at
    the end (closes a polygon outline for plotting)."""
    first = arr[0]
    return arr + [first]
"""
Calculates the RMS (root mean square) value of an array
"""
def RMS(arr):
    """Root-mean-square value of the numbers in *arr*."""
    mean_square = sum(value * value for value in arr) / len(arr)
    return mean_square ** 0.5
"""
Calculates the L1 norm (Manhattan distance) between P1 = [x1, y1] and P2 = [x2, y2]
"""
def L1(P1, P2):
    """L1 norm (Manhattan distance) between P1 = [x1, y1] and P2 = [x2, y2]."""
    (x1, y1), (x2, y2) = P1, P2
    return abs(x2 - x1) + abs(y2 - y1)
"""
Turns x, y, o, v into a string of the form "x, y, v, o"
"""
def state_to_str(x, y, v, o):
    """Serialise the state components as the string "x, y, v, o"."""
    return ", ".join("%d" % part for part in (x, y, v, o))
|
3,289 | e99c158e54fd86b00e4e045e7fb28d961089800d | import os
import hashlib
import argparse
def hashfile(path, blocksize=65536):
    """Return the MD5 hex digest of the file at *path*, read in chunks of
    *blocksize* bytes so arbitrarily large files fit in memory.

    Fix: the file is now opened with a context manager, so the handle is
    closed even if a read raises.
    """
    hasher = hashlib.md5()
    with open(path, 'rb') as afile:
        while True:
            buf = afile.read(blocksize)
            if not buf:
                break
            hasher.update(buf)
    return hasher.hexdigest()
def make_duplicate_list(filepath):
    """Walk *filepath* recursively and group files by MD5 content hash.

    Returns {hash: [path, ...]} containing only hashes seen two or more
    times; the first-seen path leads each group.
    """
    first_seen = {}   # hash -> path of the first file with that content
    duplicates = {}   # hash -> all paths sharing that content (2+ files)
    for dir_name, _subdirs, file_names in os.walk(filepath):
        for file_name in file_names:
            full_path = os.path.join(dir_name, file_name)
            digest = hashfile(full_path)
            if digest not in first_seen:
                first_seen[digest] = full_path
                continue
            # Second or later occurrence: seed the group with the original,
            # then record this path too.
            if digest not in duplicates:
                duplicates[digest] = [first_seen[digest]]
            duplicates[digest].append(full_path)
    return duplicates
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="duplicates detector")
    parser.add_argument("path_to_folder",
                        help="path to folder containing duplicates")
    args = parser.parse_args()
    path = args.path_to_folder
    duplicates = make_duplicate_list(path)
    for idx, (file_hash, paths) in enumerate(duplicates.items(), 1):
        # Bug fix: previously `"..." + "found:".format(...)` bound .format()
        # only to the last fragment, so the {} placeholders were printed
        # literally and never substituted.
        print("{}) {} files with {} MD5 hash were found:".format(
            idx, len(paths), file_hash))
        # Fix: the inner loop no longer shadows the outer `idx`.
        for n, folder in enumerate(paths, 1):
            print("    {}. {}".format(n, folder))
|
3,290 | a8f2d527e9824d3986f4bb49c3cc75fd0d999bf7 | from .personal_questions import *
from .survey_questions import *
|
3,291 | 55986f6c2dafe650704660142cf85640e763b26d | #case1
print("My name is Jia-Chi. \nI have an older sister. \nI prefer Coke.\nMy favorite song is \"Amazing Grace\"")
#case2
print('''Liang, Jia-Chi
1
Coke
Amazing Grace''')
|
3,292 | 6ec39aa712c8abe610418e410883ff168d73126d | from sys import stdin
Read = stdin.readline
INF = int(1e9)   # "no known relation" sentinel

# n nodes, m directed comparison edges.
n, m = map(int, Read().split())
# graph[a][b] == 1 when a directly precedes b; INF when unknown; 0 on the diagonal.
graph = [[INF] * (n+1) for _ in range(n+1)]
for i in range(1, n+1):
    for j in range(1, n+1):
        if i == j:
            graph[i][j] = 0
for _ in range(m):
    a, b = map(int, Read().split())
    graph[a][b] = 1

# Floyd-Warshall closure: afterwards graph[i][j] < INF iff i can reach j.
for k in range(1, n+1):
    for i in range(1, n+1):
        for j in range(1, n+1):
            graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])

# A node's position is fully determined when it is comparable (either
# direction) with every node, itself included via the zero diagonal.
result = 0
for i in range(1, n+1):
    count = 0
    for j in range(1, n+1):
        if graph[i][j] != INF or graph[j][i] != INF:
            count += 1
    if count == n:
        result += 1
print(result)
3,293 | d2f77afd0d282b1fa4859c5368c9d2c745a5625e | #!/usr/bin/python
#
# Copyright 2017 Steven Watanabe
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from MockProgram import *

# Expect exactly one `strip -S -x <target>` invocation on the darwin test binary.
command('strip', '-S', '-x', input_file('bin/darwin-4.2.1/release/target-os-darwin/test'))

# Run the MockProgram verification harness over the expectations above.
main()
|
3,294 | 7413c06a990894c34ee5174d84f0e3bd20abf51f | import sys
import numpy as np
####################################################################################################
### These functions all perform QA checks on input files.
### These should catch many errors, but is not exhaustive.
####################################################################################################
####################################################################################################
def check_usage(subpuc_names, year, subpuc_usage):
    """Validate the subpuc_usage table: one data row per sub-PUC and a
    header column for *year*; exits the program on any failure."""
    # Data rows (everything after the header row) must match the sub-PUC list.
    if len(subpuc_names) != len(subpuc_usage[1:]):
        sys.exit('There is an issue with your subpuc_usage.csv file. Number of sub-PUC(s) is incorrect.')
    # The requested year must appear somewhere in the header row.
    year_available = any(subpuc_usage[0, 1 + i] == year
                         for i in range(len(subpuc_usage[0, 1:])))
    if not year_available:
        sys.exit('There is an issue with your subpuc_usage.csv file. '+str(year)+' is missing.')
####################################################################################################
####################################################################################################
def check_usetime(subpuc_names,subpuc_usetime):
    """Validate subpuc_usetimescales.csv: one row per sub-PUC and every
    timescale (column 1) within [0, 6]. Exits the program on failure."""
    if len(subpuc_names) != len(subpuc_usetime):
        sys.exit('There is an issue with your subpuc_usetimescales.csv file. Number of sub-PUC(s) is incorrect.')
    for row in subpuc_usetime:
        # Negated original condition; NaN fails it and triggers the exit too.
        if not (row[1] >= 0.0 and row[1] <= 6.0):
            sys.exit('There is a bounds issue in your subpuc_usetimescales.csv files.')
####################################################################################################
####################################################################################################
def check_controls(subpuc_names,subpuc_controls):
    """Validate subpuc_controls.csv: one row per sub-PUC and every control
    fraction (column 1) within [0, 1]. Exits the program on failure."""
    if len(subpuc_names) != len(subpuc_controls):
        sys.exit('There is an issue with your subpuc_controls.csv file. Number of sub-PUC(s) is incorrect.')
    for row in subpuc_controls:
        # Negated original condition; NaN fails it and triggers the exit too.
        if not (row[1] >= 0.0 and row[1] <= 1.0):
            sys.exit('There is a bounds issue in your subpuc_controls.csv files.')
####################################################################################################
####################################################################################################
def check_1st_order_spec(subpuc_names,first_ord_spec):
    """Validate subpuc_1st_order_speciation.csv: one row per sub-PUC,
    Water+Inorganic+Organic summing to ~1, Organic >= TOG, and every
    field within [0, 1]. Exits the program on failure."""
    if len(subpuc_names) != len(first_ord_spec):
        sys.exit('There is an issue with your subpuc_1st_order_speciation.csv file. Number of sub-PUC(s) is incorrect.')
    n_fields = len(first_ord_spec[0,:])
    for row in first_ord_spec:
        # Columns 0-2 are Water / Inorganic / Organic; allow a 1% tolerance.
        total = np.sum(row[0:3])
        if not (total >= 0.99 and total <= 1.01):
            sys.exit('There is an issue with your subpuc_1st_order_speciation.csv file. Water + Inorganic + Organic out of bounds.')
        # Column 3 is TOG, which must not exceed Organic (column 2).
        # Negated original comparison so NaN still triggers the exit.
        if not (row[2] >= row[3]):
            sys.exit('There is an issue with your subpuc_1st_order_speciation.csv file. TOG > Organic.')
        for j in range(n_fields):
            if not (row[j] >= 0.0 and row[j] <= 1.0):
                sys.exit('There is a bounds issue in your subpuc_1st_order_speciation.csv files.')
####################################################################################################
####################################################################################################
def check_organic_spec(subpuc_names,organic_spec,chem_index):
    """Validate subpuc_organic_speciation.csv: one column per sub-PUC, one
    species row per chem_index entry (after the header row), and each
    sub-PUC's speciation profile summing to ~1 (NaNs ignored).

    Fix: the species-count check is loop-invariant, so it now runs once up
    front (structure validated before the per-column sums) instead of being
    re-evaluated inside the column loop as in the original.
    Exits the program on failure.
    """
    if len(subpuc_names) != len(organic_spec[0,:]):
        sys.exit('There is an issue with your subpuc_organic_speciation.csv file. Number of sub-PUC(s) is incorrect.')
    if len(chem_index) != len(organic_spec[1:,0]):
        sys.exit('There is an issue with your subpuc_organic_speciation.csv file. Number of species is incorrect.')
    for i in range(len(organic_spec[0,:])):
        # Allow a 1% tolerance on each column's total mass fraction.
        total = np.nansum(organic_spec[1:,i])
        if not (total >= 0.99 and total <= 1.01):
            sys.exit('There is an issue with your subpuc_organic_speciation.csv file. Total speciation out of bounds.')
####################################################################################################
####################################################################################################
def check_chem_assignments(chem_props_vars,chem_props_strs,chem_index):
    """Validate chemical_assignments.csv: both property tables must have
    exactly one row per species in chem_index. Exits the program on failure."""
    counts_match = (len(chem_index) == len(chem_props_vars)
                    and len(chem_index) == len(chem_props_strs))
    if not counts_match:
        sys.exit('There is an issue with your chemical_assignments.csv file. Number of species is incorrect.')
#################################################################################################### |
3,295 | d7c4bee7245dab1cbb90ee68b8e99994ce7dd219 | class Solution:
# @param num, a list of integer
# @return an integer
def longestConsecutive(self, num):
sted = {}
n = len(num)
for item in num:
if item in sted:
continue
sted[item] = item
if item-1 in sted:
sted[item] = sted[item-1]
sted[sted[item-1]] = item
if item+1 in sted:
tmp = sted[item+1]
sted[tmp] = sted[item]
sted[sted[item]] = tmp
res = 0
for item in sted:
res = max(res, sted[item] - item)
return res + 1 |
3,296 | 0aec3fbc9f4b9f33aee021fa417c43f0feb0e3d1 | import math
import time
t1 = time.time()
# n(3n-1)/2
def isPentagon(item):
    """Return True iff item is a pentagonal number P_n = n(3n-1)/2 (n >= 1).

    Exact integer test: item is pentagonal iff 24*item + 1 is a perfect
    square (6n-1)^2, i.e. its root r satisfies (1 + r) % 6 == 0. This fixes
    the original float-sqrt probe, which can misround for large inputs and
    raised ValueError on negative input (now simply returns False).
    """
    if item < 1:
        return False
    d = 24 * item + 1
    r = math.isqrt(d)
    return r * r == d and (1 + r) % 6 == 0
# n(2n-1)
def isHexagon(item):
    """Return True iff item is a hexagonal number H_n = n(2n-1) (n >= 1).

    Exact integer test: item is hexagonal iff 8*item + 1 is a perfect
    square (4n-1)^2, i.e. its root r satisfies (1 + r) % 4 == 0. This fixes
    the original float-sqrt probe, which can misround for large inputs and
    raised ValueError on negative input (now simply returns False).
    """
    if item < 1:
        return False
    d = 8 * item + 1
    r = math.isqrt(d)
    return r * r == d and (1 + r) % 4 == 0
# Project Euler 45: T_285 = 40755 is triangular, pentagonal and hexagonal;
# scan triangle numbers T_i = i(i+1)/2 beyond it for the next such number.
i = 285
while True:
    i += 1
    tri = i*(i+1)//2
    if isPentagon(tri) and isHexagon(tri):
        print (tri)
        break
print("time:",time.time()-t1)
|
3,297 | 0d14534b210b13ede4a687e418d05d756d221950 | from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy.settings import CrawlerSettings
from scrapy import log, signals
from spiders.songspk_spider import SongsPKSpider
from scrapy.xlib.pydispatch import dispatcher
def stop_reactor():
    # Stop the Twisted event loop so the script can exit once crawling ends.
    reactor.stop()
# Shut the reactor down when the spider signals it has closed.
dispatcher.connect(stop_reactor, signal=signals.spider_closed)
spider = SongsPKSpider(domain='aqaq.com')
crawler = Crawler(CrawlerSettings())
crawler.configure()
crawler.crawl(spider)
crawler.start()
log.start(loglevel=log.DEBUG)
log.msg("------------>Running reactor")
# Blocks here until stop_reactor() fires (Python 2 print statement below).
result = reactor.run()
print result
log.msg("------------>Running stoped")
|
3,298 | 8650e0f1e7f2ac42c3c78191f79810f5befc9f41 | import pygame, states, events
from settings import all as settings
import gui
def handleInput(world, event):
    """Handle input on the performance screen: Select/Escape backs out."""
    if event == events.btnSelectOn or event == events.btnEscapeOn:
        bwd(world)
    # NOTE(review): events appear to be encoded so that value % 10 == 0 marks
    # button-press ("...On") events — confirm against the events module.
    if event%10 == 0:
        world.sounds['uiaction'].play(0)
    # world.shouldRedraw = True
def bwd(world):
    """Back out of the performance screen to the intro state."""
    # NOTE(review): `left` is neither defined nor imported in this module; if
    # this branch is reachable it raises NameError — verify where `left` lives.
    if world.state >= states.Config:
        return left(world)
    world.shouldRedraw = True
    world.state = states.Intro
def draw( world ):
    """Render the performance-stats screen; no-op unless a redraw is pending."""
    if not world.shouldRedraw:
        return
    r = world.worldsurf_rect
    world.worldsurf.fill(world.bg_color)
    perfdata = world.getperf()
    for i in range(0,len(perfdata)):
        # First 15 entries form the left column, the rest the right column.
        separate_point = 15
        if(i<separate_point):
            # Highlight the first five rows with a pink color.
            if(i<5):
                gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) * 30, alignment="midleft", color=(239,145,242))
            else:
                gui.simpleText(world, perfdata[i], r.centerx-350, (i+1)*30,alignment="midleft")
        else:
            gui.simpleText(world, perfdata[i], r.centerx - 50, (i-separate_point + 1) * 30, alignment="midleft")
    # Drawn once; wait for the next explicit redraw request.
    world.shouldRedraw = False
def enter(world):
    """Switch to the performance screen, reset the config-menu cursor, and
    request a redraw."""
    world.state = states.Perf
    world.configCatX = 0
    world.configOptX = -1
    world.shouldRedraw = True
|
3,299 | f2786e445bdf66cf6bb66f4cde4c7b2bf819d8aa |
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import itertools
# Save a nice dark grey as a variable
almost_black = '#262626'
import matplotlib
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
sns.set()
# Jupyter magic: render figures inline (this file was exported from a notebook,
# so it only runs inside IPython where get_ipython() exists).
get_ipython().magic('matplotlib inline')
# In[2]:
filepath = 'data/full_data_genre.csv'
header = None
header_included = True
# Use the first CSV row as the header when one is present.
if header_included:
    header = 0
df = pd.read_csv(filepath, header = header)
# A song "charted" if it spent at least one week on the Billboard chart.
df['billboard'] = df['weeks'] != 0
# Bug fix: the positional `axis` argument to DataFrame.drop was deprecated
# and removed in pandas 2.0 — pass it by keyword (one call drops both columns).
df = df.drop(['artist', 'title'], axis=1)
genres = ['country', 'dance', 'hip_hop', 'pop', 'r&b', 'rock', 'alternative']
accoustic = ['key', 'energy', 'liveness', 'tempo', 'speechiness',
             'acousticness', 'instrumentalness', 'danceability',
             'time_signature', 'loudness', 'duration', 'mode']
artist = ['artist_familiarity', 'artist_hottness']
# Feature subset visualized in the scatter plots below.
var = ['artist_familiarity', 'artist_hottness', 'tempo', 'energy',
       'liveness', 'danceability','speechiness', 'instrumentalness']
X = np.array(df[var])
# In[3]:
# Map each feature name to its column index in X.
fnames = df[var].columns
features = dict(zip(fnames, range(len(fnames))))
# In[4]:
palette1 = sns.color_palette("Paired")
flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
palette2 = sns.color_palette(flatui)
fs = 20  # fontsize
# Bug fix: the original referenced an undefined name `Y` (NameError). The only
# binary label built above is df['billboard'], so use it as the class labels —
# NOTE(review): confirm this matches the notebook's intent.
Y = np.asarray(df['billboard'])
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15,15))
ax = list(itertools.product(range(2), range(2)))
combinations = [('artist_familiarity', 'artist_hottness'),
                ('tempo', 'energy'),
                ('liveness', 'danceability'),
                ('instrumentalness', 'speechiness')]
# One subplot per feature pair: non-charting songs vs charting songs.
for i, item in enumerate(combinations):
    row, col = ax[i]
    x_vis = X[:, [features[item[0]], features[item[1]]]]
    axes[row, col].scatter(x_vis[Y==0, 0], x_vis[Y==0, 1], label="Class #0",
                           alpha=0.5, edgecolor=almost_black,
                           facecolor=palette1[4], linewidth=0.15)
    axes[row, col].scatter(x_vis[Y==1, 0], x_vis[Y==1, 1], label="Class #1",
                           alpha=0.1, edgecolor=almost_black,
                           facecolor=palette2[0], linewidth=0.15)
    # Bug fix: Axes.set_title takes a single label string; the original passed
    # three positional arguments (the 2nd would be misread as a fontdict).
    axes[row, col].set_title('{} v.s. {}'.format(item[1].capitalize(),
                                                 item[0].capitalize()),
                             fontsize=fs)
plt.legend()
plt.show()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.