| code (string, lengths 22 to 1.05M) | apis (list, lengths 1 to 3.31k) | extract_api (string, lengths 75 to 3.25M) |
|---|---|---|
# uncompyle6 version 3.4.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.16 (v2.7.16:413a49145e, Mar 2 2019, 14:32:10)
# [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)]
# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/SL_MkIII/physical_display.py
# Compiled at: 2019-04-23 14:43:03
from __future__ import absolute_import, print_function, unicode_literals
from itertools import chain, ifilter, imap
from ableton.v2.base import first
from ableton.v2.control_surface.elements import PhysicalDisplayElement as PhysicalDisplayElementBase
from .sysex import TEXT_PROPERTY_BYTE
class PhysicalDisplayElement(PhysicalDisplayElementBase):
def _translate_string(self, string):
return map(self._translate_char, ifilter(lambda c: c in self._translation_table, string))
class ConfigurablePhysicalDisplayElement(PhysicalDisplayElement):
def __init__(self, v_position=0, *a, **k):
super(ConfigurablePhysicalDisplayElement, self).__init__(*a, **k)
self._v_position = v_position
def _build_display_message(self, display):
def wrap_segment_message(segment):
return chain(segment.position_identifier(), (
TEXT_PROPERTY_BYTE, self._v_position), self._translate_string(unicode(segment).strip()), (0, ))
return chain(*imap(wrap_segment_message, display._logical_segments))
class SpecialPhysicalDisplayElement(PhysicalDisplayElement):
def _send_message(self):
if self._message_to_send is None:
self._message_to_send = self._build_message(map(first, self._central_resource.owners))
inner_message = self._message_to_send[len(self._message_header):-len(self._message_tail)]
if not self._is_whitespace(inner_message):
self.send_midi(self._message_to_send)
return
def _is_whitespace(self, message):
return all(map(lambda c: c == self.ascii_translations[' '], message))
|
[
"itertools.ifilter",
"itertools.imap"
] |
[((803, 858), 'itertools.ifilter', 'ifilter', (['(lambda c: c in self._translation_table)', 'string'], {}), '(lambda c: c in self._translation_table, string)\n', (810, 858), False, 'from itertools import chain, ifilter, imap\n'), ((1370, 1423), 'itertools.imap', 'imap', (['wrap_segment_message', 'display._logical_segments'], {}), '(wrap_segment_message, display._logical_segments)\n', (1374, 1423), False, 'from itertools import chain, ifilter, imap\n')]
|
from bs4 import BeautifulSoup
import requests
# By no means is this a complete list, but it is very easy to search for the ones you need later.
KNOWN_OPENGRAPH_TAGS = [
"og:site_name",
"og:title",
"og:locale",
"og:type",
"og:image",
"og:url",
"og:image:url",
"og:image:secure_url",
"og:image:type",
"og:image:width",
"og:image:height",
"og:image:alt",
]
def parse_page(page_url, tags_to_search = KNOWN_OPENGRAPH_TAGS):
'''
    Parses a page and returns a JSON-style dictionary of all OG tags found on that page.
    Passing in tags_to_search is optional. By default it will search through the KNOWN_OPENGRAPH_TAGS constant, but for the sake of efficiency you may want to search for only one or two tags.
    Returns False if the page is unreadable.
'''
# read the html from the page
response = requests.get(page_url)
    if response.status_code != 200:
return False
# set up beautiful soup
soup = BeautifulSoup(response.content, 'html.parser')
# loop through the known list of opengraph tags, searching for each and appending a dictionary as we go.
found_tags = {}
for og_tag in tags_to_search:
new_found_tag = soup.find("meta", property=og_tag)
if new_found_tag is not None:
found_tags[new_found_tag["property"]] = new_found_tag["content"]
return found_tags
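# Minimal usage sketch added for illustration (the URL below is hypothetical
# and network access is assumed to be available):
if __name__ == "__main__":
    found = parse_page("https://example.com/some-article",
                       tags_to_search=["og:title", "og:image"])
    if found:
        print(found.get("og:title"))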
|
[
"bs4.BeautifulSoup",
"requests.get"
] |
[((853, 875), 'requests.get', 'requests.get', (['page_url'], {}), '(page_url)\n', (865, 875), False, 'import requests\n'), ((978, 1024), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""html.parser"""'], {}), "(response.content, 'html.parser')\n", (991, 1024), False, 'from bs4 import BeautifulSoup\n')]
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
class PyBrDST():
def easter_date(self, year):
a = year % 19
b = year // 100
c = year % 100
d = b // 4
e = b % 4
f = (b + 8) // 25
g = (b - f + 1) // 3
h = (19 * a + b - d - g + 15) % 30
i = c // 4
k = c % 4
L = (32 + 2 * e + 2 * i - h - k) % 7
m = (a + 11 * h + 22 * L) // 451
month = (h + L - 7 * m + 114) // 31
day = ((h + L - 7 * m + 114) % 31) + 1
return datetime(year, month, day)
def carnival_date(self, easter_day):
return easter_day - timedelta(days=47)
def begin_dst(self, year):
diff = 6 - datetime(year, 10, 1).weekday()
return datetime(year, 10, 1) + timedelta(days=diff + 14)
def end_dst(self, year):
diff = 6 - datetime(year, 2, 1).weekday()
end_stime = datetime(year, 2, 1) + timedelta(days=diff + 14)
if self.carnival_date(self.easter_date(year)) == end_stime:
return end_stime + timedelta(days=7)
return end_stime
def get_dst(self, year):
return (self.begin_dst(year), self.end_dst(year + 1))
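# Minimal usage sketch added for illustration: prints the start and end of the
# Brazilian DST period that begins in the given year (the dates are computed by
# the class above, not hard-coded).
if __name__ == '__main__':
    dst = PyBrDST()
    begin, end = dst.get_dst(2017)
    print(begin.date(), end.date())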
|
[
"datetime.timedelta",
"datetime.datetime"
] |
[((552, 578), 'datetime.datetime', 'datetime', (['year', 'month', 'day'], {}), '(year, month, day)\n', (560, 578), False, 'from datetime import datetime, timedelta\n'), ((649, 667), 'datetime.timedelta', 'timedelta', ([], {'days': '(47)'}), '(days=47)\n', (658, 667), False, 'from datetime import datetime, timedelta\n'), ((766, 787), 'datetime.datetime', 'datetime', (['year', '(10)', '(1)'], {}), '(year, 10, 1)\n', (774, 787), False, 'from datetime import datetime, timedelta\n'), ((790, 815), 'datetime.timedelta', 'timedelta', ([], {'days': '(diff + 14)'}), '(days=diff + 14)\n', (799, 815), False, 'from datetime import datetime, timedelta\n'), ((916, 936), 'datetime.datetime', 'datetime', (['year', '(2)', '(1)'], {}), '(year, 2, 1)\n', (924, 936), False, 'from datetime import datetime, timedelta\n'), ((939, 964), 'datetime.timedelta', 'timedelta', ([], {'days': '(diff + 14)'}), '(days=diff + 14)\n', (948, 964), False, 'from datetime import datetime, timedelta\n'), ((1064, 1081), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (1073, 1081), False, 'from datetime import datetime, timedelta\n'), ((719, 740), 'datetime.datetime', 'datetime', (['year', '(10)', '(1)'], {}), '(year, 10, 1)\n', (727, 740), False, 'from datetime import datetime, timedelta\n'), ((865, 885), 'datetime.datetime', 'datetime', (['year', '(2)', '(1)'], {}), '(year, 2, 1)\n', (873, 885), False, 'from datetime import datetime, timedelta\n')]
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class DbCredentialSummary(object):
"""
    As the name suggests, a `DbCredentialSummary` object contains information about a `DbCredential`.
The DB credential is used for DB authentication with
the [DB Service].
"""
def __init__(self, **kwargs):
"""
Initializes a new DbCredentialSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this DbCredentialSummary.
:type id: str
:param user_id:
The value to assign to the user_id property of this DbCredentialSummary.
:type user_id: str
:param description:
The value to assign to the description property of this DbCredentialSummary.
:type description: str
:param time_created:
The value to assign to the time_created property of this DbCredentialSummary.
:type time_created: datetime
:param time_expires:
The value to assign to the time_expires property of this DbCredentialSummary.
:type time_expires: datetime
:param lifecycle_state:
The value to assign to the lifecycle_state property of this DbCredentialSummary.
:type lifecycle_state: str
"""
self.swagger_types = {
'id': 'str',
'user_id': 'str',
'description': 'str',
'time_created': 'datetime',
'time_expires': 'datetime',
'lifecycle_state': 'str'
}
self.attribute_map = {
'id': 'id',
'user_id': 'userId',
'description': 'description',
'time_created': 'timeCreated',
'time_expires': 'timeExpires',
'lifecycle_state': 'lifecycleState'
}
self._id = None
self._user_id = None
self._description = None
self._time_created = None
self._time_expires = None
self._lifecycle_state = None
@property
def id(self):
"""
Gets the id of this DbCredentialSummary.
The OCID of the DB credential.
:return: The id of this DbCredentialSummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this DbCredentialSummary.
The OCID of the DB credential.
:param id: The id of this DbCredentialSummary.
:type: str
"""
self._id = id
@property
def user_id(self):
"""
Gets the user_id of this DbCredentialSummary.
The OCID of the user the DB credential belongs to.
:return: The user_id of this DbCredentialSummary.
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""
Sets the user_id of this DbCredentialSummary.
The OCID of the user the DB credential belongs to.
:param user_id: The user_id of this DbCredentialSummary.
:type: str
"""
self._user_id = user_id
@property
def description(self):
"""
Gets the description of this DbCredentialSummary.
The description you assign to the DB credential. Does not have to be unique, and it's changeable.
(For tenancies that support identity domains) You can have an empty description.
:return: The description of this DbCredentialSummary.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this DbCredentialSummary.
The description you assign to the DB credential. Does not have to be unique, and it's changeable.
(For tenancies that support identity domains) You can have an empty description.
:param description: The description of this DbCredentialSummary.
:type: str
"""
self._description = description
@property
def time_created(self):
"""
Gets the time_created of this DbCredentialSummary.
Date and time the `DbCredential` object was created, in the format defined by RFC3339.
Example: `2016-08-25T21:10:29.600Z`
:return: The time_created of this DbCredentialSummary.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this DbCredentialSummary.
Date and time the `DbCredential` object was created, in the format defined by RFC3339.
Example: `2016-08-25T21:10:29.600Z`
:param time_created: The time_created of this DbCredentialSummary.
:type: datetime
"""
self._time_created = time_created
@property
def time_expires(self):
"""
Gets the time_expires of this DbCredentialSummary.
Date and time when this credential will expire, in the format defined by RFC3339.
Null if it never expires.
Example: `2016-08-25T21:10:29.600Z`
:return: The time_expires of this DbCredentialSummary.
:rtype: datetime
"""
return self._time_expires
@time_expires.setter
def time_expires(self, time_expires):
"""
Sets the time_expires of this DbCredentialSummary.
Date and time when this credential will expire, in the format defined by RFC3339.
Null if it never expires.
Example: `2016-08-25T21:10:29.600Z`
:param time_expires: The time_expires of this DbCredentialSummary.
:type: datetime
"""
self._time_expires = time_expires
@property
def lifecycle_state(self):
"""
Gets the lifecycle_state of this DbCredentialSummary.
The credential's current state. After creating a DB credential, make sure its `lifecycleState` changes from
CREATING to ACTIVE before using it.
:return: The lifecycle_state of this DbCredentialSummary.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this DbCredentialSummary.
The credential's current state. After creating a DB credential, make sure its `lifecycleState` changes from
CREATING to ACTIVE before using it.
:param lifecycle_state: The lifecycle_state of this DbCredentialSummary.
:type: str
"""
self._lifecycle_state = lifecycle_state
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
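# Minimal usage sketch added for illustration (the OCID values below are
# placeholders): the init_model_state_from_kwargs decorator applies the keyword
# arguments to the matching properties defined above.
#
#     summary = DbCredentialSummary(
#         id='ocid1.credential.oc1..example',
#         user_id='ocid1.user.oc1..example',
#         description='example credential',
#         lifecycle_state='ACTIVE')
#     print(summary)   # rendered via __repr__ / formatted_flat_dict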
|
[
"oci.util.formatted_flat_dict"
] |
[((7261, 7286), 'oci.util.formatted_flat_dict', 'formatted_flat_dict', (['self'], {}), '(self)\n', (7280, 7286), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n')]
|
from collections import deque
from .entity import Entity
from .matcher import Matcher
from .group import Group
from .exceptions import MissingEntity
class Context(object):
"""A context is a data structure managing entities."""
def __init__(self):
#: Entities retained by this context.
self._entities = set()
#: An object pool to recycle entities.
self._reusable_entities = deque()
#: Entities counter.
self._entity_index = 0
#: Dictionary of matchers mapping groups.
self._groups = {}
self._entity_indices = {}
self.entity_class = Entity
@property
def entities(self):
return self._entities
def has_entity(self, entity):
"""Checks if the context contains this entity.
:param entity: Entity
:rtype: bool
"""
return entity in self._entities
def create_entity(self):
"""Creates an entity. Pop one entity from the pool if it is not
empty, otherwise creates a new one. Increments the entity index.
Then adds the entity to the list.
"""
entity = (self._reusable_entities.pop() if self._reusable_entities
else self._create_entity())
entity.activate(self._entity_index)
self._entity_index += 1
self._entities.add(entity)
entity.on_component_added += self._comp_added_or_removed
entity.on_component_removed += self._comp_added_or_removed
entity.on_component_replaced += self._comp_replaced
return entity
def init_entity(self, entity):
"""Creates an entity. Pop one entity from the pool if it is not
empty, otherwise creates a new one. Increments the entity index.
Then adds the entity to the list.
"""
entity.activate(self._entity_index)
self._entity_index += 1
self._entities.add(entity)
entity.on_component_added += self._comp_added_or_removed
entity.on_component_removed += self._comp_added_or_removed
entity.on_component_replaced += self._comp_replaced
return entity
def destroy_entity(self, entity):
"""Removes an entity from the list and add it to the pool. If
the context does not contain this entity, a
:class:`MissingEntity` exception is raised.
:param entity: Entity
"""
if not self.has_entity(entity):
raise MissingEntity()
entity.destroy()
self._entities.remove(entity)
self._reusable_entities.append(entity)
def get_group(self, matcher):
"""User can ask for a group of entities from the context. The
group is identified through a :class:`Matcher`.
        :param matcher: Matcher
"""
if matcher in self._groups:
return self._groups[matcher]
group = Group(matcher)
for entity in self._entities:
group.handle_entity_silently(entity)
self._groups[matcher] = group
return group
def set_entity_class(self, entity_class):
self.entity_class = entity_class
def _create_entity(self):
return self.entity_class()
def set_unique_component(self, comp_type, *args):
entity = self.create_entity()
new_comp = comp_type.new(...)
exec('self.{0}Entity = entity'.format(comp_type._name), globals(), locals())
exec('self.{0} = new_comp'.format(comp_type._name), globals(), locals())
comp = entity.add_with_component(comp_type, new_comp)
return comp, entity
def has_unique_component(self, comp_type):
name = comp_type._name
        return eval('self.{0}Entity is not None'.format(name), globals(), locals())
def remove_unique_component(self, name):
        oldEntity = eval('self.{0}Entity'.format(name), globals(), locals())
exec('self.{0} = None'.format(name), globals(), locals())
exec('self.{0}Entity = None'.format(name), globals(), locals())
self.destroy_entity(oldEntity)
def get_unique_component(self, comp_type):
group = self.get_group(Matcher(comp_type))
return group.single_entity.get(comp_type)
def add_entity_index(self, entity_index):
self._entity_indices[entity_index.type] = entity_index
def get_entity_index(self, comp_type):
return self._entity_indices[comp_type]
def _comp_added_or_removed(self, entity, comp):
for matcher in self._groups:
self._groups[matcher].handle_entity(entity, comp)
def _comp_replaced(self, entity, previous_comp, new_comp):
for matcher in self._groups:
group = self._groups[matcher]
group.update_entity(entity, previous_comp, new_comp)
def __repr__(self):
return '<Context ({}/{})>'.format(
len(self._entities), len(self._reusable_entities))
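# Minimal usage sketch added for illustration (assumes the package's Entity and
# Matcher classes; the relative imports prevent running this module directly):
#
#     context = Context()
#     entity = context.create_entity()
#     assert context.has_entity(entity)
#     context.destroy_entity(entity)   # entity goes back into the reusable pool
#     repr(context)                    # e.g. '<Context (0/1)>'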
|
[
"collections.deque"
] |
[((419, 426), 'collections.deque', 'deque', ([], {}), '()\n', (424, 426), False, 'from collections import deque\n')]
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from concurrent import futures
import os
import pickle
from absl import flags
from absl import logging
import gym
import numpy as np
import tensorflow.compat.v1 as tf
from tf_agents.environments import suite_mujoco
from tf_agents.specs import array_spec
flags.DEFINE_integer('checkpoint_iterations', 50, 'Periodicity of checkpoints.')
flags.DEFINE_integer('eval_iterations', 50, 'Periodicity of evaluations.')
flags.DEFINE_integer('num_evals', 10, 'Number of evaluations.')
FLAGS = flags.FLAGS
_CHECKPOINT_FILENAME = 'model.ckpt'
def get_state_and_action_specs(gym_env, action_bounds=None):
"""Returns state and action specs for a Gym environment.
Args:
gym_env: gym.core.Env. A Gym environment.
action_bounds: list of strings. Min and max values in string for action
variables.
Returns:
(BoundedArraySpec, BoundedArraySpec). The first is a state spec and the
    second is an action spec.
"""
if isinstance(gym_env.observation_space, gym.spaces.Box):
state_spec = array_spec.BoundedArraySpec(
shape=gym_env.observation_space.shape,
dtype=gym_env.observation_space.dtype,
minimum=gym_env.observation_space.low,
maximum=gym_env.observation_space.high)
else:
raise NotImplementedError(type(gym_env.observation_space))
if action_bounds:
assert len(action_bounds) == 2
action_min = np.tile(float(action_bounds[0]), gym_env.action_space.shape)
action_max = np.tile(float(action_bounds[1]), gym_env.action_space.shape)
else:
action_min = gym_env.action_space.low
action_max = gym_env.action_space.high
if isinstance(gym_env.action_space, gym.spaces.Box):
action_spec = array_spec.BoundedArraySpec(
shape=gym_env.action_space.shape,
dtype=gym_env.action_space.dtype,
minimum=action_min,
maximum=action_max)
else:
raise NotImplementedError(type(gym_env.action_space))
return state_spec, action_spec
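# Minimal usage sketch added for illustration (assumes gym's classic
# Pendulum-v0 environment is available):
#
#   state_spec, action_spec = get_state_and_action_specs(
#       gym.make('Pendulum-v0'), action_bounds=['-2', '2'])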
def create_env(env_name):
"""Creates Environment."""
if env_name == 'Pendulum':
env = gym.make('Pendulum-v0')
elif env_name == 'Hopper':
env = suite_mujoco.load('Hopper-v2')
elif env_name == 'Walker2D':
env = suite_mujoco.load('Walker2d-v2')
elif env_name == 'HalfCheetah':
env = suite_mujoco.load('HalfCheetah-v2')
elif env_name == 'Ant':
env = suite_mujoco.load('Ant-v2')
elif env_name == 'Humanoid':
env = suite_mujoco.load('Humanoid-v2')
else:
raise ValueError('Unsupported environment: %s' % env_name)
return env
def _env_reset(env):
if hasattr(env, 'time_step_spec'):
return env.reset().observation
else:
return env.reset()
def _env_step(env, action):
if hasattr(env, 'time_step_spec'):
ts = env.step(action)
return ts.observation, ts.reward, env.done, env.get_info()
else:
return env.step(action)
def warm_up_replay_memory(session, behavior_policy, time_out, discount_factor,
replay_memory):
  # The number of events in an episode could be less than the maximum episode
# length (i.e., time_out) when the environment has a termination state.
min_replay_memory_size = FLAGS.batch_size * FLAGS.train_steps_per_iteration
while replay_memory.size < min_replay_memory_size:
num_events = min_replay_memory_size - replay_memory.size
num_episodes = int(num_events / time_out) + 1
collect_experience_parallel(num_episodes, session, behavior_policy,
time_out, discount_factor, replay_memory)
def collect_experience_parallel(num_episodes,
session,
behavior_policy,
time_out,
discount_factor,
replay_memory,
collect_init_state_step=False):
"""Executes threads for data collection."""
old_size = replay_memory.size
if num_episodes > 1:
with futures.ThreadPoolExecutor(
max_workers=FLAGS.collect_experience_parallelism) as executor:
for _ in range(num_episodes):
executor.submit(collect_experience, session, behavior_policy, time_out,
discount_factor, replay_memory, collect_init_state_step)
else:
collect_experience(session, behavior_policy, time_out, discount_factor,
replay_memory, collect_init_state_step)
return replay_memory.size - old_size
def collect_experience(session,
behavior_policy,
time_out,
discount_factor,
replay_memory,
collect_init_state_step=False):
"""Adds experiences into replay memory.
Generates an episode, computes Q targets for state and action pairs in the
episode, and adds them into the replay memory.
"""
with session.as_default():
with session.graph.as_default():
env = create_env(FLAGS.env_name)
episode, _, _ = _collect_episode(env, time_out, discount_factor,
behavior_policy, collect_init_state_step)
replay_memory.extend(episode)
if hasattr(env, 'close'):
env.close()
def _collect_episode(env, time_out, discount_factor, behavior_policy,
collect_init_state_step=False):
"""Collects episodes of trajectories by following a behavior policy."""
episode = []
episode_lengths = []
episode_rewards = []
state = _env_reset(env)
init_state = _env_reset(env)
done = False
episode_step_count = 0
e_reward = 0
for _ in range(time_out):
# First, sample an action
action = behavior_policy.action(state, use_action_function=True)
if action is None:
break
next_state, reward, done, info = _env_step(env, action)
reward = reward if not done else 0.0
# Save the experience to our buffer
if collect_init_state_step:
episode.append([
init_state, state, action, reward, next_state, episode_step_count,
done, info
])
else:
episode.append([state, action, reward, next_state, done, info])
# update state, e_reward and step count
state = next_state
if discount_factor < 1:
e_reward += (discount_factor**episode_step_count) * reward
else:
e_reward += reward
episode_step_count += 1
if done:
break
if episode_step_count > 0:
episode_lengths.append(episode_step_count)
episode_rewards.append(e_reward)
return (episode, episode_lengths, episode_rewards)
def periodic_updates(iteration,
train_step,
replay_memories,
greedy_policy,
saver,
sess,
time_out,
use_action_function=True,
tf_summary=None):
"""Evaluates the algorithm."""
if (FLAGS.checkpoint_dir and FLAGS.checkpoint_iterations and
iteration % FLAGS.checkpoint_iterations == 0):
logging.info('Iteration: %d, writing checkpoints..', iteration)
if not tf.gfile.Exists(FLAGS.checkpoint_dir):
tf.gfile.MakeDirs(FLAGS.checkpoint_dir)
checkpoint_file = os.path.join(FLAGS.checkpoint_dir, _CHECKPOINT_FILENAME)
saver.save(
sess, checkpoint_file, global_step=train_step, write_meta_graph=False)
for replay_memory in replay_memories:
replay_memory.save(FLAGS.checkpoint_dir, delete_old=True)
logging.info('Iteration: %d, completed writing checkpoints.', iteration)
if FLAGS.eval_iterations and iteration % FLAGS.eval_iterations == 0:
logging.info('Iteration: %d, evaluating the model..', iteration)
scores = []
action_magnitudes = []
episode_lens = []
future_list = []
with futures.ThreadPoolExecutor(max_workers=FLAGS.num_evals) as executor:
for _ in range(FLAGS.num_evals):
future_list.append(
executor.submit(
_evaluate_model,
time_out,
greedy_policy,
use_action_function=use_action_function,
render=False))
for future in futures.as_completed(future_list):
score, action_magnitude, episode_len = future.result()
scores.append(score)
action_magnitudes.append(action_magnitude)
episode_lens.append(episode_len)
avg_score = np.mean(scores)
avg_action_magitude = np.mean(action_magnitudes)
avg_episode_len = np.mean(episode_lens)
logging.info(
'Iteration: %d, avg_score: %.3f, avg_episode_len: %.3f, '
'avg_action_magnitude: %.3f', iteration, avg_score, avg_episode_len,
avg_action_magitude)
if tf_summary:
tf_summary.value.extend([
tf.Summary.Value(tag='avg_score', simple_value=avg_score),
tf.Summary.Value(
tag='avg_action_magnitude', simple_value=avg_action_magitude),
tf.Summary.Value(tag='avg_episode_len', simple_value=avg_episode_len)
])
def _evaluate_model(time_out,
greedy_policy,
use_action_function=False,
render=False):
"""Evaluates the model."""
env = create_env(FLAGS.env_name)
state = _env_reset(env)
total_reward = 0.0
total_action_magnitude = 0.0
episode_len = 0
for _ in range(time_out):
if render:
env.render()
action = greedy_policy.action(
np.reshape(state, [1, -1]), use_action_function)
if action is None:
break
next_state, reward, done, _ = _env_step(env, action)
state = next_state
total_reward += reward
if greedy_policy.continuous_action:
total_action_magnitude += np.linalg.norm(action, np.inf)
episode_len += 1
if done:
break
return total_reward, total_action_magnitude / episode_len, episode_len
def save_hparam_config(dict_to_save, config_dir):
"""Saves config file of hparam."""
filename = os.path.join(config_dir, 'hparam.pickle')
print('Saving results to %s' % filename)
if not tf.gfile.Exists(config_dir):
tf.gfile.MakeDirs(config_dir)
with tf.gfile.GFile(filename, 'w') as f:
pickle.dump(dict_to_save, f, protocol=2)
def action_projection(action, action_spec, softmax=False):
"""Projects action tensor onto a bound."""
if isinstance(action, np.ndarray):
if softmax:
e_x = np.exp(action - np.max(action, axis=1))
return e_x / np.sum(e_x, axis=1)
else:
return np.minimum(action_spec.maximum,
np.maximum(action_spec.minimum, action))
else:
# TF version
if softmax:
return tf.nn.softmax(action, axis=1)
else:
return tf.minimum(action_spec.maximum,
tf.maximum(action_spec.minimum, action))
def create_placeholders_for_q_net(tf_vars):
"""Creates placeholders for feeding values to TF variables.
Args:
tf_vars: list. A list of TF variables. These are variables for a neural
network approximating a Q function.
Returns:
dict. A dictionary mapping a string to a tf.placeholder.
"""
ph_dict = {}
for var in tf_vars:
ph_dict['{}_ph'.format(var.name)] = tf.placeholder(
dtype=var.dtype, shape=var.shape)
return ph_dict
def build_dummy_q_net(state, action, ph_dict, q_net_vars):
"""Builds a dummy Q network.
This function builds a neural network where parameters are given by
placeholders.
Args:
state: TF Tensor. State tensor.
action: TF Tensor. Action tensor.
ph_dict: dict. A dictionary mapping a TF variable's name to a
tf.placeholder. There is one placeholder for each variable in
`q_net_vars`.
    q_net_vars: list. A list of TF variables. The list should have an even
      number of variables: one for the weights and one for the bias of each
      layer of a neural network.
Returns:
TF Tensor. Output tensor of a Q network.
"""
assert bool(q_net_vars) and len(q_net_vars) % 2 == 0
net = tf.concat([state, action], axis=1)
# Specific for MLP
for itr, var in enumerate(q_net_vars):
if itr % 2 == 0:
# even itr, multiplicative weights
net = tf.einsum('ij,jk->ik', net, ph_dict['{}_ph'.format(var.name)])
else:
# odd itr, additive weights
net = tf.nn.bias_add(net, ph_dict['{}_ph'.format(var.name)])
# Output layer doesn't have an activation function.
if itr < len(q_net_vars) - 1:
net = tf.nn.relu(net)
return net
def make_tf_summary_histogram(values, num_bins=10):
"""Constructs a tf Summary of type histogram from a np array of values.
Args:
values: list or np.array.
num_bins: int. Number of histogram bins.
Returns:
tf.HistogramProto.
"""
values = np.reshape(values, [-1])
counts, limits = np.histogram(values, bins=num_bins)
return tf.HistogramProto(
min=np.amin(values),
max=np.amax(values),
num=values.size,
sum=np.sum(values),
sum_squares=np.sum(values**2),
bucket_limit=limits.tolist()[1:],
bucket=counts.tolist())
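# Minimal usage sketches added for illustration (the input values are arbitrary):
#
#   probs = action_projection(np.array([[1.0, 2.0, 3.0]]), action_spec=None,
#                              softmax=True)
#   histo = make_tf_summary_histogram(np.random.randn(1000), num_bins=20)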
|
[
"tf_agents.specs.array_spec.BoundedArraySpec",
"pickle.dump",
"numpy.sum",
"numpy.amin",
"numpy.maximum",
"absl.logging.info",
"tensorflow.compat.v1.Summary.Value",
"numpy.histogram",
"numpy.mean",
"tensorflow.compat.v1.gfile.Exists",
"numpy.linalg.norm",
"os.path.join",
"tensorflow.compat.v1.placeholder",
"tf_agents.environments.suite_mujoco.load",
"numpy.max",
"absl.flags.DEFINE_integer",
"numpy.reshape",
"concurrent.futures.ThreadPoolExecutor",
"tensorflow.compat.v1.nn.relu",
"tensorflow.compat.v1.gfile.GFile",
"concurrent.futures.as_completed",
"tensorflow.compat.v1.nn.softmax",
"gym.make",
"tensorflow.compat.v1.gfile.MakeDirs",
"tensorflow.compat.v1.concat",
"numpy.amax",
"tensorflow.compat.v1.maximum"
] |
[((1009, 1094), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""checkpoint_iterations"""', '(50)', '"""Periodicity of checkpoints."""'], {}), "('checkpoint_iterations', 50, 'Periodicity of checkpoints.'\n )\n", (1029, 1094), False, 'from absl import flags\n'), ((1090, 1164), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""eval_iterations"""', '(50)', '"""Periodicity of evaluations."""'], {}), "('eval_iterations', 50, 'Periodicity of evaluations.')\n", (1110, 1164), False, 'from absl import flags\n'), ((1165, 1228), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_evals"""', '(10)', '"""Number of evaluations."""'], {}), "('num_evals', 10, 'Number of evaluations.')\n", (1185, 1228), False, 'from absl import flags\n'), ((10639, 10680), 'os.path.join', 'os.path.join', (['config_dir', '"""hparam.pickle"""'], {}), "(config_dir, 'hparam.pickle')\n", (10651, 10680), False, 'import os\n'), ((12642, 12676), 'tensorflow.compat.v1.concat', 'tf.concat', (['[state, action]'], {'axis': '(1)'}), '([state, action], axis=1)\n', (12651, 12676), True, 'import tensorflow.compat.v1 as tf\n'), ((13389, 13413), 'numpy.reshape', 'np.reshape', (['values', '[-1]'], {}), '(values, [-1])\n', (13399, 13413), True, 'import numpy as np\n'), ((13433, 13468), 'numpy.histogram', 'np.histogram', (['values'], {'bins': 'num_bins'}), '(values, bins=num_bins)\n', (13445, 13468), True, 'import numpy as np\n'), ((1757, 1950), 'tf_agents.specs.array_spec.BoundedArraySpec', 'array_spec.BoundedArraySpec', ([], {'shape': 'gym_env.observation_space.shape', 'dtype': 'gym_env.observation_space.dtype', 'minimum': 'gym_env.observation_space.low', 'maximum': 'gym_env.observation_space.high'}), '(shape=gym_env.observation_space.shape, dtype=\n gym_env.observation_space.dtype, minimum=gym_env.observation_space.low,\n maximum=gym_env.observation_space.high)\n', (1784, 1950), False, 'from tf_agents.specs import array_spec\n'), ((2425, 2565), 'tf_agents.specs.array_spec.BoundedArraySpec', 'array_spec.BoundedArraySpec', ([], {'shape': 'gym_env.action_space.shape', 'dtype': 'gym_env.action_space.dtype', 'minimum': 'action_min', 'maximum': 'action_max'}), '(shape=gym_env.action_space.shape, dtype=gym_env\n .action_space.dtype, minimum=action_min, maximum=action_max)\n', (2452, 2565), False, 'from tf_agents.specs import array_spec\n'), ((2789, 2812), 'gym.make', 'gym.make', (['"""Pendulum-v0"""'], {}), "('Pendulum-v0')\n", (2797, 2812), False, 'import gym\n'), ((7736, 7799), 'absl.logging.info', 'logging.info', (['"""Iteration: %d, writing checkpoints.."""', 'iteration'], {}), "('Iteration: %d, writing checkpoints..', iteration)\n", (7748, 7799), False, 'from absl import logging\n'), ((7918, 7974), 'os.path.join', 'os.path.join', (['FLAGS.checkpoint_dir', '_CHECKPOINT_FILENAME'], {}), '(FLAGS.checkpoint_dir, _CHECKPOINT_FILENAME)\n', (7930, 7974), False, 'import os\n'), ((8181, 8253), 'absl.logging.info', 'logging.info', (['"""Iteration: %d, completed writing checkpoints."""', 'iteration'], {}), "('Iteration: %d, completed writing checkpoints.', iteration)\n", (8193, 8253), False, 'from absl import logging\n'), ((8330, 8394), 'absl.logging.info', 'logging.info', (['"""Iteration: %d, evaluating the model.."""', 'iteration'], {}), "('Iteration: %d, evaluating the model..', iteration)\n", (8342, 8394), False, 'from absl import logging\n'), ((9090, 9105), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (9097, 9105), True, 'import numpy as np\n'), ((9132, 9158), 'numpy.mean', 'np.mean', (['action_magnitudes'], 
{}), '(action_magnitudes)\n', (9139, 9158), True, 'import numpy as np\n'), ((9181, 9202), 'numpy.mean', 'np.mean', (['episode_lens'], {}), '(episode_lens)\n', (9188, 9202), True, 'import numpy as np\n'), ((9207, 9374), 'absl.logging.info', 'logging.info', (['"""Iteration: %d, avg_score: %.3f, avg_episode_len: %.3f, avg_action_magnitude: %.3f"""', 'iteration', 'avg_score', 'avg_episode_len', 'avg_action_magitude'], {}), "(\n 'Iteration: %d, avg_score: %.3f, avg_episode_len: %.3f, avg_action_magnitude: %.3f'\n , iteration, avg_score, avg_episode_len, avg_action_magitude)\n", (9219, 9374), False, 'from absl import logging\n'), ((10733, 10760), 'tensorflow.compat.v1.gfile.Exists', 'tf.gfile.Exists', (['config_dir'], {}), '(config_dir)\n', (10748, 10760), True, 'import tensorflow.compat.v1 as tf\n'), ((10766, 10795), 'tensorflow.compat.v1.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['config_dir'], {}), '(config_dir)\n', (10783, 10795), True, 'import tensorflow.compat.v1 as tf\n'), ((10803, 10832), 'tensorflow.compat.v1.gfile.GFile', 'tf.gfile.GFile', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (10817, 10832), True, 'import tensorflow.compat.v1 as tf\n'), ((10843, 10883), 'pickle.dump', 'pickle.dump', (['dict_to_save', 'f'], {'protocol': '(2)'}), '(dict_to_save, f, protocol=2)\n', (10854, 10883), False, 'import pickle\n'), ((11849, 11897), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'var.dtype', 'shape': 'var.shape'}), '(dtype=var.dtype, shape=var.shape)\n', (11863, 11897), True, 'import tensorflow.compat.v1 as tf\n'), ((2852, 2882), 'tf_agents.environments.suite_mujoco.load', 'suite_mujoco.load', (['"""Hopper-v2"""'], {}), "('Hopper-v2')\n", (2869, 2882), False, 'from tf_agents.environments import suite_mujoco\n'), ((4690, 4766), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': 'FLAGS.collect_experience_parallelism'}), '(max_workers=FLAGS.collect_experience_parallelism)\n', (4716, 4766), False, 'from concurrent import futures\n'), ((7811, 7848), 'tensorflow.compat.v1.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (7826, 7848), True, 'import tensorflow.compat.v1 as tf\n'), ((7856, 7895), 'tensorflow.compat.v1.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (7873, 7895), True, 'import tensorflow.compat.v1 as tf\n'), ((8490, 8545), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': 'FLAGS.num_evals'}), '(max_workers=FLAGS.num_evals)\n', (8516, 8545), False, 'from concurrent import futures\n'), ((8854, 8887), 'concurrent.futures.as_completed', 'futures.as_completed', (['future_list'], {}), '(future_list)\n', (8874, 8887), False, 'from concurrent import futures\n'), ((10122, 10148), 'numpy.reshape', 'np.reshape', (['state', '[1, -1]'], {}), '(state, [1, -1])\n', (10132, 10148), True, 'import numpy as np\n'), ((10386, 10416), 'numpy.linalg.norm', 'np.linalg.norm', (['action', 'np.inf'], {}), '(action, np.inf)\n', (10400, 10416), True, 'import numpy as np\n'), ((11308, 11337), 'tensorflow.compat.v1.nn.softmax', 'tf.nn.softmax', (['action'], {'axis': '(1)'}), '(action, axis=1)\n', (11321, 11337), True, 'import tensorflow.compat.v1 as tf\n'), ((13507, 13522), 'numpy.amin', 'np.amin', (['values'], {}), '(values)\n', (13514, 13522), True, 'import numpy as np\n'), ((13534, 13549), 'numpy.amax', 'np.amax', (['values'], {}), '(values)\n', (13541, 13549), True, 'import numpy as np\n'), ((13584, 13598), 
'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (13590, 13598), True, 'import numpy as np\n'), ((13618, 13637), 'numpy.sum', 'np.sum', (['(values ** 2)'], {}), '(values ** 2)\n', (13624, 13637), True, 'import numpy as np\n'), ((2924, 2956), 'tf_agents.environments.suite_mujoco.load', 'suite_mujoco.load', (['"""Walker2d-v2"""'], {}), "('Walker2d-v2')\n", (2941, 2956), False, 'from tf_agents.environments import suite_mujoco\n'), ((11114, 11133), 'numpy.sum', 'np.sum', (['e_x'], {'axis': '(1)'}), '(e_x, axis=1)\n', (11120, 11133), True, 'import numpy as np\n'), ((11213, 11252), 'numpy.maximum', 'np.maximum', (['action_spec.minimum', 'action'], {}), '(action_spec.minimum, action)\n', (11223, 11252), True, 'import numpy as np\n'), ((11417, 11456), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['action_spec.minimum', 'action'], {}), '(action_spec.minimum, action)\n', (11427, 11456), True, 'import tensorflow.compat.v1 as tf\n'), ((13096, 13111), 'tensorflow.compat.v1.nn.relu', 'tf.nn.relu', (['net'], {}), '(net)\n', (13106, 13111), True, 'import tensorflow.compat.v1 as tf\n'), ((3001, 3036), 'tf_agents.environments.suite_mujoco.load', 'suite_mujoco.load', (['"""HalfCheetah-v2"""'], {}), "('HalfCheetah-v2')\n", (3018, 3036), False, 'from tf_agents.environments import suite_mujoco\n'), ((9455, 9512), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""avg_score"""', 'simple_value': 'avg_score'}), "(tag='avg_score', simple_value=avg_score)\n", (9471, 9512), True, 'import tensorflow.compat.v1 as tf\n'), ((9524, 9602), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""avg_action_magnitude"""', 'simple_value': 'avg_action_magitude'}), "(tag='avg_action_magnitude', simple_value=avg_action_magitude)\n", (9540, 9602), True, 'import tensorflow.compat.v1 as tf\n'), ((9629, 9698), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""avg_episode_len"""', 'simple_value': 'avg_episode_len'}), "(tag='avg_episode_len', simple_value=avg_episode_len)\n", (9645, 9698), True, 'import tensorflow.compat.v1 as tf\n'), ((11071, 11093), 'numpy.max', 'np.max', (['action'], {'axis': '(1)'}), '(action, axis=1)\n', (11077, 11093), True, 'import numpy as np\n'), ((3073, 3100), 'tf_agents.environments.suite_mujoco.load', 'suite_mujoco.load', (['"""Ant-v2"""'], {}), "('Ant-v2')\n", (3090, 3100), False, 'from tf_agents.environments import suite_mujoco\n'), ((3142, 3174), 'tf_agents.environments.suite_mujoco.load', 'suite_mujoco.load', (['"""Humanoid-v2"""'], {}), "('Humanoid-v2')\n", (3159, 3174), False, 'from tf_agents.environments import suite_mujoco\n')]
|
import datetime
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from clinic.models import Doctor, Speciality
from timetables.models import Timetable
class TimetableTestCase(APITestCase):
def test_return_empty_list(self):
response = self._get_response()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, [])
def test_return_not_empty_list(self):
spec = Speciality.objects.create(title='Test')
doc = Doctor.objects.create(speciality=spec, first_name='Doctor', last_name='Test')
Timetable.objects.create(doctor=doc,
day_of_week=Timetable.DW_MON,
start_time=datetime.time(hour=8),
end_time=datetime.time(hour=17),
break_start_time=datetime.time(hour=12),
break_end_time=datetime.time(hour=13))
Timetable.objects.create(doctor=doc,
day_of_week=Timetable.DW_TUE,
start_time=datetime.time(hour=9),
end_time=datetime.time(hour=18),
break_start_time=datetime.time(hour=12),
break_end_time=datetime.time(hour=13))
response = self._get_response()
self.assertEqual(len(response.data), Timetable.objects.count())
    def test_return_correct_list(self):
spec = Speciality.objects.create(title='Test')
doc = Doctor.objects.create(speciality=spec, first_name='Doctor', last_name='Test')
tt = Timetable.objects.create(doctor=doc,
day_of_week=Timetable.DW_MON,
start_time=datetime.time(hour=8),
end_time=datetime.time(hour=17),
break_start_time=datetime.time(hour=12),
break_end_time=datetime.time(hour=13))
response = self._get_response()
tf = '%H:%M:%S'
self.assertEqual(response.data[0]['doctor'], doc.id)
self.assertEqual(response.data[0]['day_of_week'], tt.day_of_week)
self.assertEqual(response.data[0]['start_time'], tt.start_time.strftime(tf))
self.assertEqual(response.data[0]['end_time'], tt.end_time.strftime(tf))
self.assertEqual(response.data[0]['break_start_time'], tt.break_start_time.strftime(tf))
self.assertEqual(response.data[0]['break_end_time'], tt.break_end_time.strftime(tf))
def test_filter_by_doctor(self):
spec = Speciality.objects.create(title='Test')
doc1 = Doctor.objects.create(speciality=spec, first_name='Doctor', last_name='One')
doc2 = Doctor.objects.create(speciality=spec, first_name='Doctor', last_name='Two')
Timetable.objects.create(doctor=doc1,
day_of_week=Timetable.DW_MON,
start_time=datetime.time(hour=8),
end_time=datetime.time(hour=17),
break_start_time=datetime.time(hour=12),
break_end_time=datetime.time(hour=13))
Timetable.objects.create(doctor=doc1,
day_of_week=Timetable.DW_TUE,
start_time=datetime.time(hour=9),
end_time=datetime.time(hour=18),
break_start_time=datetime.time(hour=12),
break_end_time=datetime.time(hour=13))
Timetable.objects.create(doctor=doc2,
day_of_week=Timetable.DW_MON,
start_time=datetime.time(hour=8),
end_time=datetime.time(hour=17),
break_start_time=datetime.time(hour=12),
break_end_time=datetime.time(hour=13))
response = self._get_response(doctor=doc1.id)
self.assertEqual(len(response.data), doc1.timetable_set.count())
def test_filter_by_day_of_week(self):
spec = Speciality.objects.create(title='Test')
doc1 = Doctor.objects.create(speciality=spec, first_name='Doctor', last_name='One')
doc2 = Doctor.objects.create(speciality=spec, first_name='Doctor', last_name='Two')
Timetable.objects.create(doctor=doc1,
day_of_week=Timetable.DW_MON,
start_time=datetime.time(hour=8),
end_time=datetime.time(hour=17),
break_start_time=datetime.time(hour=12),
break_end_time=datetime.time(hour=13))
Timetable.objects.create(doctor=doc1,
day_of_week=Timetable.DW_TUE,
start_time=datetime.time(hour=9),
end_time=datetime.time(hour=18),
break_start_time=datetime.time(hour=12),
break_end_time=datetime.time(hour=13))
Timetable.objects.create(doctor=doc2,
day_of_week=Timetable.DW_MON,
start_time=datetime.time(hour=8),
end_time=datetime.time(hour=17),
break_start_time=datetime.time(hour=12),
break_end_time=datetime.time(hour=13))
response = self._get_response(day_of_week=Timetable.DW_MON)
self.assertEqual(len(response.data), Timetable.objects.filter(day_of_week=Timetable.DW_MON).count())
def _get_response(self, **kwargs):
url = reverse('api:timetable-list')
return self.client.get(url, data=kwargs, format='json')
|
[
"timetables.models.Timetable.objects.filter",
"timetables.models.Timetable.objects.count",
"clinic.models.Doctor.objects.create",
"django.urls.reverse",
"clinic.models.Speciality.objects.create",
"datetime.time"
] |
[((501, 540), 'clinic.models.Speciality.objects.create', 'Speciality.objects.create', ([], {'title': '"""Test"""'}), "(title='Test')\n", (526, 540), False, 'from clinic.models import Doctor, Speciality\n'), ((555, 632), 'clinic.models.Doctor.objects.create', 'Doctor.objects.create', ([], {'speciality': 'spec', 'first_name': '"""Doctor"""', 'last_name': '"""Test"""'}), "(speciality=spec, first_name='Doctor', last_name='Test')\n", (576, 632), False, 'from clinic.models import Doctor, Speciality\n'), ((1579, 1618), 'clinic.models.Speciality.objects.create', 'Speciality.objects.create', ([], {'title': '"""Test"""'}), "(title='Test')\n", (1604, 1618), False, 'from clinic.models import Doctor, Speciality\n'), ((1633, 1710), 'clinic.models.Doctor.objects.create', 'Doctor.objects.create', ([], {'speciality': 'spec', 'first_name': '"""Doctor"""', 'last_name': '"""Test"""'}), "(speciality=spec, first_name='Doctor', last_name='Test')\n", (1654, 1710), False, 'from clinic.models import Doctor, Speciality\n'), ((2740, 2779), 'clinic.models.Speciality.objects.create', 'Speciality.objects.create', ([], {'title': '"""Test"""'}), "(title='Test')\n", (2765, 2779), False, 'from clinic.models import Doctor, Speciality\n'), ((2795, 2871), 'clinic.models.Doctor.objects.create', 'Doctor.objects.create', ([], {'speciality': 'spec', 'first_name': '"""Doctor"""', 'last_name': '"""One"""'}), "(speciality=spec, first_name='Doctor', last_name='One')\n", (2816, 2871), False, 'from clinic.models import Doctor, Speciality\n'), ((2887, 2963), 'clinic.models.Doctor.objects.create', 'Doctor.objects.create', ([], {'speciality': 'spec', 'first_name': '"""Doctor"""', 'last_name': '"""Two"""'}), "(speciality=spec, first_name='Doctor', last_name='Two')\n", (2908, 2963), False, 'from clinic.models import Doctor, Speciality\n'), ((4316, 4355), 'clinic.models.Speciality.objects.create', 'Speciality.objects.create', ([], {'title': '"""Test"""'}), "(title='Test')\n", (4341, 4355), False, 'from clinic.models import Doctor, Speciality\n'), ((4371, 4447), 'clinic.models.Doctor.objects.create', 'Doctor.objects.create', ([], {'speciality': 'spec', 'first_name': '"""Doctor"""', 'last_name': '"""One"""'}), "(speciality=spec, first_name='Doctor', last_name='One')\n", (4392, 4447), False, 'from clinic.models import Doctor, Speciality\n'), ((4463, 4539), 'clinic.models.Doctor.objects.create', 'Doctor.objects.create', ([], {'speciality': 'spec', 'first_name': '"""Doctor"""', 'last_name': '"""Two"""'}), "(speciality=spec, first_name='Doctor', last_name='Two')\n", (4484, 4539), False, 'from clinic.models import Doctor, Speciality\n'), ((5938, 5967), 'django.urls.reverse', 'reverse', (['"""api:timetable-list"""'], {}), "('api:timetable-list')\n", (5945, 5967), False, 'from django.urls import reverse\n'), ((1495, 1520), 'timetables.models.Timetable.objects.count', 'Timetable.objects.count', ([], {}), '()\n', (1518, 1520), False, 'from timetables.models import Timetable\n'), ((786, 807), 'datetime.time', 'datetime.time', ([], {'hour': '(8)'}), '(hour=8)\n', (799, 807), False, 'import datetime\n'), ((851, 873), 'datetime.time', 'datetime.time', ([], {'hour': '(17)'}), '(hour=17)\n', (864, 873), False, 'import datetime\n'), ((925, 947), 'datetime.time', 'datetime.time', ([], {'hour': '(12)'}), '(hour=12)\n', (938, 947), False, 'import datetime\n'), ((997, 1019), 'datetime.time', 'datetime.time', ([], {'hour': '(13)'}), '(hour=13)\n', (1010, 1019), False, 'import datetime\n'), ((1173, 1194), 'datetime.time', 'datetime.time', ([], {'hour': '(9)'}), 
'(hour=9)\n', (1186, 1194), False, 'import datetime\n'), ((1238, 1260), 'datetime.time', 'datetime.time', ([], {'hour': '(18)'}), '(hour=18)\n', (1251, 1260), False, 'import datetime\n'), ((1312, 1334), 'datetime.time', 'datetime.time', ([], {'hour': '(12)'}), '(hour=12)\n', (1325, 1334), False, 'import datetime\n'), ((1384, 1406), 'datetime.time', 'datetime.time', ([], {'hour': '(13)'}), '(hour=13)\n', (1397, 1406), False, 'import datetime\n'), ((1879, 1900), 'datetime.time', 'datetime.time', ([], {'hour': '(8)'}), '(hour=8)\n', (1892, 1900), False, 'import datetime\n'), ((1949, 1971), 'datetime.time', 'datetime.time', ([], {'hour': '(17)'}), '(hour=17)\n', (1962, 1971), False, 'import datetime\n'), ((2028, 2050), 'datetime.time', 'datetime.time', ([], {'hour': '(12)'}), '(hour=12)\n', (2041, 2050), False, 'import datetime\n'), ((2105, 2127), 'datetime.time', 'datetime.time', ([], {'hour': '(13)'}), '(hour=13)\n', (2118, 2127), False, 'import datetime\n'), ((3118, 3139), 'datetime.time', 'datetime.time', ([], {'hour': '(8)'}), '(hour=8)\n', (3131, 3139), False, 'import datetime\n'), ((3183, 3205), 'datetime.time', 'datetime.time', ([], {'hour': '(17)'}), '(hour=17)\n', (3196, 3205), False, 'import datetime\n'), ((3257, 3279), 'datetime.time', 'datetime.time', ([], {'hour': '(12)'}), '(hour=12)\n', (3270, 3279), False, 'import datetime\n'), ((3329, 3351), 'datetime.time', 'datetime.time', ([], {'hour': '(13)'}), '(hour=13)\n', (3342, 3351), False, 'import datetime\n'), ((3506, 3527), 'datetime.time', 'datetime.time', ([], {'hour': '(9)'}), '(hour=9)\n', (3519, 3527), False, 'import datetime\n'), ((3571, 3593), 'datetime.time', 'datetime.time', ([], {'hour': '(18)'}), '(hour=18)\n', (3584, 3593), False, 'import datetime\n'), ((3645, 3667), 'datetime.time', 'datetime.time', ([], {'hour': '(12)'}), '(hour=12)\n', (3658, 3667), False, 'import datetime\n'), ((3717, 3739), 'datetime.time', 'datetime.time', ([], {'hour': '(13)'}), '(hour=13)\n', (3730, 3739), False, 'import datetime\n'), ((3894, 3915), 'datetime.time', 'datetime.time', ([], {'hour': '(8)'}), '(hour=8)\n', (3907, 3915), False, 'import datetime\n'), ((3959, 3981), 'datetime.time', 'datetime.time', ([], {'hour': '(17)'}), '(hour=17)\n', (3972, 3981), False, 'import datetime\n'), ((4033, 4055), 'datetime.time', 'datetime.time', ([], {'hour': '(12)'}), '(hour=12)\n', (4046, 4055), False, 'import datetime\n'), ((4105, 4127), 'datetime.time', 'datetime.time', ([], {'hour': '(13)'}), '(hour=13)\n', (4118, 4127), False, 'import datetime\n'), ((4694, 4715), 'datetime.time', 'datetime.time', ([], {'hour': '(8)'}), '(hour=8)\n', (4707, 4715), False, 'import datetime\n'), ((4759, 4781), 'datetime.time', 'datetime.time', ([], {'hour': '(17)'}), '(hour=17)\n', (4772, 4781), False, 'import datetime\n'), ((4833, 4855), 'datetime.time', 'datetime.time', ([], {'hour': '(12)'}), '(hour=12)\n', (4846, 4855), False, 'import datetime\n'), ((4905, 4927), 'datetime.time', 'datetime.time', ([], {'hour': '(13)'}), '(hour=13)\n', (4918, 4927), False, 'import datetime\n'), ((5082, 5103), 'datetime.time', 'datetime.time', ([], {'hour': '(9)'}), '(hour=9)\n', (5095, 5103), False, 'import datetime\n'), ((5147, 5169), 'datetime.time', 'datetime.time', ([], {'hour': '(18)'}), '(hour=18)\n', (5160, 5169), False, 'import datetime\n'), ((5221, 5243), 'datetime.time', 'datetime.time', ([], {'hour': '(12)'}), '(hour=12)\n', (5234, 5243), False, 'import datetime\n'), ((5293, 5315), 'datetime.time', 'datetime.time', ([], {'hour': '(13)'}), '(hour=13)\n', (5306, 5315), 
False, 'import datetime\n'), ((5470, 5491), 'datetime.time', 'datetime.time', ([], {'hour': '(8)'}), '(hour=8)\n', (5483, 5491), False, 'import datetime\n'), ((5535, 5557), 'datetime.time', 'datetime.time', ([], {'hour': '(17)'}), '(hour=17)\n', (5548, 5557), False, 'import datetime\n'), ((5609, 5631), 'datetime.time', 'datetime.time', ([], {'hour': '(12)'}), '(hour=12)\n', (5622, 5631), False, 'import datetime\n'), ((5681, 5703), 'datetime.time', 'datetime.time', ([], {'hour': '(13)'}), '(hour=13)\n', (5694, 5703), False, 'import datetime\n'), ((5820, 5874), 'timetables.models.Timetable.objects.filter', 'Timetable.objects.filter', ([], {'day_of_week': 'Timetable.DW_MON'}), '(day_of_week=Timetable.DW_MON)\n', (5844, 5874), False, 'from timetables.models import Timetable\n')]
|
# Generated by Django 3.2.5 on 2021-07-23 19:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import localflavor.br.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('arbitragem', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Competicao',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=200, verbose_name='Nome da Competição')),
('data_inicio', models.DateField(verbose_name='Início da Competição')),
('data_final', models.DateField(verbose_name='Final da Competição')),
('esta_ativa', models.BooleanField(default=True, verbose_name='Está Ativa?')),
('slug_competicao', models.SlugField(blank=True, unique=True)),
],
options={
'verbose_name': 'Competição',
'verbose_name_plural': 'Competições',
},
),
migrations.CreateModel(
name='Estadio',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome_estadio', models.CharField(max_length=200, verbose_name='Nome do Estádio')),
('endereco_estadio', models.CharField(blank=True, max_length=200, verbose_name='Endereco do Estadio')),
('cidade_estadio', models.CharField(blank=True, max_length=200, verbose_name='Cidade')),
('estado_estadio', localflavor.br.models.BRStateField(blank=True, max_length=2, verbose_name='Estado')),
('cep_estadio', localflavor.br.models.BRPostalCodeField(blank=True, max_length=9, verbose_name='CEP')),
],
options={
'verbose_name': 'Estádio',
'verbose_name_plural': 'Estádios',
},
),
migrations.CreateModel(
name='Time',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=200, unique=True, verbose_name='Nome do Time')),
('esta_ativo', models.BooleanField(default=True, verbose_name='Está Ativo?')),
('slug_time', models.SlugField(blank=True, unique=True)),
],
options={
'verbose_name': 'Time',
'verbose_name_plural': 'Times',
},
),
migrations.CreateModel(
name='Partida',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data_hora', models.DateTimeField(verbose_name='Horário da partida')),
('placar_mandante', models.IntegerField(default=0, verbose_name='Placar do Time Mandante')),
('placar_visitante', models.IntegerField(default=0, verbose_name='Placar do Time Visitante')),
('link_video', models.URLField(blank=True, verbose_name='Vídeo da Partida')),
('teve_periodo_extra', models.BooleanField(default=False, verbose_name='Houve Período Extra?')),
('competicao', models.ForeignKey(limit_choices_to={'esta_ativa': True}, on_delete=django.db.models.deletion.PROTECT, to='jogos.competicao', verbose_name='Competição')),
('escala_arbitragem', models.ManyToManyField(through='arbitragem.Escala', to=settings.AUTH_USER_MODEL, verbose_name='Escala de Arbitragem')),
('estadio', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='jogos.estadio', verbose_name='Estádio')),
('mandante', models.ForeignKey(limit_choices_to={'esta_ativo': True}, on_delete=django.db.models.deletion.PROTECT, related_name='mandante', to='jogos.time', verbose_name='Time Mandante')),
('visitante', models.ForeignKey(limit_choices_to={'esta_ativo': True}, on_delete=django.db.models.deletion.PROTECT, related_name='visitante', to='jogos.time', verbose_name='Time Visitante')),
],
options={
'verbose_name': 'Partida',
'verbose_name_plural': 'Partidas',
},
),
]
|
[
"django.db.models.URLField",
"django.db.migrations.swappable_dependency",
"django.db.models.ManyToManyField",
"django.db.models.BigAutoField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.SlugField",
"django.db.models.IntegerField",
"django.db.models.DateField",
"django.db.models.DateTimeField"
] |
[((276, 333), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (307, 333), False, 'from django.db import migrations, models\n'), ((508, 604), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (527, 604), False, 'from django.db import migrations, models\n'), ((628, 695), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Nome da Competição"""'}), "(max_length=200, verbose_name='Nome da Competição')\n", (644, 695), False, 'from django.db import migrations, models\n'), ((730, 783), 'django.db.models.DateField', 'models.DateField', ([], {'verbose_name': '"""Início da Competição"""'}), "(verbose_name='Início da Competição')\n", (746, 783), False, 'from django.db import migrations, models\n'), ((817, 869), 'django.db.models.DateField', 'models.DateField', ([], {'verbose_name': '"""Final da Competição"""'}), "(verbose_name='Final da Competição')\n", (833, 869), False, 'from django.db import migrations, models\n'), ((903, 964), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'verbose_name': '"""Está Ativa?"""'}), "(default=True, verbose_name='Está Ativa?')\n", (922, 964), False, 'from django.db import migrations, models\n'), ((1003, 1044), 'django.db.models.SlugField', 'models.SlugField', ([], {'blank': '(True)', 'unique': '(True)'}), '(blank=True, unique=True)\n', (1019, 1044), False, 'from django.db import migrations, models\n'), ((1314, 1410), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1333, 1410), False, 'from django.db import migrations, models\n'), ((1442, 1506), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Nome do Estádio"""'}), "(max_length=200, verbose_name='Nome do Estádio')\n", (1458, 1506), False, 'from django.db import migrations, models\n'), ((1546, 1631), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""Endereco do Estadio"""'}), "(blank=True, max_length=200, verbose_name='Endereco do Estadio'\n )\n", (1562, 1631), False, 'from django.db import migrations, models\n'), ((1664, 1731), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'verbose_name': '"""Cidade"""'}), "(blank=True, max_length=200, verbose_name='Cidade')\n", (1680, 1731), False, 'from django.db import migrations, models\n'), ((2233, 2329), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2252, 2329), False, 'from django.db import migrations, models\n'), ((2353, 2427), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'unique': '(True)', 'verbose_name': '"""Nome do Time"""'}), "(max_length=200, unique=True, verbose_name='Nome do Time')\n", (2369, 2427), False, 'from django.db import migrations, models\n'), ((2461, 2522), 'django.db.models.BooleanField', 
'models.BooleanField', ([], {'default': '(True)', 'verbose_name': '"""Está Ativo?"""'}), "(default=True, verbose_name='Está Ativo?')\n", (2480, 2522), False, 'from django.db import migrations, models\n'), ((2555, 2596), 'django.db.models.SlugField', 'models.SlugField', ([], {'blank': '(True)', 'unique': '(True)'}), '(blank=True, unique=True)\n', (2571, 2596), False, 'from django.db import migrations, models\n'), ((2854, 2950), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2873, 2950), False, 'from django.db import migrations, models\n'), ((2979, 3034), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'verbose_name': '"""Horário da partida"""'}), "(verbose_name='Horário da partida')\n", (2999, 3034), False, 'from django.db import migrations, models\n'), ((3073, 3143), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'verbose_name': '"""Placar do Time Mandante"""'}), "(default=0, verbose_name='Placar do Time Mandante')\n", (3092, 3143), False, 'from django.db import migrations, models\n'), ((3183, 3254), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'verbose_name': '"""Placar do Time Visitante"""'}), "(default=0, verbose_name='Placar do Time Visitante')\n", (3202, 3254), False, 'from django.db import migrations, models\n'), ((3288, 3348), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'verbose_name': '"""Vídeo da Partida"""'}), "(blank=True, verbose_name='Vídeo da Partida')\n", (3303, 3348), False, 'from django.db import migrations, models\n'), ((3390, 3461), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Houve Período Extra?"""'}), "(default=False, verbose_name='Houve Período Extra?')\n", (3409, 3461), False, 'from django.db import migrations, models\n'), ((3495, 3656), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'limit_choices_to': "{'esta_ativa': True}", 'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""jogos.competicao"""', 'verbose_name': '"""Competição"""'}), "(limit_choices_to={'esta_ativa': True}, on_delete=django.\n db.models.deletion.PROTECT, to='jogos.competicao', verbose_name=\n 'Competição')\n", (3512, 3656), False, 'from django.db import migrations, models\n'), ((3687, 3809), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'through': '"""arbitragem.Escala"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""Escala de Arbitragem"""'}), "(through='arbitragem.Escala', to=settings.\n AUTH_USER_MODEL, verbose_name='Escala de Arbitragem')\n", (3709, 3809), False, 'from django.db import migrations, models\n'), ((3835, 3946), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""jogos.estadio"""', 'verbose_name': '"""Estádio"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'jogos.estadio', verbose_name='Estádio')\n", (3852, 3946), False, 'from django.db import migrations, models\n'), ((3973, 4155), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'limit_choices_to': "{'esta_ativo': True}", 'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""mandante"""', 'to': '"""jogos.time"""', 'verbose_name': '"""Time Mandante"""'}), "(limit_choices_to={'esta_ativo': True}, 
on_delete=django.\n db.models.deletion.PROTECT, related_name='mandante', to='jogos.time',\n verbose_name='Time Mandante')\n", (3990, 4155), False, 'from django.db import migrations, models\n'), ((4179, 4363), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'limit_choices_to': "{'esta_ativo': True}", 'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""visitante"""', 'to': '"""jogos.time"""', 'verbose_name': '"""Time Visitante"""'}), "(limit_choices_to={'esta_ativo': True}, on_delete=django.\n db.models.deletion.PROTECT, related_name='visitante', to='jogos.time',\n verbose_name='Time Visitante')\n", (4196, 4363), False, 'from django.db import migrations, models\n')]
|
from securitybot import loader
PATH_ROOT = 'securitybot'
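# Secrets are stored under '<PATH_ROOT>/<client_type>/<provider>',
# e.g. 'securitybot/chat/slack' (hypothetical provider name).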
def main():
config = loader.load_yaml('config/bot.yaml')
secrets_provider = config['secretsmgmt']['provider']
secretsclient = loader.build_secrets_client(
secrets_provider=secrets_provider,
connection_config=config['secretsmgmt'][secrets_provider]
)
SECRETS = config['secretsmgmt']['secrets']
for client_type, clients in SECRETS.items():
client = config[client_type]['provider']
print('Chosen {} provider is {}.'.format(client_type, client))
if client in clients:
fullsecret = {}
for secret in clients[client]:
value = input(
"Enter value to store for {} secret {}: ".format(
client,
secret
)
)
fullsecret[secret] = value
secretsclient.create_secret(
name='{}/{}/{}'.format(PATH_ROOT, client_type, client),
value=fullsecret,
description='SecurityBot secrets for {} provider {}.'.format(
client_type, client
)
)
else:
print("No secrets found to store for client {}".format(client))
print('Finished.')
if __name__ == '__main__':
main()
|
[
"securitybot.loader.build_secrets_client",
"securitybot.loader.load_yaml"
] |
[((91, 126), 'securitybot.loader.load_yaml', 'loader.load_yaml', (['"""config/bot.yaml"""'], {}), "('config/bot.yaml')\n", (107, 126), False, 'from securitybot import loader\n'), ((210, 335), 'securitybot.loader.build_secrets_client', 'loader.build_secrets_client', ([], {'secrets_provider': 'secrets_provider', 'connection_config': "config['secretsmgmt'][secrets_provider]"}), "(secrets_provider=secrets_provider,\n connection_config=config['secretsmgmt'][secrets_provider])\n", (237, 335), False, 'from securitybot import loader\n')]
|
"""
Tests the analysis module.
"""
import unittest
import pandas as pd
import mock
from .context import analysis
from .context import evaluation
from .context import interpretability
from .context import label
from .context import purity
from .context import util
from .context import test_utils as tu
class AnalysisTestCase(unittest.TestCase):
def setUp(self):
config_obj = tu.sample_config()
mock_label_obj = mock.Mock(label.Label)
mock_purity_obj = mock.Mock(purity.Purity)
mock_evaluation_obj = mock.Mock(evaluation.Evaluation)
mock_interpretability_obj = mock.Mock(
interpretability.Interpretability)
util_obj = util.Util()
self.test_obj = analysis.Analysis(config_obj, mock_label_obj,
mock_purity_obj, mock_evaluation_obj,
mock_interpretability_obj, util_obj)
def tearDown(self):
self.test_obj = None
def test_init(self):
# setup
test_obj = self.test_obj
# assert
self.assertTrue(isinstance(test_obj.purity_obj,
purity.Purity))
self.assertTrue(isinstance(test_obj.evaluation_obj,
evaluation.Evaluation))
self.assertTrue(isinstance(test_obj.interpretability_obj,
interpretability.Interpretability))
def test_relabel(self):
self.test_obj.label_obj.relabel = mock.Mock()
self.test_obj.relabel()
self.test_obj.label_obj.relabel.assert_called()
def test_purity(self):
self.test_obj.purity_obj.test_relations = mock.Mock()
self.test_obj.test_purity('df')
self.test_obj.purity_obj.test_relations.assert_called_with('df')
def test_evaluate(self):
self.test_obj.config_obj.modified = True
self.test_obj.check_dataframe = mock.Mock(return_value='df2')
self.test_obj.evaluation_obj.evaluate = mock.Mock()
self.test_obj.evaluate('df')
self.test_obj.check_dataframe.assert_called_with('df')
self.test_obj.evaluation_obj.evaluate.assert_called_with('df2',
modified=True)
def test_explain(self):
self.test_obj.interpretability_obj.explain = mock.Mock()
self.test_obj.check_dataframe = mock.Mock(return_value='df2')
self.test_obj.explain('df')
self.test_obj.check_dataframe.assert_called_with('df')
self.test_obj.interpretability_obj.explain.assert_called_with('df2')
def test_check_dataframe(self):
result = self.test_obj.check_dataframe('df')
self.assertTrue(result == 'df')
def test_check_dataframe_none(self):
self.test_obj.define_file_folders = mock.Mock(return_value='folds/')
self.test_obj.read_fold = mock.Mock(return_value='df')
result = self.test_obj.check_dataframe(None)
self.test_obj.define_file_folders.assert_called()
self.test_obj.read_fold.assert_called_with('folds/')
self.assertTrue(result == 'df')
def test_define_file_folders(self):
result = self.test_obj.define_file_folders()
self.assertTrue(result == 'ind/data/soundcloud/folds/')
def test_read_fold(self):
self.test_obj.util_obj.check_file = mock.Mock(return_value=True)
pd.read_csv = mock.Mock(return_value='df')
result = self.test_obj.read_fold('f/')
self.test_obj.util_obj.check_file.assert_called_with('f/test_1.csv')
pd.read_csv.assert_called_with('f/test_1.csv', lineterminator='\n')
self.assertTrue(result == 'df')
def test_suite():
suite = unittest.TestLoader().loadTestsFromTestCase(AnalysisTestCase)
return suite
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"pandas.read_csv.assert_called_with",
"unittest.TestLoader",
"mock.Mock"
] |
[((3689, 3704), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3702, 3704), False, 'import unittest\n'), ((433, 455), 'mock.Mock', 'mock.Mock', (['label.Label'], {}), '(label.Label)\n', (442, 455), False, 'import mock\n'), ((482, 506), 'mock.Mock', 'mock.Mock', (['purity.Purity'], {}), '(purity.Purity)\n', (491, 506), False, 'import mock\n'), ((537, 569), 'mock.Mock', 'mock.Mock', (['evaluation.Evaluation'], {}), '(evaluation.Evaluation)\n', (546, 569), False, 'import mock\n'), ((606, 650), 'mock.Mock', 'mock.Mock', (['interpretability.Interpretability'], {}), '(interpretability.Interpretability)\n', (615, 650), False, 'import mock\n'), ((1400, 1411), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1409, 1411), False, 'import mock\n'), ((1580, 1591), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1589, 1591), False, 'import mock\n'), ((1826, 1855), 'mock.Mock', 'mock.Mock', ([], {'return_value': '"""df2"""'}), "(return_value='df2')\n", (1835, 1855), False, 'import mock\n'), ((1904, 1915), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1913, 1915), False, 'import mock\n'), ((2203, 2214), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2212, 2214), False, 'import mock\n'), ((2255, 2284), 'mock.Mock', 'mock.Mock', ([], {'return_value': '"""df2"""'}), "(return_value='df2')\n", (2264, 2284), False, 'import mock\n'), ((2680, 2712), 'mock.Mock', 'mock.Mock', ([], {'return_value': '"""folds/"""'}), "(return_value='folds/')\n", (2689, 2712), False, 'import mock\n'), ((2747, 2775), 'mock.Mock', 'mock.Mock', ([], {'return_value': '"""df"""'}), "(return_value='df')\n", (2756, 2775), False, 'import mock\n'), ((3224, 3252), 'mock.Mock', 'mock.Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (3233, 3252), False, 'import mock\n'), ((3275, 3303), 'mock.Mock', 'mock.Mock', ([], {'return_value': '"""df"""'}), "(return_value='df')\n", (3284, 3303), False, 'import mock\n'), ((3438, 3505), 'pandas.read_csv.assert_called_with', 'pd.read_csv.assert_called_with', (['"""f/test_1.csv"""'], {'lineterminator': '"""\n"""'}), "('f/test_1.csv', lineterminator='\\n')\n", (3468, 3505), True, 'import pandas as pd\n'), ((3578, 3599), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (3597, 3599), False, 'import unittest\n')]
|
# import warnings
# warnings.filterwarnings("ignore", category=DeprecationWarning)
import requests_mock
import zeeguu.core.model
from faker import Faker
from unittest import TestCase
from zeeguu.core.test.test_data.mocking_the_web import mock_requests_get
class ModelTestMixIn(TestCase):
db = zeeguu.core.db
def setUp(self):
self.faker = Faker()
self.db.create_all()
def tearDown(self):
super(ModelTestMixIn, self).tearDown()
self.faker = None
# sometimes the tearDown freezes on drop_all
# and it seems that it's because there's still
# a session open somewhere. Better call first:
self.db.session.close()
self.db.drop_all()
def run(self, result=None):
# For the unit tests we use several HTML documents
# that are stored locally so we don't have to download
# them for every test
# To do this we mock requests.get
with requests_mock.Mocker() as m:
mock_requests_get(m)
super(ModelTestMixIn, self).run(result)
|
[
"requests_mock.Mocker",
"zeeguu.core.test.test_data.mocking_the_web.mock_requests_get",
"faker.Faker"
] |
[((361, 368), 'faker.Faker', 'Faker', ([], {}), '()\n', (366, 368), False, 'from faker import Faker\n'), ((961, 983), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (981, 983), False, 'import requests_mock\n'), ((1002, 1022), 'zeeguu.core.test.test_data.mocking_the_web.mock_requests_get', 'mock_requests_get', (['m'], {}), '(m)\n', (1019, 1022), False, 'from zeeguu.core.test.test_data.mocking_the_web import mock_requests_get\n')]
|
import tensorflow as tf
from arekit.contrib.networks.context.configurations.rnn import RNNConfig
from arekit.contrib.networks.tf_helpers.cell_types import CellTypes
class BiLSTMConfig(RNNConfig):
__lstm_cell_initializer = tf.keras.initializers.glorot_normal()
def __init__(self):
super(BiLSTMConfig, self).__init__()
super(BiLSTMConfig, self).modify_hidden_size(128)
super(BiLSTMConfig, self).modify_l2_reg(0.001)
super(BiLSTMConfig, self).modify_dropout_rnn_keep_prob(0.8)
super(BiLSTMConfig, self).modify_cell_type(CellTypes.BasicLSTM)
super(BiLSTMConfig, self).modify_bias_initializer(tf.constant_initializer(0.1))
super(BiLSTMConfig, self).modify_weight_initializer(tf.contrib.layers.xavier_initializer())
# region properties
@property
def LSTMCellInitializer(self):
return self.__lstm_cell_initializer
# endregion
|
[
"tensorflow.constant_initializer",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.keras.initializers.glorot_normal"
] |
[((229, 266), 'tensorflow.keras.initializers.glorot_normal', 'tf.keras.initializers.glorot_normal', ([], {}), '()\n', (264, 266), True, 'import tensorflow as tf\n'), ((648, 676), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.1)'], {}), '(0.1)\n', (671, 676), True, 'import tensorflow as tf\n'), ((738, 776), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (774, 776), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# Inspired by https://github.com/silvester747/aio_marantz_avr
#
# This is to control Denon/Marantz AVR devices
#
# Copyright (c) 2020 <NAME>
#
# Note large part of this code was taken from scapy and other opensource software
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
"""Control of an AVR over Telnet."""
import asyncio
import logging
import re
from enum import Enum
from typing import Any, List, Mapping, Optional, Callable
from .enums import *
# Some replacement for the surround sound format
SSTRANSFORM = [
("Audio-", " "),
("Dd", "Dolby Digital "),
("Hd", "HD "),
("DD", "Dolby Digital "),
("Dts", "DTS"),
["Mstr", "Master "],
("Dsur", "Digital Surround "),
("Mtrx", "Matrix"),
("Dscrt", "Discrete "),
("Mch", "Multi-Channel "),
(" Es ", " ES "),
]
EXTRAS = ["SSINFAI"]
NEEDSPACE = ["PSDEL", "PSDYNVOL", "PSDRC"]
def cc_string(identifier: str) -> str:
""" From https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python """
matches = re.finditer(
".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier
)
return " ".join([m.group(0) for m in matches])
def only_int(val: str) -> str:
return "".join(
[x for x in val if x in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]]
)
class AvrError(Exception):
"""Base class for all errors returned from an AVR."""
pass
class AvrTimeoutError(AvrError):
"""A request to the AVR has timed out."""
pass
async def avr_factory(
name: str, host: str, port: int = 23, timeout: float = 3.0
) -> "MDAVR":
"""Connect to an AVR.
    :param name: The name of this device.
    :type name: str
    :param host: The device IP address
    :type host: str
:returns: A device instance or None if connection cannot be established
:rtype: MDAVR
"""
try:
reader, writer = await asyncio.open_connection(host, port=port)
return MDAVR(name, reader, writer, timeout)
except:
return None
def _on_off_from_bool(value: bool) -> str:
if value:
return "ON"
else:
return "OFF"
def _on_off_to_bool(value: str) -> bool:
return value == "ON"
class _CommandDef:
code: str
label: str
vals: Optional[Enum]
def __init__(self, label: str, vals: Any):
self.label = label
self.values = vals
class MDAVR:
"""Connection to a Marantz AVR over Telnet.
Uses `connect` to create a connection to the AVR.
"""
CMDS_DEFS: Mapping[str, _CommandDef] = {
"PW": _CommandDef("Power", Power),
"ZM": _CommandDef("Main Zone", Power),
"Z2": _CommandDef("Zone 2", Power),
"Z3": _CommandDef("Zone 3", Power),
"MU": _CommandDef("Muted", None),
"Z2MU": _CommandDef("Z2 Muted", None),
"Z3MU": _CommandDef("Z3 Muted", None),
"MV": _CommandDef("Volume", None),
"Z2MV": _CommandDef("Z2 Volume", None),
"Z3MV": _CommandDef("Z3 Volume", None),
"SI": _CommandDef("Source", InputSource),
"Z2SI": _CommandDef("Z2 Source", InputSource),
"Z3SI": _CommandDef("Z3 Source", InputSource),
"MS": _CommandDef("Surround Mode", SurroundMode),
"CV": _CommandDef("Channel Bias", ChannelBias),
"PV": _CommandDef("Picture Mode", PictureMode),
"ECO": _CommandDef("Eco Mode", EcoMode),
"SSSOD": _CommandDef("Available Source", InputSource),
"PSDEL": _CommandDef("Sound Delay", None),
"PSDRC": _CommandDef("Dynamic Range Compression", DRCMode),
"PSDYNVOL": _CommandDef("Dynamic Volume", DynamicMode),
# SSANA ? analog inputs
# SSHDM ? Mapping between source and HDMI connection
# SSDIN ? digital inputs, COax OPtical
# SSSPC ? Speakers' configuration
# SSPAA ? Not sure. Active speakers config? Also returns SSSPC
# SSQSNZMA ? Smart select.. what for?
}
_reader: asyncio.StreamReader
_writer: asyncio.StreamWriter
_timeout: float
def __init__(
self,
name: str,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
timeout: float,
):
self.name = name
self._reader = reader
self._writer = writer
self._timeout = timeout
self.status = {}
self.maxvol = 98 # Good default ;)
self.alive = True
self.write_queue = asyncio.Queue()
for x in self.CMDS_DEFS:
if len(x) < 5:
self.status[self.CMDS_DEFS[x].label] = "-"
for x in ["PSDEL", "PSDRC", "PSDYNVOL"]:
self.status[self.CMDS_DEFS[x].label] = "-"
self.cvend = True
self.notify = None
self.mysources = []
# Start reading
self.wtask = asyncio.get_event_loop().create_task(self._do_write())
self.rtask = asyncio.get_event_loop().create_task(self._do_read())
self._get_capabilities()
self.refresh()
def _get_capabilities(self):
"""
Here we try to get the various capabilities of the device connected.
"""
# Let's get the available Sources
self.write_queue.put_nowait(("SSSOD", " ?"))
def _get_current(self, cmd):
return self.status[self.CMDS_DEFS[cmd].label]
def _get_list(self, cmd):
return [cc_string(x.name) for x in list(self.CMDS_DEFS[cmd].values)]
# API Starts here
@property
def power(self) -> Optional[Power]:
"""Power state of the AVR."""
return self._get_current("PW")
@property
def zmain(self) -> Optional[Power]:
"""Power state of the AVR."""
return self._get_current("ZM")
@property
def z2(self) -> Optional[Power]:
"""Power state of the AVR."""
return self._get_current("Z2")
@property
def z3(self) -> Optional[Power]:
"""Power state of the AVR."""
return self._get_current("Z3")
@property
def muted(self) -> Optional[bool]:
"""Boolean if volume is currently muted."""
return self._get_current("MU")
@property
def z2_muted(self) -> Optional[bool]:
"""Boolean if volume is currently muted."""
return self._get_current("Z2MU")
@property
def z3_muted(self) -> Optional[bool]:
"""Boolean if volume is currently muted."""
return self._get_current("Z3MU")
@property
def volume(self) -> Optional[float]:
"""Volume level of the AVR zone (00..max_volume)."""
return self._get_current("MV")
@property
def z2_volume(self) -> Optional[float]:
"""Volume level of the AVR zone (00..max_volume)."""
return self._get_current("Z2MV")
    @property
    def z3_volume(self) -> Optional[float]:
        """Volume level of the AVR zone (00..max_volume)."""
        return self._get_current("Z3MV")
@property
def max_volume(self) -> Optional[float]:
"""Maximum volume level of the AVR zone."""
return self.maxvol
@property
def source(self) -> str:
"""Name of the current input source."""
return self._get_current("SI")
@property
def z2_source(self) -> str:
"""Name of the current input source."""
return self._get_current("Z2SI")
    @property
    def z3_source(self) -> str:
        """Name of the current input source."""
        return self._get_current("Z3SI")
@property
def source_list(self) -> List[str]:
"""List of available input sources."""
if self.mysources:
return self.mysources
return self._get_list("SI")
@property
def sound_mode(self) -> str:
"""Name of the current sound mode."""
return self._get_current("MS")
@property
def sound_mode_list(self) -> List[str]:
"""List of available sound modes."""
return self._get_list("MS")
@property
def picture_mode(self) -> str:
"""Name of the current sound mode."""
return self._get_current("PV")
@property
def picture_mode_list(self) -> List[str]:
"""List of available sound modes."""
return self._get_list("PV")
@property
def eco_mode(self) -> str:
"""Current ECO mode."""
return self._get_current("ECO")
@property
def eco_mode_list(self) -> List[str]:
"""List of available exo modes."""
return self._get_list("ECO")
@property
def channels_bias(self) -> Mapping[str, float]:
return self._get_current("CV")
@property
def channels_bias_list(self) -> List[str]:
"""List of currently available."""
return [x for x in self._get_current("CV").keys()]
@property
def drc_mode(self) -> str:
"""Current ECO mode."""
return self._get_current("PSDRC")
@property
def drc_mode_list(self) -> List[str]:
"""List of available sound modes."""
return self._get_list("PSDRC")
@property
def dynamic_volume_mode(self) -> str:
"""Current ECO mode."""
return self._get_current("PSDYNVOL")
@property
def dynamic_volume_mode_list(self) -> List[str]:
"""List of available sound modes."""
return self._get_list("PSDYNVOL")
@property
def delay(self) -> str:
"""Current ECO mode."""
return self._get_current("PSDEL")
def refresh(self) -> None:
"""Refresh all properties from the AVR."""
for cmd_def in self.CMDS_DEFS:
if cmd_def in NEEDSPACE:
qs = " ?"
else:
qs = "?"
            self.write_queue.put_nowait((cmd_def, qs))
def turn_on(self) -> None:
"""Turn the AVR on."""
self.write_queue.put_nowait(("PW", "ON"))
def turn_off(self) -> None:
"""Turn the AVR off."""
self.write_queue.put_nowait(("PW", "STANDBY"))
def main_turn_on(self) -> None:
"""Turn the AVR on."""
self.write_queue.put_nowait(("ZM", "ON"))
def main_turn_off(self) -> None:
"""Turn the AVR off."""
self.write_queue.put_nowait(("ZM", "OFF"))
def z2_turn_on(self) -> None:
"""Turn the AVR on."""
self.write_queue.put_nowait(("Z2", "ON"))
def z2_turn_off(self) -> None:
"""Turn the AVR off."""
self.write_queue.put_nowait(("Z2", "OFF"))
def z3_turn_on(self) -> None:
"""Turn the AVR on."""
self.write_queue.put_nowait(("Z3", "ON"))
def z3_turn_off(self) -> None:
"""Turn the AVR off."""
self.write_queue.put_nowait(("Z3", "OFF"))
def mute_volume(self, mute: bool) -> None:
"""Mute or unmute the volume.
Arguments:
mute -- True to mute, False to unmute.
"""
self.write_queue.put_nowait(("MU", _on_off_from_bool(mute)))
def _zone_mute_volume(self, zone: str, mute: bool) -> None:
"""Mute or unmute the volume.
Arguments:
mute -- True to mute, False to unmute.
"""
self.write_queue.put_nowait((zone, _on_off_from_bool(mute)))
def z2_mute_volume(self, mute: bool) -> None:
"""Mute or unmute the volume.
Arguments:
mute -- True to mute, False to unmute.
"""
self._zone_mute_volume("Z2MU", mute)
def z3_mute_volume(self, mute: bool) -> None:
"""Mute or unmute the volume.
Arguments:
mute -- True to mute, False to unmute.
"""
self._zone_mute_volume("Z3MU", mute)
def set_volume(self, level: float) -> None:
"""Set the volume level.
Arguments:
level -- An integer value between 0 and `max_volume`.
"""
if level > self.maxvol:
            level = self.maxvol
if int(10 * level) % 10:
            # Needs to be a multiple of 5
level = int(5 * round(10 * level / 5))
else:
level = int(level)
self.write_queue.put_nowait(("MV", f"{level:02}"))
def volume_up(self) -> None:
"""Turn the volume level up one notch."""
self._zone_volume("MV", "UP")
def volume_down(self) -> None:
"""Turn the volume level down one notch."""
self._zone_volume("MV", "DOWN")
def z2_set_volume(self, level: float) -> None:
"""Set the volume level.
Arguments:
level -- An integer value between 0 and `max_volume`.
"""
self._zone_set_volume("Z2", level)
def z3_set_volume(self, level: float) -> None:
"""Set the volume level.
Arguments:
level -- An integer value between 0 and `max_volume`.
"""
self._zone_set_volume("Z3", level)
def z2_volume_up(self) -> None:
"""Turn the volume level down one notch."""
self._zone_volume("Z2", "UP")
def z3_volume_up(self) -> None:
"""Turn the volume level down one notch."""
self._zone_volume("Z3", "UP")
def z2_volume_down(self) -> None:
"""Turn the volume level down one notch."""
self._zone_volume("Z2", "DOWN")
def z3_volume_down(self) -> None:
"""Turn the volume level down one notch."""
self._zone_volume("Z3", "DOWN")
def set_channel_bias(self, chan: str, level: float) -> None:
"""Set the volume level.
Arguments:
chan -- channel to set
level -- A float value between -12.0 and +12.0
"""
if chan not in self.channels_bias:
logging.warning(f"Channel {chan} is not available right now.")
return
if self.channels_bias[chan] != level:
chan = chan.replace(" ", "")
level = level + 50 # 50 is 0dB
if level < 38:
level = 38
elif level > 62:
level = 62
if int(10 * level) % 10:
                # Needs to be a multiple of 5
level = int(5 * round(10 * level / 5))
else:
level = int(level)
cmd = None
for x in self.CMDS_DEFS["CV"].values:
if x.name == chan:
cmd = x.value
break
if cmd:
self.write_queue.put_nowait(("CV", f"{cmd} {level:02}"))
else:
logging.error(
f"Channel {chan} should exist. This should not have happened."
)
def channel_bias_up(self, chan: str) -> None:
"""Turn the volume level up one notch."""
if chan not in self.channels_bias:
logging.warning(f"Channel {chan} is not available right now.")
return
if self.channels_bias[chan] == 12:
# We are at the limit. It won't respond
logging.debugf(f"Channel {chan} it at the upper limit.")
return
chan = chan.replace(" ", "")
cmd = None
for x in self.CMDS_DEFS["CV"].values:
if x.name == chan:
cmd = x.value
break
if cmd:
self.write_queue.put_nowait(("CV", f"{cmd} UP"))
else:
logging.error(
f"Channel {chan} should exist. This should not have happened."
)
def channel_bias_down(self, chan: str) -> None:
"""Turn the volume level down one notch."""
if chan not in self.channels_bias:
logging.warning(f"Channel {chan} is not available right now.")
return
if self.channels_bias[chan] == -12:
# We are at the limit. It won't respond
logging.debugf(f"Channel {chan} it at the lowewr limit.")
return
chan = chan.replace(" ", "")
cmd = None
for x in self.CMDS_DEFS["CV"].values:
if x.name == chan:
cmd = x.value
break
if cmd:
self.write_queue.put_nowait(("CV", f"{cmd} DOWN"))
else:
logging.error(
f"Channel {chan} should exist. This should not have happened."
)
def channels_bias_reset(self):
self.write_queue.put_nowait(("CV", "ZRL"))
def select_source(self, source: str) -> None:
"""Select the input source."""
try:
source = self.CMDS_DEFS["SI"].values[source.replace(" ", "")]
except:
logging.warning(f"Warning: {source} is not a valid source")
return
self.write_queue.put_nowait(("SI", source.value))
def z2_select_source(self, source: str) -> None:
"""Select the input source."""
try:
source = self.CMDS_DEFS["SI"].values[source.replace(" ", "")]
except:
logging.warning(f"Warning: {source} is not a valid source")
return
self.write_queue.put_nowait(("Z2", source.value))
def z3_select_source(self, source: str) -> None:
"""Select the input source."""
try:
source = self.CMDS_DEFS["SI"].values[source.replace(" ", "")]
except:
logging.warning(f"Warning: {source} is not a valid source")
return
self.write_queue.put_nowait(("Z3", source.value))
def select_sound_mode(self, mode: str) -> None:
"""Select the sound mode."""
try:
mode = self.CMDS_DEFS["MS"].values[mode.replace(" ", "")]
except:
logging.warning(f"Warning: {mode} is not a valid mode")
return
self.write_queue.put_nowait(("MS", mode.value))
def select_picture_mode(self, mode: str) -> None:
"""Select the sound mode."""
try:
mode = self.CMDS_DEFS["PV"].values[mode.replace(" ", "")]
except:
logging.warning(f"Warning: {mode} is not a valid mode")
return
self.write_queue.put_nowait(("PV", mode.value))
def select_eco_mode(self, mode: str) -> None:
"""Select the sound mode."""
try:
mode = self.CMDS_DEFS["ECO"].values[mode.replace(" ", "").title()]
except:
logging.warning(f"Warning: {mode} is not a valid eco mode")
return
self.write_queue.put_nowait(("ECO", mode.value))
def set_delay(self, level: float) -> None:
"""Set the volume level.
Arguments:
level -- An integer value between 0 and `max_volume`.
"""
level = int(level)
if level < 0:
level = 0
if level > 999:
level = 999
self.write_queue.put_nowait(("PSDEL", f" {level:03}"))
def select_drc_mode(self, mode: str) -> None:
"""Select the sound mode."""
try:
mode = self.CMDS_DEFS["PSDRC"].values[mode.replace(" ", "").title()]
except:
logging.warning(f"Warning: {mode} is not a valid DRC mode")
return
self.write_queue.put_nowait(("PSDRC", " " + mode.value))
def select_dynamic_volume_mode(self, mode: str) -> None:
"""Select the sound mode."""
try:
mode = self.CMDS_DEFS["PSDYNVOL"].values[mode.replace(" ", "").title()]
except:
logging.warning(f"Warning: {mode} is not a valid Dynamic Volume mode")
return
self.write_queue.put_nowait(("PSDYNVOL", " " + mode.value))
def notifyme(self, func: Callable) -> None:
"""Register a callback for when an event happens. The callable should have 2 parameters,
The label of the the changing value and the new value
"""
self.notify = func
def close(self):
self.alive = False
self._writer.close()
self.rtask.cancel()
self.wtask.cancel()
logging.debug(f"Closed device {self.name}")
# API ends here
def _zone_volume(self, zone: str, uod: str) -> None:
"""Turn the volume level up one notch."""
self.write_queue.put_nowait((zone, uod))
def _zone_set_volume(self, zone: str, level: float) -> None:
"""Set the volume level.
Arguments:
zone -- The zone affected
level -- An integer value between 0 and `max_volume`.
"""
if level > self.maxvol:
            level = self.maxvol
level = int(level)
self.write_queue.put_nowait((zone, f"{level:02}"))
async def _send_command(self, cmd: str, val: Any) -> asyncio.Future:
tosend = f"{cmd}{val}\r"
logging.debug(f"Sending {tosend}")
self._writer.write(tosend.encode())
await self._writer.drain()
logging.debug("Write drained")
def _process_response(self, response: str) -> Optional[str]:
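        # Collect every known command prefix that the response starts with; the
        # longest match wins and is dispatched to a dedicated _parse_<CMD>
        # handler when one exists, with a few special cases and the generic
        # _parse_many as fallback.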
matches = [cmd for cmd in self.CMDS_DEFS.keys() if response.startswith(cmd)] + [
cmd for cmd in EXTRAS if response.startswith(cmd)
]
if not matches:
return None
if len(matches) > 1:
matches.sort(key=len, reverse=True)
match = matches[0]
if getattr(self, "_parse_" + match, None):
getattr(self, "_parse_" + match)(response.strip()[len(match) :].strip())
else:
# A few special cases ... for now
if response.startswith("SSINFAISFSV"):
try:
sr = int(only_int(response.split(" ")[-1]))
if sr > 200:
sr = round(sr / 10, 1)
else:
sr = float(sr)
self.status["Sampling Rate"] = sr
except Exception as e:
if response.split(" ")[-1] == "NON":
                        self.status["Sampling Rate"] = "-"
else:
logging.debug(f"Error with sampling rate: {e}")
else:
self._parse_many(match, response.strip()[len(match) :].strip())
logging.debug(f"Warning _parse_{match} is not defined.")
return match
def _parse_many(self, cmd: str, resp: str) -> None:
for x in self.CMDS_DEFS[cmd].values:
if resp == x.value:
lbl = self.CMDS_DEFS[cmd].label
if self.status[lbl] != cc_string(x.name):
self.status[lbl] = cc_string(x.name)
if self.notify:
self.notify(lbl, self.status[lbl])
def _parse_MV(self, resp: str) -> None:
level = only_int(resp)
if level:
if len(level) > 2:
level = int(level) / 10
else:
level = float(level)
if resp.startswith("MAX"):
self.maxvol = level
else:
lbl = self.CMDS_DEFS["MV"].label
if self.status[lbl] != level:
self.status[lbl] = level
if self.notify:
self.notify(lbl, self.status[lbl])
def _parse_MU(self, resp: str) -> None:
nval = resp == "ON"
lbl = self.CMDS_DEFS["MU"].label
if self.status[lbl] != nval:
self.status[lbl] = nval
if self.notify:
self.notify(lbl, self.status[lbl])
def _parse_Z2MU(self, resp: str) -> None:
nval = resp == "ON"
lbl = self.CMDS_DEFS["Z2MU"].label
if self.status[lbl] != nval:
self.status[lbl] = nval
if self.notify:
self.notify(lbl, self.status[lbl])
def _parse_Z3MU(self, resp: str) -> None:
nval = resp == "ON"
lbl = self.CMDS_DEFS["Z3MU"].label
if self.status[lbl] != nval:
self.status[lbl] = nval
if self.notify:
self.notify(lbl, self.status[lbl])
def _parse_zone(self, zone: str, resp: str) -> None:
""" Naturaly, those idiots had tn overload the zone prefix for
power, volume and source...
"""
if resp in ["ON", "OFF"]:
self._parse_many(zone, resp)
return
if resp.startswith("SMART"):
# not handled
return
if resp.startswith("FAVORITE"):
# not handled, learn to spell!
return
try:
logging.debug(f"Checking level for {zone}")
level = only_int(resp)
if len(level) > 2:
level = int(level) / 10
else:
level = float(level)
lbl = self.CMDS_DEFS[zone + "MV"].label
if self.status[lbl] != level:
self.status[lbl] = level
if self.notify:
self.notify(lbl, self.status[lbl])
except:
# Probably the source
try:
self._parse_many(zone + "SI", resp)
except Exception as e:
logging.debug(f"Failed when parsing {zone}: {e}")
def _parse_Z2(self, resp: str) -> None:
self._parse_zone("Z2", resp)
def _parse_Z3(self, resp: str) -> None:
self._parse_zone("Z3", resp)
def _parse_CV(self, resp: str) -> None:
""" Different here... Needs to be reset"""
if resp == "END":
self.cvend = True
if self.notify:
lbl = self.CMDS_DEFS["CV"].label
self.notify(lbl, self.status[lbl])
else:
if self.cvend:
self.status[self.CMDS_DEFS["CV"].label] = {}
self.cvend = False
spkr, level = resp.split(" ")
if level:
if len(level) > 2:
level = int(level) / 10
else:
level = float(level)
level -= 50
for x in self.CMDS_DEFS["CV"].values:
if x.value == spkr:
spkrname = cc_string(x.name)
break
try:
self.status[self.CMDS_DEFS["CV"].label][spkrname] = level
except:
logging.debug(f"Unknown speaker code {spkr}")
def _parse_SSSOD(self, resp: str) -> None:
""" Different here..."""
if resp == " END":
self.mysources.sort()
logging.debug(f"My source is now {self.mysources}")
return
si, f = resp.split(" ")
if f == "USE":
for x in self.CMDS_DEFS["SSSOD"].values:
if si == x.value:
self.mysources.append(cc_string(x.name))
break
def _parse_MS(self, resp: str) -> None:
""" Different here... What we get is not what we send. So we try to transform
        the result through semi-clever string manipulation
"""
resp = resp.replace("+", " ")
resp = " ".join([x.title() for x in resp.split(" ")])
for old, new in SSTRANSFORM:
resp = resp.replace(old, new)
# Clean up spaces
resp = re.sub(r"[_\W]+", " ", resp)
lbl = self.CMDS_DEFS["MS"].label
if self.status[lbl] != resp:
self.status[lbl] = resp
if self.notify:
self.notify(lbl, self.status[lbl])
def _parse_PSDEL(self, resp: str) -> None:
level = only_int(resp)
if level:
level = int(level)
lbl = self.CMDS_DEFS["PSDEL"].label
if self.status[lbl] != level:
self.status[lbl] = level
if self.notify:
self.notify(lbl, self.status[lbl])
async def _do_read(self):
""" Keep on reading the info coming from the AVR"""
while self.alive:
data = b""
while not data or data[-1] != ord("\r"):
char = await self._reader.read(1)
if char == b"":
break
data += char
if data == b"":
# Gone
self.close()
return
logging.debug(f"Received: {data}")
try:
match = self._process_response(data.decode().strip("\r"))
except Exception as e:
logging.debug(f"Problem processing response: {e}")
async def _do_write(self):
""" Keep on reading the info coming from the AVR"""
while self.alive:
cmd, param = await self.write_queue.get()
if cmd:
await self._send_command(cmd, param)
self.write_queue.task_done()
|
[
"logging.error",
"logging.debug",
"asyncio.get_event_loop",
"logging.warning",
"re.finditer",
"asyncio.open_connection",
"logging.debugf",
"asyncio.Queue",
"re.sub"
] |
[((2077, 2162), 're.finditer', 're.finditer', (['""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)"""', 'identifier'], {}), "('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier\n )\n", (2088, 2162), False, 'import re\n'), ((5493, 5508), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (5506, 5508), False, 'import asyncio\n'), ((20699, 20742), 'logging.debug', 'logging.debug', (['f"""Closed device {self.name}"""'], {}), "(f'Closed device {self.name}')\n", (20712, 20742), False, 'import logging\n'), ((21408, 21442), 'logging.debug', 'logging.debug', (['f"""Sending {tosend}"""'], {}), "(f'Sending {tosend}')\n", (21421, 21442), False, 'import logging\n'), ((21530, 21560), 'logging.debug', 'logging.debug', (['"""Write drained"""'], {}), "('Write drained')\n", (21543, 21560), False, 'import logging\n'), ((27824, 27852), 're.sub', 're.sub', (['"""[_\\\\W]+"""', '""" """', 'resp'], {}), "('[_\\\\W]+', ' ', resp)\n", (27830, 27852), False, 'import re\n'), ((2969, 3009), 'asyncio.open_connection', 'asyncio.open_connection', (['host'], {'port': 'port'}), '(host, port=port)\n', (2992, 3009), False, 'import asyncio\n'), ((14514, 14576), 'logging.warning', 'logging.warning', (['f"""Channel {chan} is not available right now."""'], {}), "(f'Channel {chan} is not available right now.')\n", (14529, 14576), False, 'import logging\n'), ((15597, 15659), 'logging.warning', 'logging.warning', (['f"""Channel {chan} is not available right now."""'], {}), "(f'Channel {chan} is not available right now.')\n", (15612, 15659), False, 'import logging\n'), ((15786, 15842), 'logging.debugf', 'logging.debugf', (['f"""Channel {chan} it at the upper limit."""'], {}), "(f'Channel {chan} it at the upper limit.')\n", (15800, 15842), False, 'import logging\n'), ((16151, 16228), 'logging.error', 'logging.error', (['f"""Channel {chan} should exist. This should not have happened."""'], {}), "(f'Channel {chan} should exist. This should not have happened.')\n", (16164, 16228), False, 'import logging\n'), ((16419, 16481), 'logging.warning', 'logging.warning', (['f"""Channel {chan} is not available right now."""'], {}), "(f'Channel {chan} is not available right now.')\n", (16434, 16481), False, 'import logging\n'), ((16609, 16666), 'logging.debugf', 'logging.debugf', (['f"""Channel {chan} it at the lowewr limit."""'], {}), "(f'Channel {chan} it at the lowewr limit.')\n", (16623, 16666), False, 'import logging\n'), ((16977, 17054), 'logging.error', 'logging.error', (['f"""Channel {chan} should exist. This should not have happened."""'], {}), "(f'Channel {chan} should exist. This should not have happened.')\n", (16990, 17054), False, 'import logging\n'), ((25152, 25195), 'logging.debug', 'logging.debug', (['f"""Checking level for {zone}"""'], {}), "(f'Checking level for {zone}')\n", (25165, 25195), False, 'import logging\n'), ((27100, 27151), 'logging.debug', 'logging.debug', (['f"""My source is now {self.mysources}"""'], {}), "(f'My source is now {self.mysources}')\n", (27113, 27151), False, 'import logging\n'), ((28841, 28875), 'logging.debug', 'logging.debug', (['f"""Received: {data}"""'], {}), "(f'Received: {data}')\n", (28854, 28875), False, 'import logging\n'), ((5860, 5884), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (5882, 5884), False, 'import asyncio\n'), ((5936, 5960), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (5958, 5960), False, 'import asyncio\n'), ((15325, 15402), 'logging.error', 'logging.error', (['f"""Channel {chan} should exist. 
This should not have happened."""'], {}), "(f'Channel {chan} should exist. This should not have happened.')\n", (15338, 15402), False, 'import logging\n'), ((17377, 17436), 'logging.warning', 'logging.warning', (['f"""Warning: {source} is not a valid source"""'], {}), "(f'Warning: {source} is not a valid source')\n", (17392, 17436), False, 'import logging\n'), ((17722, 17781), 'logging.warning', 'logging.warning', (['f"""Warning: {source} is not a valid source"""'], {}), "(f'Warning: {source} is not a valid source')\n", (17737, 17781), False, 'import logging\n'), ((18067, 18126), 'logging.warning', 'logging.warning', (['f"""Warning: {source} is not a valid source"""'], {}), "(f'Warning: {source} is not a valid source')\n", (18082, 18126), False, 'import logging\n'), ((18405, 18460), 'logging.warning', 'logging.warning', (['f"""Warning: {mode} is not a valid mode"""'], {}), "(f'Warning: {mode} is not a valid mode')\n", (18420, 18460), False, 'import logging\n'), ((18739, 18794), 'logging.warning', 'logging.warning', (['f"""Warning: {mode} is not a valid mode"""'], {}), "(f'Warning: {mode} is not a valid mode')\n", (18754, 18794), False, 'import logging\n'), ((19078, 19138), 'logging.warning', 'logging.warning', (['f"""Warning: {mode} is not a valid eco mode"""'], {}), "(f'Warning: {mode} is not a valid eco mode')\n", (19093, 19138), False, 'import logging\n'), ((19782, 19842), 'logging.warning', 'logging.warning', (['f"""Warning: {mode} is not a valid DRC mode"""'], {}), "(f'Warning: {mode} is not a valid DRC mode')\n", (19797, 19842), False, 'import logging\n'), ((20151, 20222), 'logging.warning', 'logging.warning', (['f"""Warning: {mode} is not a valid Dynamic Volume mode"""'], {}), "(f'Warning: {mode} is not a valid Dynamic Volume mode')\n", (20166, 20222), False, 'import logging\n'), ((22841, 22897), 'logging.debug', 'logging.debug', (['f"""Warning _parse_{match} is not defined."""'], {}), "(f'Warning _parse_{match} is not defined.')\n", (22854, 22897), False, 'import logging\n'), ((26900, 26945), 'logging.debug', 'logging.debug', (['f"""Unknown speaker code {spkr}"""'], {}), "(f'Unknown speaker code {spkr}')\n", (26913, 26945), False, 'import logging\n'), ((29018, 29068), 'logging.debug', 'logging.debug', (['f"""Problem processing response: {e}"""'], {}), "(f'Problem processing response: {e}')\n", (29031, 29068), False, 'import logging\n'), ((25750, 25799), 'logging.debug', 'logging.debug', (['f"""Failed when parsing {zone}: {e}"""'], {}), "(f'Failed when parsing {zone}: {e}')\n", (25763, 25799), False, 'import logging\n'), ((22679, 22726), 'logging.debug', 'logging.debug', (['f"""Error with sampling rate: {e}"""'], {}), "(f'Error with sampling rate: {e}')\n", (22692, 22726), False, 'import logging\n')]
|
# -*- coding: utf-8 -*-
import math
from arizona_asr.optim.lr_scheduler.lr_scheduler import LearningRateScheduler
class TransformerLRScheduler(LearningRateScheduler):
""" Transformer Learning Rate Scheduler proposed in "Attention Is All You Need" """
def __init__(self, optimizer, peak_lr, final_lr, final_lr_scale, warmup_steps, decay_steps):
        assert isinstance(warmup_steps, int), "warmup_steps should be integer type"
        assert isinstance(decay_steps, int), "decay_steps should be integer type"
super(TransformerLRScheduler, self).__init__(optimizer, 0.0)
self.final_lr = final_lr
self.peak_lr = peak_lr
self.warmup_steps = warmup_steps
self.decay_steps = decay_steps
self.warmup_rate = self.peak_lr / self.warmup_steps
self.decay_factor = -math.log(final_lr_scale) / self.decay_steps
self.lr = self.init_lr
self.update_step = 0
def _decide_stage(self):
if self.update_step < self.warmup_steps:
return 0, self.update_step
if self.warmup_steps <= self.update_step < self.warmup_steps + self.decay_steps:
return 1, self.update_step - self.warmup_steps
return 2, None
def step(self):
self.update_step += 1
stage, steps_in_stage = self._decide_stage()
if stage == 0:
self.lr = self.update_step * self.warmup_rate
elif stage == 1:
self.lr = self.peak_lr * math.exp(-self.decay_factor * steps_in_stage)
elif stage == 2:
self.lr = self.final_lr
else:
raise ValueError("Undefined stage")
self.set_lr(self.optimizer, self.lr)
return self.lr
|
[
"math.log",
"math.exp"
] |
[((827, 851), 'math.log', 'math.log', (['final_lr_scale'], {}), '(final_lr_scale)\n', (835, 851), False, 'import math\n'), ((1471, 1516), 'math.exp', 'math.exp', (['(-self.decay_factor * steps_in_stage)'], {}), '(-self.decay_factor * steps_in_stage)\n', (1479, 1516), False, 'import math\n')]
|
# Generated by Django 3.2.4 on 2021-06-18 04:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timewebapp', '0041_alter_settingsmodel_background_image'),
]
operations = [
migrations.AddField(
model_name='timewebmodel',
name='tags',
field=models.JSONField(blank=True, null=True),
),
]
|
[
"django.db.models.JSONField"
] |
[((359, 398), 'django.db.models.JSONField', 'models.JSONField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (375, 398), False, 'from django.db import migrations, models\n')]
|
from flask import Blueprint, request
from .controllers import Controllers
def make_blueprint(db_connection_string=None, configuration={}): # noqa
"""Create blueprint.
"""
controllers = Controllers(configuration=configuration,
connection_string=db_connection_string)
# Create instance
blueprint = Blueprint('etl_server', 'etl_server')
def query_pipelines_():
return controllers.query_pipelines()
def configuration_():
return controllers.configuration()
def edit_pipeline_():
if request.method == 'POST':
body = request.json
id = body.get('id')
return controllers.create_or_edit_pipeline(id, body)
else:
return {}
def query_pipeline_(id):
return controllers.query_pipeline(id)
def delete_pipeline_(id):
return controllers.delete_pipeline(id)
def start_pipeline_(id):
return controllers.start_pipeline(id)
# Register routes
blueprint.add_url_rule(
'pipelines', 'query_pipelines', query_pipelines_, methods=['GET'])
blueprint.add_url_rule(
'pipeline', 'edit_pipeline', edit_pipeline_, methods=['POST'])
blueprint.add_url_rule(
'pipeline/<id>', 'query_pipeline', query_pipeline_, methods=['GET'])
blueprint.add_url_rule(
'pipeline/start/<id>', 'start_pipeline', start_pipeline_, methods=['POST'])
blueprint.add_url_rule(
'pipeline/<id>', 'delete_pipeline', delete_pipeline_, methods=['DELETE'])
blueprint.add_url_rule(
'configuration', 'configuration', configuration_, methods=['GET'])
# Return blueprint
return blueprint
|
[
"flask.Blueprint"
] |
[((352, 389), 'flask.Blueprint', 'Blueprint', (['"""etl_server"""', '"""etl_server"""'], {}), "('etl_server', 'etl_server')\n", (361, 389), False, 'from flask import Blueprint, request\n')]
|
from dataclasses import dataclass
from typing import Optional, Type, TypeVar
from commanderbot.ext.automod import events
from commanderbot.ext.automod.automod_event import AutomodEvent
from commanderbot.ext.automod.automod_trigger import (
AutomodTrigger,
AutomodTriggerBase,
)
from commanderbot.lib import JsonObject, RolesGuard
ST = TypeVar("ST")
@dataclass
class MemberUpdated(AutomodTriggerBase):
"""
Fires when an `on_typing` event is received.
This occurs when one or more of the following things change:
- status
- activity
- nickname
- roles
- pending
See: https://discordpy.readthedocs.io/en/stable/api.html?highlight=events#discord.on_typing
Attributes
----------
roles
The roles to match against. If empty, all roles will match.
"""
event_types = (events.MemberUpdated,)
roles: Optional[RolesGuard] = None
@classmethod
def from_data(cls: Type[ST], data: JsonObject) -> ST:
roles = RolesGuard.from_field_optional(data, "roles")
return cls(
description=data.get("description"),
roles=roles,
)
def ignore_by_role(self, event: AutomodEvent) -> bool:
if self.roles is None:
return False
return self.roles.ignore(event.member)
def ignore(self, event: AutomodEvent) -> bool:
return self.ignore_by_role(event)
def create_trigger(data: JsonObject) -> AutomodTrigger:
return MemberUpdated.from_data(data)
|
[
"commanderbot.lib.RolesGuard.from_field_optional",
"typing.TypeVar"
] |
[((345, 358), 'typing.TypeVar', 'TypeVar', (['"""ST"""'], {}), "('ST')\n", (352, 358), False, 'from typing import Optional, Type, TypeVar\n'), ((994, 1039), 'commanderbot.lib.RolesGuard.from_field_optional', 'RolesGuard.from_field_optional', (['data', '"""roles"""'], {}), "(data, 'roles')\n", (1024, 1039), False, 'from commanderbot.lib import JsonObject, RolesGuard\n')]
|
import os
import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, CSVLogger, Callback
START_CHAR = '\b'
END_CHAR = '\t'
PADDING_CHAR = '\a'
chars = set([START_CHAR, '\n', END_CHAR])
input_frame = 'shakespeare_short.txt'
model_fname = 'model_keras'
output_fname = 'output.txt'
batchout_fname = 'batch_out.txt'
USE_SIMPLE_MODEL = False
with open(input_frame) as f:
for line in f:
chars.update(list(line.strip().lower()))
char_indicies = {c: i for i, c in enumerate(sorted(list(chars)))}
char_indicies[PADDING_CHAR] = 0
indicies_to_chars = {i: c for c, i in char_indicies.items()}
num_chars = len(chars)
print(num_chars)
def get_one(i, sz):
res = np.zeros(sz)
res[i] = 1
return res
char_vectors = {
c: (np.zeros(num_chars) if c == PADDING_CHAR else get_one(v, num_chars)) for c, v in char_indicies.items()
}
sentence_end_markers = set('.!?')
sentences = []
current_sentence = ''
with open(input_frame, 'r') as f:
for line in f:
s = line.strip().lower()
if len(s) > 0:
current_sentence += s + '\n'
if len(s) == 0 or s[-1] in sentence_end_markers:
current_sentence = current_sentence.strip()
if len(current_sentence) > 10:
sentences.append(current_sentence)
current_sentence = ''
def get_matrices(sentences, max_sentence_len):
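    # One-hot encodes each padded sentence; y is X shifted one step ahead,
    # i.e. the next-character prediction targets.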
X = np.zeros((len(sentences), max_sentence_len, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), max_sentence_len, len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
char_seq = (START_CHAR + sentence + END_CHAR).ljust(max_sentence_len + 1, PADDING_CHAR)
for t in range(max_sentence_len):
X[i, t, :] = char_vectors[char_seq[t]]
y[i, t, :] = char_vectors[char_seq[t + 1]]
return X, y
test_indicies = np.random.choice(range(len(sentences)), int(len(sentences) * 0.05))
sentences_train = [sentences[x] for x in set(range(len(sentences))) - set(test_indicies)]
sentences_test = [sentences[x] for x in test_indicies]
max_sentence_len = np.max([len(x) for x in sentences])
sentences_train = sorted(sentences_train, key=lambda x: len(x))
X_test, y_test = get_matrices(sentences_test, max_sentence_len)
batch_size = 16
print(sentences_train[1])
print(sentences_test[1])
print(X_test.shape)
def generate_batch():
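    # Infinite generator: slices the length-sorted training sentences into
    # fixed-size batches and one-hot encodes each batch for fit_generator.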
while True:
for i in range(int(len(sentences_train) / batch_size)):
sentences_batch = sentences_train[i * batch_size:(i + 1) * batch_size]
yield get_matrices(sentences_batch, max_sentence_len)
class CharSampler(Callback):
def __init__(self, char_vectors, model):
self.char_vectors = char_vectors
self.model = model
def on_train_begin(self, logs={}):
self.epoch = 0
if os.path.isfile(output_fname):
os.remove(output_fname)
def sample(self, preds, temperature=1.0):
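        # Temperature sampling: rescale the log-probabilities by 1/temperature,
        # renormalize, and draw one character index from the multinomial.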
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
def sample_one(self, T):
result = START_CHAR
while len(result) < 500:
Xsampled = np.zeros((1, len(result), num_chars)) # max_sentence_len
for t, c in enumerate(list(result)):
Xsampled[0, t, :] = self.char_vectors[c]
ysampled = self.model.predict(Xsampled, batch_size=1)[0, :]
yv = ysampled[len(result) - 1, :]
selected_char = indicies_to_chars[self.sample(yv, T)]
if selected_char == END_CHAR:
break
result = result + selected_char
return result
def on_epoch_end(self, epoch, logs=None):
self.epoch = self.epoch + 1
if self.epoch % 1 == 0:
print('\nEpoch: %d text sampling:' % self.epoch)
with open(output_fname, 'a') as outf:
outf.write('\n========= Epoch %d =========' % self.epoch)
for T in [.3, .5, .7, .9, 1.1]:
print('\tsampling, T= %.1f...' % T)
for _ in range(5):
self.model.reset_states()
res = self.sample_one(T)
outf.write('\nT=%.1f \n%s \n' % (T, res[1:]))
def on_batch_end(self, batch, logs={}):
if (batch + 1) % 10 == 0:
print('\nBatch %d text sampling: ' % batch)
with open(output_fname, 'a') as outf:
outf.write('\n========= Batch %d =========' % batch)
for T in [.3, .5, .7, .9, 1.1]:
print('\tsampling, T= %.1f...' % T)
for _ in range(5):
self.model.reset_states()
res = self.sample_one(T)
outf.write(res + '\n')
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.loss = []
self.acc = []
def on_batch_end(self, batch, logs={}):
self.loss.append(logs.get('loss'))
self.acc.append(logs.get('acc'))
if (batch + 1) % 100 == 0:
with open(batchout_fname, 'a') as outf:
for i in range(100):
outf.write('%d\t%.6f\t%.6f\n' %
(batch + i - 99, self.loss[i - 100], self.acc[i - 100]))
if USE_SIMPLE_MODEL:
# simple model
vec = Input(shape=(None, num_chars))
l1 = LSTM(128, activation='tanh', return_sequences=True)(vec)
l1_d = Dropout(0.2)(l1)
dense = TimeDistributed(Dense(num_chars))(l1_d)
output_res = Activation('softmax')(dense)
model = Model(input=vec, outputs=output_res)
else:
# deep model
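    # Three stacked LSTMs: the second and third also see the raw character
    # input, and the softmax layer is fed the concatenation of all three
    # LSTM outputs.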
vec = Input(shape=(None, num_chars))
l1 = LSTM(128, activation='tanh', return_sequences=True)(vec)
l1_d = Dropout(0.2)(l1)
input2 = concatenate([vec, l1_d])
l2 = LSTM(128, activation='tanh', return_sequences=True)(input2)
l2_d = Dropout(0.2)(l2)
input3 = concatenate([vec, l2_d])
l3 = LSTM(128, activation='tanh', return_sequences=True)(input3)
    l3_d = Dropout(0.2)(l3)
input_d = concatenate([l1_d, l2_d, l3_d])
dense3 = TimeDistributed(Dense(num_chars))(input_d)
output_res = Activation('softmax')(dense3)
model = Model(input=vec, outputs=output_res)
model.compile(loss='categorical_crossentropy', optimizer=Adam(clipnorm=1.), metrics=['accuracy'])
cb_sampler = CharSampler(char_vectors, model)
cb_logger = CSVLogger(model_fname + '.log')
cb_checkpoint = ModelCheckpoint("model.hdf5", monitor='val_acc', save_best_only=True, save_weights_only=False)
model.fit_generator(generate_batch(),
int(len(sentences_train) / batch_size) * batch_size,
epochs=10,
verbose=True,
validation_data=(X_test, y_test),
callbacks=[cb_logger, cb_sampler, cb_checkpoint])
|
[
"os.remove",
"numpy.sum",
"numpy.argmax",
"numpy.random.multinomial",
"keras.models.Model",
"os.path.isfile",
"numpy.exp",
"keras.layers.Input",
"keras.layers.concatenate",
"keras.callbacks.ModelCheckpoint",
"keras.layers.Dropout",
"numpy.asarray",
"keras.optimizers.Adam",
"numpy.log",
"keras.layers.Activation",
"keras.layers.LSTM",
"numpy.zeros",
"keras.layers.Dense",
"keras.callbacks.CSVLogger"
] |
[((6724, 6755), 'keras.callbacks.CSVLogger', 'CSVLogger', (["(model_fname + '.log')"], {}), "(model_fname + '.log')\n", (6733, 6755), False, 'from keras.callbacks import ModelCheckpoint, CSVLogger, Callback\n'), ((6772, 6870), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""model.hdf5"""'], {'monitor': '"""val_acc"""', 'save_best_only': '(True)', 'save_weights_only': '(False)'}), "('model.hdf5', monitor='val_acc', save_best_only=True,\n save_weights_only=False)\n", (6787, 6870), False, 'from keras.callbacks import ModelCheckpoint, CSVLogger, Callback\n'), ((857, 869), 'numpy.zeros', 'np.zeros', (['sz'], {}), '(sz)\n', (865, 869), True, 'import numpy as np\n'), ((5665, 5695), 'keras.layers.Input', 'Input', ([], {'shape': '(None, num_chars)'}), '(shape=(None, num_chars))\n', (5670, 5695), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((5900, 5936), 'keras.models.Model', 'Model', ([], {'input': 'vec', 'outputs': 'output_res'}), '(input=vec, outputs=output_res)\n', (5905, 5936), False, 'from keras.models import Sequential, Model\n'), ((5970, 6000), 'keras.layers.Input', 'Input', ([], {'shape': '(None, num_chars)'}), '(shape=(None, num_chars))\n', (5975, 6000), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6109, 6133), 'keras.layers.concatenate', 'concatenate', (['[vec, l1_d]'], {}), '([vec, l1_d])\n', (6120, 6133), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6245, 6269), 'keras.layers.concatenate', 'concatenate', (['[vec, l2_d]'], {}), '([vec, l2_d])\n', (6256, 6269), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6382, 6413), 'keras.layers.concatenate', 'concatenate', (['[l1_d, l2_d, l3_d]'], {}), '([l1_d, l2_d, l3_d])\n', (6393, 6413), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6529, 6565), 'keras.models.Model', 'Model', ([], {'input': 'vec', 'outputs': 'output_res'}), '(input=vec, outputs=output_res)\n', (6534, 6565), False, 'from keras.models import Sequential, Model\n'), ((927, 946), 'numpy.zeros', 'np.zeros', (['num_chars'], {}), '(num_chars)\n', (935, 946), True, 'import numpy as np\n'), ((2984, 3012), 'os.path.isfile', 'os.path.isfile', (['output_fname'], {}), '(output_fname)\n', (2998, 3012), False, 'import os\n'), ((3213, 3226), 'numpy.exp', 'np.exp', (['preds'], {}), '(preds)\n', (3219, 3226), True, 'import numpy as np\n'), ((3290, 3324), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'preds', '(1)'], {}), '(1, preds, 1)\n', (3311, 3324), True, 'import numpy as np\n'), ((3340, 3357), 'numpy.argmax', 'np.argmax', (['probas'], {}), '(probas)\n', (3349, 3357), True, 'import numpy as np\n'), ((5705, 5756), 'keras.layers.LSTM', 'LSTM', (['(128)'], {'activation': '"""tanh"""', 'return_sequences': '(True)'}), "(128, activation='tanh', return_sequences=True)\n", (5709, 5756), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((5773, 5785), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (5780, 5785), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((5859, 5880), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (5869, 
5880), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6010, 6061), 'keras.layers.LSTM', 'LSTM', (['(128)'], {'activation': '"""tanh"""', 'return_sequences': '(True)'}), "(128, activation='tanh', return_sequences=True)\n", (6014, 6061), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6078, 6090), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (6085, 6090), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6143, 6194), 'keras.layers.LSTM', 'LSTM', (['(128)'], {'activation': '"""tanh"""', 'return_sequences': '(True)'}), "(128, activation='tanh', return_sequences=True)\n", (6147, 6194), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6214, 6226), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (6221, 6226), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6279, 6330), 'keras.layers.LSTM', 'LSTM', (['(128)'], {'activation': '"""tanh"""', 'return_sequences': '(True)'}), "(128, activation='tanh', return_sequences=True)\n", (6283, 6330), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6350, 6362), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (6357, 6362), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6487, 6508), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (6497, 6508), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6624, 6642), 'keras.optimizers.Adam', 'Adam', ([], {'clipnorm': '(1.0)'}), '(clipnorm=1.0)\n', (6628, 6642), False, 'from keras.optimizers import Adam\n'), ((3026, 3049), 'os.remove', 'os.remove', (['output_fname'], {}), '(output_fname)\n', (3035, 3049), False, 'import os\n'), ((3165, 3178), 'numpy.log', 'np.log', (['preds'], {}), '(preds)\n', (3171, 3178), True, 'import numpy as np\n'), ((3255, 3272), 'numpy.sum', 'np.sum', (['exp_preds'], {}), '(exp_preds)\n', (3261, 3272), True, 'import numpy as np\n'), ((5818, 5834), 'keras.layers.Dense', 'Dense', (['num_chars'], {}), '(num_chars)\n', (5823, 5834), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6443, 6459), 'keras.layers.Dense', 'Dense', (['num_chars'], {}), '(num_chars)\n', (6448, 6459), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((3113, 3130), 'numpy.asarray', 'np.asarray', (['preds'], {}), '(preds)\n', (3123, 3130), True, 'import numpy as np\n')]
|
import os
import os.path as osp
dir_path = osp.join( os.path.expanduser('~'), "space/trt_plugin/build/lib/")
if not osp.exists(dir_path):
if "AMIRSTAN_LIBRARY_PATH" in os.environ:
dir_path = os.environ["AMIRSTAN_LIBRARY_PATH"]
else:
dir_path = os.path.dirname(os.path.realpath(__file__))
|
[
"os.path.realpath",
"os.path.expanduser",
"os.path.exists"
] |
[((53, 76), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (71, 76), False, 'import os\n'), ((117, 137), 'os.path.exists', 'osp.exists', (['dir_path'], {}), '(dir_path)\n', (127, 137), True, 'import os.path as osp\n'), ((285, 311), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (301, 311), False, 'import os\n')]
|
from django.db import models
from django.db.models import Q, Max
import logging
from wouso.core.config.models import IntegerSetting
from wouso.core.game.models import Game
from wouso.core.user.models import Player
from wouso.games.challenge.models import Challenge, ChallengeUser
class GrandChallengeUser(Player):
""" Extension of the user profile for GrandChallenge """
lost = models.IntegerField(default=0)
last_round = models.IntegerField(default=0)
def get_challenges(self):
"""
Return a queryset of grandchallenges for this player
"""
return Challenge.objects.filter(id__in=GrandChallenge.objects.filter(Q(challenge__user_from__user__id=self.id)|Q(challenge__user_to__user__id=self.id)).order_by('round').values('challenge'))
def get_active(self):
"""
Return a list of active GrandChallenges for this user
"""
return self.get_challenges().filter(status='A')
def get_played(self):
"""
Return a list of played GrandChallenges, ordered by round
"""
return self.get_challenges().filter(status__in=('D', 'P'))
def increase_lost(self):
self.lost += 1
self.save()
def set_last_round(self, round_number):
self.last_round = round_number
self.save()
class GrandChallenge(models.Model):
challenge = models.ForeignKey(Challenge, blank=True, null=True)
round = models.IntegerField(blank=True, null=True)
ALL = []
OUT_PLAY = []
CHALLENGES= []
def __oldinit__(self, user_from, user_to):
# TODO: change this constructor to a classmethod
if not GrandChallengeGame.is_final() and not GrandChallengeGame.is_winner():
self.branch = max(user_from.lost, user_to.lost)
else:
self.branch = min(user_from.lost, user_to.lost)
self.user_from = user_from
self.user_to = user_to
self.__class__.ALL.append(self)
self.won, self.lost = None, None
self.active = True
self.round_number = None
challenge_user_to = user_to.user.get_profile().get_extension(ChallengeUser)
challenge_user_from = user_from.user.get_profile().get_extension(ChallengeUser)
chall = Challenge.create(challenge_user_from, challenge_user_to)
chall.accept()
self.challenge_id = chall.id
self.__class__.CHALLENGES.append(chall.id)
@classmethod
def create(cls, user_from, user_to, round):
""" Create a new Challenge and automatically accept it.
"""
grand_challenge = cls.objects.create(round=round)
user_from = user_from.user.get_profile()
user_to = user_to.user.get_profile()
grand_challenge.challenge = Challenge.create(user_from.get_extension(ChallengeUser), user_to.get_extension(ChallengeUser))
grand_challenge.challenge.accept()
grand_challenge.save()
return grand_challenge
@classmethod
def get_challenges(cls):
return cls.ALL
@classmethod
def active(cls):
return filter(lambda c: c.active, cls.ALL)
@classmethod
def all_done(cls):
for i in cls.CHALLENGES:
x = Challenge.objects.get(id = i)
if x.status != "P":
return False
return True
def play(self, round_number):
        winner = Challenge.objects.get(id=self.challenge_id).winner  # must be generated by the game
if winner.user == self.user_from.user:
self.won = self.user_from
self.lost = self.user_to
self.user_to.lost += 1
else:
self.won = self.user_to
self.lost = self.user_from
self.user_from.lost += 1
self.active = False
self.round_number = round_number
@classmethod
def played_with(cls, user):
ret = []
for c in [c for c in cls.ALL if not c.active]:
if c.user_from == user:
ret.append(c.user_to)
elif c.user_to == user:
ret.append(c.user_from)
return ret
@classmethod
def joaca(cls, round_number):
for c in GrandChallenge.active():
            # the round number...
c.play(round_number)
if(c.lost.lost == 2):
cls.OUT_PLAY.append(c.lost)
#print c.lost
@classmethod
def clasament(cls):
arb_win = GrandChallengeGame.eligible(0)
arb_lose = GrandChallengeGame.eligible(1)
if(len(arb_win) == 1):
cls.OUT_PLAY.append(arb_win[0])
if(len(arb_lose) == 1):
cls.OUT_PLAY.append(arb_lose[0])
results = cls.OUT_PLAY
results.reverse()
return results
class Round(object):
def __init__(self, round_number):
self.round_number = int(round_number)
def challenges(self):
"""
Return a list of challenges in this round, ordered by status
"""
return [gc.challenge for gc in GrandChallenge.objects.filter(round=self.round_number).order_by('challenge__status')]
def info(self):
"""
Return a dictionary with information about this round
"""
return {}
def participants(self):
ps = set([c.user_from.user for c in self.challenges()] + [c.user_to.user for c in self.challenges()])
ps = map(lambda a: a.get_extension(GrandChallengeUser), ps)
return ps
def rounds(self):
"""
Return a list of previous rounds, as an iterator
"""
if self.round_number > 0:
for i in range(self.round_number):
yield Round(i + 1)
def __repr__(self):
return '<' + 'Round ' + unicode(self.round_number) + '>'
class GrandChallengeGame(Game):
ALL = []
round_number = 0
def __init__(self, *args, **kwargs):
# Set parent's fields
self._meta.get_field('verbose_name').default = "GrandChallenges"
self._meta.get_field('short_name').default = ""
# the url field takes as value only a named url from module's urls.py
self._meta.get_field('url').default = "grandchallenge_index_view"
super(GrandChallengeGame, self).__init__(*args, **kwargs)
@classmethod
def base_query(cls):
return GrandChallengeUser.objects.exclude(user__is_superuser=True).exclude(race__can_play=False)
@classmethod
def is_started(cls):
setting_round = IntegerSetting.get('gc_round')
return setting_round.get_value() > 0
@classmethod
def reset(cls):
"""
Reset a GC game, set every user lost to 0
"""
GrandChallenge.objects.all().delete()
GrandChallengeUser.objects.update(lost=0, last_round=0)
cls.set_current_round(0)
@classmethod
def create_users(cls):
"""
        Create GrandChallengeUser extensions for all eligible players.
"""
for p in Player.objects.exclude(race__can_play=False):
p.get_extension(GrandChallengeUser)
@classmethod
def start(cls):
"""
        Create challenges for each pair of consecutive players. Return a list of created challenges.
"""
cls.create_users()
challenges = []
round = 1
last = None
for user in cls.base_query():
u = user.user.get_profile()
if last is None:
last = u
else:
c = GrandChallenge.create(u, last, round)
challenges.append(c)
last = None
setting_round = IntegerSetting.get('gc_round')
setting_round.set_value(round)
return challenges
@classmethod
def eligible(cls, lost_count):
""" Return a queryset with players of lost_count
"""
return cls.base_query().filter(lost=lost_count)
@classmethod
def is_final(cls):
arb_win = cls.eligible(0)
arb_lose = cls.eligible(1)
if (len(arb_win) == 1) and (len(arb_lose) == 1):
return True
return False
@classmethod
def final_round(cls):
arb_win = cls.eligible(0)
arb_lose = cls.eligible(1)
GrandChallenge(arb_win[0], arb_lose[0])
@classmethod
def final_second_round(cls):
GrandChallengeGame.play_round(1)
@classmethod
def is_winner(cls):
arb_win = cls.eligible(0)
arb_lose = cls.eligible(1)
if (len(arb_win) == 0) and (len(arb_lose) == 2):
return False
return True
@classmethod
def is_finished(cls):
arb_win = cls.eligible(0)
arb_lose = cls.eligible(1)
if len(arb_win) == 0 or (len(arb_win) == 1 and len(arb_lose) != 1):
return True
return False
@classmethod
def play_round(cls, lost_count, round_number):
"""
Create new challenges.
"""
if lost_count == 0:
all = GrandChallengeGame.eligible(0)
elif lost_count == 1:
all = GrandChallengeGame.eligible(1)
all = list(all)
challenges = []
while len(all):
u = all[0]
played_with = GrandChallenge.played_with(u)
adversari = [eu for eu in all if ((eu.lost == u.lost) and (eu != u) and ((eu not in played_with) or (eu == all[-1])) )]
if not len(adversari):
break
try:
adversar = adversari[0]
all.remove(adversar)
all.remove(u)
c = GrandChallenge.create(u, adversar, round_number)
challenges.append(c)
except Exception as e:
logging.exception(e)
return challenges
@classmethod
def set_current_round(cls, number):
setting_round = IntegerSetting.get('gc_round')
setting_round.set_value(number)
@classmethod
def get_current_round(cls):
setting_round = IntegerSetting.get('gc_round')
round = setting_round.get_value()
if round == 0:
return None
return cls.get_round(round)
@classmethod
def get_round(cls, round):
return Round(round_number=round)
@classmethod
def get_winner(cls):
"""
Return gc winner
"""
if cls.is_finished():
final_gc = GrandChallenge.objects.filter(round=cls.get_current_round().round_number)[0]
return final_gc.challenge.winner.user.get_profile()
return None
@classmethod
def force_round_close(cls, round):
"""
Finish every challenge in the round
"""
for c in round.challenges():
if c.is_runnable():
c.set_expired()
if c.is_draw():
# Temporary hack FIXME
if c.user_from.seconds_took < c.user_to.seconds_took:
c.set_won_by_player(c.user_from.user)
else:
c.set_won_by_player(c.user_to.user)
gc_user_from = c.user_from.user.get_extension(GrandChallengeUser)
gc_user_to = c.user_to.user.get_extension(GrandChallengeUser)
            # Update lost count
if c.user_from.user == c.winner:
if gc_user_to.last_round < round.round_number:
gc_user_to.increase_lost()
elif c.user_to.user == c.winner:
if gc_user_from.last_round < round.round_number:
gc_user_from.increase_lost()
gc_user_from.set_last_round(round.round_number)
gc_user_to.set_last_round(round.round_number)
@classmethod
def round_next(cls):
"""
Progress to next round
"""
if cls.is_finished():
logging.error('Grand challenge finished.')
return None
round = cls.get_current_round()
cls.force_round_close(round)
challenges = []
if cls.is_final():
# Only two players left in the game
arb_win = cls.eligible(0)
arb_lose = cls.eligible(1)
challenges.append(GrandChallenge.create(arb_win[0], arb_lose[0], round.round_number + 1))
else:
# More than two players, create new challenges
if round.round_number % 2 == 1:
challenges += cls.play_round(1, round.round_number + 1)
challenges += cls.play_round(0, round.round_number + 1)
else:
challenges += cls.play_round(1, round.round_number + 1)
if challenges:
# Update round number
round.round_number += 1
cls.set_current_round(round.round_number)
logging.debug('Played round %s' % round.round_number)
return round
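# --- Illustrative driver sketch, not part of the game logic above ---
# A minimal, hypothetical way to run a full Grand Challenge bracket with the
# classes defined in this module; it assumes an initialized wouso database
# with eligible players.
def _example_run_grand_challenge():
    GrandChallengeGame.reset()
    GrandChallengeGame.start()               # pair eligible players, open round 1
    while not GrandChallengeGame.is_finished():
        GrandChallengeGame.round_next()      # close the current round, pair the next
    return GrandChallengeGame.get_winner()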
|
[
"logging.error",
"wouso.games.challenge.models.Challenge.objects.get",
"logging.debug",
"logging.exception",
"django.db.models.ForeignKey",
"wouso.core.user.models.Player.objects.exclude",
"django.db.models.Q",
"django.db.models.IntegerField",
"wouso.core.config.models.IntegerSetting.get",
"wouso.games.challenge.models.Challenge.create"
] |
[((388, 418), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (407, 418), False, 'from django.db import models\n'), ((436, 466), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (455, 466), False, 'from django.db import models\n'), ((1365, 1416), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Challenge'], {'blank': '(True)', 'null': '(True)'}), '(Challenge, blank=True, null=True)\n', (1382, 1416), False, 'from django.db import models\n'), ((1429, 1471), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1448, 1471), False, 'from django.db import models\n'), ((2242, 2298), 'wouso.games.challenge.models.Challenge.create', 'Challenge.create', (['challenge_user_from', 'challenge_user_to'], {}), '(challenge_user_from, challenge_user_to)\n', (2258, 2298), False, 'from wouso.games.challenge.models import Challenge, ChallengeUser\n'), ((6429, 6459), 'wouso.core.config.models.IntegerSetting.get', 'IntegerSetting.get', (['"""gc_round"""'], {}), "('gc_round')\n", (6447, 6459), False, 'from wouso.core.config.models import IntegerSetting\n'), ((6919, 6963), 'wouso.core.user.models.Player.objects.exclude', 'Player.objects.exclude', ([], {'race__can_play': '(False)'}), '(race__can_play=False)\n', (6941, 6963), False, 'from wouso.core.user.models import Player\n'), ((7556, 7586), 'wouso.core.config.models.IntegerSetting.get', 'IntegerSetting.get', (['"""gc_round"""'], {}), "('gc_round')\n", (7574, 7586), False, 'from wouso.core.config.models import IntegerSetting\n'), ((9779, 9809), 'wouso.core.config.models.IntegerSetting.get', 'IntegerSetting.get', (['"""gc_round"""'], {}), "('gc_round')\n", (9797, 9809), False, 'from wouso.core.config.models import IntegerSetting\n'), ((9924, 9954), 'wouso.core.config.models.IntegerSetting.get', 'IntegerSetting.get', (['"""gc_round"""'], {}), "('gc_round')\n", (9942, 9954), False, 'from wouso.core.config.models import IntegerSetting\n'), ((12669, 12722), 'logging.debug', 'logging.debug', (["('Played round %s' % round.round_number)"], {}), "('Played round %s' % round.round_number)\n", (12682, 12722), False, 'import logging\n'), ((3190, 3217), 'wouso.games.challenge.models.Challenge.objects.get', 'Challenge.objects.get', ([], {'id': 'i'}), '(id=i)\n', (3211, 3217), False, 'from wouso.games.challenge.models import Challenge, ChallengeUser\n'), ((3353, 3396), 'wouso.games.challenge.models.Challenge.objects.get', 'Challenge.objects.get', ([], {'id': 'self.challenge_id'}), '(id=self.challenge_id)\n', (3374, 3396), False, 'from wouso.games.challenge.models import Challenge, ChallengeUser\n'), ((11737, 11779), 'logging.error', 'logging.error', (['"""Grand challenge finished."""'], {}), "('Grand challenge finished.')\n", (11750, 11779), False, 'import logging\n'), ((9650, 9670), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (9667, 9670), False, 'import logging\n'), ((660, 701), 'django.db.models.Q', 'Q', ([], {'challenge__user_from__user__id': 'self.id'}), '(challenge__user_from__user__id=self.id)\n', (661, 701), False, 'from django.db.models import Q, Max\n'), ((702, 741), 'django.db.models.Q', 'Q', ([], {'challenge__user_to__user__id': 'self.id'}), '(challenge__user_to__user__id=self.id)\n', (703, 741), False, 'from django.db.models import Q, Max\n')]
|
"""
Cisco Intersight Universal API Calls Module, v1.1
Author: <NAME>
Contact: <EMAIL>, <EMAIL>
Summary: The Cisco Intersight Universal API Calls module provides
a set of functions that simplify creation, retrieval,
modification, and deletion of resources on Cisco Intersight.
"""
# Import needed Python modules
import sys
import json
import requests
import os
import intersight
from intersight.intersight_api_client import IntersightApiClient
# MODULE REQUIREMENT 1
"""
For the following variable below named key_id, please fill in between
the quotes your Intersight API Key ID.
Here is an example: key_id = "<KEY>"
"""
key_id = ""
# MODULE REQUIREMENT 2
"""
For the following variable below named key, please fill in between
the quotes your system's file path to your Intersight API key "SecretKey.txt" file.
Here is an example: key = "C:\Keys\Key1\SecretKey.txt"
"""
key = ""
# Define Intersight SDK IntersightApiClient variables
# Tested on Cisco Intersight API Reference v1.0.9-853
base_url = "https://intersight.com/api/v1"
api_instance = IntersightApiClient(host=base_url,private_key=key,api_key_id=key_id)
# Establish Intersight Universal Functions
def iu_get(api_path):
"""This is a function to perform a universal or generic GET on objects under available Intersight API types,
including those not yet defined in the Intersight SDK for Python. An argument for the API type path is required.
Args:
api_path: The path to the targeted Intersight API type. For example, to specify the Intersight API type for
adapter configuration policies, enter "adapter/ConfigPolicies". More API types can be found in the Intersight
API reference library at https://intersight.com/apidocs/introduction/overview/.
Returns:
A dictionary containing all objects of the specified API type. If the API type is inaccessible, an
implicit value of None will be returned.
"""
full_resource_path = "/" + api_path
try:
api_instance.call_api(full_resource_path,"GET")
response = api_instance.last_response.data
results = json.loads(response)
print("The API resource path '" + api_path + "' has been accessed successfully.\n")
return results
except:
print("Unable to access the API resource path '" + api_path + "'.\n")
def iu_get_moid(api_path,moid):
"""This is a function to perform a universal or generic GET on a specified object under available
Intersight API types, including those not yet defined in the Intersight SDK for Python. An argument for the
API type path and MOID (managed object identifier) is required.
Args:
api_path: The path to the targeted Intersight API type. For example, to specify the Intersight API type for
adapter configuration policies, enter "adapter/ConfigPolicies". More API types can be found in the Intersight
API reference library at https://intersight.com/apidocs/introduction/overview/.
moid: The managed object ID of the targeted API object.
Returns:
A dictionary containing all parameters of the specified API object. If the API object is inaccessible, an
implicit value of None will be returned.
"""
full_resource_path = "/" + api_path + "/" + moid
try:
api_instance.call_api(full_resource_path,"GET")
response = api_instance.last_response.data
results = json.loads(response)
print("The object located at the resource path '" + full_resource_path + "' has been accessed succesfully.\n")
return results
except:
print("Unable to access the object located at the resource path '" + full_resource_path + "'.\n")
def iu_delete_moid(api_path,moid):
"""This is a function to perform a universal or generic DELETE on a specified object under available
Intersight API types, including those not yet defined in the Intersight SDK for Python. An argument for the
API type path and MOID (managed object identifier) is required.
Args:
api_path: The path to the targeted Intersight API type. For example, to specify the Intersight API type for
adapter configuration policies, enter "adapter/ConfigPolicies". More API types can be found in the Intersight
API reference library at https://intersight.com/apidocs/introduction/overview/.
moid: The managed object ID of the targeted API object.
Returns:
A statement indicating whether the DELETE method was successful or failed.
Raises:
        Exception: An exception occurred while performing the API call. The exact error will be
specified.
"""
full_resource_path = "/" + api_path + "/" + moid
try:
api_instance.call_api(full_resource_path,"DELETE")
print("The deletion of the object located at the resource path '" + full_resource_path + "' has been completed.\n")
return "The DELETE method was successful."
except Exception as exception_message:
print("Unable to access the object located at the resource path '" + full_resource_path + "'.\n")
print(exception_message)
return "The DELETE method failed."
def iu_post(api_path,body):
"""This is a function to perform a universal or generic POST of an object under available Intersight
API types, including those not yet defined in the Intersight SDK for Python. An argument for the
API type path and body configuration data is required.
Args:
api_path: The path to the targeted Intersight API type. For example, to specify the Intersight API type for
adapter configuration policies, enter "adapter/ConfigPolicies". More API types can be found in the Intersight
API reference library at https://intersight.com/apidocs/introduction/overview/.
body: The content to be created under the targeted API type. This should be provided in a dictionary format.
Returns:
A statement indicating whether the POST method was successful or failed.
Raises:
        Exception: An exception occurred while performing the API call. The exact error will be
specified.
"""
full_resource_path = "/" + api_path
try:
api_instance.call_api(full_resource_path,"POST",body=body)
print("The creation of the object under the resource path '" + full_resource_path + "' has been completed.\n")
return "The POST method was successful."
except Exception as exception_message:
print("Unable to create the object under the resource path '" + full_resource_path + "'.\n")
print(exception_message)
return "The POST method failed."
def iu_post_moid(api_path,moid,body):
"""This is a function to perform a universal or generic POST of a specified object under available Intersight
API types, including those not yet defined in the Intersight SDK for Python. An argument for the
API type path, MOID (managed object identifier), and body configuration data is required.
Args:
api_path: The path to the targeted Intersight API type. For example, to specify the Intersight API type for
adapter configuration policies, enter "adapter/ConfigPolicies". More API types can be found in the Intersight
API reference library at https://intersight.com/apidocs/introduction/overview/.
moid: The managed object ID of the targeted API object.
body: The content to be modified on the targeted API object. This should be provided in a dictionary format.
Returns:
A statement indicating whether the POST method was successful or failed.
Raises:
        Exception: An exception occurred while performing the API call. The exact error will be
specified.
"""
full_resource_path = "/" + api_path + "/" + moid
try:
api_instance.call_api(full_resource_path,"POST",body=body)
print("The update of the object located at the resource path '" + full_resource_path + "' has been completed.\n")
return "The POST method was successful."
except Exception as exception_message:
print("Unable to access the object located at the resource path '" + full_resource_path + "'.\n")
print(exception_message)
return "The POST method failed."
def iu_patch_moid(api_path,moid,body):
"""This is a function to perform a universal or generic PATCH of a specified object under available Intersight
API types, including those not yet defined in the Intersight SDK for Python. An argument for the
API type path, MOID (managed object identifier), and body configuration data is required.
Args:
api_path: The path to the targeted Intersight API type. For example, to specify the Intersight API type for
adapter configuration policies, enter "adapter/ConfigPolicies". More API types can be found in the Intersight
API reference library at https://intersight.com/apidocs/introduction/overview/.
moid: The managed object ID of the targeted API object.
body: The content to be modified on the targeted API object. This should be provided in a dictionary format.
Returns:
A statement indicating whether the PATCH method was successful or failed.
Raises:
        Exception: An exception occurred while performing the API call. The exact error will be
specified.
"""
full_resource_path = "/" + api_path + "/" + moid
try:
api_instance.call_api(full_resource_path,"PATCH",body=body)
print("The update of the object located at the resource path '" + full_resource_path + "' has been completed.\n")
return "The PATCH method was successful."
except Exception as exception_message:
print("Unable to access the object located at the resource path '" + full_resource_path + "'.\n")
print(exception_message)
return "The PATCH method failed."
# Verify API key variables have been set
key_id_setting = key_id.strip()
if key_id_setting is None or len(key_id_setting) == 0 or "/" not in key_id_setting:
print("\nThe key_id variable for the intersight_universal_api_calls module has not been set correctly!")
print("Please edit the intersight_universal_api_calls.py file and set the key_id variable \nwith the ID of your API key in order for the module to work properly.")
key_setting = key.strip()
if key_setting is None or len(key_setting) == 0 or not os.path.isfile(key_setting):
print("\nThe key variable for the intersight_universal_api_calls module has not been set correctly!")
print("Please edit the intersight_universal_api_calls.py file and set the key variable \nwith your system's path to your API key SecretKey.txt file in order for the module to work properly.")
|
[
"intersight.intersight_api_client.IntersightApiClient",
"os.path.isfile",
"json.loads"
] |
[((1112, 1182), 'intersight.intersight_api_client.IntersightApiClient', 'IntersightApiClient', ([], {'host': 'base_url', 'private_key': 'key', 'api_key_id': 'key_id'}), '(host=base_url, private_key=key, api_key_id=key_id)\n', (1131, 1182), False, 'from intersight.intersight_api_client import IntersightApiClient\n'), ((2142, 2162), 'json.loads', 'json.loads', (['response'], {}), '(response)\n', (2152, 2162), False, 'import json\n'), ((3419, 3439), 'json.loads', 'json.loads', (['response'], {}), '(response)\n', (3429, 3439), False, 'import json\n'), ((10275, 10302), 'os.path.isfile', 'os.path.isfile', (['key_setting'], {}), '(key_setting)\n', (10289, 10302), False, 'import os\n')]
|
#!/usr/bin/python2
"""worker to read presence records from redis and write to mongodb"""
from pymongo import MongoClient
import syslog
import redis
import json
import time
import datetime
from argparse import ArgumentParser
#from riemann import RiemannClient, RiemannUDPTransport
#rmmonitor = RiemannClient(transport = RiemannUDPTransport,
#host=config.riemann['host'])
#ok_response = {'status': 'ok'}
INQNAME = "oemap_db_worker_in_queue"
REPLYTO = "oemap_www_nodejs_in_queue"
class DbWorker():
def __init__ (self):
parser = ArgumentParser()
parser.add_argument('-n', '--job', dest='job', action='store',
help='worker instance id')
self.args = parser.parse_args()
self.rhost = "127.0.0.1"
self.rport = 6379
self.starttime = datetime.datetime.now()
self.statc = 0
self.stati = 0
self.database = None
def stats(self):
self.statc = self.statc + 1
self.stati = self.stati + 1
if self.stati == 10000:
now = datetime.datetime.now()
dur = now - self.starttime
rate = ''
if dur.seconds > 0:
rate = str(self.stati / dur.seconds) + " per second"
else:
rate = "1000+ per second"
self.log_notice("processed %s records. rate was %s." %
(self.statc, rate))
self.stati = 0
self.starttime = now
FIVE_MIN_IN_SECS = 60 * 5
ONE_HOUR_IN_SECS = 60 * 60
ONE_DAY_IN_SECS = ONE_HOUR_IN_SECS * 24
def setExpireTime(self, rec):
now = datetime.datetime.now()
ttl = rec['ttl']
if ttl == 1:
            rec['exp_time'] = now + datetime.timedelta(0, self.FIVE_MIN_IN_SECS)
elif ttl == 2:
            rec['exp_time'] = now + datetime.timedelta(0, self.ONE_HOUR_IN_SECS)
elif ttl == 3:
            rec['exp_time'] = now + datetime.timedelta(0, self.ONE_DAY_IN_SECS)
else:
rec['exp_time'] = now # ready for sweeper
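    # Example of a presence record this worker expects on the queue
    # (illustrative only; apart from '_id', 'label' and 'ttl' the fields are
    # defined by the producer, not by this worker):
    #   {"_id": "user-42", "label": "Alice", "ttl": 2}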
def run (self):
while True:
try:
self.log_notice('%s Python impl starting queue %s' % ("test", INQNAME))
rdis = redis.Redis(host=self.rhost, port=self.rport)
client = MongoClient()
self.database = client.oemap_test
while True:
                    item = rdis.brpop(keys=[INQNAME], timeout=600)
                    if item is None:
                        # brpop returns None on timeout; nothing to read yet
                        continue
                    (_, msg) = item
rec = json.loads(msg)
self.log_debug("updating %s for %s" % (rec['_id'],
rec['label']))
self.setExpireTime(rec)
self.database.presences.save(rec)
self.stats()
except Exception:
self.handle_exception()
time.sleep(1)
except: # catch *all* exceptions
self.handle_exception()
time.sleep(1)
def log_debug (self, msg):
syslog.syslog(syslog.LOG_DEBUG, "%s %s" % (self.args.job, msg))
def log_notice (self, msg):
syslog.syslog(syslog.LOG_NOTICE, "%s %s" % (self.args.job, msg))
def log_error (self, msg):
syslog.syslog(syslog.LOG_ERR, "%s %s" % (self.args.job, msg))
def handle_exception(self):
import traceback
formatted_lines = traceback.format_exc().splitlines()
for line in formatted_lines:
self.log_error(line)
if __name__ == "__main__":
DbWorker().run()
|
[
"redis.Redis",
"pymongo.MongoClient",
"argparse.ArgumentParser",
"syslog.syslog",
"json.loads",
"time.sleep",
"datetime.timedelta",
"traceback.format_exc",
"datetime.datetime.now"
] |
[((549, 565), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (563, 565), False, 'from argparse import ArgumentParser\n'), ((805, 828), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (826, 828), False, 'import datetime\n'), ((1628, 1651), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1649, 1651), False, 'import datetime\n'), ((3183, 3246), 'syslog.syslog', 'syslog.syslog', (['syslog.LOG_DEBUG', "('%s %s' % (self.args.job, msg))"], {}), "(syslog.LOG_DEBUG, '%s %s' % (self.args.job, msg))\n", (3196, 3246), False, 'import syslog\n'), ((3288, 3352), 'syslog.syslog', 'syslog.syslog', (['syslog.LOG_NOTICE', "('%s %s' % (self.args.job, msg))"], {}), "(syslog.LOG_NOTICE, '%s %s' % (self.args.job, msg))\n", (3301, 3352), False, 'import syslog\n'), ((3393, 3454), 'syslog.syslog', 'syslog.syslog', (['syslog.LOG_ERR', "('%s %s' % (self.args.job, msg))"], {}), "(syslog.LOG_ERR, '%s %s' % (self.args.job, msg))\n", (3406, 3454), False, 'import syslog\n'), ((1056, 1079), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1077, 1079), False, 'import datetime\n'), ((1734, 1773), 'datetime.timedelta', 'datetime.timedelta', (['(0)', 'FIVE_MIN_IN_SECS'], {}), '(0, FIVE_MIN_IN_SECS)\n', (1752, 1773), False, 'import datetime\n'), ((2215, 2260), 'redis.Redis', 'redis.Redis', ([], {'host': 'self.rhost', 'port': 'self.rport'}), '(host=self.rhost, port=self.rport)\n', (2226, 2260), False, 'import redis\n'), ((2286, 2299), 'pymongo.MongoClient', 'MongoClient', ([], {}), '()\n', (2297, 2299), False, 'from pymongo import MongoClient\n'), ((3539, 3561), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3559, 3561), False, 'import traceback\n'), ((1833, 1872), 'datetime.timedelta', 'datetime.timedelta', (['(0)', 'ONE_HOUR_IN_SECS'], {}), '(0, ONE_HOUR_IN_SECS)\n', (1851, 1872), False, 'import datetime\n'), ((2581, 2596), 'json.loads', 'json.loads', (['msg'], {}), '(msg)\n', (2591, 2596), False, 'import json\n'), ((3010, 3023), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3020, 3023), False, 'import time\n'), ((3125, 3138), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3135, 3138), False, 'import time\n'), ((1932, 1970), 'datetime.timedelta', 'datetime.timedelta', (['(0)', 'ONE_DAY_IN_SECS'], {}), '(0, ONE_DAY_IN_SECS)\n', (1950, 1970), False, 'import datetime\n')]
|
"""Cryptography module.
Encrypt and decrypt user's Bitcoin WIFs.
"""
import rncryptor
from base64 import b64encode, b64decode
def encrypt(wif, password):
return b64encode(rncryptor.RNCryptor().encrypt(data=wif, password=password)).decode('utf-8')
def decrypt(enc_wif, password):
return rncryptor.RNCryptor().decrypt(b64decode(enc_wif.encode('utf-8')), password=password)
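# Illustrative round trip (placeholder values, not a real WIF or password):
#   token = encrypt("example-wif", "example-password")
#   plain = decrypt(token, "example-password")  # recovers "example-wif"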
|
[
"rncryptor.RNCryptor"
] |
[((299, 320), 'rncryptor.RNCryptor', 'rncryptor.RNCryptor', ([], {}), '()\n', (318, 320), False, 'import rncryptor\n'), ((178, 199), 'rncryptor.RNCryptor', 'rncryptor.RNCryptor', ([], {}), '()\n', (197, 199), False, 'import rncryptor\n')]
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import argparse
import itertools
import numpy as np
import pandas as pd
import xgboost as xgb
import matplotlib.pyplot as plt
import sklearn.metrics as metrics
from sklearn.model_selection import GridSearchCV, train_test_split
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
logger = logging.getLogger(__name__)
def eval_models(models, data):
"""Calculates the root mean squared error (RMSE) and the coefficient of
determination (R^2) for each of the models.
:param models: Dictionary of the error model for each state vector
component
:type models: {str: xgboost.XGBRegressor}
:param data: Dictionary containing the training and test datasets
:type data: {str: numpy.array}
:return: Returns a DataFrame containing the evaluation metric results
:rtype: pandas.DataFrame
"""
evals = []
for target_col, reg in models.items():
y_hat = reg.predict(data['X_test'])
y = data['y_test'][target_col]
rmse = metrics.mean_squared_error(y, y_hat, squared=False)
r2 = metrics.r2_score(y, y_hat)
eval_dict = {'Error': target_col, 'RMSE': rmse, 'R^2': r2}
evals.append(eval_dict)
return pd.DataFrame(evals)
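# eval_models() returns one row per state vector component with the columns
# 'Error', 'RMSE' and 'R^2' (this follows directly from eval_dict above).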
def plot_feat_impts(models, data):
"""Plots the feature importances for each of the error models.
For use in an interactive jupyter session.
:param models: Dictionary of the error model for each state vector
component
:type models: {str: xgboost.XGBRegressor}
:param data: Dictionary containing the training and test datasets
:type data: {str: numpy.array}
"""
feat_names = data['X_train'].columns
fig, axs = plt.subplots(2, 3, figsize=(10, 10))
for (target_col, model), ax in zip(models.items(), axs.flat):
feat_imp = pd.Series(model.feature_importances_, index=feat_names)
feat_imp.sort_values(ascending=False, inplace=True)
feat_imp.plot(kind='bar', ax=ax, title=target_col)
plt.ylabel('Feature Importance Score')
plt.tight_layout()
def get_state_vect_cols(prefix):
"""Get the column names of the state vector components with the
provided `prefix`.
:param prefix: The prefix that is used in front of the state vector
components in the column names, examples are `physics_pred` and
`physics_err`
:type prefix: str
:return: A list of the 6 names of the prefixed state vector components
:rtype: [str]
"""
vectors = ['r', 'v']
components = ['x', 'y', 'z']
col_names = [f'{prefix}_{v}_{c}'
for v, c
in itertools.product(vectors, components)]
return col_names
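# For example (follows directly from the itertools.product call above):
#   get_state_vect_cols('physics_err') ->
#   ['physics_err_r_x', 'physics_err_r_y', 'physics_err_r_z',
#    'physics_err_v_x', 'physics_err_v_y', 'physics_err_v_z']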
def load_models(models_dir):
"""Loads previously trained XGBoost models from the `models_dir`
:param models_dir: The path to where the serialized XGBoost JSON files are
:type models_dir: str
:return: A list of the loaded XGBoost models
:rtype: [xgboost.XGBRegressor]
"""
ml_models = []
model_names = get_state_vect_cols('physics_err')
for mn in model_names:
model = xgb.XGBRegressor()
model_path = os.path.join(models_dir, f'{mn}.json')
model.load_model(model_path)
ml_models.append(model)
return ml_models
def save_models(models, models_dir):
"""Saves the error estimations models as JSON representations.
:param models: Dictionary of the error model for each state vector
component
:type models: {str: xgboost.XGBRegressor}
:param models_dir: The path to save the serialized XGBoost JSON files to
:type models_dir: str
"""
for model_name, err_model in models.items():
file_name = f'{model_name}.json'
file_path = os.path.join(models_dir, file_name)
err_model.save_model(file_path)
def predict_err(models, physics_preds):
"""Uses the provide ML models to predict the error in the physics
model orbit prediction.
:param ml_models: The ML models to use to estimate the error in each
of the predicted state vector components.
:type ml_models: [xgboost.XGBRegressor]
:param physcis_preds: The elapsed time in seconds and the predicted
state vectors to estimate the errors for
:type physcis_preds: numpy.array
:return: The estimated errors
:rtype: numpy.array
"""
# Each model predicts the error for its respective state vector component
err_preds = [m.predict(physics_preds) for m in models]
# Orient the error estimates as column vectors
err_preds = np.stack(err_preds, axis=1)
return err_preds
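# Note on predict_err() output (illustrative): with six per-component models
# and N physics predictions as input, np.stack(..., axis=1) yields an (N, 6)
# array whose columns follow the order of the models list.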
def build_train_test_sets(df, test_size=0.2):
"""Builds training and testing sets from the provided DataFrame.
:param df: The DataFrame to use to build training and test sets from
:type df: pandas.DataFrame
:param test_size: The percentage size of the DataFrame that should be used
to build the test set
:type test_size: float
:return: A dictionary containing the feature and target training/test sets
:rtype: dict[str, pandas.DataFrame]
"""
# Features are the physics predicted state vectors and the amount of
# time in seconds into the future the prediction was made
feature_cols = ['elapsed_seconds'] + get_state_vect_cols('physics_pred')
# The target values are the errors between the physical model predictions
# and the ground truth observations
target_cols = get_state_vect_cols('physics_err')
# Create feature and target matrices
X = df[feature_cols]
y = df[target_cols]
# Split feature and target data into training and test sets
data_keys = ['X_train', 'X_test', 'y_train', 'y_test']
data_vals = train_test_split(X, y, test_size=test_size)
train_test_data = dict(zip(data_keys, data_vals))
return train_test_data
def train_models(data, params={}, eval_metric='rmse'):
"""Trains gradient boosted regression tree models to estimate the error in
each of the six state vector components in the physical model prediction
:param data: Dictionary containing the training and test datasets
:type data: {str: numpy.array}
:param params: A dictionary of parameters to pass to the XGBRegressor
constructor
:type params: dict
:param eval_metric: The loss function to use in model training
:type eval_metric: str
:return: Dictionary containing the trained models for each state vector
component
:rtype: {str: xgboost.XGBRegressor}
"""
default_params = {
'booster': 'gbtree',
'tree_method': 'gpu_hist',
'gpu_id': 0
}
default_params.update(params)
X, ys = data['X_train'], data['y_train']
models = {}
for target_col in ys.columns:
y = ys[target_col]
reg = xgb.XGBRegressor(**default_params)
reg.fit(X, y, eval_metric=eval_metric)
models[target_col] = reg
return models
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=('Train baseline XGBoost models to estimate physical '
'prediction error'),
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--input_path',
help=('The path to the parquet file containing the physical model '
'prediction training data'),
type=str,
required=True
)
parser.add_argument(
'--use_gpu',
help='Use a GPU in model training',
action='store_true'
)
parser.add_argument(
'--out_dir',
help=('The directory to serialize the models to'),
type=str,
required=True
)
args = parser.parse_args()
logger.info('Loading physical model orbit prediction training data...')
physics_pred_df = pd.read_parquet(args.input_path)
logger.info('Building training and test sets...')
train_test_data = build_train_test_sets(physics_pred_df)
if args.use_gpu:
params = {}
else:
params = {'tree_method': 'hist'}
logger.info('Training Error Models...')
err_models = train_models(train_test_data, params=params)
logger.info(eval_models(err_models, train_test_data))
logger.info('Serializing Error Models...')
save_models(err_models, args.out_dir)
|
[
"pandas.DataFrame",
"matplotlib.pyplot.tight_layout",
"numpy.stack",
"argparse.ArgumentParser",
"os.path.join",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.r2_score",
"os.environ.get",
"pandas.read_parquet",
"pandas.Series",
"xgboost.XGBRegressor",
"matplotlib.pyplot.ylabel",
"itertools.product",
"matplotlib.pyplot.subplots",
"sklearn.metrics.mean_squared_error",
"logging.getLogger"
] |
[((901, 928), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (918, 928), False, 'import logging\n'), ((1798, 1817), 'pandas.DataFrame', 'pd.DataFrame', (['evals'], {}), '(evals)\n', (1810, 1817), True, 'import pandas as pd\n'), ((2275, 2311), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'figsize': '(10, 10)'}), '(2, 3, figsize=(10, 10))\n', (2287, 2311), True, 'import matplotlib.pyplot as plt\n'), ((2576, 2614), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Feature Importance Score"""'], {}), "('Feature Importance Score')\n", (2586, 2614), True, 'import matplotlib.pyplot as plt\n'), ((2619, 2637), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2635, 2637), True, 'import matplotlib.pyplot as plt\n'), ((5118, 5145), 'numpy.stack', 'np.stack', (['err_preds'], {'axis': '(1)'}), '(err_preds, axis=1)\n', (5126, 5145), True, 'import numpy as np\n'), ((6266, 6309), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size'}), '(X, y, test_size=test_size)\n', (6282, 6309), False, 'from sklearn.model_selection import GridSearchCV, train_test_split\n'), ((7524, 7695), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train baseline XGBoost models to estimate physical prediction error"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Train baseline XGBoost models to estimate physical prediction error',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (7547, 7695), False, 'import argparse\n'), ((8354, 8386), 'pandas.read_parquet', 'pd.read_parquet', (['args.input_path'], {}), '(args.input_path)\n', (8369, 8386), True, 'import pandas as pd\n'), ((856, 890), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', '"""INFO"""'], {}), "('LOGLEVEL', 'INFO')\n", (870, 890), False, 'import os\n'), ((1596, 1647), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y', 'y_hat'], {'squared': '(False)'}), '(y, y_hat, squared=False)\n', (1622, 1647), True, 'import sklearn.metrics as metrics\n'), ((1661, 1687), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['y', 'y_hat'], {}), '(y, y_hat)\n', (1677, 1687), True, 'import sklearn.metrics as metrics\n'), ((2397, 2452), 'pandas.Series', 'pd.Series', (['model.feature_importances_'], {'index': 'feat_names'}), '(model.feature_importances_, index=feat_names)\n', (2406, 2452), True, 'import pandas as pd\n'), ((3671, 3689), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {}), '()\n', (3687, 3689), True, 'import xgboost as xgb\n'), ((3711, 3749), 'os.path.join', 'os.path.join', (['models_dir', 'f"""{mn}.json"""'], {}), "(models_dir, f'{mn}.json')\n", (3723, 3749), False, 'import os\n'), ((4304, 4339), 'os.path.join', 'os.path.join', (['models_dir', 'file_name'], {}), '(models_dir, file_name)\n', (4316, 4339), False, 'import os\n'), ((7349, 7383), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {}), '(**default_params)\n', (7365, 7383), True, 'import xgboost as xgb\n'), ((3196, 3234), 'itertools.product', 'itertools.product', (['vectors', 'components'], {}), '(vectors, components)\n', (3213, 3234), False, 'import itertools\n')]
|
#!/usr/bin/env python
# coding:utf-8
# Code by : <NAME>
# E-mail : <EMAIL>
"""
#set:usage.py,examples.py,changelog.txt
##################################################################
# USAGE :
#s
from imgtoascii import imgtoascii
imgtoascii("<IMAGE_PATH>","<OPTION>").view()
#e
##################################################################
# EXAMPLES :
#s
from imgtoascii import imgtoascii
# Example:1
imgtoascii("test.png").view()
# Example:2
p1=imgtoascii("test.png",False).view()
for i in range(len(p1)):
print(p1[i])
#e
##################################################################
# CHANGELOG :
#s
## 0.0.2
- Fix Bugs.
## 0.0.1
- First public release.
#e
##################################################################
"""
# VALUES :
__version__="0.0.2"
__name__="imgtoascii"
__author__="<NAME> (<NAME>)"
__author_email__="<EMAIL>"
__github_user_name__="yasserbdj96"
__title__="image to ascii."
__description__="Convert images to ascii."
__author_website__=f"https://{__github_user_name__}.github.io/"
__source_code__=f"https://github.com/{__github_user_name__}/{__name__}"
__keywords__=[__github_user_name__,'python']
__keywords__.extend(__title__.split(" "))
__keywords__.extend(__description__.split(" "))
__install_requires__=['pipincluder']
__Installation__="pip install "+__name__+"=="+__version__
__license__='MIT License'
__copyright__='Copyright © 2008->Present, '+__author__+"."
__license_text__=f'''MIT License
{__copyright__}
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
You also agree that if you become very rich you will give me 1% of your wealth.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
##################################################################
#s
from pipincluder import pipincluder
#import pakages by pipincluder:
exec(pipincluder("from PIL import Image",
"from hexor import hexor").modules())
#start imgtoascii class:
class imgtoascii:
#__init__:
def __init__(self,img,oki=True):
self.oki=oki
art="██"
im=Image.open(img)
width,height=im.size
pixels=list(im.getdata())
allpi=[]
linepi=[]
k=1
p1=hexor(True,"rgb")
        for i in range(len(pixels)):
            # append a colored cell for every pixel, then flush the row once
            # `width` cells have been collected
            linepi.append(p1.c(art,f"{pixels[i][0]},{pixels[i][1]},{pixels[i][2]}",f"{pixels[i][0]},{pixels[i][1]},{pixels[i][2]}"))
            if k==width:
                allpi.append(linepi)
                linepi=[]
                k=0
            k+=1
self.allpi=allpi
#view:
def view(self):
allart=self.allpi
image_art=[]
for i in range(len(allart)):
line=""
for j in range(len(allart[i])):
line=line+allart[i][j]
image_art.append(line)
if self.oki==True:
for i in range(len(image_art)):
print(image_art[i])
else:
return image_art
#e
|
[
"pipincluder.pipincluder"
] |
[((2805, 2868), 'pipincluder.pipincluder', 'pipincluder', (['"""from PIL import Image"""', '"""from hexor import hexor"""'], {}), "('from PIL import Image', 'from hexor import hexor')\n", (2816, 2868), False, 'from pipincluder import pipincluder\n')]
|
###############################################################################
# Copyright (C) Intel Corporation
#
# SPDX-License-Identifier: MIT
###############################################################################
"""Build oneVPL-cpu ffmpeg dependencies"""
from io import BytesIO
import sys
import os
import posixpath
import argparse
import subprocess
import shutil
import time
import multiprocessing
import urllib.request
import zipfile
import ssl
from pathlib import Path
from os import environ
from contextlib import contextmanager
# Component Versions
SVT_HEVC_VERSION = '1.5.1'
SVT_AV1_VERSION = 'v0.8.6' # v0.8.7 is missing AVC support
DAV1D_VERSION = '0.9.0'
X264_VERSION = 'stable'
FFMPEG_VERSION = 'n4.4'
# Folder this script is in
SCRIPT_PATH = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
# Number of CPU cores to try to use in parallel
CPU_COUNT = multiprocessing.cpu_count()
# Flag indicating if verbose (debug) logging should be output
VERBOSE = 'VERBOSE' in os.environ
if VERBOSE:
if os.environ['VERBOSE'] not in ['', '-']:
# pylint: disable=consider-using-with
VERBOSE_FILE = open(os.environ['VERBOSE'], 'w')
else:
VERBOSE_FILE = sys.stdout
if os.name == 'nt':
VERBOSE_CMD = '::'
VERBOSE_CMT = '@REM'
else:
VERBOSE_CMD = '# $'
VERBOSE_CMT = '#'
# Optional dictionary with environment options for Git
# mostly used to set an alternate PATH
GIT_ENV = None
# indicate if we prefer to clone, or to download archives
PREFER_CLONE = False
def _escape_cmd_arg(arg):
"""quote/escape and argument for a command line call so that it can
    be safely used even if it has special characters"""
arg = str(arg)
if ' ' in arg or '"' in arg:
return '"' + arg.replace('"', '""') + '"'
return arg
def log(message):
"""Log activity"""
if VERBOSE:
VERBOSE_FILE.write(f"{VERBOSE_CMD} {message}\n")
VERBOSE_FILE.flush()
def log_comment(message):
"""Log a comment"""
if VERBOSE:
VERBOSE_FILE.write(f"{VERBOSE_CMT} {message}\n")
VERBOSE_FILE.flush()
def to_posix_path(path):
"""convert path to posix
On Windows this includes adjusting it based on MinGW drive naming
"""
if os.name != 'nt':
return path
if not path:
return path
parts = path.split('\\')
if len(parts[0]) == 2 and parts[0].endswith(":"):
parts[0] = "/" + parts[0][:-1].lower()
return posixpath.join(*parts)
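# For example (illustrative; exercises the drive-letter branch above):
#   to_posix_path('C:\\tools\\msys64') -> '/c/tools/msys64'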
def set_env(name, value):
"""Set environment variable"""
if os.name == 'nt':
log(f'set {name}={value}')
else:
log(f'export {name}="{value}"')
os.environ[name] = value
def replace(target, old_str, new_str):
"""replace text in a file"""
log_comment(f'replace "{old_str}" with "{new_str}" in {target}')
if os.name == 'nt':
log(f'powershell -Command "(gc {target}) -replace \'{old_str}\', \'{new_str}\' '
+ f'| Out-File -encoding utf8 {target}"')
else:
log(f'sed -i \'s/{old_str}/{new_str}/\' {target}')
with open(target, "r") as file_obj:
content = file_obj.read()
content = content.replace(old_str, new_str)
with open(target, "w") as file_obj:
file_obj.write(content)
@contextmanager
def pushd(*dst):
"""change working directory"""
cur_dir = os.getcwd()
dest = os.path.join(cur_dir, *dst)
os.chdir(dest)
log(f'pushd {dest}')
try:
yield
finally:
log('popd')
log_comment(f' -> {cur_dir}')
os.chdir(cur_dir)
#pylint: disable=invalid-name
def rm(target):
"""delete a file or folder"""
if os.path.exists(target):
# Delete sometimes fails if done immediately, timeout
# is not great, but allows filesystem settings to stabilize.
timeout = time.time() + 10
while time.time() < timeout:
try:
if os.path.isfile(target):
if os.name == 'nt':
log(f'del {target}')
else:
log(f'rm {target}')
os.remove(target)
break
if os.path.isdir(target):
if os.name == 'nt':
log(f'rd /s /q {target}')
else:
log(f'rm -rf {target}')
shutil.rmtree(target)
break
except PermissionError:
time.sleep(1)
def mkdir(target):
"""make a folder"""
if target and not os.path.exists(target):
if os.name == 'nt':
log(f'md {target}')
else:
log(f'mkdir -p {target}')
os.makedirs(target)
# Rarely there is a bit of async delay in filesystem changes.
# If a user script deleted this folder just before running this
# script we may need to wait a moment to see the folder created.
if not os.path.exists(target):
time.sleep(2)
def join_command(command):
"""Join a series or parameters into a command, escaping if needed"""
return ' '.join([_escape_cmd_arg(argument) for argument in command])
def cmd(*args, shell=None, no_throw=False, env=None, xenv=None):
"""Run a command"""
if len(args) == 1:
command = args[0]
else:
command = join_command(args)
if env is not None:
log_comment('Using custom environment for next command')
if xenv is not None:
if env is None:
env = os.environ.copy()
env.update(xenv)
for name in xenv:
log_comment(f'Using "{name}={xenv[name]}" for next command')
exec_cmd = command
if shell is None and os.name != 'nt':
shell = 'bash'
if shell == 'bash':
if os.name == 'nt':
# In Windows bash is unexpected so we will record using it
# as part of the verbose log
command = f"bash -c '{command}'"
exec_cmd = command
else:
# outside Windows we explicitly use bash, but we don't need
# to worry about letting people know we are using it.
exec_cmd = f"exec bash -c '{command}'"
log(f'{command}')
with subprocess.Popen(exec_cmd, shell=True, env=env) as proc:
proc.communicate()
if not no_throw and proc.returncode != 0:
raise Exception(f"Error running command: {command}")
return proc.returncode
def capture_cmd(*args, shell=None, log_errors=True, env=None, xenv=None):
"""Run a command and capture the output"""
if len(args) == 1:
command = args[0]
else:
command = join_command(args)
if env is not None:
log_comment('Using custom environment for next command')
if xenv is not None:
if env is None:
env = os.environ.copy()
env.update(xenv)
for name in xenv:
log_comment(f'Using "{name}={xenv[name]}" for next command')
exec_cmd = command
if shell is None and os.name != 'nt':
shell = 'bash'
if shell == 'bash':
if os.name == 'nt':
# In Windows bash is unexpected so we will record using it
# as part of the verbose log
command = f"bash -c '{command}'"
exec_cmd = command
else:
# outside Windows we explicitly use bash, but we don't need
# to worry about letting people know we are using it.
exec_cmd = f"exec bash -c '{command}'"
log(f'{command}')
with subprocess.Popen(exec_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=True,
env=env) as proc:
result = proc.communicate()
if log_errors and result[1]:
sys.stderr.write(result[1])
return (result[0], result[1], proc.returncode)
class ZipFileWithPermissions(zipfile.ZipFile):
"""ZipFile class that handles file permissions."""
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath, preserving permissions.
"""
if not isinstance(member, zipfile.ZipInfo):
member = self.getinfo(member)
targetpath = super()._extract_member(member, targetpath, pwd)
attr = member.external_attr >> 16
if attr != 0:
os.chmod(targetpath, attr)
return targetpath
def download_archive(url, path):
"""download an archive and unpack it to a folder"""
if not os.path.exists(path):
mkdir(path)
log_comment(f"Downloading {url}")
# bypassing ssl because we keep running into cases where certs have expired.
#pylint: disable=protected-access
context = ssl._create_unverified_context()
with urllib.request.urlopen(url, context=context) as webstream:
with ZipFileWithPermissions(BytesIO(webstream.read())) as archfileobj:
log_comment(f"Extracting {url} to {path} as zip file")
archfileobj.extractall(path)
def is_repo_root(path):
"""check if path is the root of a git working copy"""
output, _, result = capture_cmd('git',
'rev-parse',
"--git-dir",
xenv=GIT_ENV)
log(result)
log(output)
    return (result == 0) and os.path.samefile(os.path.join(output.strip(), ".."), path)
def main():
"""Main steps to build ffmpeg and dependencies"""
proj_dir = str(Path(os.path.dirname(os.path.realpath(sys.argv[0]))).parent)
parser = argparse.ArgumentParser(prog="bootstrap")
parser.add_argument("--config",
'-m',
"--build_mode",
dest='build_mode',
choices=['Release', 'Debug'],
default='Release',
help='Build mode/configuration')
parser.add_argument('-gpl',
"--use_gpl",
"--gpl",
dest='use_gpl',
action="store_true",
help='Use GPL codecs (ex: x264)')
parser.add_argument(
'-A',
"--arch",
dest='arch',
choices=['x86_64', 'x86_32'] if os.name == 'nt' else ['x86_64'],
default='x86_64',
help='Target Architecture')
parser.add_argument(
'--clean',
'-clean',
dest='clean',
action="store_true",
help='Remove previous build/install dirs before starting')
parser.add_argument('--validation',
dest='validation',
action="store_true",
help='Build validation binaries')
# Unused argument for compatibility
parser.add_argument('--bootstrap',
dest='bootstrap',
action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
bootstrap(args.clean, args.use_gpl, args.build_mode, proj_dir, args.arch,
args.validation)
def make_mingw_path(arch):
"""Create PATH setting for MinGW"""
fallback_msys_root = os.path.join('C:\\', 'tools', 'msys64')
if 'MSYS_ROOT' in os.environ:
msys_root = os.environ['MSYS_ROOT']
print(f'MSYS_ROOT found: {msys_root}', file=sys.stderr)
elif os.path.exists(fallback_msys_root):
msys_root = fallback_msys_root
print(f'MSYS_ROOT not found using msys at: {msys_root}',
file=sys.stderr)
else:
        raise Exception('MSys not found')
msys_usr_path = os.path.join(msys_root, 'usr')
msys_usr_bin_path = os.path.join(msys_usr_path, 'bin')
win_path = os.path.join('C:\\', 'Windows')
win_sys_path = os.path.join(win_path, 'System32')
mingw_path = []
if arch == 'x86_32':
mingw_path.append(os.path.join(msys_root, 'mingw32', 'bin'))
mingw_path.append(
os.path.join(msys_root, 'mingw32', 'i686-w64-mingw32', 'bin'))
mingw_path.append(os.path.join(msys_root, 'mingw64', 'bin'))
mingw_path.extend([
os.path.join(msys_usr_path, 'local', 'bin'),
msys_usr_bin_path,
os.path.join(msys_root, 'bin'),
win_sys_path,
win_path,
os.path.join(win_sys_path, 'Wbem'),
os.path.join(win_sys_path, 'WindowsPowerShell', 'v1.0'),
os.path.join(msys_usr_bin_path, 'site_perl'),
os.path.join(msys_usr_bin_path, 'vendor_perl'),
os.path.join(msys_usr_bin_path, 'core_perl'),
])
return os.pathsep.join(mingw_path)
def make_git_path(mingw_path):
"""Create PATH setting for Git"""
git_path = os.environ['PATH']
    # MSYS git does not play well with other gits, so use the user's version if present
git_location = shutil.which('git')
if git_location is None:
git_path = mingw_path
return git_path
#pylint: disable=too-many-arguments,too-many-branches,too-many-statements
def bootstrap(clean, use_gpl, build_mode, proj_dir, arch, validation):
"""Bootstrap install"""
if os.name == 'nt':
#pylint: disable=global-statement
global GIT_ENV
mingw_path = make_mingw_path(arch)
GIT_ENV = {'PATH': make_git_path(mingw_path)}
# Don't update PATH with MinGW until we have figured out Git path
set_env('PATH', mingw_path)
build_dir = os.path.join(proj_dir, '_extbuild')
if "VPL_CPU_DEPS_BUILD_DIR" in os.environ:
build_dir = environ.get("VPL_CPU_DEPS_BUILD_DIR")
else:
set_env('VPL_CPU_DEPS_BUILD_DIR', build_dir)
install_dir = os.path.join(proj_dir, '_deps')
if "VPL_BUILD_DEPENDENCIES" in os.environ:
install_dir = environ.get("VPL_BUILD_DEPENDENCIES")
else:
set_env('VPL_BUILD_DEPENDENCIES', install_dir)
pkg_config_path = [os.path.join(install_dir, "lib", "pkgconfig")]
if 'PKG_CONFIG_PATH' in os.environ:
pkg_config_path.append(os.environ['PKG_CONFIG_PATH'])
set_env('PKG_CONFIG_PATH', os.pathsep.join(pkg_config_path))
if clean:
rm(build_dir)
rm(install_dir)
mkdir(build_dir)
mkdir(install_dir)
with pushd(build_dir):
#build dependencies
# build_aom_av1_decoder(install_dir)
if arch == 'x86_64':
if use_gpl:
build_gpl_x264_encoder(install_dir)
build_dav1d_decoder(install_dir)
build_svt_av1_encoder(install_dir, build_mode)
build_svt_hevc_encoder(install_dir, build_mode)
#prepare ffmpeg build
version = FFMPEG_VERSION
if os.path.exists(f'FFmpeg-{version}'):
print("using existing ffmpeg dir")
else:
if PREFER_CLONE:
cmd('git',
'clone',
'--depth=1',
'-b',
f'{version}',
'https://github.com/FFmpeg/FFmpeg',
f'FFmpeg-{version}',
xenv=GIT_ENV)
else:
download_archive(
f"https://github.com/FFmpeg/FFmpeg/archive/refs/tags/{version}.zip",
".")
with pushd(f'FFmpeg-{version}'):
if not is_repo_root("."):
# make this folder a git repo so we can use "git am" to apply patches
cmd('git', 'init', xenv=GIT_ENV)
cmd('git', 'add', '.', xenv=GIT_ENV)
cmd('git',
'-c',
'user.name=bootstrap',
'-c',
'user.email=<EMAIL>@localhost',
'commit',
'-m',
'Import',
xenv=GIT_ENV)
patch_path = os.path.join(SCRIPT_PATH, 'patches', 'ffmpeg')
if os.path.exists(patch_path):
for patch in os.scandir(patch_path):
if patch.is_file():
cmd('git',
'-c',
'user.name=bootstrap',
'-c',
'user.email=<EMAIL>@localhost',
'am',
patch.path,
xenv=GIT_ENV)
configure_opts = []
configure_opts.extend(
ffmpeg_configure_opts(install_dir, arch, validation))
if build_mode == "Debug":
configure_opts.extend(ffmpeg_debug_configure_opts())
configure_opts.extend(
ffmpeg_3rdparty_configure_opts(build_dir, use_gpl))
# run configure
cmd('./configure', *configure_opts, shell='bash')
# build ffmpeg
cmd('make', '-j', CPU_COUNT)
cmd('make', 'install')
def build_dav1d_decoder(install_dir):
"""build libdav1d from source"""
version = DAV1D_VERSION
if os.path.exists(f'dav1d-{version}'):
print("using existing david decoder dir")
return
if PREFER_CLONE:
cmd('git',
'clone',
'--depth=1',
'-b',
f'{version}',
'https://code.videolan.org/videolan/dav1d.git',
f'dav1d-{version}',
xenv=GIT_ENV)
else:
download_archive(
f"https://code.videolan.org/videolan/dav1d/-/archive/{version}/dav1d-{version}.zip",
".")
with pushd(f'dav1d-{version}'):
cmd('meson', 'build', '--prefix', os.path.join(install_dir,
''), '--libdir',
os.path.join(install_dir, 'lib'), '--buildtype', 'release',
'--default-library=static', '-Denable_avx512=false')
cmd('ninja', '-C', 'build')
with pushd('build'):
cmd('ninja', 'install')
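    # outside Windows, patch the generated dav1d.pc so that static links against
    # libdav1d also pull in pthread and dl; the marker file avoids editing it twice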
if os.name != 'nt':
if os.path.isfile(
os.path.join(install_dir, 'lib', 'pkgconfig',
'dav1d_edited')):
print("dav1d.pc already edited")
else:
with pushd(install_dir, 'lib', 'pkgconfig'):
replace('dav1d.pc', '-ldav1d', '-ldav1d -pthread -ldl')
cmd('touch', 'dav1d_edited')
def build_svt_hevc_encoder(install_dir, build_mode):
"""build SVT HEVC encoder from source"""
version = SVT_HEVC_VERSION
if os.path.exists(f'SVT-HEVC-{version}'):
print("using existing SVT-HEVC encoder dir")
return
if PREFER_CLONE:
cmd('git',
'clone',
'--depth=1',
'-b',
f'v{version}',
'https://github.com/OpenVisualCloud/SVT-HEVC.git',
f'SVT-HEVC-{version}',
xenv=GIT_ENV)
else:
download_archive(
f"https://github.com/OpenVisualCloud/SVT-HEVC/archive/refs/tags/v{version}.zip",
".")
with pushd(f'SVT-HEVC-{version}'):
if build_mode == 'Debug':
replace(os.path.join('Source', 'Lib', 'Codec', 'EbMalloc.h'),
'#define DEBUG_MEMORY_USAGE', '#undef DEBUG_MEMORY_USAGE')
replace(os.path.join('Source', 'Lib', 'Codec', 'EbDefinitions.h'),
'#define LIB_PRINTF_ENABLE 1',
'#define LIB_PRINTF_ENABLE 0')
mkdir('release')
with pushd('release'):
cmd('cmake', '..', '-GUnix Makefiles',
f'-DCMAKE_BUILD_TYPE={build_mode}',
f'-DCMAKE_INSTALL_PREFIX={os.path.join(install_dir, "")}',
'-DCMAKE_INSTALL_LIBDIR=lib', '-DBUILD_SHARED_LIBS=off',
'-DBUILD_APP=off')
cmd('make', '-j', CPU_COUNT)
cmd('make', 'install')
def build_svt_av1_encoder(install_dir, build_mode):
"""build SVT AV1 encoder from source"""
version = SVT_AV1_VERSION
if os.path.exists(f'SVT-AV1-{version}'):
print("using existing SVT-AV1 encoder dir")
return
if PREFER_CLONE:
cmd('git',
'clone',
'--depth=1',
'-b',
f'{version}',
'https://gitlab.com/AOMediaCodec/SVT-AV1',
f'SVT-AV1-{version}',
xenv=GIT_ENV)
else:
download_archive(
f"https://gitlab.com/AOMediaCodec/SVT-AV1/-/archive/{version}/SVT-AV1-{version}.zip",
".")
with pushd(f'SVT-AV1-{version}'):
if build_mode == 'Debug':
replace(
os.path.join('Source', 'Lib', 'Common', 'Codec', 'EbMalloc.h'),
'#define DEBUG_MEMORY_USAGE', '#undef DEBUG_MEMORY_USAGE')
mkdir('release')
with pushd('release'):
cmd('cmake', '..', '-GUnix Makefiles',
f'-DCMAKE_BUILD_TYPE={build_mode}',
f'-DCMAKE_INSTALL_PREFIX={os.path.join(install_dir, "")}',
'-DCMAKE_INSTALL_LIBDIR=lib', '-DBUILD_SHARED_LIBS=off',
'-DBUILD_APPS=off',
'-DBUILD_DEC=off' if os.name != 'nt' else '',
'-DCMAKE_C_FLAGS=$(CMAKE_C_FLAGS) -DSVT_LOG_QUIET=1')
cmd('make', '-j', CPU_COUNT)
cmd('make', 'install')
def build_gpl_x264_encoder(install_dir):
"""build x264 encoder from source"""
version = X264_VERSION
posix_install_dir = to_posix_path(install_dir)
if os.path.exists(f'x264-{version}'):
print("using existing x264 encoder dir")
return
if PREFER_CLONE:
cmd('git',
'clone',
'--depth=1',
'-b',
f'{version}',
'https://code.videolan.org/videolan/x264.git',
f'x264-{version}',
xenv=GIT_ENV)
else:
download_archive(
f"https://code.videolan.org/videolan/x264/-/archive/{version}/x264-{version}.zip",
".")
with pushd(f'x264-{version}'):
cmd('./configure',
f'--prefix={posix_install_dir}',
'--enable-static',
'--enable-pic',
shell='bash')
cmd('make', '-j', CPU_COUNT)
cmd('make', 'install')
def ffmpeg_configure_opts(install_dir, arch, validation):
"""configure options for ffmpeg build"""
posix_install_dir = to_posix_path(install_dir)
result = [
f'--prefix={posix_install_dir}',
'--enable-static',
'--disable-shared',
'--enable-pic',
'--disable-everything',
'--disable-network',
'--disable-doc',
'--disable-manpages',
'--disable-hwaccels',
'--disable-appkit',
'--disable-alsa',
'--disable-avfoundation',
'--disable-iconv',
'--disable-lzma',
'--disable-securetransport',
'--disable-xlib',
'--disable-zlib',
'--disable-amf',
'--disable-audiotoolbox',
'--disable-cuvid',
'--disable-d3d11va',
'--disable-dxva2',
'--disable-nvdec',
'--disable-nvenc',
'--disable-v4l2-m2m',
'--disable-videotoolbox',
'--disable-sdl2',
'--enable-indev=lavfi',
'--enable-protocol=file',
'--enable-bsf=h264_mp4toannexb',
'--enable-bsf=hevc_mp4toannexb',
'--enable-bsf=mjpeg2jpeg',
'--enable-bsf=mjpega_dump_header',
'--enable-decoder=rawvideo',
'--enable-encoder=rawvideo',
'--enable-demuxer=rawvideo',
'--enable-demuxer=mjpeg',
'--enable-muxer=rawvideo',
'--enable-muxer=null',
'--enable-decoder=wrapped_avframe',
'--enable-encoder=wrapped_avframe',
'--enable-muxer=h264',
'--enable-muxer=mpeg2video',
'--enable-muxer=mjpeg',
'--enable-muxer=hevc',
'--enable-muxer=ivf',
'--enable-filter=testsrc',
'--enable-demuxer=image2',
'--enable-muxer=image2',
'--enable-filter=yuvtestsrc',
'--enable-filter=rgbtestsrc',
'--enable-decoder=h264',
'--enable-parser=h264',
'--enable-demuxer=h264',
'--enable-decoder=hevc',
'--enable-demuxer=hevc',
'--enable-demuxer=ivf',
'--enable-parser=hevc',
'--enable-parser=mjpeg',
'--enable-parser=av1',
'--enable-decoder=mpeg2video',
'--enable-encoder=mpeg2video',
'--enable-encoder=mjpeg',
'--enable-decoder=mjpeg',
'--enable-filter=overlay',
'--enable-filter=crop',
'--enable-filter=scale',
'--enable-filter=drawbox',
'--enable-filter=psnr',
'--enable-filter=split',
'--enable-filter=select',
'--enable-filter=concat',
'--enable-filter=ssim',
]
if os.name == 'nt':
result.extend([
'--extra-cflags=-fPIC',
'--extra-ldflags=-fPIC',
'--enable-filter=testsrc2',
])
if arch == 'x86_64':
result.append('--arch=x86_64')
result.append('--target-os=mingw64')
elif arch == 'x86_32':
result.append('--arch=x86_32')
result.append('--target-os=mingw32')
else:
raise Exception(f'Unknown architecture {arch}')
else:
if validation:
result.extend([
'--enable-filter=testsrc2', '--disable-vaapi',
'--disable-cuda-llvm'
])
else:
result.extend([
'--disable-vaapi', '--disable-cuda-llvm', '--disable-avdevice',
'--disable-swresample'
])
return result
def ffmpeg_debug_configure_opts():
"""add ffmpeg configure debug flags if requested"""
return [
'--disable-optimizations', '--extra-cflags=-Og',
'--extra-cflags=-fno-omit-frame-pointer', '--enable-debug=3',
'--extra-cflags=-fno-inline'
]
def ffmpeg_3rdparty_configure_opts(build_dir, use_gpl):
"""update ffmpeg configure command line based on packages findable
by pkg-config"""
result = []
pkg_list = capture_cmd("pkg-config", "--list-all")[0]
if "aom" in pkg_list:
print("aom decoder found")
result.extend(['--enable-libaom', '--enable-decoder=libaom_av1'])
if "dav1d" in pkg_list:
print("dav1d decoder found")
result.extend(['--enable-libdav1d', '--enable-decoder=libdav1d'])
if use_gpl:
if "x264" in pkg_list:
print("x264 encoder found")
result.extend([
'--enable-gpl', '--enable-libx264', '--enable-encoder=libx264'
])
if "SvtAv1Enc" in pkg_list:
print("SVT-AV1 encoder found")
result.extend(['--enable-libsvtav1', '--enable-encoder=libsvtav1'])
if "SvtHevcEnc" in pkg_list:
print("SVT-HEVC encoder found")
result.extend(['--enable-libsvthevc', '--enable-encoder=libsvt_hevc'])
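        # the SVT-HEVC encoder wrapper is applied to the ffmpeg tree as a patch shipped
        # with the SVT-HEVC sources; the marker file keeps the 'git am' step idempotent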
if os.path.isfile("svt-hevc-patched"):
print("SVT-HEVC patch already applied")
else:
patch = 'n4.4-0001-lavc-svt_hevc-add-libsvt-hevc-encoder-wrapper.patch'
cmd('git',
'-c',
'user.name=bootstrap',
'-c',
'user.email=<EMAIL>@localhost',
'am',
os.path.join(build_dir, f'SVT-HEVC-{SVT_HEVC_VERSION}',
'ffmpeg_plugin', patch),
xenv=GIT_ENV)
cmd('touch', 'svt-hevc-patched')
return result
if __name__ == "__main__":
main()
|
[
"os.remove",
"argparse.ArgumentParser",
"os.environ.copy",
"os.path.isfile",
"shutil.rmtree",
"os.path.join",
"os.chdir",
"multiprocessing.cpu_count",
"os.path.dirname",
"os.path.exists",
"ssl._create_unverified_context",
"subprocess.Popen",
"os.chmod",
"os.pathsep.join",
"os.path.realpath",
"shutil.which",
"time.sleep",
"posixpath.join",
"os.scandir",
"os.makedirs",
"os.getcwd",
"os.path.isdir",
"time.time",
"os.environ.get",
"sys.stderr.write"
] |
[((908, 935), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (933, 935), False, 'import multiprocessing\n'), ((2499, 2521), 'posixpath.join', 'posixpath.join', (['*parts'], {}), '(*parts)\n', (2513, 2521), False, 'import posixpath\n'), ((3380, 3391), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3389, 3391), False, 'import os\n'), ((3403, 3430), 'os.path.join', 'os.path.join', (['cur_dir', '*dst'], {}), '(cur_dir, *dst)\n', (3415, 3430), False, 'import os\n'), ((3435, 3449), 'os.chdir', 'os.chdir', (['dest'], {}), '(dest)\n', (3443, 3449), False, 'import os\n'), ((3684, 3706), 'os.path.exists', 'os.path.exists', (['target'], {}), '(target)\n', (3698, 3706), False, 'import os\n'), ((8911, 8943), 'ssl._create_unverified_context', 'ssl._create_unverified_context', ([], {}), '()\n', (8941, 8943), False, 'import ssl\n'), ((9748, 9789), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""bootstrap"""'}), "(prog='bootstrap')\n", (9771, 9789), False, 'import argparse\n'), ((11373, 11412), 'os.path.join', 'os.path.join', (['"""C:\\\\"""', '"""tools"""', '"""msys64"""'], {}), "('C:\\\\', 'tools', 'msys64')\n", (11385, 11412), False, 'import os\n'), ((11796, 11826), 'os.path.join', 'os.path.join', (['msys_root', '"""usr"""'], {}), "(msys_root, 'usr')\n", (11808, 11826), False, 'import os\n'), ((11851, 11885), 'os.path.join', 'os.path.join', (['msys_usr_path', '"""bin"""'], {}), "(msys_usr_path, 'bin')\n", (11863, 11885), False, 'import os\n'), ((11901, 11932), 'os.path.join', 'os.path.join', (['"""C:\\\\"""', '"""Windows"""'], {}), "('C:\\\\', 'Windows')\n", (11913, 11932), False, 'import os\n'), ((11952, 11986), 'os.path.join', 'os.path.join', (['win_path', '"""System32"""'], {}), "(win_path, 'System32')\n", (11964, 11986), False, 'import os\n'), ((12743, 12770), 'os.pathsep.join', 'os.pathsep.join', (['mingw_path'], {}), '(mingw_path)\n', (12758, 12770), False, 'import os\n'), ((12973, 12992), 'shutil.which', 'shutil.which', (['"""git"""'], {}), "('git')\n", (12985, 12992), False, 'import shutil\n'), ((13560, 13595), 'os.path.join', 'os.path.join', (['proj_dir', '"""_extbuild"""'], {}), "(proj_dir, '_extbuild')\n", (13572, 13595), False, 'import os\n'), ((13783, 13814), 'os.path.join', 'os.path.join', (['proj_dir', '"""_deps"""'], {}), "(proj_dir, '_deps')\n", (13795, 13814), False, 'import os\n'), ((17098, 17132), 'os.path.exists', 'os.path.exists', (['f"""dav1d-{version}"""'], {}), "(f'dav1d-{version}')\n", (17112, 17132), False, 'import os\n'), ((18614, 18651), 'os.path.exists', 'os.path.exists', (['f"""SVT-HEVC-{version}"""'], {}), "(f'SVT-HEVC-{version}')\n", (18628, 18651), False, 'import os\n'), ((20100, 20136), 'os.path.exists', 'os.path.exists', (['f"""SVT-AV1-{version}"""'], {}), "(f'SVT-AV1-{version}')\n", (20114, 20136), False, 'import os\n'), ((21569, 21602), 'os.path.exists', 'os.path.exists', (['f"""x264-{version}"""'], {}), "(f'x264-{version}')\n", (21583, 21602), False, 'import os\n'), ((806, 817), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (815, 817), False, 'import os\n'), ((819, 844), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (834, 844), False, 'import os\n'), ((3577, 3594), 'os.chdir', 'os.chdir', (['cur_dir'], {}), '(cur_dir)\n', (3585, 3594), False, 'import os\n'), ((4741, 4760), 'os.makedirs', 'os.makedirs', (['target'], {}), '(target)\n', (4752, 4760), False, 'import os\n'), ((6266, 6313), 'subprocess.Popen', 'subprocess.Popen', (['exec_cmd'], {'shell': '(True)', 'env': 'env'}), '(exec_cmd, 
shell=True, env=env)\n', (6282, 6313), False, 'import subprocess\n'), ((7578, 7702), 'subprocess.Popen', 'subprocess.Popen', (['exec_cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'universal_newlines': '(True)', 'shell': '(True)', 'env': 'env'}), '(exec_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n universal_newlines=True, shell=True, env=env)\n', (7594, 7702), False, 'import subprocess\n'), ((8698, 8718), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8712, 8718), False, 'import os\n'), ((11564, 11598), 'os.path.exists', 'os.path.exists', (['fallback_msys_root'], {}), '(fallback_msys_root)\n', (11578, 11598), False, 'import os\n'), ((12225, 12266), 'os.path.join', 'os.path.join', (['msys_root', '"""mingw64"""', '"""bin"""'], {}), "(msys_root, 'mingw64', 'bin')\n", (12237, 12266), False, 'import os\n'), ((13663, 13700), 'os.environ.get', 'environ.get', (['"""VPL_CPU_DEPS_BUILD_DIR"""'], {}), "('VPL_CPU_DEPS_BUILD_DIR')\n", (13674, 13700), False, 'from os import environ\n'), ((13884, 13921), 'os.environ.get', 'environ.get', (['"""VPL_BUILD_DEPENDENCIES"""'], {}), "('VPL_BUILD_DEPENDENCIES')\n", (13895, 13921), False, 'from os import environ\n'), ((14011, 14056), 'os.path.join', 'os.path.join', (['install_dir', '"""lib"""', '"""pkgconfig"""'], {}), "(install_dir, 'lib', 'pkgconfig')\n", (14023, 14056), False, 'import os\n'), ((14191, 14223), 'os.pathsep.join', 'os.pathsep.join', (['pkg_config_path'], {}), '(pkg_config_path)\n', (14206, 14223), False, 'import os\n'), ((14773, 14808), 'os.path.exists', 'os.path.exists', (['f"""FFmpeg-{version}"""'], {}), "(f'FFmpeg-{version}')\n", (14787, 14808), False, 'import os\n'), ((27038, 27072), 'os.path.isfile', 'os.path.isfile', (['"""svt-hevc-patched"""'], {}), "('svt-hevc-patched')\n", (27052, 27072), False, 'import os\n'), ((3857, 3868), 'time.time', 'time.time', ([], {}), '()\n', (3866, 3868), False, 'import time\n'), ((3888, 3899), 'time.time', 'time.time', ([], {}), '()\n', (3897, 3899), False, 'import time\n'), ((4597, 4619), 'os.path.exists', 'os.path.exists', (['target'], {}), '(target)\n', (4611, 4619), False, 'import os\n'), ((4991, 5013), 'os.path.exists', 'os.path.exists', (['target'], {}), '(target)\n', (5005, 5013), False, 'import os\n'), ((5027, 5040), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5037, 5040), False, 'import time\n'), ((5559, 5576), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (5574, 5576), False, 'import os\n'), ((6871, 6888), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (6886, 6888), False, 'import os\n'), ((7923, 7950), 'sys.stderr.write', 'sys.stderr.write', (['result[1]'], {}), '(result[1])\n', (7939, 7950), False, 'import sys\n'), ((8543, 8569), 'os.chmod', 'os.chmod', (['targetpath', 'attr'], {}), '(targetpath, attr)\n', (8551, 8569), False, 'import os\n'), ((9552, 9578), 'os.path.join', 'os.path.join', (['output', '""".."""'], {}), "(output, '..')\n", (9564, 9578), False, 'import os\n'), ((12058, 12099), 'os.path.join', 'os.path.join', (['msys_root', '"""mingw32"""', '"""bin"""'], {}), "(msys_root, 'mingw32', 'bin')\n", (12070, 12099), False, 'import os\n'), ((12140, 12201), 'os.path.join', 'os.path.join', (['msys_root', '"""mingw32"""', '"""i686-w64-mingw32"""', '"""bin"""'], {}), "(msys_root, 'mingw32', 'i686-w64-mingw32', 'bin')\n", (12152, 12201), False, 'import os\n'), ((12300, 12343), 'os.path.join', 'os.path.join', (['msys_usr_path', '"""local"""', '"""bin"""'], {}), "(msys_usr_path, 'local', 'bin')\n", (12312, 12343), False, 
'import os\n'), ((12380, 12410), 'os.path.join', 'os.path.join', (['msys_root', '"""bin"""'], {}), "(msys_root, 'bin')\n", (12392, 12410), False, 'import os\n'), ((12460, 12494), 'os.path.join', 'os.path.join', (['win_sys_path', '"""Wbem"""'], {}), "(win_sys_path, 'Wbem')\n", (12472, 12494), False, 'import os\n'), ((12504, 12559), 'os.path.join', 'os.path.join', (['win_sys_path', '"""WindowsPowerShell"""', '"""v1.0"""'], {}), "(win_sys_path, 'WindowsPowerShell', 'v1.0')\n", (12516, 12559), False, 'import os\n'), ((12569, 12613), 'os.path.join', 'os.path.join', (['msys_usr_bin_path', '"""site_perl"""'], {}), "(msys_usr_bin_path, 'site_perl')\n", (12581, 12613), False, 'import os\n'), ((12623, 12669), 'os.path.join', 'os.path.join', (['msys_usr_bin_path', '"""vendor_perl"""'], {}), "(msys_usr_bin_path, 'vendor_perl')\n", (12635, 12669), False, 'import os\n'), ((12679, 12723), 'os.path.join', 'os.path.join', (['msys_usr_bin_path', '"""core_perl"""'], {}), "(msys_usr_bin_path, 'core_perl')\n", (12691, 12723), False, 'import os\n'), ((15932, 15978), 'os.path.join', 'os.path.join', (['SCRIPT_PATH', '"""patches"""', '"""ffmpeg"""'], {}), "(SCRIPT_PATH, 'patches', 'ffmpeg')\n", (15944, 15978), False, 'import os\n'), ((15994, 16020), 'os.path.exists', 'os.path.exists', (['patch_path'], {}), '(patch_path)\n', (16008, 16020), False, 'import os\n'), ((17675, 17704), 'os.path.join', 'os.path.join', (['install_dir', '""""""'], {}), "(install_dir, '')\n", (17687, 17704), False, 'import os\n'), ((17785, 17817), 'os.path.join', 'os.path.join', (['install_dir', '"""lib"""'], {}), "(install_dir, 'lib')\n", (17797, 17817), False, 'import os\n'), ((19364, 19421), 'os.path.join', 'os.path.join', (['"""Source"""', '"""Lib"""', '"""Codec"""', '"""EbDefinitions.h"""'], {}), "('Source', 'Lib', 'Codec', 'EbDefinitions.h')\n", (19376, 19421), False, 'import os\n'), ((3947, 3969), 'os.path.isfile', 'os.path.isfile', (['target'], {}), '(target)\n', (3961, 3969), False, 'import os\n'), ((4209, 4230), 'os.path.isdir', 'os.path.isdir', (['target'], {}), '(target)\n', (4222, 4230), False, 'import os\n'), ((16051, 16073), 'os.scandir', 'os.scandir', (['patch_path'], {}), '(patch_path)\n', (16061, 16073), False, 'import os\n'), ((19215, 19267), 'os.path.join', 'os.path.join', (['"""Source"""', '"""Lib"""', '"""Codec"""', '"""EbMalloc.h"""'], {}), "('Source', 'Lib', 'Codec', 'EbMalloc.h')\n", (19227, 19267), False, 'import os\n'), ((20710, 20772), 'os.path.join', 'os.path.join', (['"""Source"""', '"""Lib"""', '"""Common"""', '"""Codec"""', '"""EbMalloc.h"""'], {}), "('Source', 'Lib', 'Common', 'Codec', 'EbMalloc.h')\n", (20722, 20772), False, 'import os\n'), ((27416, 27495), 'os.path.join', 'os.path.join', (['build_dir', 'f"""SVT-HEVC-{SVT_HEVC_VERSION}"""', '"""ffmpeg_plugin"""', 'patch'], {}), "(build_dir, f'SVT-HEVC-{SVT_HEVC_VERSION}', 'ffmpeg_plugin', patch)\n", (27428, 27495), False, 'import os\n'), ((4146, 4163), 'os.remove', 'os.remove', (['target'], {}), '(target)\n', (4155, 4163), False, 'import os\n'), ((4416, 4437), 'shutil.rmtree', 'shutil.rmtree', (['target'], {}), '(target)\n', (4429, 4437), False, 'import shutil\n'), ((4516, 4529), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4526, 4529), False, 'import time\n'), ((9695, 9724), 'os.path.realpath', 'os.path.realpath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (9711, 9724), False, 'import os\n'), ((18102, 18163), 'os.path.join', 'os.path.join', (['install_dir', '"""lib"""', '"""pkgconfig"""', '"""dav1d_edited"""'], {}), "(install_dir, 'lib', 'pkgconfig', 
'dav1d_edited')\n", (18114, 18163), False, 'import os\n'), ((19748, 19777), 'os.path.join', 'os.path.join', (['install_dir', '""""""'], {}), "(install_dir, '')\n", (19760, 19777), False, 'import os\n'), ((21050, 21079), 'os.path.join', 'os.path.join', (['install_dir', '""""""'], {}), "(install_dir, '')\n", (21062, 21079), False, 'import os\n')]
|
#
# Copyright (c) 2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
from klever.core.utils import make_relative_path
from klever.core.pfg.fragmentation import FragmentationAlgorythm
class Busybox(FragmentationAlgorythm):
CLADE_PRESET = 'busybox_linux'
def __init__(self, logger, conf, tactic, pf_dir):
super().__init__(logger, conf, tactic, pf_dir)
self._incorporate_libbb = tactic.get("include dependencies from libbb to applets fragments")
self._match_files = dict()
def _determine_units(self, program):
"""
        Find all files that have a \w+_main function and add their dependency files, except those stored in the libbb dir.
        All files from the libbb directory are added to a dedicated unit named libbb.
:param program: Program object.
"""
main_func = re.compile("\\w+main")
libbb = set()
applets = dict()
for file in program.files:
rel_path = make_relative_path(self.source_paths, str(file))
if os.path.commonpath(['libbb', rel_path]):
libbb.add(file)
else:
for func in file.export_functions:
if main_func.match(func):
path, name = os.path.split(rel_path)
name = os.path.splitext(name)[0]
applets[name] = {file}
if self._incorporate_libbb:
dfiles = program.collect_dependencies({file})
else:
dfiles = program.collect_dependencies(
{file}, filter_func=lambda x:
not os.path.commonpath(['libbb', make_relative_path(self.source_paths, x.name)]))
applets[name].update(dfiles)
# Create fragments for found applets and libbb
for name, files in applets.items():
program.create_fragment(name, files, add=True)
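            # count how often a file is shared between applets; files with a positive
            # count are later excluded from the verification targets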
for file in files:
if file.name not in self._match_files:
self._match_files[file.name] = 0
else:
self._match_files[file.name] += 1
program.create_fragment('libbb', libbb, add=True)
self.logger.info('Found {} applets: {}'.format(len(applets), ', '.join(applets)))
def _determine_targets(self, program):
"""
        Determine the program fragments that should be verified. We refer to these fragments as target fragments.
:param program:
:return:
"""
super()._determine_targets(program)
# Do not consider libbb files as targets
for file in (program._files[f] for f in self._match_files if self._match_files[f] > 0):
file.target = False
|
[
"os.path.commonpath",
"klever.core.utils.make_relative_path",
"os.path.splitext",
"os.path.split",
"re.compile"
] |
[((1477, 1499), 're.compile', 're.compile', (['"""\\\\w+main"""'], {}), "('\\\\w+main')\n", (1487, 1499), False, 'import re\n'), ((1670, 1709), 'os.path.commonpath', 'os.path.commonpath', (["['libbb', rel_path]"], {}), "(['libbb', rel_path])\n", (1688, 1709), False, 'import os\n'), ((1895, 1918), 'os.path.split', 'os.path.split', (['rel_path'], {}), '(rel_path)\n', (1908, 1918), False, 'import os\n'), ((1950, 1972), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (1966, 1972), False, 'import os\n'), ((2377, 2422), 'klever.core.utils.make_relative_path', 'make_relative_path', (['self.source_paths', 'x.name'], {}), '(self.source_paths, x.name)\n', (2395, 2422), False, 'from klever.core.utils import make_relative_path\n')]
|
# Shapely installed via pip doesn't have GEOS dependencies
# Instead we download a custom Python wheel which does have the dependencies
# (see https://stackoverflow.com/questions/13144158/python-geos-and-shapely-on-windows-64)
from urllib import urlretrieve
from subprocess import call
import os
wheels = [
'https://download.lfd.uci.edu/pythonlibs/l8ulg3xw/GDAL-2.2.4-cp27-cp27m-win_amd64.whl',
'https://download.lfd.uci.edu/pythonlibs/l8ulg3xw/Fiona-1.7.13-cp27-cp27m-win_amd64.whl',
'https://download.lfd.uci.edu/pythonlibs/l8ulg3xw/Shapely-1.6.4.post1-cp27-cp27m-win_amd64.whl'
]
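# Note: these unofficial wheels are pinned to CPython 2.7 on 64-bit Windows; the
# URLs may need updating for a different interpreter or architecture.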
# Make sure we can install wheels
call(['pip', 'install', 'wheel'])
# Download wheel
print("Install custom packages")
for url in wheels:
print("Downloading: %s" % url)
filename = url.split("/")[-1]
# Download it
urlretrieve(url, filename)
# Install it
call(['pip', 'install', filename])
# Clean up
os.remove(filename)
print("Done")
|
[
"os.remove",
"subprocess.call",
"urllib.urlretrieve"
] |
[((622, 655), 'subprocess.call', 'call', (["['pip', 'install', 'wheel']"], {}), "(['pip', 'install', 'wheel'])\n", (626, 655), False, 'from subprocess import call\n'), ((806, 832), 'urllib.urlretrieve', 'urlretrieve', (['url', 'filename'], {}), '(url, filename)\n', (817, 832), False, 'from urllib import urlretrieve\n'), ((848, 882), 'subprocess.call', 'call', (["['pip', 'install', filename]"], {}), "(['pip', 'install', filename])\n", (852, 882), False, 'from subprocess import call\n'), ((896, 915), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (905, 915), False, 'import os\n')]
|
from django.db import models
# Create your models here.
class User(models.Model):
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
email_address = models.CharField(max_length=255)
age = models.IntegerField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
|
[
"django.db.models.CharField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((98, 130), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (114, 130), False, 'from django.db import models\n'), ((145, 177), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (161, 177), False, 'from django.db import models\n'), ((196, 228), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (212, 228), False, 'from django.db import models\n'), ((237, 258), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (256, 258), False, 'from django.db import models\n'), ((274, 313), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (294, 313), False, 'from django.db import models\n'), ((329, 364), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (349, 364), False, 'from django.db import models\n')]
|
# coding=utf-8
#
# pylint: disable = wildcard-import, unused-wildcard-import
# pylint: disable = missing-docstring, invalid-name
# pylint: disable = unused-argument, no-member, attribute-defined-outside-init
# pylint: disable = too-many-lines, too-many-branches, too-many-statements
"""
Copyright (c) 2020, <NAME>. All rights reserved.
license: BSD 3-Clause License, see LICENSE for more details.
"""
import os
import fnmatch
import shutil
import pytest
from zm import utils
from zm.features import ToolchainVars
from zm.testing import loadFromJson
from tests.func_utils import *
@pytest.mark.usefixtures("unsetEnviron")
class TestParams(object):
@pytest.fixture(params = getZmExecutables(), autouse = True)
def allZmExe(self, request):
self.zmExe = zmExes[request.param]
@pytest.fixture(params = [joinpath('c', '02-simple'), joinpath('cpp', '04-complex')])
def project(self, request, tmpdir):
def teardown():
printErrorOnFailed(self, request)
request.addfinalizer(teardown)
setupTest(self, request, tmpdir)
return request.param
def testBuildRootInCLI(self, project):
env = { 'ZENMAKE_TESTING_MODE' : '1' }
cmdLine = ['build', '-o', '_bld']
assert runZm(self, cmdLine, env)[0] == 0
checkBuildResults(self, cmdLine, resultExists = True, fakeBuild = True)
assert self.confPaths.buildroot == joinpath(self.confPaths.buildconfdir, '_bld')
def testBuildRootInEnv(self, project, monkeypatch):
monkeypatch.setenv('BUILDROOT', '_bld_') # for checkBuildResults
env = { 'BUILDROOT' : '_bld_', 'ZENMAKE_TESTING_MODE' : '1' }
cmdLine = ['build']
assert runZm(self, cmdLine, env)[0] == 0
checkBuildResults(self, cmdLine, resultExists = True, fakeBuild = True)
assert self.confPaths.buildroot == joinpath(self.confPaths.buildconfdir, '_bld_')
@pytest.mark.skipif(PLATFORM != 'linux',
reason = "It's enough to test on linux only")
def testToolchainVars(self, project):
projectLang = os.path.split(project)[-2].replace('p', 'x')
fixture = {
'c' : {
'gcc': {
'sysenvval' : 'gcc',
'compflags' : '',
'linkflags' : '',
'ldflags' : '-Wl,-rpath,.',
},
'clang': {
'sysenvval' : 'clang',
'compflags' : '-O1 -g',
'linkflags' : '-Wl,-rpath,. ',
'ldflags' : '',
},
'clang-path': {
'sysenvval' : shutil.which('clang'),
'compflags' : '-O1 -g',
'linkflags' : '-Wl,-rpath,. ',
'ldflags' : '',
},
},
'cxx': {
'g++': {
'sysenvval' : 'g++',
'compflags' : '-O2 -Wall',
'linkflags' : '-Wl,-rpath,. -Wl,--as-needed',
'ldflags' : '-fsanitize=address',
},
'clang++': {
'sysenvval' : 'clang++',
'compflags' : '-O3 -Wall -Wextra',
'linkflags' : '-Wl,--as-needed -fsanitize=address',
'ldflags' : '-Wl,-rpath,.',
},
},
}
def formExpectedFlags(flags):
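            # reversed + uniqueListWithOrder + reverse is intended to keep only the
            # last occurrence of each flag (later flags win) while preserving order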
flags = utils.uniqueListWithOrder(reversed(flags))
flags.reverse()
return flags
env = { 'ZENMAKE_TESTING_MODE' : '1' }
cmdLine = ['build']
sysEnvToolVar = ToolchainVars.sysVarToSetToolchain(projectLang)
cfgEnvToolVar = ToolchainVars.cfgVarToSetToolchain(projectLang)
compFlagsName = projectLang.upper() + 'FLAGS'
# invalid name
toolchain = 'invalid'
env[sysEnvToolVar] = toolchain
assert runZm(self, cmdLine, env)[0] != 0
prjfixture = fixture[projectLang]
for toolchain, info in prjfixture.items():
env[sysEnvToolVar] = info['sysenvval']
env[compFlagsName] = info['compflags']
env['LINKFLAGS'] = info['linkflags']
env['LDFLAGS'] = info['ldflags']
assert runZm(self, ['distclean'])[0] == 0
assert runZm(self, cmdLine, env)[0] == 0
targets = obtainBuildTargets(self, cmdLine)
checkBuildTargets(targets, resultExists = True, fakeBuild = True)
confManager = processConfManagerWithCLI(self, cmdLine)
buildout = confManager.root.confPaths.buildout
paths = []
patterns = '.* c4che config.log'.split()
for root, dirs, files in os.walk(buildout):
ignore = set()
for pattern in patterns:
for name in fnmatch.filter(dirs, pattern):
dirs.remove(name) # don't visit sub directories
for name in fnmatch.filter(files, pattern):
ignore.add(name)
paths += [os.path.join(root, x) for x in files if x not in ignore]
for path in paths:
with open(path, 'r') as f:
data = loadFromJson(f.read())
zmTaskName = data['tgen-name']
usedEnv = data['env']
zmtasks = data['zmtasks']
taskParams = zmtasks[zmTaskName]
features = taskParams['features']
targetKind = getTargetPattern(usedEnv, features)[1]
# check toolchain
assert usedEnv[cfgEnvToolVar] == [shutil.which(info['sysenvval'])]
isLink = data['is-link']
if not isLink:
# check CFLAGS/CXXFLAGS
sysEnvFlags = env[compFlagsName].split()
bconfFlags = utils.toList(taskParams.get(compFlagsName.lower(), []))
expectedFlags = formExpectedFlags(bconfFlags + sysEnvFlags)
if targetKind == 'shlib':
# Waf adds this flag itself
expectedFlags = ['-fPIC'] + expectedFlags
assert usedEnv.get(compFlagsName, []) == expectedFlags
else:
# check LINKFLAGS/LDFLAGS
for flagsName in ('linkflags', 'ldflags'):
sysEnvFlags = env[flagsName.upper()].split()
bconfFlags = utils.toList(taskParams.get(flagsName, []))
expectedFlags = formExpectedFlags(bconfFlags + sysEnvFlags)
if targetKind == 'shlib' and flagsName == 'linkflags':
# Waf adds this flag itself
expectedFlags = ['-shared'] + expectedFlags
assert usedEnv.get(flagsName.upper(), []) == expectedFlags
|
[
"fnmatch.filter",
"zm.features.ToolchainVars.cfgVarToSetToolchain",
"os.walk",
"shutil.which",
"pytest.mark.skipif",
"zm.features.ToolchainVars.sysVarToSetToolchain",
"os.path.split",
"os.path.join",
"pytest.mark.usefixtures"
] |
[((588, 627), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""unsetEnviron"""'], {}), "('unsetEnviron')\n", (611, 627), False, 'import pytest\n'), ((1916, 2004), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(PLATFORM != 'linux')"], {'reason': '"""It\'s enough to test on linux only"""'}), '(PLATFORM != \'linux\', reason=\n "It\'s enough to test on linux only")\n', (1934, 2004), False, 'import pytest\n'), ((3690, 3737), 'zm.features.ToolchainVars.sysVarToSetToolchain', 'ToolchainVars.sysVarToSetToolchain', (['projectLang'], {}), '(projectLang)\n', (3724, 3737), False, 'from zm.features import ToolchainVars\n'), ((3762, 3809), 'zm.features.ToolchainVars.cfgVarToSetToolchain', 'ToolchainVars.cfgVarToSetToolchain', (['projectLang'], {}), '(projectLang)\n', (3796, 3809), False, 'from zm.features import ToolchainVars\n'), ((4782, 4799), 'os.walk', 'os.walk', (['buildout'], {}), '(buildout)\n', (4789, 4799), False, 'import os\n'), ((2091, 2113), 'os.path.split', 'os.path.split', (['project'], {}), '(project)\n', (2104, 2113), False, 'import os\n'), ((2675, 2696), 'shutil.which', 'shutil.which', (['"""clang"""'], {}), "('clang')\n", (2687, 2696), False, 'import shutil\n'), ((4905, 4934), 'fnmatch.filter', 'fnmatch.filter', (['dirs', 'pattern'], {}), '(dirs, pattern)\n', (4919, 4934), False, 'import fnmatch\n'), ((5040, 5070), 'fnmatch.filter', 'fnmatch.filter', (['files', 'pattern'], {}), '(files, pattern)\n', (5054, 5070), False, 'import fnmatch\n'), ((5140, 5161), 'os.path.join', 'os.path.join', (['root', 'x'], {}), '(root, x)\n', (5152, 5161), False, 'import os\n'), ((5701, 5732), 'shutil.which', 'shutil.which', (["info['sysenvval']"], {}), "(info['sysenvval'])\n", (5713, 5732), False, 'import shutil\n')]
|
from brewgorithm import beer2vec, beer_emb, word_weighter
import numpy as np
import unittest
from sklearn.metrics.pairwise import cosine_similarity
class TestBeer2vec(unittest.TestCase):
def test_most_similar_test(self):
beers = beer2vec.get_beer2vec()
embeddings = beer_emb.embed_doc("apricot peach fruity", word_weighter.is_beer_related)
emb = np.average(embeddings, axis=0)
sims = cosine_similarity([emb], [beer['vector'] for beer in beers]).reshape(-1)
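    # rank every beer by cosine similarity to the averaged query embedding; after
    # sorting in descending order, index [1] below is the second-best match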
candidates = []
for i, sim in enumerate(sims):
candidates.append((sim, i))
result = [x for x in sorted(candidates, key=lambda i: i[0], reverse=True)[:2]][1]
self.assertEqual(beers[result[1]]['BeerNamePlain'].strip(), "delirium tremens")
self.assertEqual(float(beers[result[1]]['Alcohol']), 8.5)
self.assertEqual(int(beers[result[1]]['OverallPctl']), 93)
desc = [a[0] for a in beer_emb.most_similar(positive=[beers[result[1]]['vector']], negative=[])]
self.assertIn("fruity", desc)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"sklearn.metrics.pairwise.cosine_similarity",
"numpy.average",
"brewgorithm.beer_emb.embed_doc",
"brewgorithm.beer2vec.get_beer2vec",
"brewgorithm.beer_emb.most_similar"
] |
[((1029, 1044), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1042, 1044), False, 'import unittest\n'), ((237, 260), 'brewgorithm.beer2vec.get_beer2vec', 'beer2vec.get_beer2vec', ([], {}), '()\n', (258, 260), False, 'from brewgorithm import beer2vec, beer_emb, word_weighter\n'), ((279, 352), 'brewgorithm.beer_emb.embed_doc', 'beer_emb.embed_doc', (['"""apricot peach fruity"""', 'word_weighter.is_beer_related'], {}), "('apricot peach fruity', word_weighter.is_beer_related)\n", (297, 352), False, 'from brewgorithm import beer2vec, beer_emb, word_weighter\n'), ((363, 393), 'numpy.average', 'np.average', (['embeddings'], {'axis': '(0)'}), '(embeddings, axis=0)\n', (373, 393), True, 'import numpy as np\n'), ((405, 465), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['[emb]', "[beer['vector'] for beer in beers]"], {}), "([emb], [beer['vector'] for beer in beers])\n", (422, 465), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((889, 962), 'brewgorithm.beer_emb.most_similar', 'beer_emb.most_similar', ([], {'positive': "[beers[result[1]]['vector']]", 'negative': '[]'}), "(positive=[beers[result[1]]['vector']], negative=[])\n", (910, 962), False, 'from brewgorithm import beer2vec, beer_emb, word_weighter\n')]
|
# Copyright (c) 2018, Curious AI Ltd. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.`
#
# Changes were made by
# Authors: <NAME>, <NAME>, <NAME>, <NAME>. 2018.
import re
import argparse
import logging
from . import architectures, datasets
LOG = logging.getLogger('main')
__all__ = ['parse_cmd_args', 'parse_dict_args']
def create_parser():
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--dataset', metavar='DATASET', default='cifar10',
choices=datasets.__all__,
help='dataset: ' +
' | '.join(datasets.__all__) +
                        ' (default: cifar10)')
parser.add_argument('--train-subdir', type=str, default='train+val',
help='the subdirectory inside the data directory that contains the training data')
parser.add_argument('--eval-subdir', type=str, default='test',
help='the subdirectory inside the data directory that contains the evaluation data')
parser.add_argument('--label-split', default=10, type=int, metavar='FILE',
help='list of image labels (default: based on directory structure)')
parser.add_argument('--exclude-unlabeled', default=False, type=str2bool, metavar='BOOL',
help='exclude unlabeled examples from the training set')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
parser.add_argument('--epochs', default=180, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=100, type=int,
                        metavar='N', help='mini-batch size (default: 100)')
parser.add_argument('--labeled-batch-size', default=None, type=int,
metavar='N', help="labeled examples per minibatch (default: no constrain)")
parser.add_argument('--lr', '--learning-rate', default=0.05, type=float,
metavar='LR', help='max learning rate')
parser.add_argument('--initial-lr', default=0.0, type=float,
metavar='LR', help='initial learning rate when using linear rampup')
parser.add_argument('--lr-rampup', default=0, type=int, metavar='EPOCHS',
help='length of learning rate rampup in the beginning')
parser.add_argument('--lr-rampdown-epochs', default=210, type=int, metavar='EPOCHS',
help='length of learning rate cosine rampdown (>= length of training)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--nesterov', default=True, type=str2bool,
help='use nesterov momentum', metavar='BOOL')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--ema-decay', default=0.999, type=float, metavar='ALPHA',
help='ema variable decay rate (default: 0.999)')
parser.add_argument('--consistency', default=None, type=float, metavar='WEIGHT',
help='use consistency loss with given weight (default: None)')
parser.add_argument('--consistency-type', default="mse", type=str, metavar='TYPE',
choices=['mse', 'kl'],
help='consistency loss type to use')
parser.add_argument('--consistency-rampup', default=5, type=int, metavar='EPOCHS',
help='length of the consistency loss ramp-up')
parser.add_argument('--logit-distance-cost', default=-1, type=float, metavar='WEIGHT',
help='let the student model have two outputs and use an MSE loss between the logits with the given weight (default: only have one output)')
parser.add_argument('--checkpoint-epochs', default=10, type=int,
                        metavar='EPOCHS', help='checkpoint frequency in epochs, 0 to turn checkpointing off (default: 10)')
parser.add_argument('--evaluation-epochs', default=1, type=int,
metavar='EPOCHS', help='evaluation frequency in epochs, 0 to turn evaluation off (default: 1)')
parser.add_argument('--print-freq', '-p', default=100, type=int,
                        metavar='N', help='print frequency (default: 100)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', type=str2bool,
help='evaluate model on evaluation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--gpu-id', type=str, default='0',
help='gpu id')
parser.add_argument('--dfs-k', type=int, default=50,
help='diffusion k')
parser.add_argument('--fully-supervised', default=False, type=str2bool, metavar='BOOL',
help='is fully-supervised')
parser.add_argument('--isL2', default=True, type=str2bool, metavar='BOOL',
help='is l2 normalized features')
parser.add_argument('--num-labeled', type=int, default=1000,
help='number of labeled instances')
parser.add_argument('--test-mode', type=str, default='',
help='number of labeled instances')
parser.add_argument('--isMT', default=False, type=str2bool, metavar='BOOL',
help='is combined with mean teacher')
return parser
def parse_commandline_args():
return create_parser().parse_args()
def parse_dict_args(**kwargs):
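    # Translate keyword arguments into their CLI spelling before reusing the parser,
    # e.g. parse_dict_args(batch_size=128, lr=0.1) behaves like "--batch-size 128 --lr 0.1".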
def to_cmdline_kwarg(key, value):
if len(key) == 1:
key = "-{}".format(key)
else:
key = "--{}".format(re.sub(r"_", "-", key))
value = str(value)
return key, value
kwargs_pairs = (to_cmdline_kwarg(key, value)
for key, value in kwargs.items())
cmdline_args = list(sum(kwargs_pairs, ()))
LOG.info("Using these command line args: %s", " ".join(cmdline_args))
return create_parser().parse_args(cmdline_args)
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def str2epochs(v):
try:
if len(v) == 0:
epochs = []
else:
epochs = [int(string) for string in v.split(",")]
except:
raise argparse.ArgumentTypeError(
'Expected comma-separated list of integers, got "{}"'.format(v))
if not all(0 < epoch1 < epoch2 for epoch1, epoch2 in zip(epochs[:-1], epochs[1:])):
raise argparse.ArgumentTypeError(
'Expected the epochs to be listed in increasing order')
return epochs
|
[
"re.sub",
"argparse.ArgumentParser",
"logging.getLogger",
"argparse.ArgumentTypeError"
] |
[((502, 527), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (519, 527), False, 'import logging\n'), ((613, 677), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch ImageNet Training"""'}), "(description='PyTorch ImageNet Training')\n", (636, 677), False, 'import argparse\n'), ((7448, 7535), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Expected the epochs to be listed in increasing order"""'], {}), "(\n 'Expected the epochs to be listed in increasing order')\n", (7474, 7535), False, 'import argparse\n'), ((7007, 7060), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (7033, 7060), False, 'import argparse\n'), ((6461, 6482), 're.sub', 're.sub', (['"""_"""', '"""-"""', 'key'], {}), "('_', '-', key)\n", (6467, 6482), False, 'import re\n')]
|
from sys import argv
import boto3
# unfortunately the AWS CLI cannot generate presigned S3 URLs for PutObject requests,
# so we have to do it with a proper AWS SDK
url = boto3.client('s3').generate_presigned_url(
ClientMethod='put_object',
Params={'Bucket': argv[1], 'Key': argv[2]},
ExpiresIn=3600
)
print(url)
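# For illustration: the printed URL accepts an HTTP PUT without further credentials
# for one hour, e.g. curl --upload-file ./local-file "<presigned-url>"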
|
[
"boto3.client"
] |
[((172, 190), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (184, 190), False, 'import boto3\n')]
|
import copy
import json
import random
import pytest
from indy_common.constants import REVOC_TYPE, TAG, TAG_LIMIT_SIZE
from plenum.common.constants import GENERAL_LIMIT_SIZE, REQNACK, REJECT
from plenum.common.types import OPERATION
from plenum.common.util import randomString
from plenum.test.helper import sdk_sign_request_from_dict, sdk_send_signed_requests, sdk_get_replies
@pytest.fixture(scope="module", params=['lt', 'eq', 'gt'])
def _lt_eq_gt(request):
return request.param
@pytest.fixture(scope="module", params=[REVOC_TYPE, TAG])
def _res_field_size(request, _lt_eq_gt):
_field = request.param
_expected = REQNACK if _lt_eq_gt == 'gt' else REJECT
_valid_size = TAG_LIMIT_SIZE if _field == TAG else GENERAL_LIMIT_SIZE
if _lt_eq_gt == 'lt':
return _expected, _field, random.randint(0, _valid_size - 1)
if _lt_eq_gt == 'eq':
return _expected, _field, _valid_size
return _expected, _field, random.randint(_valid_size + 1, 2 * _valid_size)
@pytest.fixture(scope="module")
def revoc_def_req(looper,
sdk_wallet_steward,
build_revoc_def_by_default,
_res_field_size):
_expected, _field, _size = _res_field_size
_req = copy.deepcopy(build_revoc_def_by_default)
_req[OPERATION][_field] = randomString(_size)
return _expected, sdk_sign_request_from_dict(looper, sdk_wallet_steward, _req['operation'])
def test_revoc_def_static_validation_on_field_size(revoc_def_req,
looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_steward):
_expected, _req = revoc_def_req
results = sdk_send_signed_requests(sdk_pool_handle, [json.dumps(_req)])
_reply = sdk_get_replies(looper, results)[0][1]
assert _expected == _reply['op']
|
[
"copy.deepcopy",
"random.randint",
"pytest.fixture",
"json.dumps",
"plenum.common.util.randomString",
"plenum.test.helper.sdk_get_replies",
"plenum.test.helper.sdk_sign_request_from_dict"
] |
[((383, 440), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'params': "['lt', 'eq', 'gt']"}), "(scope='module', params=['lt', 'eq', 'gt'])\n", (397, 440), False, 'import pytest\n'), ((493, 549), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'params': '[REVOC_TYPE, TAG]'}), "(scope='module', params=[REVOC_TYPE, TAG])\n", (507, 549), False, 'import pytest\n'), ((999, 1029), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1013, 1029), False, 'import pytest\n'), ((1234, 1275), 'copy.deepcopy', 'copy.deepcopy', (['build_revoc_def_by_default'], {}), '(build_revoc_def_by_default)\n', (1247, 1275), False, 'import copy\n'), ((1307, 1326), 'plenum.common.util.randomString', 'randomString', (['_size'], {}), '(_size)\n', (1319, 1326), False, 'from plenum.common.util import randomString\n'), ((947, 995), 'random.randint', 'random.randint', (['(_valid_size + 1)', '(2 * _valid_size)'], {}), '(_valid_size + 1, 2 * _valid_size)\n', (961, 995), False, 'import random\n'), ((1350, 1423), 'plenum.test.helper.sdk_sign_request_from_dict', 'sdk_sign_request_from_dict', (['looper', 'sdk_wallet_steward', "_req['operation']"], {}), "(looper, sdk_wallet_steward, _req['operation'])\n", (1376, 1423), False, 'from plenum.test.helper import sdk_sign_request_from_dict, sdk_send_signed_requests, sdk_get_replies\n'), ((810, 844), 'random.randint', 'random.randint', (['(0)', '(_valid_size - 1)'], {}), '(0, _valid_size - 1)\n', (824, 844), False, 'import random\n'), ((1851, 1867), 'json.dumps', 'json.dumps', (['_req'], {}), '(_req)\n', (1861, 1867), False, 'import json\n'), ((1883, 1915), 'plenum.test.helper.sdk_get_replies', 'sdk_get_replies', (['looper', 'results'], {}), '(looper, results)\n', (1898, 1915), False, 'from plenum.test.helper import sdk_sign_request_from_dict, sdk_send_signed_requests, sdk_get_replies\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('name', models.CharField(max_length=255)),
('iso_code', models.CharField(max_length=6)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='GeneralExpertise',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('name', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MOOCExpertise',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('name', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OERExpertise',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('name', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OpenAccessExpertise',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('name', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('email', models.CharField(max_length=255)),
('alternative_contact', models.CharField(max_length=255, blank=True)),
('slug', models.SlugField(max_length=255)),
('job_title', models.CharField(max_length=255, blank=True)),
('institution', models.CharField(max_length=255, blank=True)),
('is_member', models.CharField(choices=[(0, "Don't know"), (1, 'Yes'), (2, 'No')], max_length=10, verbose_name='Open Education Consortium member?')),
('city', models.CharField(max_length=255, blank=True)),
('state_province', models.CharField(max_length=255, blank=True)),
('language_native', models.TextField(blank=True, verbose_name='Native/near native level')),
('language_business', models.TextField(blank=True, verbose_name='Business level')),
('language_conversational', models.TextField(blank=True, verbose_name='Conversational')),
('general_expertise_other', models.TextField(max_length=255, blank=True, verbose_name='Other, please indicate')),
('oer_expertise_other', models.TextField(blank=True, verbose_name='Other, please indicate:')),
('openacess_expertise_other', models.TextField(blank=True, verbose_name='Other, please indicate:')),
('mooc_expertise_other', models.TextField(blank=True)),
('discipline', models.TextField(blank=True, verbose_name='If you have expertise with open education in a particular discipline, please indicate:')),
('personal_statement', models.TextField(blank=True)),
('external_links', models.TextField(blank=True)),
('pub_date', models.DateTimeField(auto_now_add=True)),
('mod_date', models.DateTimeField(auto_now=True)),
('visible', models.BooleanField(default=True)),
('country', models.ForeignKey(to='web.Country', null=True)),
('general_expertise', models.ManyToManyField(to='web.GeneralExpertise', verbose_name='Open Education - General', null=True)),
('mooc_expertise', models.ManyToManyField(to='web.MOOCExpertise', verbose_name='If you have expertise with open education in a particular discipline, please indicate:', null=True)),
('oer_expertise', models.ManyToManyField(to='web.OERExpertise', verbose_name='Open Educational Resources', null=True)),
('openacess_expertise', models.ManyToManyField(to='web.OpenAccessExpertise', verbose_name='MOOCs', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('name', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='person',
name='region',
field=models.ManyToManyField(to='web.Region', verbose_name='Please select the geographic regions in which you have professional experience:*'),
preserve_default=True,
),
]
|
[
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.SlugField",
"django.db.models.AutoField",
"django.db.models.BooleanField",
"django.db.models.DateTimeField"
] |
[((5555, 5701), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""web.Region"""', 'verbose_name': '"""Please select the geographic regions in which you have professional experience:*"""'}), "(to='web.Region', verbose_name=\n 'Please select the geographic regions in which you have professional experience:*'\n )\n", (5577, 5701), False, 'from django.db import models, migrations\n'), ((299, 392), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'verbose_name': '"""ID"""', 'serialize': '(False)', 'primary_key': '(True)'}), "(auto_created=True, verbose_name='ID', serialize=False,\n primary_key=True)\n", (315, 392), False, 'from django.db import models, migrations\n'), ((416, 448), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (432, 448), False, 'from django.db import models, migrations\n'), ((480, 510), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(6)'}), '(max_length=6)\n', (496, 510), False, 'from django.db import models, migrations\n'), ((724, 817), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'verbose_name': '"""ID"""', 'serialize': '(False)', 'primary_key': '(True)'}), "(auto_created=True, verbose_name='ID', serialize=False,\n primary_key=True)\n", (740, 817), False, 'from django.db import models, migrations\n'), ((841, 873), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (857, 873), False, 'from django.db import models, migrations\n'), ((1084, 1177), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'verbose_name': '"""ID"""', 'serialize': '(False)', 'primary_key': '(True)'}), "(auto_created=True, verbose_name='ID', serialize=False,\n primary_key=True)\n", (1100, 1177), False, 'from django.db import models, migrations\n'), ((1201, 1233), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1217, 1233), False, 'from django.db import models, migrations\n'), ((1443, 1536), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'verbose_name': '"""ID"""', 'serialize': '(False)', 'primary_key': '(True)'}), "(auto_created=True, verbose_name='ID', serialize=False,\n primary_key=True)\n", (1459, 1536), False, 'from django.db import models, migrations\n'), ((1560, 1592), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1576, 1592), False, 'from django.db import models, migrations\n'), ((1809, 1902), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'verbose_name': '"""ID"""', 'serialize': '(False)', 'primary_key': '(True)'}), "(auto_created=True, verbose_name='ID', serialize=False,\n primary_key=True)\n", (1825, 1902), False, 'from django.db import models, migrations\n'), ((1926, 1958), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1942, 1958), False, 'from django.db import models, migrations\n'), ((2162, 2255), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'verbose_name': '"""ID"""', 'serialize': '(False)', 'primary_key': '(True)'}), "(auto_created=True, verbose_name='ID', serialize=False,\n primary_key=True)\n", (2178, 2255), False, 'from django.db import models, migrations\n'), ((2285, 2317), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), 
'(max_length=255)\n', (2301, 2317), False, 'from django.db import models, migrations\n'), ((2350, 2382), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2366, 2382), False, 'from django.db import models, migrations\n'), ((2411, 2443), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2427, 2443), False, 'from django.db import models, migrations\n'), ((2486, 2530), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (2502, 2530), False, 'from django.db import models, migrations\n'), ((2558, 2590), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2574, 2590), False, 'from django.db import models, migrations\n'), ((2623, 2667), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (2639, 2667), False, 'from django.db import models, migrations\n'), ((2702, 2746), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (2718, 2746), False, 'from django.db import models, migrations\n'), ((2779, 2916), 'django.db.models.CharField', 'models.CharField', ([], {'choices': '[(0, "Don\'t know"), (1, \'Yes\'), (2, \'No\')]', 'max_length': '(10)', 'verbose_name': '"""Open Education Consortium member?"""'}), '(choices=[(0, "Don\'t know"), (1, \'Yes\'), (2, \'No\')],\n max_length=10, verbose_name=\'Open Education Consortium member?\')\n', (2795, 2916), False, 'from django.db import models, migrations\n'), ((2940, 2984), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (2956, 2984), False, 'from django.db import models, migrations\n'), ((3022, 3066), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (3038, 3066), False, 'from django.db import models, migrations\n'), ((3105, 3174), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""Native/near native level"""'}), "(blank=True, verbose_name='Native/near native level')\n", (3121, 3174), False, 'from django.db import models, migrations\n'), ((3215, 3274), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""Business level"""'}), "(blank=True, verbose_name='Business level')\n", (3231, 3274), False, 'from django.db import models, migrations\n'), ((3321, 3380), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""Conversational"""'}), "(blank=True, verbose_name='Conversational')\n", (3337, 3380), False, 'from django.db import models, migrations\n'), ((3427, 3515), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(255)', 'blank': '(True)', 'verbose_name': '"""Other, please indicate"""'}), "(max_length=255, blank=True, verbose_name=\n 'Other, please indicate')\n", (3443, 3515), False, 'from django.db import models, migrations\n'), ((3553, 3621), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""Other, please indicate:"""'}), "(blank=True, verbose_name='Other, please indicate:')\n", (3569, 3621), False, 'from django.db import models, migrations\n'), ((3670, 3738), 'django.db.models.TextField', 'models.TextField', 
([], {'blank': '(True)', 'verbose_name': '"""Other, please indicate:"""'}), "(blank=True, verbose_name='Other, please indicate:')\n", (3686, 3738), False, 'from django.db import models, migrations\n'), ((3782, 3810), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (3798, 3810), False, 'from django.db import models, migrations\n'), ((3844, 3985), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""If you have expertise with open education in a particular discipline, please indicate:"""'}), "(blank=True, verbose_name=\n 'If you have expertise with open education in a particular discipline, please indicate:'\n )\n", (3860, 3985), False, 'from django.db import models, migrations\n'), ((4017, 4045), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (4033, 4045), False, 'from django.db import models, migrations\n'), ((4083, 4111), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (4099, 4111), False, 'from django.db import models, migrations\n'), ((4143, 4182), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (4163, 4182), False, 'from django.db import models, migrations\n'), ((4214, 4249), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (4234, 4249), False, 'from django.db import models, migrations\n'), ((4280, 4313), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (4299, 4313), False, 'from django.db import models, migrations\n'), ((4344, 4390), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""web.Country"""', 'null': '(True)'}), "(to='web.Country', null=True)\n", (4361, 4390), False, 'from django.db import models, migrations\n'), ((4431, 4537), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""web.GeneralExpertise"""', 'verbose_name': '"""Open Education - General"""', 'null': '(True)'}), "(to='web.GeneralExpertise', verbose_name=\n 'Open Education - General', null=True)\n", (4453, 4537), False, 'from django.db import models, migrations\n'), ((4570, 4740), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""web.MOOCExpertise"""', 'verbose_name': '"""If you have expertise with open education in a particular discipline, please indicate:"""', 'null': '(True)'}), "(to='web.MOOCExpertise', verbose_name=\n 'If you have expertise with open education in a particular discipline, please indicate:'\n , null=True)\n", (4592, 4740), False, 'from django.db import models, migrations\n'), ((4767, 4871), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""web.OERExpertise"""', 'verbose_name': '"""Open Educational Resources"""', 'null': '(True)'}), "(to='web.OERExpertise', verbose_name=\n 'Open Educational Resources', null=True)\n", (4789, 4871), False, 'from django.db import models, migrations\n'), ((4909, 4998), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""web.OpenAccessExpertise"""', 'verbose_name': '"""MOOCs"""', 'null': '(True)'}), "(to='web.OpenAccessExpertise', verbose_name='MOOCs',\n null=True)\n", (4931, 4998), False, 'from django.db import models, migrations\n'), ((5198, 5291), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'verbose_name': '"""ID"""', 'serialize': '(False)', 
'primary_key': '(True)'}), "(auto_created=True, verbose_name='ID', serialize=False,\n primary_key=True)\n", (5214, 5291), False, 'from django.db import models, migrations\n'), ((5315, 5347), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (5331, 5347), False, 'from django.db import models, migrations\n')]
|
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_federated as tff
def get_all_execution_contexts():
return [
('native_local', tff.backends.native.create_local_execution_context()),
('native_sizing', tff.backends.native.create_sizing_execution_context()),
('native_debug',
tff.backends.native.create_thread_debugging_execution_context()),
]
def with_contexts(*args):
"""A decorator for creating tests parameterized by context.
Note: To use this decorator your test is required to inherit from
`parameterized.TestCase`.
The decorator can be called without arguments:
```
@with_contexts
def foo(self):
...
```
or with arguments:
```
@with_contexts(
('label', executor),
...
)
def foo(self):
...
```
If the decorator is specified without arguments or is called with no
arguments, the default contexts used are those returned by
`get_all_execution_contexts`.
  If the decorator is called with arguments, the arguments must be in a form that
  is accepted by `parameterized.named_parameters`.
Args:
*args: Either a test function to be decorated or named executors for the
decorated method, either a single iterable, or a list of tuples or dicts.
Returns:
A test generator to be handled by `parameterized.TestGeneratorMetaclass`.
"""
def decorator(fn, *named_contexts):
if not named_contexts:
named_contexts = get_all_execution_contexts()
@parameterized.named_parameters(*named_contexts)
def wrapped_fn(self, context):
context_stack = tff.framework.get_context_stack()
with context_stack.install(context):
fn(self)
return wrapped_fn
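  # Bare use (@with_contexts) passes the test function itself as the single
  # positional argument; parenthesized use (@with_contexts(...)) passes the named
  # contexts instead, so branch on what was actually received.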
if len(args) == 1 and callable(args[0]):
return decorator(args[0])
else:
return lambda fn: decorator(fn, *args)
class ExecutionContextsTest(parameterized.TestCase):
@with_contexts
def test_federated_value(self):
@tff.federated_computation
def foo(x):
return tff.federated_value(x, tff.SERVER)
result = foo(10)
self.assertIsNotNone(result)
@with_contexts
def test_federated_zip(self):
@tff.federated_computation([tff.FederatedType(tf.int32, tff.CLIENTS)] * 2)
def foo(x):
return tff.federated_zip(x)
result = foo([[1, 2], [3, 4]])
self.assertIsNotNone(result)
@with_contexts
def test_federated_zip_with_twenty_elements(self):
# This test will fail if execution scales factorially with number of
# elements zipped.
num_element = 20
num_clients = 2
@tff.federated_computation([tff.FederatedType(tf.int32, tff.CLIENTS)] *
num_element)
def foo(x):
return tff.federated_zip(x)
value = [list(range(num_clients))] * num_element
result = foo(value)
self.assertIsNotNone(result)
@with_contexts
def test_identity(self):
@tff.federated_computation
def foo(x):
return x
result = foo(10)
self.assertIsNotNone(result)
if __name__ == '__main__':
tff.test.set_no_default_context()
absltest.main()
|
[
"absl.testing.absltest.main",
"tensorflow_federated.test.set_no_default_context",
"tensorflow_federated.backends.native.create_thread_debugging_execution_context",
"tensorflow_federated.federated_zip",
"tensorflow_federated.federated_value",
"tensorflow_federated.framework.get_context_stack",
"tensorflow_federated.backends.native.create_sizing_execution_context",
"absl.testing.parameterized.named_parameters",
"tensorflow_federated.FederatedType",
"tensorflow_federated.backends.native.create_local_execution_context"
] |
[((3672, 3705), 'tensorflow_federated.test.set_no_default_context', 'tff.test.set_no_default_context', ([], {}), '()\n', (3703, 3705), True, 'import tensorflow_federated as tff\n'), ((3708, 3723), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (3721, 3723), False, 'from absl.testing import absltest\n'), ((2134, 2181), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*named_contexts'], {}), '(*named_contexts)\n', (2164, 2181), False, 'from absl.testing import parameterized\n'), ((803, 855), 'tensorflow_federated.backends.native.create_local_execution_context', 'tff.backends.native.create_local_execution_context', ([], {}), '()\n', (853, 855), True, 'import tensorflow_federated as tff\n'), ((882, 935), 'tensorflow_federated.backends.native.create_sizing_execution_context', 'tff.backends.native.create_sizing_execution_context', ([], {}), '()\n', (933, 935), True, 'import tensorflow_federated as tff\n'), ((968, 1031), 'tensorflow_federated.backends.native.create_thread_debugging_execution_context', 'tff.backends.native.create_thread_debugging_execution_context', ([], {}), '()\n', (1029, 1031), True, 'import tensorflow_federated as tff\n'), ((2239, 2272), 'tensorflow_federated.framework.get_context_stack', 'tff.framework.get_context_stack', ([], {}), '()\n', (2270, 2272), True, 'import tensorflow_federated as tff\n'), ((2649, 2683), 'tensorflow_federated.federated_value', 'tff.federated_value', (['x', 'tff.SERVER'], {}), '(x, tff.SERVER)\n', (2668, 2683), True, 'import tensorflow_federated as tff\n'), ((2898, 2918), 'tensorflow_federated.federated_zip', 'tff.federated_zip', (['x'], {}), '(x)\n', (2915, 2918), True, 'import tensorflow_federated as tff\n'), ((3346, 3366), 'tensorflow_federated.federated_zip', 'tff.federated_zip', (['x'], {}), '(x)\n', (3363, 3366), True, 'import tensorflow_federated as tff\n'), ((2822, 2862), 'tensorflow_federated.FederatedType', 'tff.FederatedType', (['tf.int32', 'tff.CLIENTS'], {}), '(tf.int32, tff.CLIENTS)\n', (2839, 2862), True, 'import tensorflow_federated as tff\n'), ((3229, 3269), 'tensorflow_federated.FederatedType', 'tff.FederatedType', (['tf.int32', 'tff.CLIENTS'], {}), '(tf.int32, tff.CLIENTS)\n', (3246, 3269), True, 'import tensorflow_federated as tff\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Module to provide statistics and related functionality.
"""
from translate import lang
from translate.lang import factory
# calling classifyunits() in the constructor is probably not ideal.
# idea: have a property for .classification that calls it if necessary
# If we add units or change translations, statistics are out of date
# Compare with modules/Status.py in pootling that uses a bitmask to
# filter units
# Add support for reading and writing Pootle style .stats files
# Consider providing quickstats
class Statistics(object):
"""Manages statistics for storage objects."""
def __init__(self, sourcelanguage='en', targetlanguage='en', checkerstyle=None):
self.sourcelanguage = sourcelanguage
self.targetlanguage = targetlanguage
self.language = lang.factory.getlanguage(self.sourcelanguage)
# self.init_checker(checkerstyle)
self.classification = {}
def init_checker(self, checkerstyle=None):
from translate.filters import checks
from translate.filters import pofilter
checkerclasses = [checkerstyle or checks.StandardChecker, pofilter.StandardPOChecker]
self.checker = pofilter.POTeeChecker(checkerclasses=checkerclasses)
def fuzzy_units(self):
"""Return a list of fuzzy units."""
if not self.classification:
self.classifyunits()
units = self.getunits()
return [units[item] for item in self.classification["fuzzy"]]
def fuzzy_unitcount(self):
"""Returns the number of fuzzy units."""
return len(self.fuzzy_units())
def translated_units(self):
"""Return a list of translated units."""
if not self.classification:
self.classifyunits()
units = self.getunits()
return [units[item] for item in self.classification["translated"]]
def translated_unitcount(self):
"""Returns the number of translated units."""
return len(self.translated_units())
def untranslated_units(self):
"""Return a list of untranslated units."""
if not self.classification:
self.classifyunits()
units = self.getunits()
return [units[item] for item in self.classification["blank"]]
def untranslated_unitcount(self):
"""Returns the number of untranslated units."""
return len(self.untranslated_units())
def getunits(self):
"""Returns a list of all units in this object."""
return []
def get_source_text(self, units):
"""Joins the unit source strings in a single string of text."""
source_text = ""
for unit in units:
source_text += unit.source + "\n"
plurals = getattr(unit.source, "strings", [])
if plurals:
source_text += "\n".join(plurals[1:])
return source_text
def wordcount(self, text):
"""Returns the number of words in the given text."""
return len(self.language.words(text))
def source_wordcount(self):
"""Returns the number of words in the source text."""
source_text = self.get_source_text(self.getunits())
return self.wordcount(source_text)
def translated_wordcount(self):
"""Returns the number of translated words in this object."""
text = self.get_source_text(self.translated_units())
return self.wordcount(text)
def untranslated_wordcount(self):
"""Returns the number of untranslated words in this object."""
text = self.get_source_text(self.untranslated_units())
return self.wordcount(text)
def classifyunit(self, unit):
"""Returns a list of the classes that the unit belongs to.
:param unit: the unit to classify
"""
classes = ["total"]
if unit.isfuzzy():
classes.append("fuzzy")
if unit.gettargetlen() == 0:
classes.append("blank")
if unit.istranslated():
classes.append("translated")
#TODO: we don't handle checking plurals at all yet, as this is tricky...
source = unit.source
target = unit.target
if isinstance(source, str) and isinstance(target, unicode):
source = source.decode(getattr(unit, "encoding", "utf-8"))
#TODO: decoding should not be done here
# checkresult = self.checker.run_filters(unit, source, target)
checkresult = {}
for checkname, checkmessage in checkresult.iteritems():
classes.append("check-" + checkname)
return classes
def classifyunits(self):
"""Makes a dictionary of which units fall into which classifications.
This method iterates over all units.
"""
self.classification = {}
self.classification["fuzzy"] = []
self.classification["blank"] = []
self.classification["translated"] = []
self.classification["has-suggestion"] = []
self.classification["total"] = []
# for checkname in self.checker.getfilters().keys():
# self.classification["check-" + checkname] = []
for item, unit in enumerate(self.unit_iter()):
classes = self.classifyunit(unit)
# if self.basefile.getsuggestions(item):
# classes.append("has-suggestion")
for classname in classes:
if classname in self.classification:
self.classification[classname].append(item)
else:
                    self.classification[classname] = [item]  # start a list so later append()/remove() calls work
self.countwords()
def countwords(self):
"""Counts the source and target words in each of the units."""
self.sourcewordcounts = []
self.targetwordcounts = []
for unit in self.unit_iter():
self.sourcewordcounts.append([self.wordcount(text) for text in getattr(unit.source, "strings", [""])])
self.targetwordcounts.append([self.wordcount(text) for text in getattr(unit.target, "strings", [""])])
def reclassifyunit(self, item):
"""Updates the classification of a unit in self.classification.
:param item: an integer that is an index in .getunits().
"""
unit = self.getunits()[item]
self.sourcewordcounts[item] = [self.wordcount(text) for text in unit.source.strings]
self.targetwordcounts[item] = [self.wordcount(text) for text in unit.target.strings]
classes = self.classifyunit(unit)
# if self.basefile.getsuggestions(item):
# classes.append("has-suggestion")
for classname, matchingitems in self.classification.items():
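            # Membership changed: the unit either newly belongs to this class or no
            # longer does, so add or remove its index accordingly.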
if (classname in classes) != (item in matchingitems):
if classname in classes:
self.classification[classname].append(item)
else:
self.classification[classname].remove(item)
self.classification[classname].sort()
# self.savestats()
|
[
"translate.lang.factory.getlanguage",
"translate.filters.pofilter.POTeeChecker"
] |
[((1552, 1597), 'translate.lang.factory.getlanguage', 'lang.factory.getlanguage', (['self.sourcelanguage'], {}), '(self.sourcelanguage)\n', (1576, 1597), False, 'from translate import lang\n'), ((1930, 1982), 'translate.filters.pofilter.POTeeChecker', 'pofilter.POTeeChecker', ([], {'checkerclasses': 'checkerclasses'}), '(checkerclasses=checkerclasses)\n', (1951, 1982), False, 'from translate.filters import pofilter\n')]
|
import os
from os.path import dirname, abspath, join
import torch
CODEPATH = dirname(abspath(__file__))
DATAPATH = abspath(join(join(CODEPATH, os.pardir), 'data'))
RESPATH = abspath(join(join(CODEPATH, os.pardir), 'results'))
Tensor = torch.DoubleTensor
|
[
"os.path.abspath",
"os.path.join"
] |
[((87, 104), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (94, 104), False, 'from os.path import dirname, abspath, join\n'), ((130, 155), 'os.path.join', 'join', (['CODEPATH', 'os.pardir'], {}), '(CODEPATH, os.pardir)\n', (134, 155), False, 'from os.path import dirname, abspath, join\n'), ((189, 214), 'os.path.join', 'join', (['CODEPATH', 'os.pardir'], {}), '(CODEPATH, os.pardir)\n', (193, 214), False, 'from os.path import dirname, abspath, join\n')]
|
from Tkinter import PhotoImage
from random import randint
dX=[-3,3,-3,3]
dY=[-3,-3,3,3]
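# nd(n, x) picks the new diagonal-direction index after a bounce; judging from the
# calls in pcg.upd below, n encodes the wall that was hit (1=top, 2=left, 3=bottom,
# 4=right) and x is the current index into dX/dY.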
def nd(n, x):
if n == 1 or n == 3:
return (x + 2) % 4
elif n == 2:
return x + 1
elif n == 4:
return x - 1
class pcg:
def __init__(self, x, y, direction, master):
self.x = x
self.y = y
self.direction = direction
self.master = master
self.pic = self.master.master.canvas.create_image(self.x, self.y, image = self.master.img)
def upd(self):
if 20 > self.x + dX[self.direction]:
self.direction = nd(2, self.direction)
elif self.x + dX[self.direction] > 460:
self.direction = nd(4, self.direction)
self.x += dX[self.direction]
if 20 > self.y + dY[self.direction]:
self.direction = nd(1, self.direction)
elif self.y + dY[self.direction] > 590:
self.direction = nd(3, self.direction)
self.y += dY[self.direction]
self.master.master.canvas.move(self.pic, dX[self.direction], dY[self.direction])
class Packages:
def __init__(self, master):
self.master = master
self.img = PhotoImage(file = 'images\\package.gif')
self.data = []
self.length = 0
def clear(self):
for i in self.data:
self.master.canvas.delete(i.pic)
del self.data
self.data = []
self.length = 0
def add(self, x, y):
self.data.append(pcg(x, y, randint(2, 3), self))
self.length += 1
def upd(self):
x = []
for j in range(self.length):
i = self.data[j]
i.upd()
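            # Squared-distance pickup check (roughly a 45 px radius) between the package and Sonic.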
if (i.x - self.master.sonic.x) ** 2 + (i.y - self.master.sonic.y) ** 2 < 2000:
if self.master.sonic.hp < 120:
self.master.sonic.hp += 25
if self.master.sonic.hp > 120:
self.master.sonic.hp = 120
elif self.master.sonic.weaponmode < 3:
self.master.sonic.weaponmode += 1
else:
self.master.sonic.shield += 240
x = [j]+x
self.master.score += 100
self.length -= len(x)
for i in x:
self.master.canvas.delete(self.data[i].pic)
del self.data[i]
if not self.master.boss.status:
for i in self.data:
self.master.canvas.lift(i.pic)
self.master.enemies.up()
|
[
"random.randint",
"Tkinter.PhotoImage"
] |
[((1202, 1240), 'Tkinter.PhotoImage', 'PhotoImage', ([], {'file': '"""images\\\\package.gif"""'}), "(file='images\\\\package.gif')\n", (1212, 1240), False, 'from Tkinter import PhotoImage\n'), ((1527, 1540), 'random.randint', 'randint', (['(2)', '(3)'], {}), '(2, 3)\n', (1534, 1540), False, 'from random import randint\n')]
|
import numpy as np
import pandas as pd
import statsmodels.api as sm
from sklearn.preprocessing import OneHotEncoder
import statistics
import math
import sys
import itertools
import time
np.seterr(over='raise', under="ignore")
def batch_pp(df, covariates, batch_column, ignore):
"""This function takes in a df, the name of the covariate columns, and the batch column
and it outputs a feature count matrix, feature zero inflation matrix,
batch dummy matrix (one hot vectors as rows), covariate matrix (concatenated one hot vectors )
(covariates coefficient matrix [X_ij], batch dummy matrix [X_batch],
the zero inflation matrix [I_ijk], and count matrix [Y])
NOTE: this df can be a combination of datasets, or an individual dataset"""
# df: [dataframe] input with rows as samples and columns as feature counts.
# should only have OTU names ,covariates, and batch_column in keyspace
# covariates: [List] of the covariates to retain and estimate betas for
# batch_column: [string] column that defines the batches in this dataframe
# ignore: [List] of column names to ignore
################################### Check proper input ###################################
if (batch_column not in df.keys()):
raise ValueError("Column name " + str(batch_column) + " not found")
if (not set(covariates) <= set(df.keys())):
raise ValueError("Covariate columns not found in dataframe")
################################### Turn batch column to one hot vector ###################################
# note: for all features, batch matrix and covariate matrix will be the same.
X_batch = pd.get_dummies(df[batch_column], drop_first=False)
################################### Turn covariate columns covariate matrix ###################################
# number of columns is the number of betas to estimate
X_cov = pd.get_dummies(df[covariates], drop_first=True)
intercept = [1 for _ in range(X_cov.shape[0])]
# adding intercept term
X_cov.insert(0, "intercept", intercept)
################################### Build the feature zero inflation matrix ###################################
# turn numbers to 1 and keep zeroes the way they are
otu_keys = df.keys().drop(ignore)
I = df[otu_keys].replace('0.0', False).astype(bool).replace(False, 0).replace(True, 1)
df_dict = {"X_cov": X_cov,
"X_batch": X_batch,
"I": I,
"Y": df[otu_keys],
"ignore": df[ignore]}
return df_dict
def reduce_batch_effects(Y, I, X_cov, X_batch, verbose=False):
"""This function takes in the output of batch_pp and does the feature-wise batch reduction"""
# INPUT:
# Y: matrix of feature counts with the columns as features and columns as sample counts as rows
# I: matrix of feature zero inflation (1s where values are >=1, 0s o.w.)
# X_cov: covariance matrix (this will give us the betas we need to estimate)
# X_batch: dummy matrix of batch values
# OUTPUT:
# corrected matrix
# merge the dummy variables for the covariates and also for the batch to get the whole design matrix
X_mat = pd.concat([X_cov, X_batch], axis=1).astype(float)
# type conversions and index storing
Y = Y.astype(float)
num_beta_cov = X_cov.shape[1]
num_beta_batch = X_batch.shape[1]
num_features = len(Y.keys())
num_samples = Y.shape[0]
Z = pd.DataFrame(index=Y.index, columns=Y.columns)
# for each of the features, we will calculate the batch reduction coefficients, then reduce the batch effects
count = 0
otu_names = list(Y.keys())
otu_names = [x for x in otu_names if Y[x][Y[x] > 0].count() > 2]
sigma_p_store = {}
beta_params_store = pd.DataFrame(columns=Y.columns, index=X_mat.columns)
beta_cov_store = pd.DataFrame(columns=Y.columns, index=X_cov.columns)
beta_batch_store = {}
start = time.time()
for p in otu_names:
# select only the feature as a row
y_ijp = Y[p]
y_store = Y[p] # storing the original column(unchanged)
I_ijp = I[p].astype(float)
if (count % 100 == 0 and verbose):
print("Estimating β_cov, β_batch, and σ_p for feature {}".format(count))
# --------- Estimate beta_p and beta_batch through OLS regression --------------
# ignore the keys with zero counts and only fit with non zero samples
fit_index = list(y_ijp.to_numpy().astype(float).nonzero()[0])
zero_index = list(set(range(num_samples)) - set(fit_index))
zero_keys = y_store.keys()[zero_index]
# use only non zero counts for index to fit our OLS
y_ijp = y_ijp.iloc[fit_index]
# y_ijp = y_ijp[fit_index] # PREVIOUS VERSION
X_design_mat = X_mat.iloc[fit_index, :]
X_cov_mat = X_cov.iloc[fit_index, :]
X_batch_mat = X_batch.iloc[fit_index, :]
# fit ols
model = sm.OLS(y_ijp, X_design_mat)
res = model.fit()
############# Calculate sigma_p using the standard deviation of previous regression ###########
residuals = y_ijp - X_cov_mat.dot(res.params[:num_beta_cov])
sigma_hat_p = statistics.stdev(residuals)
# store in feature keyed dictionary of standard deviations
sigma_p_store[p] = sigma_hat_p
# separate the beta cov from the beta batch
beta_params = res.params
beta_cov = res.params[:num_beta_cov]
beta_batch = res.params[num_beta_cov:]
# store list of beta parameters indexed by feature
beta_params_store[p] = beta_params
beta_cov_store[p] = beta_cov
beta_batch_store[p] = beta_batch
####################################### Calculate Z_ijp #######################################
z_ijp = (y_ijp - X_cov_mat.dot(res.params[:num_beta_cov])) / sigma_hat_p
Z[p] = z_ijp
count += 1
if count % 25 == 0:
end = time.time()
print('{}/{} completed in: {}s'.format(count, len(otu_names), round(end - start, 2)))
# ------------ LOOP END -----------------------------------------------------------------
end = time.time()
print('Total OLS time: {}s'.format(round(end - start, 2)))
Z = Z.fillna(0)
beta_params_store = beta_params_store.astype(float)
# return X_mat.dot(beta_params_store)
estimates = eb_estimator(X_batch, Z, sigma_p=sigma_p_store, X_add=X_cov.dot(beta_cov_store), verbose=verbose)
return estimates
def eb_estimator(X_batch, Z, sigma_p, X_add, max_itt=6000, verbose=False):
"""This function returns the empirical bayes estimates for gamma_star_p and delta_star_p
as well as the standerdized OTU counts"""
    # X_batch: Batch effects dummy matrix (n x alpha)
    # Z: Matrix of standardized data (n x p)
    # sigma_p: Dict of OTU standard deviations, keyed by OTU
# X_add: matrix to add back after parameter estimation
# max_itt: Maximum number of iterations until convergence
    # smooth_delta: (not currently a parameter) bool flag for whether to replace the 0 values in delta_i by 1
# Standardized matrix init
Z_out = pd.DataFrame(index=Z.index, columns=Z.columns)
# number of genes/otus
G = Z.shape[1]
# number of samples in each batch
N = X_batch.sum(axis=0)
# sample mean for each OTU in each per batch (p X alpha) matrix
gamma_hat = Z.T.dot(X_batch) / N
# parameter estimates for batch effect location - gamma
gamma_bar = gamma_hat.mean(axis=0).astype(float)
tau_bar = ((gamma_hat.sub(gamma_bar) ** 2).sum(axis=0)) / (G - 1)
# parameter estimates for batch effect scale - delta (p X alpha) matrix
delta_hat = (((Z - X_batch.dot(gamma_hat.T)) ** 2).T.dot(X_batch)) / (N - 1)
v_bar = delta_hat.sum(axis=0) / G
s_bar = ((delta_hat.sub(v_bar) ** 2).sum(axis=0)) / (G - 1)
lambda_bar = (v_bar + (2 * s_bar)) / (s_bar)
theta_bar = (v_bar ** 3 + v_bar * s_bar) / (s_bar)
# iteratively solve for gamma_star_ip and delta_star_ip
# initialize the keyed matrices
gamma_star_mat = pd.DataFrame(index=gamma_hat.index, columns=gamma_hat.columns)
delta_star_mat = pd.DataFrame(index=gamma_hat.index, columns=gamma_hat.columns)
batches = gamma_hat.keys()
genes = list(gamma_hat.T.keys())
genes = [x for x in genes if Z[x].max() != 0]
start = time.time()
count = 0
for i in batches:
# get individual variables to focus on
theta_i = theta_bar[i]
lambda_i = lambda_bar[i]
n = N[i]
tau_i = tau_bar[i]
gamma_bar_i = gamma_bar[i]
for p in genes:
gene_counts_in_batch = X_batch[i] * Z[p]
gene_counts_in_batch = gene_counts_in_batch[gene_counts_in_batch != 0]
changed_samples = gene_counts_in_batch.keys()
gamma_hat_ip = gamma_hat[i][p]
# initial iteration values
delta_star_ip_init = delta_hat[i][p]
gamma_star_ip_init = f_gamma_star_ip(tau_i, gamma_bar_i, gamma_hat_ip, delta_star_ip_init, n)
# calculate the next step in the iteration
delta_star_ip_next = f_delta_star_ip(theta_i, lambda_i, gene_counts_in_batch, gamma_star_ip_init, n)
gamma_star_ip_next = f_gamma_star_ip(tau_i, gamma_bar_i, gamma_hat_ip, delta_star_ip_next, n)
conv_delta = abs(delta_star_ip_next - delta_star_ip_init)
conv_gamma = abs(gamma_star_ip_next - gamma_star_ip_init)
itt = 1
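            # Alternate the delta and gamma posterior-mean updates (a simple fixed-point
            # iteration) until both stop changing within the tolerance below.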
while ((conv_delta + conv_gamma) > 1e-8):
# store previous iteration of the values
delta_star_ip_init = delta_star_ip_next
gamma_star_ip_init = gamma_star_ip_next
# take our next "guess" for the values
delta_star_ip_next = f_delta_star_ip(theta_i, lambda_i, gene_counts_in_batch, gamma_star_ip_init, n)
gamma_star_ip_next = f_gamma_star_ip(tau_i, gamma_bar_i, gamma_hat_ip, delta_star_ip_init, n)
# calculate how close we are to convergence
conv_delta = abs(delta_star_ip_next - delta_star_ip_init)
conv_gamma = abs(gamma_star_ip_next - gamma_star_ip_init)
itt += 1
if (itt == max_itt):
raise ValueError("Maximum iteration reached for convergence. Try setting a higher limit")
if (verbose):
print("OTU {} on dataset {} Convergence took {} steps".format(p[-15:], i, itt))
# store found values in the relevant matrices
gamma_star_mat[i][p] = gamma_star_ip_next
delta_star_mat[i][p] = delta_star_ip_next
a = (sigma_p[p] / delta_star_ip_next)
b = (Z[p][changed_samples] - gamma_star_ip_next)
c = X_add[p]
Z_out[p][changed_samples] = (a * b + c)[changed_samples]
count += 1
end = time.time()
print('{}/{} completed in: {}s'.format(count, len(batches), round(end - start, 2)))
# ------------ LOOP END -----------------------------------------------------------------
end = time.time()
print('Total Batch Reduction Parameter Estimation time: {}s'.format(round(end - start, 2)))
Z_out = Z_out.fillna(0)
return {"gamma_star": gamma_star_mat,
"delta_star": delta_star_mat,
"BR": Z_out}
def f_delta_star_ip(theta_bar, lambda_bar, Z_in_batch, gamma_star, n):
"""This is the function to calculate delta star given gamma_star """
# INPUT
# theta_bar: theta estimate for batch i (scale estimate for delta star_ip)
# lambda_bar: lamda estimate for batch i (shape estimate for delta star_ip)
# Z_in_batch: vector of correctd counts for otu p in in batch o
# gamma_star: posterior mean for location parameter of OTU p in batch i
# n: number of samples in batch i
# OUTPUT
# delta_star_ip: posterior mean for location parameter of OTU p in batch i
return (theta_bar + 0.5 * (((Z_in_batch - gamma_star) ** 2).sum())) / ((n / 2) + lambda_bar - 1)
def f_gamma_star_ip(tau_bar, gamma_bar, gamma_hat, delta_star, n):
"""This is the function to calculate gamma star given delta_star"""
# INPUT
# tau_bar: tau estimate in batch i
# gamma_bar: gamma mean estimate for batch i
# gamma_hat: sample mean for each OTU p in batch i
# delta_star: posterior mean for scale parameter of OTU p in batch i
# n: number of samples in batch i
# OUTPUT
# gamma_star_ip: posterior mean for location parameter of OTU p in batch i
return (n * tau_bar * gamma_hat + delta_star * gamma_bar) / (n * tau_bar + delta_star)
def combat(in_df, covariates, batches, ignore, verbose=False):
df = in_df.copy()
for i in range(len(batches.keys())):
print("Performing ComBat Batch Correction for {}".format(batches[i].upper()))
df[df.columns.difference(ignore)] = df[df.columns.difference(ignore)]
t = batch_pp(df, covariates=covariates,batch_column=batches[i], ignore=ignore)
r = reduce_batch_effects(Y=t['Y'], X_cov=t['X_cov'], I=t['I'], X_batch=t['X_batch'], verbose=verbose)
try:
df = pd.concat([r["BR"], t['ignore']], axis=1)
except:
print('Error Occurred - returning original data set')
return ("error", r["BR"])
return df
|
[
"pandas.DataFrame",
"statsmodels.api.OLS",
"numpy.seterr",
"pandas.get_dummies",
"statistics.stdev",
"time.time",
"pandas.concat"
] |
[((187, 226), 'numpy.seterr', 'np.seterr', ([], {'over': '"""raise"""', 'under': '"""ignore"""'}), "(over='raise', under='ignore')\n", (196, 226), True, 'import numpy as np\n'), ((1670, 1720), 'pandas.get_dummies', 'pd.get_dummies', (['df[batch_column]'], {'drop_first': '(False)'}), '(df[batch_column], drop_first=False)\n', (1684, 1720), True, 'import pandas as pd\n'), ((1909, 1956), 'pandas.get_dummies', 'pd.get_dummies', (['df[covariates]'], {'drop_first': '(True)'}), '(df[covariates], drop_first=True)\n', (1923, 1956), True, 'import pandas as pd\n'), ((3459, 3505), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'Y.index', 'columns': 'Y.columns'}), '(index=Y.index, columns=Y.columns)\n', (3471, 3505), True, 'import pandas as pd\n'), ((3782, 3834), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'Y.columns', 'index': 'X_mat.columns'}), '(columns=Y.columns, index=X_mat.columns)\n', (3794, 3834), True, 'import pandas as pd\n'), ((3856, 3908), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'Y.columns', 'index': 'X_cov.columns'}), '(columns=Y.columns, index=X_cov.columns)\n', (3868, 3908), True, 'import pandas as pd\n'), ((3948, 3959), 'time.time', 'time.time', ([], {}), '()\n', (3957, 3959), False, 'import time\n'), ((6193, 6204), 'time.time', 'time.time', ([], {}), '()\n', (6202, 6204), False, 'import time\n'), ((7143, 7189), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'Z.index', 'columns': 'Z.columns'}), '(index=Z.index, columns=Z.columns)\n', (7155, 7189), True, 'import pandas as pd\n'), ((8077, 8139), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'gamma_hat.index', 'columns': 'gamma_hat.columns'}), '(index=gamma_hat.index, columns=gamma_hat.columns)\n', (8089, 8139), True, 'import pandas as pd\n'), ((8161, 8223), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'gamma_hat.index', 'columns': 'gamma_hat.columns'}), '(index=gamma_hat.index, columns=gamma_hat.columns)\n', (8173, 8223), True, 'import pandas as pd\n'), ((8356, 8367), 'time.time', 'time.time', ([], {}), '()\n', (8365, 8367), False, 'import time\n'), ((11118, 11129), 'time.time', 'time.time', ([], {}), '()\n', (11127, 11129), False, 'import time\n'), ((4960, 4987), 'statsmodels.api.OLS', 'sm.OLS', (['y_ijp', 'X_design_mat'], {}), '(y_ijp, X_design_mat)\n', (4966, 4987), True, 'import statsmodels.api as sm\n'), ((5210, 5237), 'statistics.stdev', 'statistics.stdev', (['residuals'], {}), '(residuals)\n', (5226, 5237), False, 'import statistics\n'), ((10905, 10916), 'time.time', 'time.time', ([], {}), '()\n', (10914, 10916), False, 'import time\n'), ((3201, 3236), 'pandas.concat', 'pd.concat', (['[X_cov, X_batch]'], {'axis': '(1)'}), '([X_cov, X_batch], axis=1)\n', (3210, 3236), True, 'import pandas as pd\n'), ((5975, 5986), 'time.time', 'time.time', ([], {}), '()\n', (5984, 5986), False, 'import time\n'), ((13169, 13210), 'pandas.concat', 'pd.concat', (["[r['BR'], t['ignore']]"], {'axis': '(1)'}), "([r['BR'], t['ignore']], axis=1)\n", (13178, 13210), True, 'import pandas as pd\n')]
|
from google.protobuf.symbol_database import Default
import nltk
import random
import pickle
from nltk.corpus.reader.chasen import test
from pandas.core.indexes import period
from statsmodels.tsa.seasonal import _extrapolate_trend
nltk.download('punkt')
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
from numpy.lib.function_base import append, select
lemmatizer = WordNetLemmatizer()
import pandas as pd
import yfinance as yf
import streamlit as st
import statsmodels.api as sm
import datetime as dt
import plotly.graph_objects as go
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
import requests
import json
import numpy as np
from keras.models import load_model
from bs4 import BeautifulSoup
import csv
from requests.exceptions import ConnectionError, ReadTimeout
words=pickle.load(open('words.pkl','rb'))
classes=pickle.load(open('classes.pkl','rb'))
model = load_model("stock_model.h5")
intents=json.loads(open('training.json').read())
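# Rolling simple (sma) and exponential (ema) moving averages of the adjusted close.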
def calcMovingAverage(data, size):
df = data.copy()
df['sma'] = df['Adj Close'].rolling(size).mean()
df['ema'] = df['Adj Close'].ewm(span=size, min_periods=size).mean()
df.dropna(inplace=True)
return df
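# MACD line (12-day EMA minus 26-day EMA) and its 9-day EMA signal line.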
def calc_macd(data):
df = data.copy()
df['ema12'] = df['Adj Close'].ewm(span=12, min_periods=12).mean()
df['ema26'] = df['Adj Close'].ewm(span=26, min_periods=26).mean()
df['macd'] = df['ema12'] - df['ema26']
df['signal'] = df['macd'].ewm(span=9, min_periods=9).mean()
df.dropna(inplace=True)
return df
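# Bollinger bands: rolling mean plus/minus two rolling standard deviations, plus the band width.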
def calcBollinger(data, size):
df = data.copy()
df["sma"] = df['Adj Close'].rolling(size).mean()
df["bolu"] = df["sma"] + 2*df['Adj Close'].rolling(size).std(ddof=0)
df["bold"] = df["sma"] - 2*df['Adj Close'].rolling(size).std(ddof=0)
df["width"] = df["bolu"] - df["bold"]
df.dropna(inplace=True)
return df
def graphMyStock(finalvar,a,b,col):
stock2 = yf.Ticker(finalvar)
info2=stock2.info
ln2=info2['longName']
opt1b, opt2b = st.beta_columns(2)
with opt1b:
numYearMAb = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=a)
with opt2b:
windowSizeMAb = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=b)
start2 = dt.datetime.today()-dt.timedelta(numYearMAb * 365)
end2 = dt.datetime.today()
livedata2 = yf.download(finalvar,start2,end2)
df_ma2 = calcMovingAverage(livedata2, windowSizeMAb)
df_ma2 = df_ma2.reset_index()
fig2 = go.Figure()
fig2.add_trace(
go.Scatter(
x = df_ma2['Date'],
y = df_ma2['Adj Close'],
name = '('+ finalvar+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
mode='lines',
line=dict(color=col)
)
)
fig2.update_layout(showlegend=True,legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01,
))
fig2.update_layout(legend_title_text='Trend')
fig2.update_yaxes(tickprefix="$")
st.plotly_chart(fig2, use_container_width=True)
def graphAllStocks(stocka,stockb,stockc,a,b,col1,col2,col3):
stock2 = yf.Ticker(stocka)
info2=stock2.info
ln2=info2['longName']
st.write('')
st.subheader('**Graph of optimal stocks:** ')
opt1b, opt2b = st.beta_columns(2)
with opt1b:
numYearMAb = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=a)
with opt2b:
windowSizeMAb = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=b)
start2 = dt.datetime.today()-dt.timedelta(numYearMAb * 365)
end2 = dt.datetime.today()
livedata2 = yf.download(stocka,start2,end2)
df_ma2 = calcMovingAverage(livedata2, windowSizeMAb)
df_ma2 = df_ma2.reset_index()
fig2 = go.Figure()
fig2.add_trace(
go.Scatter(
x = df_ma2['Date'],
y = df_ma2['Adj Close'],
name = '('+ stocka+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
mode='lines',
line=dict(color=col1)
)
)
livedata2=yf.download(stockb,start2,end2)
df_ma2= calcMovingAverage(livedata2, windowSizeMAb)
df_ma2= df_ma2.reset_index()
fig2.add_trace(
go.Scatter(
x=df_ma2['Date'],
y=df_ma2['Adj Close'],
name = '('+ stockb+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
mode='lines',
line=dict(color=col2)
))
livedata3=yf.download(stockc,start2,end2)
df_ma3= calcMovingAverage(livedata3, windowSizeMAb)
df_ma3= df_ma3.reset_index()
fig2.add_trace(
go.Scatter(
x=df_ma3['Date'],
y=df_ma3['Adj Close'],
name = '('+ stockc+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
mode='lines',
line=dict(color=col3)
))
fig2.update_layout(showlegend=True,legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1,
))
fig2.update_layout(legend_title_text='Trend')
fig2.update_yaxes(tickprefix="$")
st.plotly_chart(fig2, use_container_width=True)
def RootWordGen(lw):
j=nltk.word_tokenize(lw)
j= [lemmatizer.lemmatize(word.lower()) for word in j]
return(j)
def matrix(sentence, words, show_details=True):
sentence_words= RootWordGen(sentence)
# sentence_words is bag of words - matrix of N words, vocabulary matrix
bag = [0]*len(words)
#matrix contains number of elements = vocabulary, preset value=0
for s in sentence_words:
#traverses root words
for i,w in enumerate(words):
#i is roll no/dir no
#w is unique word
            # Each unique vocabulary word gets an index; the bag ends up as a 0/1 vector
            # with a 1 at every index whose word appears in the input sentence.
if w == s:
# assign 1 if current word is in the vocabulary position
bag[i] = 1
if show_details:
#will give name of bag of unique base word the entered word is found in
print ("found in bag: %s" % w)
#removes commas from list, returns matrix
return(np.array(bag))
def predict_class(sentence, model):
# filter out predictions below a threshold probability
pred= matrix(sentence, words,show_details=False)
res = model.predict(np.array([pred]))[0]
ERROR_THRESHOLD = 0.25
global results
results = [[i,r] for i,r in enumerate(res) if r>ERROR_THRESHOLD]
global results1
results1 = [[i,r] for i,r in enumerate(res)]
print(results)
#for guesses above threshold
#f=open('r.txt','w')
#for all guesses
#f1=open('s.txt','w')
# sort by strength of probability
results.sort(key=lambda x: x[1], reverse=True)
results1.sort(key=lambda x: x[1], reverse=True)
pr=results1[0]
global pp
pp=pr[1]
print(pp)
global return_list
return_list = []
global return_list1
return_list1=[]
for r in results1:
return_list1.append({"intent": classes[r[0]], "probability": str(r[1])})
for r in results:
return_list.append({"intent": classes[r[0]], "probability": str(r[1])})
#for x in return_list1:
# f1.write(str(x))
#for x in return_list:
#print(x)
#f.write(str(x))
return return_list[0]
def getResponse(ints, intents_json):
global tag
tag = ints[0]['intent']
print(tag)
list_of_intents = intents_json['intents']
for i in list_of_intents:
if(i['tag']== tag):
result = random.choice(i['responses'])
break
return result
def FinalPrediction(msg):
ints = predict_class(msg, model)
res = getResponse(ints, intents)
return res
stockdata = pd.read_csv("SP500.csv")
symbols = stockdata['Symbol'].sort_values().tolist()
st.title('Investment Optimizer and Stock Growth Predictor')
#We'll add this when we come up with something
expander=st.beta_expander(label='',expanded=False)
expander.write("This application aims at evaluating stock trends and current news to predict it's future growth. It provides a clean and efficient user interface to view current prices and fluctuation history. It also provides a tool to identify an ideal combination of stocks that one should invest in based on the given budget, using our machine learning and optimization algorithm. We have named our ML model 'ATHENA', which stands for Algorithmic Enhancer")
st.write("")
st.write("")
st.write('**Would you like to know where to invest or understand each Stock?**')
a=st.radio("", ("Invest", "Understand"))
if(a=="Invest"):
budget=st.sidebar.number_input("Enter your budget ($): ")
if(st.sidebar.button("Enter")):
st.header("")
st.header("**Following is the combination of stocks you should invest in: ** ")
st.write("")
st.write('Processing...')
invest=[]
invstock_sym=[]
invstock_name=[]
f= open("SP500.csv",'r')
rd=csv.reader(f)
for x in rd:
if x!=[]:
if x[2]=='badboy':
invstock_sym.append(x[0])
invstock_name.append(x[1])
invstock_price=[]
for ticker in invstock_sym:
ticker_yahoo = yf.Ticker(ticker)
data = ticker_yahoo.history()
last_quote = (data.tail(1)['Close'].iloc[0])
invstock_price.append(float(last_quote))
invstock_conf=[]
st.markdown("""
<style>
.stProgress .st-bo {
background-color: green;
}
</style>
""", unsafe_allow_html=True)
my_bar=st.progress(0)
progresscount=10
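        # For each shortlisted stock, scrape the first Google result for
        # "should you invest in <name> stock", feed the page text to the intent
        # classifier, and keep the (probability, up/down) verdict per stock.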
for badgirl in invstock_name:
checkerb=0
try:
send="https://www.google.com/search?q=should+you+invest+in+ "+badgirl.lower()+" stock"
res=requests.get(send)
except ReadTimeout:
checkerb=checkerb+1
            except (ConnectionError, ConnectionAbortedError, ConnectionRefusedError):
checkerb=checkerb+1
else:
soup=BeautifulSoup(res.content, "html.parser")
all_links=[]
count=0
for i in soup.select("a"):
if count==1:
break
link=i.get("href")
if("/url?q=https://" in link):
if(("/url?q=https://support.google.com" not in link) and ("/url?q=https://accounts.google.com" not in link)):
x=link.split("https://")
y=x[1].split("&sa")
new="https://"+y[0]
all_links.append(new)
z=i.text
if("..." in z):
type2=z.split("...")
name=type2[0]
else:
type1=z.split(" › ")
name=type1[0]
count+=1
list1=[]
c=0
for i in all_links:
if c==1:
break
option=requests.get(i)
soup=BeautifulSoup(option.content, "html.parser")
pageinfo=soup.select("p")
for j in pageinfo:
m=j.text
n=m.split(' ')
for i in n:
list1.append(i)
c=c+1
tex=' '.join(list1)
find=predict_class(tex,model)
varun=[]
varun.append(float(find['probability']))
varun.append(find['intent'])
invstock_conf.append(varun)
progresscount=progresscount+10
my_bar.progress(progresscount)
stocks={}
for i in range(len(invstock_name)):
temp=[]
if invstock_conf[i][1]=='up':
temp.append(invstock_conf[i][0])
temp.append(invstock_price[i])
temp.append(invstock_name[i])
temp.append(invstock_sym[i])
length= len(stocks)
stocks[length]=temp
###### NEED TO GET "STOCKS" DICTIONARY DATA FROM ########
all_stocks={}
for i in range(len(stocks)):
if((budget >= stocks[i][1]) and (stocks[i][0]>0.5)):
n=len(all_stocks)
all_stocks[n]=[stocks[i][0], stocks[i][1], stocks[i][2], stocks[i][3]]
if len(all_stocks)>=3:
st.balloons()
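            # Bucket the affordable picks into quadrants by confidence (>= 0.8) and
            # price (<= $100), then fill the three-stock portfolio from the cheapest
            # candidates in the best quadrant first (see inputs() below).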
quad1={}
quad2={}
quad3={}
quad4={}
for i in range(len(all_stocks)):
if((all_stocks[i][0]>=0.8) and (all_stocks[i][1]<=100)):
quad1[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
elif((all_stocks[i][0]>=0.8) and (all_stocks[i][1]>100)):
quad2[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
elif((all_stocks[i][0]<0.8) and (all_stocks[i][1]<=100)):
quad3[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
else:
quad4[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
def inputs(quad):
global invest
spq=[]
for i in quad:
spq.append(quad[i][1])
length=len(spq)
for i in range(length):
if(len(invest)==3):
break
minval=min(spq)
for i in quad:
if(quad[i][1]==minval):
invest.append(quad[i])
spq.remove(minval)
inputs(quad1)
if(len(invest)<3):
inputs(quad2)
if(len(invest)<3):
inputs(quad3)
if(len(invest)<3):
inputs(quad4)
#stock1 should get 60%
#stock2 should get 30%
#stock3 should get 10%
s1=budget*0.6
s2=budget*0.3
s3=budget*0.1
n_s1=s1//invest[0][1]
n_s2=s2//invest[1][1]
n_s3=s3//invest[2][1]
left=budget-invest[0][1]*n_s1-invest[1][1]*n_s2-invest[2][1]*n_s3
invest_val=[]
for i in range(3):
invest_val.append(invest[i][1])
a_s1=0
a_s2=0
a_s3=0
a_s3=left//invest[2][1]
left=left-a_s3*invest[2][1]
a_s2=left//invest[1][1]
left=left-a_s2*invest[1][1]
a_s1=left//invest[0][1]
left=left-a_s1*invest[0][1]
t_s1=n_s1+a_s1
t_s2=n_s2+a_s2
t_s3=n_s3+a_s3
st.write("")
st.subheader('**Summary:** ')
summary_table={}
names=[]
prices=[]
nstocks=[]
totalcosts=[]
confidences=[]
for i in range(len(invest)):
names.append(invest[i][2])
prices.append(invest[i][1])
if(i==0):
nstocks.append(t_s1)
tcost=t_s1*invest[i][1]
totalcosts.append(tcost)
if(i==1):
nstocks.append(t_s2)
tcost=t_s2*invest[i][1]
totalcosts.append(tcost)
if(i==2):
nstocks.append(t_s3)
tcost=t_s3*invest[i][1]
totalcosts.append(tcost)
confidences.append(invest[i][0])
summary_table["Stock Name"]=names
summary_table["Cost per Stock"]=prices
summary_table["Number to Purchase"]=nstocks
summary_table["Total Cost"]=totalcosts
summary_table["Our Confidence"]=confidences
column_order=["Stock Name", "Cost per Stock", "Number to Purchase", "Total Cost", "Our Confidence"]
summary_df=pd.DataFrame(data=summary_table)
st.dataframe(summary_df)
st.write("")
bala='**Your balance:** '+ '_$' + str(left) +'_'
st.write(bala)
graphAllStocks(invest[0][3],invest[1][3],invest[2][3],14,15,'royalblue','springgreen','indianred')
st.header('**In depth review:** ')
st.write('')
text1='Your first stock: ' + '_' + str(invest[0][2]) + '_'
st.header(text1)
graphMyStock(invest[0][3],1,2,'royalblue')
text1a='**Price:** '+ '_$'+ str(invest[0][1]) + '_'
st.write(text1a)
text1b='**Number of stocks you should buy:** '+ '_' + str(t_s1) + '_'
st.write(text1b)
text1c="**Athena's confidence: **"+'_'+ str(100*invest[0][0])+'%' + '_'
st.write(text1c)
st.write('')
st.write('')
text2='Your second stock: ' +'_'+ str(invest[1][2])+ '_'
st.header(text2)
graphMyStock(invest[1][3],3,4,'springgreen')
text2a='**Price:** '+ '_$'+ str(invest[1][1])+ '_'
st.write(text2a)
text2b='**Number of stocks you should buy:** '+'_'+ str(t_s2)+ '_'
st.write(text2b)
text2c="**Athena's confidence:** "+'_'+ str(100*invest[1][0]) + '%'+'_'
st.write(text2c)
st.write('')
st.write('')
text3= 'Your third stock: '+'_'+ str(invest[2][2])+ '_'
st.header(text3)
graphMyStock(invest[2][3],5,6,'indianred')
text3a='**Price:** '+ '_$'+ str(invest[2][1])+ '_'
st.write(text3a)
        text3b='**Number of stocks you should buy:** '+'_'+ str(t_s3)+'_'
        st.write(text3b)
        text3c="**Athena's confidence:** "+'_'+ str(100*invest[2][0]) + '%'+'_'
st.write(text3c)
st.write('')
st.write('')
st.header("")
st.header("")
st.write("Disclaimer: We are not liable for the results or actions taken on the basis of these predictions.")
else:
st.write('Budget too low to diversify')
if a=='Understand':
ticker = st.sidebar.selectbox(
'Choose a Stock',symbols)
stock = yf.Ticker(ticker)
info=stock.info
ln=info['longName']
st.title(info['longName'])
st.title(ticker)
opt1, opt2 = st.beta_columns(2)
with opt1:
numYearMA = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=0)
with opt2:
windowSizeMA = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=1)
start = dt.datetime.today()-dt.timedelta(numYearMA * 365)
end = dt.datetime.today()
livedata = yf.download(ticker,start,end)
df_ma = calcMovingAverage(livedata, windowSizeMA)
df_ma = df_ma.reset_index()
fig = go.Figure()
fig.add_trace(
go.Scatter(
x = df_ma['Date'],
y = df_ma['Adj Close'],
name = '('+ ticker+ ') '+ "Prices Over Last " + str(numYearMA) + " Year(s)",
mode='lines',
line=dict(color='royalblue')
)
)
compstock2=st.selectbox('Choose stock to compare with: ', symbols)
st.info("If you don't wish to compare, select the same stock again")
livedata2=yf.download(compstock2,start,end)
df_ma2= calcMovingAverage(livedata2, windowSizeMA)
df_ma2= df_ma2.reset_index()
fig.add_trace(
go.Scatter(
x=df_ma2['Date'],
y=df_ma2['Adj Close'],
name = '('+ compstock2+ ') '+ "Prices Over Last " + str(numYearMA) + " Year(s)",
mode='lines',
line=dict(color='firebrick')
))
fig.update_layout(showlegend=True,legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01,
))
fig.update_layout(legend_title_text='Trend')
fig.update_yaxes(tickprefix="$")
st.plotly_chart(fig, use_container_width=True)
livedata3 = yf.download(ticker,start,end)
df_ma3 = calcMovingAverage(livedata3, windowSizeMA)
df_ma3 = df_ma.reset_index()
train_data, test_data = df_ma3[0:int(len(df_ma3)*0.7)], df_ma3[int(len(df_ma3)*0.7):]
training_data = train_data['Adj Close'].values
test_data = test_data['Adj Close'].values
history = [x for x in training_data]
model_predictions = []
N_test_observations = len(test_data)
abcd=0
for time_point in range(N_test_observations):
model = ARIMA(history, order=(4,1,0))
model_fit = model.fit(disp=0)
output = model_fit.forecast()
yhat = output[0]
model_predictions.append(yhat[0])
true_test_value = test_data[time_point]
history.append(true_test_value)
abcd=abcd+1
af=time_point
MSE_error = mean_squared_error(test_data, model_predictions)
test_set_range = df_ma3[int(len(df_ma3)*0.7):]
dts=df_ma3.loc[:,['Date']]
new = pd.date_range(test_set_range.Date.iloc[-1], periods=30)
df1 = pd.DataFrame(new[1:], columns=['Date'])
df_fin = test_set_range.append(df1, ignore_index=True)
mps=[]
for i in range(30):
model = ARIMA(history, order=(4,1,0))
fitted = model.fit(disp=0)
ou=fitted.forecast()
yha = ou[0]
mps.append(yha[0])
history.append(yha[0])
future_dates=[]
dat=[]
for row in df_fin.itertuples():
dat.append(row[2])
mxq=dat[-1]-dt.timedelta(days=29)
future_dates.append(mxq)
for i in range (30):
date=future_dates[-1]+dt.timedelta(days=1)
future_dates.append(date)
myseries=pd.Series(mps)
st.subheader('Future Graph Trend for '+ info['longName']+' using Time Series Analysis')
figtsa=go.Figure()
figtsa.add_trace(
go.Scatter(
x=df_fin['Date'],
y=model_predictions,
name = 'Predicted Prices',
mode='lines'
)
)
figtsa.add_trace(
go.Scatter(
x=df_fin['Date'],
y=test_data,
mode='lines',
name='Previous model prediction graph'
)
)
figtsa.add_trace(
go.Scatter(
x=future_dates,
y=mps,
mode='lines',
name='Future Price Trend'
)
)
st.plotly_chart(figtsa, use_container_width=True)
st.subheader('Bollinger Band')
opta, optb = st.beta_columns(2)
with opta:
numYearBoll = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=6)
with optb:
windowSizeBoll = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=7)
startBoll= dt.datetime.today()-dt.timedelta(numYearBoll * 365)
endBoll = dt.datetime.today()
dataBoll = yf.download(ticker,startBoll,endBoll)
df_boll = calcBollinger(dataBoll, windowSizeBoll)
df_boll = df_boll.reset_index()
figBoll = go.Figure()
figBoll.add_trace(
go.Scatter(
x = df_boll['Date'],
y = df_boll['bolu'],
name = "Upper Band"
)
)
figBoll.add_trace(
go.Scatter(
x = df_boll['Date'],
y = df_boll['sma'],
name = "SMA" + str(windowSizeBoll) + " Over Last " + str(numYearBoll) + " Year(s)"
)
)
figBoll.add_trace(
go.Scatter(
x = df_boll['Date'],
y = df_boll['bold'],
name = "Lower Band"
)
)
figBoll.update_layout(showlegend=True,legend=dict(
orientation="h",
yanchor="bottom",
y=1,
xanchor="left",
x=0
))
figBoll.update_yaxes(tickprefix="$")
st.plotly_chart(figBoll, use_container_width=True)
st.sidebar.title("Stock News")
send="https://www.google.com/search?q=should+you+invest+in+ "+ln.lower()+" stock"
res=requests.get(send)
soup=BeautifulSoup(res.content, "html.parser")
all_links=[]
all_titles=[]
count=0
for i in soup.select("a"):
if count==5:
break
link=i.get("href")
if("/url?q=https://" in link):
if(("/url?q=https://support.google.com" not in link) and ("/url?q=https://accounts.google.com" not in link)):
x=link.split("https://")
y=x[1].split("&sa")
new="https://"+y[0]
all_links.append(new)
z=i.text
if("..." in z):
type2=z.split("...")
name=type2[0]
else:
type1=z.split(" › ")
name=type1[0]
all_titles.append(name)
count+=1
for i in range(len(all_titles)):
make="["+str(all_titles[i])+"]"+" "+"("+str(all_links[i])+")"
st.sidebar.markdown(make)
st.sidebar.write("")
st.sidebar.write("")
list1=[]
c=0
alllinksind=len(all_links)
for x in range(alllinksind):
checkera=0
if c==10:
break
try:
option=requests.get(all_links[x], timeout=3)
except ReadTimeout:
checkera=checkera+1
            except (ConnectionError, ConnectionAbortedError, ConnectionRefusedError):
checkera=checkera+1
else:
if checkera==0:
soup=BeautifulSoup(option.content, "html.parser")
pageinfo=soup.select('p')
paglen=len(pageinfo)
for j in range(paglen):
m=pageinfo[j].text
n=m.split(' ')
for i in n:
list1.append(i)
c=c+1
tex=' '.join(list1)
understand_prob=predict_class(tex,model)
finint=understand_prob['intent']
finprob=100*float(understand_prob['probability'])
if finint=='up':
fininta='Stock prices will go up'
elif finint=='down':
fininta='Stock prices will go down'
fina='**Stock trend prediction: **' + '_'+ str(fininta)+ '_'
finb="**Athena's confidence: **"+ '_'+ str(finprob)+'%' +'_'
st.subheader(fininta)
st.subheader(finb)
st.header("")
st.header("")
st.markdown("""
<style>
.small-font {
font-size:10px !important;
}
</style>
""", unsafe_allow_html=True)
st.markdown('<p class="small-font">Disclaimer: We are not liable for the results or actions taken on the basis of these predictions.</p>', unsafe_allow_html=True)
|
[
"keras.models.load_model",
"streamlit.balloons",
"streamlit.selectbox",
"csv.reader",
"statsmodels.tsa.arima_model.ARIMA",
"streamlit.sidebar.write",
"pandas.read_csv",
"streamlit.radio",
"streamlit.title",
"streamlit.sidebar.title",
"streamlit.sidebar.selectbox",
"nltk.download",
"streamlit.sidebar.button",
"streamlit.beta_expander",
"nltk.word_tokenize",
"pandas.DataFrame",
"streamlit.subheader",
"streamlit.progress",
"nltk.stem.WordNetLemmatizer",
"yfinance.download",
"streamlit.info",
"datetime.timedelta",
"streamlit.sidebar.markdown",
"requests.get",
"streamlit.beta_columns",
"sklearn.metrics.mean_squared_error",
"plotly.graph_objects.Scatter",
"streamlit.plotly_chart",
"datetime.datetime.today",
"pandas.date_range",
"streamlit.header",
"plotly.graph_objects.Figure",
"pandas.Series",
"bs4.BeautifulSoup",
"streamlit.sidebar.number_input",
"streamlit.markdown",
"streamlit.dataframe",
"random.choice",
"streamlit.write",
"numpy.array",
"yfinance.Ticker",
"streamlit.number_input"
] |
[((237, 259), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (250, 259), False, 'import nltk\n'), ((261, 285), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (274, 285), False, 'import nltk\n'), ((393, 412), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (410, 412), False, 'from nltk.stem import WordNetLemmatizer\n'), ((946, 974), 'keras.models.load_model', 'load_model', (['"""stock_model.h5"""'], {}), "('stock_model.h5')\n", (956, 974), False, 'from keras.models import load_model\n'), ((8682, 8706), 'pandas.read_csv', 'pd.read_csv', (['"""SP500.csv"""'], {}), "('SP500.csv')\n", (8693, 8706), True, 'import pandas as pd\n'), ((8766, 8825), 'streamlit.title', 'st.title', (['"""Investment Optimizer and Stock Growth Predictor"""'], {}), "('Investment Optimizer and Stock Growth Predictor')\n", (8774, 8825), True, 'import streamlit as st\n'), ((8886, 8928), 'streamlit.beta_expander', 'st.beta_expander', ([], {'label': '""""""', 'expanded': '(False)'}), "(label='', expanded=False)\n", (8902, 8928), True, 'import streamlit as st\n'), ((9394, 9406), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (9402, 9406), True, 'import streamlit as st\n'), ((9408, 9420), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (9416, 9420), True, 'import streamlit as st\n'), ((9422, 9507), 'streamlit.write', 'st.write', (['"""**Would you like to know where to invest or understand each Stock?**"""'], {}), "('**Would you like to know where to invest or understand each Stock?**'\n )\n", (9430, 9507), True, 'import streamlit as st\n'), ((9506, 9544), 'streamlit.radio', 'st.radio', (['""""""', "('Invest', 'Understand')"], {}), "('', ('Invest', 'Understand'))\n", (9514, 9544), True, 'import streamlit as st\n'), ((2001, 2020), 'yfinance.Ticker', 'yf.Ticker', (['finalvar'], {}), '(finalvar)\n', (2010, 2020), True, 'import yfinance as yf\n'), ((2105, 2123), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (2120, 2123), True, 'import streamlit as st\n'), ((2474, 2493), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (2491, 2493), True, 'import datetime as dt\n'), ((2511, 2546), 'yfinance.download', 'yf.download', (['finalvar', 'start2', 'end2'], {}), '(finalvar, start2, end2)\n', (2522, 2546), True, 'import yfinance as yf\n'), ((2676, 2687), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (2685, 2687), True, 'import plotly.graph_objects as go\n'), ((3362, 3409), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig2'], {'use_container_width': '(True)'}), '(fig2, use_container_width=True)\n', (3377, 3409), True, 'import streamlit as st\n'), ((3489, 3506), 'yfinance.Ticker', 'yf.Ticker', (['stocka'], {}), '(stocka)\n', (3498, 3506), True, 'import yfinance as yf\n'), ((3562, 3574), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (3570, 3574), True, 'import streamlit as st\n'), ((3580, 3625), 'streamlit.subheader', 'st.subheader', (['"""**Graph of optimal stocks:** """'], {}), "('**Graph of optimal stocks:** ')\n", (3592, 3625), True, 'import streamlit as st\n'), ((3662, 3680), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (3677, 3680), True, 'import streamlit as st\n'), ((4031, 4050), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (4048, 4050), True, 'import datetime as dt\n'), ((4068, 4101), 'yfinance.download', 'yf.download', (['stocka', 'start2', 'end2'], {}), '(stocka, start2, end2)\n', (4079, 4101), True, 'import yfinance as 
yf\n'), ((4231, 4242), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (4240, 4242), True, 'import plotly.graph_objects as go\n'), ((4631, 4664), 'yfinance.download', 'yf.download', (['stockb', 'start2', 'end2'], {}), '(stockb, start2, end2)\n', (4642, 4664), True, 'import yfinance as yf\n'), ((5082, 5115), 'yfinance.download', 'yf.download', (['stockc', 'start2', 'end2'], {}), '(stockc, start2, end2)\n', (5093, 5115), True, 'import yfinance as yf\n'), ((5846, 5893), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig2'], {'use_container_width': '(True)'}), '(fig2, use_container_width=True)\n', (5861, 5893), True, 'import streamlit as st\n'), ((5926, 5948), 'nltk.word_tokenize', 'nltk.word_tokenize', (['lw'], {}), '(lw)\n', (5944, 5948), False, 'import nltk\n'), ((7048, 7061), 'numpy.array', 'np.array', (['bag'], {}), '(bag)\n', (7056, 7061), True, 'import numpy as np\n'), ((9577, 9627), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""Enter your budget ($): """'], {}), "('Enter your budget ($): ')\n", (9600, 9627), True, 'import streamlit as st\n'), ((9636, 9662), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Enter"""'], {}), "('Enter')\n", (9653, 9662), True, 'import streamlit as st\n'), ((19822, 19869), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Choose a Stock"""', 'symbols'], {}), "('Choose a Stock', symbols)\n", (19842, 19869), True, 'import streamlit as st\n'), ((19902, 19919), 'yfinance.Ticker', 'yf.Ticker', (['ticker'], {}), '(ticker)\n', (19911, 19919), True, 'import yfinance as yf\n'), ((19977, 20003), 'streamlit.title', 'st.title', (["info['longName']"], {}), "(info['longName'])\n", (19985, 20003), True, 'import streamlit as st\n'), ((20009, 20025), 'streamlit.title', 'st.title', (['ticker'], {}), '(ticker)\n', (20017, 20025), True, 'import streamlit as st\n'), ((20058, 20076), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (20073, 20076), True, 'import streamlit as st\n'), ((20451, 20470), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (20468, 20470), True, 'import datetime as dt\n'), ((20487, 20518), 'yfinance.download', 'yf.download', (['ticker', 'start', 'end'], {}), '(ticker, start, end)\n', (20498, 20518), True, 'import yfinance as yf\n'), ((20634, 20645), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (20643, 20645), True, 'import plotly.graph_objects as go\n'), ((21019, 21074), 'streamlit.selectbox', 'st.selectbox', (['"""Choose stock to compare with: """', 'symbols'], {}), "('Choose stock to compare with: ', symbols)\n", (21031, 21074), True, 'import streamlit as st\n'), ((21080, 21148), 'streamlit.info', 'st.info', (['"""If you don\'t wish to compare, select the same stock again"""'], {}), '("If you don\'t wish to compare, select the same stock again")\n', (21087, 21148), True, 'import streamlit as st\n'), ((21164, 21199), 'yfinance.download', 'yf.download', (['compstock2', 'start', 'end'], {}), '(compstock2, start, end)\n', (21175, 21199), True, 'import yfinance as yf\n'), ((21925, 21971), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {'use_container_width': '(True)'}), '(fig, use_container_width=True)\n', (21940, 21971), True, 'import streamlit as st\n'), ((21997, 22028), 'yfinance.download', 'yf.download', (['ticker', 'start', 'end'], {}), '(ticker, start, end)\n', (22008, 22028), True, 'import yfinance as yf\n'), ((22840, 22888), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['test_data', 'model_predictions'], {}), '(test_data, 
model_predictions)\n', (22858, 22888), False, 'from sklearn.metrics import mean_squared_error\n'), ((22990, 23045), 'pandas.date_range', 'pd.date_range', (['test_set_range.Date.iloc[-1]'], {'periods': '(30)'}), '(test_set_range.Date.iloc[-1], periods=30)\n', (23003, 23045), True, 'import pandas as pd\n'), ((23057, 23096), 'pandas.DataFrame', 'pd.DataFrame', (['new[1:]'], {'columns': "['Date']"}), "(new[1:], columns=['Date'])\n", (23069, 23096), True, 'import pandas as pd\n'), ((23700, 23714), 'pandas.Series', 'pd.Series', (['mps'], {}), '(mps)\n', (23709, 23714), True, 'import pandas as pd\n'), ((23724, 23818), 'streamlit.subheader', 'st.subheader', (["('Future Graph Trend for ' + info['longName'] + ' using Time Series Analysis')"], {}), "('Future Graph Trend for ' + info['longName'] +\n ' using Time Series Analysis')\n", (23736, 23818), True, 'import streamlit as st\n'), ((23824, 23835), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (23833, 23835), True, 'import plotly.graph_objects as go\n'), ((24465, 24514), 'streamlit.plotly_chart', 'st.plotly_chart', (['figtsa'], {'use_container_width': '(True)'}), '(figtsa, use_container_width=True)\n', (24480, 24514), True, 'import streamlit as st\n'), ((24536, 24566), 'streamlit.subheader', 'st.subheader', (['"""Bollinger Band"""'], {}), "('Bollinger Band')\n", (24548, 24566), True, 'import streamlit as st\n'), ((24585, 24603), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (24600, 24603), True, 'import streamlit as st\n'), ((24970, 24989), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (24987, 24989), True, 'import datetime as dt\n'), ((25006, 25045), 'yfinance.download', 'yf.download', (['ticker', 'startBoll', 'endBoll'], {}), '(ticker, startBoll, endBoll)\n', (25017, 25045), True, 'import yfinance as yf\n'), ((25151, 25162), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (25160, 25162), True, 'import plotly.graph_objects as go\n'), ((26377, 26427), 'streamlit.plotly_chart', 'st.plotly_chart', (['figBoll'], {'use_container_width': '(True)'}), '(figBoll, use_container_width=True)\n', (26392, 26427), True, 'import streamlit as st\n'), ((26433, 26463), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Stock News"""'], {}), "('Stock News')\n", (26449, 26463), True, 'import streamlit as st\n'), ((26560, 26578), 'requests.get', 'requests.get', (['send'], {}), '(send)\n', (26572, 26578), False, 'import requests\n'), ((26589, 26630), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.content', '"""html.parser"""'], {}), "(res.content, 'html.parser')\n", (26602, 26630), False, 'from bs4 import BeautifulSoup\n'), ((28915, 28936), 'streamlit.subheader', 'st.subheader', (['fininta'], {}), '(fininta)\n', (28927, 28936), True, 'import streamlit as st\n'), ((28942, 28960), 'streamlit.subheader', 'st.subheader', (['finb'], {}), '(finb)\n', (28954, 28960), True, 'import streamlit as st\n'), ((28966, 28979), 'streamlit.header', 'st.header', (['""""""'], {}), "('')\n", (28975, 28979), True, 'import streamlit as st\n'), ((28985, 28998), 'streamlit.header', 'st.header', (['""""""'], {}), "('')\n", (28994, 28998), True, 'import streamlit as st\n'), ((29004, 29166), 'streamlit.markdown', 'st.markdown', (['"""\n <style>\n .small-font {\n font-size:10px !important;\n }\n </style>\n """'], {'unsafe_allow_html': '(True)'}), '(\n """\n <style>\n .small-font {\n font-size:10px !important;\n }\n </style>\n """\n , unsafe_allow_html=True)\n', (29015, 29166), True, 'import streamlit as st\n'), ((29170, 29342), 
'streamlit.markdown', 'st.markdown', (['"""<p class="small-font">Disclaimer: We are not liable for the results or actions taken on the basis of these predictions.</p>"""'], {'unsafe_allow_html': '(True)'}), '(\n \'<p class="small-font">Disclaimer: We are not liable for the results or actions taken on the basis of these predictions.</p>\'\n , unsafe_allow_html=True)\n', (29181, 29342), True, 'import streamlit as st\n'), ((2163, 2252), 'streamlit.number_input', 'st.number_input', (['"""Insert period (Year): """'], {'min_value': '(1)', 'max_value': '(10)', 'value': '(2)', 'key': 'a'}), "('Insert period (Year): ', min_value=1, max_value=10, value=\n 2, key=a)\n", (2178, 2252), True, 'import streamlit as st\n'), ((2312, 2399), 'streamlit.number_input', 'st.number_input', (['"""Window Size (Day): """'], {'min_value': '(5)', 'max_value': '(500)', 'value': '(20)', 'key': 'b'}), "('Window Size (Day): ', min_value=5, max_value=500, value=20,\n key=b)\n", (2327, 2399), True, 'import streamlit as st\n'), ((2411, 2430), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (2428, 2430), True, 'import datetime as dt\n'), ((2431, 2461), 'datetime.timedelta', 'dt.timedelta', (['(numYearMAb * 365)'], {}), '(numYearMAb * 365)\n', (2443, 2461), True, 'import datetime as dt\n'), ((3720, 3809), 'streamlit.number_input', 'st.number_input', (['"""Insert period (Year): """'], {'min_value': '(1)', 'max_value': '(10)', 'value': '(2)', 'key': 'a'}), "('Insert period (Year): ', min_value=1, max_value=10, value=\n 2, key=a)\n", (3735, 3809), True, 'import streamlit as st\n'), ((3869, 3956), 'streamlit.number_input', 'st.number_input', (['"""Window Size (Day): """'], {'min_value': '(5)', 'max_value': '(500)', 'value': '(20)', 'key': 'b'}), "('Window Size (Day): ', min_value=5, max_value=500, value=20,\n key=b)\n", (3884, 3956), True, 'import streamlit as st\n'), ((3968, 3987), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (3985, 3987), True, 'import datetime as dt\n'), ((3988, 4018), 'datetime.timedelta', 'dt.timedelta', (['(numYearMAb * 365)'], {}), '(numYearMAb * 365)\n', (4000, 4018), True, 'import datetime as dt\n'), ((9674, 9687), 'streamlit.header', 'st.header', (['""""""'], {}), "('')\n", (9683, 9687), True, 'import streamlit as st\n'), ((9697, 9782), 'streamlit.header', 'st.header', (['"""**Following is the combination of stocks you should invest in: ** """'], {}), "('**Following is the combination of stocks you should invest in: ** '\n )\n", (9706, 9782), True, 'import streamlit as st\n'), ((9787, 9799), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (9795, 9799), True, 'import streamlit as st\n'), ((9809, 9834), 'streamlit.write', 'st.write', (['"""Processing..."""'], {}), "('Processing...')\n", (9817, 9834), True, 'import streamlit as st\n'), ((9951, 9964), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (9961, 9964), False, 'import csv\n'), ((10445, 10640), 'streamlit.markdown', 'st.markdown', (['"""\n <style>\n .stProgress .st-bo {\n background-color: green;\n }\n </style>\n """'], {'unsafe_allow_html': '(True)'}), '(\n """\n <style>\n .stProgress .st-bo {\n background-color: green;\n }\n </style>\n """\n , unsafe_allow_html=True)\n', (10456, 10640), True, 'import streamlit as st\n'), ((10653, 10667), 'streamlit.progress', 'st.progress', (['(0)'], {}), '(0)\n', (10664, 10667), True, 'import streamlit as st\n'), ((20128, 20217), 'streamlit.number_input', 'st.number_input', (['"""Insert period (Year): """'], {'min_value': '(1)', 'max_value': '(10)', 'value': 
'(2)', 'key': '(0)'}), "('Insert period (Year): ', min_value=1, max_value=10, value=\n 2, key=0)\n", (20143, 20217), True, 'import streamlit as st\n'), ((20271, 20358), 'streamlit.number_input', 'st.number_input', (['"""Window Size (Day): """'], {'min_value': '(5)', 'max_value': '(500)', 'value': '(20)', 'key': '(1)'}), "('Window Size (Day): ', min_value=5, max_value=500, value=20,\n key=1)\n", (20286, 20358), True, 'import streamlit as st\n'), ((20390, 20409), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (20407, 20409), True, 'import datetime as dt\n'), ((20410, 20439), 'datetime.timedelta', 'dt.timedelta', (['(numYearMA * 365)'], {}), '(numYearMA * 365)\n', (20422, 20439), True, 'import datetime as dt\n'), ((22502, 22533), 'statsmodels.tsa.arima_model.ARIMA', 'ARIMA', (['history'], {'order': '(4, 1, 0)'}), '(history, order=(4, 1, 0))\n', (22507, 22533), False, 'from statsmodels.tsa.arima_model import ARIMA\n'), ((23219, 23250), 'statsmodels.tsa.arima_model.ARIMA', 'ARIMA', (['history'], {'order': '(4, 1, 0)'}), '(history, order=(4, 1, 0))\n', (23224, 23250), False, 'from statsmodels.tsa.arima_model import ARIMA\n'), ((23521, 23542), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(29)'}), '(days=29)\n', (23533, 23542), True, 'import datetime as dt\n'), ((23868, 23960), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': "df_fin['Date']", 'y': 'model_predictions', 'name': '"""Predicted Prices"""', 'mode': '"""lines"""'}), "(x=df_fin['Date'], y=model_predictions, name='Predicted Prices',\n mode='lines')\n", (23878, 23960), True, 'import plotly.graph_objects as go\n'), ((24067, 24167), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': "df_fin['Date']", 'y': 'test_data', 'mode': '"""lines"""', 'name': '"""Previous model prediction graph"""'}), "(x=df_fin['Date'], y=test_data, mode='lines', name=\n 'Previous model prediction graph')\n", (24077, 24167), True, 'import plotly.graph_objects as go\n'), ((24287, 24361), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'future_dates', 'y': 'mps', 'mode': '"""lines"""', 'name': '"""Future Price Trend"""'}), "(x=future_dates, y=mps, mode='lines', name='Future Price Trend')\n", (24297, 24361), True, 'import plotly.graph_objects as go\n'), ((24643, 24732), 'streamlit.number_input', 'st.number_input', (['"""Insert period (Year): """'], {'min_value': '(1)', 'max_value': '(10)', 'value': '(2)', 'key': '(6)'}), "('Insert period (Year): ', min_value=1, max_value=10, value=\n 2, key=6)\n", (24658, 24732), True, 'import streamlit as st\n'), ((24789, 24876), 'streamlit.number_input', 'st.number_input', (['"""Window Size (Day): """'], {'min_value': '(5)', 'max_value': '(500)', 'value': '(20)', 'key': '(7)'}), "('Window Size (Day): ', min_value=5, max_value=500, value=20,\n key=7)\n", (24804, 24876), True, 'import streamlit as st\n'), ((24903, 24922), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (24920, 24922), True, 'import datetime as dt\n'), ((24923, 24954), 'datetime.timedelta', 'dt.timedelta', (['(numYearBoll * 365)'], {}), '(numYearBoll * 365)\n', (24935, 24954), True, 'import datetime as dt\n'), ((25208, 25275), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': "df_boll['Date']", 'y': "df_boll['bolu']", 'name': '"""Upper Band"""'}), "(x=df_boll['Date'], y=df_boll['bolu'], name='Upper Band')\n", (25218, 25275), True, 'import plotly.graph_objects as go\n'), ((25858, 25925), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': "df_boll['Date']", 'y': "df_boll['bold']", 'name': 
'"""Lower Band"""'}), "(x=df_boll['Date'], y=df_boll['bold'], name='Lower Band')\n", (25868, 25925), True, 'import plotly.graph_objects as go\n'), ((27521, 27546), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['make'], {}), '(make)\n', (27540, 27546), True, 'import streamlit as st\n'), ((27556, 27576), 'streamlit.sidebar.write', 'st.sidebar.write', (['""""""'], {}), "('')\n", (27572, 27576), True, 'import streamlit as st\n'), ((27586, 27606), 'streamlit.sidebar.write', 'st.sidebar.write', (['""""""'], {}), "('')\n", (27602, 27606), True, 'import streamlit as st\n'), ((7241, 7257), 'numpy.array', 'np.array', (['[pred]'], {}), '([pred])\n', (7249, 7257), True, 'import numpy as np\n'), ((8478, 8507), 'random.choice', 'random.choice', (["i['responses']"], {}), "(i['responses'])\n", (8491, 8507), False, 'import random\n'), ((10233, 10250), 'yfinance.Ticker', 'yf.Ticker', (['ticker'], {}), '(ticker)\n', (10242, 10250), True, 'import yfinance as yf\n'), ((13805, 13818), 'streamlit.balloons', 'st.balloons', ([], {}), '()\n', (13816, 13818), True, 'import streamlit as st\n'), ((16261, 16273), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (16269, 16273), True, 'import streamlit as st\n'), ((16287, 16316), 'streamlit.subheader', 'st.subheader', (['"""**Summary:** """'], {}), "('**Summary:** ')\n", (16299, 16316), True, 'import streamlit as st\n'), ((17538, 17570), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'summary_table'}), '(data=summary_table)\n', (17550, 17570), True, 'import pandas as pd\n'), ((17584, 17608), 'streamlit.dataframe', 'st.dataframe', (['summary_df'], {}), '(summary_df)\n', (17596, 17608), True, 'import streamlit as st\n'), ((17622, 17634), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (17630, 17634), True, 'import streamlit as st\n'), ((17710, 17724), 'streamlit.write', 'st.write', (['bala'], {}), '(bala)\n', (17718, 17724), True, 'import streamlit as st\n'), ((17852, 17886), 'streamlit.header', 'st.header', (['"""**In depth review:** """'], {}), "('**In depth review:** ')\n", (17861, 17886), True, 'import streamlit as st\n'), ((17900, 17912), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (17908, 17912), True, 'import streamlit as st\n'), ((17998, 18014), 'streamlit.header', 'st.header', (['text1'], {}), '(text1)\n', (18007, 18014), True, 'import streamlit as st\n'), ((18164, 18180), 'streamlit.write', 'st.write', (['text1a'], {}), '(text1a)\n', (18172, 18180), True, 'import streamlit as st\n'), ((18277, 18293), 'streamlit.write', 'st.write', (['text1b'], {}), '(text1b)\n', (18285, 18293), True, 'import streamlit as st\n'), ((18392, 18408), 'streamlit.write', 'st.write', (['text1c'], {}), '(text1c)\n', (18400, 18408), True, 'import streamlit as st\n'), ((18422, 18434), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (18430, 18434), True, 'import streamlit as st\n'), ((18448, 18460), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (18456, 18460), True, 'import streamlit as st\n'), ((18546, 18562), 'streamlit.header', 'st.header', (['text2'], {}), '(text2)\n', (18555, 18562), True, 'import streamlit as st\n'), ((18698, 18714), 'streamlit.write', 'st.write', (['text2a'], {}), '(text2a)\n', (18706, 18714), True, 'import streamlit as st\n'), ((18808, 18824), 'streamlit.write', 'st.write', (['text2b'], {}), '(text2b)\n', (18816, 18824), True, 'import streamlit as st\n'), ((18923, 18939), 'streamlit.write', 'st.write', (['text2c'], {}), '(text2c)\n', (18931, 18939), True, 'import streamlit as st\n'), ((18953, 
18965), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (18961, 18965), True, 'import streamlit as st\n'), ((18979, 18991), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (18987, 18991), True, 'import streamlit as st\n'), ((19076, 19092), 'streamlit.header', 'st.header', (['text3'], {}), '(text3)\n', (19085, 19092), True, 'import streamlit as st\n'), ((19227, 19243), 'streamlit.write', 'st.write', (['text3a'], {}), '(text3a)\n', (19235, 19243), True, 'import streamlit as st\n'), ((19336, 19352), 'streamlit.write', 'st.write', (['text3b'], {}), '(text3b)\n', (19344, 19352), True, 'import streamlit as st\n'), ((19451, 19467), 'streamlit.write', 'st.write', (['text3c'], {}), '(text3c)\n', (19459, 19467), True, 'import streamlit as st\n'), ((19481, 19493), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (19489, 19493), True, 'import streamlit as st\n'), ((19507, 19519), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (19515, 19519), True, 'import streamlit as st\n'), ((19533, 19546), 'streamlit.header', 'st.header', (['""""""'], {}), "('')\n", (19542, 19546), True, 'import streamlit as st\n'), ((19560, 19573), 'streamlit.header', 'st.header', (['""""""'], {}), "('')\n", (19569, 19573), True, 'import streamlit as st\n'), ((19587, 19706), 'streamlit.write', 'st.write', (['"""Disclaimer: We are not liable for the results or actions taken on the basis of these predictions."""'], {}), "(\n 'Disclaimer: We are not liable for the results or actions taken on the basis of these predictions.'\n )\n", (19595, 19706), True, 'import streamlit as st\n'), ((19727, 19766), 'streamlit.write', 'st.write', (['"""Budget too low to diversify"""'], {}), "('Budget too low to diversify')\n", (19735, 19766), True, 'import streamlit as st\n'), ((23630, 23650), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (23642, 23650), True, 'import datetime as dt\n'), ((27809, 27846), 'requests.get', 'requests.get', (['all_links[x]'], {'timeout': '(3)'}), '(all_links[x], timeout=3)\n', (27821, 27846), False, 'import requests\n'), ((10903, 10921), 'requests.get', 'requests.get', (['send'], {}), '(send)\n', (10915, 10921), False, 'import requests\n'), ((11162, 11203), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.content', '"""html.parser"""'], {}), "(res.content, 'html.parser')\n", (11175, 11203), False, 'from bs4 import BeautifulSoup\n'), ((28126, 28170), 'bs4.BeautifulSoup', 'BeautifulSoup', (['option.content', '"""html.parser"""'], {}), "(option.content, 'html.parser')\n", (28139, 28170), False, 'from bs4 import BeautifulSoup\n'), ((12329, 12344), 'requests.get', 'requests.get', (['i'], {}), '(i)\n', (12341, 12344), False, 'import requests\n'), ((12371, 12415), 'bs4.BeautifulSoup', 'BeautifulSoup', (['option.content', '"""html.parser"""'], {}), "(option.content, 'html.parser')\n", (12384, 12415), False, 'from bs4 import BeautifulSoup\n')]
|
"""
Quick smoke test that our implementation of salsa20 does the right thing.
"""
from hypothesis import given
import hypothesis.strategies as st
from Crypto.Cipher import Salsa20
from umash import C, FFI
@given(
length=st.integers(min_value=1, max_value=512),
nonce=st.binary(min_size=8, max_size=8),
key=st.binary(min_size=32, max_size=32),
)
def test_salsa20(length, nonce, key):
expected = Salsa20.new(key, nonce).encrypt(b"\x00" * length)
buf = FFI.new("char[]", length)
C.salsa20_stream(buf, length, nonce, key)
assert bytes(FFI.buffer(buf, length)) == expected
|
[
"hypothesis.strategies.binary",
"Crypto.Cipher.Salsa20.new",
"umash.FFI.new",
"umash.FFI.buffer",
"umash.C.salsa20_stream",
"hypothesis.strategies.integers"
] |
[((472, 497), 'umash.FFI.new', 'FFI.new', (['"""char[]"""', 'length'], {}), "('char[]', length)\n", (479, 497), False, 'from umash import C, FFI\n'), ((502, 543), 'umash.C.salsa20_stream', 'C.salsa20_stream', (['buf', 'length', 'nonce', 'key'], {}), '(buf, length, nonce, key)\n', (518, 543), False, 'from umash import C, FFI\n'), ((226, 265), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(512)'}), '(min_value=1, max_value=512)\n', (237, 265), True, 'import hypothesis.strategies as st\n'), ((277, 310), 'hypothesis.strategies.binary', 'st.binary', ([], {'min_size': '(8)', 'max_size': '(8)'}), '(min_size=8, max_size=8)\n', (286, 310), True, 'import hypothesis.strategies as st\n'), ((320, 355), 'hypothesis.strategies.binary', 'st.binary', ([], {'min_size': '(32)', 'max_size': '(32)'}), '(min_size=32, max_size=32)\n', (329, 355), True, 'import hypothesis.strategies as st\n'), ((412, 435), 'Crypto.Cipher.Salsa20.new', 'Salsa20.new', (['key', 'nonce'], {}), '(key, nonce)\n', (423, 435), False, 'from Crypto.Cipher import Salsa20\n'), ((561, 584), 'umash.FFI.buffer', 'FFI.buffer', (['buf', 'length'], {}), '(buf, length)\n', (571, 584), False, 'from umash import C, FFI\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 1 18:22:15 2018
@author: arpit-mint
"""
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
#Initializing the CNN
classifier=Sequential()
#Step 1: Convolution
classifier.add(Convolution2D(32, kernel_size=(3, 3), input_shape=(64,64,3), activation='relu'))
#Step 2: Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))
#2nd convolution layer
classifier.add(Convolution2D(32, kernel_size=(3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
#Step 3: Flattening
classifier.add(Flatten())
#Step 4: Full Connection
classifier.add(Dense(units=128,activation='relu'))
classifier.add(Dense(units=1,activation='sigmoid'))
#Compiling the CNN
classifier.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['accuracy'])
#Part 2: Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
'dataset/training_set',
target_size=(64, 64),
batch_size=32,
class_mode='binary')
single_prediction = test_datagen.flow_from_directory(
'dataset/single_prediction',
target_size=(64, 64),
batch_size=32,
class_mode='binary')
classifier.fit_generator(
training_set,
steps_per_epoch=8000,
epochs=2,
validation_data=single_prediction,
validation_steps=2)
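#Part 3 (editorial addition, illustrative only): making a single prediction with the
#trained classifier. The image path below is a hypothetical example; the sketch assumes
#the same 64x64 input size used above and the class mapping produced by flow_from_directory.
import numpy as np
from keras.preprocessing import image
test_image = image.load_img('dataset/single_prediction/example.jpg', target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)   #the model expects a batch dimension
result = classifier.predict(test_image)
print(training_set.class_indices, result[0][0])   #sigmoid output near 1 maps to the class with index 1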
|
[
"keras.preprocessing.image.ImageDataGenerator",
"keras.layers.Convolution2D",
"keras.layers.Flatten",
"keras.layers.Dense",
"keras.models.Sequential",
"keras.layers.MaxPooling2D"
] |
[((324, 336), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (334, 336), False, 'from keras.models import Sequential\n'), ((1058, 1154), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'shear_range': '(0.2)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)'}), '(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2,\n horizontal_flip=True)\n', (1076, 1154), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1197, 1234), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (1215, 1234), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((374, 460), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)'], {'kernel_size': '(3, 3)', 'input_shape': '(64, 64, 3)', 'activation': '"""relu"""'}), "(32, kernel_size=(3, 3), input_shape=(64, 64, 3), activation=\n 'relu')\n", (387, 460), False, 'from keras.layers import Convolution2D\n'), ((487, 517), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (499, 517), False, 'from keras.layers import MaxPooling2D\n'), ((557, 613), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(32, kernel_size=(3, 3), activation='relu')\n", (570, 613), False, 'from keras.layers import Convolution2D\n'), ((630, 660), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (642, 660), False, 'from keras.layers import MaxPooling2D\n'), ((698, 707), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (705, 707), False, 'from keras.layers import Flatten\n'), ((749, 784), 'keras.layers.Dense', 'Dense', ([], {'units': '(128)', 'activation': '"""relu"""'}), "(units=128, activation='relu')\n", (754, 784), False, 'from keras.layers import Dense\n'), ((800, 836), 'keras.layers.Dense', 'Dense', ([], {'units': '(1)', 'activation': '"""sigmoid"""'}), "(units=1, activation='sigmoid')\n", (805, 836), False, 'from keras.layers import Dense\n')]
|
# Copyright (C) 2018 <NAME>
#
# SPDX-License-Identifier: MIT
"""This module contains a collection of functions related to
geographical data.
"""
from .utils import sorted_by_key # noqa
from haversine import haversine
# Task 1B
def stations_by_distance(stations, p):
"""returns all stations sorted by distance from coordinate
Input arguments: stations(a list of Monitoring station objects), p(lat, lon)
Returns: list of tuples of form (name, town, distance)
"""
station_distance = []
# calculate and append haversine distances
for station in stations:
distance = haversine(station.coord, p)
station_distance.append((station.name, station.town, distance))
#sort by key
station_distance = sorted_by_key(station_distance, 2)
return station_distance
# Task 1C
def stations_within_radius(stations, centre, r):
"""returns all stations within a radius of a coordinate
Input arguments: stations(a list of Monitoring station objects), centre(lat, lon), r(distance in Km)
Returns: list of tuples of form (name)
"""
station_in_radius= []
for station in stations:
distance2= haversine(station.coord, centre)
if distance2<=r:
station_in_radius.append((station.name))
station_in_radius= sorted(station_in_radius)
return station_in_radius
# Task 1D
def rivers_with_station(stations):
""""returns a set with a list of non duplicate rivers with monitoring stations
Input arguments: stations (a list of Monitoring Station objects)
Returns: Set
"""
rivers = set() #set means no requirement to check for duplicates
for station in stations:
rivers.add(station.river)
return rivers
def stations_by_river(stations):
""""maps river names to the list of station objects on a given river
Input arguments: stations (a list of Monitoring Station objects)
    Returns: Dictionary {river name: [names of monitoring stations on that river]}
"""
rivers = rivers_with_station(stations) #reuses previous function
rivers_stations = {}
#iterate through rivers and stations to generate each key-value pair
for river in rivers:
river_stations = []
for station in stations:
            if station.river == river: #check if the station's river matches the current river
river_stations.append(station.name)
rivers_stations[river] = river_stations
return rivers_stations
#task 1E
def rivers_by_station_number(stations, N):
"""determines the N rivers with the greatest number of monitoring stations
Input arguments: stations (a list of Monitoring Station object), N(number of rivers)
Returns: list of tuples of form (river, number of stations)"""
    if N < 1:
        raise ValueError("N must be greater than 0")
rivers_by_station_number=[]
rivers=[]
for station in stations:
rivers.append(station.river)
for river in rivers:
rivers_by_station_number.append((river, (rivers.count(river)))) #iterating through rivers and counting the number of duplicate entries indicating each station
rivers_by_station_number=sorted_by_key(set(rivers_by_station_number), 1, reverse=True) #sorting by number of stations
N_stations=rivers_by_station_number[:N]
    #include any further rivers whose station count ties with the Nth entry
    i = 0
    while N + i < len(rivers_by_station_number):
        if rivers_by_station_number[N+i][1] == rivers_by_station_number[N-1][1]:
            N_stations.append(rivers_by_station_number[N+i])
            i += 1
        else:
            break
return N_stations
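# Illustrative usage sketch (editorial addition, not part of the original module).
# The functions above only rely on stations exposing .name, .town, .river and .coord,
# so a namedtuple stands in for the real MonitoringStation class here; coordinates are
# example values. Because sorted_by_key comes from a relative import, run this with the
# package on the import path (e.g. via python -m <package>.geo).
if __name__ == "__main__":
    from collections import namedtuple
    FakeStation = namedtuple("FakeStation", "name town river coord")
    demo_stations = [
        FakeStation("Cam", "Cambridge", "River Cam", (52.2053, 0.1218)),
        FakeStation("Jesus Lock", "Cambridge", "River Cam", (52.2143, 0.1203)),
        FakeStation("Thames Ditton", "London", "River Thames", (51.3890, -0.3310)),
    ]
    cambridge = (52.2053, 0.1218)
    print(stations_by_distance(demo_stations, cambridge))        # (name, town, distance) sorted by distance
    print(stations_within_radius(demo_stations, cambridge, 10))  # station names within 10 km
    print(rivers_by_station_number(demo_stations, 1))            # rivers with the most stations, ties included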
|
[
"haversine.haversine"
] |
[((611, 638), 'haversine.haversine', 'haversine', (['station.coord', 'p'], {}), '(station.coord, p)\n', (620, 638), False, 'from haversine import haversine\n'), ((1169, 1201), 'haversine.haversine', 'haversine', (['station.coord', 'centre'], {}), '(station.coord, centre)\n', (1178, 1201), False, 'from haversine import haversine\n')]
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from future.utils import lrange
ALL_RESULTS = lrange(7)
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY, CANCELLED = ALL_RESULTS
Results = ["success", "warnings", "failure", "skipped", "exception", "retry", "cancelled"]
def statusToString(status):
if status is None:
return "not finished"
if status < 0 or status >= len(Results):
return "Invalid status"
return Results[status]
def worst_status(a, b):
# SKIPPED > SUCCESS > WARNINGS > FAILURE > EXCEPTION > RETRY > CANCELLED
# CANCELLED needs to be considered the worst.
for s in (CANCELLED, RETRY, EXCEPTION, FAILURE, WARNINGS, SUCCESS, SKIPPED):
if s in (a, b):
return s
def computeResultAndTermination(obj, result, previousResult):
possible_overall_result = result
terminate = False
if result == FAILURE:
if not obj.flunkOnFailure:
possible_overall_result = SUCCESS
if obj.warnOnFailure:
possible_overall_result = WARNINGS
if obj.flunkOnFailure:
possible_overall_result = FAILURE
if obj.haltOnFailure:
terminate = True
elif result == WARNINGS:
if not obj.warnOnWarnings:
possible_overall_result = SUCCESS
else:
possible_overall_result = WARNINGS
if obj.flunkOnWarnings:
possible_overall_result = FAILURE
elif result in (EXCEPTION, RETRY, CANCELLED):
terminate = True
result = worst_status(previousResult, possible_overall_result)
return result, terminate
class ResultComputingConfigMixin(object):
haltOnFailure = False
flunkOnWarnings = False
flunkOnFailure = True
warnOnWarnings = False
warnOnFailure = False
resultConfig = [
"haltOnFailure",
"flunkOnWarnings",
"flunkOnFailure",
"warnOnWarnings",
"warnOnFailure",
]
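# Illustrative sketch (editorial addition, not part of the upstream Buildbot file).
# worst_status() returns whichever of its two arguments sits earlier in the severity
# ordering CANCELLED > RETRY > EXCEPTION > FAILURE > WARNINGS > SUCCESS > SKIPPED, and
# computeResultAndTermination() combines that with the *On* flags of a step-like object
# such as ResultComputingConfigMixin.
if __name__ == '__main__':
    assert worst_status(SUCCESS, FAILURE) == FAILURE
    assert worst_status(SKIPPED, WARNINGS) == WARNINGS
    assert worst_status(RETRY, CANCELLED) == CANCELLED

    class _Step(ResultComputingConfigMixin):
        haltOnFailure = True

    # a failure with haltOnFailure set flunks the build and stops it
    assert computeResultAndTermination(_Step(), FAILURE, SUCCESS) == (FAILURE, True)
    # warnings are ignored unless warnOnWarnings/flunkOnWarnings are set
    assert computeResultAndTermination(_Step(), WARNINGS, SUCCESS) == (SUCCESS, False)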
|
[
"future.utils.lrange"
] |
[((830, 839), 'future.utils.lrange', 'lrange', (['(7)'], {}), '(7)\n', (836, 839), False, 'from future.utils import lrange\n')]
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""ResNetVariant for Detection."""
from zeus.common import ClassType, ClassFactory
from .deformation import Deformation
from zeus.modules.operators import ops
from zeus.modules.operators import PruneConv2DFilter, PruneBatchNormFilter, PruneLinearFilter
@ClassFactory.register(ClassType.NETWORK)
class PruneDeformation(Deformation):
"""Prune any Network."""
def __init__(self, desc, from_graph=False, weight_file=None):
super(PruneDeformation, self).__init__(desc, from_graph, weight_file)
self.is_adaptive_weight = True
def deform(self):
"""Deform Network."""
if not self.props:
return
for name, module in self.model.named_modules():
if isinstance(module, ops.Conv2d):
PruneConv2DFilter(module, self.props).filter()
elif isinstance(module, ops.BatchNorm2d):
PruneBatchNormFilter(module, self.props).filter()
elif isinstance(module, ops.Linear):
PruneLinearFilter(module, self.props).filter()
|
[
"zeus.modules.operators.PruneConv2DFilter",
"zeus.modules.operators.PruneBatchNormFilter",
"zeus.modules.operators.PruneLinearFilter",
"zeus.common.ClassFactory.register"
] |
[((693, 733), 'zeus.common.ClassFactory.register', 'ClassFactory.register', (['ClassType.NETWORK'], {}), '(ClassType.NETWORK)\n', (714, 733), False, 'from zeus.common import ClassType, ClassFactory\n'), ((1202, 1239), 'zeus.modules.operators.PruneConv2DFilter', 'PruneConv2DFilter', (['module', 'self.props'], {}), '(module, self.props)\n', (1219, 1239), False, 'from zeus.modules.operators import PruneConv2DFilter, PruneBatchNormFilter, PruneLinearFilter\n'), ((1319, 1359), 'zeus.modules.operators.PruneBatchNormFilter', 'PruneBatchNormFilter', (['module', 'self.props'], {}), '(module, self.props)\n', (1339, 1359), False, 'from zeus.modules.operators import PruneConv2DFilter, PruneBatchNormFilter, PruneLinearFilter\n'), ((1434, 1471), 'zeus.modules.operators.PruneLinearFilter', 'PruneLinearFilter', (['module', 'self.props'], {}), '(module, self.props)\n', (1451, 1471), False, 'from zeus.modules.operators import PruneConv2DFilter, PruneBatchNormFilter, PruneLinearFilter\n')]
|
from wsgiref.simple_server import make_server
import oauth2
import oauth2.grant
import oauth2.error
import oauth2.store.memory
import oauth2.tokengenerator
import oauth2.web.wsgi
# Create a SiteAdapter to interact with the user.
# This can be used to display confirmation dialogs and the like.
class ExampleSiteAdapter(oauth2.web.AuthorizationCodeGrantSiteAdapter,
oauth2.web.ImplicitGrantSiteAdapter):
def authenticate(self, request, environ, scopes, client):
# Check if the user has granted access
if request.post_param("confirm") == "confirm":
return {}
raise oauth2.error.UserNotAuthenticated
def render_auth_page(self, request, response, environ, scopes, client):
response.body = '''
<html>
<body>
<form method="POST" name="confirmation_form">
<input type="submit" name="confirm" value="confirm" />
<input type="submit" name="deny" value="deny" />
</form>
</body>
</html>'''
return response
def user_has_denied_access(self, request):
# Check if the user has denied access
if request.post_param("deny") == "deny":
return True
return False
# Create an in-memory storage to store your client apps.
client_store = oauth2.store.memory.ClientStore()
# Add a client
client_store.add_client(client_id="abc", client_secret="xyz", redirect_uris=["http://localhost/callback"])
site_adapter = ExampleSiteAdapter()
# Create an in-memory storage to store issued tokens.
# LocalTokenStore can store access and auth tokens
token_store = oauth2.store.memory.TokenStore()
# Create the controller.
provider = oauth2.Provider(
access_token_store=token_store,
auth_code_store=token_store,
client_store=client_store,
token_generator=oauth2.tokengenerator.Uuid4TokenGenerator()
)
# Add Grants you want to support
provider.add_grant(oauth2.grant.AuthorizationCodeGrant(site_adapter=site_adapter))
provider.add_grant(oauth2.grant.ImplicitGrant(site_adapter=site_adapter))
# Add refresh token capability and set expiration time of access tokens to 30 days
provider.add_grant(oauth2.grant.RefreshToken(expires_in=2592000))
# Wrap the controller with the Wsgi adapter
app = oauth2.web.wsgi.Application(provider=provider)
if __name__ == "__main__":
httpd = make_server('', 8080, app)
httpd.serve_forever()
|
[
"oauth2.web.wsgi.Application",
"oauth2.grant.ImplicitGrant",
"oauth2.store.memory.TokenStore",
"oauth2.tokengenerator.Uuid4TokenGenerator",
"oauth2.grant.RefreshToken",
"wsgiref.simple_server.make_server",
"oauth2.store.memory.ClientStore",
"oauth2.grant.AuthorizationCodeGrant"
] |
[((1294, 1327), 'oauth2.store.memory.ClientStore', 'oauth2.store.memory.ClientStore', ([], {}), '()\n', (1325, 1327), False, 'import oauth2\n'), ((1607, 1639), 'oauth2.store.memory.TokenStore', 'oauth2.store.memory.TokenStore', ([], {}), '()\n', (1637, 1639), False, 'import oauth2\n'), ((2252, 2298), 'oauth2.web.wsgi.Application', 'oauth2.web.wsgi.Application', ([], {'provider': 'provider'}), '(provider=provider)\n', (2279, 2298), False, 'import oauth2\n'), ((1913, 1975), 'oauth2.grant.AuthorizationCodeGrant', 'oauth2.grant.AuthorizationCodeGrant', ([], {'site_adapter': 'site_adapter'}), '(site_adapter=site_adapter)\n', (1948, 1975), False, 'import oauth2\n'), ((1996, 2049), 'oauth2.grant.ImplicitGrant', 'oauth2.grant.ImplicitGrant', ([], {'site_adapter': 'site_adapter'}), '(site_adapter=site_adapter)\n', (2022, 2049), False, 'import oauth2\n'), ((2154, 2199), 'oauth2.grant.RefreshToken', 'oauth2.grant.RefreshToken', ([], {'expires_in': '(2592000)'}), '(expires_in=2592000)\n', (2179, 2199), False, 'import oauth2\n'), ((2339, 2365), 'wsgiref.simple_server.make_server', 'make_server', (['""""""', '(8080)', 'app'], {}), "('', 8080, app)\n", (2350, 2365), False, 'from wsgiref.simple_server import make_server\n'), ((1814, 1857), 'oauth2.tokengenerator.Uuid4TokenGenerator', 'oauth2.tokengenerator.Uuid4TokenGenerator', ([], {}), '()\n', (1855, 1857), False, 'import oauth2\n')]
|
import astropy
from astropy.cosmology import WMAP9 as cosmo
z = cosmo.comoving_distance([0.5, 1.0, 1.5])
print(z)
|
[
"astropy.cosmology.WMAP9.comoving_distance"
] |
[((68, 108), 'astropy.cosmology.WMAP9.comoving_distance', 'cosmo.comoving_distance', (['[0.5, 1.0, 1.5]'], {}), '([0.5, 1.0, 1.5])\n', (91, 108), True, 'from astropy.cosmology import WMAP9 as cosmo\n')]
|
from abc import ABC, abstractmethod
from typing import Any, TypeVar, Optional
GOType = TypeVar("GOType", bound="GameObjectInterface")
class GameObjectInterface(ABC):
"""
    A game object is a composable element in a game.
"""
_parent: Optional["GameObjectInterface"] = None
def __init_subclass__(cls, **kwargs):
cls._message_handlers_cache = {}
super().__init_subclass__(**kwargs)
@abstractmethod
def _iter_game_object_children(self):
raise NotImplementedError
def draw(self: GOType, camera: Any = None) -> GOType:
"""
Draw object using the given camera.
        The default implementation is empty. This method is useful to integrate
        with game libraries that use a canvas-like rendering metaphor.
"""
return self
def step(self: GOType, dt: float) -> GOType:
"""
Update object by evolving a single step of duration dt.
Args:
dt: Duration of time step.
"""
for child in self._iter_game_object_children():
child.step(dt)
return self
def process_message(self, msg, /, *args, sender=None):
"""
Process message.
        The default implementation looks up a method named handler_<msg>_message()
        and executes it, forwarding any positional arguments. The sender object
is passed as a keyword argument and other keyword arguments can be
either forwarded or influence the way the message is processed.
"""
try:
fn = self._message_handlers_cache[msg]
except KeyError:
msg = msg.replace("-", "_")
name = f"handler_{msg}_message"
cls = type(self)
fn = self._message_handlers_cache[msg] = getattr(cls, name)
fn(self, sender, *args)
def send_message(self, msg, *args, **kwargs):
"""
Send message to parent.
"""
kwargs.setdefault("sender", self)
self._parent.send(msg, *args, **kwargs)
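# Illustrative sketch (editorial addition, not part of the original module).
# A minimal concrete subclass: the only abstract method is _iter_game_object_children,
# and messages are dispatched to methods following the handler_<msg>_message() naming
# convention that process_message() resolves and caches per class.
if __name__ == "__main__":
    class Bullet(GameObjectInterface):
        def _iter_game_object_children(self):
            return iter(())

        def handler_hit_message(self, sender, damage):
            print(f"hit by {sender!r} for {damage} damage")

    b = Bullet()
    b.process_message("hit", 10, sender="wall")  # -> handler_hit_message(b, "wall", 10)
    b.step(0.016)  # no children, so this simply returns b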
|
[
"typing.TypeVar"
] |
[((88, 134), 'typing.TypeVar', 'TypeVar', (['"""GOType"""'], {'bound': '"""GameObjectInterface"""'}), "('GOType', bound='GameObjectInterface')\n", (95, 134), False, 'from typing import Any, TypeVar, Optional\n')]
|
import bravado
import pytest
import requests
from client import BaseDevicesApiClient, ManagementClient, \
SimpleInternalClient, SimpleManagementClient
from common import Device, DevAuthorizer, device_auth_req, \
make_devid, explode_jwt
class TestToken(ManagementClient):
intclient = SimpleInternalClient()
devapi = BaseDevicesApiClient()
def test_token(self):
d = Device()
da = DevAuthorizer()
url = self.devapi.make_api_url("/auth_requests")
# poke devauth so that device appears
rsp = device_auth_req(url, da, d)
assert rsp.status_code == 401
# try to find our devices in all devices listing
mc = SimpleManagementClient()
dev = mc.find_device_by_identity(d.identity)
self.log.debug('found matching device with ID: %s', dev.id)
devid = dev.id
# extract authentication data set ID
aid = dev.auth_sets[0].id
try:
self.accept_device(devid, aid)
except bravado.exception.HTTPError as e:
assert e.response.status_code == 204
# device is accepted, we should get a token now
rsp = device_auth_req(url, da, d)
assert rsp.status_code == 200
da.parse_rsp_payload(d, rsp.text)
assert len(d.token) > 0
self.log.info("device token: %s", d.token)
thdr, tclaims, tsign = explode_jwt(d.token)
assert 'typ' in thdr and thdr['typ'] == 'JWT'
assert 'jti' in tclaims
assert 'exp' in tclaims
assert 'sub' in tclaims and tclaims['sub'] == devid
assert 'iss' in tclaims and tclaims['iss'] == 'Mender'
# TODO: signature verification?
# verify token; the token is to be placed in the Authorization header
# and it looks like bravado cannot handle a POST request with no data
# in body, hence we fall back to sending request directly
verify_url = self.intclient.make_api_url("/tokens/verify")
self.log.info("verify URL: %s", verify_url)
auth_hdr = 'Bearer {}'.format(d.token)
# no auth header should raise an error
rsp = requests.post(verify_url, data='')
assert rsp.status_code == 401
# successful verification
rsp = requests.post(verify_url, data='',
headers={'Authorization': auth_hdr})
assert rsp.status_code == 200
# use a bogus token that is not a valid JWT
rsp = requests.post(verify_url, data='',
headers={'Authorization': 'bogus'})
assert rsp.status_code == 401
# or a correct token with data appended at the end
rsp = requests.post(verify_url, data='',
headers={'Authorization': auth_hdr + "==foo"})
assert rsp.status_code == 401
# bravado cannot handle DELETE requests either
# self.client.tokens.delete_tokens_id(id=tclaims['jti'])
# use requests instead
rsp = requests.delete(self.make_api_url('/tokens/{}'.format(tclaims['jti'])))
assert rsp.status_code == 204
# unsuccessful verification
rsp = requests.post(verify_url, data='',
headers={'Authorization': auth_hdr})
assert rsp.status_code == 401
|
[
"common.Device",
"client.SimpleManagementClient",
"client.SimpleInternalClient",
"common.DevAuthorizer",
"common.explode_jwt",
"requests.post",
"client.BaseDevicesApiClient",
"common.device_auth_req"
] |
[((299, 321), 'client.SimpleInternalClient', 'SimpleInternalClient', ([], {}), '()\n', (319, 321), False, 'from client import BaseDevicesApiClient, ManagementClient, SimpleInternalClient, SimpleManagementClient\n'), ((336, 358), 'client.BaseDevicesApiClient', 'BaseDevicesApiClient', ([], {}), '()\n', (356, 358), False, 'from client import BaseDevicesApiClient, ManagementClient, SimpleInternalClient, SimpleManagementClient\n'), ((398, 406), 'common.Device', 'Device', ([], {}), '()\n', (404, 406), False, 'from common import Device, DevAuthorizer, device_auth_req, make_devid, explode_jwt\n'), ((420, 435), 'common.DevAuthorizer', 'DevAuthorizer', ([], {}), '()\n', (433, 435), False, 'from common import Device, DevAuthorizer, device_auth_req, make_devid, explode_jwt\n'), ((554, 581), 'common.device_auth_req', 'device_auth_req', (['url', 'da', 'd'], {}), '(url, da, d)\n', (569, 581), False, 'from common import Device, DevAuthorizer, device_auth_req, make_devid, explode_jwt\n'), ((691, 715), 'client.SimpleManagementClient', 'SimpleManagementClient', ([], {}), '()\n', (713, 715), False, 'from client import BaseDevicesApiClient, ManagementClient, SimpleInternalClient, SimpleManagementClient\n'), ((1166, 1193), 'common.device_auth_req', 'device_auth_req', (['url', 'da', 'd'], {}), '(url, da, d)\n', (1181, 1193), False, 'from common import Device, DevAuthorizer, device_auth_req, make_devid, explode_jwt\n'), ((1391, 1411), 'common.explode_jwt', 'explode_jwt', (['d.token'], {}), '(d.token)\n', (1402, 1411), False, 'from common import Device, DevAuthorizer, device_auth_req, make_devid, explode_jwt\n'), ((2145, 2179), 'requests.post', 'requests.post', (['verify_url'], {'data': '""""""'}), "(verify_url, data='')\n", (2158, 2179), False, 'import requests\n'), ((2267, 2338), 'requests.post', 'requests.post', (['verify_url'], {'data': '""""""', 'headers': "{'Authorization': auth_hdr}"}), "(verify_url, data='', headers={'Authorization': auth_hdr})\n", (2280, 2338), False, 'import requests\n'), ((2472, 2542), 'requests.post', 'requests.post', (['verify_url'], {'data': '""""""', 'headers': "{'Authorization': 'bogus'}"}), "(verify_url, data='', headers={'Authorization': 'bogus'})\n", (2485, 2542), False, 'import requests\n'), ((2683, 2768), 'requests.post', 'requests.post', (['verify_url'], {'data': '""""""', 'headers': "{'Authorization': auth_hdr + '==foo'}"}), "(verify_url, data='', headers={'Authorization': auth_hdr +\n '==foo'})\n", (2696, 2768), False, 'import requests\n'), ((3160, 3231), 'requests.post', 'requests.post', (['verify_url'], {'data': '""""""', 'headers': "{'Authorization': auth_hdr}"}), "(verify_url, data='', headers={'Authorization': auth_hdr})\n", (3173, 3231), False, 'import requests\n')]
|
#!/usr/bin/env python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import tempfile
import numpy as np
import six
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.distribute import multi_worker_training_state
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from rl.callbacks import Callback
class TensorBoard(Callback):
# pylint: disable=line-too-long
"""Enable visualizations for TensorBoard.
TensorBoard is a visualization tool provided with TensorFlow.
This callback logs events for TensorBoard, including:
* Metrics summary plots
* Training graph visualization
* Activation histograms
* Sampled profiling
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```sh
tensorboard --logdir=path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Arguments:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation and
weight histograms for the layers of the model. If set to 0, histograms
won't be computed. Validation data (or split) must be specified for
histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard. The log file
can become quite large when write_graph is set to True.
write_images: whether to write model weights to visualize as image in
TensorBoard.
update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
writes the losses and metrics to TensorBoard after each batch. The same
applies for `'epoch'`. If using an integer, let's say `1000`, the
callback will write the metrics and losses to TensorBoard every 1000
samples. Note that writing too frequently to TensorBoard can slow down
your training.
profile_batch: Profile the batch to sample compute characteristics. By
default, it will profile the second batch. Set profile_batch=0 to
disable profiling. Must run in TensorFlow eager mode.
embeddings_freq: frequency (in epochs) at which embedding layers will
be visualized. If set to 0, embeddings won't be visualized.
embeddings_metadata: a dictionary which maps layer name to a file name in
which metadata for this embedding layer is saved. See the
[details](
https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about metadata files format. In case if the same metadata file is
used for all embedding layers, string can be passed.
Raises:
ValueError: If histogram_freq is set and no validation data is provided.
"""
# pylint: enable=line-too-long
def __init__(self,
log_dir='logs',
histogram_freq=0,
write_graph=True,
write_images=False,
update_freq='epoch',
profile_batch=2,
embeddings_freq=0,
embeddings_metadata=None,
**kwargs):
super(TensorBoard, self).__init__()
self._validate_kwargs(kwargs)
self.log_dir = log_dir
self.histogram_freq = histogram_freq
self.write_graph = write_graph
self.write_images = write_images
if update_freq == 'batch':
self.update_freq = 1
else:
self.update_freq = update_freq
self.embeddings_freq = embeddings_freq
self.embeddings_metadata = embeddings_metadata
self._samples_seen = 0
self._samples_seen_at_last_write = 0
self._current_batch = 0
self._total_batches_seen = 0
self._total_val_batches_seen = 0
# A collection of file writers currently in use, to be closed when
# training ends for this callback. Writers are keyed by the
# directory name under the root logdir: e.g., "train" or
# "validation".
self._writers = {}
self._train_run_name = 'train'
self._validation_run_name = 'validation'
self._profile_batch = profile_batch
# True when a trace is running.
self._is_tracing = False
# TensorBoard should only write summaries on the chief when in a
# Multi-Worker setting.
self._chief_worker_only = True
def _validate_kwargs(self, kwargs):
"""Handle arguments were supported in V1."""
if kwargs.get('write_grads', False):
logging.warning('`write_grads` will be ignored in TensorFlow 2.0 '
'for the `TensorBoard` Callback.')
if kwargs.get('batch_size', False):
logging.warning('`batch_size` is no longer needed in the '
'`TensorBoard` Callback and will be ignored '
'in TensorFlow 2.0.')
if kwargs.get('embeddings_layer_names', False):
logging.warning('`embeddings_layer_names` is not supported in '
'TensorFlow 2.0. Instead, all `Embedding` layers '
'will be visualized.')
if kwargs.get('embeddings_data', False):
logging.warning('`embeddings_data` is not supported in TensorFlow '
'2.0. Instead, all `Embedding` variables will be '
'visualized.')
unrecognized_kwargs = set(kwargs.keys()) - {
'write_grads', 'embeddings_layer_names', 'embeddings_data', 'batch_size'
}
# Only allow kwargs that were supported in V1.
if unrecognized_kwargs:
raise ValueError('Unrecognized arguments in `TensorBoard` '
'Callback: ' + str(unrecognized_kwargs))
def set_model(self, model):
"""Sets Keras model and writes graph if specified."""
self.model = model.model
with context.eager_mode():
self._close_writers()
if self.write_graph:
with self._get_writer(self._train_run_name).as_default():
with summary_ops_v2.always_record_summaries():
if not self.model.run_eagerly:
summary_ops_v2.graph(K.get_graph(), step=0)
summary_writable = (
self.model._is_graph_network or # pylint: disable=protected-access
self.model.__class__.__name__ == 'Sequential') # pylint: disable=protected-access
if summary_writable:
summary_ops_v2.keras_model('keras', self.model, step=0)
if self.embeddings_freq:
self._configure_embeddings()
def _configure_embeddings(self):
"""Configure the Projector for embeddings."""
# TODO(omalleyt): Add integration tests.
from tensorflow.python.keras.layers import embeddings
try:
from tensorboard.plugins import projector
except ImportError:
      raise ImportError('Failed to import TensorBoard. Please make sure that '
                        'TensorBoard integration is complete.')
config = projector.ProjectorConfig()
for layer in self.model.layers:
if isinstance(layer, embeddings.Embedding):
embedding = config.embeddings.add()
embedding.tensor_name = layer.embeddings.name
if self.embeddings_metadata is not None:
if isinstance(self.embeddings_metadata, str):
embedding.metadata_path = self.embeddings_metadata
else:
if layer.name in embedding.metadata_path:
embedding.metadata_path = self.embeddings_metadata.pop(layer.name)
if self.embeddings_metadata:
raise ValueError('Unrecognized `Embedding` layer names passed to '
'`keras.callbacks.TensorBoard` `embeddings_metadata` '
'argument: ' + str(self.embeddings_metadata.keys()))
class DummyWriter(object):
"""Dummy writer to conform to `Projector` API."""
def __init__(self, logdir):
self.logdir = logdir
def get_logdir(self):
return self.logdir
writer = DummyWriter(self.log_dir)
projector.visualize_embeddings(writer, config)
def _close_writers(self):
"""Close all remaining open file writers owned by this callback.
If there are no such file writers, this is a no-op.
"""
with context.eager_mode():
for writer in six.itervalues(self._writers):
writer.close()
self._writers.clear()
def _get_writer(self, writer_name):
"""Get a summary writer for the given subdirectory under the logdir.
A writer will be created if it does not yet exist.
Arguments:
writer_name: The name of the directory for which to create or
retrieve a writer. Should be either `self._train_run_name` or
`self._validation_run_name`.
Returns:
A `SummaryWriter` object.
"""
if writer_name not in self._writers:
path = os.path.join(self.log_dir, writer_name)
writer = summary_ops_v2.create_file_writer_v2(path)
self._writers[writer_name] = writer
return self._writers[writer_name]
def on_train_begin(self, logs=None):
if self._profile_batch == 1:
summary_ops_v2.trace_on(graph=True, profiler=True)
self._is_tracing = True
def on_batch_end(self, batch, logs=None):
"""Writes scalar summaries for metrics on every training batch.
Performs profiling if current batch is in profiler_batches.
Arguments:
batch: Integer, index of batch within the current epoch.
logs: Dict. Metric results for this batch.
"""
# Don't output batch_size and batch number as TensorBoard summaries
logs = logs or {}
self._samples_seen += logs.get('size', 1)
samples_seen_since = self._samples_seen - self._samples_seen_at_last_write
if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq:
self._log_metrics(logs, prefix='batch_', step=self._total_batches_seen)
self._samples_seen_at_last_write = self._samples_seen
self._total_batches_seen += 1
if self._is_tracing:
self._log_trace()
elif (not self._is_tracing and
self._total_batches_seen == self._profile_batch - 1):
self._enable_trace()
def on_epoch_end(self, epoch, logs=None):
"""Runs metrics and histogram summaries at epoch end."""
step = epoch if self.update_freq == 'epoch' else self._samples_seen
self._log_metrics(logs, prefix='epoch_', step=step)
if self.histogram_freq and epoch % self.histogram_freq == 0:
self._log_weights(epoch)
if self.embeddings_freq and epoch % self.embeddings_freq == 0:
self._log_embeddings(epoch)
def on_train_end(self, logs=None):
if self._is_tracing:
self._log_trace()
self._close_writers()
def _enable_trace(self):
if context.executing_eagerly():
summary_ops_v2.trace_on(graph=True, profiler=True)
self._is_tracing = True
def _log_trace(self):
if context.executing_eagerly():
with self._get_writer(self._train_run_name).as_default(), \
summary_ops_v2.always_record_summaries():
# TODO(b/126388999): Remove step info in the summary name.
summary_ops_v2.trace_export(
name='batch_%d' % self._total_batches_seen,
step=self._total_batches_seen,
profiler_outdir=os.path.join(self.log_dir, 'train'))
self._is_tracing = False
def _log_metrics(self, logs, prefix, step):
"""Writes metrics out as custom scalar summaries.
Arguments:
logs: Dict. Keys are scalar summary names, values are NumPy scalars.
prefix: String. The prefix to apply to the scalar summary names.
step: Int. The global step to use for TensorBoard.
"""
if logs is None:
logs = {}
# Group metrics by the name of their associated file writer. Values
# are lists of metrics, as (name, scalar_value) pairs.
logs_by_writer = {
self._train_run_name: [],
self._validation_run_name: [],
}
validation_prefix = 'val_'
for (name, value) in logs.items():
if name in ('batch', 'size', 'num_steps'):
# Scrub non-metric items.
continue
if name.startswith(validation_prefix):
name = name[len(validation_prefix):]
writer_name = self._validation_run_name
else:
writer_name = self._train_run_name
name = prefix + name # assign batch or epoch prefix
logs_by_writer[writer_name].append((name, value))
with context.eager_mode():
with summary_ops_v2.always_record_summaries():
for writer_name in logs_by_writer:
these_logs = logs_by_writer[writer_name]
if not these_logs:
# Don't create a "validation" events file if we don't
# actually have any validation data.
continue
writer = self._get_writer(writer_name)
with writer.as_default():
for (name, value) in these_logs:
summary_ops_v2.scalar(name, value, step=step)
def _log_weights(self, epoch):
"""Logs the weights of the Model to TensorBoard."""
writer = self._get_writer(self._train_run_name)
with context.eager_mode(), \
writer.as_default(), \
summary_ops_v2.always_record_summaries():
for layer in self.model.layers:
for weight in layer.weights:
weight_name = weight.name.replace(':', '_')
with ops.init_scope():
weight = K.get_value(weight)
summary_ops_v2.histogram(weight_name, weight, step=epoch)
if self.write_images:
self._log_weight_as_image(weight, weight_name, epoch)
writer.flush()
def _log_weight_as_image(self, weight, weight_name, epoch):
"""Logs a weight as a TensorBoard image."""
w_img = array_ops.squeeze(weight)
shape = K.int_shape(w_img)
if len(shape) == 1: # Bias case
w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
elif len(shape) == 2: # Dense layer kernel case
if shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
elif len(shape) == 3: # ConvNet case
if K.image_data_format() == 'channels_last':
# Switch to channels_first to display every kernel as a separate
# image.
w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [shape[0], shape[1], shape[2], 1])
shape = K.int_shape(w_img)
# Not possible to handle 3D convnets etc.
if len(shape) == 4 and shape[-1] in [1, 3, 4]:
summary_ops_v2.image(weight_name, w_img, step=epoch)
def _log_embeddings(self, epoch):
embeddings_ckpt = os.path.join(self.log_dir, 'train',
'keras_embedding.ckpt-{}'.format(epoch))
self.model.save_weights(embeddings_ckpt)
class ModelCheckpoint(Callback):
"""Save the model after every epoch.
`filepath` can contain named formatting options,
  which will be filled with the value of `epoch` and
keys in `logs` (passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
then the model checkpoints will be saved with the epoch number and
the validation loss in the filename.
Arguments:
filepath: string, path to save the model file.
monitor: quantity to monitor.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`, the latest best model according
to the quantity monitored will not be overwritten.
mode: one of {auto, min, max}. If `save_best_only=True`, the decision to
overwrite the current save file is made based on either the maximization
or the minimization of the monitored quantity. For `val_acc`, this
should be `max`, for `val_loss` this should be `min`, etc. In `auto`
mode, the direction is automatically inferred from the name of the
monitored quantity.
save_weights_only: if True, then only the model's weights will be saved
(`model.save_weights(filepath)`), else the full model is saved
(`model.save(filepath)`).
save_freq: `'epoch'` or integer. When using `'epoch'`, the callback saves
the model after each epoch. When using integer, the callback saves the
model at end of a batch at which this many samples have been seen since
last saving. Note that if the saving isn't aligned to epochs, the
monitored metric may potentially be less reliable (it could reflect as
little as 1 batch, since the metrics get reset every epoch). Defaults to
`'epoch'`
load_weights_on_restart: Whether the training should restore the model. If
True, the model will attempt to load the checkpoint file from `filepath`
at the start of `model.fit()`. This saves the need of manually calling
      `model.load_weights()` before `model.fit()`. In multi-worker distributed
training, this provides fault-tolerance and loads the model
automatically upon recovery of workers. The callback gives up loading if
the filepath does not exist, and raises ValueError if format does not
match. Defaults to False.
**kwargs: Additional arguments for backwards compatibility. Possible key
is `period`.
"""
def __init__(self,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
save_freq='epoch',
load_weights_on_restart=False,
**kwargs):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.save_freq = save_freq
self.load_weights_on_restart = load_weights_on_restart
self.epochs_since_last_save = 0
self._samples_seen_since_last_saving = 0
self.metrics = []
self.infos = []
self.info_names = None
# Deprecated field `period` is for the number of epochs between which
# the model is saved.
if 'period' in kwargs:
self.period = kwargs['period']
logging.warning('`period` argument is deprecated. Please use `save_freq` '
'to specify the frequency in number of samples seen.')
else:
self.period = 1
if mode not in ['auto', 'min', 'max']:
logging.warning('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.', mode)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
if self.save_freq != 'epoch' and not isinstance(self.save_freq, int):
raise ValueError('Unrecognized save_freq: {}'.format(self.save_freq))
# Only the chief worker writes model checkpoints, but all workers
# restore checkpoint at on_train_begin().
self._chief_worker_only = False
def set_model(self, model):
self.model = model
# Use name matching rather than `isinstance` to avoid circular dependencies.
if (not self.save_weights_only and
not model._is_graph_network and # pylint: disable=protected-access
model.__class__.__name__ != 'Sequential'):
self.save_weights_only = True
def on_train_begin(self, logs=None):
if K.in_multi_worker_mode():
# pylint: disable=protected-access
# MultiWorkerTrainingState is used to manage the training state needed
# for preemption-recovery of a worker in multi-worker training.
self.model._training_state = (
multi_worker_training_state.MultiWorkerTrainingState(
self.model, self.filepath))
self._training_state = self.model._training_state
if self._training_state.restore():
# If the training state needs to be and is successfully restored,
# it is recovering from a previous failure (or preemption). In such
# case, do not load the weights from user specified file path.
return
# If this is not multi worker training, restoring is not needed, or
# restoring failed, check if it should load weights on restart.
# TODO(rchao): Also restore the epoch in single-worker training when
# `self.load_weights_on_restart=True`.
if self.load_weights_on_restart:
# In multi worker training, it only should if `experimental_should_init`
# is True.
# TODO(rchao): Reference `experimental_should_init` api from a util file.
if not K.in_multi_worker_mode() or dc_context.get_current_worker_context(
).experimental_should_init:
filepath_to_load = (
self._get_most_recently_modified_file_matching_pattern(
self.filepath))
if filepath_to_load is not None and os.path.exists(filepath_to_load):
try:
# `filepath` may contain placeholders such as `{epoch:02d}`, and
# thus it attempts to load the most recently modified file with file
# name matching the pattern.
self.model.load_weights(filepath_to_load)
except (IOError, ValueError) as e:
raise ValueError('Error loading file from {}. Reason: {}'.format(
filepath_to_load, e))
def on_train_end(self, logs=None):
if K.in_multi_worker_mode():
# In multi-worker training, on successful exit of training, delete the
# training state backup file that was saved for the purpose of worker
# recovery.
self._training_state.delete_backup()
# Restore the training state so the model is ready for next (possible)
# multi worker training.
del self._training_state
del self.model._training_state
def on_batch_end(self, batch, logs=None):
logs = logs or {}
if isinstance(self.save_freq, int):
self._samples_seen_since_last_saving += logs.get('size', 1)
if self._samples_seen_since_last_saving >= self.save_freq:
self._save_model(epoch=self._current_epoch, logs=logs)
self._samples_seen_since_last_saving = 0
def on_epoch_begin(self, epoch, logs=None):
self._current_epoch = epoch
def on_epoch_end(self, epoch, logs=None):
self.epochs_since_last_save += 1
if self.save_freq == 'epoch':
self._save_model(epoch=epoch, logs=logs)
if K.in_multi_worker_mode():
# For multi-worker training, back up the weights and current training
# state for possible future recovery.
# TODO(rchao): Call `back_up` at finer period such as N steps.
self._training_state.back_up(epoch)
def on_step_end(self, step, logs):
if self.info_names is None:
self.info_names = logs['info'].keys()
self.metrics.append(logs['metrics'])
if len(self.info_names) > 0:
self.infos.append([logs['info'][k] for k in logs['info'].keys()])
def _save_model(self, epoch, logs):
"""Saves the model.
Arguments:
epoch: the epoch this iteration is in.
logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
"""
logs = logs or {}
if isinstance(self.save_freq,
int) or self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
file_handle, filepath = self._get_file_handle_and_path(epoch, logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
logging.warning('Can save best model only with %s available, '
'skipping.', self.monitor)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('\nEpoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s' % (epoch + 1, self.monitor, self.best,
current, filepath))
self.best = current
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
else:
if self.verbose > 0:
print('\nEpoch %05d: %s did not improve from %0.5f' %
(epoch + 1, self.monitor, self.best))
else:
if self.verbose > 0:
print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
self._maybe_remove_file(file_handle, filepath)
def _get_file_handle_and_path(self, epoch, logs):
"""Returns the file handle and path."""
# TODO(rchao): Replace dc_context reference with
# distributed_training_utils.should_current_worker_checkpoint() once
# distributed_training_utils.py no longer depends on callbacks.py.
if not K.in_multi_worker_mode() or dc_context.get_current_worker_context(
).should_checkpoint:
return None, self.filepath.format(epoch=epoch + 1, **logs)
else:
# If this is multi-worker training, and this worker should not
# save checkpoint, we replace the filepath with a dummy filepath so
# it writes to a file that will be removed at the end of _save_model()
# call. This is because the SyncOnReadVariable needs to be synced across
# all the workers in order to be read, and all workers need to initiate
# that.
file_handle, temp_file_name = tempfile.mkstemp()
extension = os.path.splitext(self.filepath)[1]
return file_handle, temp_file_name + extension
def _maybe_remove_file(self, file_handle, filepath):
# Remove the file in multi-worker training where this worker should
# not checkpoint. It is a dummy file previously saved for sync distributed
# training.
if K.in_multi_worker_mode(
) and not dc_context.get_current_worker_context().should_checkpoint:
os.close(file_handle)
os.remove(filepath)
def _get_most_recently_modified_file_matching_pattern(self, pattern):
"""Returns the most recently modified filepath matching pattern.
Pattern may contain python formatting placeholder. If
`tf.train.latest_checkpoint()` does not return None, use that; otherwise,
check for most recently modified one that matches the pattern.
In the rare case where there are more than one pattern-matching file having
the same modified time that is most recent among all, return the filepath
that is largest (by `>` operator, lexicographically using the numeric
equivalents). This provides a tie-breaker when multiple files are most
recent. Note that a larger `filepath` can sometimes indicate a later time of
modification (for instance, when epoch/batch is used as formatting option),
but not necessarily (when accuracy or loss is used). The tie-breaker is
put in the logic as best effort to return the most recent, and to avoid
    a nondeterministic result.
Modified time of a file is obtained with `os.path.getmtime()`.
This utility function is best demonstrated via an example:
```python
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
]
for file_path in file_paths:
# Write something to each of the files
self.assertEqual(
_get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-1])
```
Arguments:
pattern: The file pattern that may optionally contain python placeholder
such as `{epoch:02d}`.
Returns:
The most recently modified file's full filepath matching `pattern`. If
`pattern` does not contain any placeholder, this returns the filepath
      that exactly matches `pattern`. Returns `None` if no match is found.
"""
dir_name = os.path.dirname(pattern)
base_name = os.path.basename(pattern)
base_name_regex = '^' + re.sub(r'{.*}', r'.*', base_name) + '$'
# If tf.train.latest_checkpoint tells us there exists a latest checkpoint,
# use that as it is more robust than `os.path.getmtime()`.
latest_tf_checkpoint = checkpoint_management.latest_checkpoint(dir_name)
if latest_tf_checkpoint is not None and re.match(
base_name_regex, os.path.basename(latest_tf_checkpoint)):
return latest_tf_checkpoint
latest_mod_time = 0
file_path_with_latest_mod_time = None
n_file_with_latest_mod_time = 0
file_path_with_largest_file_name = None
if os.path.exists(dir_name):
for file_name in os.listdir(dir_name):
# Only consider if `file_name` matches the pattern.
if re.match(base_name_regex, file_name):
file_path = os.path.join(dir_name, file_name)
mod_time = os.path.getmtime(file_path)
if (file_path_with_largest_file_name is None or
file_path > file_path_with_largest_file_name):
file_path_with_largest_file_name = file_path
if mod_time > latest_mod_time:
latest_mod_time = mod_time
file_path_with_latest_mod_time = file_path
# In the case a file with later modified time is found, reset
# the counter for the number of files with latest modified time.
n_file_with_latest_mod_time = 1
elif mod_time == latest_mod_time:
# In the case a file has modified time tied with the most recent,
# increment the counter for the number of files with latest modified
# time by 1.
n_file_with_latest_mod_time += 1
if n_file_with_latest_mod_time == 1:
# Return the sole file that has most recent modified time.
return file_path_with_latest_mod_time
else:
# If there are more than one file having latest modified time, return
# the file path with the largest file name.
return file_path_with_largest_file_name
class EarlyStopping(Callback):
"""Stop training when a monitored quantity has stopped improving.
Arguments:
monitor: Quantity to be monitored.
min_delta: Minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than min_delta, will count as no
improvement.
patience: Number of epochs with no improvement
after which training will be stopped.
verbose: verbosity mode.
mode: One of `{"auto", "min", "max"}`. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
baseline: Baseline value for the monitored quantity.
Training will stop if the model doesn't show improvement over the
baseline.
restore_best_weights: Whether to restore model weights from
the epoch with the best value of the monitored quantity.
If False, the model weights obtained at the last step of
training are used.
Example:
```python
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
# This callback will stop the training when there is no improvement in
# the validation loss for three consecutive epochs.
model.fit(data, labels, epochs=100, callbacks=[callback],
validation_data=(val_data, val_labels))
```
"""
def __init__(self,
monitor='val_loss',
min_delta=0,
patience=0,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=False):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.baseline = baseline
self.min_delta = abs(min_delta)
self.wait = 0
self.stopped_epoch = 0
self.restore_best_weights = restore_best_weights
self.best_weights = None
if mode not in ['auto', 'min', 'max']:
logging.warning('EarlyStopping mode %s is unknown, '
'fallback to auto mode.', mode)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
elif mode == 'max':
self.monitor_op = np.greater
else:
if 'acc' in self.monitor:
self.monitor_op = np.greater
else:
self.monitor_op = np.less
if self.monitor_op == np.greater:
self.min_delta *= 1
else:
self.min_delta *= -1
def on_train_begin(self, logs=None):
# Allow instances to be re-used
self.wait = 0
self.stopped_epoch = 0
if self.baseline is not None:
self.best = self.baseline
else:
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def on_epoch_end(self, epoch, logs=None):
current = self.get_monitor_value(logs)
if current is None:
return
if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
if self.restore_best_weights:
self.best_weights = self.model.get_weights()
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
if self.restore_best_weights:
if self.verbose > 0:
print('Restoring model weights from the end of the best epoch.')
self.model.set_weights(self.best_weights)
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))
def get_monitor_value(self, logs):
logs = logs or {}
monitor_value = logs.get(self.monitor)
if monitor_value is None:
logging.warning('Early stopping conditioned on metric `%s` '
'which is not available. Available metrics are: %s',
self.monitor, ','.join(list(logs.keys())))
return monitor_value
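
# A minimal usage sketch, assuming a compiled keras-rl agent named `agent` and a
# gym-style environment named `env` exist elsewhere; those two names and the
# nb_steps value are assumptions, everything else comes from the classes above.
if __name__ == '__main__':
  callbacks = [
      TensorBoard(log_dir='logs/run1', update_freq=1000),
      ModelCheckpoint('weights.{epoch:02d}.hdf5', monitor='val_loss',
                      save_weights_only=True),
      EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True),
  ]
  # agent.fit(env, nb_steps=50000, callbacks=callbacks)  # hypothetical wiring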
|
[
"os.remove",
"tensorboard.plugins.projector.visualize_embeddings",
"tensorflow.python.keras.distribute.multi_worker_training_state.MultiWorkerTrainingState",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.distribute.distribute_coordinator_context.get_current_worker_context",
"os.close",
"os.path.join",
"tensorflow.python.keras.backend.get_graph",
"tensorflow.python.ops.summary_ops_v2.histogram",
"tensorflow.python.keras.backend.image_data_format",
"tensorflow.python.ops.summary_ops_v2.trace_on",
"os.path.dirname",
"os.path.exists",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.ops.summary_ops_v2.always_record_summaries",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.backend.get_value",
"re.sub",
"tensorflow.python.ops.summary_ops_v2.create_file_writer_v2",
"tensorflow.python.ops.summary_ops_v2.keras_model",
"os.path.basename",
"re.match",
"tensorflow.python.keras.backend.in_multi_worker_mode",
"tensorflow.python.ops.summary_ops_v2.scalar",
"os.listdir",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.keras.backend.int_shape",
"tempfile.mkstemp",
"tensorflow.python.training.checkpoint_management.latest_checkpoint",
"tensorflow.python.ops.summary_ops_v2.image",
"six.itervalues",
"tensorboard.plugins.projector.ProjectorConfig",
"os.path.splitext",
"os.path.getmtime",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.ops.array_ops.transpose"
] |
[((7464, 7491), 'tensorboard.plugins.projector.ProjectorConfig', 'projector.ProjectorConfig', ([], {}), '()\n', (7489, 7491), False, 'from tensorboard.plugins import projector\n'), ((8537, 8583), 'tensorboard.plugins.projector.visualize_embeddings', 'projector.visualize_embeddings', (['writer', 'config'], {}), '(writer, config)\n', (8567, 8583), False, 'from tensorboard.plugins import projector\n'), ((11301, 11328), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (11326, 11328), False, 'from tensorflow.python.eager import context\n'), ((11454, 11481), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (11479, 11481), False, 'from tensorflow.python.eager import context\n'), ((14361, 14386), 'tensorflow.python.ops.array_ops.squeeze', 'array_ops.squeeze', (['weight'], {}), '(weight)\n', (14378, 14386), False, 'from tensorflow.python.ops import array_ops\n'), ((14400, 14418), 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['w_img'], {}), '(w_img)\n', (14411, 14418), True, 'from tensorflow.python.keras import backend as K\n'), ((15124, 15142), 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['w_img'], {}), '(w_img)\n', (15135, 15142), True, 'from tensorflow.python.keras import backend as K\n'), ((20482, 20506), 'tensorflow.python.keras.backend.in_multi_worker_mode', 'K.in_multi_worker_mode', ([], {}), '()\n', (20504, 20506), True, 'from tensorflow.python.keras import backend as K\n'), ((22475, 22499), 'tensorflow.python.keras.backend.in_multi_worker_mode', 'K.in_multi_worker_mode', ([], {}), '()\n', (22497, 22499), True, 'from tensorflow.python.keras import backend as K\n'), ((23515, 23539), 'tensorflow.python.keras.backend.in_multi_worker_mode', 'K.in_multi_worker_mode', ([], {}), '()\n', (23537, 23539), True, 'from tensorflow.python.keras import backend as K\n'), ((29344, 29368), 'os.path.dirname', 'os.path.dirname', (['pattern'], {}), '(pattern)\n', (29359, 29368), False, 'import os\n'), ((29386, 29411), 'os.path.basename', 'os.path.basename', (['pattern'], {}), '(pattern)\n', (29402, 29411), False, 'import os\n'), ((29655, 29704), 'tensorflow.python.training.checkpoint_management.latest_checkpoint', 'checkpoint_management.latest_checkpoint', (['dir_name'], {}), '(dir_name)\n', (29694, 29704), False, 'from tensorflow.python.training import checkpoint_management\n'), ((30024, 30048), 'os.path.exists', 'os.path.exists', (['dir_name'], {}), '(dir_name)\n', (30038, 30048), False, 'import os\n'), ((5014, 5122), 'tensorflow.python.platform.tf_logging.warning', 'logging.warning', (['"""`write_grads` will be ignored in TensorFlow 2.0 for the `TensorBoard` Callback."""'], {}), "(\n '`write_grads` will be ignored in TensorFlow 2.0 for the `TensorBoard` Callback.'\n )\n", (5029, 5122), True, 'from tensorflow.python.platform import tf_logging as logging\n'), ((5187, 5317), 'tensorflow.python.platform.tf_logging.warning', 'logging.warning', (['"""`batch_size` is no longer needed in the `TensorBoard` Callback and will be ignored in TensorFlow 2.0."""'], {}), "(\n '`batch_size` is no longer needed in the `TensorBoard` Callback and will be ignored in TensorFlow 2.0.'\n )\n", (5202, 5317), True, 'from tensorflow.python.platform import tf_logging as logging\n'), ((5420, 5561), 'tensorflow.python.platform.tf_logging.warning', 'logging.warning', (['"""`embeddings_layer_names` is not supported in TensorFlow 2.0. 
Instead, all `Embedding` layers will be visualized."""'], {}), "(\n '`embeddings_layer_names` is not supported in TensorFlow 2.0. Instead, all `Embedding` layers will be visualized.'\n )\n", (5435, 5561), True, 'from tensorflow.python.platform import tf_logging as logging\n'), ((5657, 5794), 'tensorflow.python.platform.tf_logging.warning', 'logging.warning', (['"""`embeddings_data` is not supported in TensorFlow 2.0. Instead, all `Embedding` variables will be visualized."""'], {}), "(\n '`embeddings_data` is not supported in TensorFlow 2.0. Instead, all `Embedding` variables will be visualized.'\n )\n", (5672, 5794), True, 'from tensorflow.python.platform import tf_logging as logging\n'), ((6325, 6345), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (6343, 6345), False, 'from tensorflow.python.eager import context\n'), ((8763, 8783), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (8781, 8783), False, 'from tensorflow.python.eager import context\n'), ((8806, 8835), 'six.itervalues', 'six.itervalues', (['self._writers'], {}), '(self._writers)\n', (8820, 8835), False, 'import six\n'), ((9373, 9412), 'os.path.join', 'os.path.join', (['self.log_dir', 'writer_name'], {}), '(self.log_dir, writer_name)\n', (9385, 9412), False, 'import os\n'), ((9429, 9471), 'tensorflow.python.ops.summary_ops_v2.create_file_writer_v2', 'summary_ops_v2.create_file_writer_v2', (['path'], {}), '(path)\n', (9465, 9471), False, 'from tensorflow.python.ops import summary_ops_v2\n'), ((9637, 9687), 'tensorflow.python.ops.summary_ops_v2.trace_on', 'summary_ops_v2.trace_on', ([], {'graph': '(True)', 'profiler': '(True)'}), '(graph=True, profiler=True)\n', (9660, 9687), False, 'from tensorflow.python.ops import summary_ops_v2\n'), ((11337, 11387), 'tensorflow.python.ops.summary_ops_v2.trace_on', 'summary_ops_v2.trace_on', ([], {'graph': '(True)', 'profiler': '(True)'}), '(graph=True, profiler=True)\n', (11360, 11387), False, 'from tensorflow.python.ops import summary_ops_v2\n'), ((13033, 13053), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (13051, 13053), False, 'from tensorflow.python.eager import context\n'), ((13724, 13744), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (13742, 13744), False, 'from tensorflow.python.eager import context\n'), ((13793, 13833), 'tensorflow.python.ops.summary_ops_v2.always_record_summaries', 'summary_ops_v2.always_record_summaries', ([], {}), '()\n', (13831, 13833), False, 'from tensorflow.python.ops import summary_ops_v2\n'), ((14472, 14517), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['w_img', '[1, shape[0], 1, 1]'], {}), '(w_img, [1, shape[0], 1, 1])\n', (14489, 14517), False, 'from tensorflow.python.ops import array_ops\n'), ((15249, 15301), 'tensorflow.python.ops.summary_ops_v2.image', 'summary_ops_v2.image', (['weight_name', 'w_img'], {'step': 'epoch'}), '(weight_name, w_img, step=epoch)\n', (15269, 15301), False, 'from tensorflow.python.ops import summary_ops_v2\n'), ((19007, 19143), 'tensorflow.python.platform.tf_logging.warning', 'logging.warning', (['"""`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of samples seen."""'], {}), "(\n '`period` argument is deprecated. 
Please use `save_freq` to specify the frequency in number of samples seen.'\n )\n", (19022, 19143), True, 'from tensorflow.python.platform import tf_logging as logging\n'), ((19247, 19334), 'tensorflow.python.platform.tf_logging.warning', 'logging.warning', (['"""ModelCheckpoint mode %s is unknown, fallback to auto mode."""', 'mode'], {}), "('ModelCheckpoint mode %s is unknown, fallback to auto mode.',\n mode)\n", (19262, 19334), True, 'from tensorflow.python.platform import tf_logging as logging\n'), ((20748, 20827), 'tensorflow.python.keras.distribute.multi_worker_training_state.MultiWorkerTrainingState', 'multi_worker_training_state.MultiWorkerTrainingState', (['self.model', 'self.filepath'], {}), '(self.model, self.filepath)\n', (20800, 20827), False, 'from tensorflow.python.keras.distribute import multi_worker_training_state\n'), ((26709, 26727), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (26725, 26727), False, 'import tempfile\n'), ((27072, 27096), 'tensorflow.python.keras.backend.in_multi_worker_mode', 'K.in_multi_worker_mode', ([], {}), '()\n', (27094, 27096), True, 'from tensorflow.python.keras import backend as K\n'), ((27177, 27198), 'os.close', 'os.close', (['file_handle'], {}), '(file_handle)\n', (27185, 27198), False, 'import os\n'), ((27206, 27225), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (27215, 27225), False, 'import os\n'), ((30074, 30094), 'os.listdir', 'os.listdir', (['dir_name'], {}), '(dir_name)\n', (30084, 30094), False, 'import os\n'), ((33701, 33786), 'tensorflow.python.platform.tf_logging.warning', 'logging.warning', (['"""EarlyStopping mode %s is unknown, fallback to auto mode."""', 'mode'], {}), "('EarlyStopping mode %s is unknown, fallback to auto mode.',\n mode)\n", (33716, 33786), True, 'from tensorflow.python.platform import tf_logging as logging\n'), ((11561, 11601), 'tensorflow.python.ops.summary_ops_v2.always_record_summaries', 'summary_ops_v2.always_record_summaries', ([], {}), '()\n', (11599, 11601), False, 'from tensorflow.python.ops import summary_ops_v2\n'), ((13067, 13107), 'tensorflow.python.ops.summary_ops_v2.always_record_summaries', 'summary_ops_v2.always_record_summaries', ([], {}), '()\n', (13105, 13107), False, 'from tensorflow.python.ops import summary_ops_v2\n'), ((14698, 14750), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['w_img', '[1, shape[0], shape[1], 1]'], {}), '(w_img, [1, shape[0], shape[1], 1])\n', (14715, 14750), False, 'from tensorflow.python.ops import array_ops\n'), ((26105, 26129), 'tensorflow.python.keras.backend.in_multi_worker_mode', 'K.in_multi_worker_mode', ([], {}), '()\n', (26127, 26129), True, 'from tensorflow.python.keras import backend as K\n'), ((26133, 26172), 'tensorflow.python.distribute.distribute_coordinator_context.get_current_worker_context', 'dc_context.get_current_worker_context', ([], {}), '()\n', (26170, 26172), True, 'from tensorflow.python.distribute import distribute_coordinator_context as dc_context\n'), ((26747, 26778), 'os.path.splitext', 'os.path.splitext', (['self.filepath'], {}), '(self.filepath)\n', (26763, 26778), False, 'import os\n'), ((29441, 29472), 're.sub', 're.sub', (['"""{.*}"""', '""".*"""', 'base_name'], {}), "('{.*}', '.*', base_name)\n", (29447, 29472), False, 'import re\n'), ((29786, 29824), 'os.path.basename', 'os.path.basename', (['latest_tf_checkpoint'], {}), '(latest_tf_checkpoint)\n', (29802, 29824), False, 'import os\n'), ((30169, 30205), 're.match', 're.match', (['base_name_regex', 'file_name'], {}), 
'(base_name_regex, file_name)\n', (30177, 30205), False, 'import re\n'), ((14054, 14111), 'tensorflow.python.ops.summary_ops_v2.histogram', 'summary_ops_v2.histogram', (['weight_name', 'weight'], {'step': 'epoch'}), '(weight_name, weight, step=epoch)\n', (14078, 14111), False, 'from tensorflow.python.ops import summary_ops_v2\n'), ((14620, 14646), 'tensorflow.python.ops.array_ops.transpose', 'array_ops.transpose', (['w_img'], {}), '(w_img)\n', (14639, 14646), False, 'from tensorflow.python.ops import array_ops\n'), ((14664, 14682), 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['w_img'], {}), '(w_img)\n', (14675, 14682), True, 'from tensorflow.python.keras import backend as K\n'), ((15049, 15108), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['w_img', '[shape[0], shape[1], shape[2], 1]'], {}), '(w_img, [shape[0], shape[1], shape[2], 1])\n', (15066, 15108), False, 'from tensorflow.python.ops import array_ops\n'), ((21677, 21701), 'tensorflow.python.keras.backend.in_multi_worker_mode', 'K.in_multi_worker_mode', ([], {}), '()\n', (21699, 21701), True, 'from tensorflow.python.keras import backend as K\n'), ((21705, 21744), 'tensorflow.python.distribute.distribute_coordinator_context.get_current_worker_context', 'dc_context.get_current_worker_context', ([], {}), '()\n', (21742, 21744), True, 'from tensorflow.python.distribute import distribute_coordinator_context as dc_context\n'), ((21956, 21988), 'os.path.exists', 'os.path.exists', (['filepath_to_load'], {}), '(filepath_to_load)\n', (21970, 21988), False, 'import os\n'), ((24624, 24714), 'tensorflow.python.platform.tf_logging.warning', 'logging.warning', (['"""Can save best model only with %s available, skipping."""', 'self.monitor'], {}), "('Can save best model only with %s available, skipping.',\n self.monitor)\n", (24639, 24714), True, 'from tensorflow.python.platform import tf_logging as logging\n'), ((27111, 27150), 'tensorflow.python.distribute.distribute_coordinator_context.get_current_worker_context', 'dc_context.get_current_worker_context', ([], {}), '()\n', (27148, 27150), True, 'from tensorflow.python.distribute import distribute_coordinator_context as dc_context\n'), ((30230, 30263), 'os.path.join', 'os.path.join', (['dir_name', 'file_name'], {}), '(dir_name, file_name)\n', (30242, 30263), False, 'import os\n'), ((30286, 30313), 'os.path.getmtime', 'os.path.getmtime', (['file_path'], {}), '(file_path)\n', (30302, 30313), False, 'import os\n'), ((6487, 6527), 'tensorflow.python.ops.summary_ops_v2.always_record_summaries', 'summary_ops_v2.always_record_summaries', ([], {}), '()\n', (6525, 6527), False, 'from tensorflow.python.ops import summary_ops_v2\n'), ((11839, 11874), 'os.path.join', 'os.path.join', (['self.log_dir', '"""train"""'], {}), "(self.log_dir, 'train')\n", (11851, 11874), False, 'import os\n'), ((13983, 13999), 'tensorflow.python.framework.ops.init_scope', 'ops.init_scope', ([], {}), '()\n', (13997, 13999), False, 'from tensorflow.python.framework import ops\n'), ((14023, 14042), 'tensorflow.python.keras.backend.get_value', 'K.get_value', (['weight'], {}), '(weight)\n', (14034, 14042), True, 'from tensorflow.python.keras import backend as K\n'), ((14804, 14825), 'tensorflow.python.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (14823, 14825), True, 'from tensorflow.python.keras import backend as K\n'), ((14955, 14997), 'tensorflow.python.ops.array_ops.transpose', 'array_ops.transpose', (['w_img'], {'perm': '[2, 0, 1]'}), '(w_img, perm=[2, 0, 1])\n', 
(14974, 14997), False, 'from tensorflow.python.ops import array_ops\n'), ((15015, 15033), 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['w_img'], {}), '(w_img)\n', (15026, 15033), True, 'from tensorflow.python.keras import backend as K\n'), ((6902, 6957), 'tensorflow.python.ops.summary_ops_v2.keras_model', 'summary_ops_v2.keras_model', (['"""keras"""', 'self.model'], {'step': '(0)'}), "('keras', self.model, step=0)\n", (6928, 6957), False, 'from tensorflow.python.ops import summary_ops_v2\n'), ((13522, 13567), 'tensorflow.python.ops.summary_ops_v2.scalar', 'summary_ops_v2.scalar', (['name', 'value'], {'step': 'step'}), '(name, value, step=step)\n', (13543, 13567), False, 'from tensorflow.python.ops import summary_ops_v2\n'), ((6609, 6622), 'tensorflow.python.keras.backend.get_graph', 'K.get_graph', ([], {}), '()\n', (6620, 6622), True, 'from tensorflow.python.keras import backend as K\n')]
|
"""Module containing the recipient alias API of the v1 API."""
from flask import abort
from flask.views import MethodView
from .root import API_V1
from .models import RecipientAlias
from ...db import DB
from ...db.models.recipient_alias import RecipientAlias as RecipientAlias_DB
@API_V1.route("/recipient_alias/")
class RecipientAliasList(MethodView):
"""Root endpoint for all recipient alias resources."""
@API_V1.response(RecipientAlias(many=True))
def get(self):
"""Get all recipient aliases"""
return RecipientAlias_DB.query.all()
@API_V1.arguments(RecipientAlias, description="The alias to add")
@API_V1.response(RecipientAlias, code=201)
def post(self, new_data):
"""Add a new recipient alias"""
item = RecipientAlias_DB(**new_data)
DB.session.add(item)
DB.session.commit()
return item
@API_V1.route("/recipient_alias/create_many")
class RecipientAliasCreateMany(MethodView):
"""Endpoint to create many aliases in one request."""
@API_V1.arguments(RecipientAlias(many=True), description="The aliases to add")
@API_V1.response(RecipientAlias(many=True), code=201)
def post(self, new_data):
"""Add new recipient aliases"""
items = []
for data in new_data:
item = RecipientAlias_DB(**data)
DB.session.add(item)
items.append(item)
DB.session.commit()
return items
@API_V1.route("/recipient_alias/replace")
class RecipientAliasReplace(MethodView):
"""Endpoint to replace all recipient aliases."""
@API_V1.arguments(RecipientAlias(many=True), description="The new list which should be set")
@API_V1.response(code=204)
def post(self, new_data):
"""Replace all recipient aliases with the given list."""
RecipientAlias_DB.query.delete()
for data in new_data:
item = RecipientAlias_DB(**data)
DB.session.add(item)
DB.session.commit()
@API_V1.route("/recipient_alias/<recipient_alias_id>/")
class RecipientAlias(MethodView):
"""Endpoint for a single recipient alias resource"""
@API_V1.doc(responses={'404': {'description': 'When requested recipient alias is not found'}})
@API_V1.response(RecipientAlias())
def get(self, recipient_alias_id):
""" Get a single recipient alias """
item = RecipientAlias_DB.query.filter(RecipientAlias_DB.id == recipient_alias_id).first()
if item is None:
abort(404, "Requested recipient alias not found.")
return item
@API_V1.arguments(RecipientAlias, description="The new values for the alias")
@API_V1.response(RecipientAlias())
def put(self, recipient_alias_id, update_data):
""" Update a single recipient alias """
item = RecipientAlias_DB.query.filter(RecipientAlias_DB.id == recipient_alias_id).first()
if item is None:
abort(404, "Requested recipient alias not found.")
item.update(update_data)
DB.session.commit()
return item
@API_V1.response(code=204)
def delete(self, recipient_alias_id):
""" Delete a single recipient alias """
item = RecipientAlias_DB.query.filter(RecipientAlias_DB.id == recipient_alias_id).first()
if item is None:
return
DB.session.delete(item)
DB.session.commit()
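
# A minimal client-side sketch, assuming the API_V1 blueprint is registered under
# /api/v1 on a locally running instance; the base URL and the field names in the
# JSON payload are assumptions, not taken from the RecipientAlias schema.
#
#   import requests
#   requests.post('http://localhost:5000/api/v1/recipient_alias/',
#                 json={'alias': 'team@example.com', 'recipient': 'alice@example.com'})
#   requests.get('http://localhost:5000/api/v1/recipient_alias/')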
|
[
"flask.abort"
] |
[((2496, 2546), 'flask.abort', 'abort', (['(404)', '"""Requested recipient alias not found."""'], {}), "(404, 'Requested recipient alias not found.')\n", (2501, 2546), False, 'from flask import abort\n'), ((2924, 2974), 'flask.abort', 'abort', (['(404)', '"""Requested recipient alias not found."""'], {}), "(404, 'Requested recipient alias not found.')\n", (2929, 2974), False, 'from flask import abort\n')]
|
from django.conf import settings
from django.contrib.auth import (
authenticate, login, logout, get_user_model,
user_logged_out
)
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
import traceback
User = get_user_model()
class LoginView(APIView):
def post(self, request):
try:
user = authenticate(
request, username=request.data['username'],
password=request.data['password']
)
if user is not None:
login(request, user)
return Response(
data={
'success':True, 'message':"Success: User logged in",
'data':{
'username': user.username,
'email': user.email,
}
}, status=status.HTTP_200_OK
)
else:
return Response(
data={
'success':False, 'message':"User does not exists",
'data':{
'username': request.data['username'],
}
}, status=status.HTTP_200_OK
)
except Exception:
if settings.DEBUG:
traceback.print_exc()
return Response(
data={
'success':False, 'message':"Internal Server Error",
'data':{
'username': request.data['username'],
}
}, status=status.HTTP_200_OK
)
class LogoutView(APIView):
def post(self ,request):
user_logged_out.send(
sender=request.user.__class__, request=request, user=request.user
)
logout(request)
return Response(
data={
'success':True, 'message':"Success: User logged out",
'data':{
'username': request.data['username'],
}
}, status=status.HTTP_200_OK
)
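
# A minimal URL wiring sketch, assuming this module is importable as `views`
# inside a Django app; the route names and module path are assumptions.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('login/', views.LoginView.as_view(), name='login'),
#       path('logout/', views.LogoutView.as_view(), name='logout'),
#   ]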
|
[
"traceback.print_exc",
"django.contrib.auth.get_user_model",
"django.contrib.auth.user_logged_out.send",
"django.contrib.auth.logout",
"rest_framework.response.Response",
"django.contrib.auth.authenticate",
"django.contrib.auth.login"
] |
[((285, 301), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (299, 301), False, 'from django.contrib.auth import authenticate, login, logout, get_user_model, user_logged_out\n'), ((1813, 1905), 'django.contrib.auth.user_logged_out.send', 'user_logged_out.send', ([], {'sender': 'request.user.__class__', 'request': 'request', 'user': 'request.user'}), '(sender=request.user.__class__, request=request, user=\n request.user)\n', (1833, 1905), False, 'from django.contrib.auth import authenticate, login, logout, get_user_model, user_logged_out\n'), ((1935, 1950), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (1941, 1950), False, 'from django.contrib.auth import authenticate, login, logout, get_user_model, user_logged_out\n'), ((1966, 2116), 'rest_framework.response.Response', 'Response', ([], {'data': "{'success': True, 'message': 'Success: User logged out', 'data': {\n 'username': request.data['username']}}", 'status': 'status.HTTP_200_OK'}), "(data={'success': True, 'message': 'Success: User logged out',\n 'data': {'username': request.data['username']}}, status=status.HTTP_200_OK)\n", (1974, 2116), False, 'from rest_framework.response import Response\n'), ((390, 486), 'django.contrib.auth.authenticate', 'authenticate', (['request'], {'username': "request.data['username']", 'password': "request.data['password']"}), "(request, username=request.data['username'], password=request.\n data['password'])\n", (402, 486), False, 'from django.contrib.auth import authenticate, login, logout, get_user_model, user_logged_out\n'), ((577, 597), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (582, 597), False, 'from django.contrib.auth import authenticate, login, logout, get_user_model, user_logged_out\n'), ((621, 785), 'rest_framework.response.Response', 'Response', ([], {'data': "{'success': True, 'message': 'Success: User logged in', 'data': {'username':\n user.username, 'email': user.email}}", 'status': 'status.HTTP_200_OK'}), "(data={'success': True, 'message': 'Success: User logged in',\n 'data': {'username': user.username, 'email': user.email}}, status=\n status.HTTP_200_OK)\n", (629, 785), False, 'from rest_framework.response import Response\n'), ((1012, 1159), 'rest_framework.response.Response', 'Response', ([], {'data': "{'success': False, 'message': 'User does not exists', 'data': {'username':\n request.data['username']}}", 'status': 'status.HTTP_200_OK'}), "(data={'success': False, 'message': 'User does not exists', 'data':\n {'username': request.data['username']}}, status=status.HTTP_200_OK)\n", (1020, 1159), False, 'from rest_framework.response import Response\n'), ((1436, 1584), 'rest_framework.response.Response', 'Response', ([], {'data': "{'success': False, 'message': 'Internal Server Error', 'data': {'username':\n request.data['username']}}", 'status': 'status.HTTP_200_OK'}), "(data={'success': False, 'message': 'Internal Server Error', 'data':\n {'username': request.data['username']}}, status=status.HTTP_200_OK)\n", (1444, 1584), False, 'from rest_framework.response import Response\n'), ((1395, 1416), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1414, 1416), False, 'import traceback\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from db.player_draft import PlayerDraft
from db.team import Team
def test_find_by_id():
pdft = PlayerDraft.find_by_player_id(8475883) # <NAME>
assert len(pdft) == 2
pdft = PlayerDraft.find_by_player_id(8466145) # <NAME>
assert len(pdft) == 2
def test_find():
pdft = PlayerDraft.find(8479318, 10, 2016) # <NAME>
assert pdft.round == 1
assert pdft.overall == 1
def test_constructor():
pdft = PlayerDraft(8999444, 1, 2018, 3, 75) # fictional player
assert pdft.player_id == 8999444
assert Team.find_by_id(pdft.team_id).name == 'New Jersey Devils'
assert pdft.year == 2018
assert pdft.round == 3
assert pdft.overall == 75
def test_comparison_operators():
pdft_kopitar = PlayerDraft.find_by_player_id(8471685).pop(0) # 2005, 11
pdft_toews = PlayerDraft.find_by_player_id(8473604).pop(0) # 2006, 3
pdft_kessel = PlayerDraft.find_by_player_id(8473548).pop(0) # 2006, 5
pdft_stamkos = PlayerDraft.find_by_player_id(8474564).pop(0) # 2008, 1
ordered = sorted([pdft_kessel, pdft_kopitar, pdft_stamkos, pdft_toews])
assert ordered[0] == pdft_kopitar
assert ordered[1] == pdft_toews
assert ordered[2] == pdft_kessel
assert ordered[3] == pdft_stamkos
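
# The sorted() call above relies on PlayerDraft defining rich comparisons
# (e.g. __lt__) over draft year and position; a small offline illustration with
# fictional players, assuming the constructor needs no database access (as in
# test_constructor above).
#
#   early = PlayerDraft(8999001, 1, 2017, 1, 5)
#   late = PlayerDraft(8999002, 1, 2018, 2, 40)
#   assert sorted([late, early])[0] is early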
|
[
"db.player_draft.PlayerDraft",
"db.player_draft.PlayerDraft.find_by_player_id",
"db.player_draft.PlayerDraft.find",
"db.team.Team.find_by_id"
] |
[((148, 186), 'db.player_draft.PlayerDraft.find_by_player_id', 'PlayerDraft.find_by_player_id', (['(8475883)'], {}), '(8475883)\n', (177, 186), False, 'from db.player_draft import PlayerDraft\n'), ((234, 272), 'db.player_draft.PlayerDraft.find_by_player_id', 'PlayerDraft.find_by_player_id', (['(8466145)'], {}), '(8466145)\n', (263, 272), False, 'from db.player_draft import PlayerDraft\n'), ((339, 374), 'db.player_draft.PlayerDraft.find', 'PlayerDraft.find', (['(8479318)', '(10)', '(2016)'], {}), '(8479318, 10, 2016)\n', (355, 374), False, 'from db.player_draft import PlayerDraft\n'), ((478, 514), 'db.player_draft.PlayerDraft', 'PlayerDraft', (['(8999444)', '(1)', '(2018)', '(3)', '(75)'], {}), '(8999444, 1, 2018, 3, 75)\n', (489, 514), False, 'from db.player_draft import PlayerDraft\n'), ((583, 612), 'db.team.Team.find_by_id', 'Team.find_by_id', (['pdft.team_id'], {}), '(pdft.team_id)\n', (598, 612), False, 'from db.team import Team\n'), ((781, 819), 'db.player_draft.PlayerDraft.find_by_player_id', 'PlayerDraft.find_by_player_id', (['(8471685)'], {}), '(8471685)\n', (810, 819), False, 'from db.player_draft import PlayerDraft\n'), ((856, 894), 'db.player_draft.PlayerDraft.find_by_player_id', 'PlayerDraft.find_by_player_id', (['(8473604)'], {}), '(8473604)\n', (885, 894), False, 'from db.player_draft import PlayerDraft\n'), ((931, 969), 'db.player_draft.PlayerDraft.find_by_player_id', 'PlayerDraft.find_by_player_id', (['(8473548)'], {}), '(8473548)\n', (960, 969), False, 'from db.player_draft import PlayerDraft\n'), ((1007, 1045), 'db.player_draft.PlayerDraft.find_by_player_id', 'PlayerDraft.find_by_player_id', (['(8474564)'], {}), '(8474564)\n', (1036, 1045), False, 'from db.player_draft import PlayerDraft\n')]
|
from django.db import models
class Student(models.Model):
roll = models.IntegerField(unique=True)
name = models.CharField(max_length=100)
phone = models.CharField(max_length=10, unique=True, blank=True, null=True)
email = models.EmailField(unique=True, blank=True, null=True)
sem = models.IntegerField()
program = models.CharField(max_length=200, blank=True, null=True)
address = models.CharField(max_length=200, blank=True, null=True)
pic = models.ImageField(upload_to='uploads/accounts/', default='uploads/accounts/default.jpeg')
registered = models.BooleanField(default=False)
class Meta:
ordering = ['roll']
@property
def first_name(self):
return self.name.split(' ')[0]
def __str__(self):
return self.name
class StudentLogin(models.Model):
password = models.CharField(max_length=100)
student = models.OneToOneField(Student, on_delete=models.CASCADE)
def __str__(self):
return f'{self.student.name} {self.student.roll}'
|
[
"django.db.models.OneToOneField",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.EmailField",
"django.db.models.ImageField",
"django.db.models.IntegerField"
] |
[((70, 102), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'unique': '(True)'}), '(unique=True)\n', (89, 102), False, 'from django.db import models\n'), ((114, 146), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (130, 146), False, 'from django.db import models\n'), ((159, 226), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'unique': '(True)', 'blank': '(True)', 'null': '(True)'}), '(max_length=10, unique=True, blank=True, null=True)\n', (175, 226), False, 'from django.db import models\n'), ((239, 292), 'django.db.models.EmailField', 'models.EmailField', ([], {'unique': '(True)', 'blank': '(True)', 'null': '(True)'}), '(unique=True, blank=True, null=True)\n', (256, 292), False, 'from django.db import models\n'), ((303, 324), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (322, 324), False, 'from django.db import models\n'), ((339, 394), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(True)', 'null': '(True)'}), '(max_length=200, blank=True, null=True)\n', (355, 394), False, 'from django.db import models\n'), ((409, 464), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(True)', 'null': '(True)'}), '(max_length=200, blank=True, null=True)\n', (425, 464), False, 'from django.db import models\n'), ((475, 569), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""uploads/accounts/"""', 'default': '"""uploads/accounts/default.jpeg"""'}), "(upload_to='uploads/accounts/', default=\n 'uploads/accounts/default.jpeg')\n", (492, 569), False, 'from django.db import models\n'), ((582, 616), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (601, 616), False, 'from django.db import models\n'), ((842, 874), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (858, 874), False, 'from django.db import models\n'), ((889, 944), 'django.db.models.OneToOneField', 'models.OneToOneField', (['Student'], {'on_delete': 'models.CASCADE'}), '(Student, on_delete=models.CASCADE)\n', (909, 944), False, 'from django.db import models\n')]
|
# pylint: disable=redefined-outer-name,unused-argument
"""Coupon request API tests"""
import os
from types import SimpleNamespace
import pytest
from pygsheets import Worksheet, Spreadsheet
from pygsheets.client import Client as PygsheetsClient
from pygsheets.drive import DriveAPIWrapper
from pygsheets.sheet import SheetAPIWrapper
from courses.factories import CourseRunFactory
from ecommerce.factories import ProductVersionFactory
from ecommerce.models import Company, Coupon
from sheets.coupon_request_api import CouponRequestHandler, CouponRequestRow
from sheets.factories import GoogleApiAuthFactory
from sheets.models import CouponGenerationRequest
from sheets.utils import ResultType
@pytest.fixture
def courseware_objects():
"""Database objects that CSV data depends on"""
run = CourseRunFactory.create(courseware_id="course-v1:edX+DemoX+Demo_Course")
ProductVersionFactory.create(product__content_object=run)
@pytest.fixture
def request_csv_rows(settings, courseware_objects):
"""Fake coupon request spreadsheet data rows (loaded from CSV)"""
fake_request_csv_filepath = os.path.join(
settings.BASE_DIR, "sheets/resources/coupon_requests.csv"
)
with open(fake_request_csv_filepath) as f:
# Return all rows except for the header
return [line.split(",") for i, line in enumerate(f.readlines()) if i > 0]
@pytest.fixture
def pygsheets_fixtures(mocker, db, request_csv_rows):
"""Patched functions for pygsheets client functionality"""
Mock = mocker.Mock
MagicMock = mocker.MagicMock
google_api_auth = GoogleApiAuthFactory.create()
patched_get_data_rows = mocker.patch(
"sheets.sheet_handler_api.get_data_rows", return_value=request_csv_rows
)
mocked_worksheet = MagicMock(spec=Worksheet, get_all_values=Mock(return_value=[]))
mocked_spreadsheet = MagicMock(
spec=Spreadsheet, sheet1=mocked_worksheet, id="abc123"
)
mocked_pygsheets_client = MagicMock(
spec=PygsheetsClient,
oauth=Mock(),
open_by_key=Mock(return_value=mocked_spreadsheet),
drive=MagicMock(spec=DriveAPIWrapper),
sheet=MagicMock(spec=SheetAPIWrapper),
create=Mock(return_value=mocked_spreadsheet),
)
mocker.patch(
"sheets.coupon_request_api.get_authorized_pygsheets_client",
return_value=mocked_pygsheets_client,
)
return SimpleNamespace(
client=mocked_pygsheets_client,
spreadsheet=mocked_spreadsheet,
worksheet=mocked_worksheet,
google_api_auth=google_api_auth,
patched_get_data_rows=patched_get_data_rows,
)
@pytest.fixture
def patched_sheets_api(mocker):
"""Patches for sheets API functions that use the Drive/Sheets APIs"""
share_drive_file = mocker.patch(
"sheets.coupon_request_api.share_drive_file_with_emails", return_value=None
)
create_file_watch = mocker.patch(
"sheets.coupon_request_api.create_or_renew_sheet_file_watch", return_value=None
)
return SimpleNamespace(
share_drive_file=share_drive_file, create_file_watch=create_file_watch
)
def test_full_sheet_process(
db, pygsheets_fixtures, patched_sheets_api, request_csv_rows
):
"""
CouponRequestHandler.process_sheet should parse rows, create relevant objects in the database, and report
on results
"""
handler = CouponRequestHandler()
result = handler.process_sheet()
expected_processed_rows = {6, 8}
expected_failed_rows = {5, 7}
assert ResultType.PROCESSED.value in result
assert set(result[ResultType.PROCESSED.value]) == expected_processed_rows, (
"Rows %s as defined in coupon_requests.csv should be processed"
% str(expected_processed_rows)
)
assert ResultType.FAILED.value in result
assert set(result[ResultType.FAILED.value]) == expected_failed_rows, (
"Rows %s as defined in coupon_requests.csv should fail"
% str(expected_failed_rows)
)
# A CouponGenerationRequest should be created for each row that wasn't ignored and did not fail full sheet
# validation (CSV has 1 row that should fail validation, hence the 1)
assert CouponGenerationRequest.objects.all().count() == (
len(expected_processed_rows) + len(expected_failed_rows) - 1
)
# The correct number of coupons should have been created for each processed row
processed_rows = [
CouponRequestRow.parse_raw_data(i, row_data)
for i, row_data in enumerate(request_csv_rows, start=2)
if i in expected_processed_rows
]
expected_coupons = sum((row.num_codes for row in processed_rows))
assert Coupon.objects.all().count() == expected_coupons
# Sheets API should have been used to create an assignment sheet and share it
assert patched_sheets_api.create_file_watch.call_count == len(
expected_processed_rows
)
assert patched_sheets_api.share_drive_file.call_count == len(
expected_processed_rows
)
# New companies should have been created during the processing
assert list(Company.objects.order_by("name").values_list("name", flat=True)) == [
"MIT",
"MIT Open Learning",
]
|
[
"ecommerce.factories.ProductVersionFactory.create",
"ecommerce.models.Company.objects.order_by",
"sheets.coupon_request_api.CouponRequestHandler",
"sheets.coupon_request_api.CouponRequestRow.parse_raw_data",
"sheets.models.CouponGenerationRequest.objects.all",
"ecommerce.models.Coupon.objects.all",
"sheets.factories.GoogleApiAuthFactory.create",
"courses.factories.CourseRunFactory.create",
"os.path.join",
"types.SimpleNamespace"
] |
[((800, 872), 'courses.factories.CourseRunFactory.create', 'CourseRunFactory.create', ([], {'courseware_id': '"""course-v1:edX+DemoX+Demo_Course"""'}), "(courseware_id='course-v1:edX+DemoX+Demo_Course')\n", (823, 872), False, 'from courses.factories import CourseRunFactory\n'), ((877, 934), 'ecommerce.factories.ProductVersionFactory.create', 'ProductVersionFactory.create', ([], {'product__content_object': 'run'}), '(product__content_object=run)\n', (905, 934), False, 'from ecommerce.factories import ProductVersionFactory\n'), ((1107, 1178), 'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""sheets/resources/coupon_requests.csv"""'], {}), "(settings.BASE_DIR, 'sheets/resources/coupon_requests.csv')\n", (1119, 1178), False, 'import os\n'), ((1583, 1612), 'sheets.factories.GoogleApiAuthFactory.create', 'GoogleApiAuthFactory.create', ([], {}), '()\n', (1610, 1612), False, 'from sheets.factories import GoogleApiAuthFactory\n'), ((2389, 2584), 'types.SimpleNamespace', 'SimpleNamespace', ([], {'client': 'mocked_pygsheets_client', 'spreadsheet': 'mocked_spreadsheet', 'worksheet': 'mocked_worksheet', 'google_api_auth': 'google_api_auth', 'patched_get_data_rows': 'patched_get_data_rows'}), '(client=mocked_pygsheets_client, spreadsheet=\n mocked_spreadsheet, worksheet=mocked_worksheet, google_api_auth=\n google_api_auth, patched_get_data_rows=patched_get_data_rows)\n', (2404, 2584), False, 'from types import SimpleNamespace\n'), ((3016, 3108), 'types.SimpleNamespace', 'SimpleNamespace', ([], {'share_drive_file': 'share_drive_file', 'create_file_watch': 'create_file_watch'}), '(share_drive_file=share_drive_file, create_file_watch=\n create_file_watch)\n', (3031, 3108), False, 'from types import SimpleNamespace\n'), ((3372, 3394), 'sheets.coupon_request_api.CouponRequestHandler', 'CouponRequestHandler', ([], {}), '()\n', (3392, 3394), False, 'from sheets.coupon_request_api import CouponRequestHandler, CouponRequestRow\n'), ((4412, 4456), 'sheets.coupon_request_api.CouponRequestRow.parse_raw_data', 'CouponRequestRow.parse_raw_data', (['i', 'row_data'], {}), '(i, row_data)\n', (4443, 4456), False, 'from sheets.coupon_request_api import CouponRequestHandler, CouponRequestRow\n'), ((4171, 4208), 'sheets.models.CouponGenerationRequest.objects.all', 'CouponGenerationRequest.objects.all', ([], {}), '()\n', (4206, 4208), False, 'from sheets.models import CouponGenerationRequest\n'), ((4648, 4668), 'ecommerce.models.Coupon.objects.all', 'Coupon.objects.all', ([], {}), '()\n', (4666, 4668), False, 'from ecommerce.models import Company, Coupon\n'), ((5071, 5103), 'ecommerce.models.Company.objects.order_by', 'Company.objects.order_by', (['"""name"""'], {}), "('name')\n", (5095, 5103), False, 'from ecommerce.models import Company, Coupon\n')]
|
from django.conf.urls import url
from app05plus.views import index, register, mylogin
from app05plus.views import mylogout
urlpatterns = [
url(r"^newindex01$",index),
url(r"^register01$",register,name="register"),
url(r"^mylogin01$",mylogin,name="mylogin"),
url(r"^logout$",mylogout),
]
|
[
"django.conf.urls.url"
] |
[((153, 179), 'django.conf.urls.url', 'url', (['"""^newindex01$"""', 'index'], {}), "('^newindex01$', index)\n", (156, 179), False, 'from django.conf.urls import url\n'), ((186, 232), 'django.conf.urls.url', 'url', (['"""^register01$"""', 'register'], {'name': '"""register"""'}), "('^register01$', register, name='register')\n", (189, 232), False, 'from django.conf.urls import url\n'), ((238, 281), 'django.conf.urls.url', 'url', (['"""^mylogin01$"""', 'mylogin'], {'name': '"""mylogin"""'}), "('^mylogin01$', mylogin, name='mylogin')\n", (241, 281), False, 'from django.conf.urls import url\n'), ((287, 312), 'django.conf.urls.url', 'url', (['"""^logout$"""', 'mylogout'], {}), "('^logout$', mylogout)\n", (290, 312), False, 'from django.conf.urls import url\n')]
|
'''
Gets the Text out of an English Audio File
'''
import speech_recognition as sr
r = sr.Recognizer()
def convert_to_text(audioFile):
with sr.AudioFile(audioFile) as source:
audioData = r.record(source)
text = r.recognize_google(audioData)
print("\ntext : ", text)
if __name__ == "__main__":
    file_path = input("Enter the relative path to audio file: ")
    # 16-122828-0002.wav is a sample audio file; you can use any supported audio file
convert_to_text(file_path)
|
[
"speech_recognition.AudioFile",
"speech_recognition.Recognizer"
] |
[((90, 105), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (103, 105), True, 'import speech_recognition as sr\n'), ((148, 171), 'speech_recognition.AudioFile', 'sr.AudioFile', (['audioFile'], {}), '(audioFile)\n', (160, 171), True, 'import speech_recognition as sr\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 4 16:56:20 2019
@author: CHaithcock
"""
import sys
sys.path.insert(1, 'C:/Users/chaithcock/Documents/repos/RushHour/RHGraph')
import RHConstants as const
'''
Constants for Topological Combinatorial Constructions.
'''
STRIPS = ['C','CC','CCC','CT','TC','T','TT']
SLOTS = range(12)
EXIT_SLOT = 2
ROW_SLOTS = SLOTS[:6]
COL_SLOTS = SLOTS[6:]
HORZ_STRIPS = {}
HORZ_STRIPS['C'] = []
HORZ_STRIPS['C'].append([const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0,0,0,0])
HORZ_STRIPS['C'].append([0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0,0,0])
HORZ_STRIPS['C'].append([0,0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0,0])
HORZ_STRIPS['C'].append([0,0,0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0])
HORZ_STRIPS['C'].append([0,0,0,0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR])
HORZ_STRIPS['CC'] = []
HORZ_STRIPS['CC'].append([const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0,0])
HORZ_STRIPS['CC'].append([const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0])
HORZ_STRIPS['CC'].append([const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0,0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR])
HORZ_STRIPS['CC'].append([0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0])
HORZ_STRIPS['CC'].append([0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR])
HORZ_STRIPS['CC'].append([0,0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR])
HORZ_STRIPS['CCC'] = [ [const.HORIZONTAL_CAR]*6 ]
HORZ_STRIPS['CT'] = []
HORZ_STRIPS['CT'].append([const.HORIZONTAL_CAR] * 2 + [const.HORIZONTAL_TRUCK] * 3 + [0] )
HORZ_STRIPS['CT'].append([const.HORIZONTAL_CAR] * 2 + [0] + [const.HORIZONTAL_TRUCK] * 3 )
HORZ_STRIPS['CT'].append([0] + [const.HORIZONTAL_CAR] * 2 + [const.HORIZONTAL_TRUCK] * 3 )
HORZ_STRIPS['TC'] = []
HORZ_STRIPS['TC'].append([const.HORIZONTAL_TRUCK] * 2 + [const.HORIZONTAL_CAR] * 3 + [0] )
HORZ_STRIPS['TC'].append([const.HORIZONTAL_TRUCK] * 2 + [0] + [const.HORIZONTAL_CAR] * 3 )
HORZ_STRIPS['TC'].append([0] + [const.HORIZONTAL_TRUCK] * 2 + [const.HORIZONTAL_CAR] * 3 )
HORZ_STRIPS['T'] = []
HORZ_STRIPS['T'].append([const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK,0,0,0])
HORZ_STRIPS['T'].append([0,const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK,0,0])
HORZ_STRIPS['T'].append([0,0,const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK,0])
HORZ_STRIPS['T'].append([0,0,0,const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK])
HORZ_STRIPS['TT'] = [[const.HORIZONTAL_TRUCK]*6]
VERT_STRIPS = {}
VERT_STRIPS['C'] = []
VERT_STRIPS['C'].append([const.VERTICAL_CAR,const.VERTICAL_CAR,0,0,0,0])
VERT_STRIPS['C'].append([0,const.VERTICAL_CAR,const.VERTICAL_CAR,0,0,0])
VERT_STRIPS['C'].append([0,0,const.VERTICAL_CAR,const.VERTICAL_CAR,0,0])
VERT_STRIPS['C'].append([0,0,0,const.VERTICAL_CAR,const.VERTICAL_CAR,0])
VERT_STRIPS['C'].append([0,0,0,0,const.VERTICAL_CAR,const.VERTICAL_CAR])
VERT_STRIPS['CC'] = []
VERT_STRIPS['CC'].append([const.VERTICAL_CAR,const.VERTICAL_CAR,const.VERTICAL_CAR,const.VERTICAL_CAR,0,0])
VERT_STRIPS['CC'].append([const.VERTICAL_CAR,const.VERTICAL_CAR,0,const.VERTICAL_CAR,const.VERTICAL_CAR,0])
VERT_STRIPS['CC'].append([const.VERTICAL_CAR,const.VERTICAL_CAR,0,0,const.VERTICAL_CAR,const.VERTICAL_CAR])
VERT_STRIPS['CC'].append([0,const.VERTICAL_CAR,const.VERTICAL_CAR,const.VERTICAL_CAR,const.VERTICAL_CAR,0])
VERT_STRIPS['CC'].append([0,const.VERTICAL_CAR,const.VERTICAL_CAR,0,const.VERTICAL_CAR,const.VERTICAL_CAR])
VERT_STRIPS['CC'].append([0,0,const.VERTICAL_CAR,const.VERTICAL_CAR,const.VERTICAL_CAR,const.VERTICAL_CAR])
VERT_STRIPS['CCC'] = [ [const.VERTICAL_CAR]*6 ]
VERT_STRIPS['CT'] = []
VERT_STRIPS['CT'].append([const.VERTICAL_CAR] * 2 + [const.VERTICAL_TRUCK] * 3 + [0] )
VERT_STRIPS['CT'].append([const.VERTICAL_CAR] * 2 + [0] + [const.VERTICAL_TRUCK] * 3 )
VERT_STRIPS['CT'].append([0] + [const.VERTICAL_CAR] * 2 + [const.VERTICAL_TRUCK] * 3 )
VERT_STRIPS['TC'] = []
VERT_STRIPS['TC'].append([const.VERTICAL_TRUCK] * 2 + [const.VERTICAL_CAR] * 3 + [0] )
VERT_STRIPS['TC'].append([const.VERTICAL_TRUCK] * 2 + [0] + [const.VERTICAL_CAR] * 3 )
VERT_STRIPS['TC'].append([0] + [const.VERTICAL_TRUCK] * 2 + [const.VERTICAL_CAR] * 3 )
VERT_STRIPS['T'] = []
VERT_STRIPS['T'].append([const.VERTICAL_TRUCK,const.VERTICAL_TRUCK,const.VERTICAL_TRUCK,0,0,0])
VERT_STRIPS['T'].append([0,const.VERTICAL_TRUCK,const.VERTICAL_TRUCK,const.VERTICAL_TRUCK,0,0])
VERT_STRIPS['T'].append([0,0,const.VERTICAL_TRUCK,const.VERTICAL_TRUCK,const.VERTICAL_TRUCK,0])
VERT_STRIPS['T'].append([0,0,0,const.VERTICAL_TRUCK,const.VERTICAL_TRUCK,const.VERTICAL_TRUCK])
VERT_STRIPS['TT'] = [[const.VERTICAL_TRUCK]*6]
|
[
"sys.path.insert"
] |
[((101, 175), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""C:/Users/chaithcock/Documents/repos/RushHour/RHGraph"""'], {}), "(1, 'C:/Users/chaithcock/Documents/repos/RushHour/RHGraph')\n", (116, 175), False, 'import sys\n')]
|
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
from PIL import Image, ImageEnhance
import cv2
import urllib
import numpy as np
from tensorflow.keras.utils import to_categorical
import glob
from random import shuffle
import h5py
import torch
from torchvision import transforms
import math
import time
import os
import argparse
# tf.enable_v2_behavior()
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
from rcnn_sat import preprocess_image, bl_net
from load_data import load_dataset, load_dataset_h5, prep_pixels, prep_pixels_h5
from custom_transforms import all_random_noise
if tf.test.gpu_device_name():
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
else:
print("Please install GPU version of TF")
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
tf.test.is_gpu_available(cuda_only=False, min_cuda_compute_capability=None)
parser = argparse.ArgumentParser()
parser.add_argument('--tag', default='noise-contrast-gray', type=str)
parser.add_argument('--color', default='gray', type=str)
parser.add_argument('--download-data', default=False, type=bool)
parser.add_argument('--pretrained', default=True, type=bool)
args = parser.parse_args()
print(args)
data_root = '../data/{}'.format(args.color)
if args.download_data == True:
trainX, trainy, testX, testy = load_dataset()
os.makedirs(data_root, exist_ok = True)
prep_pixels_h5(trainX, trainy, testX, testy, data_root, args.color)
args.download_data = False
if args.download_data == False:
trainX,trainy,testX,testy = load_dataset_h5(data_root)
input_layer = tf.keras.layers.Input((128, 128, 3))
model = bl_net(input_layer, classes=10, cumulative_readout=False)
if args.pretrained:
model.load_weights('bl_imagenet.h5',skip_mismatch=True,by_name=True)
## Let's try fine-tuning it
# tf.keras.utils.plot_model(model,to_file='check.png')
skip_layers = ['ReadoutDense','Sotfmax_Time_0','Sotfmax_Time_1',
'Sotfmax_Time_2','Sotfmax_Time_3','Sotfmax_Time_4',
'Sotfmax_Time_5','Sotfmax_Time_6','Sotfmax_Time_7']
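# only the readout/softmax head layers listed above are left trainable; the pretrained recurrent backbone is frozen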
for layer in model.layers:
if layer.name in skip_layers:
layer.trainable = True
else:
layer.trainable = False
# compile model with optimizer and loss
"""
B, BL and parameter-matched controls (B-K, B-F and B-D) were trained for a total of 90 epochs
with a batch size of 100. B-U was trained using the same procedure but with a batch size of 64
due to its substantially larger number of parameters.
The cross-entropy between the softmax of the network category readout and the labels
was used as the training loss. For networks with multiple readouts (BL and B-U),
we calculate the cross-entropy at each readout and average this across readouts.
Adam [64] was used for optimisation with a learning rate of 0.005 and epsilon parameter 0.1.
L2-regularisation was applied throughout training with a coefficient of 10−6.
"""
cce = tf.keras.losses.CategoricalCrossentropy()
opt = tf.keras.optimizers.Adam(learning_rate=0.005)
model.compile(optimizer=opt,loss='categorical_crossentropy',metrics=['accuracy'])
from tensorflow.keras.callbacks import ModelCheckpoint
checkpoint = ModelCheckpoint("pretrained_mp_{}.hdf5".format(args.tag), monitor='loss', verbose=1,
save_best_only=True, mode='auto', period=1)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(preprocessing_function=all_random_noise)
# trainy = np.transpose(trainy, (1,2,0))
# testy = np.transpose(testy, (1,2,0))
print(trainX.shape)
print(trainy.shape)
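# note: only the first label array (trainy[0] / testy[0]) is used as the targets for training and validation below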
history = model.fit(x=datagen.flow(trainX, trainy[0],batch_size=32),
validation_data=(testX,testy[0]),
steps_per_epoch=len(trainX)//32,
epochs=100,callbacks=[checkpoint])
model.save('./model/{}_{}'.format(
args.tag,
time.strftime('%Y.%m.%d_%H.%M.%S'),
))
|
[
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"argparse.ArgumentParser",
"os.makedirs",
"tensorflow.config.list_physical_devices",
"time.strftime",
"load_data.prep_pixels_h5",
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Input",
"rcnn_sat.bl_net",
"load_data.load_dataset",
"load_data.load_dataset_h5",
"tensorflow.test.is_gpu_available",
"tensorflow.test.gpu_device_name"
] |
[((651, 676), 'tensorflow.test.gpu_device_name', 'tf.test.gpu_device_name', ([], {}), '()\n', (674, 676), True, 'import tensorflow as tf\n'), ((876, 951), 'tensorflow.test.is_gpu_available', 'tf.test.is_gpu_available', ([], {'cuda_only': '(False)', 'min_cuda_compute_capability': 'None'}), '(cuda_only=False, min_cuda_compute_capability=None)\n', (900, 951), True, 'import tensorflow as tf\n'), ((962, 987), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (985, 987), False, 'import argparse\n'), ((1660, 1696), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['(128, 128, 3)'], {}), '((128, 128, 3))\n', (1681, 1696), True, 'import tensorflow as tf\n'), ((1705, 1762), 'rcnn_sat.bl_net', 'bl_net', (['input_layer'], {'classes': '(10)', 'cumulative_readout': '(False)'}), '(input_layer, classes=10, cumulative_readout=False)\n', (1711, 1762), False, 'from rcnn_sat import preprocess_image, bl_net\n'), ((2985, 3026), 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {}), '()\n', (3024, 3026), True, 'import tensorflow as tf\n'), ((3033, 3078), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.005)'}), '(learning_rate=0.005)\n', (3057, 3078), True, 'import tensorflow as tf\n'), ((3443, 3502), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'all_random_noise'}), '(preprocessing_function=all_random_noise)\n', (3461, 3502), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((1391, 1405), 'load_data.load_dataset', 'load_dataset', ([], {}), '()\n', (1403, 1405), False, 'from load_data import load_dataset, load_dataset_h5, prep_pixels, prep_pixels_h5\n'), ((1410, 1447), 'os.makedirs', 'os.makedirs', (['data_root'], {'exist_ok': '(True)'}), '(data_root, exist_ok=True)\n', (1421, 1447), False, 'import os\n'), ((1454, 1521), 'load_data.prep_pixels_h5', 'prep_pixels_h5', (['trainX', 'trainy', 'testX', 'testy', 'data_root', 'args.color'], {}), '(trainX, trainy, testX, testy, data_root, args.color)\n', (1468, 1521), False, 'from load_data import load_dataset, load_dataset_h5, prep_pixels, prep_pixels_h5\n'), ((1618, 1644), 'load_data.load_dataset_h5', 'load_dataset_h5', (['data_root'], {}), '(data_root)\n', (1633, 1644), False, 'from load_data import load_dataset, load_dataset_h5, prep_pixels, prep_pixels_h5\n'), ((835, 873), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (866, 873), True, 'import tensorflow as tf\n'), ((3910, 3944), 'time.strftime', 'time.strftime', (['"""%Y.%m.%d_%H.%M.%S"""'], {}), "('%Y.%m.%d_%H.%M.%S')\n", (3923, 3944), False, 'import time\n'), ((720, 745), 'tensorflow.test.gpu_device_name', 'tf.test.gpu_device_name', ([], {}), '()\n', (743, 745), True, 'import tensorflow as tf\n')]
|
import spacy
import sys
import numpy as np
import operator
from keras.models import load_model
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import preprocess_data
MAX_SEQUENCE_LENGTH = 100
EMBEDDING_DIM = 300
model = load_model('models/bidirectional_lstm/model.h5')
nlp = spacy.load('en')
print('Test your sentences.')
print('> ', end='', flush=True)
intents = preprocess_data.load_intents()
for line in sys.stdin:
doc = nlp(line)
embedding_matrix = np.zeros((1, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM))
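    # fill one row per token with its spaCy word vector (single-sample batch of shape (1, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM))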
for index, word in enumerate(doc):
embedding_matrix[0][index] = word.vector
prediction = model.predict(embedding_matrix)
scores = {}
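    # prediction holds one row for the single sample; map each class score back to its intent label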
for (x, y), score in np.ndenumerate(prediction):
scores[intents[y]] = score
sorted_scores = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)
print(sorted_scores)
print('> ', end='', flush=True)
|
[
"keras.models.load_model",
"numpy.ndenumerate",
"numpy.zeros",
"spacy.load",
"operator.itemgetter",
"preprocess_data.load_intents"
] |
[((279, 327), 'keras.models.load_model', 'load_model', (['"""models/bidirectional_lstm/model.h5"""'], {}), "('models/bidirectional_lstm/model.h5')\n", (289, 327), False, 'from keras.models import load_model\n'), ((334, 350), 'spacy.load', 'spacy.load', (['"""en"""'], {}), "('en')\n", (344, 350), False, 'import spacy\n'), ((425, 455), 'preprocess_data.load_intents', 'preprocess_data.load_intents', ([], {}), '()\n', (453, 455), False, 'import preprocess_data\n'), ((523, 572), 'numpy.zeros', 'np.zeros', (['(1, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM)'], {}), '((1, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM))\n', (531, 572), True, 'import numpy as np\n'), ((751, 777), 'numpy.ndenumerate', 'np.ndenumerate', (['prediction'], {}), '(prediction)\n', (765, 777), True, 'import numpy as np\n'), ((862, 884), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (881, 884), False, 'import operator\n')]
|
import argparse
import numpy as np
import os, sys
import keyboard
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from functions import (
assert_OBJ_exist_if_GRP_exist,
get_OBJECT_dict,
is_obj_grp_OBJ_GRP,
partition_str,
object_file_name,
print_if_worked,
)
parser = argparse.ArgumentParser(
description="view the grasps saved on the predetermined file"
)
parser.add_argument(
"-o",
"--object",
type=str,
default="",
help="select an object [def: all]",
)
parser.add_argument(
"-g",
"--grasp",
type=str,
default="",
help="select a grasp of an object [def: all]",
)
parser.add_argument(
"-gi",
"--grasp_info",
type=bool,
default=False,
help="print Grasp Info [def: False]",
)
args = parser.parse_args()
OBJ = args.object
GRP = args.grasp
assert_OBJ_exist_if_GRP_exist(OBJ, GRP)
objects = get_OBJECT_dict()
worked = False
for obj1, mesh in objects["meshes"].items():
for grp1, grasp in objects["grasps"].items():
obj, grp = partition_str(grp1)
if obj != obj1:
continue
if is_obj_grp_OBJ_GRP(OBJ, GRP, obj, grp):
worked = True
if args.grasp_info or True:
print("Gt \n", grasp.Gt.round(3))
grasp.get_classification(True)
mesh.view(grp1, grasp.contact_points)
print_if_worked(
worked,
"Finished" + 50 * " ",
"No objects and/or grasps declared on "
+ object_file_name()
+ ".yaml or object and/or grasp passed as argument doesn't exists",
)
|
[
"functions.is_obj_grp_OBJ_GRP",
"argparse.ArgumentParser",
"functions.partition_str",
"functions.object_file_name",
"os.path.dirname",
"functions.get_OBJECT_dict",
"functions.assert_OBJ_exist_if_GRP_exist"
] |
[((322, 413), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""view the grasps saved on the predetermined file"""'}), "(description=\n 'view the grasps saved on the predetermined file')\n", (345, 413), False, 'import argparse\n'), ((857, 896), 'functions.assert_OBJ_exist_if_GRP_exist', 'assert_OBJ_exist_if_GRP_exist', (['OBJ', 'GRP'], {}), '(OBJ, GRP)\n', (886, 896), False, 'from functions import assert_OBJ_exist_if_GRP_exist, get_OBJECT_dict, is_obj_grp_OBJ_GRP, partition_str, object_file_name, print_if_worked\n'), ((908, 925), 'functions.get_OBJECT_dict', 'get_OBJECT_dict', ([], {}), '()\n', (923, 925), False, 'from functions import assert_OBJ_exist_if_GRP_exist, get_OBJECT_dict, is_obj_grp_OBJ_GRP, partition_str, object_file_name, print_if_worked\n'), ((1056, 1075), 'functions.partition_str', 'partition_str', (['grp1'], {}), '(grp1)\n', (1069, 1075), False, 'from functions import assert_OBJ_exist_if_GRP_exist, get_OBJECT_dict, is_obj_grp_OBJ_GRP, partition_str, object_file_name, print_if_worked\n'), ((1132, 1170), 'functions.is_obj_grp_OBJ_GRP', 'is_obj_grp_OBJ_GRP', (['OBJ', 'GRP', 'obj', 'grp'], {}), '(OBJ, GRP, obj, grp)\n', (1150, 1170), False, 'from functions import assert_OBJ_exist_if_GRP_exist, get_OBJECT_dict, is_obj_grp_OBJ_GRP, partition_str, object_file_name, print_if_worked\n'), ((115, 140), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (130, 140), False, 'import os, sys\n'), ((1493, 1511), 'functions.object_file_name', 'object_file_name', ([], {}), '()\n', (1509, 1511), False, 'from functions import assert_OBJ_exist_if_GRP_exist, get_OBJECT_dict, is_obj_grp_OBJ_GRP, partition_str, object_file_name, print_if_worked\n')]
|
"""empty message
Revision ID: dc089ecc2c38
Revises: <PASSWORD>
Create Date: 2019-09-25 00:46:18.814508
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "dc089ecc2c38"
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("ix_shops_name", table_name="shops")
op.create_index(op.f("ix_shops_name"), "shops", ["name"], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_shops_name"), table_name="shops")
op.create_index("ix_shops_name", "shops", ["name"], unique=True)
# ### end Alembic commands ###
|
[
"alembic.op.drop_index",
"alembic.op.f",
"alembic.op.create_index"
] |
[((355, 405), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_shops_name"""'], {'table_name': '"""shops"""'}), "('ix_shops_name', table_name='shops')\n", (368, 405), False, 'from alembic import op\n'), ((667, 731), 'alembic.op.create_index', 'op.create_index', (['"""ix_shops_name"""', '"""shops"""', "['name']"], {'unique': '(True)'}), "('ix_shops_name', 'shops', ['name'], unique=True)\n", (682, 731), False, 'from alembic import op\n'), ((426, 447), 'alembic.op.f', 'op.f', (['"""ix_shops_name"""'], {}), "('ix_shops_name')\n", (430, 447), False, 'from alembic import op\n'), ((620, 641), 'alembic.op.f', 'op.f', (['"""ix_shops_name"""'], {}), "('ix_shops_name')\n", (624, 641), False, 'from alembic import op\n')]
|
import numpy as np
def train_test(
X: np.ndarray,
y: np.ndarray,
test_size: float,
random_seed: int = 0,
) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Split input data randomly after shuffling
Args:
X (np.ndarray): decision matrix
y (np.ndarray): ground-truth labels
test_size (float): fraction of test split
random_seed (int): number to initialize a pseudorandom number generator
Returns:
tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: X_train, y_train,
X_test, y_test
"""
np.random.seed(random_seed)
num_samples = X.shape[0]
num_train_samples = int(num_samples * (1 - test_size))
permuted_ids = np.random.permutation(np.arange(num_samples))
train_ids = permuted_ids[:num_train_samples]
test_ids = permuted_ids[num_train_samples:]
X_test = X[test_ids]
X_train = X[train_ids]
y_test = y[test_ids]
y_train = y[train_ids]
return X_train, y_train, X_test, y_test
class _KFoldIterator:
def __init__(self, kfold):
self._kfold = kfold
self._counter = 0
def __next__(self):
if self._counter < self._kfold.num_folds:
item = self._kfold.__getitem__(self._counter)
self._counter += 1
return item
else:
raise StopIteration
class KFold:
"""Iterable cross-validation object
Args:
X (np.ndarray): samples decision matrix
y (np.ndarray): samples ground-truth value
num_folds (int): number of cross-validation folds
random_seed (int): value for numpy random number generator initialization
Methods:
__getitem__(key): returns X_train, y_train, X_test, y_test
"""
def __init__(self, X: np.ndarray, y: np.ndarray, num_folds: int, random_seed: int):
self.num_samples = X.shape[0]
self.num_folds = num_folds
np.random.seed(random_seed)
permuted_ids = np.random.permutation(np.arange(self.num_samples))
self.X = X[permuted_ids]
self.y = y[permuted_ids]
def __getitem__(self, key: int):
assert key < self.num_folds, "Key must be lower than number of folds"
        assert key >= 0, "Key must not be negative"
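        # the key-th contiguous slice of the shuffled data becomes the test fold; the remaining samples form the train split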
test_start_id = int(key * self.num_samples / self.num_folds)
test_end_id = int((key + 1) * self.num_samples / self.num_folds)
X_test = self.X[test_start_id: test_end_id]
X_train = np.concatenate([
self.X[: test_start_id],
self.X[test_end_id:],
],
axis=0,
)
y_test = self.y[test_start_id: test_end_id]
y_train = np.concatenate([
self.y[: test_start_id],
self.y[test_end_id:],
],
axis=0,
)
return X_train, y_train, X_test, y_test
def __iter__(self):
return _KFoldIterator(self)
def cross_val(
X: np.ndarray,
y: np.ndarray,
num_folds: int,
random_seed: int = 0,
) -> KFold:
"""
Make cross-validation split randomly after shuffling
Args:
X (np.ndarray): decision matrix
y (np.ndarray): ground-truth labels
num_folds (int): number of train/test folds
random_seed (int): number to initialize a pseudorandom number generator
Returns:
KFold: object containing data with __getitem__ method for getting splits
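    Example:
        >>> X = np.random.rand(10, 3)
        >>> y = np.arange(10)
        >>> for X_train, y_train, X_test, y_test in cross_val(X, y, num_folds=5):
        ...     pass  # fit/evaluate a model on each fold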
"""
kfold = KFold(X, y, num_folds, random_seed)
return kfold
|
[
"numpy.random.seed",
"numpy.arange",
"numpy.concatenate"
] |
[((605, 632), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (619, 632), True, 'import numpy as np\n'), ((762, 784), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (771, 784), True, 'import numpy as np\n'), ((1939, 1966), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1953, 1966), True, 'import numpy as np\n'), ((2487, 2557), 'numpy.concatenate', 'np.concatenate', (['[self.X[:test_start_id], self.X[test_end_id:]]'], {'axis': '(0)'}), '([self.X[:test_start_id], self.X[test_end_id:]], axis=0)\n', (2501, 2557), True, 'import numpy as np\n'), ((2698, 2768), 'numpy.concatenate', 'np.concatenate', (['[self.y[:test_start_id], self.y[test_end_id:]]'], {'axis': '(0)'}), '([self.y[:test_start_id], self.y[test_end_id:]], axis=0)\n', (2712, 2768), True, 'import numpy as np\n'), ((2012, 2039), 'numpy.arange', 'np.arange', (['self.num_samples'], {}), '(self.num_samples)\n', (2021, 2039), True, 'import numpy as np\n')]
|
"""
Special numbers utility functions.
"""
# pylint: disable=invalid-name
import math
def get_triangle_number(n: int) -> int:
"""Get Triangle number `T_n=n(n+1)/2` for a given number `n`."""
return (n * (n + 1)) // 2
def is_triangle_number(number: int) -> bool:
"""Check if a given number `number` is a triangle number of the form 1/2 * n * (n+1)."""
return ((math.sqrt(8*number + 1) - 1) / 2.0).is_integer()
def get_square_number(n: int) -> int:
"""Get Square number `S_n=n*n` for a given number `n`."""
return n * n
def get_pentagonal_number(n: int) -> int:
"""Get Pentagonal number `P_n=n*(3n−1)/2` for a given number `n`."""
return (n * (3*n - 1)) // 2
def is_pentagonal_number(number: int) -> bool:
"""Check if a given number `number` is a pentagonal number of the form n * (3*n − 1) / 2."""
return ((math.sqrt(24*number + 1) + 1) / 6.0).is_integer()
def get_hexagonal_number(n: int) -> int:
"""Get Hexagonal number `H_n=n*(2n−1)` for a given number `n`."""
return n * (2*n - 1)
def is_hexagonal_number(number: int) -> bool:
"""Check if a given number `number` is a hexagonal number of the form n * (2*n − 1)."""
return ((math.sqrt(8*number + 1) + 1) / 4.0).is_integer()
def get_heptagonal_number(n: int) -> int:
"""Get Heptagonal number `H_n=n*(5n−3)/2` for a given number `n`."""
return (n * (5*n - 3)) // 2
def get_octagonal_number(n: int) -> int:
"""Get Octagonal number `O_n=n*(3n-2)` for a given number `n`."""
return n * (3*n - 2)
|
[
"math.sqrt"
] |
[((381, 406), 'math.sqrt', 'math.sqrt', (['(8 * number + 1)'], {}), '(8 * number + 1)\n', (390, 406), False, 'import math\n'), ((857, 883), 'math.sqrt', 'math.sqrt', (['(24 * number + 1)'], {}), '(24 * number + 1)\n', (866, 883), False, 'import math\n'), ((1198, 1223), 'math.sqrt', 'math.sqrt', (['(8 * number + 1)'], {}), '(8 * number + 1)\n', (1207, 1223), False, 'import math\n')]
|
# Author: <NAME>
# Angular Distribution Dependency Parser
# This is a python script that is placed into an Angular project directory and is run to obtain a list
# of unique npmjs package directories. It parsed the vendor source map header that contains the paths
# to all the files that are pulled in from the node_modules directory by the embedded Angular webpack.
import glob, os
# finds the vendor map in the dist directory when inside the angular project folder (w/ some arbitrary names)
# if there's no vendor map it parses the main source map
try:
path = glob.glob('./dist/*/vendor.*.map')[0]
except IndexError:
print('Vendor source map not found, using main source map.')
try:
path = glob.glob('./dist/*/main.*.map')[0]
except IndexError:
print('No valid source map found.')
quit()
# reads in the file
with open(path, 'r') as f:
vendor_paths = f.read().replace('\n', '')
# chops off the end of the source map header with the vendor directories
vendor_paths = vendor_paths.split(']')[0]
# chops off the opener to give all the paths dilimited by commas
vendor_paths = vendor_paths.split('[')[1]
# splits by commas
vendor_paths = vendor_paths.split(',')
package_dirs = set()
for path in vendor_paths:
# further cuts to include only the relative path starting with the node_modules directory
path = path[12:-1]
print(path)
# running ng build with the --build-optimizer flag appends this to the end of the file in the source map
if path.endswith('.pre-build-optimizer.js'):
path = path.replace('.pre-build-optimizer.js','')
# checks if the file actually exists
if not os.path.isfile(path):
print('The following file was not found: ', path)
continue
# gets directory containing the source map file
parent_dir = os.path.dirname(path)
# loop to traverse up the path terminating at the node_modules directory
while(parent_dir != './node_modules'):
# checks to prevent infinite loop if the path doesn't have a node_modules directory
if parent_dir == '.' or parent_dir == '':
print('The following path is not in a node_modules directory: ', path)
break
        # checks if a package.json exists in the directory, adds to the set of package directories, and breaks
if os.path.isfile(parent_dir + '/package.json'):
package_dirs.add(parent_dir)
#print(parent_dir)
break
# progresses the loop by getting the next parent directory
parent_dir = os.path.dirname(parent_dir)
# checks if the loops terminates with no breaks (it hit the node_modules)
else:
print('No package.json file was found anywhere in the following path: ', path)
# opens/overwrites output file to be passed into the iq server cli
out_file = open('package_dirs.txt', 'w+')
# prints package directories and writes to file
print('This is the set of unique package directories for files defined in the vendor source map: ')
for package_path in package_dirs:
out_file.write(package_path + '\n')
print(package_path)
out_file.close()
|
[
"os.path.dirname",
"os.path.isfile",
"glob.glob"
] |
[((1834, 1855), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (1849, 1855), False, 'import glob, os\n'), ((568, 602), 'glob.glob', 'glob.glob', (['"""./dist/*/vendor.*.map"""'], {}), "('./dist/*/vendor.*.map')\n", (577, 602), False, 'import glob, os\n'), ((1663, 1683), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (1677, 1683), False, 'import glob, os\n'), ((2345, 2389), 'os.path.isfile', 'os.path.isfile', (["(parent_dir + '/package.json')"], {}), "(parent_dir + '/package.json')\n", (2359, 2389), False, 'import glob, os\n'), ((2569, 2596), 'os.path.dirname', 'os.path.dirname', (['parent_dir'], {}), '(parent_dir)\n', (2584, 2596), False, 'import glob, os\n'), ((714, 746), 'glob.glob', 'glob.glob', (['"""./dist/*/main.*.map"""'], {}), "('./dist/*/main.*.map')\n", (723, 746), False, 'import glob, os\n')]
|
import pymysql
pymysql.version_info = (1,4,0, "final", 0)
pymysql.install_as_MySQLdb()
|
[
"pymysql.install_as_MySQLdb"
] |
[((58, 86), 'pymysql.install_as_MySQLdb', 'pymysql.install_as_MySQLdb', ([], {}), '()\n', (84, 86), False, 'import pymysql\n')]
|
from copy import copy
from typing import Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.autograd import grad
from tqdm import tqdm
from graphwar import Surrogate
from graphwar.attack.injection.injection_attacker import InjectionAttacker
class AdvInjection(InjectionAttacker, Surrogate):
r"""2nd place solution of KDD CUP 2020
"Adversarial attack and defense" challenge.
Example
-------
>>> from graphwar.dataset import GraphWarDataset
>>> import torch_geometric.transforms as T
>>> dataset = GraphWarDataset(root='~/data/pygdata', name='cora',
transform=T.LargestConnectedComponents())
>>> data = dataset[0]
>>> surrogate_model = ... # train your surrogate model
>>> from graphwar.attack.injection import AdvInjection
    >>> attacker = AdvInjection(data)
    >>> attacker.setup_surrogate(surrogate_model)
>>> attacker.reset()
>>> attacker.attack(10, feat_limits=(0, 1)) # injecting 10 nodes for continuous features
>>> attacker.reset()
>>> attacker.attack(10, feat_budgets=10) # injecting 10 nodes for binary features
>>> attacker.data() # get attacked graph
>>> attacker.injected_nodes() # get injected nodes after attack
>>> attacker.injected_edges() # get injected edges after attack
>>> attacker.injected_feats() # get injected features after attack
Note
----
* Please remember to call :meth:`reset` before each attack.
"""
def attack(self, num_budgets: Union[int, float], *,
targets: Optional[Tensor] = None,
interconnection: bool = False,
lr: float = 0.01,
num_edges_global: Optional[int] = None,
num_edges_local: Optional[int] = None,
feat_limits: Optional[Union[tuple, dict]] = None,
feat_budgets: Optional[int] = None,
disable: bool = False) -> "AdvInjection":
super().attack(num_budgets, targets=targets,
num_edges_global=num_edges_global,
num_edges_local=num_edges_local,
feat_limits=feat_limits,
feat_budgets=feat_budgets)
candidate_nodes = self.targets.tolist()
edge_index, edge_weight, feat = self.edge_index, self.edge_weight, self.feat
if edge_weight is None:
edge_weight = feat.new_ones(edge_index.size(1))
feat_min, feat_max = self.feat_limits
feat_limits = max(abs(feat_min), feat_max)
feat_budgets = self.feat_budgets
injected_feats = None
for injected_node in tqdm(range(self.num_nodes, self.num_nodes+self.num_budgets),
desc="Injecting nodes...",
disable=disable):
injected_edge_index = np.stack(
[np.tile(injected_node, len(candidate_nodes)), candidate_nodes], axis=0)
injected_edge_index = torch.as_tensor(
injected_edge_index).to(edge_index)
injected_edge_weight = edge_weight.new_zeros(
injected_edge_index.size(1)).requires_grad_()
injected_feat = feat.new_zeros(1, self.num_feats)
if injected_feats is None:
injected_feats = injected_feat.requires_grad_()
else:
injected_feats = torch.cat(
[injected_feats, injected_feat], dim=0).requires_grad_()
edge_grad, feat_grad = self.compute_gradients(
feat, edge_index, edge_weight,
injected_feats, injected_edge_index, injected_edge_weight,
targets=self.targets, target_labels=self.target_labels)
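            # connect the injected node through the num_edges_local candidate edges with the largest loss gradients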
topk_edges = torch.topk(edge_grad, k=self.num_edges_local).indices
injected_edge_index = injected_edge_index[:, topk_edges]
self.inject_node(injected_node)
self.inject_edges(injected_edge_index)
with torch.no_grad():
edge_index = torch.cat(
[edge_index, injected_edge_index, injected_edge_index.flip(0)], dim=1)
edge_weight = torch.cat(
[edge_weight, edge_weight.new_ones(injected_edge_index.size(1)*2)], dim=0)
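                # binary features: set the top-k highest-gradient entries to 1; continuous features: step to the feature limits along the gradient sign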
if feat_budgets is not None:
topk = torch.topk(
feat_grad, k=feat_budgets, dim=1)
injected_feats.data.fill_(0.)
injected_feats.data.scatter_(
1, topk.indices, 1.0)
else:
injected_feats.data = (
feat_limits * feat_grad.sign()).clamp(min=feat_min, max=feat_max)
if interconnection:
candidate_nodes.append(injected_node)
self._injected_feats = injected_feats.data
return self
def compute_gradients(self, x, edge_index, edge_weight,
injected_feats, injected_edge_index,
injected_edge_weight,
targets, target_labels):
x = torch.cat([x, injected_feats], dim=0)
edge_index = torch.cat(
[edge_index, injected_edge_index, injected_edge_index.flip(0)], dim=1)
edge_weight = torch.cat(
[edge_weight, injected_edge_weight.repeat(2)], dim=0)
logit = self.surrogate(x, edge_index, edge_weight)[targets] / self.eps
loss = F.cross_entropy(logit, target_labels)
return grad(loss, [injected_edge_weight, injected_feats], create_graph=False)
|
[
"torch.topk",
"torch.autograd.grad",
"torch.nn.functional.cross_entropy",
"torch.cat",
"torch.as_tensor",
"torch.no_grad"
] |
[((5311, 5348), 'torch.cat', 'torch.cat', (['[x, injected_feats]'], {'dim': '(0)'}), '([x, injected_feats], dim=0)\n', (5320, 5348), False, 'import torch\n'), ((5663, 5700), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logit', 'target_labels'], {}), '(logit, target_labels)\n', (5678, 5700), True, 'import torch.nn.functional as F\n'), ((5719, 5789), 'torch.autograd.grad', 'grad', (['loss', '[injected_edge_weight, injected_feats]'], {'create_graph': '(False)'}), '(loss, [injected_edge_weight, injected_feats], create_graph=False)\n', (5723, 5789), False, 'from torch.autograd import grad\n'), ((3917, 3962), 'torch.topk', 'torch.topk', (['edge_grad'], {'k': 'self.num_edges_local'}), '(edge_grad, k=self.num_edges_local)\n', (3927, 3962), False, 'import torch\n'), ((4160, 4175), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4173, 4175), False, 'import torch\n'), ((3124, 3160), 'torch.as_tensor', 'torch.as_tensor', (['injected_edge_index'], {}), '(injected_edge_index)\n', (3139, 3160), False, 'import torch\n'), ((4524, 4568), 'torch.topk', 'torch.topk', (['feat_grad'], {'k': 'feat_budgets', 'dim': '(1)'}), '(feat_grad, k=feat_budgets, dim=1)\n', (4534, 4568), False, 'import torch\n'), ((3541, 3590), 'torch.cat', 'torch.cat', (['[injected_feats, injected_feat]'], {'dim': '(0)'}), '([injected_feats, injected_feat], dim=0)\n', (3550, 3590), False, 'import torch\n')]
|
from markdownio import block
def test_linebreak(document):
elem = block.HorizontalRule()
document.add(elem)
assert "---\n" == document.output()
|
[
"markdownio.block.HorizontalRule"
] |
[((72, 94), 'markdownio.block.HorizontalRule', 'block.HorizontalRule', ([], {}), '()\n', (92, 94), False, 'from markdownio import block\n')]
|
""" Usage:
<file-name> --in=IN_FILE --langs=LANGUAGES --out=OUT_FILE [--debug]
"""
# External imports
import logging
import pdb
from pprint import pprint
from pprint import pformat
from docopt import docopt
from collections import defaultdict
from operator import itemgetter
from tqdm import tqdm
# Local imports
from google_translate import google_translate
#=-----
if __name__ == "__main__":
# Parse command line arguments
args = docopt(__doc__)
inp_fn = args["--in"]
langs = args["--langs"].split(",")
out_fn = args["--out"]
debug = args["--debug"]
if debug:
logging.basicConfig(level = logging.DEBUG)
else:
logging.basicConfig(level = logging.INFO)
logging.info(f"Writing output to {out_fn}")
with open(out_fn, "w", encoding = "utf8") as fout:
fout.write("\t".join(["sentid", "sentence"] + langs) + "\n")
lines = [line.strip() for line in open(inp_fn, encoding = "utf8")]
for line in tqdm(lines[1:]):
sentid, sent = line.strip().split("\t")
trans = [google_translate([sent], "en", target_lang)[0]["translatedText"]
for target_lang in langs]
fout.write("\t".join([sentid, sent] + trans) + "\n")
logging.info("DONE")
|
[
"tqdm.tqdm",
"logging.basicConfig",
"docopt.docopt",
"google_translate.google_translate",
"logging.info"
] |
[((447, 462), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (453, 462), False, 'from docopt import docopt\n'), ((713, 756), 'logging.info', 'logging.info', (['f"""Writing output to {out_fn}"""'], {}), "(f'Writing output to {out_fn}')\n", (725, 756), False, 'import logging\n'), ((1249, 1269), 'logging.info', 'logging.info', (['"""DONE"""'], {}), "('DONE')\n", (1261, 1269), False, 'import logging\n'), ((605, 645), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (624, 645), False, 'import logging\n'), ((666, 705), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (685, 705), False, 'import logging\n'), ((977, 992), 'tqdm.tqdm', 'tqdm', (['lines[1:]'], {}), '(lines[1:])\n', (981, 992), False, 'from tqdm import tqdm\n'), ((1067, 1110), 'google_translate.google_translate', 'google_translate', (['[sent]', '"""en"""', 'target_lang'], {}), "([sent], 'en', target_lang)\n", (1083, 1110), False, 'from google_translate import google_translate\n')]
|
"""Authors: <NAME>."""
from spikeextractors import load_extractor_from_pickle
from ..baserecordingextractorinterface import BaseRecordingExtractorInterface
from ..basesortingextractorinterface import BaseSortingExtractorInterface
from ....utils import FilePathType
class SIPickleRecordingExtractorInterface(BaseRecordingExtractorInterface):
"""Primary interface for reading and converting SpikeInterface Recording objects through .pkl files."""
RX = None
def __init__(self, file_path: FilePathType):
self.recording_extractor = load_extractor_from_pickle(pkl_file=file_path)
self.subset_channels = None
self.source_data = dict(file_path=file_path)
class SIPickleSortingExtractorInterface(BaseSortingExtractorInterface):
"""Primary interface for reading and converting SpikeInterface Sorting objects through .pkl files."""
SX = None
def __init__(self, file_path: FilePathType):
self.sorting_extractor = load_extractor_from_pickle(pkl_file=file_path)
self.source_data = dict(file_path=file_path)
|
[
"spikeextractors.load_extractor_from_pickle"
] |
[((552, 598), 'spikeextractors.load_extractor_from_pickle', 'load_extractor_from_pickle', ([], {'pkl_file': 'file_path'}), '(pkl_file=file_path)\n', (578, 598), False, 'from spikeextractors import load_extractor_from_pickle\n'), ((966, 1012), 'spikeextractors.load_extractor_from_pickle', 'load_extractor_from_pickle', ([], {'pkl_file': 'file_path'}), '(pkl_file=file_path)\n', (992, 1012), False, 'from spikeextractors import load_extractor_from_pickle\n')]
|
#!/usr/bin/env python
import rospy
import numpy as np
from smach import State, StateMachine, Sequence
from smach_ros import SimpleActionState, MonitorState, IntrospectionServer
# action message
import actionlib
from actionlib_msgs.msg import GoalStatus
from geometry_msgs.msg import Pose, Point, Quaternion, PoseStamped
from nav_msgs.msg import OccupancyGrid, Odometry
from nav_msgs.srv import GetPlan, GetMap
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from visualization_msgs.msg import Marker, MarkerArray
from vortex_msgs.msg import MoveAction, MoveGoal
def makeMoveGoal(contr_name, target_x, target_y, target_z, radius_of_acceptance = 0.2):
"""
string controller_name
geometry_msgs/Pose target_pose
float32 radius_of_acceptance
---
---
"""
move_goal = MoveGoal()
move_goal.controller_name = contr_name
move_goal.target_pose.position.x = target_x
move_goal.target_pose.position.y = target_y
move_goal.target_pose.position.z = target_z
move_goal.radius_of_acceptance = radius_of_acceptance
return move_goal
class TaskManager():
def __init__(self):
rospy.init_node('move_to_and_inspect_point_sm', anonymous=False)
hsm = StateMachine(outcomes=['finished statemachine'])
with hsm:
StateMachine.add( 'GO_TO_POINT',
SimpleActionState( 'pid_global',
MoveAction,
makeMoveGoal("pid_global_plan", -3.0, 0, -0.5, radius_of_acceptance = 2.0)),
transitions = { "succeeded": 'INSPECT_POINT',
"preempted": 'INSPECT_POINT',
"aborted": 'INSPECT_POINT' })
StateMachine.add( 'INSPECT_POINT',
SimpleActionState( 'inspect_point',
MoveAction,
makeMoveGoal("inspect_point", -3.0, 0.0, -0.5, radius_of_acceptance=2.0)),
transitions = { 'succeeded': 'INSPECT_POINT',
"preempted": 'INSPECT_POINT',
"aborted": 'INSPECT_POINT' })
intro_server = IntrospectionServer(str(rospy.get_name()), hsm,'/SM_ROOT')
intro_server.start()
hsm.execute()
#patrol.execute()
print("State machine execute finished")
intro_server.stop()
def shutdown(self):
rospy.loginfo("stopping the AUV...")
rospy.sleep(10)
if __name__ == '__main__':
try:
TaskManager()
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("Pathplanning state machine has been finished")
|
[
"vortex_msgs.msg.MoveGoal",
"smach.StateMachine",
"rospy.sleep",
"rospy.loginfo",
"rospy.init_node",
"rospy.get_name",
"rospy.spin"
] |
[((835, 845), 'vortex_msgs.msg.MoveGoal', 'MoveGoal', ([], {}), '()\n', (843, 845), False, 'from vortex_msgs.msg import MoveAction, MoveGoal\n'), ((1173, 1237), 'rospy.init_node', 'rospy.init_node', (['"""move_to_and_inspect_point_sm"""'], {'anonymous': '(False)'}), "('move_to_and_inspect_point_sm', anonymous=False)\n", (1188, 1237), False, 'import rospy\n'), ((1254, 1302), 'smach.StateMachine', 'StateMachine', ([], {'outcomes': "['finished statemachine']"}), "(outcomes=['finished statemachine'])\n", (1266, 1302), False, 'from smach import State, StateMachine, Sequence\n'), ((2748, 2784), 'rospy.loginfo', 'rospy.loginfo', (['"""stopping the AUV..."""'], {}), "('stopping the AUV...')\n", (2761, 2784), False, 'import rospy\n'), ((2793, 2808), 'rospy.sleep', 'rospy.sleep', (['(10)'], {}), '(10)\n', (2804, 2808), False, 'import rospy\n'), ((2879, 2891), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (2889, 2891), False, 'import rospy\n'), ((2940, 3001), 'rospy.loginfo', 'rospy.loginfo', (['"""Pathplanning state machine has been finished"""'], {}), "('Pathplanning state machine has been finished')\n", (2953, 3001), False, 'import rospy\n'), ((2527, 2543), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (2541, 2543), False, 'import rospy\n')]
|
import re
import copy
import random
from faker import Faker
from . import filth as filth_module
from .filth import Filth
from .detectors.known import KnownFilthItem
from typing import List, Dict, Union, Optional, Tuple
import pandas as pd
import sklearn.metrics
def get_filth_classification_report(
filth_list: List[Filth],
output_dict: bool = False,
) -> Optional[Union[str, Dict[str, float]]]:
"""Evaluates the performance of detectors using KnownFilth.
An example of using this is shown below:
.. code:: pycon
>>> import scrubadub, scrubadub.comparison, scrubadub.detectors.text_blob
>>> scrubber = scrubadub.Scrubber(detector_list=[
... scrubadub.detectors.TextBlobNameDetector(name='name_detector'),
... scrubadub.detectors.KnownFilthDetector([
... {'match': 'Tom', 'filth_type': 'name'},
... {'match': '<EMAIL>', 'filth_type': 'email'},
... ]),
... ])
>>> filth_list = list(scrubber.iter_filth("Hello I am Tom"))
>>> print(scrubadub.comparison.get_filth_classification_report(filth_list))
filth detector locale precision recall f1-score support
<BLANKLINE>
name name_detector en_US 1.00 1.00 1.00 1
<BLANKLINE>
accuracy 1.00 1
macro avg 1.00 1.00 1.00 1
weighted avg 1.00 1.00 1.00 1
<BLANKLINE>
:param filth_list: The list of detected filth
:type filth_list: A list of `Filth` objects
    :param output_dict: Return the report in JSON format, defaults to False
:type output_dict: bool, optional
:return: The report in JSON (a `dict`) or in plain text
:rtype: `str` or `dict`
"""
results = [] # type: List[Dict[str, int]]
filth_max_length = 0
detector_name_max_length = 0
locale_max_length = 0
for filth_item in filth_list:
sub_filths = [filth_item]
if isinstance(filth_item, filth_module.base.MergedFilth):
sub_filths = filth_item.filths
results_row = {}
for sub_filth in sub_filths:
if isinstance(sub_filth, filth_module.KnownFilth) and sub_filth.comparison_type is not None:
results_row[
'{}:{}:{}'.format(sub_filth.comparison_type, filth_module.KnownFilth.type, sub_filth.locale)] = 1
else:
try:
results_row['{}:{}:{}'.format(sub_filth.type, sub_filth.detector_name, sub_filth.locale)] = 1
except AttributeError:
print(type(sub_filth), sub_filth)
raise
        # Don't include filth that was not produced by one of the detectors of interest
if sum(results_row.values()) > 0:
results.append(results_row)
if len(results) == 0:
return None
results_df = pd.DataFrame(results).fillna(0).astype(int)
results_df.columns = pd.MultiIndex.from_tuples(
results_df.columns.str.split(':').values.tolist(),
names=['filth_type', 'detector_name', 'locale'],
)
# Find filth types that have some known filth
known_types = [x[0] for x in results_df.columns if x[1] == filth_module.KnownFilth.type]
# Select columns for filth that have related known filth, but that are not known filth
detected_columns = [
x for x in results_df.columns
if x[1] != filth_module.KnownFilth.type and x[0] in known_types
]
detected_classes = results_df.loc[:, detected_columns].values
# Take the detected_columns above and find their associated known counterparts
known_cols = [(x[0], filth_module.KnownFilth.type, x[2]) for x in detected_columns]
true_classes = results_df.loc[:, known_cols].values
if not output_dict:
filth_max_length = max([len(x[0]) for x in detected_columns] + [len("filth")])
detector_name_max_length = max([len(x[1]) for x in detected_columns] + [len("detector")]) + 4
locale_max_length = max([len(x[2]) for x in detected_columns] + [len("locale")]) + 4
class_labels = [
"{} {} {} ".format(
x[0].rjust(filth_max_length),
x[1].rjust(detector_name_max_length),
x[2].rjust(locale_max_length)
)
for x in detected_columns
]
else:
class_labels = ["{}:{}:{}".format(*x) for x in detected_columns]
report_labels = []
# If there is only one label reshape the data so that
# the classification_report interprets it less ambiguously
if detected_classes.shape[1] == 1:
detected_classes = detected_classes.T[0]
true_classes = true_classes.T[0]
report_labels = [1]
else:
report_labels = [class_labels.index(x) for x in sorted(class_labels)]
class_labels = sorted(class_labels)
report = sklearn.metrics.classification_report(
true_classes,
detected_classes,
output_dict=output_dict,
zero_division=0,
target_names=class_labels,
labels=report_labels,
# **extra_args
)
if not output_dict:
report = (
'filth'.rjust(filth_max_length) +
'detector'.rjust(detector_name_max_length + 1) +
'locale'.rjust(locale_max_length + 1) +
(' '*4) +
report.lstrip(' ')
)
return report
def get_filth_dataframe(filth_list: List[Filth]) -> pd.DataFrame:
"""Produces a pandas `DataFrame` to allow debugging and improving detectors.
An example of using this is shown below:
.. code:: pycon
>>> import scrubadub, scrubadub.comparison, scrubadub.detectors.text_blob
>>> scrubber = scrubadub.Scrubber(detector_list=[
... scrubadub.detectors.TextBlobNameDetector(name='name_detector'),
... scrubadub.detectors.KnownFilthDetector([
... {'match': 'Tom', 'filth_type': 'name'},
... {'match': '<EMAIL>', 'filth_type': 'email'},
... ]),
... ])
>>> filth_list = list(scrubber.iter_filth("Hello I am Tom"))
>>> with pd.option_context("display.max_columns", 20):
... print(scrubadub.comparison.get_filth_dataframe(filth_list)) # doctest: +NORMALIZE_WHITESPACE
group_id filth_id filth_type detector_name document_name text beg end \\
0 0 0 name name_detector None Tom 11 14
<BLANKLINE>
locale known_filth comparison_type known_text known_beg known_end \\
0 en_US True NaN Tom 11 14
<BLANKLINE>
known_comparison_type exact_match partial_match true_positive \\
0 name True True True
<BLANKLINE>
false_positive false_negative
0 False False
:param filth_list: The list of detected filth
:type filth_list: A list of `Filth` objects
    :return: A `pd.DataFrame` containing information about the detected `Filth`
:rtype: `pd.DataFrame`
"""
results = []
for group_id, filth_item in enumerate(filth_list):
sub_filths = [filth_item]
if isinstance(filth_item, filth_module.base.MergedFilth):
sub_filths = filth_item.filths
for filth_id, sub_filth in enumerate(sub_filths):
results.append({
'group_id': group_id,
'filth_id': filth_id,
'filth_type': sub_filth.type,
'detector_name': getattr(sub_filth, 'detector_name', float('nan')),
'document_name': getattr(sub_filth, 'document_name', float('nan')),
'text': sub_filth.text,
'beg': sub_filth.beg,
'end': sub_filth.end,
'locale': sub_filth.locale,
'known_filth': isinstance(sub_filth, filth_module.KnownFilth),
'comparison_type': getattr(sub_filth, 'comparison_type', float('nan')),
})
results_df = pd.DataFrame(results)
suffix_label = '_y_suffix'
return (
pd.merge(
results_df[~results_df['known_filth']],
results_df[results_df['known_filth']][['group_id', 'text', 'beg', 'end', 'comparison_type']],
how='outer',
left_on=('group_id', 'filth_type'),
right_on=('group_id', 'comparison_type'),
suffixes=('', suffix_label)
)
.rename(columns=lambda x: x if not x.endswith(suffix_label) else 'known_' + x[:-len(suffix_label)])
.assign(
known_filth=lambda df: ~pd.isnull(df['known_text']),
exact_match=lambda df: (df['text'] == df['known_text']).fillna(False),
partial_match=lambda df: ((df['beg'] < df['known_end']) & (df['end'] > df['known_beg']).fillna(False)),
true_positive=lambda df: (~pd.isnull(df['known_text'])) & (~pd.isnull(df['text'])),
false_positive=lambda df: (pd.isnull(df['known_text'])) & (~pd.isnull(df['text'])),
false_negative=lambda df: (~pd.isnull(df['known_text'])) & (pd.isnull(df['text'])),
)
)
def make_fake_document(
paragraphs: int = 20, locale: str = 'en_US', seed: Optional[int] = None, faker: Optional[Faker] = None,
filth_types: Optional[List[str]] = None
) -> Tuple[str, List[KnownFilthItem]]:
"""Creates a fake document containing `Filth` that needs to be removed. Also returns the list of known filth
    items that are needed by the `KnownFilthDetector`\\ .
An example of using this is shown below:
.. code:: pycon
>>> import scrubadub, scrubadub.comparison
>>> document, known_filth_items = scrubadub.comparison.make_fake_document(paragraphs=1, seed=1)
>>> scrubber = scrubadub.Scrubber()
>>> scrubber.add_detector(scrubadub.detectors.KnownFilthDetector(known_filth_items=known_filth_items))
>>> filth_list = list(scrubber.iter_filth(document))
>>> print(scrubadub.comparison.get_filth_classification_report(filth_list))
filth detector locale precision recall f1-score support
<BLANKLINE>
url url en_US 1.00 1.00 1.00 1
email email en_US 1.00 1.00 1.00 2
<BLANKLINE>
micro avg 1.00 1.00 1.00 3
macro avg 1.00 1.00 1.00 3
weighted avg 1.00 1.00 1.00 3
samples avg 1.00 1.00 1.00 3
<BLANKLINE>
    :param paragraphs: The number of paragraphs of fake text to generate
:type paragraphs: int
:param locale: The locale of the documents in the format: 2 letter lower-case language code followed by an
underscore and the two letter upper-case country code, eg "en_GB" or "de_CH"
:type locale: str
:param seed: The random seed used to generate the document
:type seed: int, optional
:param faker: A Faker object that is used to generate the text
    :type faker: Faker, optional
:param filth_types: A list of the ``Filth.type`` to generate
:type filth_types: List[str]
:return: The document and a list of `KnownFilthItem`\\ s
:rtype: Tuple[str, List[KnownFilthItem]]
"""
if faker is None:
faker = Faker(locale=locale)
# TODO: register filth types to build up a dict that can be read from, like the detectors
possible_filth = [
filth_module.AddressFilth,
filth_module.EmailFilth,
filth_module.NameFilth,
filth_module.PhoneFilth,
filth_module.PostalCodeFilth,
filth_module.SSNFilth,
filth_module.TwitterFilth,
filth_module.UrlFilth,
]
if filth_types is not None:
possible_filth = [filth for filth in possible_filth if filth.type in filth_types]
if seed is not None:
Faker.seed(seed)
random.seed(seed)
doc = ""
known_items = [] # type: List[KnownFilthItem]
for i_paragraph in range(paragraphs):
for i_sentance_group in range(random.randint(1, 10)):
text = faker.text()
matches = list(re.finditer(r'[\s.]', text))
position = random.choice(matches)
chosen_filth = random.choice(possible_filth)
pii_text = chosen_filth.generate(faker=faker)
known_items.append({
'match': copy.copy(pii_text),
'filth_type': copy.copy(chosen_filth.type),
})
doc += (
text[:position.start()] +
position.group() +
pii_text +
position.group() +
text[position.end():]
)
doc += "\n\n"
return (doc.strip(), known_items)
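# --- Hedged usage sketch (not part of the original module) ---
# Minimal end-to-end flow tying the helpers above together: generate a fake
# document, scrub it with a KnownFilthDetector seeded from the returned items,
# then score the detectors. Import paths follow the doctest examples above;
# the block is guarded so it only runs when executed directly.
if __name__ == '__main__':  # pragma: no cover
    import scrubadub
    document, known_items = make_fake_document(paragraphs=2, seed=1)
    scrubber = scrubadub.Scrubber()
    scrubber.add_detector(scrubadub.detectors.KnownFilthDetector(known_filth_items=known_items))
    filth_list = list(scrubber.iter_filth(document))
    print(get_filth_classification_report(filth_list))
    print(get_filth_dataframe(filth_list).head())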
|
[
"pandas.DataFrame",
"random.randint",
"faker.Faker",
"faker.Faker.seed",
"re.finditer",
"pandas.merge",
"copy.copy",
"random.choice",
"pandas.isnull",
"random.seed"
] |
[((8279, 8300), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (8291, 8300), True, 'import pandas as pd\n'), ((11677, 11697), 'faker.Faker', 'Faker', ([], {'locale': 'locale'}), '(locale=locale)\n', (11682, 11697), False, 'from faker import Faker\n'), ((12247, 12263), 'faker.Faker.seed', 'Faker.seed', (['seed'], {}), '(seed)\n', (12257, 12263), False, 'from faker import Faker\n'), ((12272, 12289), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (12283, 12289), False, 'import random\n'), ((12435, 12456), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (12449, 12456), False, 'import random\n'), ((12570, 12592), 'random.choice', 'random.choice', (['matches'], {}), '(matches)\n', (12583, 12592), False, 'import random\n'), ((12620, 12649), 'random.choice', 'random.choice', (['possible_filth'], {}), '(possible_filth)\n', (12633, 12649), False, 'import random\n'), ((12518, 12545), 're.finditer', 're.finditer', (['"""[\\\\s.]"""', 'text'], {}), "('[\\\\s.]', text)\n", (12529, 12545), False, 'import re\n'), ((3069, 3090), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (3081, 3090), True, 'import pandas as pd\n'), ((8354, 8629), 'pandas.merge', 'pd.merge', (["results_df[~results_df['known_filth']]", "results_df[results_df['known_filth']][['group_id', 'text', 'beg', 'end',\n 'comparison_type']]"], {'how': '"""outer"""', 'left_on': "('group_id', 'filth_type')", 'right_on': "('group_id', 'comparison_type')", 'suffixes': "('', suffix_label)"}), "(results_df[~results_df['known_filth']], results_df[results_df[\n 'known_filth']][['group_id', 'text', 'beg', 'end', 'comparison_type']],\n how='outer', left_on=('group_id', 'filth_type'), right_on=('group_id',\n 'comparison_type'), suffixes=('', suffix_label))\n", (8362, 8629), True, 'import pandas as pd\n'), ((8860, 8887), 'pandas.isnull', 'pd.isnull', (["df['known_text']"], {}), "(df['known_text'])\n", (8869, 8887), True, 'import pandas as pd\n'), ((9223, 9250), 'pandas.isnull', 'pd.isnull', (["df['known_text']"], {}), "(df['known_text'])\n", (9232, 9250), True, 'import pandas as pd\n'), ((9352, 9373), 'pandas.isnull', 'pd.isnull', (["df['text']"], {}), "(df['text'])\n", (9361, 9373), True, 'import pandas as pd\n'), ((12766, 12785), 'copy.copy', 'copy.copy', (['pii_text'], {}), '(pii_text)\n', (12775, 12785), False, 'import copy\n'), ((12817, 12845), 'copy.copy', 'copy.copy', (['chosen_filth.type'], {}), '(chosen_filth.type)\n', (12826, 12845), False, 'import copy\n'), ((9127, 9154), 'pandas.isnull', 'pd.isnull', (["df['known_text']"], {}), "(df['known_text'])\n", (9136, 9154), True, 'import pandas as pd\n'), ((9160, 9181), 'pandas.isnull', 'pd.isnull', (["df['text']"], {}), "(df['text'])\n", (9169, 9181), True, 'import pandas as pd\n'), ((9256, 9277), 'pandas.isnull', 'pd.isnull', (["df['text']"], {}), "(df['text'])\n", (9265, 9277), True, 'import pandas as pd\n'), ((9320, 9347), 'pandas.isnull', 'pd.isnull', (["df['known_text']"], {}), "(df['known_text'])\n", (9329, 9347), True, 'import pandas as pd\n')]
|
# -*- encoding: utf-8 -*-
"""
@File : address.py
@Time : 2020/4/24 13:57
@Author : Tianjin
@Email : <EMAIL>
@Software: PyCharm
"""
from lin.exception import NotFound, ParameterException
from lin.interface import InfoCrud as Base
from sqlalchemy import Integer, Column, ForeignKey, String, Boolean
class Address(Base):
__tablename__ = "Address"
id = Column("id",Integer, primary_key=True, autoincrement=True, comment="收货地址id")
userId = Column("userId", Integer, ForeignKey("UserInfo.id"), nullable=False, comment="用户id")
userName = Column("userName", String(32), nullable=False, comment="用户姓名")
address = Column("address", String(250), nullable=False, comment="收货地址")
phoneCode = Column("phoneCode", String(11), nullable=False, comment="电话号码")
default = Column("default", Boolean, nullable=False, comment="默认地址")
@classmethod
def append(cls, data):
        # When an address is added for the first time, default is set to True
if data["default"] == "True":
address = Address.query.filter_by(userId=data["userId"], delete_time=None, default=True).first()
if address:
address.update(
default=False,
commit=True
)
Address.create(
userId=int(data["userId"]),
userName=data["userName"],
address=data["address"],
phoneCode=data["phoneCode"],
default=True,
commit=True
)
else:
Address.create(
userId=int(data["userId"]),
userName=data["userName"],
address=data["address"],
phoneCode=data["phoneCode"],
default=False,
commit=True
)
@classmethod
def amend_default_address(cls, form):
address = Address.query.filter_by(userId=form.userId.data, delete_time=None, default=True).first()
if address:
address.update(
default=False,
commit=True
)
else:
raise NotFound(msg='没有找到相关用户')
address = Address.query.filter_by(id=form.id.data, delete_time=None).first()
if address:
address.update(
userName=form.userName.data,
address=form.address.data,
phoneCode=form.phoneCode.data,
default=True,
commit=True
)
else:
raise NotFound(msg='没有找到相关地址')
@classmethod
def address_list(cls, userId):
address = Address.query.filter_by(userId=userId, delete_time=None).all()
if address:
return address
else:
raise NotFound(msg="没有找到相关地址")
@classmethod
def delete_address(cls, id):
address = Address.query.filter_by(id=id, delete_time=None).first()
if address:
address.delete(
commit=True
)
else:
raise NotFound(msg="没有找到相关地址")
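# --- Hedged usage sketch (not part of the original model) ---
# Illustrates the payload shape Address.append() expects. Note that the
# "default" flag is compared against the string "True" above, so callers pass
# strings rather than booleans. The values are invented, and running this
# needs a live application/database context, so the call is left commented out.
#
# Address.append({
#     "userId": "1",
#     "userName": "example user",
#     "address": "example shipping address",
#     "phoneCode": "13800000000",
#     "default": "True",
# })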
|
[
"sqlalchemy.String",
"lin.exception.NotFound",
"sqlalchemy.ForeignKey",
"sqlalchemy.Column"
] |
[((370, 447), 'sqlalchemy.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)', 'autoincrement': '(True)', 'comment': '"""收货地址id"""'}), "('id', Integer, primary_key=True, autoincrement=True, comment='收货地址id')\n", (376, 447), False, 'from sqlalchemy import Integer, Column, ForeignKey, String, Boolean\n'), ((794, 852), 'sqlalchemy.Column', 'Column', (['"""default"""', 'Boolean'], {'nullable': '(False)', 'comment': '"""默认地址"""'}), "('default', Boolean, nullable=False, comment='默认地址')\n", (800, 852), False, 'from sqlalchemy import Integer, Column, ForeignKey, String, Boolean\n'), ((486, 511), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""UserInfo.id"""'], {}), "('UserInfo.id')\n", (496, 511), False, 'from sqlalchemy import Integer, Column, ForeignKey, String, Boolean\n'), ((579, 589), 'sqlalchemy.String', 'String', (['(32)'], {}), '(32)\n', (585, 589), False, 'from sqlalchemy import Integer, Column, ForeignKey, String, Boolean\n'), ((655, 666), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (661, 666), False, 'from sqlalchemy import Integer, Column, ForeignKey, String, Boolean\n'), ((736, 746), 'sqlalchemy.String', 'String', (['(11)'], {}), '(11)\n', (742, 746), False, 'from sqlalchemy import Integer, Column, ForeignKey, String, Boolean\n'), ((2125, 2149), 'lin.exception.NotFound', 'NotFound', ([], {'msg': '"""没有找到相关用户"""'}), "(msg='没有找到相关用户')\n", (2133, 2149), False, 'from lin.exception import NotFound, ParameterException\n'), ((2523, 2547), 'lin.exception.NotFound', 'NotFound', ([], {'msg': '"""没有找到相关地址"""'}), "(msg='没有找到相关地址')\n", (2531, 2547), False, 'from lin.exception import NotFound, ParameterException\n'), ((2761, 2785), 'lin.exception.NotFound', 'NotFound', ([], {'msg': '"""没有找到相关地址"""'}), "(msg='没有找到相关地址')\n", (2769, 2785), False, 'from lin.exception import NotFound, ParameterException\n'), ((3034, 3058), 'lin.exception.NotFound', 'NotFound', ([], {'msg': '"""没有找到相关地址"""'}), "(msg='没有找到相关地址')\n", (3042, 3058), False, 'from lin.exception import NotFound, ParameterException\n')]
|
import numpy as np
import pandas as pd
from tqdm import tqdm
def map_prediction_to_emergence_label(results, training_values, test_values, predictors_to_run, test_terms,
emergence_linear_thresholds=(
('rapidly emergent', 0.1),
('emergent', 0.02),
('stationary', -0.02),
('declining', None)
)):
def __map_helper(normalised_counts_to_trend, predicted_emergence, predictor_name, test_term,
emergence_linear_thresholds):
if np.isnan(sum(normalised_counts_to_trend)):
predicted_emergence[predictor_name][test_term] = 'Fail'
return
x_data = range(len(normalised_counts_to_trend))
trend = np.polyfit(x_data, normalised_counts_to_trend, 1)
emergence = emergence_linear_thresholds[-1][0]
for emergence_threshold in emergence_linear_thresholds[:-1]:
if trend[0] > emergence_threshold[1]:
emergence = emergence_threshold[0]
break
predicted_emergence[predictor_name][test_term] = emergence
predicted_emergence = {}
if test_values:
predictor_name = 'Actual'
predicted_emergence[predictor_name] = {}
for test_term in tqdm(test_terms, unit='term', desc='Labelling prediction ' + predictor_name):
counts_to_trend = test_values[test_term]
max_training_value = max(training_values[test_term])
normalised_counts_to_trend = [x / max_training_value for x in counts_to_trend]
__map_helper(normalised_counts_to_trend, predicted_emergence, predictor_name, test_term,
emergence_linear_thresholds)
for predictor_name in predictors_to_run:
predicted_emergence[predictor_name] = {}
for test_term in tqdm(test_terms, unit='term', desc='Labelling prediction ' + predictor_name):
(none, configuration, predicted_values, num_training_values) = results[predictor_name][test_term]
counts_to_trend = predicted_values.ravel().tolist()
max_training_value = max(training_values[test_term])
normalised_counts_to_trend = [x / max_training_value for x in counts_to_trend]
__map_helper(normalised_counts_to_trend, predicted_emergence, predictor_name, test_term,
emergence_linear_thresholds)
return predicted_emergence
def report_predicted_emergence_labels_html(predicted_emergence, emergence_colours={
'highly emergent': 'lime',
'emergent': 'green',
'stationary': 'black',
'declining': 'red'}):
html_string = f'''
<h2>Emergence Label Prediction</h2>
'''
# df = pd.DataFrame(predicted_emergence, index=[0])
test_terms = list(predicted_emergence[list(predicted_emergence.keys())[0]].keys())
df_results = pd.DataFrame({'terms': test_terms})
predictor_display_names = []
for predictor_name in predicted_emergence:
term_results = []
for test_term in predicted_emergence[predictor_name]:
result = predicted_emergence[predictor_name][test_term]
term_results.append(result)
predictor_display_name = predictor_name.replace('-', '<br/>')
predictor_display_names.append(predictor_display_name)
df_term_column = pd.DataFrame({predictor_display_name: term_results})
df_results = df_results.join(df_term_column)
df_summary_table = df_results.style.hide_index()
df_summary_table = df_summary_table.set_table_styles([
dict(selector='table', props=[('border-collapse', 'collapse')]),
dict(selector='td', props=[('border', '2px solid black'),
('text-align', 'right'),
('padding-left', '15px'),
('padding-right', '15px')])
])
def colour_emergence(val):
colour = 'black'
if val in emergence_colours:
colour = emergence_colours[val]
return f'color: {colour}'
df_summary_table = df_summary_table.applymap(colour_emergence)
# for predictor_name in predictor_names:
# df_summary_table = df_summary_table.format({predictor_name: predictor_style})
# df_summary_table = df_summary_table.highlight_min(axis=1)
html_string += '<style type="text/css">table {border-collapse: collapse;} </style>\n'
html_string += df_summary_table.render()
return html_string
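# --- Hedged worked example (not part of the original module) ---
# Shows the core of __map_helper above in isolation: fit a degree-1 polynomial
# to the normalised counts and compare its slope against the ordered
# thresholds. The sample series below is invented purely for illustration.
def _example_label_from_counts(normalised_counts,
                               thresholds=(('rapidly emergent', 0.1),
                                           ('emergent', 0.02),
                                           ('stationary', -0.02),
                                           ('declining', None))):
    slope = np.polyfit(range(len(normalised_counts)), normalised_counts, 1)[0]
    label = thresholds[-1][0]
    for name, cutoff in thresholds[:-1]:
        if slope > cutoff:
            label = name
            break
    return label

# _example_label_from_counts([0.2, 0.3, 0.45, 0.6])  # slope ~0.135 -> 'rapidly emergent'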
|
[
"pandas.DataFrame",
"tqdm.tqdm",
"numpy.polyfit"
] |
[((3031, 3066), 'pandas.DataFrame', 'pd.DataFrame', (["{'terms': test_terms}"], {}), "({'terms': test_terms})\n", (3043, 3066), True, 'import pandas as pd\n'), ((919, 968), 'numpy.polyfit', 'np.polyfit', (['x_data', 'normalised_counts_to_trend', '(1)'], {}), '(x_data, normalised_counts_to_trend, 1)\n', (929, 968), True, 'import numpy as np\n'), ((1446, 1522), 'tqdm.tqdm', 'tqdm', (['test_terms'], {'unit': '"""term"""', 'desc': "('Labelling prediction ' + predictor_name)"}), "(test_terms, unit='term', desc='Labelling prediction ' + predictor_name)\n", (1450, 1522), False, 'from tqdm import tqdm\n'), ((2011, 2087), 'tqdm.tqdm', 'tqdm', (['test_terms'], {'unit': '"""term"""', 'desc': "('Labelling prediction ' + predictor_name)"}), "(test_terms, unit='term', desc='Labelling prediction ' + predictor_name)\n", (2015, 2087), False, 'from tqdm import tqdm\n'), ((3504, 3556), 'pandas.DataFrame', 'pd.DataFrame', (['{predictor_display_name: term_results}'], {}), '({predictor_display_name: term_results})\n', (3516, 3556), True, 'import pandas as pd\n')]
|
#
# This file is part of scopedconfig software.
#
# Copyright (c) 2019, <NAME> <<EMAIL>>
# License: https://github.com/etingof/scopedconfig/blob/master/LICENSE.rst
#
import unittest
suite = unittest.TestLoader().loadTestsFromNames(
['tests.unit.__main__.suite']
)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"unittest.TextTestRunner",
"unittest.TestLoader"
] |
[((191, 212), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (210, 212), False, 'import unittest\n'), ((302, 338), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (325, 338), False, 'import unittest\n')]
|
import json
from time import sleep
from Transliterator import transliterate_to_english
import requests
from bs4 import BeautifulSoup
path_raw = './data/raw/'
path_json = './data/json/'
url = "http://www.kashmirizabaan.com/eng_ver.php"
def query(key):
payload = f'meaning_target={key}&Submit=Go&lantype=hin&opt_dic=mat_like'
headers = {
'Connection': "keep-alive",
'Content-Type': "application/x-www-form-urlencoded",
'Accept-Language': "en,et;q=0.9,ur;q=0.8",
}
page = requests.request("POST", url, data=payload, headers=headers)
page.encoding = 'utf-8'
# cleanup
page_text = page.text
page_text = page_text.replace("<table>", '')
page_text = page_text.replace(" ", '')
page_text = page_text.replace("</br>", '<br />')
page_text = page_text.replace('<table width="717" border="0" bordercolor="#F0F0F0" bgcolor="#FFFFFF">', '<table>')
page_text = page_text.replace('<font color="#CC6600">', '')
page_text = page_text.replace('</div>\n\n</body>', '</body>')
page_text = page_text.replace('<font face="Afan_Koshur_Naksh,Afan Koshur Naksh,Times New Roman" size=4>', '')
page_text = page_text.replace('<font face=\\"Afan_Koshur_Naksh,Afan Koshur Naksh,Times New Roman\\" size=4>', '')
page_text = page_text.replace(
'<form name="dictionary" method="post" action=""onSubmit=return validate_form(this) >', '')
page_text = page_text.replace('</div></th>', '<div></div></th>')
soup = BeautifulSoup(page_text, 'lxml')
page_text = soup.prettify()
filename = get_raw_filename(key)
file = open(filename, 'w', encoding='utf-16')
file.write(page_text)
file.close()
def load(key):
filename = get_raw_filename(key)
print(filename)
file = open(filename, 'r', encoding='utf-16')
page = file.read()
file.close()
return page
def get_raw_filename(key):
filename = f'{path_raw}{key}.html'
return filename
def get_json_filename(key):
filename = f'{path_json}{key}.json'
return filename
def export_to_json(key, data):
with open(get_json_filename(key), 'w', encoding='utf-16') as fp:
json.dump(data, fp, ensure_ascii=False, indent=2)
fp.close()
def from_to_json(key):
with open(get_json_filename(key), 'r', encoding='utf-16') as fp:
data = json.load(fp)
fp.close()
return data
def for_each_key_do(action):
alfabits = '<KEY>'
for ch1 in alfabits:
for ch2 in alfabits:
key = f'{ch1}{ch2}'
x = action(key)
yield x
def fetch_data(key):
print(f'searching for {key}')
query(key)
sleep(0.25)
return 1
def transform(key):
word_count = 0
entries = []
print(f'transforming for {key}')
page = load(key)
soup = BeautifulSoup(page, 'html.parser')
tables = soup.find_all("table")
if len(tables) > 1:
for i in range(1, len(tables)):
element = tables[i]
tds = element.findAll('td')
ks_word = tds[0].getText().strip()
if ks_word == '' or ks_word == '۔۔۔':
continue
print('----------------------')
category = tds[1].getText().strip()
en_example = tds[2].getText().strip()
hi_meaning = tds[3].getText().strip()
ks_example = tds[4].getText().strip()
en_meaning = tds[5].getText().strip()
transliteration = transliterate_to_english(ks_word)
print(f'ks_word = {ks_word}')
print(f'category = {category}')
print(f'en_example = {en_example}')
print(f'hi_meaning = {hi_meaning}')
print(f'ks_example = {ks_example}')
print(f'en_meaning = {en_meaning}')
print(f'transliteration = {transliteration}')
entry = {
'ks_word': ks_word,
'category': category,
'en_example': en_example,
'ks_example': ks_example,
'en_meaning': en_meaning,
'transliteration': transliteration
}
entries.append(entry)
word_count = word_count + 1
export_to_json(key, entries)
print(f'Total number of words for {key} = {word_count}')
return word_count
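# --- Hedged usage sketch (not part of the original script) ---
# A possible driver loop built from the helpers above: first fetch the raw
# HTML for every two-letter key, then transform each saved page to JSON and
# report the total number of extracted entries. Both passes touch the remote
# site / local data directories, so the sketch is guarded behind __main__.
if __name__ == '__main__':
    # Pass 1: download the raw pages (rate-limited by the sleep() in fetch_data).
    list(for_each_key_do(fetch_data))
    # Pass 2: parse the saved pages into JSON files and sum the word counts.
    total_words = sum(for_each_key_do(transform))
    print(f'Grand total of extracted words: {total_words}')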
|
[
"json.dump",
"json.load",
"Transliterator.transliterate_to_english",
"time.sleep",
"bs4.BeautifulSoup",
"requests.request"
] |
[((513, 573), 'requests.request', 'requests.request', (['"""POST"""', 'url'], {'data': 'payload', 'headers': 'headers'}), "('POST', url, data=payload, headers=headers)\n", (529, 573), False, 'import requests\n'), ((1490, 1522), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page_text', '"""lxml"""'], {}), "(page_text, 'lxml')\n", (1503, 1522), False, 'from bs4 import BeautifulSoup\n'), ((2649, 2660), 'time.sleep', 'sleep', (['(0.25)'], {}), '(0.25)\n', (2654, 2660), False, 'from time import sleep\n'), ((2802, 2836), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page', '"""html.parser"""'], {}), "(page, 'html.parser')\n", (2815, 2836), False, 'from bs4 import BeautifulSoup\n'), ((2154, 2203), 'json.dump', 'json.dump', (['data', 'fp'], {'ensure_ascii': '(False)', 'indent': '(2)'}), '(data, fp, ensure_ascii=False, indent=2)\n', (2163, 2203), False, 'import json\n'), ((2332, 2345), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (2341, 2345), False, 'import json\n'), ((3460, 3493), 'Transliterator.transliterate_to_english', 'transliterate_to_english', (['ks_word'], {}), '(ks_word)\n', (3484, 3493), False, 'from Transliterator import transliterate_to_english\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Data Series Sonification
========================
Functionality for sonifying data series.
"""
import warnings
from inspect import signature, Parameter
import numpy as np
from astropy.table import Table, MaskedColumn
from astropy.time import Time
import pyo
from ..utils.pitch_mapping import data_to_pitch
from ..utils.exceptions import InputWarning
__all__ = ['PitchMap', 'SoniSeries']
class PitchMap():
def __init__(self, pitch_func=data_to_pitch, **pitch_args):
"""
Class that encapsulates the data value to pitch function
and associated arguments.
Parameters
----------
pitch_func : function
Optional. Defaults to `~astronify.utils.data_to_pitch`.
If supplying a function it should take a data array as the first
parameter, and all other parameters should be optional.
**pitch_args
Default parameters and values for the pitch function. Should include
all necessary arguments other than the data values.
"""
# Setting up the default arguments
if (not pitch_args) and (pitch_func == data_to_pitch):
pitch_args = {"pitch_range": [100, 10000],
"center_pitch": 440,
"zero_point": "median",
"stretch": "linear"}
self.pitch_map_func = pitch_func
self.pitch_map_args = pitch_args
def _check_func_args(self):
"""
Make sure the pitch mapping function and argument dictionary match.
        Note: This function does not check that the function gets all the required arguments.
"""
# Only test if both pitch func and args are set
if hasattr(self, "pitch_map_func") and hasattr(self, "pitch_map_args"):
# Only check parameters if there is no kwargs argument
param_types = [x.kind for x in signature(self.pitch_map_func).parameters.values()]
if Parameter.VAR_KEYWORD not in param_types:
for arg_name in list(self.pitch_map_args):
if arg_name not in signature(self.pitch_map_func).parameters:
wstr = "{} is not accepted by the pitch mapping function and will be ignored".format(arg_name)
warnings.warn(wstr, InputWarning)
del self.pitch_map_args[arg_name]
def __call__(self, data):
"""
        Apply the pitch mapping function to the given data and return the resulting pitch values.
"""
self._check_func_args()
return self.pitch_map_func(data, **self.pitch_map_args)
@property
def pitch_map_func(self):
"""
The pitch mapping function.
"""
return self._pitch_map_func
@pitch_map_func.setter
def pitch_map_func(self, new_func):
assert callable(new_func), "Pitch mapping function must be a function."
self._pitch_map_func = new_func
self._check_func_args()
@property
def pitch_map_args(self):
"""
Dictionary of additional arguments (other than the data array)
for the pitch mapping function.
"""
return self._pitch_map_args
@pitch_map_args.setter
def pitch_map_args(self, new_args):
assert isinstance(new_args, dict), "Pitch mapping function args must be in a dictionary."
self._pitch_map_args = new_args
self._check_func_args()
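# --- Hedged usage sketch (not part of the original module) ---
# PitchMap accepts any callable whose first argument is the data array and
# whose remaining arguments are optional, as described above. The linear
# mapping below is invented for illustration only.
def _example_linear_pitch(data, low=200.0, high=2000.0):
    data = np.asarray(data, dtype=float)
    span = data.max() - data.min()
    scaled = (data - data.min()) / span if span else np.zeros_like(data)
    return low + scaled * (high - low)

# example_map = PitchMap(_example_linear_pitch, low=300.0, high=1500.0)
# example_map([1.0, 2.0, 3.0])  # -> pitches in Hz between 300 and 1500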
class SoniSeries():
def __init__(self, data, time_col="time", val_col="flux"):
"""
Class that encapsulates a sonified data series.
Parameters
----------
data : `astropy.table.Table`
The table of data to be sonified.
time_col : str
Optional, default "time". The data column to be mapped to time.
val_col : str
Optional, default "flux". The data column to be mapped to pitch.
"""
self.time_col = time_col
self.val_col = val_col
self.data = data
# Default specs
self.note_duration = 0.5 # note duration in seconds
self.note_spacing = 0.01 # spacing between notes in seconds
self.gain = 0.05 # default gain in the generated sine wave. pyo multiplier, -1 to 1.
self.pitch_mapper = PitchMap(data_to_pitch)
self._init_pyo()
def _init_pyo(self):
self.server = pyo.Server()
self.streams = None
@property
def data(self):
""" The data table (~astropy.table.Table). """
return self._data
@data.setter
def data(self, data_table):
assert isinstance(data_table, Table), 'Data must be a Table.'
# Removing any masked values as they interfere with the sonification
if isinstance(data_table[self.val_col], MaskedColumn):
data_table = data_table[~data_table[self.val_col].mask]
if isinstance(data_table[self.time_col], MaskedColumn):
data_table = data_table[~data_table[self.time_col].mask]
# Removing any nans as they interfere with the sonification
data_table = data_table[~np.isnan(data_table[self.val_col])]
# making sure we have a float column for time
if isinstance(data_table[self.time_col], Time):
float_col = "asf_time"
data_table[float_col] = data_table[self.time_col].jd
self.time_col = float_col
self._data = data_table
@property
def time_col(self):
""" The data column mappend to time when sonifying. """
return self._time_col
@time_col.setter
def time_col(self, value):
assert isinstance(value, str), 'Time column name must be a string.'
self._time_col = value
@property
def val_col(self):
""" The data column mappend to putch when sonifying. """
return self._val_col
@val_col.setter
def val_col(self, value):
assert isinstance(value, str), 'Value column name must be a string.'
self._val_col = value
@property
def pitch_mapper(self):
""" The pitch mapping object that takes data values to pitch values (Hz). """
return self._pitch_mapper
@pitch_mapper.setter
def pitch_mapper(self, value):
self._pitch_mapper = value
@property
def gain(self):
""" Adjustable gain for output. """
return self._gain
@gain.setter
def gain(self, value):
self._gain = value
@property
def note_duration(self):
""" How long each individual note will be in seconds."""
return self._note_duration
@note_duration.setter
def note_duration(self, value):
# Add in min value check
self._note_duration = value
@property
def note_spacing(self):
""" The spacing of the notes on average (will adjust based on time) in seconds. """
return self._note_spacing
@note_spacing.setter
def note_spacing(self, value):
# Add in min value check
self._note_spacing = value
def sonify(self):
"""
Perform the sonification, two columns will be added to the data table: asf_pitch, and asf_onsets.
The asf_pitch column will contain the sonified data in Hz.
The asf_onsets column will contain the start time for each note in seconds from the first note.
Metadata will also be added to the table giving information about the duration and spacing
of the sonified pitches, as well as an adjustable gain.
"""
data = self.data
exptime = np.median(np.diff(data[self.time_col]))
data.meta["asf_exposure_time"] = exptime
data.meta["asf_note_duration"] = self.note_duration
data.meta["asf_spacing"] = self.note_spacing
data["asf_pitch"] = self.pitch_mapper(data[self.val_col])
data["asf_onsets"] = [x for x in (data[self.time_col] - data[self.time_col][0])/exptime*self.note_spacing]
def play(self):
"""
Play the data sonification.
"""
# Making sure we have a clean server
if self.server.getIsBooted():
self.server.shutdown()
self.server.boot()
self.server.start()
# Getting data ready
duration = self.data.meta["asf_note_duration"]
pitches = np.repeat(self.data["asf_pitch"], 2)
delays = np.repeat(self.data["asf_onsets"], 2)
# TODO: This doesn't seem like the best way to do this, but I don't know
# how to make it better
env = pyo.Linseg(list=[(0, 0), (0.01, 1), (duration - 0.1, 1),
(duration - 0.05, 0.5), (duration - 0.005, 0)],
mul=[self.gain for i in range(len(pitches))]).play(
delay=list(delays), dur=duration)
self.streams = pyo.Sine(list(pitches), 0, env).out(delay=list(delays),
dur=duration)
def stop(self):
"""
Stop playing the data sonification.
"""
self.streams.stop()
def write(self, filepath):
"""
Save data sonification to the given file.
Currently the only output option is a wav file.
Parameters
----------
filepath : str
The path to the output file.
"""
# Getting data ready
duration = self.data.meta["asf_note_duration"]
pitches = np.repeat(self.data["asf_pitch"], 2)
delays = np.repeat(self.data["asf_onsets"], 2)
# Making sure we have a clean server
if self.server.getIsBooted():
self.server.shutdown()
self.server.reinit(audio="offline")
self.server.boot()
self.server.recordOptions(dur=delays[-1]+duration, filename=filepath)
env = pyo.Linseg(list=[(0, 0), (0.1, 1), (duration - 0.1, 1),
(duration - 0.05, 0.5), (duration - 0.005, 0)],
mul=[self.gain for i in range(len(pitches))]).play(
delay=list(delays), dur=duration)
sine = pyo.Sine(list(pitches), 0, env).out(delay=list(delays), dur=duration) # noqa: F841
self.server.start()
# Clean up
self.server.shutdown()
self.server.reinit(audio="portaudio")
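# --- Hedged usage sketch (not part of the original module) ---
# Minimal end-to-end use of SoniSeries as described above: build a small
# astropy Table with "time" and "flux" columns, sonify it, and write a wav
# file. Writing needs a working pyo audio backend and the output filename is
# hypothetical, so the block is guarded behind __main__.
if __name__ == '__main__':
    demo_times = np.arange(0.0, 10.0, 0.5)
    demo_data = Table({'time': demo_times, 'flux': np.sin(demo_times)})
    demo_series = SoniSeries(demo_data)
    demo_series.note_spacing = 0.02      # slightly wider gap between notes
    demo_series.sonify()                 # adds asf_pitch / asf_onsets columns
    demo_series.write('soniseries_demo.wav')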
|
[
"numpy.isnan",
"pyo.Server",
"numpy.diff",
"inspect.signature",
"warnings.warn",
"numpy.repeat"
] |
[((4450, 4462), 'pyo.Server', 'pyo.Server', ([], {}), '()\n', (4460, 4462), False, 'import pyo\n'), ((8386, 8422), 'numpy.repeat', 'np.repeat', (["self.data['asf_pitch']", '(2)'], {}), "(self.data['asf_pitch'], 2)\n", (8395, 8422), True, 'import numpy as np\n'), ((8440, 8477), 'numpy.repeat', 'np.repeat', (["self.data['asf_onsets']", '(2)'], {}), "(self.data['asf_onsets'], 2)\n", (8449, 8477), True, 'import numpy as np\n'), ((9522, 9558), 'numpy.repeat', 'np.repeat', (["self.data['asf_pitch']", '(2)'], {}), "(self.data['asf_pitch'], 2)\n", (9531, 9558), True, 'import numpy as np\n'), ((9576, 9613), 'numpy.repeat', 'np.repeat', (["self.data['asf_onsets']", '(2)'], {}), "(self.data['asf_onsets'], 2)\n", (9585, 9613), True, 'import numpy as np\n'), ((7644, 7672), 'numpy.diff', 'np.diff', (['data[self.time_col]'], {}), '(data[self.time_col])\n', (7651, 7672), True, 'import numpy as np\n'), ((5171, 5205), 'numpy.isnan', 'np.isnan', (['data_table[self.val_col]'], {}), '(data_table[self.val_col])\n', (5179, 5205), True, 'import numpy as np\n'), ((2394, 2427), 'warnings.warn', 'warnings.warn', (['wstr', 'InputWarning'], {}), '(wstr, InputWarning)\n', (2407, 2427), False, 'import warnings\n'), ((2208, 2238), 'inspect.signature', 'signature', (['self.pitch_map_func'], {}), '(self.pitch_map_func)\n', (2217, 2238), False, 'from inspect import signature, Parameter\n'), ((2001, 2031), 'inspect.signature', 'signature', (['self.pitch_map_func'], {}), '(self.pitch_map_func)\n', (2010, 2031), False, 'from inspect import signature, Parameter\n')]
|
"""
This prints a list of publicly accessible resources in a group
"""
from django.core.management.base import BaseCommand
from hs_access_control.models import GroupAccess
def usage():
print("groups_with_public_resources usage:")
print(" groups_with_public_resources ")
def shorten(title, length):
if len(title) <= length:
return title
else:
return title[0:19]+'...'
def access_type(thing):
if thing['published']:
return 'published'
elif thing['public']:
return 'public'
elif thing['discoverable']:
return 'discoverable'
else:
return 'private'
class Command(BaseCommand):
help = """List public groups."""
def handle(self, *args, **options):
for g in GroupAccess.groups_with_public_resources():
# n = g.gaccess.public_resources.count()
print("group is {} (id={})".format(g.name, g.id))
|
[
"hs_access_control.models.GroupAccess.groups_with_public_resources"
] |
[((759, 801), 'hs_access_control.models.GroupAccess.groups_with_public_resources', 'GroupAccess.groups_with_public_resources', ([], {}), '()\n', (799, 801), False, 'from hs_access_control.models import GroupAccess\n')]
|
from datetime import datetime
import gzip
import json
import os
from pytest import fixture
from time import sleep
from baq.operations import backup, restore
from baq.backends import FileBackend
@fixture
def sample_age_key(temp_dir):
secret_key_path = temp_dir / 'age_key'
secret_key_path.write_text('<KEY>')
public_key = '<KEY>'
return public_key
def test_backup_and_restore_without_encryption(temp_dir):
(temp_dir / 'src').mkdir()
(temp_dir / 'src/hello.txt').write_text('Hello, World!\n')
(temp_dir / 'src/dir1').mkdir()
(temp_dir / 'src/dir1/sample.txt').write_text('This is dir1/sample.txt\n')
backend = FileBackend(temp_dir / 'backup_target')
backup_result = backup(temp_dir / 'src', backend=backend, recipients=[], recipients_files=[])
backup_id = backup_result.backup_id
(temp_dir / 'restored').mkdir()
restore(temp_dir / 'restored', backend, backup_id, [])
assert (temp_dir / 'src/hello.txt').read_bytes() == (temp_dir / 'restored/hello.txt').read_bytes()
assert (temp_dir / 'src/dir1/sample.txt').read_bytes() == (temp_dir / 'restored/dir1/sample.txt').read_bytes()
assert sorted(p.name for p in (temp_dir / 'backup_target').iterdir()) == [
f'baq.{backup_id}.data.00000',
f'baq.{backup_id}.meta',
]
def test_backup_and_restore(temp_dir, sample_age_key):
(temp_dir / 'src').mkdir()
(temp_dir / 'src/hello.txt').write_text('Hello, World!\n')
(temp_dir / 'src/dir1').mkdir()
(temp_dir / 'src/dir1/sample.txt').write_text('This is dir1/sample.txt\n')
backend = FileBackend(temp_dir / 'backup_target')
backup_result = backup(temp_dir / 'src', backend=backend, recipients=[sample_age_key], recipients_files=[])
backup_id = backup_result.backup_id
(temp_dir / 'restored').mkdir()
restore(temp_dir / 'restored', backend, backup_id, [temp_dir / 'age_key'])
assert (temp_dir / 'src/hello.txt').read_bytes() == (temp_dir / 'restored/hello.txt').read_bytes()
assert (temp_dir / 'src/dir1/sample.txt').read_bytes() == (temp_dir / 'restored/dir1/sample.txt').read_bytes()
assert sorted(p.name for p in (temp_dir / 'backup_target').iterdir()) == [
f'baq.{backup_id}.data.00000',
f'baq.{backup_id}.meta',
]
meta_path = temp_dir / f'backup_target/baq.{backup_id}.meta'
meta_content = [json.loads(line) for line in gzip.decompress(meta_path.read_bytes()).splitlines()]
assert meta_content == [
{
'baq_backup': {
'file_format_version': 'v1',
'backup_id': backup_id,
'date': meta_content[0]['baq_backup']['date'],
'encryption_keys': [
{
'backup_id': backup_id,
'sha1': meta_content[0]['baq_backup']['encryption_keys'][0]['sha1'],
'age_encrypted': meta_content[0]['baq_backup']['encryption_keys'][0]['age_encrypted'],
}
]
}
}, {
'directory': {
'atime': meta_content[1]['directory']['atime'],
'ctime': meta_content[1]['directory']['ctime'],
'mtime': meta_content[1]['directory']['mtime'],
'uid': meta_content[1]['directory']['uid'],
'gid': meta_content[1]['directory']['gid'],
'mode': meta_content[1]['directory']['mode'],
'path': '.',
}
}, {
'file': {
'atime': meta_content[2]['file']['atime'],
'ctime': meta_content[2]['file']['ctime'],
'mtime': meta_content[2]['file']['mtime'],
'uid': meta_content[2]['file']['uid'],
'gid': meta_content[2]['file']['gid'],
'mode': meta_content[2]['file']['mode'],
'path': 'hello.txt',
}
}, {
'content': {
'offset': 0,
'sha3_512': 'adb798d7b4c94952e61c5d9beed5d3bf9443460f5d5a9f17eb32def95bc23ba8608f7630ea236958602500d06f5c19c64114c06ce09f1b92301b9c3fc73f0728',
'encryption_key_sha1': meta_content[0]['baq_backup']['encryption_keys'][0]['sha1'],
'df_name': f'baq.{backup_id}.data.00000',
'df_offset': 0,
'df_size': 33,
}
}, {
'file_done': {
'sha3_512': 'adb798d7b4c94952e61c5d9beed5d3bf9443460f5d5a9f17eb32def95bc23ba8608f7630ea236958602500d06f5c19c64114c06ce09f1b92301b9c3fc73f0728',
}
}, {
'directory': {
'atime': meta_content[5]['directory']['atime'],
'ctime': meta_content[5]['directory']['ctime'],
'mtime': meta_content[5]['directory']['mtime'],
'uid': meta_content[5]['directory']['uid'],
'gid': meta_content[5]['directory']['gid'],
'mode': meta_content[5]['directory']['mode'],
'path': 'dir1',
}
}, {
'file': {
'atime': meta_content[6]['file']['atime'],
'ctime': meta_content[6]['file']['ctime'],
'mtime': meta_content[6]['file']['mtime'],
'uid': meta_content[6]['file']['uid'],
'gid': meta_content[6]['file']['gid'],
'mode': meta_content[6]['file']['mode'],
'path': 'dir1/sample.txt',
}
}, {
'content': {
'offset': 0,
'sha3_512': 'd318a04d4a61bcb9f2f10a9523c30cfef69922fea0a3c4c1c7f5f01fed01cea9ee4a9a14e29126fadb0427eae42df1efa8a0cd18eb0d75a96241a1da432dbe8d',
'encryption_key_sha1': meta_content[0]['baq_backup']['encryption_keys'][0]['sha1'],
'df_name': f'baq.{backup_id}.data.00000',
'df_offset': 33,
'df_size': 49,
}
}, {
'file_done': {
'sha3_512': 'd318a04d4a61bcb9f2f10a9523c30cfef69922fea0a3c4c1c7f5f01fed01cea9ee4a9a14e29126fadb0427eae42df1efa8a0cd18eb0d75a96241a1da432dbe8d'
}
}, {
'done': {
'backup_id': backup_id,
'date': meta_content[-1]['done']['date'],
}
}
]
def test_incremental_backup_and_restore(temp_dir, sample_age_key):
(temp_dir / 'src').mkdir()
(temp_dir / 'src/hello.txt').write_text('Hello, World!\n')
(temp_dir / 'src/big').write_bytes(os.urandom(3 * 2**20))
backend = FileBackend(temp_dir / 'backup_target')
backup_result = backup(temp_dir / 'src', backend=backend, recipients=[sample_age_key], recipients_files=[])
backup_id_1 = backup_result.backup_id
while datetime.utcnow().strftime('%Y%m%dT%H%M%SZ') == backup_result.backup_id:
sleep(0.05)
with (temp_dir / 'src/big').open(mode='r+b') as f:
f.write(os.urandom(100))
backend = FileBackend(temp_dir / 'backup_target')
backup_result = backup(temp_dir / 'src', backend=backend, recipients=[sample_age_key], recipients_files=[])
backup_id_2 = backup_result.backup_id
assert (temp_dir / 'backup_target' / f'baq.{backup_id_1}.data.00000').is_file()
assert (temp_dir / 'backup_target' / f'baq.{backup_id_1}.data.00000').stat().st_size > 3000000
assert (temp_dir / 'backup_target' / f'baq.{backup_id_2}.data.00000').is_file()
assert (temp_dir / 'backup_target' / f'baq.{backup_id_2}.data.00000').stat().st_size < 1500000
(temp_dir / 'restored').mkdir()
restore(temp_dir / 'restored', backend, backup_id_2, [temp_dir / 'age_key'])
assert (temp_dir / 'src/hello.txt').read_bytes() == (temp_dir / 'restored/hello.txt').read_bytes()
#assert (temp_dir / 'src/dir1/sample.txt').read_bytes() == (temp_dir / 'restored/dir1/sample.txt').read_bytes()
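# --- Hedged usage sketch (not part of the original test module) ---
# The call sequence the tests above exercise, written out without pytest.
# Paths are hypothetical, and encryption is skipped by passing empty recipient
# lists, mirroring test_backup_and_restore_without_encryption.
#
# from pathlib import Path
# backend = FileBackend(Path('/tmp/baq_target'))
# result = backup(Path('/tmp/data_to_protect'), backend=backend,
#                 recipients=[], recipients_files=[])
# restore(Path('/tmp/data_restored'), backend, result.backup_id, [])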
|
[
"baq.backends.FileBackend",
"json.loads",
"time.sleep",
"datetime.datetime.utcnow",
"baq.operations.backup",
"os.urandom",
"baq.operations.restore"
] |
[((649, 688), 'baq.backends.FileBackend', 'FileBackend', (["(temp_dir / 'backup_target')"], {}), "(temp_dir / 'backup_target')\n", (660, 688), False, 'from baq.backends import FileBackend\n'), ((709, 786), 'baq.operations.backup', 'backup', (["(temp_dir / 'src')"], {'backend': 'backend', 'recipients': '[]', 'recipients_files': '[]'}), "(temp_dir / 'src', backend=backend, recipients=[], recipients_files=[])\n", (715, 786), False, 'from baq.operations import backup, restore\n'), ((867, 921), 'baq.operations.restore', 'restore', (["(temp_dir / 'restored')", 'backend', 'backup_id', '[]'], {}), "(temp_dir / 'restored', backend, backup_id, [])\n", (874, 921), False, 'from baq.operations import backup, restore\n'), ((1577, 1616), 'baq.backends.FileBackend', 'FileBackend', (["(temp_dir / 'backup_target')"], {}), "(temp_dir / 'backup_target')\n", (1588, 1616), False, 'from baq.backends import FileBackend\n'), ((1637, 1732), 'baq.operations.backup', 'backup', (["(temp_dir / 'src')"], {'backend': 'backend', 'recipients': '[sample_age_key]', 'recipients_files': '[]'}), "(temp_dir / 'src', backend=backend, recipients=[sample_age_key],\n recipients_files=[])\n", (1643, 1732), False, 'from baq.operations import backup, restore\n'), ((1809, 1883), 'baq.operations.restore', 'restore', (["(temp_dir / 'restored')", 'backend', 'backup_id', "[temp_dir / 'age_key']"], {}), "(temp_dir / 'restored', backend, backup_id, [temp_dir / 'age_key'])\n", (1816, 1883), False, 'from baq.operations import backup, restore\n'), ((6544, 6583), 'baq.backends.FileBackend', 'FileBackend', (["(temp_dir / 'backup_target')"], {}), "(temp_dir / 'backup_target')\n", (6555, 6583), False, 'from baq.backends import FileBackend\n'), ((6604, 6699), 'baq.operations.backup', 'backup', (["(temp_dir / 'src')"], {'backend': 'backend', 'recipients': '[sample_age_key]', 'recipients_files': '[]'}), "(temp_dir / 'src', backend=backend, recipients=[sample_age_key],\n recipients_files=[])\n", (6610, 6699), False, 'from baq.operations import backup, restore\n'), ((6943, 6982), 'baq.backends.FileBackend', 'FileBackend', (["(temp_dir / 'backup_target')"], {}), "(temp_dir / 'backup_target')\n", (6954, 6982), False, 'from baq.backends import FileBackend\n'), ((7003, 7098), 'baq.operations.backup', 'backup', (["(temp_dir / 'src')"], {'backend': 'backend', 'recipients': '[sample_age_key]', 'recipients_files': '[]'}), "(temp_dir / 'src', backend=backend, recipients=[sample_age_key],\n recipients_files=[])\n", (7009, 7098), False, 'from baq.operations import backup, restore\n'), ((7543, 7619), 'baq.operations.restore', 'restore', (["(temp_dir / 'restored')", 'backend', 'backup_id_2', "[temp_dir / 'age_key']"], {}), "(temp_dir / 'restored', backend, backup_id_2, [temp_dir / 'age_key'])\n", (7550, 7619), False, 'from baq.operations import backup, restore\n'), ((2344, 2360), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2354, 2360), False, 'import json\n'), ((6507, 6530), 'os.urandom', 'os.urandom', (['(3 * 2 ** 20)'], {}), '(3 * 2 ** 20)\n', (6517, 6530), False, 'import os\n'), ((6829, 6840), 'time.sleep', 'sleep', (['(0.05)'], {}), '(0.05)\n', (6834, 6840), False, 'from time import sleep\n'), ((6912, 6927), 'os.urandom', 'os.urandom', (['(100)'], {}), '(100)\n', (6922, 6927), False, 'import os\n'), ((6748, 6765), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (6763, 6765), False, 'from datetime import datetime\n')]
|
import logging
import random as rnd
import lea
import scapy.layers.inet as inet
import Attack.BaseAttack as BaseAttack
import Lib.Utility as Util
from Attack.Parameter import Parameter, Boolean, Float, IPAddress, MACAddress, Port
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
# noinspection PyPep8
class PortscanAttack(BaseAttack.BaseAttack):
PORT_SOURCE = 'port.src'
PORT_DESTINATION = 'port.dst'
PORT_OPEN = 'port.open'
PORT_DEST_SHUFFLE = 'port.dst.shuffle'
PORT_DEST_ORDER_DESC = 'port.dst.order-desc'
IP_SOURCE_RANDOMIZE = 'ip.src.shuffle'
PORT_SOURCE_RANDOMIZE = 'port.src.shuffle'
def __init__(self):
"""
Creates a new instance of the PortscanAttack.
This attack injects TCP Syn-requests and respective responses into the output pcap file.
"""
# Initialize attack
super(PortscanAttack, self).__init__("Portscan Attack", "Injects a nmap 'regular scan'",
"Scanning/Probing")
# Define allowed parameters and their type
self.update_params([
Parameter(self.IP_SOURCE, IPAddress()),
Parameter(self.IP_DESTINATION, IPAddress()),
Parameter(self.PORT_SOURCE, Port()),
Parameter(self.PORT_DESTINATION, Port()),
Parameter(self.PORT_OPEN, Port()),
Parameter(self.MAC_SOURCE, MACAddress()),
Parameter(self.MAC_DESTINATION, MACAddress()),
Parameter(self.PORT_DEST_SHUFFLE, Boolean()),
Parameter(self.PORT_DEST_ORDER_DESC, Boolean()),
Parameter(self.IP_SOURCE_RANDOMIZE, Boolean()),
Parameter(self.PACKETS_PER_SECOND, Float()),
Parameter(self.PORT_SOURCE_RANDOMIZE, Boolean())
])
def init_param(self, param: str) -> bool:
"""
Initialize a parameter with a default value specified in the specific attack.
:param param: parameter, which should be initialized
:return: True if initialization was successful, False if not
"""
value = None
if param == self.IP_SOURCE:
value = self.statistics.get_most_used_ip_address()
elif param == self.IP_SOURCE_RANDOMIZE:
value = 'False'
elif param == self.MAC_SOURCE:
ip_src = self.get_param_value(self.IP_SOURCE)
if ip_src is None:
return False
value = self.get_mac_address(ip_src)
elif param == self.IP_DESTINATION:
ip_src = self.get_param_value(self.IP_SOURCE)
if ip_src is None:
return False
value = self.statistics.get_random_ip_address(ips=[ip_src])
elif param == self.MAC_DESTINATION:
ip_dst = self.get_param_value(self.IP_DESTINATION)
if ip_dst is None:
return False
value = self.get_mac_address(ip_dst)
elif param == self.PORT_DESTINATION:
value = self.get_ports_from_nmap_service_dst(1000)
elif param == self.PORT_OPEN:
value = '1'
elif param == self.PORT_DEST_SHUFFLE:
value = 'False'
elif param == self.PORT_DEST_ORDER_DESC:
value = 'False'
elif param == self.PORT_SOURCE:
value = rnd.randint(1024, 65535)
elif param == self.PORT_SOURCE_RANDOMIZE:
value = 'False'
elif param == self.PACKETS_PER_SECOND:
value = self.statistics.get_most_used_pps()
elif param == self.INJECT_AFTER_PACKET:
value = rnd.randint(0, self.statistics.get_packet_count())
if value is None:
return False
return self.add_param_value(param, value)
def generate_attack_packets(self):
"""
Creates the attack packets.
"""
mac_source = self.get_param_value(self.MAC_SOURCE)
mac_destination = self.get_param_value(self.MAC_DESTINATION)
# Determine ports
dest_ports = self.get_param_value(self.PORT_DESTINATION)
if self.get_param_value(self.PORT_DEST_ORDER_DESC):
dest_ports.reverse()
elif self.get_param_value(self.PORT_DEST_SHUFFLE):
rnd.shuffle(dest_ports)
if self.get_param_value(self.PORT_SOURCE_RANDOMIZE):
# FIXME: why is sport never used?
sport = rnd.randint(1, 65535)
else:
sport = self.get_param_value(self.PORT_SOURCE)
# Timestamp
timestamp_next_pkt = self.get_param_value(self.INJECT_AT_TIMESTAMP)
# store start time of attack
self.attack_start_utime = timestamp_next_pkt
# Initialize parameters
ip_source = self.get_param_value(self.IP_SOURCE)
if isinstance(ip_source, list):
ip_source = ip_source[0]
ip_destination = self.get_param_value(self.IP_DESTINATION)
if not isinstance(ip_destination, list):
ip_destination = [ip_destination]
# Check ip.src == ip.dst
self.ip_src_dst_catch_equal(ip_source, ip_destination)
for ip in ip_destination:
# Select open ports
ports_open = self.get_param_value(self.PORT_OPEN)
if ports_open == 1: # user did not specify open ports
# the ports that were already used by ip.dst (direction in) in the background traffic are open ports
ports_used_by_ip_dst = self.statistics.process_db_query(
"SELECT portNumber FROM ip_ports WHERE portDirection='in' AND ipAddress='" + ip + "'")
if ports_used_by_ip_dst:
ports_open = ports_used_by_ip_dst
else: # if no ports were retrieved from database
# Take open ports from nmap-service file
# ports_temp = self.get_ports_from_nmap_service_dst(100)
# ports_open = ports_temp[0:rnd.randint(1,10)]
# OR take open ports from the most used ports in traffic statistics
ports_open = self.statistics.process_db_query(
"SELECT portNumber FROM ip_ports GROUP BY portNumber ORDER BY SUM(portCount) DESC LIMIT " + str(
rnd.randint(1, 10)))
# in case of one open port, convert ports_open to array
if not isinstance(ports_open, list):
ports_open = [ports_open]
# Set MSS (Maximum Segment Size) based on MSS distribution of IP address
source_mss_dist = self.statistics.get_mss_distribution(ip_source)
if len(source_mss_dist) > 0:
source_mss_prob_dict = lea.Lea.fromValFreqsDict(source_mss_dist)
source_mss_value = source_mss_prob_dict.random()
else:
source_mss_value = Util.handle_most_used_outputs(self.statistics.get_most_used_mss_value())
destination_mss_dist = self.statistics.get_mss_distribution(ip)
if len(destination_mss_dist) > 0:
destination_mss_prob_dict = lea.Lea.fromValFreqsDict(destination_mss_dist)
destination_mss_value = destination_mss_prob_dict.random()
else:
destination_mss_value = Util.handle_most_used_outputs(self.statistics.get_most_used_mss_value())
# Set TTL based on TTL distribution of IP address
source_ttl_dist = self.statistics.get_ttl_distribution(ip_source)
if len(source_ttl_dist) > 0:
source_ttl_prob_dict = lea.Lea.fromValFreqsDict(source_ttl_dist)
source_ttl_value = source_ttl_prob_dict.random()
else:
source_ttl_value = Util.handle_most_used_outputs(self.statistics.get_most_used_ttl_value())
destination_ttl_dist = self.statistics.get_ttl_distribution(ip)
if len(destination_ttl_dist) > 0:
destination_ttl_prob_dict = lea.Lea.fromValFreqsDict(destination_ttl_dist)
destination_ttl_value = destination_ttl_prob_dict.random()
else:
destination_ttl_value = Util.handle_most_used_outputs(self.statistics.get_most_used_ttl_value())
# Set Window Size based on Window Size distribution of IP address
source_win_dist = self.statistics.get_win_distribution(ip_source)
if len(source_win_dist) > 0:
source_win_prob_dict = lea.Lea.fromValFreqsDict(source_win_dist)
source_win_value = source_win_prob_dict.random()
else:
source_win_value = Util.handle_most_used_outputs(self.statistics.get_most_used_win_size())
destination_win_dist = self.statistics.get_win_distribution(ip)
if len(destination_win_dist) > 0:
destination_win_prob_dict = lea.Lea.fromValFreqsDict(destination_win_dist)
destination_win_value = destination_win_prob_dict.random()
else:
destination_win_value = Util.handle_most_used_outputs(self.statistics.get_most_used_win_size())
min_delay, max_delay = self.get_reply_latency(ip_source, ip)
for dport in dest_ports:
# Parameters changing each iteration
if self.get_param_value(self.IP_SOURCE_RANDOMIZE) and isinstance(ip_source, list):
ip_source = rnd.choice(ip_source)
# 1) Build request package
request_ether = inet.Ether(src=mac_source, dst=mac_destination)
request_ip = inet.IP(src=ip_source, dst=ip, ttl=source_ttl_value)
# Random src port for each packet
sport = rnd.randint(1, 65535)
request_tcp = inet.TCP(sport=sport, dport=dport, window=source_win_value, flags='S',
options=[('MSS', source_mss_value)])
request = (request_ether / request_ip / request_tcp)
request.time = timestamp_next_pkt
# Append request
self.add_packet(request, ip_source, ip)
# 2) Build reply (for open ports) package
if dport in ports_open: # destination port is OPEN
reply_ether = inet.Ether(src=mac_destination, dst=mac_source)
reply_ip = inet.IP(src=ip, dst=ip_source, ttl=destination_ttl_value, flags='DF')
reply_tcp = inet.TCP(sport=dport, dport=sport, seq=0, ack=1, flags='SA', window=destination_win_value,
options=[('MSS', destination_mss_value)])
reply = (reply_ether / reply_ip / reply_tcp)
timestamp_reply = self.timestamp_controller.next_timestamp(latency=min_delay)
reply.time = timestamp_reply
self.add_packet(reply, ip_source, ip)
# requester confirms
confirm_ether = request_ether
confirm_ip = request_ip
confirm_tcp = inet.TCP(sport=sport, dport=dport, seq=1, window=0, flags='R')
confirm = (confirm_ether / confirm_ip / confirm_tcp)
self.timestamp_controller.set_timestamp(timestamp_reply)
timestamp_confirm = self.timestamp_controller.next_timestamp(latency=min_delay)
confirm.time = timestamp_confirm
self.add_packet(confirm, ip_source, ip)
# else: destination port is NOT OPEN -> no reply is sent by target
self.timestamp_controller.set_timestamp(timestamp_next_pkt)
timestamp_next_pkt = self.timestamp_controller.next_timestamp()
def generate_attack_pcap(self):
"""
Creates a pcap containing the attack packets.
        :return: The number of generated packets and the location of the generated pcap file.
"""
# store end time of attack
self.attack_end_utime = self.packets[-1].time
# write attack packets to pcap
pcap_path = self.write_attack_pcap(sorted(self.packets, key=lambda pkt: pkt.time))
        # return the number of packets and the path to the pcap file
return len(self.packets), pcap_path
|
[
"Attack.Parameter.IPAddress",
"random.randint",
"random.shuffle",
"lea.Lea.fromValFreqsDict",
"scapy.layers.inet.TCP",
"Attack.Parameter.Boolean",
"random.choice",
"scapy.layers.inet.Ether",
"scapy.layers.inet.IP",
"Attack.Parameter.Float",
"Attack.Parameter.Port",
"Attack.Parameter.MACAddress",
"logging.getLogger"
] |
[((234, 268), 'logging.getLogger', 'logging.getLogger', (['"""scapy.runtime"""'], {}), "('scapy.runtime')\n", (251, 268), False, 'import logging\n'), ((4444, 4465), 'random.randint', 'rnd.randint', (['(1)', '(65535)'], {}), '(1, 65535)\n', (4455, 4465), True, 'import random as rnd\n'), ((4293, 4316), 'random.shuffle', 'rnd.shuffle', (['dest_ports'], {}), '(dest_ports)\n', (4304, 4316), True, 'import random as rnd\n'), ((6739, 6780), 'lea.Lea.fromValFreqsDict', 'lea.Lea.fromValFreqsDict', (['source_mss_dist'], {}), '(source_mss_dist)\n', (6763, 6780), False, 'import lea\n'), ((7138, 7184), 'lea.Lea.fromValFreqsDict', 'lea.Lea.fromValFreqsDict', (['destination_mss_dist'], {}), '(destination_mss_dist)\n', (7162, 7184), False, 'import lea\n'), ((7612, 7653), 'lea.Lea.fromValFreqsDict', 'lea.Lea.fromValFreqsDict', (['source_ttl_dist'], {}), '(source_ttl_dist)\n', (7636, 7653), False, 'import lea\n'), ((8011, 8057), 'lea.Lea.fromValFreqsDict', 'lea.Lea.fromValFreqsDict', (['destination_ttl_dist'], {}), '(destination_ttl_dist)\n', (8035, 8057), False, 'import lea\n'), ((8501, 8542), 'lea.Lea.fromValFreqsDict', 'lea.Lea.fromValFreqsDict', (['source_win_dist'], {}), '(source_win_dist)\n', (8525, 8542), False, 'import lea\n'), ((8899, 8945), 'lea.Lea.fromValFreqsDict', 'lea.Lea.fromValFreqsDict', (['destination_win_dist'], {}), '(destination_win_dist)\n', (8923, 8945), False, 'import lea\n'), ((9545, 9592), 'scapy.layers.inet.Ether', 'inet.Ether', ([], {'src': 'mac_source', 'dst': 'mac_destination'}), '(src=mac_source, dst=mac_destination)\n', (9555, 9592), True, 'import scapy.layers.inet as inet\n'), ((9622, 9674), 'scapy.layers.inet.IP', 'inet.IP', ([], {'src': 'ip_source', 'dst': 'ip', 'ttl': 'source_ttl_value'}), '(src=ip_source, dst=ip, ttl=source_ttl_value)\n', (9629, 9674), True, 'import scapy.layers.inet as inet\n'), ((9750, 9771), 'random.randint', 'rnd.randint', (['(1)', '(65535)'], {}), '(1, 65535)\n', (9761, 9771), True, 'import random as rnd\n'), ((9803, 9914), 'scapy.layers.inet.TCP', 'inet.TCP', ([], {'sport': 'sport', 'dport': 'dport', 'window': 'source_win_value', 'flags': '"""S"""', 'options': "[('MSS', source_mss_value)]"}), "(sport=sport, dport=dport, window=source_win_value, flags='S',\n options=[('MSS', source_mss_value)])\n", (9811, 9914), True, 'import scapy.layers.inet as inet\n'), ((1145, 1156), 'Attack.Parameter.IPAddress', 'IPAddress', ([], {}), '()\n', (1154, 1156), False, 'from Attack.Parameter import Parameter, Boolean, Float, IPAddress, MACAddress, Port\n'), ((1202, 1213), 'Attack.Parameter.IPAddress', 'IPAddress', ([], {}), '()\n', (1211, 1213), False, 'from Attack.Parameter import Parameter, Boolean, Float, IPAddress, MACAddress, Port\n'), ((1256, 1262), 'Attack.Parameter.Port', 'Port', ([], {}), '()\n', (1260, 1262), False, 'from Attack.Parameter import Parameter, Boolean, Float, IPAddress, MACAddress, Port\n'), ((1310, 1316), 'Attack.Parameter.Port', 'Port', ([], {}), '()\n', (1314, 1316), False, 'from Attack.Parameter import Parameter, Boolean, Float, IPAddress, MACAddress, Port\n'), ((1357, 1363), 'Attack.Parameter.Port', 'Port', ([], {}), '()\n', (1361, 1363), False, 'from Attack.Parameter import Parameter, Boolean, Float, IPAddress, MACAddress, Port\n'), ((1405, 1417), 'Attack.Parameter.MACAddress', 'MACAddress', ([], {}), '()\n', (1415, 1417), False, 'from Attack.Parameter import Parameter, Boolean, Float, IPAddress, MACAddress, Port\n'), ((1464, 1476), 'Attack.Parameter.MACAddress', 'MACAddress', ([], {}), '()\n', (1474, 1476), False, 'from Attack.Parameter 
import Parameter, Boolean, Float, IPAddress, MACAddress, Port\n'), ((1525, 1534), 'Attack.Parameter.Boolean', 'Boolean', ([], {}), '()\n', (1532, 1534), False, 'from Attack.Parameter import Parameter, Boolean, Float, IPAddress, MACAddress, Port\n'), ((1586, 1595), 'Attack.Parameter.Boolean', 'Boolean', ([], {}), '()\n', (1593, 1595), False, 'from Attack.Parameter import Parameter, Boolean, Float, IPAddress, MACAddress, Port\n'), ((1646, 1655), 'Attack.Parameter.Boolean', 'Boolean', ([], {}), '()\n', (1653, 1655), False, 'from Attack.Parameter import Parameter, Boolean, Float, IPAddress, MACAddress, Port\n'), ((1705, 1712), 'Attack.Parameter.Float', 'Float', ([], {}), '()\n', (1710, 1712), False, 'from Attack.Parameter import Parameter, Boolean, Float, IPAddress, MACAddress, Port\n'), ((1765, 1774), 'Attack.Parameter.Boolean', 'Boolean', ([], {}), '()\n', (1772, 1774), False, 'from Attack.Parameter import Parameter, Boolean, Float, IPAddress, MACAddress, Port\n'), ((9447, 9468), 'random.choice', 'rnd.choice', (['ip_source'], {}), '(ip_source)\n', (9457, 9468), True, 'import random as rnd\n'), ((10321, 10368), 'scapy.layers.inet.Ether', 'inet.Ether', ([], {'src': 'mac_destination', 'dst': 'mac_source'}), '(src=mac_destination, dst=mac_source)\n', (10331, 10368), True, 'import scapy.layers.inet as inet\n'), ((10400, 10469), 'scapy.layers.inet.IP', 'inet.IP', ([], {'src': 'ip', 'dst': 'ip_source', 'ttl': 'destination_ttl_value', 'flags': '"""DF"""'}), "(src=ip, dst=ip_source, ttl=destination_ttl_value, flags='DF')\n", (10407, 10469), True, 'import scapy.layers.inet as inet\n'), ((10502, 10639), 'scapy.layers.inet.TCP', 'inet.TCP', ([], {'sport': 'dport', 'dport': 'sport', 'seq': '(0)', 'ack': '(1)', 'flags': '"""SA"""', 'window': 'destination_win_value', 'options': "[('MSS', destination_mss_value)]"}), "(sport=dport, dport=sport, seq=0, ack=1, flags='SA', window=\n destination_win_value, options=[('MSS', destination_mss_value)])\n", (10510, 10639), True, 'import scapy.layers.inet as inet\n'), ((11118, 11180), 'scapy.layers.inet.TCP', 'inet.TCP', ([], {'sport': 'sport', 'dport': 'dport', 'seq': '(1)', 'window': '(0)', 'flags': '"""R"""'}), "(sport=sport, dport=dport, seq=1, window=0, flags='R')\n", (11126, 11180), True, 'import scapy.layers.inet as inet\n'), ((6315, 6333), 'random.randint', 'rnd.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (6326, 6333), True, 'import random as rnd\n'), ((3383, 3407), 'random.randint', 'rnd.randint', (['(1024)', '(65535)'], {}), '(1024, 65535)\n', (3394, 3407), True, 'import random as rnd\n')]
|
#! python3
"""Walks through a folder tree and copies every file with a given extensison
to a new folder."""
import os, sys, shutil
from pathlib import Path
def main(args):
"""Walks through the current working directory and copies files with a given extension."""
for folder, subfolders, files in os.walk(Path.cwd()):
for file in files:
            if file[(-1) * len(args[1]):] == args[1]:
if not Path.exists(Path(Path.cwd(), args[2])):
os.mkdir(Path(Path.cwd(), args[2]))
                shutil.copy(Path(folder, file), Path(Path.cwd(), args[2]))
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: python selective_copy.py <file extension> <new folder>")
print("Example python selective_copy.py .txt text_files")
sys.exit()
else:
main(sys.argv)
|
[
"pathlib.Path.cwd",
"sys.exit"
] |
[((321, 331), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (329, 331), False, 'from pathlib import Path\n'), ((822, 832), 'sys.exit', 'sys.exit', ([], {}), '()\n', (830, 832), False, 'import os, sys, shutil\n'), ((567, 577), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (575, 577), False, 'from pathlib import Path\n'), ((591, 601), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (599, 601), False, 'from pathlib import Path\n'), ((455, 465), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (463, 465), False, 'from pathlib import Path\n'), ((512, 522), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (520, 522), False, 'from pathlib import Path\n')]
|
import pickle
import numpy as np
import pandas as pd
from keras.utils import np_utils
from keras.utils.vis_utils import plot_model
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.layers import LSTM, Dense, Embedding, Dropout
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from tensorflow.keras.callbacks import TensorBoard
import time
NAME = 'lstm-{}'.format(int(time.time()))
tensorboard = TensorBoard(log_dir='./logs/{}'.format(NAME))
# Load the data.
# In the data file, the features are in 'evaluation' and the class is in 'label'.
def load_data(filepath, input_shape=20):
df = pd.read_csv(filepath)
    # Labels and vocabulary
labels, vocabulary = list(df['label'].unique()), list(df['CONTENT'].unique())
    # Build character-level features
string = ''
for word in vocabulary:
string += word
vocabulary = set(string)
    # Dictionary mappings
word_dictionary = {word: i+1 for i, word in enumerate(vocabulary)}
with open('./data/lstm/word_dict.pk', 'wb') as f:
pickle.dump(word_dictionary, f)
inverse_word_dictionary = {i+1: word for i, word in enumerate(vocabulary)}
label_dictionary = {label: i for i, label in enumerate(labels)}
with open('./data/lstm/label_dict.pk', 'wb') as f:
pickle.dump(label_dictionary, f)
output_dictionary = {i: labels for i, labels in enumerate(labels)}
    vocab_size = len(word_dictionary.keys()) # vocabulary size
    label_size = len(label_dictionary.keys()) # number of label classes
    # Pad sequences to length input_shape; shorter sequences are padded with 0
x = [[word_dictionary[word] for word in sent] for sent in df['CONTENT']]
x = pad_sequences(maxlen=input_shape, sequences=x, padding='post', value=0)
y = [[label_dictionary[sent]] for sent in df['label']]
y = [np_utils.to_categorical(label, num_classes=label_size) for label in y]
y = np.array([list(_[0]) for _ in y])
return x, y, output_dictionary, vocab_size, label_size, inverse_word_dictionary
# Build the deep learning model: Embedding + LSTM + Softmax.
def create_LSTM(n_units, input_shape, output_dim, filepath):
x, y, output_dictionary, vocab_size, label_size, inverse_word_dictionary = load_data(filepath)
model = Sequential()
model.add(Embedding(input_dim=vocab_size + 1, output_dim=output_dim,
input_length=input_shape, mask_zero=True))
model.add(LSTM(n_units, input_shape=(x.shape[0], x.shape[1])))
model.add(Dropout(0.2))
model.add(Dense(label_size, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
plot_model(model, to_file='./data/img/model_lstm.png', show_shapes=True)
model.summary()
return model
# Model training
def model_train(input_shape, filepath, model_save_path):
    # Split the dataset into training and test sets with a 9:1 ratio
# input_shape = 100
x, y, output_dictionary, vocab_size, label_size, inverse_word_dictionary = load_data(filepath, input_shape)
train_x, test_x, train_y, test_y = train_test_split(x, y, test_size = 0.1, random_state = 42)
    # Model hyperparameters; adjust as needed
n_units = 100
batch_size = 32
epochs = 5
output_dim = 20
    # Train the model
lstm_model = create_LSTM(n_units, input_shape, output_dim, filepath)
lstm_model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=1,callbacks=[tensorboard])
    # Save the model
lstm_model.save(model_save_path)
    N = test_x.shape[0] # number of test samples
predict = []
label = []
for start, end in zip(range(0, N, 1), range(1, N+1, 1)):
sentence = [inverse_word_dictionary[i] for i in test_x[start] if i != 0]
y_predict = lstm_model.predict(test_x[start:end])
label_predict = output_dictionary[np.argmax(y_predict[0])]
label_true = output_dictionary[np.argmax(test_y[start:end])]
        print(''.join(sentence), label_true, label_predict) # print the prediction result
predict.append(label_predict)
label.append(label_true)
    acc = accuracy_score(predict, label) # prediction accuracy
    print('Model accuracy on the test set: %s.' % acc)
if __name__ == '__main__':
filepath = './data/comment_trainset_2class.csv'
input_shape = 140
model_save_path = './data/lstm/douban_lstm.model'
model_train(input_shape, filepath, model_save_path)
|
[
"pickle.dump",
"numpy.argmax",
"pandas.read_csv",
"keras.preprocessing.sequence.pad_sequences",
"sklearn.model_selection.train_test_split",
"keras.utils.vis_utils.plot_model",
"sklearn.metrics.accuracy_score",
"keras.layers.LSTM",
"keras.layers.Dropout",
"time.time",
"keras.utils.np_utils.to_categorical",
"keras.layers.Dense",
"keras.layers.Embedding",
"keras.models.Sequential"
] |
[((632, 653), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), '(filepath)\n', (643, 653), True, 'import pandas as pd\n'), ((1586, 1657), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', ([], {'maxlen': 'input_shape', 'sequences': 'x', 'padding': '"""post"""', 'value': '(0)'}), "(maxlen=input_shape, sequences=x, padding='post', value=0)\n", (1599, 1657), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((2137, 2149), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2147, 2149), False, 'from keras.models import Sequential\n'), ((2536, 2608), 'keras.utils.vis_utils.plot_model', 'plot_model', (['model'], {'to_file': '"""./data/img/model_lstm.png"""', 'show_shapes': '(True)'}), "(model, to_file='./data/img/model_lstm.png', show_shapes=True)\n", (2546, 2608), False, 'from keras.utils.vis_utils import plot_model\n'), ((2915, 2969), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.1)', 'random_state': '(42)'}), '(x, y, test_size=0.1, random_state=42)\n', (2931, 2969), False, 'from sklearn.model_selection import train_test_split\n'), ((3869, 3899), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['predict', 'label'], {}), '(predict, label)\n', (3883, 3899), False, 'from sklearn.metrics import accuracy_score\n'), ((466, 477), 'time.time', 'time.time', ([], {}), '()\n', (475, 477), False, 'import time\n'), ((1009, 1040), 'pickle.dump', 'pickle.dump', (['word_dictionary', 'f'], {}), '(word_dictionary, f)\n', (1020, 1040), False, 'import pickle\n'), ((1251, 1283), 'pickle.dump', 'pickle.dump', (['label_dictionary', 'f'], {}), '(label_dictionary, f)\n', (1262, 1283), False, 'import pickle\n'), ((1726, 1780), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['label'], {'num_classes': 'label_size'}), '(label, num_classes=label_size)\n', (1749, 1780), False, 'from keras.utils import np_utils\n'), ((2164, 2269), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': '(vocab_size + 1)', 'output_dim': 'output_dim', 'input_length': 'input_shape', 'mask_zero': '(True)'}), '(input_dim=vocab_size + 1, output_dim=output_dim, input_length=\n input_shape, mask_zero=True)\n', (2173, 2269), False, 'from keras.layers import LSTM, Dense, Embedding, Dropout\n'), ((2304, 2355), 'keras.layers.LSTM', 'LSTM', (['n_units'], {'input_shape': '(x.shape[0], x.shape[1])'}), '(n_units, input_shape=(x.shape[0], x.shape[1]))\n', (2308, 2355), False, 'from keras.layers import LSTM, Dense, Embedding, Dropout\n'), ((2371, 2383), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2378, 2383), False, 'from keras.layers import LSTM, Dense, Embedding, Dropout\n'), ((2399, 2438), 'keras.layers.Dense', 'Dense', (['label_size'], {'activation': '"""softmax"""'}), "(label_size, activation='softmax')\n", (2404, 2438), False, 'from keras.layers import LSTM, Dense, Embedding, Dropout\n'), ((3624, 3647), 'numpy.argmax', 'np.argmax', (['y_predict[0]'], {}), '(y_predict[0])\n', (3633, 3647), True, 'import numpy as np\n'), ((3688, 3716), 'numpy.argmax', 'np.argmax', (['test_y[start:end]'], {}), '(test_y[start:end])\n', (3697, 3716), True, 'import numpy as np\n')]
|
import os
from src.Background import Background
from PIL import Image
# Class that represents the whole set of background images in the application
class BackgroundSet:
def __init__(self, directory):
self.directory = directory
self.number_of_backgrounds = 0
self.background_images = []
def make_background_set(self):
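        # Open every file in the directory as an image and wrap it in a Background object.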
for filename in os.listdir(self.directory):
image_path = self.directory + filename
open_image = Background(filename, Image.open(image_path, 'r'))
self.background_images.append(open_image)
self.number_of_backgrounds += 1
|
[
"os.listdir",
"PIL.Image.open"
] |
[((377, 403), 'os.listdir', 'os.listdir', (['self.directory'], {}), '(self.directory)\n', (387, 403), False, 'import os\n'), ((502, 529), 'PIL.Image.open', 'Image.open', (['image_path', '"""r"""'], {}), "(image_path, 'r')\n", (512, 529), False, 'from PIL import Image\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'Commit-Man'
DESCRIPTION = 'Official Commit man python package'
URL = 'https://github.com/atharva-Gundawar/commit-man'
EMAIL = '<EMAIL>'
AUTHOR = '<NAME>'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.0.3'
REQUIRED = [
"datetime", "gitignore_parser", "docopt"
]
here = os.path.abspath(os.path.dirname(__file__))
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(),
# package_dir={'simplepipreqs':
# 'simplepipreqs'},
entry_points ={
'console_scripts': [
'cm = src.main:main'
]
},
include_package_data=True,
install_requires=REQUIRED,
keywords = 'git commit version-control-system vcs',
zip_safe = False,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
"Operating System :: OS Independent",
]
)
|
[
"os.path.dirname",
"os.path.join",
"setuptools.find_packages"
] |
[((448, 473), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (463, 473), False, 'import os\n'), ((989, 1004), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1002, 1004), False, 'from setuptools import find_packages, setup, Command\n'), ((498, 529), 'os.path.join', 'os.path.join', (['here', '"""README.md"""'], {}), "(here, 'README.md')\n", (510, 529), False, 'import os\n')]
|
import re
import os
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
'sphinx.ext.coverage']
master_doc = 'index'
project = 'cairocffi'
copyright = '2013, <NAME>'
release = re.search(
"VERSION = '([^']+)'",
open(os.path.join(os.path.dirname(__file__), os.pardir,
'cairocffi', '__init__.py')).read().strip()).group(1)
version = '.'.join(release.split('.')[:2])
exclude_patterns = ['_build']
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members']
intersphinx_mapping = {
'http://docs.python.org/': None,
'http://cairographics.org/documentation/pycairo/2/': None}
|
[
"os.path.dirname"
] |
[((260, 285), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (275, 285), False, 'import os\n')]
|
from Components.Sources.Source import Source
class CurrentService(Source):
def __init__(self, session):
Source.__init__(self)
self.session = session
def command(self):
currentServiceRef = self.session.nav.getCurrentlyPlayingServiceReference()
if currentServiceRef is not None:
text = currentServiceRef.toString()
else:
text = "N/A"
return text
text = property(command)
|
[
"Components.Sources.Source.Source.__init__"
] |
[((108, 129), 'Components.Sources.Source.Source.__init__', 'Source.__init__', (['self'], {}), '(self)\n', (123, 129), False, 'from Components.Sources.Source import Source\n')]
|
from pyregions.utilities import table_utilities
import pytest
@pytest.mark.parametrize(
"columns,expected",
[
(['abc', '123', 456], ['123',456]),
(["13.4", 'aslkjnsf12312ll'], ['13.4'])
]
)
def test_get_numeric_columns(columns, expected):
result = table_utilities.get_numeric_columns(columns)
assert result == expected
@pytest.mark.parametrize(
"value,expected",
[
("abc.tsv", '\t'),
("assssdawrfa.csv", ','),
("a.tab", "\t")
]
)
def test_get_delimiter(value, expected):
assert table_utilities._get_delimiter(value) == expected
|
[
"pytest.mark.parametrize",
"pyregions.utilities.table_utilities._get_delimiter",
"pyregions.utilities.table_utilities.get_numeric_columns"
] |
[((64, 192), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""columns,expected"""', "[(['abc', '123', 456], ['123', 456]), (['13.4', 'aslkjnsf12312ll'], ['13.4'])]"], {}), "('columns,expected', [(['abc', '123', 456], ['123', \n 456]), (['13.4', 'aslkjnsf12312ll'], ['13.4'])])\n", (87, 192), False, 'import pytest\n'), ((331, 441), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value,expected"""', "[('abc.tsv', '\\t'), ('assssdawrfa.csv', ','), ('a.tab', '\\t')]"], {}), "('value,expected', [('abc.tsv', '\\t'), (\n 'assssdawrfa.csv', ','), ('a.tab', '\\t')])\n", (354, 441), False, 'import pytest\n'), ((257, 301), 'pyregions.utilities.table_utilities.get_numeric_columns', 'table_utilities.get_numeric_columns', (['columns'], {}), '(columns)\n', (292, 301), False, 'from pyregions.utilities import table_utilities\n'), ((499, 536), 'pyregions.utilities.table_utilities._get_delimiter', 'table_utilities._get_delimiter', (['value'], {}), '(value)\n', (529, 536), False, 'from pyregions.utilities import table_utilities\n')]
|
# coding: utf-8
import base64
from decimal import *
from .asset import Asset
from .stellarxdr import Xdr
from .utils import account_xdr_object, signer_key_xdr_object, encode_check, best_rational_approximation as best_r, division, decode_check
from .utils import XdrLengthError, DecodeError
ONE = Decimal(10 ** 7)
class Operation(object):
"""what we can do in stellar network.
follow the specific . the source can be none.
"""
def __init__(self, opts):
assert type(opts) is dict
self.source = opts.get('source')
self.body = Xdr.nullclass()
def __eq__(self, other):
return self.xdr() == other.xdr()
def to_xdr_object(self):
try:
source_account = [account_xdr_object(self.source)]
except TypeError:
source_account = []
return Xdr.types.Operation(source_account, self.body)
def xdr(self):
op = Xdr.StellarXDRPacker()
op.pack_Operation(self.to_xdr_object())
return base64.b64encode(op.get_buffer())
@staticmethod
def to_xdr_amount(value):
if not isinstance(value, str):
raise Exception("value must be a string")
# throw exception if value * ONE has decimal places (it can't be represented as int64)
return int((Decimal(value) * ONE).to_integral_exact(context=Context(traps=[Inexact])))
@staticmethod
def from_xdr_amount(value):
return str(Decimal(value) / ONE)
@classmethod
def from_xdr(cls, xdr):
xdr_decode = base64.b64decode(xdr)
op = Xdr.StellarXDRUnpacker(xdr_decode)
op = op.unpack_Operation()
if op.type == Xdr.const.CREATE_ACCOUNT:
return CreateAccount.from_xdr_object(op)
elif op.type == Xdr.const.PAYMENT:
return Payment.from_xdr_object(op)
elif op.type == Xdr.const.PATH_PAYMENT:
return PathPayment.from_xdr_object(op)
elif op.type == Xdr.const.CHANGE_TRUST:
return ChangeTrust.from_xdr_object(op)
elif op.type == Xdr.const.ALLOW_TRUST:
return AllowTrust.from_xdr_object(op)
elif op.type == Xdr.const.SET_OPTIONS:
return SetOptions.from_xdr_object(op)
elif op.type == Xdr.const.MANAGE_OFFER:
return ManageOffer.from_xdr_object(op)
elif op.type == Xdr.const.CREATE_PASSIVE_OFFER:
return CreatePassiveOffer.from_xdr_object(op)
elif op.type == Xdr.const.ACCOUNT_MERGE:
return AccountMerge.from_xdr_object(op)
elif op.type == Xdr.const.INFLATION:
return Inflation.from_xdr_object(op)
elif op.type == Xdr.const.MANAGE_DATA:
return ManageData.from_xdr_object(op)
class CreateAccount(Operation):
def __init__(self, opts):
super(CreateAccount, self).__init__(opts)
self.destination = opts.get('destination')
self.starting_balance = opts.get('starting_balance')
def to_xdr_object(self):
destination = account_xdr_object(self.destination)
create_account_op = Xdr.types.CreateAccountOp(destination, Operation.to_xdr_amount(self.starting_balance))
self.body.type = Xdr.const.CREATE_ACCOUNT
self.body.createAccountOp = create_account_op
return super(CreateAccount, self).to_xdr_object()
@classmethod
def from_xdr_object(cls, op_xdr_object):
if not op_xdr_object.sourceAccount:
source = None
else:
source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
destination = encode_check('account', op_xdr_object.body.createAccountOp.destination.ed25519).decode()
starting_balance = Operation.from_xdr_amount(op_xdr_object.body.createAccountOp.startingBalance)
return cls({
'source': source,
'destination': destination,
'starting_balance': starting_balance,
})
class Payment(Operation):
def __init__(self, opts):
super(Payment, self).__init__(opts)
self.destination = opts.get('destination')
self.asset = opts.get('asset')
self.amount = opts.get('amount')
def to_xdr_object(self):
asset = self.asset.to_xdr_object()
destination = account_xdr_object(self.destination)
amount = Operation.to_xdr_amount(self.amount)
payment_op = Xdr.types.PaymentOp(destination, asset, amount)
self.body.type = Xdr.const.PAYMENT
self.body.paymentOp = payment_op
return super(Payment, self).to_xdr_object()
@classmethod
def from_xdr_object(cls, op_xdr_object):
if not op_xdr_object.sourceAccount:
source = None
else:
source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
destination = encode_check('account', op_xdr_object.body.paymentOp.destination.ed25519).decode()
asset = Asset.from_xdr_object(op_xdr_object.body.paymentOp.asset)
amount = Operation.from_xdr_amount(op_xdr_object.body.paymentOp.amount)
return cls({
'source': source,
'destination': destination,
'asset': asset,
'amount': amount,
})
class PathPayment(Operation):
def __init__(self, opts):
super(PathPayment, self).__init__(opts)
self.destination = opts.get('destination')
self.send_asset = opts.get('send_asset')
self.send_max = opts.get('send_max')
self.dest_asset = opts.get('dest_asset')
self.dest_amount = opts.get('dest_amount')
self.path = opts.get('path') # a list of paths/assets
def to_xdr_object(self):
destination = account_xdr_object(self.destination)
send_asset = self.send_asset.to_xdr_object()
dest_asset = self.dest_asset.to_xdr_object()
path_payment = Xdr.types.PathPaymentOp(send_asset, Operation.to_xdr_amount(self.send_max), destination,
dest_asset, Operation.to_xdr_amount(self.dest_amount), self.path)
self.body.type = Xdr.const.PATH_PAYMENT
self.body.pathPaymentOp = path_payment
return super(PathPayment, self).to_xdr_object()
@classmethod
def from_xdr_object(cls, op_xdr_object):
if not op_xdr_object.sourceAccount:
source = None
else:
source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
destination = encode_check('account', op_xdr_object.body.pathPaymentOp.destination.ed25519).decode()
send_asset = Asset.from_xdr_object(op_xdr_object.body.pathPaymentOp.sendAsset)
dest_asset = Asset.from_xdr_object(op_xdr_object.body.pathPaymentOp.destAsset)
send_max = Operation.from_xdr_amount(op_xdr_object.body.pathPaymentOp.sendMax)
dest_amount = Operation.from_xdr_amount(op_xdr_object.body.pathPaymentOp.destAmount)
path = []
if op_xdr_object.body.pathPaymentOp.path:
for x in op_xdr_object.body.pathPaymentOp.path:
path.append(Asset.from_xdr_object(x))
return cls({
'source': source,
'destination': destination,
'send_asset': send_asset,
'send_max': send_max,
'dest_asset': dest_asset,
'dest_amount': dest_amount,
'path': path
})
class ChangeTrust(Operation):
def __init__(self, opts):
super(ChangeTrust, self).__init__(opts)
self.line = opts.get('asset')
if opts.get('limit') is not None:
self.limit = opts.get('limit')
else:
self.limit = "922337203685.4775807"
def to_xdr_object(self):
line = self.line.to_xdr_object()
limit = Operation.to_xdr_amount(self.limit)
change_trust_op = Xdr.types.ChangeTrustOp(line, limit)
self.body.type = Xdr.const.CHANGE_TRUST
self.body.changeTrustOp = change_trust_op
return super(ChangeTrust, self).to_xdr_object()
@classmethod
def from_xdr_object(cls, op_xdr_object):
if not op_xdr_object.sourceAccount:
source = None
else:
source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
line = Asset.from_xdr_object(op_xdr_object.body.changeTrustOp.line)
limit = Operation.from_xdr_amount(op_xdr_object.body.changeTrustOp.limit)
return cls({
'source': source,
'asset': line,
'limit': limit
})
class AllowTrust(Operation):
def __init__(self, opts):
super(AllowTrust, self).__init__(opts)
self.trustor = opts.get('trustor')
self.asset_code = opts.get('asset_code')
self.authorize = opts.get('authorize')
def to_xdr_object(self):
trustor = account_xdr_object(self.trustor)
length = len(self.asset_code)
assert length <= 12
pad_length = 4 - length if length <= 4 else 12 - length
# asset_code = self.asset_code + '\x00' * pad_length
# asset_code = bytearray(asset_code, encoding='utf-8')
asset_code = bytearray(self.asset_code, 'ascii') + b'\x00' * pad_length
asset = Xdr.nullclass()
if len(asset_code) == 4:
asset.type = Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM4
asset.assetCode4 = asset_code
else:
asset.type = Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM12
asset.assetCode12 = asset_code
allow_trust_op = Xdr.types.AllowTrustOp(trustor, asset, self.authorize)
self.body.type = Xdr.const.ALLOW_TRUST
self.body.allowTrustOp = allow_trust_op
return super(AllowTrust, self).to_xdr_object()
@classmethod
def from_xdr_object(cls, op_xdr_object):
if not op_xdr_object.sourceAccount:
source = None
else:
source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
trustor = encode_check('account', op_xdr_object.body.allowTrustOp.trustor.ed25519).decode()
authorize = op_xdr_object.body.allowTrustOp.authorize
asset_type = op_xdr_object.body.allowTrustOp.asset.type
if asset_type == Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM4:
asset_code = op_xdr_object.body.allowTrustOp.asset.assetCode4.decode()
elif asset_type == Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM12:
asset_code = op_xdr_object.body.allowTrustOp.asset.assetCode12.decode()
else:
raise Exception
return cls({
'source': source,
'trustor': trustor,
'authorize': authorize,
'asset_code': asset_code
})
class SetOptions(Operation):
def __init__(self, opts):
super(SetOptions, self).__init__(opts)
self.inflation_dest = opts.get('inflation_dest')
self.clear_flags = opts.get('clear_flags')
self.set_flags = opts.get('set_flags')
self.master_weight = opts.get('master_weight')
self.low_threshold = opts.get('low_threshold')
self.med_threshold = opts.get('med_threshold')
self.high_threshold = opts.get('high_threshold')
self.home_domain = opts.get('home_domain')
self.signer_address = opts.get('signer_address')
self.signer_type = opts.get('signer_type')
self.signer_weight = opts.get('signer_weight')
if self.signer_address is not None and self.signer_type is None:
try:
decode_check('account', self.signer_address)
except DecodeError:
                raise Exception('signer_address must be a valid strkey when signer_type is not given')
self.signer_type = 'ed25519PublicKey'
if self.signer_type in ('hashX', 'preAuthTx') and \
(self.signer_address is None or len(self.signer_address) != 32):
raise Exception('hashX or preAuthTx Signer must be 32 bytes')
if self.signer_type is not None and self.signer_type not in ('ed25519PublicKey', 'hashX', 'preAuthTx'):
raise Exception('invalid signer type.')
def to_xdr_object(self):
def assert_option_array(x):
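            # XDR optional fields are encoded as lists: None becomes [], scalars are wrapped in a one-element list.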
if x is None:
return []
if not isinstance(x, list):
return [x]
return x
if self.inflation_dest is not None:
inflation_dest = [account_xdr_object(self.inflation_dest)]
else:
inflation_dest = []
self.clear_flags = assert_option_array(self.clear_flags)
self.set_flags = assert_option_array(self.set_flags)
self.master_weight = assert_option_array(self.master_weight)
self.low_threshold = assert_option_array(self.low_threshold)
self.med_threshold = assert_option_array(self.med_threshold)
self.high_threshold = assert_option_array(self.high_threshold)
self.home_domain = assert_option_array(self.home_domain)
if self.signer_address is not None and \
self.signer_type is not None and \
self.signer_weight is not None:
signer = [
Xdr.types.Signer(signer_key_xdr_object(self.signer_type, self.signer_address), self.signer_weight)]
else:
signer = []
set_options_op = Xdr.types.SetOptionsOp(inflation_dest, self.clear_flags, self.set_flags,
self.master_weight, self.low_threshold, self.med_threshold,
self.high_threshold, self.home_domain, signer)
self.body.type = Xdr.const.SET_OPTIONS
self.body.setOptionsOp = set_options_op
return super(SetOptions, self).to_xdr_object()
@classmethod
def from_xdr_object(cls, op_xdr_object):
if not op_xdr_object.sourceAccount:
source = None
else:
source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
if not op_xdr_object.body.setOptionsOp.inflationDest:
inflation_dest = None
else:
inflation_dest = encode_check('account', op_xdr_object.body.setOptionsOp.inflationDest[0].ed25519).decode()
clear_flags = op_xdr_object.body.setOptionsOp.clearFlags # list
set_flags = op_xdr_object.body.setOptionsOp.setFlags
master_weight = op_xdr_object.body.setOptionsOp.masterWeight
low_threshold = op_xdr_object.body.setOptionsOp.lowThreshold
med_threshold = op_xdr_object.body.setOptionsOp.medThreshold
high_threshold = op_xdr_object.body.setOptionsOp.highThreshold
home_domain = op_xdr_object.body.setOptionsOp.homeDomain
if op_xdr_object.body.setOptionsOp.signer:
key = op_xdr_object.body.setOptionsOp.signer[0].key
if key.type == Xdr.const.SIGNER_KEY_TYPE_ED25519:
signer_address = encode_check('account', key.ed25519).decode()
signer_type = 'ed25519PublicKey'
if key.type == Xdr.const.SIGNER_KEY_TYPE_PRE_AUTH_TX:
signer_address = key.preAuthTx
signer_type = 'preAuthTx'
if key.type == Xdr.const.SIGNER_KEY_TYPE_HASH_X:
signer_address = key.hashX
signer_type = 'hashX'
signer_weight = op_xdr_object.body.setOptionsOp.signer[0].weight
else:
signer_address = None
signer_type = None
signer_weight = None
return cls({
'source': source,
'inflation_dest': inflation_dest,
'clear_flags': clear_flags,
'set_flags': set_flags,
'master_weight': master_weight,
'low_threshold': low_threshold,
'med_threshold': med_threshold,
'high_threshold': high_threshold,
'home_domain': home_domain,
'signer_address': signer_address,
            'signer_type': signer_type,
'signer_weight': signer_weight
})
class ManageOffer(Operation):
def __init__(self, opts):
super(ManageOffer, self).__init__(opts)
self.selling = opts.get('selling') # Asset
self.buying = opts.get('buying') # Asset
self.amount = opts.get('amount')
self.price = opts.get('price')
self.offer_id = opts.get('offer_id', 0)
def to_xdr_object(self):
selling = self.selling.to_xdr_object()
buying = self.buying.to_xdr_object()
price = best_r(self.price)
price = Xdr.types.Price(price['n'], price['d'])
amount = Operation.to_xdr_amount(self.amount)
manage_offer_op = Xdr.types.ManageOfferOp(selling, buying, amount, price, self.offer_id)
self.body.type = Xdr.const.MANAGE_OFFER
self.body.manageOfferOp = manage_offer_op
return super(ManageOffer, self).to_xdr_object()
@classmethod
def from_xdr_object(cls, op_xdr_object):
if not op_xdr_object.sourceAccount:
source = None
else:
source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
selling = Asset.from_xdr_object(op_xdr_object.body.manageOfferOp.selling)
buying = Asset.from_xdr_object(op_xdr_object.body.manageOfferOp.buying)
amount = Operation.from_xdr_amount(op_xdr_object.body.manageOfferOp.amount)
n = op_xdr_object.body.manageOfferOp.price.n
d = op_xdr_object.body.manageOfferOp.price.d
price = division(n, d)
offer_id = op_xdr_object.body.manageOfferOp.offerID
return cls({
'source': source,
'selling': selling,
'buying': buying,
'amount': amount,
'price': price,
'offer_id': offer_id
})
class CreatePassiveOffer(Operation):
def __init__(self, opts):
super(CreatePassiveOffer, self).__init__(opts)
self.selling = opts.get('selling')
self.buying = opts.get('buying')
self.amount = opts.get('amount')
self.price = opts.get('price')
def to_xdr_object(self):
selling = self.selling.to_xdr_object()
buying = self.buying.to_xdr_object()
price = best_r(self.price)
price = Xdr.types.Price(price['n'], price['d'])
amount = Operation.to_xdr_amount(self.amount)
create_passive_offer_op = Xdr.types.CreatePassiveOfferOp(selling, buying, amount, price)
self.body.type = Xdr.const.CREATE_PASSIVE_OFFER
self.body.createPassiveOfferOp = create_passive_offer_op
return super(CreatePassiveOffer, self).to_xdr_object()
@classmethod
def from_xdr_object(cls, op_xdr_object):
if not op_xdr_object.sourceAccount:
source = None
else:
source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
selling = Asset.from_xdr_object(op_xdr_object.body.createPassiveOfferOp.selling)
buying = Asset.from_xdr_object(op_xdr_object.body.createPassiveOfferOp.buying)
amount = Operation.from_xdr_amount(op_xdr_object.body.createPassiveOfferOp.amount)
n = op_xdr_object.body.createPassiveOfferOp.price.n
d = op_xdr_object.body.createPassiveOfferOp.price.d
price = division(n, d)
return cls({
'source': source,
'selling': selling,
'buying': buying,
'amount': amount,
'price': price
})
class AccountMerge(Operation):
def __init__(self, opts):
super(AccountMerge, self).__init__(opts)
self.destination = opts.get('destination')
def to_xdr_object(self):
destination = account_xdr_object(self.destination)
self.body.type = Xdr.const.ACCOUNT_MERGE
self.body.destination = destination
return super(AccountMerge, self).to_xdr_object()
@classmethod
def from_xdr_object(cls, op_xdr_object):
if not op_xdr_object.sourceAccount:
source = None
else:
source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
destination = encode_check('account', op_xdr_object.body.destination.ed25519).decode()
return cls({
'source': source,
'destination': destination
})
class Inflation(Operation):
def __init__(self, opts):
super(Inflation, self).__init__(opts)
def to_xdr_object(self):
self.body.type = Xdr.const.INFLATION
return super(Inflation, self).to_xdr_object()
@classmethod
def from_xdr_object(cls, op_xdr_object):
if not op_xdr_object.sourceAccount:
source = None
else:
source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
return cls({'source': source})
class ManageData(Operation):
def __init__(self, opts):
super(ManageData, self).__init__(opts)
self.data_name = opts.get('data_name')
self.data_value = opts.get('data_value')
if len(self.data_name) > 64 or (self.data_value is not None and len(self.data_value) > 64):
raise XdrLengthError("Data or value should be <= 64 bytes (ascii encoded). ")
def to_xdr_object(self):
data_name = bytearray(self.data_name, encoding='utf-8')
if self.data_value is not None:
data_value = [bytearray(self.data_value, 'utf-8')]
else:
data_value = []
manage_data_op = Xdr.types.ManageDataOp(data_name, data_value)
self.body.type = Xdr.const.MANAGE_DATA
self.body.manageDataOp = manage_data_op
return super(ManageData, self).to_xdr_object()
@classmethod
def from_xdr_object(cls, op_xdr_object):
if not op_xdr_object.sourceAccount:
source = None
else:
source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
data_name = op_xdr_object.body.manageDataOp.dataName.decode()
if op_xdr_object.body.manageDataOp.dataValue:
data_value = op_xdr_object.body.manageDataOp.dataValue[0].decode()
else:
data_value = None
return cls({
'source': source,
'data_name': data_name,
'data_value': data_value
})
|
[
"base64.b64decode"
] |
[((1531, 1552), 'base64.b64decode', 'base64.b64decode', (['xdr'], {}), '(xdr)\n', (1547, 1552), False, 'import base64\n')]
|
import sqlite3
class DatabaseError(Exception):
pass
class Database:
def __init__(self, db_path):
self.db_path = db_path
# connect
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
# check if table exists, or create one
tables = cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='botamusique';").fetchall()
if len(tables) == 0:
cursor.execute("CREATE TABLE botamusique (section text, option text, value text, UNIQUE(section, option))")
conn.commit()
conn.close()
def get(self, section, option, **kwargs):
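        # Return the stored value for (section, option); if absent, return the
        # 'fallback' keyword argument when given, otherwise raise DatabaseError.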
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
result = cursor.execute("SELECT value FROM botamusique WHERE section=? AND option=?", (section, option)).fetchall()
conn.close()
if len(result) > 0:
return result[0][0]
else:
if 'fallback' in kwargs:
return kwargs['fallback']
else:
raise DatabaseError("Item not found")
def getboolean(self, section, option, **kwargs):
return bool(int(self.get(section, option, **kwargs)))
def getfloat(self, section, option, **kwargs):
return float(self.get(section, option, **kwargs))
def getint(self, section, option, **kwargs):
return int(self.get(section, option, **kwargs))
def set(self, section, option, value):
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
cursor.execute('''
INSERT OR REPLACE INTO botamusique (section, option, value)
VALUES (?, ?, ?)
''', (section, option, value))
conn.commit()
conn.close()
def has_option(self, section, option):
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
result = cursor.execute("SELECT value FROM botamusique WHERE section=? AND option=?", (section, option)).fetchall()
conn.close()
if len(result) > 0:
return True
else:
return False
def remove_option(self, section, option):
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
cursor.execute("DELETE FROM botamusique WHERE section=? AND option=?", (section, option))
conn.commit()
conn.close()
def remove_section(self, section):
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
cursor.execute("DELETE FROM botamusique WHERE section=?", (section, ))
conn.commit()
conn.close()
def items(self, section):
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
results = cursor.execute("SELECT option, value FROM botamusique WHERE section=?", (section, )).fetchall()
conn.close()
return map(lambda v: (v[0], v[1]), results)
def drop_table(self):
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()
cursor.execute("DROP TABLE botamusique")
conn.close()
|
[
"sqlite3.connect"
] |
[((172, 201), 'sqlite3.connect', 'sqlite3.connect', (['self.db_path'], {}), '(self.db_path)\n', (187, 201), False, 'import sqlite3\n'), ((660, 689), 'sqlite3.connect', 'sqlite3.connect', (['self.db_path'], {}), '(self.db_path)\n', (675, 689), False, 'import sqlite3\n'), ((1483, 1512), 'sqlite3.connect', 'sqlite3.connect', (['self.db_path'], {}), '(self.db_path)\n', (1498, 1512), False, 'import sqlite3\n'), ((1813, 1842), 'sqlite3.connect', 'sqlite3.connect', (['self.db_path'], {}), '(self.db_path)\n', (1828, 1842), False, 'import sqlite3\n'), ((2172, 2201), 'sqlite3.connect', 'sqlite3.connect', (['self.db_path'], {}), '(self.db_path)\n', (2187, 2201), False, 'import sqlite3\n'), ((2429, 2458), 'sqlite3.connect', 'sqlite3.connect', (['self.db_path'], {}), '(self.db_path)\n', (2444, 2458), False, 'import sqlite3\n'), ((2658, 2687), 'sqlite3.connect', 'sqlite3.connect', (['self.db_path'], {}), '(self.db_path)\n', (2673, 2687), False, 'import sqlite3\n'), ((2949, 2978), 'sqlite3.connect', 'sqlite3.connect', (['self.db_path'], {}), '(self.db_path)\n', (2964, 2978), False, 'import sqlite3\n')]
|
from flask_assets import Bundle, Environment
scss_styles = Bundle( # pylint: disable=invalid-name
'src/app/scss/styles.scss',
filters='libsass',
depends='**/*.scss',
output='build/css/styles.css',
)
css_styles = Bundle( # pylint: disable=invalid-name
scss_styles,
filters='autoprefixer6',
output='dist/css/styles.css',
)
css_min_styles = Bundle( # pylint: disable=invalid-name
scss_styles,
filters='autoprefixer6,cleancss',
output='dist/css/styles.min.css',
)
common_js = Bundle( # pylint: disable=invalid-name
'src/vendor/jquery/jquery.min.js',
'src/vendor/popper.js/popper.min.js',
'src/vendor/bootstrap/index.js',
'src/vendor/bootstrap/util.js',
'src/vendor/bootstrap/collapse.js', # For navbar.
'src/vendor/bootstrap/alert.js',
'src/vendor/bootstrap/button.js',
'src/vendor/bootstrap/dropdown.js',
'src/vendor/bootstrap/modal.js',
'src/vendor/bootstrap/tab.js',
filters='jsmin',
output='dist/js/common.min.js',
)
search_js = Bundle( # pylint: disable=invalid-name
'kerko/kerko/js/search.js',
filters='jsmin',
output='dist/js/search.min.js',
)
item_js = Bundle( # pylint: disable=invalid-name
'kerko/kerko/js/item.js',
filters='jsmin',
output='dist/js/item.min.js',
)
print_js = Bundle( # pylint: disable=invalid-name
'kerko/kerko/js/print.js',
filters='jsmin',
output='dist/js/print.min.js',
)
class EnvironmentWithBundles(Environment):
"""
An assets environment that registers its own bundles.
Registering the bundles at `init_app` time lets it refer to the app config.
"""
def init_app(self, app):
super().init_app(app)
if app.config['ASSETS_DEBUG']:
assets.register('css_styles', css_styles)
else:
assets.register('css_styles', css_min_styles)
assets.register('common_js', common_js)
assets.register('search_js', search_js)
assets.register('item_js', item_js)
assets.register('print_js', print_js)
assets = EnvironmentWithBundles() # pylint: disable=invalid-name
|
[
"flask_assets.Bundle"
] |
[((61, 170), 'flask_assets.Bundle', 'Bundle', (['"""src/app/scss/styles.scss"""'], {'filters': '"""libsass"""', 'depends': '"""**/*.scss"""', 'output': '"""build/css/styles.css"""'}), "('src/app/scss/styles.scss', filters='libsass', depends='**/*.scss',\n output='build/css/styles.css')\n", (67, 170), False, 'from flask_assets import Bundle, Environment\n'), ((231, 305), 'flask_assets.Bundle', 'Bundle', (['scss_styles'], {'filters': '"""autoprefixer6"""', 'output': '"""dist/css/styles.css"""'}), "(scss_styles, filters='autoprefixer6', output='dist/css/styles.css')\n", (237, 305), False, 'from flask_assets import Bundle, Environment\n'), ((370, 462), 'flask_assets.Bundle', 'Bundle', (['scss_styles'], {'filters': '"""autoprefixer6,cleancss"""', 'output': '"""dist/css/styles.min.css"""'}), "(scss_styles, filters='autoprefixer6,cleancss', output=\n 'dist/css/styles.min.css')\n", (376, 462), False, 'from flask_assets import Bundle, Environment\n'), ((517, 938), 'flask_assets.Bundle', 'Bundle', (['"""src/vendor/jquery/jquery.min.js"""', '"""src/vendor/popper.js/popper.min.js"""', '"""src/vendor/bootstrap/index.js"""', '"""src/vendor/bootstrap/util.js"""', '"""src/vendor/bootstrap/collapse.js"""', '"""src/vendor/bootstrap/alert.js"""', '"""src/vendor/bootstrap/button.js"""', '"""src/vendor/bootstrap/dropdown.js"""', '"""src/vendor/bootstrap/modal.js"""', '"""src/vendor/bootstrap/tab.js"""'], {'filters': '"""jsmin"""', 'output': '"""dist/js/common.min.js"""'}), "('src/vendor/jquery/jquery.min.js',\n 'src/vendor/popper.js/popper.min.js', 'src/vendor/bootstrap/index.js',\n 'src/vendor/bootstrap/util.js', 'src/vendor/bootstrap/collapse.js',\n 'src/vendor/bootstrap/alert.js', 'src/vendor/bootstrap/button.js',\n 'src/vendor/bootstrap/dropdown.js', 'src/vendor/bootstrap/modal.js',\n 'src/vendor/bootstrap/tab.js', filters='jsmin', output=\n 'dist/js/common.min.js')\n", (523, 938), False, 'from flask_assets import Bundle, Environment\n'), ((1024, 1112), 'flask_assets.Bundle', 'Bundle', (['"""kerko/kerko/js/search.js"""'], {'filters': '"""jsmin"""', 'output': '"""dist/js/search.min.js"""'}), "('kerko/kerko/js/search.js', filters='jsmin', output=\n 'dist/js/search.min.js')\n", (1030, 1112), False, 'from flask_assets import Bundle, Environment\n'), ((1165, 1244), 'flask_assets.Bundle', 'Bundle', (['"""kerko/kerko/js/item.js"""'], {'filters': '"""jsmin"""', 'output': '"""dist/js/item.min.js"""'}), "('kerko/kerko/js/item.js', filters='jsmin', output='dist/js/item.min.js')\n", (1171, 1244), False, 'from flask_assets import Bundle, Environment\n'), ((1303, 1389), 'flask_assets.Bundle', 'Bundle', (['"""kerko/kerko/js/print.js"""'], {'filters': '"""jsmin"""', 'output': '"""dist/js/print.min.js"""'}), "('kerko/kerko/js/print.js', filters='jsmin', output=\n 'dist/js/print.min.js')\n", (1309, 1389), False, 'from flask_assets import Bundle, Environment\n')]
|
# Python Standard Libraries
import warnings
import time
import os
import sys
from pathlib import Path
# Third party imports
# fancy prints
import numpy as np
from tqdm import tqdm
# grAdapt package
import grAdapt.utils.math
import grAdapt.utils.misc
import grAdapt.utils.sampling
from grAdapt import surrogate as sur, optimizer as opt, escape as esc
from grAdapt.space.transformer import Transformer
from grAdapt.sampling import initializer as init, equidistributed as equi
class Asynchronous:
def __init__(self, bounds, surrogate=None, optimizer=None, sampling_method=None,
escape=None, training=None, random_state=1,
n_evals='auto', eps=1e-3, f_min=-np.inf, f_min_eps=1e-2, n_random_starts='auto',
auto_checkpoint=False, show_progressbar=True, prints=True):
"""
Parameters
----------
bounds : list
list of tuples e.g. [(-5, 5), (-5, 5)]
surrogate : grAdapt Surrogate object
optimizer : grAdapt Optimizer object
sampling_method : Sampling Method to be used. static method from utils
escape : grAdapt Escape object
training : (X, y) with X shape (n, m) and y shape (n,)
random_state : integer
random_state integer sets numpy seed
"""
# Stock module settings
self.bounds = bounds
# seed
self.random_state = random_state
np.random.seed(self.random_state)
if surrogate is None:
self.surrogate = sur.GPRSlidingWindow()
else:
self.surrogate = surrogate
if optimizer is None:
self.optimizer = opt.AMSGradBisection(surrogate=self.surrogate)
else:
self.optimizer = optimizer
if surrogate is None:
raise Exception('If optimizer is passed, then surrogate must be passed, too.')
if sampling_method is None:
self.sampling_method = equi.MaximalMinDistance()
else:
self.sampling_method = sampling_method
if escape is None:
self.escape = esc.NormalDistributionDecay(surrogate=self.surrogate, sampling_method=self.sampling_method)
else:
self.escape = escape
if surrogate is None or sampling_method is None:
raise Exception('When passing an escape function, surrogate and sampling_method must be passed, too.')
# other settings
# continue optimizing
self.training = training
if training is not None:
self.X = list(training[0])
self.y = list(training[1])
if len(self.X) != len(self.y):
raise AssertionError('Training data not valid. Length of X and y must be the same.')
# self.fit(self.X, self.y)
else:
self.X = list(grAdapt.utils.sampling.sample_points_bounds(self.bounds, 11))
self.y = []
self.n_evals = n_evals
self.eps = eps
self.f_min = f_min
self.f_min_eps = f_min_eps
self.n_random_starts = n_random_starts
# keep track of checkpoint files
self.checkpoint_file = None
self.auto_checkpoint = auto_checkpoint
# results
self.res = None
self.show_progressbar = show_progressbar
self.prints = prints
# save current iteration
if training is not None:
self.iteration = len(self.X) - 1
else:
self.iteration = 0
def escape_x_criteria(self, x_train, iteration):
"""Checks whether new point is different than the latest point by the euclidean distance
Checks whether new point is inside the defined search space/bounds.
Returns True if one of the conditions above are fulfilled.
Parameters
----------
x_train : ndarray (n, d)
iteration : integer
Returns
-------
boolean
"""
# x convergence
# escape_convergence = (np.linalg.norm(x_train[iteration - 1] - x_train[iteration])) < self.eps
n_hist = 2
escape_convergence_history = any(
(np.linalg.norm(x_train[iteration - (n_hist + 1):] - x_train[iteration - 1], axis=1)) < self.eps)
# check whether point is inside bounds
escape_valid = not (grAdapt.utils.sampling.inside_bounds(self.bounds, x_train[iteration - 1]))
# escape_x = escape_convergence or escape_valid
escape_x = escape_convergence_history or escape_valid
return escape_x
@staticmethod
def escape_y_criteria(y_train, iteration, pct):
"""
Parameters
----------
y_train : array-like (n, d)
iteration : integer
pct : numeric
pct should be less than 1.
Returns
-------
boolean
"""
try:
return grAdapt.utils.misc.is_inside_relative_range(y_train[iteration - 1], y_train[iteration - 2], pct)
except:
return False
def dummy(self):
return 0
def ask(self):
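        # Ask/tell interface: return the next parameter set to evaluate, either one of the
        # initial sample points or a point proposed by the optimizer (or the escape strategy).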
if len(self.X) > len(self.y): # initial points
self.iteration += 1
# if user asks consecutively without telling
if self.iteration == len(self.y) + 2:
self.iteration -= 1
warnings.warn("Tell the optimizer/model after you ask.", RuntimeWarning)
return self.X[self.iteration - 1]
else:
# gradient parameters specific for the surrogate model
surrogate_grad_params = [np.array(self.X[:self.iteration]), np.array(self.y[:self.iteration]),
self.dummy, self.bounds]
# apply optimizer
return_x = self.optimizer.run(self.X[self.iteration - 1],
grAdapt.utils.misc.epochs(self.iteration),
surrogate_grad_params)
# escape indicator variables
escape_x_criteria_boolean = self.escape_x_criteria(np.array(self.X), self.iteration)
escape_y_criteria_boolean = self.escape_y_criteria(self.y, self.iteration, self.f_min_eps)
escape_boolean = escape_x_criteria_boolean or escape_y_criteria_boolean
# sample new point if must escape or bounds not valid
if escape_boolean:
return_x = self.escape.get_point(self.X[:self.iteration], self.y[:self.iteration],
self.iteration, self.bounds)
self.iteration += 1
# save current training data
return return_x
def tell(self, next_x, f_val):
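        # Report the objective value for the point returned by ask() and refit the surrogate.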
if len(self.X) > len(self.y):
# no need to append x
self.y.append(f_val)
elif len(self.X) == len(self.y):
# append
self.X.append(next_x)
self.y.append(f_val)
else:
raise RuntimeError('More function values available than x values/parameter sets.')
# Fit data on surrogate model
        self.surrogate.fit(np.array(self.X[:self.iteration]), np.array(self.y[:self.iteration]))
|
[
"numpy.random.seed",
"grAdapt.sampling.equidistributed.MaximalMinDistance",
"grAdapt.surrogate.GPRSlidingWindow",
"grAdapt.optimizer.AMSGradBisection",
"numpy.array",
"numpy.linalg.norm",
"warnings.warn",
"grAdapt.escape.NormalDistributionDecay"
] |
[((1506, 1539), 'numpy.random.seed', 'np.random.seed', (['self.random_state'], {}), '(self.random_state)\n', (1520, 1539), True, 'import numpy as np\n'), ((1600, 1622), 'grAdapt.surrogate.GPRSlidingWindow', 'sur.GPRSlidingWindow', ([], {}), '()\n', (1620, 1622), True, 'from grAdapt import surrogate as sur, optimizer as opt, escape as esc\n'), ((1736, 1782), 'grAdapt.optimizer.AMSGradBisection', 'opt.AMSGradBisection', ([], {'surrogate': 'self.surrogate'}), '(surrogate=self.surrogate)\n', (1756, 1782), True, 'from grAdapt import surrogate as sur, optimizer as opt, escape as esc\n'), ((2037, 2062), 'grAdapt.sampling.equidistributed.MaximalMinDistance', 'equi.MaximalMinDistance', ([], {}), '()\n', (2060, 2062), True, 'from grAdapt.sampling import initializer as init, equidistributed as equi\n'), ((2182, 2278), 'grAdapt.escape.NormalDistributionDecay', 'esc.NormalDistributionDecay', ([], {'surrogate': 'self.surrogate', 'sampling_method': 'self.sampling_method'}), '(surrogate=self.surrogate, sampling_method=self.\n sampling_method)\n', (2209, 2278), True, 'from grAdapt import surrogate as sur, optimizer as opt, escape as esc\n'), ((7183, 7216), 'numpy.array', 'np.array', (['self.X[:self.iteration]'], {}), '(self.X[:self.iteration])\n', (7191, 7216), True, 'import numpy as np\n'), ((7218, 7251), 'numpy.array', 'np.array', (['self.X[:self.iteration]'], {}), '(self.X[:self.iteration])\n', (7226, 7251), True, 'import numpy as np\n'), ((4235, 4322), 'numpy.linalg.norm', 'np.linalg.norm', (['(x_train[iteration - (n_hist + 1):] - x_train[iteration - 1])'], {'axis': '(1)'}), '(x_train[iteration - (n_hist + 1):] - x_train[iteration - 1],\n axis=1)\n', (4249, 4322), True, 'import numpy as np\n'), ((5411, 5483), 'warnings.warn', 'warnings.warn', (['"""Tell the optimizer/model after you ask."""', 'RuntimeWarning'], {}), "('Tell the optimizer/model after you ask.', RuntimeWarning)\n", (5424, 5483), False, 'import warnings\n'), ((5650, 5683), 'numpy.array', 'np.array', (['self.X[:self.iteration]'], {}), '(self.X[:self.iteration])\n', (5658, 5683), True, 'import numpy as np\n'), ((5685, 5718), 'numpy.array', 'np.array', (['self.y[:self.iteration]'], {}), '(self.y[:self.iteration])\n', (5693, 5718), True, 'import numpy as np\n'), ((6138, 6154), 'numpy.array', 'np.array', (['self.X'], {}), '(self.X)\n', (6146, 6154), True, 'import numpy as np\n')]
|
import unittest
from inan.util.setproctitle import setproctitle
class TestSetProcTitle(unittest.TestCase):
def test_does_not_crash(self):
setproctitle("inan test title")
|
[
"inan.util.setproctitle.setproctitle"
] |
[((153, 184), 'inan.util.setproctitle.setproctitle', 'setproctitle', (['"""inan test title"""'], {}), "('inan test title')\n", (165, 184), False, 'from inan.util.setproctitle import setproctitle\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf import yamlutil
from asdf.versioning import AsdfSpec
from ..types import AstronomyDataModelType
from ..obs_context import ObsContext
class ObsContextType(AstronomyDataModelType):
name = 'datamodel/obs_context'
version = '1.0.0'
supported_versions = ['1.0.0']
types = ['astronomy_datamodels.obs_context.ObsContext']
requires = ["astropy"]
@classmethod
def to_tree(cls, node, ctx): # to ASDF representation
d = {}
if node.telescope is not None:
d['telescope'] = yamlutil.custom_tree_to_tagged_tree(node.telescope, ctx)
if node.instrument is not None:
d['instrument'] = yamlutil.custom_tree_to_tagged_tree(node.instrument, ctx)
if node.proposal is not None:
d['proposal'] = yamlutil.custom_tree_to_tagged_tree(node.proposal, ctx)
if node.observers is not None:
d['observers'] = yamlutil.custom_tree_to_tagged_tree(node.observers, ctx)
if node.target is not None:
d['target'] = yamlutil.custom_tree_to_tagged_tree(node.target, ctx)
if node.associated_data is not None:
d['associated_data'] = yamlutil.custom_tree_to_tagged_tree(node.associated_data, ctx)
if node.meta is not None:
d['meta'] = yamlutil.custom_tree_to_tagged_tree(node.meta, ctx)
return d
@classmethod
def from_tree(cls, node, ctx): # from ASDF to object representation
obscontext = ObsContext()
if 'telescope' in node:
obscontext.telescope = yamlutil.tagged_tree_to_custom_tree(node['telescope'], ctx)
if 'instrument' in node:
obscontext.instrument = yamlutil.tagged_tree_to_custom_tree(node['instrument'], ctx)
if 'proposal' in node:
obscontext.proposal = yamlutil.tagged_tree_to_custom_tree(node['proposal'], ctx)
if 'observers' in node:
obscontext.observers = yamlutil.tagged_tree_to_custom_tree(node['observers'], ctx)
if 'target' in node:
obscontext.target = yamlutil.tagged_tree_to_custom_tree(node['target'], ctx)
if 'associated_data' in node:
obscontext.associated_data = yamlutil.tagged_tree_to_custom_tree(node['associated_data'], ctx)
if 'meta' in node:
obscontext.meta = yamlutil.tagged_tree_to_custom_tree(node['meta'], ctx)
return obscontext
@classmethod
def assert_equal(cls, old, new):
pass
|
[
"asdf.yamlutil.tagged_tree_to_custom_tree",
"asdf.yamlutil.custom_tree_to_tagged_tree"
] |
[((617, 673), 'asdf.yamlutil.custom_tree_to_tagged_tree', 'yamlutil.custom_tree_to_tagged_tree', (['node.telescope', 'ctx'], {}), '(node.telescope, ctx)\n', (652, 673), False, 'from asdf import yamlutil\n'), ((744, 801), 'asdf.yamlutil.custom_tree_to_tagged_tree', 'yamlutil.custom_tree_to_tagged_tree', (['node.instrument', 'ctx'], {}), '(node.instrument, ctx)\n', (779, 801), False, 'from asdf import yamlutil\n'), ((868, 923), 'asdf.yamlutil.custom_tree_to_tagged_tree', 'yamlutil.custom_tree_to_tagged_tree', (['node.proposal', 'ctx'], {}), '(node.proposal, ctx)\n', (903, 923), False, 'from asdf import yamlutil\n'), ((992, 1048), 'asdf.yamlutil.custom_tree_to_tagged_tree', 'yamlutil.custom_tree_to_tagged_tree', (['node.observers', 'ctx'], {}), '(node.observers, ctx)\n', (1027, 1048), False, 'from asdf import yamlutil\n'), ((1111, 1164), 'asdf.yamlutil.custom_tree_to_tagged_tree', 'yamlutil.custom_tree_to_tagged_tree', (['node.target', 'ctx'], {}), '(node.target, ctx)\n', (1146, 1164), False, 'from asdf import yamlutil\n'), ((1245, 1307), 'asdf.yamlutil.custom_tree_to_tagged_tree', 'yamlutil.custom_tree_to_tagged_tree', (['node.associated_data', 'ctx'], {}), '(node.associated_data, ctx)\n', (1280, 1307), False, 'from asdf import yamlutil\n'), ((1366, 1417), 'asdf.yamlutil.custom_tree_to_tagged_tree', 'yamlutil.custom_tree_to_tagged_tree', (['node.meta', 'ctx'], {}), '(node.meta, ctx)\n', (1401, 1417), False, 'from asdf import yamlutil\n'), ((1627, 1686), 'asdf.yamlutil.tagged_tree_to_custom_tree', 'yamlutil.tagged_tree_to_custom_tree', (["node['telescope']", 'ctx'], {}), "(node['telescope'], ctx)\n", (1662, 1686), False, 'from asdf import yamlutil\n'), ((1756, 1816), 'asdf.yamlutil.tagged_tree_to_custom_tree', 'yamlutil.tagged_tree_to_custom_tree', (["node['instrument']", 'ctx'], {}), "(node['instrument'], ctx)\n", (1791, 1816), False, 'from asdf import yamlutil\n'), ((1882, 1940), 'asdf.yamlutil.tagged_tree_to_custom_tree', 'yamlutil.tagged_tree_to_custom_tree', (["node['proposal']", 'ctx'], {}), "(node['proposal'], ctx)\n", (1917, 1940), False, 'from asdf import yamlutil\n'), ((2008, 2067), 'asdf.yamlutil.tagged_tree_to_custom_tree', 'yamlutil.tagged_tree_to_custom_tree', (["node['observers']", 'ctx'], {}), "(node['observers'], ctx)\n", (2043, 2067), False, 'from asdf import yamlutil\n'), ((2129, 2185), 'asdf.yamlutil.tagged_tree_to_custom_tree', 'yamlutil.tagged_tree_to_custom_tree', (["node['target']", 'ctx'], {}), "(node['target'], ctx)\n", (2164, 2185), False, 'from asdf import yamlutil\n'), ((2265, 2330), 'asdf.yamlutil.tagged_tree_to_custom_tree', 'yamlutil.tagged_tree_to_custom_tree', (["node['associated_data']", 'ctx'], {}), "(node['associated_data'], ctx)\n", (2300, 2330), False, 'from asdf import yamlutil\n'), ((2388, 2442), 'asdf.yamlutil.tagged_tree_to_custom_tree', 'yamlutil.tagged_tree_to_custom_tree', (["node['meta']", 'ctx'], {}), "(node['meta'], ctx)\n", (2423, 2442), False, 'from asdf import yamlutil\n')]
|
import pandas as pd
from bs4 import BeautifulSoup
from urllib.request import urlopen
class nba_request():
def __init__(self):
self.url = 'https://www.basketball-reference.com/leagues/NBA_' # NOT A FULL URL
def totals(self, url, year):
""" This gets the total statistics in a season
:param url: an attribute of nba_request object
:param year: an integer as a season year
:return:
df: a data pandas frame
"""
return self.parse_url(url, '_totals.html', year)
def per_game(self,url, year):
""" This gets the per-game statistics in a season
:param url: an attribute of nba_request object
:param year: an integer as a season year
:return:
            df: a pandas DataFrame
"""
return self.parse_url(url, '_per_game.html', year)
def per_36(self, url, year):
""" This gets the per-36-game statistics in a season
:param url: an attribute of nba_request object
:param year: an integer as a season year
:return:
            df: a pandas DataFrame
"""
return self.parse_url(url, '_per_minute.html', year)
def per_100(self, url, year):
""" This gets the per-100-game statistics in a season
:param url: an attribute of nba_request object
:param year: an integer as a season year
:return:
            df: a pandas DataFrame
"""
return self.parse_url(url, '_per_poss.html', year)
def advanced(self, url, year):
""" This gets the advanced statistics in a season
:param url: an attribute of nba_request object
:param year: an integer as a season year
:return:
            df: a pandas DataFrame
"""
return self.parse_url(url, '_advanced.html', year)
def play(self, url, year):
""" This gets the play-by-play statistics in a season
:param url: an attribute of nba_request object
:param year: an integer as a season year
:return:
            df: a pandas DataFrame
"""
return self.parse_url(url, '_play-by-play.html', year)
def shooting(self, url, year):
""" This gets the shooting statistics in a season
:param url: an attribute of nba_request object
:param year: an integer as a season year
:return:
            df: a pandas DataFrame
"""
return self.parse_url(url, '_shooting.html', year)
def adjusted_shooting(self, url, year):
""" This gets the adjusted shooting statistics in a season
:param url: an attribute of nba_request object
:param year: an integer as a season year
:return:
            df: a pandas DataFrame
"""
return self.parse_url(url, '_adj_shooting.html', year)
def parse_url(self, url, extension, year):
""" Return a panda dataframe based on the extension and season
:param url: (string) an attribute of nba_request object
:param extension: (string) user specified
:param year: (integer) user specified
:return:
            stats: a pandas DataFrame
"""
# NBA season we will be analyzing
        # URL of the page we will be scraping
merger = "{}" + extension
temp = url + merger.format(year)
# this is the HTML from the given URL
html = urlopen(temp)
soup = BeautifulSoup(html, features='lxml')
# use findALL() to get the column headers
soup.findAll('tr', limit=2)
# use getText()to extract the text we need into a list
headers = [th.getText() for th in soup.findAll('tr', limit=2)[0].findAll('th')]
# exclude the first column as we will not need the ranking order from Basketball Reference for the analysis
headers = headers[1:]
# avoid the first header row
rows = soup.findAll('tr')[1:]
player_stats = [[td.getText() for td in rows[i].findAll('td')]
for i in range(len(rows))]
stats = pd.DataFrame(player_stats, columns = headers)
return stats
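# --- Editor's addition: hedged usage sketch, not part of the original class ---
# Illustrates how nba_request defined above might be called; the season year
# 2020 is an arbitrary example and the request hits basketball-reference.com.
if __name__ == "__main__":
    scraper = nba_request()
    per_game_2020 = scraper.per_game(scraper.url, 2020)
    print(per_game_2020.head())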
|
[
"bs4.BeautifulSoup",
"urllib.request.urlopen",
"pandas.DataFrame"
] |
[((3433, 3446), 'urllib.request.urlopen', 'urlopen', (['temp'], {}), '(temp)\n', (3440, 3446), False, 'from urllib.request import urlopen\n'), ((3462, 3498), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html'], {'features': '"""lxml"""'}), "(html, features='lxml')\n", (3475, 3498), False, 'from bs4 import BeautifulSoup\n'), ((4111, 4154), 'pandas.DataFrame', 'pd.DataFrame', (['player_stats'], {'columns': 'headers'}), '(player_stats, columns=headers)\n', (4123, 4154), True, 'import pandas as pd\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""CLI Interface to Walletlib
This is a simple implementation to allow walletlib to be used from the cli.
It will certainly gain more features as they are added. Currently it takes a wallet.dat file and dumps either a full
set of its contents or just the keys out.
"""
import click
from walletlib import Walletdat, ProtobufWallet
import json
@click.command()
@click.argument("filename", type=click.Path(exists=True))
@click.option("-p", "--password", help="Password if any", type=click.STRING)
@click.option(
"-o", "--output", help="File to save to. If not set, results only will be displayed"
)
@click.option(
"-v",
"--versionprefix",
type=int,
help="Force output to use this p2pkh version byte",
)
@click.option(
"-s", "--secretprefix", type=int, help="Force output to use this WIF version byte"
)
@click.option("--keys", is_flag=True, help="Only dump keys.")
def main(filename, password, output, versionprefix, secretprefix, keys):
if filename.endswith(".dat"):
w = Walletdat.load(filename)
click.echo("Loaded file")
if password:
w.parse(passphrase=str(password))
else:
w.parse()
click.echo(
"Found {} keypairs and {} transactions".format(len(w.keypairs), len(w.txes))
)
click.echo("Default version byte: {}".format(w.default_wifnetwork))
if keys:
if not output:
d = w.dump_keys(version=versionprefix, privkey_prefix_override=secretprefix)
click.echo(json.dumps(d, sort_keys=True, indent=4))
else:
w.dump_keys(output, version=versionprefix, privkey_prefix_override=secretprefix)
else:
if not output:
d = w.dump_all(version=versionprefix, privkey_prefix_override=secretprefix)
click.echo(json.dumps(d, sort_keys=True, indent=4))
else:
w.dump_all(output, version=versionprefix, privkey_prefix_override=secretprefix)
click.echo("Done")
else:
try:
w = ProtobufWallet.load(filename)
click.echo("Loaded file")
if password:
w.parse(passphrase=str(password))
else:
w.parse()
click.echo("Found {} keypairs and {} transactions".format(len(w.keypairs), len(w.txes)))
click.echo("Default version byte: {}".format(w.default_wifnetwork))
if keys:
if not output:
d = w.dump_keys()
click.echo(json.dumps(d, sort_keys=True, indent=4))
else:
w.dump_keys(output)
else:
if not output:
d = w.dump_all()
click.echo(json.dumps(d, sort_keys=True, indent=4))
else:
w.dump_all(output)
click.echo("Done")
        except Exception:
click.echo("Error, cannot read wallet file")
|
[
"walletlib.ProtobufWallet.load",
"click.option",
"click.echo",
"walletlib.Walletdat.load",
"click.command",
"json.dumps",
"click.Path"
] |
[((395, 410), 'click.command', 'click.command', ([], {}), '()\n', (408, 410), False, 'import click\n'), ((470, 545), 'click.option', 'click.option', (['"""-p"""', '"""--password"""'], {'help': '"""Password if any"""', 'type': 'click.STRING'}), "('-p', '--password', help='Password if any', type=click.STRING)\n", (482, 545), False, 'import click\n'), ((547, 650), 'click.option', 'click.option', (['"""-o"""', '"""--output"""'], {'help': '"""File to save to. If not set, results only will be displayed"""'}), "('-o', '--output', help=\n 'File to save to. If not set, results only will be displayed')\n", (559, 650), False, 'import click\n'), ((653, 757), 'click.option', 'click.option', (['"""-v"""', '"""--versionprefix"""'], {'type': 'int', 'help': '"""Force output to use this p2pkh version byte"""'}), "('-v', '--versionprefix', type=int, help=\n 'Force output to use this p2pkh version byte')\n", (665, 757), False, 'import click\n'), ((773, 874), 'click.option', 'click.option', (['"""-s"""', '"""--secretprefix"""'], {'type': 'int', 'help': '"""Force output to use this WIF version byte"""'}), "('-s', '--secretprefix', type=int, help=\n 'Force output to use this WIF version byte')\n", (785, 874), False, 'import click\n'), ((877, 937), 'click.option', 'click.option', (['"""--keys"""'], {'is_flag': '(True)', 'help': '"""Only dump keys."""'}), "('--keys', is_flag=True, help='Only dump keys.')\n", (889, 937), False, 'import click\n'), ((1057, 1081), 'walletlib.Walletdat.load', 'Walletdat.load', (['filename'], {}), '(filename)\n', (1071, 1081), False, 'from walletlib import Walletdat, ProtobufWallet\n'), ((1090, 1115), 'click.echo', 'click.echo', (['"""Loaded file"""'], {}), "('Loaded file')\n", (1100, 1115), False, 'import click\n'), ((2057, 2075), 'click.echo', 'click.echo', (['"""Done"""'], {}), "('Done')\n", (2067, 2075), False, 'import click\n'), ((444, 467), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (454, 467), False, 'import click\n'), ((2115, 2144), 'walletlib.ProtobufWallet.load', 'ProtobufWallet.load', (['filename'], {}), '(filename)\n', (2134, 2144), False, 'from walletlib import Walletdat, ProtobufWallet\n'), ((2157, 2182), 'click.echo', 'click.echo', (['"""Loaded file"""'], {}), "('Loaded file')\n", (2167, 2182), False, 'import click\n'), ((2938, 2956), 'click.echo', 'click.echo', (['"""Done"""'], {}), "('Done')\n", (2948, 2956), False, 'import click\n'), ((2985, 3029), 'click.echo', 'click.echo', (['"""Error, cannot read wallet file"""'], {}), "('Error, cannot read wallet file')\n", (2995, 3029), False, 'import click\n'), ((1578, 1617), 'json.dumps', 'json.dumps', (['d'], {'sort_keys': '(True)', 'indent': '(4)'}), '(d, sort_keys=True, indent=4)\n', (1588, 1617), False, 'import json\n'), ((1894, 1933), 'json.dumps', 'json.dumps', (['d'], {'sort_keys': '(True)', 'indent': '(4)'}), '(d, sort_keys=True, indent=4)\n', (1904, 1933), False, 'import json\n'), ((2604, 2643), 'json.dumps', 'json.dumps', (['d'], {'sort_keys': '(True)', 'indent': '(4)'}), '(d, sort_keys=True, indent=4)\n', (2614, 2643), False, 'import json\n'), ((2824, 2863), 'json.dumps', 'json.dumps', (['d'], {'sort_keys': '(True)', 'indent': '(4)'}), '(d, sort_keys=True, indent=4)\n', (2834, 2863), False, 'import json\n')]
|
from django.http import HttpResponse
from django.template import loader
def customer_weekly(request, site_id):
template = loader.get_template('customer_weekly.html')
context = {
# 'site_id' : site_id,
}
return HttpResponse(template.render(context, request))
def serve_svg(request):
template = loader.get_template('serve_svg.html')
context = {
# 'site_id' : site_id,
}
return HttpResponse(template.render(context, request))
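# --- Editor's addition: hedged sketch, not part of the original views module ---
# The two views above would normally be routed in a urls.py along these lines;
# the URL patterns shown are illustrative only.
from django.urls import path
urlpatterns = [
    path('customer/<int:site_id>/weekly/', customer_weekly),
    path('svg/', serve_svg),
]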
|
[
"django.template.loader.get_template"
] |
[((127, 170), 'django.template.loader.get_template', 'loader.get_template', (['"""customer_weekly.html"""'], {}), "('customer_weekly.html')\n", (146, 170), False, 'from django.template import loader\n'), ((322, 359), 'django.template.loader.get_template', 'loader.get_template', (['"""serve_svg.html"""'], {}), "('serve_svg.html')\n", (341, 359), False, 'from django.template import loader\n')]
|
#!/usr/bin/env python3
"""
Set effective registration.
**Note**: This is generally for testing only and should be done automatically via
`jaws-effective-processor <https://github.com/JeffersonLab/jaws-effective-processor>`_
"""
import click
from jaws_libp.clients import EffectiveRegistrationProducer
from jaws_libp.entities import EffectiveRegistration, \
AlarmInstance, SimpleProducer
# pylint: disable=duplicate-code
def __get_instance():
return AlarmInstance("base",
SimpleProducer(),
["INJ"],
"alarm1",
"command1")
# pylint: disable=missing-function-docstring,no-value-for-parameter
@click.command()
@click.option('--unset', is_flag=True, help="present to clear state, missing to set state")
@click.argument('name')
def set_effective_registration(unset, name):
producer = EffectiveRegistrationProducer('set_effective_registration.py')
key = name
if unset:
value = None
else:
alarm_class = None
alarm_instance = __get_instance()
value = EffectiveRegistration(alarm_class, alarm_instance)
producer.send(key, value)
def click_main() -> None:
set_effective_registration()
if __name__ == "__main__":
click_main()
|
[
"click.argument",
"click.option",
"click.command",
"jaws_libp.entities.SimpleProducer",
"jaws_libp.clients.EffectiveRegistrationProducer",
"jaws_libp.entities.EffectiveRegistration"
] |
[((716, 731), 'click.command', 'click.command', ([], {}), '()\n', (729, 731), False, 'import click\n'), ((733, 828), 'click.option', 'click.option', (['"""--unset"""'], {'is_flag': '(True)', 'help': '"""present to clear state, missing to set state"""'}), "('--unset', is_flag=True, help=\n 'present to clear state, missing to set state')\n", (745, 828), False, 'import click\n'), ((825, 847), 'click.argument', 'click.argument', (['"""name"""'], {}), "('name')\n", (839, 847), False, 'import click\n'), ((908, 970), 'jaws_libp.clients.EffectiveRegistrationProducer', 'EffectiveRegistrationProducer', (['"""set_effective_registration.py"""'], {}), "('set_effective_registration.py')\n", (937, 970), False, 'from jaws_libp.clients import EffectiveRegistrationProducer\n'), ((521, 537), 'jaws_libp.entities.SimpleProducer', 'SimpleProducer', ([], {}), '()\n', (535, 537), False, 'from jaws_libp.entities import EffectiveRegistration, AlarmInstance, SimpleProducer\n'), ((1119, 1169), 'jaws_libp.entities.EffectiveRegistration', 'EffectiveRegistration', (['alarm_class', 'alarm_instance'], {}), '(alarm_class, alarm_instance)\n', (1140, 1169), False, 'from jaws_libp.entities import EffectiveRegistration, AlarmInstance, SimpleProducer\n')]
|
#!/usr/bin/python3
"""modelling.py
Various utility functions for modelling
"""
__author__ = "<NAME>"
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Activation, BatchNormalization, \
Bidirectional, concatenate, Conv1D, Dense, Dropout, \
GlobalAveragePooling1D, GRU, Input, LSTM, Masking, \
SpatialDropout1D
from tensorflow.keras.losses import MeanAbsoluteError, \
SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
from nicu_los.src.utils.evaluation import evaluate_classification_model, \
evaluate_regression_model
from nicu_los.src.utils.custom_keras_layers import ApplyMask, \
squeeze_excite_block, Slice
def construct_rnn(input_dimension, output_dimension, model_type='lstm',
n_cells=1, dropout=0.3, hid_dimension=64, model_name=""):
"""Construct an RNN model (either LSTM or GRU)
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
n_cells (int): Number of RNN cells
dropout (float): Amount of dropout to apply after each RNN cell
        hid_dimension (int): Dimension of the hidden layer (i.e. # of units in
the RNN cell)
Returns:
model (tf.keras.Model): Constructed RNN model
"""
inputs = Input(shape=(None, input_dimension))
# Skip timestep if all values of the input tensor are 0
X = Masking()(inputs)
num_hid_units = hid_dimension
for layer in range(n_cells - 1):
num_hid_units = num_hid_units // 2
if model_type == 'lstm':
cell = LSTM(units=num_hid_units, activation='tanh',
return_sequences=True, recurrent_dropout=0.0,
dropout=dropout)
elif model_type == 'gru':
cell = GRU(units=num_hid_units, activation='tanh',
return_sequences=True, recurrent_dropout=0.0,
dropout=dropout)
else:
raise ValueError("Parameter 'model_type' should be one of " +
"'lstm' or 'gru'.")
X = Bidirectional(cell)(X)
# There always has to be at least one cell
if model_type == 'lstm':
X = LSTM(activation='tanh', dropout=dropout, recurrent_dropout=0.0,
return_sequences=False, units=hid_dimension)(X)
elif model_type == 'gru':
X = GRU(activation='tanh', dropout=dropout, recurrent_dropout=0.0,
return_sequences=False, units=hid_dimension)(X)
else:
raise ValueError("Parameter 'model_type' should be one of " +
"'lstm' or 'gru'.")
if dropout:
X = Dropout(dropout)(X)
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
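# --- Editor's addition: hedged usage sketch, not part of the original module ---
# Shows how construct_rnn above could be called; all dimensions are arbitrary
# example values.
def _demo_construct_rnn():
    demo_model = construct_rnn(input_dimension=28, output_dimension=10,
                              model_type='lstm', n_cells=2, dropout=0.3,
                              hid_dimension=64, model_name='demo_lstm')
    demo_model.summary()
    return demo_model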
def construct_fcn(input_dimension, output_dimension, dropout=0.5,
model_name=""):
"""Construct an FCN model for multivariate time series classification
(Karim et al. 2019 - Multivariate LSTM-FCNs for time series classification)
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
dropout (float): Amount of dropout to apply in the first two
convolutional blocks
model_name (str): Name of the model
Returns:
        model (tf.keras.Model): Constructed FCN model
"""
inputs = Input(shape=(None, input_dimension))
mask = Masking().compute_mask(inputs)
X = Conv1D(128, 8, padding='same',
kernel_initializer='he_uniform')(inputs)
X = Activation('relu')(X)
X = BatchNormalization()(X)
X = SpatialDropout1D(dropout)(X)
X = ApplyMask()(X, mask)
X = squeeze_excite_block(X, mask)
X = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(X)
X = Activation('relu')(X)
X = BatchNormalization()(X)
X = SpatialDropout1D(dropout)(X)
X = ApplyMask()(X, mask)
X = squeeze_excite_block(X, mask)
X = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(X)
X = Activation('relu')(X)
X = BatchNormalization()(X)
X = GlobalAveragePooling1D()(X, mask)
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
def construct_fcn_originial(input_dimension, output_dimension, model_name=""):
"""Construct an FCN model for multivariate time series classification
(Karim et al. 2019 - Multivariate LSTM-FCNs for time series classification)
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
model_name (str): Name of the model
Returns:
        model (tf.keras.Model): Constructed FCN model
"""
inputs = Input(shape=(None, input_dimension))
X = Conv1D(128, 8, padding='same',
kernel_initializer='he_uniform')(inputs)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(X)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(X)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = GlobalAveragePooling1D()(X)
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
def construct_lstm_fcn_original(input_dimension, output_dimension, dropout=0.8,
hid_dimension_lstm=8, model_name=""):
"""Construct an LSTM-FCN model
Architecture as described in:
Karim et al. 2019 - Multivariate LSTM-FCNs for time series classification
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
dropout (float): Amount of dropout to apply after the LSTM cell
        hid_dimension (int): Dimension of the hidden layer (i.e. # of units in
the RNN cell)
model_name (str): Name of the model
Returns:
model (tf.keras.Model): Constructed LSTM-FCN model
"""
inputs = Input(shape=(None, input_dimension))
X1 = Masking()(inputs)
X1 = LSTM(hid_dimension_lstm)(X1)
X1 = Dropout(dropout)(X1)
X2 = Conv1D(128, 8, padding='same',
kernel_initializer='he_uniform')(inputs)
X2 = BatchNormalization()(X2)
X2 = Activation('relu')(X2)
X2 = squeeze_excite_block(X2)
X2 = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(X2)
X2 = BatchNormalization()(X2)
X2 = Activation('relu')(X2)
X2 = squeeze_excite_block(X2)
X2 = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(X2)
X2 = BatchNormalization()(X2)
X2 = Activation('relu')(X2)
X2 = GlobalAveragePooling1D()(X2)
X = concatenate([X1, X2])
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
def construct_lstm_fcn(input_dimension, output_dimension, dropout=0.5,
hid_dimension_lstm=16, model_name=""):
"""Construct a (modified) LSTM-FCN model
Modified architecture:
- Perform batch normalization after ReLU activation
- Use SpatialDropout1D in the convolutional blocks to reduce overfitting
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
dropout (float): Amount of dropout to apply after the LSTM cell
        hid_dimension (int): Dimension of the hidden layer (i.e. # of units in
the RNN cell)
model_name (str): Name of the model
Returns:
model (tf.keras.Model): Constructed LSTM-FCN model
"""
inputs = Input(shape=(None, input_dimension))
mask = Masking().compute_mask(inputs)
X1 = Masking()(inputs)
X1 = LSTM(hid_dimension_lstm)(X1)
X1 = Dropout(dropout)(X1)
X2 = Conv1D(128, 8, padding='same',
kernel_initializer='he_uniform')(inputs)
X2 = Activation('relu')(X2)
X2 = BatchNormalization()(X2)
X2 = SpatialDropout1D(0.5)(X2)
X2 = ApplyMask()(X2, mask)
X2 = squeeze_excite_block(X2, mask)
X2 = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(X2)
X2 = Activation('relu')(X2)
X2 = BatchNormalization()(X2)
X2 = SpatialDropout1D(0.5)(X2)
X2 = ApplyMask()(X2, mask)
X2 = squeeze_excite_block(X2, mask)
X2 = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(X2)
X2 = Activation('relu')(X2)
X2 = BatchNormalization()(X2)
X2 = GlobalAveragePooling1D()(X2, mask)
X = concatenate([X1, X2])
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
def construct_channel_wise_rnn(input_dimension, output_dimension,
model_type='lstm_cw', dropout=0.0, global_dropout=0.0, hid_dimension=16,
multiplier=4, model_name=""):
"""Construct an RNN model (either LSTM or GRU)
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
dropout (float): Amount of dropout to apply after each RNN cell
global_dropout (float): Amount of dropout to apply before the output
        hid_dimension (int): Dimension of the hidden layer (i.e. # of units in
the RNN cell)
multiplier (int): Multiplier for the hidden dimension of the global LSTM
Returns:
model (tf.keras.Model): Constructed channel-wise RNN model
"""
inputs = Input(shape=(None, input_dimension))
# Skip timestep if all values of the input tensor are 0
mask = Masking().compute_mask(inputs)
X = Masking()(inputs)
# Train LSTMs over the channels, and append them
cXs = []
for feature in range(int(input_dimension/2)):
mask_var = int(feature+input_dimension/2)
channel_slice = Slice(feature, mask_var)(X)
num_hid_units = hid_dimension // 2
cell = LSTM(units=num_hid_units, activation='tanh',
return_sequences=True, recurrent_dropout=dropout,
dropout=dropout)
cX = Bidirectional(cell)(channel_slice)
cX = ApplyMask()(cX, mask)
cXs.append(cX)
# Concatenate the channels
X = concatenate(cXs, axis=2)
X = Masking()(X)
# There always has to be at least one cell
if model_type == 'lstm_cw':
X = LSTM(activation='tanh', dropout=dropout, recurrent_dropout=0.0,
return_sequences=False, units=multiplier*hid_dimension)(X)
elif model_type == 'gru_cw':
X = GRU(activation='tanh', dropout=dropout, recurrent_dropout=0.0,
return_sequences=False, units=multiplier*hid_dimension)(X)
else:
raise ValueError("Parameter 'model_type' should be one of " +
"'lstm_cw' or 'gru_cw'.")
if global_dropout:
X = Dropout(global_dropout)(X)
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
def construct_and_compile_model(model_type, model_name, task, checkpoint_file,
checkpoints_dir, model_params={}):
"""Construct and compile a model of a specific type
Args:
model_type (str): The type of model to be constructed
model_name (str): The name of model to be constructed
task (str): Either 'regression' or 'classification'
checkpoint_file (str): Name of a checkpoint file
checkpoints_dir (str): Path to the checkpoints directory
model_params (dict): Possible hyper-parameters for the model to be
constructed
Returns:
model (tf.keras.Model): Constructed and compiled model
"""
n_cells = model_params['n_cells']
input_dimension = model_params['input_dimension']
output_dimension = model_params['output_dimension']
dropout = model_params['dropout']
global_dropout = model_params['global_dropout']
hid_dimension = model_params['hidden_dimension']
multiplier = model_params['multiplier']
if task == 'classification':
loss_fn = SparseCategoricalCrossentropy()
metrics = ['accuracy']
elif task == 'regression':
loss_fn = MeanAbsoluteError()
metrics = ['mse']
output_dimension = 1
else:
raise ValueError('Argument "task" must be one of "classification" ' \
'or "regression"')
if model_type == 'lstm' or model_type == 'gru':
model = construct_rnn(input_dimension, output_dimension, model_type,
n_cells, dropout, hid_dimension, model_name)
elif model_type == 'lstm_cw' or model_type == 'gru_cw':
model = construct_channel_wise_rnn(input_dimension, output_dimension,
model_type, dropout, global_dropout, hid_dimension, multiplier,
model_name)
elif model_type == 'fcn':
model = construct_fcn(input_dimension, output_dimension, dropout,
model_name)
elif model_type == 'lstm_fcn':
model = construct_lstm_fcn(input_dimension, output_dimension, dropout,
hid_dimension, model_name)
else:
raise ValueError(f'Model type {model_type} is not supported.')
if checkpoint_file:
print(f"=> Loading weights from checkpoint: {checkpoint_file}")
model.load_weights(os.path.join(checkpoints_dir, checkpoint_file))
model.compile(optimizer=Adam(), loss=loss_fn, metrics=metrics)
model.summary()
return model
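# --- Editor's addition: hedged usage sketch, not part of the original module ---
# Example of the model_params dictionary expected by construct_and_compile_model;
# all values are arbitrary illustration values.
def _demo_construct_and_compile():
    params = {
        'n_cells': 1,
        'input_dimension': 28,
        'output_dimension': 10,
        'dropout': 0.3,
        'global_dropout': 0.0,
        'hidden_dimension': 64,
        'multiplier': 4,
    }
    return construct_and_compile_model(model_type='lstm', model_name='demo_lstm',
                                       task='classification', checkpoint_file=None,
                                       checkpoints_dir='', model_params=params)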
class MetricsCallback(Callback):
def __init__(self, model, task, training_data, validation_data,
training_steps, validation_steps):
"""Callback to compute metrics after an epoch has ended
Args:
model (tf.keras.model): TensorFlow (Keras) model
task (str): Classification or regression
training_data (tf.data.Dataset)
validation_data (tf.data.Dataset)
training_steps (int)
validation_steps (int)
"""
self.model = model
self.task = task
self.training_data = training_data
self.validation_data = validation_data
self.training_steps = training_steps
self.validation_steps = validation_steps
def on_epoch_end(self, epoch, logs=None):
"""The callback
Args:
epoch (int): Identifier of the current epoch
"""
print('\n=> Predict on training data:\n')
y_true, y_pred = [], []
for batch, (x, y) in enumerate(self.training_data):
if batch > self.training_steps:
break
if self.task == 'classification':
y_pred.append(np.argmax(self.model.predict_on_batch(x), axis=1))
else:
y_pred.append(self.model.predict_on_batch(x))
y_true.append(y.numpy())
if self.task == 'classification':
evaluate_classification_model(np.concatenate(y_true, axis=0),
np.concatenate(y_pred, axis=0))
else:
evaluate_regression_model(np.concatenate(y_true, axis=0),
np.concatenate(y_pred, axis=0))
print('\n=> Predict on validation data:\n')
y_true, y_pred = [], []
for batch, (x, y) in enumerate(self.validation_data):
if batch > self.validation_steps:
break
if self.task == 'classification':
y_pred.append(np.argmax(self.model.predict_on_batch(x), axis=1))
else:
y_pred.append(self.model.predict_on_batch(x))
y_true.append(y.numpy())
if self.task == 'classification':
evaluate_classification_model(np.concatenate(y_true, axis=0),
np.concatenate(y_pred, axis=0))
else:
evaluate_regression_model(np.concatenate(y_true, axis=0),
np.concatenate(y_pred, axis=0))
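# --- Editor's addition: hedged usage sketch, not part of the original module ---
# Shows how MetricsCallback above is typically wired into Keras fit(); the
# datasets and step counts are placeholders supplied by the caller.
def _demo_metrics_callback(model, train_data, val_data):
    callback = MetricsCallback(model, 'classification', train_data, val_data,
                               training_steps=100, validation_steps=20)
    model.fit(train_data, validation_data=val_data, epochs=2,
              steps_per_epoch=100, validation_steps=20, callbacks=[callback])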
|
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.layers.Masking",
"os.path.join",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.BatchNormalization",
"nicu_los.src.utils.custom_keras_layers.ApplyMask",
"tensorflow.keras.losses.MeanAbsoluteError",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.GlobalAveragePooling1D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.GRU",
"tensorflow.keras.models.Model",
"nicu_los.src.utils.custom_keras_layers.Slice",
"tensorflow.keras.layers.SpatialDropout1D",
"numpy.concatenate",
"nicu_los.src.utils.custom_keras_layers.squeeze_excite_block",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Bidirectional"
] |
[((1471, 1507), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, input_dimension)'}), '(shape=(None, input_dimension))\n', (1476, 1507), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((3053, 3107), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': 'model_name'}), '(inputs=inputs, outputs=outputs, name=model_name)\n', (3058, 3107), False, 'from tensorflow.keras.models import Model\n'), ((3759, 3795), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, input_dimension)'}), '(shape=(None, input_dimension))\n', (3764, 3795), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4067, 4096), 'nicu_los.src.utils.custom_keras_layers.squeeze_excite_block', 'squeeze_excite_block', (['X', 'mask'], {}), '(X, mask)\n', (4087, 4096), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((4309, 4338), 'nicu_los.src.utils.custom_keras_layers.squeeze_excite_block', 'squeeze_excite_block', (['X', 'mask'], {}), '(X, mask)\n', (4329, 4338), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((4745, 4799), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': 'model_name'}), '(inputs=inputs, outputs=outputs, name=model_name)\n', (4750, 4799), False, 'from tensorflow.keras.models import Model\n'), ((5324, 5360), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, input_dimension)'}), '(shape=(None, input_dimension))\n', (5329, 5360), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((6062, 6116), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': 'model_name'}), '(inputs=inputs, outputs=outputs, name=model_name)\n', (6067, 6116), False, 'from tensorflow.keras.models import Model\n'), ((6885, 6921), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, input_dimension)'}), '(shape=(None, input_dimension))\n', (6890, 6921), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7187, 7211), 'nicu_los.src.utils.custom_keras_layers.squeeze_excite_block', 'squeeze_excite_block', (['X2'], {}), '(X2)\n', (7207, 7211), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((7365, 7389), 'nicu_los.src.utils.custom_keras_layers.squeeze_excite_block', 'squeeze_excite_block', (['X2'], {}), '(X2)\n', (7385, 7389), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((7582, 7603), 'tensorflow.keras.layers.concatenate', 'concatenate', (['[X1, X2]'], {}), '([X1, X2])\n', (7593, 7603), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7829, 7883), 'tensorflow.keras.models.Model', 'Model', ([], 
{'inputs': 'inputs', 'outputs': 'outputs', 'name': 'model_name'}), '(inputs=inputs, outputs=outputs, name=model_name)\n', (7834, 7883), False, 'from tensorflow.keras.models import Model\n'), ((8703, 8739), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, input_dimension)'}), '(shape=(None, input_dimension))\n', (8708, 8739), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9114, 9144), 'nicu_los.src.utils.custom_keras_layers.squeeze_excite_block', 'squeeze_excite_block', (['X2', 'mask'], {}), '(X2, mask)\n', (9134, 9144), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((9364, 9394), 'nicu_los.src.utils.custom_keras_layers.squeeze_excite_block', 'squeeze_excite_block', (['X2', 'mask'], {}), '(X2, mask)\n', (9384, 9394), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((9593, 9614), 'tensorflow.keras.layers.concatenate', 'concatenate', (['[X1, X2]'], {}), '([X1, X2])\n', (9604, 9614), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9840, 9894), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': 'model_name'}), '(inputs=inputs, outputs=outputs, name=model_name)\n', (9845, 9894), False, 'from tensorflow.keras.models import Model\n'), ((10739, 10775), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, input_dimension)'}), '(shape=(None, input_dimension))\n', (10744, 10775), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((11475, 11499), 'tensorflow.keras.layers.concatenate', 'concatenate', (['cXs'], {'axis': '(2)'}), '(cXs, axis=2)\n', (11486, 11499), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((12345, 12399), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': 'model_name'}), '(inputs=inputs, outputs=outputs, name=model_name)\n', (12350, 12399), False, 'from tensorflow.keras.models import Model\n'), ((1578, 1587), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (1585, 1587), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((3847, 3910), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(8)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 8, padding='same', kernel_initializer='he_uniform')\n", (3853, 3910), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((3939, 3957), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3949, 3957), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, 
GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((3969, 3989), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3987, 3989), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4001, 4026), 'tensorflow.keras.layers.SpatialDropout1D', 'SpatialDropout1D', (['dropout'], {}), '(dropout)\n', (4017, 4026), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4038, 4049), 'nicu_los.src.utils.custom_keras_layers.ApplyMask', 'ApplyMask', ([], {}), '()\n', (4047, 4049), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((4106, 4169), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(256)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(256, 5, padding='same', kernel_initializer='he_uniform')\n", (4112, 4169), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4181, 4199), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4191, 4199), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4211, 4231), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4229, 4231), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4243, 4268), 'tensorflow.keras.layers.SpatialDropout1D', 'SpatialDropout1D', (['dropout'], {}), '(dropout)\n', (4259, 4268), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4280, 4291), 'nicu_los.src.utils.custom_keras_layers.ApplyMask', 'ApplyMask', ([], {}), '()\n', (4289, 4291), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((4348, 4411), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 3, padding='same', kernel_initializer='he_uniform')\n", (4354, 4411), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4423, 4441), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4433, 4441), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4453, 4473), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4471, 4473), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, 
Input, LSTM, Masking, SpatialDropout1D\n'), ((4486, 4510), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (4508, 4510), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5370, 5433), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(8)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 8, padding='same', kernel_initializer='he_uniform')\n", (5376, 5433), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5461, 5481), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5479, 5481), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5494, 5512), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5504, 5512), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5526, 5589), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(256)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(256, 5, padding='same', kernel_initializer='he_uniform')\n", (5532, 5589), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5602, 5622), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5620, 5622), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5635, 5653), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5645, 5653), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5667, 5730), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 3, padding='same', kernel_initializer='he_uniform')\n", (5673, 5730), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5743, 5763), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5761, 5763), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5776, 5794), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5786, 5794), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), 
((5808, 5832), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (5830, 5832), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((6932, 6941), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (6939, 6941), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((6959, 6983), 'tensorflow.keras.layers.LSTM', 'LSTM', (['hid_dimension_lstm'], {}), '(hid_dimension_lstm)\n', (6963, 6983), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((6997, 7013), 'tensorflow.keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (7004, 7013), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7028, 7091), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(8)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 8, padding='same', kernel_initializer='he_uniform')\n", (7034, 7091), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7121, 7141), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7139, 7141), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7155, 7173), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7165, 7173), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7222, 7285), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(256)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(256, 5, padding='same', kernel_initializer='he_uniform')\n", (7228, 7285), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7299, 7319), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7317, 7319), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7333, 7351), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7343, 7351), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7400, 7463), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 3, padding='same', 
kernel_initializer='he_uniform')\n", (7406, 7463), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7477, 7497), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7495, 7497), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7511, 7529), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7521, 7529), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7544, 7568), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (7566, 7568), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((8793, 8802), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (8800, 8802), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((8820, 8844), 'tensorflow.keras.layers.LSTM', 'LSTM', (['hid_dimension_lstm'], {}), '(hid_dimension_lstm)\n', (8824, 8844), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((8858, 8874), 'tensorflow.keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (8865, 8874), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((8889, 8952), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(8)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 8, padding='same', kernel_initializer='he_uniform')\n", (8895, 8952), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((8982, 9000), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8992, 9000), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9014, 9034), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (9032, 9034), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9048, 9069), 'tensorflow.keras.layers.SpatialDropout1D', 'SpatialDropout1D', (['(0.5)'], {}), '(0.5)\n', (9064, 9069), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9083, 9094), 
'nicu_los.src.utils.custom_keras_layers.ApplyMask', 'ApplyMask', ([], {}), '()\n', (9092, 9094), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((9155, 9218), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(256)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(256, 5, padding='same', kernel_initializer='he_uniform')\n", (9161, 9218), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9232, 9250), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9242, 9250), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9264, 9284), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (9282, 9284), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9298, 9319), 'tensorflow.keras.layers.SpatialDropout1D', 'SpatialDropout1D', (['(0.5)'], {}), '(0.5)\n', (9314, 9319), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9333, 9344), 'nicu_los.src.utils.custom_keras_layers.ApplyMask', 'ApplyMask', ([], {}), '()\n', (9342, 9344), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((9405, 9468), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 3, padding='same', kernel_initializer='he_uniform')\n", (9411, 9468), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9482, 9500), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9492, 9500), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9514, 9534), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (9532, 9534), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9549, 9573), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (9571, 9573), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((10888, 10897), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (10895, 10897), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((11185, 11300), 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'units': 
'num_hid_units', 'activation': '"""tanh"""', 'return_sequences': '(True)', 'recurrent_dropout': 'dropout', 'dropout': 'dropout'}), "(units=num_hid_units, activation='tanh', return_sequences=True,\n recurrent_dropout=dropout, dropout=dropout)\n", (11189, 11300), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((11508, 11517), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (11515, 11517), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((13503, 13534), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'SparseCategoricalCrossentropy', ([], {}), '()\n', (13532, 13534), False, 'from tensorflow.keras.losses import MeanAbsoluteError, SparseCategoricalCrossentropy\n'), ((1765, 1876), 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'units': 'num_hid_units', 'activation': '"""tanh"""', 'return_sequences': '(True)', 'recurrent_dropout': '(0.0)', 'dropout': 'dropout'}), "(units=num_hid_units, activation='tanh', return_sequences=True,\n recurrent_dropout=0.0, dropout=dropout)\n", (1769, 1876), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((2254, 2273), 'tensorflow.keras.layers.Bidirectional', 'Bidirectional', (['cell'], {}), '(cell)\n', (2267, 2273), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((2366, 2478), 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'activation': '"""tanh"""', 'dropout': 'dropout', 'recurrent_dropout': '(0.0)', 'return_sequences': '(False)', 'units': 'hid_dimension'}), "(activation='tanh', dropout=dropout, recurrent_dropout=0.0,\n return_sequences=False, units=hid_dimension)\n", (2370, 2478), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((2808, 2824), 'tensorflow.keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (2815, 2824), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((2902, 2953), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension', 'activation': '"""softmax"""'}), "(units=output_dimension, activation='softmax')\n", (2907, 2953), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((3007, 3036), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension'}), '(units=output_dimension)\n', (3012, 3036), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((3807, 3816), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (3814, 3816), False, 'from tensorflow.keras.layers import 
Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4594, 4645), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension', 'activation': '"""softmax"""'}), "(units=output_dimension, activation='softmax')\n", (4599, 4645), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4699, 4728), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension'}), '(units=output_dimension)\n', (4704, 4728), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5911, 5962), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension', 'activation': '"""softmax"""'}), "(units=output_dimension, activation='softmax')\n", (5916, 5962), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((6016, 6045), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension'}), '(units=output_dimension)\n', (6021, 6045), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7678, 7729), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension', 'activation': '"""softmax"""'}), "(units=output_dimension, activation='softmax')\n", (7683, 7729), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7783, 7812), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension'}), '(units=output_dimension)\n', (7788, 7812), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((8752, 8761), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (8759, 8761), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9689, 9740), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension', 'activation': '"""softmax"""'}), "(units=output_dimension, activation='softmax')\n", (9694, 9740), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9794, 9823), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension'}), '(units=output_dimension)\n', (9799, 9823), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((10849, 10858), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (10856, 10858), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, 
Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((11097, 11121), 'nicu_los.src.utils.custom_keras_layers.Slice', 'Slice', (['feature', 'mask_var'], {}), '(feature, mask_var)\n', (11102, 11121), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((11342, 11361), 'tensorflow.keras.layers.Bidirectional', 'Bidirectional', (['cell'], {}), '(cell)\n', (11355, 11361), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((11390, 11401), 'nicu_los.src.utils.custom_keras_layers.ApplyMask', 'ApplyMask', ([], {}), '()\n', (11399, 11401), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((11613, 11738), 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'activation': '"""tanh"""', 'dropout': 'dropout', 'recurrent_dropout': '(0.0)', 'return_sequences': '(False)', 'units': '(multiplier * hid_dimension)'}), "(activation='tanh', dropout=dropout, recurrent_dropout=0.0,\n return_sequences=False, units=multiplier * hid_dimension)\n", (11617, 11738), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((12093, 12116), 'tensorflow.keras.layers.Dropout', 'Dropout', (['global_dropout'], {}), '(global_dropout)\n', (12100, 12116), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((12194, 12245), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension', 'activation': '"""softmax"""'}), "(units=output_dimension, activation='softmax')\n", (12199, 12245), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((12299, 12328), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension'}), '(units=output_dimension)\n', (12304, 12328), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((13615, 13634), 'tensorflow.keras.losses.MeanAbsoluteError', 'MeanAbsoluteError', ([], {}), '()\n', (13632, 13634), False, 'from tensorflow.keras.losses import MeanAbsoluteError, SparseCategoricalCrossentropy\n'), ((14744, 14790), 'os.path.join', 'os.path.join', (['checkpoints_dir', 'checkpoint_file'], {}), '(checkpoints_dir, checkpoint_file)\n', (14756, 14790), False, 'import os\n'), ((14821, 14827), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (14825, 14827), False, 'from tensorflow.keras.optimizers import Adam\n'), ((1966, 2076), 'tensorflow.keras.layers.GRU', 'GRU', ([], {'units': 'num_hid_units', 'activation': '"""tanh"""', 'return_sequences': '(True)', 'recurrent_dropout': '(0.0)', 'dropout': 'dropout'}), "(units=num_hid_units, activation='tanh', return_sequences=True,\n recurrent_dropout=0.0, dropout=dropout)\n", (1969, 2076), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, 
GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((2536, 2647), 'tensorflow.keras.layers.GRU', 'GRU', ([], {'activation': '"""tanh"""', 'dropout': 'dropout', 'recurrent_dropout': '(0.0)', 'return_sequences': '(False)', 'units': 'hid_dimension'}), "(activation='tanh', dropout=dropout, recurrent_dropout=0.0,\n return_sequences=False, units=hid_dimension)\n", (2539, 2647), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((11797, 11921), 'tensorflow.keras.layers.GRU', 'GRU', ([], {'activation': '"""tanh"""', 'dropout': 'dropout', 'recurrent_dropout': '(0.0)', 'return_sequences': '(False)', 'units': '(multiplier * hid_dimension)'}), "(activation='tanh', dropout=dropout, recurrent_dropout=0.0,\n return_sequences=False, units=multiplier * hid_dimension)\n", (11800, 11921), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((16352, 16382), 'numpy.concatenate', 'np.concatenate', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (16366, 16382), True, 'import numpy as np\n'), ((16404, 16434), 'numpy.concatenate', 'np.concatenate', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (16418, 16434), True, 'import numpy as np\n'), ((16488, 16518), 'numpy.concatenate', 'np.concatenate', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (16502, 16518), True, 'import numpy as np\n'), ((16540, 16570), 'numpy.concatenate', 'np.concatenate', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (16554, 16570), True, 'import numpy as np\n'), ((17118, 17148), 'numpy.concatenate', 'np.concatenate', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (17132, 17148), True, 'import numpy as np\n'), ((17170, 17200), 'numpy.concatenate', 'np.concatenate', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (17184, 17200), True, 'import numpy as np\n'), ((17254, 17284), 'numpy.concatenate', 'np.concatenate', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (17268, 17284), True, 'import numpy as np\n'), ((17306, 17336), 'numpy.concatenate', 'np.concatenate', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (17320, 17336), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#
# Copyright (c) 2005-2007 <NAME> <<EMAIL>>
# Copyright (c) 2007-2012 <NAME> and <NAME>
# All rights reserved.
#
# Generates marshaling code based on libevent.
# pylint: disable=too-many-lines
# pylint: disable=too-many-branches
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-statements
# pylint: disable=global-statement
# TODO:
# 1) propagate the arguments/options parsed by argparse down to the
# instantiated factory objects.
# 2) move the globals into a class that manages execution, including the
# progress outputs that go to stderr at the moment.
# 3) emit other languages.
import argparse
import re
import sys
_NAME = "event_rpcgen.py"
_VERSION = "0.1"
# Globals
LINE_COUNT = 0
CPPCOMMENT_RE = re.compile(r"\/\/.*$")
NONIDENT_RE = re.compile(r"\W")
PREPROCESSOR_DEF_RE = re.compile(r"^#define")
STRUCT_REF_RE = re.compile(r"^struct\[(?P<name>[a-zA-Z_][a-zA-Z0-9_]*)\]$")
STRUCT_DEF_RE = re.compile(r"^struct +[a-zA-Z_][a-zA-Z0-9_]* *{$")
WHITESPACE_RE = re.compile(r"\s+")
HEADER_DIRECT = []
CPP_DIRECT = []
QUIETLY = False
def declare(s):
if not QUIETLY:
print(s)
def TranslateList(mylist, mydict):
return [x % mydict for x in mylist]
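# Illustrative note (not in the original script): TranslateList simply applies
# Python %-formatting to every template line, e.g.
#     TranslateList(["%(name)s_set = 1;"], {"name": "msg"}) -> ["msg_set = 1;"]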
class RpcGenError(Exception):
"""An Exception class for parse errors."""
def __init__(self, why): # pylint: disable=super-init-not-called
self.why = why
def __str__(self):
return str(self.why)
# Holds everything that makes a struct
class Struct(object):
def __init__(self, name):
self._name = name
self._entries = []
self._tags = {}
declare(" Created struct: %s" % name)
def AddEntry(self, entry):
if entry.Tag() in self._tags:
raise RpcGenError(
'Entry "%s" duplicates tag number %d from "%s" '
"around line %d"
% (entry.Name(), entry.Tag(), self._tags[entry.Tag()], LINE_COUNT)
)
self._entries.append(entry)
self._tags[entry.Tag()] = entry.Name()
declare(" Added entry: %s" % entry.Name())
def Name(self):
return self._name
def EntryTagName(self, entry):
"""Creates the name inside an enumeration for distinguishing data
types."""
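        # e.g. (illustrative, names are hypothetical): struct "msg" with entry
        # "from_name" yields the enum member name "MSG_FROM_NAME".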
name = "%s_%s" % (self._name, entry.Name())
return name.upper()
@staticmethod
def PrintIndented(filep, ident, code):
"""Takes an array, add indentation to each entry and prints it."""
for entry in code:
filep.write("%s%s\n" % (ident, entry))
class StructCCode(Struct):
""" Knows how to generate C code for a struct """
def __init__(self, name):
Struct.__init__(self, name)
def PrintTags(self, filep):
"""Prints the tag definitions for a structure."""
filep.write("/* Tag definition for %s */\n" % self._name)
filep.write("enum %s_ {\n" % self._name.lower())
for entry in self._entries:
filep.write(" %s=%d,\n" % (self.EntryTagName(entry), entry.Tag()))
filep.write(" %s_MAX_TAGS\n" % (self._name.upper()))
filep.write("};\n\n")
def PrintForwardDeclaration(self, filep):
filep.write("struct %s;\n" % self._name)
def PrintDeclaration(self, filep):
filep.write("/* Structure declaration for %s */\n" % self._name)
filep.write("struct %s_access_ {\n" % self._name)
for entry in self._entries:
dcl = entry.AssignDeclaration("(*%s_assign)" % entry.Name())
dcl.extend(entry.GetDeclaration("(*%s_get)" % entry.Name()))
if entry.Array():
dcl.extend(entry.AddDeclaration("(*%s_add)" % entry.Name()))
self.PrintIndented(filep, " ", dcl)
filep.write("};\n\n")
filep.write("struct %s {\n" % self._name)
filep.write(" struct %s_access_ *base;\n\n" % self._name)
for entry in self._entries:
dcl = entry.Declaration()
self.PrintIndented(filep, " ", dcl)
filep.write("\n")
for entry in self._entries:
filep.write(" ev_uint8_t %s_set;\n" % entry.Name())
filep.write("};\n\n")
filep.write(
"""struct %(name)s *%(name)s_new(void);
struct %(name)s *%(name)s_new_with_arg(void *);
void %(name)s_free(struct %(name)s *);
void %(name)s_clear(struct %(name)s *);
void %(name)s_marshal(struct evbuffer *, const struct %(name)s *);
int %(name)s_unmarshal(struct %(name)s *, struct evbuffer *);
int %(name)s_complete(struct %(name)s *);
void evtag_marshal_%(name)s(struct evbuffer *, ev_uint32_t,
const struct %(name)s *);
int evtag_unmarshal_%(name)s(struct evbuffer *, ev_uint32_t,
struct %(name)s *);\n"""
% {"name": self._name}
)
        # Write a setting function for every variable
for entry in self._entries:
self.PrintIndented(
filep, "", entry.AssignDeclaration(entry.AssignFuncName())
)
self.PrintIndented(filep, "", entry.GetDeclaration(entry.GetFuncName()))
if entry.Array():
self.PrintIndented(filep, "", entry.AddDeclaration(entry.AddFuncName()))
filep.write("/* --- %s done --- */\n\n" % self._name)
def PrintCode(self, filep):
filep.write(
"""/*
* Implementation of %s
*/
"""
% (self._name)
)
filep.write(
"""
static struct %(name)s_access_ %(name)s_base__ = {
"""
% {"name": self._name}
)
for entry in self._entries:
self.PrintIndented(filep, " ", entry.CodeBase())
filep.write("};\n\n")
# Creation
filep.write(
"""struct %(name)s *
%(name)s_new(void)
{
return %(name)s_new_with_arg(NULL);
}
struct %(name)s *
%(name)s_new_with_arg(void *unused)
{
struct %(name)s *tmp;
if ((tmp = malloc(sizeof(struct %(name)s))) == NULL) {
event_warn("%%s: malloc", __func__);
return (NULL);
}
tmp->base = &%(name)s_base__;
"""
% {"name": self._name}
)
for entry in self._entries:
self.PrintIndented(filep, " ", entry.CodeInitialize("tmp"))
filep.write(" tmp->%s_set = 0;\n\n" % entry.Name())
filep.write(
""" return (tmp);
}
"""
)
# Adding
for entry in self._entries:
if entry.Array():
self.PrintIndented(filep, "", entry.CodeAdd())
filep.write("\n")
# Assigning
for entry in self._entries:
self.PrintIndented(filep, "", entry.CodeAssign())
filep.write("\n")
# Getting
for entry in self._entries:
self.PrintIndented(filep, "", entry.CodeGet())
filep.write("\n")
# Clearing
filep.write(
"""void
%(name)s_clear(struct %(name)s *tmp)
{
"""
% {"name": self._name}
)
for entry in self._entries:
self.PrintIndented(filep, " ", entry.CodeClear("tmp"))
filep.write("}\n\n")
# Freeing
filep.write(
"""void
%(name)s_free(struct %(name)s *tmp)
{
"""
% {"name": self._name}
)
for entry in self._entries:
self.PrintIndented(filep, " ", entry.CodeFree("tmp"))
filep.write(
""" free(tmp);
}
"""
)
# Marshaling
filep.write(
"""void
%(name)s_marshal(struct evbuffer *evbuf, const struct %(name)s *tmp) {
"""
% {"name": self._name}
)
for entry in self._entries:
indent = " "
# Optional entries do not have to be set
if entry.Optional():
indent += " "
filep.write(" if (tmp->%s_set) {\n" % entry.Name())
self.PrintIndented(
filep,
indent,
entry.CodeMarshal(
"evbuf",
self.EntryTagName(entry),
entry.GetVarName("tmp"),
entry.GetVarLen("tmp"),
),
)
if entry.Optional():
filep.write(" }\n")
filep.write("}\n\n")
# Unmarshaling
filep.write(
"""int
%(name)s_unmarshal(struct %(name)s *tmp, struct evbuffer *evbuf)
{
ev_uint32_t tag;
while (evbuffer_get_length(evbuf) > 0) {
if (evtag_peek(evbuf, &tag) == -1)
return (-1);
switch (tag) {
"""
% {"name": self._name}
)
for entry in self._entries:
filep.write(" case %s:\n" % (self.EntryTagName(entry)))
if not entry.Array():
filep.write(
""" if (tmp->%s_set)
return (-1);
"""
% (entry.Name())
)
self.PrintIndented(
filep,
" ",
entry.CodeUnmarshal(
"evbuf",
self.EntryTagName(entry),
entry.GetVarName("tmp"),
entry.GetVarLen("tmp"),
),
)
filep.write(
""" tmp->%s_set = 1;
break;
"""
% (entry.Name())
)
filep.write(
""" default:
return -1;
}
}
"""
)
# Check if it was decoded completely
filep.write(
""" if (%(name)s_complete(tmp) == -1)
return (-1);
return (0);
}
"""
% {"name": self._name}
)
# Checking if a structure has all the required data
filep.write(
"""
int
%(name)s_complete(struct %(name)s *msg)
{
"""
% {"name": self._name}
)
for entry in self._entries:
if not entry.Optional():
code = [
"""if (!msg->%(name)s_set)
return (-1);"""
]
code = TranslateList(code, entry.GetTranslation())
self.PrintIndented(filep, " ", code)
self.PrintIndented(
filep, " ", entry.CodeComplete("msg", entry.GetVarName("msg"))
)
filep.write(
""" return (0);
}
"""
)
# Complete message unmarshaling
filep.write(
"""
int
evtag_unmarshal_%(name)s(struct evbuffer *evbuf, ev_uint32_t need_tag,
struct %(name)s *msg)
{
ev_uint32_t tag;
int res = -1;
struct evbuffer *tmp = evbuffer_new();
if (evtag_unmarshal(evbuf, &tag, tmp) == -1 || tag != need_tag)
goto error;
if (%(name)s_unmarshal(msg, tmp) == -1)
goto error;
res = 0;
error:
evbuffer_free(tmp);
return (res);
}
"""
% {"name": self._name}
)
# Complete message marshaling
filep.write(
"""
void
evtag_marshal_%(name)s(struct evbuffer *evbuf, ev_uint32_t tag,
const struct %(name)s *msg)
{
struct evbuffer *buf_ = evbuffer_new();
assert(buf_ != NULL);
%(name)s_marshal(buf_, msg);
evtag_marshal_buffer(evbuf, tag, buf_);
evbuffer_free(buf_);
}
"""
% {"name": self._name}
)
class Entry(object):
def __init__(self, ent_type, name, tag):
self._type = ent_type
self._name = name
self._tag = int(tag)
self._ctype = ent_type
self._optional = False
self._can_be_array = False
self._array = False
self._line_count = -1
self._struct = None
self._refname = None
self._optpointer = True
self._optaddarg = True
@staticmethod
def GetInitializer():
raise NotImplementedError("Entry does not provide an initializer")
def SetStruct(self, struct):
self._struct = struct
def LineCount(self):
assert self._line_count != -1
return self._line_count
def SetLineCount(self, number):
self._line_count = number
def Array(self):
return self._array
def Optional(self):
return self._optional
def Tag(self):
return self._tag
def Name(self):
return self._name
def Type(self):
return self._type
def MakeArray(self):
self._array = True
def MakeOptional(self):
self._optional = True
def Verify(self):
if self.Array() and not self._can_be_array:
raise RpcGenError(
'Entry "%s" cannot be created as an array '
"around line %d" % (self._name, self.LineCount())
)
if not self._struct:
raise RpcGenError(
'Entry "%s" does not know which struct it belongs to '
"around line %d" % (self._name, self.LineCount())
)
if self._optional and self._array:
raise RpcGenError(
'Entry "%s" has illegal combination of optional and array '
"around line %d" % (self._name, self.LineCount())
)
def GetTranslation(self, extradict=None):
if extradict is None:
extradict = {}
mapping = {
"parent_name": self._struct.Name(),
"name": self._name,
"ctype": self._ctype,
"refname": self._refname,
"optpointer": self._optpointer and "*" or "",
"optreference": self._optpointer and "&" or "",
"optaddarg": self._optaddarg and ", const %s value" % self._ctype or "",
}
for (k, v) in list(extradict.items()):
mapping[k] = v
return mapping
def GetVarName(self, var):
return "%(var)s->%(name)s_data" % self.GetTranslation({"var": var})
def GetVarLen(self, _var):
return "sizeof(%s)" % self._ctype
def GetFuncName(self):
return "%s_%s_get" % (self._struct.Name(), self._name)
def GetDeclaration(self, funcname):
code = [
"int %s(struct %s *, %s *);" % (funcname, self._struct.Name(), self._ctype)
]
return code
def CodeGet(self):
code = """int
%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, %(ctype)s *value)
{
if (msg->%(name)s_set != 1)
return (-1);
*value = msg->%(name)s_data;
return (0);
}"""
code = code % self.GetTranslation()
return code.split("\n")
def AssignFuncName(self):
return "%s_%s_assign" % (self._struct.Name(), self._name)
def AddFuncName(self):
return "%s_%s_add" % (self._struct.Name(), self._name)
def AssignDeclaration(self, funcname):
code = [
"int %s(struct %s *, const %s);"
% (funcname, self._struct.Name(), self._ctype)
]
return code
def CodeAssign(self):
code = [
"int",
"%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,"
" const %(ctype)s value)",
"{",
" msg->%(name)s_set = 1;",
" msg->%(name)s_data = value;",
" return (0);",
"}",
]
code = "\n".join(code)
code = code % self.GetTranslation()
return code.split("\n")
def CodeClear(self, structname):
code = ["%s->%s_set = 0;" % (structname, self.Name())]
return code
@staticmethod
def CodeComplete(_structname, _var_name):
return []
@staticmethod
def CodeFree(_name):
return []
def CodeBase(self):
code = ["%(parent_name)s_%(name)s_assign,", "%(parent_name)s_%(name)s_get,"]
if self.Array():
code.append("%(parent_name)s_%(name)s_add,")
code = "\n".join(code)
code = code % self.GetTranslation()
return code.split("\n")
class EntryBytes(Entry):
def __init__(self, ent_type, name, tag, length):
# Init base class
super(EntryBytes, self).__init__(ent_type, name, tag)
self._length = length
self._ctype = "ev_uint8_t"
@staticmethod
def GetInitializer():
return "NULL"
def GetVarLen(self, _var):
return "(%s)" % self._length
@staticmethod
def CodeArrayAdd(varname, _value):
# XXX: copy here
return ["%(varname)s = NULL;" % {"varname": varname}]
def GetDeclaration(self, funcname):
code = [
"int %s(struct %s *, %s **);" % (funcname, self._struct.Name(), self._ctype)
]
return code
def AssignDeclaration(self, funcname):
code = [
"int %s(struct %s *, const %s *);"
% (funcname, self._struct.Name(), self._ctype)
]
return code
def Declaration(self):
dcl = ["ev_uint8_t %s_data[%s];" % (self._name, self._length)]
return dcl
def CodeGet(self):
name = self._name
code = [
"int",
"%s_%s_get(struct %s *msg, %s **value)"
% (self._struct.Name(), name, self._struct.Name(), self._ctype),
"{",
" if (msg->%s_set != 1)" % name,
" return (-1);",
" *value = msg->%s_data;" % name,
" return (0);",
"}",
]
return code
def CodeAssign(self):
name = self._name
code = [
"int",
"%s_%s_assign(struct %s *msg, const %s *value)"
% (self._struct.Name(), name, self._struct.Name(), self._ctype),
"{",
" msg->%s_set = 1;" % name,
" memcpy(msg->%s_data, value, %s);" % (name, self._length),
" return (0);",
"}",
]
return code
def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
code = [
"if (evtag_unmarshal_fixed(%(buf)s, %(tag)s, "
"%(var)s, %(varlen)s) == -1) {",
' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
" return (-1);",
"}",
]
return TranslateList(
code,
self.GetTranslation(
{"var": var_name, "varlen": var_len, "buf": buf, "tag": tag_name}
),
)
@staticmethod
def CodeMarshal(buf, tag_name, var_name, var_len):
code = ["evtag_marshal(%s, %s, %s, %s);" % (buf, tag_name, var_name, var_len)]
return code
def CodeClear(self, structname):
code = [
"%s->%s_set = 0;" % (structname, self.Name()),
"memset(%s->%s_data, 0, sizeof(%s->%s_data));"
% (structname, self._name, structname, self._name),
]
return code
def CodeInitialize(self, name):
code = [
"memset(%s->%s_data, 0, sizeof(%s->%s_data));"
% (name, self._name, name, self._name)
]
return code
def Verify(self):
if not self._length:
raise RpcGenError(
'Entry "%s" needs a length '
"around line %d" % (self._name, self.LineCount())
)
super(EntryBytes, self).Verify()
class EntryInt(Entry):
def __init__(self, ent_type, name, tag, bits=32):
# Init base class
super(EntryInt, self).__init__(ent_type, name, tag)
self._can_be_array = True
if bits == 32:
self._ctype = "ev_uint32_t"
self._marshal_type = "int"
if bits == 64:
self._ctype = "ev_uint64_t"
self._marshal_type = "int64"
@staticmethod
def GetInitializer():
return "0"
@staticmethod
def CodeArrayFree(_var):
return []
@staticmethod
def CodeArrayAssign(varname, srcvar):
return ["%(varname)s = %(srcvar)s;" % {"varname": varname, "srcvar": srcvar}]
@staticmethod
def CodeArrayAdd(varname, value):
"""Returns a new entry of this type."""
return ["%(varname)s = %(value)s;" % {"varname": varname, "value": value}]
def CodeUnmarshal(self, buf, tag_name, var_name, _var_len):
code = [
"if (evtag_unmarshal_%(ma)s(%(buf)s, %(tag)s, &%(var)s) == -1) {",
' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
" return (-1);",
"}",
]
code = "\n".join(code) % self.GetTranslation(
{"ma": self._marshal_type, "buf": buf, "tag": tag_name, "var": var_name}
)
return code.split("\n")
def CodeMarshal(self, buf, tag_name, var_name, _var_len):
code = [
"evtag_marshal_%s(%s, %s, %s);"
% (self._marshal_type, buf, tag_name, var_name)
]
return code
def Declaration(self):
dcl = ["%s %s_data;" % (self._ctype, self._name)]
return dcl
def CodeInitialize(self, name):
code = ["%s->%s_data = 0;" % (name, self._name)]
return code
class EntryString(Entry):
def __init__(self, ent_type, name, tag):
# Init base class
super(EntryString, self).__init__(ent_type, name, tag)
self._can_be_array = True
self._ctype = "char *"
@staticmethod
def GetInitializer():
return "NULL"
@staticmethod
def CodeArrayFree(varname):
code = ["if (%(var)s != NULL) free(%(var)s);"]
return TranslateList(code, {"var": varname})
@staticmethod
def CodeArrayAssign(varname, srcvar):
code = [
"if (%(var)s != NULL)",
" free(%(var)s);",
"%(var)s = strdup(%(srcvar)s);",
"if (%(var)s == NULL) {",
' event_warnx("%%s: strdup", __func__);',
" return (-1);",
"}",
]
return TranslateList(code, {"var": varname, "srcvar": srcvar})
@staticmethod
def CodeArrayAdd(varname, value):
code = [
"if (%(value)s != NULL) {",
" %(var)s = strdup(%(value)s);",
" if (%(var)s == NULL) {",
" goto error;",
" }",
"} else {",
" %(var)s = NULL;",
"}",
]
return TranslateList(code, {"var": varname, "value": value})
def GetVarLen(self, var):
return "strlen(%s)" % self.GetVarName(var)
@staticmethod
def CodeMakeInitalize(varname):
return "%(varname)s = NULL;" % {"varname": varname}
def CodeAssign(self):
code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
const %(ctype)s value)
{
if (msg->%(name)s_data != NULL)
free(msg->%(name)s_data);
if ((msg->%(name)s_data = strdup(value)) == NULL)
return (-1);
msg->%(name)s_set = 1;
return (0);
}""" % (
self.GetTranslation()
)
return code.split("\n")
def CodeUnmarshal(self, buf, tag_name, var_name, _var_len):
code = [
"if (evtag_unmarshal_string(%(buf)s, %(tag)s, &%(var)s) == -1) {",
' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
" return (-1);",
"}",
]
code = "\n".join(code) % self.GetTranslation(
{"buf": buf, "tag": tag_name, "var": var_name}
)
return code.split("\n")
@staticmethod
def CodeMarshal(buf, tag_name, var_name, _var_len):
code = ["evtag_marshal_string(%s, %s, %s);" % (buf, tag_name, var_name)]
return code
def CodeClear(self, structname):
code = [
"if (%s->%s_set == 1) {" % (structname, self.Name()),
" free(%s->%s_data);" % (structname, self.Name()),
" %s->%s_data = NULL;" % (structname, self.Name()),
" %s->%s_set = 0;" % (structname, self.Name()),
"}",
]
return code
def CodeInitialize(self, name):
code = ["%s->%s_data = NULL;" % (name, self._name)]
return code
def CodeFree(self, name):
code = [
"if (%s->%s_data != NULL)" % (name, self._name),
" free (%s->%s_data);" % (name, self._name),
]
return code
def Declaration(self):
dcl = ["char *%s_data;" % self._name]
return dcl
class EntryStruct(Entry):
def __init__(self, ent_type, name, tag, refname):
# Init base class
super(EntryStruct, self).__init__(ent_type, name, tag)
self._optpointer = False
self._can_be_array = True
self._refname = refname
self._ctype = "struct %s*" % refname
self._optaddarg = False
def GetInitializer(self):
return "NULL"
def GetVarLen(self, _var):
return "-1"
def CodeArrayAdd(self, varname, _value):
code = [
"%(varname)s = %(refname)s_new();",
"if (%(varname)s == NULL)",
" goto error;",
]
return TranslateList(code, self.GetTranslation({"varname": varname}))
def CodeArrayFree(self, var):
code = ["%(refname)s_free(%(var)s);" % self.GetTranslation({"var": var})]
return code
def CodeArrayAssign(self, var, srcvar):
code = [
"int had_error = 0;",
"struct evbuffer *tmp = NULL;",
"%(refname)s_clear(%(var)s);",
"if ((tmp = evbuffer_new()) == NULL) {",
' event_warn("%%s: evbuffer_new()", __func__);',
" had_error = 1;",
" goto done;",
"}",
"%(refname)s_marshal(tmp, %(srcvar)s);",
"if (%(refname)s_unmarshal(%(var)s, tmp) == -1) {",
' event_warnx("%%s: %(refname)s_unmarshal", __func__);',
" had_error = 1;",
" goto done;",
"}",
"done:",
"if (tmp != NULL)",
" evbuffer_free(tmp);",
"if (had_error) {",
" %(refname)s_clear(%(var)s);",
" return (-1);",
"}",
]
return TranslateList(code, self.GetTranslation({"var": var, "srcvar": srcvar}))
def CodeGet(self):
name = self._name
code = [
"int",
"%s_%s_get(struct %s *msg, %s *value)"
% (self._struct.Name(), name, self._struct.Name(), self._ctype),
"{",
" if (msg->%s_set != 1) {" % name,
" msg->%s_data = %s_new();" % (name, self._refname),
" if (msg->%s_data == NULL)" % name,
" return (-1);",
" msg->%s_set = 1;" % name,
" }",
" *value = msg->%s_data;" % name,
" return (0);",
"}",
]
return code
def CodeAssign(self):
code = (
"""int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
const %(ctype)s value)
{
struct evbuffer *tmp = NULL;
if (msg->%(name)s_set) {
%(refname)s_clear(msg->%(name)s_data);
msg->%(name)s_set = 0;
} else {
msg->%(name)s_data = %(refname)s_new();
if (msg->%(name)s_data == NULL) {
event_warn("%%s: %(refname)s_new()", __func__);
goto error;
}
}
if ((tmp = evbuffer_new()) == NULL) {
event_warn("%%s: evbuffer_new()", __func__);
goto error;
}
%(refname)s_marshal(tmp, value);
if (%(refname)s_unmarshal(msg->%(name)s_data, tmp) == -1) {
event_warnx("%%s: %(refname)s_unmarshal", __func__);
goto error;
}
msg->%(name)s_set = 1;
evbuffer_free(tmp);
return (0);
error:
if (tmp != NULL)
evbuffer_free(tmp);
if (msg->%(name)s_data != NULL) {
%(refname)s_free(msg->%(name)s_data);
msg->%(name)s_data = NULL;
}
return (-1);
}"""
% self.GetTranslation()
)
return code.split("\n")
def CodeComplete(self, structname, var_name):
code = [
"if (%(structname)s->%(name)s_set && "
"%(refname)s_complete(%(var)s) == -1)",
" return (-1);",
]
return TranslateList(
code, self.GetTranslation({"structname": structname, "var": var_name})
)
def CodeUnmarshal(self, buf, tag_name, var_name, _var_len):
code = [
"%(var)s = %(refname)s_new();",
"if (%(var)s == NULL)",
" return (-1);",
"if (evtag_unmarshal_%(refname)s(%(buf)s, %(tag)s, ",
" %(var)s) == -1) {",
' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
" return (-1);",
"}",
]
code = "\n".join(code) % self.GetTranslation(
{"buf": buf, "tag": tag_name, "var": var_name}
)
return code.split("\n")
def CodeMarshal(self, buf, tag_name, var_name, _var_len):
code = [
"evtag_marshal_%s(%s, %s, %s);" % (self._refname, buf, tag_name, var_name)
]
return code
def CodeClear(self, structname):
code = [
"if (%s->%s_set == 1) {" % (structname, self.Name()),
" %s_free(%s->%s_data);" % (self._refname, structname, self.Name()),
" %s->%s_data = NULL;" % (structname, self.Name()),
" %s->%s_set = 0;" % (structname, self.Name()),
"}",
]
return code
def CodeInitialize(self, name):
code = ["%s->%s_data = NULL;" % (name, self._name)]
return code
def CodeFree(self, name):
code = [
"if (%s->%s_data != NULL)" % (name, self._name),
" %s_free(%s->%s_data);" % (self._refname, name, self._name),
]
return code
def Declaration(self):
dcl = ["%s %s_data;" % (self._ctype, self._name)]
return dcl
class EntryVarBytes(Entry):
def __init__(self, ent_type, name, tag):
# Init base class
super(EntryVarBytes, self).__init__(ent_type, name, tag)
self._ctype = "ev_uint8_t *"
@staticmethod
def GetInitializer():
return "NULL"
def GetVarLen(self, var):
return "%(var)s->%(name)s_length" % self.GetTranslation({"var": var})
@staticmethod
def CodeArrayAdd(varname, _value):
# xxx: copy
return ["%(varname)s = NULL;" % {"varname": varname}]
def GetDeclaration(self, funcname):
code = [
"int %s(struct %s *, %s *, ev_uint32_t *);"
% (funcname, self._struct.Name(), self._ctype)
]
return code
def AssignDeclaration(self, funcname):
code = [
"int %s(struct %s *, const %s, ev_uint32_t);"
% (funcname, self._struct.Name(), self._ctype)
]
return code
def CodeAssign(self):
name = self._name
code = [
"int",
"%s_%s_assign(struct %s *msg, "
"const %s value, ev_uint32_t len)"
% (self._struct.Name(), name, self._struct.Name(), self._ctype),
"{",
" if (msg->%s_data != NULL)" % name,
" free (msg->%s_data);" % name,
" msg->%s_data = malloc(len);" % name,
" if (msg->%s_data == NULL)" % name,
" return (-1);",
" msg->%s_set = 1;" % name,
" msg->%s_length = len;" % name,
" memcpy(msg->%s_data, value, len);" % name,
" return (0);",
"}",
]
return code
def CodeGet(self):
name = self._name
code = [
"int",
"%s_%s_get(struct %s *msg, %s *value, ev_uint32_t *plen)"
% (self._struct.Name(), name, self._struct.Name(), self._ctype),
"{",
" if (msg->%s_set != 1)" % name,
" return (-1);",
" *value = msg->%s_data;" % name,
" *plen = msg->%s_length;" % name,
" return (0);",
"}",
]
return code
def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
code = [
"if (evtag_payload_length(%(buf)s, &%(varlen)s) == -1)",
" return (-1);",
# We do not want DoS opportunities
"if (%(varlen)s > evbuffer_get_length(%(buf)s))",
" return (-1);",
"if ((%(var)s = malloc(%(varlen)s)) == NULL)",
" return (-1);",
"if (evtag_unmarshal_fixed(%(buf)s, %(tag)s, %(var)s, "
"%(varlen)s) == -1) {",
' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
" return (-1);",
"}",
]
code = "\n".join(code) % self.GetTranslation(
{"buf": buf, "tag": tag_name, "var": var_name, "varlen": var_len}
)
return code.split("\n")
@staticmethod
def CodeMarshal(buf, tag_name, var_name, var_len):
code = ["evtag_marshal(%s, %s, %s, %s);" % (buf, tag_name, var_name, var_len)]
return code
def CodeClear(self, structname):
code = [
"if (%s->%s_set == 1) {" % (structname, self.Name()),
" free (%s->%s_data);" % (structname, self.Name()),
" %s->%s_data = NULL;" % (structname, self.Name()),
" %s->%s_length = 0;" % (structname, self.Name()),
" %s->%s_set = 0;" % (structname, self.Name()),
"}",
]
return code
def CodeInitialize(self, name):
code = [
"%s->%s_data = NULL;" % (name, self._name),
"%s->%s_length = 0;" % (name, self._name),
]
return code
def CodeFree(self, name):
code = [
"if (%s->%s_data != NULL)" % (name, self._name),
" free(%s->%s_data);" % (name, self._name),
]
return code
def Declaration(self):
dcl = [
"ev_uint8_t *%s_data;" % self._name,
"ev_uint32_t %s_length;" % self._name,
]
return dcl
class EntryArray(Entry):
_index = None
def __init__(self, entry):
# Init base class
super(EntryArray, self).__init__(entry._type, entry._name, entry._tag)
self._entry = entry
self._refname = entry._refname
self._ctype = self._entry._ctype
self._optional = True
self._optpointer = self._entry._optpointer
self._optaddarg = self._entry._optaddarg
# provide a new function for accessing the variable name
def GetVarName(var_name):
return "%(var)s->%(name)s_data[%(index)s]" % self._entry.GetTranslation(
{"var": var_name, "index": self._index}
)
self._entry.GetVarName = GetVarName
def GetInitializer(self):
return "NULL"
def GetVarName(self, var):
return var
def GetVarLen(self, _var_name):
return "-1"
def GetDeclaration(self, funcname):
"""Allows direct access to elements of the array."""
code = [
"int %(funcname)s(struct %(parent_name)s *, int, %(ctype)s *);"
% self.GetTranslation({"funcname": funcname})
]
return code
def AssignDeclaration(self, funcname):
code = [
"int %s(struct %s *, int, const %s);"
% (funcname, self._struct.Name(), self._ctype)
]
return code
def AddDeclaration(self, funcname):
code = [
"%(ctype)s %(optpointer)s "
"%(funcname)s(struct %(parent_name)s *msg%(optaddarg)s);"
% self.GetTranslation({"funcname": funcname})
]
return code
def CodeGet(self):
code = """int
%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, int offset,
%(ctype)s *value)
{
if (!msg->%(name)s_set || offset < 0 || offset >= msg->%(name)s_length)
return (-1);
*value = msg->%(name)s_data[offset];
return (0);
}
""" % (
self.GetTranslation()
)
return code.splitlines()
def CodeAssign(self):
code = [
"int",
"%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg, int off,",
" const %(ctype)s value)",
"{",
" if (!msg->%(name)s_set || off < 0 || off >= msg->%(name)s_length)",
" return (-1);",
"",
" {",
]
code = TranslateList(code, self.GetTranslation())
codearrayassign = self._entry.CodeArrayAssign(
"msg->%(name)s_data[off]" % self.GetTranslation(), "value"
)
code += [" " + x for x in codearrayassign]
code += TranslateList([" }", " return (0);", "}"], self.GetTranslation())
return code
def CodeAdd(self):
codearrayadd = self._entry.CodeArrayAdd(
"msg->%(name)s_data[msg->%(name)s_length - 1]" % self.GetTranslation(),
"value",
)
code = [
"static int",
"%(parent_name)s_%(name)s_expand_to_hold_more("
"struct %(parent_name)s *msg)",
"{",
" int tobe_allocated = msg->%(name)s_num_allocated;",
" %(ctype)s* new_data = NULL;",
" tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;",
" new_data = (%(ctype)s*) realloc(msg->%(name)s_data,",
" tobe_allocated * sizeof(%(ctype)s));",
" if (new_data == NULL)",
" return -1;",
" msg->%(name)s_data = new_data;",
" msg->%(name)s_num_allocated = tobe_allocated;",
" return 0;",
"}",
"",
"%(ctype)s %(optpointer)s",
"%(parent_name)s_%(name)s_add(struct %(parent_name)s *msg%(optaddarg)s)",
"{",
" if (++msg->%(name)s_length >= msg->%(name)s_num_allocated) {",
" if (%(parent_name)s_%(name)s_expand_to_hold_more(msg)<0)",
" goto error;",
" }",
]
code = TranslateList(code, self.GetTranslation())
code += [" " + x for x in codearrayadd]
code += TranslateList(
[
" msg->%(name)s_set = 1;",
" return %(optreference)s(msg->%(name)s_data["
"msg->%(name)s_length - 1]);",
"error:",
" --msg->%(name)s_length;",
" return (NULL);",
"}",
],
self.GetTranslation(),
)
return code
def CodeComplete(self, structname, var_name):
self._index = "i"
tmp = self._entry.CodeComplete(structname, self._entry.GetVarName(var_name))
# skip the whole loop if there is nothing to check
if not tmp:
return []
translate = self.GetTranslation({"structname": structname})
code = [
"{",
" int i;",
" for (i = 0; i < %(structname)s->%(name)s_length; ++i) {",
]
code = TranslateList(code, translate)
code += [" " + x for x in tmp]
code += [" }", "}"]
return code
def CodeUnmarshal(self, buf, tag_name, var_name, _var_len):
translate = self.GetTranslation(
{
"var": var_name,
"buf": buf,
"tag": tag_name,
"init": self._entry.GetInitializer(),
}
)
code = [
"if (%(var)s->%(name)s_length >= %(var)s->%(name)s_num_allocated &&",
" %(parent_name)s_%(name)s_expand_to_hold_more(%(var)s) < 0) {",
' puts("HEY NOW");',
" return (-1);",
"}",
]
# the unmarshal code directly returns
code = TranslateList(code, translate)
self._index = "%(var)s->%(name)s_length" % translate
code += self._entry.CodeUnmarshal(
buf,
tag_name,
self._entry.GetVarName(var_name),
self._entry.GetVarLen(var_name),
)
code += ["++%(var)s->%(name)s_length;" % translate]
return code
def CodeMarshal(self, buf, tag_name, var_name, _var_len):
code = ["{", " int i;", " for (i = 0; i < %(var)s->%(name)s_length; ++i) {"]
self._index = "i"
code += self._entry.CodeMarshal(
buf,
tag_name,
self._entry.GetVarName(var_name),
self._entry.GetVarLen(var_name),
)
code += [" }", "}"]
code = "\n".join(code) % self.GetTranslation({"var": var_name})
return code.split("\n")
def CodeClear(self, structname):
translate = self.GetTranslation({"structname": structname})
codearrayfree = self._entry.CodeArrayFree(
"%(structname)s->%(name)s_data[i]"
% self.GetTranslation({"structname": structname})
)
code = ["if (%(structname)s->%(name)s_set == 1) {"]
if codearrayfree:
code += [
" int i;",
" for (i = 0; i < %(structname)s->%(name)s_length; ++i) {",
]
code = TranslateList(code, translate)
if codearrayfree:
code += [" " + x for x in codearrayfree]
code += [" }"]
code += TranslateList(
[
" free(%(structname)s->%(name)s_data);",
" %(structname)s->%(name)s_data = NULL;",
" %(structname)s->%(name)s_set = 0;",
" %(structname)s->%(name)s_length = 0;",
" %(structname)s->%(name)s_num_allocated = 0;",
"}",
],
translate,
)
return code
def CodeInitialize(self, name):
code = [
"%s->%s_data = NULL;" % (name, self._name),
"%s->%s_length = 0;" % (name, self._name),
"%s->%s_num_allocated = 0;" % (name, self._name),
]
return code
def CodeFree(self, structname):
code = self.CodeClear(structname)
code += TranslateList(
["free(%(structname)s->%(name)s_data);"],
self.GetTranslation({"structname": structname}),
)
return code
def Declaration(self):
dcl = [
"%s *%s_data;" % (self._ctype, self._name),
"int %s_length;" % self._name,
"int %s_num_allocated;" % self._name,
]
return dcl
def NormalizeLine(line):
line = CPPCOMMENT_RE.sub("", line)
line = line.strip()
line = WHITESPACE_RE.sub(" ", line)
return line
ENTRY_NAME_RE = re.compile(r"(?P<name>[^\[\]]+)(\[(?P<fixed_length>.*)\])?")
ENTRY_TAG_NUMBER_RE = re.compile(r"(0x)?\d+", re.I)
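# Illustrative sketch of the input accepted below (not from the original file;
# the struct and field names are hypothetical). Each entry has the shape
#     [optional|array] <type> <name>[<fixed_length>] = <tag>;
# inside a "struct <name> { ... }" block, for example:
#     struct msg {
#         string from_name = 1;
#         optional int attack_count = 2;
#         bytes checksum[16] = 3;
#         array string tags = 4;
#     }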
def ProcessOneEntry(factory, newstruct, entry):
optional = False
array = False
entry_type = ""
name = ""
tag = ""
tag_set = None
separator = ""
fixed_length = ""
for token in entry.split(" "):
if not entry_type:
if not optional and token == "optional":
optional = True
continue
if not array and token == "array":
array = True
continue
if not entry_type:
entry_type = token
continue
if not name:
res = ENTRY_NAME_RE.match(token)
if not res:
raise RpcGenError(
r"""Cannot parse name: "%s" around line %d""" % (entry, LINE_COUNT)
)
name = res.group("name")
fixed_length = res.group("fixed_length")
continue
if not separator:
separator = token
if separator != "=":
raise RpcGenError(
r'''Expected "=" after name "%s" got "%s"''' % (name, token)
)
continue
if not tag_set:
tag_set = 1
if not ENTRY_TAG_NUMBER_RE.match(token):
raise RpcGenError(r'''Expected tag number: "%s"''' % (entry))
tag = int(token, 0)
continue
raise RpcGenError(r'''Cannot parse "%s"''' % (entry))
if not tag_set:
raise RpcGenError(r'''Need tag number: "%s"''' % (entry))
# Create the right entry
if entry_type == "bytes":
if fixed_length:
newentry = factory.EntryBytes(entry_type, name, tag, fixed_length)
else:
newentry = factory.EntryVarBytes(entry_type, name, tag)
elif entry_type == "int" and not fixed_length:
newentry = factory.EntryInt(entry_type, name, tag)
elif entry_type == "int64" and not fixed_length:
newentry = factory.EntryInt(entry_type, name, tag, bits=64)
elif entry_type == "string" and not fixed_length:
newentry = factory.EntryString(entry_type, name, tag)
else:
res = STRUCT_REF_RE.match(entry_type)
if res:
# References another struct defined in our file
newentry = factory.EntryStruct(entry_type, name, tag, res.group("name"))
else:
raise RpcGenError('Bad type: "%s" in "%s"' % (entry_type, entry))
structs = []
if optional:
newentry.MakeOptional()
if array:
newentry.MakeArray()
newentry.SetStruct(newstruct)
newentry.SetLineCount(LINE_COUNT)
newentry.Verify()
if array:
# We need to encapsulate this entry into a struct
newentry = factory.EntryArray(newentry)
newentry.SetStruct(newstruct)
newentry.SetLineCount(LINE_COUNT)
newentry.MakeArray()
newstruct.AddEntry(newentry)
return structs
def ProcessStruct(factory, data):
tokens = data.split(" ")
# First three tokens are: 'struct' 'name' '{'
newstruct = factory.Struct(tokens[1])
inside = " ".join(tokens[3:-1])
tokens = inside.split(";")
structs = []
for entry in tokens:
entry = NormalizeLine(entry)
if not entry:
continue
# It's possible that new structs get defined in here
structs.extend(ProcessOneEntry(factory, newstruct, entry))
structs.append(newstruct)
return structs
C_COMMENT_START = "/*"
C_COMMENT_END = "*/"
C_COMMENT_START_RE = re.compile(re.escape(C_COMMENT_START))
C_COMMENT_END_RE = re.compile(re.escape(C_COMMENT_END))
C_COMMENT_START_SUB_RE = re.compile(r"%s.*$" % (re.escape(C_COMMENT_START)))
C_COMMENT_END_SUB_RE = re.compile(r"%s.*$" % (re.escape(C_COMMENT_END)))
C_MULTILINE_COMMENT_SUB_RE = re.compile(
r"%s.*?%s" % (re.escape(C_COMMENT_START), re.escape(C_COMMENT_END))
)
CPP_CONDITIONAL_BLOCK_RE = re.compile(r"#(if( |def)|endif)")
INCLUDE_RE = re.compile(r'#include (".+"|<.+>)')
def GetNextStruct(filep):
global CPP_DIRECT
global LINE_COUNT
got_struct = False
have_c_comment = False
data = ""
while True:
line = filep.readline()
if not line:
break
LINE_COUNT += 1
line = line[:-1]
if not have_c_comment and C_COMMENT_START_RE.search(line):
if C_MULTILINE_COMMENT_SUB_RE.search(line):
line = C_MULTILINE_COMMENT_SUB_RE.sub("", line)
else:
line = C_COMMENT_START_SUB_RE.sub("", line)
have_c_comment = True
if have_c_comment:
if not C_COMMENT_END_RE.search(line):
continue
have_c_comment = False
line = C_COMMENT_END_SUB_RE.sub("", line)
line = NormalizeLine(line)
if not line:
continue
if not got_struct:
if INCLUDE_RE.match(line):
CPP_DIRECT.append(line)
elif CPP_CONDITIONAL_BLOCK_RE.match(line):
CPP_DIRECT.append(line)
elif PREPROCESSOR_DEF_RE.match(line):
HEADER_DIRECT.append(line)
elif not STRUCT_DEF_RE.match(line):
raise RpcGenError("Missing struct on line %d: %s" % (LINE_COUNT, line))
else:
got_struct = True
data += line
continue
# We are inside the struct
tokens = line.split("}")
if len(tokens) == 1:
data += " " + line
continue
if tokens[1]:
raise RpcGenError("Trailing garbage after struct on line %d" % LINE_COUNT)
# We found the end of the struct
data += " %s}" % tokens[0]
break
    # Remove any comments that might be in there
data = re.sub(r"/\*.*\*/", "", data)
return data
def Parse(factory, filep):
"""
Parses the input file and returns C code and corresponding header file.
"""
entities = []
while 1:
# Just gets the whole struct nicely formatted
data = GetNextStruct(filep)
if not data:
break
entities.extend(ProcessStruct(factory, data))
return entities
class CCodeGenerator(object):
def __init__(self):
pass
@staticmethod
def GuardName(name):
# Use the complete provided path to the input file, with all
# non-identifier characters replaced with underscores, to
# reduce the chance of a collision between guard macros.
return "EVENT_RPCOUT_%s_" % (NONIDENT_RE.sub("_", name).upper())
def HeaderPreamble(self, name):
guard = self.GuardName(name)
pre = """
/*
* Automatically generated from %s
*/
#ifndef %s
#define %s
""" % (
name,
guard,
guard,
)
if HEADER_DIRECT:
for statement in HEADER_DIRECT:
pre += "%s\n" % statement
pre += "\n"
pre += """
#include <event2/util.h> /* for ev_uint*_t */
#include <event2/rpc.h>
"""
return pre
def HeaderPostamble(self, name):
guard = self.GuardName(name)
return "#endif /* %s */" % (guard)
@staticmethod
def BodyPreamble(name, header_file):
global _NAME
global _VERSION
slash = header_file.rfind("/")
if slash != -1:
header_file = header_file[slash + 1 :]
pre = """
/*
* Automatically generated from %(name)s
* by %(script_name)s/%(script_version)s. DO NOT EDIT THIS FILE.
*/
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <event2/event-config.h>
#include <event2/event.h>
#include <event2/buffer.h>
#include <event2/tag.h>
#if defined(EVENT__HAVE___func__)
# ifndef __func__
# define __func__ __func__
# endif
#elif defined(EVENT__HAVE___FUNCTION__)
# define __func__ __FUNCTION__
#else
# define __func__ __FILE__
#endif
""" % {
"name": name,
"script_name": _NAME,
"script_version": _VERSION,
}
for statement in CPP_DIRECT:
pre += "%s\n" % statement
pre += '\n#include "%s"\n\n' % header_file
pre += "void event_warn(const char *fmt, ...);\n"
pre += "void event_warnx(const char *fmt, ...);\n\n"
return pre
@staticmethod
def HeaderFilename(filename):
return ".".join(filename.split(".")[:-1]) + ".h"
@staticmethod
def CodeFilename(filename):
return ".".join(filename.split(".")[:-1]) + ".gen.c"
@staticmethod
def Struct(name):
return StructCCode(name)
@staticmethod
def EntryBytes(entry_type, name, tag, fixed_length):
return EntryBytes(entry_type, name, tag, fixed_length)
@staticmethod
def EntryVarBytes(entry_type, name, tag):
return EntryVarBytes(entry_type, name, tag)
@staticmethod
def EntryInt(entry_type, name, tag, bits=32):
return EntryInt(entry_type, name, tag, bits)
@staticmethod
def EntryString(entry_type, name, tag):
return EntryString(entry_type, name, tag)
@staticmethod
def EntryStruct(entry_type, name, tag, struct_name):
return EntryStruct(entry_type, name, tag, struct_name)
@staticmethod
def EntryArray(entry):
return EntryArray(entry)
class CommandLine(object):
def __init__(self, argv=None):
"""Initialize a command-line to launch event_rpcgen, as if
from a command-line with CommandLine(sys.argv). If you're
calling this directly, remember to provide a dummy value
for sys.argv[0]
"""
global QUIETLY
self.filename = None
self.header_file = None
self.impl_file = None
self.factory = CCodeGenerator()
parser = argparse.ArgumentParser(
usage="%(prog)s [options] rpc-file [[h-file] c-file]"
)
parser.add_argument("--quiet", action="store_true", default=False)
parser.add_argument("rpc_file", type=argparse.FileType("r"))
args, extra_args = parser.parse_known_args(args=argv)
QUIETLY = args.quiet
if extra_args:
if len(extra_args) == 1:
self.impl_file = extra_args[0].replace("\\", "/")
elif len(extra_args) == 2:
self.header_file = extra_args[0].replace("\\", "/")
self.impl_file = extra_args[1].replace("\\", "/")
else:
parser.error("Spurious arguments provided")
self.rpc_file = args.rpc_file
if not self.impl_file:
self.impl_file = self.factory.CodeFilename(self.rpc_file.name)
if not self.header_file:
self.header_file = self.factory.HeaderFilename(self.impl_file)
if not self.impl_file.endswith(".c"):
parser.error("can only generate C implementation files")
if not self.header_file.endswith(".h"):
parser.error("can only generate C header files")
def run(self):
filename = self.rpc_file.name
header_file = self.header_file
impl_file = self.impl_file
factory = self.factory
declare('Reading "%s"' % filename)
with self.rpc_file:
entities = Parse(factory, self.rpc_file)
declare('... creating "%s"' % header_file)
with open(header_file, "w") as header_fp:
header_fp.write(factory.HeaderPreamble(filename))
# Create forward declarations: allows other structs to reference
# each other
for entry in entities:
entry.PrintForwardDeclaration(header_fp)
header_fp.write("\n")
for entry in entities:
entry.PrintTags(header_fp)
entry.PrintDeclaration(header_fp)
header_fp.write(factory.HeaderPostamble(filename))
declare('... creating "%s"' % impl_file)
with open(impl_file, "w") as impl_fp:
impl_fp.write(factory.BodyPreamble(filename, header_file))
for entry in entities:
entry.PrintCode(impl_fp)
def main(argv=None):
try:
CommandLine(argv=argv).run()
return 0
except RpcGenError as e:
        sys.stderr.write(str(e))
except EnvironmentError as e:
if e.filename and e.strerror:
sys.stderr.write("%s: %s" % (e.filename, e.strerror))
elif e.strerror:
sys.stderr.write(e.strerror)
else:
raise
return 1
if __name__ == "__main__":
sys.exit(main(argv=sys.argv[1:]))
|
[
"argparse.ArgumentParser",
"re.escape",
"sys.stderr.write",
"re.sub",
"argparse.FileType",
"re.compile"
] |
[((760, 783), 're.compile', 're.compile', (['"""\\\\/\\\\/.*$"""'], {}), "('\\\\/\\\\/.*$')\n", (770, 783), False, 'import re\n'), ((797, 814), 're.compile', 're.compile', (['"""\\\\W"""'], {}), "('\\\\W')\n", (807, 814), False, 'import re\n'), ((837, 859), 're.compile', 're.compile', (['"""^#define"""'], {}), "('^#define')\n", (847, 859), False, 'import re\n'), ((877, 937), 're.compile', 're.compile', (['"""^struct\\\\[(?P<name>[a-zA-Z_][a-zA-Z0-9_]*)\\\\]$"""'], {}), "('^struct\\\\[(?P<name>[a-zA-Z_][a-zA-Z0-9_]*)\\\\]$')\n", (887, 937), False, 'import re\n'), ((953, 1002), 're.compile', 're.compile', (['"""^struct +[a-zA-Z_][a-zA-Z0-9_]* *{$"""'], {}), "('^struct +[a-zA-Z_][a-zA-Z0-9_]* *{$')\n", (963, 1002), False, 'import re\n'), ((1020, 1038), 're.compile', 're.compile', (['"""\\\\s+"""'], {}), "('\\\\s+')\n", (1030, 1038), False, 'import re\n'), ((42246, 42309), 're.compile', 're.compile', (['"""(?P<name>[^\\\\[\\\\]]+)(\\\\[(?P<fixed_length>.*)\\\\])?"""'], {}), "('(?P<name>[^\\\\[\\\\]]+)(\\\\[(?P<fixed_length>.*)\\\\])?')\n", (42256, 42309), False, 'import re\n'), ((42329, 42358), 're.compile', 're.compile', (['"""(0x)?\\\\d+"""', 're.I'], {}), "('(0x)?\\\\d+', re.I)\n", (42339, 42358), False, 'import re\n'), ((46260, 46292), 're.compile', 're.compile', (['"""#(if( |def)|endif)"""'], {}), "('#(if( |def)|endif)')\n", (46270, 46292), False, 'import re\n'), ((46307, 46341), 're.compile', 're.compile', (['"""#include (".+"|<.+>)"""'], {}), '(\'#include (".+"|<.+>)\')\n', (46317, 46341), False, 'import re\n'), ((45882, 45908), 're.escape', 're.escape', (['C_COMMENT_START'], {}), '(C_COMMENT_START)\n', (45891, 45908), False, 'import re\n'), ((45940, 45964), 're.escape', 're.escape', (['C_COMMENT_END'], {}), '(C_COMMENT_END)\n', (45949, 45964), False, 'import re\n'), ((48140, 48170), 're.sub', 're.sub', (['"""/\\\\*.*\\\\*/"""', '""""""', 'data'], {}), "('/\\\\*.*\\\\*/', '', data)\n", (48146, 48170), False, 'import re\n'), ((46015, 46041), 're.escape', 're.escape', (['C_COMMENT_START'], {}), '(C_COMMENT_START)\n', (46024, 46041), False, 'import re\n'), ((46090, 46114), 're.escape', 're.escape', (['C_COMMENT_END'], {}), '(C_COMMENT_END)\n', (46099, 46114), False, 'import re\n'), ((52132, 52210), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""%(prog)s [options] rpc-file [[h-file] c-file]"""'}), "(usage='%(prog)s [options] rpc-file [[h-file] c-file]')\n", (52155, 52210), False, 'import argparse\n'), ((46177, 46203), 're.escape', 're.escape', (['C_COMMENT_START'], {}), '(C_COMMENT_START)\n', (46186, 46203), False, 'import re\n'), ((46205, 46229), 're.escape', 're.escape', (['C_COMMENT_END'], {}), '(C_COMMENT_END)\n', (46214, 46229), False, 'import re\n'), ((54568, 54587), 'sys.stderr.write', 'sys.stderr.write', (['e'], {}), '(e)\n', (54584, 54587), False, 'import sys\n'), ((52353, 52375), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (52370, 52375), False, 'import argparse\n'), ((54672, 54725), 'sys.stderr.write', 'sys.stderr.write', (["('%s: %s' % (e.filename, e.strerror))"], {}), "('%s: %s' % (e.filename, e.strerror))\n", (54688, 54725), False, 'import sys\n'), ((54763, 54791), 'sys.stderr.write', 'sys.stderr.write', (['e.strerror'], {}), '(e.strerror)\n', (54779, 54791), False, 'import sys\n')]
|