text string | size int64 | token_count int64 |
|---|---|---|
'''
Crie uma classe Agenda que pode armazenar 10 pessoas e seja capaz de realizar as
seguintes operações:
* void armazenaPessoa(String nome, int idade, float altura);
* void removePessoa(String nome);
* int buscaPessoa(String nome); // informa em que posição da agenda está a pessoa
* void imprimeAgenda(); // imprime os dados de todas as pessoas da agenda
* void imprimePessoa(int i); // imprime os dados da pessoa que está na posição 'i' da agenda.
'''
class Pessoa:
    """Value object holding one contact's data (name, age, height).

    The attributes are private (name-mangled to ``_Pessoa__nome`` etc.);
    read-only properties are provided so callers do not have to rely on
    the mangled names.
    """

    def __init__(self, nome, idade, altura):
        self.__nome = nome      # person's name (str)
        self.__idade = idade    # age in years (int)
        self.__altura = altura  # height in metres (float)

    @property
    def nome(self):
        """Return the person's name."""
        return self.__nome

    @property
    def idade(self):
        """Return the person's age."""
        return self.__idade

    @property
    def altura(self):
        """Return the person's height."""
        return self.__altura
class Agenda:
    """Address book that stores up to ``CAPACIDADE`` ``Pessoa`` objects.

    Fixes over the original version:
    * the person list is a per-instance attribute (a class-level list
      would be shared by every ``Agenda`` instance);
    * removal no longer deletes from the list while iterating over it;
    * the search handles an empty agenda and a match at the last slot;
    * the 10-person capacity required by the exercise is enforced.
    """

    CAPACIDADE = 10  # maximum number of people, per the exercise statement

    def __init__(self):
        # Per-instance storage so independent agendas do not share contacts.
        self.agenda = []

    def armazena_pessoa(self, pessoa):
        """Store *pessoa*, refusing (with a message) when the agenda is full."""
        if len(self.agenda) >= self.CAPACIDADE:
            print('A Agenda está cheia.')
            return
        self.agenda.append(pessoa)

    def imprime_agenda(self):
        """Print the data of every stored person."""
        print('*** IMPRIMINDO OS DADOS DA AGENDA ***')
        for pessoa in self.agenda:
            print(f'Nome: {pessoa._Pessoa__nome}| Idade: {pessoa._Pessoa__idade} |altura: {pessoa._Pessoa__altura}|')
        print()

    def imprime_pessoa(self, index):
        """Print the data of the person stored at position *index*.

        Out-of-range indexes print nothing, matching the original behaviour.
        """
        if 0 <= index < len(self.agenda):
            pessoa = self.agenda[index]
            print('*** IMPRIMINDO OS DADOS DA PESSOA ***')
            print(f'Dados da pessoa na posição {index}: Nome: {pessoa._Pessoa__nome}| Idade: {pessoa._Pessoa__idade} |altura: {pessoa._Pessoa__altura}|')
            print()

    def busca_pessoa(self, nome):
        """Print the position(s) where *nome* is stored, or a not-found message."""
        posicoes = [p for p, pessoa in enumerate(self.agenda)
                    if pessoa._Pessoa__nome == nome]
        if posicoes:
            print('*** INFORMANDO A POSIÇÃO DA AGENDA. ***')
            for p in posicoes:
                print(f'A posição da Agenda que a/o {nome} se encontra é na {p} posição.')
        else:
            print(f'O {nome} não existe na Agenda.')
        print()

    def remover_pessoa(self, nome):
        """Remove every person called *nome*, then print the remaining agenda."""
        print('*** REMOVENDO A PESSOA DA AGENDA. ***')
        restantes = []
        for pessoa in self.agenda:
            if pessoa._Pessoa__nome == nome:
                print(f'Removendo o contato {pessoa._Pessoa__nome}')
            else:
                restantes.append(pessoa)
        # Rebuild instead of del-while-iterating, which skipped elements.
        self.agenda = restantes
        for pessoa in self.agenda:
            print(f'Nome: {pessoa._Pessoa__nome}| Idade: {pessoa._Pessoa__idade} |altura: {pessoa._Pessoa__altura}|')
        print()
# --- demo: exercise the Agenda operations ---

# Pessoa instances
user1 = Pessoa('Bob Jack', 32, 1.85)
user2 = Pessoa('Billy Joe', 39, 1.89)
user3 = Pessoa('Ayn Rand', 69, 1.67)
user4 = Pessoa('Thomas Sowell', 85, 1.99)
user5 = Pessoa('Hermione Granger', 29, 1.65)

# Agenda instance
agenda = Agenda()

# Store the people.  Call the methods on the instance directly instead of
# the unidiomatic Agenda.method(instance, ...) form.
agenda.armazena_pessoa(user1)
agenda.armazena_pessoa(user2)
agenda.armazena_pessoa(user3)
agenda.armazena_pessoa(user4)
agenda.armazena_pessoa(user5)

# Exercise the remaining operations.
agenda.imprime_agenda()
agenda.remover_pessoa('Bob Jack')            # remove a contact
agenda.imprime_pessoa(2)                     # print the person at a position
agenda.busca_pessoa('Hermione Granger')      # name that exists
agenda.busca_pessoa('Babu Rangel')           # name that does not exist
| 2,989 | 1,156 |
import getopt, sys, os
import csv
import pandas as pd
import locale
from locale import atof  # NOTE(review): atof appears unused here — may be used elsewhere
# Use the user's default LC_NUMERIC so locale-aware number parsing honours
# the local thousands/decimal separators.
locale.setlocale(locale.LC_NUMERIC, '')
def main():
    """Parse command-line options and return the data directory path.

    Options:
        -h / --help      print usage and exit
        -v               enable verbose mode (currently unused)
        -o / --output    output target (currently unused)
        -f / --filepath  directory containing the raw csv data
                         (defaults to the current working directory)

    Returns:
        str: the chosen filepath.
    """
    try:
        # Fixes to the original option spec:
        # * "-v" is a flag, so it must be "v", not "v:" (":" demands an argument);
        # * the long form of -f needs a trailing "=" to accept a value.
        opts, args = getopt.getopt(sys.argv[1:], "ho:vf:", ["help", "output=", "filepath="])
    except getopt.GetoptError as err:
        print(err)  # tell the user what was wrong instead of swallowing it
        usage()
        sys.exit(2)
    output = None
    verbose = False
    filepath = os.getcwd()
    for o, a in opts:
        if o == "-v":
            verbose = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-o", "--output"):
            output = a
        elif o in ("-f", "--filepath"):
            filepath = a
        else:
            assert False, "unhandled option"
    return filepath
def usage():
    """Print a short usage message for this script."""
    message = (
        "=======================\n"
        "please input filepath\n"
        "ex: python get_feature.py -f ./data/20180427 \n"
        "======================="
    )
    print(message)
def get_feature_data(filepath, encode=None, **argv):
    """
    Read <filepath>/tetfp.csv and write one csv per stock code into
    ./data/<date>_process, keeping only the columns flagged True.

    input:
        filepath: directory that contains tetfp.csv
        encode: text encoding of the csv (e.g. "big5")
    argv:
        Code,Date,CB,Open,High,Low,Close,Volumn
        True or False  -> whether to keep that column in the output
    """
    # Columns to keep: keys whose value is truthy.  (The original rebuilt
    # this list once per key inside a redundant loop.)
    params = [name for name, keep in argv.items() if keep]
    # abs filepath
    filepath = os.path.abspath(filepath)
    get_date = os.path.basename(filepath)
    tetfp_file = os.path.join(filepath, "tetfp.csv")
    save_process_path = os.path.join(os.path.abspath("./data/" + get_date + "_process"))
    with open(tetfp_file, encoding=encode) as file:
        rows = csv.reader(file, delimiter=",")
        # Strip whitespace from non-empty cells; empty cells pass through
        # unchanged (same behaviour as the original loop).
        data = [[cell.strip() if cell else cell for cell in row] for row in rows]
    df = pd.DataFrame(data=data[1:], columns=change_columns(*data[0]))
    df = df.dropna()
    # Volume strings contain locale separators; remove them, then cast.
    df["Volumn"] = pd.to_numeric(df["Volumn"].replace(r'\.', '', regex=True)
                                 .replace(',', '', regex=True)
                                 .astype(int))
    types = set(df.loc[:, "Code"])
    if not os.path.exists(save_process_path):
        os.mkdir(save_process_path)
    for t in types:
        str_t = str(int(t))
        t_types = df.loc[df['Code'] == t][params]
        t_types.to_csv(os.path.join(save_process_path, get_date + "_" + str_t + ".csv"), index=False)
def change_columns(*header):
    """
    replace header to English
    """
    translations = {
        "代碼": "Code",
        "日期": "Date",
        "中文簡稱": "CB",
        "開盤價(元)": "Open",
        "最高價(元)": "High",
        "最低價(元)": "Low",
        "收盤價(元)": "Close",
        "成交張數(張)": "Volumn",
    }
    english = []
    for name in header:
        english.append(translations[name])
    return english
if __name__ == "__main__":
"""
choose data output column
"""
choose = {
"Code":True,
"Date":True,
"CB": False,
"Open": True,
"High": True,
"Low": True,
"Close": True,
"Volumn": True
}
filepath = main()
get_feature_data(filepath, "big5", **choose)
| 3,148 | 1,075 |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as tkr
import os
import openmm.unit as unit
output_dir = "/home/mbowley/ANI-Peptides/outputs/equilibration_aaa_capped_amber_121250_310322"
STATE_DATA_FN = "equilibration_state_data.csv"
# Make some graphs
report = pd.read_csv(os.path.join(output_dir, STATE_DATA_FN))
report = report.melt()
with sns.plotting_context('paper'):
g = sns.FacetGrid(data=report, row='variable', sharey=False )
g.map(plt.plot, 'value')
# format the labels with f-strings
for ax in g.axes.flat:
ax.xaxis.set_major_formatter(tkr.FuncFormatter(lambda x, p: f'{(x * 10*unit.femtoseconds).value_in_unit(unit.picoseconds):.1f}ns'))
plt.savefig(os.path.join(output_dir, 'graphs.png'), bbox_inches='tight')
| 811 | 320 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Methods to execute requests to Netflix API
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
from functools import wraps
import resources.lib.common as common
import resources.lib.kodi.ui as ui
from resources.lib.common import cache_utils
from resources.lib.database.db_utils import TABLE_SESSION
from resources.lib.globals import g
from .exceptions import APIError, MissingCredentialsError, MetadataNotAvailable, CacheMiss
from .paths import EPISODES_PARTIAL_PATHS, ART_PARTIAL_PATHS, build_paths
def catch_api_errors(func):
    """Decorator that catches API errors and displays a notification"""
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except APIError as exc:
            # String 30118 is the localized generic API-error message.
            ui.show_notification(common.get_local_string(30118).format(exc))
    return wrapper
def logout():
    """Logout of the current account"""
    # Delegates the actual logout to the service process, passing the
    # Netflix base url.
    common.make_call('logout', g.BASE_URL)
def login(ask_credentials=True):
    """Perform a login"""
    try:
        if ask_credentials:
            ui.ask_credentials()
        login_validated = common.make_call('login')
        if login_validated:
            return True
        # Login not validated
        # ui.show_notification(common.get_local_string(30009))
        return False
    except MissingCredentialsError:
        # User aborted the dialog or left a field empty
        ui.show_notification(common.get_local_string(30112))
        raise
def update_lolomo_context(context_name):
    """Update the lolomo list by context"""
    # Look up the lolomo root and the index/id stored for this context
    # in the session table of the local database.
    lolomo_root = g.LOCAL_DB.get_value('lolomo_root_id', '', TABLE_SESSION)
    context_index = g.LOCAL_DB.get_value('lolomo_{}_index'.format(context_name.lower()), '', TABLE_SESSION)
    context_id = g.LOCAL_DB.get_value('lolomo_{}_id'.format(context_name.lower()), '', TABLE_SESSION)
    # Without a stored index there is nothing to refresh.
    if not context_index:
        return
    path = [['lolomos', lolomo_root, 'refreshListByContext']]
    # The fourth parameter is like a request-id, but it doesn't seem to match
    # serverDefs/date/requestId of reactContext (g.LOCAL_DB.get_value('request_id', table=TABLE_SESSION))
    # nor the request_id of the video event request.
    # It seems related to renoMessageId of the logblob, but this is unconfirmed
    # (the debug session crashed before the source could be traced).
    # This request can also be made with the fourth parameter empty, but then
    # the continueWatching list of lolomo is not updated — strangely without
    # any error being returned.
    params = [common.enclose_quotes(context_id),
              context_index,
              common.enclose_quotes(context_name),
              '']
    # path_suffixs = [
    #    [['trackIds', 'context', 'length', 'genreId', 'videoId', 'displayName', 'isTallRow', 'isShowAsARow',
    #      'impressionToken', 'showAsARow', 'id', 'requestId']],
    #    [{'from': 0, 'to': 100}, 'reference', 'summary'],
    #    [{'from': 0, 'to': 100}, 'reference', 'title'],
    #    [{'from': 0, 'to': 100}, 'reference', 'titleMaturity'],
    #    [{'from': 0, 'to': 100}, 'reference', 'userRating'],
    #    [{'from': 0, 'to': 100}, 'reference', 'userRatingRequestId'],
    #    [{'from': 0, 'to': 100}, 'reference', 'boxarts', '_342x192', 'jpg'],
    #    [{'from': 0, 'to': 100}, 'reference', 'promoVideo']
    # ]
    callargs = {
        'callpaths': path,
        'params': params,
        # 'path_suffixs': path_suffixs
    }
    try:
        response = common.make_http_call('callpath_request', callargs)
        common.debug('refreshListByContext response: {}', response)
    except Exception:  # pylint: disable=broad-except
        # Reason unknown: this call sometimes keeps returning error 401,
        # making it impossible to update the bookmark position.
        # Only surface the failure when verbose debugging is enabled.
        if not common.is_debug_verbose():
            return
        ui.show_notification(title=common.get_local_string(30105),
                             msg='An error prevented the update the lolomo context on netflix',
                             time=10000)
def update_videoid_bookmark(video_id):
    """Update the videoid bookmark position"""
    # You can check that this works through the official android app:
    # the watched status bar of the video should be updated.
    call_args = {
        'callpaths': [['refreshVideoCurrentPositions']],
        'params': ['[{}]'.format(video_id), '[]'],
    }
    try:
        response = common.make_http_call('callpath_request', call_args)
        common.debug('refreshVideoCurrentPositions response: {}', response)
    except Exception:  # pylint: disable=broad-except
        # Reason unknown: this call sometimes keeps returning error 401,
        # making it impossible to update the bookmark position.
        ui.show_notification(title=common.get_local_string(30105),
                             msg='An error prevented the update the status watched on netflix',
                             time=10000)
@common.time_execution(immediate=False)
def get_video_raw_data(videoids, custom_partial_path=None):  # Do not apply cache to this method
    """Retrieve raw data for specified video id's"""
    ids = [int(videoid.value) for videoid in videoids]
    common.debug('Requesting video raw data for {}', ids)
    if custom_partial_path:
        paths = build_paths(['videos', ids], custom_partial_path)
    else:
        paths = build_paths(['videos', ids], EPISODES_PARTIAL_PATHS)
        # For episodes also fetch the parent show's art and title.
        if videoids[0].mediatype == common.VideoId.EPISODE:
            paths.extend(build_paths(['videos', int(videoids[0].tvshowid)], ART_PARTIAL_PATHS + [['title']]))
    return common.make_call('path_request', paths)
@catch_api_errors
@common.time_execution(immediate=False)
def rate(videoid, rating):
    """Rate a video on Netflix"""
    common.debug('Rating {} as {}', videoid.value, rating)
    # Kodi uses 0-10; Netflix expects 0-5 in 0.5 steps, so clamp then halve.
    netflix_rating = min(10, max(0, rating)) / 2
    payload = {'titleId': int(videoid.value),
               'rating': netflix_rating}
    common.make_call('post',
                     {'endpoint': 'set_video_rating',
                      'data': payload})
    ui.show_notification(common.get_local_string(30127).format(netflix_rating * 2))
@catch_api_errors
@common.time_execution(immediate=False)
def rate_thumb(videoid, rating, track_id_jaw):
    """Thumb-rate a video on Netflix.

    :param videoid: VideoId of the title to rate
    :param rating: thumb rating index (also selects the notification text)
    :param track_id_jaw: tracking id required by the endpoint
    """
    common.debug('Thumb rating {} as {}', videoid.value, rating)
    event_uuid = common.get_random_uuid()
    response = common.make_call(
        'post',
        {'endpoint': 'set_thumb_rating',
         'data': {
             'eventUuid': event_uuid,
             'titleId': int(videoid.value),
             'trackId': track_id_jaw,
             'rating': rating,
         }})
    if response.get('status', '') == 'success':
        ui.show_notification(common.get_local_string(30045).split('|')[rating])
    else:
        common.error('Rating thumb error, response detail: {}', response)
        # Bug fix: the status must be substituted into the '{}' placeholder;
        # the original concatenated it after the literal braces.
        ui.show_error_info('Rating error',
                           'Error type: {}'.format(response.get('status', '--')),
                           True, True)
@catch_api_errors
@common.time_execution(immediate=False)
def update_my_list(videoid, operation, params):
    """Call API to update my list with either add or remove action"""
    common.debug('My List: {} {}', operation, videoid)
    request_data = {'operation': operation,
                    'videoId': videoid.value}
    common.make_call('post',
                     {'endpoint': 'update_my_list',
                      'data': request_data})
    ui.show_notification(common.get_local_string(30119))
    # Keep the local cache in sync so the list page does not need a reload.
    _update_mylist_cache(videoid, operation, params)
def _update_mylist_cache(videoid, operation, params):
    """Update the my list cache to speeding up page load"""
    # Avoids making a new request to the server to request the entire list updated
    perpetual_range_start = params.get('perpetual_range_start')
    mylist_identifier = 'mylist'
    # Paginated lists are cached under a per-page identifier.
    if perpetual_range_start and perpetual_range_start != 'None':
        mylist_identifier += '_' + perpetual_range_start
    if operation == 'remove':
        # Drop the video from the cached (sorted) list page, if cached.
        try:
            video_list_sorted_data = g.CACHE.get(cache_utils.CACHE_MYLIST, mylist_identifier)
            del video_list_sorted_data.videos[videoid.value]
            g.CACHE.add(cache_utils.CACHE_MYLIST, mylist_identifier, video_list_sorted_data)
        except CacheMiss:
            pass
        # Also drop it from the cached plain list of my-list video ids.
        try:
            my_list_videoids = g.CACHE.get(cache_utils.CACHE_MYLIST, 'my_list_items')
            my_list_videoids.remove(videoid)
            g.CACHE.add(cache_utils.CACHE_MYLIST, 'my_list_items', my_list_videoids)
        except CacheMiss:
            pass
    else:
        # Add operation: append to both cache entries (best-effort; a
        # CacheMiss simply means there is nothing cached to update).
        try:
            common.make_call('add_videoids_to_video_list_cache', {'cache_bucket': cache_utils.CACHE_MYLIST,
                                                                  'cache_identifier': mylist_identifier,
                                                                  'video_ids': [videoid.value]})
        except CacheMiss:
            pass
        try:
            my_list_videoids = g.CACHE.get(cache_utils.CACHE_MYLIST, 'my_list_items')
            my_list_videoids.append(videoid)
            g.CACHE.add(cache_utils.CACHE_MYLIST, 'my_list_items', my_list_videoids)
        except CacheMiss:
            pass
@common.time_execution(immediate=False)
def get_metadata(videoid, refresh=False):
    """Retrieve additional metadata for the given VideoId.

    Returns a tuple; for episodes it is (episode, season, show) metadata,
    otherwise (show/movie metadata, None).
    """
    # Delete the cache if we need to refresh the all metadata
    if refresh:
        g.CACHE.delete(cache_utils.CACHE_METADATA, videoid.value)
    metadata_data = {}, None
    if videoid.mediatype not in [common.VideoId.EPISODE, common.VideoId.SEASON]:
        # Movies / shows: fetch metadata for the id itself.
        metadata_data = _metadata(videoid), None
    elif videoid.mediatype == common.VideoId.SEASON:
        # Seasons have no own metadata endpoint; use the parent show's.
        metadata_data = _metadata(videoid.derive_parent(None)), None
    else:
        try:
            metadata_data = _episode_metadata(videoid)
        except KeyError as exc:
            # Episode metadata may not exist if its a new episode and cached
            # data is outdated. In this case, delete the cache entry and
            # try again safely (if it doesn't exist this time, there is no
            # metadata for the episode, so we assign an empty dict).
            common.debug('{}, refreshing cache', exc)
            g.CACHE.delete(cache_utils.CACHE_METADATA, videoid.tvshowid)
            try:
                metadata_data = _episode_metadata(videoid)
            except KeyError as exc:
                common.error(exc)
    return metadata_data
@common.time_execution(immediate=False)
def _episode_metadata(videoid):
    """Return the (episode, season, show) metadata tuple for *videoid*.

    Raises KeyError when the episode is not present in the (possibly
    stale) cached show metadata — see get_metadata for the retry logic.
    """
    show_metadata = _metadata(videoid)
    episode_metadata, season_metadata = common.find_episode_metadata(videoid, show_metadata)
    return episode_metadata, season_metadata, show_metadata
@common.time_execution(immediate=False)
@cache_utils.cache_output(cache_utils.CACHE_METADATA, identify_from_kwarg_name='video_id')
def _metadata(video_id):
    """Retrieve additional metadata for a video.

    This is a separate method from metadata(videoid) to work around caching
    issues when new episodes are added to a show by Netflix.
    """
    import time
    common.debug('Requesting metadata for {}', video_id)
    # Always use params 'movieid' to all videoid identifier
    # Use the HTTP call directly when running inside the service process.
    ipc_call = common.make_http_call if g.IS_SERVICE else common.make_call
    metadata_data = ipc_call(
        'get',
        {
            'endpoint': 'metadata',
            # '_' timestamp acts as a cache-buster on the request.
            'params': {'movieid': video_id.value,
                       '_': int(time.time())}
        })
    if not metadata_data:
        # This return empty
        # - if the metadata is no longer available
        # - if it has been exported a tv show/movie from a specific language profile that is not
        #   available using profiles with other languages
        raise MetadataNotAvailable
    return metadata_data['video']
@common.time_execution(immediate=False)
def get_parental_control_data(password):
    """Get the parental control data"""
    # The account password is required by Netflix to access these settings.
    return common.make_call('parental_control_data', {'password': password})
@common.time_execution(immediate=False)
def set_parental_control_data(data):
    """Set the parental control data"""
    try:
        # Build the payload inside the try so a missing key in *data*
        # is reported as a failure rather than propagating.
        payload = {'action': 'update',
                   'authURL': data['token'],
                   'experience': data['experience'],
                   'guid': data['guid'],
                   'maturity': data['maturity']}
        common.make_call('post',
                         {'endpoint': 'content_restrictions',
                          'data': payload})
        return True
    except Exception as exc:  # pylint: disable=broad-except
        common.error('Api call profile_hub raised an error: {}', exc)
        return False
@common.time_execution(immediate=False)
def verify_pin(pin):
    """Send adult PIN to Netflix and verify it."""
    request = {'endpoint': 'pin_service',
               'data': {'pin': pin}}
    try:
        response = common.make_call('post', request)
        return response.get('success', False)
    except Exception:  # pylint: disable=broad-except
        # Any failure (network, malformed response) counts as not verified.
        return False
@common.time_execution(immediate=False)
def verify_profile_lock(guid, pin):
    """Send profile PIN to Netflix and verify it."""
    request = {'endpoint': 'profile_lock',
               'data': {'pin': pin,
                        'action': 'verify',
                        'guid': guid}}
    try:
        response = common.make_call('post', request)
        return response.get('success', False)
    except Exception:  # pylint: disable=broad-except
        # Any failure (network, malformed response) counts as not verified.
        return False
| 13,883 | 4,225 |
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.common import ExtractedFeatures
from cybox.test import EntityTestCase
# Need to do this so the binding class is registered.
import cybox.bindings.cybox_common
from cybox.bindings.address_object import AddressObjectType
# NOTE(review): presumably required so the "AddressObjectType" xsi:type used
# in the test dictionary below can be resolved via cybox_common — confirm.
setattr(cybox.bindings.cybox_common, "AddressObjectType", AddressObjectType)
class TestExtractedFeatures(EntityTestCase, unittest.TestCase):
    """Round-trip test for ExtractedFeatures via EntityTestCase's
    dictionary-driven machinery."""
    klass = ExtractedFeatures
    # Full dictionary representation exercised by EntityTestCase.
    _full_dict = {
        'strings': [
            {'encoding': u"ASCII", 'string_value': u"A String", 'length': 8},
            {'encoding': u"UTF-8", 'string_value': u"Another String"},
        ],
        'imports': [u"CreateFileA", u"LoadLibrary"],
        'functions': [u"DoSomething", u"DoSomethingElse"],
        #TODO: Use CodeObject instead of AddressObject
        'code_snippets': [
            {'address_value': u"8.8.8.8", 'xsi:type': "AddressObjectType"},
            {'address_value': u"1.2.3.4", 'xsi:type': "AddressObjectType"},
        ],
    }
if __name__ == "__main__":
unittest.main()
| 1,138 | 365 |
from django.core.validators import MinValueValidator
from openslides.core.config import ConfigVariable
from openslides.poll.models import PERCENT_BASE_CHOICES
from .models import Workflow
def get_workflow_choices():
    """
    Returns a list of all workflows to be used as choices for the config variable
    'motions_workflow'. Each list item contains the pk and the display name.
    """
    choices = []
    for workflow in Workflow.objects.all():
        choices.append({'value': str(workflow.pk),
                        'display_name': workflow.name})
    return choices
def get_config_variables():
    """
    Generator which yields all config variables of this app.
    They are grouped in 'General', 'Amendments', 'Supporters', 'Voting and ballot
    papers' and 'PDF'. The generator has to be evaluated during app loading
    (see apps.py).

    Note: 'weight' appears to control the display order of the variables
    within their group — TODO confirm against the ConfigVariable docs.
    """
    # General
    yield ConfigVariable(
        name='motions_workflow',
        default_value='1',
        input_type='choice',
        label='Workflow of new motions',
        choices=get_workflow_choices,  # callable: evaluated lazily, after Workflow rows exist
        weight=310,
        group='Motions',
        subgroup='General')

    yield ConfigVariable(
        name='motions_identifier',
        default_value='per_category',
        input_type='choice',
        label='Identifier',
        choices=(
            {'value': 'per_category', 'display_name': 'Numbered per category'},
            {'value': 'serially_numbered', 'display_name': 'Serially numbered'},
            {'value': 'manually', 'display_name': 'Set it manually'}),
        weight=315,
        group='Motions',
        subgroup='General')

    yield ConfigVariable(
        name='motions_preamble',
        default_value='The assembly may decide,',
        label='Motion preamble',
        weight=320,
        group='Motions',
        subgroup='General',
        translatable=True)

    yield ConfigVariable(
        name='motions_stop_submitting',
        default_value=False,
        input_type='boolean',
        label='Stop submitting new motions by non-staff users',
        weight=325,
        group='Motions',
        subgroup='General')

    yield ConfigVariable(
        name='motions_allow_disable_versioning',
        default_value=False,
        input_type='boolean',
        label='Allow to disable versioning',
        weight=330,
        group='Motions',
        subgroup='General')

    # Amendments
    # Amendments currently not implemented. (TODO: Implement it like in OpenSlides 1.7.)
    yield ConfigVariable(
        name='motions_amendments_enabled',
        default_value=False,
        input_type='boolean',
        label='Activate amendments',
        hidden=True,  # hidden until the amendment feature is implemented
        weight=335,
        group='Motions',
        subgroup='Amendments')

    yield ConfigVariable(
        name='motions_amendments_prefix',
        default_value='A',
        label='Prefix for the identifier for amendments',
        hidden=True,
        weight=340,
        group='Motions',
        subgroup='Amendments')

    # Supporters
    yield ConfigVariable(
        name='motions_min_supporters',
        default_value=0,
        input_type='integer',
        label='Number of (minimum) required supporters for a motion',
        help_text='Choose 0 to disable the supporting system.',
        weight=345,
        group='Motions',
        subgroup='Supporters',
        validators=(MinValueValidator(0),))

    yield ConfigVariable(
        name='motions_remove_supporters',
        default_value=False,
        input_type='boolean',
        label='Remove all supporters of a motion if a submitter edits his motion in early state',
        weight=350,
        group='Motions',
        subgroup='Supporters')

    # Voting and ballot papers
    yield ConfigVariable(
        name='motions_poll_100_percent_base',
        default_value='WITHOUT_INVALID',
        input_type='choice',
        label='The 100 % base of a voting result consists of',
        choices=PERCENT_BASE_CHOICES,
        weight=355,
        group='Motions',
        subgroup='Voting and ballot papers')

    yield ConfigVariable(
        name='motions_pdf_ballot_papers_selection',
        default_value='CUSTOM_NUMBER',
        input_type='choice',
        label='Number of ballot papers (selection)',
        choices=(
            {'value': 'NUMBER_OF_DELEGATES', 'display_name': 'Number of all delegates'},
            {'value': 'NUMBER_OF_ALL_PARTICIPANTS', 'display_name': 'Number of all participants'},
            {'value': 'CUSTOM_NUMBER', 'display_name': 'Use the following custom number'}),
        weight=360,
        group='Motions',
        subgroup='Voting and ballot papers')

    yield ConfigVariable(
        name='motions_pdf_ballot_papers_number',
        default_value=8,
        input_type='integer',
        label='Custom number of ballot papers',
        weight=365,
        group='Motions',
        subgroup='Voting and ballot papers',
        validators=(MinValueValidator(1),))

    # PDF
    yield ConfigVariable(
        name='motions_pdf_title',
        default_value='Motions',
        label='Title for PDF document (all motions)',
        weight=370,
        group='Motions',
        subgroup='PDF',
        translatable=True)

    yield ConfigVariable(
        name='motions_pdf_preamble',
        default_value='',
        label='Preamble text for PDF document (all motions)',
        weight=375,
        group='Motions',
        subgroup='PDF')

    yield ConfigVariable(
        name='motions_pdf_paragraph_numbering',
        default_value=False,
        input_type='boolean',
        label='Show paragraph numbering (only in PDF)',
        weight=380,
        group='Motions',
        subgroup='PDF')
| 5,640 | 1,625 |
from mpyc.runtime import mpc
from src.dataset import ObliviousDataset, Sample
from src.output import output
from src.secint import secint as s
from src.forest import train_forest
def sample(ins, out):
    """Build a Sample whose features and label are secret-shared integers.

    `ins` holds the plain feature values and `out` the plain class label;
    each value is wrapped with the secure-integer constructor `s` before
    being packed into a Sample.
    """
    secret_features = list(map(s, ins))
    return Sample(secret_features, s(out))
# SPECT heart-imaging dataset: each call to `sample` carries 22 binary
# features plus a binary class label (the trailing argument). All values
# are secret-shared via `sample`, so the MPC training below never sees
# the plaintext data.
spect_samples = ObliviousDataset.create(
    sample([1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], 1),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0], 1),
    sample([1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0], 1),
    sample([1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0], 0),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1], 1),
    sample([1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1], 1),
    sample([1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1], 1),
    sample([1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1], 0),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0], 0),
    sample([1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1], 1),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0], 1),
    sample([1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1], 1),
    sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1], 0),
    sample([1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0], 1),
    sample([1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1], 1),
    sample([1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0),
    sample([1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1], 0),
    sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0], 0),
    sample([0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0),
    sample([0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0], 1),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0], 0),
    sample([0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0),
    sample([0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0], 0),
    sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0),
    sample([0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0], 0),
    sample([1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1], 0),
    sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0], 1),
    sample([1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1], 1),
    sample([1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0], 1),
    sample([1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0], 0),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0], 0),
    sample([1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0], 1),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0], 0),
    sample([1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0], 0),
    sample([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1], 0),
    sample([1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1], 1),
    sample([1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0], 1),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0),
    sample([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1], 0),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0], 0),
    sample([1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1], 1),
    sample([1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1], 0),
    sample([1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0], 0),
    sample([1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1], 0),
    sample([1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0], 0),
    sample([1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0], 0),
    sample([1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1], 1),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0], 0),
    sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1], 1),
    sample([1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0], 0),
    sample([1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1], 1),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], 0),
    sample([1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 1),
    sample([1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1], 1),
    sample([1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1], 0),
    sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1], 1),
    sample([1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 1),
    sample([1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1], 1),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0], 0),
    sample([1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0], 1),
    sample([1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0], 1),
    sample([1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0], 0),
    sample([1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1], 1),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0),
    sample([1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1], 1),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0], 0),
    sample([1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0], 0),
    sample([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1], 1),
    sample([1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1], 1),
    sample([1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1], 1),
    sample([1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1], 1),
    sample([1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1], 1),
    sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1], 0),
    sample([1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1], 1),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1], 1),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1], 1),
    sample([1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1], 1),
    sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1], 1),
    sample([1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0], 0),
    sample([1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1], 1),
    sample([1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0], 0),
    sample([1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0], 1),
    sample([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1], 1),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 0),
    sample([1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0], 0),
    sample([1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1], 1),
    sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0], 0),
    sample([1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0], 1),
    sample([1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0], 0),
    sample([1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0], 1),
    sample([1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1], 1),
    sample([1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1], 1),
    sample([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1], 1),
    sample([1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0], 0),
    sample([1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0], 0),
    sample([1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0),
    sample([1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0),
    sample([1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0),
    sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1], 1),
    sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1], 0),
    sample([1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0], 0),
    sample([1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 1),
    sample([1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1], 1),
    sample([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1], 0),
    sample([1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1], 0),
    sample([1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1], 1),
    sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1], 0),
    sample([1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1], 1),
    sample([1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0], 1),
    sample([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1], 1),
    sample([1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0], 1),
    sample([1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0], 1),
    sample([1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1], 1),
    sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1], 1),
    sample([1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1], 1),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0], 0),
    sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1], 0),
    sample([1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0], 0),
    sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1], 1),
    sample([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1], 0),
    sample([1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0),
    sample([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 1),
    sample([1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1], 1),
    sample([1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0], 0),
    sample([1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1], 1),
    sample([1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1], 0),
    sample([1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1], 1),
    sample([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0], 0),
    sample([1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1], 0),
    sample([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1], 1),
    sample([1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], 1),
    sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0], 0),
    sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0], 0),
    sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0], 1),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0),
    sample([0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0),
    sample([0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], 0),
    sample([0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0], 0),
    sample([0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0], 0),
    sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0)
)
async def main():
    """Train an oblivious decision forest on the SPECT data and print it.

    Runs inside the MPyC runtime context; the trained forest is opened
    (revealed) with `output` before being pretty-printed tree by tree.
    """
    async with mpc:
        trained = await train_forest(spect_samples, amount=2, depth=4)
        forest = await output(trained)
        for position, tree in enumerate(forest):
            print(f"Tree #{position}")
            tree.pretty_print()
# Entry point: drive the async main() through the MPyC event loop.
if __name__ == '__main__':
    mpc.run(main())
| 22,743 | 19,417 |
import argparse
import os
import numpy as np
import math
import sys
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from torchvision.utils import save_image
import random
from math import *
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
import cv2
# Output directory for image snapshots (idempotent if it already exists).
os.makedirs('images', exist_ok=True)

# WGAN hyper-parameters, configurable from the command line.
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=128, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.00005, help='learning rate')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--latent_dim', type=int, default=100, help='dimensionality of the latent space')
parser.add_argument('--img_size', type=int, default=28, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=1, help='number of image channels')
parser.add_argument('--n_critic', type=int, default=5, help='number of training steps for discriminator per iter')
parser.add_argument('--clip_value', type=float, default=0.01, help='lower and upper clip value for disc. weights')
parser.add_argument('--sample_interval', type=int, default=400, help='interval betwen image samples')
opt = parser.parse_args()
print(opt)

# NOTE: samples here are 2-D points, not images — --img_size/--channels are
# unused by the model below.
img_shape = (2,)
cuda = True if torch.cuda.is_available() else False
class Generator(nn.Module):
    """MLP generator mapping latent noise to 2-D points.

    The trunk produces a 1024-d feature vector; a softmax "one-hot" head
    selects a mode and a second head adds a continuous offset, so the
    output can cover a multi-modal 2-D distribution.
    """

    def __init__(self):
        super(Generator, self).__init__()

        def block(in_feat, out_feat, normalize=True):
            # Linear -> (optional BatchNorm) -> LeakyReLU.
            layers = [nn.Linear(in_feat, out_feat)]
            if normalize:
                # NOTE(review): the positional 0.8 is BatchNorm1d's `eps`,
                # not `momentum`; it is almost certainly intended as
                # momentum=0.8 (widespread copy-paste slip). Left as-is to
                # preserve behavior — confirm before changing.
                layers.append(nn.BatchNorm1d(out_feat, 0.8))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            return layers

        self.model = nn.Sequential(
            *block(opt.latent_dim, 128, normalize=False),
            *block(128, 256),
            *block(256, 512),
            *block(512, 1024),
        )
        self.one_hot = nn.Linear(1024, 100)
        self.one_hot_offset = nn.Linear(100, int(np.prod(img_shape)))
        self.offset = nn.Linear(1024, int(np.prod(img_shape)))
        # Fix: pass an explicit dim. The implicit-dim form is deprecated and
        # warns at runtime; for the (batch, 100) activations used in
        # forward(), dim=1 matches the previous implicit behavior.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, z):
        """Map latent batch z of shape (batch, latent_dim) to (batch, 2) points."""
        img = self.model(z)
        one_hot = self.one_hot(img)
        one_hot_offset = self.one_hot_offset(self.softmax(one_hot))
        img = one_hot_offset + self.offset(img)
        img = img.view(img.shape[0], *img_shape)
        return img
class Discriminator(nn.Module):
    """MLP critic scoring points; returns an unbounded realness score per sample."""

    def __init__(self):
        super(Discriminator, self).__init__()
        layers = []
        # Two Linear+LeakyReLU stages, then a single-score output layer.
        for in_feat, out_feat in ((int(np.prod(img_shape)), 512), (512, 256)):
            layers.append(nn.Linear(in_feat, out_feat))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
        layers.append(nn.Linear(256, 1))
        self.model = nn.Sequential(*layers)

    def forward(self, img):
        """Flatten each sample and return its critic score of shape (batch, 1)."""
        flattened = img.view(img.shape[0], -1)
        return self.model(flattened)
def gaussian_mixture(batchsize, ndim, num_labels):
    """Draw samples from a Gaussian mixture whose components sit on a circle.

    Points are generated in 2-D pairs: a small isotropic Gaussian is rotated
    by a randomly chosen component's angle and shifted out to a circle of
    radius 1.4, giving `num_labels` evenly spaced modes.

    :param batchsize: number of rows to draw
    :param ndim: output dimensionality; must be even
    :param num_labels: number of mixture components
    :returns: np.ndarray of shape (batchsize, ndim), dtype float32
    :raises ValueError: if `ndim` is odd
    """
    if ndim % 2 != 0:
        # Fix: ValueError is more precise than the generic Exception and
        # remains backward compatible for callers catching Exception.
        raise ValueError("ndim must be a multiple of 2.")

    def rotate_and_shift(x, y, label, num_labels):
        # Rotate the point by the component's angle and push it outward to
        # the circle of radius `shift`.
        shift = 1.4
        r = 2.0 * np.pi / float(num_labels) * float(label)
        new_x = x * cos(r) - y * sin(r) + shift * cos(r)
        new_y = x * sin(r) + y * cos(r) + shift * sin(r)
        return np.array([new_x, new_y]).reshape((2,))

    x_var = 0.05
    y_var = 0.05
    x = np.random.normal(0, x_var, (batchsize, ndim // 2))
    y = np.random.normal(0, y_var, (batchsize, ndim // 2))
    z = np.empty((batchsize, ndim), dtype=np.float32)
    for batch in range(batchsize):
        for zi in range(ndim // 2):
            label = random.randint(0, num_labels - 1)
            z[batch, zi * 2:zi * 2 + 2] = rotate_and_shift(x[batch, zi], y[batch, zi], label, num_labels)
    return z
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()

if cuda:
    generator.cuda()
    discriminator.cuda()

# Optimizers (RMSprop, per the original WGAN recipe)
optimizer_G = torch.optim.RMSprop(generator.parameters(), lr=opt.lr)
optimizer_D = torch.optim.RMSprop(discriminator.parameters(), lr=opt.lr)

Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

# ----------
#  Training
# ----------

batches_done = 0
for epoch in range(opt.n_epochs):
    for i in range(1000):
        # Real batch: 2-D points from a 4-component Gaussian mixture.
        imgs = gaussian_mixture(opt.batch_size, 2, 4)
        imgs = Tensor(imgs)
        #imgs = Tensor(np.random.uniform(low=1.3, high=5.7, size=(opt.batch_size, 2)))

        # Configure input
        real_imgs = Variable(imgs.type(Tensor))

        # ---------------------
        #  Train Discriminator
        # ---------------------

        optimizer_D.zero_grad()

        # Sample noise as generator input
        z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))

        # Generate a batch of images (detached: no generator gradient here)
        fake_imgs = generator(z).detach()
        # Adversarial loss (Wasserstein critic objective)
        loss_D = -torch.mean(discriminator(real_imgs)) + torch.mean(discriminator(fake_imgs))

        loss_D.backward()
        optimizer_D.step()

        # Clip weights of discriminator (WGAN Lipschitz constraint)
        for p in discriminator.parameters():
            p.data.clamp_(-opt.clip_value, opt.clip_value)

        # Train the generator every n_critic iterations
        if i % opt.n_critic == 0:

            # -----------------
            #  Train Generator
            # -----------------

            optimizer_G.zero_grad()

            # Generate a batch of images
            gen_imgs = generator(z)
            # Adversarial loss
            loss_G = -torch.mean(discriminator(gen_imgs))

            loss_G.backward()
            optimizer_G.step()

            print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" % (epoch, opt.n_epochs,
                                                                              batches_done % 1000, 1000,
                                                                              loss_D.item(), loss_G.item()))

            # NOTE(review): indentation reconstructed from a whitespace-stripped
            # source; the print and the plotting block are assumed to live
            # inside the n_critic branch (they read loss_G/gen_imgs) and the
            # batch counter to tick once per inner iteration — confirm against
            # the original file's history.
            if batches_done % opt.sample_interval == 0:
                # Scatter-plot the current generated points and display them.
                Y = gen_imgs.detach().cpu().numpy()
                plt.scatter(Y[:, 0], Y[:, 1])
                plt.savefig('tmp.png')
                plt.close()
                image = cv2.imread('tmp.png')
                cv2.imshow("image", image)
                cv2.waitKey(1)
        batches_done += 1
| 6,578 | 2,272 |
# -*- coding: utf-8 -*-
__author__ = 'gzp'
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from utils import Tree
class Solution(object):
    """Trim a BST so that every node value lies within the closed range [L, R]."""

    def trimBST(self, root, L, R):
        """
        :type root: TreeNode
        :type L: int
        :type R: int
        :rtype: TreeNode
        """
        return self._trimBST(root, L, R)

    def _trimBST(self, node, L, R):
        """Recursive worker: return the trimmed subtree rooted at `node`.

        Uses the BST property: if a value is below L, the entire left
        subtree is below L too, so only the right subtree can survive
        (symmetrically for values above R). Fixes the previous version,
        which re-trimmed already-trimmed subtrees after the reassignment
        and inconsistently recursed through the public trimBST wrapper
        for the left child.

        :type node: TreeNode
        :type L: int
        :type R: int
        :rtype: TreeNode
        """
        if not node:
            return None
        # `val is None` marks placeholder nodes (from the Tree helper); keep
        # them and only trim their children, mirroring the original checks.
        if node.val is not None and node.val < L:
            return self._trimBST(node.right, L, R)
        if node.val is not None and node.val > R:
            return self._trimBST(node.left, L, R)
        node.left = self._trimBST(node.left, L, R)
        node.right = self._trimBST(node.right, L, R)
        return node
if __name__ == '__main__':
    # Smoke tests using the project's Tree helper (presumably builds a tree
    # from a level-order list with None placeholders — verify in utils).
    s = Solution()
    root = Tree([1, 0, 2])
    print(root.get_nodes())
    print(s.trimBST(root, 1, 2).get_nodes())
    root = Tree([3, 0, 4, None, 2, None, None, 1])
    print(root.get_nodes())
    print(s.trimBST(root, 1, 3).get_nodes())
| 1,302 | 470 |
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..utils import extract_CN_from_content
from ..items import ScrapySpiderItem
import re
from scrapy_splash import SplashRequest
class A85Spider(CrawlSpider):
    """Crawl houqi.gov.cn government-information pages and extract articles.

    Pages are rendered through Splash; parse_item pulls url/date/title/contents,
    using one of two XPath layouts depending on the URL's section.
    """

    name = '85'
    allowed_domains = ['houqi.gov.cn']
    start_urls = ['http://houqi.gov.cn/kzhq/zwgk/zwgk.shtml']

    rules = (
        Rule(LinkExtractor(allow=r'/kzhq/[a-zA-Z]+\d+/list\.shtml'), follow=True),
        Rule(LinkExtractor(allow=r'/kzhq/[a-zA-Z]+/list\.shtml'), follow=True),
        Rule(LinkExtractor(allow=r'/kzhq/[a-z]+/list\.shtml'), follow=True),
        Rule(LinkExtractor(allow=r'/kzhq/gsgg/list\.shtml'), follow=True),
        Rule(LinkExtractor(restrict_xpaths='//ul[@class="ggnav"]//li'), callback='parse_item', follow=True),
        Rule(LinkExtractor(allow=r'list_\d+.shtml'), follow=True),
    )

    # URL fragments whose pages use the "case 1" layout in parse_item.
    _CASE1_SECTIONS = ('qzqd', 'gsgg', 'xwfbh', 'hqxw', 'tzgg')

    def _build_request(self, rule, link):
        """Build a Splash-rendered request for a rule-extracted link."""
        # Render through Splash (0.5 s wait) so dynamically loaded content exists.
        r = SplashRequest(url=link.url, callback=self._response_downloaded, args={"wait": 0.5})
        r.meta.update(rule=rule, link_text=link.text)
        return r

    def _requests_to_follow(self, response):
        """CrawlSpider._requests_to_follow re-implemented to use SplashRequest."""
        seen = set()
        for n, rule in enumerate(self._rules):
            links = [lnk for lnk in rule.link_extractor.extract_links(response)
                     if lnk not in seen]
            if links and rule.process_links:
                links = rule.process_links(links)
            for link in links:
                seen.add(link)
                # The rule is passed by index, matching CrawlSpider's meta convention.
                r = self._build_request(n, link)
                yield rule.process_request(r)

    def parse_item(self, response):
        """Extract one article; returns the item, or None when no date is found."""
        if any(section in response.url for section in self._CASE1_SECTIONS):
            try:
                item = ScrapySpiderItem()
                item['url'] = response.url
                date = response.xpath('/html/body/div[3]/div/div[2]/div[1]/text()').extract_first()
                date = re.search(r"(\d{4}-\d{2}-\d{2})", date).groups()[0]
                item['date'] = date
                title = response.xpath('//div[@class="content"]/h1/text()').extract_first()
                item['title'] = title
                contents = response.xpath('//div[@class="zhengw"]').extract()
                item['contents'] = extract_CN_from_content(contents)
                return item
            except Exception:
                # Fix: narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit); keeps the best-effort skip.
                print("there have no date in case 1.")
        else:
            try:
                item = ScrapySpiderItem()
                item['url'] = response.url
                date = response.xpath('/html/body/div[3]/div/div[2]/div[1]/p[7]/em/text()').extract_first()
                date = re.search(r"(\d{4}-\d{2}-\d{2})", date).groups()[0]
                item['date'] = date
                title = response.xpath('/html/body/div[3]/div/div[2]/div[1]/p[3]/em/text()').extract_first()
                item['title'] = title
                contents = response.xpath('//div[@class="zhengw"]').extract()
                item['contents'] = extract_CN_from_content(contents)
                return item
            except Exception:
                # Fix: narrowed from a bare `except:`.
                print("there have no date in case 2.")
| 3,895 | 1,229 |
# Names re-exported by `from <this package/module> import *`.
__all__ = ['GUM_Dispenser_Main', 'GUM_setup_parser', 'GUM_Describe_Source', 'GUM_Generate_NOMNOML', 'GUM_Exceptions']
| 118 | 52 |
#!/usr/bin/env python
# MIT License
#
# Copyright (c) 2017 Dan Persons (dpersonsdev@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from argparse import ArgumentParser
from argparse import FileType
from configparser import ConfigParser
from os.path import isfile
__version__ = '0.1'


def get_args():
    """Parse the command line and return the resulting namespace."""
    parser = ArgumentParser()
    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + str(__version__))
    # Config file path; the default points at a file that may not exist,
    # which get_config() tolerates.
    parser.add_argument('-c',
                        action='store', dest='config',
                        default='/etc/nothing.conf',
                        help=('set the config file'))
    parser.add_argument('--full',
                        action='store_true',
                        help=('Do nothing to the fullest'))
    # Optional positional file, opened for reading by argparse itself.
    parser.add_argument('files',
                        type=FileType('r'), metavar='FILE', nargs='?',
                        help=('set a file with which to do nothing'))
    return parser.parse_args()
def get_config(configfile):
    """Read the config file.

    :param configfile: path to an INI-style configuration file.
    :returns: a populated ConfigParser, or None if the file does not exist.

    Bug fix: the original referenced the undefined name ``args`` (NameError
    whenever the file existed) and never returned the parser it built.
    """
    if not isfile(configfile):
        return None
    config = ConfigParser()
    config.read(configfile)
    return config
def main_event():
    """Do the actual nothing (intentionally a no-op placeholder)."""
    return None
def run_script():
    """Run the do-nothing program, exiting quietly on Ctrl-C."""
    try:
        cli_args = get_args()
        cfg = get_config(cli_args.config)
        main_event()
    except KeyboardInterrupt:
        print('\nExiting on KeyboardInterrupt')
def main():
    """Console entry point; delegates to run_script()."""
    run_script()


if __name__ == "__main__":
    main()
| 2,589 | 817 |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2015-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""QWidget displaying an overview of a 2D plot.
This shows the available range of the data, and the current location of the
plot view.
"""
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "22/02/2021"
import logging
import weakref
from ... import qt
from ...utils import LockReentrant
_logger = logging.getLogger(__name__)
class _DraggableRectItem(qt.QGraphicsRectItem):
    """RectItem which signals its change through visibleRectDragged.

    User drags are clamped to a constraint rectangle and forwarded to the
    single attached QGraphicsView's ``visibleRectDragged`` signal.
    """

    def __init__(self, *args, **kwargs):
        super(_DraggableRectItem, self).__init__(
            *args, **kwargs)
        # Cursor to restore when the mouse leaves the rect (None = unset).
        self._previousCursor = None
        self.setFlag(qt.QGraphicsItem.ItemIsMovable)
        self.setFlag(qt.QGraphicsItem.ItemSendsGeometryChanges)
        self.setAcceptHoverEvents(True)
        # True while a position change comes from the API (setPos/moveBy);
        # itemChange then skips clamping and signalling.
        self._ignoreChange = False
        # Stored as (xMin, xMax, yMin, yMax); see setConstraintRect.
        self._constraint = 0, 0, 0, 0

    def setConstraintRect(self, left, top, width, height):
        """Set the constraint rectangle for dragging.

        The coordinates are in the _DraggableRectItem coordinate system.

        This constraint only applies to modification through interaction
        (i.e., this constraint is not applied to change through API).

        If the _DraggableRectItem is smaller than the constraint rectangle,
        the _DraggableRectItem remains within the constraint rectangle.
        If the _DraggableRectItem is wider than the constraint rectangle,
        the constraint rectangle remains within the _DraggableRectItem.
        """
        self._constraint = left, left + width, top, top + height

    def setPos(self, *args, **kwargs):
        """Overridden to ignore changes from API in itemChange."""
        self._ignoreChange = True
        super(_DraggableRectItem, self).setPos(*args, **kwargs)
        self._ignoreChange = False

    def moveBy(self, *args, **kwargs):
        """Overridden to ignore changes from API in itemChange."""
        self._ignoreChange = True
        super(_DraggableRectItem, self).moveBy(*args, **kwargs)
        self._ignoreChange = False

    def itemChange(self, change, value):
        """Callback called before applying changes to the item.

        Clamps interactive position changes to the constraint rect and
        emits the view's visibleRectDragged signal when the position moves.
        """
        if (change == qt.QGraphicsItem.ItemPositionChange and
                not self._ignoreChange):
            # Makes sure that the visible area is in the data
            # or that data is in the visible area if area is too wide
            x, y = value.x(), value.y()
            xMin, xMax, yMin, yMax = self._constraint

            if self.rect().width() <= (xMax - xMin):
                # Rect narrower than constraint: keep rect inside it.
                if x < xMin:
                    value.setX(xMin)
                elif x > xMax - self.rect().width():
                    value.setX(xMax - self.rect().width())
            else:
                # Rect wider than constraint: keep constraint inside rect.
                if x > xMin:
                    value.setX(xMin)
                elif x < xMax - self.rect().width():
                    value.setX(xMax - self.rect().width())

            if self.rect().height() <= (yMax - yMin):
                if y < yMin:
                    value.setY(yMin)
                elif y > yMax - self.rect().height():
                    value.setY(yMax - self.rect().height())
            else:
                if y > yMin:
                    value.setY(yMin)
                elif y < yMax - self.rect().height():
                    value.setY(yMax - self.rect().height())

            if self.pos() != value:
                # Notify change through signal
                views = self.scene().views()
                # A single view is assumed to be attached to the scene.
                assert len(views) == 1
                views[0].visibleRectDragged.emit(
                    value.x() + self.rect().left(),
                    value.y() + self.rect().top(),
                    self.rect().width(),
                    self.rect().height())

            return value

        return super(_DraggableRectItem, self).itemChange(
            change, value)

    def hoverEnterEvent(self, event):
        """Called when the mouse enters the rectangle area"""
        self._previousCursor = self.cursor()
        self.setCursor(qt.Qt.OpenHandCursor)

    def hoverLeaveEvent(self, event):
        """Called when the mouse leaves the rectangle area"""
        if self._previousCursor is not None:
            self.setCursor(self._previousCursor)
            self._previousCursor = None
class RadarView(qt.QGraphicsView):
    """Widget presenting a synthetic view of a 2D area and
    the current visible area.

    Coordinates are as in QGraphicsView:
    x goes from left to right and y goes from top to bottom.

    This widget preserves the aspect ratio of the areas.

    The 2D area and the visible area can be set with :meth:`setDataRect`
    and :meth:`setVisibleRect`.
    When the visible area has been dragged by the user, its new position
    is signaled by the *visibleRectDragged* signal.

    It is possible to invert the direction of the axes by using the
    :meth:`scale` method of QGraphicsView.
    """

    visibleRectDragged = qt.Signal(float, float, float, float)
    """Signals that the visible rectangle has been dragged.

    It provides: left, top, width, height in data coordinates.
    """

    # Shared pens/brushes (class-level, built once).
    _DATA_PEN = qt.QPen(qt.QColor('white'))
    _DATA_BRUSH = qt.QBrush(qt.QColor('light gray'))
    _ACTIVEDATA_PEN = qt.QPen(qt.QColor('black'))
    _ACTIVEDATA_BRUSH = qt.QBrush(qt.QColor('transparent'))
    _ACTIVEDATA_PEN.setWidth(2)
    _ACTIVEDATA_PEN.setCosmetic(True)
    _VISIBLE_PEN = qt.QPen(qt.QColor('blue'))
    _VISIBLE_PEN.setWidth(2)
    _VISIBLE_PEN.setCosmetic(True)
    _VISIBLE_BRUSH = qt.QBrush(qt.QColor(0, 0, 0, 0))
    # NOTE(review): tooltip says "Red contour" but _VISIBLE_PEN is blue;
    # likely stale user-facing text — confirm before changing the string.
    _TOOLTIP = 'Radar View:\nRed contour: Visible area\nGray area: The image'

    _PIXMAP_SIZE = 256

    def __init__(self, parent=None):
        self.__plotRef = None
        self._scene = qt.QGraphicsScene()
        # Gray background rect showing the full data range.
        self._dataRect = self._scene.addRect(0, 0, 1, 1,
                                             self._DATA_PEN,
                                             self._DATA_BRUSH)
        # Outlines for the active image/scatter/curve, hidden until set.
        self._imageRect = self._scene.addRect(0, 0, 1, 1,
                                              self._ACTIVEDATA_PEN,
                                              self._ACTIVEDATA_BRUSH)
        self._imageRect.setVisible(False)
        self._scatterRect = self._scene.addRect(0, 0, 1, 1,
                                                self._ACTIVEDATA_PEN,
                                                self._ACTIVEDATA_BRUSH)
        self._scatterRect.setVisible(False)
        self._curveRect = self._scene.addRect(0, 0, 1, 1,
                                              self._ACTIVEDATA_PEN,
                                              self._ACTIVEDATA_BRUSH)
        self._curveRect.setVisible(False)

        # Draggable rect representing the plot's current view.
        self._visibleRect = _DraggableRectItem(0, 0, 1, 1)
        self._visibleRect.setPen(self._VISIBLE_PEN)
        self._visibleRect.setBrush(self._VISIBLE_BRUSH)
        self._scene.addItem(self._visibleRect)

        super(RadarView, self).__init__(self._scene, parent)
        self.setHorizontalScrollBarPolicy(qt.Qt.ScrollBarAlwaysOff)
        self.setVerticalScrollBarPolicy(qt.Qt.ScrollBarAlwaysOff)
        self.setFocusPolicy(qt.Qt.NoFocus)
        self.setStyleSheet('border: 0px')
        self.setToolTip(self._TOOLTIP)

        # Guards against feedback between plot limit changes and drags.
        self.__reentrant = LockReentrant()
        self.visibleRectDragged.connect(self._viewRectDragged)

        # Periodic refresh of the data content (started in setPlotWidget).
        self.__timer = qt.QTimer(self)
        self.__timer.timeout.connect(self._updateDataContent)

    def sizeHint(self):
        """Overridden to avoid sizeHint to depend on content size."""
        return self.minimumSizeHint()

    def wheelEvent(self, event):
        """Overridden to disable vertical scrolling with wheel."""
        event.ignore()

    def resizeEvent(self, event):
        """Overridden to fit current content to new size."""
        self.fitInView(self._scene.itemsBoundingRect(), qt.Qt.KeepAspectRatio)
        super(RadarView, self).resizeEvent(event)

    def setDataRect(self, left, top, width, height):
        """Set the bounds of the data rectangular area.

        This sets the coordinate system.
        """
        self._dataRect.setRect(left, top, width, height)
        self._visibleRect.setConstraintRect(left, top, width, height)
        self.fitInView(self._scene.itemsBoundingRect(), qt.Qt.KeepAspectRatio)

    def setVisibleRect(self, left, top, width, height):
        """Set the visible rectangular area.

        The coordinates are relative to the data rect.
        """
        self.__visibleRect = left, top, width, height
        self._visibleRect.setRect(0, 0, width, height)
        self._visibleRect.setPos(left, top)
        self.fitInView(self._scene.itemsBoundingRect(), qt.Qt.KeepAspectRatio)

    def __setVisibleRectFromPlot(self, plot):
        """Update radar view visible area from the plot's axis limits."""
        xMin, xMax = plot.getXAxis().getLimits()
        yMin, yMax = plot.getYAxis().getLimits()
        self.setVisibleRect(xMin, yMin, xMax - xMin, yMax - yMin)

    def getPlotWidget(self):
        """Returns the connected plot

        :rtype: Union[None,PlotWidget]
        """
        if self.__plotRef is None:
            return None
        plot = self.__plotRef()
        if plot is None:
            # The plot was garbage-collected; drop the stale weakref.
            self.__plotRef = None
        return plot

    def setPlotWidget(self, plot):
        """Set the PlotWidget this radar view connects to.

        As result `setDataRect` and `setVisibleRect` will be called
        automatically.

        :param Union[None,PlotWidget] plot:
        """
        previousPlot = self.getPlotWidget()
        if previousPlot is not None:  # Disconnect previous plot
            # Bug fix: these disconnects previously used `plot` (the NEW
            # plot) instead of `previousPlot`, leaving the old plot's
            # signals connected and touching the new plot before it was
            # wired up.
            previousPlot.getXAxis().sigLimitsChanged.disconnect(self._xLimitChanged)
            previousPlot.getYAxis().sigLimitsChanged.disconnect(self._yLimitChanged)
            previousPlot.getYAxis().sigInvertedChanged.disconnect(self._updateYAxisInverted)

        # Reset plot and timer
        # FIXME: It would be good to clean up the display here
        self.__plotRef = None
        self.__timer.stop()

        if plot is not None:  # Connect new plot
            self.__plotRef = weakref.ref(plot)
            plot.getXAxis().sigLimitsChanged.connect(self._xLimitChanged)
            plot.getYAxis().sigLimitsChanged.connect(self._yLimitChanged)
            plot.getYAxis().sigInvertedChanged.connect(self._updateYAxisInverted)
            self.__setVisibleRectFromPlot(plot)
            self._updateYAxisInverted()
            self.__timer.start(500)

    def _xLimitChanged(self, vmin, vmax):
        """Slot: plot X axis limits changed."""
        plot = self.getPlotWidget()
        self.__setVisibleRectFromPlot(plot)

    def _yLimitChanged(self, vmin, vmax):
        """Slot: plot Y axis limits changed."""
        plot = self.getPlotWidget()
        self.__setVisibleRectFromPlot(plot)

    def _updateYAxisInverted(self, inverted=None):
        """Sync radar view axis orientation.

        :param inverted: new inversion state, or None to query the plot.
        """
        plot = self.getPlotWidget()
        if inverted is None:
            # Do not perform this when called from plot signal
            inverted = plot.getYAxis().isInverted()
        # Use scale to invert radarView
        # RadarView default Y direction is from top to bottom
        # As opposed to Plot. So invert RadarView when Plot is NOT inverted.
        self.resetTransform()
        if not inverted:
            self.scale(1., -1.)
        self.update()

    def _viewRectDragged(self, left, top, width, height):
        """Slot for radar view visible rectangle changes."""
        plot = self.getPlotWidget()
        if plot is None:
            return

        if self.__reentrant.locked():
            # setLimits below would re-trigger limit-changed slots.
            return
        with self.__reentrant:
            plot.setLimits(left, left + width, top, top + height)

    def _updateDataContent(self):
        """Update the content to the current data content"""
        plot = self.getPlotWidget()
        if plot is None:
            return
        ranges = plot.getDataRange()
        xmin, xmax = ranges.x if ranges.x is not None else (0, 0)
        ymin, ymax = ranges.y if ranges.y is not None else (0, 0)
        self.setDataRect(xmin, ymin, xmax - xmin, ymax - ymin)

        self.__updateItem(self._imageRect, plot.getActiveImage())
        self.__updateItem(self._scatterRect, plot.getActiveScatter())
        self.__updateItem(self._curveRect, plot.getActiveCurve())

    def __updateItem(self, rect, item):
        """Sync rect with item bounds

        :param QGraphicsRectItem rect:
        :param Item item:
        """
        if item is None:
            rect.setVisible(False)
            return
        ranges = item._getBounds()
        if ranges is None:
            rect.setVisible(False)
            return
        xmin, xmax, ymin, ymax = ranges
        width = xmax - xmin
        height = ymax - ymin
        rect.setRect(xmin, ymin, width, height)
        rect.setVisible(True)
| 14,077 | 4,128 |
import json
import time
import websocket
class Client:
    """Websocket client that batches outgoing updates and dispatches
    incoming messages to `_on_<type>` handler methods."""

    def __init__(self, url):
        self._url = url
        self._updates = []
        self._socket = create_socket(url, connection_timeout=2)

    def sync(self):
        """Flush queued updates in batches of 1000 and process replies."""
        received = []
        while True:
            payload = self._socket.recv()
            if payload:
                received.extend(json.loads(payload))
            batch = self._updates[:1000]
            self._socket.send(json.dumps(batch))
            del self._updates[:1000]
            if not self._updates:
                break
        for message in received:
            self._recv(message)

    def serve(self, function=None):
        """Loop forever, calling *function* (stop on StopIteration) and
        syncing on each iteration."""
        while True:
            if function is not None:
                try:
                    function()
                except StopIteration:
                    break
            self.sync()

    def _send(self, content, header):
        # Queue a message; it goes out on the next sync().
        self._updates.append({
            'header': header,
            'content': content,
        })

    def _recv(self, msg):
        # Route to a handler named after the message type, if defined.
        handler = getattr(self, '_on_%s' % msg['header']['type'], None)
        if handler is not None:
            handler(msg['content'])
def create_socket(uri, *args, **kwargs):
    """Open a websocket connection to *uri*, retrying while refused.

    ``connection_timeout`` (seconds, default 0) bounds how long to keep
    retrying after the first refusal.

    Bug fix: ``connection_timeout`` is now popped from kwargs — it is a
    retry budget for this function, not an option understood by
    websocket.create_connection, so it must not be forwarded.
    """
    retry_budget = kwargs.pop('connection_timeout', 0)
    start = time.time()
    while True:
        try:
            return websocket.create_connection(uri, *args, **kwargs)
        except ConnectionRefusedError:
            # Give up once the budget is exhausted.
            if time.time() - start > retry_budget:
                raise
| 1,486 | 417 |
import dataiku
import glob
import pandas as pd
import os
def do(payload, config, plugin_config, inputs):
    """Plugin backend entry point.

    For method "get-valid-csv-filenames", lists the CSV files at the root
    of the input model folder whose header contains the required columns.
    """
    if "method" not in payload:
        return {}

    client = dataiku.api_client()

    if payload["method"] == "get-valid-csv-filenames":
        required_columns = ["id", "className"]
        sep = ","

        # Locate the managed folder playing the "modelFolder" role.
        folder_full_name = [inp for inp in inputs if inp["role"] == "modelFolder"][0]["fullName"]
        folder_path = dataiku.Folder(folder_full_name).get_path()
        candidate_files = glob.glob(folder_path + "/*.csv")

        # Keep only files whose header has every required column.
        valid_files = []
        for path in candidate_files:
            schema = retrieve_schema_from_pandas_compatible_csv_file(path, sep)
            if all(col in schema for col in required_columns):
                valid_files.append({
                    "path": path,
                    "name": os.path.basename(path)
                })
        return {"csv_valid_filenames": valid_files}
def retrieve_schema_from_pandas_compatible_csv_file(file_path, sep):
    """Return the column index of a CSV file, or [] if it cannot be read.

    Only the header row is parsed (nrows=0), so this is cheap even for
    large files.

    Bug fix: the Python-2 ``print`` statement was a SyntaxError under
    Python 3, and ``e.message`` no longer exists; use str(e) instead.
    """
    try:
        df = pd.read_csv(file_path, sep=sep, nrows=0)
        return df.columns
    except Exception as e:
        print("Unexpected exception : {}".format(e))
        return []
#!/usr/bin/env python3
#
# Author: Vishwas K Singh
# Email: vishwasks32@gmail.com
#
# Script to convert Celcius Temperature to Farenheit
def temp_conv(temp_type, temp_val):
    """Convert a temperature between Celsius and Fahrenheit.

    Args:
        temp_type: target scale — 'f' converts temp_val (Celsius) to
            Fahrenheit; 'c' converts temp_val (Fahrenheit) to Celsius.
        temp_val: temperature value to convert.

    Returns:
        The converted temperature as a float.

    Raises:
        ValueError: if temp_type is neither 'f' nor 'c'. (The original
        silently fell through and returned None, which then crashed the
        caller's format string.)
    """
    if temp_type == 'f':
        return ((9 / 5) * temp_val) + 32
    if temp_type == 'c':
        return (5 * (temp_val - 32)) / 9
    raise ValueError("temp_type must be 'f' or 'c', got %r" % (temp_type,))
if __name__ == '__main__':
    # Interactive menu. Note the temp_type letter passed to temp_conv()
    # names the TARGET scale: option 1 (F -> C) passes 'c', option 2
    # (C -> F) passes 'f'. Prompt strings (and their spellings) are
    # user-facing and kept verbatim.
    print("Welcome to Temperature Converter")
    print("Select 1. Farenheit to Celcius\n\t2. Celcius to Farenheit")
    conv_type = input()
    if conv_type == '1':
        temp_type = 'c'  # convert TO Celsius
        temp_val = float(input("Enter the farenheit value to be converted: "))
        temp_celcius = temp_conv(temp_type, temp_val)
        print("%.2f degree farenheit converts to %.2f degree celcius." % (temp_val, temp_celcius))
    elif conv_type == '2':
        temp_type = 'f'  # convert TO Fahrenheit
        temp_val = float(input("Enter the Celcius value to be converted: "))
        temp_farenheit = temp_conv(temp_type, temp_val)
        print("%.2f degree celcius converts to %.2f degree farenheit." % (temp_val, temp_farenheit))
    else:
        print("Invalid Input!! Exit..")
| 1,232 | 422 |
import os
import sys
# Advent of Code 2020 day 9, part 1 ("Encoding Error").
# Resolve the input file relative to this script, not the CWD.
puzzle_input_path = os.path.join(os.path.dirname(__file__), "input_1.txt")
with open(puzzle_input_path) as puzzle_input_file:
    puzzle_input_raw = puzzle_input_file.read()

# Every number after the first `preamble` must be the sum of two of the
# previous `preamble` numbers; print the first one that is not.
preamble = 25
numbers = [int(x) for x in puzzle_input_raw.splitlines()]
# NOTE(review): `n - e not in window` treats e paired with ITSELF as a
# valid sum when n == 2*e even if e occurs only once in the window; the
# usual puzzle inputs do not hit this — confirm before reuse.
# NOTE(review): each membership test scans a list slice (O(preamble));
# acceptable at preamble=25.
number = next(
    n
    for i, n
    in enumerate(numbers[preamble:], start=preamble)
    if all(n - e not in numbers[i - preamble:i] for e in numbers[i - preamble:i])
)
print(number)

# Earlier imperative formulation, kept for reference:
# for idx, number in enumerate(numbers[preamble:], start=preamble):
#     last_numbers = numbers[idx - preamble:idx]
#     does_not_match = all(number - e not in last_numbers for e in last_numbers)
#     if does_not_match:
#         break
"""Base architecture and Siamese Network.
"""
import tensorflow as tf
import dualing.utils.exception as e
class Base(tf.keras.Model):
    """Base twin architecture of a Siamese Network.

    Concrete twins subclass this and implement their own forward pass.
    """

    def __init__(self, name=''):
        """Initializes the base model.

        Args:
            name (str): Naming identifier.
        """
        super().__init__(name=name)

    def call(self, x):
        """Forward pass; must be provided by each concrete twin.

        Args:
            x (tf.Tensor): Tensor containing the input sample.

        Raises:
            NotImplementedError: always, on this base class.
        """
        raise NotImplementedError
class Siamese(tf.keras.Model):
    """Base of Siamese Neural Networks, built around a twin architecture."""

    def __init__(self, base, name=''):
        """Initializes the Siamese wrapper.

        Args:
            base (Base): Twin architecture.
            name (str): Naming identifier.
        """
        super().__init__(name=name)
        # Shared-weight twin sub-network (validated by the setter below).
        self.B = base

    @property
    def B(self):
        """Base: Twin architecture."""
        return self._B

    @B.setter
    def B(self, B):
        # Enforce the twin type at assignment time.
        if not isinstance(B, Base):
            raise e.TypeError('`B` should be a child from Base class')
        self._B = B

    def compile(self, optimizer):
        """Attaches optimizer, loss and metrics; child-specific.

        Args:
            optimizer (tf.keras.optimizers): Optimization algorithm.

        Raises:
            NotImplementedError: always, on this base class.
        """
        raise NotImplementedError

    def step(self, x, y):
        """Single batch optimization step; child-specific.

        Args:
            x (tf.Tensor): Tensor containing samples.
            y (tf.Tensor): Tensor containing labels.

        Raises:
            NotImplementedError: always, on this base class.
        """
        raise NotImplementedError

    def fit(self, batches, epochs=100):
        """Trains the model over training batches; child-specific.

        Args:
            batches (Dataset): Batches of (samples, labels) tuples.
            epochs (int): Maximum number of epochs.

        Raises:
            NotImplementedError: always, on this base class.
        """
        raise NotImplementedError

    def evaluate(self, batches):
        """Evaluates over validation/testing batches; child-specific.

        Args:
            batches (Dataset): Batches of (samples, labels) tuples.

        Raises:
            NotImplementedError: always, on this base class.
        """
        raise NotImplementedError

    def predict(self, x):
        """Forward pass returning the network's output; child-specific.

        Args:
            x (tf.Tensor): Tensor containing samples.

        Raises:
            NotImplementedError: always, on this base class.
        """
        raise NotImplementedError

    def extract_embeddings(self, x):
        """Embeds inputs with the twin (embedder) network.

        Args:
            x (np.array, tf.Tensor): Inputs to be embedded.

        Returns:
            A tensor containing the embedded inputs.
        """
        tensor = tf.convert_to_tensor(x)
        return self.B(tensor)
| 4,416 | 1,186 |
#**
# * The Decentralized App (DApp):
# * This is where the App developer writes the decentralized app.
# * Make sure the code is written within the specified space region.
# *
# * IMPORTANT:
# * 1. Developer DApp CODE MUST BE WRITTEN WITHIN SPECIFIED SPACE REGION.
# * 2. DApp MUST return values through the 'results' variable.
# * 3. DApp MUST RETURN A JSON Object.
# * 4. DApp data crunching should not exceed 100MB of Data per peer task.
# * 5. If you change the name of 'results', make sure to change it at DApp's 'return results' code.
# *
# *
import sys, json

results = {}  # Storage for successful results.
json_str = input()  # Capture data input (one JSON document on stdin)
params = json.loads(json_str)  # Load parameters values (params) to process

#*********************************************************************************/
# /* START WRITING YOUR DAPP CODE BEGINNING HERE: */
#*********************************************************************************/
# EXAMPLE:
# Estimating the quality of images in a file directory (quality is taken as
# the area of an image divided by its file size).

# Import necessary DApp resources, scripts, assets and modules needed for the task.
import numpy as np
import cv2
import os
import base64

# Variable to store image quality
imageQuality = {'imageQuality': 0}

fileName = params['uParams'][0]['parameter2']  # Capture name of file
fileData = base64.b64decode(params['uParams'][0]['parameter1'])  # Capture file (base64-encoded bytes)

# Parse image file to Numpy array and decode as a color image (flags=1)
img_buffer = np.frombuffer(fileData, dtype=np.uint8)
im = cv2.imdecode(img_buffer, flags=1)

# Save file to local directory
# NOTE(review): bare except hides all errors, including a bad destination
# directory; the script continues regardless.
try:
    cv2.imwrite(os.path.join('app/assets/media/', f'{fileName}'), im)
    cv2.waitKey(0)
except:
    print('Problem saving file!')

# NOTE(review): cv2.imread returns None (rather than raising) on failure,
# so this except rarely triggers and `img` may be None below — confirm.
try:
    img = cv2.imread(f'app/assets/media/{fileName}', cv2.IMREAD_UNCHANGED)  # Load file to OpenCV
except:
    print('Error processing file!')

# get dimensions of image
dimensions = img.shape
# height, width, number of channels in image
height = img.shape[0]
width = img.shape[1]
size = os.path.getsize(f'app/assets/media/{fileName}')

# We'll consider image quality as calculating the area of an image divided by its size
imageQuality['imageQuality'] = (size / (height * width))

# Return results of processing
results = imageQuality

#*********************************************************************************/
# /* STOP WRITING YOUR DAPP CODE UP UNTIL HERE.*/
#*********************************************************************************/
# Results must return valid JSON Object
print(results)
sys.stdout.flush()
| 2,642 | 770 |
# Configure different hypothesis profiles
import os
from hypothesis import HealthCheck, Phase, settings
# Profile names registered below; selection happens at import time.
FAST_PROFILE = "fast"
CI_PROFILE = "ci"

# 'fast' profile for local development: few examples, quick feedback.
settings.register_profile(
    FAST_PROFILE,
    # Set to true for test reproducibility
    # https://hypothesis.readthedocs.io/en/latest/settings.html#hypothesis.settings.derandomize
    derandomize=False,
    max_examples=3,
    # https://hypothesis.readthedocs.io/en/latest/settings.html#controlling-what-runs
    phases=[Phase.generate, Phase.explain],
    # (sblaisdo) fails `HealthCheck.too_slow` with initial schema/addon loading
    suppress_health_check=[HealthCheck.too_slow],
    # (sblaisdo) default deadline of 200ms is exceeded in some cases
    deadline=None,
)

# 'ci' profile for pr_check.sh: slightly more examples, same safeguards.
settings.register_profile(
    CI_PROFILE,
    derandomize=False,
    max_examples=5,
    phases=[Phase.generate, Phase.explain],
    suppress_health_check=[HealthCheck.too_slow],
    deadline=None,
)

# Load profile: CI=true (env) selects the heavier profile, otherwise fast.
p = CI_PROFILE if os.getenv("CI") == "true" else FAST_PROFILE
print(f"Loading hypothesis profile: {p}")
settings.load_profile(p)
| 1,142 | 391 |
# coding: utf-8
from flask import render_template, redirect, request, url_for, flash, current_app
from flask_login import login_user, logout_user, login_required, UserMixin
from . import user
from .forms import LoginForm
from .. import login_manager
from config import Config
import time
# Username -> credential mapping loaded from static app config.
users = Config.USER_LIST


class User(UserMixin):
    """Minimal Flask-Login user; id/username attributes are set ad hoc by
    the loader callbacks below."""
    pass
@login_manager.user_loader
def user_loader(username):
    """Flask-Login user_loader callback: map a session username to a User.

    Returns None for usernames not present in the configured user list.
    """
    if username not in users:
        return None
    loaded = User()
    loaded.id = username
    loaded.username = username
    return loaded
def get_user(username):
    """Return a populated User for *username*, or None if unknown.

    Bug fix: this function was also decorated with
    @login_manager.user_loader, silently overriding user_loader() above
    with an identical body (Flask-Login keeps only the last registered
    callback). The duplicate registration is removed; behavior is
    unchanged because both bodies are identical.
    """
    if username not in users:
        return
    user = User()
    user.id = username
    user.username = username
    return user
@login_manager.request_loader
def request_loader(request):
    """Flask-Login request_loader: authenticate directly from form fields.

    Returns a User on a correct username/password pair, None otherwise.

    Bug fix: uses request.form.get('password') instead of indexing, so a
    request without a password field yields None instead of a KeyError
    (HTTP 500).
    """
    username = request.form.get('username')
    if username not in users:
        return
    user = User()
    user.id = username
    if request.form.get('password') == users[username]['password']:
        return user
    else:
        return None
@user.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form and authenticate POSTed credentials."""
    form = LoginForm()
    if form.validate_on_submit():
        username = form.username.data
        # Bug fix: membership check prevents a KeyError (HTTP 500) when an
        # unknown username is submitted.
        if username in users and users[username]['password'] == form.password.data:
            user = User()
            user.id = username
            user.username = username
            login_user(user, form.remember_me.data)
            # Parenthesized print works on both Python 2 and 3; the bare
            # print statement was a SyntaxError under Python 3.
            print(username + " is login date - " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
            return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid username or password.')
    return render_template('user/login.html', form=form)
@user.route('/logout')
@login_required
def logout():
    """End the current session and bounce back to the main page."""
    logout_user()
    flash('You have been logged out.')
    target = url_for('main.index')
    return redirect(target)
"""
Laboratory Experiment 3 - Script
- Rootlocus project
@author Rafael Lima
"""
from sympy import *
def simplifyFraction(G, s):
    """Simplify G and return it as a ratio of two polynomials in s."""
    numerator, denominator = fraction(G.expand().simplify())
    return Poly(numerator, s) / Poly(denominator, s)
def partfrac(G, s):
    """Split a fraction into partial fractions via the residue theorem.

    Assumes simple (non-repeated) poles: each term is the residue of G at
    the pole divided by (s - pole).

    Bug fix: this module is imported with ``from sympy import *``, so the
    name ``sympy`` is undefined here — ``sympy.fraction`` raised a
    NameError; use the star-imported ``fraction`` directly.
    """
    # Poles are the roots of the simplified denominator.
    poles = solve(fraction(G.expand().simplify())[1], s)
    # Accumulate residue / (s - p) for each pole.
    Gp = 0
    for p in poles:
        Gp = Gp + (G * (s - p)).subs(s, p) / (s - p)
    return Gp
def roundExpr(expr, num_digits=4):
    """Round every numeric atom in *expr* to *num_digits* digits."""
    replacements = {n: round(n, num_digits) for n in expr.atoms(Number)}
    return expr.xreplace(replacements)
# Limit printed precision for readability.
printing.printer.Printer().set_global_settings(precision=3)

# Symbols
s = symbols("s",complex=True)
z = symbols("z",complex=True)
K,a1,a0,T,b = symbols("K alpha_1 alpha_0 T beta",real=True)

# Constants (numeric values substituted later)
na0 = 1
nT = 0.5

# Open-loop transfer function (continuous time)
sGo = 1/(s+na0)
# Z transform (from table)
sGz = (1/na0)*(1-exp(-T))/(z-exp(-T))
# Controller TF (discrete integrator scaled by K)
sGc = K*z/(z-1)
sGma = simplify(expand(sGc*sGz))
# Closed-loop TF (unit feedback)
sGmf = simplify(expand(sGma/(1+sGma)))

# Characteristic equation (closed-loop denominator)
_,poly = fraction(sGmf)
# Find critical value for K: marginal stability at z = -1 on the unit circle
sK = solve(poly,K)[0]
Kmax = sK.subs([(T,nT),(z,-1)])
poles2 = solve(poly.subs([(T,nT),(K,2)]),z)

# Part 2

# Constants
na1 = 2
Ts = 0.2

# Open-loop transfer function
sGo2 = 1/(s+2)
# TODO find Z transform (from table)
# BUG: not matching ZOH discretization from matlab
sGz2 = (z-1)*(1/(4*(z-exp(-2*T))) - 1/(4*(z-1)) + T*(1/((z-1)*(z-1))))
# Controller TF (pole-zero compensator with gain K)
sGc2 = K*(z-exp(-na1*T))/(z-b)
sGma2 = simplifyFraction(sGc2*sGz2,z)
sGmf2 = simplify(expand(sGma2/(1+sGma2)))

# Expression from matlab (numeric ZOH discretization, kept for comparison)
mGz2 = ((2533546664982251*z)/144115188075855872 + 554410548014771/36028797018963968)/(z**2 - (3761226368457787*z)/2251799813685248 + 6037706219090157/9007199254740992)
# Controller TF from Matlab
mGc2 = K*(z-exp(-na1*T))/(z-b)
mGma2 = simplifyFraction(mGc2*mGz2,z)
mGmf2 = simplify(expand(mGma2/(1+mGma2)))

# Characteristic equation
_,poly2 = fraction(sGmf2)

# Requested design conditions (second-order dominant-pole approximation)
desiredDamping = 0.5
desiredSettlingTime = 2
desiredOvershoot = exp(-desiredDamping*pi/sqrt(1-desiredDamping**2))
desiredPoles = [0,0]
desiredPoles[0] = -(4/desiredSettlingTime)*(1 + I*sqrt(1-desiredDamping**2)/desiredDamping)
desiredPoles[1] = -(4/desiredSettlingTime)*(1 - I*sqrt(1-desiredDamping**2)/desiredDamping)
# Map desired s-plane poles to the z plane: z = exp(s*Ts)
desiredPolesZ = [exp(desiredPoles[0]*Ts),exp(desiredPoles[1]*Ts)]

# Solve linear system to find K and b (characteristic polynomial must
# vanish at both desired z-plane poles)
sysKb = [K,b]
sysKb[0] = poly2.subs([(z,desiredPolesZ[0]),(T,Ts)]).evalf().collect(K).collect(b)
sysKb[1] = poly2.subs([(z,desiredPolesZ[1]),(T,Ts)]).evalf().collect(K).collect(b)
resp = list(linsolve(sysKb,(K,b)))[0]
nK = resp[0]
nb = resp[1]

# Find closed-loop TF with the designed gains
nGmf2 = sGmf2.subs([(K,nK),(b,nb),(T,Ts)])

# Find critical value for K
sK2 = solve(poly2,K)[0]
#Kmax = sK.subs([(T,nT),(z,-1)])
| 2,994 | 1,453 |
import os
import unittest
from chase import Profile, Order, Reversal
# Orbital gateway test credentials come from the environment so they are
# never committed; each is None when unset.
merchant_id = os.environ.get('TEST_ORBITAL_MERCHANT_ID')
username = os.environ.get('TEST_ORBITAL_USERNAME')
password = os.environ.get('TEST_ORBITAL_PASSWORD')
def new_profile():
    """Build a Profile preloaded with the canonical test customer."""
    profile = Profile(
        merchant_id=merchant_id,
        username=username,
        password=password
    )
    for attr, value in (
        ('name', 'Test User'),
        ('address1', '101 Main St.'),
        ('address2', 'Apt. 4'),
        ('city', 'New York'),
        ('state', 'NY'),
        ('zipCode', '10012'),
        ('email', 'test@example.com'),
        ('phone', '9089089080'),
        ('cc_num', '4788250000028291'),
        ('cc_expiry', '1122'),
    ):
        setattr(profile, attr, value)
    return profile
def new_order():
    """Return an Order wired with the test merchant credentials."""
    return Order(merchant_id=merchant_id,
                 username=username,
                 password=password)
def new_reversal():
    """Return a Reversal wired with the test merchant credentials."""
    return Reversal(merchant_id=merchant_id,
                    username=username,
                    password=password)
class TestProfileFunctions(unittest.TestCase):
    """Exercises customer-profile CRUD against the Orbital test gateway."""

    def assert_default_fields(self, result):
        """Assert that *result* carries the canonical test-profile values."""
        expected = {
            'ProfileProcStatus': '0',
            'CustomerName': 'Test User',
            'CustomerAddress1': '101 Main St.',
            'CustomerAddress2': 'Apt. 4',
            'CustomerCity': 'New York',
            'CustomerState': 'NY',
            'CustomerZIP': '10012',
            'CustomerEmail': 'test@example.com',
            'CustomerPhone': '9089089080',
            'CCAccountNum': '4788250000028291',
            'CCExpireDate': '1122',
        }
        for field, value in expected.items():
            self.assertEqual(result[field], value)

    def test_lifecycle(self):
        """Create, read, update, and destroy a customer profile."""
        # create
        result = new_profile().create()
        self.assert_default_fields(result)
        ident = result['CustomerRefNum']

        # read
        reader = new_profile()
        reader.ident = ident
        self.assert_default_fields(reader.read())

        # update a subset of fields
        updater = new_profile()
        updater.ident = ident
        updater.name = 'Example Customer'
        updater.city = 'Philadelphia'
        updater.state = 'PA'
        updater.zipCode = '19130'
        result = updater.update()
        for field, value in (
            ('ProfileProcStatus', '0'),
            ('CustomerRefNum', ident),
            ('CustomerName', 'Example Customer'),
            ('CustomerCity', 'Philadelphia'),
            ('CustomerState', 'PA'),
            ('CustomerZIP', '19130'),
        ):
            self.assertEqual(result[field], value)

        # re-read: updated fields changed, untouched fields preserved
        result = updater.read()
        for field, value in (
            ('ProfileProcStatus', '0'),
            ('CustomerName', 'Example Customer'),
            ('CustomerAddress1', '101 Main St.'),
            ('CustomerAddress2', 'Apt. 4'),
            ('CustomerCity', 'Philadelphia'),
            ('CustomerState', 'PA'),
            ('CustomerZIP', '19130'),
            ('CustomerEmail', 'test@example.com'),
            ('CustomerPhone', '9089089080'),
            ('CCAccountNum', '4788250000028291'),
            ('CCExpireDate', '1122'),
        ):
            self.assertEqual(result[field], value)

        # destroy
        destroyer = new_profile()
        destroyer.ident = ident
        result = destroyer.destroy()
        self.assertEqual(result['ProfileProcStatus'], '0')
        self.assertEqual(result['CustomerRefNum'], ident)
class TestOrderFunctions(unittest.TestCase):
    """Exercises charge-then-void flows against the Orbital test gateway."""

    def test_profile_order(self):
        """Charge against a stored profile, then void the transaction."""
        self.profile = new_profile()
        customer_num = self.profile.create()['CustomerRefNum']

        order = new_order()
        order.customer_num = customer_num
        order.order_id = '100001'
        order.amount = '10.00'
        result = order.charge()
        self.assertEqual(result['ProfileProcStatus'], '0')
        tx_num, tx_idx = result['TxRefNum'], result['TxRefIdx']
        self.assertTrue(tx_num)
        self.assertTrue(tx_idx)

        refund = new_reversal()
        refund.tx_ref_num = tx_num
        refund.tx_ref_idx = tx_idx
        refund.order_id = '100001'
        self.assertEqual(refund.void()['ProcStatus'], '0')

    def test_cc_order(self):
        """Charge directly against card details, then void the transaction."""
        order = new_order()
        for attr, value in (
            ('order_id', '100001'),
            ('amount', '10.00'),
            ('address1', '101 Main St.'),
            ('address2', 'Apt. 4'),
            ('city', 'New York'),
            ('state', 'NY'),
            ('zipCode', '10012'),
            ('email', 'test@example.com'),
            ('phone', '9089089080'),
            ('cc_num', '4788250000028291'),
            ('cc_expiry', '1122'),
        ):
            setattr(order, attr, value)
        result = order.charge()
        tx_num, tx_idx = result['TxRefNum'], result['TxRefIdx']
        self.assertTrue(tx_num)
        self.assertTrue(tx_idx)

        refund = new_reversal()
        refund.tx_ref_num = tx_num
        refund.tx_ref_idx = tx_idx
        refund.order_id = '100001'
        self.assertEqual(refund.void()['ProcStatus'], '0')
# Run the full test suite when this module is executed as a script.
if __name__ == '__main__':
    unittest.main()
| 5,439 | 1,762 |
import torch as t
import torch.nn as nn
import torch.nn.functional as F
class Decoder(nn.Module):
    """Hybrid deconvolutional + recurrent sequence decoder.

    A stack of transposed 1-d convolutions expands a latent vector into a
    sequence of per-position vocabulary logits (the auxiliary output).
    Those logits, concatenated with the embedded decoder input, then drive
    a GRU whose hidden states are projected back to the vocabulary to give
    the primary logits.
    """

    def __init__(self, vocab_size, latent_variable_size, rnn_size, rnn_num_layers, embed_size):
        super(Decoder, self).__init__()

        self.vocab_size = vocab_size
        self.latent_variable_size = latent_variable_size
        self.rnn_size = rnn_size
        self.embed_size = embed_size
        self.rnn_num_layers = rnn_num_layers

        # Deconvolution stack: [batch, latent, 1] -> [batch, vocab, seq_len].
        deconv_layers = [
            nn.ConvTranspose1d(self.latent_variable_size, 512, 4, 2, 0),
            nn.BatchNorm1d(512),
            nn.ELU(),
            nn.ConvTranspose1d(512, 512, 4, 2, 0, output_padding=1),
            nn.BatchNorm1d(512),
            nn.ELU(),
            nn.ConvTranspose1d(512, 256, 4, 2, 0),
            nn.BatchNorm1d(256),
            nn.ELU(),
            nn.ConvTranspose1d(256, 256, 4, 2, 0, output_padding=1),
            nn.BatchNorm1d(256),
            nn.ELU(),
            nn.ConvTranspose1d(256, 128, 4, 2, 0),
            nn.BatchNorm1d(128),
            nn.ELU(),
            nn.ConvTranspose1d(128, self.vocab_size, 4, 2, 0),
        ]
        self.cnn = nn.Sequential(*deconv_layers)

        # The GRU consumes the CNN logits concatenated with the embeddings.
        self.rnn = nn.GRU(input_size=self.vocab_size + self.embed_size,
                          hidden_size=self.rnn_size,
                          num_layers=self.rnn_num_layers,
                          batch_first=True)

        self.hidden_to_vocab = nn.Linear(self.rnn_size, self.vocab_size)

    def forward(self, latent_variable, decoder_input):
        """
        :param latent_variable: float tensor of shape [batch_size, latent_variable_size]
        :param decoder_input: float tensor of shape [batch_size, max_seq_len, embed_size]
        :return: two tensors of shape [batch_size, max_seq_len, vocab_size] —
                 likelihood logits for the whole model and for the auxiliary
                 target, respectively
        """
        aux_logits = self.conv_decoder(latent_variable)
        logits, _ = self.rnn_decoder(aux_logits, decoder_input, initial_state=None)
        return logits, aux_logits

    def conv_decoder(self, latent_variable):
        """Expand the latent vector into [batch, seq_len, vocab] logits."""
        # Add a length-1 time axis, deconvolve, then move channels last.
        expanded = self.cnn(latent_variable.unsqueeze(2))
        return expanded.transpose(1, 2).contiguous()

    def rnn_decoder(self, cnn_out, decoder_input, initial_state=None):
        """Run the GRU over CNN logits + embeddings; project to vocab."""
        rnn_in = t.cat([cnn_out, decoder_input], 2)
        hidden_states, final_state = self.rnn(rnn_in, initial_state)
        batch_size, seq_len, _ = hidden_states.size()
        # Flatten (batch, seq) for a single linear projection, then restore.
        flat = hidden_states.contiguous().view(-1, self.rnn_size)
        logits = self.hidden_to_vocab(flat).view(batch_size, seq_len, self.vocab_size)
        return logits, final_state
| 2,698 | 1,022 |
# Entry point for a CompuCell3D simulation: register the steppable and run.
from cc3d import CompuCellSetup
from CircleSteppables import CircleSteppable

# frequency=1 invokes the steppable on every simulation step.
CompuCellSetup.register_steppable(steppable=CircleSteppable(frequency=1))
CompuCellSetup.run()
| 186 | 65 |
"""
Models for analytics application. Models used to store and operate all data received from the edx platform.
"""
from __future__ import division
from datetime import date, timedelta
import operator
import pycountry
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.db.models import Sum, Count, DateField
from django.db.models.expressions import F, Func, Value
from django.db.models.functions import Trunc
def get_last_calendar_day():
    """
    Return the half-open date interval covering the previous calendar day.

    Returns:
        start_of_day (date): the previous day itself (inclusive lower bound).
            Example: for the last calendar day 2017-05-15 this is 2017-05-15.
        end_of_day (date): today, the exclusive upper bound of the interval.
            Example: for the last calendar day 2017-05-15 this is 2017-05-16.
    """
    today = date.today()
    return today - timedelta(days=1), today
class EdxInstallation(models.Model):
    """
    Model that stores overall data received from the edx-platform.
    """

    # NOTE(review): presumably the token the installation uses to authenticate
    # statistics submissions — confirm against the receiving view.
    access_token = models.UUIDField(null=True)
    platform_name = models.CharField(max_length=255, null=True, blank=True)
    platform_url = models.URLField(null=True, blank=True)
    # 32-char identifier for the installation; origin not visible here.
    uid = models.CharField(null=True, max_length=32)
    latitude = models.FloatField(
        null=True, blank=True, help_text='Latitude coordinate of edX platform follows `float` type. Example: 50.10'
    )
    longitude = models.FloatField(
        null=True, blank=True, help_text='Longitude coordinate of edX platform follows `float` type. Example: 40.05'
    )
class InstallationStatistics(models.Model):
    """
    Model that stores statistics data received from the edx-platform.

    One row represents a single statistics submission from one installation;
    aggregation methods below roll the rows up per day, per month and overall.
    """

    active_students_amount_day = models.IntegerField(default=0)
    active_students_amount_week = models.IntegerField(default=0)
    active_students_amount_month = models.IntegerField(default=0)
    registered_students = models.IntegerField(default=0)
    enthusiastic_students = models.IntegerField(default=0)
    generated_certificates = models.IntegerField(default=0)
    courses_amount = models.IntegerField(default=0)
    data_created_datetime = models.DateTimeField()
    edx_installation = models.ForeignKey(EdxInstallation, on_delete=models.CASCADE)
    statistics_level = models.CharField(
        choices=(
            ('enthusiast', 'enthusiast'),
            ('paranoid', 'paranoid'),
        ),
        max_length=255,
        default='paranoid'
    )
    students_per_country = JSONField(
        default=dict,
        blank=True,
        null=True,
        help_text='This field has students country-count accordance. It follows `json` type. '
                  'Example: {"RU": 2632, "CA": 18543, "UA": 2011, "null": 1}'
    )

    # Label used in place of a country name when the country is unknown.
    unspecified_country_name = 'Country is not specified'

    @staticmethod
    def get_statistics_top_country(tabular_countries_list):
        """
        Get first country from tabular format country list.

        List is sorted, first country is a top active students rank country.

        :param tabular_countries_list: list of the two elements tuples
        :return: top country name as a string (empty string for an empty list)
        """
        if not tabular_countries_list:
            return ''
        return tabular_countries_list[0][0]

    @classmethod
    def get_stats_for_the_date(cls, statistics_date, edx_installation_object=None):
        """
        Provide statistic model instance for the given Edx installation.

        :param statistics_date: date the statistics record was created on.
        :param edx_installation_object: specific installation object.
        :return: statistic model instance if it is created at the specified day otherwise None
        """
        stat_item = cls.objects.filter(
            edx_installation=edx_installation_object,
            data_created_datetime__gte=statistics_date,
            data_created_datetime__lt=(statistics_date + timedelta(days=1))
        ).last()
        return stat_item

    @classmethod
    def timeline(cls):
        """
        Provide timeline in days for plotting on x axis.
        """
        timeline_datetimes = cls.objects.order_by(
            'data_created_datetime'
        ).values_list('data_created_datetime', flat=True).distinct()
        timeline_dates = [x.date().strftime('%Y-%m-%d') for x in timeline_datetimes]
        # Support case, when data are sent more often, for example when testing every 15 seconds.
        # Then filter unique and sort back, because timeline should be ordered.
        timeline_dates = sorted(set(timeline_dates))
        return timeline_dates

    @classmethod
    def data_per_period(cls):
        """
        Provide total students, courses and instances, from all services per period, day by default.

        We summarize values per day, because in same day we can receive data from multiple different instances.
        We suppose, that every instance sends data only once per day.
        """
        subquery = cls.objects.annotate(
            date_in_days=Trunc('data_created_datetime', 'day', output_field=DateField())
        ).values('date_in_days').order_by('date_in_days')

        students_per_day = subquery.annotate(
            students=Sum('active_students_amount_day')
        ).values_list('students', flat=True)
        courses_per_day = subquery.annotate(courses=Sum('courses_amount')).values_list('courses', flat=True)
        instances_per_day = subquery.annotate(
            instances=Count('edx_installation__access_token')
        ).values_list('instances', flat=True)

        return list(students_per_day), list(courses_per_day), list(instances_per_day)

    @classmethod
    def overall_counts(cls):
        """
        Provide total count of all instances, courses and students from all instances per previous calendar day.

        Returns overall counts as dict.
        {
            "instances_count": <int:instances_count>,
            "courses_count": <int:courses_count>,
            "students_count": <int:students_count>,
            "generated_certificates_count": <int:generated_certificates_count>,
        }
        """
        start_of_day, end_of_day = get_last_calendar_day()

        all_unique_instances = cls.objects.filter(
            data_created_datetime__gte=start_of_day, data_created_datetime__lt=end_of_day
        )

        instances_count = all_unique_instances.count()
        courses_count = all_unique_instances.aggregate(
            Sum('courses_amount')
        )['courses_amount__sum']
        students_count = all_unique_instances.aggregate(
            Sum('active_students_amount_day')
        )['active_students_amount_day__sum']
        generated_certificates_count = all_unique_instances.aggregate(
            Sum('generated_certificates')
        )['generated_certificates__sum']
        registered_students_count = all_unique_instances.aggregate(
            Sum('registered_students')
        )['registered_students__sum']

        # `aggregate` returns None for an empty queryset — coerce to zero.
        return {
            "instances_count": instances_count or 0,
            "courses_count": courses_count or 0,
            "students_count": students_count or 0,
            "generated_certificates_count": generated_certificates_count or 0,
            "registered_students_count": registered_students_count or 0,
        }

    @classmethod
    def get_charts_data(cls):
        """
        Provide data about certificates and users for chart.

        :return: dict
            {
                "19-01-28": [0, 1, 0],
                "19-01-22": [0, 7, 0],
                "19-01-31": [0, 0, 0],
            }
        """
        statistics = cls.objects.all()
        charts = dict()
        # NOTE(review): several records on the same day keep only the last
        # one iterated — presumably acceptable for charting; confirm.
        for item in statistics:
            charts[item.data_created_datetime.strftime('%y-%m-%d')] = [
                item.registered_students,
                item.generated_certificates,
                item.enthusiastic_students
            ]
        return charts

    @classmethod
    def get_students_per_country_stats(cls):
        """
        Total of students amount per country to display on world map from all instances per month.

        Returns:
            world_students_per_country (dict): Country-count accordance as pair of key-value.
        """
        # Annotate each row with a sortable month key and a verbose label
        # rendered by PostgreSQL's `to_char`.
        queryset = cls.objects.annotate(
            month_verbose=Func(
                F('data_created_datetime'), Value('TMMonth YYYY'), function='to_char'
            ),
            month_ordering=Func(
                F('data_created_datetime'), Value('YYYY-MM'), function='to_char'
            ),
        )
        result_rows = queryset.values_list(
            'month_ordering', 'month_verbose', 'students_per_country'
        )
        return cls.aggregate_countries_by_months(result_rows)

    @classmethod
    def aggregate_countries_by_months(cls, values_list):
        """
        Aggregate all the months and countries data by the month.

        :param values_list: list queryset result with three elements for every row
        :return: dictionary of months with the student countries statistics
        """
        months = {}
        for month_ordering, month_verbose, countries in values_list:
            cls.add_month_countries_data(
                month_ordering, month_verbose, countries, months
            )
        return months

    @classmethod
    def add_month_countries_data(
            cls, month_ordering, month_verbose, countries, months
    ):
        """
        Add a month data to the months dictionary.

        :param month_ordering: sortable date key represented as a string
        :param month_verbose: human friendly date represented as a string
        :param countries: dictionary of countries where the key is the country code and
                          the value is the amount of the students
        :param months: dictionary that needs to be updated by the data, passed to the method
        """
        if month_ordering not in months:
            months[month_ordering] = {
                'countries': countries,
                'label': month_verbose,
            }
            return
        cls.add_up_new_month_data(months[month_ordering]['countries'], countries)

    @classmethod
    def add_up_new_month_data(cls, existing_data, new_data):
        """
        Add a new month data to the resulting data dictionary.

        Adds the counts from the new countries data dictionary to the existing ones or adds
        new countries if they don't exist in the existing_data.
        """
        for existent_key in existing_data.keys():
            existing_data[existent_key] += new_data.pop(existent_key, 0)
        existing_data.update(new_data)

    @classmethod
    def create_students_per_country(cls, worlds_students_per_country):
        """
        Create convenient and necessary data formats to render it from view.

        Graphs require list-format data.
        """
        datamap_format_countries_list = []
        tabular_format_countries_map = {}

        if not worlds_students_per_country:
            tabular_format_countries_map[cls.unspecified_country_name] = [0, 0]
            return datamap_format_countries_list, list(tabular_format_countries_map.items())

        all_active_students = sum(worlds_students_per_country.values())

        for country, count in worlds_students_per_country.items():
            student_amount_percentage = cls.get_student_amount_percentage(count, all_active_students)

            # Older pycountry raises KeyError on an unknown code, newer
            # versions return None — handle both so an unknown code always
            # falls into the "unspecified" bucket instead of crashing.
            try:
                country_info = pycountry.countries.get(alpha_2=country)
            except KeyError:
                country_info = None

            if country_info is None:
                # Create students without country amount.
                country_name = cls.unspecified_country_name
            else:
                datamap_format_countries_list += [[country_info.alpha_3, count]]
                country_name = country_info.name

            if country_name in tabular_format_countries_map:
                tabular_format_countries_map[country_name] = list(map(
                    operator.add,
                    tabular_format_countries_map[country_name],
                    [count, student_amount_percentage]
                ))
            else:
                tabular_format_countries_map[country_name] = [count, student_amount_percentage]

        # Pop out the unspecified country
        unspecified_country_values = tabular_format_countries_map.pop(cls.unspecified_country_name, None)

        # Sort in descending order.
        tabular_format_countries_list = sorted(
            tabular_format_countries_map.items(),
            key=lambda x: x[1][0],
            reverse=True
        )

        # The unspecified bucket always goes last, regardless of its count.
        if unspecified_country_values:
            tabular_format_countries_list.append(
                (cls.unspecified_country_name, unspecified_country_values)
            )

        return datamap_format_countries_list, tabular_format_countries_list

    @classmethod
    def get_students_per_country(cls):
        """
        Gather convenient and necessary data formats to render it from view.
        """
        months = cls.get_students_per_country_stats()
        for month in months.values():
            datamap_list, tabular_list = cls.create_students_per_country(month['countries'])
            month['datamap_countries_list'] = datamap_list
            month['tabular_countries_list'] = tabular_list
            month['top_country'] = cls.get_statistics_top_country(tabular_list)
            # The "unspecified" pseudo-country must not count as a country.
            month['countries_amount'] = (
                len(month['countries']) - (cls.unspecified_country_name in month['countries'])
            )
        return months

    @staticmethod
    def get_student_amount_percentage(country_count_in_statistics, all_active_students):
        """
        Calculate student amount percentage based on total countries amount and particular county amount comparison.
        """
        if all_active_students == 0:
            return 0
        students_amount_percentage = int(country_count_in_statistics / all_active_students * 100)
        return students_amount_percentage

    @classmethod
    def get_students_countries_amount(cls, months):
        """
        Provide countries amount from students per country statistics as table.

        Calculate countries amount in world students per country statistics (from tabular countries list).

        Tabular format countries list can be empty - countries amount is zero.

        Tabular format countries list can be not empty - it contains particular country-count accordance
        and `Country is not specified` field, that has students without country amount.
        Actually `Country is not specified` field is not a country, so it does not fill up in countries amount.
        """
        countries_amount = 0
        for month in months.values():
            countries = dict(month['tabular_countries_list'])
            countries.pop(cls.unspecified_country_name, None)
            countries_amount += len(countries)
        return countries_amount

    def update(self, stats):
        """
        Update model from given dictionary and save it.

        :param stats: dictionary with new data.
        """
        for (key, value) in stats.items():
            setattr(self, key, value)
        self.save()
| 15,329 | 4,332 |
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from pypowervm import adapter as adp
from pypowervm import exceptions as exc
from pypowervm.tasks import cna
from pypowervm.tests import test_fixtures as fx
from pypowervm.tests.test_utils import test_wrapper_abc as twrap
from pypowervm.wrappers import entry_wrapper as ewrap
from pypowervm.wrappers import logical_partition as pvm_lpar
from pypowervm.wrappers import network as pvm_net
# Canned feed fixtures loaded by the TestWrapper harness for the test classes
# below.
VSWITCH_FILE = 'fake_vswitch_feed.txt'
VNET_FILE = 'fake_virtual_network_feed.txt'
class TestCNA(twrap.TestWrapper):
    """Unit Tests for creating Client Network Adapters."""

    # Harness config: remote-HMC traits, vswitch feed fixture, VSwitch wrapper.
    mock_adapter_fx_args = {'traits': fx.RemoteHMCTraits}
    file = VSWITCH_FILE
    wrapper_class_to_test = pvm_net.VSwitch

    @mock.patch('pypowervm.tasks.cna._find_or_create_vnet')
    def test_crt_cna(self, mock_vnet_find):
        """Tests the creation of Client Network Adapters."""
        # Create a side effect that can validate the input into the create
        # call.
        def validate_of_create(*kargs, **kwargs):
            self.assertIsNotNone(kargs[0])
            self.assertEqual('LogicalPartition', kargs[1])
            self.assertEqual('fake_lpar', kwargs.get('root_id'))
            self.assertEqual('ClientNetworkAdapter', kwargs.get('child_type'))
            return pvm_net.CNA.bld(self.adpt, 1, 'href').entry
        self.adpt.create.side_effect = validate_of_create
        self.adpt.read.return_value = self.resp

        n_cna = cna.crt_cna(self.adpt, None, 'fake_lpar', 5)
        self.assertIsNotNone(n_cna)
        self.assertIsInstance(n_cna, pvm_net.CNA)
        # Under remote HMC traits the vnet lookup/creation must happen once.
        self.assertEqual(1, mock_vnet_find.call_count)

    @mock.patch('pypowervm.tasks.cna._find_or_create_vnet')
    def test_crt_cna_no_vnet_crt(self, mock_vnet_find):
        """Tests the creation of Client Network Adapters.

        The virtual network creation shouldn't be done in this flow.
        """
        # PVMish Traits
        self.adptfx.set_traits(fx.LocalPVMTraits)
        self.adpt.read.return_value = self.resp

        # Create a side effect that can validate the input into the create
        # call.
        def validate_of_create(*kargs, **kwargs):
            self.assertIsNotNone(kargs[0])
            self.assertEqual('LogicalPartition', kargs[1])
            self.assertEqual('fake_lpar', kwargs.get('root_id'))
            self.assertEqual('ClientNetworkAdapter', kwargs.get('child_type'))
            return pvm_net.CNA.bld(self.adpt, 1, 'href').entry
        self.adpt.create.side_effect = validate_of_create

        n_cna = cna.crt_cna(self.adpt, None, 'fake_lpar', 5, slot_num=1)
        self.assertIsNotNone(n_cna)
        self.assertIsInstance(n_cna, pvm_net.CNA)
        # Local PVM traits must skip the vnet lookup entirely.
        self.assertEqual(0, mock_vnet_find.call_count)

    def test_find_or_create_vswitch(self):
        """Validates that a vswitch can be created."""
        self.adpt.read.return_value = self.resp
        # Test that it finds the right vSwitch
        vswitch_w = cna._find_or_create_vswitch(self.adpt, 'ETHERNET0', True)
        self.assertIsNotNone(vswitch_w)

        # Create a side effect that can validate the input into the create call
        def validate_of_create(*kargs, **kwargs):
            self.assertIsNotNone(kargs[0])
            # Is the vSwitch create
            self.assertEqual('ManagedSystem', kargs[1])
            self.assertEqual('VirtualSwitch', kwargs.get('child_type'))
            # Return a previously created vSwitch...
            return self.dwrap.entry
        self.adpt.create.side_effect = validate_of_create

        # Test the create
        vswitch_w = cna._find_or_create_vswitch(self.adpt, 'Temp', True)
        self.assertIsNotNone(vswitch_w)
        self.assertTrue(self.adpt.create.called)

        # Make sure that if the create flag is set to false, an error is thrown
        # when the vswitch can't be found.
        self.assertRaises(exc.Error, cna._find_or_create_vswitch, self.adpt,
                          'Temp', False)
class TestVNET(twrap.TestWrapper):
    """Unit tests for virtual-network / trunk adapter helpers in cna."""

    # Harness config: remote-HMC traits, vnet feed fixture, VNet wrapper.
    mock_adapter_fx_args = {'traits': fx.RemoteHMCTraits}
    file = VNET_FILE
    wrapper_class_to_test = pvm_net.VNet

    def test_find_or_create_vnet(self):
        """Tests that the virtual network can be found/created."""
        self.adpt.read.return_value = self.resp

        # A fake vswitch whose href matches the fixture's vnet associations.
        fake_vs = mock.Mock()
        fake_vs.switch_id = 0
        fake_vs.name = 'ETHERNET0'
        fake_vs.related_href = ('https://9.1.2.3:12443/rest/api/uom/'
                                'ManagedSystem/'
                                '67dca605-3923-34da-bd8f-26a378fc817f/'
                                'VirtualSwitch/'
                                'ec8aaa54-9837-3c23-a541-a4e4be3ae489')

        # This should find a vnet.
        vnet_resp = cna._find_or_create_vnet(self.adpt, '2227', fake_vs)
        self.assertIsNotNone(vnet_resp)

        # Now flip to a CNA that requires a create...
        resp = adp.Response('reqmethod', 'reqpath', 'status', 'reason', {})
        resp.entry = ewrap.EntryWrapper._bld(
            self.adpt, tag='VirtualNetwork').entry
        self.adpt.create.return_value = resp
        vnet_resp = cna._find_or_create_vnet(self.adpt, '2228', fake_vs)
        self.assertIsNotNone(vnet_resp)
        self.assertEqual(1, self.adpt.create.call_count)

    def test_find_free_vlan(self):
        """Tests that a free VLAN can be found."""
        self.adpt.read.return_value = self.resp

        # Mock data specific to the VNET File
        fake_vs = mock.Mock()
        fake_vs.name = 'ETHERNET0'
        fake_vs.related_href = ('https://9.1.2.3:12443/rest/api/uom/'
                                'ManagedSystem/'
                                '67dca605-3923-34da-bd8f-26a378fc817f/'
                                'VirtualSwitch/'
                                'ec8aaa54-9837-3c23-a541-a4e4be3ae489')
        self.assertEqual(1, cna._find_free_vlan(self.adpt, fake_vs))

    @mock.patch('pypowervm.wrappers.network.VNet.wrap')
    def test_find_free_vlan_mocked(self, mock_vnet_wrap):
        """Uses lots of mock data for a find vlan."""
        self.adpt.read.return_value = mock.Mock()

        # Helper function to build the vnets.
        def build_mock_vnets(max_vlan, vswitch_uri):
            vnets = []
            for x in range(1, max_vlan + 1):
                vnets.append(mock.Mock(vlan=x,
                                       associated_switch_uri=vswitch_uri))
            return vnets

        mock_vswitch = mock.Mock(related_href='test_vs')

        # Test when all the vnet's are on a single switch.
        mock_vnet_wrap.return_value = build_mock_vnets(3000, 'test_vs')
        self.assertEqual(3001, cna._find_free_vlan(self.adpt, mock_vswitch))

        # Test with multiple switches.  The second vswitch with a higher vlan
        # should not impact the vswitch we're searching for.
        mock_vnet_wrap.return_value = (build_mock_vnets(2000, 'test_vs') +
                                       build_mock_vnets(4000, 'test_vs2'))
        self.assertEqual(2001, cna._find_free_vlan(self.adpt, mock_vswitch))

        # Test when all the VLANs are consumed (4094 is the top of the range).
        mock_vnet_wrap.return_value = build_mock_vnets(4094, 'test_vs')
        self.assertRaises(exc.Error, cna._find_free_vlan, self.adpt,
                          mock_vswitch)

    @mock.patch('pypowervm.tasks.cna._find_free_vlan')
    def test_assign_free_vlan(self, mock_find_vlan):
        """Tests that assign_free_vlan rewrites the pvid (and enablement)."""
        mock_find_vlan.return_value = 2016
        mocked = mock.MagicMock()
        mock_cna = mock.MagicMock(pvid=31, enabled=False)
        mock_cna.update.return_value = mock_cna
        updated_cna = cna.assign_free_vlan(mocked, mocked, mocked, mock_cna)
        self.assertEqual(2016, updated_cna.pvid)
        # Enablement must be preserved unless explicitly requested.
        self.assertEqual(mock_cna.enabled, updated_cna.enabled)
        updated_cna = cna.assign_free_vlan(mocked, mocked, mocked, mock_cna,
                                           ensure_enabled=True)
        self.assertEqual(True, updated_cna.enabled)

    @mock.patch('pypowervm.wrappers.network.CNA.bld')
    @mock.patch('pypowervm.tasks.cna._find_free_vlan')
    @mock.patch('pypowervm.tasks.cna._find_or_create_vswitch')
    @mock.patch('pypowervm.tasks.partition.get_partitions')
    def test_crt_p2p_cna(
            self, mock_get_partitions, mock_find_or_create_vswitch,
            mock_find_free_vlan, mock_cna_bld):
        """Tests the crt_p2p_cna."""
        # Mock out the data
        mock_vswitch = mock.Mock(related_href='vswitch_href')
        mock_find_or_create_vswitch.return_value = mock_vswitch
        mock_find_free_vlan.return_value = 2050

        # Mock the get of the VIOSes
        mock_vio1 = mock.Mock(uuid='src_io_host_uuid')
        mock_vio2 = mock.Mock(uuid='vios_uuid2')
        mock_get_partitions.return_value = [mock_vio1, mock_vio2]

        # bld is called three times: two trunk adapters then the client CNA.
        mock_cna = mock.MagicMock()
        mock_trunk1, mock_trunk2 = mock.MagicMock(pvid=2050), mock.MagicMock()
        mock_trunk1.create.return_value = mock_trunk1
        mock_cna_bld.side_effect = [mock_trunk1, mock_trunk2, mock_cna]

        # Invoke the create
        mock_ext_ids = {'test': 'value', 'test2': 'value2'}
        client_adpt, trunk_adpts = cna.crt_p2p_cna(
            self.adpt, None, 'lpar_uuid',
            ['src_io_host_uuid', 'vios_uuid2'], mock_vswitch, crt_vswitch=True,
            slot_num=1, mac_addr='aabbccddeeff', ovs_bridge='br-ex',
            ovs_ext_ids=mock_ext_ids, configured_mtu=1450)

        # Make sure the client and trunk were 'built'
        mock_cna_bld.assert_any_call(self.adpt, 2050, 'vswitch_href',
                                     slot_num=1, mac_addr='aabbccddeeff')
        mock_cna_bld.assert_any_call(
            self.adpt, 2050, 'vswitch_href', trunk_pri=1, dev_name=None,
            ovs_bridge='br-ex', ovs_ext_ids=mock_ext_ids, configured_mtu=1450)
        mock_cna_bld.assert_any_call(
            self.adpt, 2050, 'vswitch_href', trunk_pri=2, dev_name=None,
            ovs_bridge='br-ex', ovs_ext_ids=mock_ext_ids, configured_mtu=1450)

        # Make sure they were then created
        self.assertIsNotNone(client_adpt)
        self.assertEqual(2, len(trunk_adpts))
        mock_cna.create.assert_called_once_with(
            parent_type=pvm_lpar.LPAR, parent_uuid='lpar_uuid')
        mock_trunk1.create.assert_called_once_with(parent=mock_vio1)
        mock_trunk2.create.assert_called_once_with(parent=mock_vio2)

    @mock.patch('pypowervm.wrappers.network.CNA.bld')
    @mock.patch('pypowervm.tasks.cna._find_free_vlan')
    @mock.patch('pypowervm.tasks.cna._find_or_create_vswitch')
    @mock.patch('pypowervm.tasks.partition.get_partitions')
    def test_crt_p2p_cna_single(
            self, mock_get_partitions, mock_find_or_create_vswitch,
            mock_find_free_vlan, mock_cna_bld):
        """Tests the crt_p2p_cna with the mgmt lpar and a dev_name."""
        # Mock out the data
        mock_vswitch = mock.Mock(related_href='vswitch_href')
        mock_find_or_create_vswitch.return_value = mock_vswitch
        mock_find_free_vlan.return_value = 2050

        # Mock the get of the VIOSes
        mock_vio1 = mock.Mock(uuid='mgmt_lpar_uuid')
        mock_vio2 = mock.Mock(uuid='vios_uuid2')
        mock_get_partitions.return_value = [mock_vio1, mock_vio2]

        # Single trunk adapter then the client CNA.
        mock_cna = mock.MagicMock()
        mock_trunk1 = mock.MagicMock(pvid=2050)
        mock_trunk1.create.return_value = mock_trunk1
        mock_cna_bld.side_effect = [mock_trunk1, mock_cna]

        # Invoke the create
        client_adpt, trunk_adpts = cna.crt_p2p_cna(
            self.adpt, None, 'lpar_uuid',
            ['mgmt_lpar_uuid'], mock_vswitch, crt_vswitch=True,
            mac_addr='aabbccddeeff', dev_name='tap-12345')

        # Make sure the client and trunk were 'built'
        mock_cna_bld.assert_any_call(self.adpt, 2050, 'vswitch_href',
                                     mac_addr='aabbccddeeff', slot_num=None)
        mock_cna_bld.assert_any_call(
            self.adpt, 2050, 'vswitch_href', trunk_pri=1, dev_name='tap-12345',
            ovs_bridge=None, ovs_ext_ids=None, configured_mtu=None)

        # Make sure they were then created
        self.assertIsNotNone(client_adpt)
        self.assertEqual(1, len(trunk_adpts))
        mock_cna.create.assert_called_once_with(
            parent_type=pvm_lpar.LPAR, parent_uuid='lpar_uuid')
        mock_trunk1.create.assert_called_once_with(parent=mock_vio1)

    @mock.patch('pypowervm.wrappers.network.CNA.bld')
    @mock.patch('pypowervm.tasks.cna._find_free_vlan')
    @mock.patch('pypowervm.tasks.cna._find_or_create_vswitch')
    @mock.patch('pypowervm.tasks.partition.get_partitions')
    def test_crt_trunk_with_free_vlan(
            self, mock_get_partitions, mock_find_or_create_vswitch,
            mock_find_free_vlan, mock_cna_bld):
        """Tests the crt_trunk_with_free_vlan on mgmt based VIOS."""
        # Mock out the data
        mock_vswitch = mock.Mock(related_href='vswitch_href')
        mock_find_or_create_vswitch.return_value = mock_vswitch
        mock_find_free_vlan.return_value = 2050

        # Mock the get of the VIOSes.
        mock_vio1 = mock.Mock(uuid='vios_uuid1')
        mock_get_partitions.return_value = [mock_vio1]

        mock_trunk1 = mock.MagicMock(pvid=2050)
        mock_trunk1.create.return_value = mock_trunk1
        mock_cna_bld.return_value = mock_trunk1

        # Invoke the create
        mock_ext_id = {'test1': 'value1', 'test2': 'value2'}
        trunk_adpts = cna.crt_trunk_with_free_vlan(
            self.adpt, None, ['vios_uuid1'],
            mock_vswitch, crt_vswitch=True, dev_name='tap-12345',
            ovs_bridge='br-int', ovs_ext_ids=mock_ext_id, configured_mtu=1450)

        # Make sure the client and trunk were 'built'
        mock_cna_bld.assert_any_call(
            self.adpt, 2050, 'vswitch_href', trunk_pri=1, dev_name='tap-12345',
            ovs_bridge='br-int', ovs_ext_ids=mock_ext_id, configured_mtu=1450)

        # Make sure that the trunk was created
        self.assertEqual(1, len(trunk_adpts))
        mock_trunk1.create.assert_called_once_with(parent=mock_vio1)

    @mock.patch('pypowervm.wrappers.network.CNA.get')
    def test_find_trunk_on_lpar(self, mock_cna_get):
        """Only a trunk matching both pvid and vswitch_id should be found."""
        parent_wrap = mock.MagicMock()
        m1 = mock.Mock(is_trunk=True, pvid=2, vswitch_id=2)
        m2 = mock.Mock(is_trunk=False, pvid=3, vswitch_id=2)
        m3 = mock.Mock(is_trunk=True, pvid=3, vswitch_id=1)
        m4 = mock.Mock(is_trunk=True, pvid=3, vswitch_id=2)
        mock_cna_get.return_value = [m1, m2, m3]
        self.assertIsNone(cna._find_trunk_on_lpar(self.adpt, parent_wrap, m4))
        self.assertTrue(mock_cna_get.called)

        mock_cna_get.reset_mock()
        mock_cna_get.return_value = [m1, m2, m3, m4]
        self.assertEqual(m4, cna._find_trunk_on_lpar(self.adpt, parent_wrap,
                                                     m4))
        self.assertTrue(mock_cna_get.called)

    @mock.patch('pypowervm.tasks.cna._find_trunk_on_lpar')
    @mock.patch('pypowervm.tasks.partition.get_mgmt_partition')
    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
    def test_find_trunks(self, mock_vios_get, mock_get_mgmt,
                         mock_find_trunk):
        """find_trunks must return matches sorted by trunk priority."""
        # Mocked responses can be simple, since they are just fed into the
        # _find_trunk_on_lpar
        mock_vios_get.return_value = [mock.MagicMock(), mock.MagicMock()]
        mock_get_mgmt.return_value = mock.MagicMock()

        # The responses back from the find trunk.  Make it an odd trunk
        # priority ordering to make sure we sort properly
        v1 = mock.Mock(trunk_pri=3)
        c1, c2 = mock.Mock(trunk_pri=1), mock.Mock(trunk_pri=2)
        mock_find_trunk.side_effect = [v1, c1, c2]

        # Invoke the method.
        resp = cna.find_trunks(self.adpt, mock.Mock(pvid=2))

        # Make sure four calls to the find trunk
        self.assertEqual(3, mock_find_trunk.call_count)

        # Order of the response is important.  Should be based off of trunk
        # priority
        self.assertEqual([c1, c2, v1], resp)

    @mock.patch('pypowervm.wrappers.network.CNA.get')
    def test_find_all_trunks_on_lpar(self, mock_cna_get):
        """Trunk adapters are filtered by is_trunk and optional vswitch_id."""
        parent_wrap = mock.MagicMock()
        m1 = mock.Mock(is_trunk=True, vswitch_id=2)
        m2 = mock.Mock(is_trunk=False, vswitch_id=2)
        m3 = mock.Mock(is_trunk=True, vswitch_id=1)
        m4 = mock.Mock(is_trunk=True, vswitch_id=2)
        mock_cna_get.return_value = [m1, m2, m3, m4]
        returnVal = [m1, m3, m4]
        self.assertEqual(returnVal, cna._find_all_trunks_on_lpar(self.adpt,
                                                                 parent_wrap))

        mock_cna_get.reset_mock()
        mock_cna_get.return_value = [m1, m2, m3, m4]
        self.assertEqual([m3],
                         cna._find_all_trunks_on_lpar(self.adpt,
                                                      parent_wrap=parent_wrap,
                                                      vswitch_id=1))

    @mock.patch('pypowervm.wrappers.network.CNA.get')
    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
    @mock.patch('pypowervm.wrappers.logical_partition.LPAR.get')
    def test_find_cna_wraps(self, mock_lpar_get, mock_vios_get, mock_cna_get):
        """CNA wraps are gathered across LPARs and VIOSes, optionally filtered."""
        # Mocked responses are simple since they are only used for
        # pvm_net.CNA.get
        mock_lpar_get.return_value = [mock.MagicMock()]
        mock_vios_get.return_value = [mock.MagicMock()]

        # Mocked cna_wraps
        m1 = mock.Mock(uuid=2, pvid=2, vswitch_id=2)
        m2 = mock.Mock(uuid=3, pvid=1, vswitch_id=1)
        m3 = mock.Mock(uuid=1, pvid=1, vswitch_id=1)

        mock_cna_get.side_effect = [[m1, m2], [m3]]
        mock_trunk = mock.Mock(adapter=self.adpt, uuid=1, pvid=1, vswitch_id=1)
        self.assertEqual([m1, m2, m3], cna._find_cna_wraps(mock_trunk))

        # side_effect is consumed — reset it before the filtered call.
        mock_cna_get.side_effect = [[m1, m2], [m3]]
        self.assertEqual([m2, m3], cna._find_cna_wraps(mock_trunk, 1))

    @mock.patch('pypowervm.tasks.cna._find_cna_wraps')
    def test_find_cnas_on_trunk(self, mock_find_wraps):
        """Only non-trunk CNAs matching the trunk's pvid/vswitch are returned."""
        # Mocked cna_wraps
        m1 = mock.Mock(uuid=2, pvid=2, vswitch_id=2)
        m2 = mock.Mock(uuid=3, pvid=1, vswitch_id=1)
        m3 = mock.Mock(uuid=1, pvid=1, vswitch_id=1)
        mock_find_wraps.return_value = [m1, m2, m3]

        mock_trunk = mock.Mock(adapter=self.adpt, uuid=1, pvid=1, vswitch_id=1)
        self.assertEqual([m2], cna.find_cnas_on_trunk(mock_trunk))

        mock_find_wraps.return_value = [m1, m3]
        self.assertEqual([], cna.find_cnas_on_trunk(mock_trunk))

        mock_trunk = mock.Mock(adapter=self.adpt, uuid=3, pvid=3, vswitch_id=3)
        self.assertEqual([], cna.find_cnas_on_trunk(mock_trunk))

    @mock.patch('pypowervm.tasks.cna._find_cna_wraps')
    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
    @mock.patch('pypowervm.tasks.partition.get_mgmt_partition')
    @mock.patch('pypowervm.tasks.cna._find_all_trunks_on_lpar')
    @mock.patch('pypowervm.wrappers.network.VSwitch.search')
    def test_find_orphaned_trunks(self, mock_vswitch, mock_trunks,
                                  mock_get_mgmt, mock_vios_get, mock_wraps):
        """A trunk with no matching client CNA is reported as orphaned."""
        mock_vswitch.return_value = mock.MagicMock(switch_id=1)
        mock_get_mgmt.return_value = mock.MagicMock()
        mock_vios_get.return_value = [mock.MagicMock()]

        # Mocked cna_wraps
        m1 = mock.Mock(is_trunk=True, uuid=2, pvid=2, vswitch_id=1)
        m2 = mock.Mock(is_trunk=False, uuid=3, pvid=3, vswitch_id=1)
        m3 = mock.Mock(is_trunk=True, uuid=1, pvid=1, vswitch_id=1)
        m4 = mock.Mock(is_trunk=False, uuid=4, pvid=1, vswitch_id=1)
        mock_wraps.return_value = [m1, m2, m3, m4]

        mock_trunks.side_effect = [[m1, m3], []]
        # m3's pvid has a client (m4); m1's does not -> m1 is orphaned.
        self.assertEqual([m1], cna.find_orphaned_trunks(self.adpt,
                                                        mock.MagicMock))
| 20,560 | 7,622 |
from dispatch.plugins.bases import ParticipantGroupPlugin
class TestParticipantGroupPlugin(ParticipantGroupPlugin):
    """No-op participant-group plugin used to exercise Dispatch's plugin
    machinery in tests; every operation is a stub that does nothing."""

    title = "Dispatch Test Plugin - Participant Group"
    slug = "test-participant-group"

    def create(self, participants, **kwargs):
        # Stub: no group is actually created.
        return

    def add(self, participant, **kwargs):
        # Stub: nothing to add the participant to.
        return

    def remove(self, participant, **kwargs):
        # Stub: nothing to remove the participant from.
        return
#!/usr/bin/python
from string import Formatter
# Skeleton of the generated C++ stats header.  Every {PLACEHOLDER} is
# filled in by gen_stats(); doubled braces ('{{' / '}}') are literal
# braces in the emitted C++.  The header defines a per-thread record
# struct, a thread-local accessor, and a process-wide singleton that
# aggregates published records.
_STATS_TEMPLATE = '''#ifndef {INCLUDE_GUARD}
#define {INCLUDE_GUARD}
// clang-format off
#ifdef {MACRO_PREFIX}ON
{INCLUDES}
#include <iomanip>
#include <sstream>
#include <string>
#include <vector>
// comment out any stats you don't want, and things will be just dandy
{MACROS_ON}
#define {MACRO_PREFIX}PUBLISH_RECORD() \\
do {{ \\
{NS_ACCESS}{CLASS_NAME}::get().publish({NS_ACCESS}tls_record()); \\
{NS_ACCESS}tls_record() = {{}}; \\
}} while(0) \\
/**/
#define {MACRO_PREFIX}CLEAR() {NS_ACCESS}{CLASS_NAME}::get().clear()
#ifndef {MACRO_PREFIX}DUMP
#include <iostream>
#define {MACRO_PREFIX}DUMP() (std::cout << {NS_ACCESS}{CLASS_NAME}::get().results())
#endif /* {MACRO_PREFIX}DUMP */
#else
#define {MACRO_PREFIX}PUBLISH_RECORD() /**/
#define {MACRO_PREFIX}CLEAR() /**/
#ifndef {MACRO_PREFIX}DUMP
#define {MACRO_PREFIX}DUMP() /**/
#endif /* {MACRO_PREFIX}DUMP */
#endif /* {MACRO_PREFIX}ON */
{MACROS_OFF}
// clang-format on
#ifdef {MACRO_PREFIX}ON
{NAMESPACE_BEGIN}
struct {CLASS_NAME}_tls_record
{{
{THREAD_RECORD_MEMBERS}
{CLASS_NAME}_tls_record() noexcept = default;
{THREAD_RECORD_MEMBER_FUNCTIONS}
std::string results() const
{{
std::ostringstream ostr;
ostr
{THREAD_RECORD_STREAM_OUTPUT};
return ostr.str();
}}
}};
inline {CLASS_NAME}_tls_record& tls_record() noexcept
{{
static LSTM_THREAD_LOCAL {CLASS_NAME}_tls_record record{{}};
return record;
}}
struct {CLASS_NAME}
{{
private:
using records_t = std::vector<{CLASS_NAME}_tls_record>;
using records_iter = typename records_t::iterator;
using records_value_type = typename records_t::value_type;
records_t records_;
{CLASS_NAME}() = default;
{CLASS_NAME}(const {CLASS_NAME}&) = delete;
{CLASS_NAME}& operator=(const {CLASS_NAME}&) = delete;
std::uint64_t
total_count(std::function<std::size_t(const {CLASS_NAME}_tls_record*)> accessor) const noexcept
{{
std::size_t result = 0;
for (auto& tid_record : records_)
result += accessor(&tid_record);
return result;
}}
std::uint64_t
max(std::function<std::size_t(const {CLASS_NAME}_tls_record*)> accessor) const noexcept
{{
std::size_t result = 0;
for (auto& tid_record : records_)
result = std::max(result, accessor(&tid_record));
return result;
}}
public:
static {CLASS_NAME}& get() noexcept
{{
static {CLASS_NAME} singleton;
return singleton;
}}
inline void publish({CLASS_NAME}_tls_record record) noexcept
{{
records_.emplace_back(std::move(record));
}}
{TRANSACTION_LOG_MEMBER_FUNCTIONS}
std::size_t thread_count() const noexcept {{ return records_.size(); }}
const records_t& records() const noexcept {{ return records_; }}
void clear() noexcept {{ records_.clear(); }}
std::string results(bool per_thread = true) const
{{
std::ostringstream ostr;
ostr
{TRANSACTION_LOG_STREAM_OUTPUT};
if (per_thread) {{
std::size_t i = 0;
for (auto& record : records_) {{
ostr << "--== Thread: " << std::setw(4) << i++ << " ==--" << '\\n';
ostr << record.results() << '\\n';
}}
}}
return ostr.str();
}}
}};
{NAMESPACE_END}
#endif /* {MACRO_PREFIX}ON */
#endif /* {INCLUDE_GUARD} */'''

# C++ storage type used in the per-thread record for each stat kind.
types = {
    'counter' : 'std::uint64_t',
    'max' : 'std::uint64_t',
    'sum' : 'std::uint64_t',
}
def indent(s, amount = 1):
    """Indent every line of *s* (including blank ones) by *amount*
    4-space levels; the result carries no trailing newline."""
    pad = ' ' * (4 * amount)
    return '\n'.join(pad + line for line in s.splitlines())
def get_pretty_name(stat):
    """Human-readable display name: each whitespace-separated word
    capitalized, single-space separated."""
    return ' '.join(map(str.capitalize, stat.split()))
def get_mem_name(stat):
    """C++ member-variable name for a stat: lowercase, spaces -> underscores."""
    return '_'.join(stat.lower().split(' '))
def get_mem_fun_for_stat(compound_stat):
    """Accessor-call spelling ('name()') for a compound stat."""
    return '{0}()'.format(get_mem_name(compound_stat))
def get_mem_or_func_call(stat, stats, compound_stats):
    """Member name for a plain stat, accessor call for a compound one.

    Asserts the name is known as one or the other.
    """
    if stat not in stats:
        assert(stat in compound_stats)
        return get_mem_fun_for_stat(stat)
    return get_mem_name(stat)
def get_macro_name(stat, macro_prefix):
    """Preprocessor macro name for a stat: PREFIX + UPPER_SNAKE form."""
    return macro_prefix + '_'.join(stat.upper().split(' '))
def get_macro_params(stat, stats_kinds):
    """Macro parameter list for a stat: counters take none, max/sum take
    the amount.  Unknown kinds raise KeyError."""
    return {'counter': '', 'max': 'amt', 'sum': 'amt'}[stats_kinds[stat]]
def get_macro_define(stat, stats_kinds, macro_prefix):
    """'#define NAME(params)' text for a stat (expansion appended later)."""
    name = get_macro_name(stat, macro_prefix)
    params = get_macro_params(stat, stats_kinds)
    return '#define ' + name + '(' + params + ')'
def add_trailing_whitespace(strings):
    """Left-justify each string to a shared width (longest + 1) so the
    generated columns line up.

    Fixed: an empty input now returns [] instead of raising ValueError
    from max() on an empty sequence.
    """
    if not strings:
        return []
    width = max(len(s) for s in strings) + 1
    return [s.ljust(width) for s in strings]
def get_macro_defines(stats, stats_kinds, macro_prefix):
    """Aligned '#define NAME(params)' strings, one per stat."""
    defines = [get_macro_define(s, stats_kinds, macro_prefix) for s in stats]
    return add_trailing_whitespace(defines)
def get_macro_expansion_on(stat, stats_kinds, ns_access):
    """Macro expansion for a stat when collection is enabled: bump,
    max-accumulate, or add into the thread-local record."""
    templates = {
        'counter' : '++{NS_ACCESS}tls_record().{MEM_NAME}',
        'max' : '{NS_ACCESS}tls_record().{MEM_NAME} = std::max({NS_ACCESS}tls_record().{MEM_NAME}, static_cast<std::uint64_t>({PARAMS}))',
        'sum' : '{NS_ACCESS}tls_record().{MEM_NAME} += {PARAMS}',
    }
    template = templates[stats_kinds[stat]]
    return template.format(
        NS_ACCESS = ns_access,
        MEM_NAME = get_mem_name(stat),
        PARAMS = get_macro_params(stat, stats_kinds),
    )
def get_macros_on(stats, stats_kinds, ns_access, macro_prefix):
    """'#define NAME(args) expansion' lines for when stats are enabled.

    Defines are padded to one width so the expansions start in a single
    column.  (A duplicate, unused copy of the expansion-template dict was
    removed here; the authoritative templates live in
    get_macro_expansion_on.)
    """
    defines = get_macro_defines(stats, stats_kinds, macro_prefix)
    return '\n'.join(define + get_macro_expansion_on(stat, stats_kinds, ns_access)
                     for stat, define in zip(stats, defines))
def get_macros_off(stats, stats_kinds, macro_prefix):
    """Fallback no-op '#define's, each guarded by #ifndef, one per stat."""
    _FORMAT_STRING = '''#ifndef {MACRO_NAME}
{MACRO_DEFINE} /**/
#endif'''
    return '\n'.join(
        _FORMAT_STRING.format(
            MACRO_NAME = get_macro_name(stat, macro_prefix),
            MACRO_DEFINE = get_macro_define(stat, stats_kinds, macro_prefix),
        )
        for stat in stats)
def get_thread_record_mems(stats, stats_kinds):
    """C++ member declarations (with zero initialisers) for the
    per-thread record, one per plain stat."""
    initial_value = {
        'counter' : '0',
        'max' : '0',
        'sum' : '0',
    }
    decls = []
    for stat in stats:
        kind = stats_kinds[stat]
        decls.append('%s %s{%s};' % (types[kind],
                                     get_mem_name(stat),
                                     initial_value[kind]))
    return '\n'.join(decls)
def map_get_mem_or_func_call(stat_list, stats, compound_stats):
    """Apply get_mem_or_func_call to each name in *stat_list*."""
    return [get_mem_or_func_call(name, stats, compound_stats)
            for name in stat_list]
def get_assert(op, operands):
    """Emit an LSTM_ASSERT sanity check for a compound stat's operands:
    '/' requires numerator <= denominator, '-' requires a non-negative
    difference, '+' needs no check.

    NOTE(review): this reads module globals ``stats`` and
    ``compound_stats`` that are never defined in this file, so any call
    raises NameError.  It looks like dead code, or it expects those names
    to be injected from elsewhere -- confirm before using or removing.
    """
    assert_kind = {
        '/' : ' <= ',
        '-' : ' >= ',
        '+' : None,
    }
    mems = map_get_mem_or_func_call(operands, stats, compound_stats)
    if assert_kind[op] != None:
        return 'LSTM_ASSERT(%s);\n    ' % assert_kind[op].join(mems)
    return ''
def get_contents(stats, compound_stats, stat_data):
    """Arithmetic C++ expression combining a compound stat's operands."""
    operands = map_get_mem_or_func_call(stat_data['operands'],
                                        stats, compound_stats)
    op = stat_data['op']
    if op == '/':
        # Float-cast the denominator so C++ integer division is avoided.
        operands[-1] = 'float(%s)' % operands[-1]
    return (' %s ' % op).join(operands)
def get_thread_record_mem_fun(compound_stat, stats, compound_stats, compound_stats_kinds):
    """One inline accessor on the per-thread record for a compound stat."""
    contents = get_contents(stats, compound_stats,
                            compound_stats_kinds[compound_stat])
    return 'auto %s const noexcept { return %s; }' % (
        get_mem_fun_for_stat(compound_stat), contents)
def get_thread_record_mem_funs(stats, compound_stats, compound_stats_kinds):
    """All compound-stat accessors for the per-thread record."""
    funs = []
    for compound_stat in compound_stats:
        funs.append(get_thread_record_mem_fun(compound_stat,
                                              stats,
                                              compound_stats,
                                              compound_stats_kinds))
    return '\n'.join(funs)
def get_thread_record_stream_output(all_stats, stats, compound_stats):
    """ostream '<<' chain printing every stat of the per-thread record."""
    names = add_trailing_whitespace([get_pretty_name(s) + ':' for s in all_stats])
    values = add_trailing_whitespace(
        map_get_mem_or_func_call(all_stats, stats, compound_stats))
    lines = []
    for name, value in zip(names, values):
        lines.append('<< " ' + name + '" << ' + value + " << '\\n'")
    return '\n'.join(lines)
def get_singleton_class_mem_fun_contents(class_name,
                                         stat,
                                         stats,
                                         stats_kinds,
                                         compound_stats_kinds):
    """Body expression of a singleton accessor: aggregate a plain stat
    across all thread records, or combine other accessors for a
    compound stat.
    """
    if stat in stats:
        if stats_kinds[stat] == 'counter' or stats_kinds[stat] == 'sum':
            return 'total_count(&%s_tls_record::%s)' % (class_name, get_mem_name(stat))
        elif stats_kinds[stat] == 'max':
            return 'this->max(&%s_tls_record::%s)' % (class_name, get_mem_name(stat))
        else:
            # Fixed: was `assert(false)`, which raised NameError on the
            # undefined name `false` instead of an AssertionError.
            raise AssertionError('unknown stat kind: %r' % stats_kinds[stat])
    stat_data = compound_stats_kinds[stat]
    op = stat_data['op']
    # Fixed: map() is a lazy iterator on Python 3 and does not support the
    # item assignment below -- materialise the operand list instead.
    operands = [get_mem_fun_for_stat(o) for o in stat_data['operands']]
    if op == '/':
        # Float-cast the denominator so C++ integer division is avoided.
        operands[-1] = 'float(%s)' % operands[-1]
    return (' ' + op + ' ').join(operands)
def get_singleton_class_mem_fun(class_name, stat, stats, stats_kinds, compound_stats_kinds):
    """One inline accessor on the aggregating singleton for *stat*."""
    contents = get_singleton_class_mem_fun_contents(class_name,
                                                    stat,
                                                    stats,
                                                    stats_kinds,
                                                    compound_stats_kinds)
    return 'auto %s const noexcept { return %s; }' % (
        get_mem_fun_for_stat(stat), contents)
def get_singleton_class_mem_funs(class_name,
                                 stats,
                                 compound_stats,
                                 stats_kinds,
                                 compound_stats_kinds):
    """All singleton accessors: plain stats first, then compound stats."""
    funs = []
    for stat in list(stats) + list(compound_stats):
        funs.append(get_singleton_class_mem_fun(class_name,
                                                stat,
                                                stats,
                                                stats_kinds,
                                                compound_stats_kinds))
    return '\n'.join(funs)
def get_singleton_class_stream_output(all_stats):
    """ostream '<<' chain printing every aggregated stat of the singleton."""
    names = add_trailing_whitespace([get_pretty_name(s) + ':' for s in all_stats])
    values = add_trailing_whitespace([get_mem_fun_for_stat(s)
                                      for s in all_stats])
    lines = []
    for name, value in zip(names, values):
        lines.append('<< "' + name + '" << ' + value + " << '\\n'")
    return '\n'.join(lines)
def gen_stats(
    the_stats,
    include_guard,
    class_name = 'perf_stats',
    macro_prefix = '',
    includes = '',
    namespace_begin = '',
    namespace_end = '',
    namespace_access = '',
    stat_output_ordering = None,
    stats_member_ordering = None,
    compound_stats_member_func_ordering = None
):
    """Render the full stats C++ header from a stat-name -> kind mapping.

    ``the_stats`` maps a stat name either to a kind string ('counter',
    'max', 'sum') or to a dict {'op': ..., 'operands': [...]} describing
    a compound stat.  The *_ordering arguments optionally pin the
    emission order; any stats they omit are appended in dict order.

    Fixed: the ordering parameters previously defaulted to shared mutable
    lists that were also mutated in place with ``+=``, so repeated calls
    (or caller-supplied lists) accumulated state.  They now default to
    None and are copied before use.
    """
    stats = list(stats_member_ordering or [])
    stats += [k for k, v in the_stats.items()
              if isinstance(v, str) and k not in stats]
    compound_stats = list(compound_stats_member_func_ordering or [])
    compound_stats += [k for k, v in the_stats.items()
                       if not isinstance(v, str) and k not in compound_stats]
    stats_kinds = {k: v for k, v in the_stats.items() if isinstance(v, str)}
    compound_stats_kinds = {k: v for k, v in the_stats.items()
                            if not isinstance(v, str)}
    all_stats = list(stat_output_ordering or [])
    all_stats += [k for k in the_stats.keys() if k not in all_stats]
    # Sanity: every stat is classified exactly once, none lost.
    assert(sorted(all_stats) == sorted(compound_stats + stats))
    return _STATS_TEMPLATE.format(
        INCLUDE_GUARD = include_guard,
        MACRO_PREFIX = macro_prefix,
        INCLUDES = indent(includes),
        CLASS_NAME = class_name,
        NAMESPACE_BEGIN = namespace_begin,
        NAMESPACE_END = namespace_end,
        NS_ACCESS = namespace_access,
        MACROS_ON = indent(get_macros_on(stats, stats_kinds, namespace_access, macro_prefix), 1),
        MACROS_OFF = get_macros_off(stats, stats_kinds, macro_prefix),
        THREAD_RECORD_MEMBERS = indent(get_thread_record_mems(stats, stats_kinds), 2),
        THREAD_RECORD_MEMBER_FUNCTIONS = indent(get_thread_record_mem_funs(stats,
                                                                           compound_stats,
                                                                           compound_stats_kinds),
                                                2),
        THREAD_RECORD_STREAM_OUTPUT = indent(get_thread_record_stream_output(all_stats,
                                                                             stats,
                                                                             compound_stats), 4),
        TRANSACTION_LOG_MEMBER_FUNCTIONS = indent(get_singleton_class_mem_funs(class_name,
                                                                               stats,
                                                                               compound_stats,
                                                                               stats_kinds,
                                                                               compound_stats_kinds), 2),
        TRANSACTION_LOG_STREAM_OUTPUT = indent(get_singleton_class_stream_output(all_stats), 4),
    )
def get_stats_func(stat, compound_stats, compound_stats_kinds):
    """statsd emit function for a stat: ratio ('/') compounds use the
    float-valued gauge, everything else the plain gauge."""
    if stat in compound_stats and compound_stats_kinds[stat]['op'] == '/':
        return 'statsd_gauged'
    return 'statsd_gauge'
def get_singleton_statsd_output(all_stats, compound_stats, compound_stats_kinds):
    """statsd calls publishing each aggregated stat from the singleton."""
    _FORMAT_NAME = 'const_cast<char*>(LSTM_TESTNAME ".process.{NAME}")'
    calls = []
    for stat in all_stats:
        func = get_stats_func(stat, compound_stats, compound_stats_kinds)
        name = _FORMAT_NAME.format(NAME = get_mem_name(stat))
        value = 'stats.' + get_mem_fun_for_stat(stat)
        calls.append(func + '(link, ' + name + ', ' + value + ');')
    return '\n'.join(calls)
def get_thread_record_statsd_output(all_stats, stats, compound_stats, compound_stats_kinds):
    """statsd publishing loop over every per-thread record in the singleton.

    NOTE(review): ``header`` and ``footer`` are plain literals that are
    never passed through str.format here, so the doubled braces '{{' and
    '}}' appear verbatim in the output.  Presumably the caller formats the
    result again downstream -- confirm; otherwise the braces are a bug.
    """
    _FORMAT_NAME = 'const_cast<char*>((LSTM_TESTNAME ".thread" + std::to_string(i) + ".{NAME}").c_str())'
    stats_funcs = [get_stats_func(s, compound_stats, compound_stats_kinds) for s in all_stats]
    names = [_FORMAT_NAME.format(NAME = get_mem_name(s)) for s in all_stats]
    values = ['record.' + get_mem_or_func_call(s, stats, compound_stats) for s in all_stats]
    header = '''
int i = 0;
for (auto& record : stats.records()) {{'''
    body = indent('\n'.join([stats_func + '(link, ' + name + ', ' + value + ');'
                             for stats_func, name, value in zip(stats_funcs, names, values)]))
    footer = ''' ++i;
}}'''
    return '\n'.join([header, body, footer])
def gen_statsd_output(
    the_stats,
    include_guard,
    class_name = 'perf_stats',
    macro_prefix = '',
    namespace_begin = '',
    namespace_end = '',
    namespace_access = '',
    stat_output_ordering = None,
    stats_member_ordering = None,
    compound_stats_member_func_ordering = None
):
    """Render the statsd publishing snippet for the generated stats.

    Fixed: the *_ordering parameters previously defaulted to shared
    mutable lists that were mutated in place with ``+=``; they now
    default to None and are copied before use.  The unused local
    ``stats_kinds`` was also dropped.
    """
    stats = list(stats_member_ordering or [])
    stats += [k for k, v in the_stats.items()
              if isinstance(v, str) and k not in stats]
    compound_stats = list(compound_stats_member_func_ordering or [])
    compound_stats += [k for k, v in the_stats.items()
                       if not isinstance(v, str) and k not in compound_stats]
    compound_stats_kinds = {k: v for k, v in the_stats.items()
                            if not isinstance(v, str)}
    all_stats = list(stat_output_ordering or [])
    all_stats += [k for k in the_stats.keys() if k not in all_stats]
    # Sanity: every stat is classified exactly once, none lost.
    assert(sorted(all_stats) == sorted(compound_stats + stats))
    result = '\n'.join([
        get_singleton_statsd_output(all_stats, compound_stats, compound_stats_kinds),
        get_thread_record_statsd_output(all_stats, stats, compound_stats, compound_stats_kinds)])
    return indent(result, 2)
| 17,707 | 5,675 |
from . import ingest_tester
from whyis_test_case import WhyisTestCase
class IngestTestSetup(WhyisTestCase):
    """Shared fixture for ingest tests: parses the file under test once
    per class and re-initialises the ingest harness before each test."""

    @classmethod
    def setUpClass(cls):
        print("Setting Up Class")
        cls.maxDiff = None
        # Expected values are auto-derived from the very file being
        # ingested; subclasses must define cls.file_under_test.
        cls.expected_data = ingest_tester.autoparse(cls.file_under_test)

    def setUp(self):
        ingest_tester.setUp(self, self.file_under_test)

    def run_agent(self, agent, nanopublication=None):
        """Run *agent* in dry-run mode and collect its process_graph output.

        Targets, in order of preference: an explicit nanopublication, the
        whole database (for global-change agents), or otherwise every
        nanopublication whose assertion mentions an instance selected by
        the agent.
        """
        app = self.app
        agent.dry_run = True
        agent.app = app
        results = []
        if nanopublication is not None:
            results.extend(agent.process_graph(nanopublication))
        elif agent.query_predicate == app.NS.whyis.globalChangeQuery:
            results.extend(agent.process_graph(app.db))
        else:
            print("Running as update agent")
            for resource in agent.getInstances(app.db):
                print(resource.identifier)
                # Find each nanopublication whose assertion graph has this
                # resource as a subject.
                for np_uri, in app.db.query('''select ?np where {
graph ?assertion { ?e ?p ?o.}
?np a np:Nanopublication;
np:hasAssertion ?assertion.
}''', initBindings={'e': resource.identifier}, initNs=app.NS.prefixes):
                    print(np_uri)
                    np = app.nanopub_manager.get(np_uri)
                    results.extend(agent.process_graph(np))
        return results
class IngestTestTests(IngestTestSetup):
    """Concrete ingest assertions; each test delegates to the matching
    checker in ingest_tester, passing auto-parsed expectations where the
    checker needs them."""

    def test_nanocomposites(self):
        ingest_tester.test_nanocomposites(self)

    def test_authors(self):
        ingest_tester.test_authors(self, self.expected_data["authors"])

    def test_language(self):
        ingest_tester.test_language(self, self.expected_data["language"])

    def test_keywords(self):
        ingest_tester.test_keywords(self, self.expected_data["keywords"])

    def test_devices(self):
        ingest_tester.test_devices(self, self.expected_data["equipment"])

    def test_volume(self):
        ingest_tester.test_volume(self, self.expected_data["journ_vol"])

    def test_matrix_chemical_names(self):
        ingest_tester.test_matrix_chemical_names(self)

    def test_matrix_trade_names(self):
        ingest_tester.test_matrix_trade_names(self)

    def test_filler_chemical_names(self):
        ingest_tester.test_filler_chemical_names(self)

    def test_filler_trade_names(self):
        ingest_tester.test_filler_trade_names(self)

    # TODO Fix or remove
    def test_temperatures(self):
        ingest_tester.test_temperatures(self)

    def test_abbreviations(self):
        ingest_tester.test_abbreviations(self)

    def test_manufacturers(self):
        ingest_tester.test_manufacturers(self)

    def test_complete_material(self):
        ingest_tester.test_complete_material(self)

    # TODO Fix or remove
    def test_filler_processing(self):
        ingest_tester.test_filler_processing(self)

    def test_viscoelastic_measurement_mode(self):
        ingest_tester.test_viscoelastic_measurement_mode(self)

    # TODO add the following tests once completed
    # test_stress
    # test_melt_viscosity
    # test_rheometer_mode
    # test_specific_surface_area
    # test_dielectric_real_permittivity
| 3,138 | 1,023 |
import copy
import ConfigParser
import conf_keys
from loggingex import LOG_WARNING
class mysql_conf_parser:
    """Parses a mysql job .conf file into per-connection info dicts.

    (Python 2 module: relies on the old ConfigParser import at the top
    of the file.)
    """

    def parse(self, job_conf_path):
        """Return {section_name: conn_info} for every section of the file.

        Each conn_info carries the options listed in
        conf_keys.mysql_conn_keys.  A section defining ``range_max`` is
        expanded into range_max sharded entries named '<section>_<i>'
        whose db is '<db>_<i>'.
        """
        cp = ConfigParser.SafeConfigParser()
        cp.read(job_conf_path)
        conns_info = {}
        for section in cp.sections():
            conn_info = {}
            for key in conf_keys.mysql_conn_keys:
                # Missing keys are logged and skipped, not fatal.
                # (Was the unidiomatic `if False == cp.has_option(...)`.)
                if not cp.has_option(section, key):
                    LOG_WARNING()
                    continue
                conn_info[key] = cp.get(section, key)
            if cp.has_option(section, "range_max"):
                # Sharded section: one entry per database index.
                range_max = int(cp.get(section, "range_max"))
                db_name_base = conn_info["db"]
                for index in range(0, range_max):
                    conn_info["db"] = db_name_base + "_" + str(index)
                    section_index_name = section + "_" + str(index)
                    # Deep-copy so each shard owns an independent dict.
                    conns_info[section_index_name] = copy.deepcopy(conn_info)
            else:
                conns_info[section] = conn_info
        return conns_info
if __name__ == "__main__":
    # Smoke test: parse the default conf and dump the result.  The
    # parenthesized print is identical under Python 2 for a single
    # argument and additionally valid under Python 3; the dead trailing
    # `pass` was removed.
    a = mysql_conf_parser()
    print(a.parse("../../conf/mysql_manager.conf"))
| 1,200 | 353 |
import torch.nn as nn
from n3 import ExternNode
class Conv2D(ExternNode):
    """ExternNode wrapper around torch.nn.Conv2d.

    The annotated attributes below are hyper-parameters -- presumably
    populated from **kwargs by the n3 ExternNode base before they are
    read in __init__; confirm against the n3 ExternNode implementation.
    """

    kernel_size: int
    padding: int
    stride: int
    input_channels: int
    output_channels: int
    bias: bool

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Build the wrapped convolution from the attributes above.
        self._inner = nn.Conv2d(
            in_channels=self.input_channels,
            out_channels=self.output_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            bias=self.bias,
        )

    def forward(self, x):
        # Delegate straight to the underlying Conv2d module.
        return self._inner(x)
| 596 | 198 |
#!/usr/bin/env python3
"""
Copyright [2009-present] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import glob
import os
import click
from utils import crw, rfam, ribovision, config
from utils.generate_model_info import generate_model_info
def get_ribotyper_output(fasta_input, output_folder, cm_library):
    """
    Run ribotyper on the fasta sequences to select the best matching covariance
    model.

    The existing ribotyper long-output file acts as a cache: ribotyper is
    only invoked when it is absent.  Passing single-model hits are then
    filtered into a tab-separated hits.txt whose path is returned.
    """
    ribotyper_long_out = os.path.join(output_folder, os.path.basename(output_folder) + '.ribotyper.long.out')
    if not os.path.exists(ribotyper_long_out):
        cmd = 'ribotyper.pl --skipval -i {cm_library}/modelinfo.txt -f {fasta_input} {output_folder}'.format(
            cm_library=cm_library,
            fasta_input=fasta_input,
            output_folder=output_folder
        )
        print(cmd)
        os.system(cmd)
    f_out = os.path.join(output_folder, 'hits.txt')
    # Keep columns 2, 8, 3 of non-comment PASS rows, excluding MultipleHits
    # rows -- presumably sequence id, model name and a third field consumed
    # by the callers; confirm against the ribotyper output format.
    cmd = "cat %s | grep -v '^#' | grep -v MultipleHits | grep PASS | awk -v OFS='\t' '{print $2, $8, $3}' > %s" % (ribotyper_long_out, f_out)
    os.system(cmd)
    return f_out
def symlink_cms(source):
    """Symlink every covariance model from *source* into config.CM_LIBRARY.

    The combined 'all.cm' file is skipped and existing targets are left
    untouched.  Fixed: uses os.symlink instead of shelling out to
    'ln -s', avoiding a subshell per file and quoting problems with
    paths containing spaces.
    """
    target_dir = os.path.abspath(config.CM_LIBRARY)
    for cm_file in glob.glob(os.path.join(source, '*.cm')):
        if 'all.cm' in cm_file:
            continue
        target = os.path.join(target_dir, os.path.basename(cm_file))
        if not os.path.exists(target):
            os.symlink(os.path.abspath(cm_file), target)
@click.group()
def cli():
    # Root click group; commands and sub-groups below attach to it.
    pass
@cli.command()
def setup():
    """Prepare the combined CM library: set up Rfam and CRW, link in the
    RiboVision and CRW models, and regenerate modelinfo."""
    if not os.path.exists(config.CM_LIBRARY):
        os.makedirs(config.CM_LIBRARY)
    rfam.setup()
    crw.setup()
    symlink_cms(config.RIBOVISION_CM_LIBRARY)
    symlink_cms(config.CRW_CM_LIBRARY)
    generate_model_info(cm_library=config.CM_LIBRARY)
    print('Done')
@cli.command()
@click.argument('fasta-input', type=click.Path())
@click.argument('output-folder', type=click.Path())
def draw(fasta_input, output_folder):
    """
    Single entry point for visualising 2D for an RNA sequence.
    Selects a template and runs Traveler using CRW, LSU, or Rfam libraries.
    """
    os.system('mkdir -p %s' % output_folder)
    with open(get_ribotyper_output(fasta_input, output_folder, config.CM_LIBRARY), 'r') as f:
        for line in f.readlines():
            rnacentral_id, model_id, _ = line.split('\t')
            print(line)
            # Dispatch on the model id's naming convention:
            #   two or more '.'  -> CRW model
            #   exactly two '_'  -> RiboVision LSU model
            #   otherwise        -> Rfam accession
            # (The convention is assumed from these counts -- confirm
            # against the model names in the CM libraries.)
            if model_id.count('.') >= 2:
                crw.visualise_crw(fasta_input, output_folder, rnacentral_id, model_id)
            elif model_id.count('_') == 2:
                ribovision.visualise_lsu(fasta_input, output_folder, rnacentral_id, model_id)
            else:
                rfam.visualise_rfam(fasta_input, output_folder, rnacentral_id, model_id)
@cli.group('crw')
def crw_group():
    # Sub-group for commands that draw with CRW templates.
    pass
@crw_group.command('draw')
@click.option('--test', default=False, is_flag=True, help='Process only the first 10 sequences')
@click.argument('fasta-input', type=click.Path())
@click.argument('output-folder', type=click.Path())
def rrna_draw(fasta_input, output_folder, test):
    # Draw every CRW-matched sequence from the ribotyper hits file.
    # NOTE(review): the --test flag is accepted but never read below, so
    # it does not actually limit processing to 10 sequences.
    os.system('mkdir -p %s' % output_folder)
    with open(get_ribotyper_output(fasta_input, output_folder, config.CRW_CM_LIBRARY), 'r') as f:
        for line in f.readlines():
            rnacentral_id, model_id, _ = line.split('\t')
            crw.visualise_crw(fasta_input,
                              output_folder,
                              rnacentral_id,
                              model_id)
@cli.group('ribovision')
def ribovision_group():
    """
    Commands dealing with laying out sequences based upon RiboVision models.
    """
    # Group only; see the 'draw' subcommand below.
    pass
@ribovision_group.command('draw')
@click.argument('fasta-input', type=click.Path())
@click.argument('output-folder', type=click.Path())
def ribovision_draw(fasta_input, output_folder):
    """Visualise every RiboVision-matched sequence in the hits file."""
    os.system('mkdir -p %s' % output_folder)
    hits_path = get_ribotyper_output(fasta_input, output_folder,
                                     config.RIBOVISION_CM_LIBRARY)
    with open(hits_path, 'r') as hits_file:
        for hit in hits_file.readlines():
            rnacentral_id, model_id, _ = hit.split('\t')
            ribovision.visualise_lsu(fasta_input, output_folder,
                                     rnacentral_id, model_id)
@cli.group('rfam')
def rfam_group():
    """
    Commands dealing with laying out sequences based upon Rfam models.
    """
    # Group only; see the subcommands below.
    pass
@rfam_group.command('blacklisted')
def rfam_blacklist():
    """
    Show all blacklisted families. These include rRNA families as well as
    families that do not have any secondary structure.
    """
    blacklist = sorted(rfam.blacklisted())
    for family in blacklist:
        print(family)
@rfam_group.command('draw')
@click.option('--test', default=False, is_flag=True, help='Process only the first 10 sequences')
@click.argument('rfam_accession', type=click.STRING)
@click.argument('fasta-input', type=click.Path())
@click.argument('output-folder', type=click.Path())
def rfam_draw(rfam_accession, fasta_input, output_folder, test=None):
    """
    Visualise sequences using the Rfam/R-scape consensus structure as template.
    RFAM_ACCESSION - Rfam family to process (RF00001, RF00002 etc)
    """
    print(rfam_accession)
    # 'all' fans out over every known family; otherwise just the one given.
    if rfam_accession == 'all':
        rfam_accs = rfam.get_all_rfam_acc()
    else:
        rfam_accs = [rfam_accession]
    for rfam_acc in rfam_accs:
        if rfam.has_structure(rfam_acc):
            # Build the Traveler template from R-scape, then lay out the
            # sequences (the test flag is forwarded to generate_2d).
            rfam.rscape2traveler(rfam_acc)
            rfam.generate_2d(rfam_acc, output_folder, fasta_input, test)
        else:
            print('{} does not have a conserved secondary structure'.format(rfam_acc))
@rfam_group.command('validate')
@click.argument('rfam_accession', type=click.STRING)
@click.argument('output', type=click.File('w'))
def rfam_validate(rfam_accession, output):
    """
    Check if the given Rfam accession is one that should be drawn. If so it will
    be output to the given file, otherwise it will not.
    """
    if rfam_accession in rfam.blacklisted():
        return
    output.write(rfam_accession + '\n')
if __name__ == '__main__':
    # Delegate to the click command group defined above.
    cli()
| 6,482 | 2,239 |
import unittest
import textwrap
import jtbl.cli
class MyTests(unittest.TestCase):
def setUp(self):
self.SUCCESS, self.ERROR = True, False
    def test_no_piped_data(self):
        # No stdin at all -> error status with a "missing piped data" hint.
        stdin = None
        expected = textwrap.dedent('''\
jtbl: Missing piped data
''')
        self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.ERROR, expected))
    def test_null_data(self):
        # Empty-string input -> error status with no message at all.
        stdin = ''
        expected = ''
        self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.ERROR, expected))
    def test_simple_key_value(self):
        # A one-object array renders as a single-column table.
        stdin = '[{"key": "value"}]'
        expected = textwrap.dedent('''\
key
-----
value''')
        self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.SUCCESS, expected))
    def test_multi_key_value(self):
        # Two objects with two keys each -> two-column, two-row table.
        stdin = '[{"key1": "value1", "key2": "value1"}, {"key1": "value2", "key2": "value2"}]'
        expected = textwrap.dedent('''\
key1    key2
------  ------
value1  value1
value2  value2''')
        self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.SUCCESS, expected))
    def test_null_string(self):
        # Bare JSON null is valid JSON but not representable as a table.
        stdin = 'null'
        expected = textwrap.dedent('''\
jtbl: Cannot represent this part of the JSON Object as a table.
(Could be an Element, an Array, or Null data instead of an Object):
[null]
''')
        self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.ERROR, expected))
    def test_hello_string(self):
        # Non-JSON text -> parse error reported with the offending line.
        stdin = 'hello'
        expected = textwrap.dedent('''\
jtbl: Exception - Expecting value: line 1 column 1 (char 0)
Cannot parse line 1 (Not JSON or JSON Lines data):
hello
''')
        self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.ERROR, expected))
    def test_array_input(self):
        # A bare array of scalars has no keys, so it cannot become a table.
        stdin = '["value1", "value2", "value3"]'
        expected = textwrap.dedent('''\
jtbl: Cannot represent this part of the JSON Object as a table.
(Could be an Element, an Array, or Null data instead of an Object):
["value1", "value2", "value3"]
''')
        self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.ERROR, expected))
    def test_deep_nest(self):
        # Nested objects are not flattened: the inner dict is rendered as
        # its Python repr inside the single top-level column.
        stdin = '{"this":{"is":{"a":{"deeply":{"nested":{"structure":"value1","item2":"value2"}}}}}}'
        expected = textwrap.dedent('''\
this
---------------------------------------------------------------------------------
{'is': {'a': {'deeply': {'nested': {'structure': 'value1', 'item2': 'value2'}}}}}''')
        self.assertEqual(jtbl.cli.make_table(input_data=stdin, columns=100), (self.SUCCESS, expected))
    def test_jc_dig(self):
        # Wide record (jc dig output) at 80 columns: every cell is wrapped
        # hard, and the long 'answer' list dominates the row height.
        stdin = '[{"id": 55658, "opcode": "QUERY", "status": "NOERROR", "flags": ["qr", "rd", "ra"], "query_num": 1, "answer_num": 5, "authority_num": 0, "additional_num": 1, "question": {"name": "www.cnn.com.", "class": "IN", "type": "A"}, "answer": [{"name": "www.cnn.com.", "class": "IN", "type": "CNAME", "ttl": 147, "data": "turner-tls.map.fastly.net."}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.1.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.65.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.129.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.193.67"}], "query_time": 44, "server": "2600", "when": "Wed Mar 18 12:20:59 PDT 2020", "rcvd": 143}]'
        expected = textwrap.dedent('''\
+------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| id   | opco   | stat   | flag   | quer   | answ   | auth   | addi   | ques   | answ   | quer   | serv   | when   | rcvd   |
|      | de     | us     | s      | y_nu   | er_n   | orit   | tion   | tion   | er     | y_ti   | er     |        |        |
|      |        |        |        | m      | um     | y_nu   | al_n   |        |        | me     |        |        |        |
|      |        |        |        |        |        | m      | um     |        |        |        |        |        |        |
+======+========+========+========+========+========+========+========+========+========+========+========+========+========+
| 5565 | QUER   | NOER   | ['qr   | 1      | 5      | 0      | 1      | {'na   | [{'n   | 44     | 2600   | Wed    | 143    |
| 8    | Y      | ROR    | ', '   |        |        |        |        | me':   | ame'   |        |        | Mar    |        |
|      |        |        | rd',   |        |        |        |        | 'ww    | : 'w   |        |        | 18 1   |        |
|      |        |        | 'ra    |        |        |        |        | w.cn   | ww.c   |        |        | 2:20   |        |
|      |        |        | ']     |        |        |        |        | n.co   | nn.c   |        |        | :59    |        |
|      |        |        |        |        |        |        |        | m.',   | om.'   |        |        | PDT    |        |
|      |        |        |        |        |        |        |        | 'cl    | , 'c   |        |        | 2020   |        |
|      |        |        |        |        |        |        |        | ass'   | lass   |        |        |        |        |
|      |        |        |        |        |        |        |        | : 'I   | ': '   |        |        |        |        |
|      |        |        |        |        |        |        |        | N',    | IN',   |        |        |        |        |
|      |        |        |        |        |        |        |        | 'typ   | 'ty    |        |        |        |        |
|      |        |        |        |        |        |        |        | e':    | pe':   |        |        |        |        |
|      |        |        |        |        |        |        |        | 'A'}   | 'CN    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | AME'   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | , 't   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | tl':   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 147    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | , 'd   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ata'   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | : 't   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | urne   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | r-tl   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | s.ma   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | p.fa   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | stly   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | .net   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | .'},   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | {'n    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ame'   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | : 't   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | urne   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | r-tl   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | s.ma   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | p.fa   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | stly   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | .net   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | .',    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'cla   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ss':   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'IN    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ', '   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | type   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ': '   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | A',    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'ttl   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ': 5   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | , 'd   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ata'   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | : '1   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 51.1   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 01.1   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | .67'   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | }, {   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'nam   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | e':    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'tur   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ner-   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | tls.   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | map.   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | fast   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ly.n   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | et.'   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | , 'c   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | lass   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ': '   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | IN',   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'ty    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | pe':   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'A'    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | , 't   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | tl':   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 5,     |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'dat   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | a':    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | '151   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | .101   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | .65.   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 67'}   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | , {'   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | name   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ': '   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | turn   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | er-t   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ls.m   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ap.f   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | astl   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | y.ne   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | t.',   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'cl    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ass'   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | : 'I   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | N',    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'typ   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | e':    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'A',   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'tt    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | l':    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 5, '   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | data   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ': '   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 151.   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 101.   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 129.   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 67'}   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | , {'   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | name   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ': '   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | turn   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | er-t   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ls.m   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ap.f   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | astl   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | y.ne   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | t.',   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'cl    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ass'   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | : 'I   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | N',    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'typ   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | e':    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'A',   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 'tt    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | l':    |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 5, '   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | data   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ': '   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 151.   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 101.   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 193.   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | 67'}   |        |        |        |        |
|      |        |        |        |        |        |        |        |        | ]      |        |        |        |        |
+------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+''')
        self.assertEqual(jtbl.cli.make_table(input_data=stdin, columns=80), (self.SUCCESS, expected))
def test_jc_dig_150cols(self):
    """`jc dig` top-level output should wrap cells to fit a 150-column table."""
    # Single dig result with nested question/answer structures that force wrapping.
    stdin = '[{"id": 55658, "opcode": "QUERY", "status": "NOERROR", "flags": ["qr", "rd", "ra"], "query_num": 1, "answer_num": 5, "authority_num": 0, "additional_num": 1, "question": {"name": "www.cnn.com.", "class": "IN", "type": "A"}, "answer": [{"name": "www.cnn.com.", "class": "IN", "type": "CNAME", "ttl": 147, "data": "turner-tls.map.fastly.net."}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.1.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.65.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.129.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.193.67"}], "query_time": 44, "server": "2600", "when": "Wed Mar 18 12:20:59 PDT 2020", "rcvd": 143}]'
    expected = textwrap.dedent('''\
    +----------+----------+-------+----------+---------+----------+----------+----------+----------+----------+----------+----------+--------+--------+
    | opcode | server | id | status | flags | query_ | answer | author | additi | questi | answer | query_ | when | rcvd |
    | | | | | | num | _num | ity_nu | onal_n | on | | time | | |
    | | | | | | | | m | um | | | | | |
    +==========+==========+=======+==========+=========+==========+==========+==========+==========+==========+==========+==========+========+========+
    | QUERY | 2600 | 55658 | NOERRO | ['qr', | 1 | 5 | 0 | 1 | {'name | [{'nam | 44 | Wed Ma | 143 |
    | | | | R | 'rd', | | | | | ': 'ww | e': 'w | | r 18 1 | |
    | | | | | 'ra'] | | | | | w.cnn. | ww.cnn | | 2:20:5 | |
    | | | | | | | | | | com.', | .com.' | | 9 PDT | |
    | | | | | | | | | | 'clas | , 'cla | | 2020 | |
    | | | | | | | | | | s': 'I | ss': ' | | | |
    | | | | | | | | | | N', 't | IN', ' | | | |
    | | | | | | | | | | ype': | type': | | | |
    | | | | | | | | | | 'A'} | 'CNAM | | | |
    | | | | | | | | | | | E', 't | | | |
    | | | | | | | | | | | tl': 1 | | | |
    | | | | | | | | | | | 47, 'd | | | |
    | | | | | | | | | | | ata': | | | |
    | | | | | | | | | | | 'turne | | | |
    | | | | | | | | | | | r-tls. | | | |
    | | | | | | | | | | | map.fa | | | |
    | | | | | | | | | | | stly.n | | | |
    | | | | | | | | | | | et.'}, | | | |
    | | | | | | | | | | | {'nam | | | |
    | | | | | | | | | | | e': 't | | | |
    | | | | | | | | | | | urner- | | | |
    | | | | | | | | | | | tls.ma | | | |
    | | | | | | | | | | | p.fast | | | |
    | | | | | | | | | | | ly.net | | | |
    | | | | | | | | | | | .', 'c | | | |
    | | | | | | | | | | | lass': | | | |
    | | | | | | | | | | | 'IN', | | | |
    | | | | | | | | | | | 'type | | | |
    | | | | | | | | | | | ': 'A' | | | |
    | | | | | | | | | | | , 'ttl | | | |
    | | | | | | | | | | | ': 5, | | | |
    | | | | | | | | | | | 'data' | | | |
    | | | | | | | | | | | : '151 | | | |
    | | | | | | | | | | | .101.1 | | | |
    | | | | | | | | | | | .67'}, | | | |
    | | | | | | | | | | | {'nam | | | |
    | | | | | | | | | | | e': 't | | | |
    | | | | | | | | | | | urner- | | | |
    | | | | | | | | | | | tls.ma | | | |
    | | | | | | | | | | | p.fast | | | |
    | | | | | | | | | | | ly.net | | | |
    | | | | | | | | | | | .', 'c | | | |
    | | | | | | | | | | | lass': | | | |
    | | | | | | | | | | | 'IN', | | | |
    | | | | | | | | | | | 'type | | | |
    | | | | | | | | | | | ': 'A' | | | |
    | | | | | | | | | | | , 'ttl | | | |
    | | | | | | | | | | | ': 5, | | | |
    | | | | | | | | | | | 'data' | | | |
    | | | | | | | | | | | : '151 | | | |
    | | | | | | | | | | | .101.6 | | | |
    | | | | | | | | | | | 5.67'} | | | |
    | | | | | | | | | | | , {'na | | | |
    | | | | | | | | | | | me': ' | | | |
    | | | | | | | | | | | turner | | | |
    | | | | | | | | | | | -tls.m | | | |
    | | | | | | | | | | | ap.fas | | | |
    | | | | | | | | | | | tly.ne | | | |
    | | | | | | | | | | | t.', ' | | | |
    | | | | | | | | | | | class' | | | |
    | | | | | | | | | | | : 'IN' | | | |
    | | | | | | | | | | | , 'typ | | | |
    | | | | | | | | | | | e': 'A | | | |
    | | | | | | | | | | | ', 'tt | | | |
    | | | | | | | | | | | l': 5, | | | |
    | | | | | | | | | | | 'data | | | |
    | | | | | | | | | | | ': '15 | | | |
    | | | | | | | | | | | 1.101. | | | |
    | | | | | | | | | | | 129.67 | | | |
    | | | | | | | | | | | '}, {' | | | |
    | | | | | | | | | | | name': | | | |
    | | | | | | | | | | | 'turn | | | |
    | | | | | | | | | | | er-tls | | | |
    | | | | | | | | | | | .map.f | | | |
    | | | | | | | | | | | astly. | | | |
    | | | | | | | | | | | net.', | | | |
    | | | | | | | | | | | 'clas | | | |
    | | | | | | | | | | | s': 'I | | | |
    | | | | | | | | | | | N', 't | | | |
    | | | | | | | | | | | ype': | | | |
    | | | | | | | | | | | 'A', ' | | | |
    | | | | | | | | | | | ttl': | | | |
    | | | | | | | | | | | 5, 'da | | | |
    | | | | | | | | | | | ta': ' | | | |
    | | | | | | | | | | | 151.10 | | | |
    | | | | | | | | | | | 1.193. | | | |
    | | | | | | | | | | | 67'}] | | | |
    +----------+----------+-------+----------+---------+----------+----------+----------+----------+----------+----------+----------+--------+--------+''')
    self.assertEqual(jtbl.cli.make_table(input_data=stdin, columns=150), (self.SUCCESS, expected))
def test_jc_dig_150cols_t(self):
    """With truncate=True, cells are clipped instead of wrapped at 150 columns."""
    stdin = '[{"id": 55658, "opcode": "QUERY", "status": "NOERROR", "flags": ["qr", "rd", "ra"], "query_num": 1, "answer_num": 5, "authority_num": 0, "additional_num": 1, "question": {"name": "www.cnn.com.", "class": "IN", "type": "A"}, "answer": [{"name": "www.cnn.com.", "class": "IN", "type": "CNAME", "ttl": 147, "data": "turner-tls.map.fastly.net."}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.1.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.65.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.129.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.193.67"}], "query_time": 44, "server": "2600", "when": "Wed Mar 18 12:20:59 PDT 2020", "rcvd": 143}]'
    expected = textwrap.dedent('''\
    opcode status server id flags query_nu answer_n authorit addition question answer query_ti when rcvd
    -------- -------- -------- ----- -------- ---------- ---------- ---------- ---------- ---------- -------- ---------- ------- ------
    QUERY NOERROR 2600 55658 ['qr', ' 1 5 0 1 {'name': [{'name' 44 Wed Mar 143''')
    self.assertEqual(jtbl.cli.make_table(input_data=stdin, truncate=True, columns=150), (self.SUCCESS, expected))
def test_jc_dig_nowrap(self):
    """With nowrap=True the table is emitted at full width, ignoring the terminal size."""
    stdin = '[{"id": 55658, "opcode": "QUERY", "status": "NOERROR", "flags": ["qr", "rd", "ra"], "query_num": 1, "answer_num": 5, "authority_num": 0, "additional_num": 1, "question": {"name": "www.cnn.com.", "class": "IN", "type": "A"}, "answer": [{"name": "www.cnn.com.", "class": "IN", "type": "CNAME", "ttl": 147, "data": "turner-tls.map.fastly.net."}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.1.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.65.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.129.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.193.67"}], "query_time": 44, "server": "2600", "when": "Wed Mar 18 12:20:59 PDT 2020", "rcvd": 143}]'
    expected = textwrap.dedent('''\
    id opcode status flags query_num answer_num authority_num additional_num question answer query_time server when rcvd
    ----- -------- -------- ------------------ ----------- ------------ --------------- ---------------- ---------------------------------------------------- ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ------------ -------- ---------------------------- ------
    55658 QUERY NOERROR ['qr', 'rd', 'ra'] 1 5 0 1 {'name': 'www.cnn.com.', 'class': 'IN', 'type': 'A'} [{'name': 'www.cnn.com.', 'class': 'IN', 'type': 'CNAME', 'ttl': 147, 'data': 'turner-tls.map.fastly.net.'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.1.67'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.65.67'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.129.67'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.193.67'}] 44 2600 Wed Mar 18 12:20:59 PDT 2020 143''')
    self.assertEqual(jtbl.cli.make_table(input_data=stdin, nowrap=True, columns=150), (self.SUCCESS, expected))
def test_jc_dig_nowrap_t_cols_80(self):
    """test that nowrap overrides both truncate and columns"""
    # Same input as test_jc_dig_nowrap; truncate=True and columns=80 must be ignored.
    stdin = '[{"id": 55658, "opcode": "QUERY", "status": "NOERROR", "flags": ["qr", "rd", "ra"], "query_num": 1, "answer_num": 5, "authority_num": 0, "additional_num": 1, "question": {"name": "www.cnn.com.", "class": "IN", "type": "A"}, "answer": [{"name": "www.cnn.com.", "class": "IN", "type": "CNAME", "ttl": 147, "data": "turner-tls.map.fastly.net."}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.1.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.65.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.129.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.193.67"}], "query_time": 44, "server": "2600", "when": "Wed Mar 18 12:20:59 PDT 2020", "rcvd": 143}]'
    expected = textwrap.dedent('''\
    id opcode status flags query_num answer_num authority_num additional_num question answer query_time server when rcvd
    ----- -------- -------- ------------------ ----------- ------------ --------------- ---------------- ---------------------------------------------------- ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ------------ -------- ---------------------------- ------
    55658 QUERY NOERROR ['qr', 'rd', 'ra'] 1 5 0 1 {'name': 'www.cnn.com.', 'class': 'IN', 'type': 'A'} [{'name': 'www.cnn.com.', 'class': 'IN', 'type': 'CNAME', 'ttl': 147, 'data': 'turner-tls.map.fastly.net.'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.1.67'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.65.67'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.129.67'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.193.67'}] 44 2600 Wed Mar 18 12:20:59 PDT 2020 143''')
    self.assertEqual(jtbl.cli.make_table(input_data=stdin, nowrap=True, columns=80, truncate=True), (self.SUCCESS, expected))
def test_jc_dig_answer(self):
    """The `answer` sub-array of a dig result renders as a flat, unwrapped table at 80 columns."""
    stdin = '[{"name":"www.cnn.com.","class":"IN","type":"CNAME","ttl":147,"data":"turner-tls.map.fastly.net."},{"name":"turner-tls.map.fastly.net.","class":"IN","type":"A","ttl":5,"data":"151.101.1.67"},{"name":"turner-tls.map.fastly.net.","class":"IN","type":"A","ttl":5,"data":"151.101.65.67"},{"name":"turner-tls.map.fastly.net.","class":"IN","type":"A","ttl":5,"data":"151.101.129.67"},{"name":"turner-tls.map.fastly.net.","class":"IN","type":"A","ttl":5,"data":"151.101.193.67"}]'
    expected = textwrap.dedent('''\
    name class type ttl data
    -------------------------- ------- ------ ----- --------------------------
    www.cnn.com. IN CNAME 147 turner-tls.map.fastly.net.
    turner-tls.map.fastly.net. IN A 5 151.101.1.67
    turner-tls.map.fastly.net. IN A 5 151.101.65.67
    turner-tls.map.fastly.net. IN A 5 151.101.129.67
    turner-tls.map.fastly.net. IN A 5 151.101.193.67''')
    self.assertEqual(jtbl.cli.make_table(input_data=stdin, columns=80), (self.SUCCESS, expected))
def test_json_lines(self):
    """test JSON Lines data"""
    # One JSON object per line (not a JSON array); null values should render as blank cells.
    stdin = textwrap.dedent('''\
    {"name":"lo0","type":null,"ipv4_addr":"127.0.0.1","ipv4_mask":"255.0.0.0"}
    {"name":"gif0","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"stf0","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"XHC0","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"XHC20","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"VHC128","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"XHC1","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"en5","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"ap1","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"en0","type":null,"ipv4_addr":"192.168.1.221","ipv4_mask":"255.255.255.0"}
    {"name":"p2p0","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"awdl0","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"en1","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"en2","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"en3","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"en4","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"bridge0","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"utun0","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"utun1","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"utun2","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"utun3","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"utun4","type":null,"ipv4_addr":null,"ipv4_mask":null}
    {"name":"vmnet1","type":null,"ipv4_addr":"192.168.101.1","ipv4_mask":"255.255.255.0"}
    {"name":"vmnet8","type":null,"ipv4_addr":"192.168.71.1","ipv4_mask":"255.255.255.0"}''')
    expected = textwrap.dedent('''\
    name type ipv4_addr ipv4_mask
    ------- ------ ------------- -------------
    lo0 127.0.0.1 255.0.0.0
    gif0
    stf0
    XHC0
    XHC20
    VHC128
    XHC1
    en5
    ap1
    en0 192.168.1.221 255.255.255.0
    p2p0
    awdl0
    en1
    en2
    en3
    en4
    bridge0
    utun0
    utun1
    utun2
    utun3
    utun4
    vmnet1 192.168.101.1 255.255.255.0
    vmnet8 192.168.71.1 255.255.255.0''')
    self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.SUCCESS, expected))
# Allow running this test module directly (e.g. `python tests.py`).
if __name__ == '__main__':
    unittest.main()
| 49,994 | 14,419 |
"""
Autonomous Car Locator
This is a program that helps autonomous cars to find out the location and direction
of other autonomous cars. It is all based on what is provided by the car and from what the car detects.
By David Gameiro
"""
import random
def locator():
    """Interactively gather the speed/heading of your car and any detected cars,
    then print the current road layout and a prediction of future positions.

    NOTE(review): each car is intended to be a dict keyed by "speed", "compass",
    "id" and "gps", but all reads/writes below use integer keys (car[0], car[1],
    car[2]) instead, so the named keys are never updated -- the code only works
    because dicts accept the extra integer keys. TODO: use the named keys.
    """
    cars = [
        {
            "speed" : 50, #the speed of the current car
            "compass" : "N", #the direction of the current car
            "id" : 1423456, #the ID number of the car
            "gps" : 36.00000 #the current GPS location of the current car
        }
    ]

    def info_input(): #this is to gather the speed and compass direction of your car and the other car
        i = 0
        while True:#len(cars) > i:
            if i == 0:
                # First iteration: fill in the pre-seeded entry for your own car.
                print("YOUR CAR")
                speed = speed_type()
                cars[0][0] = speed  # NOTE(review): should be cars[0]["speed"]
                print("The current speed of your car is " + str(cars[0][0]) + "mph\n")
                compass = compass_type()
                cars[0][1] = compass  # NOTE(review): should be cars[0]["compass"]
                print("The current direction of your car is " + cars[0][1] + "\n")
            else:
                # Later iterations: build a record for each additional detected car.
                new_car = {
                    "speed" : 50, #the speed of the current car
                    "compass" : "N", #the direction of the current car
                    "id" : 1423456, #the ID number of the car
                    "gps" : 36.00000 #the current GPS location of the current car
                }
                print("Car " + str(i))
                speed = speed_type()
                new_car[0] = speed  # NOTE(review): should be new_car["speed"]
                print("The current speed of Car " + str(i) + " is " + str(new_car[0]) + "mph\n")
                print("Both cars can only be traveling the same or opposite directions.")
                print("Please only choose the same direction or the opposite on.")
                compass = compass_type()
                new_car[1] = compass  # NOTE(review): should be new_car["compass"]
                print("The current direction of Car " + str(i) + " is " + new_car[1] + "\n")
                # Random even number used as the detected car's ID.
                new_car[2] = random.randrange(1420,62416,2)
                cars.append(new_car)
            stop_count = input("If there are no more cars to add, type [E] for End or [C] for continue.\n")
            stop_count = stop_count.upper()
            if stop_count == "E" or stop_count == "END":
                break
            i += 1
        print(len(cars))

    def speed_type(): #if the input for the speed is not a number then it keeps asking for the speed
        val_type = "str"
        while val_type != "int":
            speed = input("What is the speed of the car? ")
            val_type = check_user_input(speed)
        speed = int(speed)
        return speed

    def compass_type(): #if the input for the compass direction is not a number then it keeps asking
        while True: #This loops through until an input is given that is one of the options
            val_type = "int"
            while val_type != "str":
                compass = input("What is the direction that the car is traveling? [N], [S], [E}, [W] ")
                val_type = check_user_input(compass)
            compass = compass.upper()
            if compass == "N" or compass == "S" or compass == "E" or compass == "W":
                break #this verifies that the input is only as specified, and then ends the loop, if not it continues
            else:
                continue
        return compass

    def check_user_input(input):
        # Classify the raw string as "int", "float" or "str".
        # NOTE(review): the parameter name shadows the builtin input().
        try:
            # Convert it into integer
            val = int(input)
            val_type="int"
        except ValueError:
            try:
                # Convert it into float
                val = float(input)
                val_type = "float"
            except ValueError:
                val_type = "str"
        return val_type

    info_input()
    j = 1
    while len(cars) > j:
        print("The ID number of the car is " + str(cars[j][2]))

        def speed_compare():
            # Report whether your car is faster, slower or the same speed as Car j.
            relative_speed = "faster" #the relative speed your car is going compared to other cars
            if cars[0][0] > cars[j][0]:
                print("Your car is going faster than Car " + str(j))
                relative_speed = "faster"
            elif cars[0][0] < cars[j][0]:
                print("Your car is going slower than Car " + str(j))
                relative_speed = "slower"
            else:
                print("Your car is going the same speed as Car " + str(j))
                relative_speed = "same"
            return relative_speed

        def compass(): #used to compare the traveling direction of the two cars
            # NOTE(review): if the two headings are perpendicular (e.g. N vs E),
            # no branch assigns `direction` and the return raises UnboundLocalError;
            # the same/opposite restriction is only requested via a printed message.
            if cars[0][1] == cars[j][1]:
                print("You and Car " + str(j) + " are both going the same direction")
                direction = "same"
            elif cars[0][1] == "N" and cars[j][1] == "S":
                print("You and Car " + str(j) + " are both going the opposite direction")
                direction = "opposite"
            elif cars[0][1] == "E" and cars[j][1] == "W":
                print("You and Car " + str(j) + " are both going the opposite direction")
                direction = "opposite"
            elif cars[0][1] == "S" and cars[j][1] == "N":
                print("You and Car " + str(j) + " are both going the opposite direction")
                direction = "opposite"
            elif cars[0][1] == "W" and cars[j][1] == "E":
                print("You and Car " + str(j) + " are both going the opposite direction")
                direction = "opposite"
            return direction

        def sensors(): #Which sensors are being triggered on your car, or where is the car is in relation to you
            sensor = ["front", "right", "rear", "left"] #4 available sensors on the car, on all 4 sides
            position = random.choice(sensor) #where is the other car located relative to yours
            if position == "front":
                print("The car is in front of your car")
            elif position == "right":
                print("The car is to the right of your car")
            elif position == "left":
                print("The car is to the left of your car")
            else:
                print("The car is behind your car")
            return position

        direction = compass()
        relative_speed = speed_compare()
        position = sensors()

        def visual_before(): #displays what the current layout of the road is
            # Uses `direction`/`position` closed over from the enclosing loop body.
            print("\nCURRENT ROAD LAYOUT")
            if direction == "same" and position == "front":
                print("| | || | |")
                print("| | || | " + str(j) + " |")
                print("| | || Y | |")
                print("| | || | |")
            elif direction == "same" and position == "rear":
                print("| | || | |")
                print("| | || | Y |")
                print("| | || " + str(j) + " | |")
                print("| | || | |")
            elif direction == "same" and position == "right":
                print("| | || | |")
                print("| | || | |")
                print("| | || Y | " + str(j) + " |")
                print("| | || | |")
            elif direction == "same" and position == "left":
                print("| | || | |")
                print("| | || | |")
                print("| | || " + str(j) + " | Y |")
                print("| | || | |")
            elif direction == "opposite":
                print("| | || | |")
                print("| | " + str(j) + " || | |")
                print("| | || Y | |")
                print("| | || | |")

        def prediction(): #if the same conditions continue then this will be the predicted road layout
            # Uses `direction`/`relative_speed`/`position` from the enclosing loop body.
            print("\nPREDICTED FUTURE LAYOUT")
            if direction == "same" and (relative_speed == "same" or relative_speed == "slower") and position == "front":
                print("The other car will remain in front of you.")
                print("| | || | |")
                print("| | || | " + str(j) + " |")
                print("| | || Y | |")
                print("| | || | |")
            elif direction == "same" and (relative_speed == "same" or relative_speed == "faster") and position == "rear":
                print("The other car will remain behind you.")
                print("| | || | |")
                print("| | || | Y |")
                print("| | || " + str(j) + " | |")
                print("| | || | |")
            elif direction == "same" and relative_speed == "same" and position == "right":
                print("The other car will remain to the right of you.")
                print("| | || | |")
                print("| | || | |")
                print("| | || Y | " + str(j) + " |")
                print("| | || | |")
            elif direction == "same" and relative_speed == "same" and position == "left":
                print("The other car will remain to the left of you.")
                print("| | || | |")
                print("| | || | |")
                print("| | || " + str(j) + " | Y |")
                print("| | || | |")
            elif direction == "same" and relative_speed == "faster" and position == "front":
                print("You will pass the other car and be in front of them.")
                print("| | || | |")
                print("| | || Y | |")
                print("| | || | " + str(j) + " |")
                print("| | || | |")
            elif direction == "same" and relative_speed == "faster" and position == "left":
                print("You will pass the other car and be in front of them.")
                print("| | || | |")
                print("| | || | Y |")
                print("| | || " + str(j) + " | |")
                print("| | || | |")
            elif direction == "same" and relative_speed == "faster" and position == "right":
                print("You will pass the other car and be in front of them.")
                print("| | || | |")
                print("| | || Y | |")
                print("| | || | " + str(j) + " |")
                print("| | || | |")
            elif direction == "same" and relative_speed == "slower" and position == "right":
                print("The other car will be in front of you.")
                print("| | || | |")
                print("| | || | " + str(j) + " |")
                print("| | || Y | |")
                print("| | || | |")
            elif direction == "same" and relative_speed == "slower" and position == "left":
                print("The other car will be in front of you.")
                print("| | || | |")
                print("| | || " + str(j) + " | |")
                print("| | || | Y |")
                print("| | || | |")
            elif direction == "same" and relative_speed == "slower" and position == "rear":
                print("The other car will pass you.")
                print("| | || | |")
                print("| | || " + str(j) + " | |")
                print("| | || | Y |")
                print("| | || | |")
            elif direction == "opposite":
                print("The other car will be behind you.")
                print("| | || | |")
                print("| | || Y | |")
                print("| | " + str(j) + " || | |")
                print("| | || | |")

        visual_before()
        prediction()
        print("\n\n\n")
        j += 1


locator()
| 11,862 | 3,352 |
from jenkinsdashboard.ci.jenkins import Jenkins
from jenkinsdashboard.ui.dashboard import Dashboard
import time
if __name__ == '__main__':
    # jenkins = Jenkins('http://10.0.0.102:18081', 'jfm', 'c3po4all')
    # SECURITY NOTE(review): credentials are hard-coded here (and in the
    # commented-out line above). They should be moved to environment
    # variables or a config file outside version control.
    jenkins = Jenkins(
        'http://jenkins.onboarding.liquid.int.tdk.dk', 'admin', '0nboarding')
    dashboard = Dashboard(jenkins)
    # Re-render the dashboard from fresh CI data every 30 seconds, forever.
    while True:
        ci_rows = dashboard.generate()
        dashboard.render(ci_rows)
        time.sleep(30)
| 460 | 170 |
r"""
Using `fmas` as a black-box application
=======================================
This examples shows how to use `py-fmas` as a black-box application, that
only requires a minimal amount of scripting.
.. codeauthor:: Oliver Melchert <melchert@iqo.uni-hannover.de>
"""
###############################################################################
# We start by simply importing the required `fmas` into the current namespace.
#
import fmas
###############################################################################
# If an adequate input file is located within the current working directory,
# `fmas` can be used as shown below. It features a particular function called
# `run`, which reads-in the propagation setting stored in the input file
# `input_file.h5` and runs the simulation
res = fmas.run('input_file.h5', model_type='FMAS_S_R', solver_type='IFM_RK4IP')
###############################################################################
# An example that shows how an adequate input file can be generated via python
# is shown under the link below:
#
# :ref:`sphx_glr_auto_tutorials_basics_ng_generate_infile.py`
#
# After the propagation algorithm (specified in `input_file.h5`) terminates,
# a simple dictionary data structure with the following keys is available
print(res.keys())
###############################################################################
# A simple plot that shows the result of the simulation run can be produced
# using function `plot_evolution` implemented in module `tools`
from fmas.tools import plot_evolution
plot_evolution( res['z'], res['t'], res['u'], t_lim=(-500,2200), w_lim=(1.,4.))
###############################################################################
# The results can be stored for later postprocessing using the function
# `save_h5` implemented in module `data_io`. It will generate a file
# `out_file.h5` with HDF5 format in the current working directory
from fmas.data_io import save_h5
save_h5('out_file.h5', **res)
| 2,001 | 546 |
from unittest import TestCase
import xmltodict
from markdown import Markdown
from markdown.util import etree
from mdx_attr_cols import AttrColTreeProcessor, AttrColExtension, makeExtension
class XmlTestCaseMixin(object):
    """Helpers for test cases that build and compare XML element trees."""

    def mk_doc(self, s):
        """Parse markup fragment *s* wrapped in a single <div> root element."""
        wrapped = "<div>" + s.strip() + "</div>"
        return etree.fromstring(wrapped)

    def assert_xml_equal(self, a, b):
        """Assert that two elements serialize to equivalent XML documents."""
        def as_dict(element):
            return xmltodict.parse(etree.tostring(element))
        self.assertEqual(as_dict(a), as_dict(b))
class TestAttrColTreeProcessor(XmlTestCaseMixin, TestCase):
    """Tests for AttrColTreeProcessor configuration handling and row building."""

    def mk_processor(self, **conf):
        """Create a processor bound to a fresh Markdown instance."""
        md = Markdown()
        return AttrColTreeProcessor(md, conf)

    def test_config_none(self):
        # Bug fix: the original did `md = Markdown` (no parentheses), passing
        # the class instead of an instance; the test only passed by accident.
        md = Markdown()
        p = AttrColTreeProcessor(md, None)
        self.assertEqual(p.columns, 12)
        self.assertEqual(p.attr, 'cols')
        self.assertEqual(p.tags, set(['section']))

    def test_config_defaults(self):
        p = self.mk_processor()
        self.assertEqual(p.columns, 12)
        self.assertEqual(p.attr, 'cols')
        self.assertEqual(p.tags, set(['section']))

    def test_config_overrides(self):
        p = self.mk_processor(
            columns=16,
            attr='columns',
            tags=['section', 'div'],
        )
        self.assertEqual(p.columns, 16)
        self.assertEqual(p.attr, 'columns')
        self.assertEqual(p.tags, set(['section', 'div']))

    def test_simple_rows(self):
        # Three sections whose 'cols' attributes sum to a full 12-column row.
        root = self.mk_doc("""
        <section cols='4'>Foo</section>
        <section cols='6'>Bar</section>
        <section cols='2'>Beep</section>
        """)
        p = self.mk_processor()
        new_root = p.run(root)
        self.assert_xml_equal(new_root, self.mk_doc("""
        <div class="row"><div class="col-md-4"><section>Foo</section>
        </div><div class="col-md-6"><section>Bar</section>
        </div><div class="col-md-2"><section>Beep</section>
        </div></div>
        """))
class TestAttrColExtension(TestCase):
    """Tests for AttrColExtension registration and dependency checking."""

    def mk_markdown(self, extensions=None):
        """Create a Markdown instance; defaults to the two required extensions."""
        if extensions is None:
            extensions = ['attr_list', 'mdx_outline']
        md = Markdown(extensions=extensions)
        return md

    def assert_registered(self, md):
        processor = md.treeprocessors['attr_cols']
        self.assertTrue(isinstance(processor, AttrColTreeProcessor))

    def assert_not_registered(self, md):
        self.assertFalse('attr_cols' in md.treeprocessors)

    def test_create(self):
        # Bug fix: this was named 'text_create', so unittest never collected
        # or ran it.
        ext = AttrColExtension({'a': 'b'})
        self.assertEqual(ext.conf, {'a': 'b'})

    def test_extend_markdown(self):
        md = self.mk_markdown()
        ext = AttrColExtension({})
        ext.extendMarkdown(md)
        self.assert_registered(md)

    def test_missing_attr_list(self):
        md = self.mk_markdown(['mdx_outline'])
        ext = AttrColExtension({})
        # assertRaisesRegexp was deprecated and removed in Python 3.12;
        # assertRaisesRegex is the supported spelling.
        self.assertRaisesRegex(
            RuntimeError,
            "The attr_cols markdown extension depends the following"
            " extensions which must preceded it in the extension list:"
            " attr_list, mdx_outline",
            ext.extendMarkdown, md)
        self.assert_not_registered(md)

    def test_missing_outline(self):
        md = self.mk_markdown([])
        ext = AttrColExtension({})
        self.assertRaisesRegex(
            RuntimeError,
            "The attr_cols markdown extension depends the following"
            " extensions which must preceded it in the extension list:"
            " attr_list, mdx_outline",
            ext.extendMarkdown, md)
        self.assert_not_registered(md)
class TestExtensionRegistration(TestCase):
    """Tests for the module-level makeExtension() entry point."""

    def test_make_extension(self):
        conf = {'a': 'b'}
        extension = makeExtension(**conf)
        self.assertIsInstance(extension, AttrColExtension)
        self.assertEqual(extension.conf, conf)
| 3,852 | 1,183 |
# Generated by Django 3.0.5 on 2020-05-25 17:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace tilecomment's scalar parent_id field with a real ForeignKey."""

    dependencies = [
        ('cubes', '0001_initial'),
        ('tiles', '0001_initial'),
    ]

    operations = [
        # Drop the old plain 'parent_id' field...
        migrations.RemoveField(
            model_name='tilecomment',
            name='parent_id',
        ),
        # ...and add a nullable FK 'parent' to cubes.CubeComment instead
        # (Django stores it in a 'parent_id' database column).
        migrations.AddField(
            model_name='tilecomment',
            name='parent',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='cubes.CubeComment'),
        ),
    ]
| 621 | 211 |
class Solution:
    def permuteUnique(self, nums: List[int]) -> List[List[int]]:
        """Return all unique permutations of nums (which may contain duplicates).

        Bug fix: the original returned None for an empty input; the correct
        result is [[]] (there is exactly one permutation of the empty list,
        matching itertools.permutations). The n == 1 special case was
        redundant and has been removed.
        """
        nums.sort()  # group duplicates so they can be skipped at each depth
        res: List[List[int]] = []

        def dfs(remaining: List[int], path: List[int]) -> None:
            # Append a finished permutation once no elements remain.
            if not remaining:
                res.append(path)
                return
            prev = None
            for i, v in enumerate(remaining):
                # Skip a value already tried at this depth (duplicate branch).
                if v == prev:
                    continue
                dfs(remaining[:i] + remaining[i + 1:], path + [v])
                prev = v

        dfs(nums, [])
        return res
| 720 | 229 |
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
"""
import time
from gensim import corpora, models, similarities
from dialogbot.reader.data_helper import load_corpus_file
from dialogbot.utils.log import logger
class TfidfModel:
    """TF-IDF retrieval model over a context/response corpus (gensim-backed)."""

    def __init__(self, corpus_file, word2id):
        start = time.time()
        # Load up to 50k (context, response) pairs, then fit the model and
        # build a dense similarity index over the TF-IDF corpus.
        self.contexts, self.responses = load_corpus_file(corpus_file, word2id, size=50000)
        self._train_model()
        self.corpus_mm = self.tfidf_model[self.corpus]
        self.index = similarities.MatrixSimilarity(self.corpus_mm)
        logger.debug("Time to build tfidf model by %s: %2.f seconds." % (corpus_file, time.time() - start))

    def _train_model(self, min_freq=1):
        """Build the dictionary (minus rare tokens) and fit the TF-IDF model."""
        self.dct = corpora.Dictionary(self.contexts)
        # Drop tokens whose document frequency is at or below min_freq.
        rare_ids = [token_id for token_id, df in self.dct.dfs.items() if df <= min_freq]
        self.dct.filter_tokens(rare_ids)
        self.dct.compactify()
        self.corpus = [self.dct.doc2bow(doc) for doc in self.contexts]
        self.tfidf_model = models.TfidfModel(self.corpus)

    def _text2vec(self, text):
        """Convert a tokenized text into its TF-IDF vector."""
        return self.tfidf_model[self.dct.doc2bow(text)]

    def similarity(self, query, size=10):
        """Return the top-`size` (corpus_index, score) pairs for `query`."""
        scores = self.index[self._text2vec(query)]
        ranked = sorted(enumerate(scores), key=lambda pair: pair[1], reverse=True)
        return ranked[:size]

    def get_docs(self, sim_items):
        """Map (corpus_index, score) pairs back to their contexts and responses."""
        docs = [self.contexts[idx] for idx, _ in sim_items]
        answers = [self.responses[idx] for idx, _ in sim_items]
        return docs, answers
| 1,764 | 618 |
# https://leetcode.com/problems/single-number-ii/
class Solution:
    def singleNumber(self, nums: List[int]) -> int:
        """Return the element that appears once when every other element appears
        exactly three times.

        Uses the identity 3*sum(distinct) - sum(all) == 2*single, so the
        answer is that difference halved.
        """
        tripled_distinct = 3 * sum(set(nums))
        return (tripled_distinct - sum(nums)) // 2
| 237 | 91 |
#!/usr/bin/env python
import argparse
import kalibr_common as kc
from mpl_toolkits.mplot3d import art3d, Axes3D, proj3d
import numpy as np
import pylab as pl
import sm
import glob
def parse_arguments():
    """Build and parse the command line for comparing calibration results
    against ground-truth extrinsics.

    Each sensor type (cam / lidar / imu) gets an optional pair of arguments:
    the ground-truth yaml file and the file-name prefix (glob) of the
    calibration result yamls.
    """
    parser = argparse.ArgumentParser(
        description='read calibration results from yaml and compare with ground truth')
    parser.add_argument('--reference-sensor', dest='reference_sensor',
                        help='Specify the sensor as the reference coordinate system: camera0 or imu0', required=True)
    # (flag stem, word used in the help text) for each supported sensor type
    for flag_stem, help_word in (('cam', 'camera'), ('lidar', 'lidar'), ('imu', 'imu')):
        parser.add_argument(
            '--{}-ground-truth'.format(flag_stem),
            dest='{}_ground_truth'.format(flag_stem),
            help='the name of yaml file which stores the ground truth of {} extrinsics'.format(help_word),
            required=False)
        parser.add_argument(
            '--{}-file-name-prefix'.format(flag_stem),
            dest='{}_file_name_prefix'.format(flag_stem),
            help='the name prefix of yaml file which stores the calibration results of {} extrinsics'.format(help_word),
            required=False)
    return parser.parse_args()
def calcErrorGTAndEstimation(ext_gt, ext):
    """Return the error between ground truth and estimate as an Euler vector.

    The error is the relative transform gt^-1 * estimate, converted with
    sm.fromTEuler (identity estimate yields the zero vector).
    """
    relative_transform = ext_gt.inverse() * ext
    return sm.fromTEuler(relative_transform.T())
def _print_error_stats(sensor_label, err_vec_list_list, index_offset=0):
    """Print the mean and variance of the collected error vectors.

    Args:
        sensor_label: name used in the report header ("cam", "LiDAR", "IMU").
        err_vec_list_list: one list of error vectors per sensor.
        index_offset: added to the list index in the printed sensor id
            (cameras are numbered from 1, LiDARs/IMUs from 0).
    """
    for idx, err_vec_list in enumerate(err_vec_list_list):
        err_mat = np.array(err_vec_list)
        err_mean = np.mean(err_mat, axis=0)
        err_variance = np.var(err_mat, axis=0)
        print ("{} {} extrinsic calibration error".format(sensor_label, idx + index_offset))
        print ("mean of error: ", err_mean)
        print ("variance of error: ", err_variance)


def main():
    """Compare calibrated extrinsics against ground truth for every sensor
    type whose ground-truth file and result-file glob were supplied, and
    report the per-sensor mean and variance of the pose error.

    The duplicated statistics/reporting code of the original three sections
    is factored into _print_error_stats; the per-sensor loading logic is
    kept as-is because the kalibr parameter classes differ per sensor type.
    """
    parsed_args = parse_arguments()
    if parsed_args.cam_ground_truth and parsed_args.cam_file_name_prefix:
        cam_chain_ext_gt = kc.CameraChainParameters(parsed_args.cam_ground_truth)
        ext_gt_list = []
        num_cam = cam_chain_ext_gt.numCameras()
        # camera 0 is the reference, so only cameras 1..N-1 carry extrinsics
        for camNr in range(1, num_cam):
            ext_gt_list.append(cam_chain_ext_gt.getExtrinsicsReferenceToCam(camNr))
        err_vec_list_list = [[] for _ in range(num_cam - 1)]
        for file_name in glob.glob(parsed_args.cam_file_name_prefix):
            cam_chain_ext = kc.CameraChainParameters(file_name, parsed_args.reference_sensor)
            for camNr in range(1, num_cam):
                ext = cam_chain_ext.getExtrinsicsReferenceToCam(camNr)
                err_vec_list_list[camNr - 1].append(
                    calcErrorGTAndEstimation(ext_gt_list[camNr - 1], ext))
        _print_error_stats("cam", err_vec_list_list, index_offset=1)
    if parsed_args.lidar_ground_truth and parsed_args.lidar_file_name_prefix:
        lidar_list_ext_gt = kc.LiDARListParameters(parsed_args.lidar_ground_truth, parsed_args.reference_sensor)
        ext_gt_list = []
        num_lidar = lidar_list_ext_gt.numLiDARs()
        for idx in range(num_lidar):
            lidar_parameter = lidar_list_ext_gt.getLiDARParameters(idx)
            ext_gt_list.append(lidar_parameter.getExtrinsicsReferenceToHere())
        err_vec_list_list = [[] for _ in range(num_lidar)]
        for file_name in glob.glob(parsed_args.lidar_file_name_prefix):
            lidar_list_ext = kc.LiDARListParameters(file_name, parsed_args.reference_sensor)
            for idx in range(num_lidar):
                lidar_parameter = lidar_list_ext.getLiDARParameters(idx)
                ext = lidar_parameter.getExtrinsicsReferenceToHere()
                err_vec_list_list[idx].append(
                    calcErrorGTAndEstimation(ext_gt_list[idx], ext))
        _print_error_stats("LiDAR", err_vec_list_list)
    if parsed_args.imu_ground_truth and parsed_args.imu_file_name_prefix:
        imu_list_ext_gt = kc.ImuSetParameters(parsed_args.imu_ground_truth, parsed_args.reference_sensor)
        ext_gt_list = []
        num_imu = imu_list_ext_gt.numImus()
        for idx in range(num_imu):
            imu_parameter = imu_list_ext_gt.getImuParameters(idx)
            ext_gt_list.append(imu_parameter.getExtrinsicsReferenceToHere())
        err_vec_list_list = [[] for _ in range(num_imu)]
        for file_name in glob.glob(parsed_args.imu_file_name_prefix):
            imu_list_ext = kc.ImuSetParameters(file_name, parsed_args.reference_sensor)
            for idx in range(num_imu):
                imu_parameter = imu_list_ext.getImuParameters(idx)
                ext = imu_parameter.getExtrinsicsReferenceToHere()
                err_vec_list_list[idx].append(
                    calcErrorGTAndEstimation(ext_gt_list[idx], ext))
        _print_error_stats("IMU", err_vec_list_list)
# Script entry point: only run the comparison when executed directly.
if __name__ == "__main__":
    main()
| 6,052 | 1,967 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
load("@bazel_skylib//lib:paths.bzl", "paths")
load("//antlir/bzl:image.bzl", "image")
load("//antlir/bzl:oss_shim.bzl", "buck_genrule")
load("//antlir/bzl/image/feature:defs.bzl", "feature")
def install_kernel_modules(kernel, module_list):
    """Return image features that install `module_list` for `kernel`.

    Two intermediate genrules are created: a directory containing the selected
    module files copied out of the kernel's module layer, and a modules-load.d
    conf file listing the module names. The returned feature list installs
    both into the layer and clones the kernel's modules.* metadata files.
    """
    # This intermediate genrule is here to create a dir hierarchy
    # of kernel modules that are needed for the initrd. This
    # provides a single dir that can be cloned into the initrd
    # layer and allows for kernel modules that might be missing
    # from different kernel builds.
    buck_genrule(
        name = kernel.uname + "-selected--modules",
        out = ".",
        cmd = """
        mkdir -p $OUT
        pushd $OUT 2>/dev/null

        # copy the needed modules out of the module layer
        binary_path=( $(exe //antlir:find-built-subvol) )
        layer_loc="$(location {module_layer})"
        mod_layer_path=\\$( "${{binary_path[@]}}" "$layer_loc" )

        mods="{module_list}"
        for mod in $mods; do
            mod_src="$mod_layer_path/kernel/$mod"
            if [[ -f "$mod_src" ]]; then
                mod_dir=\\$(dirname "$mod")
                mkdir -p "$mod_dir"
                cp "$mod_src" "$mod_dir"
            fi
        done
        """.format(
            module_layer = kernel.artifacts.modules,
            module_list = " ".join(module_list),
        ),
        antlir_rule = "user-internal",
    )

    # Generate the modules-load.d conf: one module name (extension stripped)
    # per line, so systemd loads each selected module at initrd start.
    # NOTE(review): this rule name lacks the "-" separator used by the
    # "-selected--modules" rule above (uname + "selected--modules-load.conf").
    # It matches the ":" reference in the returned feature below, so it works,
    # but confirm the inconsistent naming is intentional.
    buck_genrule(
        name = kernel.uname + "selected--modules-load.conf",
        cmd = "echo '{}' > $OUT".format("\n".join([
            paths.basename(module).rsplit(".")[0]
            for module in module_list
        ])),
        antlir_rule = "user-internal",
        visibility = [],
    )

    return [
        # Install the kernel modules specified in module_list above into the
        # layer
        image.ensure_subdirs_exist("/usr/lib", paths.join("modules", kernel.uname)),
        feature.install(
            image.source(
                source = ":" + kernel.uname + "-selected--modules",
                path = ".",
            ),
            paths.join("/usr/lib/modules", kernel.uname, "kernel"),
        ),
        # Clone the depmod metadata (modules.dep/symbols/alias/builtin and
        # their .bin forms) from the kernel's module artifacts into the layer.
        [
            [
                image.clone(
                    kernel.artifacts.modules,
                    paths.join("/modules.{}".format(f)),
                    paths.join("/usr/lib/modules", kernel.uname, "modules.{}".format(f)),
                ),
                image.clone(
                    kernel.artifacts.modules,
                    paths.join("/modules.{}.bin".format(f)),
                    paths.join("/usr/lib/modules", kernel.uname, "modules.{}.bin".format(f)),
                ),
            ]
            for f in ("dep", "symbols", "alias", "builtin")
        ],
        # Ensure the kernel modules are loaded by systemd when the initrd is started
        image.ensure_subdirs_exist("/usr/lib", "modules-load.d"),
        feature.install(":" + kernel.uname + "selected--modules-load.conf", "/usr/lib/modules-load.d/initrd-modules.conf"),
    ]
| 3,251 | 948 |
__version__ = 'v2_2_8' | 22 | 13 |
"""Top level code that takes a atmosphere phase map and propagates a wavefront through the system"""
import os
import numpy as np
import traceback
import multiprocessing
import glob
import random
import pickle as pickle
import time
from proper_mod import prop_run
from medis.Utils.plot_tools import quicklook_im, view_datacube, loop_frames
from medis.Utils.misc import dprint
from medis.params import ap,cp,tp,mp,sp,iop,dp
import medis.Detector.MKIDs as MKIDs
import medis.Detector.H2RG as H2RG
import medis.Detector.pipeline as pipe
import medis.Detector.readout as read
import medis.Telescope.aberrations as aber
import medis.Atmosphere.atmos as atmos
sentinel = None
def gen_timeseries(inqueue, photon_table_queue, outqueue, conf_obj_tup):
    """
    generates observation sequence by calling optics_propagate in time series

    is the time loop wrapper for optics_propagate
    this is where the observation sequence is generated (timeseries of observations by the detector)
    thus, where the detector observes the wavefront created by optics_propagate (for MKIDs, the probability distribution)

    :param inqueue: time index for parallelization (used by multiprocess); a sentinel (None) ends the loop
    :param photon_table_queue: photon table (list of photon packets) in the multiprocessing format
    :param outqueue: results queue; receives (t, spectralcube), (t, E fields) or (t, gui_images, spectralcube)
    :param conf_obj_tup: tuple of parameter objects (tp, ap, sp, iop, cp, mp)
    :return:
    """
    # TODO change this name
    (tp, ap, sp, iop, cp, mp) = conf_obj_tup
    try:
        if tp.detector == 'MKIDs':
            # device parameters were pickled by the MKID initialization step
            with open(iop.device_params, 'rb') as handle:
                dp = pickle.load(handle)
        start = time.time()
        # consume time indices until the sentinel (None) is received
        for it, t in enumerate(iter(inqueue.get, sentinel)):
            kwargs = {'iter': t, 'params': [ap, tp, iop, sp]}
            _, save_E_fields = prop_run('medis.Telescope.optics_propagate', 1, ap.grid_size, PASSVALUE=kwargs,
                                        VERBOSE=False, PHASE_OFFSET=1)
            print(save_E_fields.shape)
            # intensity spectral cube from the last-plane complex fields
            spectralcube = np.sum(np.abs(save_E_fields[-1, :, :]) ** 2, axis=1)
            if tp.detector == 'ideal':
                image = np.sum(spectralcube, axis=0)
                vmin = np.min(spectralcube) * 10
                # cube = ideal.assign_calibtime(spectralcube,PASSVALUE['iter'])
                # cube = rawImageIO.arange_into_cube(packets, value='phase')
                # rawImageIO.make_phase_map(cube, plot=True)
                # return ''
            elif tp.detector == 'MKIDs':
                packets = read.get_packets(spectralcube, t, dp, mp)
                # packets = read.get_packets(save_E_fields, t, dp, mp)
                # if sp.show_wframe or sp.show_cube or sp.return_spectralcube:
                cube = pipe.arange_into_cube(packets, (mp.array_size[0], mp.array_size[1]))
                if mp.remove_close:
                    timecube = read.remove_close_photons(cube)
                if sp.show_wframe:
                    image = pipe.make_intensity_map(cube, (mp.array_size[0], mp.array_size[1]))
                # Interpolating spectral cube from ap.nwsamp discreet wavelengths
                # if sp.show_cube or sp.return_spectralcube:
                spectralcube = pipe.make_datacube(cube, (mp.array_size[0], mp.array_size[1], ap.w_bins))
                if sp.save_obs:
                    command = read.get_obs_command(packets, t)
                    photon_table_queue.put(command)
                vmin = 0.9
            if sp.show_wframe:
                dprint((sp.show_wframe, sp.show_wframe == 'continuous'))
                quicklook_im(image, logAmp=True, show=sp.show_wframe, vmin=vmin)
            if sp.show_cube:
                view_datacube(spectralcube, logAmp=True, vmin=vmin)
            if sp.use_gui:
                # BUG FIX: np.float was a deprecated alias of the builtin float
                # (removed in NumPy >= 1.24); use float directly.
                gui_images = np.zeros_like(save_E_fields, dtype=float)
                phase_ind = sp.gui_map_type == 'phase'
                amp_ind = sp.gui_map_type == 'amp'
                gui_images[phase_ind] = np.angle(save_E_fields[phase_ind], deg=False)
                gui_images[amp_ind] = np.absolute(save_E_fields[amp_ind])
                outqueue.put((t, gui_images, spectralcube))
            elif sp.return_E:
                outqueue.put((t, save_E_fields))
            else:
                outqueue.put((t, spectralcube))
            now = time.time()
            elapsed = float(now - start) / 60.
            each_iter = float(elapsed) / (it + 1)
            print('***********************************')
            dprint(f'{elapsed:.2f} minutes elapsed, each time step took {each_iter:.2f} minutes')  # * ap.numframes/sp.num_processes TODO change to log #
    except Exception as e:
        # best-effort worker process: report the traceback but don't crash the pool
        traceback.print_exc()
        # raise e
        pass
def wait_until(somepredicate, timeout, period=0.25, *args, **kwargs):
    """Poll `somepredicate(*args, **kwargs)` until it is truthy or time runs out.

    :param somepredicate: callable checked on each poll
    :param timeout: total seconds to keep polling
    :param period: seconds to sleep between polls
    :return: True as soon as the predicate succeeds, False on timeout
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        if somepredicate(*args, **kwargs):
            return True
        time.sleep(period)
    return False
def run_medis(EfieldsThread=None, plot=False):
    """
    main script to organize calls to various aspects of the simulation

    initialize different sub-processes, such as atmosphere and aberration maps, MKID device parameters
    sets up the multiprocessing features

    returns the observation sequence created by gen_timeseries

    :param EfieldsThread: optional GUI thread; when sp.use_gui is set, its
        newSample signals receive intermediate images from the workers
    :param plot: passed through to read.check_exists_obs_sequence
    :return: obs_sequence (or e_fields_sequence when sp.return_E is set)
    """
    # Printing Params
    dprint("Checking Params Info-print params from here (turn on/off)")
    # TODO change this to a logging function
    # for param in [ap, cp, tp, mp, sp, iop]:
    #     print('\n', param)
    #     pprint(param.__dict__)
    iop.makedir()  # make the directories at this point in case the user doesn't want to keep changing params.py
    # short-circuit: reuse a previously saved observation sequence if one exists
    check = read.check_exists_obs_sequence(plot)
    if check:
        if iop.obs_seq[-3:] == '.h5':
            obs_sequence = read.open_obs_sequence_hdf5(iop.obs_seq)
        else:
            obs_sequence = read.open_obs_sequence(iop.obs_seq)
        return obs_sequence
    begin = time.time()
    print('Creating New MEDIS Simulation')
    print('********** Taking Obs Data ***********')
    # 'spawn' may already be set; a second call raises RuntimeError
    try:
        multiprocessing.set_start_method('spawn')
    except RuntimeError:
        pass
    # initialize atmosphere
    print("Atmosdir = %s " % iop.atmosdir)
    if tp.use_atmos and glob.glob(iop.atmosdir + '/*.fits') == []:
        atmos.generate_maps()
    # initialize telescope
    if (tp.aber_params['QuasiStatic'] is True) and glob.glob(iop.aberdir + 'quasi/*.fits') == []:
        aber.generate_maps(tp.f_lens)
    if tp.aber_params['NCPA']:
        aber.generate_maps(tp.f_lens, 'NCPA', 'lens')
    # if tp.servo_error:
    #     aber.createObjMapsEmpty()
    aber.initialize_CPA_meas()
    if tp.active_null:
        aber.initialize_NCPA_meas()
    # the detector plane is always saved, alongside any user-requested planes
    if sp.save_locs is None:
        sp.save_locs = []
    if 'detector' not in sp.save_locs:
        sp.save_locs = np.append(sp.save_locs, 'detector')
        sp.gui_map_type = np.append(sp.gui_map_type, 'amp')
    # initialize MKIDs
    if tp.detector == 'MKIDs' and not os.path.isfile(iop.device_params):
        MKIDs.initialize()
    photon_table_queue = multiprocessing.Queue()
    inqueue = multiprocessing.Queue()
    outqueue = multiprocessing.Queue()
    jobs = []
    # a dedicated process drains the photon table queue into the obs file
    if sp.save_obs and tp.detector == 'MKIDs':
        proc = multiprocessing.Process(target=read.handle_output, args=(photon_table_queue, iop.obsfile))
        proc.start()
    if ap.companion is False:
        ap.contrast = []
    # pre-allocate the output sequence; shape depends on detector type
    if tp.detector == 'MKIDs':
        obs_sequence = np.zeros((ap.numframes, ap.w_bins, mp.array_size[1], mp.array_size[0]))
    else:
        obs_sequence = np.zeros((ap.numframes, ap.w_bins, ap.grid_size, ap.grid_size))
    if sp.return_E:
        e_fields_sequence = np.zeros((ap.numframes, len(sp.save_locs),
                                      ap.nwsamp, 1 + len(ap.contrast),
                                      ap.grid_size, ap.grid_size), dtype=np.complex64)
    else:
        e_fields_sequence = None
    # Sending Queues to gen_timeseries
    for i in range(sp.num_processes):
        p = multiprocessing.Process(target=gen_timeseries, args=(inqueue, photon_table_queue, outqueue, (tp, ap, sp, iop, cp, mp)))
        jobs.append(p)
        p.start()
    if tp.quick_ao:
        # quick AO: feed all timesteps straight to the workers
        for t in range(ap.startframe, ap.startframe + ap.numframes):
            inqueue.put(t)
            if sp.use_gui:
                it, gui_images, spectralcube = outqueue.get()
                # block until the GUI is unpaused before emitting samples
                while sp.play_gui is False:
                    time.sleep(0.005)
                EfieldsThread.newSample.emit(gui_images)
                EfieldsThread.sct.newSample.emit((it, spectralcube))
    else:
        dprint('If the code has hung here it probably means it cant read the CPA file at some iter')
        # full AO: wait for the (N)CPA measurement file to catch up with each
        # timestep before dispatching it, polling the pickled iteration counter
        for t in range(ap.startframe, ap.startframe + ap.numframes):
            # time.sleep(rollout[t])
            print(t)
            if not tp.active_null:
                with open(iop.CPA_meas, 'rb') as handle:
                    _, iters = pickle.load(handle)
                # print t, iter, 't, iter'
                print(iters, 'iters')
                while iters[0] + ap.startframe < t:
                    time.sleep(0.1)
                    print('looping', t)
                    try:
                        with open(iop.CPA_meas, 'rb') as handle:
                            _, iters = pickle.load(handle)
                        iter = iters[0]
                        # sys.stdout.write("\rWaiting for aberration measurements...\n")
                        # sys.stdout.flush()
                    except EOFError:
                        # the measurement file may be mid-write; retry next poll
                        print('Errored')
            else:
                with open(iop.NCPA_meas, 'rb') as handle:
                    _, _, iter = pickle.load(handle)
                while iter < t:
                    time.sleep(0.1)
                    try:
                        with open(iop.NCPA_meas, 'rb') as handle:
                            _, _, iter = pickle.load(handle)
                        # sys.stdout.write("\rWaiting for aberration measurements...\n")
                        # sys.stdout.flush()
                    except EOFError:
                        print('Errored')
            # if t in delay_inds:
            #     with open(iop.NCPA_meas, 'rb') as handle:
            #         _, _, iter = pickle.load(handle)
            #     print iter, t
            #     while iter != t:
            #         with open(iop.NCPA_meas, 'rb') as handle:
            #             _, _, iter = pickle.load(handle)
            #         # wait_until()
            inqueue.put(t)
    for i in range(sp.num_processes):
        # Send the sentinal to tell Simulation to end
        inqueue.put(sentinel)
    # collect one result per frame; t in the tuple identifies the slot
    for t in range(ap.numframes):
        if sp.return_E:
            t, save_E_fields = outqueue.get()
            e_fields_sequence[t - ap.startframe] = save_E_fields
        else:
            t, spectralcube = outqueue.get()
            obs_sequence[t - ap.startframe] = spectralcube  # should be in the right order now because of the identifier
    # for i, p in enumerate(jobs):
    #     p.join()
    photon_table_queue.put(None)
    outqueue.put(None)
    if sp.save_obs and tp.detector == 'MKIDs':
        proc.join()
    obs_sequence = np.array(obs_sequence)
    print('MEDIS Data Run Completed')
    finish = time.time()
    if sp.timing is True:
        print(f'Time elapsed: {(finish-begin)/60:.2f} minutes')
    print('**************************************')
    print(f"Shape of obs_sequence = {np.shape(obs_sequence)}")
    if tp.detector == 'H2RG':
        obs_sequence = H2RG.scale_to_luminos(obs_sequence)
    # NOTE(review): 'hp' is not defined or imported in this module (params
    # imports ap,cp,tp,mp,sp,iop,dp); this branch would raise NameError if
    # reached — confirm the intended parameter object.
    if tp.detector == 'H2RG' and hp.use_readnoise:
        obs_sequence = H2RG.add_readnoise(obs_sequence, hp.readnoise)
    if sp.return_E:
        read.save_fields(e_fields_sequence, fields_file=iop.fields)
        return e_fields_sequence
    else:
        dprint("Saving obs_sequence as hdf5 file:")
        read.save_obs_sequence(obs_sequence, obs_seq_file=iop.obs_seq)
        return obs_sequence
if __name__ == '__main__':
    # enable timing output when the simulation is run directly
    sp.timing = True
    run_medis()
| 12,173 | 3,956 |
# 3.3
def sameOrNot(first_integer, second_integer):
    """Print and return whether the two integers are equal.

    Uses XOR: a ^ b is 0 (falsy) exactly when a == b. Returning the printed
    string ("Same" / "Not Same") makes the function testable; callers that
    ignore the return value are unaffected (it previously returned None).
    """
    if first_integer ^ second_integer:
        result = "Not Same"
    else:
        result = "Same"
    print(result)
    return result


if __name__ == "__main__":
    # Guarding the prompts lets the module be imported without blocking on input().
    user_integer_1 = int(input("Enter an integer: "))
    user_integer_2 = int(input("Enter an other integer or the same as the first one: "))
    sameOrNot(user_integer_1, user_integer_2)
from tests.fixtures.regressors.simple_gaussian_mlp_regressor import (
SimpleGaussianMLPRegressor)
__all__ = ['SimpleGaussianMLPRegressor']
| 144 | 53 |
# Copyright 2021, Crepaldi Michele.
#
# Developed as a thesis project at the TORSEC research group of the Polytechnic of Turin (Italy) under the supervision
# of professor Antonio Lioy and engineer Andrea Atzeni and with the support of engineer Andrea Marcelli.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import base64 # provides functions for encoding/decoding binary data to/from printable ASCII characters
import hashlib # implements a common interface to many different secure hash and message digest algorithms
import mlflow # open source platform for managing the end-to-end machine learning lifecycle
from logzero import logger # robust and effective logging for Python
from mlflow.entities import RunStatus # status of a Run
from mlflow.tracking.fluent import _get_experiment_id # get current experiment id function
from mlflow.utils import mlflow_tags # mlflow tags
class Hash:
    """ Simple wrapper around hashlib sha256 functions. """

    def __init__(self):
        """ Initialize hash class using hashlib sha256 implementation. """
        self.m = hashlib.sha256()

    def update(self, w):
        """ Update current hash value.

        Args:
            w: String to update hash value with
        """
        encoded = w.encode('utf-8')
        self.m.update(encoded)

    def copy(self):
        """ Return a copy of the Hash object

        Returns:
            Copy of the current Hash instance
        """
        duplicate = Hash()
        duplicate.m = self.m.copy()
        return duplicate

    def get_b64(self):
        """ Get base64 encoding of the current hash value digest.

        Returns:
            Base64 encoding of the hash digest.
        """
        digest = self.m.digest()
        return base64.urlsafe_b64encode(digest).decode('utf-8')
def _already_ran(entry_point_name,  # entry point name of the run
                 parameters,  # parameters of the run
                 git_commit,  # git version of the code run
                 config_sha,  # sha256 of config file
                 ignore_git=False,  # whether to ignore git version or not (default: False)
                 experiment_id=None,  # experiment id (default: None)
                 resume=False):  # whether to resume a failed/killed previous run or not (default: False)
    """ Best-effort detection of if a run with the given entrypoint name, parameters, and experiment id already ran.
    The run must have completed successfully and have at least the parameters provided.

    Args:
        entry_point_name: Entry point name of the run
        parameters: Parameters of the run
        git_commit: Git version of the code run
        config_sha: Sha256 of config file
        ignore_git: Whether to ignore git version or not (default: False)
        experiment_id: Experiment id (default: None)
        resume: Whether to resume a failed/killed previous run (only for training) or not (default: False)

    Returns:
        Previously executed run if found (possibly a freshly submitted
        resumption of an unfinished matching run), None otherwise.
    """
    # if experiment ID is not provided retrieve current experiment ID
    experiment_id = experiment_id if experiment_id is not None else _get_experiment_id()
    # instantiate MLflowClient (creates and manages experiments and runs)
    client = mlflow.tracking.MlflowClient()
    # get reversed list of run information (from last to first)
    # NOTE(review): MlflowClient.list_run_infos is deprecated in newer mlflow
    # releases — confirm the pinned mlflow version still provides it.
    all_run_infos = reversed(client.list_run_infos(experiment_id))
    run_to_resume_id = None
    # for all runs info
    for run_info in all_run_infos:
        # fetch run from backend store
        full_run = client.get_run(run_info.run_id)
        # get run dictionary of tags
        tags = full_run.data.tags
        # if there is no entry point, or the entry point for the run is different from 'entry_point_name', continue
        if tags.get(mlflow_tags.MLFLOW_PROJECT_ENTRY_POINT, None) != entry_point_name:
            continue
        # initialize 'match_failed' bool to false
        match_failed = False
        # for each parameter in the provided run parameters
        for param_key, param_value in parameters.items():
            # get run param value from the run dictionary of parameters
            run_value = full_run.data.params.get(param_key)
            # if the current parameter value is different from the run parameter set 'match_failed' to true and break
            if str(run_value) != str(param_value):
                match_failed = True
                break
        # if the current run is not the one we are searching for go to the next one
        if match_failed:
            continue
        # get previous run git commit version
        previous_version = tags.get(mlflow_tags.MLFLOW_GIT_COMMIT, None)
        # if the previous version is different from the current one, go to the next one
        if not ignore_git and git_commit != previous_version:
            logger.warning("Run matched, but has a different source version, so skipping (found={}, expected={})"
                           .format(previous_version, git_commit))
            continue
        # get config file sha256 from the run
        run_config_sha = full_run.data.params.get('config_sha')
        # if the config file sha256 for the run is different from the current sha, go to the next one
        if str(run_config_sha) != str(config_sha):
            logger.warning("Run matched, but config is different.")
            continue
        # if the run is not finished
        if run_info.to_proto().status != RunStatus.FINISHED:
            if resume:
                # if resume is enabled, set current run to resume id -> if no newer completed run is found,
                # this stopped run will be resumed
                run_to_resume_id = run_info.run_id
                continue
            else:  # otherwise skip it and try with the next one
                logger.warning("Run matched, but is not FINISHED, so skipping " "(run_id={}, status={})"
                               .format(run_info.run_id, run_info.status))
                continue
        # otherwise (if the run was found and it is exactly the same), return the found run
        return client.get_run(run_info.run_id)
    # if no previously executed (and finished) run was found but a stopped run was found, resume such run
    if run_to_resume_id is not None:
        logger.info("Resuming run with entrypoint=%s and parameters=%s" % (entry_point_name, parameters))
        # update new run parameters with the stopped run id
        parameters.update({
            'run_id': run_to_resume_id
        })
        # submit new run that will resume the previously interrupted one
        submitted_run = mlflow.run(".", entry_point_name, parameters=parameters)
        # log config file sha256 as parameter in the submitted run
        client.log_param(submitted_run.run_id, 'config_sha', config_sha)
        # return submitted (new) run
        return mlflow.tracking.MlflowClient().get_run(submitted_run.run_id)
    # if the searched run was not found return 'None'
    logger.warning("No matching run has been found.")
    return None
def run(entrypoint,  # entrypoint of the run
        parameters,  # parameters of the run
        config_sha):  # sha256 of config file
    """ Launch run.

    Args:
        entrypoint: Entrypoint of the run
        parameters: Parameters of the run
        config_sha: Sha256 of config file

    Returns:
        Launched run.
    """
    logger.info("Launching new run for entrypoint={} and parameters={}".format(entrypoint, parameters))
    # submit (start) run
    submitted_run = mlflow.run(".", entrypoint, parameters=parameters)
    # record the config file hash on the new run so later lookups can match it
    tracking_client = mlflow.tracking.MlflowClient()
    tracking_client.log_param(submitted_run.run_id, 'config_sha', config_sha)
    return tracking_client.get_run(submitted_run.run_id)
def get_or_run(entrypoint,  # entrypoint of the run
               parameters,  # parameters of the run
               git_commit,  # git version of the run
               config_sha,  # sha256 of config file
               ignore_git=False,  # whether to ignore git version or not (default: False)
               use_cache=True,  # whether to cache previous runs or not (default: True)
               resume=False):  # whether to resume a failed/killed previous run or not (default: False)
    """ Get previously executed run, if it exists, or launch run.

    Args:
        entrypoint: Entrypoint of the run
        parameters: Parameters of the run
        git_commit: Git version of the run
        config_sha: Sha256 of config file
        ignore_git: Whether to ignore git version or not (default: False)
        use_cache: Whether to cache previous runs or not (default: True)
        resume: Whether to resume a failed/killed previous run or not (default: False)

    Returns:
        Found or launched run.
    """
    cached_run = _already_ran(entrypoint, parameters, git_commit,
                              ignore_git=ignore_git, resume=resume, config_sha=config_sha)
    # reuse the cached run only when caching is enabled and a match was found
    if use_cache and cached_run:
        logger.info("Found existing run for entrypoint={} and parameters={}".format(entrypoint, parameters))
        return cached_run
    return run(entrypoint=entrypoint, parameters=parameters, config_sha=config_sha)
| 10,286 | 2,779 |
'''Tests for the data module.'''
import pytest
import numpy as np
import torch
from torch.utils.data import TensorDataset
from torchutils.data import mean_std_over_dataset, image2tensor, tensor2image
@pytest.mark.parametrize('no_samples', [100, 1000])
@pytest.mark.parametrize('feature_shape', [(), (1,), (10,), (10,10)])
def test_mean_std_over_dataset(no_samples, feature_shape):
    '''Test correctness of evaluating the mean and standard deviation.'''
    torch.manual_seed(0)
    features = torch.randn(no_samples, *feature_shape)
    labels = torch.randint(2, size=(no_samples,))
    mean, std = mean_std_over_dataset(TensorDataset(features, labels))
    # reference statistics computed directly over the raw feature tensor
    assert np.isclose(mean, features.numpy().mean(), rtol=1e-02, atol=1e-03)
    assert np.isclose(std, features.numpy().std(), rtol=1e-02, atol=1e-03)
@pytest.mark.parametrize('shape', [(10,10), (10,10,3), (1,10,10,3)])
def test_image2tensor2image(shape):
    '''Test the transformation and back-transformation of an image.'''
    np.random.seed(0)
    original = np.random.randn(*shape)
    # converting to tensor and back must preserve the (squeezed) values
    round_tripped = tensor2image(image2tensor(original))
    assert np.allclose(original.squeeze(), round_tripped.squeeze())
@pytest.mark.parametrize('shape', [(10,10), (3,10,10), (1,3,10,10)])
def test_tensor2image2tensor(shape):
    '''Test the transformation and back-transformation of a tensor.'''
    torch.manual_seed(0)
    original = torch.randn(*shape)
    # converting to image and back must preserve the (squeezed) values
    round_tripped = image2tensor(tensor2image(original))
    assert np.allclose(original.squeeze(), round_tripped.squeeze())
| 1,578 | 610 |
from __future__ import absolute_import
import importlib
import logging
import uuid
from contextlib import contextmanager
import fasteners
import json
import os
import shutil
import tarfile
import glob
import sys
import time
from oasislmf.model_execution.bin import prepare_model_run_directory, prepare_model_run_inputs
from oasislmf.model_execution import runner
from oasislmf.utils import status
from oasislmf.utils.exceptions import OasisException
from oasislmf.utils.log import oasis_log
from pathlib2 import Path
from celery import Celery
from celery.task import task
from ..utils.path import setcwd
from ..conf.settings import settings
from ..conf import celery as celery_conf
'''
Celery task wrapper for Oasis ktools calculation.
'''
# NOTE(review): the triple-quoted string above follows the imports, so it is a
# plain expression statement rather than the module docstring — consider moving
# it to the top of the file.

# Suffix of the input archives stored under INPUTS_DATA_DIRECTORY.
ARCHIVE_FILE_SUFFIX = '.tar'

# Celery application configured from the project's celery settings module.
CELERY = Celery()
CELERY.config_from_object(celery_conf)

# Log the worker configuration at import time so started workers are auditable.
logging.info("Started worker")
logging.info("INPUTS_DATA_DIRECTORY: {}".format(settings.get('worker', 'INPUTS_DATA_DIRECTORY')))
logging.info("OUTPUTS_DATA_DIRECTORY: {}".format(settings.get('worker', 'OUTPUTS_DATA_DIRECTORY')))
logging.info("MODEL_DATA_DIRECTORY: {}".format(settings.get('worker', 'MODEL_DATA_DIRECTORY')))
logging.info("WORKING_DIRECTORY: {}".format(settings.get('worker', 'WORKING_DIRECTORY')))
logging.info("KTOOLS_BATCH_COUNT: {}".format(settings.get('worker', 'KTOOLS_BATCH_COUNT')))
logging.info("KTOOLS_ALLOC_RULE: {}".format(settings.get('worker', 'KTOOLS_ALLOC_RULE')))
logging.info("KTOOLS_MEMORY_LIMIT: {}".format(settings.get('worker', 'KTOOLS_MEMORY_LIMIT')))
logging.info("LOCK_TIMEOUT_IN_SECS: {}".format(settings.get('worker', 'LOCK_TIMEOUT_IN_SECS')))
logging.info("LOCK_RETRY_COUNTDOWN_IN_SECS: {}".format(settings.get('worker', 'LOCK_RETRY_COUNTDOWN_IN_SECS')))
logging.info("POST_ANALYSIS_SLEEP_IN_SECS: {}".format(settings.get('worker', 'POST_ANALYSIS_SLEEP_IN_SECS')))
class MissingInputsException(OasisException):
    """Raised when the inputs archive for an analysis cannot be found."""

    def __init__(self, input_archive):
        message = 'Inputs location not found: {}'.format(input_archive)
        super(MissingInputsException, self).__init__(message)
class InvalidInputsException(OasisException):
    """Raised when the inputs location exists but is not a tar archive."""

    def __init__(self, input_archive):
        message = 'Inputs location not a tarfile: {}'.format(input_archive)
        super(InvalidInputsException, self).__init__(message)
class MissingModelDataException(OasisException):
    """Raised when the model data directory for an analysis does not exist."""

    def __init__(self, model_data_path):
        message = 'Model data not found: {}'.format(model_data_path)
        super(MissingModelDataException, self).__init__(message)
@contextmanager
def get_lock():
    """Acquire the worker's inter-process lock, yielding whether it was obtained.

    Yields:
        bool: True if the lock was acquired within LOCK_TIMEOUT_IN_SECS,
        False on timeout (callers must check before proceeding).

    The release is performed in a finally block so the lock is freed even when
    the caller's body raises — the original released it only on a clean exit,
    leaking the lock on exceptions.
    """
    lock = fasteners.InterProcessLock(settings.get('worker', 'LOCK_FILE'))
    gotten = lock.acquire(blocking=True, timeout=settings.getfloat('worker', 'LOCK_TIMEOUT_IN_SECS'))
    try:
        yield gotten
    finally:
        if gotten:
            lock.release()
@task(name='run_analysis', bind=True)
def start_analysis_task(self, input_location, analysis_settings_json):
    '''
    Task wrapper for running an analysis.

    Args:
        input_location (string): Location of the inputs archive, relative to
            INPUTS_DATA_DIRECTORY and without the tar suffix.
        analysis_settings_json (list): The analysis settings; only element 0
            is used (presumably the settings dict — confirm against callers).

    Returns:
        (string) The location of the outputs.
    '''
    logging.info("LOCK_FILE: {}".format(settings.get('worker', 'LOCK_FILE')))
    logging.info("LOCK_RETRY_COUNTDOWN_IN_SECS: {}".format(
        settings.get('worker', 'LOCK_RETRY_COUNTDOWN_IN_SECS')))
    with get_lock() as gotten:
        if not gotten:
            logging.info("Failed to get resource lock - retry task")
            # max_retries=None is supposed to be unlimited but doesn't seem to work
            # Set instead to a large number
            raise self.retry(
                max_retries=9999999,
                countdown=settings.getint('worker', 'LOCK_RETRY_COUNTDOWN_IN_SECS'))
        logging.info("Acquired resource lock")
        try:
            logging.info("INPUTS_DATA_DIRECTORY: {}".format(settings.get('worker', 'INPUTS_DATA_DIRECTORY')))
            logging.info("OUTPUTS_DATA_DIRECTORY: {}".format(settings.get('worker', 'OUTPUTS_DATA_DIRECTORY')))
            logging.info("MODEL_DATA_DIRECTORY: {}".format(settings.get('worker', 'MODEL_DATA_DIRECTORY')))
            logging.info("WORKING_DIRECTORY: {}".format(settings.get('worker', 'WORKING_DIRECTORY')))
            logging.info("KTOOLS_BATCH_COUNT: {}".format(settings.get('worker', 'KTOOLS_BATCH_COUNT')))
            logging.info("KTOOLS_MEMORY_LIMIT: {}".format(settings.get('worker', 'KTOOLS_MEMORY_LIMIT')))
            self.update_state(state=status.STATUS_RUNNING)
            # the payload is a one-element list; element 0 holds the settings
            output_location = start_analysis(analysis_settings_json[0], input_location)
        except Exception:
            logging.exception("Model execution task failed.")
            raise
        # give downstream consumers time before releasing the lock
        time.sleep(settings.getint('worker', 'POST_ANALYSIS_SLEEP_IN_SECS'))
    return output_location
@oasis_log()
def start_analysis(analysis_settings, input_location):
    '''
    Run an analysis.

    Args:
        analysis_settings (dict): The analysis settings; the payload lives
            under the 'analysis_settings' key.
        input_location (string): Identifier of the inputs archive inside
            INPUTS_DATA_DIRECTORY (without the archive suffix).

    Returns:
        (string) The location of the outputs.

    Raises:
        MissingInputsException: if the inputs archive does not exist.
        InvalidInputsException: if the inputs archive is not a tarfile.
        MissingModelDataException: if the model data directory is missing.
    '''
    # Check that the input archive exists and is valid
    input_archive = os.path.join(
        settings.get('worker', 'INPUTS_DATA_DIRECTORY'),
        input_location + ARCHIVE_FILE_SUFFIX
    )
    if not os.path.exists(input_archive):
        raise MissingInputsException(input_archive)
    if not tarfile.is_tarfile(input_archive):
        raise InvalidInputsException(input_archive)

    source_tag = analysis_settings['analysis_settings']['source_tag']
    analysis_tag = analysis_settings['analysis_settings']['analysis_tag']
    # Bug fix: the format arguments were swapped, so the analysis tag was
    # logged as the source tag and vice versa.
    logging.info(
        "Source tag = {}; Analysis tag: {}".format(source_tag, analysis_tag)
    )

    module_supplier_id = analysis_settings['analysis_settings']['module_supplier_id']
    model_version_id = analysis_settings['analysis_settings']['model_version_id']
    logging.info(
        "Model supplier - version = {} {}".format(module_supplier_id, model_version_id)
    )

    # Use the default runner unless the supplier ships its own module.
    use_default_model_runner = not Path(settings.get('worker', 'SUPPLIER_MODULE_DIRECTORY'), module_supplier_id).exists()

    model_data_path = os.path.join(
        settings.get('worker', 'MODEL_DATA_DIRECTORY'),
        module_supplier_id,
        model_version_id
    )
    if not os.path.exists(model_data_path):
        raise MissingModelDataException(model_data_path)

    logging.info("Setting up analysis working directory")
    directory_name = "{}_{}_{}".format(source_tag, analysis_tag, uuid.uuid4().hex)
    working_directory = os.path.join(settings.get('worker', 'WORKING_DIRECTORY'), directory_name)

    # Reinsurance outputs are optional; default to no RI processing.
    ri = analysis_settings['analysis_settings'].get('ri_output', False)

    prepare_model_run_directory(working_directory, ri=ri, model_data_src_path=model_data_path, inputs_archive=input_archive)
    prepare_model_run_inputs(analysis_settings['analysis_settings'], working_directory, ri=ri)

    with setcwd(working_directory):
        logging.info("Working directory = {}".format(working_directory))

        # Persist the analysis_settings
        with open("analysis_settings.json", "w") as json_file:
            json.dump(analysis_settings, json_file)

        if use_default_model_runner:
            model_runner_module = runner
        else:
            sys.path.append(settings.get('worker', 'SUPPLIER_MODULE_DIRECTORY'))
            model_runner_module = importlib.import_module('{}.supplier_model_runner'.format(module_supplier_id))

        ##! to add check that RI directories take the form of RI_{ID} and ID is a monotonic index
        num_reinsurance_iterations = len(glob.glob('RI_[0-9]*'))

        model_runner_module.run(
            analysis_settings['analysis_settings'],
            settings.getint('worker', 'KTOOLS_BATCH_COUNT'),
            num_reinsurance_iterations=num_reinsurance_iterations,
            # NOTE(review): KTOOLS_MEMORY_LIMIT is read with getboolean here but
            # logged as a plain value above -- confirm the intended type.
            ktools_mem_limit=settings.getboolean('worker', 'KTOOLS_MEMORY_LIMIT'),
            set_alloc_rule=settings.getint('worker', 'KTOOLS_ALLOC_RULE'),
            fifo_tmp_dir=False
        )

    # Archive the outputs under a fresh opaque identifier.
    output_location = uuid.uuid4().hex
    output_filepath = os.path.join(
        settings.get('worker', 'OUTPUTS_DATA_DIRECTORY'), output_location + ARCHIVE_FILE_SUFFIX)
    output_directory = os.path.join(working_directory, "output")
    with tarfile.open(output_filepath, "w:gz") as tar:
        tar.add(output_directory, arcname="output")

    if settings.getboolean('worker', 'DO_CLEAR_WORKING'):
        shutil.rmtree(working_directory, ignore_errors=True)

    logging.info("Output location = {}".format(output_location))
    return output_location
| 8,593 | 2,732 |
import time
# Importo el modulo sys y aumento el limite de recursión, ya que viene predefinido con 1000
import sys
sys.setrecursionlimit(1000000) # 1 000 000
def factorial_iterativo(n):
    """Compute n! iteratively; returns 1 for any n <= 1."""
    producto = 1
    for factor in range(2, n + 1):
        producto *= factor
    return producto
def factorial_recursivo(n):
    """Compute n! recursively; returns 1 for any n <= 1.

    Bug fixes:
    - The recursive step previously called factorial_iterativo, so this
      function never actually recursed and the recursion benchmark in
      __main__ measured the iterative algorithm twice.
    - The base case was ``n == 1``, which never terminates for n == 0;
      ``n <= 1`` handles 0 (and negative inputs) like the iterative version.
    """
    if n <= 1:
        return 1
    return n * factorial_recursivo(n - 1)
if __name__ == '__main__':
    # NOTE: 10,000,000! is an astronomically large integer -- each timing run
    # below is slow and memory-hungry by design (it is a complexity demo).
    n = 10000000
    print('Complejidad temporal de un algoritmo ITERATIVO. Factorial')
    comienzo = time.time()
    factorial_iterativo(n)
    final = time.time()
    tiempo_iterativo = final - comienzo
    print(tiempo_iterativo)
    print('--------------------')
    print('Complejidad temporal de un algoritmo RECURSIVO. Factorial')
    comienzo = time.time()
    factorial_recursivo(n)
    final = time.time()
    tiempo_recursivo = final - comienzo
    print(tiempo_recursivo)
    print('-------------------')
    # Absolute difference between the two wall-clock timings.
    diferencia = abs(tiempo_iterativo - tiempo_recursivo)
    print(f'La diferencia de tiempo entre un algoritmo y otro es {diferencia}')
| 1,088 | 404 |
import smtplib
from email.header import Header
from email.mime.text import MIMEText
class EmailSender:
    """Send simple HTML e-mails through the 163.com SMTP server.

    SECURITY NOTE(review): the account credentials are hard-coded below;
    move them to environment variables or a secrets store.
    """

    def __init__(self) -> None:
        self.receiver = "kongandmarx@163.com"
        self.sender = "kongandmarx@163.com"
        # Port 994 is 163.com's SSL SMTP port.
        self.smtp_obj = smtplib.SMTP_SSL("smtp.163.com", port=994)
        # self.smtp_obj.connect("smtp.163.com", 25)
        self.smtp_obj.login("kongandmarx@163.com", "YVLZXZWJBYAHLCAJ")

    def send(self, to, subject, text):
        """Send ``text`` (wrapped in an <h1>) to address ``to``.

        Bug fix: the envelope recipient was previously always
        ``self.receiver``; the ``to`` argument only appeared in the header,
        so mail was silently delivered to the wrong address.
        """
        t = f"""
<h1> {text} </h1>
"""
        message = MIMEText(t, "html")
        message["Subject"] = Header(subject)
        message["From"] = Header(f"{self.sender}")
        message["To"] = Header(f"{to}")
        self.smtp_obj.sendmail(self.sender, to, message.as_string())
# -*- coding: utf-8 -*-
import re

from biothings.www.settings.default import *
from www.api.query_builder import ESQueryBuilder
from www.api.query import ESQuery
from www.api.transform import ESResultTransformer
from www.api.handlers import GeneHandler, QueryHandler, MetadataHandler, StatusHandler, TaxonHandler, DemoHandler
# *****************************************************************************
# Elasticsearch variables
# *****************************************************************************
# elasticsearch server transport url
ES_HOST = 'localhost:9200'
# elasticsearch index name
ES_INDEX = 'mygene_current'
# elasticsearch document type
ES_DOC_TYPE = 'gene'

# API version prefix used in the URL patterns below.
API_VERSION = 'v3'
# Environment variable that can override the host.
HOST_ENVAR_NAME = "MG_HOST"

# *****************************************************************************
# App URL Patterns
# *****************************************************************************
# (regex, tornado RequestHandler) pairs; {} is filled with API_VERSION.
APP_LIST = [
    (r"/status", StatusHandler),
    (r"/metadata/?", MetadataHandler),
    (r"/metadata/fields/?", MetadataHandler),
    (r"/demo/?$", DemoHandler),
    (r"/{}/species/(\d+)/?".format(API_VERSION), TaxonHandler),
    (r"/{}/taxon/(\d+)/?".format(API_VERSION), TaxonHandler),
    (r"/{}/gene/(.+)/?".format(API_VERSION), GeneHandler),
    (r"/{}/gene/?$".format(API_VERSION), GeneHandler),
    (r"/{}/query/?".format(API_VERSION), QueryHandler),
    (r"/{}/metadata/?".format(API_VERSION), MetadataHandler),
    (r"/{}/metadata/fields/?".format(API_VERSION), MetadataHandler),
]
###############################################################################
# app-specific query builder, query, and result transformer classes
###############################################################################
# NOTE(review): the *_KWARGS dicts updated at the bottom of this file are
# expected to come from the wildcard import of biothings default settings --
# confirm they are defined there.
# *****************************************************************************
# Subclass of biothings.www.api.es.query_builder.ESQueryBuilder to build
# queries for this app
# *****************************************************************************
ES_QUERY_BUILDER = ESQueryBuilder
# *****************************************************************************
# Subclass of biothings.www.api.es.query.ESQuery to execute queries for this app
# *****************************************************************************
ES_QUERY = ESQuery
# *****************************************************************************
# Subclass of biothings.www.api.es.transform.ESResultTransformer to transform
# ES results for this app
# *****************************************************************************
ES_RESULT_TRANSFORMER = ESResultTransformer

# Google Analytics action labels per endpoint.
GA_ACTION_QUERY_GET = 'query_get'
GA_ACTION_QUERY_POST = 'query_post'
GA_ACTION_ANNOTATION_GET = 'gene_get'
GA_ACTION_ANNOTATION_POST = 'gene_post'
GA_TRACKER_URL = 'MyGene.info'
STATUS_CHECK_ID = '1017'
JSONLD_CONTEXT_PATH = 'www/context/context.json'

# MYGENE THINGS
# This essentially bypasses the es.get fallback as in myvariant...
# The first regex matches integers, in which case the query is made against
# entrezgene; all annotation queries are multimatch against the listed fields.
ANNOTATION_ID_REGEX_LIST = [(re.compile(r'^\d+$'), ['entrezgene', 'retired']),
                            (re.compile(r'.*'), ['ensembl.gene'])]
DEFAULT_FIELDS = ['name', 'symbol', 'taxid', 'entrezgene']

# Common-name species shortcuts -> NCBI taxonomy id (and genome assembly).
TAXONOMY = {
    "human": {"tax_id": "9606", "assembly": "hg38"},
    "mouse": {"tax_id": "10090", "assembly": "mm10"},
    "rat": {"tax_id": "10116", "assembly": "rn4"},
    "fruitfly": {"tax_id": "7227", "assembly": "dm3"},
    "nematode": {"tax_id": "6239", "assembly": "ce10"},
    "zebrafish": {"tax_id": "7955", "assembly": "zv9"},
    "thale-cress": {"tax_id": "3702"},
    "frog": {"tax_id": "8364", "assembly": "xenTro3"},
    "pig": {"tax_id": "9823", "assembly": "susScr2"}
}

# Maps user-facing query prefixes to the underlying ES field queries.
DATASOURCE_TRANSLATIONS = {
    "refseq:": r"refseq.\\\*:",
    "accession:": r"accession.\\\*:",
    "reporter:": r"reporter.\\\*:",
    "interpro:": r"interpro.\\\*:",
    # GO:xxxxx looks like a ES raw query, so just look for
    # the term as a string in GO's ID (note: searching every keys
    # will raise an error because pubmed key is a int and we're
    # searching with a string term.
    "GO:": r"go.\\\*.id:go\\\:",
    #"GO:": r"go.\\\*:go.",
    "homologene:": r"homologene.\\\*:",
    "reagent:": r"reagent.\\\*:",
    "uniprot:": r"uniprot.\\\*:",
    "ensemblgene:": "ensembl.gene:",
    "ensembltranscript:": "ensembl.transcript:",
    "ensemblprotein:": "ensembl.protein:",
    # some specific datasources need to be case-insensitive
    "hgnc:": r"HGNC:",
    "hprd:": r"HPRD:",
    "mim:": r"MIM:",
    "mgi:": r"MGI:",
    "ratmap:": r"RATMAP:",
    "rgd:": r"RGD:",
    "flybase:": r"FLYBASE:",
    "wormbase:": r"WormBase:",
    "tair:": r"TAIR:",
    "zfin:": r"ZFIN:",
    "xenbase:": r"Xenbase:",
    "mirbase:": r"miRBase:",
}

# Typedef allowing ?species=human,... shortcuts, translated to tax ids.
SPECIES_TYPEDEF = {'species': {'type': list, 'default': ['all'], 'max': 10,
                   'translations': [(re.compile(pattern, re.I), translation['tax_id']) for (pattern, translation) in TAXONOMY.items()]}}
# For datasource translations
DATASOURCE_TRANSLATION_TYPEDEF = [(re.compile(pattern, re.I), translation) for
                                  (pattern, translation) in DATASOURCE_TRANSLATIONS.items()]
# Same translations with the trailing ':' and escapes stripped, used for
# field-name (_source / scopes) translation rather than query translation.
TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF = [(re.compile(re.sub(r':.*', '', pattern).replace('\\', ''), re.I),
                                           re.sub(r':.*', '', translation).replace('\\','')) for (pattern, translation) in DATASOURCE_TRANSLATIONS.items()]

# Kwarg control update for mygene specific kwargs
# ES KWARGS (_source, scopes,
ANNOTATION_GET_ES_KWARGS['_source'].update({#'default': DEFAULT_FIELDS,
                                            'translations': TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF})
ANNOTATION_POST_ES_KWARGS['_source'].update({#'default': DEFAULT_FIELDS,
                                             'translations': TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF})
QUERY_GET_ES_KWARGS['_source'].update({'default': DEFAULT_FIELDS, 'translations': TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF})
QUERY_POST_ES_KWARGS['_source'].update({'default': DEFAULT_FIELDS, 'translations': TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF})
# Control KWARGS
QUERY_GET_CONTROL_KWARGS['q'].update({'translations': DATASOURCE_TRANSLATION_TYPEDEF})
# query builder KWARGS
ANNOTATION_GET_ESQB_KWARGS.update(SPECIES_TYPEDEF)
ANNOTATION_POST_ESQB_KWARGS.update(SPECIES_TYPEDEF)
QUERY_GET_ESQB_KWARGS.update(SPECIES_TYPEDEF)
QUERY_POST_ESQB_KWARGS.update(SPECIES_TYPEDEF)
QUERY_POST_ESQB_KWARGS['scopes'].update({'translations': TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF})
| 6,590 | 2,292 |
#!/usr/bin/env python
import os
from pathlib import Path
home = str(Path.home())  # user's home directory, used for ~/.pypirc
import datetime
now = datetime.datetime.now()  # current date, used for the LICENCE copyright year
def main():
    """Interactively scaffold a new pip package, build a wheel and upload it.

    Prompts for package metadata, writes the package skeleton (package dir,
    __init__.py/__main__.py, setup.py, LICENCE, README.md and ~/.pypirc),
    copies the user's script in as the package module, then builds and
    uploads the distribution with twine and optionally pushes to git.

    SECURITY NOTE(review): the interactive answers are interpolated into
    shell commands (os.system) without quoting; a hostile value could inject
    shell commands. Consider subprocess.run([...], shell=False).
    """
    print("Please enter the following questions to generate your new Pip!")
    name = input("Pip name: ")
    description = input("Description: ")
    author_name = input("Author name: ")
    author_website = input("Author website: ")
    author_email = input("Author email: ")
    git_repo = input("Git repository: ")
    input_script = input("Enter the absolute path to a python script (code should be wrapped in a `def main():` function): ")
    pypi_username = input("Pypi username: ")

    # Nothing to do without a package name.
    if not name:
        return

    # Portable replacement for the previous `os.system(f"mkdir -p ...")`
    # (also removes one shell-injection vector).
    os.makedirs(f"{name}/{name}", exist_ok=True)

    # All files are written with context managers so handles are never leaked.
    with open(f"{name}/{name}/__init__.py", "w") as f:
        f.write("")

    with open(f"{name}/{name}/__main__.py", "w") as f:
        f.write(f"from .{name} import main\n")
        f.write("main()")

    setup_file = f"""
import setuptools
setuptools.setup(
    name='{name}',
    version='0.1',
    author="{author_name}",
    author_email="{author_email}",
    description="{description}",
    long_description="{description}",
    url="{git_repo}",
    packages=["{name}"],
    entry_points = {{
        "console_scripts": ['{name} = {name}.{name}:main']
    }},
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
"""
    with open(f"{name}/setup.py", "w") as f:
        f.write(setup_file)

    licence_file = f"""
Copyright (c) {now.year} {author_name} {author_website}

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
    with open(f"{name}/LICENCE", "w") as f:
        f.write(licence_file)

    readme_file = f"""
# {name}

{description}
"""
    with open(f"{name}/README.md", "w") as f:
        f.write(readme_file)

    pypirc_file = f"""
[distutils]
index-servers=pypi

[pypi]
repository = https://upload.pypi.org/legacy/
username = {pypi_username}
"""
    with open(f"{home}/.pypirc", "w") as f:
        f.write(pypirc_file)

    with open(input_script, "r") as f:
        script = f.read()
    if not script:
        return None

    with open(f"{name}/{name}/{name}.py", "w") as f:
        f.write(script)

    # Build the wheel and upload it with twine.
    os.chdir(name)
    os.system("python -m pip install --upgrade pip setuptools wheel")
    os.system("python -m pip install tqdm")
    os.system("python -m pip install twine")
    os.system("python setup.py bdist_wheel")
    os.system("python -m twine upload dist/*")

    # Optionally initialise a git repo and push the first commit.
    if git_repo:
        os.system("git init")
        os.system(f"git add LICENCE README.md {name}/ setup.py")
        os.system(f"git commit -m 'Pushing code for {name} version 0.1'")
        os.system(f"git remote add origin {git_repo}")
        os.system("git push -u origin master")

    print("Makepip is complete!")


if __name__ == "__main__":
    main()
| 3,785 | 1,363 |
def funny(x, y):
    """Return the sum (or concatenation) of x and y."""
    result = x + y
    return result
#!/usr/bin/env python
# NOTE(review): this script uses Python 2 print statements and dict.iteritems
# further below; it will not run under Python 3.
from oauth2client.service_account import ServiceAccountCredentials
from os import path
import os, requests, sys, argparse

# Environment variable naming the service-account JSON key file.
SERVICE_ACCOUNT_KEY_FILE_NAME='SERVICE_ACCOUNT_KEY_FILE'
# Hangouts Chat REST API base URL and OAuth scope.
HANGOUTS_CHATS_API='https://chat.googleapis.com/v1'
GOOGLE_CHAT_SCOPE='https://www.googleapis.com/auth/chat.bot'
# JSON keys for the paginated list responses.
SPACES_KEY='spaces'
MEMBERS_KEY='memberships'
def login(session, service_account_key_file):
    """Attach a Google Chat bearer token to `session` using the service account key."""
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        service_account_key_file, [GOOGLE_CHAT_SCOPE])
    token = credentials.get_access_token().access_token
    session.headers.update({'Authorization': 'Bearer ' + token})
def get_spaces(session):
    """List every chat space visible to the bot, following pagination."""
    spaces_url = "{0}/spaces".format(HANGOUTS_CHATS_API)
    return handle_pagination_items(session, spaces_url, SPACES_KEY)
def get_members_in_space(session, space):
    """Return the human members that have JOINED the given space."""
    members_url = "{0}/{1}/members".format(HANGOUTS_CHATS_API, space["name"])
    all_members = handle_pagination_items(session, members_url, MEMBERS_KEY)
    return [m for m in all_members
            if m["state"] == "JOINED" and m["member"]["type"] == "HUMAN"]
def get_spaces_with_members(session):
    """Map space name -> {"space": ..., "members": human members} for ROOM spaces."""
    spaces_with_members = {}
    for space in get_spaces(session):
        if space["type"] != "ROOM":
            continue
        spaces_with_members[space["name"]] = {
            "space": space,
            "members": get_members_in_space(session, space),
        }
    return spaces_with_members
def handle_pagination_items(session, url, key, next_page_token=None):
    """Collect all items under `key` from `url`, following nextPageToken pages.

    Iterative rewrite of the recursive original; results are returned in the
    same page order.
    """
    collected = []
    token = next_page_token
    while True:
        params = {}
        if token is not None and token != "":
            params["pageToken"] = token
        payload = session.get(url, params=params).json()
        collected.extend(payload[key])
        token = payload.get("nextPageToken", "")
        if token == "":
            return collected
def encode_text(text):
    """UTF-8-encode `text`; falsy values (None, "") pass through unchanged."""
    return text.encode("utf-8") if text else text
# Command-line entry: print member statistics for each chat room.
parser = argparse.ArgumentParser(description='Gather Google Hangouts Statistics.')
parser.add_argument("-m","--show-members", help="Show members in each space")
args = parser.parse_args()
show_members = args.show_members

# The key file location must be provided via the environment.
service_account_key_file = os.environ.get(SERVICE_ACCOUNT_KEY_FILE_NAME)
if not service_account_key_file:
    print "Error: Service Account Key File Location is Required!"
    sys.exit(1)
if not path.exists(service_account_key_file):
    print "Error: Service Account Key File Does Not Exist!"
    sys.exit(1)

session = requests.Session()
# NOTE(review): login() always returns None, so this error branch is dead code.
error = login(session, service_account_key_file)
if error is not None:
    print error
    sys.exit(1)

spaces_with_members = get_spaces_with_members(session)
print "=== Statistics for Google Hangouts Chat\n"
for key, value in spaces_with_members.iteritems():
    print "- {0} - {1} Members".format(encode_text(value["space"]["displayName"]), len(value["members"]))
    if show_members is not None:
        for member in value["members"]:
            print " - {0}".format(encode_text(member["member"]["displayName"]))
# import libraries
import clr
import os
from os import listdir
import System
from System.IO import SearchOption
from System import Environment
# import pyrevit libraries
from pyrevit import forms
from pyrevit import revit,DB

# Get the active Revit document.
doc = revit.doc

# Open the folder containing the current document; for workshared models fall
# back to the local Revit cache folder under %LOCALAPPDATA%.
# NOTE(review): both handlers below use bare `except:` -- they also swallow
# unexpected errors, not just "document has no path".
try:
    # Full path of the current document (fails/empty when never saved).
    curdoc = DB.Document.PathName.GetValue(doc)
    curdir = curdoc.rsplit('\\',1)
    os.startfile(curdir[0])
except:
    try:
        # Raises when the model is not workshared, which routes to the alert.
        guid = doc.WorksharingCentralGUID
        # Turn ...\AppData\Roaming into ...\AppData, then descend to Local\Autodesk\Revit.
        AppDataList = Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData).split("\\")
        AppDataList.pop(-1)
        AppData = "\\".join(AppDataList)
        location = AppData + "\\Local\\Autodesk\\Revit"
        os.startfile(location)
    except:
        forms.alert('Cannot find the file. This may be because the document is not yet saved to a location, or the path is not accessible to this script for opening.', title='Script cancelled')
#!/usr/bin/python
# # Copyright 2020 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# author Alok Ranjan (alok.ranjan2@hpe.com)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
description: Manage the storage network configuration on the HPE Nimble Storage group.
module: hpe_nimble_network
options:
activate:
required: False
type: bool
description:
- Activate a network configuration.
array:
required: False
type: list
elements: dict
description:
- List of array network configs.
change_name:
required: False
type: str
description:
- Change name of the existing network config.
iscsi_automatic_connection_method:
required: False
type: bool
description:
- Whether automatic connection method is enabled. Enabling this means means redirecting connections from the specified iSCSI
discovery IP address to the best data IP address based on connection counts.
iscsi_connection_rebalancing:
required: False
type: bool
description:
- Whether rebalancing is enabled. Enabling this means rebalancing iSCSI connections by periodically breaking existing
connections that are out-of-balance, allowing the host to reconnect to a more appropriate data IP address.
ignore_validation_mask:
required: False
type: int
description:
- Indicates whether to ignore the validation.
mgmt_ip:
required: False
type: str
description:
- Management IP address for the Group. Four numbers in the range (0,255) separated by periods.
name:
required: True
type: str
choices:
- active
- backup
- draft
description:
- Name of the network configuration. Use the name 'draft' when creating a draft configuration.
secondary_mgmt_ip:
required: False
type: str
description:
- Secondary management IP address for the Group. Four numbers in the range [0,255] separated by periods.
subnet:
required: False
type: list
elements: dict
description:
- List of subnet configs.
route:
required: False
type: list
elements: dict
description:
- List of static routes.
state:
required: True
choices:
- create
- present
- absent
type: str
description:
- The network config operation.
validate:
required: False
type: bool
description:
- Validate a network configuration.
extends_documentation_fragment: hpe.nimble.hpe_nimble
short_description: Manage the HPE Nimble Storage network configuration.
version_added: "2.9.0"
'''
EXAMPLES = r'''
# if state is create, then create network config, fails if it exist or cannot create
# if state is present, then create network config if not present ,else success
- name: Create network config
hpe_nimble_network:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
route: "{{ route }}"
subnet: "{{ subnet }}"
array: "{{ array }}"
iscsi_automatic_connection_method: true
iscsi_connection_rebalancing: False
mgmt_ip: "{{ mgmt_ip }}"
state: "{{ state | default('present') }}"
- name: Delete network config
hpe_nimble_network:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
state: "absent"
- name: Validate network config
hpe_nimble_network:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
state: "present"
ignore_validation_mask: 1
validate: true
- name: Activate Network config
hpe_nimble_network:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
state: "present"
ignore_validation_mask: 1
activate: true
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
try:
from nimbleclient.v1 import client
except ImportError:
client = None
import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
def create_update_network_config(
        client_obj,
        name,
        state,
        iscsi_automatic_connection_method,
        iscsi_connection_rebalancing,
        mgmt_ip,
        change_name,
        **kwargs):
    """Create the named network config, or update it when it already exists.

    Returns a 5-tuple (return_status, changed, message, changed_attrs, resp_attrs).
    With state == "create" an already-existing config is reported as a failure;
    with state == "present" it is updated in place (optionally renamed via
    change_name).
    """
    if utils.is_null_or_empty(name):
        return (False, False, "Create network config failed as name is not present.", {}, {})
    try:
        network_resp = client_obj.network_configs.get(id=None, name=name)
        if utils.is_null_or_empty(network_resp):
            # Not present yet: create it with the non-null extra kwargs.
            params = utils.remove_null_args(**kwargs)
            network_resp = client_obj.network_configs.create(name=name,
                                                             iscsi_automatic_connection_method=iscsi_automatic_connection_method,
                                                             iscsi_connection_rebalancing=iscsi_connection_rebalancing,
                                                             mgmt_ip=mgmt_ip,
                                                             **params)
            return (True, True, f"Network config '{name}' created successfully.", {}, network_resp.attrs)
        else:
            if state == "create":
                return (False, False, f"Network config '{name}' cannot be created as it is already present in given state.", {}, network_resp.attrs)
            # update case
            kwargs['name'] = change_name
            changed_attrs_dict, params = utils.remove_unchanged_or_null_args(network_resp, **kwargs)
            # even though some of the attributes have not changed but it still has to be passed in case of update.
            params = utils.remove_null_args(**kwargs)
            if changed_attrs_dict.__len__() > 0:
                network_resp = client_obj.network_configs.update(id=network_resp.attrs.get("id"),
                                                                 name=name,
                                                                 iscsi_automatic_connection_method=iscsi_automatic_connection_method,
                                                                 iscsi_connection_rebalancing=iscsi_connection_rebalancing,
                                                                 mgmt_ip=mgmt_ip,
                                                                 **params)
                return (True, True, f"Network config '{name}' already present. Modified the following attributes '{changed_attrs_dict}'",
                        changed_attrs_dict, network_resp.attrs)
            else:
                return (True, False, f"Network config '{network_resp.attrs.get('name')}' already present in given state.", {}, network_resp.attrs)
    except Exception as ex:
        return (False, False, f"Network config creation failed |'{ex}'", {}, {})
def delete_network_config(
        client_obj,
        name):
    """Delete the named network config.

    Returns a 4-tuple (return_status, changed, message, changed_attrs).
    """
    if utils.is_null_or_empty(name):
        return (False, False, "Delete network config failed as name is not present.", {})
    try:
        config = client_obj.network_configs.get(id=None, name=name)
        if utils.is_null_or_empty(config):
            return (False, False, f"Network config '{name}' cannot be deleted as it is not present.", {})
        client_obj.network_configs.delete(id=config.attrs.get("id"))
        return (True, True, f"Deleted network config '{name}' successfully.", {})
    except Exception as ex:
        return (False, False, f"Delete network config failed |'{ex}'", {})
def validate_network_config(
        client_obj,
        name,
        ignore_validation_mask):
    """Validate the named network config via the SDK.

    Returns a 4-tuple (return_status, changed, message, changed_attrs);
    validation never flags the config as changed.
    """
    if utils.is_null_or_empty(name):
        return (False, False, "Validate network config failed as name is not present.", {})
    try:
        config = client_obj.network_configs.get(id=None, name=name)
        if utils.is_null_or_empty(config):
            return (False, False, f"Network config '{name}' cannot be validated as it is not present.", {})
        client_obj.network_configs.validate_netconfig(
            id=config.attrs.get("id"),
            ignore_validation_mask=ignore_validation_mask)
        return (True, False, f"Validated network config '{name}' successfully.", {})
    except Exception as ex:
        return (False, False, f"Validate Network config failed |'{ex}'", {})
def activate_network_config(
        client_obj,
        name,
        ignore_validation_mask):
    """Activate the named network config via the SDK.

    Returns a 4-tuple (return_status, changed, message, changed_attrs).
    """
    if utils.is_null_or_empty(name):
        return (False, False, "Activate network config failed as name is not present.", {})
    try:
        config = client_obj.network_configs.get(id=None, name=name)
        if utils.is_null_or_empty(config):
            return (False, False, f"Network config '{name}' cannot be activated as it is not present.", {})
        client_obj.network_configs.activate_netconfig(id=config.attrs.get("id"),
                                                      ignore_validation_mask=ignore_validation_mask)
        return (True, True, f"Activated network config '{name}' successfully.", {})
    except Exception as ex:
        return (False, False, f"Activate Network config failed |'{ex}'", {})
def main():
    """Module entry point: parse arguments, connect to the array and dispatch
    the requested network-config operation (create/update, delete, validate
    or activate)."""
    # Ansible argument spec for this module.
    fields = {
        "activate": {
            "required": False,
            "type": "bool",
            "no_log": False
        },
        "array": {
            "required": False,
            "type": "list",
            "elements": 'dict',
            "no_log": False
        },
        "change_name": {
            "required": False,
            "type": "str",
            "no_log": False
        },
        "iscsi_automatic_connection_method": {
            "required": False,
            "type": "bool",
            "no_log": False
        },
        "iscsi_connection_rebalancing": {
            "required": False,
            "type": "bool",
            "no_log": False
        },
        "ignore_validation_mask": {
            "required": False,
            "type": "int",
            "no_log": False
        },
        "mgmt_ip": {
            "required": False,
            "type": "str",
            "no_log": False
        },
        "name": {
            "required": True,
            "choices": ['active',
                        'backup',
                        'draft'
                        ],
            "type": "str",
            "no_log": False
        },
        "secondary_mgmt_ip": {
            "required": False,
            "type": "str",
            "no_log": False
        },
        "subnet": {
            "required": False,
            "type": "list",
            "elements": 'dict',
            "no_log": False
        },
        "route": {
            "required": False,
            "type": "list",
            "elements": 'dict',
            "no_log": False
        },
        "state": {
            "required": True,
            "choices": ['create',
                        'present',
                        'absent'
                        ],
            "type": "str"
        },
        "validate": {
            "required": False,
            "type": "bool",
            "no_log": False
        }
    }
    # Merge in the common host/username/password auth options.
    default_fields = utils.basic_auth_arg_fields()
    fields.update(default_fields)
    # These options are mandatory when creating a brand-new config.
    required_if = [('state', 'create', ['array', 'iscsi_automatic_connection_method', 'iscsi_connection_rebalancing', 'mgmt_ip', 'subnet', 'route'])]
    module = AnsibleModule(argument_spec=fields, required_if=required_if)
    if client is None:
        module.fail_json(msg='Python nimble-sdk could not be found.')
    hostname = module.params["host"]
    username = module.params["username"]
    password = module.params["password"]
    activate = module.params["activate"]
    array = module.params["array"]
    iscsi_automatic_connection_method = module.params["iscsi_automatic_connection_method"]
    iscsi_connection_rebalancing = module.params["iscsi_connection_rebalancing"]
    ignore_validation_mask = module.params["ignore_validation_mask"]
    mgmt_ip = module.params["mgmt_ip"]
    name = module.params["name"]
    change_name = module.params["change_name"]
    secondary_mgmt_ip = module.params["secondary_mgmt_ip"]
    subnet = module.params["subnet"]
    route = module.params["route"]
    state = module.params["state"]
    validate = module.params["validate"]
    if (username is None or password is None or hostname is None):
        module.fail_json(
            msg="Missing variables: hostname, username and password is mandatory.")
    # defaults
    return_status = changed = False
    msg = "No task to run."
    resp = None
    try:
        client_obj = client.NimOSClient(
            hostname,
            username,
            password
        )
        # States: create/update only when neither validate nor activate was requested.
        if ((validate is None or validate is False)
                and (activate is None or activate is False)
                and (state == "create" or state == "present")):
            # if not client_obj.network_configs.get(id=None, name=name) or state == "create":
            return_status, changed, msg, changed_attrs_dict, resp = create_update_network_config(
                client_obj,
                name,
                state,
                iscsi_automatic_connection_method,
                iscsi_connection_rebalancing,
                mgmt_ip,
                change_name,
                array_list=array,
                ignore_validation_mask=ignore_validation_mask,
                secondary_mgmt_ip=secondary_mgmt_ip,
                subnet_list=subnet,
                route_list=route)
        elif state == "absent":
            return_status, changed, msg, changed_attrs_dict = delete_network_config(client_obj, name)
        elif state == "present" and validate is True:
            return_status, changed, msg, changed_attrs_dict = validate_network_config(client_obj, name, ignore_validation_mask)
        elif state == "present" and activate is True:
            return_status, changed, msg, changed_attrs_dict = activate_network_config(client_obj, name, ignore_validation_mask)
    except Exception as ex:
        # failed for some reason.
        msg = str(ex)
    if return_status:
        if utils.is_null_or_empty(resp):
            module.exit_json(return_status=return_status, changed=changed, msg=msg)
        else:
            module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
    else:
        module.fail_json(return_status=return_status, changed=changed, msg=msg)


if __name__ == '__main__':
    main()
| 15,323 | 4,325 |
from requests_html import HTMLSession
import re
from .blog.models import (Article, Tag, Category)
class Spider:
    """Scrape article listings and details from python.jobbole.com and store
    them as blog Article models.

    NOTE(review): parser() extracts author/category/tag/text but only passes
    created_time to Article.objects.create -- the persistence call looks
    incomplete.
    """

    def __init__(self):
        # NOTE(review): 'sesion' is a misspelling of 'session' (kept as-is;
        # renaming would touch every method).
        self.sesion = HTMLSession()

    def get_list(self):
        """Return the set of article detail URLs on the all-posts page,
        or None when the request fails."""
        url = 'http://python.jobbole.com/all-posts/'
        resp = self.sesion.get(url)
        if resp.status_code == 200:
            # NOTE(review): the dots in the pattern are unescaped and match
            # any character; '\d+' keeps the match narrow in practice.
            links = re.findall('(http://python.jobbole.com/\d+/)', resp.text)
            return set(links)
        return

    def get_detail(self, detail_url):
        """Fetch one article detail page; returns the response or None."""
        resp = self.sesion.get(detail_url)
        if resp.status_code == 200:
            # text = resp.text
            return resp

    def parser(self, resp):
        """Extract body text, author, created time, category and tag from a
        detail response, then persist an Article (currently only created_time)."""
        #text = resp.html.find('.entry > p')
        text = ''.join(list(map(lambda x: x.text, resp.html.find('div.entry p'))))
        author = resp.html.find('div.entry div.copyright-area a', first=True).text
        # Meta line looks like "date · category · ... · tag", separated by '·'.
        temp = resp.html.find('p.entry-meta-hide-on-mobile', first=True).text.strip().split('·')
        createtime = temp[0]
        category = temp[1]
        tag = temp[-1]
        # print(createtime)
        # print(category)
        # print(tag)
        # print('================================================')
        Article.objects.create(created_time=createtime, )
if __name__ == '__main__':
    # Crawl every listed article and parse/persist each detail page.
    test = Spider()
    links = test.get_list()
    if links:
        for i in links:
            resp = test.get_detail(i)
            text = test.parser(resp)
| 1,428 | 458 |
"""
自定义信号
"""
import django.dispatch
mysignal = django.dispatch.Signal(providing_args=["arg1","arg2"])
# 内置的信号会自动触发,自定义信号不可以。
| 131 | 71 |
"""
Date: Jul 2018
Author: Aciel Eshky
A script to create positive and negative samples using self-supervision.
"""
import os
import sys
import random
import pandas as pd
from numpy.random import seed as np_seed
from ustools.folder_utils import get_utterance_id, get_dir_info
from ultrasync.create_sync_samples_utils import create_samples, save_samples_to_disk
random.seed(2018)
np_seed(2018)
def mirror_folder_structure(input_path, output_path):
    """
    Create a mirror of the input path folder structure in output path.
    Adapted from https://stackoverflow.com/a/40829525/5190279

    Only directories that contain at least one .ult file are mirrored.
    NOTE: input_path must end with a path separator — the relative part of
    each walked directory is obtained by slicing off len(input_path).

    :param input_path: root of the source tree (must end with a separator)
    :param output_path: root under which the mirrored tree is created
    :return: a list of [core_dir, generated_dir] pairs for dirs with .ult files
    """
    folder_pairs = []
    for dirpath, dirnames, filenames in os.walk(input_path):
        dirnames.sort()
        if any(fname.endswith('.ult') for fname in filenames):
            new_output_folder = os.path.join(output_path, dirpath[len(input_path):])
            if not os.path.isdir(new_output_folder):
                print("Creating folder: \t" + new_output_folder)
                os.makedirs(new_output_folder)
            else:
                # BUG FIX: message said "exits" instead of "exists".
                print("Folder already exists: \t" + new_output_folder)
            # BUG FIX: the original appended pairs whenever `filenames` was
            # non-empty, even when no .ult file was present — in that case
            # new_output_folder was undefined (NameError) or stale from a
            # previous iteration. Pairs are now recorded only for mirrored
            # (.ult-containing) directories.
            folder_pairs.append([dirpath, new_output_folder])
    return folder_pairs
def get_file_basenames(directory):
    """Return the set of unique basenames (text before the first '.') in *directory*."""
    return {entry.split('.')[0] for entry in os.listdir(directory)}
def create_sync_data(folder_pairs):
    """
    Generate positive/negative sync samples for every (core, generated)
    folder pair, skipping utterances already present in the target folder
    and non-speech ("E") utterances.

    :param folder_pairs: list of [core_dir, generated_dir] pairs
    :return: list of chunk file names created on disk
    """
    files_created = []
    df = pd.DataFrame(folder_pairs, columns=("core", "generated"))
    for index, row in df.iterrows():  # itertools.islice(df.iterrows(), 80):
        print("Processing: ", row['core'], row['generated'])
        core_dir = row['core']
        target_dir = row['generated']
        basenames = get_file_basenames(core_dir)
        target_basenames = get_file_basenames(target_dir)
        for b in basenames:
            # BUG FIX: the original test was `if [b in i for i in
            # target_basenames]:` — a list comprehension, truthy whenever the
            # target dir was non-empty at all, so every utterance after the
            # first was wrongly skipped. any() performs the intended check.
            if any(b in i for i in target_basenames):
                print(b, "files already exist in target directory.")
            elif "E" in b:
                print("Skipping utterance of type \"non-speech\" (E):", os.path.join(target_dir, b))
            else:
                try:
                    info = get_dir_info(core_dir)
                    root_id = get_utterance_id(info['dataset'], info['speaker'], info['session'], b)
                    print(root_id)
                    samples = create_samples(core_dir, b)
                    chunk_names = save_samples_to_disk(samples, root_id, target_dir)
                    # BUG FIX (idiom): was `list.extend(files_created, ...)`.
                    files_created.extend(chunk_names)
                # BUG FIX: bare `except:` also swallowed KeyboardInterrupt.
                except Exception:
                    print("Unexpected error:", sys.exc_info()[0])
                    print("not_processed: ", core_dir, b)
    return files_created
def main():
    """Mirror the UltraSuite corpus layout and generate sync samples per dataset."""
    ultrasuite = ["uxtd", "uxssd", "upx"]

    # the location of the original ultrasuite data
    input_path = sys.argv[1]  # "/group/project/ultrax2020/UltraSuite/"

    # the destination: where the sync dataset will be stored. This will
    # consist of samples, each corresponding to 200 ms of ultrasound and audio.
    output_path = sys.argv[2]  # "/disk/scratch_big/../SyncDataSmall"

    for dataset in ultrasuite:
        docs = os.path.join(output_path, "docs", dataset)
        if not os.path.exists(docs):
            os.makedirs(docs)

        input_path_data = os.path.join(input_path, "core-" + dataset, "core/")  # this slash is very important!
        output_path_data = os.path.join(output_path, dataset)

        print("processing", dataset,
              "input directory is:", input_path_data,
              "output directory is:", output_path_data)

        # source and destination folder pairs; destination mirrors source.
        folder_pairs = mirror_folder_structure(input_path_data, output_path_data)

        # save the pairs for logging purposes.
        # BUG FIX: columns were passed as a set ({"source", "target"}), whose
        # iteration order is arbitrary, so the CSV headers could swap between
        # runs; a list fixes the order.
        pd.DataFrame(data=folder_pairs, columns=["source", "target"]).to_csv(
            os.path.join(docs, "folder_pairs.csv"), index=False)

        # create and save the data
        file_names = create_sync_data(folder_pairs)

        # save the sample file names for logging purposes
        pd.DataFrame(data=file_names, columns=["file_names"]).to_csv(
            os.path.join(docs, "file_names.csv"), index=False)
if __name__ == "__main__":
main()
| 4,674 | 1,457 |
# cx_Freeze build script: freezes game.py into a standalone Windows executable.
import cx_Freeze
import os

# cx_Freeze needs the Tcl/Tk library paths set explicitly on Windows/Py3.6.
# NOTE(review): machine-specific absolute paths — adjust per build machine.
os.environ['TCL_LIBRARY'] = r'C:\Users\Khanh Huynh\AppData\Local\Programs\Python\Python36\tcl\tcl8.6'
os.environ['TK_LIBRARY'] = r'C:\Users\Khanh Huynh\AppData\Local\Programs\Python\Python36\tcl\tk8.6'

executables = [cx_Freeze.Executable('game.py')]

cx_Freeze.setup(
    name = '64-bit Ninja',
    version = '1.05',
    author = 'Khanh H',
    # bundle pygame plus the image assets next to the frozen executable
    options = {'build_exe': {'packages': ['pygame'], 'include_files': ['icon.png', 'idle1.png']}},
    executables = executables
) | 515 | 220 |
# Build word-cloud images comparing restaurant names around two areas
# (Xiaozhai vs. Hangtiancheng), segmenting the Chinese shop names with jieba.
from wordcloud import WordCloud
import jieba
import matplotlib.pyplot as plt

# Restaurant names near Xiaozhai (kept verbatim — they are the input data).
xiaozhai = ['佛伦萨·古典火炉披萨', '蘑菇爱上饭', '珍味林饺子馆', '巷子火锅', '千家粗粮王',
            '猫堂小站猫咪主题馆', 'CoCo都可', '福气焖锅烤肉', '5号酒馆', '82°C魔力焖锅',
            '小肥羊', '长安大牌档之长安集市', '泰熙家', '大自在火锅', '拉菲达牛排自助',
            '猫咪餐厅', '京御煌三汁焖锅', '赵家腊汁肉', '米多多烘焙屋', '瑞可爺爺的店',
            '阿姨奶茶专卖', '百富烤霸', '三姊妹香辣土豆片夹馍', '小哥过桥米线',
            '太食獸泰式茶餐厅', '和記丸子専買', '0057香辣虾',
            'M12铁板餐厅', '重庆鸡公煲',
            '洪氏嗨捞·新派猪肚鸡'
            ]
# Restaurant names near Hangtiancheng.
hangtiancheng = ['辣条印象', '福临北京烤鸭', '味都西饼店', '刘大饼香辣土豆片夹馍', '韩味坊牛排自助',
                 '星期八工坊', '红透天自助涮烤', '和福顺养生焖锅', '臻膳轩自助涮烤城',
                 '李想大虾火锅花园',
                 '欧味轩艺术蛋糕', '王府臻品火锅', '艾米客蛋糕', '红透天自助涮烤',
                 '川渝小渔哥', '面道'
                 ]

# Segment each shop name into words (jieba.cut returns a generator per name).
xiaozhai_words = []
hangtiancheng_words = []
for word in xiaozhai:
    xiaozhai_words.append(jieba.cut(word))
for word in hangtiancheng:
    hangtiancheng_words.append(jieba.cut(word))

# Join all segments into one '/'-separated string per area — the text input
# WordCloud expects.
res_xiaozhai = ""
res_hangtiancheng = ""
for i in range(len(xiaozhai_words)):
    res_xiaozhai += ("/" + "/".join(xiaozhai_words[i]))
for i in range(len(hangtiancheng_words)):
    res_hangtiancheng += ("/" + "/".join(hangtiancheng_words[i]))

# simsun.ttf is required so the clouds can render CJK glyphs.
w1 = WordCloud(font_path="simsun.ttf", background_color='white')
w1.generate(res_xiaozhai)
w1.to_file('小寨附近餐饮店铺词云图.png')
w2 = WordCloud(font_path="simsun.ttf", background_color='white')
w2.generate(res_hangtiancheng)
w2.to_file("航天城附近餐饮店铺词云图.png")
| 1,475 | 1,069 |
import logging
import json
import warnings
import time
import datetime as dt
from ast import literal_eval as make_tuple
from flask import jsonify, abort, Response, request
from server import app, cln_client
from cloudant.error import CloudantException, ResultException
from cloudant.query import Query
import yfinance as yf
import numpy as np
from pandas_datareader import data as pdr
from yahoo_fin import stock_info as si
import pandas as pd
def ticker_details(symbol, backwards):
    """
    Fetch ESG ratings, ESG indicators and company details for one ticker
    from three Cloudant databases.

    :param symbol: ticker symbol; matched upper-cased.
    :param backwards: lookback period string.
        NOTE(review): currently unused in this function — confirm intent.
    :return: dict keyed 'ratings'/'indicators'/'details'; each value is a
        list of matching documents, or {} on connection/query failure or
        when nothing matches.
    """
    reply = dict()
    # aspect name -> Cloudant database name
    dbs = {
        'ratings': 'esg-ratings-ibm-cfc',
        'indicators': 'esg-indicators-ibm-cfc',
        'details': 'ticker-details-ibm-cfc'
    }
    for aspect, db in dbs.items():
        try:
            # NOTE(review): presumably returns a handle to the existing DB
            # rather than recreating it — verify client semantics.
            conn = cln_client.create_database(db)
        except CloudantException as e:
            logging.critical(f'DB/{aspect} connection failure: {e}')
            reply[aspect] = {}
        else:
            # NOTE(review): if the DB does not exist, reply[aspect] is never
            # set — potential KeyError for callers reading that key.
            if conn.exists():
                logging.info(f'Using existing {aspect} DB: {db}')
                # the details DB keys companies by 'cfc_company'; the other
                # two key by stock symbol
                field = ('cfc_company' if aspect == 'details' else 'stock_symbol')
                selector={field: symbol.upper()}
                try:
                    resp = conn.get_query_result(selector,
                                                 raw_result=True,
                                                 limit=100)
                    # brief pause, presumably to stay under the Cloudant
                    # rate limit — TODO confirm
                    time.sleep(0.075)
                except ResultException as e:
                    logging.critical(f'Query/{aspect} failed: {e}')
                    reply[aspect] = {}
                else:
                    if len(resp['docs']) == 0:
                        logging.warning(f'{aspect} not found for {symbol}')
                        reply[aspect] = {}
                    else:
                        result = list()
                        for doc in resp['docs']:
                            result.append(doc)
                        reply[aspect] = result
    return reply
@app.route("/catchall/<string:symbol>")
def catchall(symbol):
    """catchall route

    Correlates *symbol* against the Dow tickers over ~1 year of daily log
    returns, then returns the symbol's record decorated with ESG/details
    lookups for itself and every correlated ticker. Aborts with 400 on an
    empty symbol or when the symbol is not found in the correlation results.
    """
    if symbol is None or symbol == '':
        abort(Response(json.dumps({'Error': 'Invalid symbol provided'}), 400))
    backwards = request.args.get('period', '1mo')
    pd.set_option('display.max_rows', None)
    warnings.filterwarnings("ignore")
    yf.pdr_override()
    # one year of daily prices
    num_of_years = 1
    start = dt.date.today() - dt.timedelta(days = int(365.25*num_of_years))
    end = dt.date.today()
    # Dow constituents plus the requested symbol
    tickers = si.tickers_dow()
    tickers.append(symbol)
    dataset = pdr.get_data_yahoo(tickers, start, end)['Adj Close']
    # daily log returns
    stocks_returns = np.log(dataset/dataset.shift(1))
    # collect the diagonal (self-correlation) pairs so they can be dropped.
    # NOTE(review): the j-loop only ever adds (i, i) pairs; both (a, b) and
    # (b, a) off-diagonal entries survive the unstack — confirm intended.
    pairs_to_drop = set()
    cols = stocks_returns.columns
    for i in range(0, stocks_returns.shape[1]):
        for j in range(0, i+1):
            if i == j:
                pairs_to_drop.add((cols[i], cols[j]))
    au_corr = stocks_returns.corr().abs().unstack()
    au_corr = au_corr.drop(labels=pairs_to_drop)
    # for every ticker build its correlation map, strongest first
    final = list()
    for ticker in tickers:
        top = dict()
        top['target'] = ticker
        top['correlation'] = dict()
        for tpl, corr in json.loads(au_corr.to_json()).items():
            # the unstacked series keys serialize as stringified tuples
            pair = make_tuple(tpl)
            if ticker.lower() == pair[0].lower():
                top['correlation'].update({pair[1]: corr})
        top['correlation'] = dict(sorted(top['correlation'].items(), key=lambda item: item[1], reverse=True))
        final.append(top)
    for item in final:
        if item['target'].lower() == symbol.lower():
            # attach ESG data for the symbol and each correlated ticker
            item.update(ticker_details(symbol, backwards))
            item['correlations'] = list()
            for corp, corr_value in item['correlation'].items():
                corr_item = dict()
                corr_item['symbol'] = corp
                corr_item['value'] = corr_value
                corr_item.update(ticker_details(corp, backwards))
                item['correlations'].append(corr_item)
            del item['correlation']
            return jsonify(item)
            # NOTE(review): unreachable — the return above always exits first
            break
    else:
        # for-else: runs only when the loop found no matching target
        abort(Response(json.dumps({'Error': 'Stock/Correlation/ESG details not found for symbol'}), 400))
| 4,055 | 1,212 |
# Packaging configuration for the pymodoro package.
from setuptools import setup

setup(
    name='pymodoro',
    version='0.4',
    packages=['pymodoro'],
    # include everything under pymodoro/data/ in the installed package
    package_data={'pymodoro': ['data/*']},
    # two CLI entry points: plain pymodoro and the i3bar variant
    entry_points={
        "console_scripts": [
            "pymodoro = pymodoro.pymodoro:main",
            "pymodoroi3 = pymodoro.pymodoroi3:main"
        ]
    },
)
| 315 | 122 |
from django.conf import settings
from ralph.apps import RalphAppConfig
class DNS(RalphAppConfig):
    """App config for ralph.dns."""

    name = 'ralph.dns'

    def get_load_modules_when_ready(self):
        # Load the Hermes publishers module only when the integration is on.
        if not settings.ENABLE_HERMES_INTEGRATION:
            return []
        return ['publishers']
| 267 | 85 |
#! /usr/bin/python
# -*- encoding: utf-8 -*-
from __future__ import print_function, unicode_literals
from MDP import MDP
import version23
__author__ = 'fyabc'
# random.seed(0)
def getRandomPolicyValue():
    """
    Monte-Carlo estimate of the state values of a uniformly random policy.

    Runs `num` full episodes from each start state 1..5 and averages the
    discounted returns.

    :return: list of 10 floats; indices 1-5 hold the estimated state values.
    """
    values = [0.0 for _ in range(10)]
    num = 1000000
    echoEpoch = 10000

    mdp = MDP()

    # BUG FIX: range(1, num) ran only num - 1 episodes per state while the
    # final average divides by num; range(1, num + 1) makes them agree.
    for k in range(1, num + 1):
        for initState in range(1, 6):
            state = initState
            isTerminal = False
            gamma = 1.0
            value = 0.0
            # roll out one episode under the random policy, accumulating the
            # discounted return
            while not isTerminal:
                action = mdp.randomAction()
                isTerminal, state, reward = mdp.transform(state, action)
                value += gamma * reward
                gamma *= mdp.gamma
            values[initState] += value
        if k % echoEpoch == 0:
            # periodic progress report with the running averages
            print('k = %d, Average values of state 1-5 are:\n' % k,
                  [value / k for value in values[1:6]])
    for i in range(len(values)):
        values[i] /= num
    return values
def test():
    """Run the Monte-Carlo estimator and print the values of states 1-5."""
    estimates = getRandomPolicyValue()
    print('Average values of state 1-5 are:\n', estimates[1:6])


if __name__ == '__main__':
    test()
| 1,132 | 387 |
#!/usr/bin/env python
import os
import glob
import shutil
import pytest
import hypothesis.strategies as st
from hypothesis import given, settings
from radical.entk import Task
from radical.entk import states
import radical.entk.exceptions as ree
# Hypothesis settings
settings.register_profile("travis", max_examples=100, deadline=None)
settings.load_profile("travis")
# ------------------------------------------------------------------------------
#
def test_task_initialization():
    '''
    **Purpose**: Test if the task attributes have, thus expect, the correct data types
    '''

    t = Task()

    assert t._uid is None
    assert t.name is None
    assert t.state == states.INITIAL
    assert t.state_history == [states.INITIAL]
    assert t.executable is None
    assert t.arguments == list()
    assert t.pre_exec == list()
    assert t.post_exec == list()
    assert t.cpu_reqs['processes'] == 1
    assert t.cpu_reqs['process_type'] is None
    assert t.cpu_reqs['threads_per_process'] == 1
    assert t.cpu_reqs['thread_type'] is None
    assert t.gpu_reqs['processes'] == 0
    assert t.gpu_reqs['process_type'] is None
    assert t.gpu_reqs['threads_per_process'] == 0
    assert t.gpu_reqs['thread_type'] is None
    assert t.lfs_per_process == 0
    assert t.upload_input_data == list()
    assert t.copy_input_data == list()
    assert t.link_input_data == list()
    assert t.move_input_data == list()
    assert t.copy_output_data == list()
    # BUG FIX: this line previously duplicated the move_input_data assertion
    # above, leaving move_output_data untested.
    assert t.move_output_data == list()
    assert t.download_output_data == list()
    assert t.stdout is None
    assert t.stderr is None
    assert t.exit_code is None
    assert t.tag is None
    assert t.path is None
    assert t.parent_pipeline['uid'] is None
    assert t.parent_pipeline['name'] is None
    assert t.parent_stage['uid'] is None
    assert t.parent_stage['name'] is None
# ------------------------------------------------------------------------------
#
@given(s=st.text(),
       l=st.lists(st.text()),
       i=st.integers().filter(lambda x: type(x) == int),
       b=st.booleans())
def test_task_exceptions(s, l, i, b):
    '''
    **Purpose**: Test if all attribute assignments raise exceptions
                 for invalid values
    '''

    t = Task()

    data_type = [s, l, i, b]

    for data in data_type:

        # special case due to backward compatibility
        if not isinstance(data, str) and \
           not isinstance(data, list):
            with pytest.raises(ree.TypeError): t.executable = data

        if not isinstance(data, str):
            with pytest.raises(ree.TypeError): t.name = data
            with pytest.raises(ree.TypeError): t.path = data
            with pytest.raises(ree.TypeError): t.parent_stage = data
            with pytest.raises(ree.TypeError): t.parent_pipeline = data
            with pytest.raises(ree.TypeError): t.stdout = data
            with pytest.raises(ree.TypeError): t.stderr = data

        if not isinstance(data, list):
            with pytest.raises(ree.TypeError): t.pre_exec = data
            with pytest.raises(ree.TypeError): t.arguments = data
            with pytest.raises(ree.TypeError): t.post_exec = data
            with pytest.raises(ree.TypeError): t.upload_input_data = data
            with pytest.raises(ree.TypeError): t.copy_input_data = data
            with pytest.raises(ree.TypeError): t.link_input_data = data
            with pytest.raises(ree.TypeError): t.move_input_data = data
            with pytest.raises(ree.TypeError): t.copy_output_data = data
            with pytest.raises(ree.TypeError): t.download_output_data = data
            with pytest.raises(ree.TypeError): t.move_output_data = data

        # BUG FIX: the guard previously read `not isinstance(data, str) and
        # not isinstance(data, str)` — the same test twice.
        if not isinstance(data, str):
            # BUG FIX: all four invalid assignments used to sit inside one
            # pytest.raises block, so only the first ever executed; each
            # invalid process_type/thread_type now gets its own check.
            with pytest.raises(ree.ValueError):
                t.cpu_reqs = {'processes'          : 1,
                              'process_type'       : data,
                              'threads_per_process': 1,
                              'thread_type'        : None}
            with pytest.raises(ree.ValueError):
                t.cpu_reqs = {'processes'          : 1,
                              'process_type'       : None,
                              'threads_per_process': 1,
                              'thread_type'        : data}
            with pytest.raises(ree.ValueError):
                t.gpu_reqs = {'processes'          : 1,
                              'process_type'       : data,
                              'threads_per_process': 1,
                              'thread_type'        : None}
            with pytest.raises(ree.ValueError):
                t.gpu_reqs = {'processes'          : 1,
                              'process_type'       : None,
                              'threads_per_process': 1,
                              'thread_type'        : data}

        if not isinstance(data, int):
            with pytest.raises(ree.TypeError):
                t.cpu_reqs = {'processes'           : data,
                              'process_type'        : None,
                              'threads_per_process' : 1,
                              'thread_type'         : None}
            with pytest.raises(ree.TypeError):
                t.cpu_reqs = {'processes'           : 1,
                              'process_type'        : None,
                              'threads_per_process' : data,
                              'thread_type'         : None}
            with pytest.raises(ree.TypeError):
                t.gpu_reqs = {'processes'           : data,
                              'process_type'        : None,
                              'threads_per_process' : 1,
                              'thread_type'         : None}
            with pytest.raises(ree.TypeError):
                t.gpu_reqs = {'processes'           : 1,
                              'process_type'        : None,
                              'threads_per_process' : data,
                              'thread_type'         : None}
# ------------------------------------------------------------------------------
#
def test_dict_to_task():
    """Construct a Task from a dict and verify the attributes round-trip."""

    # the type checks must reject a non-string name
    with pytest.raises(ree.TypeError):
        Task(from_dict={'name': 1})

    d = {'name'      : 'foo',
         'pre_exec'  : ['bar'],
         'executable': 'buz',
         'arguments' : ['baz', 'fiz'],
         'cpu_reqs'  : {'processes'          : 1,
                        'process_type'       : None,
                        'threads_per_process': 1,
                        'thread_type'        : None},
         'gpu_reqs'  : {'processes'          : 0,
                        'process_type'       : None,
                        'threads_per_process': 0,
                        'thread_type'        : None}}

    task = Task(from_dict=d)
    for attr, expected in d.items():
        actual = task.__getattribute__(attr)
        assert actual == expected, '%s != %s' % (actual, expected)
# ------------------------------------------------------------------------------
#
def test_task_to_dict():
    '''
    **Purpose**: Test if the 'to_dict' function of Task class converts all
                 expected attributes of the Task into a dictionary
    '''

    # default-constructed task
    t = Task()
    d = t.to_dict()

    assert d == {'uid'                  : None,
                 'name'                 : None,
                 'state'                : states.INITIAL,
                 'state_history'        : [states.INITIAL],
                 'pre_exec'             : [],
                 'executable'           : None,
                 'arguments'            : [],
                 'post_exec'            : [],
                 'cpu_reqs'             : {'processes'           : 1,
                                           'process_type'        : None,
                                           'threads_per_process' : 1,
                                           'thread_type'         : None},
                 'gpu_reqs'             : {'processes'           : 0,
                                           'process_type'        : None,
                                           'threads_per_process' : 0,
                                           'thread_type'         : None},
                 'lfs_per_process'      : 0,
                 'upload_input_data'    : [],
                 'copy_input_data'      : [],
                 'link_input_data'      : [],
                 'move_input_data'      : [],
                 'copy_output_data'     : [],
                 'move_output_data'     : [],
                 'download_output_data' : [],
                 'stdout'               : None,
                 'stderr'               : None,
                 'exit_code'            : None,
                 'path'                 : None,
                 'tag'                  : None,
                 'parent_stage'         : {'uid' : None, 'name' : None},
                 'parent_pipeline'      : {'uid' : None, 'name' : None}}

    # fully populated task
    t = Task()
    t.uid = 'test.0017'
    t.name = 'new'
    t.pre_exec = ['module load abc']
    t.executable = ['sleep']
    t.arguments = ['10']
    t.cpu_reqs['processes'] = 10
    t.cpu_reqs['threads_per_process'] = 2
    t.gpu_reqs['processes'] = 5
    t.gpu_reqs['threads_per_process'] = 3
    t.lfs_per_process = 1024
    t.upload_input_data = ['test1']
    t.copy_input_data = ['test2']
    t.link_input_data = ['test3']
    t.move_input_data = ['test4']
    t.copy_output_data = ['test5']
    t.move_output_data = ['test6']
    t.download_output_data = ['test7']
    t.stdout = 'out'
    t.stderr = 'err'
    t.exit_code = 1
    t.path = 'a/b/c'
    t.tag = 'task.0010'
    t.parent_stage = {'uid': 's1', 'name': 'stage1'}
    t.parent_pipeline = {'uid': 'p1', 'name': 'pipeline1'}

    # to_dict() flattens executable == ['sleep'] to the string 'sleep', so
    # the expected dict is identical before and after the reassignment below.
    # (The original duplicated this 30-line literal verbatim.)
    expected = {'uid'                  : 'test.0017',
                'name'                 : 'new',
                'state'                : states.INITIAL,
                'state_history'        : [states.INITIAL],
                'pre_exec'             : ['module load abc'],
                'executable'           : 'sleep',
                'arguments'            : ['10'],
                'post_exec'            : [],
                'cpu_reqs'             : {'processes'           : 10,
                                          'process_type'        : None,
                                          'threads_per_process' : 2,
                                          'thread_type'         : None},
                'gpu_reqs'             : {'processes'           : 5,
                                          'process_type'        : None,
                                          'threads_per_process' : 3,
                                          'thread_type'         : None},
                'lfs_per_process'      : 1024,
                'upload_input_data'    : ['test1'],
                'copy_input_data'      : ['test2'],
                'link_input_data'      : ['test3'],
                'move_input_data'      : ['test4'],
                'copy_output_data'     : ['test5'],
                'move_output_data'     : ['test6'],
                'download_output_data' : ['test7'],
                'stdout'               : 'out',
                'stderr'               : 'err',
                'exit_code'            : 1,
                'path'                 : 'a/b/c',
                'tag'                  : 'task.0010',
                'parent_stage'         : {'uid': 's1', 'name' : 'stage1'},
                'parent_pipeline'      : {'uid': 'p1', 'name' : 'pipeline1'}}

    d = t.to_dict()
    assert d == expected

    # executable given as a plain string must serialize identically
    t.executable = 'sleep'
    d = t.to_dict()
    assert d == expected
# ------------------------------------------------------------------------------
#
def test_task_from_dict():
    '''
    **Purpose**: Test if the 'from_dict' function of Task class converts a
                 dictionary into a Task correctly with all the expected
                 attributes
    '''
    d = {'uid'                  : 're.Task.0000',
         'name'                 : 't1',
         'state'                : states.DONE,
         'state_history'        : [states.INITIAL, states.DONE],
         'pre_exec'             : [],
         'executable'           : '',
         'arguments'            : [],
         'post_exec'            : [],
         'cpu_reqs'             : {'processes'           : 1,
                                   'process_type'        : None,
                                   'threads_per_process' : 1,
                                   'thread_type'         : None},
         'gpu_reqs'             : {'processes'           : 0,
                                   'process_type'        : None,
                                   'threads_per_process' : 0,
                                   'thread_type'         : None},
         'lfs_per_process'      : 1024,
         'upload_input_data'    : [],
         'copy_input_data'      : [],
         'link_input_data'      : [],
         'move_input_data'      : [],
         'copy_output_data'     : [],
         'move_output_data'     : [],
         'download_output_data' : [],
         'stdout'               : 'out',
         'stderr'               : 'err',
         'exit_code'            : 555,
         'path'                 : 'here/it/is',
         'tag'                  : 'task.0010',
         'parent_stage'         : {'uid': 's1', 'name' : 'stage1'},
         'parent_pipeline'      : {'uid': 'p1', 'name' : 'pipe1'}}

    t = Task()
    t.from_dict(d)

    # the uid lands on the private attribute
    assert t._uid == d['uid']

    # every remaining key must round-trip onto an attribute of the same name
    for attr in ('name', 'state', 'state_history', 'pre_exec', 'executable',
                 'arguments', 'post_exec', 'cpu_reqs', 'gpu_reqs',
                 'lfs_per_process', 'upload_input_data', 'copy_input_data',
                 'link_input_data', 'move_input_data', 'copy_output_data',
                 'move_output_data', 'download_output_data', 'stdout',
                 'stderr', 'exit_code', 'path', 'tag', 'parent_stage',
                 'parent_pipeline'):
        assert getattr(t, attr) == d[attr]

    # a non-empty executable must also round-trip
    d['executable'] = 'sleep'
    t = Task()
    t.from_dict(d)
    assert t.executable == d['executable']
# ------------------------------------------------------------------------------
#
def test_task_assign_uid():
    """A fresh Task must receive the first generated uid ('task.0000')."""

    # wipe radical.utils' uid bookkeeping so numbering restarts at 0000
    try:
        home = os.environ.get('HOME', '/home')
        folder = glob.glob('%s/.radical/utils/test*' % home)
        for f in folder:
            shutil.rmtree(f)
    # BUG FIX: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; only filesystem errors are expected here.
    except OSError:
        pass
    t = Task()
    assert t.uid == 'task.0000'
# ------------------------------------------------------------------------------
#
def test_task_validate():
    """_validate() must reject an unknown state and an unconfigured task."""

    # an invalid state value is a ValueError
    t = Task()
    t._state = 'test'
    with pytest.raises(ree.ValueError):
        t._validate()

    # a freshly created task is missing mandatory fields
    with pytest.raises(ree.MissingError):
        Task()._validate()
# ------------------------------------------------------------------------------
#
if __name__ == '__main__':

    # run every test case when invoked directly (outside pytest)
    for case in (test_task_initialization,
                 test_task_exceptions,
                 test_dict_to_task,
                 test_task_to_dict,
                 test_task_from_dict,
                 test_task_assign_uid,
                 test_task_validate):
        case()
# ------------------------------------------------------------------------------
| 19,002 | 5,181 |
from enum import Enum
class CellState(Enum):
    # Display characters for a picross cell: unknown, known-empty, filled box.
    UNKNOWN = '.'
    EMPTY = ' '
    BOX = '■'

    def __str__(self):
        # Render as the bare display character (no "CellState." prefix).
        return self.value
class Cell():
    """One cell of the picross board, holding its current CellState."""

    def __init__(self, state=CellState.UNKNOWN):
        self.state = state

    def __str__(self):
        # BUG FIX: __str__ must return a str; returning the CellState member
        # itself raised "TypeError: __str__ returned non-string" on str(cell).
        return str(self.state)
class Clue():
    """A single picross clue: a run of `value` boxes, `filled` already placed."""

    def __init__(self, value, filled=0):
        self.value = value
        self.filled = filled

    def isFilled(self):
        """True once every box of this run has been placed."""
        return not self.remaining()

    def remaining(self):
        """Number of boxes still to place for this run."""
        return self.value - self.filled

    def __str__(self):
        return str(self.value)
class Clues():
    """The ordered clue list for one row or column of the board."""

    def __init__(self, *clues):
        self.clues = []
        for c in clues:
            self.clues.append(Clue(c))

    def fillMin(self, start, end, mark):
        """
        Mark the cells that must be boxes in every legal placement.

        :param start: first index of the line segment (inclusive)
        :param end: one past the last index
        :param mark: callback(index, CellState) that writes into the board
        """
        # minimum span the clue sequence occupies: each run plus one gap
        space = sum(c.value + 1 for c in self.clues) - 1
        # slack available when sliding the sequence within [start, end)
        left = end - start - space
        index = 0
        for c in self.clues:
            if c.value > left:
                # the middle (value - left) cells overlap in every placement
                fill = c.value - left
                c.filled += fill
                index += c.value - fill
                for _ in range(fill):
                    mark(index, CellState.BOX)
                    index += 1
                index += 1
            else:
                index += c.value + 1

    def adjust(self, array):
        # NOTE(review): this branch is an unfinished stub — the loop reads and
        # writes nothing (start/end are dead locals), so unfilled lines never
        # make progress and Picross.solve() can spin forever.
        if not self.isFilled():
            for i in range(len(array)):
                start = 0
                end = 0
        # once every clue is satisfied, all remaining unknowns must be empty
        if self.isFilled():
            for cell in array:
                if cell.state == CellState.UNKNOWN:
                    cell.state = CellState.EMPTY

    def isFilled(self):
        # all runs fully placed?
        return all(c.isFilled() for c in self.clues)
class Picross():
    """A picross (nonogram) board with row and column Clues."""

    def __init__(self, rows=None, columns=None):
        """
        :param rows: list of Clues, one per board row (default: empty board)
        :param columns: list of Clues, one per board column
        """
        # BUG FIX: the defaults were mutable lists (rows=[], columns=[]),
        # which are shared across every instance created without arguments;
        # None sentinels avoid that.
        self.rows = rows if rows is not None else []
        self.columns = columns if columns is not None else []
        self.board = []
        for _ in range(len(self.rows)):
            array = []
            for _ in range(len(self.columns)):
                array.append(Cell())
            self.board.append(array)

    def isSolved(self):
        """True when every row's clues are completely filled."""
        return all(r.isFilled() for r in self.rows)

    def set(self, r, c, state):
        """Set the state of the cell at row r, column c."""
        self.board[r][c].state = state

    def solve(self):
        # NOTE(review): termination relies on Clues.adjust() making progress;
        # with its current stub implementation this loop may never finish.
        self.fillMin()
        print(self)
        while not self.isSolved():
            self.adjust()
            print(self)

    def fillMin(self):
        """Apply the forced-overlap deduction to every row and column."""
        for r in range(len(self.rows)):
            self.rows[r].fillMin(0, len(self.columns), lambda index, state: self.set(r, index, state))
        for c in range(len(self.columns)):
            self.columns[c].fillMin(0, len(self.rows), lambda index, state: self.set(index, c, state))

    def adjust(self):
        """Run one deduction pass over all rows, then all columns."""
        for r in range(len(self.rows)):
            self.rows[r].adjust(self.board[r])
        for c in range(len(self.columns)):
            self.columns[c].adjust(list(map(lambda row: row[c], self.board)))

    def __str__(self):
        """Render the board with row clues to the right and column clues below."""
        result = ''
        for r in range(len(self.board)):
            for cell in self.board[r]:
                result += str(cell.state) + ' '
            for clue in self.rows[r].clues:
                result += str(clue) + ' '
            result += '\n'
        # print column clues line by line until every column is exhausted
        has_value = True
        count = 0
        while has_value:
            has_value = False
            for clues in self.columns:
                if len(clues.clues) > count:
                    has_value = True
                    result += str(clues.clues[count]) + ' '
                else:
                    result += '  '
            result += '\n'
            count += 1
        return result
# Demo: a 5x5 puzzle — print the deduced starting board, then run the solver.
# NOTE(review): solve() may not terminate with the current Clues.adjust stub.
picross = Picross([Clues(1,1,1),Clues(1,1),Clues(1,2),Clues(5),Clues(1,1,1)],
                  [Clues(1,2), Clues(3), Clues(1,2),Clues(3),Clues(1,3)])
print(picross)
picross.solve()
| 3,689 | 1,173 |
'''
Copyright (C) 2016. Huawei Technologies Co., Ltd. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the MIT license.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
MIT License for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import numpy as np
import torchvision
class LegoConv2d(nn.Module):
    """
    Conv2d assembled from a small bank of shared 'lego' filter bricks.

    The input channels are split into `n_split` groups. Each output filter is
    built by hard-selecting (argmax) one brick per group and scaling it with
    learned coefficients; a one-hot proxy carries the gradient back to the
    selection logits (straight-through style).
    """

    def __init__(self, in_channels, out_channels, kernel_size, n_split, n_lego):
        super(LegoConv2d, self).__init__()
        self.in_channels, self.out_channels, self.kernel_size, self.n_split = in_channels, out_channels, kernel_size, n_split
        # channels per split group — assumes in_channels % n_split == 0,
        # TODO confirm callers guarantee divisibility
        self.basic_channels = in_channels // self.n_split
        # n_lego arrives as a fraction of out_channels
        self.n_lego = int(self.out_channels * n_lego)
        # shared filter bank: (n_lego, basic_channels, k, k)
        self.lego = nn.Parameter(nn.init.kaiming_normal_(torch.rand(self.n_lego, self.basic_channels, self.kernel_size, self.kernel_size)))
        # per-split scaling coefficients and brick-selection logits,
        # both shaped (n_split, out_channels, n_lego, 1, 1)
        self.aux_coefficients = nn.Parameter(init.kaiming_normal_(torch.rand(self.n_split, self.out_channels, self.n_lego, 1, 1)))
        self.aux_combination = nn.Parameter(init.kaiming_normal_(torch.rand(self.n_split, self.out_channels, self.n_lego, 1, 1)))

    def forward(self, x):
        # Hard one-hot selection over bricks (argmax of the logits). The proxy
        # is given requires_grad so its gradient can be copied back onto
        # aux_combination in copy_grad() after backward().
        self.proxy_combination = torch.zeros(self.aux_combination.size()).to(self.aux_combination.device)
        self.proxy_combination.scatter_(2, self.aux_combination.argmax(dim = 2, keepdim = True), 1); self.proxy_combination.requires_grad = True
        out = 0
        for i in range(self.n_split):
            # convolve this channel group with every brick at once
            lego_feature = F.conv2d(x[:, i*self.basic_channels: (i+1)*self.basic_channels], self.lego, padding = self.kernel_size // 2)
            # 1x1 mixing: coefficient * one-hot picks and scales one brick
            # per output channel
            kernel = self.aux_coefficients[i] * self.proxy_combination[i]
            out = out + F.conv2d(lego_feature, kernel)
        return out

    def copy_grad(self, balance_weight):
        # route the gradient from the one-hot proxy to the selection logits
        self.aux_combination.grad = self.proxy_combination.grad
        # balance loss: push logits so each brick is used about equally often
        idxs = self.aux_combination.argmax(dim = 2).view(-1).cpu().numpy()
        # NOTE(review): both results below are immediately overwritten and
        # never used afterwards — appears to be leftover debug code.
        unique, count = np.unique(idxs, return_counts = True)
        unique, count = np.unique(count, return_counts = True)
        # target usage frequency per brick
        avg_freq = (self.n_split * self.out_channels ) / self.n_lego
        max_freq = 0
        min_freq = 100
        for i in range(self.n_lego):
            i_freq = (idxs == i).sum().item()
            max_freq = max(max_freq, i_freq)
            min_freq = min(min_freq, i_freq)
            if i_freq >= np.floor(avg_freq) and i_freq <= np.ceil(avg_freq):
                continue
            # under-used bricks get a negative gradient push (more selection),
            # over-used bricks a positive one, proportional to the imbalance
            if i_freq < np.floor(avg_freq):
                self.aux_combination.grad[:, :, i] = self.aux_combination.grad[:, :, i] - balance_weight * (np.floor(avg_freq) - i_freq)
            if i_freq > np.ceil(avg_freq):
                self.aux_combination.grad[:, :, i] = self.aux_combination.grad[:, :, i] + balance_weight * (i_freq - np.ceil(avg_freq))
| 3,055 | 1,122 |
# Generated by Django 3.1.2 on 2020-10-29 20:37
from django.db import migrations, models
class Migration(migrations.Migration):
    # Converts Car.truck from a single-valued field to a ManyToManyField.
    # NOTE(review): RemoveField followed by AddField drops all existing truck
    # assignments; a data migration would be needed to preserve them.

    dependencies = [
        ('trucks', '0004_auto_20201025_2222'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='car',
            name='truck',
        ),
        migrations.AddField(
            model_name='car',
            name='truck',
            field=models.ManyToManyField(to='trucks.Truck'),
        ),
    ]
| 488 | 174 |
# ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= EOF =============================================
from uncertainties import ufloat
from pychron.dvc import analysis_path, dvc_dump
from pychron.processing.interpreted_age import InterpretedAge
class Tag(object):
    """An analysis tag (name/note/subgroup) serialized into the DVC tag store."""

    name = None
    path = None
    note = ""
    subgroup = ""
    uuid = ""
    record_id = ""
    # BUG FIX: declared as a class attribute (like the fields above) so that
    # dump() cannot raise AttributeError on a Tag that was not built via
    # from_analysis().
    repository_identifier = ""

    @classmethod
    def from_analysis(cls, an, **kw):
        """Build a Tag from an analysis object; extra kwargs override attributes."""
        tag = cls()
        tag.name = an.tag
        tag.note = an.tag_note
        tag.record_id = an.record_id
        tag.uuid = an.uuid
        tag.repository_identifier = an.repository_identifier
        # tag.path = analysis_path(an.record_id, an.repository_identifier, modifier='tags')
        tag.path = analysis_path(an, an.repository_identifier, modifier="tags")
        tag.subgroup = an.subgroup

        for k, v in kw.items():
            setattr(tag, k, v)

        return tag

    def dump(self):
        """Write name/note/subgroup to the tag file, deriving the path if unset."""
        obj = {"name": self.name, "note": self.note, "subgroup": self.subgroup}
        if not self.path:
            # NOTE(review): from_analysis derives the path from the analysis
            # object while this fallback uses the uuid — confirm both resolve
            # to the same file.
            self.path = analysis_path(
                self.uuid, self.repository_identifier, modifier="tags", mode="w"
            )
        dvc_dump(obj, self.path)
class DVCInterpretedAge(InterpretedAge):
    """An InterpretedAge hydrated from its serialized DVC JSON document."""

    # attributes populated from the serialized JSON in from_json()
    labnumber = None
    isotopes = None
    repository_identifier = None
    analyses = None

    def load_tag(self, obj):
        """Populate tag fields from a tag dict; missing keys default to ''."""
        self.tag = obj.get("name", "")
        self.tag_note = obj.get("note", "")

    def from_json(self, obj):
        """
        Populate this interpreted age from its serialized dict.

        Expects 'preferred' and 'sample_metadata' sub-dicts; 'analyses' and
        'preferred_kinds' are optional.
        """
        for attr in ("name", "uuid"):
            setattr(self, attr, obj.get(attr, ""))

        pf = obj["preferred"]
        for attr in ("age", "age_err"):
            setattr(self, attr, pf.get(attr, 0))

        sm = obj["sample_metadata"]
        for attr in ("sample", "material", "project", "irradiation"):
            setattr(self, attr, sm.get(attr, ""))

        # for a in ('age', 'age_err', 'age_kind',
        #           # 'kca', 'kca_err','kca_kind',
        #           'mswd',
        #           'sample', 'material', 'identifier', 'nanalyses', 'irradiation',
        #           'name', 'project', 'uuid', 'age_error_kind'):
        #     try:
        #         setattr(self, a, obj.get(a, NULL_STR))
        #     except BaseException as a:
        #         print('exception DVCInterpretdAge.from_json', a)

        # NOTE(review): 'identifier' is presumably provided by the
        # InterpretedAge base class — confirm it is set before from_json runs.
        self.labnumber = self.identifier
        # self.uage = ufloat(self.age, self.age_err)
        self._record_id = "{} {}".format(self.identifier, self.name)
        self.analyses = obj.get("analyses", [])

        pkinds = pf.get("preferred_kinds")
        if pkinds:
            for k in pkinds:
                attr = k["attr"]
                if attr == "age":
                    # the preferred age is exposed as 'uage' (value +/- error)
                    attr = "uage"
                setattr(self, attr, ufloat(k["value"], k["error"]))

    def get_value(self, attr):
        """Return the named attribute, or a zero ufloat when it is missing."""
        try:
            return getattr(self, attr)
        except AttributeError:
            return ufloat(0, 0)

    @property
    def status(self):
        # 'X' marks an omitted analysis group
        return "X" if self.is_omitted() else ""
| 3,719 | 1,139 |
from __future__ import annotations
from typing import Optional
from dataclasses import dataclass
from logging import Logger
from processor.device_names import address_to_name
@dataclass
class GenRecord:
    """
    Mutable record of a device/box identity (MAC address, sensor id, names).
    Every property setter logs the change and fires a *_changed() hook that
    subclasses may override.
    """

    logger: Logger
    # backing fields for the properties below
    _mac: Optional[str] = None
    _sid: int = 0
    _name: Optional[str] = None

    # Nurse only, but in master class to make simpler
    _nurse_name: str = ""

    @property
    def safemac(self) -> str:
        """Returns a safe filename (Windows does not allow colons)"""
        keepcharacters = (".", "_")
        return "".join(
            c for c in self.mac if c.isalnum() or c in keepcharacters
        ).rstrip()

    @property
    def mac(self) -> str:
        """
        The mac address. Returns <unknown> if the address is not known.
        """
        return "<unknown>" if self._mac is None else self._mac

    @mac.setter
    def mac(self, value: str):
        # log and fire the hook only on an actual change
        if self._mac is None or self._mac != value:
            self._mac = value
            self.logger.info(f"MAC addr: {self._mac}")
            self.mac_changed()

    @property
    def sid(self) -> int:
        """
        Sensor ID, as an integer. Printout with "X" format.
        """
        return self._sid

    @sid.setter
    def sid(self, value: int):
        if self._sid != value:
            self._sid = value
            self.logger.info(f"Sensor ID: {self._sid:X}")
            self.sid_changed()

    @property
    def box_name(self) -> str:
        """
        The name of the box, or <unknown>.
        """
        # explicit name wins; otherwise derive from the MAC address
        if self._name is not None:
            return self._name
        if self._mac is None:
            return "<unknown>"
        try:
            return address_to_name(self._mac).title()
        except ValueError:
            # MAC not in the lookup table: fall back to the raw address
            return self.mac

    @box_name.setter
    def box_name(self, value: Optional[str]):
        if self._name != value:
            self._name = value
            self.logger.info(f"Box name: {self._name}")
            # NOTE(review): fires mac_changed(), not a dedicated name hook —
            # confirm this reuse is intentional.
            self.mac_changed()

    @property
    def stacked_name(self) -> str:
        """
        Return the box name stacked using a newline
        If unknown, return Box name: <unknown>.
        """
        # NOTE(review): also treats the all-zero MAC as unknown, unlike the
        # `mac` property — confirm the asymmetry is intended.
        if self._mac is None or self._mac == "00:00:00:00:00:00":
            return "Box name\n<unknown>"
        try:
            return "\n".join(address_to_name(self._mac).title().split())
        except ValueError:
            return self.mac

    @property
    def title(self) -> str:
        """
        The title to show in the dialog box. Will show box_name if unset.
        """
        return self._nurse_name

    @title.setter
    def title(self, value: str):
        # NOTE(review): _nurse_name defaults to "" and is typed str, so the
        # None check below appears redundant.
        if self._nurse_name is None or self._nurse_name != value:
            self._nurse_name = value
            self.logger.info(f"Changed title to {self._nurse_name!r}")
            self.title_changed()

    def title_changed(self) -> None:
        """
        Modify in subclasses to add special callbacks here.
        """

    def mac_changed(self) -> None:
        """
        Modify in subclasses to add special callbacks here.
        """

    def sid_changed(self) -> None:
        """
        Modify in subclasses to add special callbacks here.
        """
| 3,214 | 964 |
from __future__ import print_function
import math
import nibabel as nib
import nrrd
import numpy as np
import operator
import os
import random
import torch
import warnings
from functools import reduce
from inputs import Image, ImageType
CHANNEL, DEPTH, HEIGHT, WIDTH = 0, 1, 2, 3
class ToNDTensor(object):
    """
    Creates a torch.Tensor object from a numpy array.

    The transformer supports 3D and 4D numpy arrays. 4D (CxDxHxW) arrays are
    converted as-is; 3D (DxHxW) arrays get a trailing singleton axis appended,
    producing a (DxHxWx1) tensor.
    The dimensions are D: Depth, H: Height, W: Width, C: Channels.
    """

    # noinspection PyArgumentList
    def __call__(self, nd_array):
        """
        :param nd_array: A 3D or 4D numpy array to convert to torch.Tensor
        :return: A torch.Tensor of size (DxHxWx1) or (CxDxHxW)
        :raises TypeError: if the input is not a numpy ndarray
        :raises NotImplementedError: if the input is not 3D or 4D
        """
        if not isinstance(nd_array, np.ndarray):
            # Bug fix: error message said "supporter" instead of "supported".
            raise TypeError("Only {} are supported".format(np.ndarray))
        if nd_array.ndim == 3:
            # NOTE(review): the singleton axis is appended at the END,
            # yielding (D, H, W, 1) -- not the bare (D, H, W) the original
            # docstring advertised. Confirm downstream expectations.
            nd_tensor = torch.Tensor(nd_array.reshape(nd_array.shape + (1,)))
        elif nd_array.ndim == 4:
            nd_tensor = torch.Tensor(nd_array)
        else:
            raise NotImplementedError("Only 3D or 4D arrays are supported")
        return nd_tensor

    def __repr__(self):
        return self.__class__.__name__ + '()'
class ToNiftiFile(object):
    """
    Creates a Nifti1Image from a given numpy ndarray
    The numpy arrays are transposed to respect the standard Nifti dimensions (WxHxDxC)
    """

    def __init__(self, file_path, affine):
        self._file_path = file_path
        self._affine = affine

    def __call__(self, nd_array):
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        target_dir = os.path.dirname(self._file_path)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        if nd_array.shape[0] in [6, 9]:
            # 6/9-channel tensor volume: move channels to the last axis.
            nd_array = np.moveaxis(nd_array, 0, 3)
        else:
            # Otherwise drop the leading axis (must be a singleton).
            nd_array = np.squeeze(nd_array, axis=0)
        nib.save(nib.Nifti1Image(nd_array, self._affine), self._file_path)

    def __repr__(self):
        return self.__class__.__name__ + '()'
class ToNrrdFile(object):
    """
    Create a .NRRD file and save it at the given path.
    The numpy arrays are transposed to respect the standard NRRD dimensions (WxHxDxC)
    """

    def __init__(self, file_path):
        self._file_path = file_path

    def __call__(self, nd_array):
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) {} are supported".format(np.ndarray))
        destination = os.path.dirname(self._file_path)
        if not os.path.exists(destination):
            os.makedirs(destination)
        nrrd.write(self._file_path,
                   np.moveaxis(nd_array, 0, 3),
                   header=self._create_header_from(nd_array))

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def _create_header_from(nd_array):
        """Build the NRRD header dict describing *nd_array*."""
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) {} are supported".format(np.ndarray))
        kinds = ['domain', 'domain', 'domain']
        if nd_array.ndim == 4:
            kinds = kinds + ['3D-matrix']
        return {
            'type': nd_array.dtype,
            'dimension': nd_array.ndim,
            'sizes': nd_array.shape,
            'kinds': kinds,
            'endian': 'little',
            'encoding': 'raw'
        }
class ToNumpyArray(object):
    """
    Creates a numpy ndarray from a given Nifti or NRRD image file path.
    The numpy arrays are transposed to respect the channel-first (CxDxHxW)
    layout: 3D volumes are promoted to a single-channel 4D array.
    """

    def __call__(self, image_path):
        """
        :param image_path: path to a Nifti or NRRD image file
        :return: the image as a channel-first (CxDxHxW) numpy ndarray
        :raises NotImplementedError: for unsupported file types
        """
        if Image.is_nifti(image_path):
            # get_fdata() already returns an ndarray; the redundant
            # ``.__array__()`` call and the unused ``affine`` local (which
            # also reached into the private ``_affine`` attribute) were
            # removed.
            nd_array = nib.load(image_path).get_fdata()
        elif Image.is_nrrd(image_path):
            nd_array, _header = nrrd.read(image_path)
        else:
            raise NotImplementedError(
                "Only {} files are supported !".format(ImageType.ALL))
        if nd_array.ndim == 3:
            # Promote 3D volumes to single-channel, channel-first layout.
            nd_array = np.moveaxis(np.expand_dims(nd_array, 3), 3, 0)
        elif nd_array.ndim == 4:
            nd_array = np.moveaxis(nd_array, 3, 0)
        return nd_array

    def __repr__(self):
        return self.__class__.__name__ + '()'
class ToUniqueTensorValues(object):
    """
    Keep only the unique components of flattened 3x3 diffusion tensors.

    A row-major 3x3 symmetric tensor stored as 9 channels has only 6
    independent values; channels [0, 1, 2, 4, 5, 8] are its upper triangle.
    (The docstring of the original class was a copy-paste from ToNumpyArray,
    and the class attribute preceded it, so it was not even a docstring.)
    """

    # Upper-triangular channel indices of a row-major 3x3 tensor.
    UNIQUE_TENSOR_VALUES_INDEX = [0, 1, 2, 4, 5, 8]

    def __call__(self, nd_array):
        """
        :param nd_array: a 4D (9xDxHxW) numpy ndarray
        :return: the (6xDxHxW) ndarray of unique tensor components
        :raises TypeError: for anything but a 9-channel 4D ndarray
        """
        # Bug fix: ``ndim is not 4`` relied on CPython's small-int caching;
        # replaced with a proper equality test. The error message grammar
        # ("are with 9 channels are supported") was also repaired.
        if not isinstance(nd_array, np.ndarray) or nd_array.ndim != 4 or nd_array.shape[0] != 9:
            raise TypeError(
                "Only 4D (CxDxHxW) {} with 9 channels are supported".format(np.ndarray))
        return nd_array[self.UNIQUE_TENSOR_VALUES_INDEX, :, :, :]

    def __repr__(self):
        return self.__class__.__name__ + '()'
class ToLogEuclidean(object):
"""
Convert a DTI image in the Log-Euclidean space.
To convert the DTI image into the Log-Euclidean space, the eigen-decomposition of each tensor is performed and the
log of the eigen-values is computed.
It can mathematically be expressed as follow: log(D) = Ulog(V)U.T where D is a tensor, U is a matrix of eigen-vector
and V a diagonal matrix of eigen-values.
Based on: Arsigny, V., Fillard, P., Pennec, X., & Ayache, N. (2006). Log-Euclidean metrics for fast and
simple calculus on diffusion tensors https://www.ncbi.nlm.nih.gov/pubmed/16788917
"""
def __call__(self, nd_array):
"""
:param nd_array: The DTI image as a nd array of dimension CxDxHxW)
:return: he DTI image in the log-Euclidean space
"""
warnings.filterwarnings('ignore')
if not isinstance(nd_array, np.ndarray) or nd_array.ndim is not 4 or nd_array.shape[0] != 9:
raise TypeError(
"Only 4D (CxDxHxW) {} are with 9 channels are supported".format(np.ndarray))
image_as_vector = nd_array.reshape(
(3, 3, reduce(operator.mul, nd_array.shape[1:], 1)))
return self.apply(image_as_vector, np.zeros(image_as_vector.shape, dtype='float32')).reshape(nd_array.shape)
def __repr__(self):
return self.__class__.__name__ + '()'
@staticmethod
def apply(image_vector, output):
index = 0
while index < image_vector.shape[2]:
diffusion_tensor = image_vector[:, :, index]
# Does not convert the background tensors to log-euclidean
if np.any(diffusion_tensor):
eig_val, eig_vec = np.linalg.eigh(diffusion_tensor)
output[:, :, index] = np.dot(np.dot(np.ascontiguousarray(eig_vec), np.diag(np.log(eig_val))),
np.ascontiguousarray(np.linalg.inv(eig_vec)))
else:
output[:, :, index] = diffusion_tensor
index = index + 1
@staticmethod
def undo(image_vector, output):
index = 0
while index < image_vector.shape[2]:
log_euclidean_diffusion_tensor = image_vector[:, :, index]
# Due to noise, negative eigenvalues can arise. Those noisy tensors cannot be converted back to Euclidean.
if np.any(log_euclidean_diffusion_tensor) and not np.isnan(log_euclidean_diffusion_tensor).any():
eig_val, eig_vec = np.linalg.eigh(
log_euclidean_diffusion_tensor)
output[:, :, index] = np.dot(np.dot(np.ascontiguousarray(eig_vec), np.diag(np.exp(eig_val))),
np.ascontiguousarray(np.linalg.inv(eig_vec)))
else:
output[:, :, index] = log_euclidean_diffusion_tensor
index = index + 1
class InterpolateNSDTensors(object):
    """
    Interpolates Negative Semi-Definite tensors using trilinear interpolation.
    It computed a weighted sum of the NSD tensors' neighbors in the Log-Euclidean domain.

    NSD voxels are identified by NaNs in the last channel of the
    log-Euclidean image.
    """

    def __call__(self, log_euclidean_nd_array):
        """
        :param log_euclidean_nd_array: 4D (CxDxHxW) log-Euclidean image
        :return: the same array with NaN voxels replaced by interpolation
        :raises TypeError: if the input is not a 4D ndarray
        """
        # Bug fixes: ``ndim is not 4`` identity test -> ``!= 4``; the error
        # message previously formatted ``np.ndarray.dtype`` (an attribute
        # descriptor) instead of the type itself.
        if not isinstance(log_euclidean_nd_array, np.ndarray) or log_euclidean_nd_array.ndim != 4:
            raise TypeError("Only {} are supported".format(np.ndarray))
        d_index, h_index, w_index = np.where(
            np.isnan(log_euclidean_nd_array[-1, :, :, :]))
        for index in list(zip(d_index, h_index, w_index)):
            neighbors = self._get_tri_linear_neighbors_and_weights(
                index, log_euclidean_nd_array)
            # Weighted average of the 8 corner neighbors (weights normalized).
            log_euclidean_nd_array[:, index[0], index[1], index[2]] = np.dot(np.array(neighbors[0]).T,
                                                                             neighbors[1] / np.sum(neighbors[1]))
        return log_euclidean_nd_array

    def _get_tri_linear_neighbors_and_weights(self, nsd_index, log_euclidean_nd_array):
        """
        Gets the 8 neighbors of the NSD tensors from which to interpolate. The weight associated with each neighbor
        is inversely proportional to the distance between the interpolated tensor and the neighbor.
        :param nsd_index: The index of the NSD tensor.
        :param log_euclidean_nd_array: The log euclidean image as numpy ndarray
        :return: A list of the 8 corner neighbors and their associated weights in separated tuples.
        """
        front, left, down = -1, -1, -1
        back, right, up = 1, 1, 1
        directions = [(front, left, down), (front, left, up), (back, left, down), (back, left, up),
                      (front, right, down), (front, right, up), (back, right, up), (back, right, down)]
        neighbors_and_weights = list(map(lambda direction:
                                         self._get_closest_neighbor_of(
                                             log_euclidean_nd_array, nsd_index, direction),
                                         directions))
        return list(zip(*neighbors_and_weights))

    @staticmethod
    def _get_closest_neighbor_of(log_euclidean_nd_array, nsd_index, direction):
        """
        Gets the closest non-NSD tensor to the nsd_index and its weight following a given direction.
        The associated weight is 1/distance, where the distance is the distance from the neighbor and the nsd_index.
        :param log_euclidean_nd_array: The log-euclidean image as ndarray.
        :param nsd_index: The index of the NSD tensor to interpolate.
        :param direction: The direction in which the neighbor is searched.
        :return: The closest neighbor as a 9 values vector and its associated weight.
        """
        distance = 1
        neighbor = None
        try:
            while neighbor is None:
                d, h, w = tuple(((np.array(direction) * distance) + nsd_index))
                # NOTE(review): ``0 < d`` excludes index 0 from the search --
                # possibly intended as ``0 <= d``; behavior left unchanged.
                if 0 < d < log_euclidean_nd_array.shape[1] and 0 < h < log_euclidean_nd_array.shape[2] and 0 < w < \
                        log_euclidean_nd_array.shape[3]:
                    potential_neighbor = log_euclidean_nd_array[:, d, h, w]
                else:
                    raise IndexError
                if not np.isnan(potential_neighbor).any():
                    neighbor = potential_neighbor
                else:
                    distance = distance + 1
                weight = 1 / distance
        except IndexError:
            # Ran off the volume: contribute a zero tensor with zero weight.
            neighbor = np.zeros(log_euclidean_nd_array.shape[0])
            weight = 0
        return neighbor, weight

    def __repr__(self):
        return self.__class__.__name__ + '()'
class CropToContent(object):
    """
    Crops the image to its content.
    The content's bounding box spans the first through last non-zero slice in
    each spatial direction (D, H, W).
    """

    def __call__(self, nd_array):
        """
        :param nd_array: the input image as a numpy ndarray (CxDxHxW; see note
            in extract_content_bounding_box_from about 3D input)
        :return: the image cropped to its non-zero bounding box
        :raises TypeError: if the input is not a 3D/4D ndarray
        """
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        c, d_min, d_max, h_min, h_max, w_min, w_max = self.extract_content_bounding_box_from(
            nd_array)
        # The bounds are inclusive indices, so slice one past each max.
        # (Bug fixes: the previous exclusive ``d_min:d_max`` slicing silently
        # dropped the last non-zero plane in every direction, and the
        # ``ndim is 4`` identity test was replaced with ``== 4``.)
        if nd_array.ndim == 4:
            return nd_array[:, d_min:d_max + 1, h_min:h_max + 1, w_min:w_max + 1]
        return nd_array[d_min:d_max + 1, h_min:h_max + 1, w_min:w_max + 1]

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def extract_content_bounding_box_from(nd_array):
        """
        Computes the channel count and the inclusive D, H, W min/max indices
        of the content bounding box.

        NOTE(review): the ``axis=(2, 3)`` reductions assume a 4D (CxDxHxW)
        input; a 3D array accepted by ``__call__`` would raise here. Confirm
        whether 3D support is actually exercised.

        :param nd_array: The input image as a numpy ndarray
        :return: (C, d_min, d_max, h_min, h_max, w_min, w_max)
        """
        depth_slices = np.any(nd_array, axis=(2, 3))
        height_slices = np.any(nd_array, axis=(1, 3))
        width_slices = np.any(nd_array, axis=(1, 2))
        d_min, d_max = np.where(depth_slices)[1][[0, -1]]
        h_min, h_max = np.where(height_slices)[1][[0, -1]]
        w_min, w_max = np.where(width_slices)[1][[0, -1]]
        # Channel count (axis CHANNEL == 0 in this module's layout).
        return nd_array.shape[0], d_min, d_max, h_min, h_max, w_min, w_max
class PadToShape(object):
    """
    Symmetrically pads an array up to ``target_shape`` with ``padding_value``.

    With ``isometric=True`` the depth and width targets are replaced by the
    larger of the two (presumably to make those axes equal -- the original
    carried no explanation).
    """

    def __init__(self, target_shape, padding_value=0, isometric=False):
        self._padding_value = padding_value
        if isometric:
            largest_dimension = max(target_shape[DEPTH], target_shape[WIDTH])
            self._target_shape = (
                target_shape[CHANNEL], largest_dimension, target_shape[HEIGHT], largest_dimension)
        else:
            self._target_shape = target_shape

    def __call__(self, nd_array):
        """
        :param nd_array: 3D (DxHxW) or 4D (CxDxHxW) ndarray
        :return: the array padded to the target shape
        :raises TypeError: if the input is not a 3D/4D ndarray
        :raises ValueError: if input and target dimensionality differ
        """
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        # Bug fix: ``ndim is not len(...)`` identity test replaced with ``!=``.
        elif nd_array.ndim != len(self._target_shape):
            raise ValueError(
                "The input image and target shape's dimension does not match {} vs {}".format(nd_array.ndim,
                                                                                              len(self._target_shape)))
        return self.apply(nd_array, self._target_shape, self._padding_value)

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def apply(nd_array, target_shape, padding_value):
        """Pad symmetrically (extra element goes to the trailing side)."""
        deltas = tuple(max(0, target - current)
                       for target, current in zip(target_shape, nd_array.shape))
        if nd_array.ndim == 3:
            nd_array = np.pad(nd_array, ((math.floor(deltas[0] / 2), math.ceil(deltas[0] / 2)),
                                         (math.floor(deltas[1] / 2),
                                          math.ceil(deltas[1] / 2)),
                                         (math.floor(deltas[2] / 2), math.ceil(deltas[2] / 2))),
                              'constant', constant_values=padding_value)
        elif nd_array.ndim == 4:
            # Channels are never padded.
            nd_array = np.pad(nd_array, ((0, 0),
                                         (math.floor(deltas[1] / 2),
                                          math.ceil(deltas[1] / 2)),
                                         (math.floor(deltas[2] / 2),
                                          math.ceil(deltas[2] / 2)),
                                         (math.floor(deltas[3] / 2), math.ceil(deltas[3] / 2))),
                              'constant', constant_values=padding_value)
        return nd_array

    @staticmethod
    def undo(nd_array, original_shape):
        """
        Crop a previously padded array back to *original_shape*.

        Bug fix: each slice end now uses ``... or None`` -- when a delta is 0,
        the old ``-math.ceil(0 / 2)`` end evaluated to ``-0 == 0`` and
        produced an EMPTY slice, destroying any axis that needed no cropping.
        """
        deltas = tuple(max(0, current - target)
                       for target, current in zip(original_shape, nd_array.shape))
        if nd_array.ndim == 3:
            nd_array = nd_array[
                math.floor(deltas[0] / 2):-math.ceil(deltas[0] / 2) or None,
                math.floor(deltas[1] / 2):-math.ceil(deltas[1] / 2) or None,
                math.floor(deltas[2] / 2):-math.ceil(deltas[2] / 2) or None]
        elif nd_array.ndim == 4:
            nd_array = nd_array[
                :,
                math.floor(deltas[1] / 2):-math.ceil(deltas[1] / 2) or None,
                math.floor(deltas[2] / 2):-math.ceil(deltas[2] / 2) or None,
                math.floor(deltas[3] / 2):-math.ceil(deltas[3] / 2) or None]
        return nd_array
class RandomFlip(object):
    """
    Randomly flips a volume along each of its three spatial axes.

    For 4D input, each channel is flipped independently along the same axis,
    which is equivalent to flipping spatial axis ``k`` of the whole volume.
    """

    def __init__(self, exec_probability):
        # Per-axis probability of applying a flip.
        self._exec_probability = exec_probability

    def __call__(self, nd_array):
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        for axis in (0, 1, 2):
            if random.uniform(0, 1) <= self._exec_probability:
                nd_array = self.apply(nd_array, [axis])
        return nd_array

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def apply(nd_array, axes):
        """
        Flip along each axis in *axes* (per-channel for 4D input).

        Bug fix: ``nd_array.ndim is 3`` (identity test on an int literal, an
        accident of CPython's small-int caching) replaced with ``== 3``.
        """
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        for axis in axes:
            if nd_array.ndim == 3:
                nd_array = np.flip(nd_array, axis)
            else:
                channels = [np.flip(nd_array[c], axis)
                            for c in range(nd_array.shape[0])]
                nd_array = np.stack(channels, axis=0)
        return nd_array

    @staticmethod
    def undo(nd_array, axes):
        """Invert :meth:`apply` (flips are involutions; axes applied reversed)."""
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        for axis in axes[::-1]:
            if nd_array.ndim == 3:
                nd_array = np.flip(nd_array, axis)
            else:
                channels = [np.flip(nd_array[c], axis)
                            for c in range(nd_array.shape[0])]
                nd_array = np.stack(channels, axis=0)
        return nd_array
class RandomRotate90(object):
    """Randomly rotates a volume by a multiple of 90 degrees in the (H, W) plane."""

    def __init__(self, exec_probability):
        self._exec_probability = exec_probability

    def __call__(self, nd_array):
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        if random.uniform(0, 1) <= self._exec_probability:
            # NOTE(review): randint(0, 4) is inclusive, so 0 and 4 both yield
            # the identity rotation (drawn twice as often). Confirm whether
            # randint(0, 3) was intended; behavior left unchanged.
            num_rotation = random.randint(0, 4)
            nd_array = self.apply(nd_array, num_rotation)
        return nd_array

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def apply(nd_array, num_rotation):
        """Rotate by *num_rotation* quarter turns in axes (1, 2)."""
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        if nd_array.ndim != 3:
            rotated = [np.rot90(channel, num_rotation, (1, 2))
                       for channel in nd_array]
            return np.stack(rotated, axis=0)
        return np.rot90(nd_array, num_rotation, (1, 2))

    @staticmethod
    def undo(nd_array, num_rotation):
        """Rotate back by *num_rotation* quarter turns (axes reversed)."""
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        if nd_array.ndim != 3:
            restored = [np.rot90(channel, num_rotation, (2, 1))
                        for channel in nd_array]
            return np.stack(restored, axis=0)
        return np.rot90(nd_array, num_rotation, (2, 1))
class Normalize(object):
    """Shift-and-scale standardization: ``(x - mean) / std``."""

    def __init__(self, mean, std):
        self._mean = mean
        self._std = std

    def __call__(self, nd_array):
        if not isinstance(nd_array, np.ndarray):
            raise TypeError("Only ndarrays are supported")
        return self.apply(nd_array, self._mean, self._std)

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def apply(nd_array, mean, std):
        """Return the standardized array."""
        if isinstance(nd_array, np.ndarray):
            return (nd_array - mean) / std
        raise TypeError("Only ndarrays are supported")

    @staticmethod
    def undo(nd_array, mean, std):
        """Invert :meth:`apply`, restoring the original value range."""
        if isinstance(nd_array, np.ndarray):
            return (nd_array * std) + mean
        raise TypeError("Only ndarrays are supported")
class Flip(object):
    """Deterministically flips an ndarray along one fixed axis."""

    def __init__(self, axis):
        self._axis = axis

    def __call__(self, nd_array):
        if not isinstance(nd_array, np.ndarray):
            raise TypeError("Only ndarrays are supported")
        return self.apply(nd_array, self._axis)

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def apply(nd_array, axis):
        """Return a flipped copy (``.copy()`` materializes the reversed view)."""
        if isinstance(nd_array, np.ndarray):
            return np.flip(nd_array, axis).copy()
        raise TypeError("Only ndarrays are supported")

    @staticmethod
    def undo(nd_array, axis):
        """Flipping is an involution, so undo is the same operation."""
        if isinstance(nd_array, np.ndarray):
            return np.flip(nd_array, axis).copy()
        raise TypeError("Only ndarrays are supported")
class TensorFlip(object):
    """Flips a torch tensor along a fixed axis; applying twice is a no-op."""

    def __init__(self, axis):
        self._axis = axis

    def __call__(self, tensor):
        return self.apply(tensor, self._axis)

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def apply(tensor, axis):
        """Return *tensor* reversed along *axis*."""
        return tensor.flip(axis)

    @staticmethod
    def undo(tensor, axis):
        """Flip is an involution, so undo == apply."""
        return tensor.flip(axis)
| 22,024 | 7,329 |
from microscopium.screens import image_xpress
import collections as coll
def test_ix_semantic_filename():
    """Parse a plain Week1-style ImageXpress filename into its fields."""
    test_fn = "./Week1_22123/G10_s2_w11C3B9BCC-E48F-4C2F-9D31-8F46D8B5B972.tif"
    fields = [('directory', './Week1_22123'),
              ('prefix', ''),
              ('plate', 22123),
              ('well', 'G10'),
              ('field', 1),
              ('channel', 0),
              ('suffix', 'tif')]
    assert image_xpress.ix_semantic_filename(test_fn) == coll.OrderedDict(fields)
def test_ix_semantic_filename2():
    """Parse a BBBC022-style filename that carries an 'IXMtest' prefix."""
    test_fn = "./BBBC022_v1_images_20585w1/IXMtest_L09_s3_w1538679C9-F03A-" \
              "4656-9A57-0D4A440C1C62.tif"
    fields = [('directory', './BBBC022_v1_images_20585w1'),
              ('prefix', 'IXMtest'),
              ('plate', 20585),
              ('well', 'L09'),
              ('field', 2),
              ('channel', 0),
              ('suffix', 'tif')]
    assert image_xpress.ix_semantic_filename(test_fn) == coll.OrderedDict(fields)
| 1,198 | 425 |
import pytest
from PySide2.QtCore import Qt
from PySide2.QtGui import QClipboard
from PySide2.QtTest import QTest
from node_launcher.gui.components.copy_button import CopyButton
@pytest.fixture
def copy_button() -> CopyButton:
    """Button under test: copies the literal text 'copy_this' when clicked."""
    return CopyButton(button_text='Test Me', copy_text='copy_this')
class TestCopyButton(object):
    """Integration test: clicking the button puts copy_text on the clipboard."""

    def test_copy_button(self, copy_button: CopyButton, qtbot: QTest):
        qtbot.mouseClick(copy_button.button, Qt.LeftButton)
        # NOTE(review): this instantiates a fresh QClipboard() rather than
        # using QApplication.clipboard() -- confirm it reads the real system
        # clipboard on all platforms.
        assert QClipboard().text() == 'copy_this'
| 541 | 177 |
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runner for batch prediction pipeline."""
import argparse
from absl import logging
from kfp.v2.google import client
def run_training_pipeline():
    """Main function for batch prediction pipeline runner.

    Parses the CLI flags, strips the flags consumed by this runner itself,
    and either submits a single pipeline run or registers a cron schedule.

    NOTE(review): despite its name, this function configures batch-prediction
    flags (machine type, replica counts) -- confirm the name is intentional.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--project_id', type=str)
    parser.add_argument('--pipeline_region', type=str)
    parser.add_argument('--pipeline_root', type=str)
    parser.add_argument('--pipeline_job_spec_path', type=str)
    # Staging path for running custom job
    parser.add_argument('--data_pipeline_root', type=str)
    # Parameters required for data ingestion and processing
    parser.add_argument('--input_dataset_uri', type=str)
    parser.add_argument('--gcs_data_output_folder', type=str)
    parser.add_argument('--data_region', type=str)
    parser.add_argument('--gcs_result_folder', type=str)
    # Parameters required for training job
    parser.add_argument('--model_resource_name', type=str, default='')
    parser.add_argument('--endpoint_resource_name', type=str, default='')
    # Parameters required for batch prediction job
    parser.add_argument('--machine_type', type=str, default='n1-standard-4')
    parser.add_argument('--accelerator_count', type=int, default=0)
    parser.add_argument('--accelerator_type',
                        type=str, default='ACCELERATOR_TYPE_UNSPECIFIED')
    parser.add_argument('--starting_replica_count', type=int, default=1)
    parser.add_argument('--max_replica_count', type=int, default=2)
    # Parameters required for pipeline scheduling
    parser.add_argument('--pipeline_schedule',
                        type=str, default='', help='0 2 * * *')
    parser.add_argument('--pipeline_schedule_timezone',
                        type=str, default='US/Pacific')
    parser.add_argument('--enable_pipeline_caching',
                        action='store_true',
                        default=False,
                        help='Specify whether to enable caching.')
    args, _ = parser.parse_known_args()
    logging.info(args)

    api_client = client.AIPlatformClient(args.project_id, args.pipeline_region)

    # Flags consumed by the runner itself must not be forwarded as pipeline
    # parameters.
    runner_only_flags = {'pipeline_region', 'pipeline_root',
                         'pipeline_job_spec_path', 'pipeline_schedule',
                         'pipeline_schedule_timezone', 'enable_pipeline_caching'}
    pipeline_params = {key: value for key, value in vars(args).items()
                       if key not in runner_only_flags}

    if args.pipeline_schedule:
        # A cron expression was supplied: register a recurring schedule.
        api_client.create_schedule_from_job_spec(
            args.pipeline_job_spec_path,
            schedule=args.pipeline_schedule,
            time_zone=args.pipeline_schedule_timezone,
            pipeline_root=args.pipeline_root,
            parameter_values=pipeline_params,
            enable_caching=args.enable_pipeline_caching
        )
    else:
        api_client.create_run_from_job_spec(
            args.pipeline_job_spec_path,
            pipeline_root=args.pipeline_root,
            parameter_values=pipeline_params,
            enable_caching=args.enable_pipeline_caching
        )
# Script entry point: parse CLI flags and submit/schedule the pipeline run.
if __name__ == '__main__':
    run_training_pipeline()
| 3,581 | 1,123 |
import discord
from discord.application_commands import ApplicationCommand, ApplicationCommandTree, option
tree = ApplicationCommandTree(guild_id=1234) # Replace with your guild ID, or ``None`` to commands global
class Ping(ApplicationCommand, name='ping', tree=tree):
    """Pong!"""

    async def callback(self, interaction: discord.Interaction):
        # Reply with a static message when /ping is invoked.
        await interaction.response.send_message('Pong!')
# Parent command group; subcommands register themselves via ``parent=Math``.
class Math(ApplicationCommand, name='math', tree=tree):
    """Basic math operations."""
class Add(ApplicationCommand, name='add', parent=Math):
    """Sum of x + y."""

    # Required integer options; resolved values are read via self.x / self.y.
    x: int = option(description='Value of "x"', required=True)
    y: int = option(description='Value of "y"', required=True)

    async def callback(self, interaction: discord.Interaction):
        answer = self.x + self.y
        # ephemeral=True: only the invoking user sees the reply.
        await interaction.response.send_message(f'The value of {self.x} + {self.y} is **{answer}**.', ephemeral=True)
class Subtract(ApplicationCommand, name='subtract', parent=Math):
    """Difference of x - y."""

    # Required integer options; resolved values are read via self.x / self.y.
    x: int = option(description='Value of "x"', required=True)
    y: int = option(description='Value of "y"', required=True)

    async def callback(self, interaction: discord.Interaction):
        answer = self.x - self.y
        # ephemeral=True: only the invoking user sees the reply.
        await interaction.response.send_message(f'The value of {self.x} - {self.y} is **{answer}**.', ephemeral=True)
class Client(discord.Client):
    """Discord client that logs its identity once the gateway is ready."""

    async def on_ready(self):
        print(f'Logged in as {self.user} (ID: {self.user.id})')
        print('------')
# ``update_application_commands_at_startup`` presumably syncs the registered
# command tree with Discord on connect -- confirm against the library docs.
client = Client(update_application_commands_at_startup=True)
client.add_application_command_tree(tree)
client.run('token')  # Replace 'token' with a real bot token.
| 1,642 | 510 |
#!/usr/bin/env python
import Tkinter
import tkMessageBox
import rwkpickle, rwkos, os, glob
from Tkinter import StringVar, IntVar, DoubleVar
pklpath = rwkos.FindFullPath('pygimp_lecturerc.pkl')
class myWindow:
    """Minimal prompt window with a single text entry (asks for a quiz #).

    The entered text remains readable via ``self.var`` after the window
    closes. (This module is Python 2: Tkinter / tkMessageBox imports.)
    """

    def close(self, *args, **kwargs):
        # Tear the window down; self.var keeps the entered value.
        self.mw.destroy()

    def __init__(self, title="Enter Quiz #"):
        self.mw = Tkinter.Tk()
        self.mw.title(title)
        self.mw.option_add("*font", ("Arial", 15, "normal"))
        self.mw.geometry("+250+200")
        self.var = Tkinter.StringVar()
        text_entry = Tkinter.Entry(self.mw, textvariable=self.var)
        text_entry.pack()
        text_entry.bind("<Return>", self.close)
        text_entry.focus_set()
        self.btn2 = Tkinter.Button(self.mw,
                                   text="Exit",
                                   command=self.mw.destroy)
        self.btn2.pack()
        self.mw.mainloop()
class width_and_dpi_dialog:#(tkSimpleDialog.Dialog):
    """Two-field dialog asking for a figure width (inches) and a DPI value.

    After closing via <Return> in the width field or the "Go" button,
    ``self.result`` holds ``(width_float, dpi_int)``.
    """

    def close(self, *args, **kwargs):
        #print('got close event')
        # Raises ValueError if the width field is not a valid float.
        self.width_float = float(self.width_string.get())
        self.result = self.width_float, self.dpi_int.get()
        print('result = %f, %i' % self.result)
        self.mw.destroy()

    def __init__(self, title="Width and DPI Dialog"):
        self.result = None
        self.mw = Tkinter.Tk()
        self.mw.option_add("*font", ("Arial", 15, "normal"))
        self.mw.geometry("+250+200")
        Tkinter.Label(self.mw, text="Width (in.):").grid(row=0)
        Tkinter.Label(self.mw, text="dpi:").grid(row=1)
        # Width is kept as a StringVar (not DoubleVar) and parsed in close().
        self.width_string = Tkinter.StringVar()#Tkinter.DoubleVar()
        self.dpi_int = Tkinter.IntVar()
        width_entry = Tkinter.Entry(self.mw, textvariable=self.width_string)
        width_entry.grid(row=0, column=1)
        dpi_entry = Tkinter.Entry(self.mw, textvariable=self.dpi_int)
        dpi_entry.grid(row=1, column=1)
        # Defaults: 300 dpi, empty width.
        self.dpi_int.set(300)
        self.width_string.set('')
        #self.width_float.set(3.0)
        #entry.pack()
        #entry.bind("<KP_Enter>", self.close)
        width_entry.bind("<Return>", self.close)
        self.mw.title(title)
        self.exit_btn = Tkinter.Button(self.mw,
                                       text = "Exit",
                                       command = self.mw.destroy)
        self.exit_btn.grid(row=2, column=0)
        self.go_btn = Tkinter.Button(self.mw,
                                     text = "Go",
                                     command = self.close)
        self.go_btn.grid(row=2, column=1)
        width_entry.focus_set()
        self.mw.mainloop()
## def body(self, master):
## Label(master, text="First:").grid(row=0)
## Label(master, text="Second:").grid(row=1)
## self.e1 = Entry(master)
## self.e2 = Entry(master)
## self.e1.grid(row=0, column=1)
## self.e2.grid(row=1, column=1)
## return self.e1 # initial focus
## def apply(self):
## first = string.atoi(self.e1.get())
## second = string.atoi(self.e2.get())
## self.result = first, second
## print first, second # or something
## def btnClick(self):
## self.answer = tkMessageBox.askyesno(title = "Your Choice", message = 'Please click either "Yes" or "No".')
## if self.answer:
## tkMessageBox.showinfo(title = "Yes", message = "Your choice was: Yes.")
## else:
## tkMessageBox.showinfo(title = "No", message = "Your choice was: No.")
class pickle_entry(object):
    """A labeled Entry widget bound to one key of the parent's pickle dict."""

    def __init__(self, parent, mw, label, key, row, varclass=None):
        if varclass is None:
            varclass = StringVar
        self.var = varclass()
        self.parent = parent
        self.mw = mw
        self.label = label
        self.key = key
        self.row = row
        Tkinter.Label(mw, text=label + ":").grid(row=row, column=0, sticky='e')
        self.entry = Tkinter.Entry(mw, textvariable=self.var, width=25)
        self.entry.grid(row=row, column=1)

    def get(self):
        """Return ``(key, current entry value)``."""
        return self.key, self.var.get()

    def load_pickle(self):
        """Copy this key's value from the parent's pickle into the entry."""
        self.var.set(self.parent.pickle[self.key])
class lecture_pickle_dialog:#(tkSimpleDialog.Dialog):
    """Editor dialog for the lecture-state pickle.

    Builds one labeled entry row per tuple in ``self.data``; the "Go" button
    writes the edited values back to the pickle file at ``pklpath``.
    """

    def close(self, *args, **kwargs):
        print('got close event')
        #self.width_float = float(self.width_string.get())
        #self.result = self.width_float, self.dpi_int.get()
        #print('result = %f, %i' % self.result)
        # Persist the edited values before tearing the window down.
        self.set_pickle()
        self.save_pickle()
        self.mw.destroy()

    def load_pickle(self):
        # Push pickle values into every entry widget.
        for entry in self.entries:
            entry.load_pickle()

    def set_pickle(self):
        # Pull the (possibly edited) values back out of the widgets.
        for entry in self.entries:
            key, val = entry.get()
            self.pickle[key] = val

    def save_pickle(self):
        rwkpickle.SavePickle(self.pickle, pklpath)

    def __init__(self, title="Lecture Pickle Dialog"):
        self.pickle = rwkpickle.LoadPickle(pklpath)
        self.result = None
        self.mw = Tkinter.Tk()
        self.mw.option_add("*font", ("Arial", 15, "normal"))
        self.mw.geometry("+400+300")
        # NOTE(review): self.labels and self.keys look superseded by
        # self.data below and are never read again here -- confirm & remove.
        self.labels = ['Lecture Path', 'Course Num.', \
                       'Search Pattern', 'Date Stamp', \
                       'Pat', 'Current Slide', \
                       'Outline Slide']
        self.keys = ['lecture_path', 'course_num', \
                     'search_pat', 'date_stamp' , \
                     'pat', 'current_slide', 'outline_slide']
        # One (label, pickle key, Tkinter variable class) tuple per entry row.
        self.data = [('Lecture Path', 'lecture_path', StringVar), \
                     ('Course Num.', 'course_num', StringVar), \
                     ('Search Pattern', 'search_pat', StringVar), \
                     ('Date Stamp', 'date_stamp', StringVar), \
                     ('Pat', 'pat', StringVar), \
                     ('Current Slide', 'current_slide', IntVar), \
                     ('Outline Slide', 'outline_slide', IntVar), \
                     ]
        self.entries = []
        for i, tup in enumerate(self.data):
            label = tup[0]
            key = tup[1]
            varclass = tup[2]
            pickle = pickle_entry(self, self.mw, \
                                  label=label, \
                                  key=key, \
                                  row=i, \
                                  varclass=varclass)
            self.entries.append(pickle)
        N = len(self.data)
        self.mw.title('Pickle Editor')
        self.exit_btn = Tkinter.Button(self.mw,
                                       text = "Exit",
                                       command = self.mw.destroy)
        self.exit_btn.grid(row=N, column=0)
        self.go_btn = Tkinter.Button(self.mw,
                                     text = "Go",
                                     command = self.close)
        self.go_btn.grid(row=N, column=1)
        self.load_pickle()
        self.mw.mainloop()
class reset_lecture_dialog:#(tkSimpleDialog.Dialog):
    """Dialog to reset lecture state.

    Always rewinds ``current_slide`` to 0; optionally (checkboxes) resets the
    outline slide counter and deletes the generated slide files on disk.
    """

    def close(self, *args, **kwargs):
        print('got close event')
        # Rewind to the first slide unconditionally.
        self.pickle['current_slide'] = 0
        #self.width_float = float(self.width_string.get())
        #self.result = self.width_float, self.dpi_int.get()
        #print('result = %f, %i' % self.result)
        if self.var1.get():
            print('reseting outline slide')
            self.reset_outline()
        if self.var2.get():
            print('deleting existing slides')
            self.delete_existing_slides()
        rwkpickle.SavePickle(self.pickle, pklpath)
        self.mw.destroy()

    def reset_outline(self):
        # Zero the outline counter and drop the cached outline settings.
        self.pickle['outline_slide'] = 0
        clear_list = ['outline_pat','outline_dir']
        for key in clear_list:
            # NOTE(review): dict.has_key() exists only in Python 2 (this
            # module is Python 2: Tkinter/tkMessageBox). Use ``key in dict``
            # if the script is ever ported.
            if self.pickle.has_key(key):
                self.pickle.pop(key)

    def _build_pat(self, end='*'):
        # Join the lecture directory with its search pattern plus *end*.
        lp = self.pickle['lecture_path']
        pat = self.pickle['search_pat'] + end
        return os.path.join(lp, pat)

    def build_xcf_pat(self):
        self.xcf_pat = self._build_pat(end='*.xcf')

    def build_delete_pat(self):
        self.delete_pat = self._build_pat(end='*')

    def delete_existing_slides(self):
        # Remove every file matching the lecture's slide glob pattern.
        self.build_delete_pat()
        rwkos.delete_from_glob_pat(self.delete_pat)

    def __init__(self, title="Reset Lecture Dialog"):
        self.result = None
        self.mw = Tkinter.Tk()
        self.mw.option_add("*font", ("Arial", 15, "normal"))
        self.mw.geometry("+300+300")
        self.pickle = rwkpickle.LoadPickle(pklpath)
        #Need to display the number of existing slides and the
        #current outline slide number
        label1 = Tkinter.Label(self.mw, \
                               text='Number of existing slides')
        label1.grid(row=0, column=0, sticky='w')
        self.num_slides = IntVar()
        self.entry1 = Tkinter.Entry(self.mw, \
                                    textvariable=self.num_slides, \
                                    width=5)
        self.entry1.grid(row=0, column=1)
        # Count existing .xcf slides on disk for display.
        self.build_xcf_pat()
        self.existing_slides = glob.glob(self.xcf_pat)
        self.num_slides.set(len(self.existing_slides))
        label2 = Tkinter.Label(self.mw, \
                               text='Outline Slide')
        label2.grid(row=1, column=0, sticky='w')
        self.outline_slide = IntVar()
        self.entry2 = Tkinter.Entry(self.mw, \
                                    textvariable=self.outline_slide, \
                                    width=5)
        self.entry2.grid(row=1, column=1)
        self.outline_slide.set(self.pickle['outline_slide'])
        self.var1 = IntVar()
        check1 = Tkinter.Checkbutton(self.mw, \
                                     text="Reset outline slide", \
                                     variable=self.var1)
        check1.var = self.var1
        check1.grid(row=2, sticky='w')
        self.var2 = IntVar()
        check2 = Tkinter.Checkbutton(self.mw, \
                                     text="Delete existing slides", \
                                     variable=self.var2)
        check2.var = self.var2
        check2.grid(row=3, sticky='w')
        self.go_btn = Tkinter.Button(self.mw,
                                     text = "Go",
                                     command = self.close)
        self.go_btn.bind("<Return>", self.close)
        self.go_btn.grid(row=4)
        self.go_btn.focus_set()
        self.mw.title(title)
        self.mw.mainloop()
if __name__ == "__main__":
    # Alternate dialogs kept for manual testing:
    #app = myWindow()
    #app = width_and_dpi_dialog()
    #app = lecture_pickle_dialog()
    app = reset_lecture_dialog()
| 10,887 | 3,569 |
"""
Constraint functions for grasp sampling
Author: Jeff Mahler
"""
from abc import ABCMeta, abstractmethod
import numpy as np
class GraspConstraintFn(object):
    """
    Base class for grasp-sampling constraint functions.

    Instances are callable; calling one simply delegates to
    :meth:`satisfies_constraints`, which subclasses must implement.
    """
    __metaclass__ = ABCMeta

    def __init__(self, config):
        # keep the full configuration dict around for subclasses
        self._config = config

    def __call__(self, grasp):
        """
        Evaluate a grasp by delegating to :meth:`satisfies_constraints`.

        Parameters
        ----------
        grasp : :obj:`Grasp2D`
            grasp to evaluate

        Returns
        -------
        bool
            True if the grasp satisfies constraints, False otherwise
        """
        return self.satisfies_constraints(grasp)

    @abstractmethod
    def satisfies_constraints(self, grasp):
        """
        Decide whether *grasp* is admissible; implemented by subclasses.

        Parameters
        ----------
        grasp : :obj:`Grasp2D`
            grasp to evaluate

        Returns
        -------
        bool
            True if the grasp satisfies constraints, False otherwise
        """
        pass
class DiscreteApproachGraspConstraintFn(GraspConstraintFn):
    """
    Constrains the grasp approach direction into a discrete set of
    angles from the world z direction.
    """

    def __init__(self, config):
        """
        Parameters
        ----------
        config : dict
            must contain 'max_approach_angle', 'angular_tolerance' and
            'angular_step' (radians), plus 'camera_pose' (a transform
            whose ``.rotation`` maps camera frame to world frame).
        """
        # init superclass
        GraspConstraintFn.__init__(self, config)
        self._max_approach_angle = self._config['max_approach_angle']
        self._angular_tolerance = self._config['angular_tolerance']
        self._angular_step = self._config['angular_step']
        self._T_camera_world = self._config['camera_pose']

    def satisfies_constraints(self, grasp):
        """
        Evaluates whether or not a grasp is valid by evaluating the
        angle between the approach axis and the world z direction.

        Parameters
        ----------
        grasp : :obj:`Grasp2D`
            grasp to evaluate

        Returns
        -------
        bool
            True if the approach angle lies within ``angular_tolerance``
            of one of the discretized angles, False otherwise
        """
        # rotate the approach axis into world coordinates; the angle is
        # measured against the downward (-z) world direction
        axis_world = self._T_camera_world.rotation.dot(grasp.approach_axis)
        angle = np.arccos(-axis_world[2])

        # discretize the admissible range.
        # NOTE(review): np.arange excludes max_approach_angle itself --
        # confirm that the endpoint is deliberately not a valid angle.
        available_angles = np.array([0.0])
        if self._angular_step > 0:
            available_angles = np.arange(start=0.0,
                                         stop=self._max_approach_angle,
                                         step=self._angular_step)

        # accept iff the closest discretized angle is within tolerance
        # (dropped the unused `closest_angle` local; wrap in bool() so a
        # plain Python bool is returned rather than a numpy scalar)
        diff = np.abs(available_angles - angle)
        return bool(diff.min() < self._angular_tolerance)
class GraspConstraintFnFactory(object):
    """Factory mapping a constraint-function type name to an instance."""

    @staticmethod
    def constraint_fn(fn_type, config):
        """Instantiate the constraint function named *fn_type*.

        Returns None for 'none'; raises ValueError for unknown types.
        """
        if fn_type == 'none':
            return None
        if fn_type == 'discrete_approach_angle':
            return DiscreteApproachGraspConstraintFn(config)
        raise ValueError('Grasp constraint function type %s not supported!' %(fn_type))
| 3,159 | 825 |
"""Modelo de pacientes"""
# Django
from django.db import models
# Utilidades
from apis.utils.models import ModelUtil
class Memberships(ModelUtil):
    """Membership linking a user/patient to a family group.

    (Translated from Spanish.) A patient may have an associated family
    group; the patient who creates the group becomes the family holder
    ("titular").  Ordinary members of the group cannot add further
    relatives -- only the holder may add, update or remove members of
    his family group.

    NOTE(review): the original docstring called this the "patient
    model", but the class clearly models a group membership -- confirm
    the intended naming.
    """
    # The member's user account.
    user = models.ForeignKey('users.User', on_delete=models.CASCADE)
    # Family group this membership belongs to.
    family_group = models.ForeignKey('family_group.FamilyGroup', on_delete=models.CASCADE)
    # Patient record associated with this membership.
    pacient = models.ForeignKey('pacient.Pacient', on_delete=models.CASCADE)
    # True for the family holder ("Titular"), who administers the group.
    is_admin = models.BooleanField('Titular', default=False)
    # Number of affiliates added so far.
    affiliated = models.PositiveIntegerField(default=0)
    # How many more affiliates may still be added.
    remaining_affiliates = models.PositiveIntegerField(default=0)
    # User who performed the affiliation (optional).
    affiliated_by = models.ForeignKey(
        'users.User',
        null=True,
        on_delete=models.CASCADE,
        related_name='affiliated_by'
    )

    def __str__(self):
        """Return the username and a note that it belongs to a family group."""
        return f'{self.user.username} hace parte del grupo familiar'
| 1,198 | 364 |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class AmenitiesConfig(AppConfig):
    """Django application configuration for the amenities app."""
    name = 'django_amenities'
    verbose_name = _("Amenities App")
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from __future__ import absolute_import
import os
from aiida.cmdline.baseclass import VerdiCommand
# Triples of (module path, name to import from it, alias bound in the
# interactive shell's namespace); consumed by Shell.get_start_namespace.
default_modules_list = [
    ("aiida.orm", "Node", "Node"),
    ("aiida.orm.utils", "load_node", "load_node"),
    ("aiida.orm", "Calculation", "Calculation"),
    ("aiida.orm", "JobCalculation", "JobCalculation"),
    ("aiida.orm.code", "Code", "Code"),
    ("aiida.orm", "Data", "Data"),
    ("aiida.orm", "CalculationFactory", "CalculationFactory"),
    ("aiida.orm", "DataFactory", "DataFactory"),
    ("aiida.orm", "WorkflowFactory", "WorkflowFactory"),
    ("aiida.orm.computer", "Computer", "Computer"),
    ("aiida.orm.group", "Group", "Group"),
    ("aiida.orm.workflow", "Workflow", "Workflow"),
    ("aiida.orm", "load_workflow", "load_workflow"),
    ("aiida.orm.querybuilder", "QueryBuilder", "QueryBuilder"),
    # ("aiida.backends.djsite.db", "models", "models"),
    # ("aiida.backends.sqlalchemy", "models", "models"),
]
class Shell(VerdiCommand):
    """
    Run the interactive shell with the AiiDA environment loaded.
    This command opens an ipython shell with the AiiDA environment loaded.
    """
    # interactive interpreters tried, in order of preference
    shells = ['ipython', 'bpython']

    def get_start_namespace(self):
        """Load all default and custom modules"""
        from aiida import load_dbenv, is_dbenv_loaded
        from aiida.backends import settings
        if not is_dbenv_loaded():
            load_dbenv(profile=settings.AIIDADB_PROFILE)
        from aiida.common.setup import get_property
        user_ns = {}
        # load default modules
        for app_mod, model_name, alias in default_modules_list:
            user_ns[alias] = getattr(__import__(app_mod, {}, {},
                                                model_name), model_name)
        # load custom modules
        # 'verdishell.modules' is a colon-separated list of dotted paths;
        # rpartition('.') splits each into (module, '.', attribute) and
        # entries without a dot (e[1] != '.') are discarded
        custom_modules_list = [(str(e[0]), str(e[2])) for e in
                               [p.rpartition('.') for p in get_property(
                                   'verdishell.modules', default="").split(
                                   ':')]
                               if e[1] == '.']
        for app_mod, model_name in custom_modules_list:
            try:
                user_ns[model_name] = getattr(
                    __import__(app_mod, {}, {}, model_name), model_name)
            except AttributeError:
                # if the module does not exist, we ignore it
                pass
        return user_ns

    def _ipython_pre_011(self):
        """Start IPython pre-0.11"""
        from IPython.Shell import IPShell
        user_ns = self.get_start_namespace()
        if user_ns:
            shell = IPShell(argv=[], user_ns=user_ns)
        else:
            shell = IPShell(argv=[])
        shell.mainloop()

    def _ipython_pre_100(self):
        """Start IPython pre-1.0.0"""
        from IPython.frontend.terminal.ipapp import TerminalIPythonApp
        app = TerminalIPythonApp.instance()
        app.initialize(argv=[])
        user_ns = self.get_start_namespace()
        if user_ns:
            app.shell.user_ns.update(user_ns)
        app.start()

    def _ipython(self):
        """Start IPython >= 1.0"""
        from IPython import start_ipython
        user_ns = self.get_start_namespace()
        if user_ns:
            start_ipython(argv=[], user_ns=user_ns)
        else:
            start_ipython(argv=[])

    def ipython(self):
        """Start any version of IPython"""
        # try the newest API first; each older entry point raises
        # ImportError on modern IPython, so fall through in order
        for ip in (
                self._ipython, self._ipython_pre_100, self._ipython_pre_011):
            try:
                ip()
            except ImportError as ie:
                pass
            else:
                return
        # no IPython, raise ImportError
        raise ImportError("No IPython")

    def bpython(self):
        """Start a bpython shell with the AiiDA namespace loaded."""
        import bpython
        user_ns = self.get_start_namespace()
        if user_ns:
            bpython.embed(user_ns)
        else:
            bpython.embed()

    def run_shell(self, shell=None):
        """Run the requested shell, or the first importable one."""
        available_shells = [shell] if shell else self.shells
        for shell in available_shells:
            try:
                return getattr(self, shell)()
            except ImportError:
                pass
        raise ImportError

    def handle_noargs(self, *args):
        """Parse command-line options and launch the chosen interpreter.

        Falls back to the plain `code.interact` interpreter (with tab
        completion and $PYTHONSTARTUP/.pythonrc.py support) when no rich
        shell is available or --plain was requested.
        """
        import argparse
        parser = argparse.ArgumentParser(prog='verdi shell')
        parser.add_argument('--plain', dest='plain', action='store_true',
                            help='Tells Django to use plain Python, not '
                                 'IPython or bpython.)')
        parser.add_argument('--no-startup', action='store_true',
                            dest='no_startup',
                            help='When using plain Python, ignore the '
                                 'PYTHONSTARTUP environment variable and '
                                 '~/.pythonrc.py script.')
        parser.add_argument('-i', '--interface', action='store',
                            choices=self.shells, dest='interface',
                            help='Specify an interactive interpreter '
                                 'interface. Available options: "ipython" '
                                 'and "bpython"')
        parsed_args = parser.parse_args(args)
        use_plain = parsed_args.plain
        no_startup = parsed_args.no_startup
        interface = parsed_args.interface
        try:
            if use_plain:
                # Don't bother loading IPython, because the user wants plain Python.
                raise ImportError
            self.run_shell(shell=interface)
        except ImportError:
            import code
            # Set up a dictionary to serve as the environment for the shell, so
            # that tab completion works on objects that are imported at runtime.
            # See ticket 5082.
            imported_objects = {}
            try:  # Try activating rlcompleter, because it's handy.
                import readline
            except ImportError:
                pass
            else:
                # We don't have to wrap the following import in a 'try', because
                # we already know 'readline' was imported successfully.
                import rlcompleter
                readline.set_completer(
                    rlcompleter.Completer(imported_objects).complete)
                readline.parse_and_bind("tab:complete")
            # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
            # conventions and get $PYTHONSTARTUP first then .pythonrc.py.
            if not no_startup:
                for pythonrc in (
                        os.environ.get("PYTHONSTARTUP"), '~/.pythonrc.py'):
                    if not pythonrc:
                        continue
                    pythonrc = os.path.expanduser(pythonrc)
                    if not os.path.isfile(pythonrc):
                        continue
                    try:
                        with open(pythonrc) as handle:
                            exec (compile(handle.read(), pythonrc, 'exec'),
                                  imported_objects)
                    except NameError:
                        pass
            code.interact(local=imported_objects)

    def run(self, *args):
        """Entry point invoked by the verdi command framework."""
        # pass_to_django_manage([execname, 'customshell'] + list(args))
        self.handle_noargs(*args)

    def complete(self, subargs_idx, subargs):
        """Tab-completion hook; prints nothing to disable further completion."""
        # disable further completion
        print ""
| 8,051 | 2,168 |
import os
import tensorflow as tf
import numpy as np
C = 1e-13
# Predefined function to build a feedforward neural network
def build_mlp(input_placeholder,
              output_size,
              scope,
              n_layers=2,
              size=500,
              activation=tf.tanh,
              output_activation=None
              ):
    """Build a feedforward MLP on top of *input_placeholder*.

    Stacks ``n_layers`` dense layers of width ``size`` (each followed by
    ``activation``) inside variable scope *scope*, then a final dense
    layer of ``output_size`` units using ``output_activation``.
    Returns the output tensor.
    """
    hidden = input_placeholder
    with tf.variable_scope(scope):
        for _layer in range(n_layers):
            hidden = tf.layers.dense(hidden, size, activation=activation)
        output = tf.layers.dense(hidden, output_size,
                                 activation=output_activation)
    return output
class NNDynamicsModel():
    """Feedforward NN dynamics model for model-based RL.

    The network consumes normalized [state, action] vectors and is
    trained to regress normalized state deltas (s_{t+1} - s_t).
    """

    def __init__(self,
                 env,
                 n_layers,
                 size,
                 activation,
                 output_activation,
                 normalization,
                 batch_size,
                 iterations,
                 learning_rate,
                 sess
                 ):
        """Build the TF graph.

        `normalization` is the 6-tuple (mean_obs, std_obs, mean_deltas,
        std_deltas, mean_actions, std_actions) of dataset statistics.
        """
        (self.mean_obs, self.std_obs, self.mean_deltas, self.std_deltas,
         self.mean_actions, self.std_actions) = normalization
        self.obs_dim = env.observation_space.shape[0]
        self.actions_dim = env.action_space.shape[0]
        # network input: concatenated [state, action]; target: state delta
        self.in_states_acts = tf.placeholder(
            tf.float32, [None, self.obs_dim + self.actions_dim],
            name='states_actions')
        self.out_states_deltas = tf.placeholder(
            tf.float32, [None, self.obs_dim], name='states_deltas')
        self.epochs = iterations
        self.gstep = tf.Variable(0, dtype=tf.int32, trainable=False,
                                 name='global_step')
        self.pred_delt = build_mlp(self.in_states_acts, self.obs_dim,
                                   "pred_state_delta", n_layers, size,
                                   activation, output_activation)
        self.batch_size = batch_size
        self.lr = learning_rate
        self.loss = tf.losses.mean_squared_error(self.out_states_deltas,
                                                 self.pred_delt)
        self.opt = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
        self.sess = sess

    def fit(self, data):
        """
        a function to take in a dataset of (unnormalized)states, (unnormalized)actions, (unnormalized)next_states and fit the dynamics model going from normalized states, normalized actions to normalized state differences (s_t+1 - s_t)
        """
        obs = np.vstack([path['observations'] for path in data])
        actions = np.vstack([path['actions'] for path in data])
        next_obs = np.vstack([path['next_observations'] for path in data])

        norm_obs = (obs - self.mean_obs) / (self.std_obs + C)
        norm_actions = (actions - self.mean_actions) / (self.std_actions + C)
        # BUG FIX: the regression target is the normalized *delta*
        # (next_obs - obs), not the normalized next state.
        norm_delta = (next_obs - obs - self.mean_deltas) / (self.std_deltas + C)
        # BUG FIX: concatenate along the feature axis (hstack); vstack
        # produced rows of width obs_dim that cannot feed the
        # [obs_dim + actions_dim] placeholder.
        obs_actions = np.hstack((norm_obs, norm_actions))

        n_samples = obs.shape[0]
        # BUG FIX: ceil instead of //+1 avoids an empty trailing batch
        # when n_samples is an exact multiple of batch_size.
        n_batches = int(np.ceil(n_samples / self.batch_size))
        for ep in range(self.epochs):
            # BUG FIX: np.random.choice(n) returns a single scalar; a
            # full permutation is needed to shuffle the data set.
            perm_ids = np.random.permutation(n_samples)
            tl = 0.
            for st in range(n_batches):
                start_id = st * self.batch_size
                batch_ids = perm_ids[start_id:start_id + self.batch_size]
                # BUG FIX: fancy-index with the id array; the original
                # open-ended slice a[ids:] raises a TypeError.
                in_batch = obs_actions[batch_ids]
                out_batch = norm_delta[batch_ids]
                l, _ = self.sess.run(
                    [self.loss, self.opt],
                    feed_dict={self.in_states_acts: in_batch,
                               self.out_states_deltas: out_batch})
                tl += l
            print("Epoch {0}/{1}: Train_loss = {2:.6f}".format(
                ep, self.epochs, tl / n_batches))

    def predict(self, states, actions):
        """ a function to take in a batch of (unnormalized) states and (unnormalized) actions and return the (unnormalized) next states as predicted by using the model """
        norm_obs = (states - self.mean_obs) / (self.std_obs + C)
        norm_actions = (actions - self.mean_actions) / (self.std_actions + C)
        # concatenate along the feature axis, matching training layout
        obs_actions = np.hstack((norm_obs, norm_actions))
        # BUG FIX: run the tensor directly -- sess.run([t]) returns a
        # one-element list, which corrupted the broadcasting below.
        pred_deltas = self.sess.run(
            self.pred_delt, feed_dict={self.in_states_acts: obs_actions})
        # unnormalize the predicted delta and add it to the current states
        return states + self.mean_deltas + pred_deltas * self.std_deltas
| 4,052 | 1,295 |
#!/usr/bin/env python
# Copyright (C) 2006 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""\
===================================================
Detecting the topology of a running Kamaelia system
===================================================
The Introspector component introspects the current local topology of a Kamaelia
system - that is what components there are and how they are wired up.
It continually outputs any changes that occur to the topology.
Example Usage
-------------
Introspect and display whats going on inside the system::
MyComplexSystem().activate()
Pipeline( Introspector(),
              text_to_token_lists(),
AxonVisualiser(),
)
How does it work?
-----------------
Once activated, this component introspects the current local topology of a
Kamaelia system.
Local? This component examines its scheduler to find components and postmen.
It then examines them to determine their inboxes and outboxes and the linkages
between them. In effect, it determines the current topology of the system.
If this component is not active, then it will see no scheduler and will report
nothing.
What is output is how the topology changes. Immediately after activation, the
topology is assumed to be empty, so the first set of changes describes adding
nodes and linkages to the topology to build up the current state of it.
Subsequent output just describes the changes - adding or deleting linkages and
nodes as appropriate.
Nodes in the topology represent components and postboxes. A linkage between
a component node and a postbox node expresses the fact that that postbox belongs
to that component. A linkage between two postboxes represents a linkage in the
Axon system, from one component to another.
This topology change data is output as string containing one or more lines. It
is output through the "outbox" outbox. Each line may be one of the following:
* "DEL ALL"
      - the first thing sent immediately after activation - to ensure that
        the receiver of this data understands that we are starting from nothing
* "ADD NODE <id> <name> randompos component"
* "ADD NODE <id> <name> randompos inbox"
* "ADD NODE <id> <name> randompos outbox"
- an instruction to add a node to the topology, representing a component,
inbox or outbox. <id> is a unique identifier. <name> is a 'friendly'
textual label for the node.
* "DEL NODE <id>"
- an instruction to delete a node, specified by its unique id
* "ADD LINK <id1> <id2>"
- an instruction to add a link between the two identified nodes. The link is
deemed to be directional, from <id1> to <id2>
* "DEL LINK <id1> <id2>"
- an instruction to delete any link between the two identified nodes. Again,
the directionality is from <id1> to <id2>.
the <id> and <name> fields may be encapsulated in double quote marks ("). This
will definitely be so if they contain space characters.
If there are no topology changes then nothing is output.
This component ignores anything arriving at its "inbox" inbox.
If a shutdownMicroprocess message is received on the "control" inbox, it is sent
on to the "signal" outbox and the component will terminate.
"""
from Axon.Introspector import Introspector as _AxonIntrospector
class Introspector(_AxonIntrospector):
    """Kamaelia-namespace alias of Axon's Introspector; adds no behavior."""
    pass
__kamaelia_components__ = ( Introspector, )
if __name__ == '__main__':
    # Manual smoke test: wire an Introspector's output into a console
    # echoer, then run the Axon scheduler so the topology changes are
    # printed to the terminal.
    import Axon
    i = Introspector()
    i.activate()
    from Kamaelia.Util.Console import ConsoleEchoer
    e = ConsoleEchoer()
    e.activate()
    i.link((i,"outbox"), (e, "inbox"))
    print "You should see the Introspector find that it and a ConsoleEchoer component exist."
    print "We both have inbox, control, signal and outbox postboxes"
    print "The Introspector's outbox is linked to the ConsoleEchoer's inbox"
    print
    Axon.Scheduler.scheduler.run.runThreads(slowmo=0)
| 4,881 | 1,442 |
import maya
from py2neo.ogm import Node
from app.graph_context import GraphContext
from .cypher_queries import get_product_by_id_query
class ProductService():
    """Read-side operations against Product nodes in the graph database."""

    def fetch(self, id):
        """Return the product matching *id*, or None if the query fails."""
        try:
            value = GraphContext().exec_cypher(get_product_by_id_query(id), id=id)
            print(f'{value}')
            return value
        except Exception as error:
            print(f'X exception: {error}')
            return None

    def fetch_all(self, limit=100):
        """Return up to *limit* Product nodes ordered by name ([] on failure)."""
        try:
            matcher = GraphContext().get_node_matcher
            products = matcher.match('Product').order_by("_.name").limit(limit)
            return list(products)
        except Exception as error:
            print(f'X exception: {error}')
            return []
| 1,017 | 277 |
from .parser import parser
def parse(text):
    """Parse *text* by delegating to the package's ``parser`` callable."""
    return parser(text)
__all__ = [
'parse'
]
__version__ = '1.1.0'
| 119 | 49 |
import pytest
from adlib.adversaries.feature_deletion import AdversaryFeatureDeletion
from sklearn import svm
from adlib.learners import SimpleLearner
from data_reader.dataset import EmailDataset
from data_reader.operations import load_dataset
@pytest.fixture
def data():
    """Load the 100-instance debug email set, split 60/40 into train/test."""
    dataset = EmailDataset(path='./data_reader/data/test/100_instance_debug.csv', raw=False)
    train_split, test_split = dataset.split({'train': 60, 'test': 40})
    return {'training_data': load_dataset(train_split),
            'testing_data': load_dataset(test_split)}
@pytest.fixture
def learner(data):
    """A linear-SVM SimpleLearner trained on the training split."""
    svm_model = svm.SVC(probability=True, kernel='linear')
    trained = SimpleLearner(svm_model, data['training_data'])
    trained.train()
    return trained
@pytest.fixture
def feature_deletion(learner):
    """An AdversaryFeatureDeletion attack wrapping the trained learner."""
    return AdversaryFeatureDeletion(learner=learner)
def test_change_instance(feature_deletion, data):
    """Deleting features must not alter a malicious instance's label."""
    malicious = next((x for x in data['testing_data'] if x.get_label() == 1), None)
    transformed = feature_deletion.change_instance(malicious)
    assert malicious.label == transformed.label
def test_set_params(feature_deletion):
    """set_params values must round-trip through get_available_params."""
    feature_deletion.set_params({'num_deletion': 50, 'all_malicious': True})
    # BUG FIX: the local was named `dict`, shadowing the builtin.
    params = feature_deletion.get_available_params()
    assert params['num_deletion'] == 50
    # `is True` instead of `== True`: asserts a real boolean, per PEP 8
    assert params['all_malicious'] is True
def test_attack(feature_deletion, data):
    """With default params the attack leaves every feature unchanged."""
    attacked = feature_deletion.attack(data['testing_data'])[0]
    original = data['testing_data'][0]
    feature_count = original.get_feature_vector().get_feature_count()
    for idx in range(feature_count):
        assert (attacked.get_feature_vector().get_feature(idx)
                == original.get_feature_vector().get_feature(idx))
def test_attack_different(feature_deletion, data):
    """Deleting 100 features changes the sparse vector's structure."""
    feature_deletion.set_params({'num_deletion': 100, 'all_malicious': False})
    attacked = feature_deletion.attack(data['testing_data'])[0]
    original = data['testing_data'][0]
    assert (attacked.get_feature_vector().indptr[1]
            != original.get_feature_vector().indptr[1])
| 2,041 | 682 |
import plistlib
import os
import numpy as np
from PIL import Image
def read_plist(plist_path):
    """Parse the (binary or XML) plist at *plist_path* into Python objects."""
    with open(plist_path, "rb") as handle:
        parsed = plistlib.load(handle)
    return parsed
def to_list(x):
    """Split a plist tuple string like '{a,b}' into ['a', 'b'].

    Every brace is stripped (including nested ones), then the remainder
    is split on commas.
    """
    return x.translate(str.maketrans('', '', '{}')).split(',')
def cut_plist(output, texture, save_dir):
    """Cut individual sprites out of *texture* and save each as a PNG.

    Parameters:
        output: mapping of sprite name -> frame metadata; each entry is
            read for 'textureRect', 'textureRotated', 'spriteOffset' and
            'spriteSourceSize' keys (looks like TexturePacker-style
            plist frames -- TODO confirm the exact plist flavor).
        texture: PIL.Image containing the packed sprite atlas.
        save_dir: directory receiving one <name>.png per sprite
            (created if missing).
    """
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    for key in output:
        data = output[key]
        rect = to_list(data["textureRect"])
        rotated = data["textureRotated"]
        x = int(rect[0])
        y = int(rect[1])
        width = int(rect[2])
        height = int(rect[3])
        # rotated frames are stored sideways in the atlas, so swap the
        # rect's width/height before cropping
        if rotated:
            width, height = height, width
        box = (x, y, x + width, y + height)
        newSize = np.array([width, height])
        # flip the x offset's sign; presumably converting from the
        # plist's y-up convention to PIL's y-down -- TODO confirm
        offset = np.array(to_list(data["spriteOffset"])).astype("float")*(-1,1)
        srcSize = np.array(to_list(data["spriteSourceSize"])).astype("float")
        # center the source-size window inside the cropped frame,
        # shifted by the sprite offset
        offset = ((newSize-srcSize)/2+offset).astype("int")
        sprite = texture.crop(box).crop((*offset,*(offset+srcSize)))
        if rotated:
            # undo the atlas rotation
            sprite = sprite.transpose(Image.ROTATE_90)
        save_path = os.path.splitext(os.path.join(save_dir, key))[0] + ".png"
        sprite.save(save_path)
| 1,225 | 430 |
# -*- coding: utf-8 -*-
from aiida.scheduler.plugins.pbspro import *
import unittest
#import logging
import uuid
text_qstat_f_to_test = """Job Id: 68350.mycluster
Job_Name = cell-Qnormal
Job_Owner = usernum1@mycluster.cluster
job_state = Q
queue = Q_express
server = mycluster
Checkpoint = u
ctime = Tue Apr 9 15:01:47 2013
Error_Path = mycluster.cluster:/home/usernum1/scratch/cptest/scaletest/PTOs
caletest/testjob.err
Hold_Types = n
Join_Path = n
Keep_Files = n
Mail_Points = a
mtime = Mon Apr 22 13:13:53 2013
Output_Path = mycluster.cluster:/home/usernum1/scratch/cptest/scaletest/PTO
scaletest/testjob.out
Priority = 0
qtime = Tue Apr 9 18:26:32 2013
Rerunable = False
Resource_List.mpiprocs = 15
Resource_List.ncpus = 240
Resource_List.nodect = 15
Resource_List.place = free
Resource_List.select = 15:ncpus=16
Resource_List.walltime = 01:00:00
substate = 10
Variable_List = PBS_O_SYSTEM=Linux,PBS_O_SHELL=/bin/bash,
PBS_O_HOME=/home/usernum1,PBS_O_LOGNAME=usernum1,
PBS_O_WORKDIR=/home/usernum1/scratch/cptest/scaletest/PTOscaletest,
PBS_O_LANG=en_US.UTF-8,
PBS_O_PATH=/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/loc
al/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/b
in:/opt/software/python/3.3.0/bin:/opt/software/bin,
PBS_O_MAIL=/var/spool/mail/usernum1,PBS_O_QUEUE=P_share_queue,
PBS_O_HOST=mycluster.cluster
comment = Not Running: Node is in an ineligible state: offline
etime = Tue Apr 9 18:26:32 2013
Submit_arguments = job-PTO64cell-Qnormal.6.15.1.64.4
project = _pbs_project_default
Job Id: 68351.mycluster
Job_Name = cell-Qnormal
Job_Owner = usernum1@mycluster.cluster
job_state = Q
queue = Q_express
server = mycluster
Checkpoint = u
ctime = Tue Apr 9 15:01:47 2013
Error_Path = mycluster.cluster:/home/usernum1/scratch/cptest/scaletest/PTOs
caletest/testjob.err
Hold_Types = n
Join_Path = n
Keep_Files = n
Mail_Points = a
mtime = Mon Apr 22 13:13:53 2013
Output_Path = mycluster.cluster:/home/usernum1/scratch/cptest/scaletest/PTO
scaletest/testjob.out
Priority = 0
qtime = Tue Apr 9 18:26:32 2013
Rerunable = False
Resource_List.mpiprocs = 15
Resource_List.ncpus = 240
Resource_List.nodect = 15
Resource_List.place = free
Resource_List.select = 15:ncpus=16
Resource_List.walltime = 01:00:00
substate = 10
Variable_List = PBS_O_SYSTEM=Linux,PBS_O_SHELL=/bin/bash,
PBS_O_HOME=/home/usernum1,PBS_O_LOGNAME=usernum1,
PBS_O_WORKDIR=/home/usernum1/scratch/cptest/scaletest/PTOscaletest,
PBS_O_LANG=en_US.UTF-8,
PBS_O_PATH=/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/loc
al/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/b
in:/opt/software/python/3.3.0/bin:/opt/software/bin,
PBS_O_MAIL=/var/spool/mail/usernum1,PBS_O_QUEUE=P_share_queue,
PBS_O_HOST=mycluster.cluster
comment = Not Running: Node is in an ineligible state: offline
etime = Tue Apr 9 18:26:32 2013
Submit_arguments = job-PTO64cell-Qnormal.6.15.1.64.8
project = _pbs_project_default
Job Id: 69301.mycluster
Job_Name = Cu-dbp
Job_Owner = user02@mycluster.cluster
resources_used.cpupercent = 6384
resources_used.cput = 4090:56:03
resources_used.mem = 13378420kb
resources_used.ncpus = 64
resources_used.vmem = 9866188kb
resources_used.walltime = 64:26:16
job_state = R
queue = P_lsu
server = mycluster
Account_Name = lsu
Checkpoint = u
ctime = Wed Apr 10 17:10:29 2013
depend = afterok:69299.mycluster@mycluster.cluster,
beforeok:69302.mycluster@mycluster.cluster
Error_Path = mycluster.cluster:/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN7/C
u-dbp.e69301
exec_host = b141/0*16+b142/0*16+b143/0*16+b144/0*16
exec_vnode = (b141:ncpus=16)+(b142:ncpus=16)+(b143:ncpus=16)+(b144:ncpus=16
)
Hold_Types = n
Join_Path = oe
Keep_Files = n
Mail_Points = a
mtime = Sat Apr 20 01:37:01 2013
Output_Path = mycluster.cluster:/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN7/
Cu-dbp.o69301
Priority = 0
qtime = Wed Apr 10 17:10:29 2013
Rerunable = False
Resource_List.mpiprocs = 4
Resource_List.ncpus = 64
Resource_List.nodect = 4
Resource_List.place = excl
Resource_List.select = 4:ncpus=16
Resource_List.walltime = 72:00:00
stime = Sat Apr 20 01:36:59 2013
session_id = 118473
Shell_Path_List = /bin/tcsh
jobdir = /home/user02
substate = 42
Variable_List = SSH_ASKPASS=/usr/libexec/openssh/gnome-ssh-askpass,
PERL_BADLANG=0,KDE_IS_PRELINKED=1,PBS_O_HOME=/home/user02,
module=() { eval `/usr/bin/modulecmd bash $*`,},
LESSOPEN=|/usr/bin/lesspipe.sh %s,PBS_O_LOGNAME=user02,
SSH_CLIENT=128.178.54.94 46714 22,CVS_RSH=ssh,PBS_O_LANG=C,USER=user02,
HOME=/home/user02,LIBGL_ALWAYS_INDIRECT=yes,
PATH=/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/local/bin
:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/bin:/op
t/software/python/3.3.0/bin:/opt/software/bin,
LD_LIBRARY_PATH=/opt/software/python/3.3.0/lib,
SSH_CONNECTION=128.178.54.94 46714 128.178.209.70 22,LANG=C,
QTLIB=/usr/lib64/qt-3.3/lib,TERM=xterm,SHELL=/bin/bash,
QTINC=/usr/lib64/qt-3.3/include,G_BROKEN_FILENAMES=1,HISTSIZE=1000,
PBS_O_WORKDIR=/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN7,
PBS_O_PATH=/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/loc
al/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/b
in:/opt/software/python/3.3.0/bin:/opt/software/bin,
MANPATH=/opt/xcat/share/man:,XCATROOT=/opt/xcat,
MODULESHOME=/usr/share/Modules,PBS_O_SYSTEM=Linux,MSM_PRODUCT=MSM,
HOST=mycluster,MAIL=/var/spool/mail/user02,
PBS_O_MAIL=/var/spool/mail/user02,_=/opt/pbs/default/bin/qsub,
MODULEPATH=/etc/modulefiles:/opt/software/modulefiles:/opt/software/cs
e-software/modulefiles,KDEDIRS=/usr,PBS_O_SHELL=/bin/bash,
SSH_TTY=/dev/pts/55,OLDPWD=/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN6,
LOADEDMODULES=,HISTCONTROL=ignoredups,SHLVL=1,
PWD=/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN7,HOSTNAME=mycluster,
MSM_HOME=/usr/local/MegaRAID Storage Manager,LOGNAME=user02,
PBS_O_HOST=mycluster.cluster
comment = Job run at Sat Apr 20 at 01:36 on (b141:ncpus=16)+(b142:ncpus=16)
+(b143:ncpus=16)+(b144:ncpus=16)
etime = Sat Apr 20 01:36:59 2013
Submit_arguments = job.sh
project = _pbs_project_default
Job Id: 69302.mycluster
Job_Name = Cu-dbp
Job_Owner = user02@mycluster.cluster
job_state = H
queue = P_lsu
server = mycluster
Account_Name = lsu
Checkpoint = u
ctime = Wed Apr 10 17:11:21 2013
depend = afterok:69301.mycluster@mycluster.cluster
Error_Path = mycluster.cluster:/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN8/C
u-dbp.e69302
Hold_Types = s
Join_Path = oe
Keep_Files = n
Mail_Points = a
mtime = Wed Apr 10 17:11:21 2013
Output_Path = mycluster.cluster:/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN8/
Cu-dbp.o69302
Priority = 0
qtime = Wed Apr 10 17:11:21 2013
Rerunable = False
Resource_List.mpiprocs = 4
Resource_List.ncpus = 64
Resource_List.nodect = 4
Resource_List.place = excl
Resource_List.select = 4:ncpus=16
Resource_List.walltime = 72:00:00
Shell_Path_List = /bin/tcsh
substate = 22
Variable_List = SSH_ASKPASS=/usr/libexec/openssh/gnome-ssh-askpass,
PERL_BADLANG=0,KDE_IS_PRELINKED=1,PBS_O_HOME=/home/user02,
module=() { eval `/usr/bin/modulecmd bash $*`,},
LESSOPEN=|/usr/bin/lesspipe.sh %s,PBS_O_LOGNAME=user02,
SSH_CLIENT=128.178.54.94 46714 22,CVS_RSH=ssh,PBS_O_LANG=C,USER=user02,
HOME=/home/user02,LIBGL_ALWAYS_INDIRECT=yes,
PATH=/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/local/bin
:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/bin:/op
t/software/python/3.3.0/bin:/opt/software/bin,
LD_LIBRARY_PATH=/opt/software/python/3.3.0/lib,
SSH_CONNECTION=128.178.54.94 46714 128.178.209.70 22,LANG=C,
QTLIB=/usr/lib64/qt-3.3/lib,TERM=xterm,SHELL=/bin/bash,
QTINC=/usr/lib64/qt-3.3/include,G_BROKEN_FILENAMES=1,HISTSIZE=1000,
PBS_O_WORKDIR=/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN8,
PBS_O_PATH=/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/loc
al/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/b
in:/opt/software/python/3.3.0/bin:/opt/software/bin,
MANPATH=/opt/xcat/share/man:,XCATROOT=/opt/xcat,
MODULESHOME=/usr/share/Modules,PBS_O_SYSTEM=Linux,MSM_PRODUCT=MSM,
HOST=mycluster,MAIL=/var/spool/mail/user02,
PBS_O_MAIL=/var/spool/mail/user02,_=/opt/pbs/default/bin/qsub,
MODULEPATH=/etc/modulefiles:/opt/software/modulefiles:/opt/software/cs
e-software/modulefiles,KDEDIRS=/usr,PBS_O_SHELL=/bin/bash,
SSH_TTY=/dev/pts/55,OLDPWD=/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN7,
LOADEDMODULES=,HISTCONTROL=ignoredups,SHLVL=1,
PWD=/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN8,HOSTNAME=mycluster,
MSM_HOME=/usr/local/MegaRAID Storage Manager,LOGNAME=user02,
PBS_O_HOST=mycluster.cluster
Submit_arguments = job.sh
project = _pbs_project_default
Job Id: 74164.mycluster
Job_Name = u-100-l-96.job
Job_Owner = user3@mycluster.cluster
resources_used.cpupercent = 3889
resources_used.cput = 343:11:42
resources_used.mem = 1824176kb
resources_used.ncpus = 32
resources_used.vmem = 3796376kb
resources_used.walltime = 10:45:13
job_state = R
queue = Q_normal
server = mycluster
Checkpoint = u
ctime = Fri Apr 12 15:21:55 2013
depend = afterany:74163.mycluster@mycluster.cluster,
beforeany:74165.mycluster@mycluster.cluster
Error_Path = mycluster.cluster:/scratch/user3/ubiquitin/100gL/starting-from
-left/production/u-100-l-96.job.e74164
exec_host = b270/0*16+b275/0*16
exec_vnode = (b270:ncpus=16)+(b275:ncpus=16)
Hold_Types = n
Join_Path = oe
Keep_Files = n
Mail_Points = abe
Mail_Users = enrico.user3@epfl.ch
mtime = Mon Apr 22 07:17:36 2013
Output_Path = mycluster.cluster:/scratch/user3/ubiquitin/100gL/starting-fro
m-left/production/u-100-l-96.job.o74164
Priority = 0
qtime = Fri Apr 12 15:21:55 2013
Rerunable = False
Resource_List.mpiprocs = 32
Resource_List.ncpus = 32
Resource_List.nodect = 2
Resource_List.place = excl
Resource_List.select = 2:ncpus=16:mpiprocs=16
Resource_List.walltime = 24:00:00
stime = Mon Apr 22 07:17:36 2013
session_id = 14147
jobdir = /home/user3
substate = 42
Variable_List = PBS_O_SYSTEM=Linux,PBS_O_SHELL=/bin/bash,
PBS_O_HOME=/home/user3,PBS_O_LOGNAME=user3,
PBS_O_WORKDIR=/scratch/user3/ubiquitin/100gL/starting-from-left/produc
tion,PBS_O_LANG=en_US.utf8,
PBS_O_PATH=/opt/pbs/default/sbin/:/home/bovigny/bin:/opt/xcat/bin:/opt
/xcat/sbin:/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/loca
l/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/bi
n:/opt/software/python/3.3.0/bin:/opt/software/bin:/opt/pbs/default/bin
:/opt/software/python/3.3.0/bin:/opt/software/bin,
PBS_O_MAIL=/var/spool/mail/user3,PBS_O_QUEUE=P_share_queue,
PBS_O_HOST=mycluster.cluster
comment = Job run at Mon Apr 22 at 07:17 on (b270:ncpus=16)+(b275:ncpus=16)
etime = Mon Apr 22 07:17:34 2013
Submit_arguments = -W depend=afterany:74163 u-100-l-96.job
project = _pbs_project_default
Job Id: 74165.mycluster
Job_Name = u-100-l-97.job
Job_Owner = user3@mycluster.cluster
job_state = H
queue = Q_normal
server = mycluster
Checkpoint = u
ctime = Fri Apr 12 15:22:01 2013
depend = afterany:74164.mycluster@mycluster.cluster,
beforeany:74166.mycluster@mycluster.cluster
Error_Path = mycluster.cluster:/scratch/user3/ubiquitin/100gL/starting-from
-left/production/u-100-l-97.job.e74165
Hold_Types = s
Join_Path = oe
Keep_Files = n
Mail_Points = abe
Mail_Users = enrico.user3@epfl.ch
mtime = Fri Apr 12 15:22:07 2013
Output_Path = mycluster.cluster:/scratch/user3/ubiquitin/100gL/starting-fro
m-left/production/u-100-l-97.job.o74165
Priority = 0
qtime = Fri Apr 12 15:22:01 2013
Rerunable = False
Resource_List.mpiprocs = 32
Resource_List.ncpus = 32
Resource_List.nodect = 2
Resource_List.place = excl
Resource_List.select = 2:ncpus=16:mpiprocs=16
Resource_List.walltime = 24:00:00
substate = 22
Variable_List = PBS_O_SYSTEM=Linux,PBS_O_SHELL=/bin/bash,
PBS_O_HOME=/home/user3,PBS_O_LOGNAME=user3,
PBS_O_WORKDIR=/scratch/user3/ubiquitin/100gL/starting-from-left/produc
tion,PBS_O_LANG=en_US.utf8,
PBS_O_PATH=/opt/pbs/default/sbin/:/home/bovigny/bin:/opt/xcat/bin:/opt
/xcat/sbin:/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/loca
l/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/bi
n:/opt/software/python/3.3.0/bin:/opt/software/bin:/opt/pbs/default/bin
:/opt/software/python/3.3.0/bin:/opt/software/bin,
PBS_O_MAIL=/var/spool/mail/user3,PBS_O_QUEUE=P_share_queue,
PBS_O_HOST=mycluster.cluster
Submit_arguments = -W depend=afterany:74164 u-100-l-97.job
project = _pbs_project_default
"""
__copyright__ = u"Copyright (c), 2015, ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (Theory and Simulation of Materials (THEOS) and National Centre for Computational Design and Discovery of Novel Materials (NCCR MARVEL)), Switzerland and ROBERT BOSCH LLC, USA. All rights reserved."
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.4.0"
__contributors__ = "Andrea Cepellotti, Giovanni Pizzi, Marco Dorigo"
class TestParserQstat(unittest.TestCase):
    """
    Tests to verify that the function _parse_joblist_output behaves correctly.
    The tests are done parsing a string defined above, to be used offline.
    """

    def test_parse_common_joblist_output(self):
        """
        Test whether _parse_joblist can parse the qstat -f output.
        """
        s = PbsproScheduler()
        retval = 0
        stdout = text_qstat_f_to_test
        stderr = ''

        job_list = s._parse_joblist_output(retval, stdout, stderr)

        # The parameters are hard coded in the text to parse
        job_on_cluster = 6
        job_parsed = len(job_list)
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use the canonical assertEqual throughout.
        self.assertEqual(job_parsed, job_on_cluster)

        job_running = 2
        job_running_parsed = len([j for j in job_list if j.job_state
                                  and j.job_state == job_states.RUNNING])
        self.assertEqual(job_running, job_running_parsed)

        job_held = 2
        job_held_parsed = len([j for j in job_list if j.job_state
                               and j.job_state == job_states.QUEUED_HELD])
        self.assertEqual(job_held, job_held_parsed)

        job_queued = 2
        job_queued_parsed = len([j for j in job_list if j.job_state
                                 and j.job_state == job_states.QUEUED])
        self.assertEqual(job_queued, job_queued_parsed)

        running_users = ['user02', 'user3']
        parsed_running_users = [j.job_owner for j in job_list if j.job_state
                                and j.job_state == job_states.RUNNING]
        self.assertEqual(set(running_users), set(parsed_running_users))

        running_jobs = ['69301.mycluster', '74164.mycluster']
        parsed_running_jobs = [j.job_id for j in job_list if j.job_state
                               and j.job_state == job_states.RUNNING]
        self.assertEqual(set(running_jobs), set(parsed_running_jobs))

        for j in job_list:
            if j.allocated_machines:
                num_machines = 0
                num_cpus = 0
                for n in j.allocated_machines:
                    num_machines += 1
                    num_cpus += n.num_cpus
                # The totals reported on the job must match the sum over its
                # allocated machines.
                self.assertTrue(j.num_machines == num_machines)
                self.assertTrue(j.num_cpus == num_cpus)
        # TODO : parse the env_vars

    # TODO: WHEN WE USE THE CORRECT ERROR MANAGEMENT, REIMPLEMENT THIS TEST
    # def test_parse_with_error_retval(self):
    #     """
    #     The qstat -f command has received a retval != 0
    #     """
    #     s = PbsproScheduler()
    #     retval = 1
    #     stdout = text_qstat_f_to_test
    #     stderr = ''
    #     # Disable logging to avoid excessive output during test
    #     logging.disable(logging.ERROR)
    #     with self.assertRaises(SchedulerError):
    #         job_list = s._parse_joblist_output(retval, stdout, stderr)
    #     # Reset logging level
    #     logging.disable(logging.NOTSET)

    # def test_parse_with_error_stderr(self):
    #     """
    #     The qstat -f command has received a stderr
    #     """
    #     s = PbsproScheduler()
    #     retval = 0
    #     stdout = text_qstat_f_to_test
    #     stderr = 'A non empty error message'
    #     # TODO : catch the logging error
    #     job_list = s._parse_joblist_output(retval, stdout, stderr)
    #     # print s._logger._log, dir(s._logger._log),'!!!!'
class TestSubmitScript(unittest.TestCase):
    """Tests for PBSPro submit-script generation."""

    def test_submit_script(self):
        """
        Check that get_submit_script renders the expected #PBS directives and
        command line for a minimal job template.
        """
        from aiida.scheduler.datastructures import JobTemplate

        s = PbsproScheduler()

        job_tmpl = JobTemplate()
        job_tmpl.argv = ["mpirun", "-np", "23", "pw.x", "-npool", "1"]
        job_tmpl.stdin_name = 'aiida.in'
        job_tmpl.job_resource = s.create_job_resource(num_machines=1, num_mpiprocs_per_machine=1)
        job_tmpl.uuid = str(uuid.uuid4())
        job_tmpl.max_wallclock_seconds = 24 * 3600

        submit_script_text = s.get_submit_script(job_tmpl)

        # assertIn gives a much clearer failure message than assertTrue(x in y)
        self.assertIn('#PBS -r n', submit_script_text)
        self.assertTrue(submit_script_text.startswith('#!/bin/bash'))
        self.assertIn('#PBS -l walltime=24:00:00', submit_script_text)
        self.assertIn('#PBS -l select=1', submit_script_text)
        self.assertIn("'mpirun' '-np' '23' 'pw.x' '-npool' '1'"
                      " < 'aiida.in'", submit_script_text)
| 18,160 | 8,079 |
# Generated by Django 3.1.11 on 2021-07-27 13:18
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename Post.creation_time to Post.created."""

    dependencies = [
        ('posts', '0002_auto_20210727_1319'),
    ]

    operations = [
        # Pure rename — no column type change, existing data is preserved.
        migrations.RenameField(
            model_name='post',
            old_name='creation_time',
            new_name='created',
        ),
    ]
| 368 | 138 |
#!/usr/bin/python3
import random
import math
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM
# --- Experiment configuration ---
train_batches=2000        # number of random sine waves used for training
eval_batches=50           # number of random sine waves used for evaluation
train_sequlen=32          # samples per wave (also used as the fit batch size)
train_inputs=1            # input samples fed to the network per step
lstm_states=6             # LSTM hidden/cell state width
#activation="relu"
activation=None           # None keeps the LSTM's default (tanh) output activation
rec_activation="hard_sigmoid"

# Pre-allocated sample buffers: one row per (wave, step), shape (N, 1, features).
x_train = np.zeros((train_batches*train_sequlen,1,train_inputs))
y_train = np.zeros((train_batches*train_sequlen,1,1))
x_test = np.zeros((eval_batches*train_sequlen,1,train_inputs))
y_test = np.zeros((eval_batches*train_sequlen,1,1))

random.seed(1234)  # reproducible wave parameters
# generate input of random sine waves, feed one at a time to the network
def random_sample():
    """Draw the (amplitude, frequency, phase) parameters of one random sine wave."""
    # Call order matters for reproducibility with a seeded RNG:
    # amplitude, then frequency, then phase.
    return (
        random.uniform(0.5, 1),
        random.uniform(18, 32),
        random.uniform(-math.pi, math.pi),
    )
def waveform(ampl, freq, phase, idx):
    """Sample a sine wave with the given parameters at integer index ``idx``."""
    angle = (idx / freq) * 2 * math.pi + phase
    return ampl * math.sin(angle)
# calculate train data: each wave contributes train_sequlen consecutive rows;
# inputs are a small window of the wave and the target is the next sample.
for i in range(train_batches):
    (ampl,freq,phase) = random_sample()
    for j in range(train_sequlen): # subsequent measurements
        for k in range(train_inputs):
            x_train[i*train_sequlen+j][0][k]=waveform(ampl,freq,phase,j+k)
        # target: sample immediately after the input window
        y_train[i*train_sequlen+j][0]=waveform(ampl,freq,phase,j+train_inputs)
# evaluation data, same scheme with fresh random waves
for i in range(eval_batches):
    (ampl,freq,phase) = random_sample()
    for j in range(train_sequlen): # subsequent measurements
        for k in range(train_inputs):
            x_test[i*train_sequlen+j][0][k]=waveform(ampl,freq,phase,j+k)
        y_test[i*train_sequlen+j][0]=waveform(ampl,freq,phase,j+train_inputs)
# sanity-check shapes and a few sample values
print(x_train[0][0:5], y_train[0][0:5])
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
def create_model(train=True):
    """
    Build the sine-prediction Keras model.

    train=True  : batched training graph — LSTM state is internal, the model
                  maps a (train_sequlen, 1, train_inputs) batch to one output.
    train=False : single-step inference graph — the LSTM hidden/cell states
                  become explicit model inputs/outputs so the caller can carry
                  state across one-sample predict() calls.
    Both variants share the same layer shapes, so weights transfer via
    save/load (see the load_weights call at module level).
    """
    if train:
        input0 = tf.keras.Input(batch_shape=(train_sequlen,1,train_inputs))
        # stateful is worse
        x = LSTM(lstm_states, recurrent_activation=rec_activation, activation=activation, return_sequences=False, return_state=False, stateful=False)(input0)
        #x = Dropout(0.1)(x) makes it a bit worse
    else:
        input0 = tf.keras.Input(batch_shape=(1,1,train_inputs),name="data")
        input1 = tf.keras.Input(batch_shape=(1,lstm_states),name="state_h")
        input2 = tf.keras.Input(batch_shape=(1,lstm_states),name="state_c")
        # return_state=True exposes (output, state_h, state_c); unroll=True for
        # the single-step graph.
        x, state,state2 = LSTM(lstm_states, recurrent_activation=rec_activation, activation=activation, return_sequences=False, return_state=True, stateful=True, unroll=True)(input0, initial_state=(input1, input2))
    # single regression output: the predicted next waveform sample
    x = Dense(units=1)(x)
    if train:
        model = tf.keras.Model(inputs=input0, outputs=x, name="sine")
    else:
        model = tf.keras.Model(inputs=(input0,input1,input2), outputs=(x,state,state2), name="sine")
    model.summary()
    return model
# Train the batched model, resetting LSTM state between epochs.
model=create_model()
model.compile(loss='mean_squared_error', optimizer='adam')
for i in range(8):
    model.fit(x_train, y_train, epochs=1, batch_size=train_sequlen, verbose=1, shuffle=False,
        validation_data=(x_test,y_test))
    model.reset_states()
model.save('mymodel')
model.save('mymodel_w.h5', save_format="h5")

# Rebuild the graph in single-step inference form and copy the trained weights
# (layer shapes match, so the H5 weights load cleanly).
model2= create_model(False)
model2.load_weights('mymodel_w.h5')
model2.save('evalmodel.h5', save_format="h5")
model2.compile(loss='mean_squared_error', optimizer='adam')

# Run the stateful model one sample at a time, feeding the returned
# hidden/cell state back in on each step.
state_h2 = np.zeros((1,lstm_states))
state_c2 = np.zeros((1,lstm_states))
for i in range(train_sequlen):
    testx, testy = x_test[i], y_test[i]
    testx = testx.reshape(1, 1, 1)
    res = model2.predict([testx,state_h2,state_c2], batch_size=1)
    print('In=%.1f, Expected=%.1f, Predicted=%.1f' % (testx[0][0][0], testy, res[0]))
    state_h2=res[1]
    state_c2=res[2]

# to convert to tflite use
# tflite_convert --keras_model_file evalmodel.h5 --output_file evalmodel.tflite --inference_type FLOAT
# from tensorflow 1.15 (2.2 doesn't work)
| 3,791 | 1,485 |
import critiquebrainz.frontend.external.musicbrainz_db.exceptions as mb_exceptions
# MusicBrainz entity types that can appear on the other end of a relationship.
RELATABLE_TYPES = [
    'area',
    'artist',
    'label',
    'place',
    'event',
    'recording',
    'release',
    'release-group',
    'series',
    'url',
    'work',
    'instrument'
]

# Derived include tokens, e.g. 'artist-rels'.
RELATION_INCLUDES = [entity + '-rels' for entity in RELATABLE_TYPES]

TAG_INCLUDES = ["tags", "user-tags"]
RATING_INCLUDES = ["ratings", "user-ratings"]

# Whitelist of include parameters accepted per entity type
# (consumed by check_includes below).
VALID_INCLUDES = {
    'place': ["aliases", "annotation"] + RELATION_INCLUDES + TAG_INCLUDES,
    'event': ["aliases"] + RELATION_INCLUDES + TAG_INCLUDES,
    'release_group': ["artists", "media", "releases"] + TAG_INCLUDES + RELATION_INCLUDES,
    'release': ["artists", "labels", "recordings", "release-groups", "media", "annotation", "aliases"]
    + TAG_INCLUDES + RELATION_INCLUDES,
    'artist': ["recordings", "releases", "media", "aliases", "annotation"] + RELATION_INCLUDES + TAG_INCLUDES,
}
def check_includes(entity, includes):
    """Check if includes specified for an entity are valid includes.

    :param entity: entity key into VALID_INCLUDES (e.g. 'artist', 'release')
    :param includes: iterable of include strings requested by the caller
    :raises mb_exceptions.InvalidIncludeError: if any include is not valid for the entity
    """
    for include in includes:
        if include not in VALID_INCLUDES[entity]:
            # f-string instead of str.format — identical message text
            raise mb_exceptions.InvalidIncludeError(f"Bad includes: {include} is not a valid include")
| 1,251 | 466 |
# Generated by Django 2.2.11 on 2020-05-07 19:13
from django.db import migrations, models
import djstripe.fields
class Migration(migrations.Migration):
    """Auto-generated: alter the Plan.amount and Plan.nickname field definitions."""

    dependencies = [
        ('djstripe', '0006_2_3'),
    ]

    operations = [
        migrations.AlterField(
            model_name='plan',
            name='amount',
            field=djstripe.fields.StripeDecimalCurrencyAmountField(decimal_places=2, default=0, help_text='Amount (as decimal) to be charged on the interval specified.', max_digits=11),
            # default=0 is only used to backfill existing rows; it is not kept
            # on the model field afterwards.
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='plan',
            name='nickname',
            field=models.TextField(default='', help_text='A brief description of the plan, hidden from customers.', max_length=5000),
        ),
    ]
| 801 | 256 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 1 01:07:37 2019
@author: prasad
"""
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
def get_data(column_names):
    """
    Load the housing train/test splits from disk and label their columns.

    Args
        column_names: names of the features in dataset
    Returns
        train_df: training data
        test_df: testing data
    """
    train_df = pd.read_csv('./data/housing_train.txt', delim_whitespace=True, header=None)
    train_df.columns = column_names

    test_df = pd.read_csv('./data/housing_test.txt', delim_whitespace=True, header=None)
    test_df.columns = column_names

    return train_df, test_df
def normalize(dataset):
    """
    Shift-scale (min-max) normalize every feature column of *dataset* in place.

    The label column (assumed to be the last column) is left untouched. The
    per-column max/min parameters are computed from this dataset and returned
    so the identical transform can be applied elsewhere (see normalize_params).

    Args
        dataset: DataFrame to be normalized using shift-scale normalization
    Returns
        dataset: normalized dataset (same object, modified in place)
        maxs: max parameters for each feature normalization
        mins: min parameters for each feature normalization
    """
    maxs = dataset.max()
    mins = dataset.min()
    # Vectorized column-wise transform — replaces the original per-cell
    # iterrows()/at loop, which did O(rows * cols) Python-level work.
    for feature in dataset.columns[:-1]:
        dataset[feature] = (dataset[feature] - mins[feature]) / (maxs[feature] - mins[feature])
    return dataset, maxs, mins
def normalize_params(dataset, maxs, mins):
    """
    Min-max normalize *dataset* in place using externally supplied parameters.

    Used to apply the training-set normalization to the test set; the label
    column (last column) is left untouched.

    Args
        dataset: data to be normalized
        maxs: max parameters for each feature normalization
        mins: min parameters for each feature normalization
    Returns:
        dataset: normalized dataset (same object, modified in place)
    """
    # Vectorized column-wise transform — replaces the original per-cell
    # iterrows()/at loop, which did O(rows * cols) Python-level work.
    for feature in dataset.columns[:-1]:
        dataset[feature] = (dataset[feature] - mins[feature]) / (maxs[feature] - mins[feature])
    return dataset
def predict(test_data, weights):
    """
    Compute linear predictions for every row of *test_data*.

    Args
        test_data: data for which predictions are to be calculated
        weights: weights to obtain predictions based on (bias term first)
    Returns
        preds: dict mapping row index -> prediction
    """
    # Drop the label column and prepend a column of ones for the bias weight.
    features = test_data.drop(['MEDV'], axis=1).values
    features = np.append(np.ones([len(features), 1]), features, 1)
    return {row: np.dot(weights, features[row]) for row in range(len(features))}
def get_mse(test_data, preds):
    """
    Mean squared error of *preds* against the MEDV labels of *test_data*.

    Args
        test_data: data for which model is to be tested using MSE
        preds: dict of predictions keyed by row index
    Returns
        mse: mean squared error
    """
    actual = test_data['MEDV'].values
    squared_errors = [np.square(label - preds[i]) for i, label in enumerate(actual)]
    return pd.Series(squared_errors).mean()
def cost(data, labels, weights):
    """
    Mean squared error of the linear model on *data*.

    Args
        data: design matrix (bias column included)
        labels: actual labels for data used
        weights: weight vector for prediction
    Returns
        cost on the given data
    """
    residual = np.dot(data, weights).flatten() - labels
    return np.sum(np.square(residual)) / len(data)
def train(train_data, learn_rate=0.001, max_iter=3000):
    """
    Fit linear-regression weights with batch gradient descent.

    Args
        train_data : normalized data for training (label column 'MEDV')
        learn_rate : learning rate for Gradient Descent
        max_iter : maximum number of iterations to run GD
    Returns
        weights: learned weight vector (bias first)
        costs: training cost recorded after every update
    """
    # Design matrix: feature columns plus a leading column of ones for the bias.
    features = train_data.drop(['MEDV'], axis=1).values
    features = np.append(np.ones([len(features), 1]), features, 1)
    targets = train_data['MEDV'].values

    # Random initialization scaled by 1/sqrt(number of columns).
    weights = np.random.normal(scale=1 / math.sqrt(len(features[0])), size=(len(features[0]), 1))
    weights = weights.flatten()

    costs = []
    for itr in range(max_iter):
        # Residual between current predictions and the actual labels.
        residual = np.dot(features, weights).flatten() - targets
        grads = np.dot(features.T, residual)
        weights = weights - learn_rate * grads

        # Record the post-update cost (same formula as cost()).
        fitted = np.dot(features, weights).flatten()
        costs.append(np.sum(np.square(fitted - targets)) / len(features))
        if itr % 100 == 0:
            print('{}: Cost: {}'.format(itr, costs[itr]))
    return weights, costs
def plot_cost(costs):
    """Plot the training-cost curve over gradient-descent iterations."""
    plt.figure(figsize = (20,10))
    plt.title('Cost function')
    plt.ylabel('Costs')
    plt.xlabel('Iterations')
    plt.plot(costs)
#### EXECUTION
# names for the features
column_names = ['CRIM','ZN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT','MEDV']
# extract data from files
train_data, test_data = get_data(column_names)
# normalize data (parameters are computed from the training set)
train_data, maxs, mins = normalize(train_data)
# normalize test data using same parameters as for the training set
test_data = normalize_params(test_data,maxs, mins)
# optimize weights using Gradient Descent
w,costs = train(train_data)
# report train-set error for the optimized weights
pred_train = predict(train_data, w)
print('MSE for Housing dataset using Gradient Descent on Train Data: {}'.format(get_mse(train_data, pred_train)))
# report held-out test-set error
preds = predict(test_data, w)
print('MSE for Housing dataset using Gradient Descent on Test Data: {}'.format(get_mse(test_data, preds)))
plot_cost(costs)
from django.contrib.auth.decorators import login_required
from django.urls import reverse_lazy
from django.views.generic import ListView
from django.views.generic.edit import FormView
from teamspirit.catalogs.models import Product
from teamspirit.preorders.forms import AddToCartForm, DropFromCartForm
from teamspirit.preorders.models import ShoppingCart, ShoppingCartLine
class ShoppingCartView(ListView):
    """List the current user's shopping-cart lines and the cart's total amount."""

    model = ShoppingCartLine
    template_name = "preorders/shopping_cart.html"

    def get_context_data(self, **kwargs):
        """Add the cart's total amount to the template context."""
        context = super().get_context_data(**kwargs)
        # get_or_create returns (object, created); only the object is needed.
        context['shopping_cart_amount'] = ShoppingCart.objects.get_or_create(
            user=self.request.user
        )[0].get_cart_amount()
        return context

    def get_queryset(self):
        """Restrict the listing to the lines of this user's cart."""
        # The original implementation computed super().get_queryset() and
        # immediately discarded it; build the filtered queryset directly.
        shopping_cart = ShoppingCart.objects.get_or_create(
            user=self.request.user
        )[0]
        return ShoppingCartLine.objects.filter(shopping_cart=shopping_cart)


shopping_cart_view = ShoppingCartView.as_view()
shopping_cart_view = login_required(shopping_cart_view)
class AddToCartView(FormView):
    """Form page for adding a catalog product to the user's shopping cart."""

    template_name = "preorders/add_to_cart.html"
    form_class = AddToCartForm
    success_url = reverse_lazy('catalogs:catalog')

    def get_context_data(self, **kwargs):
        """Expose the product being added to the template."""
        context = super().get_context_data(**kwargs)
        product = Product.objects.get(id=self.kwargs['product_id'])
        context['product'] = product
        return context

    def get_initial(self):
        """Pre-fill the form with the user's cart and the selected product."""
        initial = super().get_initial()
        cart, _created = ShoppingCart.objects.get_or_create(user=self.request.user)
        initial['shopping_cart'] = cart
        initial['product'] = Product.objects.get(id=self.kwargs['product_id'])
        return initial


add_to_cart_view = AddToCartView.as_view()
add_to_cart_view = login_required(add_to_cart_view)
class DropFromCartView(FormView):
    """Form page for removing a line from the user's shopping cart."""

    template_name = "preorders/drop_from_cart.html"
    form_class = DropFromCartForm
    success_url = reverse_lazy('preorders:shopping_cart')

    def get_context_data(self, **kwargs):
        """Expose the cart line being removed to the template."""
        context = super().get_context_data(**kwargs)
        context['shopping_cart_line'] = ShoppingCartLine.objects.get(
            id=self.kwargs['line_id']
        )
        return context

    def get_form_kwargs(self):
        """Pass the requesting user and the targeted cart line to the form."""
        # Zero-argument super() for consistency with the other views in this
        # module, and a single update() call instead of three.
        kwargs = super().get_form_kwargs()
        kwargs.update({
            'request_user': self.request.user,
            'line_id': self.kwargs['line_id'],
            'shopping_cart_line': ShoppingCartLine.objects.get(
                id=self.kwargs['line_id']
            ),
        })
        return kwargs


drop_from_cart_view = DropFromCartView.as_view()
drop_from_cart_view = login_required(drop_from_cart_view)
| 2,833 | 882 |
# -*- coding: utf-8 -*-
""" TaskSets and tasks for the Prime & Support APIs """
import logging
import json
import random
from copy import deepcopy
from typing import Dict
from locust import tag, task, TaskSet
from utils.constants import (
INTERNAL_API_KEY,
TEST_PDF,
ZERO_UUID,
PRIME_API_KEY,
SUPPORT_API_KEY,
MOVE_TASK_ORDER,
MTO_SHIPMENT,
MTO_AGENT,
MTO_SERVICE_ITEM,
PAYMENT_REQUEST,
)
from .base import check_response, CertTaskMixin, ParserTaskMixin
logger = logging.getLogger(__name__)
def prime_path(url: str) -> str:
    """Prefix *url* with the Prime API v1 base path."""
    return "/prime/v1" + url
def support_path(url: str) -> str:
    """Prefix *url* with the Support API v1 base path."""
    return "/support/v1" + url
class PrimeDataStorageMixin:
    """
    TaskSet mixin used to store data from the Prime API during load testing so that it can be passed around and reused.
    We store a number of objects in a local store that can be requested by tasks.
    The tasks then hit an endpoint and call add or replace to update our local store with a list of viable objects.
    This mixin allows storing multiple items of each kind.
    """

    # Max number of stored objects per type before the store is trimmed:
    DATA_LIST_MAX: int = 50

    # contains the ID values needed when creating moves using createMoveTaskOrder:
    default_mto_ids: Dict[str, str] = {
        "contractorID": "",
        "destinationDutyStationID": "",
        "originDutyStationID": "",
        "uploadedOrdersID": "",
    }

    local_store: Dict[str, list] = {
        MOVE_TASK_ORDER: [],
        MTO_SHIPMENT: [],
        MTO_SERVICE_ITEM: [],
        PAYMENT_REQUEST: [],
    }  # data stored will be shared among class instances thanks to mutable dict

    def get_stored(self, object_key, object_id=None, *args, **kwargs):
        """
        Given an object_key that represents an object type from the MilMove app, returns an object of that type from the
        list.

        BUG FIX: callers pass a specific object ID positionally (e.g. get_stored(MTO_SHIPMENT, shipment_id)), but the
        previous signature swallowed it in *args and always returned a random object. The ID is now honored: if an
        object with that ID is stored it is returned; otherwise we fall back to a random choice (the old behavior).

        :param object_key: str in [MOVE_TASK_ORDER, MTO_SHIPMENT, MTO_AGENT, MTO_SERVICE_ITEM, PAYMENT_REQUEST]
        :param object_id: optional str ID of the specific stored object wanted
        """
        data_list = self.local_store[object_key]
        if object_id:
            for stored_object in data_list:
                if stored_object.get("id") == object_id:
                    return stored_object
        if len(data_list) > 0:  # otherwise we return None
            return random.choice(data_list)

    def get_stored_shipment_address(self, mto_shipment=None):
        """
        Grabs one of either pickupAddress or destinationAddress from a shipment and returns the specific field and
        payload for that address.

        :param mto_shipment: JSON/dict of a specific MTO Shipment payload (optional)
        :return: tuple(str name of the address field, dict address payload)
        """
        if not mto_shipment:
            mto_shipment = self.get_stored(MTO_SHIPMENT) or {}

        address_fields = ["pickupAddress", "destinationAddress"]
        # Only consider addresses that exist and have a real (non-zero) UUID:
        valid_addresses = [
            (field, mto_shipment[field])
            for field in address_fields
            if mto_shipment.get(field) and mto_shipment[field].get("id", ZERO_UUID) != ZERO_UUID
        ]
        if len(valid_addresses) > 0:  # otherwise we return None
            return random.choice(valid_addresses)

    def add_stored(self, object_key, object_data):
        """
        Adds data to the list for the object key provided. Also checks if the list is already at the max number of
        elements, and if so, it randomly removes 1 to MAX number of elements so that the cycle can start again (and so
        we don't hog too much memory).

        :param object_key: str in [MOVE_TASK_ORDER, MTO_SHIPMENT, MTO_AGENT, MTO_SERVICE_ITEM, PAYMENT_REQUEST]
        :param object_data: JSON/dict
        :return: None
        """
        data_list = self.local_store[object_key]

        if len(data_list) >= self.DATA_LIST_MAX:
            num_to_delete = random.randint(1, self.DATA_LIST_MAX)
            del data_list[:num_to_delete]

        # Some creation endpoint auto-create multiple objects and return an array,
        # but each object in the array should still be considered individually here:
        if isinstance(object_data, list):
            data_list.extend(object_data)
        else:
            data_list.append(object_data)

    def update_stored(self, object_key, old_data, new_data):
        """
        Given an object key, replaces a stored object in the local store with a new updated object.

        :param object_key: str in [MOVE_TASK_ORDER, MTO_SHIPMENT, MTO_AGENT, MTO_SERVICE_ITEM, PAYMENT_REQUEST]
        :param old_data: JSON/dict
        :param new_data: JSON/dict
        :return: None
        """
        data_list = self.local_store[object_key]

        # Remove all instances of the stored object, in case multiples were added erroneously:
        while True:
            try:
                data_list.remove(old_data)
            except ValueError:
                break  # this means we finally cleared the list

        data_list.append(new_data)

    def set_default_mto_ids(self, moves):
        """
        Given a list of Move Task Orders, gets the four ID values needed to create more MTOs:

        - contractorID
        - uploadedOrdersID
        - destinationDutyStationID
        - originDutyStationID

        To get these values, this function hits the getMoveTaskOrder endpoint in the Support API to get all of the
        details on an MTO. The Prime API doesn't have access to all of this info, which is why we need to use the
        Support API instead. It will go through and hit this endpoint for all of the moves in the list until it finally
        gets a complete set of IDs.

        CAN ONLY be used when subclassed with TaskSet and CertTaskMixin.

        :param moves: list of JSON/dict objects
        :return: None
        """
        # Checks that we have a full set of MTO IDs already and halts processing if so:
        if self.has_all_default_mto_ids():
            return

        headers = {"content-type": "application/json"}

        for move in moves:
            # Call the Support API to get full details on the move:
            resp = self.client.get(
                support_path(f"/move-task-orders/{move['id']}"),
                name=support_path("/move-task-orders/{moveTaskOrderID}"),
                headers=headers,
                **self.cert_kwargs,
            )
            move_details, success = check_response(resp, "getMoveTaskOrder")
            if not success:
                continue  # try again with the next move in the list

            # Get the values we need from the move and set them in self.default_move_ids.
            # If this move is missing any of these values, we default to using whatever value is already in
            # self.default_mto_ids, which could be nothing, or could be a value gotten from a previous move.
            # This way we never override good ID values from earlier moves in the list.
            self.default_mto_ids["contractorID"] = move_details.get(
                "contractorID", self.default_mto_ids["contractorID"]
            )
            if order_details := move_details.get("order"):
                self.default_mto_ids["uploadedOrdersID"] = order_details.get(
                    "uploadedOrdersID", self.default_mto_ids["uploadedOrdersID"]
                )
                self.default_mto_ids["destinationDutyStationID"] = order_details.get(
                    "destinationDutyStationID", self.default_mto_ids["destinationDutyStationID"]
                )
                self.default_mto_ids["originDutyStationID"] = order_details.get(
                    "originDutyStationID", self.default_mto_ids["originDutyStationID"]
                )

            # Do we have all the ID values we need? Cool, then stop processing.
            if self.has_all_default_mto_ids():
                logger.info(f"☑️ Set default MTO IDs for createMoveTaskOrder: \n{self.default_mto_ids}")
                break

        # If we're in the local environment, and we have gone through the entire list without getting a full set of IDs,
        # set our hardcoded IDs as the default:
        if not self.has_all_default_mto_ids() and self.user.is_local:
            logger.warning("⚠️ Using hardcoded MTO IDs for LOCAL env")
            self.default_mto_ids.update(
                {
                    "contractorID": "5db13bb4-6d29-4bdb-bc81-262f4513ecf6",
                    "destinationDutyStationID": "71b2cafd-7396-4265-8225-ff82be863e01",
                    "originDutyStationID": "1347d7f3-2f9a-44df-b3a5-63941dd55b34",
                    "uploadedOrdersID": "c26421b0-e4c3-446b-88f3-493bb25c1756",
                }
            )

    def has_all_default_mto_ids(self) -> bool:
        """Boolean indicating that we have all the values we need for creating new MTOs."""
        return self.default_mto_ids and all(self.default_mto_ids.values())
@tag("prime")
class PrimeTasks(PrimeDataStorageMixin, ParserTaskMixin, CertTaskMixin, TaskSet):
"""
Set of the tasks that can be called on the Prime API. Make sure to mark tasks with the `@task` decorator and add
tags where appropriate to make filtering for custom tests easier.
"""
    def __init__(self, parent):
        # Session tokens are populated in on_start() after the devlocal login.
        self.csrf_token = None
        self.session_token = None
        super().__init__(parent)
def customer_path(self, url: str) -> str:
return f"{self.user.alternative_host}{url}"
    def on_start(self):
        """
        Log in through the devlocal auth endpoints, create a fresh milmove user,
        and flesh out its service-member profile so the tasks have data to act on.
        """
        # Fetch the devlocal login page to obtain a CSRF token; all subsequent
        # POSTs must echo it in the x-csrf-token header.
        self.client.get(self.customer_path("/devlocal-auth/login"))
        self.csrf_token = self.client.cookies.get("masked_gorilla_csrf")
        self.client.headers.update({"x-csrf-token": self.csrf_token})

        resp = self.client.post(
            self.customer_path("/devlocal-auth/create"),
            data={"userType": "milmove", "gorilla.csrf.Token": self.csrf_token},
        )
        self.session_token = self.client.cookies.get("mil_session_token")
        if resp.status_code != 200:
            # Without a session none of the tasks in this set can run.
            self.interrupt()

        # Identify the freshly created user and its service member record.
        logged_in_user = self.client.get(self.customer_path("/internal/users/logged_in"))
        json_resp = logged_in_user.json()
        service_member_id = json_resp["service_member"]["id"]
        email = json_resp["email"]
        user_id = json_resp["id"]

        # Use the first duty station returned by the hard-coded search as the
        # member's current station.
        origin_duty_stations = self.client.get(self.customer_path("/internal/duty_stations?search=29"))
        current_station_id = origin_duty_stations.json()[0]["id"]

        overrides = {
            "id": service_member_id,
            "user_id": user_id,
            "edipi": "9999999999",
            "personal_email": email,
            "email_is_preferred": True,
            "current_station_id": current_station_id,
        }
        # Patch the service member with faked-but-valid profile data.
        payload = self.fake_request("/service_members/{serviceMemberId}", "patch", INTERNAL_API_KEY, overrides, True)
        self.client.patch(
            self.customer_path(f"/internal/service_members/{service_member_id}"),
            name="/internal/service_members/{serviceMemberId}",
            data=json.dumps(payload),
            headers={"content-type": "application/json"},
            **self.user.cert_kwargs,
        )

        # Add a backup contact, which the app expects every member to have.
        overrides = {"permission": "NONE"}
        payload = self.fake_request(
            "/service_members/{serviceMemberId}/backup_contacts", "post", INTERNAL_API_KEY, overrides
        )
        self.client.post(
            self.customer_path(f"/internal/service_members/{service_member_id}/backup_contacts"),
            name="/internal/service_members/{serviceMemberId}/backup_contacts",
            data=json.dumps(payload),
            headers={"content-type": "application/json"},
            **self.user.cert_kwargs,
        )
    @tag(MOVE_TASK_ORDER, "listMoves")
    @task
    def list_moves(self):
        """Fetch all moves from the Prime API and harvest the default IDs needed to create MTOs."""
        timeout = {}
        if self.user.is_local:
            timeout["timeout"] = 15  # set a timeout of 15sec if we're running locally - just for this endpoint

        resp = self.client.get(prime_path("/moves"), **self.cert_kwargs, **timeout)
        moves, success = check_response(resp, "listMoves")

        # Use these MTOs to set the ID values we'll need to create more MTOs
        # (NOTE: we don't care about a failure here because we can set the default IDs instead,
        # if this is running locally)
        self.set_default_mto_ids(moves or [])
    @tag(MTO_SERVICE_ITEM, "createMTOServiceItem")
    @task
    def create_mto_service_item(self, overrides=None):
        """
        Create a service item on a stored shipment, store the result, and return it.

        :param overrides: optional dict of field overrides for the generated payload
        :return: the created service item payload(s), or None if no stored shipment exists
        """
        # If mtoShipmentID was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("mtoShipmentID") if overrides else None
        mto_shipment = self.get_stored(MTO_SHIPMENT, object_id)
        if not mto_shipment:
            logger.debug("createMTOServiceItem: ⚠️ No mto_shipment found")
            return None

        overrides_local = {
            # override moveTaskOrderID because we don't want a random one
            "moveTaskOrderID": mto_shipment["moveTaskOrderID"],
            # override mtoShipmentID because we don't want a random one
            "mtoShipmentID": mto_shipment["id"],
        }
        # Merge local overrides with passed-in overrides
        overrides_local.update(overrides or {})

        payload = self.fake_request("/mto-service-items", "post", PRIME_API_KEY, overrides_local)
        headers = {"content-type": "application/json"}
        resp = self.client.post(
            prime_path("/mto-service-items"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs
        )
        mto_service_items, success = check_response(resp, f"createMTOServiceItem {payload['reServiceCode']}", payload)
        if success:
            self.add_stored(MTO_SERVICE_ITEM, mto_service_items)
        return mto_service_items
    @tag(MTO_SHIPMENT, "createMTOShipment")
    @task
    def create_mto_shipment(self, overrides=None):
        """
        Create a new MTO shipment on a stored move, store the result, and return it.

        :param overrides: optional dict of field overrides for the generated payload
        :return: the created shipment payload, or None if no stored move exists
        """

        def guarantee_unique_agent_type(agents):
            # The API rejects two agents of the same type on one shipment; if the
            # generated agents collide, flip the second one to the other type.
            agent_types = {agent["agentType"] for agent in agents}
            if len(agents) >= 2 and len(agent_types) < 2:
                possible_types = {"RELEASING_AGENT", "RECEIVING_AGENT"}
                agents[1]["agentType"] = (possible_types - agent_types).pop()

        # If moveTaskOrderID was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("moveTaskOrderID") if overrides else None
        move_task_order = self.get_stored(MOVE_TASK_ORDER, object_id)
        if not move_task_order:
            logger.debug("createMTOShipment: ⚠️ No move_task_order found")
            return (
                None  # we can't do anything else without a default value, and no pre-made MTOs satisfy our requirements
            )

        overrides_local = {
            # Override moveTaskorderID because we don't want a random one
            "moveTaskOrderID": move_task_order["id"],
            # Set agents UUIDs to ZERO_UUID because we can't actually set the UUID on creation
            "agents": {"id": ZERO_UUID, "mtoShipmentID": ZERO_UUID},
            # Set pickupAddress to ZERO_UUID because we can't actually set the UUID on creation
            "pickupAddress": {"id": ZERO_UUID},
            # Set destinationAddress to ZERO_UUID because we can't actually set the UUID on creation
            "destinationAddress": {"id": ZERO_UUID},
            # Set mtoServiceItems to empty to let the createMTOServiceItems endpoint do the creation
            "mtoServiceItems": [],
        }
        # Merge local overrides with passed-in overrides
        if overrides:
            overrides_local.update(overrides)

        payload = self.fake_request("/mto-shipments", "post", PRIME_API_KEY, overrides=overrides_local)
        guarantee_unique_agent_type(payload["agents"])  # modifies the payload directly

        headers = {"content-type": "application/json"}
        resp = self.client.post(
            prime_path("/mto-shipments"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs
        )
        mto_shipment, success = check_response(resp, "createMTOShipment", payload)
        if success:
            self.add_stored(MTO_SHIPMENT, mto_shipment)
        return mto_shipment
    @tag(MTO_SHIPMENT, "createMTOShipment", "expectedFailure")
    @task
    def create_mto_shipment_with_duplicate_agents(self, overrides=None):
        """
        Negative-path task: submit a shipment with two identical agents and
        check that the API rejects it with a 422.
        """
        # If moveTaskOrderID was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("moveTaskOrderID") if overrides else None
        move_task_order = self.get_stored(MOVE_TASK_ORDER, object_id)
        if not move_task_order:
            logger.debug("createMTOShipment — expected failure: ⚠️ No move_task_order found")
            return (
                None  # we can't do anything else without a default value, and no pre-made MTOs satisfy our requirements
            )

        # The same agent dict is deliberately used twice to force the duplicate.
        agent_type = random.choice(["RELEASING_AGENT", "RECEIVING_AGENT"])
        agent_override = {"id": ZERO_UUID, "mtoShipmentID": ZERO_UUID, "agentType": agent_type}
        overrides_local = {
            # Override moveTaskorderID because we don't want a random one
            "moveTaskOrderID": move_task_order["id"],
            # Set agents UUIDs to ZERO_UUID because we can't actually set the UUID on creation and guarantee two agents
            "agents": [agent_override, agent_override],
            # Set pickupAddress to ZERO_UUID because we can't actually set the UUID on creation
            "pickupAddress": {"id": ZERO_UUID},
            # Set destinationAddress to ZERO_UUID because we can't actually set the UUID on creation
            "destinationAddress": {"id": ZERO_UUID},
            # Set mtoServiceItems to empty to let the createMTOServiceItems endpoint do the creation
            "mtoServiceItems": [],
        }
        # Merge local overrides with passed-in overrides
        if overrides:
            overrides_local.update(overrides)

        payload = self.fake_request("/mto-shipments", "post", PRIME_API_KEY, overrides=overrides_local)
        headers = {"content-type": "application/json"}
        resp = self.client.post(
            prime_path("/mto-shipments"),
            name=prime_path("/mto-shipments — expected failure"),
            data=json.dumps(payload),
            headers=headers,
            **self.user.cert_kwargs,
        )
        # Expect a 422 Unprocessable Entity from the API:
        check_response(resp, "createMTOShipmentFailure", payload, "422")
@tag(PAYMENT_REQUEST, "createUpload")
@task
def create_upload(self, overrides=None):
# If id was provided, get that specific one. Else get any stored one.
object_id = overrides.get("id") if overrides else None
payment_request = self.get_stored(PAYMENT_REQUEST, object_id)
if not payment_request:
return
upload_file = {"file": open(TEST_PDF, "rb")}
resp = self.client.post(
prime_path(f"/payment-requests/{payment_request['id']}/uploads"),
name=prime_path("/payment-requests/{paymentRequestID}/uploads"),
files=upload_file,
**self.user.cert_kwargs,
)
check_response(resp, "createUpload")
@tag(PAYMENT_REQUEST, "createPaymentRequest")
@task
def create_payment_request(self, overrides=None):
    """Create a payment request for a stored service item via the Prime API.

    If the service item's shipment has no primeActualWeight the request is
    expected to fail with an error, so it is sent under an "expected failure"
    name and no result is stored.

    :param overrides: optional dict; "mtoServiceItemID" selects a specific
        stored service item.
    :return: the created payment request dict on success, else None.
    """
    # If mtoServiceItemID was provided, get that specific one. Else get any stored one.
    object_id = overrides.get("mtoServiceItemID") if overrides else None
    service_item = self.get_stored(MTO_SERVICE_ITEM, object_id)
    if not service_item:
        return
    payload = {
        "moveTaskOrderID": service_item["moveTaskOrderID"],
        "serviceItems": [{"id": service_item["id"]}],
        "isFinal": False,
    }
    shipment = self.get_stored(MTO_SHIPMENT, service_item["mtoShipmentID"])
    if not shipment:
        logger.info("unable to find shipment of payment request service item")
        # BUG FIX: the original fell through here and crashed below with an
        # AttributeError on shipment.get() when shipment was None.
        return None
    headers = {"content-type": "application/json"}
    # if the actual weight hasn't been provided, creating the payment request will fail
    if not shipment.get("primeActualWeight"):
        self.client.post(
            prime_path("/payment-requests"),
            name=prime_path("/payment-requests — expected failure"),
            data=json.dumps(payload),
            headers=headers,
            **self.user.cert_kwargs,
        )
        return None
    resp = self.client.post(
        prime_path("/payment-requests"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs
    )
    payment_request, success = check_response(resp, "createPaymentRequest", payload)
    if success:
        self.add_stored(PAYMENT_REQUEST, payment_request)
    return payment_request
@tag(MTO_SHIPMENT, "updateMTOShipment")
@task
def update_mto_shipment(self, overrides=None):
    """PATCH a stored MTO shipment through the Prime API.

    :param overrides: optional dict; "id" selects a specific stored shipment,
        and the rest is merged into the generated fake payload.
    :return: the updated shipment from the API response.
    """
    # Use the specific stored shipment when an id override was supplied.
    target_id = overrides.get("id") if overrides else None
    shipment = self.get_stored(MTO_SHIPMENT, target_id)
    if not shipment:
        return  # nothing stored yet; another task run will try again

    payload = self.fake_request("/mto-shipments/{mtoShipmentID}", "patch", PRIME_API_KEY, overrides)

    # Agents/addresses must not be updated through this endpoint, and
    # primeEstimatedWeight is immutable once set (it is set on creation).
    banned = {
        "agents",
        "pickupAddress",
        "destinationAddress",
        "secondaryPickupAddress",
        "secondaryDeliveryAddress",
        "primeEstimatedWeight",
    }
    # ntsRecordedWeight is only valid on NTS-release shipments.
    if payload.get("ntsRecordedWeight"):
        effective_type = payload.get("shipmentType") or shipment.get("shipmentType")
        if effective_type != "HHG_OUTOF_NTS_DOMESTIC":
            banned.add("ntsRecordedWeight")
    payload = {key: value for key, value in payload.items() if key not in banned}

    resp = self.client.patch(
        prime_path(f"/mto-shipments/{shipment['id']}"),
        name=prime_path("/mto-shipments/{mtoShipmentID}"),
        data=json.dumps(payload),
        headers={"content-type": "application/json", "If-Match": shipment["eTag"]},
        **self.user.cert_kwargs,
    )
    refreshed, success = check_response(resp, "updateMTOShipment", payload)
    if success:
        self.update_stored(MTO_SHIPMENT, shipment, refreshed)
    return refreshed
@tag(MTO_SHIPMENT, "updateMTOShipmentAddress")
@task
def update_mto_shipment_address(self, overrides=None):
    """PUT an updated address onto one of a stored shipment's addresses.

    :param overrides: optional dict; "id" selects a specific stored shipment,
        and the rest is merged into the generated fake payload.
    :return: the stored-shipment copy with the new address spliced in, or None.
    """
    target_id = overrides.get("id") if overrides else None
    shipment = self.get_stored(MTO_SHIPMENT, target_id)
    if not shipment:
        return
    # get_stored_shipment_address returns a (field_name, address_dict) tuple,
    # or a falsy value when the shipment carries no addresses.
    stored = self.get_stored_shipment_address(shipment)
    if not stored:
        return  # try again later with a different shipment
    field_name, address = stored
    merged_overrides = {"id": address["id"], **(overrides or {})}
    payload = self.fake_request(
        "/mto-shipments/{mtoShipmentID}/addresses/{addressID}", "put", PRIME_API_KEY, overrides=merged_overrides
    )
    # update the shipment's address
    resp = self.client.put(
        prime_path(f"/mto-shipments/{shipment['id']}/addresses/{address['id']}"),
        name=prime_path("/mto-shipments/{mtoShipmentID}/addresses/{addressID}"),
        data=json.dumps(payload),
        headers={"content-type": "application/json", "If-Match": address["eTag"]},
        **self.user.cert_kwargs,
    )
    new_address, success = check_response(resp, "updateMTOShipmentAddress", payload)
    if success:
        # The response is only the address, so fold it back into a copy of the
        # shipment before storing to keep the stored object complete.
        refreshed = deepcopy(shipment)
        refreshed[field_name] = new_address
        self.update_stored(MTO_SHIPMENT, shipment, refreshed)
        return refreshed
@tag(MTO_AGENT, "updateMTOAgent")
@task
def update_mto_agent(self, overrides=None):
    """PUT an update to the first agent on a stored shipment.

    :param overrides: optional dict; only "mtoShipmentID" is used, to pick a
        specific stored shipment.
    :return: the stored-shipment copy with the new agent spliced in, or None.
    """
    target_id = overrides.get("mtoShipmentID") if overrides else None
    shipment = self.get_stored(MTO_SHIPMENT, target_id)
    if not shipment:
        return  # can't run this task
    agents = shipment.get("agents")
    if agents is None:
        return  # can't update agents if there aren't any
    # NOTE(review): beyond the mtoShipmentID lookup above, the caller-supplied
    # `overrides` is deliberately discarded here — confirm that is intended.
    overrides = {}
    agent = agents[0]
    if len(agents) >= 2:
        # With two agents present, keep the agentType fixed so the update
        # cannot collide with the other agent's type.
        overrides = {"agentType": agent["agentType"]}
    payload = self.fake_request("/mto-shipments/{mtoShipmentID}/agents/{agentID}", "put", PRIME_API_KEY, overrides)
    resp = self.client.put(
        prime_path(f"/mto-shipments/{shipment['id']}/agents/{agent['id']}"),
        name=prime_path("/mto-shipments/{mtoShipmentID}/agents/{agentID}"),
        data=json.dumps(payload),
        headers={"content-type": "application/json", "If-Match": agent["eTag"]},
        **self.user.cert_kwargs,
    )
    new_agent, success = check_response(resp, "updateMTOAgent", payload)
    if success:
        # Response contains only the agent; fold it back into a shipment copy.
        refreshed = deepcopy(shipment)
        refreshed["agents"][0] = new_agent
        self.update_stored(MTO_SHIPMENT, shipment, refreshed)
        return refreshed
@tag(MTO_SERVICE_ITEM, "updateMTOServiceItem")
@task
def update_mto_service_item(self, overrides=None):
    """PATCH a stored SIT service item (DDDSIT/DOPSIT only) via the Prime API.

    :param overrides: optional dict; "id" selects a specific stored item.
    :return: the updated service item on success, else None.
    """
    # If id was provided, get that specific one. Else get any stored one.
    object_id = overrides.get("id") if overrides else None
    mto_service_item = self.get_stored(MTO_SERVICE_ITEM, object_id)
    if not mto_service_item:
        return  # can't run this task
    try:
        re_service_code = mto_service_item["reServiceCode"]
    except KeyError:
        logger.error(f"⛔️ update_mto_service_item recvd mtoServiceItem without reServiceCode \n{mto_service_item}")
        return
    if re_service_code not in ["DDDSIT", "DOPSIT"]:
        # FIX: use the module-level `logger` like every other log call in this
        # file, instead of the root logger via `logging.info`.
        logger.info(
            "update_mto_service_item recvd mtoServiceItem from store. Discarding because reServiceCode not in "
            "[DDDSIT, DOPSIT]"
        )
        return
    # FIX (consistency): pass PRIME_API_KEY explicitly like every other
    # fake_request call in this task set; the original omitted it here.
    payload = self.fake_request(
        "/mto-service-items/{mtoServiceItemID}",
        "patch",
        PRIME_API_KEY,
        overrides={
            "id": mto_service_item["id"],
            # Reuse the existing final-address id when present; ZERO_UUID is
            # presumably the "no real id yet" placeholder — TODO confirm.
            "sitDestinationFinalAddress": {
                "id": mto_service_item["sitDestinationFinalAddress"]["id"]
                if mto_service_item.get("sitDestinationFinalAddress")
                and mto_service_item["sitDestinationFinalAddress"].get("id")
                else ZERO_UUID,
            },
        },
    )
    headers = {"content-type": "application/json", "If-Match": mto_service_item["eTag"]}
    resp = self.client.patch(
        prime_path(f"/mto-service-items/{mto_service_item['id']}"),
        name=prime_path("/mto-service-items/{mtoServiceItemID}"),
        data=json.dumps(payload),
        headers=headers,
        **self.user.cert_kwargs,
    )
    updated_service_item, success = check_response(resp, f"updateMTOServiceItem {re_service_code}", payload)
    if success:
        self.update_stored(MTO_SERVICE_ITEM, mto_service_item, updated_service_item)
    return updated_service_item
@tag(MOVE_TASK_ORDER, "updateMTOPostCounselingInformation")
@task
def update_post_counseling_information(self, overrides=None):
    """PATCH post-counseling info onto a stored move task order.

    :param overrides: optional dict; "id" selects a specific stored MTO.
    :return: the updated move task order from the API response.
    """
    target_id = overrides.get("id") if overrides else None
    move = self.get_stored(MOVE_TASK_ORDER, target_id)
    if not move:
        logger.debug("updateMTOPostCounselingInformation: ⚠️ No move_task_order found")
        # we can't do anything without a stored MTO, and no pre-made MTOs satisfy our requirements
        return
    payload = self.fake_request("/move-task-orders/{moveTaskOrderID}/post-counseling-info", "patch", PRIME_API_KEY)
    resp = self.client.patch(
        prime_path(f"/move-task-orders/{move['id']}/post-counseling-info"),
        name=prime_path("/move-task-orders/{moveTaskOrderID}/post-counseling-info"),
        data=json.dumps(payload),
        headers={"content-type": "application/json", "If-Match": move["eTag"]},
        **self.user.cert_kwargs,
    )
    refreshed, success = check_response(resp, "updateMTOPostCounselingInformation", payload)
    if success:
        self.update_stored(MOVE_TASK_ORDER, move, refreshed)
    return refreshed
@tag("support")
class SupportTasks(PrimeDataStorageMixin, ParserTaskMixin, CertTaskMixin, TaskSet):
    """
    Set of the tasks that can be called on the Support API. Make sure to mark tasks with the `@task` decorator and add
    tags where appropriate to make filtering for custom tests easier. Ex:

    @tag('updates', 'shipments')
    @task
    def update_mto_shipment_status(self):
        # etc.
    """

    @tag(MTO_SHIPMENT, "updateMTOShipmentStatus")
    @task(2)
    def update_mto_shipment_status(self, overrides=None):
        """PATCH a new status onto a stored shipment via the Support API.

        Re-fetches the move first so the shipment's status and eTag are
        current, and only sends statuses that are valid transitions from the
        current one (others are skipped and None is returned).

        :param overrides: optional dict; "id" selects a specific shipment, and
            the rest is merged into the generated fake payload.
        :return: the updated shipment on success, else None.
        """
        # If id was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("id") if overrides else None
        mto_shipment = self.get_stored(MTO_SHIPMENT, object_id)
        if not mto_shipment:
            logger.debug("updateMTOShipmentStatus: ⚠️ No mto_shipment found.")
            return None  # can't run this task
        # To avoid issues with the mto shipment being stale,
        # retrieve the move associated with the shipment and then use the
        # newly fetched move to find the most up-to-date version of the shipment.
        move_id = mto_shipment["moveTaskOrderID"]
        headers = {"content-type": "application/json"}
        resp = self.client.get(
            support_path(f"/move-task-orders/{move_id}"),
            name=support_path("/move-task-orders/{moveTaskOrderID}"),
            headers=headers,
        )
        move_details, success = check_response(resp, "getMoveTaskOrder")
        if not move_details:
            # FIX: the original logged "No mto_shipment found." here too, which
            # made the two failure modes indistinguishable in the logs.
            logger.debug("updateMTOShipmentStatus: ⚠️ Could not fetch move details.")
            return None  # can't run this task
        for fetched_mto_shipment in move_details["mtoShipments"]:
            if fetched_mto_shipment["id"] == mto_shipment["id"]:
                # Generate fake payload based on the endpoint's required fields
                payload = self.fake_request(
                    "/mto-shipments/{mtoShipmentID}/status", "patch", SUPPORT_API_KEY, overrides
                )
                # Skip status transitions the server would reject outright:
                if fetched_mto_shipment["status"] == "CANCELLATION_REQUESTED" and payload["status"] != "CANCELED":
                    return None
                elif fetched_mto_shipment["status"] == "SUBMITTED" and payload["status"] not in [
                    "APPROVED",
                    "REJECTED",
                ]:
                    return None
                elif fetched_mto_shipment["status"] == "DIVERSION_REQUESTED" and payload["status"] != "APPROVED":
                    return None
                elif fetched_mto_shipment["status"] == "APPROVED" and payload["status"] != "DIVERSION_REQUESTED":
                    return None
                elif fetched_mto_shipment["status"] in ["DRAFT", "REJECTED", "CANCELED"]:
                    return None  # terminal / non-updatable states
                headers = {"content-type": "application/json", "If-Match": fetched_mto_shipment["eTag"]}
                resp = self.client.patch(
                    support_path(f"/mto-shipments/{fetched_mto_shipment['id']}/status"),
                    name=support_path("/mto-shipments/{mtoShipmentID}/status"),
                    data=json.dumps(payload),
                    headers=headers,
                )
                new_mto_shipment, success = check_response(resp, "updateMTOShipmentStatus", payload)
                if success:
                    self.update_stored(MTO_SHIPMENT, mto_shipment, new_mto_shipment)
                # FIX: return the refreshed shipment (None on failure) instead
                # of the stale pre-update one, matching the other tasks.
                return new_mto_shipment

    @tag(MTO_SHIPMENT, "updateMTOShipmentStatus", "expectedFailure")
    # run this task less frequently than the others since this is testing an expected failure
    @task(1)
    def update_mto_shipment_with_invalid_status(self, overrides=None):
        """Deliberately PATCH an invalid status ("DRAFT") and expect a 422.

        :param overrides: optional dict; "id" selects a specific shipment.
        """
        # If id was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("id") if overrides else None
        mto_shipment = self.get_stored(MTO_SHIPMENT, object_id)
        if not mto_shipment:
            logger.debug("updateMTOShipmentStatus: ⚠️ No mto_shipment found.")
            return None  # can't run this task
        overrides_local = {"status": "DRAFT"}
        # Merge local overrides with passed-in overrides
        if overrides:
            overrides_local.update(overrides)
        # Generate fake payload based on the endpoint's required fields
        payload = self.fake_request("/mto-shipments/{mtoShipmentID}/status", "patch", SUPPORT_API_KEY, overrides_local)
        # Force the invalid status even if a passed-in override replaced it.
        payload["status"] = "DRAFT"
        headers = {"content-type": "application/json", "If-Match": mto_shipment["eTag"]}
        resp = self.client.patch(
            support_path(f"/mto-shipments/{mto_shipment['id']}/status"),
            name=support_path("/mto-shipments/{mtoShipmentID}/status — expected failure"),
            data=json.dumps(payload),
            headers=headers,
        )
        check_response(resp, "updateMTOShipmentStatusFailure", payload, "422")

    @tag(MOVE_TASK_ORDER, "createMoveTaskOrder")
    @task(2)
    def create_move_task_order(self):
        """Create a move task order, then make it available to the Prime.

        :return: the available-to-prime MTO on success, else None.
        """
        # Check that we have all required ID values for this endpoint:
        if not self.has_all_default_mto_ids():
            logger.debug(f"⚠️ Missing createMoveTaskOrder IDs for environment {self.user.env}")
            return
        overrides = {
            "contractorID": self.default_mto_ids["contractorID"],
            # Moves that are in DRAFT or CANCELED mode cannot be used by the rest of the load testing
            "status": "SUBMITTED",
            # If this date is set here, the status will not properly transition to APPROVED
            "availableToPrimeAt": None,
            "order": {
                "status": "APPROVED",
                "tac": "F8J1",
                # We need these objects to exist
                "destinationDutyStationID": self.default_mto_ids["destinationDutyStationID"],
                "originDutyStationID": self.default_mto_ids["originDutyStationID"],
                "uploadedOrdersID": self.default_mto_ids["uploadedOrdersID"],
                # To avoid the overrides being inserted into these nested objects...
                "entitlement": {},
                "customer": {},
            },
        }
        payload = self.fake_request("/move-task-orders", "post", SUPPORT_API_KEY, overrides)
        headers = {"content-type": "application/json"}
        resp = self.client.post(
            support_path("/move-task-orders"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs
        )
        json_body, success = check_response(resp, "createMoveTaskOrder", payload)
        if not success:
            return  # no point continuing if it didn't work out
        # Second step: mark the freshly created MTO as available to the Prime.
        move_task_order_id = json_body["id"]
        e_tag = json_body["eTag"]
        headers["if-match"] = e_tag
        resp = self.client.patch(
            support_path(f"/move-task-orders/{move_task_order_id}/available-to-prime"),
            name=support_path("/move-task-orders/{moveTaskOrderID}/available-to-prime"),
            headers=headers,
            **self.user.cert_kwargs,
        )
        new_mto, success = check_response(resp, "makeMoveTaskOrderAvailable")
        if success:
            self.add_stored(MOVE_TASK_ORDER, new_mto)
        return new_mto

    # @tag(MTO_SERVICE_ITEM, "updateMTOServiceItemStatus")
    @task(2)
    def update_mto_service_item_status(self, overrides=None):
        """PATCH a status update onto a stored service item via the Support API.

        :param overrides: optional dict; "id" selects a specific item, and the
            rest is merged into the generated fake payload.
        :return: the updated service item from the API response.
        """
        # If id was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("id") if overrides else None
        mto_service_item = self.get_stored(MTO_SERVICE_ITEM, object_id)
        # if we don't have an mto service item we can't run this task
        if not mto_service_item:
            logger.debug("updateMTOServiceItemStatus: ⚠️ No mto_service_item found")
            return None
        payload = self.fake_request("/mto-service-items/{mtoServiceItemID}/status", "patch", SUPPORT_API_KEY, overrides)
        headers = {"content-type": "application/json", "If-Match": mto_service_item["eTag"]}
        resp = self.client.patch(
            support_path(f"/mto-service-items/{mto_service_item['id']}/status"),
            name=support_path("/mto-service-items/{mtoServiceItemID}/status"),
            data=json.dumps(payload),
            headers=headers,
            **self.user.cert_kwargs,
        )
        # FIX: bind the response to a NEW name. The original rebound
        # `mto_service_item`, so update_stored received the fresh item as both
        # the old and the new object and the stale stored entry was never
        # actually replaced.
        new_service_item, success = check_response(resp, "updateMTOServiceItemStatus", payload)
        if success:
            self.update_stored(MTO_SERVICE_ITEM, mto_service_item, new_service_item)
        return new_service_item

    @tag(PAYMENT_REQUEST, "updatePaymentRequestStatus")
    @task(2)
    def update_payment_request_status(self, overrides=None):
        """PATCH a status update onto a stored payment request.

        :param overrides: optional dict; "id" selects a specific request.
        :return: the updated payment request from the API response.
        """
        # If id was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("id") if overrides else None
        payment_request = self.get_stored(PAYMENT_REQUEST, object_id)
        if not payment_request:
            return
        payload = self.fake_request("/payment-requests/{paymentRequestID}/status", "patch", SUPPORT_API_KEY)
        headers = {"content-type": "application/json", "If-Match": payment_request["eTag"]}
        resp = self.client.patch(
            support_path(f"/payment-requests/{payment_request['id']}/status"),
            name=support_path("/payment-requests/{paymentRequestID}/status"),
            data=json.dumps(payload),
            headers=headers,
            **self.user.cert_kwargs,
        )
        new_payment_request, success = check_response(resp, "updatePaymentRequestStatus", payload)
        if success:
            self.update_stored(PAYMENT_REQUEST, payment_request, new_payment_request)
        return new_payment_request

    @tag(MOVE_TASK_ORDER, "getMoveTaskOrder")
    @task(2)
    def get_move_task_order(self, overrides=None):
        """GET a stored move task order and refresh the stored copy.

        :param overrides: optional dict; "id" selects a specific stored MTO.
        :return: the fetched move task order from the API response.
        """
        # If id was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("id") if overrides else None
        move_task_order = self.get_stored(MOVE_TASK_ORDER, object_id)
        if not move_task_order:
            logger.debug("getMoveTaskOrder: ⚠️ No move_task_order found")
            return
        headers = {"content-type": "application/json"}
        resp = self.client.get(
            support_path(f"/move-task-orders/{move_task_order['id']}"),
            name=support_path("/move-task-orders/{moveTaskOrderID}"),
            headers=headers,
            **self.user.cert_kwargs,
        )
        new_mto, success = check_response(resp, "getMoveTaskOrder")
        if success:
            self.update_stored(MOVE_TASK_ORDER, move_task_order, new_mto)
        return new_mto
| 40,079 | 12,022 |
#######################################################################################################################
# Author: Richie Magnotti
#
# Goal of code is to demonstrate sorting via bubble sort algorithm
#######################################################################################################################
def bubble_sort(arr):
    """Sort `arr` in place, ascending, using bubble sort.

    Improvements over the original:
    - the inner pass shrinks each iteration (the tail is already in place),
      instead of always scanning the full range
    - early exit when a pass makes no swaps, giving O(n) on sorted input
    - tuple swap instead of a temp variable
    - noisy per-element debug prints removed; before/after prints kept

    :param arr: mutable sequence of comparable items; modified in place.
    :return: None (sorts in place, like list.sort()).
    """
    n = len(arr)
    print("before", arr)
    for i in range(n - 1):
        swapped = False
        # After i full passes, the last i elements are in their final spots.
        for j in range(n - 1 - i):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            break  # no swaps in a full pass -> already sorted
    print("after", arr)
def main():
    """Demo entry point: bubble-sort a sample list of ints."""
    sample = [0, 5, 3, 13, 1, 7, 4, 3, 7, 6, 5, 9, 2, 7]
    bubble_sort(sample)


if __name__ == "__main__":
    main()
| 848 | 252 |
"""Data and functions for dealing with cell contents."""
# pylint: disable=no-member, too-many-instance-attributes, too-many-arguments
import numpy as np
from skimage import util
from skimage.transform import probabilistic_hough_line
from lib.util import Crop, Offset, intersection
class Cell:
    """Data and functions for dealing with cell contents.

    A cell is one rectangle of a detected table grid; it is defined by the
    four grid lines that surround it and carries a view into the grid's edge
    image for content analysis (labels, lines).
    """

    # Mean-intensity thresholds used when classifying label cells.
    row_label_threshold = 20
    col_label_threshold = 15
    # Default interior crop (pixels per side) to shave off residual grid lines.
    crop = Crop(top=4, bottom=4, left=4, right=4)
    # Hough angles (radians) sweeping 65° down to 25° for forward-slash strokes.
    forward_slashes = np.deg2rad(np.linspace(65.0, 25.0, num=161))
    # NOTE(review): the `+=` below is an ELEMENTWISE sum of linspace(0, 65)
    # and linspace(-65, 0), which produces a single uniform sweep from -65°
    # to +65° — NOT a concatenation of two separate angle ranges. If two
    # ranges were intended, this would need np.concatenate; confirm intent.
    label_lines = np.deg2rad(np.linspace(0.0, 65.0, num=181))
    label_lines += np.deg2rad(np.linspace(-65.0, 0.0, num=181))

    def __init__(self, grid, top=None, bottom=None, left=None, right=None):
        """
        Build a cell from the 4 surrounding grid lines.

        We will also get the four corners of the cell by finding the
        intersections of the grid lines.

        `grid` supplies the edge image (`grid.edges`) and the grid's offset
        within the page (`grid.offset`); `top`/`bottom`/`left`/`right` are the
        bounding grid lines passed to `intersection`.
        """
        self.image = grid.edges
        # Corner points from pairwise intersections of the bounding lines.
        self.top_left = intersection(top, left)
        self.bottom_left = intersection(bottom, left)
        self.top_right = intersection(top, right)
        self.bottom_right = intersection(bottom, right)
        # Width/height measured along the top edge / left edge respectively.
        self.width = self.top_right.x - self.top_left.x
        self.height = self.bottom_left.y - self.top_left.y
        # Cell's offset in page coordinates = grid offset + corner position.
        self.offset = Offset(x=grid.offset.x + self.top_left.x,
                             y=grid.offset.y + self.top_left.y)

    def interior(self, crop=None):
        """
        Get the interior image of the cell.

        Sometimes we will want to crop the interior to try and remove the
        surrounding grid lines. That is, we want the cell contents, not the
        grid lines.

        The margins below are clamped to >= 0 so skimage's `util.crop` never
        receives a negative crop width; the optional `crop` is applied only
        when the remaining interior is strictly larger than the crop amounts.
        """
        top = max(0, self.top_left.y, self.top_right.y)
        bottom = max(0, self.image.shape[0]
                     - min(self.bottom_left.y, self.bottom_right.y))
        left = max(0, self.top_left.x, self.bottom_left.x)
        right = max(0, self.image.shape[1]
                    - min(self.top_right.x, self.bottom_right.x))
        inside = util.crop(self.image, ((top, bottom), (left, right)))
        if crop and inside.shape[1] > (crop.right + crop.left) \
                and inside.shape[0] > (crop.bottom + crop.top):
            inside = util.crop(
                inside,
                ((crop.top, crop.bottom), (crop.left, crop.right)))
        return inside

    def is_label(self, crop=None):
        """Determine if the cell is a column label.

        A cell counts as a label when a Hough transform finds line segments at
        the `label_lines` angles, or when the mean interior intensity exceeds
        `col_label_threshold`.

        NOTE(review): `has_line` always crops with `self.crop` internally, so
        the `crop` argument here only affects the mean-intensity check — and
        the Hough call runs before the empty-interior guard; confirm both are
        intended.
        """
        if not crop:
            crop = self.crop
        inside = self.interior(crop=crop)
        lines = self.has_line(self.label_lines, line_length=12)
        if not min(inside.shape):
            return False  # degenerate (zero-area) interior cannot be a label
        return bool(len(lines)) or np.mean(inside) > self.col_label_threshold

    def has_line(self, angles=None, line_length=15):
        """Determine if the cell has a line at any of the given angles.

        Returns the list of detected line segments from
        `probabilistic_hough_line` (empty list when none), so callers can use
        it either as a boolean or inspect the segments.
        """
        return probabilistic_hough_line(
            self.interior(crop=self.crop),
            line_length=line_length,
            line_gap=2,
            theta=angles)

    def get_patch(self):
        """Get the cell patch for output.

        Returns ((offset_x, offset_y), width, height) in page coordinates.
        Width/height are recomputed from the corners here, duplicating the
        values already stored as `self.width`/`self.height` in __init__.
        """
        width = self.top_right.x - self.top_left.x
        height = self.bottom_left.y - self.top_left.y
        offset_x = self.offset.x
        offset_y = self.offset.y
        return (offset_x, offset_y), width, height
| 3,350 | 1,088 |