id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
1664107 | <filename>DonkiDirector/DirectorBgnThread.py
import time
import threading
import os
from DataAcqThread import DataAcqThread
import zmq
from DonkiOrchestraLib import CommunicationClass
import traceback
import socket
import multiprocessing
from InfoServer import infoServerThread
from DataServer import DataServer
# Pause between iterations of the director's main background loop (seconds).
THREAD_DELAY_SEC = 1
# When True, extra diagnostic output is printed by several methods below.
DEBUG = False
class directorThread(threading.Thread):
    """Background orchestration thread for the DonkiDirector.

    Discovers "player" processes through an info server, publishes
    triggers to them over ZMQ, waits for their acknowledgements, and
    forwards acquired data to a DataServer process for file saving.
    """
    #-----------------------------------------------------------------------------------
    # __init__
    #-----------------------------------------------------------------------------------
    def __init__(self,tango_dev):
        """Set up state, the info server, the data server and defaults.

        tango_dev: owning device object, kept as self.dev (only stored
        here; its use is outside this class's visible code).
        """
        threading.Thread.__init__(self)
        # Lifecycle flags driven externally (see run()).
        self._alive = True
        self._started = False
        self._paused = False
        self.dev = tango_dev
        self.actual_priority = 0
        # player name -> {'url': ..., 'type': ..., 'prio': ...}
        self.PlayersInfo = {}
        # "player/dataname" -> alias used when saving data.
        self.DataAliases = {}
        # priority level -> list of player names triggered at that level.
        self.Players_per_level = {}
        self.busy_Players = []
        self.trg = -1
        self.max_triggers = 10
        self._state = 'STANDBY'
        self.EnableDataSaving = False
        self.Report = ''
        # "player/dataname" -> DataAcqThread
        self.daq_threads = {}
        #
        self.zcc = CommunicationClass()
        #
        self.sub_socks = {}
        self.sub_ports = {}
        self.infoServer = infoServerThread(Port=50010, notif_function = self.info_db_changed)
        #
        # Advertise this director's PUB socket so players can find it.
        director_tokens = ['donkidirector','director',self.zcc.my_pub_socket_info()]
        self.infoServer.write_to_db(director_tokens)
        #
        # NOTE(review): eval() on DB content executes arbitrary expressions;
        # if the info DB can hold untrusted data, ast.literal_eval is safer.
        for da in eval(self.infoServer.get_from_db(['dataaliases'])):
            self.DataAliases[str(da['name'])] = str(da['data'])
        #
        self.infoServer.start()
        #
        daq_xml_config = "test.xml"
        #
        # Joinable queues: setters below put() a task and join() until the
        # DataServer process has acknowledged it with task_done().
        self.dataserver_task_queue = multiprocessing.JoinableQueue()
        self.dataserver_data_queue = multiprocessing.JoinableQueue() #(maxsize=50)
        self.dataserver_notif_queue = multiprocessing.JoinableQueue()
        self.dataserver = DataServer(daq_xml_config,
                                     data_queue=self.dataserver_data_queue,
                                     task_queue=self.dataserver_task_queue,
                                     notif_queue=self.dataserver_notif_queue)
        self.dataserver.start()
        # set default properties about file saving
        self.set_file_prefix('daqfile')
        self.set_files_contiguous(True)
        self.set_files_to_save(1)
        self.set_file_size(100)
        self.set_file_path(".")
    #-----------------------------------------------------------------------------------
    # players_changed
    #-----------------------------------------------------------------------------------
    def info_db_changed(self, tablename, new_info):
        """Callback from the info server: refresh cached player/alias maps.

        Ignores updates to the 'donkidirector' table (our own entry).
        """
        if str(tablename) == 'donkidirector':
            return
        elif str(tablename) == 'donkiplayers':
            # NOTE(review): both branches print the same thing; the DEBUG
            # split is redundant here (compare the 'dataaliases' case).
            if DEBUG:
                print "players changed",new_info
            else:
                print "players changed",new_info
            try:
                # Rebuild the whole player map from the new snapshot.
                self.PlayersInfo.clear()
                for pl in new_info:
                    self.PlayersInfo[str(pl['name'])] = {'url':str(pl['data'])}
            except:
                traceback.print_exc()
        elif str(tablename) == 'dataaliases':
            if DEBUG:
                print "data aliases changed",new_info
            else:
                print "data aliases changed"
            try:
                # Rebuild the alias map from the new snapshot.
                self.DataAliases.clear()
                for da in new_info:
                    self.DataAliases[da['name']] = str(da['data'])
            except:
                traceback.print_exc()
    #-----------------------------------------------------------------------------------
    # retrieve_players_info
    #-----------------------------------------------------------------------------------
    def retrieve_players_info(self, reconnect = False):
        """Query every known player, rebuild priority groups and DAQ threads.

        reconnect: when True, (re)create the SUB socket to each player first.
        Returns the highest priority found, or -1 if none answered.
        Players that do not answer are dropped from PlayersInfo.
        """
        #
        not_active_players = []
        max_priority = -1
        self.Players_per_level.clear()
        self.Players_per_level[0] = []
        try:
            for pl_key in self.PlayersInfo.keys():
                pl_name = str(pl_key)
                if reconnect and (not self.zcc.create_sub_socket(pl_name,self.PlayersInfo[pl_name]['url'])):
                    continue
                print "Asking info to", pl_name
                info = self.zcc.ask_for_info(pl_name)
                print info
                if len(info) == 0:
                    # No reply: schedule for removal after the loop.
                    not_active_players.append(pl_name)
                    continue
                if info['data'] == []:
                    # Player only acknowledges triggers, produces no data.
                    self.PlayersInfo[pl_name]['type'] ='ack'
                else:
                    self.PlayersInfo[pl_name]['type'] ='data'
                    zmq_pub_url = self.PlayersInfo[pl_name]['url']
                    # One DataAcqThread per published data item.
                    for dataname in info['data']:
                        daq_th_name = pl_name+"/"+dataname.lower()
                        if daq_th_name not in self.DataAliases:
                            # Default alias: the item identifies itself.
                            self.infoServer.write_to_db(['dataaliases',daq_th_name,daq_th_name])
                        data_alias = self.DataAliases[daq_th_name]
                        if daq_th_name not in self.daq_threads:
                            self.daq_threads[daq_th_name] = DataAcqThread(self, zmq_pub_url, dataname, data_alias)
                            self.daq_threads[daq_th_name].start()
                        if (self.daq_threads[daq_th_name].player_pub_url != zmq_pub_url):
                            # Player ZMQ PUB URL has changed
                            self.daq_threads[daq_th_name].set_new_url(zmq_pub_url)
                        if (self.daq_threads[daq_th_name].data_alias != data_alias):
                            # Player Data alias has changed
                            self.daq_threads[daq_th_name].data_alias = data_alias
                dprio = info['prio']
                if dprio < 0:
                    # Negative priority means the player is disabled.
                    self.PlayersInfo[pl_name]['prio'] = "Disabled"
                else:
                    self.PlayersInfo[pl_name]['prio'] = str(dprio)
                    if dprio > max_priority:
                        max_priority = dprio
                    if dprio not in self.Players_per_level.keys():
                        self.Players_per_level[dprio] = []
                    if not pl_name in self.Players_per_level[dprio]:
                        self.Players_per_level[dprio].append(pl_name)
            #
            for pl_name in not_active_players:
                self.PlayersInfo.pop(pl_name)
        except:
            traceback.print_exc()
        return max_priority
    #-----------------------------------------------------------------------------------
    # start_stop_Players
    #-----------------------------------------------------------------------------------
    def start_stop_Players(self, bool_in):
        """Broadcast 'start' (bool_in True) or 'stop' to all enabled players.

        Also propagates the flag to DAQ threads and, on start, tells the
        DataServer to begin acquisition.
        """
        max_priority = 0
        #
        for a in self.PlayersInfo.keys():
            if self.PlayersInfo[a]['prio'] == "Disabled":
                continue
            try:
                if (bool_in):
                    ok = self.zcc.publish_command('start',a)
                else:
                    ok = self.zcc.publish_command('stop',a)
                print "Reply", ok
            except:
                if DEBUG:
                    traceback.print_exc()
        for k in self.daq_threads:
            self.daq_threads[k]._started = bool_in
        if bool_in:
            self.dataserver_task_queue.put(['start_daq'])
            self.dataserver_task_queue.join()
            return
        # NOTE(review): reached only when bool_in is False (stop path), so
        # the inner "if bool_in" branch below is dead code.  Moreover
        # self.MetadataSources is never initialized in __init__ as visible
        # here -- this likely raises AttributeError on stop; verify.
        for k in self.MetadataSources:
            if self.MetadataSources[k]['enabled']:
                if bool_in:
                    self.MetadataSources[k]['status'] = 'ON'
                elif self.MetadataSources[k]['status'] != 'ALARM':
                    self.MetadataSources[k]['status'] = 'STANDBY'
    #-----------------------------------------------------------------------------------
    # set_player_priority
    #-----------------------------------------------------------------------------------
    def set_player_priority(self, player_name, priority):
        """Ask a player to change its priority; cache it on success."""
        print "Set Priority"
        try:
            ok = self.zcc.publish_command('priority', player_name, priority)
            if ok:
                self.PlayersInfo[player_name]['prio'] = str(priority)
                print "Ok"
                print self.PlayersInfo
            else:
                print "Error: player did not reply"
        except:
            if DEBUG:
                traceback.print_exc()
    #-----------------------------------------------------------------------------------
    # notify_new_data
    #-----------------------------------------------------------------------------------
    def notify_new_data(self, data_name, trg_in, trg_f, data_in):
        """Queue a data chunk covering triggers [trg_in, trg_f] for saving.

        Drops the chunk unless the sequence is running ('ON'), the trigger
        is plausibly close to the current one, and saving is enabled.
        """
        print "NEW DATA",data_name,trg_in, trg_f
        if self._state != 'ON':
            return
        if abs(trg_in - self.trg) > 1000:
            # Fake trigger value
            return
        if not self.EnableDataSaving:
            return
        #elif not self.PlayersInfo[daq_thread.player_nickname]['enabled']:
        #    return
        if DEBUG:
            print "NEW DATA",data_name,trg_in, trg_f
        self.dataserver_data_queue.put([data_name,trg_in,trg_f,data_in])
    #-----------------------------------------------------------------------------------
    # report_message
    #-----------------------------------------------------------------------------------
    def report_message(self, message_in, with_date = False):
        """Append a line to the rolling report, keeping the last 10000 lines.

        with_date: when True, prefix the message with the current timestamp.
        """
        if with_date:
            message_in = time.asctime() + " " + message_in
        new_report = ("\n".join([self.Report,message_in])).split("\n")
        self.Report = "\n".join(new_report[-10000:])
    #-----------------------------------------------------------------------------------
    # ResetReport
    #-----------------------------------------------------------------------------------
    def ResetReport(self):
        """Clear the rolling report buffer."""
        self.Report = ""
    #-----------------------------------------------------------------------------------
    # set_DataAlias
    #-----------------------------------------------------------------------------------
    def set_DataAlias(self, player_data_name, alias_name):
        """Persist an alias for a "player/dataname" key in the info DB.

        The local DataAliases cache is refreshed via info_db_changed.
        """
        self.infoServer.write_to_db(['dataaliases',player_data_name,alias_name])
    # Each setter below forwards the value to the DataServer process and
    # blocks on join() until the server has acknowledged it (task_done),
    # then caches the value locally for the matching getter.
    #-----------------------------------------------------------------------------------
    # set_file_prefix
    #-----------------------------------------------------------------------------------
    def set_file_prefix(self, prefix):
        """Set the output file name prefix."""
        self.dataserver_task_queue.put(['file_prefix',prefix])
        self.dataserver_task_queue.join()
        self.file_prefix = prefix
    #-----------------------------------------------------------------------------------
    # set_file_path
    #-----------------------------------------------------------------------------------
    def set_file_path(self, fpath):
        """Set the output directory."""
        self.dataserver_task_queue.put(['file_path',fpath])
        self.dataserver_task_queue.join()
        self.file_path = fpath
    #-----------------------------------------------------------------------------------
    # set_files_contiguous
    #-----------------------------------------------------------------------------------
    def set_files_contiguous(self, bool_in):
        """Set whether consecutive files are written contiguously."""
        self.dataserver_task_queue.put(['Files_contiguous',bool_in])
        self.dataserver_task_queue.join()
        self.files_contiguous = bool_in
    #-----------------------------------------------------------------------------------
    # set_files_to_save
    #-----------------------------------------------------------------------------------
    def set_files_to_save(self, nFiles):
        """Set how many files to save."""
        self.dataserver_task_queue.put(['Files_to_save',nFiles])
        self.dataserver_task_queue.join()
        self.files_to_save = nFiles
    #-----------------------------------------------------------------------------------
    # set_file_size
    #-----------------------------------------------------------------------------------
    def set_file_size(self, nTriggers):
        """Set the file size expressed in number of triggers."""
        self.dataserver_task_queue.put(['File_size',nTriggers])
        self.dataserver_task_queue.join()
        self.file_size = nTriggers
    # Getters return the locally cached values set by the setters above.
    #-----------------------------------------------------------------------------------
    # get_file_prefix
    #-----------------------------------------------------------------------------------
    def get_file_prefix(self):
        """Return the output file name prefix."""
        return self.file_prefix
    #-----------------------------------------------------------------------------------
    # get_file_path
    #-----------------------------------------------------------------------------------
    def get_file_path(self):
        """Return the output directory."""
        return self.file_path
    #-----------------------------------------------------------------------------------
    # get_files_contiguous
    #-----------------------------------------------------------------------------------
    def get_files_contiguous(self):
        """Return the contiguous-files flag."""
        return self.files_contiguous
    #-----------------------------------------------------------------------------------
    # get_files_to_save
    #-----------------------------------------------------------------------------------
    def get_files_to_save(self):
        """Return how many files to save."""
        return self.files_to_save
    #-----------------------------------------------------------------------------------
    # get_file_size
    #-----------------------------------------------------------------------------------
    def get_file_size(self):
        """Return the file size in number of triggers."""
        return self.file_size
    #-----------------------------------------------------------------------------------
    # quit_and_exit
    #-----------------------------------------------------------------------------------
    def quit_and_exit(self):
        """Ask the background loop in run() to terminate."""
        self._alive = False
    #-----------------------------------------------------------------------------------
    # run
    #-----------------------------------------------------------------------------------
    def run(self):
        """Main background loop.

        While idle (_started False): refresh player info on changes and
        ping players with a dummy trigger to detect non-responders.
        When started: run the trigger sequence, publishing each trigger
        per priority level and waiting for acks, until max_triggers is
        reached (a negative max_triggers means run forever) or _started
        is cleared.  On exit, shuts down the info server, the DataServer
        and all DAQ threads.
        """
        knownPlayersInfo = self.PlayersInfo.copy()
        while self._alive:
            if not self._started:
                if knownPlayersInfo != self.PlayersInfo:
                    # Player set changed: reconnect and resync.
                    self.retrieve_players_info(reconnect = True)
                    knownPlayersInfo = self.PlayersInfo.copy()
                else:
                    # Send a dummy negative trigger, something like a 'ping'
                    self.zcc.publish_trigger(-1, -1)
                    not_responding_Players = self.PlayersInfo.keys()
                    t0 = time.time()
                    while not_responding_Players and not self._started:
                        pl_msgs = self.zcc.wait_message(not_responding_Players)
                        if pl_msgs is not None and len(pl_msgs):
                            for pl in pl_msgs:
                                idx = not_responding_Players.index(pl)
                                del not_responding_Players[idx]
                        elif (time.time() - t0) > 5:
                            # 5 s timeout: force a full refresh next pass.
                            print "NOT RESPONDING",not_responding_Players
                            knownPlayersInfo = None
                            break
            else:
                upper_priority = self.retrieve_players_info()
                self.start_stop_Players(True)
                self.ResetReport()
                self._state = "ON"
                self.report_message("DonkiDirector started")
                self._paused = False
                self.trg = 1
                while ((self.trg <= self.max_triggers) or (self.max_triggers < 0)):
                    if not self._started :
                        break
                    if (self._paused):
                        # Paused: idle without advancing the trigger.
                        self._state = "STANDBY"
                        time.sleep(0.1)
                        continue
                    self._state = "ON"
                    # Walk priority levels in ascending order for this trigger.
                    for priority in range(upper_priority+1):
                        if not priority in self.Players_per_level.keys():
                            continue
                        self.busy_Players= self.Players_per_level[priority][:]
                        if DEBUG:
                            print "----","TRIGGER:",self.trg,"PRIORITY:",priority,"----"
                        self.actual_priority = priority
                        t0 = time.time()
                        #self.dev.push_event ("Trigger",[], [], priority, self.trg,PyTango._PyTango.AttrQuality.ATTR_VALID)
                        self.zcc.publish_trigger(self.trg, priority)
                        # Wait until every player at this level acks this trigger.
                        while self.busy_Players and self._started:
                            pl_msgs = self.zcc.wait_message(self.busy_Players)
                            if len(pl_msgs):
                                for pl in pl_msgs:
                                    new_msg = pl_msgs[pl]
                                    topic = new_msg[0].lower()
                                    trg = new_msg[1]
                                    prio = new_msg[2]
                                    if topic == 'ack' and trg == self.trg:
                                        idx = self.busy_Players.index(pl)
                                        del self.busy_Players[idx]
                            elif (time.time() - t0) > 60:
                                # No ack within 60 s: republish the trigger.
                                self.zcc.publish_trigger(self.trg, priority)
                                #self.dev.push_event ("Trigger",[], [], priority, self.trg,PyTango._PyTango.AttrQuality.ATTR_VALID)
                                t0 = time.time()
                        if DEBUG:
                            print "Delay:",(time.time()-t0) * 1000,"ms"
                    self.trg += 1
                self.start_stop_Players(False)
                self._state = "OFF"
                self.report_message("DonkiDirector stopped")
                self._started = False
            time.sleep(THREAD_DELAY_SEC)
        #
        # Shutdown path: stop helper threads and the DataServer process.
        self.infoServer._stop_ = True
        #
        self.dataserver_task_queue.put(['stop'])
        #
        for dtkey in self.daq_threads.keys():
            self.daq_threads[dtkey]._started = False
            self.daq_threads[dtkey]._alive = False
| StarcoderdataPython |
3357416 | <reponame>kvg/pydm<filename>tests/test_acceptance/example_sge.py
from pathlib import Path
from dmpy import DistributedMake, get_dm_arg_parser

# Acceptance example: run a single DistributedMake target through the SGE
# scheduler.  (NOTE(review): a previous comment here said "--dry-run", but
# the arguments below actually select the 'sge' scheduler.)
test_args = ['--scheduler', 'sge']
args = get_dm_arg_parser().parse_args(test_args)
m = DistributedMake(args_object=args)
# Target file is created next to this script.
target = Path(__file__).with_name("test_output_file").absolute()
# Resource spec: 4 GB h_vmem, default queue, 4 threads.
m.add(target, None, "echo 'hi world'", { 'h_vmem': 4, 'queue': 'default', 'threads': 4 })
m.execute()
| StarcoderdataPython |
195070 | <reponame>bytedance/raylink
from unittest import TestCase
from raylink.util.resource import Machine, ResourceManager
class TestResource(TestCase):
    """Tests for raylink's Machine / ResourceManager allocation logic."""
    def setUp(self):
        """Build a small fake cluster: 3 workers, 1 server, 1 scheduler."""
        cluster_nodes = {
            'worker': {
                'worker-0': {'cpu': 10},
                'worker-1': {'cpu': 10},
                'worker-2': {'cpu': 10}
            },
            'server': {
                'server-0': {'cpu': 10, 'gpu': 1}
            },
            'scheduler': {
                'scheduler-0': {'cpu': 10}
            }
        }
        cluster_res = {}
        for role, identities in cluster_nodes.items():
            for identity, resource in identities.items():
                additional = {}
                # Workers and schedulers also advertise 2 'ps' slots each.
                if role == 'worker' or role == 'scheduler':
                    additional['ps'] = 2
                m = Machine(role=role, identity=identity,
                            basic=resource['cpu'], additional=additional)
                cluster_res[m.identity] = m
        self.cluster_res = cluster_res
    def test_allocate(self):
        """Allocation pins the request to a concrete machine identity."""
        node_res = {'worker': {'num_cpus': 1, 'resources': {'worker': 1}}}
        rm = ResourceManager(self.cluster_res, node_res)
        resources = rm.allocate('worker')
        true = {'num_cpus': 1, 'resources': {'worker': 1, 'worker-0': 1}}
        self.assertDictEqual(resources, true)
        # Fractional CPU request with an extra 'ps' requirement lands on
        # the next machine ('ps' is dropped from the concrete placement).
        node_res = {
            'worker': {'num_cpus': 0.1, 'resources': {'worker': 0.1, 'ps': 1}}}
        rm = ResourceManager(self.cluster_res, node_res)
        resources = rm.allocate('worker')
        true = {'num_cpus': 0.1, 'resources': {'worker': 0.1, 'worker-1': 0.1}}
        self.assertDictEqual(resources, true)
    def test_allocate_same(self):
        """Allocating with a parent's resources reuses the same machine."""
        node_res = {'worker': {'num_cpus': 1, 'resources': {'worker': 1}}}
        rm = ResourceManager(self.cluster_res, node_res)
        parent_res = rm.allocate('worker')
        true = {'num_cpus': 1, 'resources': {'worker': 1, 'worker-0': 1}}
        self.assertDictEqual(parent_res, true)
        # Child allocation constrained by the parent stays on worker-0.
        res = rm.allocate('worker', parent_res)
        true = {'num_cpus': 1, 'resources': {'worker': 1, 'worker-0': 1}}
        self.assertDictEqual(res, true)
    def test_allocate_2(self):
        """Repeated 'ps' allocations both resolve to the single scheduler."""
        node_res = {
            'ps': {'num_cpus': 1, 'resources': {'scheduler': 1, 'ps': 1}}}
        rm = ResourceManager(self.cluster_res, node_res)
        true = {'num_cpus': 1, 'resources': {'scheduler-0': 1, 'scheduler': 1}}
        res = rm.allocate('ps')
        self.assertDictEqual(res, true)
        # Second allocation: scheduler-0 advertises 2 'ps' slots, so it fits.
        res = rm.allocate('ps')
        self.assertDictEqual(res, true)
    def test_allocate_3(self):
        """Smoke test on a larger, production-like cluster layout.

        Mostly prints placements; only exercises that allocation succeeds
        for every role (no assertions beyond not raising).
        """
        cluster_res = {
            'scheduler-0': Machine('scheduler', 'scheduler-0', 1840,
                                   {'replay': 16, 'BRAIN': 1}),
            'scheduler-1': Machine('scheduler', 'scheduler-1', 1840,
                                   {'replay': 16}),
            'scheduler-2': Machine('scheduler', 'scheduler-2', 1840,
                                   {'replay': 16}),
            'scheduler-3': Machine('scheduler', 'scheduler-3', 1840,
                                   {'replay': 16}),
            'server-0': Machine('server', 'server-0', 920,
                                {'learner': 4, 'ps': 1}),
            'worker-0': Machine('worker', 'worker-0', 1840, {}),
            'worker-1': Machine('worker', 'worker-1', 1840, {}),
            'worker-10': Machine('worker', 'worker-10', 1840, {}),
            'worker-11': Machine('worker', 'worker-11', 1840, {}),
            'worker-12': Machine('worker', 'worker-12', 1840, {}),
            'worker-13': Machine('worker', 'worker-13', 1840, {}),
            'worker-14': Machine('worker', 'worker-14', 1840, {}),
            'worker-15': Machine('worker', 'worker-15', 1840, {}),
            'worker-2': Machine('worker', 'worker-2', 1840, {}),
            'worker-3': Machine('worker', 'worker-3', 1840, {}),
            'worker-4': Machine('worker', 'worker-4', 1840, {}),
            'worker-5': Machine('worker', 'worker-5', 1840, {}),
            'worker-6': Machine('worker', 'worker-6', 1840, {}),
            'worker-7': Machine('worker', 'worker-7', 1840, {}),
            'worker-8': Machine('worker', 'worker-8', 1840, {}),
            'worker-9': Machine('worker', 'worker-9', 1840, {})}
        node_res = {
            'BRAIN': {'num_cpus': 1, 'resources': {'BRAIN': 1, 'scheduler': 1}},
            'head': {'num_cpus': 1, 'resources': {'scheduler': 1}},
            'learner': {'num_cpus': 1, 'num_gpus': 1,
                        'resources': {'server': 1}},
            'logger': {'num_cpus': 1, 'resources': {'scheduler': 1}},
            'manager': {'num_cpus': 1, 'resources': {'scheduler': 1}},
            'ps': {'num_cpus': 1, 'resources': {'server': 1}},
            'replay': {'num_cpus': 1,
                       'resources': {'replay': 1, 'scheduler': 1}},
            'runner': {'num_cpus': 1, 'resources': {'scheduler': 1}},
            'sampler': {'num_cpus': 1, 'resources': {'server': 1}},
            'storage': {'num_cpus': 1, 'resources': {'scheduler': 1}},
            'util': {'num_cpus': 1, 'resources': {'scheduler': 1}},
            'worker': {'num_cpus': 1, 'resources': {'worker': 1}}}
        rm = ResourceManager(cluster_res, node_res)
        res = rm.allocate('BRAIN', None)
        # Pin the support roles to scheduler-0 alongside BRAIN.
        res = rm.allocate('logger', {'num_cpus': 1,
                                     'resources': {'scheduler-0': 1,
                                                   'scheduler': 1}})
        res = rm.allocate('storage', {'num_cpus': 1,
                                      'resources': {'scheduler-0': 1,
                                                    'scheduler': 1}})
        res = rm.allocate('manager', {'num_cpus': 1,
                                      'resources': {'scheduler-0': 1,
                                                    'scheduler': 1}})
        print('manager', res)
        res = rm.allocate('util', {'num_cpus': 1,
                                   'resources': {'scheduler-0': 1,
                                                 'scheduler': 1}})
        print('util', res)
        res = rm.allocate('runner', {'num_cpus': 1,
                                     'resources': {'scheduler-0': 1,
                                                   'scheduler': 1}})
        print('runner', res)
        # Four replay buffers, each followed by a co-located 'head'.
        res = rm.allocate('replay', None)
        [rm.allocate('head', res) for _ in range(1)]
        print('replay', res)
        res = rm.allocate('replay', None)
        [rm.allocate('head', res) for _ in range(1)]
        print('replay', res)
        res = rm.allocate('replay', None)
        [rm.allocate('head', res) for _ in range(1)]
        print('replay', res)
        res = rm.allocate('replay', None)
        [rm.allocate('head', res) for _ in range(1)]
        print('replay', res)
| StarcoderdataPython |
1782174 | <gh_stars>0
import sys
import yaml
from tqdm import tqdm
import numpy as np
import torch
from sync_batchnorm import DataParallelWithCallback
from modules.generator import OcclusionAwareGenerator
from modules.keypoint_detector import KPDetector
from animate import normalize_kp
# Fail fast on Python 2: the code below relies on Python 3 semantics.
if sys.version_info[0] < 3:
    raise Exception("You must use Python 3 or higher. Recommended version is Python 3.7")
def load_checkpoints(config_path, checkpoint_path, cpu=False):
    """Build the generator and keypoint detector and load their weights.

    config_path: YAML file holding model_params for both networks.
    checkpoint_path: torch checkpoint with 'generator' and 'kp_detector'
        state dicts.
    cpu: when True, keep everything on CPU and skip DataParallel wrapping;
        otherwise models are moved to CUDA and wrapped in
        DataParallelWithCallback.
    Returns (generator, kp_detector), both switched to eval mode.
    """
    with open(config_path) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    generator = OcclusionAwareGenerator(**config['model_params']['generator_params'],
                                        **config['model_params']['common_params'])
    if not cpu:
        generator.cuda()
    kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
                             **config['model_params']['common_params'])
    if not cpu:
        kp_detector.cuda()
    if cpu:
        # Remap CUDA-saved tensors onto the CPU.
        checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    else:
        checkpoint = torch.load(checkpoint_path)
    generator.load_state_dict(checkpoint['generator'])
    kp_detector.load_state_dict(checkpoint['kp_detector'])
    if not cpu:
        generator = DataParallelWithCallback(generator)
        kp_detector = DataParallelWithCallback(kp_detector)
    generator.eval()
    kp_detector.eval()
    return generator, kp_detector
def make_animation(source_image, driving_video, generator, kp_detector, relative=True, adapt_movement_scale=True, cpu=False):
    """Animate source_image following the motion of driving_video.

    source_image: single image array; the [np.newaxis] + permute(0, 3, 1, 2)
        below implies an HWC layout that becomes NCHW (channels-last input
        assumed -- confirm with callers).
    driving_video: sequence of frames; permuted to (1, C, T, H, W).
    relative / adapt_movement_scale: forwarded to normalize_kp to control
        relative keypoint movement and scale adaptation.
    cpu: when False, tensors are moved to CUDA.
    Returns a list of predicted frames as HWC numpy arrays (one per
    driving frame).
    """
    with torch.no_grad():
        predictions = []
        source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
        if not cpu:
            source = source.cuda()
        driving = torch.tensor(np.array(driving_video)[np.newaxis].astype(np.float32)).permute(0, 4, 1, 2, 3)
        kp_source = kp_detector(source)
        # Keypoints of the first driving frame anchor relative motion.
        kp_driving_initial = kp_detector(driving[:, :, 0])
        for frame_idx in tqdm(range(driving.shape[2])):
            driving_frame = driving[:, :, frame_idx]
            if not cpu:
                driving_frame = driving_frame.cuda()
            kp_driving = kp_detector(driving_frame)
            kp_norm = normalize_kp(kp_source=kp_source, kp_driving=kp_driving,
                                   kp_driving_initial=kp_driving_initial, use_relative_movement=relative,
                                   use_relative_jacobian=relative, adapt_movement_scale=adapt_movement_scale)
            out = generator(source, kp_source=kp_source, kp_driving=kp_norm)
            # NCHW -> HWC numpy frame.
            predictions.append(np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0])
    return predictions
| StarcoderdataPython |
1698096 | <reponame>cptchloroplast/rosalind
# Monoisotopic mass table: residue mass (daltons) of each of the 20
# standard amino acids, keyed by one-letter code.
c = {
    'A': 71.03711,  'C': 103.00919, 'D': 115.02694, 'E': 129.04259,
    'F': 147.06841, 'G': 57.02146,  'H': 137.05891, 'I': 113.08406,
    'K': 128.09496, 'L': 113.08406, 'M': 131.04049, 'N': 114.04293,
    'P': 97.05276,  'Q': 128.05858, 'R': 156.10111, 'S': 87.03203,
    'T': 101.04768, 'V': 99.06841,  'W': 186.07931, 'Y': 163.06333,
}
| StarcoderdataPython |
77916 | <gh_stars>0
from setuptools import setup, PEP420PackageFinder

# Package definition for "cascade": sources live under src/ (src layout),
# with PEP 420 namespace-package discovery.
setup(
    name="cascade",
    version="0.0.1",
    packages=PEP420PackageFinder.find("src"),
    # Ship the executor's bundled TOML data files.
    package_data={"cascade.executor": ["data/*.toml"]},
    package_dir={"": "src"},
    install_requires=["numpy", "pandas", "scipy", "toml", "sqlalchemy"],
    extras_require={
        "testing": ["hypothesis", "pytest", "pytest-mock"],
        "documentation": ["sphinx", "sphinx_rtd_theme", "sphinx-autobuild", "sphinxcontrib-napoleon"],
        # IHME-internal database access layer.
        "ihme_databases": ["db_tools"],
    },
    entry_points={
        "console_scripts": [
            ["dmchat=cascade.executor.chatter:chatter"],
            ["dmdummy=cascade.executor.chatter:dismod_dummy"],
        ]
    },
)
| StarcoderdataPython |
4817442 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.contrib import admin
from django.utils.safestring import mark_safe
from pygments import highlight
from pygments.lexers import JsonLexer
from pygments.formatters import HtmlFormatter
from . import models
@admin.register(models.OccurrenceTaxon)
class OccurrenceTaxonAdmin(admin.ModelAdmin):
    """Admin list view for taxon occurrences."""
    list_display = (
        "id",
        "taxon",
        "location",
        "observation",
    )
@admin.register(models.Taxon)
class TaxonAdmin(admin.ModelAdmin):
    """Read-mostly admin for Taxon records.

    Taxonomic fields are read-only (sourced externally via tsn); the
    'upper ranks' JSON is rendered pretty-printed and syntax-highlighted.
    """
    list_display = (
        "name",
        "rank",
        "kingdom",
    )
    readonly_fields = (
        "name",
        "common_names",
        "rank",
        "kingdom",
        "_upper_ranks",
    )
    fieldsets = (
        (
            None,
            {
                "classes": (
                    "wide",
                ),
                "fields": (
                    "tsn",
                    "name",
                    "common_names",
                    "rank",
                    "kingdom",
                ),
            }
        ),
        (
            "Other taxonomic ranks",
            {
                "classes": (
                    "collapse",
                    "wide",
                ),
                "fields": (
                    "_upper_ranks",
                ),
            }
        ),
        (
            "Statuses",
            {
                "classes": (
                    "collapse",
                    "wide",
                ),
                "fields": (
                    "cm_status",
                    "s_rank",
                    "n_rank",
                    "g_rank",
                    "native",
                    "leap_concern",
                    "oh_status",
                    "usfws_status",
                    "iucn_red_list_category",
                    "other_code",
                    "ibp_english",
                    "ibp_scientific",
                    "bblab_number",
                    "nrcs_usda_symbol",
                    "synonym_nrcs_usda_symbol",
                    "epa_numeric_code",
                )
            }
        )
    )
    def _upper_ranks(self, instance):
        """Render instance.upper_ranks as highlighted JSON (or None)."""
        upper_ranks = instance.upper_ranks
        if upper_ranks is not None:
            result = self._get_pretty_json(upper_ranks)
        else:
            result = None
        return result
    _upper_ranks.short_description = "Upper ranks"
    def _get_pretty_json(self, data):
        """Return data as pygments-highlighted, indented JSON HTML.

        mark_safe is acceptable here because pygments escapes the JSON
        content while highlighting.
        """
        to_prettify = json.dumps(data, indent=4)
        formatter = HtmlFormatter(style="colorful")
        highlighted = highlight(to_prettify, JsonLexer(), formatter)
        style = "<style>{}</style><br>".format(formatter.get_style_defs())
        return mark_safe(style + highlighted)
# Register the remaining lookup/dictionary models with the default admin.
admin.site.register(models.OccurrenceCategory)
admin.site.register(models.DayTime)
admin.site.register(models.Season)
admin.site.register(models.RecordOrigin)
admin.site.register(models.RecordingStation)
admin.site.register(models.CmStatus)
admin.site.register(models.SRank)
admin.site.register(models.NRank)
admin.site.register(models.GRank)
admin.site.register(models.RegionalStatus)
admin.site.register(models.UsfwsStatus)
admin.site.register(models.IucnRedListCategory)
admin.site.register(models.ElementType)
admin.site.register(models.MushroomGroup)
admin.site.register(models.Preservative)
admin.site.register(models.Storage)
admin.site.register(models.Repository)
admin.site.register(models.AquaticHabitatCategory)
admin.site.register(models.Gender)
admin.site.register(models.Marks)
admin.site.register(models.DiseasesAndAbnormalities)
admin.site.register(models.TerrestrialSampler)
admin.site.register(models.AquaticSampler)
admin.site.register(models.TerrestrialStratum)
admin.site.register(models.PondLakeType)
admin.site.register(models.PondLakeUse)
admin.site.register(models.ShorelineType)
admin.site.register(models.LakeMicrohabitat)
admin.site.register(models.StreamDesignatedUse)
admin.site.register(models.ChannelType)
admin.site.register(models.HmfeiLocalAbundance)
admin.site.register(models.LoticHabitatType)
admin.site.register(models.WaterFlowType)
admin.site.register(models.WetlandType)
admin.site.register(models.WetlandLocation)
admin.site.register(models.WetlandConnectivity)
admin.site.register(models.WaterSource)
admin.site.register(models.WetlandHabitatFeature)
admin.site.register(models.SlimeMoldClass)
admin.site.register(models.SlimeMoldMedia)
admin.site.register(models.PlantCount)
admin.site.register(models.MoistureRegime)
admin.site.register(models.GroundSurface)
admin.site.register(models.CanopyCover)
admin.site.register(models.GeneralHabitatCategory)
admin.site.register(models.LandscapePosition)
admin.site.register(models.Aspect)
admin.site.register(models.Slope)
admin.site.register(models.FernLifestages)
admin.site.register(models.FloweringPlantLifestages)
admin.site.register(models.MossLifestages)
86704 | import unittest
import plotly.graph_objs as go
class TestPlotly(unittest.TestCase):
    """Smoke test: constructing a plotly Figure must not raise."""
    def test_figure(self):
        trace = {'x': [1, 2], 'y': [1, 3]}
        data = [ trace ]
        # No assertion: success means Figure construction did not raise.
        go.Figure(data=data)
| StarcoderdataPython |
3392053 | <gh_stars>1-10
import unittest
from base_test_case import BaseTestCase
from pyspark_proxy.ml.linalg import *
class MLLinalgTestCase(BaseTestCase):
    """Tests for pyspark_proxy's ml.linalg DenseVector support."""
    def test_ml_linalg_dense_vector(self):
        dv = DenseVector([1.0, 2.0])
        self.assertEqual(type(dv), DenseVector)
    def test_ml_linalg_dense_vector_dataframe_nested(self):
        # DenseVector values must survive a round trip through a DataFrame.
        df = self.sqlContext.createDataFrame([(DenseVector([1.0, 2.0]),),(DenseVector([0.0, 1.0]),),(DenseVector([3.0, 0.2]),)], ["tf"])
        row = df.head()
        # Type name check (not equality): the proxy may wrap the class.
        self.assertIn('DenseVector', str(type(row.tf)))
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
13582 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the nullable last_message_at timestamp to ConvoThread."""

    dependencies = [
        ('convos', '0004_auto_20150511_0945'),
    ]

    operations = [
        migrations.AddField(
            model_name='convothread',
            name='last_message_at',
            # Nullable so existing rows need no backfill.
            field=models.DateTimeField(null=True, verbose_name='Last message at', blank=True),
        ),
    ]
| StarcoderdataPython |
92257 | <filename>test_pyreds.py
#!/usr/bin/env python
import sys
import unittest
import redis
import pyreds.reds as reds
# Use Redis database 1 (not the default 0) so test data stays isolated;
# it is flushed in SearchTestCase.tearDown.
db = redis.StrictRedis(db=1)
reds.set_client(db)
def decode(byte):
    """Return *byte* as a native str.

    On Python 3, bytes values are decoded as UTF-8; every other value
    (and everything on Python 2) is returned unchanged.
    """
    # Compare the version tuple, not the version string: the original
    # "sys.version > '3'" is a lexicographic string comparison, which is
    # fragile (e.g. it mis-orders a hypothetical "10.x" before "3").
    if sys.version_info[0] >= 3 and isinstance(byte, bytes):
        return byte.decode('utf-8')
    else:
        return byte
class SearchTestCase(unittest.TestCase):
    """Integration tests for pyreds against a live Redis (db 1)."""
    def setUp(self):
        """Index a fixed corpus of short documents under ids 0-9."""
        self.search = reds.create_search('reds')
        self.search.index('Tobi wants 4 dollars', 0)
        self.search.index('Loki is a ferret', 2)
        self.search.index('Tobi is also a ferret', 3)
        self.search.index('Jane is a bitchy ferret', 4)
        self.search.index('Tobi is employed by LearbBoost', 5)
        self.search.index('stuff compute', 6)
        self.search.index('simple words do not mean simple ideas', 7)
        self.search.index('The dog spoke the words, much to our unbelief', 8)
        self.search.index('puppy dog eagle puppy frog puppy dog simple', 9)
    def tearDown(self):
        # Wipe test database 1 so each test starts clean.
        db.flushdb()
assert reds._words('foo bar baz ') == ['foo', 'bar', 'baz']
assert reds._words(' Punctuation and whitespace; should be, handled.') == [
'Punctuation',
'and',
'whitespace',
'should',
'be',
'handled'
]
assert reds._words('Tobi wants 4 dollars') == ['Tobi', 'wants', '4', 'dollars']
def test_strip_stopwords(self):
assert reds._strip_stopwords(['this', 'is', 'just', 'a', 'test']) == ['test']
def test_count_words(self):
assert reds._count_words(['foo', 'bar', 'baz', 'foo', 'jaz', 'foo', 'baz']) == {
'foo': 3,
'bar': 1,
'baz': 2,
'jaz': 1
}
def test_metaphone_map(self):
assert reds._metaphone_map(['foo', 'bar', 'baz']) == {'foo': 'F', 'bar': 'BR', 'baz': 'BS'}
def test_metaphone_list(self):
assert reds._metaphone_list(['foo', 'bar', 'baz']) == ['F', 'BR', 'BS']
def test_metaphone_keys(self):
assert reds._metaphone_keys('reds', ['foo', 'bar', 'baz']) == ['reds:word:F', 'reds:word:BR', 'reds:word:BS']
assert reds._metaphone_keys('foobar', ['foo', 'bar', 'baz']) == ['foobar:word:F', 'foobar:word:BR', 'foobar:word:BS']
    def test_query(self):
        """End-to-end queries against the corpus indexed in setUp."""
        ids = self.search.query('stuff compute').end()
        ids = [decode(id) for id in ids]
        assert ids == ['6']
        ids = self.search.query('Tobi').end()
        ids = [decode(id) for id in ids]
        assert len(ids) == 3
        assert '0' in ids
        assert '3' in ids
        assert '5' in ids
        # Queries are case-insensitive.
        ids = self.search.query('tobi').end()
        ids = [decode(id) for id in ids]
        assert len(ids) == 3
        assert '0' in ids
        assert '3' in ids
        assert '5' in ids
        ids = self.search.query('bitchy').end()
        ids = [decode(id) for id in ids]
        assert ids == ['4']
        ids = self.search.query('bitchy jane').end()
        ids = [decode(id) for id in ids]
        assert ids == ['4']
        # 'or' semantics: union of per-word matches (both call styles).
        ids = self.search.query('loki and jane').type('or').end()
        ids = [decode(id) for id in ids]
        assert len(ids) == 2
        assert '2' in ids
        assert '4' in ids
        ids = self.search.query('loki and jane', 'or').end()
        ids = [decode(id) for id in ids]
        assert len(ids) == 2
        assert '2' in ids
        assert '4' in ids
        # Default 'and' semantics: no single doc contains both names.
        ids = self.search.query('loki and jane').end()
        assert ids == []
        ids = self.search.query('loki and jane', 'invalid type').end()
        assert ids == []
        # NOTE(review): '<NAME>' is a dataset-anonymization placeholder --
        # the original query text matching doc 4 was lost.
        ids = self.search.query('<NAME>').end()
        ids = [decode(id) for id in ids]
        assert ids == ['4']
        # Stopword-only queries match nothing.
        ids = self.search.query('is a').end()
        assert ids == []
        # Results are ranked: doc 7 ('simple' twice) before doc 9.
        ids = self.search.query('simple').end()
        ids = [decode(id) for id in ids]
        assert len(ids) == 2
        assert '7' in ids
        assert '9' in ids
        assert ids[0] == '7'
        assert ids[1] == '9'
    def test_search(self):
        """Indexed documents become queryable; removal makes them vanish."""
        self.search.index('keyboard cat', 6)
        ids = self.search.query('keyboard').end()
        ids = [decode(id) for id in ids]
        assert ids == ['6']
        ids = self.search.query('cat').end()
        ids = [decode(id) for id in ids]
        assert ids == ['6']
        # Removing the doc removes it from every word's posting set.
        self.search.remove(6)
        ids = self.search.query('keyboard').end()
        assert ids == []
        ids = self.search.query('cat').end()
        assert ids == []
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
119052 | <gh_stars>10-100
import json
import codecs
__author__ = 'cmakler'
js_directories = [
'build/bundled/',
'docs/js/',
'docs/playground/code/',
'../bh-textbook/code/',
'../core-interactives/code/',
'../econgraphs/static/js/'
]
js_local_directories = [
'build/bundled/',
'docs/js/',
'docs/playground/code/'
]
css_directories = [
'build/bundled/',
'docs/css/',
'docs/playground/code/',
'../bh-textbook/code/',
'../core-interactives/code/',
'../econgraphs/static/css/'
]
bundles = [
{
"name": "kg-lib.js",
"dest_directories": ["build/lib/"],
"order": [
"node_modules/katex/dist/katex.min.js",
"node_modules/katex/dist/contrib/auto-render.min.js",
"node_modules/d3/dist/d3.min.js",
"node_modules/mathjs/dist/math.min.js",
"node_modules/js-yaml/dist/js-yaml.min.js"
]
},
{
"name": "kg3d-lib.js",
"dest_directories": ["build/lib/"],
"order": [
"node_modules/katex/dist/katex.min.js",
"node_modules/katex/dist/contrib/auto-render.min.js",
"node_modules/d3/dist/d3.min.js",
"node_modules/mathjs/dist/math.min.js",
"node_modules/js-yaml/dist/js-yaml.min.js",
"build/lib/mathbox-bundle.min.js"
]
},
{
"name": "kg-lib.css",
"dest_directories": ["build/lib/"],
"order": [
"node_modules/katex/dist/katex.min.css"
]
},
{
"name": "kg-tufte.css",
"dest_directories": ["build/lib/"],
"order": [
"node_modules/katex/dist/katex.min.css",
"node_modules/tufte-css/tufte.min.css"
]
},
{
"name": "kg.0.2.6.js",
"dest_directories": js_directories,
"order": [
"build/lib/kg-lib.js",
"build/kg.js"
]
},
{
"name": "kg3d.0.2.6.js",
"dest_directories": js_directories,
"order": [
"build/lib/kg3d-lib.js",
"build/kg.js"
]
},
{
"name": "kg-lib.js",
"dest_directories": js_local_directories,
"order": [
"build/lib/kg-lib.js"
]
},
{
"name": "kg3d-lib.js",
"dest_directories": js_local_directories,
"order": [
"build/lib/kg3d-lib.js"
]
},
{
"name": "kg.js",
"dest_directories": js_local_directories,
"order": [
"build/kg.js"
]
},
{
"name": "kg.js.map",
"dest_directories": js_local_directories,
"order": [
"build/kg.js.map"
]
},
{
"name": "kg.0.2.6.css",
"dest_directories": css_directories,
"order": [
"node_modules/katex/dist/katex.min.css",
"build/kg.css"
]
},
{
"name": "kg-tufte.0.2.6.css",
"dest_directories": css_directories,
"order": [
"node_modules/katex/dist/katex.min.css",
"node_modules/tufte-css/tufte.min.css",
"build/kg.css"
]
}
]
for bundle in bundles:
for dest_directory in bundle['dest_directories']:
result = ''
bundle_name = bundle['name']
print 'Processing bundle ' + bundle_name + '\n'
for file_name in bundle['order']:
with codecs.open(file_name, 'r', encoding='utf8') as infile:
print ' Appending ' + file_name + '\n'
result += infile.read() + "\n\n"
infile.close()
with codecs.open(dest_directory + bundle_name, 'w', encoding='utf8') as outfile:
outfile.write(result)
outfile.close()
| StarcoderdataPython |
106159 | import itertools
import traceback
import pikepdf
from pdf_preflight.issue import Issue
class Profile:
    """Base preflight profile: runs a list of (rule, *args) rows against a
    PDF and aggregates the resulting issues and exceptions."""

    # Subclasses populate this with rows of [RuleClass, arg1, arg2, ...].
    rules = []

    @classmethod
    def get_preflight_check_text(cls, issues, exceptions):
        """Build a human-readable failure summary, or None when both lists
        are empty."""
        if issues or exceptions:
            # Fix: this literal has no placeholders, so the stray f-prefix
            # was dropped.
            exception_text = "PDF failed Preflight checks with the following Issues & exceptions:\n"
            if issues:
                exception_text += "ISSUES:\n"
                combined_issues = cls._combine_similar_issues(issues)
                for i in combined_issues:
                    if i.page == "Metadata":
                        exception_text += f"Rule '{i.rule}' found an error in document metadata: {i.desc}\n"
                    else:
                        exception_text += f"Rule '{i.rule}' found an error on page {i.page}: {i.desc}\n"
            if exceptions:
                exception_text += "EXCEPTIONS:\n"
                for e in exceptions:
                    exception_text += e + "\n"
            return exception_text

    @classmethod
    def check_preflight(cls, file):
        """Run all checks on *file* and raise with a readable summary when
        any issue or exception was collected."""
        issues, exceptions = cls.run_preflight_checks(file)
        if issues or exceptions:
            exception_text = cls.get_preflight_check_text(issues, exceptions)
            raise Exception(exception_text)

    @classmethod
    def run_preflight_checks(cls, file):
        """Open *file* with pikepdf and run every configured rule.

        Returns a (issues, exceptions) pair of lists.
        """
        issues = []
        exceptions = []
        with pikepdf.open(file) as pdf:
            for row in cls.rules:
                rule = row[0]
                args = row[1:]
                i, e = cls._run_rule(rule, pdf, *args)
                issues += i
                exceptions += e
        return issues, exceptions

    @classmethod
    def _run_rule(cls, rule, pdf, *args):
        """Run one rule; a crashing rule becomes a formatted traceback in
        the exceptions list instead of aborting the whole check."""
        issues = []
        exceptions = []
        try:
            result = rule.check(pdf, *args)
            if result:
                issues += result
        except Exception:
            exception_string = traceback.format_exc()
            exceptions.append(exception_string)
        return issues, exceptions

    @classmethod
    def _combine_similar_issues(cls, issues):
        """Collapse consecutive issues sharing (rule, desc) into one Issue
        whose page field is a "first-last" range.

        Assumes issues arrive in page order within each (rule, desc) group.
        """
        combined_issues = []
        key = lambda i: (i.rule, i.desc)
        for k, g in itertools.groupby(sorted(issues, key=key), key=key):
            g = list(g)
            first = g[0]
            last = g[-1]
            if first.page == "Metadata" or first.page == last.page:
                pages = first.page
            else:
                pages = str(first.page) + "-" + str(last.page)
            issue = Issue(
                rule=first.rule,
                page=pages,
                desc=first.desc
            )
            combined_issues.append(issue)
        return combined_issues
| StarcoderdataPython |
1668478 | import os
import time
from copy import deepcopy
from pathlib import Path
from typing import List, Optional, cast
import hydra
import jax
import numpy as np
import ptvsd
import pytorch_lightning as pl
import wandb
from hydra.utils import instantiate
from omegaconf import OmegaConf
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins import DDPPlugin
from typer import Argument, Typer
from fourierflow.utils import (delete_old_results, get_experiment_id,
import_string, upload_code_to_wandb)
app = Typer()
@app.callback(invoke_without_command=True)
def main(config_path: Path,
         overrides: Optional[List[str]] = Argument(None),
         force: bool = False,
         resume: bool = False,
         checkpoint_id: Optional[str] = None,
         trial: int = 0,
         debug: bool = False,
         no_logging: bool = False):
    """Train a Pytorch Lightning experiment.

    config_path points at a Hydra config declaring builder/routine/trainer;
    overrides are extra Hydra overrides. force/resume manage the results
    directory, trial seeds the RNG, debug attaches ptvsd, and no_logging
    disables W&B logging and checkpointing.
    """
    config_dir = config_path.parent
    config_name = config_path.stem
    hydra.initialize(config_path=Path('../..') /
                     config_dir, version_base='1.2')
    config = hydra.compose(config_name, overrides=overrides)
    # Allow adding new keys (seed, trial, ...) to the composed config below.
    OmegaConf.set_struct(config, False)

    # This debug mode is for those who use VS Code's internal debugger.
    if debug:
        ptvsd.enable_attach(address=('0.0.0.0', 5678))
        ptvsd.wait_for_attach()
        # ptvsd doesn't play well with multiple processes.
        config.builder.num_workers = 0
        jax.config.update('jax_disable_jit', True)
        # jax.config.update("jax_debug_nans", True)

    # Set up directories to save experimental outputs.
    delete_old_results(config_dir, force, trial, resume)

    # Set seed for reproducibility.
    rs = np.random.RandomState(7231 + trial)
    seed = config.get('seed', rs.randint(1000, 1000000))
    pl.seed_everything(seed, workers=True)
    config.seed = seed
    wandb_id = get_experiment_id(checkpoint_id, trial, config_dir, resume)
    config.trial = trial
    if 'seed' in config.trainer:
        config.trainer.seed = seed

    # Initialize the dataset and experiment modules.
    builder = instantiate(config.builder)
    routine = instantiate(config.routine)

    # Support fine-tuning mode if a pretrained model path is supplied.
    pretrained_path = config.get('pretrained_path', None)
    if pretrained_path:
        routine.load_lightning_model_state(pretrained_path)

    # Resume from last checkpoint. We assume that the checkpoint file is from
    # the end of the previous epoch. The trainer will start the next epoch.
    # Resuming from the middle of an epoch is not yet supported. See:
    # https://github.com/PyTorchLightning/pytorch-lightning/issues/5325
    chkpt_path = Path(config_dir) / 'checkpoints' / wandb_id / 'last.ckpt' \
        if resume else None

    # Initialize the main trainer.
    callbacks = [instantiate(p) for p in config.get('callbacks', [])]
    multi_gpus = config.trainer.get('gpus', 0) > 1
    plugins = DDPPlugin(find_unused_parameters=False) if multi_gpus else None
    if no_logging:
        logger = False
        enable_checkpointing = False
        callbacks = []
    else:
        # We use Weights & Biases to track our experiments.
        config.wandb.name = f"{config.wandb.group}/{trial}"
        wandb_opts = cast(dict, OmegaConf.to_container(config.wandb))
        logger = WandbLogger(save_dir=str(config_dir),
                             mode=os.environ.get('WANDB_MODE', 'offline'),
                             config=deepcopy(OmegaConf.to_container(config)),
                             id=wandb_id,
                             **wandb_opts)
        upload_code_to_wandb(Path(config_dir) / 'config.yaml', logger)
        enable_checkpointing = True
        # Keep the local W&B artifact cache bounded.
        c = wandb.wandb_sdk.wandb_artifacts.get_artifacts_cache()
        c.cleanup(wandb.util.from_human_size("100GB"))
    Trainer = import_string(config.trainer.pop(
        '_target_', 'pytorch_lightning.Trainer'))
    trainer = Trainer(logger=logger,
                      enable_checkpointing=enable_checkpointing,
                      callbacks=callbacks,
                      plugins=plugins,
                      weights_save_path=config_dir,
                      resume_from_checkpoint=chkpt_path,
                      enable_model_summary=False,
                      **OmegaConf.to_container(config.trainer))

    # Tuning only has an effect when either auto_scale_batch_size or
    # auto_lr_find is set to true.
    trainer.tune(routine, datamodule=builder)
    trainer.fit(routine, datamodule=builder)

    # Load best checkpoint before testing.
    chkpt_dir = Path(config_dir) / 'checkpoints'
    paths = list(chkpt_dir.glob(f'trial-{trial}-*/epoch*.ckpt'))
    assert len(paths) == 1
    checkpoint_path = paths[0]
    routine.load_lightning_model_state(str(checkpoint_path))
    trainer.test(routine, datamodule=builder)

    # Compute inference time (per sample, per step) and log it to W&B.
    if logger:
        batch = builder.inference_data()
        T = batch['data'].shape[-1]
        n_steps = routine.n_steps or (T - 1)
        routine = routine.cuda()
        batch = routine.convert_data(batch)
        routine.warmup()
        start = time.time()
        routine.infer(batch)
        elapsed = time.time() - start
        elapsed /= len(batch['data'])
        elapsed /= routine.step_size * n_steps
        logger.experiment.log({'inference_time': elapsed})
if __name__ == "__main__":
app()
| StarcoderdataPython |
164520 | <reponame>Holaplace/path_to_python
# A small list of example names; print how many entries it holds.
name_list = ['Amy', 'Bob', 'Candy', 'Ellen']
name_count = len(name_list)
print(name_count)
60586 | <reponame>cclauss/redis-websocket-api<filename>redis_websocket_api/exceptions.py
# Exception hierarchy: APIError is the root; MessageHandlerError splits into
# remote (client-caused) and internal (server-side source) variants.
class APIError(Exception):
    """Base exception for errors raised by high-level websocket API."""


class MessageHandlerError(APIError):
    """Decoding or parsing a message failed."""


class RemoteMessageHandlerError(MessageHandlerError):
    """Raised for errors directly caused by messages from the client."""


class InternalMessageHandlerError(MessageHandlerError):
    """Raised for errors directly caused by messages from internal sources."""
| StarcoderdataPython |
61937 | <reponame>nachom12/my_portfolio<filename>python_little_things/entrada.py
# Lower-case the name before printing it.
minombre = "NaCho"
minombre = minombre.lower()
print (minombre)

# NOTE(review): the unconditional break stops the loop on its first
# iteration, so only 0 is printed -- confirm this is intentional.
for i in range(100):
    print(i)
    break
| StarcoderdataPython |
131119 | # Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Tests for the L{array} L{pyamf.adapters._array} module.
@since: 0.5
"""
try:
import array
except ImportError:
array = None
import unittest
import pyamf
class ArrayTestCase(unittest.TestCase):
    """Round-trip tests: ``array.array`` values encode/decode through AMF
    as plain lists of byte values."""

    def setUp(self):
        # Skip when the stdlib array module failed to import.
        if not array:
            self.skipTest("'array' not available")

        # Expected decoded form: the byte values of "foo".
        self.orig = [ord('f'), ord('o'), ord('o')]

        self.obj = array.array('b')
        self.obj.append(ord('f'))
        self.obj.append(ord('o'))
        self.obj.append(ord('o'))

    def encdec(self, encoding):
        """Encode ``self.obj`` then decode the first value back with the
        same AMF encoding."""
        return next(pyamf.decode(
            pyamf.encode(self.obj, encoding=encoding),
            encoding=encoding))

    def test_amf0(self):
        self.assertEqual(self.encdec(pyamf.AMF0), self.orig)

    def test_amf3(self):
        self.assertEqual(self.encdec(pyamf.AMF3), self.orig)
| StarcoderdataPython |
98710 | from qfengine.risk.risk_model import RiskModel
from abc import ABCMeta
import numpy as np
import pandas as pd
class CovarianceMatrixRiskModel(RiskModel):
    """Risk model estimating a covariance matrix of daily (log or simple)
    returns for the assets in *universe*, with pluggable filter,
    volatility and correlation operators."""

    __metaclass__ = ABCMeta  # NOTE(review): Python 2 idiom; no effect on Python 3.

    def __init__(self,
                 universe,
                 data_handler,
                 logarithmic_returns:bool = True,
                 ret_filter_op = None,
                 ret_std_op = None,
                 ret_corr_op = None,
                 **kwargs
                 ):
        self.universe = universe
        self.data_handler = data_handler
        self.logarithmic_returns = logarithmic_returns
        # Optional callables overriding the default pandas computations.
        self.ret_filter_op = ret_filter_op
        self.ret_std_op = ret_std_op
        self.ret_corr_op = ret_corr_op

    #---| Computing Returns TimeSeries Data
    def _closes_to_returns_df(self, closes_df:pd.DataFrame, **kwargs)->pd.DataFrame:
        """Convert close prices to daily returns (log or simple)."""
        return (
            np.log(closes_df/closes_df.shift(1)).dropna()
            if self.logarithmic_returns else
            closes_df.pct_change().dropna()
        )

    def _get_universe_historical_daily_close_df(self, dt, **kwargs)->pd.DataFrame:
        """Fetch historical daily closes for all assets in the universe at *dt*."""
        return self.data_handler.get_assets_historical_closes(
            self.universe.get_assets(dt),
            end_dt = dt)

    def _filter_returns_df(self, returns_df:pd.DataFrame, **kwargs)->pd.DataFrame:
        """Apply the optional returns filter; identity when none is set."""
        if self.ret_filter_op:
            return self.ret_filter_op(returns_df)
        else:
            return returns_df

    def get_returns_df(self, dt, **kwargs):
        """Return the filtered daily returns DataFrame as of *dt*."""
        return self._filter_returns_df(
            self._closes_to_returns_df(
                closes_df = self._get_universe_historical_daily_close_df(dt, **kwargs),
                **kwargs
            )
        )

    #---| Computing Covariance Matrix
    def _returns_volatility(self, ret):
        """Per-asset volatility: the custom ret_std_op if set, else ret.std()."""
        if self.ret_std_op is not None:
            assert callable(self.ret_std_op)
            std = self.ret_std_op(ret)
            # Custom operator must cover every asset column.
            assert len(std) == ret.shape[1]
            assert set(std.index).issubset(set(ret.columns))
            return std
        else:
            return ret.std()

    def _returns_correlation(self, ret):
        """Correlation matrix: the custom ret_corr_op if set, else ret.corr()."""
        if self.ret_corr_op is not None:
            assert callable(self.ret_corr_op)
            corr = self.ret_corr_op(ret)
            # Custom operator must return a square matrix over the asset columns.
            assert corr.shape[0] == corr.shape[1] == ret.shape[1]
            assert set(corr.index).issubset(set(ret.columns))
            assert set(corr.columns).issubset(set(ret.columns))
            return corr
        else:
            return ret.corr()

    def _is_symmetric(self, matrix:pd.DataFrame, rtol=1e-05, atol=1e-08):
        """Return True when *matrix* is square.

        NOTE(review): despite the name, symmetry itself is never verified
        and rtol/atol are unused -- consider
        ``np.allclose(matrix, matrix.T, rtol, atol)`` if that is intended.
        """
        return matrix.shape[0] == matrix.shape[1]

    # Covariance = VOL' * CORR * VOL
    def _compute_covariance_matrix(self, std:pd.Series, corr:pd.DataFrame):
        """Combine a volatility vector and correlation matrix into a
        covariance matrix, aligned on corr's labels."""
        assert self._is_symmetric(corr)
        assert set(std.index).issubset(set(corr.index))
        assert set(corr.columns).issubset(set(corr.index))
        # Align volatilities to the correlation matrix's column order.
        vol = std.copy().reindex(corr.columns).dropna()
        assert len(vol) == len(std), str([i for i in corr.columns if i not in vol.index])
        vol = np.diag(vol)
        return pd.DataFrame(
            data = (np.dot(vol,np.dot(corr,vol))),
            index = corr.index,
            columns = corr.columns
        )

    def calculate_returns_covariance_matrix(self, ret):
        """Compute the covariance matrix of the returns DataFrame *ret*."""
        std = self._returns_volatility(ret)
        corr = self._returns_correlation(ret)
        return self._compute_covariance_matrix(std = std, corr = corr)

    #---| __call__()
    def __call__(self, dt, **kwargs):
        """Return the covariance matrix of universe returns as of *dt*."""
        ret_df = self.get_returns_df(dt, **kwargs)
        return self.calculate_returns_covariance_matrix(ret_df)
| StarcoderdataPython |
1624193 | '''
http://pythontutor.ru/lessons/str/problems/swap_two_words/
Дана строка, состоящая ровно из двух слов, разделенных пробелом.
Переставьте эти слова местами. Результат запишите в строку и выведите получившуюся строку.
При решении этой задачи не стоит пользоваться циклами и инструкцией if.
'''
s = input()
space = s.find(' ')
print(s[space:], s[:space])
| StarcoderdataPython |
4832950 | <reponame>dssg/mlpolicylab_fall20_schools3_public
import nujson
import pandas as pd
from schools3.data.datasets.dataset import Dataset
from schools3.ml.base.hyperparameters import Hyperparameters
from schools3.config.ml.metrics import performance_metrics_config
# base model class, which typically wraps around an sklearn or tensorflow model and provides
# a common interface for the rest of the code.
class Model():
    """Common interface over a wrapped core model (sklearn/tensorflow)."""

    def __init__(self, core_model, hps: Hyperparameters):
        self.core_model = core_model
        self.hps = hps
        # Populated by subclasses with feature statistics from training data.
        self.feature_train_stats = None
        self.cacheable = True

    def get_model_name(self):
        """Return the class name of the wrapped core model."""
        return self.core_model.__class__.__name__

    # convert the model's hyperparameters to JSON format
    def jsonify_hps(self):
        return nujson.dumps(self.hps.get_val_dict()) if self.hps else None

    # trains the model and returns the result of testing it
    def train_test(self, train_dataset: Dataset, test_dataset: Dataset, use_test_as_val=True):
        self.train(train_dataset, test_dataset if use_test_as_val else None)
        return self.test(test_dataset)

    def train(self, train_dataset: Dataset, val_dataset: Dataset=None):
        """Fit the model; subclasses must implement."""
        raise NotImplementedError

    # returns a list of score predictions given a Dataframe of features
    def predict(self, features: pd.DataFrame):
        raise NotImplementedError

    # a place for model-specific FeatureProcessors
    def get_feature_processor(self, train_stats=None):
        # NOTE(review): imported locally, presumably to avoid a circular import.
        import schools3.config.main_config as main_config
        return main_config.get_default_feature_processors(train_stats)

    # abstract method to link each model class with its corresponding hyperparameter class
    @classmethod
    def get_hp_type(cls):
        raise NotImplementedError

    # predicts scores and returns in a Dataframe with student lookup and labels
    def test(self, dataset: Dataset):
        X_test, y_test = dataset.get_features_labels()
        y_hat = self.predict(X_test)
        # NOTE(review): results_df aliases y_test, so the label frame is
        # mutated in place by the column assignment below.
        results_df = y_test
        results_df['score'] = y_hat
        return results_df

    # predicts labels by treating the top k% scoring rows as positive class
    def predict_labels(self, dataset: Dataset, return_full_df=False):
        features, labels = dataset.get_features_labels()
        pred_labels = self.test(dataset).score.sort_values(ascending=False)
        # Top fraction (per performance_metrics_config) becomes the positive class.
        num_positives = int(len(pred_labels) * performance_metrics_config.best_precision_percent)
        pred_labels[:num_positives] = 1
        pred_labels[num_positives:] = 0
        pred_labels.name = 'pred_label'
        if return_full_df:
            return pd.concat({
                'features': features,
                'pred_labels': pred_labels,
                'labels': labels
            }, axis=1)
        else:
            return pred_labels
| StarcoderdataPython |
3311120 | # Generated by Django 3.1 on 2020-10-24 07:15
from django.db import migrations, models
# Auto-generated Django migration -- avoid editing by hand.
class Migration(migrations.Migration):

    dependencies = [
        ('experiments', '0011_auto_20201023_0924'),
    ]

    operations = [
        migrations.AlterField(
            model_name='experiment',
            name='status',
            field=models.CharField(choices=[('', 'Blank'), ('text-gray-500', 'Unmaintained'), ('text-green-500', 'Active'), ('text-red-500', 'Abandoned'), ('text-blue-500', 'On-Hold')], default='Blank', help_text="What's the status of this experiment?", max_length=32),
        ),
    ]
| StarcoderdataPython |
from adresseLib import getAdresse
import pymysql
from config import getMysqlConnection

# Reverse-geocode every station's coordinates and store the address.
mysql = getMysqlConnection()

requete = mysql.cursor()
requete.execute('SELECT code, latitude, longitude FROM stations')
stations = requete.fetchall()

for station in stations:
    adresse = getAdresse(station[1], station[2])
    requete = mysql.cursor()
    # Parameterized query: the geocoded address can contain quotes, which
    # previously broke -- and could inject into -- the string-built SQL.
    requete.execute('UPDATE stations SET adresse = %s WHERE code = %s',
                    (adresse, station[0]))

# PyMySQL does not autocommit by default; without this the updates are lost.
mysql.commit()
3266515 | <reponame>chawel/python-shoper
# coding=utf-8
# shoperapi
# Copyright 2018 <NAME>
# See LICENSE for details.
"""
E-commerce platform Shoper REST API Wrapper
"""
__version__ = '1.0.0'
__author__ = '<NAME>'
__license__ = 'MIT'
from .base import ShoperBaseApi
from .shoperapi import ShoperWrapper
class ShoperClient(ShoperBaseApi):
    """
    Shoper REST API Client - handles all endpoints with magic.
    Converts attribute to resource endpoint

    :param api_url: (required) Shoper REST API url (ex. 'https://shop11111.shoparena.pl/webapi/rest')
    :type api_url: str
    :param client_login: (optional) Shoper consumer key
    :type client_login: str
    :param client_secret: (optional) Shoper consumer secret
    :type client_secret: str
    :param access_token: (optional) Shoper consumer access_token
    :type access_token: str
    :param refresh_token: (optional) Shoper consumer refresh_token
    :type refresh_token: str
    """

    def __init__(self, api_url, client_login=None, client_secret=None, access_token=None, refresh_token=None):
        super(ShoperClient, self).__init__(api_url, client_login, client_secret, access_token, refresh_token)

    def __getattr__(self, name):
        # Turn attribute access into a resource endpoint: underscores in the
        # attribute name become hyphens in the endpoint path.
        return ShoperWrapper(self, str(name).replace('_', '-'))
| StarcoderdataPython |
3344585 | # coding: utf-8
import types
import json
import os
import subprocess
import sys
import zipfile
import marshal
import imp
import time
import struct
import shutil
import sys
import tempfile
from datetime import datetime
from xml.etree import ElementTree
from xml.dom import minidom
def call(*args):
    """Run a command, returning its stripped stdout (or the captured output
    of a failed command instead of raising)."""
    # NOTE(review): shell=True combined with an argument *sequence* is
    # platform-dependent (on POSIX only the first element is used); this
    # script appears to target Windows, where the list is joined.
    try:
        return subprocess.check_output(args, shell=True, stderr=subprocess.STDOUT, universal_newlines=True).strip()
    except subprocess.CalledProcessError as e:
        return e.output


def getVersion(meta):
    """Return the package version: an explicit meta['version'] wins, then a
    git-derived version, then '1.0.0'."""
    if 'version' in meta:
        return meta['version']
    if 'not a git repository' not in call('git', 'log'):
        tree = call('git', 'rev-parse', '--abbrev-ref', 'HEAD')
        commit = int(call('git', 'rev-list', 'HEAD', '--count'))
        # First digit: 1 on master, 0 otherwise; the rest derive from the
        # commit count's decimal digits.
        values = (tree == 'master', commit % 1000 / 100, commit % 100 / 10, commit % 10)
        return '.'.join(map(str, map(int, values)))
    else:
        return '1.0.0'
def readSWF(path):
    """Return the bytes of the .swf sitting next to *path* (same basename),
    or None (after printing a warning) when it does not exist."""
    path = str(os.path.abspath(path))
    name, _ = os.path.splitext(os.path.basename(path))
    swf = os.path.join(os.path.dirname(path), os.path.basename(name) + '.swf')
    if os.path.isfile(swf):
        with open(swf, 'rb') as f:
            return f.read()
    else:
        print swf, 'not found'


def buildFLA(projects):
    """Publish the given .fla projects with Adobe Animate (only when '-f' is
    on the command line) and return {dst: swf-bytes} for every project."""
    if '-f' in sys.argv and projects:
        # Generate a JSFL script that publishes each document, then quits.
        with open('build.jsfl', 'wb') as fh:
            for path in projects.keys():
                path = str(os.path.abspath(path))
                fh.write('fl.publishDocument("file:///%s", "Default");' % path.replace('\\', '/').replace(':', '|'))
                fh.write('\r\n')
            fh.write('fl.quit(false);')
        try:
            subprocess.check_output([os.environ.get('ANIMATE'), '-e', 'build.jsfl', '-AlwaysRunJSFL'],
                                    universal_newlines=True,
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as error:
            print error.output.strip()
        try:
            os.remove('build.jsfl')
        except Exception as ex:
            print ex.message
    return {
        dst: readSWF(src)
        for src, dst in projects.items()
    }
def buildFlashFD(path):
    """Compile a FlashDevelop project via FDBuild (when the FDBUILD and
    FLEXSDK environment variables point at existing paths) and return the
    resulting swf bytes."""
    path = str(os.path.abspath(path))
    if os.path.isfile(path):
        try:
            fdbuild = os.environ.get('FDBUILD')
            flexsdk = os.environ.get('FLEXSDK')
            if fdbuild and os.path.exists(fdbuild) and flexsdk and os.path.exists(flexsdk):
                args = [fdbuild, '-compiler:' + flexsdk, path]
                subprocess.check_output(args,
                                        shell=True,
                                        universal_newlines=True,
                                        stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as error:
            print path
            print error.output.strip()
        name, _ = os.path.splitext(os.path.basename(path))
        swf = os.path.join(os.path.dirname(path), 'bin', os.path.basename(name) + '.swf')
        # NOTE(review): 'swf' (the bin/ output path) is computed but unused;
        # readSWF(path) looks next to the project file instead of in bin/ --
        # confirm which location the build output actually lands in.
        return readSWF(path)
    else:
        print path, 'not found'
def buildPython(path, filename):
    """Compile the Python source at *path* into .pyc bytes.

    *filename* is the in-archive name embedded as co_filename (a leading
    "/" is stripped). Returns magic + timestamp + marshalled code object,
    the classic Python 2 .pyc layout.
    """
    # The unused nested read() helper the original carried (never called,
    # and with a bogus 'self' parameter) has been removed.

    def repack(code, co_filename, co_name):
        # Recursively rewrite co_filename/co_name so tracebacks point at the
        # in-archive path instead of the build machine's filesystem path.
        co_consts = []
        for const in code.co_consts:
            if isinstance(const, types.CodeType):
                const = repack(const, co_filename, const.co_name)
            co_consts.append(const)
        code = types.CodeType(
            code.co_argcount,
            code.co_nlocals,
            code.co_stacksize,
            code.co_flags,
            code.co_code,
            tuple(co_consts),
            code.co_names,
            code.co_varnames,
            co_filename,
            co_name,
            code.co_firstlineno,
            code.co_lnotab,
            code.co_freevars,
            code.co_cellvars
        )
        return code

    if filename.startswith('/'):
        filename = filename[1:]
    with open(path, 'rb') as f:
        try:
            timestamp = long(os.fstat(f.fileno()).st_mtime)
        except AttributeError:
            timestamp = long(time.time())
        basename = os.path.basename(path)
        code = compile(f.read(), filename, 'exec')
        code = repack(code, filename, basename)
        return imp.get_magic() + struct.pack('L', timestamp) + marshal.dumps(code)
def buildGO(path):
    """Run ``go build`` in project directory *path* and return the produced
    binary's bytes (None, after printing the error, on failure)."""
    env = dict(os.environ)
    if 'GOBIN' not in env:
        env['GOBIN'] = os.path.join(os.environ.get('GOPATH'), 'bin')
    # Reserve a temporary file name for the build output.
    with tempfile.NamedTemporaryFile(delete=True) as f:
        filename = f.name
    try:
        subprocess.check_output(['go', 'build', '-o', filename],
                                cwd=path, env=env, shell=True,
                                stderr=subprocess.STDOUT, universal_newlines=True)
        with open(filename, 'rb') as f:
            return f.read()
    except subprocess.CalledProcessError as e:
        print path, e.output.strip()


def createMeta(**meta):
    """Render the keyword arguments as a pretty-printed XML document rooted
    at <root>, without the leading <?xml ...?> declaration line."""
    metaET = ElementTree.Element('root')
    for key, value in meta.iteritems():
        ElementTree.SubElement(metaET, key).text = value
    metaStr = ElementTree.tostring(metaET)
    metaDom = minidom.parseString(metaStr)
    # Drop the first line (the XML declaration).
    metaData = metaDom.toprettyxml(encoding='utf-8').split('\n')[1:]
    return '\n'.join(metaData)
def write(excludes, package, path, data):
    """Add *data* under *path* inside the zip *package*, creating parent
    directory entries as needed; paths listed in *excludes* are skipped."""
    if path in excludes:
        print 'Excluded', path
        return
    if not data:
        data = ''
    print 'Write', path, len(data)
    now = tuple(datetime.now().timetuple())[:6]
    path = path.replace('\\', '/')
    # Ensure every ancestor directory has an explicit zip entry.
    dirname = os.path.dirname(path)
    while dirname:
        if dirname + '/' not in package.namelist():
            package.writestr(zipfile.ZipInfo(dirname + '/', now), '')
        dirname = os.path.dirname(dirname)
    if data:
        info = zipfile.ZipInfo(path, now)
        info.external_attr = 33206 << 16 # -rw-rw-rw-
        package.writestr(info, data)


def deploy(pathLine, gamePath):
    """Insert *pathLine* before <Packages> in every paths.xml under *gamePath*."""
    # Deploying by adding path into paths.xml
    for dirName, _, files in os.walk(gamePath):
        for filename in files:
            if filename == 'paths.xml':
                print 'Deploy into', dirName
                path = os.path.join(dirName, filename)
                with open(path, 'r') as p:
                    paths = p.read().split('\n')
                # Skip insertion when the line is already present.
                for idx, line in enumerate(paths):
                    if line == pathLine:
                        break
                    if '<Packages>' in line:
                        paths.insert(idx, pathLine)
                        break
                with open(path, 'w') as p:
                    p.write('\n'.join(paths))


def clear(pathLine, gamePath):
    """Remove every occurrence of *pathLine* from paths.xml under *gamePath*."""
    # Remove deployed from paths.xml
    for dirName, _, files in os.walk(gamePath):
        for filename in files:
            if filename == 'paths.xml':
                print 'Clear from', dirName
                path = os.path.join(dirName, filename)
                with open(path, 'r') as p:
                    paths = p.read().split('\n')
                paths = filter(lambda x: x.strip() != pathLine, paths)
                with open(path, 'w') as p:
                    p.write('\n'.join(paths))
def build(packageFile, config):
    """Assemble the .wotmod zip: meta.xml, compiled sources, flash, go and
    plain-copied files, honouring the global *excludes* list.

    NOTE(review): the *config* parameter is unused; the function reads the
    global CONFIG instead -- confirm that is intended.
    """
    with zipfile.ZipFile(packageFile, 'w') as package:
        write(excludes, package, 'meta.xml', createMeta(**CONFIG['meta']))
        sources = os.path.abspath('./sources')
        for dirName, _, files in os.walk(sources):
            for filename in files:
                path = os.path.join(dirName, filename)
                name = path.replace(sources, '').replace('\\', '/')
                dst = 'res' + name
                fname, fext = os.path.splitext(dst)
                if fext == '.py':
                    # Python sources ship pre-compiled as .pyc files.
                    write(excludes, package, fname + '.pyc', buildPython(path, name))
                elif fext == '.po':
                    import polib
                    write(excludes, package, fname + '.mo', polib.pofile(path).to_binary())
                elif fext != '.pyc' or CONFIG.get('pass_pyc_files', False):
                    with open(path, 'rb') as f:
                        write(excludes, package, dst, f.read())
        for source, dst in CONFIG.get('flash_fdbs', {}).items():
            write(excludes, package, dst, buildFlashFD(source))
        for dst, data in buildFLA(CONFIG.get('flash_fla', {})).items():
            write(excludes, package, dst, data)
        for source, dst in CONFIG.get('go', {}).items():
            write(excludes, package, dst, buildGO(source))
        for path, dst in CONFIG.get('copy', {}).items():
            with open(path, 'rb') as f:
                write(excludes, package, dst, f.read())


if __name__ == '__main__':
    with open('./build.json', 'r') as fh:
        CONFIG = json.loads(fh.read())
    excludes = CONFIG.get('excludes', [])
    CONFIG['meta']['version'] = getVersion(CONFIG['meta'])
    if CONFIG.get('append_version', True):
        packageName = '%s_%s.wotmod' % (CONFIG['meta']['id'], CONFIG['meta']['version'])
    else:
        packageName = '%s.wotmod' % CONFIG['meta']['id']
    # Start from a clean bin/ output directory.
    if os.path.exists('bin'):
        shutil.rmtree('bin')
    if not os.path.exists('bin'):
        os.makedirs('bin')
    pathLine = '<Path mode="recursive" mask="*.wotmod" root="res">' + os.path.abspath('bin').replace('\\', '/') + '</Path>'
    if 'clear' in sys.argv:
        clear(pathLine, sys.argv[sys.argv.index('clear') + 1])
    else:
        build(os.path.abspath(os.path.join('bin', packageName)), CONFIG)
        if 'deploy' in sys.argv:
            deploy(pathLine, sys.argv[sys.argv.index('deploy') + 1])
| StarcoderdataPython |
142976 | import h5py
import numpy as np
import os
import sys
import time
usage = '''
driver.py D F N outputfile
where D = delay in microseconds between writes
F = number of writes between flushes and chunksize
N = total number of elements to write
outputfile = name of output file
'''
def run_command(command):
    """Run *command* via the shell, echoing it and timing its execution.

    Raises RuntimeError when the command exits with a non-zero status.
    (The original used ``assert``, which ``python -O`` silently strips.)
    """
    t0 = time.time()
    print("-----------")
    print(command)
    res = os.system(command)
    print(" -- finished in %.2f sec" % (time.time() - t0,))
    if res != 0:
        raise RuntimeError("command failed: %s" % command)
if __name__ == '__main__':
    # Parse CLI args: write delay, flush/chunk size, element count, output file.
    assert len(sys.argv)==5, usage
    delay = int(sys.argv[1])
    flush = int(sys.argv[2])
    num = int(sys.argv[3])
    outputfile = sys.argv[4]

    # Compile the C++ writer, start it in the background, then watch the
    # growing dataset with h5watch.
    t0 = time.time()
    run_command('h5c++ writer.cpp -o writer')
    print("compiled writer in %.2f sec" % (time.time()-t0,))
    run_command('./writer %d %d %d %s &' % (delay, flush, num, outputfile))
    run_command('h5watch --dim %s/data' % outputfile)
| StarcoderdataPython |
class Template(object):
    """
    A rudimentary replacement for Jinja2, currently in place
    due to a bug with the typing module.

    Substitutes ``{{key}}`` placeholders with values from the keyword
    arguments passed to :meth:`render`; single braces pass through as
    literal text.
    """

    def __init__(self, template_string):
        self._template_string = template_string

    def render(self, **parameters):
        """Return the template with every ``{{key}}`` replaced by
        ``parameters[key]``."""
        chunks = []
        iterator = iter(self._template_string)
        for c in iterator:
            if c != "{":
                chunks.append(c)
                continue
            # Peek at the next character to distinguish "{" from "{{".
            next_c = next(iterator, None)
            if next_c is None:
                # Fix: a lone "{" at the very end of the template used to be
                # silently dropped; keep it as literal text instead.
                chunks.append(c)
                break
            if next_c == "{":
                try:
                    chunks.append(_capture_variable(iterator, parameters))
                except StopIteration:
                    # Unterminated "{{...": stop rendering, as before.
                    break
            else:
                chunks.append(c)
                chunks.append(next_c)
        return "".join(chunks)


def _capture_variable(iterator, parameters):
    """
    Return the replacement string for one placeholder.

    Assumes the preceding "{{" has already been consumed; reads the key up
    to the closing braces and consumes both of them.
    """
    key = ""
    next_c = next(iterator)
    while next_c != "}":
        key += next_c
        next_c = next(iterator)
    # Consume the second "}" of the closing "}}".
    next(iterator)
    return parameters[key]
| StarcoderdataPython |
1782869 | from __future__ import print_function
from unittest import TestCase
from indexdigest.linters.linter_0164_empty_database import check_empty_database
from indexdigest.test import DatabaseTestMixin
class TestLinter(TestCase, DatabaseTestMixin):
    """The empty-database linter reports exactly one issue for an empty schema."""

    def test_empty_database(self):
        reports = list(check_empty_database(self.connection))
        print(reports, reports[0].context)

        # Exactly one report, naming the empty database.
        assert len(reports) == 1
        assert str(reports[0]) == 'index_digest_empty: "index_digest_empty" database has no tables'
        assert reports[0].table_name == 'index_digest_empty'
| StarcoderdataPython |
145308 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-10-01 12:05
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
# Auto-generated Django initial migration -- avoid editing by hand.
class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('first_name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=30)),
                ('phone_number', models.CharField(blank=True, max_length=15, null=True, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')])),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now)),
                ('last_login', models.DateTimeField(blank=True, null=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('is_active', models.BooleanField(default=False)),
                ('is_admin', models.BooleanField(default=False)),
                ('is_superuser', models.BooleanField(default=False)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Token',
            fields=[
                ('key', models.CharField(max_length=40, primary_key=True, serialize=False, verbose_name='Key')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('logout', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='auth_tokens', to=settings.AUTH_USER_MODEL, verbose_name='User')),
            ],
            options={
                'verbose_name_plural': 'Tokens',
                'ordering': ['-updated', '-created'],
                'verbose_name': 'Token',
            },
        ),
    ]
| StarcoderdataPython |
3261193 | import os
from faunadb import query as q
from faunadb.objects import Ref
from faunadb.client import FaunaClient
from faunadb.errors import BadRequest
class FaunaWrapper:
    """Thin convenience wrapper around a FaunaDB client."""

    def __init__(self):
        # FAUNA_KEY holds the client secret used to authenticate.
        secret = os.getenv("FAUNA_KEY")
        self.client = FaunaClient(secret=secret)

    def get_documents_in_index(self, index="unique_news_items", size=100000):
        """Return a page of documents from *index*.

        Assumes the index name exists (personal script, no validation).
        Known indexes: unique_halts, unique_news, unique_short_news.
        """
        # Bug fix: honour the caller-supplied page size instead of the
        # previously hard-coded 100000.
        documents = self.client.query(
            q.paginate(q.match(q.index(index)), size=size)
        )
        return documents

    def create_document_in_collection(self, collection, collection_data):
        """Create a document in *collection*.

        Returns True on success, False when the document is not unique,
        and None on any other error (errors are printed, not raised).
        Assumes the collection exists (halts, news, short_news).
        """
        try:
            result = self.client.query(
                q.create(q.collection(collection), {"data": collection_data})
            )
            print(result)
            return True
        except BadRequest as error:
            if hasattr(error, "_get_description"):
                if error._get_description() == "document is not unique.":
                    ticker = collection_data.get("ticker")
                    print(f"skipping {ticker} since doc is not unique")
                    return False
            # Other BadRequests fall through and return None.
        except Exception:
            # NOTE(review): unknown errors are swallowed (implicit None
            # return) after printing the payload; consider re-raising.
            print(collection_data)
| StarcoderdataPython |
3230238 | from marshmallow import INCLUDE, Schema, fields, post_load, pre_load
class Dates:
    """Value object for a comic's release dates.

    Recognised dates become attributes; any extra keyword arguments are
    preserved unchanged in ``unknown``.
    """

    def __init__(self, on_sale=None, foc=None, unlimited=None, **kwargs):
        self.unknown = kwargs
        self.on_sale = on_sale
        self.foc = foc
        self.unlimited = unlimited
class DatesSchema(Schema):
    """Marshmallow schema mapping Marvel's date-list payload onto ``Dates``."""

    onsaleDate = fields.DateTime(attribute="on_sale")
    focDate = fields.DateTime(attribute="foc")
    unlimitedDate = fields.DateTime(attribute="unlimited")

    class Meta:
        unknown = INCLUDE

    @pre_load
    def process_input(self, data, **kwargs):
        # Flatten [{"type": ..., "date": ...}, ...] into {type: date}.
        # Marvel comic 4373, and maybe others, returns a focDate of
        # "-0001-11-30T00:00:00-0500". The best way to handle this is
        # probably just to ignore it, since I don't know how to fix it.
        return {entry["type"]: entry["date"]
                for entry in data
                if entry["date"][0] != "-"}

    @post_load
    def make(self, data, **kwargs):
        return Dates(**data)
| StarcoderdataPython |
58176 | import unittest.mock
import pytest
import requests
from lektor_twitter_embed import _init_params, _tweet
def _mock_request_valid(url, params):
return {"html": "<blockquote..."}
def _mock_request_exception(url, params):
    """Stand-in for ``lektor_twitter_embed._send_request`` that always raises an HTTP error."""
    raise requests.exceptions.HTTPError()
_tweet_url = "https://twitter.com/thisstuartlaws/status/1353838316198756352"
def test_init_params_none():
    """With no user params, defaults are the tweet URL plus dnt=true."""
    assert _init_params(_tweet_url, None) == {
        "url": _tweet_url,
        "dnt": "true",
    }
def test_init_params_dict():
    """User params are merged in and may override the dnt default."""
    assert _init_params(_tweet_url, {"dnt": "false", "align": "center"},) == {
        "url": _tweet_url,
        "dnt": "false",
        "align": "center",
    }
@unittest.mock.patch("lektor_twitter_embed._send_request", _mock_request_valid)
def test_tweet_valid():
    """A successful oEmbed request yields the embed HTML."""
    assert _tweet(_tweet_url) == "<blockquote..."
@unittest.mock.patch("lektor_twitter_embed._send_request", _mock_request_exception)
def test_tweet_exception_no_fallback():
    """Without fallback, an HTTP error from the API propagates to the caller."""
    with pytest.raises(requests.exceptions.HTTPError):
        _tweet(_tweet_url)
@unittest.mock.patch("lektor_twitter_embed._send_request", _mock_request_exception)
def test_tweet_exception_with_fallback():
    """With fallback=True, an HTTP error degrades to a plain link to the tweet."""
    assert (
        _tweet(_tweet_url, fallback=True) == f'<a href="{_tweet_url}">{_tweet_url}</a>'
    )
| StarcoderdataPython |
164742 | <filename>venv/lib/python3.6/site-packages/ansible_collections/cisco/mso/plugins/modules/mso_rest.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, <NAME> (@anvitha-jain) <<EMAIL>>
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_rest
short_description: Direct access to the Cisco MSO REST API
description:
- Enables the management of the Cisco MSO fabric through direct access to the Cisco MSO REST API.
- This module is not idempotent and does not report changes.
options:
method:
description:
- The HTTP method of the request.
- Using C(delete) is typically used for deleting objects.
- Using C(get) is typically used for querying objects.
- Using C(post) is typically used for modifying objects.
- Using C(put) is typically used for modifying existing objects.
- Using C(patch) is typically also used for modifying existing objects.
type: str
choices: [ delete, get, post, put, patch ]
default: get
aliases: [ action ]
path:
description:
- URI being used to execute API calls.
type: str
required: yes
aliases: [ uri ]
content:
description:
- Sets the payload of the API request directly.
- This may be convenient to template simple requests.
- For anything complex use the C(template) lookup plugin (see examples).
type: raw
aliases: [ payload ]
extends_documentation_fragment:
- cisco.mso.modules
notes:
- Most payloads are known not to be idempotent, so be careful when constructing payloads.
seealso:
- module: cisco.mso.mso_tenant
author:
- <NAME> (@anvitha-jain)
'''
EXAMPLES = r'''
- name: Add schema (JSON)
cisco.mso.mso_rest:
host: mso
username: admin
password: <PASSWORD>
path: /mso/api/v1/schemas
method: post
content:
{
"displayName": "{{ mso_schema | default('ansible_test') }}",
"templates": [{
"name": "Template_1",
"tenantId": "{{ add_tenant.jsondata.id }}",
"displayName": "Template_1",
"templateSubType": [],
"templateType": "stretched-template",
"anps": [],
"contracts": [],
"vrfs": [],
"bds": [],
"filters": [],
"externalEpgs": [],
"serviceGraphs": [],
"intersiteL3outs": []
}],
"sites": [],
"_updateVersion": 0
}
delegate_to: localhost
- name: Query schema
cisco.mso.mso_rest:
host: mso
username: admin
password: <PASSWORD>
path: /mso/api/v1/schemas
method: get
delegate_to: localhost
- name: Patch schema (YAML)
cisco.mso.mso_rest:
host: mso
username: admin
password: <PASSWORD>
path: "/mso/api/v1/schemas/{{ add_schema.jsondata.id }}"
method: patch
content:
- op: add
path: /templates/Template_1/anps/-
value:
name: AP2
displayName: AP2
epgs: []
_updateVersion: 0
delegate_to: localhost
- name: Add a tenant from a templated payload file from templates
cisco.mso.mso_rest:
host: mso
username: admin
password: <PASSWORD>
method: post
path: /api/v1/tenants
content: "{{ lookup('template', 'mso/tenant.json.j2') }}"
delegate_to: localhost
'''
RETURN = r'''
'''
# Optional dependency: PyYAML is only needed to parse/validate string
# payloads in main(); without it, string content is passed through as-is.
try:
    import yaml
    HAS_YAML = True
except Exception:
    HAS_YAML = False
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.mso.plugins.module_utils.mso import MSOModule, mso_argument_spec
from ansible.module_utils._text import to_text
def main():
    """Ansible entry point exposing the Cisco MSO REST API directly."""
    argument_spec = mso_argument_spec()
    argument_spec.update(
        path=dict(type='str', required=True, aliases=['uri']),
        method=dict(type='str', default='get', choices=['delete', 'get', 'post', 'put', 'patch'], aliases=['action']),
        content=dict(type='raw', aliases=['payload']),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    path = module.params.get('path')
    payload = module.params.get('content')

    mso = MSOModule(module)

    # A string payload may be JSON or YAML; parse it up front so malformed
    # input fails locally with a clear message instead of server-side.
    if payload and isinstance(payload, str) and HAS_YAML:
        try:
            payload = yaml.safe_load(payload)
        except Exception as e:
            module.fail_json(msg='Failed to parse provided JSON/YAML payload: %s' % to_text(e), exception=to_text(e), payload=payload)

    mso.method = mso.params.get('method').upper()

    if module.check_mode:
        # Check mode: echo the payload back without touching the API.
        mso.result['jsondata'] = payload
    else:
        mso.result['jsondata'] = mso.request(path, method=mso.method, data=payload, api_version=None)
        mso.result['status'] = mso.status

    # Anything other than GET is assumed to have changed state; DELETE has
    # no meaningful response body.
    if mso.method != 'GET':
        mso.result['changed'] = True
        if mso.method == 'DELETE':
            mso.result['jsondata'] = None

    mso.exit_json(**mso.result)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1669501 | <reponame>mia-jingyi/simglucose
from simglucose.envs.simglucose_gym_env import DeepSACT1DEnv
| StarcoderdataPython |
1786475 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.nestedset import NestedSet, get_root_of
class SupplierGroup(NestedSet):
    """Supplier Group doctype: a node in the nested-set supplier tree."""

    nsm_parent_field = 'parent_supplier_group'

    def validate(self):
        # Groups created without a parent are attached to the tree root.
        if not self.parent_supplier_group:
            self.parent_supplier_group = get_root_of("Supplier Group")

    def on_update(self):
        super(SupplierGroup, self).on_update()
        self.validate_one_root()

    def on_trash(self):
        # Deletion is refused while child groups exist; the nested-set
        # bookkeeping is then recomputed.
        NestedSet.validate_if_child_exists(self)
        frappe.utils.nestedset.update_nsm(self)
| StarcoderdataPython |
64983 | <filename>helloworld/cli.py
"""
command line interface
"""
from argparse import ONE_OR_MORE, ArgumentParser
from colorama import Fore
from . import __version__
from .model import User
def run():
    """Command-line entry point: greet every user named on the command line."""
    parser = ArgumentParser(prog="helloworld", description="some documentation here")
    parser.add_argument("--version", action="version", version=f"version {__version__}")
    parser.add_argument(dest="users", nargs=ONE_OR_MORE, type=User, help="your name")
    namespace = parser.parse_args()
    for person in namespace.users:
        # Name highlighted in yellow via colorama escape codes.
        print(f"Hello {Fore.YELLOW}{person.name}{Fore.RESET}")
| StarcoderdataPython |
1759862 | <gh_stars>0
def read_matrix(is_tet=False):
    """Return an integer matrix.

    With *is_tet* true a canned 3x6 fixture is returned; otherwise the
    matrix is read interactively: first line "rows, cols", then one
    whitespace-separated row per line (the column count is not enforced).
    """
    if is_tet:
        return [
            [7, 1, 3, 3, 2, 1],
            [1, 3, 9, 8, 5, 6],
            [4, 6, 7, 9, 1, 0],
        ]
    rows_count, columns_count = (int(part) for part in input().split(', '))
    return [[int(token) for token in input().split()] for _ in range(rows_count)]
def get_sum_matrix_columns(matrix):
    """Return a list with the sum of each column of *matrix*.

    The matrix is assumed rectangular. Improvement over the original:
    an empty matrix yields [] instead of raising IndexError on matrix[0].
    """
    # zip(*matrix) transposes rows into columns in a single pass.
    return [sum(column) for column in zip(*matrix)]
def print_result(values):
    """Print each value on its own line."""
    # Idiom fix: a plain loop, not a throwaway list comprehension built
    # purely for its side effects.
    for value in values:
        print(value)
# Script body (runs at import time, no __main__ guard): read a matrix from
# stdin and compute its per-column sums, printed by the following line.
matrix = read_matrix()
result = get_sum_matrix_columns(matrix)
print_result(result) | StarcoderdataPython |
171779 |
class User():
    """A registered player that registers itself with its data store on creation."""

    def __init__(self, Name, userName, username_type, Game, data_dict):
        self.name = Name
        self.username = userName
        self.username_type = username_type
        self.game = Game
        self.data = data_dict
        # Index is one past the store's current maximum index.
        self.index = data_dict.getIndex() + 1
        # Side effect: the store immediately records this new user.
        self.data.AddUser(self)
class DataStorage():
    """In-memory user store keyed by each user's integer index."""

    def __init__(self):
        self.dict = self.CreateDict()

    def CreateDict(self):
        """Create the empty backing mapping (index -> user)."""
        return dict()

    def AddUser(self, user):
        """Store *user* under its index (overwrites on collision)."""
        self.dict[user.index] = user

    def getIndex(self):
        """Return the highest index currently stored (0 when empty)."""
        return max((user.index for user in self.dict.values()), default=0)

    def getUser(self, name="", index=0):
        """Return the first user matching *name* (preferred) or *index*, else None.

        Bug fix: the original iterated the dict's keys (ints) instead of the
        stored user objects, and the index branch read a nonexistent
        ``self.data`` attribute -- both lookups raised at runtime.
        """
        if name:
            for user in self.dict.values():
                if user.name == name:
                    return user
        elif index:
            for user in self.dict.values():
                if user.index == index:
                    return user
        else:
            return None

    def CheckUsername(self, username, game):
        """True if some stored user already holds *username* for *game*."""
        for user in self.dict.values():
            if user.username == username and user.game == game:
                return True
        return False

    def UpdateJSON(self):
        # TODO: persistence not implemented yet.
        pass

    def CreateJSON(self):
        # TODO: persistence not implemented yet.
        pass
class GameSort():
    """Tallies how many users of a user store play each game."""

    def __init__(self, data_dict):
        # game name -> player count, pre-seeded with well-known games at 0.
        self.game_dict = dict()
        self.data = data_dict
        self.__init_gameslist()

    def __init_gameslist(self):
        defaults = (
            'Destiny2', 'Fortnite', 'Call Of Duty Warzone', 'PUBG',
            'Apex Legends', 'Clash Royale', 'Clash of Clans', 'Houseparty',
        )
        for name in defaults:
            self.game_dict[name] = 0

    def Count(self):
        """Add every stored user's game to the tally (cumulative, no reset)."""
        for user in self.data.values():
            self.game_dict[user.game] = self.game_dict.get(user.game, 0) + 1

    def CheckGame(self, game):
        """Increment the tally for *game*, creating the entry if needed."""
        self.game_dict[game] = self.game_dict.get(game, 0) + 1

    def GetList(self):
        """Run a counting pass, then return the tallied game names (keys view)."""
        self.Count()
        return self.game_dict.keys()
3222875 | <filename>scripts/save_images.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Save some representative images from each dataset to disk.
"""
import random
import torch
import argparse
import hparams_registry
import datasets
import imageio
import torchvision.utils as vutils
import os
from tqdm import tqdm
def __write_images(image_outputs, display_image_num, file_name, run):
    """Save a grid of the first *display_image_num* images of each batch and log it to *run*."""
    # Grayscale batches are broadcast to 3 channels so all batches stack.
    rgb_batches = [batch.expand(-1, 3, -1, -1) for batch in image_outputs]
    stacked = torch.cat([batch[:display_image_num] for batch in rgb_batches], 0)
    grid = vutils.make_grid(stacked.data, nrow=display_image_num, padding=0,
                            normalize=True, scale_each=True)
    vutils.save_image(grid, file_name, nrow=1)
    run.log_image('images', file_name)
def write_2images(image_outputs, display_image_num, image_directory, postfix, run):
    """Write one composite grid image named gen_<postfix>.jpg under *image_directory*."""
    n = len(image_outputs)
    target = '%s/gen_%s.jpg' % (image_directory, postfix)
    __write_images(image_outputs[0:n], display_image_num, target, run)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Domain generalization')
    parser.add_argument('--data_dir', type=str)
    parser.add_argument('--output_dir', type=str)
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    datasets_to_save = ['OfficeHome', 'TerraIncognita', 'DomainNet', 'RotatedMNIST', 'ColoredMNIST', 'SVIRO']
    for dataset_name in tqdm(datasets_to_save):
        # Default ERM hyperparameters are only needed to construct the dataset.
        hparams = hparams_registry.default_hparams('ERM', dataset_name)
        dataset = datasets.get_dataset_class(dataset_name)(
            args.data_dir,
            list(range(datasets.num_environments(dataset_name))),
            hparams)
        # Save 50 random examples per environment of each dataset.
        for env_idx, env in enumerate(tqdm(dataset)):
            for i in tqdm(range(50)):
                idx = random.choice(list(range(len(env))))
                x, y = env[idx]
                # Resample until the class label is small; NOTE(review): the
                # guard is ``y > 10`` so label 10 itself is still accepted --
                # confirm whether ``>= 10`` was intended.
                while y > 10:
                    idx = random.choice(list(range(len(env))))
                    x, y = env[idx]
                # Pad 2-channel images with a zero channel to get RGB.
                if x.shape[0] == 2:
                    x = torch.cat([x, torch.zeros_like(x)], dim=0)[:3,:,:]
                # Negative values imply ImageNet-style normalization; undo it.
                if x.min() < 0:
                    mean = torch.tensor([0.485, 0.456, 0.406])[:,None,None]
                    std = torch.tensor([0.229, 0.224, 0.225])[:,None,None]
                    x = (x * std) + mean
                assert(x.min() >= 0)
                assert(x.max() <= 1)
                # Scale [0, 1] floats to [0, 255] bytes, CHW -> HWC for imageio.
                x = (x * 255.99)
                x = x.numpy().astype('uint8').transpose(1,2,0)
                imageio.imwrite(
                    os.path.join(args.output_dir,
                        f'{dataset_name}_env{env_idx}{dataset.ENVIRONMENTS[env_idx]}_{i}_idx{idx}_class{y}.png'),
                    x)
4815147 | from twisted.web import http
from twisted.web.http import HTTPChannel
from twisted.internet import reactor, defer
import threading
from settings import LIMIT_FPS, PASSWORD
class BotHandler(http.Request, object):
    """HTTP request handler serving the recorder's camera frames.

    Endpoints: /stream* (multipart MJPEG stream), /snapshot (single JPEG),
    anything else (HTML page embedding the stream). When PASSWORD is set,
    every request must carry a matching ?pwd= query argument.
    """

    BOUNDARY = "jpgboundary"

    def get_frame(self):
        # Latest JPEG frame captured by the recorder.
        return self.api.recorder.frame

    def writeBoundary(self):
        self.write("--%s\n" % (self.BOUNDARY))

    def writeStop(self):
        self.write("--%s--\n" % (self.BOUNDARY))

    def __init__(self, api, *args, **kwargs):
        self.api = api
        # Seconds between frames; defaults to 20 fps when LIMIT_FPS is unset.
        self.frame_delay = 1.0/LIMIT_FPS if LIMIT_FPS else 0.05
        super(BotHandler, self).__init__(*args, **kwargs)

    def render(self, content, headers):
        """Write *headers* and *content*, then finish the request."""
        for (header_name, header_value) in headers:
            self.setHeader(header_name, header_value)
        self.write(content)
        self.finish()

    def simple_render(self, content, content_type="text/plain"):
        self.render(content, [("Content-Type", content_type)])

    def not_found(self, message=None):
        self.setResponseCode(404, message)
        return self.simple_render("no no...")

    def wait(self, seconds, result=None):
        """Returns a deferred that will be fired later"""
        d = defer.Deferred()
        reactor.callLater(seconds, d.callback, result)
        return d

    @defer.inlineCallbacks
    def serve_stream(self):
        """ Serve video stream as multi-part jpg. """
        boundary = "jpgboundary"
        self.setHeader('Connection', 'Keep-Alive')
        self.setHeader('Content-Type', "multipart/x-mixed-replace;boundary=%s" % boundary)
        while True:
            # Skip a frame when the socket's send buffer is saturated so a
            # slow client does not make the buffer grow without bound.
            if not self.transport._isSendBufferFull():
                content = self.get_frame()
                self.write("Content-Type: image/jpg\n")
                self.write("Content-Length: %s\n\n" % (len(content)))
                self.write(content)
                self.write("--%s\n" % (boundary))
            if self.transport.disconnected:
                break
            yield self.wait(self.frame_delay)

    def serve_stream_container(self):
        """Serve the HTML page that embeds the MJPEG stream."""
        headers = [("content-type", "text/html")]
        url = '/stream.avi'
        if 'pwd' in self.args:
            url += '?pwd={}'.format(self.args['pwd'][0])
        content = "<html><head><title>MJPG Server</title></head><body><img src='{}' alt='stream'/></body></html>".format(url)
        self.render(content, headers)

    def serve_frame(self):
        return self.simple_render(self.get_frame(), "image/jpg")

    def process(self):
        """Route the request by the first path segment, after the password check."""
        command_args_list = [x for x in self.path.split("/") if x]
        command = ""
        args = []
        if command_args_list:
            command = command_args_list[0]
            args = command_args_list[1:]
        if PASSWORD:
            if 'pwd' not in self.args or self.args['pwd'][0].split("?")[0] != PASSWORD:
                self.setResponseCode(403)
                return self.simple_render("Password required")
        try:
            if command.startswith("stream"):
                return self.serve_stream()
            elif command == "snapshot":
                return self.serve_frame()
            else:
                return self.serve_stream_container()
        except Exception as e:
            # Bug fix: ``except Exception, e`` is Python-2-only syntax and
            # ``e.message`` no longer exists in Python 3; use the ``as``
            # binding and str(e) instead.
            return self.simple_render(str(e))
        # Unreachable: every branch above returns; kept for fidelity.
        return self.not_found()
class BotHandlerFactory(object):
    """Callable request factory binding a shared Api instance into each BotHandler."""
    def __init__(self, api):
        self.api = api
    def __call__(self, *args, **kwargs):
        return BotHandler(self.api, *args, **kwargs)
class StreamFactory(http.HTTPFactory):
    # Plain HTTP factory; request construction is routed through the patched
    # HTTPChannel.requestFactory installed in Api.__init__.
    protocol = HTTPChannel
class Api(object):
    """Wires a frame recorder into the HTTP server and drives the reactor."""

    def __init__(self, recorder):
        # Globally patch the channel's request factory so every incoming
        # request gets a BotHandler bound to this Api instance (admittedly
        # ugly, but it matches twisted's factory layering).
        HTTPChannel.requestFactory = BotHandlerFactory(api=self)
        self.recorder = recorder
        self.events = []
        self.lock = threading.Lock()

    def demonize(self, port=8080):
        """Serve on *port* from a background daemon thread; returns immediately."""
        reactor.listenTCP(port, StreamFactory())
        worker = threading.Thread(target=reactor.run)
        worker.daemon = True
        worker.start()

    def run(self, port=8080):
        """Serve on *port*, blocking the current thread in the reactor loop."""
        reactor.listenTCP(port, StreamFactory())
        reactor.run()
| StarcoderdataPython |
1642061 | import sys
# IronPython bootstrap for a Revit scripting host: extend sys.path to the
# IronPython standard library, load the Revit/DesignScript assemblies via
# clr, and grab handles to the active document.
pyt_path = r'C:\Program Files (x86)\IronPython 2.7\Lib'
sys.path.append(pyt_path)
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Architecture import *
from Autodesk.Revit.DB.Analysis import *
# __revit__ is injected by the host application (e.g. pyRevit/RevitPythonShell).
doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument
1621066 | import unittest
from datasetio.datasetwriter import DatasetWriter
import h5py
import os
import numpy as np
import string
import random
class TestDatasetWriter(unittest.TestCase):
    """End-to-end tests for DatasetWriter against a temporary HDF5 file."""

    def setUp(self):
        # Small fixture: 100 rows of (20 x 10) float sequences plus label and
        # file-name columns, flushed through a 5-row write buffer.
        self.feat_length = 10
        self.seq_length = 20
        self.buffer_size = 5
        self.num_rows = 100
        self.dataset_file_path = 'test.hdf'
        self.dtypes=[('feat_seq', 'float', (self.seq_length, self.feat_length)), ('label', 'int'), ('file', h5py.string_dtype())]
        self.dataset_writer = DatasetWriter('test', self.num_rows, self.dtypes, self.dataset_file_path, self.buffer_size)
        # File names already handed out, so every generated row is unique.
        self.taken_files = set()

    def tearDown(self):
        os.remove(self.dataset_file_path)

    def initialize_expected_rows(self):
        """Return the content expected of an untouched dataset: all-zero rows."""
        expected_rows = []
        for i in range(0, self.num_rows):
            zero_features = np.zeros((self.seq_length, self.feat_length))
            row = self.generate_row(zero_features, 0, '')
            expected_rows.append(row)
        return expected_rows

    def generate_row(self, features, label, file):
        """Build a row dict in the writer's column layout."""
        return {'feat_seq': features, 'label': label, 'file': file}

    def generate_random_row(self):
        """Build a random row whose file name was not used before in this test."""
        features = np.random.rand(self.seq_length, self.feat_length)
        label = np.random.randint(2)
        letters = string.ascii_lowercase
        # Generate a unique file name, i.e. one that hasn't been used in this test yet.
        file = ''.join(random.choice(letters) for i in range(10)) + '.mp4'
        while file in self.taken_files:
            file = ''.join(random.choice(letters) for i in range(10)) + '.mp4'
        self.taken_files.add(file)
        return {'feat_seq': features, 'label': label, 'file': file}

    def check_equality(self, expected_row, actual_row):
        """Compare an expected row dict with a stored row tuple, column by column."""
        # Column order follows self.dtypes, matching the stored layout.
        expected_row_tuple = tuple([expected_row[name] for name in [dtype[0] for dtype in self.dtypes]])
        actual_row_tuple = tuple(actual_row)
        for expected_val, actual_val in zip(expected_row_tuple, actual_row_tuple):
            if isinstance(expected_val, np.ndarray):
                if not np.array_equal(expected_val, actual_val):
                    return False
            else:
                if expected_val != actual_val:
                    return False
        return True

    def check_db(self, expected_rows):
        """Assert the rows stored in the HDF5 'test' dataset equal *expected_rows*."""
        # NOTE(review): the file handle is never closed here; relies on
        # interpreter cleanup, which is acceptable in a test.
        db = h5py.File(self.dataset_file_path, 'r')
        actual_rows = db['test']
        for expected_row, actual_row in zip(expected_rows, actual_rows):
            self.assertTrue(self.check_equality(expected_row, actual_row))

    def test_empty(self):
        """A writer that was never written to leaves all rows zero-initialized."""
        expected_rows = self.initialize_expected_rows()
        self.check_db(expected_rows)

    def test_add_one_less_than_buffer_size(self):
        """Rows still sitting in the buffer are flushed by close()."""
        expected_rows = self.initialize_expected_rows()
        for i in range(0, self.buffer_size - 1):
            row = self.generate_random_row()
            expected_rows[i] = row
            self.dataset_writer.add(row)
        self.dataset_writer.close()
        self.check_db(expected_rows)

    def test_add_one_more_than_buffer_size(self):
        """Adding past the buffer size triggers an intermediate flush."""
        expected_rows = self.initialize_expected_rows()
        for i in range(0, self.buffer_size + 1):
            row = self.generate_random_row()
            expected_rows[i] = row
            self.dataset_writer.add(row)
        self.dataset_writer.close()
        self.check_db(expected_rows)

    def test_full(self):
        """Filling every row round-trips all data through the writer."""
        expected_rows = self.initialize_expected_rows()
        for i in range(0, self.num_rows):
            row = self.generate_random_row()
            expected_rows[i] = row
            self.dataset_writer.add(row)
        self.dataset_writer.close()
        self.check_db(expected_rows)

if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3381230 | <reponame>chrgraham/tcex
# -*- coding: utf-8 -*-
"""Test the TcEx Args Config Module."""
from ..tcex_init import tcex
# pylint: disable=W0201
class TestArgsConfig:
    """Tests for the TcEx argument/config layer (uses the shared ``tcex`` fixture)."""
    @staticmethod
    def test_address_get():
        """Token fields are populated and the default org matches the test config."""
        assert tcex.args.tc_token
        assert tcex.args.tc_token_expires
        assert tcex.args.api_default_org == 'TCI'
| StarcoderdataPython |
3321342 | import os
import subprocess

scriptPath = os.path.dirname(os.path.abspath(__file__))
# Build with an argument list (no shell): the original os.system string left
# the path unquoted, so it broke on paths containing spaces and was open to
# shell metacharacter surprises. check=True makes a failed build raise
# instead of being silently ignored.
subprocess.run(
    ["docker", "build", "-t", "urodoz/sailfish-runner-base:1.0", scriptPath + "/."],
    check=True,
)
4817076 | import inspect
def find_attributes(clazz):
    """Return (name, value) pairs for *clazz*'s name-mangled data attributes.

    Keeps only non-callable members whose name starts with ``_<ClassName>``
    (double-underscore class attributes after Python's name mangling) and
    drops dunder-style names ending in ``__``.
    """
    members = inspect.getmembers(clazz, lambda member: not inspect.isroutine(member))
    prefix = '_' + clazz.__name__
    return filter(
        lambda pair: pair[0].startswith(prefix) and not pair[0].endswith('__'),
        members,
    )
def getter(original_class):
    """Class decorator exposing each mangled ``_Cls__attr`` as a read-only property.

    For every name-mangled class attribute this installs ``get_Cls__attr``
    plus a property named ``attr`` that delegates to it.
    """
    def install_getters(clazz):
        cls_prefix = '_' + clazz.__name__ + '__'
        for mangled_name, _value in find_attributes(clazz):
            short_name = mangled_name.replace(cls_prefix, '')
            accessor_name = 'get' + mangled_name
            # def get_Cls__attr(self): return self._Cls__attr
            setattr(clazz, accessor_name, _build_getter(mangled_name))
            # attr = property(get_Cls__attr)
            setattr(clazz, short_name, property(getattr(clazz, accessor_name)))

    def _build_getter(mangled_name):
        def fg(self):
            return getattr(self, mangled_name)
        return fg

    install_getters(original_class)
    return original_class
def setter(original_class):
    """Class decorator exposing each mangled ``_Cls__attr`` as a read/write property.

    Installs ``set_Cls__attr`` and rebinds the ``attr`` property with both
    accessors; missing getters are created on demand via :func:`getter`.
    """
    def install_setters(clazz):
        cls_prefix = '_' + clazz.__name__ + '__'
        for mangled_name, _value in find_attributes(clazz):
            short_name = mangled_name.replace(cls_prefix, '')
            set_name = 'set' + mangled_name
            get_name = 'get' + mangled_name
            if not hasattr(clazz, get_name):
                getter(clazz)
            # def set_Cls__attr(self, v): self._Cls__attr = v
            setattr(clazz, set_name, _build_setter(mangled_name))
            # attr = property(get_Cls__attr, set_Cls__attr)
            setattr(clazz, short_name,
                    property(getattr(clazz, get_name), getattr(clazz, set_name)))

    def _build_setter(mangled_name):
        def fs(self, value):
            setattr(self, mangled_name, value)
        return fs

    install_setters(original_class)
    return original_class
def setter_validator(original_class):
    """Class decorator: like :func:`setter`, but the generated setters type-check.

    The expected type is taken from each attribute's class-level default
    value (exact type match; subclass instances are rejected). Attributes
    whose default is None fall back to plain setters via :func:`setter`.
    """
    def install_validated_setters(clazz):
        cls_prefix = '_' + clazz.__name__ + '__'
        for mangled_name, _value in find_attributes(clazz):
            short_name = mangled_name.replace(cls_prefix, '')
            set_name = 'set' + mangled_name
            get_name = 'get' + mangled_name
            if not hasattr(clazz, get_name):
                getter(clazz)
            default = getattr(clazz, mangled_name)
            if default is None:
                # No type to validate against: install unvalidated setters.
                setter(clazz)
            else:
                setattr(clazz, set_name,
                        _build_validated_setter(mangled_name, type(default)))
            # attr = property(get_Cls__attr, set_Cls__attr)
            setattr(clazz, short_name,
                    property(getattr(clazz, get_name), getattr(clazz, set_name)))

    def _build_validated_setter(mangled_name, expected_type):
        def fs(self, value):
            if type(value) != expected_type:
                raise ValueError('value must be ' + expected_type.__name__ + ' type.')
            setattr(self, mangled_name, value)
        return fs

    install_validated_setters(original_class)
    return original_class
| StarcoderdataPython |
1646156 | <gh_stars>0
"""
VERSION
"""
__version__ = '0.0.2' | StarcoderdataPython |
1623078 | import numpy as np
from rllab.core.serializable import Serializable
from rllab.exploration_strategies.base import ExplorationStrategy
from sandbox.rocky.tf.spaces.box import Box
from sandbox.gkahn.gcg.utils import schedules
class GaussianStrategy(ExplorationStrategy, Serializable):
    """Exploration strategy that perturbs actions with scheduled Gaussian noise."""

    def __init__(self, env_spec, endpoints, outside_value):
        # Only continuous (Box) action spaces are meaningful for additive noise.
        assert isinstance(env_spec.action_space, Box)
        Serializable.quick_init(self, locals())
        self._env_spec = env_spec
        self.schedule = schedules.PiecewiseSchedule(endpoints=endpoints, outside_value=outside_value)

    def reset(self):
        # Stateless strategy: nothing to reset between episodes.
        pass

    def add_exploration(self, t, action):
        """Return *action* plus N(0, sigma(t)) noise, clipped to the action box."""
        sigma = self.schedule.value(t)
        noisy = action + np.random.normal(size=len(action)) * sigma
        space = self._env_spec.action_space
        return np.clip(noisy, space.low, space.high)
| StarcoderdataPython |
1793467 | <filename>slides/slide_92_ejemplo_grafico.py
import streamlit as st
import pandas as pd
import numpy as np
import time
import altair as alt
from urllib.error import URLError
from code.shared_functions import skip_echo
def display():
    """Render the 'Altair chart example' slide (UN agricultural data demo)."""
    c1, c2 = st.columns([9,1])
    c1.title("Ejemplo - gráfico en altair")
    show_code = c2.checkbox("Código")
    # st.echo shows this block's source on the page when the checkbox is on;
    # skip_echo() is the no-op alternative context manager.
    with st.echo("above") if show_code else skip_echo():
        # Based on Streamlit's dataframes example
        @st.cache
        def get_UN_data():
            # Downloaded once per session thanks to st.cache.
            AWS_BUCKET_URL = "http://streamlit-demo-data.s3-us-west-2.amazonaws.com"
            df = pd.read_csv(AWS_BUCKET_URL + "/agri.csv.gz")
            return df.set_index("Region")
        df = get_UN_data()
        with st.expander("Explorando el dataframe"):
            st.code("df.head()")
            st.write(df.head())
            st.code("df.describe(include='all').T")
            st.write(df.describe(include='all').T)
            st.code("df.T.describe(include='all').T")
            st.write(df.T.describe(include='all').T)
        countries = st.multiselect("Elegir País(es)", list(df.index), [])
        if not countries:
            st.error("Seleccionar al menos 1 país.")
        else:
            # Rescale to the $1000M units shown in the header below.
            data = df.loc[countries]
            data /= 1000000.0
            st.write("### Producción Agrícola Neta (Gross Agricultural Production) ($1000M)", data.sort_index())
            # Wide (years as columns) -> long format for Altair.
            data = data.T.reset_index()
            data_plot = pd.melt(data, id_vars=["index"]).rename(
                columns={"index": "year", "value": "Gross Agricultural Product ($1000M)"}
            )
            chart = (
                alt.Chart(data_plot)
                .mark_area(opacity=0.3)
                .encode(
                    x="year:T",
                    y=alt.Y("Gross Agricultural Product ($1000M):Q", stack=None),
                    color="Region:N",
                )
            )
            st.altair_chart(chart, use_container_width=True)
107928 | import logging; module_logger = logging.getLogger(__name__)
from pathlib import Path
import sys, re, subprocess, datetime, collections, itertools, pprint, json
from acmacs_base.json import read_json
from . import latex
from .map import sLabDisplayName
# ======================================================================
def make_report(source_dir, source_dir_2, output_dir, report_name="report", report_settings_file="report.json"):
    """Build, compile and open the report described by *report_settings_file*."""
    output_dir.mkdir(exist_ok=True)
    settings = read_json(report_settings_file)
    name = settings.get("output_name", report_name)
    kind = settings.get("type", "report")
    if kind == "report":
        report = LatexReport(source_dir=source_dir, source_dir_2=source_dir_2,
                             output_dir=output_dir, output_name=name, settings=settings)
    elif kind == "signature_pages":
        report = LatexSignaturePageAddendum(source_dir=source_dir, output_dir=output_dir,
                                            output_name=name, settings=settings)
    else:
        raise RuntimeError("Unrecognized report type: {!r}".format(kind))
    report.make_compile_view(update_toc=True)
def make_report_abbreviated(source_dir, source_dir_2, output_dir):
    """Build and open the abbreviated report (settings: report-abbreviated.json)."""
    output_dir.mkdir(exist_ok=True)
    settings = read_json("report-abbreviated.json")
    report = LatexReport(source_dir=source_dir, source_dir_2=source_dir_2,
                         output_dir=output_dir, output_name="report-abbreviated",
                         settings=settings)
    report.make_compile_view(update_toc=True)
# ----------------------------------------------------------------------
def make_report_serumcoverage(source_dir, source_dir_2, output_dir):
    """Build and open the serum-coverage addendum (settings: report-serumcoverage.json)."""
    output_dir.mkdir(exist_ok=True)
    settings = read_json("report-serumcoverage.json")
    report = LatexSerumCoverageAddendum(source_dir=source_dir, source_dir_2=source_dir_2,
                                        output_dir=output_dir,
                                        output_name="report-serumcoverage",
                                        settings=settings)
    report.make_compile_view(update_toc=True)
# ----------------------------------------------------------------------
def make_signature_page_addendum(source_dir, output_dir, title="Addendum 1 (integrated genetic-antigenic analyses)", output_name="sp-addendum", T_SerumCirclesDescriptionEggCell=False):
    """Build and open the signature-page addendum; *title* fills the cover's teleconference line."""
    output_dir.mkdir(exist_ok=True)
    settings = read_json("report.json")
    settings["cover"]["teleconference"] = title
    addendum = LatexSignaturePageAddendum(
        source_dir=source_dir, output_dir=output_dir, settings=settings,
        output_name=output_name,
        T_SerumCirclesDescriptionEggCell=T_SerumCirclesDescriptionEggCell)
    addendum.make_compile_view(update_toc=True)
# ----------------------------------------------------------------------
def make_signature_page_addendum_interleave(source_dirs, output_dir, title="Addendum 1 (integrated genetic-antigenic analyses)", output_name="sp-addendum", T_SerumCirclesDescriptionEggCell=True):
    """Build and open the interleaved signature-page addendum from several source dirs."""
    output_dir.mkdir(exist_ok=True)
    settings = read_json("report.json")
    settings["cover"]["teleconference"] = title
    addendum = LatexSignaturePageAddendumInterleave(
        source_dirs=source_dirs, output_dir=output_dir, settings=settings,
        output_name=output_name,
        T_SerumCirclesDescriptionEggCell=T_SerumCirclesDescriptionEggCell)
    addendum.make_compile_view(update_toc=True)
# ----------------------------------------------------------------------
class LatexReportError (Exception):
    """Raised when report generation, LaTeX compilation or PDF viewing fails."""
    pass
# ----------------------------------------------------------------------
class LatexReport:
sLatexCommand = "cd '{run_dir}' && pdflatex -interaction=nonstopmode -file-line-error '{latex_source}'"
sViewCommand = "open '{output}'"
    def __init__(self, source_dir, source_dir_2, output_dir, output_name, settings):
        """Prepare paths, template substitutions and the optional time-series range.

        :param source_dir: directory with report inputs (resolved to absolute).
        :param source_dir_2: optional secondary source directory.
        :param output_dir: directory receiving <output_name>.tex and the PDF.
        :param output_name: basename (no extension) of the generated files.
        :param settings: parsed report settings dict (see report.json).
        """
        self.source_dir = source_dir.resolve()
        self.source_dir_2 = source_dir_2
        self.latex_source = output_dir.joinpath(output_name + ".tex")
        self.settings = settings
        # Accumulates LaTeX fragments; flushed by make()/write().
        self.data = []
        LOCAL_TIMEZONE = datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo # https://stackoverflow.com/questions/2720319/python-figure-out-local-timezone
        paper_size = settings.get("paper_size", "a4") # a4, letter https://tug.org/TUGboat/tb35-3/tb111thurnherr.pdf
        # NOTE(review): "portreat" looks like a typo for "portrait" -- confirm
        # the LaTeX templates expect this exact token before changing it.
        landscape = "landscape" if settings.get("landscape") else "portreat"
        usepackage = settings.get("usepackage", "")
        # Placeholders substituted into the LaTeX templates (%name% style).
        self.substitute = {
            "documentclass": "\documentclass[%spaper,%s,12pt]{article}" % (paper_size,landscape),
            "cover_top_space": settings.get("cover_top_space", "130pt"),
            "cover_after_meeting_date_space": settings.get("cover_after_meeting_date_space", "180pt"),
            "usepackage": usepackage,
            "cover_quotation": "\\quotation",
            "now": datetime.datetime.now(LOCAL_TIMEZONE).strftime("%Y-%m-%d %H:%M %Z"),
            "$0": sys.argv[0],
        }
        if settings.get("time_series"):
            settings_dates = settings["time_series"]["date"]
            self.start = datetime.datetime.strptime(settings_dates["start"], "%Y-%m-%d").date()
            self.end = datetime.datetime.strptime(settings_dates["end"], "%Y-%m-%d").date() # - datetime.timedelta(days=1)
            self.substitute["time_series_start"] = self.start.strftime("%B %Y")
            # The end date is treated as exclusive: the displayed range ends
            # one day earlier.
            self.substitute["time_series_end"] = (self.end - datetime.timedelta(days=1)).strftime("%B %Y")
            self._make_ts_dates()
    def make_compile_view(self, update_toc):
        """Generate the LaTeX source, compile it, and always open the viewer."""
        self.make()
        try:
            self.compile(update_toc=update_toc)
        finally:
            # Open whatever PDF exists even if compilation failed.
            self.view()
def make(self):
self.data.extend([latex.T_Head, latex.T_ColorsBW, latex.T_Setup, latex.T_Begin])
if not self.settings.get("page_numbering", True):
self.data.append(latex.T_NoPageNumbering)
for page in self.settings["pages"]:
if isinstance(page, dict):
if not page.get("type") and (page.get("?type") or page.get("type?")):
pass # commented out
elif page["type"][0] != "?": # ? in front to comment out
if page.get("lab") and not page.get("lab_display"):
page["lab_display"] = sLabDisplayName[page["lab"].upper()]
getattr(self, "make_" + page["type"])(page)
elif isinstance(page, str):
if page[0] != "?":
getattr(self, "make_" + page)({"type": page})
else:
raise LatexReportError('Unrecognized page description: {!r}'.format(page))
if self.data[-1].endswith("\\newpage"):
self.data[-1] = self.data[-1][:-8] + "\\par\\vspace*{\\fill}\\tiny{Report generated: %now%}\n\\newpage"
self.data.append(latex.T_Tail)
self.write()
def compile(self, update_toc=True):
pf = self.pdf_file()
if pf.exists():
pf.chmod(0o644)
cmd = self.sLatexCommand.format(run_dir=self.latex_source.parent, latex_source=str(self.latex_source.name))
try:
for i in range(2):
module_logger.info('Executing {}'.format(cmd))
log_file = self.latex_source.parent.joinpath("latex.log")
stdout = log_file.open("w")
if subprocess.run(cmd, shell=True, stdout=stdout, stderr=stdout).returncode:
raise LatexReportError('Compilation failed')
finally:
if pf.exists():
pf.chmod(0o444)
def view(self):
cmd = self.sViewCommand.format(output=str(self.pdf_file()))
module_logger.info('Executing {}'.format(cmd))
if subprocess.run(cmd, shell=True).returncode:
raise LatexReportError('Viewer failed')
def pdf_file(self):
return self.latex_source.parent.joinpath(self.latex_source.stem + '.pdf')
# ----------------------------------------------------------------------
def make_cover(self, page):
if self.settings["cover"].get("hemisphere"):
self.data.append(latex.T_Cover)
self.substitute.update({
"report_hemisphere": self.settings["cover"]["hemisphere"],
"report_year": self.settings["cover"]["year"],
"teleconference": self.settings["cover"]["teleconference"],
"addendum": self.settings["cover"].get("addendum", ""),
"meeting_date": self.settings["cover"]["meeting_date"],
})
else:
self.data.append(latex.T_CoverSimple)
self.substitute.update({
"title": self.settings["cover"]["title"],
"date": self.settings["cover"]["date"],
})
def make_toc(self, page):
self.data.append(latex.T_TOC)
def make_section_begin(self, page):
self.data.append(latex.T_Section.format(title=page["title"], subtype=page.get("subtype", "")))
def make_subsection_begin(self, page):
self.data.append(latex.T_Subsection.format(subtype=page["subtype"], lab=page.get("lab", ""), title=page.get("title", page["subtype"])))
def make_new_page(self, page=None):
self.data.append("\\newpage")
def make_blank_page(self, page=None):
self.data.append("\\blankpage")
def make_appendices(self, page=None):
self.data.append(latex.T_Appendices)
def make_serum_circle_description(self, page=None):
self.data.append(latex.T_SerumCirclesDescription)
def make_latex(self, page):
if isinstance(page["text"], str):
self.data.append(page["text"])
else:
self.data.append("\n".join(page["text"]))
# ----------------------------------------------------------------------
def make_geographic_data_description(self, page):
if page["coloring"] == "h3_clade":
self.data.append(latex.T_GeographicDataH3ColoredByCladeDescription)
elif page["coloring"] == "h1_clade":
self.data.append(latex.T_GeographicDataH1ColoredByCladeDescription)
elif page["coloring"] == "b_lineage":
self.data.append(latex.T_GeographicVicYamDataDescription)
elif page["coloring"] == "b_lineage_vic_deletion_mutants":
self.data.append(latex.T_GeographicVicDelMutYamDataDescription)
elif page["coloring"] == "continents":
self.data.append(latex.T_GeographicDataDescription)
else:
raise ValueError("Unrecognized \"coloring\" for \"geographic\" page: " + repr(page["coloring"]))
def make_geographic_ts(self, page):
no = 0
for image in (self.source_dir.joinpath("geo", "{}-geographic-{}.pdf".format(page["subtype"].upper(), d)) for d in self.ts_dates):
if image.exists():
if (no % 3) == 0:
if no:
self.data.append("\\end{GeographicMapsTable}")
self.data.append("\\newpage")
self.data.append("\\begin{GeographicMapsTable}")
self.data.append(" \\GeographicMap{{{}}} \\\\".format(image))
no += 1
if no:
self.data.append("\\end{GeographicMapsTable}")
# ----------------------------------------------------------------------
def make_raw(self, page):
data = page["raw"]
if isinstance(data, list):
data = "\n".join(data)
self.data.append(data)
def make_antigenic_ts_description(self, page):
self.data.append(latex.T_AntigenicTsDescription)
self.data.append(latex.T_AntigenicGridDescription)
if page["coloring"] == "continents":
self.data.append(latex.T_AntigenicColoredByRegionDescription)
else:
raise ValueError("Unrecognized \"coloring\" for antigenic_ts page: " + repr(page["coloring"]))
self.data.append(latex.T_AntigenicBigSmallDotsDescription)
def make_neut_ts_description(self, page):
self.data.append(latex.T_AntigenicTsDescription)
self.data.append(latex.T_NeutGridDescription)
if page["coloring"] == "continents":
self.data.append(latex.T_AntigenicColoredByRegionDescription)
else:
raise ValueError("Unrecognized \"coloring\" for antigenic_ts page: " + repr(page["coloring"]))
self.data.append(latex.T_AntigenicBigSmallDotsDescription)
def make_statistics_table(self, page):
self.data.append(StatisticsTableMaker(
subtype=page["subtype"],
lab=page.get("lab", ""),
source=self.source_dir.joinpath("stat", "stat.json.xz"),
previous_source=self._previous_stat_source(),
start=self.start,
end=self.end,
period='month'
).make())
def make_antigenic_ts(self, page):
# {"type": "antigenic_ts", "lab": "CDC", "subtype": "H3", "assay": "HI"}
image_dir = self.source_dir.joinpath("{}-{}".format(page["subtype"].lower(), page["assay"].lower()))
images = [image for image in (image_dir.joinpath("ts-{}-{}.pdf".format(page["lab"].lower(), date)) for date in self.ts_dates) if image.exists()]
for page_no, images_on_page in enumerate(itertools.zip_longest(*([iter(images)] * 6))):
if page_no:
self.make_new_page()
self._antigenic_map_table(images_on_page)
def make_pdf(self, page):
image = Path(page.get("image", ""))
module_logger.info("PDF {}".format(image))
if image.exists():
self.data.append(latex.T_PhylogeneticTree.format(image=image.resolve()))
def make_signature_page(self, page):
image = Path(page.get("image", ""))
module_logger.info("Signature page (pdf) {}".format(image))
if image.exists():
self.data.append(latex.T_SignaturePage.format(image=image.resolve()))
def make_phylogenetic_description(self, page):
self.data.append(latex.T_PhylogeneticTreeDescription)
def make_phylogenetic_description_h3_142(self, page):
self.data.append(latex.T_PhylogeneticTreeDescription_H3_142)
def make_phylogenetic_description_bvic_del(self, page):
self.data.append(latex.T_PhylogeneticTreeDescription_BVicDeletion)
def make_phylogenetic_tree(self, page):
infix = page.get("filename_infix", "")
image = self.source_dir.joinpath("tree", page["subtype"].lower() + ".tree" + infix + ".pdf")
if not image.exists() and self.source_dir_2:
image = self.source_dir_2.joinpath("tree", page["subtype"].lower() + ".tree.pdf")
module_logger.info("Phylogenetic tree {}".format(image))
if image.exists():
self.data.append(latex.T_PhylogeneticTree.format(image=image))
def make_description(self, page):
self.data.append(page["text"])
def make_map(self, page):
if page.get("image"):
image = Path(page["image"]).resolve()
else:
image = self.source_dir.joinpath("{}-{}".format(page["subtype"].lower(), page["assay"].lower()), "{}-{}.pdf".format(page["map_type"], page["lab"].lower()))
if image and image.exists():
self.data.append(latex.T_OverviewMap.format(image=image))
def make_maps(self, page):
image_scale = page.get("scale", "9 / 30")
tabcolsep = page.get("tabcolsep", 7.0)
arraystretch = page.get("arraystretch", 3.5)
title = page.get("title")
fontsize = page.get("fontsize", R"\normalsize")
for page_no, images_on_page in enumerate(itertools.zip_longest(*([iter(image and Path(image).resolve() for image in page["images"])] * 6))):
if page_no:
self.make_new_page()
self._antigenic_map_table(images_on_page, title=title, image_scale=image_scale, tabcolsep=tabcolsep, arraystretch=arraystretch, fontsize=fontsize)
def make_map_with_title(self, page):
image = Path(page["image"]).resolve()
tabcolsep = page.get("tabcolsep", 1.0)
arraystretch = page.get("arraystretch", 2.5)
image_scale = page.get("scale", "16 / 60")
title = page.get("title")
if image and image.exists():
self.data.append("\\begin{AntigenicMapTableWithSep}{%fpt}{%f}{%s}" % (tabcolsep, arraystretch, image_scale))
if title:
self.data.append("%s \\\\" % title)
self.data.append("\\AntigenicMap{%s}" % image)
self.data.append("\\end{AntigenicMapTableWithSep}")
# ----------------------------------------------------------------------
def write(self):
text = self.do_substitute('\n\n'.join(self.data))
# utility.backup_file(self.latex_source)
with self.latex_source.open('w') as f:
f.write(text)
def do_substitute(self, text):
text = text.replace('%no-eol%\n', '')
for option, value in self.substitute.items():
if isinstance(value, (str, int, float)):
text = text.replace('%{}%'.format(option), str(value))
return text
def _make_ts_dates(self):
self.ts_dates = []
d = self.start
while d < self.end:
self.ts_dates.append(d.strftime('%Y-%m'))
if d.month == 12:
d = datetime.date(year=d.year + 1, month=1, day=1)
else:
d = datetime.date(year=d.year, month=d.month+1, day=1)
module_logger.debug('make_ts_dates {} - {}: {}'.format(self.start, self.end, self.ts_dates))
# def _ts_date_pairs(self):
# for no in range(0, len(self.ts_dates), 2):
# if no < (len(self.ts_dates) - 1):
# yield self.ts_dates[no], self.ts_dates[no+1]
# else:
# yield self.ts_dates[no], None
def _antigenic_map_table(self, images, title=None, image_scale=None, tabcolsep=None, arraystretch=None, fontsize=R"\normalsize"):
if image_scale is not None:
self.data.append("\\begin{AntigenicMapTableWithSep}{%fpt}{%f}{%s}" % (tabcolsep, arraystretch, image_scale))
else:
self.data.append("\\begin{AntigenicMapTable}")
if title:
self.data.append("\multicolumn{2}{>{\hspace{0.3em}}c<{\hspace{0.3em}}}{{%s %s}} \\\\" % (fontsize, title))
for no in range(0, len(images), 2):
if images[no] and images[no + 1]:
self.data.append("\\AntigenicMap{%s} & \\AntigenicMap{%s} \\\\" % (images[no], images[no + 1]))
elif images[no]:
self.data.append("\\AntigenicMap{%s} & \\hspace{18em} \\\\" % (images[no], ))
elif images[no+1]:
self.data.append("\\hspace{18em} & \\AntigenicMap{%s} \\\\" % (images[no + 1], ))
if image_scale is not None:
self.data.append("\\end{AntigenicMapTableWithSep}")
else:
self.data.append("\\end{AntigenicMapTable}")
def _previous_stat_source(self):
previous_dir = self.settings.get("previous")
if previous_dir:
previous_dir = Path(previous_dir).resolve()
source = previous_dir.joinpath("stat", "stat.json.xz")
if not source.exists():
source = previous_dir.joinpath("maps", "stat", "stat.pydata.bz2")
if not source.exists():
raise ValueError("No previous stat data found under " + str(previous_dir))
else:
source = None
return source
# ----------------------------------------------------------------------
class LatexSignaturePageAddendum (LatexReport):
    """Addendum document that collects signature-page PDFs (one per map)
    into a single A4-landscape file with a cover page.

    Pages come either from an explicit settings["files"] list or, when
    absent, from a fixed subtype/lab matrix looked up in ``source_dir``.
    """
    def __init__(self, source_dir, output_dir, output_name="sp-addendum.tex", settings=None, T_SerumCirclesDescriptionEggCell=False):
        """
        :param source_dir: directory containing <subtype>-<lab>-<assay>.pdf files.
        :param output_dir: directory receiving the .tex output.
        :param output_name: output file name (kept for caller compatibility).
        :param settings: cover fields and optional "files" list.
        :param T_SerumCirclesDescriptionEggCell: when True, insert the
            egg/cell serum-circle description instead of a blank page.
        """
        super().__init__(source_dir, None, output_dir, output_name, settings)
        self.T_SerumCirclesDescriptionEggCell = T_SerumCirclesDescriptionEggCell
        # self.latex_source = output_dir.joinpath("sp-addendum.tex")
        # Override the base-class geometry: fixed page size tuned to the
        # signature-page PDFs. Raw strings: "\d" / "\u..." would otherwise
        # be (invalid or real) escape sequences.
        self.substitute.update({
            "documentclass": R"\documentclass[a4paper,landscape,12pt]{article}",
            "cover_top_space": "40pt",
            "cover_after_meeting_date_space": "100pt",
            #"usepackage": "\\usepackage[noheadfoot,nomarginpar,margin=0pt,bottom=20pt,paperheight=1400.0pt,paperwidth=900.0pt]{geometry}",
            "usepackage": "\\usepackage[noheadfoot,nomarginpar,margin=0pt,bottom=10pt,paperheight=900.0pt,paperwidth=565.0pt]{geometry}",
            "cover_quotation": "\\quotation",
        })
    def make(self):
        """Assemble cover + description/blank page + signature pages and
        write the .tex source."""
        self.data.extend([latex.T_Head, latex.T_Setup, latex.T_Begin])
        self.make_cover()
        if self.T_SerumCirclesDescriptionEggCell:
            self.data.append(latex.T_SerumCirclesDescriptionEggCell)
        else:
            self.make_blank_page()
        self.add_pdfs()
        self.data.append(latex.T_Tail)
        self.write()
    def make_cover(self):
        """Emit the WHO-style cover and fill its substitution fields."""
        self.data.append(latex.T_Cover)
        self.substitute.update({
            "report_hemisphere": self.settings["cover"]["hemisphere"],
            "report_year": self.settings["cover"]["year"],
            "teleconference": self.settings["cover"]["teleconference"],
            "addendum": self.settings["cover"].get("addendum", ""),
            "meeting_date": self.settings["cover"]["meeting_date"],
        })
    def add_pdfs(self):
        """Embed signature pages: an explicit settings["files"] list when
        given, otherwise the standard subtype x lab matrix."""
        if self.settings.get("files"):
            for filename in (Path(f) for f in self.settings["files"]):
                module_logger.debug("{}".format(filename))
                if filename.exists():
                    self.data.append(latex.T_SignaturePage.format(image=filename.resolve()))
        else:
            # Imported lazily to avoid a hard dependency when an explicit
            # file list is used.
            from .stat import sLabOrder
            self.add_pdf(subtype="h1", assay="hi", lab="all")
            for lab in sLabOrder:
                self.add_pdf(subtype="h1", assay="hi", lab=lab.lower())
            for lab in sLabOrder:
                self.add_pdf(subtype="h3", assay="hi", lab=lab.lower())
                self.add_pdf(subtype="h3", assay="neut", lab=lab.lower())
            for subtype in ["bv", "by"]:
                for lab in sLabOrder:
                    # if not (subtype == "byam" and lab == "NIMR"):
                    self.add_pdf(subtype=subtype, assay="hi", lab=lab.lower())
    def add_pdf(self, subtype, assay, lab):
        """Embed <subtype>-<lab>-<assay>.pdf from source_dir if it exists."""
        filename = self.source_dir.joinpath("{}-{}-{}.pdf".format(subtype, lab, assay))
        if filename.exists():
            self.data.append(latex.T_SignaturePage.format(image=filename))
# ----------------------------------------------------------------------
class LatexSignaturePageAddendumInterleave (LatexSignaturePageAddendum):
    """Signature-page addendum drawing from several source directories:
    each requested PDF is embedded once per directory that contains it,
    interleaving the directories' pages."""
    def __init__(self, source_dirs, output_dir, output_name="sp-spsc-addendum.tex", settings=None, T_SerumCirclesDescriptionEggCell=False):
        # The base class keeps the first directory as its source_dir; the
        # full resolved list is what add_pdf actually searches.
        super().__init__(source_dirs[0], output_dir, output_name, settings, T_SerumCirclesDescriptionEggCell)
        self.source_dirs = [directory.resolve() for directory in source_dirs]
    def add_pdf(self, subtype, assay, lab):
        """Embed <subtype>-<lab>-<assay>.pdf from every source dir that has it."""
        pdf_name = "{}-{}-{}.pdf".format(subtype, lab, assay)
        for directory in self.source_dirs:
            candidate = directory.joinpath(pdf_name)
            if candidate.exists():
                self.data.append(latex.T_SignaturePage.format(image=candidate))
# ----------------------------------------------------------------------
class LatexSerumCoverageAddendum (LatexReport):
    """Addendum document with serum-coverage map pairs (empirical vs
    theoretical), driven by the newest reviewed-maps JSON file found in
    the current working directory."""
    def make_cover(self, *args):
        """Emit the WHO-style cover and fill its substitution fields.

        Unlike the base class, "addendum" is a required settings key here.
        """
        self.data.append(latex.T_Cover)
        self.substitute.update({
            "report_hemisphere": self.settings["cover"]["hemisphere"],
            "report_year": self.settings["cover"]["year"],
            "teleconference": self.settings["cover"]["teleconference"],
            "addendum": self.settings["cover"]["addendum"],
            "meeting_date": self.settings["cover"]["meeting_date"],
        })
    # {"type": "maps", "arraystretch": 1.0, "images": ["h3-hi/serumcoverage-ANTANANARIVO_2018-007.empirical.all-cdc.pdf", "h3-hi/serumcoverage-ANTANANARIVO_2018-007.theoretical.all-cdc.pdf"],
    #  "title": "CDC HI A(H3N2)/ANTANANARIVO/1067/2016 SIAT3 (2017-10-01) CDC 2018-007"},
    def make_serum_coverage_map_set(self, page):
        """Emit one "maps" page per serum in the newest reviewed JSON file
        matching serumcoverage-reviewed-<lab>-<virus_type>-<assay>.*.json
        in the current directory; silently does nothing when none exists.
        """
        # sorted(...)[-1:] keeps only the lexically last (newest) file,
        # as a list so the no-match case falls through.
        reviewed = sorted(Path(".").glob("serumcoverage-reviewed-{lab}-{virus_type}-{assay}.*.json".format(**page).lower()))[-1:]
        if reviewed:
            map_data = json.load(reviewed[0].open())
            for map_info in map_data:
                prefix = map_info["prefix"].replace('#', '')
                # Escape LaTeX special characters in the serum name.
                serum_name = map_info["serum_name"].replace('#', '\\#').replace('&', '\\&')
                title = "{} {} {}".format(sLabDisplayName[page["lab"]], page["assay"], serum_name)
                # Shrink the title font for long serum names.
                if len(title) < 70:
                    fontsize = R"\normalsize"
                elif len(title) < 90:
                    fontsize = R"\footnotesize"
                else:
                    fontsize = R"\scriptsize"
                subpage = {
                    "type": "maps",
                    "arraystretch": 1.0,
                    "images": ["{}.{}-{}.pdf".format(prefix, infix, page["lab"].lower()) for infix in ["empirical.12m", "theoretical.12m"]],
                    "title": title,
                    "fontsize": fontsize
                }
                self.make_maps(subpage)
# ----------------------------------------------------------------------
class StatisticsTableMaker:
    """Render the per-period antigen/serum statistics table as LaTeX.

    Data comes from a stat.json.xz file produced elsewhere; when a
    previous report's data is supplied, each cell additionally shows the
    count of entries new since that report.
    """
    # Report subtype -> flu-type key used in the current stat data.
    sSubtypeForStatistics = {'h3': 'A(H3N2)', 'h1': 'A(H1N1)', 'bvic': "BVICTORIA", 'bv': "BVICTORIA", 'byam': "BYAMAGATA", 'by': "BYAMAGATA"}
    # Older stat files used different flu-type keys.
    sFluTypePrevious = {'h3': 'H3', 'h1': 'H1PDM', 'bvic': "BVICTORIA", 'byam': "BYAMAGATA"}
    # Column order; the last three are the antigen total and the two serum columns.
    sContinents = ['ASIA', 'AUSTRALIA-OCEANIA', 'NORTH-AMERICA', 'EUROPE', 'RUSSIA', 'AFRICA', 'MIDDLE-EAST', 'SOUTH-AMERICA', 'CENTRAL-AMERICA', 'all', 'sera', 'sera_unique']
    sHeader = {'ASIA': 'Asia', 'AUSTRALIA-OCEANIA': 'Oceania', 'NORTH-AMERICA': 'N Amer', 'EUROPE': 'Europe', 'RUSSIA': 'Russia', 'AFRICA': 'Africa',
               'MIDDLE-EAST': 'M East', 'SOUTH-AMERICA': 'S Amer', 'CENTRAL-AMERICA': 'C Amer', 'all': 'TOTAL', 'month': 'Year-Mo', 'year': 'Year',
               'sera': 'Sera', 'sera_unique': 'Sr Uniq'}
    # Row keys in the stat data are either YYYYMM (month) or YYYY (year).
    sReYearMonth = {'month': re.compile(r'^\d{6}$', re.I), 'year': re.compile(r'^\d{4}$', re.I)}
    # Lab-name aliases to try, in order, when looking up stat data.
    sLabsForGetStat = {"CDC": ["CDC"], "NIMR": ["Crick", "CRICK", "NIMR"], "CRICK": ["Crick", "CRICK", "NIMR"], "MELB": ["VIDRL", "MELB"], "VIDRL": ["VIDRL", "MELB"]}
    def __init__(self, subtype, lab, source, previous_source=None, start=None, end=None, period='month'):
        """
        :param subtype: report subtype key, e.g. "h3" (case-insensitive).
        :param lab: lab name or "all".
        :param source: Path of the current stat.json.xz.
        :param previous_source: optional Path of the previous report's stat data.
        :param start, end: optional date bounds (currently unused by make_dates).
        :param period: 'month' or 'year' row granularity.
        """
        self.subtype = subtype.lower()
        self.lab = lab
        self.data = self._read_data(source)
        self.previous_data = self._read_data(previous_source)
        self.start = start
        self.end = end
        self.period = period
    def _read_data(self, path):
        """Load stat data from *path*; returns None for a falsy path.

        :raises ValueError: for an unsupported file suffix.
        """
        module_logger.info('reading stat data from {}'.format(path))
        if not path:
            data = None
        elif path.suffixes == [".json", ".xz"]:
            data = read_json(path)
        # elif path.suffixes == [".pydata", ".bz2"]:
        #     data = utility.read_pydata(path)
        else:
            raise ValueError("Cannot read stat data from " + str(path))
        return data
    def make(self):
        """Return the complete LaTeX table (header, one row per period,
        total row, footer) as a single string."""
        module_logger.info('Statistics table for {} {}'.format(self.lab, self.subtype))
        flu_type = self.sSubtypeForStatistics[self.subtype]
        lab = self.lab if self.lab == 'all' else self.lab.upper()
        data_antigens = self._get_for_lab(self.data['antigens'][flu_type], lab)
        # module_logger.debug(f"stat table for antigens {flu_type} {lab}:\n{pprint.pformat(self.data['antigens'][flu_type])}")
        data_sera_unique = self._get_for_lab(self.data['sera_unique'].get(flu_type, {}), lab)
        data_sera = self._get_for_lab(self.data['sera'].get(flu_type, {}), lab)
        # Running totals of the previous report's counts; always defined so
        # the total row below can reference it (the original raised
        # NameError here when no previous data was supplied).
        previous_sum = collections.defaultdict(int)
        if self.previous_data:
            # Older stat files may use a different flu-type key.
            if flu_type not in self.previous_data['antigens']:
                flu_type_previous = self.sFluTypePrevious[self.subtype]
            else:
                flu_type_previous = flu_type
            previous_data_antigens = self._get_for_lab(self.previous_data['antigens'].get(flu_type_previous, {}), lab)
            previous_data_sera_unique = self._get_for_lab(self.previous_data['sera_unique'].get(flu_type_previous, {}), lab)
            previous_data_sera = self._get_for_lab(self.previous_data['sera'].get(flu_type_previous, {}), lab)
        else:
            previous_data_antigens, previous_data_sera_unique, previous_data_sera = {}, {}, {}
        r = [self.make_header()]
        for date in self.make_dates(data_antigens):
            r.append(self.make_line(date, data_antigens=data_antigens.get(date, {}), data_sera=data_sera.get(date, {}), data_sera_unique=data_sera_unique.get(date, {}), previous_data_antigens=previous_data_antigens.get(date, {}), previous_data_sera=previous_data_sera.get(date, {}).get('all', 0), previous_data_sera_unique=previous_data_sera_unique.get(date, {}).get('all', 0)))
            if self.previous_data:
                for continent in self.sContinents[:-2]:
                    previous_sum[continent] += previous_data_antigens.get(date, {}).get(continent, 0)
                previous_sum['sera'] += previous_data_sera.get(date, {}).get('all', 0)
                previous_sum['sera_unique'] += previous_data_sera_unique.get(date, {}).get('all', 0)
        total_line = self.make_line('all', data_antigens=data_antigens.get('all', {}), data_sera=data_sera.get('all', {}), data_sera_unique=data_sera_unique.get('all', {}), previous_data_antigens=previous_sum, previous_data_sera=previous_sum['sera'], previous_data_sera_unique=previous_sum['sera_unique'])
        r.extend([
            self.make_separator(),
            total_line,
            self.make_separator(),
            self.make_footer()
            ])
        return '\n'.join(r)
    def _get_for_lab(self, source, lab):
        """Return the stat sub-dict for *lab*, trying known aliases in
        order; {} when no alias is present."""
        for try_lab in self.sLabsForGetStat.get(lab, [lab]):
            r = source.get(try_lab)
            if r is not None:
                return r
        return {}
    def make_header(self):
        """Return the table opening and the column-heading row."""
        r = ''.join(('\\vspace{3em}\\begin{WhoccStatisticsTable}\n  \\hline\n  ',
                     '\\PeriodHeading{{{}}} & {} \\\\\n'.format(self.period.capitalize(), ' & '.join('\\ContinentHeading{{{}}}'.format(n) for n in (self.sHeader[nn] for nn in self.sContinents))),
                     '  \\hline'))
        # The total/serum columns use distinct heading macros.
        r = r.replace('{TOTAL}', 'Total{TOTAL}').replace('{Sr Unique}', 'Last{Sr Unique}').replace('{Sr Uniq}', 'Last{Sr Uniq}')
        return r
    def make_line(self, date, data_antigens, data_sera, data_sera_unique, previous_data_antigens, previous_data_sera, previous_data_sera_unique):
        """Return one table row for *date* ('all' renders the total row).

        Each cell shows the current count and the difference to the
        previous report (0 when no previous data is available).
        """
        def diff_current_previous(continent):
            # Negative differences indicate inconsistent inputs; clamp to 0.
            diff = data_antigens.get(continent, 0) - previous_data_antigens.get(continent, 0)
            if diff < 0:
                module_logger.error('{} {}: Current: {} Previous: {}'.format(self.format_date(date), continent, data_antigens.get(continent, 0), previous_data_antigens.get(continent, 0)))
                diff = 0
            return diff
        data = [self.format_date(date)]
        if self.previous_data:
            if date == 'all':
                data.extend([R'\WhoccStatisticsTableCellTwoTotal{{{}}}{{{}}}'.format(data_antigens.get(continent, 0), diff_current_previous(continent)) for continent in self.sContinents[:-3]])
                data.append( R'\WhoccStatisticsTableCellTwoTotal{{{}}}{{{}}}'.format(data_antigens.get('all', 0), diff_current_previous('all')))
                data.append( R'\WhoccStatisticsTableCellTwoTotal{{{}}}{{{}}}'.format(data_sera.get('all', 0), data_sera.get('all', 0) - previous_data_sera))
                data.append( R'\WhoccStatisticsTableCellTwoTotal{{{}}}{{{}}}'.format(data_sera_unique.get('all', 0), data_sera_unique.get('all', 0) - previous_data_sera_unique))
            else:
                data.extend([R'\WhoccStatisticsTableCellTwo{{{}}}{{{}}}'.format(data_antigens.get(continent, 0), diff_current_previous(continent)) for continent in self.sContinents[:-3]])
                data.append( R'\WhoccStatisticsTableCellTwoTotal{{{}}}{{{}}}'.format(data_antigens.get(self.sContinents[-3], 0), diff_current_previous(self.sContinents[-3])))
                data.append( R'\WhoccStatisticsTableCellTwo{{{}}}{{{}}}'.format(data_sera.get('all', 0), data_sera.get('all', 0) - previous_data_sera))
                data.append( R'\WhoccStatisticsTableCellTwo{{{}}}{{{}}}'.format(data_sera_unique.get('all', 0), data_sera_unique.get('all', 0) - previous_data_sera_unique))
        else:
            # No previous data: show a 0 difference. The original passed only
            # one argument to a two-field template, raising IndexError.
            data.extend([R'\WhoccStatisticsTableCellTwo{{{}}}{{{}}}'.format(data_antigens.get(continent, 0), 0) for continent in self.sContinents[:-2]])
            data.append( R'\WhoccStatisticsTableCellTwo{{{}}}{{{}}}'.format(data_sera.get('all', 0), 0))
            data.append( R'\WhoccStatisticsTableCellTwo{{{}}}{{{}}}'.format(data_sera_unique.get('all', 0), 0))
        return '  ' + ' & '.join(data) + ' \\\\'
    def make_dates(self, data, **sorting):
        """Return the sorted period keys of *data* matching the configured
        period format (YYYYMM or YYYY)."""
        rex = self.sReYearMonth[self.period]
        start = None
        end = None
        # if self.period == 'month':
        #     start = self.start and self.start.strftime('%Y%m')
        #     end = self.end and self.end.strftime('%Y%m')
        return sorted((date for date in data if rex.match(date) and (not start or date >= start) and (not end or date < end)), **sorting)
    def make_separator(self):
        """Return a horizontal-rule row."""
        return '  \\hline'
    def make_footer(self):
        """Return the table closing."""
        return '\\end{WhoccStatisticsTable}\n'
    def format_date(self, date):
        """Format a period key for display: '9...' -> Unknown, 'all' ->
        colored TOTAL, YYYY/YYYY99 -> year (with -?? for months), else
        YYYY-MM."""
        if date[0] == '9':
            result = 'Unknown'
        elif date == 'all':
            result = '\\color{WhoccStatisticsTableTotal} TOTAL'
        elif len(date) == 4 or date[4:] == '99':
            if self.period == 'month':
                result = '{}-??'.format(date[:4])
            else:
                result = '{}'.format(date[:4])
        else:
            result = '{}-{}'.format(date[:4], date[4:])
        return result
return result
# ======================================================================
### Local Variables:
### eval: (if (fboundp 'eu-rename-buffer) (eu-rename-buffer))
### End:
| StarcoderdataPython |
3310407 | <filename>module.py
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 3 15:07:43 2018
@author: KalcikR
"""
# from git import Repo
import textwrap
from reportlab.graphics import shapes
from reportlab.lib.colors import PCMYKColor, PCMYKColorSep, Color, black, blue, red, transparent
from reportlab.platypus import Paragraph
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfmetrics
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.utils import simpleSplit
from reportlab.lib.pagesizes import A4, landscape
import ftplib
cm = 28.346456692913385
import emoji_unicode
import emoji
from emojipy import Emoji
import re
# from ftplib import FTP
import smtplib
from email.message import EmailMessage
import logging
import os
import tweepy
from key import *
logger = logging.getLogger()
# from googleapiclient.discovery import build
# from httplib2 import Http
# from oauth2client import file, client, tools
# from oauth2client.service_account import ServiceAccountCredentials
# import gspread
# If modifying these scopes, delete the file token.json.
#scope = ['https://spreadsheets.google.com/feeds']
#store = file.Storage('token.json')
#creds = store.get()
#if not creds or creds.invalid:
# flow = client.flow_from_clientsecrets('credentials.json', scope)
# creds = tools.run_flow(flow, store)
#service = build('sheets', 'v4', http=creds.authorize(Http()))
#sheet = client.open("fakenewz").sheet1
#SPREADSHEET_ID = '1kHJmRvcfnIW38hbp8wpzbYh7iThsOrf04_C6PbBxhIQ'
#RANGE_NAME = 'Class Data!A2:E'
#result = service.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID,
# range=RANGE_NAME).execute()
#values = result.get('values', [])
#
## The ID and range of a sample spreadsheet.
#SAMPLE_SPREADSHEET_ID = '1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms'
#SAMPLE_RANGE_NAME = 'Class Data!A2:E'
font_file = 'font/Symbola_hint.ttf'
# font_file = 'font/NotoSans-Regular.ttf'
# font_file = 'font/NotoEmoji-Regular.ttf'
# font_file = 'font/OpenSansEmoji.ttf'
# open_font = TTFont('OpenSansEmoji', font_file)
# emoji_font = TTFont('Noto Emoji', font_file)
symbola_font = TTFont('Symbola', font_file)
# noto_font = TTFont('Noto Sans', font_file)
pdfmetrics.registerFont(symbola_font)
# pdfmetrics.registerFont(emoji_font)
# pdfmetrics.registerFont(open_font)
from reportlab.lib.enums import TA_JUSTIFY, TA_LEFT, TA_CENTER
stylesheet=getSampleStyleSheet()
normalStyle = stylesheet['Normal']
# Map from emoji unicode codepoints (lowercase hex; "-"-joined for ZWJ
# sequences) to the icon category used by the card generator.
# NOTE(review): several keys look malformed ('f330 ', 'f95c ', ... have a
# trailing space and appear to be missing the leading "1"; 'fe0f' is the
# variation selector, not an emoji) — confirm against the emoji parser
# that produces these keys.
emoji_dict = {
'1f680':'arms', '1f52b':'arms', '2694':'arms', '1f3f9':'arms', '1f5e1':'arms',
"1f6e2":"oil", "26fd":"oil",
"1f6e9":"air", "2708":"air", "1f6eb":"airstrike", "1f6ec":"airlift",
"1f480":"skull", "2620":"skull",
'1f335':'bio', '1f333':'bio', 'f330 ':'bio', 'f95c ':'bio', 'f344 ':'bio', 'f966 ':'bio', 'f952 ':'bio', 'f336 ':'bio', 'f33d ':'bio', 'f955 ':'bio', 'f954 ':'bio', 'f346 ':'bio', 'f951 ':'bio', 'f965 ':'bio', 'f345 ':'bio', 'f95d ':'bio', 'f353 ':'bio', 'f352 ':'bio', 'f351 ':'bio', 'f350 ':'bio', 'f34f ':'bio', 'f34e ':'bio', 'f34d ':'bio', 'f34c ':'bio', 'f34b ':'bio', 'f34a ':'bio', 'f349 ':'bio', 'f348 ':'bio', 'f347':'bio',
'1f951':'bio', '1f346':'bio', '1f954':'bio', '1f955':'bio', '1f33d':'bio', '1f336':'bio', '1f952':'bio', '1f96c':'bio', '1f966':'bio', '1f344':'bio', '1f95c':'bio', '1f330':'bio', '1f347':'bio', '1f348':'bio', '1f349':'bio', '1f34a':'bio', '1f34b':'bio', '1f34c':'bio', '1f34d':'bio', '1f96d':'bio', '1f34e':'bio', '1f34f':'bio', '1f350':'bio', '1f351':'bio', '1f352':'bio', '1f353':'bio', '1f95d':'bio', '1f345':'bio', '1f965':'bio', '1f331':'bio', '1f332':'bio', '1f334':'bio', '1f33e':'bio', '1f33f':'bio', '2618':'bio', '1f340':'bio', '1f341':'bio', '1f342':'bio', '1f343':'bio', '1f490':'bio', '1f338':'bio', '1f4ae':'bio', '1f3f5':'bio', '1f339':'bio', '1f940':'bio', '1f33a':'bio', '1f33b':'bio', '1f33c':'bio', '1f337':'bio', '1f9a0':'bio',
'1f3c6':'gold', '1f947':'gold', '1f3c5':'gold', '1f396':'gold', '1f3f5':'gold', '1f4b0':'gold', '1f48e':'gold',
'1f48a':'chem', '2697':'chem', '1f321':'chem', '1f489':'chem', '2623':'chem', '2622':'chem',
'1f579':'tech', '1f4f1':'tech', '1f4f2':'tech', '1f4be':'tech', '1f4bd':'tech', '1f4bb':'tech', '1f39a':'tech', '2699':'tech', 'fe0f':'tech',
'1f468-200d-1f393':'sage', '1f469-200d-1f393':'sage', '1f9d9-200d-2642':'sage', '1f9d9-200d-2640':'sage','1f535':'sage',
'1f534':'general', '1f468-200d-2708':'general', '1f469-200d-2708':'general', '1f46e':'general',
'1f4b2':'yollo', '1f4b4':'yollo', '1f4b3':'yollo', '1f4b6':'yollo', '1f4b7':'yollo', '1f4b5':'yollo', '1f4b8':'yollo',
'1f468-200d-1f468-200d-1f466-200d-1f466':'corpz', '1f690':'corpz', '1f463':'corpz', '1f691':'corpz', '1f69b':'corpz', '1f697':'corpz', '1f68c':'corpz', '1f69a':'corpz', '1f68d':'corpz', '1f68e':'corpz', '1f696':'corpz',
'1f682':'corpz', '1f683':'corpz', '1f684':'corpz', '1f685':'corpz', '1f686':'corpz', '1f687':'corpz', '1f688':'corpz', '1f689':'corpz', '1f68a':'corpz', '1f69d':'corpz', '1f69e':'corpz', '1f68b':'corpz', '1f68c':'corpz', '1f68d':'corpz', '1f68e':'corpz', '1f690':'corpz', '1f691':'corpz', '1f692':'corpz', '1f693':'corpz', '1f694':'corpz', '1f695':'corpz', '1f696':'corpz', '1f697':'corpz', '1f698':'corpz', '1f699':'corpz', '1f69a':'corpz', '1f69b':'corpz', '1f69c':'corpz', '1f3ce':'corpz', '1f3cd':'corpz', '1f6f5':'corpz', '1f6b2':'corpz', '1f6f4':'corpz', '1f6f9':'corpz'
}
#def fetch_sheet():
# """Shows basic usage of the Sheets API.
# Prints values from a sample spreadsheet.
# """
# store = file.Storage('token.json')
# creds = store.get()
# if not creds or creds.invalid:
# flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
# creds = tools.run_flow(flow, store)
# service = build('sheets', 'v4', http=creds.authorize(Http()))
#
# # Call the Sheets API
# SPREADSHEET_ID = '1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms'
# RANGE_NAME = 'Class Data!A2:E'
# result = service.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID,
# range=RANGE_NAME).execute()
# values = result.get('values', [])
#
# if not values:
# print('No data found.')
# else:
# print('Name, Major:')
# for row in values:
# # Print columns A and E, which correspond to indices 0 and 4.
# print('%s, %s' % (row[0], row[4]))
# def upload_pdf(file_list, repo_path):
# repo = Repo(repo_path)
# commit_message = 'Add new pdf'
# repo.index.add(file_list)
# repo.index.commit(commit_message)
# origin = repo.remote('origin')
# origin.push('master')
# def upload_ftp(filename, ftp_user, ftp_password):
# ftp = FTP('files.000webhost.com')
# print(ftp.login(user=ftp_user, passwd = ftp_password))
# print(ftp.storbinary('STOR '+ 'public_html/web/' + filename, open(filename, 'rb')))
# ftp.quit()
def scale(drawing, scaling_factor):
    """
    Uniformly scale a reportlab.graphics.shapes.Drawing()
    object, preserving its aspect ratio, and return it.
    """
    factor = scaling_factor
    # Width is derived from the drawing's minimal width; height from its
    # current height.
    drawing.width = drawing.minWidth() * factor
    drawing.height = drawing.height * factor
    drawing.scale(factor, factor)
    return drawing
def draw_label(label, width, height, obj):
    """Draw card text onto *label* (a pyLabels/reportlab drawing).

    :param label: drawing to add shapes to.
    :param width, height: label dimensions (currently unused).
    :param obj: pair of text blocks: obj[0] is a list of wrapped
        description line-lists drawn from the top, obj[1] is a list of
        effect lines drawn from the bottom.
    """
    # + ' <img src="images/air.png" valign="middle"/>',
    i = 90  # y position, stepping down 10pt per text line
    for t in obj[0]:
        for r in t:
            label.add(shapes.String(10, i, str(r), fontName="Helvetica", fontSize=9))
            # NOTE(review): this Paragraph is created but never used — dead code?
            p = Paragraph("lol", normalStyle)
            i = i - 10
        i = i - 20  # extra gap between description blocks
#    i = 50
#    for t in obj[1]:
#        label.add(shapes.String(10, i, str(t), fontName="Helvetica", fontSize=9))
#        i = i - 10
    i = 10
#    for t in obj[2]:
    for t in obj[1]:
        # NOTE(review): shapes.String draws text verbatim — the <b> tags will
        # most likely appear literally rather than render bold; confirm.
        label.add(shapes.String(10, i, '<b>' + str(t) + '</b>', fontName="Helvetica", fontSize=9))
#        label.add(drawing)
        i = i - 10
    # Transparent rectangle framing the effect area at the label bottom.
    s = shapes.Rect(5, 8, 150, 11, fill = True)
    s.fillColor = transparent
    label.add(s)
#    t = _l[0] # worldcontrol[0]
#    with open(fName, 'w') as f:
# t = _l[0] # worldcontrol[0]
# with open(fName, 'w') as f:
#def process_text(tweet):
# t = tweet.text.replace("@truWorldControl", "").replace("#fakenewz", "").strip()
# # t = worldcontrol[2].text
# if t.find("[")!=-1:
# desc = t[0:t.find("[")].strip().split("\n")
# desc = [textwrap.wrap(x, 30) for x in desc]
# effect = t[t.find("[")+1:t.find("]")]
# effect = textwrap.wrap(effect, 30)
# r = [desc, effect]
# else:
# text = t.split("\n")[1:4]
# r = [textwrap.wrap(x, 30) for x in text]
# return(r)
def text2paragraph(text):
    """Turn raw tweet text into reportlab Paragraph objects for the card.

    Strips the bot mentions/hashtags, splits the text into a description part
    and an optional bracketed "[effect]" part, converts arrows/icon names/emoji
    in the effect to inline <img> tags, and returns a list of one or two
    Paragraphs: [description, effect?].
    """
    #TODO If you dont find an effect, still work on the description
    #TODO Icons can also be mentioned in the description --> No because Golden would change to emoji
    font_file = 'font/Symbola_hint.ttf'
    # font_file = 'font/NotoSans-Regular.ttf'
    # font_file = 'font/NotoEmoji-Regular.ttf'
    # font_file = 'font/OpenSansEmoji.ttf'
    # open_font = TTFont('OpenSansEmoji', font_file)
    # emoji_font = TTFont('Noto Emoji', font_file)
    symbola_font = TTFont('Symbola', font_file)
    # noto_font = TTFont('Noto Sans', font_file)
    # NOTE(review): the font is re-registered on every call — harmless but
    # could be hoisted to module level.
    pdfmetrics.registerFont(symbola_font)
    # pdfmetrics.registerFont(emoji_font)
    # pdfmetrics.registerFont(open_font)
    # t = text.replace("@truWorldControl", "").replace("#fakenewz", "").strip()
    t = text
    # Remove the bot handle and campaign hashtag (case-insensitively),
    # then drop blank lines.
    t = re.sub("(?i)@truworldcontrol", "", t)
    t = re.sub("(?i)#fakenewz", "", t)
    t = t.strip()
    t = '\n'.join([x for x in t.splitlines() if x.strip()])
    # t = worldcontrol[2].text
    style_desc = getSampleStyleSheet()
    style_desc = style_desc["BodyText"]
    style_desc.alignment = TA_LEFT
    # style_desc.fontName = 'Noto Emoji'
    # style_desc.spaceAfter = 30
    style_effect = getSampleStyleSheet()
    style_effect = style_effect["BodyText"]
    # style_effect.fontSize = 16
    # style_effect.fontName = 'Noto Emoji'
    style_effect.borderPadding = 2
    style_effect.alignment = TA_CENTER
    style_effect.borderWidth = 1
    style_effect.borderColor = '#000000'
    # effect = re.search("\[(.*?)\]", t)
    r = []
    p_desc = []
    p_effect = []
    # Needs to be refactored
    if t.find("[")!=-1:
        # Text before '[' is the description; the bracketed part is the effect.
        desc = t[0:t.find("[")].strip()
        effect = t[t.find("[")+1:t.find("]")].upper()
        effect_emoji = replace_arrows(effect)
        effect_emoji = replace_icon_names(effect_emoji)
        effect_emoji = replace_emoji(effect_emoji, style_effect)
        effect_emoji = effect_emoji.replace("\n", "<br />")
        p_effect = Paragraph(effect_emoji, style_effect)
    else:
        # No effect block: use a placeholder so the card layout stays stable.
        desc = t
        effect = '_'
        p_effect = Paragraph(effect, style_effect)
    desc_emoji = replace_emoji(desc, style_desc)
    if desc_emoji.find("\n")!=-1:
        # Underline the first line (the card title).
        d = desc_emoji.split("\n")
        d[0] = "<u>" + d[0] + "</u>"
        desc_emoji = "<br />".join(d)
    # Estimate line count to compute a leading that fills the available area.
    lines_desc = simpleSplit(desc, 'Helvetica', 12, 6.5*cm)
    lines_effect = simpleSplit(effect, 'Helvetica', 12, 6.5*cm)
    lineSpacing = 3.88*cm/(len(lines_desc) + len(lines_effect)) - 3
    style_desc.leading = lineSpacing
    p_desc = Paragraph(desc_emoji, style_desc)
    r.append(p_desc)
    if p_effect:
        r.append(p_effect)
    return(r)
def replace_arrows(effect):
    """Replace literal '<' / '>' characters in *effect* with arrow <img> tags.

    The substitution is done in a single pass over the characters. The old
    implementation chained str.replace calls (each duplicated three times);
    because the replacement strings themselves contain '<' and '>', every
    subsequent replace mangled the tags inserted by the previous one.
    """
    arrow_tags = {
        "<": u"<img src='images/arrow_left.png' valign='middle' width = '15' height = '15' />",
        ">": u"<img src='images/arrow_right.png' valign='middle' width = '15' height = '15' />",
    }
    return u"".join(arrow_tags.get(char, char) for char in effect)
def replace_icon_names(effect):
    """Swap known icon names in *effect* for inline <img> tags.

    Matching is case-insensitive: each icon name is first normalised to lower
    case with re.sub, then replaced by an <img> tag pointing at
    ``images/<name>.png``.
    """
    icon = ['arms', 'oil', 'airlift', 'airstrike', 'skull', 'bio', 'gold', 'chem', 'tech', 'sage', 'general', 'yollo', 'corpz']
    for i in icon:
        effect = re.sub("(?i)" + i, i, effect)
        if effect.find(i) != -1:
            # Fix: the template previously had no '{filename}' placeholder, so
            # .format(filename=i) never inserted the icon name into the path.
            effect = effect.replace(i, u"<img src='images/{filename}.png' valign='middle' width = '20' height = '20' />".format(filename = i))
    return effect
def replace_emoji(effect, style):
    """Convert emoji in *effect* into inline <img> tags sized for *style*.

    First tries the emoji_unicode lookup, mapping each emoji's code points
    through ``emoji_dict`` to an image filename; the result is then cleaned up
    by replace_with_emoji. If a code point is missing from ``emoji_dict``
    (KeyError), falls back to character-by-character conversion of the raw
    text.
    """
    try:
        t = emoji_unicode.replace(
            effect,
            # Fix: the template previously had no '{filename}' placeholder, so
            # .format(filename=...) never inserted the image name.
            lambda e: u"<img src='images/{filename}.png' valign='middle' width = '20' height = '20' />".format(filename=emoji_dict[e.code_points])
        )
        t = replace_with_emoji(t, style.fontSize)
    except KeyError:
        print("Key Error")
        # Fix: this fallback used to run unconditionally after the try block,
        # silently discarding the successful emoji_unicode result; it now runs
        # only when the lookup actually failed.
        t = replace_with_emoji(effect, style.fontSize)
    return t
# Pdf doesn't need any unicode inside <image>'s alt attribute
# (reportlab's Paragraph would otherwise receive raw unicode in the alt text).
Emoji.unicode_alt = False
def replace_with_emoji(effect, size):
    """
    Reportlab's Paragraph doesn't accept normal html <image> tag's attributes
    like 'class', 'alt'. Its a little hack to remove those attrbs.

    Each emoji character in *effect* is converted to an emojione <img> tag via
    Emoji.to_image; the emojione-specific class/style attributes are then
    rewritten into explicit height/width (*size*) and the alt attribute is
    stripped so reportlab can render the tag.
    """
    # Fix: the index from enumerate() was never used; iterate characters
    # directly and build the string with join (dead commented code removed).
    converted = "".join(
        Emoji.to_image(c) if c in emoji.UNICODE_EMOJI else c for c in effect
    )
    converted = converted.replace('class="emojione " style="" ', 'height=%s width=%s' %
                                  (size, size))
    return re.sub('alt="' + Emoji.shortcode_regexp + '"', '', converted)
def print_emoji_dict(emoji_dict = emoji_dict):
    """Debug helper: print every emoji character found in *emoji_dict*'s keys.

    Keys are dash-separated hex code points (e.g. "1f1e6-1f1e8"); each code
    point is decoded with chr() and printed on its own line.
    """
    for code_points in emoji_dict:
        for hex_value in code_points.split('-'):
            print(chr(int(hex_value, 16)))
# sval("u" + "'{}'".format(n))
def send_email(email_address, email_password, error = 'None'):
    """Send an error-notification email through Gmail's SMTP server.

    :param email_address: Gmail address used both to log in and as sender.
    :param email_password: password (or app password) for that account.
    :param error: error description placed in the message body.
    """
    server = smtplib.SMTP('smtp.gmail.com', 587)
    # Fix: the connection was leaked if any step between login and send
    # raised; quit() now always runs.
    try:
        server.ehlo()
        server.starttls()
        server.login(email_address, email_password)
        msg = EmailMessage()
        msg['Subject'] = 'Father, I failed you'
        msg['From'] = email_address
        msg['To'] = "<EMAIL>"
        msg.set_content(error)
        server.send_message(msg)
    finally:
        server.quit()
    print('Email sent')
def create_api(account = "@MinisterVlatin"):
    """Build an authenticated tweepy API client for one of the bot accounts.

    :param account: Twitter handle selecting which credential set to use.
    :raises ValueError: if *account* is not one of the known handles.
    :raises Exception: re-raised when credential verification fails.
    :return: a tweepy.API instance with rate-limit waiting enabled.
    """
    if account == "@truWorldControl":
        consumer_key = CONSUMER_KEY_WC
        consumer_secret = CONSUMER_SECRET_WC
        access_token = ACCESS_KEY_WC
        access_token_secret = ACCESS_SECRET_WC
        print("Login for " + account)
    elif account == "@MinisterVlatin":
        consumer_key = CONSUMER_KEY
        consumer_secret = CONSUMER_SECRET
        access_token = ACCESS_KEY
        access_token_secret = ACCESS_SECRET
        print("Login for " + account)
    else:
        # Fix: an unrecognized account previously fell through to an
        # UnboundLocalError on consumer_key; fail fast with a clear message.
        raise ValueError("Unknown account: " + account)
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth, wait_on_rate_limit=True,
                     wait_on_rate_limit_notify=True)
    try:
        api.verify_credentials()
    except Exception as e:
        logger.error("Error creating API", exc_info=True)
        raise e
    logger.info("API created for "+ account)
    return api
1696359 | <reponame>amureki/covidapp
from django.contrib import admin
from data.models import Summary
class SummaryAdmin(admin.ModelAdmin):
    """Django admin configuration for COVID-19 ``Summary`` snapshots."""
    # Columns shown in the changelist view.
    list_display = (
        "id",
        "confirmed",
        "deaths",
        "recovered",
        "created",
        "is_latest_for_day",
    )
    # Both the id and the timestamp link to the detail page.
    list_display_links = ("id", "created")
    # Sidebar filter to show only the last snapshot of each day.
    list_filter = ("is_latest_for_day",)
admin.site.register(Summary, SummaryAdmin)
| StarcoderdataPython |
1734190 | <reponame>lajarre/euphrosyne<filename>lab/admin/__init__.py
from .institution import InstitutionAdmin # noqa: F401
from .object import ObjectGroupAdmin # noqa: F401
from .project import ProjectAdmin # noqa: F401
from .run import RunAdmin # noqa: F401
| StarcoderdataPython |
import os
# Project root: two directories above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# NOTE(review): literal SECRET_KEY — acceptable only for a local/test settings
# module; never ship a hard-coded secret key to production.
SECRET_KEY = 'very-secret-key'
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'intercom'
]
# Single SQLite database stored alongside the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
| StarcoderdataPython |
#!/usr/bin/env python
# encoding: utf-8
"""
config.py
Created by mmiyaji on 2016-07-17.
Copyright (c) 2016 <EMAIL>. All rights reserved.
"""
from views import *
def home(request):
    """
    Case of GET REQUEST '/config/'
    Renders the paginated list of Config definitions.
    Arguments:
    - `request`: Django HttpRequest; optional GET params `page` and `span`
      (NOTE(review): GET values arrive as strings — Config.get_items is
      assumed to accept them as-is; confirm).
    """
    page = 1
    span = 15
    page = request.GET.get('page', page)
    span = request.GET.get('span', span)
    # Fix: removed unused local `order = "-created_at"` — it was never passed
    # to get_items or used anywhere else in the view.
    # entry_count is returned by get_items but not used by this template.
    config_list, entry_count = Config.get_items(span=span, page=page)
    temp_values = {
        "target": "config",
        "title": u"Config定義一覧ページ",
        "config_list": config_list,
        "subscroll": True,
    }
    return render(request, 'server/index.html', temp_values)
def detail(request, target_id):
    """
    Case of GET REQUEST '/config/{id}'
    Arguments:
    - `request`: Django HttpRequest.
    - `target_id`: primary key captured from the URL. NOTE(review): currently
      unused — the view renders the detail template with no object in the
      context; presumably a Config lookup is still to be implemented.
    """
    temp_values = {
        "subscroll":True,
    }
    return render(request, 'server/detail.html', temp_values)
| StarcoderdataPython |
import os
import warnings
warnings.filterwarnings('ignore')
from pandas.core.series import Series
#Data Source
import yfinance as yf
#Data viz
import plotly.graph_objs as go
#Interval required 5 minutes
df = yf.download(tickers='RELIANCE.NS', period='7d', interval='5m')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import lag_plot
from datetime import datetime
from statsmodels.tsa.arima.model import ARIMA
from sklearn.metrics import mean_squared_error
# 70/30 chronological train/test split of the downloaded OHLC frame.
train_data, test_data = df[0:int(len(df)*0.7)], df[int(len(df)*0.7):]
training_data = train_data['Close'].values
test_data = test_data['Close'].values
# Rolling history used for walk-forward forecasting.
history = [x for x in training_data]
model_predictions = []
N_test_observations = len(test_data)
from pmdarima.arima.utils import ndiffs
y = training_data
# NOTE(review): ndiffs estimates the differencing order `d` under different
# stationarity tests; using its ADF/KPSS/PP variants as the ARIMA (p, d, q)
# orders is questionable — confirm this is intended.
## Adf Test
p1 = ndiffs(y, test='adf') # p
# KPSS test
d1 = ndiffs(y, test='kpss') # d
# PP test:
q1 = ndiffs(y, test='pp') # q
#print(p1,d1,q1)
# Walk-forward validation: refit on the growing history, forecast one step,
# then append the true value before the next iteration.
for time_point in range(N_test_observations):
    model = ARIMA(history, order = (p1,d1,q1))
    model_fit = model.fit()
    output = model_fit.forecast()
    yhat = output[0]
    model_predictions.append(yhat)
    true_test_value = test_data[time_point]
    history.append(true_test_value)
# MSE_error = mean_squared_error(test_data, model_predictions)
# print('Testing Mean Squared Error is {}'.format(MSE_error))
# test_set_range = df[int(len(df)*0.7):].index
# plt.plot(test_set_range, model_predictions, color='blue', marker='o', linestyle='dashed',label='Predicted Price')
# plt.plot(test_set_range, test_data, color='red', label='Actual Price')
# plt.title('NIFTY Prices Prediction')
# plt.xlabel('Date')
# plt.ylabel('Prices')
# plt.xticks(np.arange(174,248,20), df.Date[174:248:20])
# plt.legend()
# plt.show()
# Report the most recent one-step-ahead forecast (trailing dataset residue
# that made this line invalid Python has been stripped).
print(model_predictions[-1])
import yaml
document = """
"name": "example_app"
"version": "1.0.0"
"main": "example_app/main.py"
"description": "A example structure for building projects cross-platform using kivy"
"license": "MIT"
"repository":
"type": "git"
"url": "<EMAIL>:VictorManhani/kivy_build.git"
"engines":
"python": "3.7.7"
"kivy": "1.11.1"
"modules":
"example_app/kivy_modules"
"""
# Fix: yaml.load without an explicit Loader is deprecated and can construct
# arbitrary Python objects; safe_load parses plain YAML safely and is the
# right call for this document.
a = yaml.safe_load(document)
a['files'] = "hello"
print(a)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 17:53:42 2018
@author: vicen
"""
import sys
sys.path.append("../../")
from topopy import Flow, Grid
import numpy as np
from scipy.sparse import csc_matrix
import matplotlib.pyplot as plt
# Load a precomputed flow-direction raster for the Tunez test dataset.
fd = Flow()
fd.load_gtiff("../data/in/fd_tunez.tif")
# Cells with more than `threshold` accumulated upstream cells are channels.
threshold = 1000
fac = fd.get_flow_accumulation(nodata=False, asgrid=False)
w = fac > threshold
w = w.ravel()
# Keep only giver/receiver pairs that lie on the channel network.
I = w[fd._ix]
ix = fd._ix[I]
ixc = fd._ixc[I]
# Lookup table from a cell's linear index to its position in the giver array.
# NOTE(review): the table is filled for all of fd._ix and then overwritten for
# the channel subset `ix` — confirm the double assignment is intended.
ixcix = np.zeros(fd._ncells, np.int)
ixcix[fd._ix] = np.arange(len(fd._ix))
ixcix[ix] = np.arange(len(ix))
# Outlet/start coordinates (projected units), converted to a linear cell index.
x = 507194.338
y = 4060151.087
row, col = fd.xy_2_cell(x, y)
channel_ix = fd.cell_2_ind(row, col)
#channel_points = [channel_ix]
#
#new_ind = channel_ix
#while ixcix[new_ind] != 0:
# new_ind = fd._ixc[ixcix[new_ind]]
# channel_points.append(new_ind)
#
##while ixcix[channel_points[-1]] != 0:
## channel_points.append(fd._ixc[ixcix[channel_points[-1]]])
#
#marr = np.zeros(fd._ncells, np.int)
#marr[channel_points] = 1
#marr = marr.reshape(fd._dims)
#plt.imshow(marr)
def get_channels(start_cell):
    """Trace the flow network downstream from *start_cell*.

    Hops from each cell to its receiver via the ``ixcix`` lookup table and
    ``fd._ixc``; a lookup of 0 marks the end of the traced channel. Returns
    the list of visited cell indices, beginning with *start_cell*.
    """
    trace = [start_cell]
    current = start_cell
    while ixcix[current] != 0:
        current = fd._ixc[ixcix[current]]
        trace.append(current)
    return trace
def get_channels2(start_cell):
    """Alternative downstream walk that searches the giver array directly.

    Instead of the ``ixcix`` lookup table, each step finds the current cell's
    position in ``ix`` with np.where and takes the matching receiver from
    ``ixc``; the walk stops when the cell no longer appears as a giver.

    NOTE(review): assumes every cell occurs at most once in ``ix`` —
    ``int(ind)`` raises if a cell has zero or multiple matches; looks like
    exploratory code meant to cross-check get_channels.
    """
    channel_points = [start_cell]
    ind = int(np.where(ix == start_cell)[0])
    cond = True
    while cond:
        ncell = ixc[int(ind)]
        channel_points.append(ncell)
        ind = np.where(ix == ncell)[0]
        if len(ind) == 0:
            cond = False
    return channel_points
# Trace the channel downstream with both implementations; the two paths are
# expected to agree.
pp = get_channels(channel_ix)
pp2 = get_channels2(channel_ix)
#channel_ix = fd.cell_2_ind(row, col)
#channel_points = []
#
#add_ind = channel_ix
#channel_points.append(add_ind)
#
#while ixcix[add_ind] != 0:
# add_ind = ixcix[add_ind]
# channel_points.append(fd._ixc[add_ind]) | StarcoderdataPython |
3209306 | <gh_stars>1000+
import numpy as np
import tensorflow as tf
import lucid.optvis.render as render
import itertools
from lucid.misc.gradient_override import gradient_override_map
def maxpool_override():
    """Build a gradient override map for TF MaxPool ops.

    The true max-pool gradient routes all credit to the single maximal input
    of each window; this override instead backpropagates through a smooth
    surrogate, avg_pool(x^2) / (1e-2 + avg_pool(|x|)), which spreads gradient
    over every input of the window. Returns a dict suitable for lucid's
    ``gradient_override_map``.
    """
    def MaxPoolGrad(op, grad):
        inp = op.inputs[0]
        # Reuse the original op's pooling geometry for the surrogate.
        op_args = [
            op.get_attr("ksize"),
            op.get_attr("strides"),
            op.get_attr("padding"),
        ]
        smooth_out = tf.nn.avg_pool(inp ** 2, *op_args) / (
            1e-2 + tf.nn.avg_pool(tf.abs(inp), *op_args)
        )
        inp_smooth_grad = tf.gradients(smooth_out, [inp], grad)[0]
        return inp_smooth_grad
    return {"MaxPool": MaxPoolGrad}
def get_acts(model, layer_name, obses):
    """Run *model* on a batch of observations and return the activations of
    *layer_name* as a NumPy array. A fresh graph and session are created per
    call, so this is convenient but not cheap for repeated use."""
    with tf.Graph().as_default(), tf.Session():
        t_obses = tf.placeholder_with_default(
            obses.astype(np.float32), (None, None, None, None)
        )
        T = render.import_model(model, t_obses, t_obses)
        t_acts = T(layer_name)
        return t_acts.eval()
def default_score_fn(t):
    """Default scoring: sum the tensor over every non-batch axis, producing
    one scalar score per batch element."""
    non_batch_axes = list(range(1, len(t.shape)))
    return tf.reduce_sum(t, axis=non_batch_axes)
def get_grad_or_attr(
    model,
    layer_name,
    prev_layer_name,
    obses,
    *,
    act_dir=None,
    act_poses=None,
    score_fn=default_score_fn,
    grad_or_attr,
    override=None,
    integrate_steps=1
):
    """Gradient or gradient*activation attribution of a layer score.

    Scores the activations of *layer_name* (optionally projected onto
    *act_dir* and/or gathered at *act_poses*) with *score_fn*, then
    differentiates the summed score w.r.t. *prev_layer_name*'s activations
    (or the observations when *prev_layer_name* is None). With
    integrate_steps > 1 the gradient is averaged along a straight path from
    zero activations (integrated gradients). Returns the raw gradient when
    grad_or_attr == "grad", or activations * gradient when it is "attr".
    """
    with tf.Graph().as_default(), tf.Session(), gradient_override_map(override or {}):
        t_obses = tf.placeholder_with_default(
            obses.astype(np.float32), (None, None, None, None)
        )
        T = render.import_model(model, t_obses, t_obses)
        t_acts = T(layer_name)
        if prev_layer_name is None:
            # Attribute directly onto the input observations.
            t_acts_prev = t_obses
        else:
            t_acts_prev = T(prev_layer_name)
        if act_dir is not None:
            # Project channel activations onto a direction vector.
            t_acts = act_dir[None, None, None] * t_acts
        if act_poses is not None:
            # Pick one spatial position per batch element.
            t_acts = tf.gather_nd(
                t_acts,
                tf.concat([tf.range(obses.shape[0])[..., None], act_poses], axis=-1),
            )
        t_scores = score_fn(t_acts)
        assert len(t_scores.shape) >= 1, "score_fn should not reduce the batch dim"
        t_score = tf.reduce_sum(t_scores)
        t_grad = tf.gradients(t_score, [t_acts_prev])[0]
        if integrate_steps > 1:
            # Integrated gradients: average the gradient along the scaled path.
            acts_prev = t_acts_prev.eval()
            grad = (
                sum(
                    [
                        t_grad.eval(feed_dict={t_acts_prev: acts_prev * alpha})
                        for alpha in np.linspace(0, 1, integrate_steps + 1)[1:]
                    ]
                )
                / integrate_steps
            )
        else:
            acts_prev = None
            grad = t_grad.eval()
        if grad_or_attr == "grad":
            return grad
        elif grad_or_attr == "attr":
            if acts_prev is None:
                acts_prev = t_acts_prev.eval()
            return acts_prev * grad
        else:
            raise NotImplementedError
def get_attr(model, layer_name, prev_layer_name, obses, **kwargs):
    """Attribution (activations * gradient) of layer_name's score onto
    prev_layer_name's activations; thin wrapper over get_grad_or_attr."""
    options = dict(kwargs, grad_or_attr="attr")
    return get_grad_or_attr(model, layer_name, prev_layer_name, obses, **options)
def get_grad(model, layer_name, obses, **kwargs):
    """Gradient of layer_name's score with respect to the observations
    themselves; thin wrapper over get_grad_or_attr."""
    options = dict(kwargs, grad_or_attr="grad")
    return get_grad_or_attr(model, layer_name, None, obses, **options)
def get_paths(acts, nmf, *, max_paths, integrate_steps):
    """Yield integration paths through NMF-factor space for path attribution.

    Decomposes *acts* with *nmf* and, for each sampled half/half split of the
    NMF features, yields a path of *integrate_steps* activation arrays that
    interpolates from zero to the full activations — one feature subset
    accelerating (alpha^2) while the complement decelerates (1-(1-alpha)^2),
    plus each split's complement path. At most *max_paths* paths are yielded
    (all of them when max_paths is None). The reconstruction residual is
    blended back in linearly so every path ends exactly at *acts*.
    """
    acts_reduced = nmf.transform(acts)
    residual = acts - nmf.inverse_transform(acts_reduced)
    combs = itertools.combinations(range(nmf.features), nmf.features // 2)
    if nmf.features % 2 == 0:
        # For even feature counts, keep only splits containing feature 0 so a
        # split and its complement are not both enumerated.
        combs = np.array([comb for comb in combs if 0 in comb])
    else:
        combs = np.array(list(combs))
    if max_paths is None:
        splits = combs
    else:
        # Each split yields two paths (itself and its complement).
        num_splits = min((max_paths + 1) // 2, combs.shape[0])
        splits = combs[
            np.random.choice(combs.shape[0], size=num_splits, replace=False), :
        ]
    for i, split in enumerate(splits):
        indices = np.zeros(nmf.features)
        indices[split] = 1.0
        indices = indices[tuple(None for _ in range(acts_reduced.ndim - 1))]
        complements = [False, True]
        if max_paths is not None and i * 2 + 1 == max_paths:
            # Odd max_paths: pick just one of the final pair at random.
            complements = [np.random.choice(complements)]
        for complement in complements:
            path = []
            for alpha in np.linspace(0, 1, integrate_steps + 1)[1:]:
                if complement:
                    coordinates = (1.0 - indices) * alpha ** 2 + indices * (
                        1.0 - (1.0 - alpha) ** 2
                    )
                else:
                    coordinates = indices * alpha ** 2 + (1.0 - indices) * (
                        1.0 - (1.0 - alpha) ** 2
                    )
                path.append(
                    nmf.inverse_transform(acts_reduced * coordinates) + residual * alpha
                )
            yield path
def get_multi_path_attr(
    model,
    layer_name,
    prev_layer_name,
    obses,
    prev_nmf,
    *,
    act_dir=None,
    act_poses=None,
    score_fn=default_score_fn,
    override=None,
    max_paths=50,
    integrate_steps=10
):
    """Path-integrated attribution averaged over multiple NMF-space paths.

    Same setup as get_grad_or_attr, but instead of a single straight path the
    gradient is integrated along each path produced by get_paths (built from
    *prev_nmf*'s factorization of the previous layer's activations); each
    path's delta*gradient sums are averaged into the final attribution.
    """
    with tf.Graph().as_default(), tf.Session(), gradient_override_map(override or {}):
        t_obses = tf.placeholder_with_default(
            obses.astype(np.float32), (None, None, None, None)
        )
        T = render.import_model(model, t_obses, t_obses)
        t_acts = T(layer_name)
        if prev_layer_name is None:
            t_acts_prev = t_obses
        else:
            t_acts_prev = T(prev_layer_name)
        if act_dir is not None:
            # Project channel activations onto a direction vector.
            t_acts = act_dir[None, None, None] * t_acts
        if act_poses is not None:
            # Pick one spatial position per batch element.
            t_acts = tf.gather_nd(
                t_acts,
                tf.concat([tf.range(obses.shape[0])[..., None], act_poses], axis=-1),
            )
        t_scores = score_fn(t_acts)
        assert len(t_scores.shape) >= 1, "score_fn should not reduce the batch dim"
        t_score = tf.reduce_sum(t_scores)
        t_grad = tf.gradients(t_score, [t_acts_prev])[0]
        acts_prev = t_acts_prev.eval()
        path_acts = get_paths(
            acts_prev, prev_nmf, max_paths=max_paths, integrate_steps=integrate_steps
        )
        # Per-path: step deltas (including the jump from zero to the first
        # point) and the gradient evaluated at each point on the path.
        deltas_of_path = lambda path: np.array(
            [b - a for a, b in zip([np.zeros_like(acts_prev)] + path[:-1], path)]
        )
        grads_of_path = lambda path: np.array(
            [t_grad.eval(feed_dict={t_acts_prev: acts}) for acts in path]
        )
        path_attrs = map(
            lambda path: (deltas_of_path(path) * grads_of_path(path)).sum(axis=0),
            path_acts,
        )
        # Stream over the generator, accumulating the mean attribution.
        total_attr = 0
        num_paths = 0
        for attr in path_attrs:
            total_attr += attr
            num_paths += 1
        return total_attr / num_paths
| StarcoderdataPython |
from pathlib import Path
import numpy as np
import pytest
import aimsprop as ap
@pytest.fixture(scope="module")
def trajectory():
    """Module-scoped fixture: a merged, time-interpolated ethylene FMS90
    trajectory built from the bundled test data directories 0002 and 0003."""
    # 1. Parse a series of FMS90 trajectories that Hayley has run for ethylene
    trajs = [
        ap.parse_fms90(Path(__file__).parent / "test_data" / f"000{x}") for x in [2, 3]
    ]
    # 2. Merge the trajectories into one super-big Trajectory with uniform weights
    traj = ap.Trajectory.merge(trajs, ws=[1.0 / len(trajs)] * len(trajs), labels=[2, 3])
    # 3. Interpolate trajectory with ~1 fs intervals, removing adaptive timesteps from AIMS
    ts = np.arange(0.0, max(traj.ts), 40.0)
    traj = traj.interpolate_nearest(ts)
    yield traj
| StarcoderdataPython |
142566 | <reponame>Yi-Zoey/adversarial-robustness-toolbox
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2021
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the `LowProFool` attack. This is a white-box attack.
Its main objective is to take a valid tabular sample and transform it, so that a given classifier predicts it to be some
target class.
`LowProFool` attack transforms the provided real-valued tabular data into adversaries of the specified target classes.
The generated adversaries have to be as close as possible to the original samples in terms of the weighted Lp-norm,
where the weights determine each feature's importance.
| Paper link: https://arxiv.org/abs/1911.03274
"""
import logging
from typing import Callable, Optional, Union, TYPE_CHECKING
import numpy as np
from scipy.stats import pearsonr
from tqdm.auto import trange
from sklearn.metrics import log_loss
from art.attacks.attack import EvasionAttack
from art.estimators.estimator import LossGradientsMixin
from art.estimators.estimator import BaseEstimator
from art.estimators.classification.classifier import ClassifierMixin
if TYPE_CHECKING:
from art.utils import CLASSIFIER_CLASS_LOSS_GRADIENTS_TYPE
logger = logging.getLogger(__name__)
class LowProFool(EvasionAttack):
"""
`LowProFool` attack.
| Paper link: https://arxiv.org/abs/1911.03274
"""
attack_params = EvasionAttack.attack_params + [
"n_steps",
"threshold",
"lambd",
"eta",
"eta_decay",
"eta_min",
"norm",
"importance",
"verbose",
]
_estimator_requirements = (BaseEstimator, LossGradientsMixin, ClassifierMixin)
    def __init__(
        self,
        classifier: "CLASSIFIER_CLASS_LOSS_GRADIENTS_TYPE",
        n_steps: int = 100,
        threshold: Union[float, None] = 0.5,
        lambd: float = 1.5,
        eta: float = 0.2,
        eta_decay: float = 0.98,
        eta_min: float = 1e-7,
        norm: Union[int, float, str] = 2,
        importance: Union[Callable, str, np.ndarray] = "pearson",
        verbose: bool = False,
    ) -> None:
        """
        Create a LowProFool instance.

        :param classifier: Appropriate classifier's instance (must expose loss
            gradients, prediction probabilities and clip values).
        :param n_steps: Number of iterations to follow.
        :param threshold: Lowest prediction probability of a valid adversary;
            `None` means plain argmax match with the target class.
        :param lambd: Amount of lp-norm impact on objective function.
        :param eta: Rate of updating the perturbation vectors.
        :param eta_decay: Multiplicative step-by-step decrease of eta.
        :param eta_min: Minimal eta value.
        :param norm: Parameter `p` for Lp-space norm (norm=2 - euclidean norm;
            "inf"/np.inf for the L-infinity norm).
        :param importance: Function to calculate feature importance with
            or vector of those precomputed; possibilities:
            > 'pearson' - Pearson correlation (string)
            > function - Custom function (callable object)
            > vector - Vector of feature importance (np.ndarray)
        :param verbose: Verbose mode / Show progress bars.
        """
        super().__init__(estimator=classifier)
        self.n_steps = n_steps
        self.threshold = threshold
        self.lambd = lambd
        self.eta = eta
        self.eta_decay = eta_decay
        self.eta_min = eta_min
        self.norm = norm
        self.importance = importance
        self.verbose = verbose
        # LowProFool is inherently targeted: adversaries are driven towards
        # explicit target classes supplied to generate().
        self._targeted = True
        self.n_classes = self.estimator.nb_classes
        self.n_features = self.estimator.input_shape[0]
        # Feature importance weights; filled here for ndarray input, otherwise
        # by fit_importances().
        self.importance_vec = None
        if self.estimator.clip_values is None:
            logger.warning(
                "The `clip_values` attribute of the estimator is `None`, therefore this instance of LowProFool will by "
                "default generate adversarial perturbations without clipping them."
            )
        self._check_params()
        if isinstance(self.importance, np.ndarray):
            self.importance_vec = self.importance
        # Warn when eta hits its floor long before the last iteration — the
        # remaining steps would all use the minimal learning rate.
        if eta_decay < 1 and eta_min > 0:
            steps_before_min_eta_reached = np.ceil(np.log(eta_min / eta) / np.log(eta_decay))
            if steps_before_min_eta_reached / self.n_steps < 0.8:
                logger.warning(
                    "The given combination of 'n_steps', 'eta', 'eta_decay' and 'eta_min' effectively sets learning "
                    "rate to its minimal value after about %d steps out of all %d.",
                    steps_before_min_eta_reached,
                    self.n_steps,
                )
def __weighted_lp_norm(self, perturbations: np.ndarray) -> np.ndarray:
"""
Lp-norm of perturbation vectors weighted by feature importance.
:param perturbations: Perturbations of samples towards being adversarial.
:return: Array with weighted Lp-norm of perturbations.
"""
return self.lambd * np.linalg.norm(
self.importance_vec * perturbations, axis=1, ord=(np.inf if self.norm == "inf" else self.norm)
).reshape(-1, 1)
    def __weighted_lp_norm_gradient(self, perturbations: np.ndarray) -> np.ndarray:
        """
        Gradient of the weighted Lp-space norm with regards to the data vector.

        :param perturbations: Perturbations of samples towards being adversarial.
        :return: Weighted Lp-norm gradients array.
        """
        norm = self.norm
        if isinstance(norm, (int, float)) and norm < np.inf and self.importance_vec is not None:
            # Analytic gradient of ||w * p||_p w.r.t. p (the importance weight
            # w enters squared via the chain rule).
            numerator = (
                self.importance_vec * self.importance_vec * perturbations * np.power(np.abs(perturbations), norm - 2)
            )
            denominator = np.power(np.sum(np.power(self.importance_vec * perturbations, norm)), (norm - 1) / norm)
            # Guard against a vanishing denominator: zero the gradient where
            # the norm is (numerically) zero instead of dividing by ~0.
            numerator = np.where(denominator > 1e-10, numerator, np.zeros(numerator.shape[1]))
            denominator = np.where(denominator <= 1e-10, 1.0, denominator)
            return numerator / denominator
        # L-infinity norm (norm in ["inf", np.inf]): subgradient is the sign at
        # the maximal-magnitude coordinate(s), zero elsewhere.
        numerator = np.array(self.importance_vec * perturbations)
        optimum = np.max(np.abs(numerator))
        return np.where(abs(numerator) == optimum, np.sign(numerator), 0)
def __get_gradients(self, samples: np.ndarray, perturbations: np.ndarray, targets: np.ndarray) -> np.ndarray:
"""
Gradient of the objective function with regards to the data vector, i.e. sum of the classifier's loss gradient
and weighted lp-space norm gradient, both with regards to data vector.
:param samples: Base design matrix.
:param perturbations: Perturbations of samples towards being adversarial.
:param targets: The target labels for the attack.
:return: Aggregate gradient of objective function.
"""
clf_loss_grad = self.estimator.loss_gradient(
(samples + perturbations).astype(np.float32), targets.astype(np.float32)
)
norm_grad = self.lambd * self.__weighted_lp_norm_gradient(perturbations)
return clf_loss_grad + norm_grad
    def __loss_function(self, y_probas: np.ndarray, perturbations: np.ndarray, targets: np.ndarray) -> np.ndarray:
        """
        Complete loss function to optimize, where the adversary loss is given by the sum of logistic loss of
        classification and weighted Lp-norm of the perturbation vectors. Do keep in mind that not all classifiers
        provide a well defined loss estimation function - therefore it is logistic loss, which is used instead.

        :param y_probas: Class-wise prediction probabilities.
        :param perturbations: Perturbations of samples towards being adversarial.
        :param targets: The target labels for the attack.
        :return: Aggregate loss score.
        """
        # NOTE(review): sklearn's log_loss signature is (y_true, y_pred); here
        # the probabilities are passed first — confirm the order is intended.
        clf_loss_part = log_loss(y_probas, targets)
        # NOTE(review): __weighted_lp_norm already multiplies by self.lambd, so
        # the penalty is effectively scaled by lambd**2 here — confirm.
        norm_part = self.__weighted_lp_norm(perturbations)
        return clf_loss_part + self.lambd * norm_part
def __apply_clipping(self, samples: np.ndarray, perturbations: np.ndarray) -> np.ndarray:
"""
Function for clipping perturbation vectors to forbid the adversary vectors to go beyond the allowed ranges of
values.
:param samples: Base design matrix.
:param perturbations: Perturbations of samples towards being adversarial.
:return: Clipped perturbation array.
"""
if self.estimator.clip_values is None:
return perturbations
mins = self.estimator.clip_values[0]
maxs = self.estimator.clip_values[1]
np.clip(perturbations, mins - samples, maxs - samples, perturbations)
return perturbations
    def __calculate_feature_importances(self, x: np.ndarray, y: np.ndarray) -> None:
        """
        This function calculates feature importances using a specified built-in function or applies a provided custom
        function (callable object). It calculates those values on the passed training data and stores the result in
        ``self.importance_vec``.

        :param x: Design matrix of the dataset used to train the classifier.
        :param y: Labels of the dataset used to train the classifier.
        :raises TypeError: if a custom function returns something not convertible to np.ndarray,
            or ``self.importance`` is neither 'pearson' nor callable.
        :raises ValueError: if the resulting vector does not have shape (n_features,).
        :return: None.
        """
        if self.importance == "pearson":
            # Apply a simple Pearson correlation calculation.
            pearson_correlations = [pearsonr(x[:, col], y)[0] for col in range(x.shape[1])]
            # L2-normalise the absolute correlations so magnitudes are comparable.
            absolutes = np.abs(np.array(pearson_correlations))
            self.importance_vec = absolutes / np.power(np.sum(absolutes ** 2), 0.5)
        elif callable(self.importance):
            # Apply a custom function to call on the provided data.
            try:
                self.importance_vec = np.array(self.importance(x, y))
            except Exception as exception:
                logger.exception("Provided importance function has failed.")
                raise exception
            # Validate the custom function's output; reset state before raising.
            if not isinstance(self.importance_vec, np.ndarray):
                self.importance_vec = None
                raise TypeError("Feature importance vector should be of type np.ndarray or any convertible to that.")
            if self.importance_vec.shape != (self.n_features,):
                self.importance_vec = None
                raise ValueError("Feature has to be one-dimensional array of size (n_features, ).")
        else:
            raise TypeError("Unrecognized feature importance function: {}".format(self.importance))
    def fit_importances(
        self,
        x: Optional[np.ndarray] = None,
        y: Optional[np.ndarray] = None,
        importance_array: Optional[np.ndarray] = None,
        normalize: Optional[bool] = True,
    ):
        """
        This function allows one to easily calculate the feature importance vector using the pre-specified function,
        in case it wasn't passed at initialization.

        :param x: Design matrix of the dataset used to train the classifier.
        :param y: Labels of the dataset used to train the classifier.
        :param importance_array: Array providing features' importance score; takes precedence over (x, y).
        :param normalize: Assure that feature importance values sum to 1.
        :raises ValueError: if *importance_array* does not have shape (n_features,).
        :return: LowProFool instance itself (for chaining).
        """
        if importance_array is not None:
            # Use a pre-calculated vector of feature importances.
            if np.array(importance_array).shape == (self.n_features,):
                self.importance_vec = np.array(importance_array)
            else:
                raise ValueError("Feature has to be one-dimensional array of size (n_features, ).")
        elif self.importance_vec is None:
            # Apply a function specified at the LowProFool instance initialization.
            self.__calculate_feature_importances(np.array(x), np.array(y))
        if normalize:
            # Make sure that importance vector sums to 1.
            self.importance_vec = np.array(self.importance_vec) / np.sum(self.importance_vec)
        return self
    def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
        """
        Generate adversaries for the samples passed in the `x` data matrix, whose targets are specified in `y`,
        one-hot-encoded target matrix. This procedure makes use of the LowProFool algorithm. In the case of failure,
        the resulting array will contain the initial samples on the problematic positions - which otherwise should
        contain the best adversary found in the process.

        :param x: An array with the original inputs to be attacked.
        :param y: One-hot-encoded target classes of shape (nb_samples, nb_classes).
        :param kwargs: Unused; accepted for interface compatibility.
        :raises ValueError: if importances were never fitted, `y` is missing, or shapes mismatch.
        :return: An array holding the adversarial examples.
        """
        if self.importance_vec is None:
            raise ValueError("No feature importance vector has been provided yet.")
        if y is None:
            raise ValueError("It is required to pass target classes as `y` parameter.")
        # Make sure samples and targets are of type np.ndarray.
        samples = np.array(x, dtype=np.float64)
        targets = np.array(y, dtype=np.float64)
        # Extract the target classes as integers implying their classes' indices.
        targets_integer = np.argmax(y, axis=1)
        if targets.shape[1] != self.n_classes:
            raise ValueError("Targets shape is not compatible with number of classes.")
        if samples.shape[1] != self.n_features:
            raise ValueError("Samples shape is not compatible with number of features.")
        # Initialize perturbation vectors and learning rate.
        perturbations = np.zeros(samples.shape, dtype=np.float64)
        eta = self.eta
        # Initialize 'keep-the-best' variables.
        best_norm_losses = np.inf * np.ones(samples.shape[0], dtype=np.float64)
        best_perturbations = perturbations.copy()
        # Success indicators per sample.
        success_indicators = np.zeros(samples.shape[0], dtype=np.float64)
        # Predicate used to determine whether the target was met based on the given probabilities.
        def met_target(probas, target_class):
            if self.threshold is None:
                return np.argmax(probas) == target_class
            return probas[target_class] > self.threshold
        # Main loop.
        for _ in trange(self.n_steps, desc="LowProFool", disable=not self.verbose):
            # Calculate gradients, apply them to perturbations and clip if needed.
            grad = self.__get_gradients(samples, perturbations, targets)
            perturbations -= eta * grad
            perturbations = self.__apply_clipping(samples, perturbations)
            # Decrease learning rate for the next iteration.
            eta = max(eta * self.eta_decay, self.eta_min)
            # Calculate class-wise probabilities.
            y_probas = self.estimator.predict((samples + perturbations).astype(np.float32))
            # Examine the quality of adversaries in the current step.
            for j, target_int in enumerate(targets_integer):
                # Check for every sample whether the threshold probability is reached.
                if met_target(y_probas[j], target_int):
                    success_indicators[j] = 1.0
                    # Calculate weighted Lp-norm loss.
                    norm_loss = self.__weighted_lp_norm(perturbations[j : j + 1])[0, 0]
                    # Note it, if the adversary improves.
                    if norm_loss < best_norm_losses[j]:
                        best_norm_losses[j] = norm_loss
                        best_perturbations[j] = perturbations[j].copy()
        logger.info(
            "Success rate of LowProFool attack: {:.2f}%".format(
                100 * np.sum(success_indicators) / success_indicators.size
            )
        )
        # The generated adversaries are a sum of initial samples and best perturbation vectors found by the algorithm.
        return samples + best_perturbations
def _check_params(self) -> None:
    """
    Check correctness of the attack's hyper-parameters.

    :return: None.
    :raises ValueError: If any parameter has a wrong type or an out-of-range value.
    """
    if not (isinstance(self.n_classes, int) and self.n_classes > 0):
        raise ValueError("The argument `n_classes` has to be positive integer.")

    # BUGFIX: this check previously tested `self.n_classes > 0` again instead
    # of `self.n_features > 0`, so a non-positive `n_features` slipped through.
    if not (isinstance(self.n_features, int) and self.n_features > 0):
        raise ValueError("The argument `n_features` has to be positive integer.")

    if not (isinstance(self.n_steps, int) and self.n_steps > 0):
        raise ValueError("The argument `n_steps` (number of iterations) has to be positive integer.")

    if not ((isinstance(self.threshold, float) and 0 < self.threshold < 1) or self.threshold is None):
        raise ValueError("The argument `threshold` has to be either float in range (0, 1) or None.")

    if not (isinstance(self.lambd, (float, int)) and self.lambd >= 0):
        raise ValueError("The argument `lambd` has to be non-negative float or integer.")

    if not (isinstance(self.eta, (float, int)) and self.eta > 0):
        raise ValueError("The argument `eta` has to be positive float or integer.")

    if not (isinstance(self.eta_decay, (float, int)) and 0 < self.eta_decay <= 1):
        raise ValueError("The argument `eta_decay` has to be float or integer in range (0, 1].")

    if not (isinstance(self.eta_min, (float, int)) and self.eta_min >= 0):
        raise ValueError("The argument `eta_min` has to be non-negative float or integer.")

    # `norm` may be any positive number, the string "inf", or np.inf.
    if not (
        (isinstance(self.norm, (float, int)) and self.norm > 0)
        or (isinstance(self.norm, str) and self.norm == "inf")
        or self.norm == np.inf
    ):
        raise ValueError('The argument `norm` has to be either positive-valued float or integer, np.inf, or "inf".')

    # `importance` is a named heuristic, a callable, or a precomputed vector
    # with exactly one weight per feature.
    if not (
        isinstance(self.importance, str)
        or callable(self.importance)
        or (isinstance(self.importance, np.ndarray) and self.importance.shape == (self.n_features,))
    ):
        raise ValueError(
            "The argument `importance` has to be either string, "
            + "callable or np.ndarray of the shape (n_features, )."
        )

    if not isinstance(self.verbose, bool):
        raise ValueError("The argument `verbose` has to be of type bool.")
| StarcoderdataPython |
3391497 | <filename>tests/unittest/test_package.py
"""
Test import and versioning of the package.
"""
def test_import():
    """
    Test that the package can be imported and the module object is truthy.
    """
    import nocaselist  # noqa: F401 pylint: disable=import-outside-toplevel

    assert nocaselist
def test_versioning():
    """
    Test that the package exposes a truthy ``__version__`` attribute.
    """
    import nocaselist  # noqa: F401 pylint: disable=import-outside-toplevel

    assert nocaselist.__version__
| StarcoderdataPython |
3364074 | <gh_stars>0
import os
from flask import Flask
'''
To run the application:
- under Windows
set FLASK_APP=flaskr
set FLASK_ENV=development
flask run
- under Linux:
export FLASK_APP=flaskr
export FLASK_ENV=development
flask run
'''
def create_app(test_config=None):
    """Application factory: create and configure the Flask instance.

    :param test_config: Optional mapping of settings used by tests; when
        given it is applied instead of the instance config.
    :return: The configured Flask application.
    """
    # Config files are located relative to the instance directory, which is
    # kept out of version control and may hold deployment-specific settings.
    app = Flask(__name__, instance_relative_config=True)

    # Defaults: SECRET_KEY keeps data safe (override with a random value in
    # production); DATABASE points at the SQLite file inside the instance dir.
    app.config.from_mapping(
        SECRET_KEY='dev',
        DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite')
    )

    if test_config is not None:
        # Tests supply configuration directly, independent of instance config.
        app.config.from_mapping(test_config)
    else:
        # Otherwise pick up the instance config (e.g. a production
        # SECRET_KEY) from config.py, if it exists.
        app.config.from_pyfile('config.py', silent=True)

    # The instance directory is not created automatically; make sure it
    # exists so the database file can be created inside it.
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass

    # A simple page that says hello.
    @app.route('/hello')
    def hello():
        return 'Hello world'

    # Register the database commands with the app.
    from . import db
    db.init_app(app)

    # Import and register the blueprints.
    from . import auth, blog
    app.register_blueprint(auth.bp)
    app.register_blueprint(blog.bp)

    # The blog blueprint carries no url_prefix, so its index view ('/') is
    # the whole app's index; this rule maps the 'index' endpoint (used by
    # login/register redirects) onto that view.
    app.add_url_rule('/', endpoint='index')

    return app
| StarcoderdataPython |
2277 | from . import utils
from . import display
from . import save
from . import FFTW
from . import stackregistration
__version__="0.2.1" | StarcoderdataPython |
4815007 | <filename>server/app/__init__.py
import os
from flask import Flask
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
# Create the Flask application and load its settings from the module named by
# the APP_SETTINGS environment variable (raises KeyError if it is unset).
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])

# Database handle and Alembic migration support bound to this app.
db = SQLAlchemy(app)
migrate = Migrate(app, db)
from app import routes, models # noqa: E401,E402,F401
| StarcoderdataPython |
185246 | <reponame>AronYang/flask-base-admin
# coding=utf-8
import logging
import os
from flask import Flask, current_app, jsonify, request
from flask_cache import Cache
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from werkzeug.exceptions import (HTTPException, InternalServerError,
default_exceptions)
import conf
from app import api
from app.models.base import db
def init_database(app):
    """Bind SQLAlchemy to the app and enable Flask-Migrate."""
    db.init_app(app)
    # Expose the app on the shared db object so it is usable outside an
    # application context.
    db.app = app
    Migrate(app, db)
def init_logger(app):
    """Attach a UTF-8 file handler writing to LOG_DIR/app.log."""
    file_handler = logging.FileHandler(
        os.path.join(app.config['LOG_DIR'], 'app.log'), encoding='UTF-8')
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s'))
    # Verbose logging while debugging, errors only otherwise.
    file_handler.setLevel(logging.DEBUG if app.debug else logging.ERROR)
    app.logger.addHandler(file_handler)
def init_config(app):
    """Load settings from the project ``conf`` module and default LOG_DIR."""
    app.config.from_object(conf)
    # Fall back to the application root when no log directory is configured.
    if not app.config.get('LOG_DIR'):
        app.config['LOG_DIR'] = app.root_path
def init_route(app):
    """Mount the API blueprint under the /api prefix."""
    app.register_blueprint(api.router, url_prefix='/api')
def init_cache(app):
    """Attach a Flask-Cache instance to the app.

    NOTE(review): this initializer is never invoked by create_app() in this
    file — confirm whether that is intentional.
    """
    app.cache = Cache(app)
def init_error(app):
    """Register a JSON error handler for every default HTTP exception."""

    def handle(exc):
        # Normalize non-HTTP errors to a 500 so code/description exist.
        if not isinstance(exc, HTTPException):
            exc = InternalServerError()
        app.logger.error(f"'error {exc.code}: [{str(exc.description)}:{request.url}]'")
        # Dict descriptions are returned as-is; anything else is wrapped.
        if isinstance(exc.description, dict):
            return exc.description, exc.code
        return jsonify({"msg": exc.description, "status": False}), exc.code

    for status_code in default_exceptions:
        app.register_error_handler(status_code, handle)
def create_app():
    """Build the Flask app: config, database, logging, routes, errors."""
    app = Flask(__name__,)
    init_config(app)
    init_database(app)
    init_logger(app)
    init_route(app)
    init_error(app)
    # NOTE(review): init_cache(app) is defined above but never called here —
    # confirm intentional.
    return app
| StarcoderdataPython |
147407 | from collections import namedtuple
from enum import Enum
# Names of every Constants enum member, grouped loosely by topic (RL data
# fields, network hyper-parameters, replay-buffer settings, robot/simulator
# options, evaluation metrics, ...).
constants = [
    "STATES", "STATE_LABELS", "ACTIONS", "REWARDS", "QS", "NEXT_STATES", "NEXT_STATE_LABELS",
    "COMMAND", "NUM_EXP", "TIMESTAMP", "INPUT_SIZE", "NEURONS", "USE_BATCH_NORM", "USE_LAYER_NORM",
    "ACTIVATION_LAST", "NUM_COMPONENTS", "NUM_ACTIONS", "BETA", "NO_SAMPLE", "FREEZE_HMM_VAR",
    "FREEZE_HMM_PRIOR", "NO_NEXT_GRAD", "NO_HMM", "USE_SOFTPLUS", "LOAD_PATH", "LATENT_SIZE",
    "ENCODER_LEARNING_RATE", "PRIOR_LEARNING_RATE", "ENCODER_WEIGHT_DECAY", "PRIOR_WEIGHT_DECAY",
    "VALIDATION_FRACTION", "BATCH_SIZE", "DISCOUNT", "GT_QS", "LOG_QS", "ZERO_ONE_QS", "UNIT_NORMAL_QS",
    "PLOT_DATASET_EXAMPLES", "NUM_TRAINING_STEPS", "DEVICE", "MODEL_SAVE_PATH", "MODEL_LOAD_PATH",
    "QS_FACTOR", "PLOT_RESULTS", "HMM_MU_INIT_SD", "HMM_SIGMA_INIT_VAL", "FILTER_SIZES", "FILTER_COUNTS",
    "STRIDES", "FLAT_OUTPUT", "DOUBLE_LEARNING", "PRIORITIZED_REPLAY", "TOTAL_REWARDS", "DISCOUNTED_REWARDS",
    "TMP_REWARDS", "DUELING", "LEARNING_RATE", "GOAL", "MAX_STEPS", "MAX_EPISODES", "EXPLORATION_STEPS",
    "PRIORITIZED_REPLAY_MAX_STEPS", "BUFFER_SIZE", "TARGET_NETWORK", "TARGET_NETWORK_SYNC",
    "ABSTRACT_ACTIONS", "DONES", "NUM_FRUITS", "REACHED_GOAL", "WEIGHT_DECAY", "EPS", "GMM_MU_INIT_SD",
    "GMM_SIGMA_INIT_VAL", "FULLY_CONV", "FREEZE_GMM_VAR", "FREEZE_GMM_PRIOR", "ANIMATE_LATENT",
    "NO_ENCODER_BETA_LOSS", "PSEUDO_GT_QS", "PLOT_ABSTRACTION_EXAMPLES", "PURITIES", "SIZES", "MEAN_PURITY",
    "INVERSE_PURITIES", "MEAN_INVERSE_PURITY", "TASK_INDEX", "TASK_LIST", "IGNORE_LIST", "EVAL_TOTAL_REWARDS",
    "EVAL_DISCOUNTED_TOTAL_REWARDS", "EVAL_NUM_STEPS", "USE_ADVANTAGES", "FINE_LABELS", "NUM_TASKS",
    "ZERO_ONE_QS_ALL_TASKS", "POLICY", "SOFTMAX", "INIT_TAU", "FINAL_TAU", "USE_LOG_ADVANTAGES",
    "HAND_STATES", "NEXT_HAND_STATES", "SIMULATOR", "ROBOT", "WORKSPACE", "HEIGHTMAP_SIZE", "NUM_OBJECTS",
    "ACTION_SEQUENCE", "NUM_PROCESSES", "NUM_SAMPLES", "SAVE_PATH", "NUM_ROTATIONS",
    "HAND_BITS", "OBS", "HAND_OBS", "NEXT_HAND_BITS", "NEXT_OBS", "NEXT_HAND_OBS", "STEPS_LEFT",
    "PRIORITIZED_BUFFER", "PRIORITIZED_BUFFER_EXPERT", "EXPERT_BUFFER", "BUFFER", "PATCH_SIZE",
    "ACTION_SPACE", "MARGIN", "MARGIN_L", "MARGIN_WEIGHT", "MARGIN_BETA", "DIVIDE_FACTOR", "PER_ALPHA",
    "INIT_EPS", "FINAL_EPS", "PER_EXPERT_EPS", "PER_EPS", "PER_BETA", "INIT_COEF", "FINAL_COEF",
    "TARGET_UPDATE_FREQ", "FIXED_EPS", "TRUE_RANDOM", "TRAINING_OFFSET", "TRAINING_ITERS",
    "BUFFER_TYPE", "EXPERT_FRACTION", "TOTAL_LOSS", "TD_ERROR", "ORIG_ACTIONS", "DATASET_LOAD_PATH",
    "TOTAL_VALID_LOSS", "ACCURACY", "VALID_ACCURACY", "VALIDATION_FREQ", "ADVS", "LOG_ADVS", "IS_OPT",
    "ALLOWED_TASKS", "DISCRETIZE_QS_NUM_BINS", "LIMIT", "POST_EPS", "NORMALIZE_STATE",
    "INITIALIZE_GMM_WITH_EMBEDDINGS", "USE_HEIGHT_HEURISTIC", "TASK_CLASSIFIER_LOAD_PATH",
    "TASK_CLASSIFIER_THRESHOLD", "GET_CUSTOM_LABELS", "LABELS_INTO_BINARY", "LABEL_MEAN_PURITY",
    "SCORES_LOAD_PATH", "NUM_EVAL_EPISODES", "MINMAX_VAR", "NUM_POSITIVE_ACTIONS", "NUM_NEGATIVE_ACTIONS",
    "ORIG_POSITIVE_ACTIONS", "MAX_Q_TOLERANCE", "BALANCE_LOSS", "BINARY_LABELS", "USE_BINARY_LABELS",
    "POSITIVE_LABELS", "AMBIGUOUS_LABELS", "POS_AMB_LABELS_LOAD_PATH", "AMB_QS", "USE_AMB_LABELS",
    "ALG", "FAKE_EXPERT", "GAMMA"
]

# all constants must be unique (a duplicate would silently collapse into one
# enum member otherwise)
assert len(constants) == len(set(constants))

# Build an Enum via the functional API whose member names and values are both
# the constant string, e.g. Constants.STATES.value == "STATES".
Constants = Enum("Constants", {
    c: c for c in constants
})
| StarcoderdataPython |
3239667 | <gh_stars>0
"""
This function is used to convert the passed video file into a audio file for further processing.
"""
def extract_audio(video_file_name, audio_output_file="audio.mp3", *args, **kwargs):
    """
    Extract the audio track of a video file into ``audio_output_file``.

    :param video_file_name: Path of the input video; its extension decides
        whether the format is considered supported.
    :param audio_output_file: Path of the audio file to write (default
        ``"audio.mp3"``).
    :return: None. Prints a message when the video format is unsupported.

    TODO:
    - Implement the actual extraction for the supported container formats.
    - Let the user choose different audio formats such as .wav, .mp3, etc.
    - Add requirements to requirements.txt using "pip freeze > requirements.txt"
    """
    # BUGFIX: the default value previously read ``audio.mp3`` (an undefined
    # name, not a string), which raised NameError as soon as the function
    # definition was evaluated on import.
    video_file_extensions = (".mp4", ".mkv", ".raw", ".mov", ".flv", ".wmv", ".avi", ".webm")
    if video_file_name.endswith(video_file_extensions):
        # Extraction itself is not implemented yet.
        pass
    else:
        print("Video file format not supported")
| StarcoderdataPython |
117943 |
# Reference : https://bigdatatinos.com/2016/02/08/using-spark-hdinsight-to-analyze-us-air-traffic/
import pyspark
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark.sql import SQLContext
import atexit
# Spark bootstrap: local mode using all cores; stop the context at exit.
sc = SparkContext('local[*]')
sqlc = SQLContext(sc)
atexit.register(lambda: sc.stop())
import csv
from pyspark.sql.types import *
#==============
# parquetFile operations: load the delayed-flights dataset and expose it to
# Spark SQL as a temporary table named "parquetFile".
parquetFile = sqlc.read.parquet('/Users/Shreeji/Desktop/SGNNN_INFORMS/Exp3/parquet_data/delayedflights.parquet')
parquetFile.registerTempTable("parquetFile");
#parquetFile.printSchema();
#1 select avg delay from flights group by day
sqlc.sql('SELECT DayOfWeek, AVG(DepDelayMinutes) AS Avg_Delay FROM parquetFile GROUP BY DayOfWeek').show(100)
#2 select avg delay from flights group by destination
sqlc.sql('SELECT DestCityName, AVG(DepDelayMinutes) AS Avg_Delay FROM parquetFile GROUP BY DestCityName').show(100)
#3 select avg delay from flights group by destination and by month
sqlc.sql('SELECT DestCityName, Month, AVG(DepDelayMinutes) AS Avg_Delay FROM parquetFile GROUP BY DestCityName, Month ORDER BY DestCityName, Month').show(100)
#4 Total number of flight cancelled group by year and month
sqlc.sql('SELECT Year, Month, COUNT(Cancelled) AS Total_Cancelled FROM parquetFile WHERE Cancelled=1 GROUP BY Year, Month ORDER BY Year, Month').show(100)
#==============
| StarcoderdataPython |
1777884 | # -*- coding: utf-8 -*-
"""@file isclose.py
Provides an implementation of the @c isclose() function
This function is found in the @c numpy library starting with version 1.7.0.
The MSEAS @c numpy library version is 1.5.1.
@author <NAME> (<EMAIL>)
"""
import numpy
def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
    r"""Implements @c numpy.isclose

    Returns a boolean array where two arrays are element-wise equal within
    a tolerance. The tolerance values are positive, typically very small
    numbers. For finite values, two floating point values are considered
    equivalent if

    @f[
    |a-b|\leq T_\text{abs}+T_\text{rel}|b|
    @f]

    where @f$T_\text{abs}@f$ and @f$T_\text{rel}@f$ are the absolute and
    relative tolerances, respectively. The above equation is not symmetric!

    @param a: Input array to compare
    @param b: Input array to compare
    @param rtol: Relative tolerance
    @param atol: Absolute tolerance
    @param equal_nan: If True, NaNs in @c a are considered equal to NaNs in
        @c b (matching the numpy.isclose keyword; the default False keeps the
        previous behaviour where NaN never compares close)
    @return A boolean array of where the arrays are equal within the given
        tolerance
    """
    close = numpy.abs(a - b) <= (atol + rtol * numpy.abs(b))
    if equal_nan:
        # Any comparison involving NaN is False, so NaN==NaN positions must
        # be patched in explicitly.
        close = close | (numpy.isnan(a) & numpy.isnan(b))
    return close
| StarcoderdataPython |
1763477 | import pytest
from keras.utils.test_utils import layer_test
from keras.utils.test_utils import keras_test
from keras import layers
@keras_test
def test_sine_relu():
    """Smoke-test the SineReLU layer for a few epsilon values."""
    for eps in (0.0025, 0.0035, 0.0045):
        layer_test(layers.SineReLU, kwargs={'epsilon': eps},
                   input_shape=(2, 3, 4))


if __name__ == '__main__':
    pytest.main([__file__])
| StarcoderdataPython |
3302293 | <gh_stars>0
from requests_html import HTMLSession
# Shared scraping session (requests_html fetches and parses the pages).
session = HTMLSession()

# TODO: further odds sources to support
# - https://oddspedia.com/
# - https://www.oddsportal.com/
def oddschecker(url):
    """Scrape the odds table from an oddschecker.com market page.

    Example: https://www.oddschecker.com/football/english/championship/brentford-v-fulham/winner

    :param url: Full URL of the market page to scrape.
    :return: Tuple ``(bookmakers, odds)``: bookmaker names and, per bookmaker
        column that produced values, a list of decimal odds (fractional odds
        such as "5/2" are converted and rounded to 2 dp).
    """
    response = session.get(url)
    container = response.html.find("#oddsTableContainer", first=True)
    table = container.find("table", first=True)

    # Find Bookmakers: the 4th header row holds one cell per bookmaker whose
    # link title carries the bookmaker's name; the first cell is skipped.
    table_header = table.find("thead", first=True)
    row = table_header.find("tr")[3]
    cells = row.find("td")[1:]
    bookmakers = []
    odds = []
    for cell in cells:
        link = cell.find("a", first=True)
        if link is None:
            continue
        bookmakers.append(link.attrs["title"])

    # Find odds: one body row per outcome, one cell per bookmaker column.
    table_body = table.find("tbody", first=True)
    rows = table_body.find("tr")
    options = []
    for row in rows:
        options.append(row.find("td")[1:])
    # Transpose: collect column i (one bookmaker) across all outcome rows.
    for i in range(len(options[0])):
        book = []
        for j in range(0, len(options)):
            book.append(options[j][i])
            # options[0][i], options[1][i], options[2][i]
        float_book = []
        for cell in book:
            if cell.text == "":
                continue
            print("Found Cell: "+cell.text)
            # Fractional odds like "5/2" become 2.5; whole numbers pass through.
            text = cell.text.split("/")
            try:
                if len(text) == 2:
                    float_book.append(round(int(text[0])/int(text[1])*100)/100)
                elif len(text) == 1:
                    float_book.append(int(text[0]))
                else:
                    print(text)
            # NOTE(review): bare except silently swallows parsing errors —
            # consider narrowing to ValueError/ZeroDivisionError.
            except:
                print(text)
        if len(float_book) > 0:
            odds.append(float_book)
    return bookmakers, odds
| StarcoderdataPython |
1771829 | # Generated by Django 2.2.13 on 2020-09-17 01:03
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds `language` and `mod_date` fields
    # to the BookVersion model. Do not edit migrations that have been applied.
    # NOTE(review): max_length is not enforced on TextField — confirm whether
    # CharField was intended for `language`.

    dependencies = [
        ('library', '0020_auto_20200904_1200'),
    ]

    operations = [
        migrations.AddField(
            model_name='bookversion',
            name='language',
            field=models.TextField(default='en', max_length=2),
        ),
        migrations.AddField(
            model_name='bookversion',
            name='mod_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
| StarcoderdataPython |
1643910 | <reponame>rheinwerk-verlag/planning-poker
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from django.core.asgi import get_asgi_application
import planning_poker.routing
# ASGI entry point: plain HTTP goes to Django, while websocket connections
# pass through the auth middleware to the planning_poker URL patterns.
application = ProtocolTypeRouter({
    'http': get_asgi_application(),
    'websocket': AuthMiddlewareStack(URLRouter(planning_poker.routing.websocket_urlpatterns)),
})
| StarcoderdataPython |
1796794 | #831162.py
from graphics import *
def makeWindow(size):
    """Create a size*100 by size*100 pixel graphics window."""
    return GraphWin("Collection of patches", size*100, size*100)
def main():
    """Entry point: ask for size/colours, open the window, run the editor."""
    size, myColours = getInputs()
    win = makeWindow(size)
    displayAndMovePatches(win, size, myColours)
# triangle function for penultimate patch
def triangle(win, p1, p2, p3, colour, colourLine):
    """Draw a filled, outlined triangle on *win* and return the shape."""
    shape = Polygon(p1, p2, p3)
    shape.setOutline(colourLine)
    shape.setFill(colour)
    shape.draw(win)
    return shape
# penultimate patch
def drawPatchP(win ,x , y, colour):
    """Draw the 'penultimate' 100x100 triangle patch at window offset (x, y).

    Returns (patternList, colour, "P") so the patch can be undrawn and
    swapped later.
    """
    patternList = []
    # assign a type so i can make decisions on the positions/colours later
    TypeP = "P"
    for i in range(0, 100, 20):
        # making the half triangles on second and fourth rows
        if i == 20 or i == 60:
            for j in range(0, 110, 20):
                if j == 0:
                    # left border: half triangle clipped at the patch edge
                    shape = triangle(win, Point(j + x, i + y ),
                                     Point(j + x + 10, i + y), Point(j + x, i + 20 + y),
                                     colour, colour)
                elif j - 10 == 90:
                    # right border: mirrored half triangle
                    shape = triangle(win, Point(j + x - 10, i + y),
                                     Point(j + x, i + y), Point(j + x, i + 20 + y),
                                     colour, colour)
                else:
                    # interior: full downward-pointing triangle
                    shape = triangle(win, Point(j + x - 10, i + y),
                                     Point(j + x + 10, i + y), Point(j + x, i + 20 + y),
                                     colour, colour)
                patternList.append(shape)
        # making first, third and fifth rows
        else:
            for k in range(0, 100, 20):
                shape = triangle(win, Point(k + x, i + y),
                                 Point(k + x + 20, i + y), Point(k + x + 10, i + 20 + y),
                                 colour, colour)
                # appending all my shapes in a list so i can use them later
                patternList.append(shape)
    return patternList, colour, TypeP
# line function for the final patch
def drawLine(win, p1, p2, colour):
    """Draw a coloured line segment from p1 to p2 on *win* and return it."""
    segment = Line(p1, p2)
    segment.setFill(colour)
    segment.draw(win)
    return segment
# final patch
def drawPatchF(win, x, y, colour):
    """Draw the 'final' 100x100 line-fan patch at window offset (x, y).

    Returns (patternList, colour, "F") so the patch can be undrawn and
    swapped later.
    """
    # assign a type so i can make decisions on the positions/colours later
    TypeF = "F"
    patternList = []
    # fan of lines sweeping from the top edge to the bottom edge
    for i in range(0, 110, 10):
        line = drawLine(win, Point(x + i, y + 0), Point(x + 100 - i, y + 100),
                        colour)
        patternList.append(line)
    # fan of lines sweeping from the right edge to the left edge
    for j in range(10, 100, 10):
        line = drawLine(win, Point(100 + x, j + y), Point(0 + x, 100 - j + y),
                        colour)
        # appending all my shapes in a list so i can use them later
        patternList.append(line)
    return patternList, colour, TypeF
def displayAndMovePatches(win, size, myColours):
    """Draw the size*size grid of patches, then let the user swap patches.

    Each grid cell above the diagonal (i < j) gets a 'P' patch, the rest get
    'F' patches. Two clicks swap the clicked patches (or toggle the type when
    both clicks land on the same cell). Runs forever until the window closes.
    """
    patchworks = []
    # displaying all the patches in the antepenultimate disposition
    for i in range(size):
        for j in range(size):
            # rotate the colour list so the three colours cycle over the grid
            colour = myColours[0]
            myColours.remove(colour)
            myColours.append(colour)
            if i < j:
                patchworks.append(drawPatchP(win, j*100, i*100, colour))
            else:
                patchworks.append(drawPatchF(win, j* 100, i*100, colour))
    # making a while true loop to make the user swap as many patches as he wishes
    while True:
        mouse = win.getMouse()
        mouse2 = win.getMouse()
        # changing the x and y coordinates of the clicks so I can use them as indexes for my lists
        mouseCoords = Point(int(mouse.getX()/100), int(mouse.getY()/100))
        mouseCoords2 = Point(int(mouse2.getX()/100), int(mouse2.getY()/100))
        #first click index
        patternIndex = mouseCoords.getY() * size + mouseCoords.getX()
        # second click index
        patternIndex2 = mouseCoords2.getY() * size + mouseCoords2.getX()
        # extract patch, colour and type values from the list at index mouse click
        pattern, colour, Type1 = patchworks[int(patternIndex)] # first
        pattern2, colour2, Type2 = patchworks[int(patternIndex2)] # second
        patterns = pattern + pattern2
        for shape in patterns: # undraw the patches from the click positions
            shape.undraw()
        # swapping patches from same position
        if patternIndex == patternIndex2: # if the clicks are in the same
            # 100*100 square
            if Type1 == "P": # if the click is on a penultimate patch make it
                # final and keep the colour
                patchwork1 = drawPatchF(win, mouseCoords.getX()* 100,
                                        mouseCoords.getY()*100, colour2)
            else: # else if the click is on a final patch make it penultimate
                # and keep the colour
                patchwork1 = drawPatchP(win, mouseCoords2.getX()* 100,
                                        mouseCoords2.getY()*100, colour2)
            # Update patternList with patchwork swap
            patchworks[int(patternIndex)] = patchwork1 # insert this patch at first index
        # if the clicked patches are in different positions swap them and keep their original colour
        else:
            if Type2 == "P":
                patchwork1 = drawPatchP(win, mouseCoords.getX()* 100,
                                        mouseCoords.getY()*100, colour2)
            else:
                patchwork1 = drawPatchF(win, mouseCoords.getX()* 100,
                                        mouseCoords.getY()*100, colour2)
            if Type1 == "P":
                patchwork2 = drawPatchP(win, mouseCoords2.getX()* 100,
                                        mouseCoords2.getY()*100, colour)
            else:
                patchwork2 = drawPatchF(win, mouseCoords2.getX()* 100,
                                        mouseCoords2.getY()*100, colour)
            # Update patternList with both patchwork changes
            patchworks[int(patternIndex)] = patchwork1
            patchworks[int(patternIndex2)] = patchwork2
def getInputs():
    """Prompt for a patchwork size and three distinct patch colours.

    :return: Tuple ``(size, myColours)`` where ``size`` is one of 5/7/9/11
        and ``myColours`` is a list of three distinct valid colour names.
    """
    validSizes = [5, 7, 9, 11]  # make the user use these values only
    while True:  # for as long as the user inserts invalid sizes
        size = str(input("Enter the size of the patch, the valid sizes are 5, 7, 9, 11: "))
        if not size.isnumeric():  # if the input is not a number
            print("Please enter a whole number.")
            # return control to the top of the loop so that a non-numeric
            # value never reaches the int() conversion below
            continue
        # Convert exactly once (the original converted twice, redundantly).
        size = int(size)
        if size not in validSizes:
            print("Non valid size.")
        else:
            break  # valid size entered: leave the loop
    validColours = ["red", "green", "blue", "magenta", "cyan", "orange", "brown",
                    "pink"]  # make only these colours available for the patches
    myColours = []
    colourCount = 0
    # making the user choose 3 colours only
    while colourCount != 3:
        coloursString = ", ".join(validColours)
        colour = input("Enter the colours of the patch, valid colours are {0}: ".
                       format(coloursString))
        # remove chosen colour from the available colours and add it to the
        # colour list used to draw the patches
        if colour in validColours and colour not in myColours:
            myColours.append(colour)
            validColours.remove(colour)
            colourCount += 1
        else:
            # Previously an invalid or duplicate choice was ignored silently;
            # tell the user why the prompt is repeating.
            print("Non valid colour.")
    return size, myColours
| StarcoderdataPython |
182350 | # RT - NickName Panel
from __future__ import annotations
from discord.ext import commands
import discord
from rtlib.common.json import loads
from rtutil.utils import is_json, replace_nl
from rtutil.content_data import ContentData
from rtutil.panel import extract_emojis
from core import Cog, RT, t
from data import NO_MORE_SETTING, FORBIDDEN
from .__init__ import FSPARENT
from .role import RolePanel
class NickNamePanelEventContext(Cog.EventContext):
    "Event context dispatched when a nickname is set through the nickname panel."

    member: discord.Member
    nickname: str
nickname: str
class NickNamePanelView(discord.ui.View):
    "The view holding the nickname panel's select menu."

    def __init__(self, cog: NickNamePanel, *args, **kwargs):
        self.cog = cog
        super().__init__(*args, **kwargs)

    @discord.ui.select(placeholder="Set nickname", custom_id="nickpanel.select")
    async def select_nickname(self, interaction: discord.Interaction, select: discord.ui.Select):
        # Apply the selected nickname to the member who used the menu.
        assert isinstance(interaction.user, discord.Member)
        nickname = select.values[0]
        # A leading "+" means "append to the current display name" instead of
        # replacing the nickname outright.
        nickname = f"{interaction.user.display_name}{nickname[1:]}" \
            if nickname.startswith("+") else nickname
        error = None
        try:
            await interaction.user.edit(nick=nickname)
        except discord.Forbidden:
            # The bot lacks permission to rename this member.
            await interaction.response.send_message(t(FORBIDDEN, interaction), ephemeral=True)
            error = FORBIDDEN
        else:
            await interaction.response.send_message("Ok", ephemeral=True)
        # Report the outcome through RT's event log.
        self.cog.bot.rtevent.dispatch("on_nickname_panel", NickNamePanelEventContext(
            self.cog.bot, interaction.guild, self.cog.detail_or(error), {
                "ja": "ニックネームパネル", "en": "Nickname Panel"
            }, self.cog.text_format({
                "ja": "対象:{name}\nニックネーム:{nickname}",
                "en": "Target: {name}\nNickname: {nickname}"
            }, name=self.cog.name_and_id(interaction.user), nickname=nickname),
            self.cog.nickname_panel, error, member=interaction.user, nickname=nickname
        ))
class NickNamePanel(Cog):
    "Cog implementing the nickname panel."

    def __init__(self, bot: RT):
        self.bot = bot

    @commands.Cog.listener()
    async def on_setup(self):
        # Re-register the persistent view so existing panels survive restarts.
        self.bot.add_view(NickNamePanelView(self, timeout=None))

    @commands.command(
        aliases=("nickpanel", "np", "ニックネームパネル", "ニックパネル", "にぱ"), fsparent=FSPARENT,
        description="Create a panel to change nicknames. You can create a panel for setting nicknames."
    )
    @discord.app_commands.describe(
        title="The title of the panel.", content="Nicknames separated by `<nl>`."
    )
    async def nickname_panel(self, ctx: commands.Context, title: str, *, content: str):
        if not isinstance(ctx.channel, discord.TextChannel):
            return await ctx.reply(t(dict(
                ja="このコマンドはテキストチャンネル限定です。",
                en="This command is limited to text channels."
            ), ctx))
        # If `content` is a `Get content` JSON payload, extract the embed
        # description from it instead.
        if is_json(content):
            data: ContentData = loads(content)
            content = data["content"]["embeds"][0]["description"]
        content = replace_nl(content)
        # Discord select menus allow at most 25 options.
        if len(nicknames := extract_emojis(content)) > 25:
            return await ctx.reply(t(NO_MORE_SETTING, ctx))
        # Build the view.
        view = NickNamePanelView(self, timeout=0)
        view.select_nickname.placeholder = t(dict(
            ja="ニックネームを設定する", en="Set nickname"
        ), ctx)
        embed = discord.Embed(title=title, description="", color=ctx.author.color)
        assert isinstance(embed.description, str)
        # Add each emoji/nickname pair to both the view and the embed.
        for emoji, nickname in nicknames.items():
            raw = nickname
            if nickname.startswith("+"):
                nickname = nickname[1:]
            view.select_nickname.add_option(label=nickname, value=raw, description=t(dict(
                ja="あなたの名前の後ろにこれを付けます。",
                en="Add this string after your name."
            ), ctx) if raw[0] == "+" else t(dict(
                ja="ニックネームを変更します。", en="Change nickname."
            ), ctx), emoji=emoji)
            embed.description += f"{emoji} {nickname}\n"
        embed.description = embed.description[:-1]  # drop the trailing newline
        # Brand the embed as RT's.
        embed.set_footer(text=t(dict(ja="RTのニックネームパネル", en="RT's Nickname Panel"), ctx))
        assert isinstance(self.bot.cogs["RolePanel"], RolePanel)
        await self.bot.cogs["RolePanel"].reply(ctx, embed=embed, view=view)
# Attach help text (headline, argument docs, notes) to the nickname_panel
# command; the ja/en strings are user-facing and must stay as-is.
(Cog.HelpCommand(nickname_panel)
    .merge_description("headline", ja="ニックネームパネルを作ります。ニックネームを設定するためのパネルを作れます。")
    .add_arg("title", "str",
        ja="ニックネームパネルに設定するタイトルです。", en="The title of the panel.")
    .add_arg("content", "str",
        ja="""改行か`<nl>`または`<改行>`で区切ったニックネームです。
ニックネームの最初に`+`を付けることで、丸ごとニックネームを変更するのではなく、ニックネームを名前に後付けするようにすることができます。""",
        en="""A nickname separated by a newline or `<nl>` or `<nl>`.
You can add a `+` at the beginning of the nickname to make the nickname follow the name instead of changing the nickname in its entirety.""")
    .set_extra("Notes",
        ja="""`Get content`を使って取得したコードで他のパネルの内容をコピーすることができます。
また、`rt!`形式でのコマンドをニックネームパネルに返信して実行した場合、そのパネルを新しい内容に上書きすることができます。""",
        en="""You can copy the contents of other panels with the code obtained using `Get content`.
Also, if you execute a command in the form `rt!` in reply to a nick panel, you can overwrite that panel with the new content."""))
async def setup(bot: RT) -> None:
    """Standard discord.py extension entry point: register the cog."""
    await bot.add_cog(NickNamePanel(bot))
1755339 | #!/usr/bin/env python3
"""Today Holiday?
Uses third party library holidays.
`pip install holidays`
"""
import holidays
from datetime import date
def main() -> str:
    """Determine if today is a US Holiday.

    :return: The holiday's name, or a fallback message when today is not one.
    """
    current = date.today()
    calendar = holidays.UnitedStates(years=current.year)
    if current in calendar:
        return calendar[current]
    return "Not a holiday today."


if __name__ == "__main__":
    print(main())
| StarcoderdataPython |
192129 | <gh_stars>0
# Generated by Django 2.2 on 2020-06-07 14:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the Review model and the
    # Choice model (foreign key to Review). Do not edit applied migrations.

    dependencies = [
        ('rate', '0005_project_account'),
    ]

    operations = [
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('criteria', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_name', models.CharField(max_length=30)),
                ('votes', models.IntegerField(default=0)),
                ('criteria', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rate.Review')),
            ],
        ),
    ]
| StarcoderdataPython |
3368172 | <reponame>izzatnadzmi/ConcentricTubeRobot
"""
Simulate a CTR following a 3D trajectory
Author: <NAME>
Adapted from code by Python Robotics, <NAME> (daniel-s-ingram)
"""
from math import cos, sin
import numpy as np
import time
import sys
sys.path.append("../")
sys.path.append("./ConcentricTubeRobot/")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
# from ConcentricTubeRobot.CTR_model import moving_CTR
from CTRmodel import moving_CTR, plot_3D
from TrajectoryGenerator import TrajectoryGenerator, TrajectoryRetreiver
from controller import Jacobian
show_animation = True
def CTR_sim(a1_c, a2_c, a3_c, total_time, q_start, q_end):
    """
    Calculates the necessary thrust and torques for the quadrotor to
    follow the trajectory described by the sets of coefficients
    x_c, y_c, and z_c.

    NOTE(review): the docstring above (kept from the adapted quadrotor code)
    does not match the body, which resolves a CTR joint trajectory via a
    Jacobian pseudo-inverse — confirm and reword.
    """
    runtime = time.time()
    # NOTE(review): this shadows the `total_time` argument — confirm intentional.
    total_time = 5 # (seconds)
    dt = 0.1
    time_stamp = int(total_time/dt)  # number of simulation steps
    t = dt
    i = 1
    jac_del_q = np.ones(6) * 1e-1  # finite-difference step for the Jacobian
    uz_0 = np.array([[0, 0, 0]]).transpose()
    model = lambda q, uz_0: moving_CTR(q, uz_0)
    q_des_pos = np.zeros((6, time_stamp)) # [BBBaaa]
    x_des_pos = np.zeros((3, time_stamp)) # [r]
    x_cur_pos = np.zeros((3, time_stamp)) # [r]
    delta_q = np.zeros(6) # [BBBaaa]
    delta_x = np.zeros(3) # [r]
    quintic = TrajectoryRetreiver()
    q_des_pos[:, 0] = q_start
    while i < time_stamp:
        # runtime = time.time()
        x = np.zeros(3) # just for size TODO: change to just integer
        # Sample the quintic trajectory at time t for each Cartesian axis.
        x_des_pos[0, i] = quintic.calculate_position(a1_c[0], t)
        x_des_pos[1, i] = quintic.calculate_position(a2_c[0], t)
        x_des_pos[2, i] = quintic.calculate_position(a3_c[0], t)
        # print('t:', t)
        # print('i:', i)
        # print(alpha_position(t, total_time))
        # Cartesian error between desired point and last achieved tip position.
        delta_x = x_des_pos[:, i] - x_cur_pos[:, i-1]
        # print('delta_x', delta_x)
        # get trajectory from Jacobian: pseudo-inverse maps Cartesian error
        # to a joint-space step.
        r_jac = Jacobian(jac_del_q, x, q_des_pos[:, i-1].flatten(), uz_0, model)
        J_inv = r_jac.p_inv()
        delta_q = J_inv @ delta_x
        # print('delta_q', delta_q)
        q_des_pos[:, i] = q_des_pos[:, i-1] + delta_q * dt
        (r1,r2,r3,Uz) = model(q_des_pos[:, i], uz_0) # FORWARD KINEMATICS
        # Tip position of tube 1 is the controlled end-effector point.
        x_cur_pos[:, i] = r1[-1]
        # print(i, time.time()-runtime)
        t += dt
        i += 1
    print("Done", time.time()-runtime)
    print('x_des_pos[:, 1]:', x_des_pos[:, 1])
    print('x_des_pos[:, -1]:', x_des_pos[:, -1])
    print('q_des_pos[:, 0]:', q_des_pos[:, 0])
    print('q_des_pos[:, -1]:', q_des_pos[:, -1])
    # 3D plot: achieved tip positions coloured by time, desired trajectory,
    # and the full robot shape at the start and final configurations.
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    colors = cm.rainbow(np.linspace(0, 1, len(x_cur_pos.transpose())))
    for y, c in zip(x_cur_pos.transpose(), colors):
        # plt.scatter(x, y, color=c)
        ax.scatter(y[0], y[1], y[2], linewidth=1, color=c)
    # ax.plot3D(x_des_pos[0], x_des_pos[1], x_des_pos[2], linewidth=1, label='x_des_pos')
    ax.scatter(x_des_pos[0], x_des_pos[1], x_des_pos[2], linewidth=1, label='x_des_pos', marker='.')
    (r1,r2,r3,Uz) = moving_CTR(q_des_pos[:, 1], uz_0)
    plot_3D(ax, r1, r2, r3, 'start position')
    (r1,r2,r3,Uz) = moving_CTR(q_des_pos[:, -1], uz_0)
    plot_3D(ax, r1, r2, r3, 'final position')
    ax.legend()
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z ')
    # plt.axis('equal')
    # ax.set_aspect('equal')
    # # Create cubic bounding box to simulate equal aspect ratio
    # max_range = 0.2 # np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max()
    # Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(0) # X.max()+X.min())
    # Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(0) # Y.max()+Y.min())
    # Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(0.3) # Z.max()+Z.min())
    # # Comment or uncomment following both lines to test the fake bounding box:
    # for xb, yb, zb in zip(Xb, Yb, Zb):
    #     ax.plot([xb], [yb], [zb], 'w')
    plt.show()
def main():
    """Plan and simulate a straight CTR tip trajectory.

    Builds a start and end joint configuration (tube extensions fixed, all
    tube rotations advanced by 0.2 rad), converts both to task space via the
    forward kinematics, fits polynomial trajectory coefficients between the
    two tip positions, and runs the resolved-rate simulation CTR_sim.
    """
    # all B (0 -> 0), all alpha (0 -> 2pi/3)
    a_ans = (2*np.pi)/3
    # q = [B1, B2, B3, alpha1, alpha2, alpha3]; tiny non-zero extensions avoid singularities
    q_start = np.array([0.0001, 0.0001, 0.0001, a_ans, a_ans, a_ans])
    q_end = np.array([0.0001, 0.0001, 0.0001, a_ans + 0.2, a_ans + 0.2, a_ans + 0.2])
    # initial torsion state for the forward model (one value per tube)
    uz_0 = np.array([[0, 0, 0]]).transpose()
    (r1,r2,r3,Uz) = moving_CTR(q_start, uz_0)
    # tip position = last backbone point of the innermost tube
    x_cur_pos = r1[-1]
    (r1e,r2e,r3e,Uze) = moving_CTR(q_end, uz_0)
    x_end_pos = r1e[-1]
    # waypoints = [[0.0, 0.0, 0.0], [a_ans, a_ans, a_ans]]
    waypoints = [x_cur_pos, x_end_pos]
    a1_coeffs = []
    a2_coeffs = []
    a3_coeffs = []
    total_time = 5
    # NOTE(review): the modulo wraps the last waypoint back to the first, so two
    # segments are generated (go and return) — confirm the return leg is wanted.
    for x in range(len(waypoints)):
        traj = TrajectoryGenerator(waypoints[x], waypoints[(x + 1) % len(waypoints)], total_time)
        traj.solve()
        a1_coeffs.append(traj.x_c)
        a2_coeffs.append(traj.y_c)
        a3_coeffs.append(traj.z_c)
    print('START x_cur_pos:', x_cur_pos)
    print('END x_end_pos:', x_end_pos)
    CTR_sim(a1_coeffs, a2_coeffs, a3_coeffs, total_time, q_start, q_end)
    print('START q_start:', q_start)
    print('END q_end:', q_end)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3322133 | <gh_stars>0
class NTI_Elev:
    """A student at NTI, described by name, age and sex."""

    def __init__(self, name, age, sex):
        self.name, self.age, self.sex = name, age, sex

    def __str__(self):
        """Return a one-line Swedish description of the student."""
        description = f"NTI eleven heter {self.name}, de är {self.age} år gammal och är en {self.sex}."
        return description

    def return_name(self):
        """Return the student's name."""
        return self.name
class ITaren(NTI_Elev):
    """An IT-programme student: an NTI_Elev with an extra cleanness attribute."""

    def __init__(self, name, age, sex, cleanness):
        super().__init__(name, age, sex)
        self.cleanness = cleanness

    def __str__(self):
        """Extend the base description with the cleanness remark."""
        base_description = super().__str__()
        return base_description + f" {self.name} är {self.cleanness} ren"
def main():
    """Create two ITaren students and print their descriptions."""
    person1 = ITaren("Armand", "17", "Man", "Helt ok")
    person2 = ITaren("Bryan", "18", "Man", "inte")
    students = [person1, person2]
    # BUG fix: the original loop variable was named 'ITaren', shadowing the
    # class of the same name; renamed to avoid the rebinding.
    for student in students:
        print(student)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1643743 | <reponame>TUDelft-CITG/Hydraulic-Infrastructure-Realisation
"""Test package."""
import shapely.geometry
import simpy
import openclsim.core as core
import openclsim.model as model
import openclsim.plugins as plugins
from .test_utils import assert_log
def test_delay_plugin():
    """Test the delay plugin.

    Builds a minimal dredging scenario (a source site, a destination site and
    one hopper), wraps every activity type with plugins.HasDelayPlugin at a
    10% delay, runs the simulation until the destination container is full,
    and checks the final simulation time plus the recorded logs.
    """
    simulation_start = 0
    my_env = simpy.Environment(initial_time=simulation_start)
    registry = {}
    # Compose the domain classes dynamically from openclsim core mixins.
    Site = type(
        "Site",
        (
            core.Identifiable,
            core.Log,
            core.Locatable,
            core.HasContainer,
            core.HasResource,
        ),
        {},
    )
    TransportProcessingResource = type(
        "TransportProcessingResource",
        (
            core.Identifiable,
            core.Log,
            core.ContainerDependentMovable,
            core.Processor,
            core.HasResource,
            core.LoadingFunction,
            core.UnloadingFunction,
        ),
        {},
    )
    # Each activity type is wrapped with the delay plugin under test.
    DelaySequenceActivity = type(
        "TestShiftActivity", (plugins.HasDelayPlugin, model.SequentialActivity), {}
    )
    DelayWhileActivity = type(
        "TestShiftActivity", (plugins.HasDelayPlugin, model.WhileActivity), {}
    )
    DelayMoveActivity = type(
        "TestMoveActivity", (plugins.HasDelayPlugin, model.MoveActivity), {}
    )
    DelayShiftActivity = type(
        "TestShiftActivity", (plugins.HasDelayPlugin, model.ShiftAmountActivity), {}
    )
    DelayBasicActivity = type(
        "TestShiftActivity", (plugins.HasDelayPlugin, model.BasicActivity), {}
    )
    # Geographic locations (lon, lat) near the Dutch coast.
    location_from_site = shapely.geometry.Point(4.18055556, 52.18664444)
    location_to_site = shapely.geometry.Point(4.25222222, 52.11428333)
    # Source starts full (12 units), destination starts empty.
    from_site = Site(
        env=my_env,
        name="Winlocatie",
        ID="6dbbbdf4-4589-11e9-a501-b469212bff5d",
        geometry=location_from_site,
        capacity=12,
        level=12,
    )
    to_site = Site(
        env=my_env,
        name="Dumplocatie",
        ID="6dbbbdf5-4589-11e9-82b2-b469212bff5c",
        geometry=location_to_site,
        capacity=12,
        level=0,
    )
    # One hopper with capacity 4: three full round trips empty the source.
    hopper = TransportProcessingResource(
        env=my_env,
        name="Hopper 01",
        ID="6dbbbdf6-4589-11e9-95a2-b469212bff5b",
        geometry=location_from_site,
        loading_rate=1,
        unloading_rate=1,
        capacity=4,
        compute_v=lambda x: 10,
    )
    # One round trip: sail empty -> load -> sail full -> unload -> no-op,
    # every step delayed by 10%.
    single_run = [
        DelayMoveActivity(
            env=my_env,
            name="sailing empty",
            ID="6dbbbdf7-4589-11e9-bf3b-b469212bff5d",
            registry=registry,
            mover=hopper,
            destination=from_site,
            delay_percentage=10,
        ),
        DelayShiftActivity(
            env=my_env,
            name="Transfer MP",
            ID="6dbbbdf7-4589-11e9-bf3b-b469212bff52",
            registry=registry,
            processor=hopper,
            origin=from_site,
            destination=hopper,
            amount=4,
            duration=10,
            delay_percentage=10,
        ),
        DelayMoveActivity(
            env=my_env,
            name="sailing filler",
            ID="6dbbbdf7-4589-11e9-bf3b-b469212bff5b",
            registry=registry,
            mover=hopper,
            destination=to_site,
            delay_percentage=10,
        ),
        DelayShiftActivity(
            env=my_env,
            name="Transfer TP",
            ID="6dbbbdf7-4589-11e9-bf3b-b469212bff54",
            registry=registry,
            processor=hopper,
            origin=hopper,
            destination=to_site,
            amount=4,
            duration=10,
            delay_percentage=10,
        ),
        DelayBasicActivity(
            env=my_env,
            name="Basic activity",
            ID="6dbbbdf7-4589-11e9-bf3b-b469212bff5h",
            registry=registry,
            duration=0,
            additional_logs=[hopper],
            delay_percentage=10,
        ),
    ]
    activity = DelaySequenceActivity(
        env=my_env,
        name="Single run process",
        ID="6dbbbdf7-4589-11e9-bf3b-b469212bff60",
        registry=registry,
        sub_processes=single_run,
        delay_percentage=10,
    )
    # Repeat the round trip until the destination container reports "full".
    while_activity = DelayWhileActivity(
        env=my_env,
        name="while",
        ID="6dbbbdf7-4589-11e9-bf3b-b469212bff5g",
        registry=registry,
        sub_processes=[activity],
        condition_event=[{"type": "container", "concept": to_site, "state": "full"}],
        delay_percentage=10,
    )
    model.register_processes([while_activity])
    my_env.run()
    # Regression value: total simulated time including all stacked delays.
    assert my_env.now == 6354.357654924601
    assert_log(while_activity)
    assert_log(hopper)
    assert_log(from_site)
    assert_log(to_site)
| StarcoderdataPython |
199941 | <filename>u_base/u_file.py
#!/usr/bin/python
# -*- coding: utf-8 -*
# file function
import os
import time
import json
import re
import urllib.parse
import requests
from PIL import Image
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
import u_base.u_log as log
__all__ = [
'convert_windows_path',
'get_file_name_from_url',
'covert_url_to_filename',
'get_abs_cache_path',
'ready_dir',
'get_content_with_cache',
'get_content',
'get_json',
'read_content',
'read_file_as_list',
'write_content',
'download_file',
'download_files_with_pool',
'convert_image_format',
'get_all_sub_files_with_cache',
'get_all_sub_files',
'cache_json',
'dump_json_to_file',
'load_json_from_file',
'extract_init_json_data',
'COMMON_USER_AGENT',
'm_get'
]
COMMON_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
'Chrome/96.0.4664.45 Safari/537.36'
COMMON_HEADERS = {
'user-agent': COMMON_USER_AGENT
}
def convert_windows_path(path):
    """Replace characters that are illegal in Windows file names with '-'.

    Consecutive runs of illegal characters collapse into a single dash.

    :param path: original path string
    :return: Windows-compatible string
    """
    illegal_run = r"[\\/?*<>|\":]+"
    return re.sub(illegal_run, '-', path)
def get_file_name_from_url(url):
    """Extract the URL-decoded file name from a url that ends with one.

    :param url: url whose last path segment is a file name
    :return: decoded file name
    """
    return urllib.parse.unquote(os.path.basename(url))
def covert_url_to_filename(url, with_domain=True, with_path=True):
    """Turn a url into a Windows-safe file name (used for cache files).

    :param url: source url
    :param with_domain: include the host name in the result
    :param with_path: include the request path in the result
    :return: sanitized file name (the query string is always appended)
    """
    parts = urllib.parse.urlsplit(url)
    pieces = []
    if with_domain:
        pieces.append(parts.netloc)
    if with_path:
        pieces.append(parts.path)
    pieces.append(parts.query)
    return convert_windows_path(''.join(pieces))
def get_abs_cache_path():
    """Return the absolute path of the 'cache' directory under the current working directory."""
    return os.path.join(os.getcwd(), 'cache')
def ready_dir(file_path: str):
    """Ensure the parent directory of *file_path* exists, creating it if needed.

    :param file_path: path to a file (must not itself be a directory path)
    :return: None
    """
    parent = os.path.dirname(file_path)
    if os.path.isdir(parent):
        return
    log.info('the file path is not exist. create: {}'.format(parent))
    os.makedirs(parent)
def get_content_with_cache(url: str, cache_file: str = None, use_cache=True, encoding=None, **kwargs):
    """Fetch *url* content, backed by a file cache.

    :param url: url to fetch
    :param cache_file: cache file path; derived from the url when omitted
    :param use_cache: read from / populate the cache
    :param encoding: response encoding override, passed to get_content
    :return: page content string, or False on fetch failure
    """
    if use_cache:
        # no cache file given: derive one from the url
        if cache_file is None:
            cache_file = os.path.join(get_abs_cache_path(), covert_url_to_filename(url))
            cache_file = cache_file + '.txt'
        # cache hit: return the cached content directly
        if os.path.isfile(cache_file):
            log.info('load content from cache: {}'.format(cache_file))
            return read_content(cache_file)
    html_content = get_content(url, encoding, **kwargs)
    # BUG fix: with use_cache=False and no cache_file, the original called
    # ready_dir(None) and crashed; only write when a cache file is known.
    if html_content and cache_file:
        ready_dir(cache_file)
        write_content(cache_file, html_content)
    return html_content
def get_content(path, encoding=None, retry=0, **kwargs):
    """Read content from a local file path or fetch it from a url.

    :param path: local file path or url
    :param encoding: response encoding override (urls only)
    :param retry: number of retries on fetch failure
    :return: content string, or False on failure / empty path
    """
    if not path:
        return False
    # if path is a local file, read from the file directly
    if os.path.isfile(path):
        log.info('read content from file: {}'.format(path))
        fin = open(path, 'r', encoding='UTF-8')
        html_content = fin.read()
        fin.close()
        return html_content
    try:
        log.info('begin get info from web url: ' + path)
        # merge the common headers with any caller-supplied headers
        default_headers = {}
        default_headers.update(COMMON_HEADERS)
        if kwargs.get('headers') is not None:
            default_headers.update(kwargs.get('headers'))
        kwargs['headers'] = default_headers
        response = requests.get(path, timeout=60, **kwargs)
        if encoding is not None:
            response.encoding = encoding
        log.info('end get info from web url: ' + path)
        # 4xx responses are returned as-is; other error codes raise (and retry)
        if not (400 <= response.status_code < 500):
            response.raise_for_status()
        if response.text is None or response.text == '':
            log.error('The response text is empty.')
        return response.text
    except Exception as e:
        log.error('get url content error. url: {}, error: {}'.format(path, e))
        if retry > 0:
            # recurse with one fewer retry remaining
            log.info('retry get content. left times: {}'.format(retry - 1))
            return get_content(path, encoding, retry - 1, **kwargs)
        log.info('get content failed. {}'.format(e))
        return False
def get_json(url, params=None, headers=None, **kwargs) -> dict:
    """GET *url* and parse the response body as JSON.

    The request is retried once on any exception; TLS verification is disabled.

    :param url: url
    :param params: query parameters
    :param headers: extra headers merged over the common defaults
    :return: parsed JSON
    """
    merged_headers = dict(COMMON_HEADERS)
    if headers is not None:
        merged_headers.update(headers)
    try:
        response = requests.get(url, params=params, headers=merged_headers, verify=False, **kwargs)
    except Exception as e:
        log.warn('request error and try again. {}'.format(e))
        response = requests.get(url, params=params, headers=merged_headers, verify=False, **kwargs)
    return json.loads(response.text)
def read_content(file_path):
    """Read the whole content of a UTF-8 text file.

    :param file_path: target file path
    :return: file content, or None when the file does not exist
    """
    if not os.path.isfile(file_path):
        log.warn('The file is not exist')
        return None
    log.info('read content from file: {}'.format(file_path))
    with open(file_path, 'r', encoding='UTF-8') as fin:
        return fin.read()
def read_file_as_list(file_path: str) -> list:
    """Read a text file line by line and return the unique lines as a list.

    Duplicate lines are collapsed via a set, so the original order is lost.

    :param file_path: absolute file path
    :return: list of unique lines (without surrounding newlines)
    """
    if not os.path.isfile(file_path):
        log.warn('The file is not exist. {}'.format(file_path))
        return []
    unique_lines = set()
    with open(file_path, 'r', encoding='utf-8') as handle:
        for raw_line in handle:
            unique_lines.add(raw_line.strip('\n'))
    log.info('read file end. list size: {}'.format(len(unique_lines)))
    return list(unique_lines)
def write_content(file_path, content) -> str:
    """Overwrite *file_path* with *content*, UTF-8 encoded.

    The parent directory is created if needed.

    :param file_path: target file path
    :param content: text to write
    :return: file_path
    """
    ready_dir(file_path)
    with open(file_path, 'w', encoding='UTF-8') as fout:
        fout.write(content)
    return file_path
def download_file(url, filename, path=os.path.curdir, replace=False, with_progress=False, **kwargs):
    """Download *url* into directory *path* as *filename*.

    :param url: file url
    :param filename: target file name; taken from the url when empty, and a
        missing extension is filled in from the url
    :param path: target directory
    :param replace: re-download even when the target file already exists
    :param with_progress: stream the download in 1 KiB chunks
    :return: True on success (or when the file already exists), False on failure
    """
    if not filename:
        filename = os.path.basename(url)
    elif os.path.splitext(filename)[-1].find('.') < 0:
        # the given name lacks an extension: reuse the one from the url
        filename += os.path.splitext(url)[-1]
    filename = filename[:200]  # Windows file name components are limited to 255 chars
    file_path = os.path.join(path, filename)
    ready_dir(file_path)
    # already downloaded and no replacement requested: done
    if os.path.exists(file_path) and not replace:
        log.info('The file is exist and not replace: {}'.format(file_path))
        return True
    log.info('begin download file from url: {}, save filename: {}'.format(url, filename))
    try:
        response = requests.get(url, stream=True, headers=COMMON_HEADERS, **kwargs)
        if with_progress:
            # BUG fix: the original opened in 'ab' (append), which corrupted an
            # existing file when replace=True; use 'wb' instead.
            with open(file_path, 'wb') as out_file:
                for chunk in response.iter_content(chunk_size=1024):
                    if chunk:
                        out_file.write(chunk)
            # BUG fix: the original logged success once per 1024-byte chunk.
            log.info('download chunks success.')
        else:
            with open(file_path, 'wb') as out_file:
                out_file.write(response.content)
        del response
    except Exception as e:
        log.error('download file failed. {}'.format(e))
        return False
    log.info('end download file. save file: {}'.format(file_path))
    return True
def download_files_with_pool(urls: list, path, replace=False, **kwargs):
    """Download all *urls* concurrently (10 worker threads) into directory *path*.

    :param urls: list of file urls
    :param path: target directory
    :param replace: re-download files that already exist
    """
    with ThreadPoolExecutor(10) as pool:
        tasks = []
        for url in urls:
            # BUG fix: the original passed the whole `urls` list to
            # get_file_name_from_url instead of the current url.
            filename = get_file_name_from_url(url)
            tasks.append(pool.submit(download_file, url, filename, path, replace=replace, **kwargs))
        wait(tasks, return_when=ALL_COMPLETED)
    log.info('all file download task pool finished.')
def convert_image_format(image_path, delete=False):
    """Convert a WEBP image to JPEG, writing over the same path.

    :param image_path: image path, preferably absolute
    :param delete: delete the file at image_path afterwards
    :return: None
    """
    if not os.path.isfile(image_path):
        log.warn('The image is not exist. path: {}'.format(image_path))
        return None
    image = Image.open(image_path)
    image_format = image.format
    # WEBP images are re-saved as JPEG data at the SAME path (extension unchanged)
    if image_format == 'WEBP':
        image.save(image_path, 'JPEG')
        image.close()
    # NOTE(review): delete=True removes image_path itself — i.e. it also deletes
    # the JPEG just written above, not only an "original". Confirm this is intended.
    if delete:
        os.remove(image_path)
def get_all_sub_files_with_cache(root_path, contain_dir=False, use_cache=True):
    """List all files under *root_path*, caching the listing as a JSON file.

    :param root_path: directory to walk
    :param contain_dir: include directories in the result
    :param use_cache: return a previously cached listing when available
    :return: list of file paths
    """
    cache_file = os.path.join(get_abs_cache_path(), convert_windows_path(root_path))
    if use_cache and os.path.isfile(cache_file):
        log.info('load content from cache: {}'.format(cache_file))
        return load_json_from_file(cache_file)
    ready_dir(cache_file)
    sub_files = get_all_sub_files(root_path, contain_dir=contain_dir)
    cache_json(sub_files, cache_file)
    return sub_files
def get_all_sub_files(root_path, all_files=None, contain_dir=False):
    """Recursively collect every file below *root_path*.

    :param root_path: directory to walk
    :param all_files: accumulator used by the recursion (leave as None)
    :param contain_dir: when True, directories are also included (as joined
        paths; files are returned as absolute paths)
    :return: the accumulated list of paths
    """
    if all_files is None:
        all_files = []
    # not a directory: nothing to collect
    if not os.path.isdir(root_path):
        return all_files
    log.info('begin through path: {}'.format(root_path))
    for entry in os.listdir(root_path):
        full_path = os.path.join(root_path, entry)
        if os.path.isdir(full_path):
            # recurse into sub-directories, optionally recording them too
            if contain_dir:
                all_files.append(full_path)
            get_all_sub_files(full_path, all_files, contain_dir)
        else:
            all_files.append(os.path.abspath(full_path))
    return all_files
def cache_json(json_data, cache_file=None) -> str:
    """Persist *json_data* to a cache file.

    :param json_data: JSON-serializable data
    :param cache_file: target path; a timestamped file under the cache
        directory is generated when omitted
    :return: the path of the written cache file
    """
    if not cache_file:
        stamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        cache_file = os.path.join(get_abs_cache_path(), 'cache-' + stamp + '.json')
    ready_dir(cache_file)
    with open(cache_file, 'w', encoding='utf-8') as handle:
        json.dump(json_data, handle, ensure_ascii=False, indent=4)
    return cache_file
def dump_json_to_file(json_file, json_data):
    """Write *json_data* to *json_file* as pretty-printed UTF-8 JSON.

    :param json_file: target file path (parent directory is created if needed)
    :param json_data: JSON-serializable data
    """
    ready_dir(json_file)
    with open(json_file, 'w', encoding='utf-8') as handle:
        json.dump(json_data, handle, ensure_ascii=False, indent=4)
def load_json_from_file(json_file) -> dict:
    """Load JSON data from a file.

    :param json_file: JSON file path
    :return: parsed data, or None when the file does not exist
    """
    # BUG fix: the original opened the file BEFORE checking os.path.isfile, so a
    # missing file raised FileNotFoundError and the guard was unreachable.
    if not os.path.isfile(json_file):
        return None
    with open(json_file, encoding='utf-8') as file_handle:
        json_data = json.load(file_handle)
    log.info('load json from file success. file: {}'.format(json_file))
    return json_data
def extract_init_json_data(html_content: str, pattern: re.Pattern) -> dict:
    """Extract the bootstrap JSON blob embedded in an HTML page.

    Some sites build the DOM from an initial-state JSON shipped in the page;
    *pattern* must capture that blob in its first group,
    e.g. r'__INITIAL_STATE__=(.+);'.

    :param html_content: raw HTML
    :param pattern: compiled regex with the JSON in group 1
    :return: parsed dict, or {} when nothing matches or parsing fails
    """
    match = re.search(pattern, html_content)
    if match is None:
        log.error('Can not match any data.')
        return {}
    raw_json = match.group(1)
    try:
        return json.loads(raw_json)
    except json.decoder.JSONDecodeError:
        log.error('can not parse json data: {}'.format(raw_json))
        return {}
def m_get(data: dict, key: str, default=None):
    """Fetch a nested dict value using a dotted *key* such as 'a.b.c'.

    :param data: dict (possibly nested)
    :param key: dot-separated key path
    :param default: value returned when the path is missing
    """
    return rget(data, key.split('.'), default)
def rget(data, keys, default=None):
    """Recursively fetch a nested value from *data* following the list *keys*.

    Note: *keys* is consumed (popped) while traversing.

    :return: the nested value, *default* on a missing key, or None when an
        intermediate value is not subscriptable
    """
    current_key = keys.pop(0)
    try:
        value = data[current_key]
    except KeyError:
        return default
    except TypeError:
        log.warn('The data is not dict: {}'.format(data))
        return None
    if keys:
        return rget(value, keys, default)
    return value
def get_path(file_name: str, with_file_name=True) -> str:
    """Derive the directory (optionally plus the extension-less file name) from a full path.

    :param file_name: file path (absolute or relative)
    :param with_file_name: include the suffix-less file name in the result
    :return: directory path, with the stem appended when requested
    """
    directory, base = os.path.split(file_name)
    stem, _suffix = os.path.splitext(base)
    # BUG fix: the original expression `path + filename if with_file_name else ''`
    # returned '' when with_file_name was False (precedence), and concatenated
    # directory+stem without a separator when True.
    return os.path.join(directory, stem) if with_file_name else directory
| StarcoderdataPython |
3289257 | <reponame>rszeto/click-here-cnn
import lmdb
import os
import sys
import math
import numpy as np
import argparse
from PIL import Image
from multiprocessing import Pool
import datetime
from google.protobuf import text_format
import scipy.ndimage
import skimage.transform
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.dirname(BASE_DIR))
from global_variables import *
sys.path.append(g_pycaffe_path)
import caffe
from caffe.proto import caffe_pb2
'''
@brief:
get serialized datum of image-label pair, used solely for caffe
@input:
img_label - (img_filename, label)
@output:
serialized datum of resized,colored,channel-swapped,transposed image
'''
def imglabel2datum(img_label):
    # Serialize one (image_filename, label) pair into a caffe Datum string.
    # Preprocessing: resize to g_images_resize_dim, gray->RGB, RGB->BGR,
    # HWC->CHW — the standard caffe input layout. (Python 2 / caffe code.)
    imname, label = img_label
    im = Image.open(imname)
    # ** resize **
    im = im.resize((g_images_resize_dim, g_images_resize_dim), Image.ANTIALIAS)
    # convert to array
    im = np.array(im)
    # convert gray to color by replicating the single channel three times
    if len(np.shape(im)) == 2: # gray image
        im = im[:,:,np.newaxis]
        im = np.tile(im, [1,1,3])
    # change RGB to BGR (caffe convention)
    im = im[:,:,::-1]
    # change H*W*C to C*H*W
    im = im.transpose((2,0,1))
    datum = caffe.io.array_to_datum(im, label)
    datum = datum.SerializeToString()
    return datum
'''
@brief:
Image LMDB writing with parallal data serialization (which takes most time).
@input:
image_file - txt file each line of which is image filename
output_lmdb - lmdb pathname
@output:
generate image LMDB (label is just idx of image in the image_file)
labels should be separately prepared
note: lmdb key is idx number (e.g. 0000000021) of image in image_file
'''
def write_image_lmdb(image_file, output_lmdb):
    # Build an image LMDB from a text file of image paths, serializing datums
    # in parallel (20 workers) in batches of 1000. The stored label is simply
    # the image's line index; keys are zero-padded indices ('0000000021').
    img_filenames = [line.rstrip().split(' ')[0] for line in open(image_file, 'r')]
    N = len(img_filenames)
    p = Pool(20)
    batch_N = 1000
    in_db = lmdb.open(output_lmdb, map_size=int(1e12))
    with in_db.begin(write=True) as in_txn:
        for in_idx in range(N):
            # at each batch boundary, serialize the next batch_N images in parallel
            if (in_idx % batch_N) == 0:
                print('[%s]: %d/%d' % (datetime.datetime.now(), in_idx, N))
                batch_ims = [(img_filenames[k+in_idx], k+in_idx) \
                        for k in range(min(batch_N, N-in_idx))]
                batch_datums = p.map(imglabel2datum, batch_ims)
            # write the current image's datum from the most recent batch
            in_txn.put('{:0>10d}'.format(in_idx), batch_datums[in_idx % batch_N])
    in_db.close()
'''
@brief:
Vector LMDB writing.
@input:
input_txt_file - txt file each line is vector values separated by space
output_lmdb - lmdb pathname
@output:
generate vector LMDB (can be used as labels)
note: lmdb key is idx number (e.g. 0000000021) of image in image_file
'''
def write_vector_lmdb(input_txt_file, output_lmdb):
    # Build a vector LMDB from a text file whose lines are space-separated
    # float values. Each vector is stored as a Dx1x1 datum whose label is the
    # line index; keys are zero-padded indices.
    lines = [line.rstrip() for line in open(input_txt_file, 'r')]
    N = len(lines)
    in_db = lmdb.open(output_lmdb, map_size=int(1e12))
    report_N = 1000  # progress-print interval
    with in_db.begin(write=True) as in_txn:
        for in_idx in range(N):
            if (in_idx%report_N) == 0:
                print('[%s]: %d/%d' % (datetime.datetime.now(), in_idx, N))
            ll = lines[in_idx].split(' ')
            datum = np.array([float(x) for x in ll])
            # caffe expects C*H*W: store the vector as (D, 1, 1)
            datum = np.reshape(datum, [len(datum),1,1])
            datum = caffe.io.array_to_datum(datum, in_idx)
            in_txn.put('{:0>10d}'.format(in_idx), datum.SerializeToString())
    in_db.close()
'''
@brief:
Load vectors from LMDB, return the vectors as NxD numpy array
'''
def load_vector_from_lmdb(dbname, feat_dim, max_num=float('Inf')):
    # Read up to max_num feat_dim-dimensional vectors out of an LMDB written by
    # write_vector_lmdb and return them as an (N, feat_dim) numpy array.
    # NOTE: Python 2 code (print statements).
    in_db = lmdb.open(dbname, map_size=int(1e12))
    print dbname, in_db.stat()
    N = min(in_db.stat()['entries'], max_num)
    feats = np.zeros((N,int(feat_dim)))
    with in_db.begin(write=False) as in_txn:
        for k in range(N):
            print k
            # keys were written as zero-padded indices
            keyname = '%010d' % k
            a = in_txn.get(keyname)
            datum = caffe_pb2.Datum()
            datum.ParseFromString(a)
            array = caffe.io.datum_to_array(datum)
            #print array, np.shape(array)
            # drop the trailing 1x1 dims added at write time
            array = np.squeeze(array)
            assert(array is not None)
            feats[k,:] = array
    in_db.close()
    return feats
'''
@brief:
batch caffe net forwarding.
@inpiut:
model_deploy_file: caffe prototxt deploy file (new version using layer instead of layers)
model_params_file: .caffemodel file
BATCH_SIZE: depending on your GPU memory, can be as large as you want.
result_keys: names of features (1D vector) you want to extract
img_files: filenames of images to be tested
mean_file: used for substraction in preprocessing of the image
resize_dim (D): resize image to DxD
@output:
return features in a list [<features1>, <features2>,...] of len(result_keys)
<features1> is a list of [<nparray-feature1-of-image1>, <nparray-of-feature1-of-image2>,...] of len(img_files)
'''
def batch_predict(model_deploy_file, model_params_file, BATCH_SIZE, result_keys, img_files, mean_file, resize_dim = 0):
    # Run a caffe net over img_files in batches of BATCH_SIZE and collect the
    # blobs named in result_keys for every image. NOTE: Python 2 / caffe code.
    # set imagenet_mean: default BGR channel means, or a per-pixel mean file
    if mean_file is None:
        imagenet_mean = np.array([104,117,123])
    else:
        imagenet_mean = np.load(mean_file)
    net_parameter = caffe_pb2.NetParameter()
    text_format.Merge(open(model_deploy_file, 'r').read(), net_parameter)
    print net_parameter
    print net_parameter.input_dim, imagenet_mean.shape
    # scale the mean image to the requested input resolution
    # NOTE(review): with mean_file=None this zooms a 1-D (3,) array using a
    # 3-element zoom tuple — presumably only the mean-file path is exercised
    # together with resize_dim > 0; confirm before relying on the default.
    ratio = resize_dim*1.0/imagenet_mean.shape[1]
    imagenet_mean = scipy.ndimage.zoom(imagenet_mean, (1, ratio, ratio))
    # INIT NETWORK - NEW CAFFE VERSION
    net = caffe.Net(model_deploy_file, model_params_file, caffe.TEST)
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2,0,1)) # height*width*channel -> channel*height*width
    transformer.set_mean('data', imagenet_mean) #### subtract mean ####
    transformer.set_raw_scale('data', 255) # pixel value range
    transformer.set_channel_swap('data', (2,1,0)) # RGB -> BGR
    # set test batch size by reshaping the input blob
    data_blob_shape = net.blobs['data'].data.shape
    data_blob_shape = list(data_blob_shape)
    net.blobs['data'].reshape(BATCH_SIZE, data_blob_shape[1], data_blob_shape[2], data_blob_shape[3])
    ## BATCH PREDICTS
    batch_num = int(math.ceil(len(img_files)/float(BATCH_SIZE)))
    # one result list per requested output blob
    probs_lists = [[] for _ in range(len(result_keys))]
    for k in range(batch_num):
        start_idx = BATCH_SIZE * k
        end_idx = min(BATCH_SIZE * (k+1), len(img_files))
        print 'batch: %d/%d, idx: %d to %d' % (k, batch_num, start_idx, end_idx)
        # prepare batch input data
        input_data = []
        for j in range(start_idx, end_idx):
            im = caffe.io.load_image(img_files[j])
            if resize_dim > 0: im = skimage.transform.resize(im, (resize_dim, resize_dim))
            input_data.append(im)
        # pad the final (partial) batch by repeating the last image
        for j in range(BATCH_SIZE - len(input_data)):
            input_data.append(im)
        inputs = input_data
        # forward pass!
        net.blobs['data'].data[...] = map(lambda x: transformer.preprocess('data', x), input_data)
        out = net.forward()
        # keep only the outputs for real (non-padding) images
        for i,key in enumerate(result_keys):
            probs = out[result_keys[i]]
            for j in range(end_idx-start_idx):
                probs_lists[i].append(np.array(np.squeeze(probs[j,:])))
    return probs_lists
| StarcoderdataPython |
1752338 |
import numpy as np
#import scipy.stats
import random
import sys
import tables
from scipy import sparse
def zscore(mat, return_unzvals=False):
    """Z-score each row of *mat*: subtract the row mean, divide by the row std.

    A tiny epsilon (1e-10) protects constant rows from division by zero.
    When *return_unzvals* is True, additionally return an (nrows, 2) array of
    (std, mean) pairs that unzscore() can use to invert the transform.
    """
    zmat = np.empty(mat.shape, mat.dtype)
    unzvals = np.zeros((zmat.shape[0], 2), mat.dtype)
    for row in range(mat.shape[0]):
        unzvals[row, 0] = np.std(mat[row, :])
        unzvals[row, 1] = np.mean(mat[row, :])
        zmat[row, :] = (mat[row, :] - unzvals[row, 1]) / (1e-10 + unzvals[row, 0])
    if return_unzvals:
        return zmat, unzvals
    return zmat
def center(mat, return_uncvals=False):
    """Center each row of *mat* by subtracting its mean (no scaling by std).

    When *return_uncvals* is True, also return an (nrows, 2) array shaped like
    zscore's unzvals — column 0 is fixed at 1.0, column 1 holds the row means —
    so unzscore() can undo the centering.
    """
    cmat = np.empty(mat.shape)
    uncvals = np.ones((mat.shape[0], 2))
    for row in range(mat.shape[0]):
        uncvals[row, 1] = np.mean(mat[row, :])
        cmat[row, :] = mat[row, :] - uncvals[row, 1]
    if return_uncvals:
        return cmat, uncvals
    return cmat
def unzscore(mat, unzvals):
    """Invert zscore(): multiply each row by its std and add back its mean.

    unzvals[:, 0] holds the row stds, unzvals[:, 1] the row means (the same
    epsilon 1e-10 used by zscore is re-applied so the round trip is exact).
    """
    stds = np.asarray(unzvals)[:, 0].reshape(-1, 1)
    means = np.asarray(unzvals)[:, 1].reshape(-1, 1)
    return mat * (1e-10 + stds) + means
def ridge(A, b, alpha):
    """Ridge regression: estimate x in Ax=b with regularization *alpha*.

    With G = alpha * I, returns W = (AᵀA + GᵀG)⁻¹ Aᵀ bᵀ, i.e. the minimizer
    of ||Ax - b|| + ||alpha I||.
    """
    penalty = np.matrix(np.identity(A.shape[1]) * alpha)
    gram = np.dot(A.T, A) + np.dot(penalty.T, penalty)
    return np.dot(np.dot(np.linalg.inv(gram), A.T), b.T)
def model_voxels(Rstim, Pstim, Rresp, Presp, alpha):
    """Fit a ridge model (regularization *alpha*) of Rresp on Rstim, then score
    it per voxel on the held-out pair (Pstim, Presp).

    NaNs in the responses are zeroed in place. Correlations are computed as the
    cosine between predicted and actual response columns.

    :return: (per-voxel correlations, ridge weight matrix)
    """
    # BUG fix: the module-level 'import scipy.stats' is commented out (only
    # 'from scipy import sparse' remains), so the skewness report below raised
    # NameError. Import locally instead.
    import scipy.stats
    print("Z-scoring stimuli (with a flip)... (or not)")
    Rresp[np.isnan(Rresp)] = 0.0
    Presp[np.isnan(Presp)] = 0.0
    print("Running ridge regression...")
    rwts = ridge(Rstim, Rresp.T, alpha)
    print("Finding correlations...")
    pred = np.dot(Pstim, rwts)
    prednorms = np.apply_along_axis(np.linalg.norm, 0, pred)
    respnorms = np.apply_along_axis(np.linalg.norm, 0, Presp)
    correlations = np.array(np.sum(np.multiply(Presp, pred), 0)).squeeze()/(prednorms*respnorms)
    print("Max correlation: %0.3f" % np.max(correlations))
    print("Skewness: %0.3f" % scipy.stats.skew(correlations))
    return np.array(correlations), rwts
def model_voxels_old(Rstim, Pstim, Rresp, Presp, alpha):
    """Older, per-voxel-loop variant of model_voxels.

    Fits a ridge model of Rresp on Rstim and scores each voxel on the held-out
    pair (Pstim, Presp) via np.corrcoef. NaNs in the responses are zeroed
    in place.

    :return: (per-voxel correlations, ridge weight matrix)
    """
    # BUG fix: module-level 'import scipy.stats' is commented out; the skewness
    # report below raised NameError. Import locally instead.
    import scipy.stats
    print("Z-scoring stimuli (with a flip)...")
    Rresp[np.isnan(Rresp)] = 0.0
    Presp[np.isnan(Presp)] = 0.0
    print("Running ridge regression...")
    rwts = ridge(Rstim, Rresp.T, alpha)
    print("Finding correlations...")
    correlations = []
    for vi in range(Presp.shape[1]):
        rcorr = np.corrcoef(Presp[:,vi].T,np.array((np.matrix(Pstim) * np.matrix(rwts[:,vi]))).T)[0,1]
        correlations.append(rcorr)
    print("Max correlation: %0.3f" % np.max(correlations))
    print("Skewness: %0.3f" % scipy.stats.skew(correlations))
    return np.array(correlations), rwts
def gaussianize(vec):
    """Force the values in *vec* onto a standard-normal shape via rank mapping.

    Ranks are converted to quantiles, pushed through the normal inverse
    survival function, and rescaled to unit standard deviation. The ordering
    of the input values is preserved.
    """
    # BUG fix: module-level 'import scipy.stats' is commented out; the isf call
    # below raised NameError. Import locally instead.
    import scipy.stats
    ranks = np.argsort(np.argsort(vec))
    cranks = (ranks + 1).astype(float) / (ranks.max() + 2)
    vals = scipy.stats.norm.isf(1 - cranks)
    return vals / vals.std()
def gaussianize_mat(mat):
    """Apply gaussianize() independently to every column of *mat*."""
    gmat = np.empty(mat.shape)
    for col in range(mat.shape[1]):
        gmat[:, col] = gaussianize(mat[:, col])
    return gmat
def make_delayed(stim, delays, circpad=False):
    """Concatenate time-shifted copies of *stim*, one per entry in *delays*.

    Positive delays (in samples) shift the stimulus later in time, negative
    delays earlier. Vacated rows are zero-filled, or wrapped around when
    *circpad* is True. The shifted copies are horizontally stacked.
    """
    nt, ndim = stim.shape
    shifted = []
    for delay in delays:
        if delay == 0:
            shifted.append(stim.copy())
            continue
        block = np.zeros((nt, ndim))
        if delay < 0:  # negative delay: pull future samples earlier
            block[:delay, :] = stim[-delay:, :]
            if circpad:
                block[delay:, :] = stim[:-delay, :]
        else:  # positive delay: push samples later
            block[delay:, :] = stim[:-delay, :]
            if circpad:
                block[:delay, :] = stim[-delay:, :]
        shifted.append(block)
    return np.hstack(shifted)
def sp_make_delayed(stim, delays, circpad=False):
    """Creates non-interpolated concatenated delayed versions of [stim] with the given [delays]
    (in samples). Works with sparse matrices.

    If [circpad], instead of being padded with zeros, [stim] will be circularly shifted.

    NOTE(review): this relies on scipy.sparse internals (the private `_shape`
    attribute and direct `indices` arithmetic) and only handles strictly
    positive delays — `stim.copy()[:-d]` breaks for d <= 0, and the `circpad`
    flag is never consulted. Confirm callers only pass positive delays.
    """
    nt,ndim = stim.shape
    dstims = []
    T,N = stim.shape
    for di,d in enumerate(delays):
        # drop the last d rows, then restore the original shape and shift the
        # stored indices by d — presumably effecting the row shift without
        # rebuilding the sparse structure; verify against the matrix format used
        dstim = stim.copy()[:-d]
        dstim._shape = (T,N)
        dstim.indices += d
        dstims.append(dstim)
    return sparse.hstack(dstims)
def mult_diag(d, mtx, left=True):
    """Multiply *mtx* by diag(d) without materializing the diagonal matrix.

    mult_diag(d, mtx, left=True)  == dot(diag(d), mtx)
    mult_diag(d, mtx, left=False) == dot(mtx, diag(d))

    Always faster than np.dot with an explicit diagonal.
    (Idiom from the numpy-discussion mailing list.)

    :param d: 1D (N,) array of diagonal elements
    :param mtx: 2D (N, N) array
    """
    if left:
        return (d * mtx.T).T
    return d * mtx
import time
import logging
def counter(iterable, countevery=100, total=None, logger=logging.getLogger("counter")):
    """Yield items from *iterable*, reporting progress every *countevery* items.

    When *total* is known (passed in, or taken from len(iterable)), each report
    includes an estimated time remaining. Pass logger=None to print the reports
    instead of logging them.
    """
    start_time = time.time()
    # fall back to len() for the total when the iterable supports it
    if total is None and hasattr(iterable, "__len__"):
        total = len(iterable)
    for count, thing in enumerate(iterable):
        yield thing
        if count % countevery:
            continue
        rate = float(count + 1) / (time.time() - start_time)
        if rate > 1:  # more than 1 item/second
            ratestr = "%0.2f items/second" % rate
        else:  # less than 1 item/second
            ratestr = "%0.2f seconds/item" % (rate ** -1)
        if total is not None:
            remitems = total - (count + 1)
            remtime = remitems / rate
            timestr = ", %s remaining" % time.strftime('%H:%M:%S', time.gmtime(remtime))
            itemstr = "%d/%d" % (count + 1, total)
        else:
            timestr = ""
            itemstr = "%d" % (count + 1)
        formatted_str = "%s items complete (%s%s)" % (itemstr, ratestr, timestr)
        if logger is None:
            print(formatted_str)
        else:
            logger.info(formatted_str)
def save_table_file(filename, filedict):
    """Saves the variables in [filedict] in a hdf5 table file at [filename].

    Each dict entry becomes a root-level array named after its key.

    NOTE(review): uses the legacy PyTables camelCase API (openFile/createArray),
    which was removed in PyTables 3.x — presumably this module targets
    PyTables 2.x; confirm before upgrading the dependency.
    """
    hf = tables.openFile(filename, mode="w", title="save_file")
    for vname, var in filedict.items():
        hf.createArray("/", vname, var)
    hf.close()
| StarcoderdataPython |
1683918 | <filename>src/server/endpoints/quidel.py
from flask import Blueprint
from .._config import AUTH
from .._query import execute_query, QueryBuilder
from .._validate import check_auth_token, extract_integers, extract_strings, require_all
# first argument is the endpoint name
bp = Blueprint("quidel", __name__)
alias = None
@bp.route("/", methods=("GET", "POST"))
def handle():
    """Serve the authenticated /quidel endpoint.

    Requires the quidel auth token plus 'locations' and 'epiweeks' parameters,
    and returns (epiweek, location, value) rows ordered by epiweek then
    location.
    """
    check_auth_token(AUTH["quidel"])
    require_all("locations", "epiweeks")
    locations = extract_strings("locations")
    epiweeks = extract_integers("epiweeks")
    # build query against the 'quidel' table (aliased 'q')
    q = QueryBuilder("quidel", "q")
    fields_string = ["location"]
    fields_int = ["epiweek"]
    fields_float = ["value"]
    q.set_fields(fields_string, fields_int, fields_float)
    q.set_order(epiweek=True, location=True)
    # build the filter from the requested locations and epiweek ranges
    q.where_strings("location", locations)
    q.where_integers("epiweek", epiweeks)
    # send query
    return execute_query(str(q), q.params, fields_string, fields_int, fields_float)
| StarcoderdataPython |
140072 | <filename>ooobuild/lo/form/tabulator_cycle.py
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Enum Class
# this is a auto generated file generated by Cheetah
# Namespace: com.sun.star.form
# Libre Office Version: 7.3
from enum import Enum
class TabulatorCycle(Enum):
    """
    Enum Class

    Specifies what happens when the TAB key is pressed on the last control
    of a form.

    See Also:
        `API TabulatorCycle <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1form.html#acb5251eb1c7e6ff2149158596346de94>`_
    """
    __ooo_ns__: str = 'com.sun.star.form'
    __ooo_full_ns__: str = 'com.sun.star.form.TabulatorCycle'
    __ooo_type_name__: str = 'enum'

    CURRENT = 'CURRENT'
    """
    A navigation bar is provided and navigation is performed on the
    current/active form: pressing TAB on the last control moves the focus to
    the first control in the tab order of the same record.

    This is the default and most often encountered mode.
    """

    PAGE = 'PAGE'
    """
    Pressing TAB on the last control of a form moves the focus to the first
    control of the next form in the tab order.
    """

    RECORDS = 'RECORDS'
    """
    Pressing TAB on the last control moves the focus to the first control in
    the tab order of the next record.
    """

__all__ = ['TabulatorCycle']
| StarcoderdataPython |
1628206 | <gh_stars>0
#########################################################################################
# Copyright (c) <2012> #
# Author: <NAME> (<EMAIL>) #
# International Institute of Information Technology, Hyderabad, India. #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# * Neither the name of the <organization> nor the #
# names of its contributors may be used to endorse or promote products #
# derived from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND #
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND #
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#########################################################################################
# About the package #
# Following package contains functions useful for processing speech signal. #
# Some of the important operations that can be done are Zero Frequency Filtering, #
# Speech Segmentation, FIR Filters, Linear Prediction, etc. #
# #
# All the functions use extensive functionality of numpy and scipy packages. Some #
# of the functions like FIR filters do not provide as much flexibility as compared #
# to scipy.signal package, but are provided for easy usage. Internally they use #
# filter response functions provided scipy. #
#########################################################################################
#!/usr/bin/python
import wave,struct,numpy,numpy.fft
import scipy.signal,matplotlib.pyplot
def wavread(wfname,stime=0,etime=0):
    '''
    Returns the contents of the wave file and its sampling frequency.

    wfname is the input wave file path (assumed 16-bit PCM mono, as in the
    rest of this module).
    stime and etime are in seconds and select a segment of the file; when
    both are zero (the default) the whole file is read.

    Returns:
        (sig, fs): float64 numpy array of samples and the sampling rate.
    '''
    wavfp = wave.open(wfname,'r')
    fs = wavfp.getframerate() #to obtain the sampling frequency
    wavnframes = wavfp.getnframes()
    sframe = int(stime * fs) #input is in seconds and so they are to be converted
    eframe = int(etime * fs) #to frame counts
    if eframe == 0:
        eframe = wavnframes
    wavfp.setpos(sframe)
    # Read the whole segment in one call instead of one 2-byte frame at a
    # time; numpy.frombuffer decodes the little-endian 16-bit samples in C.
    raw = wavfp.readframes(eframe - sframe)
    wavfp.close()
    sig = numpy.frombuffer(raw, dtype='<i2').astype('float')
    return sig,fs
def wavwrite(sig,fs,wfname):
    '''
    Creates a standard PCM, 16-bit, mono wave file at fs Hertz.

    Two input formats are accepted:
      1. If the amplitudes all lie in [-1, 1] the signal is scaled up to
         the 16-bit integer range (a warning is printed).
      2. Otherwise the floats are truncated to integers and written as-is.

    Suggested normalization:
        wav = wav/(0.0001 + max(wav))

    Note: sig is expected to be a numpy array (scaling uses elementwise
    multiplication).
    '''
    if max(sig) <= 1 and min(sig) >= -1:
        # Scaling only applies to normalized input, per the contract above.
        # (print() instead of the original Python 2 print statement.)
        print('[Warning]: Scaling signal magnitudes')
        sampwidth = 2 #number of bytes used to store one sample
        max_amplitude = float(int((2 ** (sampwidth * 8)) / 2) - 1)  # 32767
        sig = sig * max_amplitude
    sig = numpy.array(sig,dtype='int')
    wavfp = wave.open(wfname,'w')
    #params: nchannels, sampwidth(bytes), framerate, nframes, comptype, compname
    wavfp.setparams((1,2,fs,sig.size,'NONE','not compressed'))
    # pack all samples at once ('<h' = little-endian 16-bit) instead of one
    # writeframes call per sample
    wavfp.writeframes(struct.pack('<%dh' % sig.size, *sig.tolist()))
    wavfp.close()
class Cepstrum:
    """
    Cepstral feature extraction for speech signals.

    Supported features:
      1. Real cepstral coefficients (cep, cepfeats)
      2. LP cepstral coefficients (lpccfeats, delegated to the LPC class)
    """
    def __init__(self):
        # LPC helper used by lpccfeats (class defined later in this module).
        self.lpco = LPC()

    def cep(self,sig,corder,L=0):
        """
        Return `corder` real cepstral coefficients of a windowed signal.

        The real cepstrum is c = Re(ifft(log|fft(sig)|)).

        Liftering options (L):
          * L is None -- raw coefficients
          * L == 0    -- linear weighting: c[m] *= (m+1)   (default)
          * otherwise -- sinusoidal (HTK-style):
                         c[m] *= 1 + (L/2) sin(pi (m+1) / L)

        Note: the input signal should already be windowed, typically with
        a hamming or hanning window.
        """
        c = numpy.fft.ifft(numpy.log(numpy.abs(numpy.fft.fft(sig))))
        c = numpy.real(c)
        if len(c) < corder:
            # print() (Python 3) instead of the original print statements;
            # the "Lenght" typo in the message is fixed as well.
            print('[Warning]: Length of the windowed signal < cepstral order')
            print('[Warning]: cepstral order set to length of the windowed signal')
            corder = len(sig)
        if L is None:  # raw cepstral coefficients ('is None', not '== None')
            return c[:corder]
        elif L == 0:   # linear weighted cepstral coefficients
            for i in range(0,corder):
                c[i] = c[i] * (i+1)
            return c[:corder]
        # cep liftering process as given in HTK
        for i in range(0,corder):
            c[i] = (1 + (float(L)/2) * numpy.sin((numpy.pi * (i+1))/L)) * c[i]
        return c[:corder]

    def cepfeats(self,sig,fs,wlent,wsht,corder,L=None):
        """
        Frame-wise cepstral coefficients for a whole signal.

        Parameters:
          1. sig: input signal
          2. fs: sampling frequency (Hz)
          3. wlent: window length (ms)
          4. wsht: window shift (ms)
          5. corder: cepstral order
          6. L: liftering parameter (see cep)
        """
        # floor division keeps the frame sizes integral under Python 3
        # (true division would make the slice bounds floats and raise).
        wlenf = (wlent * fs)//1000
        wshf = (wsht * fs)//1000
        sig = numpy.append(sig[0],numpy.diff(sig))  # first difference removes dc
        sig = sig + 0.001                           # avoid log(0) inside cep
        ccs = []
        noFrames = int((len(sig) - wlenf)//wshf) + 1
        for i in range(0,noFrames):
            index = i * wshf
            window_signal = sig[index:index+wlenf]
            smooth_signal = window_signal * numpy.hamming(len(window_signal))
            c = self.cep(smooth_signal,corder,L)
            ccs.append(c)
        return numpy.array(ccs)

    def lpccfeats(self,sig,fs,wlent,wsht,lporder,corder,L=None):
        '''
        LP cepstral coefficients; thin wrapper around LPC.lpccfeats.

        Parameters mirror LPC.lpccfeats:
            sig, fs, wlent (ms), wsht (ms), lporder, corder, L (ceplifter).

        Returns only the cepstral coefficients (LPC.lpccfeats also returns
        the gains, normalized errors and LP coefficients).
        '''
        Gs,nEs,lpcs,lpccs = self.lpco.lpccfeats(sig,fs,wlent,wsht,lporder,corder,L)
        return lpccs
class FIRFilters:
    '''Class consists of building the following FIR filters
           a. Low Pass Filter
           b. High Pass Filter
           c. Band Pass Filter
           d. Band Reject Filter
       In general the filter is denoted as follows
                      jw            -jw              -jmw
               jw  B(e)    b[0] + b[1]e + .... + b[m]e
            H(e) = ---- = ------------------------------------
                      jw            -jw              -jnw
                   A(e)    a[0] + a[2]e + .... + a[n]e
       Where the roots of the numerator are the zeros and those of the
       denominator the poles. FIR filters are represented only via the
       zeros, hence only the coefficients "b" are computed.

       The class also provides plotResponse to view the frequency response.

       Note: the integer index fixes below use // so the filter-tap indices
       stay ints under Python 3 (M/2 would be a float and raise TypeError).
    '''
    def __init__(self):
        pass

    def low_pass(self,M,cfreq,wtype='blackmanharris'):
        """
        Windowed-sinc low pass filter.
          1. M is the number of filter taps; M should be even.
          2. cfreq is the cutoff frequency in (0, 1), 1 being the
             Nyquist frequency.
          3. wtype is the window type: blackmanharris, hamming or hanning.
        """
        lb = scipy.signal.firwin(M,cutoff=cfreq,window=wtype)
        return lb

    def high_pass(self,M,cfreq,wtype='blackmanharris'):
        """
        High pass filter by spectral inversion of a low pass filter.
          1. M is the number of filter taps; M should be even.
          2. cfreq is the cutoff frequency in (0, 1), 1 being Nyquist.
          3. wtype is the window type: blackmanharris, hamming or hanning.
        See Scientists and Engineers Guide to Digital Signal Processing,
        chapters 14-16.
        """
        lb = self.low_pass(M,cfreq,wtype) # low-pass impulse response ...
        hb = -1 * lb                      # ... negated
        hb[M // 2] = 1 + hb[M // 2]       # add unit impulse at the center tap
        return hb

    def band_reject(self,M,cfreqb,cfreqe,wtype='blackmanharris'):
        """
        Band reject filter: sum of a low pass at cfreqb and a high pass
        at cfreqe.
          1. M is the number of filter taps; M should be even.
          2. cfreqb, cfreqe delimit the rejected band, each in (0, 1)
             with 1 being Nyquist.
          3. wtype is the window type: blackmanharris, hamming or hanning.
        """
        lb = self.low_pass(M,cfreqb,wtype)   #coefficients from the low pass filter
        hb = self.high_pass(M,cfreqe,wtype)  #coefficients from the high pass filter
        brb = lb + hb
        return brb

    def band_pass(self,M,cfreqb,cfreqe,wtype='blackmanharris'):
        """
        Band pass filter by spectral inversion of the band reject filter.
          1. M is the number of filter taps; M should be even.
          2. cfreqb, cfreqe delimit the passed band, each in (0, 1)
             with 1 being Nyquist.
          3. wtype is the window type: blackmanharris, hamming or hanning.
        """
        brb = self.band_reject(M,cfreqb,cfreqe,wtype)
        bpb = -1 * brb
        bpb[M // 2] = 1 + bpb[M // 2]
        return bpb

    def fsignal(self,sig,b):
        """
        Filter `sig` with the FIR coefficients `b` and zero out the first
        (M-1)/2 samples, which only contain the filter's group delay.
        """
        fsig = scipy.signal.lfilter(b,[1],sig)
        M = len(b)                   # FIR filters have a delay of (M-1)/2
        fsig[0:(M - 1) // 2] = 0     # setting the delay values to zero
        return fsig

    def plotResponse(self,b):
        """
        Plot magnitude (linear and dB) and phase response of the FIR filter
        whose impulse response (numerator b of the transfer function) is
        given.
        """
        w,h = scipy.signal.freqz(b)
        h_db = 20.0 * numpy.log10(numpy.abs(h))
        ph_angle = numpy.unwrap(numpy.angle(h))
        fig = matplotlib.pyplot.figure()
        subp1 = fig.add_subplot(3,1,1)
        subp1.text(0.05,0.95,'Frequency Response',transform=subp1.transAxes,fontsize=16,fontweight='bold',va='top')
        subp1.plot(w/max(w),numpy.abs(h))
        subp1.set_ylabel('Magnitude')
        subp2 = fig.add_subplot(3,1,2)
        subp2.text(0.05,0.95,'Frequency Response',transform=subp2.transAxes,fontsize=16,fontweight='bold',va='top')
        subp2.plot(w/max(w),h_db)
        subp2.set_ylabel('Magnitude (DB)')
        subp2.set_ylim(-150, 5)
        subp3 = fig.add_subplot(3,1,3)
        subp3.text(0.05,0.95,'Phase',transform=subp3.transAxes,fontsize=16,fontweight='bold',va='top')
        subp3.plot(w/max(w),ph_angle)
        subp3.set_xlabel('Normalized Frequency')
        subp3.set_ylabel('Angle (radians)')
        fig.show()
class SignalMath:
    '''
    Commonly used mathematical operations needed by the signal
    processing routines in this module.
    '''
    def __init__(self):
        pass

    def movavg(self,sig,fs,wlent):
        '''
        Centered moving average:
            y[n] = 1/(2*N+1) * sum(sig[i+n]) for i = -N to +N
        Edge samples are normalized by the (smaller) number of window
        samples that actually overlap the signal.

        Parameters:
            sig: input signal (numpy array)
            fs: sampling frequency in Hz
            wlent: window length in milliseconds
        '''
        # floor division so the sample count / slice indices stay ints
        # under Python 3 (the original relied on Python 2 '/').
        wlenf = (wlent * fs)//1000
        window = numpy.array([1] * wlenf)
        avg = numpy.convolve(sig,window,mode='full')
        avg = avg[(window.size//2) - 1:avg.size - (window.size//2)]
        # per-sample normalization accounting for partial overlap at the edges
        norm = numpy.convolve(window,numpy.array([1] * avg.size),mode='full')
        norm = norm[(window.size//2) - 1:norm.size - (window.size//2)]
        return numpy.divide(avg,norm)
class ZeroFreqFilter:
    '''
    Functions required to obtain the Zero Frequency Filtered (ZFF) signal.
    Procedure, for an input signal s[n]:
        1. Bias Removal:
                x[n] = s[n] - s[n-1]
           Removes any dc or low frequency bias that might have been
           captured during recording.
        2. Zero Frequency Filtering:
           Pass the signal twice through a zero frequency resonator (two
           poles on the unit circle at zero frequency):
                y1[n] = -1 * SUM_k( a(k) * y1[n-k] ) + x[n]   k = 1,2
                y2[n] = -1 * SUM_k( a(k) * y2[n-k] ) + y1[n]  k = 1,2
           where a(1) = -2 and a(2) = 1. Both operations together are
           equivalent to taking the cumulative sum of x[n] four times.
        3. Trend Removal:
                y[n] = y2[n] - (moving average of y2[n] over a window)
           The moving average is SignalMath.movavg. The window length is
           not critical as long as it is about 1 to 2 times the average
           pitch period.

    NOTE(review): this class relies on Python 2 integer division in several
    places (marked below); under Python 3 those expressions become floats
    and break range()/indexing -- confirm the intended interpreter.
    '''
    def __init__(self):
        # moving-average helper used by the trend-removal steps
        self.sm = SignalMath()
    def getZFFSignal(self,sig,fs,wlent=30,wsht=20,mint=3):
        '''
        Return the Zero Frequency Filtered Signal, computed as:
            1. Bias Removal
            2. Zero Frequency Resonator (4x cumulative sum)
            3. Average Pitch Estimation (for the trend-removal window)
            4. Trend Removal (applied four times)

        Parameters:
            sig: samples of the wave file (numpy array)
            fs: sampling frequency in Hz
            wlent: window length in milli-seconds
            wsht: window shift in milli-seconds
            mint: minimum pitch value in milli-seconds
        '''
        sig = sig - (sum(sig)/sig.size)                 # remove the mean
        dsig = numpy.diff(sig) #bias removal
        dsig = numpy.append(dsig,dsig[-1])              # keep original length
        dsig = numpy.divide(dsig,max(abs(dsig))) #normalization
        csig = numpy.cumsum(numpy.cumsum(numpy.cumsum(numpy.cumsum(dsig)))) #zero frequency resonator
        wlenpt = self.__avgpitch(dsig,fs,wlent,wsht,mint) #estimating the average pitch
        wlenpf = int((wlenpt * fs)/1000) #converting pitch in milli seconds to pitch in number of samples
        tr = numpy.subtract(csig,self.sm.movavg(csig,fs,wlenpt)) #trend removal
        tr = numpy.subtract(tr,self.sm.movavg(tr,fs,wlenpt)) #trend removal
        tr = numpy.subtract(tr,self.sm.movavg(tr,fs,wlenpt)) #trend removal
        tr = numpy.subtract(tr,self.sm.movavg(tr,fs,wlenpt)) #trend removal
        for i in range(dsig.size - (wlenpf*3) - 1,dsig.size): # To remove the trailing samples. Without removing the trailing samples
            tr[i] = 0 # we cannot view the ZFF signal as they have huge values
        tr = numpy.divide(tr,max(abs(tr))) # normalizing
        return tr
    def __avgpitch(self,sig,fs,wlent,wsht,mint=3):
        '''
        Estimate the average pitch period (in ms) of the input signal via a
        histogram over per-frame autocorrelation pitch estimates. The result
        is used as the trend-removal window length; anything between the
        pitch and twice the pitch is adequate.

        Parameters:
            sig: samples of the wave file
            fs: sampling frequency in Hz
            wlent: window length in milli-seconds
            wsht: window shift in milli-seconds
            mint: minimum pitch value in milli-seconds
        '''
        # NOTE(review): Python 2 integer division assumed below -- wlenf,
        # wshtf and nof must be ints for range() and slicing.
        wlenf = (wlent * fs)/1000
        wshtf = (wsht * fs)/1000
        nof = (sig.size - wlenf)/wshtf
        pitch = []
        for i in range(0,nof):#block processing for obtaining pitch from each of the window
            sigblock = sig[i*wshtf:i*wshtf + wlenf]
            pitch.append(self.__getpitch(sigblock,fs,mint))
        pitch = numpy.array(pitch)
        pbins = numpy.arange(3,20,2)#min pitch is 3 msec and maximum is 20 msec. The bin ranges are 3-5,5-7,..
        phist = numpy.histogram(pitch,bins=pbins)[0]#histogram of the per-frame pitch values
        prange_index = 0 #to find the most commonly occurring pitch bin
        prange_count = 0
        for i in range(0,phist.size):
            if phist[i] > prange_count:
                prange_count = phist[i]
                prange_index = i
        avgpitch = (pbins[prange_index] + pbins[prange_index+1])/2#midpoint of the winning bin
        return avgpitch
    def __getpitch(self,sig,fs,mint=3):
        '''
        Pitch period (in ms) of one windowed frame, located as the strongest
        positive peak of the normalized autocorrelation beyond the minimum
        lag.

        Parameters:
            sig: samples of one analysis frame
            fs: sampling frequency in Hz
            mint: minimum pitch value in milli-seconds
        '''
        minf = (mint * fs)/1000 #to convert into number of frames/samples
        cor = numpy.correlate(sig,sig,mode='full')
        cor = cor[cor.size/2:]#auto correlation is symmetric about the y axis.
        cor = cor/max(abs(cor))#normalizing the auto correlation values
        dcor = numpy.diff(cor)#finding diff
        dcor[0:minf] = 0#setting values of the frames below mint to be zero
        locmax = numpy.array([1] * dcor.size) * (dcor > 0)
        locmin = numpy.array([1] * dcor.size) * (dcor <= 0)
        locpeaks = numpy.array([2] * (dcor.size - 1)) * (locmax[0:locmax.size - 1] + locmin[1:locmin.size] == 2)#to get the positive peaks
        maxi,maxv = self.__getmax(cor,locpeaks)
        return (maxi * 1000.0)/fs
    def __getmax(self,src,peaks):
        '''
        Return (index, value) of the peak position (peaks[i] == 2) with the
        largest src value.
        '''
        maxi = 0
        maxv = 0
        for i in range(0,peaks.size): #diff values will be one sample less than the original signal
            if src[i] > maxv and peaks[i] == 2: #consider only the diff values in the for loop
                maxi = i
                maxv = src[i]
        return maxi,maxv
class SegmentSpeech:
    '''
    Functions to segment speech, all based on the Zero Frequency
    Filtered (ZFF) signal.
    Functions named segmentxc segment the speech into x categories.
    All label tuples are (start_ms, end_ms, TAG).
    '''
    def __init__(self):
        # moving-average helper used by vnv()
        self.sm = SignalMath()
    def vnv(self,zfs,fs,theta=2.0,wlent=30):
        '''
        Return the voiced regions in the speech segment: the smoothed ZFF
        energy, zeroed wherever it falls below (average energy / theta).

        Parameters:
            zfs: the Zero Frequency Filtered Signal
            fs: the sampling rate
            theta: energy-threshold divisor (larger => more lenient)
            wlent: window length (ms) for the moving average
        '''
        zfse = 1.0 * zfs * zfs #squaring each of the samples: to find the ZFS energy.
        zfse_movavg = numpy.sqrt(self.sm.movavg(zfse,fs,wlent)) #averaging across wlent window
        zfse_movavg = zfse_movavg/max(zfse_movavg) #normalizing
        avg_energy = sum(zfse_movavg)/zfse_movavg.size #average energy across all the windows
        voicereg = zfse_movavg * (zfse_movavg >= avg_energy/theta) #selecting segments whose energy is higher than the average.
        return voicereg
    def __zerocross(self,zfs):
        '''
        Positive zero crossings of the ZFF signal: the returned array holds
        the slope (derivative) at each positive-going crossing and zero
        elsewhere; same length as zfs.
        '''
        zc = numpy.array([1]) * (zfs >= 0)
        dzc = numpy.diff(zc)
        zcst = numpy.diff(zfs) * (dzc > 0)
        return numpy.append(zcst,0)
    def getGCI(self,zfs,voicereg):
        '''
        Glottal Closure Instants and their strengths of excitation: the
        derivative at each positive zero crossing of the ZFF signal, masked
        by the voiced regions (from vnv) to suppress spurious GCI.
        '''
        gci = self.__zerocross(zfs) * (voicereg > 0)#only considering the GCI in regions where it was
        return gci                                  #detected as voiced regions by the vnv function
    def segment2c(self,gci,fs,sildur=300):
        '''
        Two-category segmentation: speech ('SPH') and silence ('SIL').

        Parameters:
            gci: the glottal closure instants
            fs: the sampling rate
            sildur: minimum duration (ms) without GCI to classify the
                    segment as silence
        Returns a list of (start_ms, end_ms, tag) tuples covering the file.
        '''
        stime = 0#starting time
        etime = 0#end time
        wtime = (gci.size * 1000.0)/fs#total time of the wave file
        wlab = []#array containing the lab information for the wave file
        i = 0
        vflag = False#flag to keep track of voiced sounds
        while i < gci.size:
            if gci[i] > 0:
                etime = (i * 1000.0)/fs
                if (etime - stime) >= sildur: #to check whether its a silence region or not
                    # shrink interior silences by 50 ms on each open side so
                    # weak unvoiced sounds at the boundaries stay in speech
                    if stime == 0 and etime == wtime:
                        stime = stime
                        etime = etime
                    elif stime == 0:
                        stime = stime
                        etime = etime - 50
                    elif etime == wtime:
                        stime = stime + 50
                        etime = etime
                    else:
                        stime = stime + 50
                        etime = etime - 50
                    wlab.append((stime,etime,'SIL')) #to make sure that the end unvoiced sounds are
                    stime = etime #not classified as silence
                else:
                    stime = etime
            i += 1
        #fixing the trailing silence. Because the trailing silence might not end with an epoch.
        if etime != wtime:
            if (wtime - etime) >= sildur:
                wlab.append((etime+50,wtime,'SIL'))
        #some times there might not be any silence in the wave file
        if len(wlab) == 0:
            wlab.append((0,wtime,'SPH'))
        #fixing the missing time stamps
        #the above loop only puts time stamps for the silence regions
        cwlab = []
        for i in range(0,len(wlab)):
            tlab = wlab[i]
            stime = tlab[0]
            etime = tlab[1]
            if len(cwlab) == 0:
                if stime == 0:
                    cwlab.append(tlab[:])
                else:
                    cwlab.append((0,stime,'SPH'))
                    cwlab.append(tlab[:])
            else:
                if cwlab[-1][1] == stime:
                    cwlab.append(tlab[:])
                else:
                    cwlab.append((cwlab[-1][1],stime,'SPH'))
                    cwlab.append(tlab[:])
        if wlab[-1][1] != wtime:
            cwlab.append((wlab[-1][1],wtime,'SPH'))
        return cwlab
    def segmentvnvc(self,gci,fs,uvdur=18):
        '''
        Two-category segmentation into voiced ('VOI') and unvoiced ('UNV').

        Parameters:
            gci: the glottal closure instants
            fs: the sampling rate
            uvdur: maximum duration (ms) between two GCI for the span to be
                   classified as VOI; otherwise it is UNV
        '''
        stime = 0#start time
        etime = 0#end time
        ptime = stime#to keep track of previous time
        wtime = (gci.size * 1000.0)/fs#total time of the wave file
        wlab = []#array containing the time stamps of the wave file
        i = 0
        vflag = False#to keep track of voiced regions
        while i < gci.size:
            if gci[i] > 0:
                etime = (i * 1000.0)/fs
                if (etime - ptime) < uvdur:#to check whether VOICED or not
                    ptime = etime          #if its more than uvdur then it is UNVOICED
                    vflag = True
                else: #to tag it as UNVOICED
                    if vflag:
                        wlab.append((stime,ptime,'VOI'))
                        vflag = False
                    wlab.append((ptime,etime,'UNV'))
                    stime = etime
                    ptime = stime
            i += 1
        #fixing the trailing tags
        if etime != wtime:
            if vflag:
                wlab.append((stime,etime,'VOI'))
            wlab.append((etime,wtime,'UNV'))
        return wlab
    def segment3c(self,gci,fs,uvdur=18,sildur=300):
        '''
        Three-category segmentation into voiced ('VOI'), unvoiced ('UNV')
        and silence ('SIL').

        Parameters:
            gci: the glottal closure instants
            fs: the sampling rate
            uvdur: maximum duration (ms) between two GCI to classify the
                   span as VOI
            sildur: minimum duration (ms) without GCI to classify the
                    segment as SIL
        '''
        stime = 0#start time
        etime = 0#end time
        ptime = stime#to keep track of previous time
        wtime = (gci.size * 1000.0)/fs#total time of the wave file
        wlab = []#array containing the time stamps of the wave file
        i = 0
        vflag = False#to keep track of voiced regions
        while i < gci.size:
            if gci[i] > 0:
                etime = (i * 1000.0)/fs
                if (etime - ptime) < uvdur:#to check whether VOICED or not
                    ptime = etime          #if its more than uvdur then it is UNVOICED
                    vflag = True
                elif (etime - ptime) > sildur:#to tag it as SILENCE
                    if vflag:
                        wlab.append((stime,ptime,'VOI'))
                        vflag = False
                    wlab.append((ptime,etime,'SIL'))
                    stime = etime
                    ptime = stime
                else:#to tag it as UNVOICED
                    if vflag:
                        wlab.append((stime,ptime,'VOI'))
                        vflag = False
                    wlab.append((ptime,etime,'UNV'))
                    stime = etime
                    ptime = stime
            i += 1
        #fixing the trailing tags
        #one assumption made is that the trailing speech most likely
        #contains SILENCE
        if etime != wtime:
            if vflag:
                wlab.append((stime,etime,'VOI'))
            wlab.append((etime,wtime,'SIL'))
        return wlab
class LPC:
'''
Following class consists functions relating to Linear Prediction
Analysis.
The methodology adopted is as given in Linear Prediction:
A Tutorial Review by <NAME>.
This is a preliminary version and the funtions are bound
to change.
Following are the functions present
1. lpc: to calculate the linear prediction coefficients
2. lpcfeats: to extract the linear prediction coefficients
from a wav signal
3. lpcc: to calculate the cepstral coefficients
4. lpccfeats: to extract the cepstral coefficients from a
wav signal
5. lpresidual: to obtain the lp residual signal.
6. plotSpec: to plot the power spectrum of a signal and its
lp spectrum
'''
    def __init__(self):
        # LPC is stateless; all work happens in the analysis methods.
        pass
def __autocor(self,sig,lporder):
'''
To calculate the AutoCorrelation Matrix from the input signal.
Auto Correlation matrix is defined as follows:
N-i-1
r[i] = sum s[n]s[n+i] for all n
n
Following are the input parameters:
sig: input speech signal
lporder: self explanatory
'''
r = numpy.zeros(lporder + 1)
for i in range(0,lporder + 1):
for n in range(0,len(sig) - i):
r[i] += (sig[n] * sig[n+i])
return r
    def lpc(self,sig,lporder):
        '''
        Levinson-Durbin recursion for the LP coefficients; see
        Linear Prediction: A Tutorial Review (Makhoul).

        Parameters:
            sig: input signal (one fixed, windowed frame)
            lporder: LP order p

        Returns:
            1. G: gain (r[0] + sum a[i] r[i], the prediction error energy)
            2. nE: normalized error E[p]/E[0]
            3. a: LP coefficients, a[0] == 1

        The update order below (k, a, b copies, E) is the textbook
        recursion and must not be reordered.
        '''
        r = self.__autocor(sig,lporder) #Autocorrelation coefficients
        a = numpy.zeros(lporder + 1) #to store the a(k)
        b = numpy.zeros(lporder + 1) #to store the previous values of a(k)
        k = numpy.zeros(lporder + 1) #PARCOR coefficients
        E = numpy.zeros(lporder + 1)
        E[0] = r[0] #Energy of the signal
        for i in range(1,lporder+1):
            Sum = 0.0
            for j in range(1,i):
                Sum += (a[j] * r[i-j])
            k[i] = -(r[i] + Sum)/E[i-1]  # reflection coefficient for order i
            a[i] = k[i]
            for j in range(1,i):
                b[j] = a[j]              # snapshot of order-(i-1) coefficients
            for j in range(1,i):
                a[j] = b[j] + (k[i] * b[i-j])
            E[i] = (1.0 - (k[i]**2)) * E[i-1]
        a[0] = 1
        nE = E[lporder]/E[0] #normalized error
        G = r[0] #gain parameter
        for i in range(1,lporder+1):
            G += a[i] * r[i]
        return G,nE,a #G is the gain
def lpcfeats(self,sig,fs,wlent,wsht,lporder):
'''
Extract the LPC features from the wave file.
Following are the input parameters
sig: input speech signal
fs: its sampling frequency
wlent: window length in milli seconds
wsht: window shift in milli seconds.
lporder: self explanatory
Function returnes the following
1. G: Gain for each frame
2. nE: Normalized error for each frame
3. LP coefficients for each frame.
'''
wlenf = (wlent * fs)/1000
wshf = (wsht * fs)/1000
sig = numpy.append(sig[0],numpy.diff(sig))
sig = sig + 0.001
noFrames = int((len(sig) - wlenf)/wshf) + 1
lpcs = [] #to store the lp coefficients
nEs = [] #normalized errors
Gs = [] #gain values
for i in range(0,noFrames):
index = i * wshf
window_signal = sig[index:index+wlenf]
smooth_signal = window_signal * numpy.hamming(len(window_signal))
G,nE,a = self.lpc(smooth_signal,lporder)
lpcs.append(a)
nEs.append(nE)
Gs.append(G)
return numpy.array(Gs),numpy.array(nEs),numpy.array(lpcs)
def __ersignal(self,sig,a):
'''
Returns the error signal provided a set of LP
coefficients. Error computation is as follows
e[n] = s[n] + sum a[k]s[n-k] for k = 1,2,..p
k
Following are the input parameters
1. sig: input signal. Consider using rectangular
windowed signal, even though the LP
coefficients were computed on hamming windowed
signal
2. a: LP coefficients.
'''
residual = numpy.zeros(len(sig))
for i in range(0,len(sig)):
for k in range(0,len(a)):
if (i-k) >= 0:
residual[i] += a[k]*sig[i-k]
return residual
def lpresidual(self,sig,fs,wlent,wsht,lporder):
'''
Computes the LP residual for a given speech signal.
Signal is windowed using hamming window and LP
coefficients are computed. Using these LP coefficients
and the original signal residual for each frame of
window length wlent is computed (see function error_signal)
Following are the input parameters:
1. sig: input speech signal
2. fs: sampling rate of the signal
3. wlent: window length in milli seconds
4. wsht: window shift in milli seconds
5. lporder: self explanatory.
'''
wlenf = (wlent * fs)/1000
wshf = (wsht * fs)/1000
sig = numpy.append(sig[0],numpy.diff(sig)) #to remove the dc
sig = sig + 0.0001 #to make sure that there are no zeros in the signal
noFrames = int((len(sig) - wlenf)/wshf) + 1
residual_signal = numpy.zeros(len(sig))
residual_index = 0
for i in range(0,noFrames):
index = i * wshf
window_signal = sig[index:index+wlenf]
smooth_signal = window_signal * numpy.hamming(len(window_signal))
G,nE,a = self.lpc(smooth_signal,lporder)
er = self.__ersignal(window_signal,a)
for i in range(0,wshf):
residual_signal[residual_index] = er[i]
residual_index += 1
return residual_signal
def lpcc(self,G,a,corder,L=0):
"""
Following function returns the cepstral coefficients.
Following are the input parameters
1. G is the gain (energy of the signal)
2. a are the lp coefficients
3. corder is the cepstral order
4. L (ceplifter) to lift the cepstral values
The output of the function will the set of cepstral (optional)
coefficients with the first value log(G)
So the number of cepstral coefficients will one more than
the corder.
For the liftering process two procedures were implemented
1. Sinusoidal
c[m] = (1+(L/2)sin(pi*n/L))c[m]
2. Linear Weighting
By default it gives linear weighted cepstral coefficients.
To obtain raw cepstral coefficients input L=None.
For any other value of L it performs sinusoidal liftering.
Note that number of cepstral coefficients can be more than
lporder. Generally it is suggested that corder = (3/2)lporder
"""
c = numpy.zeros(corder+1)
c[0] = numpy.log(G)
p = len(a) -1 #lp order + 1, a[0] = 1
if corder <= p: #calculating if the corder is less than the lp order
for m in range(1,corder+1):
c[m] = a[m]
for k in range(1,m):
c[m] -= (float(k)/m) * c[k] * a[m-k]
else:
for m in range(1,p+1):
c[m] = a[m]
for k in range(1,m):
c[m] -= (float(k)/m) * c[k] * a[m-k]
for m in range(p+1,corder+1):
for k in range((m-p),m):
c[m] -= (float(k)/m) * c[k] * a[m-k]
if L == None: #returning raw cepstral coefficients
return c
elif L == 0: #returning linear weighted cepstral coefficients
for i in range(1,corder+1):
c[i] = c[i] * i
return c
#cep liftering process as given in HTK
for i in range(1,corder+1):
c[i] = (1 + (float(L)/2) * numpy.sin((numpy.pi * i)/L)) * c[i]
return c
def lpccfeats(self,sig,fs,wlent,wsht,lporder,corder,L=0):
'''
Computes the LPCC coefficients from the wave file.
Following are the input parameters
sig: input speech signal
fs: its sampling frequency
wlent: window length in milli seconds
wsht: window shift in milli seconds.
lporder: self explanatory
corder: no. of cepstral coefficients
(read the lpcc documentation for the description about
the features)
L(ceplifter) to lift the cepstral coefficients. Read the
documentation for the LPCC for the liftering process
Function returns the following
1. G: Gain for each of the frames
2. nE: Normalized errors for each of the frames
3. LP Coefficients for each of the frames
4. LP Cepstral Coefficients for each of the frames
'''
wlenf = (wlent * fs)/1000
wshf = (wsht * fs)/1000
sig = numpy.append(sig[0],numpy.diff(sig)) #to remove dc
sig = sig + 0.001 #making sure that there are no zeros in the signal
noFrames = int((len(sig) - wlenf)/wshf) + 1
lpcs = [] #to store the lp coefficients
lpccs = []
nEs = [] #normalized errors
Gs = [] #gain values
for i in range(0,noFrames):
index = i * wshf
window_signal = sig[index:index+wlenf]
smooth_signal = window_signal * numpy.hamming(len(window_signal))
G,nE,a = self.lpc(smooth_signal,lporder)
c = self.lpcc(G,a,corder,L)
lpcs.append(a)
lpccs.append(c)
nEs.append(nE)
Gs.append(G)
return numpy.array(Gs),numpy.array(nEs),numpy.array(lpcs),numpy.array(lpccs)
def plotSpec(self,sig,G,a,res=0):
"""
The following function plots the power spectrum of the wave
signal along with the lp spectrum. This function is primary
to analyse the lp spectrum
Input for the function is as follows:
sig is the input signal
G is the gain
a are the lp coefficients
res is the resolution factor which tell the number of zeros
to be appended before performing fft on the inverse filter
Following funcion provides the power spectrum for the following
Power spectrum of the signal
s[n] <-------> S(w)
P(w) = 20 * log(|S(w)|)
P'(w) = 20 * log(G /|A(w)|)
where A(w) is the inverse filter and is defined as follows
p -jkw
A(z) = 1 + SUMMATION a[k] * e
k=1
The xaxis in the plots give the frequencies in the rage 0-1,
where 1 represents the nyquist frequency
"""
for i in range(0,res):
a = numpy.insert(a,-1,0) #appending zeros for better resolution
fftA = numpy.abs(numpy.fft.fft(a))
Gs = numpy.ones(len(a)) * G
P1 = 10 * numpy.log10(Gs/fftA)
P1 = P1[0:len(P1)/2] #power spectrum of the lp spectrum
P = 10 * numpy.log10(numpy.abs(numpy.fft.fft(sig))) #power spectrum of the signal
P = P[:len(P)/2]
x = numpy.arange(0,len(P))
x = x/float(max(x))
matplotlib.pyplot.subplot(2,1,1)
matplotlib.pyplot.title('Power Spectrum of the Signal')
matplotlib.pyplot.plot(x,P)
matplotlib.pyplot.xlabel('Frequency')
matplotlib.pyplot.ylabel('Amplitude (dB)')
matplotlib.pyplot.subplot(2,1,2)
matplotlib.pyplot.title('LP Spectrum of the Signal')
matplotlib.pyplot.plot(x,P1)
matplotlib.pyplot.xlabel('Frequency')
matplotlib.pyplot.ylabel('Amplitude (dB)')
matplotlib.pyplot.show()
| StarcoderdataPython |
1602587 | <filename>metr/tests/test_db.py<gh_stars>0
import os
import uuid
import pytest
import sqlite3
from metr.db import migrate, get_points, set_point
TEST_DB = '/tmp/test-db-' + str(uuid.uuid4()) + '.sqlite'
@pytest.fixture()
def conn():
    """Yield a sqlite3 connection to a freshly migrated scratch database.

    Teardown closes the connection and deletes the database file.
    """
    connection = sqlite3.connect(TEST_DB)
    migrate(connection)
    yield connection
    print("teardown db")
    connection.close()
    os.remove(TEST_DB)
def test_get_points(conn):
    """get_points returns [] for unknown metrics and (created, value) rows otherwise."""
    assert [] == get_points(conn, 'unknown')
    metric = 'test_get_points'
    with conn:
        conn.execute('insert into metric values (null, ?)', [metric])
    assert [] == get_points(conn, metric)
    with conn:
        for statement in ('insert into point values (null, 1, 1, "2017-01-01")',
                          'insert into point values (null, 1, 2, "2017-01-01")',
                          'insert into point values (null, 1, 3, "2017-01-01")'):
            conn.execute(statement)
    points = get_points(conn, metric)
    assert 3 == len(points)
    assert ['1', '2', '3'] == [value for _, value in points]
    assert ['2017-01-01'] * 3 == [created for created, _ in points]
def test_set_point(conn):
    """set_point creates the metric row on first use and stores the point."""
    metric = 'abc'
    with conn:
        missing = conn.execute('select * from metric where name = ?', [metric])
        assert missing.fetchone() is None
    set_point(conn, metric, '1', '2017-01-01')
    with conn:
        row = conn.execute('select name from metric where name = ?', [metric])
        assert (metric,) == row.fetchone()
        points = conn.execute('select value, created '
                              'from point where metric_id = 1')
        assert [('1', '2017-01-01 00:00:00')] == points.fetchall()
| StarcoderdataPython |
165337 | <filename>ondevice/core/exception.py
""" Some exception classes for the ondevice client """
class _Exception(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args)
self.msg = args[0]
for k,v in kwargs.items():
setattr(self, k, v)
class ConfigurationError(_Exception):
    """ Indicates a missing/faulty configuration value (e.g. in the client config file) """
    pass
class ImplementationError(_Exception):
    """ Indicates implementation issues with a command or module (a programming error, not user error) """
    pass
class TransportError(_Exception):
    """ Indicates a communication error with the server (network/protocol level) """
    pass
class UsageError(_Exception):
    """ Indicates issues with the commandline usage (unknown command, unsupported argument, etc.) """
    pass
| StarcoderdataPython |
3289910 | <reponame>joyfulflyer/billboard-reader
import sys
import billboard
import sqlite3
import datetime
import time
def connect():
    """Open (creating if necessary) the charts.db sqlite database."""
    print('connecting to db')
    sys.stdout.flush()
    return sqlite3.connect('charts.db')
def getCursor(conn):
    """Flush stdout (so buffered logs show up) and return a fresh cursor."""
    sys.stdout.flush()
    return conn.cursor()
def dropTables(conn):
    """Drop the songs, charts and entries tables if they exist."""
    cursor = conn.cursor()
    print('dropping table')
    for statement in (''' DROP TABLE IF EXISTS songs''',
                      ''' DROP TABLE IF EXISTS charts ''',
                      ''' DROP TABLE IF EXISTS entries '''):
        cursor.execute(statement)
    sys.stdout.flush()
def connectAndCreate():
    # Convenience wrapper: open charts.db and make sure the schema exists.
    conn = connect()
    createTables(conn)
    return conn
def createTables(conn):
    """Create the entries, charts and songs tables (no-op if already present)."""
    cursor = conn.cursor()
    print("creating table")
    ddl = (''' CREATE TABLE IF NOT EXISTS entries
                (id integer primary key,
                name text,
                artist text,
                place integer,
                peak_position integer,
                last_position integer,
                weeks_on_chart integer,
                chart_id integer,
                song_id integer) ''',
           ''' CREATE TABLE IF NOT EXISTS charts
                (id integer primary key,
                type text,
                date_string text unique) ''',
           ''' CREATE TABLE IF NOT EXISTS songs
                (id integer primary key,
                name text,
                artist text)''')
    for statement in ddl:
        cursor.execute(statement)
    conn.commit()
    sys.stdout.flush()
def getInitialChart():
    """Fetch the current hot-100 chart from billboard.com."""
    return billboard.ChartData('hot-100',
                               fetch=True, timeout=30)
def scrapeDataFromChartIntoConnection(chart, conn):
    # Walk charts backwards in time, saving each one, stopping once a chart
    # dated in 2017 is reached (that chart itself is not saved).
    while chart.previousDate and "2017" not in chart.date:
        saveChart(chart, conn)
        chart = billboard.ChartData('hot-100', chart.previousDate, timeout=45)
def saveChart(chart, conn):
    """Persist one chart and its entries.

    Silently returns when a chart with the same date_string is already
    stored (the column has a UNIQUE constraint).
    """
    c = conn.cursor()
    try:
        c.execute(''' INSERT INTO charts(type, date_string)
                      VALUES (?, ?) ''', ("hot-100", chart.date))
    except sqlite3.IntegrityError:
        # Chart for this date already saved; nothing to do.
        return
    rowId = c.lastrowid
    # The enumerate() index was unused -- iterate the entries directly.
    for entry in chart.entries:
        c.execute(''' INSERT INTO entries(
                      name, artist, place, peak_position,
                      last_position, weeks_on_chart, chart_id)
                      VALUES (?, ?, ?, ?, ?, ?, ?) ''',
                  (entry.title, entry.artist, entry.rank,
                   entry.peakPos, entry.lastPos, entry.weeks,
                   rowId))
    conn.commit()
def crawlEntriesForSongs(cursor):
    """Return all entry rows matching the first distinct (name, artist) pair.

    NOTE(review): despite its name this only inspects one song (fetchone()
    on the DISTINCT query) and raises TypeError on an empty entries table;
    behavior kept as-is apart from dropping the unused `songs` accumulator.
    """
    # first distinct name/artist combo
    entries = cursor.execute(''' SELECT DISTINCT name, artist FROM entries ''').fetchone()
    entry = cursor.execute(''' SELECT * FROM entries WHERE name = ? AND artist = ? ''', (entries[0], entries[1])).fetchall()
    return entry
def doesDatabaseContainDate(date, conn):
    """Return True when a chart with the given date_string is already stored."""
    cursor = conn.cursor()
    row = cursor.execute(''' SELECT count(*) from charts
                             WHERE date_string IS ? ''', (date,)).fetchone()
    return row[0] > 0
def scrapeDataForYear(year, conn, onYearDone):
    """Download and store every hot-100 chart of `year`, newest first.

    Calls `onYearDone` after each saved chart and sleeps between requests
    to throttle the scraping.
    """
    chart = billboard.ChartData('hot-100', date=getFinalDate(year),
                                fetch=True, timeout=30)
    stopYear = getPreviousYear(year)
    while chart.previousDate and stopYear not in chart.date:
        saveChart(chart, conn)
        onYearDone()
        time.sleep(10)
        chart = billboard.ChartData('hot-100', chart.previousDate, timeout=45)
def getFinalDate(year):
    """Return the last chart date to fetch for `year`, as "YYYY-MM-DD".

    For the current year this is one week before today (the newest complete
    chart); for past years it is December 31st.  Accepts `year` as an int
    or a numeric string -- callers pass both, and the old `year == now.year`
    comparison silently never matched for string input.
    """
    now = datetime.date.today()
    delt = datetime.timedelta(weeks=1)
    then = now - delt
    if int(year) == now.year:
        finalDate = "{}-{:0>2}-{:0>2}".format(then.year, then.month, then.day)
    else:
        finalDate = str(year) + '-12-31'
    return finalDate
def getPreviousYear(year):
    """Return the year before `year`, as a string."""
    previous = int(year) - 1
    return str(previous)
def hasData(conn):
    """Return True when at least one row exists in the songs table.

    Bug fix: the old code did `len(list(data)) is not 0` on the result of
    SELECT count(*), which always yields exactly one row -- so it returned
    True even for an empty table (and compared ints with `is`).  We now
    check the actual count value.
    """
    c = conn.cursor()
    c.execute(''' SELECT count(*) FROM songs ''')
    (count,) = c.fetchone()
    return count != 0
def getSavedSongsFromConnection(conn):
    """Return every row of the songs table for an existing connection."""
    return conn.cursor().execute('SELECT * FROM songs').fetchall()
def getAllSavedSongs():
    """Open charts.db and return every songs row as a list.

    Assumes the table is of an acceptable size to hold in memory.
    """
    connection = connect()
    rows = connection.cursor().execute('SELECT * FROM songs').fetchall()
    connection.close()
    return rows
128819 | <filename>23/aoc23-2-parallel.py<gh_stars>0
import pyximport
pyximport.install()  # compile the Cython module below on import
from aoc23 import do_it_parallel
# Read the puzzle board, strip trailing newlines, and run the solver.
with open("data.sample2.txt", "r") as fh:
    board = fh.readlines()
board = [i.rstrip() for i in board]
do_it_parallel(board)
| StarcoderdataPython |
3327851 | <gh_stars>1-10
import pytest
import sys
import logging
from pypika_orm import Manager, Model, fields
# Connection URL + driver options for every database backend under test,
# keyed by driver name.  Each value is (dsn, driver_kwargs).
BACKEND_PARAMS = {
    'aiomysql': ('aiomysql://root@127.0.0.1:3306/tests', {'maxsize': 2, 'autocommit': True}),
    'aiopg': ('aiopg://test:test@localhost:5432/tests', {'maxsize': 2}),
    'aiosqlite': ('aiosqlite:////tmp/aio-db-test.sqlite', {'convert_params': True}),
    'asyncpg': ('asyncpg://test:test@localhost:5432/tests', {
        'min_size': 2, 'max_size': 2, 'convert_params': True}),
    'trio-mysql': ('trio-mysql://root@127.0.0.1:3306/tests', {}),
    # there is a separate test for triopg
    # 'triopg': ('triopg://test:test@localhost:5432/tests', {'min_size': 2, 'max_size': 2}),
    # Doesnt supports python 3.9
    # 'aioodbc': ('aioodbc://localhost', {
    #     'dsn': 'Driver=/usr/local/lib/libsqlite3odbc.dylib;Database=db.sqlite',
    #     'db_type': 'sqlite', 'maxsize': 2, 'minsize': 1,
    # }),
}
@pytest.fixture(scope='session', params=[
    pytest.param(('asyncio', {'use_uvloop': False}), id='asyncio'),
    # pytest.param(('trio', {'trio_asyncio': True}), id='trio'),
    'trio'
])
def aiolib(request):
    """Parametrize tests over async libraries (asyncio and trio).

    uvloop is disabled for asyncio because it breaks debugger breakpoints.
    """
    return request.param
@pytest.fixture(scope='session', params=[name for name in BACKEND_PARAMS])
def backend(request):
    """Parametrize tests over every configured backend driver name."""
    return request.param
@pytest.fixture
def db(backend, aiolib):
    """Build an (unopened) Database for the backend/async-lib combination.

    Skips combinations that a driver does not support.
    """
    from aio_databases import Database
    # 2021-10-18 aiomysql doesnt support python 3.10
    if backend == 'aiomysql' and sys.version_info >= (3, 10):
        return pytest.skip()
    # Only run each driver on the event-loop library it supports.
    if aiolib[0] == 'trio' and backend not in {'trio-mysql', 'triopg'}:
        return pytest.skip()
    if aiolib[0] == 'asyncio' and backend not in {'aiomysql', 'aiopg', 'aiosqlite', 'asyncpg'}:
        return pytest.skip()
    url, params = BACKEND_PARAMS[backend]
    return Database(url, **params)
@pytest.fixture
async def pool(db):
    """Yield the database with its connection pool opened (closed on teardown)."""
    async with db:
        yield db
@pytest.fixture(scope='session', autouse=True)
def setup_logging():
    """Enable debug logging for the library under test for the whole session."""
    logger = logging.getLogger('aio-databases')
    logger.setLevel(logging.DEBUG)
@pytest.fixture
def manager(db):
    """Return a query Manager using the current backend's SQL dialect."""
    return Manager(dialect=db.backend.db_type)
@pytest.fixture
def User():
    """Return a simple test model with an auto id and two varchar columns."""
    class User(Model):
        id = fields.Auto()
        name = fields.Varchar()
        fullname = fields.Varchar()

    return User
@pytest.fixture
def Comment(User):
    """Return a test model with a foreign key onto the User fixture's model."""
    class Comment(Model):
        id = fields.Auto()
        body = fields.Varchar()
        user_id = fields.ForeignKey(User.id)

    return Comment
| StarcoderdataPython |
1700349 | <gh_stars>0
#!/usr/bin/python3
import re, sys
from matplotlib import pyplot
plotSlope = False
def centeredMovingAverage(values, order):
    """Return the centered moving average of `values` with odd window `order`.

    Windows are truncated at the edges (only existing samples are averaged),
    so the result has the same length as the input.

    Raises ValueError when `order` is even.
    """
    if (order % 2) != 1:
        raise ValueError('Order has to be an odd number!')
    half = int(order / 2)
    result = []
    for i in range(len(values)):
        s = values[i]
        n = 1
        # Bug fix: the old inner loop started at offset 0, which added
        # values[i] (already counted above) a second -- and third -- time.
        # Neighbour offsets must run from 1 to order//2 inclusive.
        for j in range(1, half + 1):
            if (i - j) >= 0:
                s += values[i - j]
                n += 1
            if (i + j) < len(values):
                s += values[i + j]
                n += 1
        result.append(s / n)
    return result
# Parse the log file given on the command line: keep only lines that are a
# bare decimal number (one temperature sample per line).
values = []
first = True
with open(sys.argv[1], 'r') as f:
    for l in f:
        if re.match(r'^[0-9]+\.[0-9]+$', l) is not None:
            if first:
                first = False
                # Skip first value, it may be corrupted.
            else:
                val = float(l)
                values.append(val)
if plotSlope:
    # First differences; slope[0] is copied from slope[1] so the series
    # starts with a sensible value.
    slope = [0]
    for i in range(len(values)):
        if i != 0:
            slope.append(values[i] - values[i-1])
        if i == 1:
            slope[0] = slope[1]
if plotSlope:
    pyplot.subplot(2, 1, 1)
pyplot.plot(values)
pyplot.grid(which='both')
pyplot.xlabel('time (seconds)')
pyplot.ylabel('temperature (centigrade)')
if plotSlope:
    pyplot.subplot(2, 1, 2)
    pyplot.plot(slope)
    pyplot.grid(which='both')
    pyplot.xlabel('time (seconds)')
    pyplot.ylabel('temperature slope (K/s)')
pyplot.show()
| StarcoderdataPython |
89020 | from getratings.models.ratings import Ratings
# Auto-generated matchup tables: one (empty) Ratings subclass per opposing
# champion for Quinn played Top on NA.  Instead of 138 literal
# `class NA_Quinn_Top_X(Ratings): pass` statements, create the identical
# classes in a loop and bind them under their original module-level names.
_QUINN_TOP_OPPONENTS = (
    "Aatrox", "Ahri", "Akali", "Alistar", "Amumu", "Anivia", "Annie",
    "Ashe", "AurelionSol", "Azir", "Bard", "Blitzcrank", "Brand", "Braum",
    "Caitlyn", "Camille", "Cassiopeia", "Chogath", "Corki", "Darius",
    "Diana", "Draven", "DrMundo", "Ekko", "Elise", "Evelynn", "Ezreal",
    "Fiddlesticks", "Fiora", "Fizz", "Galio", "Gangplank", "Garen", "Gnar",
    "Gragas", "Graves", "Hecarim", "Heimerdinger", "Illaoi", "Irelia",
    "Ivern", "Janna", "JarvanIV", "Jax", "Jayce", "Jhin", "Jinx",
    "Kalista", "Karma", "Karthus", "Kassadin", "Katarina", "Kayle", "Kayn",
    "Kennen", "Khazix", "Kindred", "Kled", "KogMaw", "Leblanc", "LeeSin",
    "Leona", "Lissandra", "Lucian", "Lulu", "Lux", "Malphite", "Malzahar",
    "Maokai", "MasterYi", "MissFortune", "MonkeyKing", "Mordekaiser",
    "Morgana", "Nami", "Nasus", "Nautilus", "Nidalee", "Nocturne", "Nunu",
    "Olaf", "Orianna", "Ornn", "Pantheon", "Poppy", "Quinn", "Rakan",
    "Rammus", "RekSai", "Renekton", "Rengar", "Riven", "Rumble", "Ryze",
    "Sejuani", "Shaco", "Shen", "Shyvana", "Singed", "Sion", "Sivir",
    "Skarner", "Sona", "Soraka", "Swain", "Syndra", "TahmKench", "Taliyah",
    "Talon", "Taric", "Teemo", "Thresh", "Tristana", "Trundle",
    "Tryndamere", "TwistedFate", "Twitch", "Udyr", "Urgot", "Varus",
    "Vayne", "Veigar", "Velkoz", "Vi", "Viktor", "Vladimir", "Volibear",
    "Warwick", "Xayah", "Xerath", "XinZhao", "Yasuo", "Yorick", "Zac",
    "Zed", "Ziggs", "Zilean", "Zyra",
)
for _opponent in _QUINN_TOP_OPPONENTS:
    _class_name = "NA_Quinn_Top_" + _opponent
    globals()[_class_name] = type(_class_name, (Ratings,), {})
del _opponent, _class_name
| StarcoderdataPython |
1619130 | <gh_stars>1-10
""" This module defines the class QueryPharos which connects to APIs at
http://www.uniprot.org/uploadlists/, querying reactome pathways from uniprot id.
"""
__author__ = ""
__copyright__ = ""
__credits__ = []
__license__ = ""
__version__ = ""
__maintainer__ = ""
__email__ = ""
__status__ = "Prototype"
import requests
import CachedMethods
class QueryUniprot:
    """Static helpers that query uniprot.org's ID-mapping service."""

    API_BASE_URL = "http://www.uniprot.org/uploadlists/"

    @staticmethod
    @CachedMethods.register
    def uniprot_id_to_reactome_pathways(uniprot_id):
        """returns a ``set`` of reactome IDs of pathways associated with a given string uniprot ID

        :param uniprot_id: a ``str`` uniprot ID, like ``"P68871"``
        :returns: a ``set`` of string Reactome IDs
        """
        payload = {'from': 'ACC',
                   'to': 'REACTOME_ID',
                   'format': 'tab',
                   'query': uniprot_id}
        contact = "<EMAIL>"
        header = {'User-Agent': 'Python %s' % contact}
        res = requests.post(QueryUniprot.API_BASE_URL, data=payload, headers=header)
        assert 200 == res.status_code
        # Responses are tab-separated "From\tTo" pairs; the second column is
        # the mapped Reactome ID, and the header row's literal "To" is skipped.
        return {line.split("\t")[1]
                for line in res.text.splitlines()
                if line.split("\t")[1] != "To"}
if __name__ == '__main__':
    # Ad-hoc smoke test: look up pathways for three uniprot IDs (network
    # calls) and print the cache statistics afterwards.
    print(QueryUniprot.uniprot_id_to_reactome_pathways("P68871"))
    print(QueryUniprot.uniprot_id_to_reactome_pathways("Q16621"))
    print(QueryUniprot.uniprot_id_to_reactome_pathways("P09601"))
    print(CachedMethods.cache_info())
| StarcoderdataPython |
class Node:
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, val):
        self.val = val
        self.ln = None  # left child
        self.rn = None  # right child

    def __repr__(self):
        return f"Node=({self.val}, ln={self.ln}, rn={self.rn})"
def get_bfs_alt(root, level, level_dict):
    """Collect node values per depth via a preorder DFS.

    `level_dict` maps depth -> list of values in left-to-right visit order;
    it is mutated in place.
    """
    if not root:
        return
    # setdefault replaces the manual "if level not in dict" dance.
    level_dict.setdefault(level, []).append(root.val)
    get_bfs_alt(root.ln, level + 1, level_dict)
    get_bfs_alt(root.rn, level + 1, level_dict)
def get_boustrophedon(root):
    """Return node values level by level, reversing direction on odd levels."""
    by_level = {}
    get_bfs_alt(root, 0, by_level)
    ordered = []
    for depth in range(len(by_level)):
        row = by_level[depth]
        ordered.extend(row[::-1] if depth % 2 else row)
    return ordered
# Tests: a full tree
#        1
#      2   3
#     4 5 6 7
# expected zigzag order: level 0 left-to-right, level 1 reversed, level 2 forward.
n1 = Node(1)
n2 = Node(2)
n3 = Node(3)
n4 = Node(4)
n5 = Node(5)
n6 = Node(6)
n7 = Node(7)
n2.ln = n4
n2.rn = n5
n3.ln = n6
n3.rn = n7
n1.ln = n2
n1.rn = n3
assert get_boustrophedon(n1) == [1, 3, 2, 4, 5, 6, 7]
| StarcoderdataPython |
3357243 | # Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
*Projects* assigned this availability type have binary decision variables
for their availability in each timepoint. This type can be useful in
optimizing planned outage schedules. A *project* of this type is constrained
to be unavailable for at least a pre-specified number of hours in each
*period*. In addition, each unavailability event can be constrained to be
within a minimum and maximum number of hours, and constraints can also be
implemented on the minimum and maximum duration between unavailability events.
"""
import csv
import os.path
from pyomo.environ import Param, Set, Var, Constraint, Binary, value, \
NonNegativeReals
from gridpath.auxiliary.auxiliary import cursor_to_df
from gridpath.auxiliary.validations import write_validation_to_database, \
get_expected_dtypes, validate_dtypes, validate_missing_inputs, \
validate_column_monotonicity
from gridpath.project.availability.availability_types.common_functions import \
insert_availability_results
from gridpath.project.operations.operational_types.common_functions import \
determine_relevant_timepoints
from gridpath.project.common_functions import determine_project_subset,\
check_if_boundary_type_and_first_timepoint
def add_model_components(m, d, scenario_directory, subproblem, stage):
    """
    The following Pyomo model components are defined in this module:

    +-------------------------------------------------------------------------+
    | Sets                                                                    |
    +=========================================================================+
    | | :code:`AVL_BIN`                                                       |
    |                                                                         |
    | The set of projects of the :code:`binary` availability type.            |
    +-------------------------------------------------------------------------+
    | | :code:`AVL_BIN_OPR_PRDS`                                              |
    |                                                                         |
    | Two-dimensional set with projects of the :code:`binary` availability    |
    | type and their operational periods.                                     |
    +-------------------------------------------------------------------------+
    | | :code:`AVL_BIN_OPR_TMPS`                                              |
    |                                                                         |
    | Two-dimensional set with projects of the :code:`binary` availability    |
    | type and their operational timepoints.                                  |
    +-------------------------------------------------------------------------+

    +-------------------------------------------------------------------------+
    | Required Input Params                                                   |
    +=========================================================================+
    | | :code:`avl_bin_unavl_hrs_per_prd`                                     |
    | | *Defined over*: :code:`AVL_BIN`                                       |
    | | *Within*: :code:`NonNegativeReals`                                    |
    |                                                                         |
    | The number of hours the project must be unavailable per period.         |
    +-------------------------------------------------------------------------+
    | | :code:`avl_bin_min_unavl_hrs_per_event`                               |
    | | *Defined over*: :code:`AVL_BIN`                                       |
    | | *Within*: :code:`NonNegativeReals`                                    |
    |                                                                         |
    | The minimum number of hours an unavailability event should last for.    |
    +-------------------------------------------------------------------------+
    | | :code:`avl_bin_min_avl_hrs_between_events`                            |
    | | *Defined over*: :code:`AVL_BIN`                                       |
    | | *Within*: :code:`NonNegativeReals`                                    |
    |                                                                         |
    | The minimum number of hours a project should be available between       |
    | unavailability events.                                                  |
    +-------------------------------------------------------------------------+

    +-------------------------------------------------------------------------+
    | Variables                                                               |
    +=========================================================================+
    | | :code:`AvlBin_Unavailable`                                            |
    | | *Defined over*: :code:`AVL_BIN_OPR_TMPS`                              |
    | | *Within*: :code:`Binary`                                              |
    |                                                                         |
    | Binary decision variable that specifies whether the project is          |
    | unavailable or not in each operational timepoint (1=unavailable).       |
    +-------------------------------------------------------------------------+
    | | :code:`AvlBin_Start_Unavailability`                                   |
    | | *Defined over*: :code:`AVL_BIN_OPR_TMPS`                              |
    | | *Within*: :code:`Binary`                                              |
    |                                                                         |
    | Binary decision variable that designates the start of an unavailability |
    | event (when the project goes from available to unavailable.             |
    +-------------------------------------------------------------------------+
    | | :code:`AvlBin_Stop_Unavailability`                                    |
    | | *Defined over*: :code:`AVL_BIN_OPR_TMPS`                              |
    | | *Within*: :code:`Binary`                                              |
    |                                                                         |
    | Binary decision variable that designates the end of an unavailability   |
    | event (when the project goes from unavailable to available.             |
    +-------------------------------------------------------------------------+

    +-------------------------------------------------------------------------+
    | Constraints                                                             |
    +=========================================================================+
    | | :code:`AvlBin_Tot_Sched_Unavl_per_Prd_Constraint`                     |
    | | *Defined over*: :code:`AVL_BIN_OPR_PRDS`                              |
    |                                                                         |
    | The project must be unavailable for :code:`avl_bin_unavl_hrs_per_prd`   |
    | hours in each period.                                                   |
    +-------------------------------------------------------------------------+
    | | :code:`AvlBin_Unavl_Start_and_Stop_Constraint`                        |
    | | *Defined over*: :code:`AVL_BIN_OPR_TMPS`                              |
    |                                                                         |
    | Link the three binary variables in each timepoint such that             |
    | :code:`AvlBin_Start_Unavailability` is 1 if the project goes from       |
    | available to unavailable, and :code:`AvlBin_Stop_Unavailability` is 1   |
    | if the project goes from unavailable to available.                      |
    +-------------------------------------------------------------------------+
    | | :code:`AvlBin_Min_Event_Duration_Constraint`                          |
    | | *Defined over*: :code:`AVL_BIN_OPR_TMPS`                              |
    |                                                                         |
    | The duration of each unavailability event should be larger than or      |
    | equal to :code:`avl_bin_min_unavl_hrs_per_event` hours.                 |
    +-------------------------------------------------------------------------+
    | | :code:`AvlBin_Min_Time_Between_Events_Constraint`                     |
    | | *Defined over*: :code:`AVL_BIN_OPR_TMPS`                              |
    |                                                                         |
    | The time between unavailability events should be larger than or equal   |
    | to :code:`avl_bin_min_avl_hrs_between_events` hours.                    |
    +-------------------------------------------------------------------------+
    """
    # Sets
    ###########################################################################
    m.AVL_BIN = Set(within=m.PROJECTS)
    # Cross sets restricted to binary-availability projects.
    m.AVL_BIN_OPR_PRDS = Set(
        dimen=2, within=m.PRJ_OPR_PRDS,
        initialize=lambda mod: list(
            set((g, tmp) for (g, tmp) in mod.PRJ_OPR_PRDS
                if g in mod.AVL_BIN)
        )
    )
    # TODO: factor out this lambda rule, as it is used in all operational type
    #  modules and availability type modules
    m.AVL_BIN_OPR_TMPS = Set(
        dimen=2, within=m.PRJ_OPR_TMPS,
        initialize=lambda mod: list(
            set((g, tmp) for (g, tmp) in mod.PRJ_OPR_TMPS
                if g in mod.AVL_BIN)
        )
    )
    # Required Input Params
    ###########################################################################
    m.avl_bin_unavl_hrs_per_prd = Param(
        m.AVL_BIN, within=NonNegativeReals
    )
    m.avl_bin_min_unavl_hrs_per_event = Param(
        m.AVL_BIN, within=NonNegativeReals
    )
    m.avl_bin_min_avl_hrs_between_events = Param(
        m.AVL_BIN, within=NonNegativeReals
    )
    # Variables
    ###########################################################################
    m.AvlBin_Unavailable = Var(
        m.AVL_BIN_OPR_TMPS,
        within=Binary
    )
    m.AvlBin_Start_Unavailability = Var(
        m.AVL_BIN_OPR_TMPS,
        within=Binary
    )
    m.AvlBin_Stop_Unavailability = Var(
        m.AVL_BIN_OPR_TMPS,
        within=Binary
    )
    # Constraints
    ###########################################################################
    m.AvlBin_Tot_Sched_Unavl_per_Prd_Constraint = Constraint(
        m.AVL_BIN_OPR_PRDS,
        rule=total_scheduled_availability_per_period_rule
    )
    m.AvlBin_Unavl_Start_and_Stop_Constraint = Constraint(
        m.AVL_BIN_OPR_TMPS,
        rule=unavailability_start_and_stop_rule
    )
    m.AvlBin_Min_Event_Duration_Constraint = Constraint(
        m.AVL_BIN_OPR_TMPS,
        rule=event_min_duration_rule
    )
    m.AvlBin_Min_Time_Between_Events_Constraint = Constraint(
        m.AVL_BIN_OPR_TMPS,
        rule=min_time_between_events_rule
    )
# Constraint Formulation Rules
###############################################################################
def total_scheduled_availability_per_period_rule(mod, g, p):
    """
    **Constraint Name**: AvlBin_Tot_Sched_Unavl_per_Prd_Constraint
    **Enforced Over**: AVL_BIN_OPR_PRDS

    The project must be down for exactly avl_bin_unavl_hrs_per_prd hours
    in each period.

    TODO: it's possible that solve time will be faster if we make this
    constraint >= instead of ==, but then degeneracy could be an issue
    """
    unavailable_hours_in_period = sum(
        mod.AvlBin_Unavailable[g, tmp] * mod.hrs_in_tmp[tmp]
        for tmp in mod.TMPS_IN_PRD[p]
    )
    return unavailable_hours_in_period == mod.avl_bin_unavl_hrs_per_prd[g]
def unavailability_start_and_stop_rule(mod, g, tmp):
    """
    **Constraint Name**: AvlBin_Unavl_Start_and_Stop_Constraint
    **Enforced Over**: AVL_BIN_OPR_TMPS

    Tie the start/stop binaries to the change in availability status between
    the previous and current timepoint: if the project becomes unavailable
    the RHS is 1 and AvlBin_Start_Unavailability must be 1; if it becomes
    available again the RHS is -1 and AvlBin_Stop_Unavailability must be 1.
    """
    # No previous timepoint exists for a linear horizon's first timepoint.
    if check_if_boundary_type_and_first_timepoint(
        mod=mod, tmp=tmp, balancing_type=mod.balancing_type_project[g],
        boundary_type="linear"
    ):
        return Constraint.Skip
    previous_tmp = mod.prev_tmp[tmp, mod.balancing_type_project[g]]
    return (
        mod.AvlBin_Start_Unavailability[g, tmp]
        - mod.AvlBin_Stop_Unavailability[g, tmp]
        == mod.AvlBin_Unavailable[g, tmp]
        - mod.AvlBin_Unavailable[g, previous_tmp]
    )
def event_min_duration_rule(mod, g, tmp):
    """
    **Constraint Name**: AvlBin_Min_Event_Duration_Constraint
    **Enforced Over**: AVL_BIN_OPR_TMPS

    If an unavailability event started within the past
    avl_bin_min_unavl_hrs_per_event hours, the project must still be
    unavailable in the current timepoint.
    """
    window, _ = determine_relevant_timepoints(
        mod, g, tmp, mod.avl_bin_min_unavl_hrs_per_event[g]
    )
    if window == [tmp]:
        return Constraint.Skip
    starts_in_window = sum(
        mod.AvlBin_Start_Unavailability[g, tp] for tp in window
    )
    return starts_in_window <= mod.AvlBin_Unavailable[g, tmp]
def min_time_between_events_rule(mod, g, tmp):
    """
    **Constraint Name**: AvlBin_Min_Time_Between_Events_Constraint
    **Enforced Over**: AVL_BIN_OPR_TMPS

    If an unavailability event ended within the past
    avl_bin_min_avl_hrs_between_events hours, the project must still be
    available in the current timepoint.
    """
    window, _ = determine_relevant_timepoints(
        mod, g, tmp, mod.avl_bin_min_avl_hrs_between_events[g]
    )
    if window == [tmp]:
        return Constraint.Skip
    stops_in_window = sum(
        mod.AvlBin_Stop_Unavailability[g, tp] for tp in window
    )
    return stops_in_window <= 1 - mod.AvlBin_Unavailable[g, tmp]
# Availability Type Methods
###############################################################################
def availability_derate_rule(mod, g, tmp):
    """
    Availability in a timepoint is 1 minus the binary unavailability
    decision: 1 when fully available, 0 when fully unavailable.
    """
    return 1 - mod.AvlBin_Unavailable[g, tmp]
# Input-Output
###############################################################################
def load_module_specific_data(
        m, data_portal, scenario_directory, subproblem, stage
):
    """
    Load the endogenous-binary availability inputs into the data portal.

    :param m: Pyomo abstract model
    :param data_portal: Pyomo DataPortal
    :param scenario_directory: base directory of the scenario inputs
    :param subproblem: subproblem directory name
    :param stage: stage directory name
    :return: None (populates data_portal in place)
    """
    # Figure out which projects have the "binary" availability type
    project_subset = determine_project_subset(
        scenario_directory=scenario_directory,
        subproblem=subproblem, stage=stage, column="availability_type",
        type="binary"
    )
    data_portal.data()["AVL_BIN"] = {None: project_subset}

    unavl_hrs_per_prd = {}
    min_unavl_hrs_per_event = {}
    min_avl_hrs_between_events = {}
    tab_file = os.path.join(
        scenario_directory, str(subproblem), str(stage),
        "inputs", "project_availability_endogenous.tab"
    )
    with open(tab_file, "r") as f:
        reader = csv.reader(f, delimiter="\t", lineterminator="\n")
        next(reader)  # skip the header row
        for row in reader:
            prj = row[0]
            # Only keep rows for projects of this availability type
            if prj in project_subset:
                unavl_hrs_per_prd[prj] = float(row[1])
                min_unavl_hrs_per_event[prj] = float(row[2])
                min_avl_hrs_between_events[prj] = float(row[3])

    data_portal.data()["avl_bin_unavl_hrs_per_prd"] = unavl_hrs_per_prd
    data_portal.data()["avl_bin_min_unavl_hrs_per_event"] = \
        min_unavl_hrs_per_event
    data_portal.data()["avl_bin_min_avl_hrs_between_events"] = \
        min_avl_hrs_between_events
def export_module_specific_results(
        scenario_directory, subproblem, stage, m, d):
    """
    Export the endogenous binary-availability decisions per project and
    timepoint to a results CSV.

    :param scenario_directory:
    :param subproblem:
    :param stage:
    :param m: The Pyomo abstract model
    :param d: Dynamic components
    :return: Nothing
    """
    results_path = os.path.join(
        scenario_directory, str(subproblem), str(stage), "results",
        "project_availability_endogenous_binary.csv"
    )
    header = ["project", "period", "subproblem_id", "stage_id",
              "availability_type", "timepoint",
              "timepoint_weight", "number_of_hours_in_timepoint",
              "load_zone", "technology",
              "unavailability_decision", "start_unavailability",
              "stop_unavailability", "availability_derate"]
    with open(results_path, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(header)
        for (p, tmp) in m.AVL_BIN_OPR_TMPS:
            unavailable = value(m.AvlBin_Unavailable[p, tmp])
            writer.writerow([
                p,
                m.period[tmp],
                # Empty subproblem/stage directories map to ID 1
                1 if subproblem == "" else subproblem,
                1 if stage == "" else stage,
                m.availability_type[p],
                tmp,
                m.tmp_weight[tmp],
                m.hrs_in_tmp[tmp],
                m.load_zone[p],
                m.technology[p],
                unavailable,
                value(m.AvlBin_Start_Unavailability[p, tmp]),
                value(m.AvlBin_Stop_Unavailability[p, tmp]),
                # The derate is simply the complement of the decision
                1 - unavailable
            ])
# Database
###############################################################################
def get_inputs_from_database(scenario_id, subscenarios, subproblem, stage, conn):
    """
    Query the endogenous binary-availability characteristics for the projects
    in the portfolio.

    :param scenario_id: the scenario ID (not used in the query itself)
    :param subscenarios: object carrying the subscenario IDs
        (PROJECT_PORTFOLIO_SCENARIO_ID, PROJECT_AVAILABILITY_SCENARIO_ID)
    :param subproblem:
    :param stage:
    :param conn: open database connection
    :return: cursor over (project, unavailable_hours_per_period,
        unavailable_hours_per_event_min, available_hours_between_events_min)
    """
    # Get project availability if project_availability_scenario_id is not NULL
    c = conn.cursor()
    # Use DB-API parameter substitution instead of str.format so the IDs
    # are bound safely rather than interpolated into the SQL text
    availability_params = c.execute(
        """SELECT project, unavailable_hours_per_period,
        unavailable_hours_per_event_min,
        available_hours_between_events_min
        FROM (
        SELECT project
        FROM inputs_project_portfolios
        WHERE project_portfolio_scenario_id = ?
        ) as portfolio_tbl
        INNER JOIN (
        SELECT project, endogenous_availability_scenario_id
        FROM inputs_project_availability
        WHERE project_availability_scenario_id = ?
        AND availability_type = 'binary'
        AND endogenous_availability_scenario_id IS NOT NULL
        ) AS avail_char
        USING (project)
        LEFT OUTER JOIN
        inputs_project_availability_endogenous
        USING (endogenous_availability_scenario_id, project);
        """,
        (subscenarios.PROJECT_PORTFOLIO_SCENARIO_ID,
         subscenarios.PROJECT_AVAILABILITY_SCENARIO_ID)
    )

    return availability_params
def write_module_specific_model_inputs(
        scenario_directory, scenario_id, subscenarios, subproblem, stage, conn
):
    """
    Append the endogenous binary-availability inputs for this scenario to
    project_availability_endogenous.tab, writing the header only if the
    file does not exist yet (another availability module may have already
    created it).

    :param scenario_directory:
    :param subscenarios:
    :param subproblem:
    :param stage:
    :param conn:
    :return:
    """
    endogenous_availability_params = get_inputs_from_database(
        scenario_id=scenario_id, subscenarios=subscenarios,
        subproblem=subproblem, stage=stage, conn=conn
    )

    availability_file = os.path.join(
        scenario_directory, subproblem, stage, "inputs",
        "project_availability_endogenous.tab"
    )
    # Only write the header if the file wasn't already created; opening a
    # non-existent file in append mode creates it, so a single open suffices
    header_needed = not os.path.exists(availability_file)

    with open(availability_file, "a", newline="") as f:
        writer = csv.writer(f, delimiter="\t", lineterminator="\n")
        if header_needed:
            writer.writerow(
                ["project",
                 "unavailable_hours_per_period",
                 "unavailable_hours_per_event_min",
                 "available_hours_between_events_min"]
            )
        # Write rows, replacing NULLs with "."
        for row in endogenous_availability_params:
            writer.writerow(["." if field is None else field for field in row])
def import_module_specific_results_into_database(
        scenario_id, subproblem, stage, c, db, results_directory, quiet
):
    """
    Import the binary-availability results CSV into the database.

    :param scenario_id:
    :param subproblem:
    :param stage:
    :param c: database cursor
    :param db: database connection
    :param results_directory:
    :param quiet: suppress the progress message when True
    :return:
    """
    if not quiet:
        print("project availability binary")

    insert_availability_results(
        db=db,
        c=c,
        results_directory=results_directory,
        scenario_id=scenario_id,
        results_file="project_availability_endogenous_binary.csv"
    )
# Validation
###############################################################################
def validate_module_specific_inputs(scenario_id, subscenarios, subproblem, stage, conn):
    """
    Validate the endogenous binary-availability inputs: data types, missing
    values, and that the minimum event duration does not exceed the total
    unavailable hours per period.

    :param subscenarios:
    :param subproblem:
    :param stage:
    :param conn:
    :return:
    """
    avail_inputs = get_inputs_from_database(
        scenario_id, subscenarios, subproblem, stage, conn
    )
    df = cursor_to_df(avail_inputs)

    # Arguments shared by every validation write below
    common_kwargs = dict(
        conn=conn,
        scenario_id=scenario_id,
        subproblem_id=subproblem,
        stage_id=stage,
        gridpath_module=__name__,
        db_table="inputs_project_availability_endogenous",
    )

    # Check data types availability
    expected_dtypes = get_expected_dtypes(
        conn, ["inputs_project_availability",
               "inputs_project_availability_endogenous"])
    dtype_errors, error_columns = validate_dtypes(df, expected_dtypes)
    write_validation_to_database(
        severity="High",
        errors=dtype_errors,
        **common_kwargs
    )

    # Check for missing inputs
    value_cols = ["unavailable_hours_per_period",
                  "unavailable_hours_per_event_min",
                  "available_hours_between_events_min"]
    write_validation_to_database(
        severity="Low",
        errors=validate_missing_inputs(df, value_cols, "project", ""),
        **common_kwargs
    )

    # Event duration may not exceed total unavailable hours per period
    write_validation_to_database(
        severity="High",
        errors=validate_column_monotonicity(
            df=df,
            cols=["unavailable_hours_per_event_min",
                  "unavailable_hours_per_period"],
            idx_col=["project"]
        ),
        **common_kwargs
    )
| StarcoderdataPython |
19602 | <reponame>Sanghyun-Hong/NLPProjects<filename>Project1/cl1_p1_wsd.py
import numpy as np
import operator
# SHHONG: custom modules imported
import json
import random
import itertools
from math import pow, log
from collections import Counter
import os
import sys
# NOTE(review): this redirects stdout to the null device for the rest of the
# program, silencing ALL print output (the handle is never closed) --
# presumably to mute the verbose training printouts below; confirm intended.
sys.stdout = open(os.devnull, 'w')
"""
CMSC723 / INST725 / LING723 -- Fall 2016
Project 1: Implementing Word Sense Disambiguation Systems
"""
"""
read one of train, dev, test subsets
subset - one of train, dev, test
output is a tuple of three lists
labels: one of the 6 possible senses <cord, division, formation, phone, product, text >
targets: the index within the text of the token to be disambiguated
texts: a list of tokenized and normalized text input (note that there can be multiple sentences)
"""
import nltk
#### added dev_manual to the subset of allowable files
def read_dataset(subset):
    """
    Read one of the train/dev/test/dev_manual subsets.

    :param subset: one of 'train', 'dev', 'test', 'dev_manual'
    :return: (labels, targets, texts) where labels are the gold senses,
        targets are the indices of the ambiguous 'line'/'lines' token in
        each text, and texts are tokenized, lower-cased word lists.
        Returns None (after printing a message) for an invalid subset name.
    :raises ValueError: if an example contains neither 'line' nor 'lines'.
    """
    labels = []
    texts = []
    targets = []
    if subset in ['train', 'dev', 'test', 'dev_manual']:
        with open('data/wsd_'+subset+'.txt') as inp_hndl:
            for example in inp_hndl:
                label, text = example.strip().split('\t')
                text = nltk.word_tokenize(text.lower().replace('" ', '"'))
                if 'line' in text:
                    ambig_ix = text.index('line')
                elif 'lines' in text:
                    ambig_ix = text.index('lines')
                else:
                    # BUG FIX: the original referenced an undefined name
                    # ('ldjal') to force a crash here; raise a clear error.
                    raise ValueError(
                        "no 'line'/'lines' token found in example: %r" % text)
                targets.append(ambig_ix)
                labels.append(label)
                texts.append(text)
        return (labels, targets, texts)
    else:
        print('>>>> invalid input !!! <<<<<')
"""
computes f1-score of the classification accuracy
gold_labels - is a list of the gold labels
predicted_labels - is a list of the predicted labels
output is a tuple of the micro averaged score and the macro averaged score
"""
import sklearn.metrics
#### changed method name from eval because of naming conflict with python keyword
def eval_performance(gold_labels, predicted_labels):
    """Return (micro_f1, macro_f1) for the predicted vs. gold labels."""
    micro_f1 = sklearn.metrics.f1_score(
        gold_labels, predicted_labels, average='micro')
    macro_f1 = sklearn.metrics.f1_score(
        gold_labels, predicted_labels, average='macro')
    return (micro_f1, macro_f1)
"""
a helper method that takes a list of predictions and writes them to a file (1 prediction per line)
predictions - list of predictions (strings)
file_name - name of the output file
"""
def write_predictions(predictions, file_name):
    """Write *predictions* (strings) to *file_name*, one per line."""
    lines = [p + '\n' for p in predictions]
    with open(file_name, 'w') as outh:
        outh.writelines(lines)
"""
Trains a naive bayes model with bag of words features and computes the accuracy on the test set
train_texts, train_targets, train_labels are as described in read_dataset above
The same thing applies to the rest of the parameters.
"""
def run_bow_naivebayes_classifier(train_texts, train_targets, train_labels,
        dev_texts, dev_targets, dev_labels, test_texts, test_targets, test_labels):
    """
    Train a bag-of-words Naive Bayes WSD classifier and evaluate on the test
    set (Python 2 code: uses print statements, dict.iteritems/has_key).

    With improved=True, Laplace (add-alpha) smoothing and log-likelihoods are
    used. Predictions are written to 'q4p2.txt'; returns a summary string
    with micro/macro F1.
    """
    # control variables
    improved = True   # use add-alpha smoothing + log-likelihoods
    alpha = 0.04      # smoothing constant
    silent = True     # suppress the diagnostic tables below
    # Part 2.1 (c_s/c_sw): per-sense token counts and counts of the three
    # probe words 'time'/'loss'/'export' per sense
    c_s = dict.fromkeys(set(train_labels), 0)
    multiples = list(itertools.product(c_s.keys(), ['time', 'loss', 'export']))
    c_sw = dict.fromkeys(multiples, 0)
    t_w = [each_word for each_text in train_texts for each_word in each_text]
    multiples = list(itertools.product(c_s.keys(), t_w))
    t_sw = dict.fromkeys(multiples, 0)
    for idx, label in enumerate(train_labels):
        cur_text = train_texts[idx]
        # compute c_s
        c_s[label] += len(cur_text)
        # compute c_sw
        time_cnt = cur_text.count('time')
        loss_cnt = cur_text.count('loss')
        export_cnt = cur_text.count('export')
        c_sw[(label, 'time')] += time_cnt
        c_sw[(label, 'loss')] += loss_cnt
        c_sw[(label, 'export')] += export_cnt
        # compute t_sw (total occurrences) of (label, word)
        for each_word in cur_text:
            t_sw[(label, each_word)] += 1
    # total # of distinct words: will be used for smoothing
    t_dw = Counter(t_w)
    if not silent:
        print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
        format('s', 'cord', 'division', 'formation', 'phone', 'product', 'text')
        print '------------------------------------------------------------------------------------------'
        print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
        format('c(s)', c_s['cord'], c_s['division'], c_s['formation'], c_s['phone'], c_s['product'], c_s['text'])
        print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
        format('c(s,time)', c_sw[('cord', 'time')], c_sw[('division', 'time')], c_sw[('formation', 'time')], \
        c_sw[('phone', 'time')], c_sw[('product', 'time')], c_sw[('text', 'time')])
        print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
        format('c(s,loss)', c_sw[('cord', 'loss')], c_sw[('division', 'loss')], c_sw[('formation', 'loss')], \
        c_sw[('phone', 'loss')], c_sw[('product', 'loss')], c_sw[('text', 'loss')])
        print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
        format('c(s,export)', c_sw[('cord', 'export')], c_sw[('division', 'export')], c_sw[('formation', 'export')], \
        c_sw[('phone', 'export')], c_sw[('product', 'export')], c_sw[('text', 'export')])
        print '------------------------------------------------------------------------------------------'
        print ' total distinct words: %d ' % (len(t_dw.keys()))
    # Part 2.2 (p_s/p_ws): priors and (smoothed) conditional probabilities
    total_occurances = float(sum(c_s.values()))
    label_count = Counter(train_labels)
    p_s = {key: (value / float( sum( label_count.values() )) ) for key, value in label_count.iteritems()}
    if improved:
        # add-alpha smoothing over the vocabulary of distinct words
        p_ws = {key: ( (value + alpha) / \
                (float(c_s[key[0]]) + alpha*len(t_dw.keys())) ) \
                for key, value in c_sw.iteritems()}
        t_ws = {key: ( (value + alpha) / \
                (float(c_s[key[0]]) + alpha*len(t_dw.keys())) ) \
                for key, value in t_sw.iteritems()}
    else:
        p_ws = {key: (value / float(c_s[key[0]])) for key, value in c_sw.iteritems()}
        t_ws = {key: (value / float(c_s[key[0]])) for key, value in t_sw.iteritems()}
    # normalization steps (only for the three reported probe words)
    norm_denominators = {
        'time': 0.0,
        'loss': 0.0,
        'export': 0.0
    }
    for key, value in p_ws.iteritems():
        norm_denominators[key[1]] += value
    p_ws_norm = {key: (value / norm_denominators[key[1]]) for key, value in p_ws.iteritems()}
    p_ws = p_ws_norm
    if not silent:
        print '------------------------------------------------------------------------------------------'
        print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
        format('p(s)', p_s['cord'], p_s['division'], p_s['formation'], p_s['phone'], p_s['product'], p_s['text'])
        print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
        format('p(time|s)', p_ws[('cord', 'time')], p_ws[('division', 'time')], p_ws[('formation', 'time')], \
        p_ws[('phone', 'time')], p_ws[('product', 'time')], p_ws[('text', 'time')])
        print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
        format('p(loss|s)', p_ws[('cord', 'loss')], p_ws[('division', 'loss')], p_ws[('formation', 'loss')], \
        p_ws[('phone', 'loss')], p_ws[('product', 'loss')], p_ws[('text', 'loss')])
        print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
        format('p(export|s)', p_ws[('cord', 'export')], p_ws[('division', 'export')], p_ws[('formation', 'export')], \
        p_ws[('phone', 'export')], p_ws[('product', 'export')], p_ws[('text', 'export')])
    # Part 2.3 (p_sxd, on the 1st line of the dev set)
    p_sxd = dict.fromkeys(c_s.keys(), 0.0)
    lp_sxd = dict.fromkeys(c_s.keys(), 0.0)
    cur_text = dev_texts[0]
    for key in p_sxd.keys():
        # compute p for each class
        if improved:
            tp_sxd = p_s[key]
            tlp_sxd = log(p_s[key])
        else:
            tp_sxd = p_s[key]
        # compute for each word
        for each_word in cur_text:
            if t_ws.has_key((key, each_word)):
                if improved:
                    tp_sxd *= t_ws[(key, each_word)]
                    tlp_sxd += log(t_ws[(key, each_word)])
                else:
                    tp_sxd *= t_ws[(key, each_word)]
        # add to the dict
        if improved:
            p_sxd[key] = tp_sxd
            lp_sxd[key] = tlp_sxd
        else:
            p_sxd[key] = tp_sxd
    if not silent:
        print '------------------------------------------------------------------------------------------'
        print ' %s | %s | %s | %s | %s | %s | %s |' % \
        ('p(s|X)', p_sxd['cord'], p_sxd['division'], p_sxd['formation'], \
        p_sxd['phone'], p_sxd['product'], p_sxd['text'])
        print '------------------------------------------------------------------------------------------'
        print ' 1st label in dev : %s ' % (dev_labels[0])
        print ' 1st text in dev[:5]: %s ' % (dev_texts[0][:5])
        if improved:
            print '------------------------------------------------------------------------------------------'
            print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
            format('log(p(s|X))', lp_sxd['cord'], lp_sxd['division'], lp_sxd['formation'], \
            lp_sxd['phone'], lp_sxd['product'], lp_sxd['text'])
    # Part 2.4: compute all the prob on the test dataset
    p_sx = list()
    for idx, text in enumerate(test_texts):
        t_prob = dict.fromkeys(c_s.keys(), 0.0)
        for key in t_prob.keys():
            # compute p for each class
            if improved:
                tp_sxt = log(p_s[key])
            else:
                tp_sxt = p_s[key]
            for each_word in text:
                if t_ws.has_key((key, each_word)):
                    if improved:
                        tp_sxt += log(t_ws[(key, each_word)])
                    else:
                        tp_sxt *= t_ws[(key, each_word)]
            # add to the dict
            t_prob[key] = tp_sxt
        # add dict to the entire list
        p_sx.append(t_prob)
    # Part 2.4 (run the classifier for all; ties broken at random)
    labels_predicted = list()
    for idx, label in enumerate(test_labels):
        maximum_probs = max(p_sx[idx].values())
        label_prediction = [key for key, value in p_sx[idx].iteritems() if value == maximum_probs]
        label_prediction = random.choice(label_prediction)
        # based on the prob
        labels_predicted.append(label_prediction)
    naivebayes_performance = eval_performance(test_labels, labels_predicted)
    # save the predictions to the file
    with open('q4p2.txt', 'wb') as q4p2_output:
        for each_label in labels_predicted:
            q4p2_output.write(each_label+'\n')
    # Part 2.5 (do more tuning for the classifier)
    # - Laplace smoothing
    # - Log likelihoods
    if not silent:
        print '------------------------------------------------------------------------------------------'
    return 'Naive Bayes: micro/macro = [%.2f, %.2f] @ (alpha: %s)' % \
        (naivebayes_performance[0]*100, naivebayes_performance[1]*100, alpha)
## extract all the distinct words from a set of texts
## return a dictionary {word:index} that maps each word to a unique index
def extract_all_words(texts, prev_set=None):
    """
    Collect the distinct words appearing in *texts* and map each to a
    unique integer index.

    :param texts: iterable of tokenized texts (lists of words)
    :param prev_set: optional set of words to extend; defaults to a fresh set
    :return: dict {word: index}

    BUG FIX: the default used to be a shared mutable `set()`, so words
    accumulated across successive calls; use a None sentinel instead.
    """
    all_words = set() if prev_set is None else prev_set
    for t in texts:
        for w in t:
            all_words.add(w)
    all_words_idx = {}
    for i, w in enumerate(all_words):
        all_words_idx[w] = i
    return all_words_idx
## extract all distinct labels from a dataset
## return a dictionary {label:index} that maps each label to a unique index
def extract_all_labels(labels):
    """Map each distinct label in *labels* to a unique integer index."""
    return {lbl: idx for idx, lbl in enumerate(set(labels))}
## construct a bow feature matrix for a set of instances
## the returned matrix has the size NUM_INSTANCES X NUM_FEATURES
def extract_features(all_words_idx, all_labels_idx, texts):
    """
    Build a bag-of-words count matrix of size NUM_INSTANCES x NUM_FEATURES.
    Words missing from all_words_idx are ignored (all_labels_idx is unused
    here but kept for a uniform call signature).
    """
    n_rows = len(texts)
    n_cols = len(all_words_idx.keys())
    matrix = np.zeros((n_rows, n_cols))
    for row, instance in enumerate(texts):
        for word in instance:
            col = all_words_idx.get(word, None)
            if col is None:
                # out-of-vocabulary word: skip
                continue
            matrix[row][col] += 1
    return matrix
## compute the feature vector for a set of words and a given label
## the features are computed as described in Slide #19 of:
## http://www.cs.umd.edu/class/fall2016/cmsc723/slides/slides_02.pdf
def get_features_for_label(instance, label, class_labels):
    """
    Return the joint feature vector for (instance, label): a zero vector of
    length len(instance)*num_labels + 1 with *instance* copied into the
    slot reserved for *label* (the trailing position is a bias slot).
    """
    num_labels = len(class_labels)
    num_feats = len(instance)
    feats = np.zeros(num_feats * num_labels + 1)
    start = num_feats * label
    stop = start + num_feats
    assert len(feats[start:stop]) == len(instance)
    feats[start:stop] = instance
    return feats
## get the predicted label for a given instance
## the predicted label is the one with the highest dot product of theta*feature_vector
## return only the predicted label (the per-label dot-product scores and feature vectors are internal)
def get_predicted_label(inst, class_labels, theta):
    """
    Return the label whose joint feature vector has the highest dot product
    with the weight vector *theta*.

    :param inst: bag-of-words feature vector for one instance
    :param class_labels: iterable of candidate label indices
    :param theta: weight vector (length = len(inst)*num_labels + 1)
    :return: the arg-max label
    """
    all_labels_scores = {}
    for lbl in class_labels:
        feat_vec = get_features_for_label(inst, lbl, class_labels)
        assert len(feat_vec) == len(theta)
        all_labels_scores[lbl] = np.dot(feat_vec, theta)
    # Arg-max over keys by score. Ties resolve to the first-seen key, same as
    # the original max(iteritems, key=itemgetter(1)), but this also works on
    # Python 3, where dict.iteritems() no longer exists. (The unused
    # all_labels_features dict from the original has been dropped.)
    return max(all_labels_scores, key=all_labels_scores.get)
## train the perceptron by iterating over the entire training dataset
## the algorithm is an implementation of the pseudocode from Slide #23 of:
## http://www.cs.umd.edu/class/fall2016/cmsc723/slides/slides_03.pdf
def train_perceptron(train_features, train_labels, class_labels, num_features):
    """
    Train an averaged multiclass perceptron (Python 2 code: print statements).

    Iterates up to NO_MAX_ITERATIONS epochs over a shuffled training set,
    applying the standard perceptron update on each mistake, and accumulates
    theta after every instance in `m`. Returns the averaged weight vector.
    """
    NO_MAX_ITERATIONS = 20
    np.random.seed(0)  # fixed seed so the shuffling is reproducible
    theta = np.zeros(num_features)
    print '# Training Instances:',len(train_features)
    num_iterations = 0
    cnt_updates_total = 0
    cnt_updates_prev = 0  # NOTE(review): never used below
    m = np.zeros(num_features)  # running sum of theta (for averaging)
    print '# Total Updates / # Current Iteration Updates:'
    for piter in range(NO_MAX_ITERATIONS):
        shuffled_indices = np.arange(len(train_features))
        np.random.shuffle(shuffled_indices)
        cnt_updates_crt = 0
        for i in shuffled_indices:
            inst = train_features[i]
            actual_label = train_labels[i]
            predicted_label = get_predicted_label(inst,class_labels,theta)
            if predicted_label != actual_label:
                # mistake: move weights toward the gold label's features
                cnt_updates_total += 1
                cnt_updates_crt += 1
                theta = theta + get_features_for_label(inst,actual_label,class_labels) - get_features_for_label(inst,predicted_label,class_labels)
            m = m + theta
            num_iterations += 1
        print cnt_updates_total,'/',cnt_updates_crt
        if cnt_updates_crt == 0:
            # converged: no mistakes in a full pass
            break
    # NOTE(review): the average divides by the number of UPDATES, not by the
    # number of accumulated snapshots (num_iterations); also divides by zero
    # if the data is separable with the initial weights. Confirm intended.
    theta = m/cnt_updates_total
    print '# Iterations:',piter
    print '# Iterations over instances:',num_iterations
    print '# Total Updates:',cnt_updates_total
    return theta
## return the predictions of the perceptron on a test set
def test_perceptron(theta, test_features, test_labels, class_labels):
    """Return the perceptron's predicted label for every test instance
    (test_labels is unused; kept for a uniform call signature)."""
    return [get_predicted_label(inst, class_labels, theta)
            for inst in test_features]
"""
Trains a perceptron model with bag of words features and computes the accuracy on the test set
train_texts, train_targets, train_labels are as described in read_dataset above
The same thing applies to the rest of the parameters.
"""
def run_bow_perceptron_classifier(train_texts, train_targets,train_labels,
        dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_labels):
    """
    Train an averaged perceptron on bag-of-words features and evaluate on the
    test set. Predictions are written to 'q3p3.txt'; returns a summary string.

    NOTE(review): Python 2 code -- it relies on map() returning a list (the
    results are indexed later) and on dict.values() returning a list.
    """
    all_words_idx = extract_all_words(train_texts)
    all_labels_idx = extract_all_labels(train_labels)
    # one weight slot per (word, label) pair plus a bias slot
    num_features = len(all_words_idx.keys())*len(all_labels_idx.keys())+1
    class_labels = all_labels_idx.values()
    train_features = extract_features(all_words_idx,all_labels_idx,train_texts)
    train_labels = map(lambda e: all_labels_idx[e],train_labels)
    test_features = extract_features(all_words_idx,all_labels_idx,test_texts)
    test_labels = map(lambda e: all_labels_idx[e],test_labels)
    # sanity check: the joint feature vector embeds the instance at the
    # label's offset
    for l in class_labels:
        inst = train_features[0]
        ffl = get_features_for_label(inst,l,class_labels)
        assert False not in (inst == ffl[l*len(inst):(l+1)*len(inst)])
    theta = train_perceptron(train_features,train_labels,class_labels,num_features)
    test_predictions = test_perceptron(theta,test_features,test_labels,class_labels)
    eval_test = eval_performance(test_labels,test_predictions)
    # map numeric label indices back to their string names for the output file
    inverse_labels_index = {}
    for k in all_labels_idx.keys():
        inverse_labels_index[all_labels_idx[k]] = k
    test_predictions_names = map(lambda e: inverse_labels_index[e],test_predictions)
    with open('q3p3.txt', 'wb') as file_output:
        for each_label in test_predictions_names:
            file_output.write(each_label+'\n')
    return ('test-micro=%d%%, test-macro=%d%%' % (int(eval_test[0]*100),int(eval_test[1]*100)))
"""
Trains a naive bayes model with bag of words features + two additional features
and computes the accuracy on the test set
train_texts, train_targets, train_labels are as described in read_dataset above
The same thing applies to the rest of the parameters.
"""
def run_extended_bow_naivebayes_classifier(train_texts, train_targets,train_labels,
        dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_labels):
    """
    Naive Bayes classifier with bag-of-words features plus up to two extra
    features (A: a seeded random value; B: the distinct-word count), appended
    to each text as string pseudo-tokens. Otherwise duplicates
    run_bow_naivebayes_classifier; predictions go to 'q4p4_nb.txt'.
    (Python 2 code: print statements, iteritems/has_key.)
    """
    # control variables
    improved = True
    alpha = 0.04
    silent = True
    RUN_EXP = 'Both' # set to 'B', None, or 'Both'
    # feature extensions (A)
    if 'A' in RUN_EXP:
        train_features, dev_features, test_features = get_feature_A(train_texts, train_targets, train_labels,
                                                                    dev_texts, dev_targets, dev_labels,
                                                                    test_texts, test_targets, test_labels)
        for idx, each_text in enumerate(train_texts):
            each_text.append(str(float(train_features[idx])))
        for idx, each_text in enumerate(dev_texts):
            each_text.append(str(float(dev_features[idx])))
        for idx, each_text in enumerate(test_texts):
            each_text.append(str(float(test_features[idx])))
    # feature extensions (B)
    # NOTE(review): with RUN_EXP = 'Both', THIS branch fires ('B' is a
    # substring of 'Both'), so the dedicated 'Both' branch below is
    # unreachable as written.
    elif 'B' in RUN_EXP:
        train_features, dev_features, test_features = get_feature_B(train_texts, train_targets, train_labels,
                                                                    dev_texts, dev_targets, dev_labels,
                                                                    test_texts, test_targets, test_labels)
        for idx, each_text in enumerate(train_texts):
            each_text.append(str(int(train_features[idx])))
        for idx, each_text in enumerate(dev_texts):
            each_text.append(str(int(dev_features[idx])))
        for idx, each_text in enumerate(test_texts):
            each_text.append(str(int(test_features[idx])))
    # feature extensions with both two A and B
    elif 'Both' in RUN_EXP:
        train_features_A, dev_features_A, test_features_A = get_feature_A(train_texts, train_targets, train_labels,
                                                                          dev_texts, dev_targets, dev_labels,
                                                                          test_texts, test_targets, test_labels)
        train_features_B, dev_features_B, test_features_B = get_feature_B(train_texts, train_targets, train_labels,
                                                                          dev_texts, dev_targets, dev_labels,
                                                                          test_texts, test_targets, test_labels)
        for idx, each_text in enumerate(train_texts):
            each_text.append(str(float(train_features_A[idx])))
            each_text.append(str(int(train_features_B[idx])))
        for idx, each_text in enumerate(dev_texts):
            each_text.append(str(float(dev_features_A[idx])))
            # NOTE(review): looks like a bug -- intern() on a float raises
            # TypeError and the wrong split is indexed; surrounding lines
            # suggest str(int(dev_features_B[idx])) was intended. Currently
            # unreachable given the RUN_EXP check order above.
            each_text.append(str(intern(train_features_B[idx])))
        for idx, each_text in enumerate(test_texts):
            each_text.append(str(float(test_features_A[idx])))
            # NOTE(review): train_features_B here also looks like it should
            # be test_features_B.
            each_text.append(str(int(train_features_B[idx])))
    else:
        train_features, dev_features, test_features = None, None, None
    if not silent:
        print ' extension of the Naive Bayes classifier w. feature set: [%s] ' % (RUN_EXP)
        print '------------------------------------------------------------------------------------------'
    # Part 2.1 (c_s/c_sw): per-sense token counts and probe-word counts
    c_s = dict.fromkeys(set(train_labels), 0)
    multiples = list(itertools.product(c_s.keys(), ['time', 'loss', 'export']))
    c_sw = dict.fromkeys(multiples, 0)
    t_w = [each_word for each_text in train_texts for each_word in each_text]
    multiples = list(itertools.product(c_s.keys(), t_w))
    t_sw = dict.fromkeys(multiples, 0)
    for idx, label in enumerate(train_labels):
        cur_text = train_texts[idx]
        # compute c_s
        c_s[label] += len(cur_text)
        # compute c_sw
        time_cnt = cur_text.count('time')
        loss_cnt = cur_text.count('loss')
        export_cnt = cur_text.count('export')
        c_sw[(label, 'time')] += time_cnt
        c_sw[(label, 'loss')] += loss_cnt
        c_sw[(label, 'export')] += export_cnt
        # compute t_sw (total occurrences) of (label, word)
        for each_word in cur_text:
            t_sw[(label, each_word)] += 1
    # total # of distinct words: will be used for smoothing
    t_dw = Counter(t_w)
    if not silent:
        print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
        format('s', 'cord', 'division', 'formation', 'phone', 'product', 'text')
        print '------------------------------------------------------------------------------------------'
        print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
        format('c(s)', c_s['cord'], c_s['division'], c_s['formation'], c_s['phone'], c_s['product'], c_s['text'])
        print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
        format('c(s,time)', c_sw[('cord', 'time')], c_sw[('division', 'time')], c_sw[('formation', 'time')], \
        c_sw[('phone', 'time')], c_sw[('product', 'time')], c_sw[('text', 'time')])
        print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
        format('c(s,loss)', c_sw[('cord', 'loss')], c_sw[('division', 'loss')], c_sw[('formation', 'loss')], \
        c_sw[('phone', 'loss')], c_sw[('product', 'loss')], c_sw[('text', 'loss')])
        print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
        format('c(s,export)', c_sw[('cord', 'export')], c_sw[('division', 'export')], c_sw[('formation', 'export')], \
        c_sw[('phone', 'export')], c_sw[('product', 'export')], c_sw[('text', 'export')])
        print '------------------------------------------------------------------------------------------'
        print ' total distinct words: %d ' % (len(t_dw.keys()))
    # Part 2.2 (p_s/p_ws): priors and (smoothed) conditional probabilities
    total_occurances = float(sum(c_s.values()))
    label_count = Counter(train_labels)
    p_s = {key: (value / float( sum( label_count.values() )) ) for key, value in label_count.iteritems()}
    if improved:
        # add-alpha smoothing over the vocabulary of distinct words
        p_ws = {key: ( (value + alpha) / \
                (float(c_s[key[0]]) + alpha*len(t_dw.keys())) ) \
                for key, value in c_sw.iteritems()}
        t_ws = {key: ( (value + alpha) / \
                (float(c_s[key[0]]) + alpha*len(t_dw.keys())) ) \
                for key, value in t_sw.iteritems()}
    else:
        p_ws = {key: (value / float(c_s[key[0]])) for key, value in c_sw.iteritems()}
        t_ws = {key: (value / float(c_s[key[0]])) for key, value in t_sw.iteritems()}
    # normalization steps (only for the three reported probe words)
    norm_denominators = {
        'time': 0.0,
        'loss': 0.0,
        'export': 0.0
    }
    for key, value in p_ws.iteritems():
        norm_denominators[key[1]] += value
    p_ws_norm = {key: (value / norm_denominators[key[1]]) for key, value in p_ws.iteritems()}
    p_ws = p_ws_norm
    if not silent:
        print '------------------------------------------------------------------------------------------'
        print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
        format('p(s)', p_s['cord'], p_s['division'], p_s['formation'], p_s['phone'], p_s['product'], p_s['text'])
        print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
        format('p(time|s)', p_ws[('cord', 'time')], p_ws[('division', 'time')], p_ws[('formation', 'time')], \
        p_ws[('phone', 'time')], p_ws[('product', 'time')], p_ws[('text', 'time')])
        print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
        format('p(loss|s)', p_ws[('cord', 'loss')], p_ws[('division', 'loss')], p_ws[('formation', 'loss')], \
        p_ws[('phone', 'loss')], p_ws[('product', 'loss')], p_ws[('text', 'loss')])
        print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
        format('p(export|s)', p_ws[('cord', 'export')], p_ws[('division', 'export')], p_ws[('formation', 'export')], \
        p_ws[('phone', 'export')], p_ws[('product', 'export')], p_ws[('text', 'export')])
    # Part 2.3 (p_sxd, on the 1st line of the dev set)
    p_sxd = dict.fromkeys(c_s.keys(), 0.0)
    lp_sxd = dict.fromkeys(c_s.keys(), 0.0)
    cur_text = dev_texts[0]
    for key in p_sxd.keys():
        # compute p for each class
        if improved:
            tp_sxd = p_s[key]
            tlp_sxd = log(p_s[key])
        else:
            tp_sxd = p_s[key]
        # compute for each word
        for each_word in cur_text:
            if t_ws.has_key((key, each_word)):
                if improved:
                    tp_sxd *= t_ws[(key, each_word)]
                    tlp_sxd += log(t_ws[(key, each_word)])
                else:
                    tp_sxd *= t_ws[(key, each_word)]
        # add to the dict
        if improved:
            p_sxd[key] = tp_sxd
            lp_sxd[key] = tlp_sxd
        else:
            p_sxd[key] = tp_sxd
    if not silent:
        print '------------------------------------------------------------------------------------------'
        print ' %s | %s | %s | %s | %s | %s | %s |' % \
        ('p(s|X)', p_sxd['cord'], p_sxd['division'], p_sxd['formation'], \
        p_sxd['phone'], p_sxd['product'], p_sxd['text'])
        print '------------------------------------------------------------------------------------------'
        print ' 1st label in dev : %s ' % (dev_labels[0])
        print ' 1st text in dev[:5]: %s ' % (dev_texts[0][:5])
        if improved:
            print '------------------------------------------------------------------------------------------'
            print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
            format('log(p(s|X))', lp_sxd['cord'], lp_sxd['division'], lp_sxd['formation'], \
            lp_sxd['phone'], lp_sxd['product'], lp_sxd['text'])
    # Part 2.4: compute all the prob on the test dataset
    p_sx = list()
    for idx, text in enumerate(test_texts):
        t_prob = dict.fromkeys(c_s.keys(), 0.0)
        for key in t_prob.keys():
            # compute p for each class
            if improved:
                tp_sxt = log(p_s[key])
            else:
                tp_sxt = p_s[key]
            for each_word in text:
                if t_ws.has_key((key, each_word)):
                    if improved:
                        tp_sxt += log(t_ws[(key, each_word)])
                    else:
                        tp_sxt *= t_ws[(key, each_word)]
            # add to the dict
            t_prob[key] = tp_sxt
        # add dict to the entire list
        p_sx.append(t_prob)
    # Part 2.4 (run the classifier for all; ties broken at random)
    labels_predicted = list()
    for idx, label in enumerate(test_labels):
        maximum_probs = max(p_sx[idx].values())
        label_prediction = [key for key, value in p_sx[idx].iteritems() if value == maximum_probs]
        label_prediction = random.choice(label_prediction)
        # based on the prob
        labels_predicted.append(label_prediction)
    naivebayes_performance = eval_performance(test_labels, labels_predicted)
    # save the predictions to the file
    with open('q4p4_nb.txt', 'wb') as q4p4_nb_output:
        for each_label in labels_predicted:
            q4p4_nb_output.write(each_label+'\n')
    # Part 2.5 (do more tuning for the classifier)
    # - Laplace smoothing
    # - Log likelihoods
    if not silent:
        print '------------------------------------------------------------------------------------------'
    return 'Naive Bayes: micro/macro = [%.2f, %.2f] @ (alpha: %s)' % \
        (naivebayes_performance[0]*100, naivebayes_performance[1]*100, alpha)
## this feature is just a random number generated for each instance
def get_feature_A(train_texts, train_targets,train_labels,
        dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_label):
    """Return one pseudo-random scalar feature per instance for each split.

    The RNG is re-seeded with a fixed seed on every call, so the "random"
    feature values are reproducible across runs.
    """
    np.random.seed(0)  # fixed seed -> identical values on every call
    feature_vectors = [np.random.random_sample((len(split),))
                       for split in (train_texts, dev_texts, test_texts)]
    return feature_vectors[0], feature_vectors[1], feature_vectors[2]
## this feature encodes the number of distinct words in each instance
def get_feature_B(train_texts, train_targets,train_labels,
        dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_label):
    """Return, per split, a float vector holding each text's distinct-word count."""
    def distinct_word_counts(texts):
        # len(set(...)) counts unique tokens; float dtype matches np.zeros
        return np.fromiter((len(set(text)) for text in texts),
                           dtype=float, count=len(texts))
    return (distinct_word_counts(train_texts),
            distinct_word_counts(dev_texts),
            distinct_word_counts(test_texts))
"""
Trains a perceptron model with bag of words features + two additional features
and computes the accuracy on the test set.
train_texts, train_targets, train_labels are as described in read_dataset above.
The same thing applies to the rest of the parameters.
"""
def run_extended_bow_perceptron_classifier(train_texts, train_targets,train_labels,
        dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_labels):
    """Perceptron with bag-of-words features plus (optionally) two extra features.

    Trains on the training split, evaluates on the test split, writes the
    predicted label names to q4p4_pn.txt (one per line) and returns a
    micro/macro accuracy summary string.
    """
    RUN_EXP_A = True  # set to False to drop feature A (random scalar)
    RUN_EXP_B = True  # set to False to drop feature B (distinct-word count)
    extra_count = 0
    if RUN_EXP_A:
        train_featA, dev_featA, test_featA = get_feature_A(
            train_texts, train_targets, train_labels,
            dev_texts, dev_targets, dev_labels,
            test_texts, test_targets, test_labels)
        extra_count += 1
    if RUN_EXP_B:
        train_featB, dev_featB, test_featB = get_feature_B(
            train_texts, train_targets, train_labels,
            dev_texts, dev_targets, dev_labels,
            test_texts, test_targets, test_labels)
        extra_count += 1
    all_words_idx = extract_all_words(train_texts)
    all_labels_idx = extract_all_labels(train_labels)
    # one weight per (word or extra feature) per class, plus a bias term
    num_features = (len(all_words_idx) + extra_count) * len(all_labels_idx) + 1
    class_labels = all_labels_idx.values()
    train_features = extract_features(all_words_idx, all_labels_idx, train_texts)
    train_labels = [all_labels_idx[lbl] for lbl in train_labels]
    test_features = extract_features(all_words_idx, all_labels_idx, test_texts)
    test_labels = [all_labels_idx[lbl] for lbl in test_labels]
    # append the extra feature columns to the bag-of-words matrices
    if RUN_EXP_A:
        train_features = np.c_[train_features, train_featA]
        test_features = np.c_[test_features, test_featA]
    if RUN_EXP_B:
        train_features = np.c_[train_features, train_featB]
        test_features = np.c_[test_features, test_featB]
    # sanity check: each per-label feature block equals the instance vector
    for lbl in class_labels:
        first_inst = train_features[0]
        per_label = get_features_for_label(first_inst, lbl, class_labels)
        assert False not in (first_inst == per_label[lbl*len(first_inst):(lbl+1)*len(first_inst)])
    theta = train_perceptron(train_features, train_labels, class_labels, num_features)
    test_predictions = test_perceptron(theta, test_features, test_labels, class_labels)
    eval_test = eval_performance(test_labels, test_predictions)
    # map numeric predictions back to their label names for the output file
    inverse_labels_index = dict((idx, name) for name, idx in all_labels_idx.items())
    test_predictions_names = [inverse_labels_index[pred] for pred in test_predictions]
    with open('q4p4_pn.txt', 'wb') as file_output:
        for each_label in test_predictions_names:
            file_output.write(each_label+'\n')
    return ('test-micro=%d%%, test-macro=%d%%' % (int(eval_test[0]*100),int(eval_test[1]*100)))
# Part 1.1
def run_most_frequent_class_classifier(train_texts, train_targets,train_labels,
        dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_labels):
    """Majority-class baseline: always predict the most frequent training label.

    Returns a string with micro/macro accuracy on the training and dev splits.
    """
    # count how often each label occurs in the training data
    labels_freq = {}
    for l in train_labels:
        labels_freq[l] = labels_freq.get(l, 0) + 1
    most_frequent_label = max(labels_freq.items(), key=operator.itemgetter(1))[0]
    # predict the majority label for every instance
    train_pred = [most_frequent_label]*len(train_labels)
    dev_pred = [most_frequent_label]*len(dev_labels)
    # NOTE: removed a leftover debug assertion (train_pred[2] == train_labels[2])
    # that only held for one specific dataset and crashed on any other input.
    eval_train = eval_performance(train_labels,train_pred)
    eval_dev = eval_performance(dev_labels,dev_pred)
    return ('training-micro=%d%%, training-macro=%d%%, dev-micro=%d%%, dev-macro=%d%%' % (int(eval_train[0]*100),int(eval_train[1]*100),int(eval_dev[0]*100),int(eval_dev[1]*100)))
# Part 1.2
def run_inner_annotator_agreement(train_texts, train_targets,train_labels,
        dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_labels):
    """Cohen's kappa between the official dev labels and the 20 manually
    re-annotated dev instances, formatted to two decimal places."""
    manual_labels, _manual_targets, _manual_texts = read_dataset('dev_manual')
    kappa = sklearn.metrics.cohen_kappa_score(dev_labels[:20], manual_labels)
    return '%.2f' % kappa
"""
Main entry point (swap in a different classifier by changing the call below).
"""
# Script entry point: load the three dataset splits, train/evaluate the
# bag-of-words perceptron, and print its accuracy summary (Python 2 syntax).
if __name__ == "__main__":
    # reading, tokenizing, and normalizing data
    train_labels, train_targets, train_texts = read_dataset('train')
    dev_labels, dev_targets, dev_texts = read_dataset('dev')
    test_labels, test_targets, test_texts = read_dataset('test')
    #running the classifier
    test_scores = run_bow_perceptron_classifier(train_texts, train_targets, train_labels,
        dev_texts, dev_targets, dev_labels, test_texts, test_targets, test_labels)
    print test_scores
| StarcoderdataPython |
86921 | <reponame>tkrajina/gpxchart<filename>make_examples.py<gh_stars>1-10
import subprocess
class Cmd:
    """One example to render: output image name, README caption, CLI flags."""
    def __init__(self, output_file, description, params):
        # plain value container; attributes are read directly by the caller
        self.output_file, self.description, self.params = (
            output_file, description, params)
# Catalogue of example charts to generate:
# Cmd(output file under examples/, README caption, gpxchart CLI flags).
cmds = [
    Cmd("simple.png", "Simple", ""),
    Cmd("smoothed.png", "With smoothed elevations", "-sme"),
    Cmd("with_srtm_elevations.png", "With SRTM elevations", "-srtm"),
    Cmd("simple.svg", "SVG output", "-s 200,100"),
    Cmd("imperial.png", "Imperial units", "-im"),
    Cmd("custom_size.png", "Custom size", "-s 900,300"),
    Cmd("thicker_line.png", "Custom line width", "-lw 2"),
    Cmd("no_padding.png", "No padding", "-p 0,0,0,0"),
    Cmd("custom_padding.png", "Padding", "-p 50,20,20,20" ),
    Cmd("custom_font_size.png", "Custom font size", "-p 100,20,0,0 -f 10,20"),
    Cmd("custom_grid.png", "Custom grid", "-g 50,20"),
    Cmd("custom_labels.png", "Custom labels", "-l 250,20"),
    Cmd("custom_chart_padding.png", "Custom chart padding", "-cp 500,50,500,50"),
]
# Run every example through the gpxchart CLI and emit README-ready markdown.
for cmd in cmds:
    command = f"gpxchart {cmd.params} test_files/zbevnica.gpx examples/{cmd.output_file}"
    # str.split() with no argument collapses any run of whitespace, so the
    # double space left by an empty `params` cannot produce empty argv
    # entries (the old `.replace("  ", " ").split(" ")` fixed only paired
    # spaces and still yielded empty tokens for longer runs).
    subprocess.check_output(command.split())
    print(f"### {cmd.description}\n")
    print(f"`{command}`\n")
    # NOTE(review): this prints only a blank line — possibly a missing
    # ![image](...) embed for the README; confirm the intended output.
    print(f"\n")
175750 | from .choices import valid_extensions
def validate_file_extension(value):
    """Django field validator: reject uploads whose extension is not allowed.

    Raises ValidationError when the (case-insensitive) extension of
    ``value.name`` is not listed in ``choices.valid_extensions``.
    """
    import os
    from django.core.exceptions import ValidationError
    ext = os.path.splitext(value.name)[1]  # [0] returns path+filename, [1] is ".ext"
    if ext.lower() not in valid_extensions:
        raise ValidationError('Unsupported file extension.')
3253254 | <reponame>Baduit/ScriptGUIfier
import json
import os
import subprocess
import tkinter as tk
from tkinter import ttk
class ListOption:
    """A labelled ttk combobox whose choices come from a JSON config entry.

    Choices can be given inline ("values"), read from a ';'-separated
    environment variable ("values_from_env"), or taken from the stdout
    lines of a shell command ("values_from_script").
    """
    def __init__(self, parent_widget, json_conf):
        # NOTE: the old annotation `json_conf: json` named the json *module*,
        # not a type; the argument is a parsed dict.
        self.literal = json_conf["literal"] if "literal" in json_conf else ""
        # fall back to the literal when no display name is given
        self.name = json_conf["name"] if "name" in json_conf else self.literal
        self.type = json_conf["type"]
        self.frame = ttk.Frame(parent_widget)
        self.label = ttk.Label(self.frame, text = self.name)
        self.label.grid(column = 0, row = 0, padx = 5, pady = 2, sticky = tk.E + tk.W)
        self.combo_box = ttk.Combobox(self.frame)
        self.combo_box.grid(column = 1, row = 0, padx = 5, pady = 2, sticky = tk.E + tk.W)
        self.combo_box['values'] = self._extract_default_value(json_conf)
        # only preselect the first entry when there is something to select;
        # current(0) on an empty value list raises a TclError
        if self.combo_box['values']:
            self.combo_box.current(0)
    def _extract_default_value(self, json_conf):
        """Return the list of choices for the combobox ("" if none configured)."""
        if "values" in json_conf:
            return json_conf["values"]
        if "values_from_env" in json_conf:
            raw = os.environ[json_conf["values_from_env"]]
            # drop ALL empty fragments (leading/trailing/double ';');
            # the old list.remove('') dropped only the first one and raised
            # ValueError when no empty fragment was present at all
            return [part for part in raw.split(';') if part]
        if "values_from_script" in json_conf:
            raw = self._get_stdout_from_subprocess(json_conf["values_from_script"]).decode()
            # same fix as above: tolerate output without a trailing newline
            return [line for line in raw.split('\n') if line]
        return ""
    def _get_stdout_from_subprocess(self, process_name):
        """Run *process_name* through the shell and return its raw stdout bytes."""
        process = subprocess.Popen(process_name, stdout=subprocess.PIPE, shell=True)
        stdout, _ = process.communicate()
        return stdout
    def retrieve_value(self):
        """Return the text currently shown in the combobox."""
        return self.combo_box.get()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.