seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
# seq_id: 74854234897
def json_to_html(input):
    """Render a list of flat dicts as an HTML table.

    Column headers are taken from the keys of the first row; rows are
    assumed to share the same keys.

    Args:
        input: list of dicts, one dict per table row. May be empty.

    Returns:
        str: the table markup. An empty list yields an empty table
        (the original indexed input[0] unconditionally and raised
        IndexError on []).
    """
    if not input:
        return '<table class="table table-striped" style="width:100%"></table>'
    parts = ['<table class="table table-striped" style="width:100%">', "<tr>"]
    # Header row from the first record's keys.
    for key in input[0]:
        parts.append('<th>' + str(key) + "</th>")
    parts.append("</tr>")
    # One <tr> per record; iterate rows directly instead of range(len(...)).
    for row in input:
        parts.append("<tr>")
        for key in row:
            parts.append('<td scope="col">' + str(row[key]) + "</td>")
        parts.append("</tr>")
    parts.append("</table>")
    # str.join avoids the quadratic repeated string concatenation.
    return "".join(parts)
# ewenw/YelpMyProfessors | json_to_html.py | json_to_html.py | py | 432 | python | en | code | 0 | github-code | 13
# seq_id: 1598085
class TreeNode:
    """N-ary tree node holding a value and a list of child nodes."""

    def __init__(self, val=0, children=None):
        self.val = val
        # A fresh list per instance: the original used the mutable default
        # argument children=[], which was shared across all instances.
        self.children = children if children is not None else []
def getDiameter(root: TreeNode | None) -> int:
    """
    Return the diameter (number of edges on the longest path) of an n-ary tree.

    N = number of nodes in the tree
    -------------
    Time: O(N)
    Space: O(N)
    """
    def walk(node: TreeNode | None) -> tuple[int, int]:
        # Returns (height of `node`, best diameter found in its subtree).
        # An empty subtree has height -1 so a leaf ends up with height 0.
        if node is None:
            return -1, 0
        tallest, second_tallest, best = -1, -1, 0
        for child in node.children:
            height, diameter = walk(child)
            if height > tallest:
                tallest, second_tallest = height, tallest
            elif height > second_tallest:
                second_tallest = height
            best = max(best, diameter)
        # A path passing through `node` joins its two tallest child branches.
        return tallest + 1, max(best, tallest + second_tallest + 2)

    return walk(root)[1]
# 1
#  \
#   3
#  /|\
# 4 5 6
# |\   \
# 7 8   9
node4 = TreeNode(4, [TreeNode(7), TreeNode(8)])
node6 = TreeNode(6, [TreeNode(9)])
root = TreeNode(1, [TreeNode(3, [node4, TreeNode(5), node6])])
print(getDiameter(root))  # 4
#     1
#  / / \ \
# 2 3  4  5
#      / | \
#     6  7  8
#     |  |  |
#     9 10 11
#     |
#     12
branch6 = TreeNode(6, [TreeNode(9, [TreeNode(12)])])
branch7 = TreeNode(7, [TreeNode(10)])
branch8 = TreeNode(8, [TreeNode(11)])
root = TreeNode(
    1,
    [TreeNode(2), TreeNode(3), TreeNode(4), TreeNode(5, [branch6, branch7, branch8])],
)
print(getDiameter(root))  # 5
| ironwolf-2000/Algorithms | Graphs/Trees/Diameter/diameter.py | diameter.py | py | 1,379 | python | en | code | 2 | github-code | 13 |
25103133833 | """Target monitoring via SSH"""
import base64
import getpass
import hashlib
import logging
import os.path
import re
import tempfile
import time
from collections import defaultdict
from xml.etree import ElementTree as etree
from ...common.util import SecuredShell
from ...common.interfaces import MonitoringDataListener
import sys
if sys.version_info[0] < 3:
import ConfigParser
else:
import configparser as ConfigParser
logger = logging.getLogger(__name__)
logging.getLogger("paramiko.transport").setLevel(logging.WARNING)
def parse_xml(config):
    """Parse *config* as an XML file if the path exists, else as an XML string.

    Note the asymmetric return type: a file path yields an ElementTree,
    an XML string yields the root Element.
    """
    if not os.path.exists(config):
        return etree.fromstring(config)
    return etree.parse(config)
class Config(object):
    """Helper for reading the monitoring XML config."""

    def __init__(self, config):
        self.tree = parse_xml(config)

    def loglevel(self):
        """Return the configured log level; one of 'info' (default) or 'debug'."""
        raw = self.tree.getroot().get('loglevel')
        return raw if raw in ('info', 'debug') else 'info'
class AgentClient(object):
    """Agent client connection.

    Handles one remote monitoring agent over SSH: generating its config,
    copying it to the target host, starting it, draining its output
    line-wise and cleaning up afterwards.
    """

    def __init__(self, adr, timeout):
        self.run = []
        self.host = adr['host']
        self.username = adr['username']
        self.python = adr['python']
        self.metric = adr['metric']
        self.port = adr['port']
        self.interval = adr['interval']
        self.custom = adr['custom']
        self.startups = adr['startups']
        self.shutdowns = adr['shutdowns']
        self.session = None
        # Tail of the last read that did not end with a newline yet.
        self.buffer = ""
        self.ssh = SecuredShell(self.host, self.port, self.username, timeout)
        handle, cfg_path = tempfile.mkstemp('.cfg', 'agent_')
        os.close(handle)
        self.path = {
            # Destination path on remote host
            'AGENT_REMOTE_FOLDER': '/var/tmp/lunapark_monitoring',
            # Source path on tank
            'AGENT_LOCAL_FOLDER': os.path.dirname(__file__) + '/agent',
            'METRIC_LOCAL_FOLDER': os.path.dirname(__file__) + '/agent/metric',
            # Temp config path
            'TEMP_CONFIG': cfg_path
        }

    def start(self):
        """Start the remote agent process in an async SSH session."""
        logger.debug('Start monitoring: %s', self.host)
        self.session = self.ssh.async_session(
            " ".join([
                "DEBUG=1", self.python, self.path['AGENT_REMOTE_FOLDER'] +
                '/agent.py', '-c', self.path['AGENT_REMOTE_FOLDER'] +
                '/agent.cfg', '-t', str(int(time.time()))
            ]))
        return self.session

    def read_maybe(self):
        """Return complete lines received so far, or None if no full line yet.

        The trailing partial line is kept in self.buffer between calls.
        """
        chunk = self.session.read_maybe()
        if chunk:
            parts = chunk.rsplit('\n', 1)
            if len(parts) > 1:
                ready_chunk = self.buffer + parts[0] + '\n'
                self.buffer = parts[1]
                return ready_chunk
            else:
                self.buffer += parts[0]
                return None
        return None

    def create_agent_config(self, loglevel):
        """Write the agent's .cfg file locally and return its path.

        Raises ValueError if the configured interval is not a number.
        """
        try:
            float(self.interval)
        except (TypeError, ValueError):
            # Narrowed from a bare except: only conversion failures apply here.
            raise ValueError(
                "Monitoring interval should be a number: '%s'" % self.interval)
        cfg = ConfigParser.ConfigParser()
        cfg.add_section('main')
        cfg.set('main', 'interval', self.interval)
        cfg.set('main', 'host', self.host)
        cfg.set('main', 'loglevel', loglevel)
        cfg.set('main', 'username', self.username)
        cfg.add_section('metric')
        cfg.set('metric', 'names', self.metric)
        cfg.add_section('custom')
        for method in self.custom:
            if self.custom[method]:
                cfg.set('custom', method, ','.join(self.custom[method]))
        cfg.add_section('startup')
        for idx, cmd in enumerate(self.startups):
            cfg.set('startup', "cmd%s" % idx, cmd)
        cfg.add_section('shutdown')
        for idx, cmd in enumerate(self.shutdowns):
            cfg.set('shutdown', "cmd%s" % idx, cmd)
        with open(self.path['TEMP_CONFIG'], 'w') as fds:
            cfg.write(fds)
        return self.path['TEMP_CONFIG']

    def install(self, loglevel):
        """Create folder and copy agent and metrics scripts to remote host.

        Returns the local path of the generated config, or None on failure.
        """
        logger.info(
            "Installing monitoring agent at %s@%s...", self.username, self.host)
        # create remote temp dir
        # NOTE(review): py2 print statement in the remote command -- assumes
        # the remote interpreter (default '/usr/bin/env python2') is
        # Python 2; confirm for py3-only hosts.
        cmd = self.python + ' -c "import tempfile; print tempfile.mkdtemp();"'
        logger.info("Creating temp dir on %s", self.host)
        try:
            out, errors, err_code = self.ssh.execute(cmd)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate.
            logger.error(
                "Failed to install monitoring agent to %s",
                self.host,
                exc_info=True)
            return None
        if errors:
            # Use the module logger; the original called the root logger here.
            logger.error("[%s] error: '%s'", self.host, errors)
            return None
        if err_code:
            logger.error(
                "Failed to create remote dir via SSH"
                " at %s@%s, code %s: %s",
                self.username, self.host, err_code, out.strip())
            return None
        remote_dir = out.strip()
        if remote_dir:
            self.path['AGENT_REMOTE_FOLDER'] = remote_dir
        logger.debug(
            "Remote dir at %s:%s", self.host, self.path['AGENT_REMOTE_FOLDER'])
        # Copy agent and config
        agent_config = self.create_agent_config(loglevel)
        try:
            self.ssh.send_file(
                self.path['AGENT_LOCAL_FOLDER'] + '/agent.py',
                self.path['AGENT_REMOTE_FOLDER'] + '/agent.py')
            self.ssh.send_file(
                agent_config, self.path['AGENT_REMOTE_FOLDER'] + '/agent.cfg')
        except Exception:
            logger.error(
                "Failed to install agent on %s", self.host, exc_info=True)
            return None
        return agent_config

    def uninstall(self):
        """
        Remove agent's files from remote host.

        Fetches the agent log to a local temp file and returns its path.
        """
        if self.session:
            self.session.send("stop\n")
            self.session.close()
        fhandle, log_filename = tempfile.mkstemp(
            '.log', "agent_" + self.host + "_")
        os.close(fhandle)
        try:
            self.ssh.get_file(
                self.path['AGENT_REMOTE_FOLDER'] + "_agent.log", log_filename)
            self.ssh.rm_r(self.path['AGENT_REMOTE_FOLDER'])
        except Exception:
            # Best-effort cleanup: log and continue so the log path is
            # still returned as an artifact.
            logger.error("Exception while uninstalling agent", exc_info=True)
        logger.info("Removing agent from: %s@%s...", self.username, self.host)
        return log_filename
class MonitoringCollector(object):
    """Aggregate data from several collectors.

    Parses the monitoring XML config, installs one AgentClient per host,
    polls the agents, filters/obfuscates the received metric lines and
    forwards them to registered listeners.
    """

    def __init__(self, disguise_hostnames):
        self.config = None
        self.default_target = None
        self.agents = []
        self.agent_sessions = []
        self.filter_conf = {}
        self.listeners = []
        self.first_data_received = False
        self.send_data = []
        self.artifact_files = []
        self.inputs, self.outputs, self.excepts = [], [], []
        self.filter_mask = defaultdict(str)
        self.ssh_timeout = 5
        # When set (epoch seconds), data stamped earlier is dropped.
        self.load_start_time = None
        self.disguise_hostnames = disguise_hostnames

    def add_listener(self, obj):
        """Register a MonitoringDataListener-like object."""
        self.listeners.append(obj)

    def prepare(self):
        """Prepare for monitoring - install agents etc"""
        # Parse config
        agent_config = []
        if self.config:
            [agent_config, self.filter_conf] = self.getconfig(
                self.config, self.default_target)
        loglevel = Config(self.config).loglevel()
        logger.debug("filter_conf: %s", self.filter_conf)
        # Filtering
        for host in self.filter_conf:
            self.filter_mask[host] = []
        logger.debug("Filter mask: %s", self.filter_mask)
        # Creating agent for hosts
        logger.debug('Creating agents')
        for adr in agent_config:
            logger.debug('Creating agent: %s', adr)
            agent = AgentClient(adr, timeout=self.ssh_timeout)
            logger.debug('Install monitoring agent. Host: %s', agent.host)
            # Use a distinct name: the original rebound `agent_config`,
            # the very list being iterated.
            agent_cfg_file = agent.install(loglevel)
            if agent_cfg_file:
                self.agents.append(agent)
                self.artifact_files.append(agent_cfg_file)

    def start(self):
        """Start N parallel agents"""
        for agent in self.agents:
            agent.start()

    def poll(self):
        """Poll agents for data; returns len(self.outputs)."""
        for agent in self.agents:
            block = agent.read_maybe()
            if not block:
                continue
            lines = block.split("\n")
            for data in lines:
                logger.debug("Got data from agent: %s", data.strip())
                self.send_data.append(
                    self.hash_hostnames(
                        self.filter_unused_data(
                            self.filter_conf, self.filter_mask, data)))
            logger.debug("Data after filtering: %s", self.send_data)
            # The very first batch is held back; subsequent batches are
            # flushed to listeners immediately.
            if not self.first_data_received and self.send_data:
                self.first_data_received = True
                logger.info("Monitoring received first data")
            else:
                self.send_collected_data()
        return len(self.outputs)

    def stop(self):
        """Shutdown agents"""
        logger.debug("Uninstalling monitoring agents")
        for agent in self.agents:
            self.artifact_files.append(agent.uninstall())

    def send_collected_data(self):
        """sends pending data set to listeners"""
        # Plain loop: the original used a list comprehension purely for
        # its side effects.
        for listener in self.listeners:
            listener.monitoring_data(self.send_data)
        self.send_data = []

    def get_host_config(self, host, target_hint):
        """Build the per-host agent config dict from a <Host> XML element."""
        default = {
            'System': 'csw,int',
            'CPU': 'user,system,iowait',
            'Memory': 'free,cached,used',
            'Disk': 'read,write',
            'Net': 'recv,send,rx,tx',
        }
        default_metric = ['CPU', 'Memory', 'Disk', 'Net']
        names = defaultdict()
        hostname = host.get('address').lower()
        if hostname == '[target]':
            if not target_hint:
                raise ValueError(
                    "Can't use [target] keyword with "
                    "no target parameter specified")
            logger.debug("Using target hint: %s", target_hint)
            hostname = target_hint.lower()
        stats = []
        startups = []
        shutdowns = []
        custom = {
            'tail': [],
            'call': [],
        }
        metrics_count = 0
        for metric in host:
            # known metrics
            if metric.tag in default.keys():
                metrics_count += 1
                metr_val = default[metric.tag].split(',')
                if metric.get('measure'):
                    metr_val = metric.get('measure').split(',')
                for elm in metr_val:
                    if not elm:
                        continue
                    stat = "%s_%s" % (metric.tag, elm)
                    stats.append(stat)
                    agent_name = self.get_agent_name(metric.tag, elm)
                    if agent_name:
                        names[agent_name] = 1
            # custom metric ('call' and 'tail' methods)
            elif (str(metric.tag)).lower() == 'custom':
                metrics_count += 1
                isdiff = metric.get('diff')
                if not isdiff:
                    isdiff = 0
                # encode/decode: on Python 3, b64encode requires bytes
                # and returns bytes (the original passed str and crashed).
                stat = "%s:%s:%s" % (
                    base64.b64encode(
                        metric.get('label').encode('utf-8')).decode('ascii'),
                    base64.b64encode(
                        metric.text.encode('utf-8')).decode('ascii'),
                    isdiff)
                stats.append('Custom:' + stat)
                custom[metric.get('measure', 'call')].append(stat)
            elif (str(metric.tag)).lower() == 'startup':
                startups.append(metric.text)
            elif (str(metric.tag)).lower() == 'shutdown':
                shutdowns.append(metric.text)
        logger.debug("Metrics count: %s", metrics_count)
        logger.debug("Host len: %s", len(host))
        logger.debug("keys: %s", host.attrib.keys())
        logger.debug("values: %s", host.attrib.values())
        # use default metrics for host
        if metrics_count == 0:
            for metric in default_metric:
                metr_val = default[metric].split(',')
                for elm in metr_val:
                    stat = "%s_%s" % (metric, elm)
                    stats.append(stat)
                    agent_name = self.get_agent_name(metric, elm)
                    if agent_name:
                        names[agent_name] = 1
        metric = ','.join(names.keys())
        # BUG FIX: the original tested `not custom`, which is never true
        # because the dict always carries the 'tail'/'call' keys; the
        # fallback must check whether any custom metrics were collected.
        if not metric and not any(custom.values()):
            metric = "cpu-stat"
        return {
            'metric': metric,
            'interval': host.get('interval', 1),
            'priority': host.get('priority', 0),
            'port': int(host.get('port', 22)),
            'python': host.get('python', '/usr/bin/env python2'),
            'username': host.get('username', getpass.getuser()),
            'custom': custom,
            'host': hostname,
            'startups': startups,
            'shutdowns': shutdowns,
            # XXX: should be separate?
            'stats': {
                hostname: stats
            },
        }

    def getconfig(self, filename, target_hint):
        """Prepare config data: returns [list of host configs, filter dict]."""
        try:
            tree = parse_xml(filename)
        except IOError as exc:
            logger.error("Error loading config: %s", exc)
            raise RuntimeError("Can't read monitoring config %s" % filename)
        hosts = tree.findall('Host')
        config = []
        filter_obj = defaultdict(str)
        for host in hosts:
            host_config = self.get_host_config(host, target_hint)
            # XXX: why stats should be separated?
            filter_obj.update(host_config.pop('stats'))
            config.append(host_config)
        return [config, filter_obj]

    def filtering(self, mask, filter_list):
        """Filtering helper: keep only the masked columns of filter_list.

        Returns the joined string, or None when the mask does not fit
        the data line.
        """
        host = filter_list[0]
        initial = [0, 1]
        res = []
        if mask[host]:
            keys = initial + mask[host]
            for key in keys:
                try:
                    res.append(filter_list[key])
                except IndexError:
                    # logger.warn is deprecated; use warning().
                    logger.warning(
                        "Problems filtering data: %s with %s", mask,
                        len(filter_list))
                    return None
        return ';'.join(res)

    def filter_unused_data(self, filter_conf, filter_mask, data):
        """Filter unselected metrics from data"""
        logger.debug("Filtering data: %s", data)
        out = ''
        # Filtering data
        keys = data.rstrip().split(';')
        if re.match(r'^start;', data):  # make filter_conf mask
            host = keys[1]
            for i in range(3, len(keys)):
                if keys[i] in filter_conf[host]:
                    filter_mask[host].append(i - 1)
            logger.debug("Filter mask: %s", filter_mask)
            out = 'start;'
            filtered = self.filtering(filter_mask, keys[1:])
            # filtering() returns None when the mask and data disagree;
            # the original crashed on .rstrip() in that case.
            if filtered is not None:
                out += filtered.rstrip(';') + '\n'
        elif re.match(r'^\[debug\]', data):  # log debug output
            logger.debug('agent debug: %s', data.rstrip())
        else:
            # if we are in start_test() phase, check data's timestamp with
            # load_start_time and skip data collected before load_start_time
            if self.load_start_time is not None:
                try:
                    if int(keys[1]) >= self.load_start_time:
                        filtered = self.filtering(filter_mask, keys)
                        if filtered:
                            out = filtered + '\n'  # filtering values
                except IndexError:
                    pass
        return out

    def get_agent_name(self, metric, param):
        """Resolve the agent-side collector name for a metric/param pair."""
        depend = {
            'CPU': {
                'idle': 'cpu-stat',
                'user': 'cpu-stat',
                'system': 'cpu-stat',
                'iowait': 'cpu-stat',
                'nice': 'cpu-stat'
            },
            'System': {
                'la1': 'cpu-la',
                'la5': 'cpu-la',
                'la15': 'cpu-la',
                'csw': 'cpu-stat',
                'int': 'cpu-stat',
                'numproc': 'cpu-stat',
                'numthreads': 'cpu-stat',
            },
            'Memory': {
                'free': 'mem',
                'used': 'mem',
                'cached': 'mem',
                'buff': 'mem',
            },
            'Disk': {
                'read': 'disk',
                'write': 'disk',
            },
            'Net': {
                'recv': 'net',
                'send': 'net',
                'tx': 'net-tx-rx',
                'rx': 'net-tx-rx',
                'retransmit': 'net-retrans',
                'estab': 'net-tcp',
                'closewait': 'net-tcp',
                'timewait': 'net-tcp',
            }
        }
        # .get tolerates an unknown param (the original raised KeyError);
        # an unknown metric tag still raises, as callers only pass tags
        # listed in `default`.
        return depend[metric].get(param, '')

    def hash_hostnames(self, data):
        """Replace the hostname field with its md5 hex digest when enabled.

        Example payload lines:
        'bus-receiver02g.load.maps.yandex.net;1491233043;659;83;480;...'
        'start;bus-receiver02g.load.maps.yandex.net;1491233263;Net_closewait;...'
        """
        if not self.disguise_hostnames or not data:
            return data
        else:
            data_entries = data.split(';')
            # Hostname is field 1 on 'start' lines, field 0 otherwise.
            # encode(): hashlib requires bytes on Python 3.
            if data_entries[0] == 'start':
                data_entries[1] = hashlib.md5(
                    data_entries[1].encode('utf-8')).hexdigest()
            else:
                data_entries[0] = hashlib.md5(
                    data_entries[0].encode('utf-8')).hexdigest()
            return ';'.join(data_entries)
class StdOutPrintMon(MonitoringDataListener):
    """Simple listener, writing data to stdout"""

    def __init__(self):
        MonitoringDataListener.__init__(self)

    def monitoring_data(self, data_list):
        """Write each raw data line to stdout."""
        # Plain loop: the original used a list comprehension purely for
        # its side effects.
        for data in data_list:
            sys.stdout.write(data)
class MonitoringDataDecoder(object):
    """The class that serves converting monitoring data lines to dict"""

    NA = 'n/a'

    def __init__(self):
        # host -> ordered list of metric names announced by its 'start' line
        self.metrics = {}

    def decode_line(self, line):
        """convert mon line to dict

        Returns (host, data_dict, is_initial, timestamp). A 'start;...'
        line registers the metric names for a host; subsequent lines are
        zipped against those names.
        """
        is_initial = False
        data_dict = {}
        data = line.strip().split(';')
        timestamp = -1
        if data[0] == 'start':
            data.pop(0)  # remove 'start'
            host = data.pop(0)
            if not data:
                # logger.warn is deprecated; use warning().
                logger.warning("Wrong mon data line: %s", line)
            else:
                timestamp = data.pop(0)
                self.metrics[host] = []
                for metric in data:
                    if metric.startswith("Custom:"):
                        decoded = base64.standard_b64decode(
                            metric.split(':')[1])
                        # On Python 3 b64decode returns bytes; keep metric
                        # names as text so they work as dict keys downstream.
                        if isinstance(decoded, bytes):
                            decoded = decoded.decode('utf-8')
                        metric = decoded
                    self.metrics[host].append(metric)
                    data_dict[metric] = self.NA
                is_initial = True
        else:
            host = data.pop(0)
            timestamp = data.pop(0)
            if host not in self.metrics.keys():
                raise ValueError(
                    "Host %s not in started metrics: %s" % (host, self.metrics))
            if len(self.metrics[host]) != len(data):
                raise ValueError(
                    "Metrics len and data len differs: %s vs %s" %
                    (len(self.metrics[host]), len(data)))
            for metric in self.metrics[host]:
                data_dict[metric] = data.pop(0)
        logger.debug("Decoded data %s: %s", host, data_dict)
        return host, data_dict, is_initial, timestamp
# FIXME: 3 synchronize times between agent and collector better
| Alcereo/LoadTestingToolsCentos | tank/tank_src/yandextank/plugins/Monitoring/collector.py | collector.py | py | 19,951 | python | en | code | 0 | github-code | 13 |
74442165456 | import os
from stage import Stage
import subprocess
class Test(Stage):
    """
    Stage that runs a test script and logs its outcome.
    """

    def __init__(self, script_path, parent_module_name, interrupt_if_fail, is_logging, log_file_path,
                 only_fail_notification):
        """
        Parameters
        ----------
        script_path : str
            an absolute path to tests
        parent_module_name: str
            a parent module name
        interrupt_if_fail : bool
            interrupt the execution of the all stages if an error has occurred
        is_logging : bool
            write messages to the log file or not
        log_file_path : str
            an absolute path to directory for the log file
        only_fail_notification : bool
            notification condition
        """
        Stage.__init__(self, parent_module_name, interrupt_if_fail, log_file_path, 'Test', is_logging, "",
                       script_path, "", only_fail_notification)

    def pre_exec(self):
        """No preparation is needed before running the test."""
        return True

    def exec(self):
        """Run the test script, log its output and report success/failure."""
        completed = subprocess.run(self._main_script_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        for message in ("Test " + self._main_script_path + "\nStdout:",
                        completed.stdout.decode('utf-8'),
                        "Stderr:",
                        completed.stderr.decode('utf-8'),
                        "Test finished with code " + str(completed.returncode)):
            self.log(message)
        if completed.returncode == 0:
            return True
        # Non-zero exit: record the status and stop only when configured to.
        self.get_logger().set_execution_status(not self._get_interrupt_if_fail())
        return not self._get_interrupt_if_fail()

    def post_exec(self):
        """No cleanup is needed after running the test."""
        return True
| xp10rd/simple-test-tool | src/test.py | test.py | py | 1,654 | python | en | code | 0 | github-code | 13 |
# seq_id: 1539323137
# Karasev, BMSTU IU7-16B
# Task: read a matrix, find the column with the most zeros and move it to
# the end, shifting the remaining columns left.
mtrx = []
zero_count = 0
to_compare = 0
zero_index = 0
m = int(input('Введите количество строк в матрице: '))
n = int(input('Введите количество столбцов в матрице: '))
print('Введите матрицу: ')
for i in range(m):
    mtrx.append([])
    for j in range(n):
        mtrx[i].append(float(input()))
    print('Строка №', i + 1, 'заполнена!')
print('Изначальная матрица: ')
for i in range(m):
    print(mtrx[i])
print('-' * (n + 4 * n))
# Count zeros per column; remember the first column with the maximum count.
for i in range(n):
    for j in range(m):
        if mtrx[j][i] == 0:
            zero_count += 1
    if zero_count > to_compare:
        zero_index = i
        to_compare = zero_count
    zero_count = 0
# Move the found column to the end by shifting all later columns left.
# BUG FIX: the original swapped the column with the last one, which is
# not the shift the task statement asks for.
for i in range(m):
    mtrx[i].append(mtrx[i].pop(zero_index))
print('Матрица после перестановки: ')
for i in range(m):
    print(mtrx[i])
# aversionq/University-tasks | BMSTU_1st_Semester/lab_7/lab7_3.py | py | 1,162 | python | ru | code | 0 | github-code | 13
9088461120 | #https://www.acmicpc.net/problem/16986
#백준 16986번 인싸들의 가위바위보 (구현, BFS)
#import sys
#input = sys.stdin.readline
from itertools import permutations
def dfs(p1,p2,idx,wins,player):
    """Simulate the king-of-the-hill rock-paper-scissors tournament.

    p1, p2 -- indices (0..2) of the two players currently facing off;
              player 0 is the permuted crowd order, 1 is Kyunghee, 2 is Minho.
    idx    -- per-player position in their move sequence (mutated in place).
    wins   -- per-player win counters (mutated in place).
    player -- per-player move sequences (hand values 1..n).

    Sets the module-level ``result`` to 1 as soon as player 0 reaches k
    wins. Relies on the globals n, k, types and result defined below.
    """
    global result
    if wins[0] == k :
        result = 1
        return
    if wins[1] == k or wins[2] == k :
        return
    # Player 0 has exhausted their move sequence.
    if idx[0] == n :
        return
    # The idle player who will face the winner next.
    p3 = 3-(p1+p2)
    # Hands played this round (0-based into the types matrix).
    pvp1 = player[p1][idx[p1]]-1
    pvp2 = player[p2][idx[p2]]-1
    idx[p1] += 1
    idx[p2] += 1
    # types[a][b] == 2 -> hand a beats hand b; 0 -> b beats a; 1 is
    # presumably a draw, resolved here in favour of the higher-numbered
    # player -- per the BOJ 16986 tie rule (confirm against the statement).
    if types[pvp1][pvp2] == 2 or (types[pvp1][pvp2] == 1 and p1 > p2):
        wins[p1] += 1
        dfs(p1,p3,idx,wins,player)
    elif types[pvp1][pvp2] == 0 or (types[pvp1][pvp2] == 1 and p2 > p1):
        wins[p2] += 1
        dfs(p2,p3,idx,wins,player)
n, k = map(int, input().split())
# n x n win/lose relation matrix between the n hand shapes.
types = [list(map(int, input().split())) for _ in range(n)]
cases = [i for i in range(1,n+1)]
kyunghee = list(map(int, input().split()))
minho = list(map(int, input().split()))
result = 0
# Try every ordering of the crowd's hands; stop at the first winning one.
for case in permutations(cases, n):
    player = [case,kyunghee,minho]
    idx = [0,0,0]
    wins = [0,0,0]
    dfs(0,1,idx,wins,player)
    if result :
        break
print(1 if result else 0) | MinsangKong/DailyProblem | 08-16/4-1.py | 4-1.py | py | 1,181 | python | en | code | 0 | github-code | 13 |
35299277548 | # @nzm_ort
# https://github.com/nozomuorita/atcoder-workspace-python
# import module ------------------------------------------------------------------------------
from collections import defaultdict, deque, Counter
import math
from itertools import combinations, permutations, product, accumulate, groupby, chain
from heapq import heapify, heappop, heappush
import bisect
import sys
# sys.setrecursionlimit(100000000)
inf = float('inf')
mod1 = 10**9+7
mod2 = 998244353
def ceil_div(x, y):
    """Ceiling integer division: the smallest integer >= x / y."""
    quotient, remainder = divmod(x, y)
    return quotient + (1 if remainder else 0)
# main code ------------------------------------------------------------------------------------
n = int(input())
seen = set()
ans = 0
for _ in range(n):
    la = list(map(int, input().split()))
    # Drop the leading length field; the remaining numbers identify the entry.
    key = tuple(la[1:])
    # BUG FIX: the original tested the *list* against the set
    # (`if a in ll`), which raises TypeError because lists are unhashable;
    # membership must use the tuple that is actually stored.
    if key not in seen:
        ans += 1
        seen.add(key)
print(ans)
# nozomuorita/atcoder-workspace-python | abc/abc226/B/answer.py | py | 825 | python | en | code | 0 | github-code | 13
19110580670 |
from django.urls import path
from . import views
# URL routes for the expensify app; each `name` is used for reverse()
# lookups and {% url %} tags in templates.
urlpatterns = [
    path('', views.home, name="home"),
    path('about/', views.about, name="about"),
    path('aboutContent/', views.aboutContent, name="aboutContent"),
    path('education/', views.education, name="education"),
    path('workExp/', views.workExp, name="workExperience"),
    path('skills/', views.skills, name="skills"),
    path('achievements/', views.achievements, name="achievements"),
]
| NikhilSegu/django_expensify | expensify/urls.py | urls.py | py | 462 | python | en | code | 0 | github-code | 13 |
4578697640 | import numpy as np
from .SegReader import SegReader
class MIMOSegReader(SegReader):
    """Multi-input / multi-output segmentation reader.

    Extends SegReader so that every batch is returned as a pair of dicts
    keyed by ``data_name`` / ``label_name`` (named-tensor style batches).
    """

    def __init__(self, flist_name, data_root,
                 batchsize, cropsize, step, samplerate, cell,
                 img_trans, gt_trans, joint_trans,
                 withgt=True,
                 bandlist=None,
                 sampleseed=-1,
                 data_name=None,
                 label_name=None,
                 lvreadertype='pil',
                 parsertype='common',
                 openfirstly=False):
        self.samplerate = samplerate
        self.cell = cell
        # Fresh lists instead of the original's mutable default arguments
        # (data_name=['data'], label_name=['softmax_label'] were shared
        # across instances).
        self.data_name = data_name if data_name is not None else ['data']
        self.label_name = label_name if label_name is not None else ['softmax_label']
        # NOTE(review): super(SegReader, self) bypasses SegReader.__init__
        # and calls SegReader's *base* initializer. If that is unintended,
        # it should read super(MIMOSegReader, self) -- confirm against the
        # SegReader class hierarchy.
        super(SegReader, self).__init__(flist_name, data_root,
                                        batchsize, cropsize, step,
                                        img_trans, gt_trans, joint_trans,
                                        withgt=withgt,
                                        bandlist=bandlist,
                                        sampleseed=sampleseed,
                                        lvreadertype=lvreadertype,
                                        parsertype=parsertype,
                                        openfirstly=openfirstly)

    def _new_batch(self):
        """Return fresh empty accumulators: one list per data/label name."""
        data = dict((name, []) for name in self.data_name)
        label = dict((name, []) for name in self.label_name)
        return data, label

    def _append_sample(self, data, label, data_, label_):
        """Append one sample's named arrays to the batch accumulators."""
        for k, v in data_.items():
            data[k].append(v)
        for k, v in label_.items():
            if v is not None:
                label[k].append(v)

    def _finalize_batch(self, data, label):
        """Stack accumulated lists into numpy arrays; empty labels -> None."""
        for name in self.data_name:
            data[name] = np.asarray(data[name])
        for name in self.label_name:
            label[name] = np.asarray(label[name]) if label[name] else None
        return data, label

    def readOnIdxes(self, idx_list):
        '''
        It read the image based on the index.
        :param idx_list: The list of the indexes.
        :return: (data, label) -- dicts keyed by data_name / label_name.
        '''
        data, label = self._new_batch()
        for idx in idx_list:
            data_, label_ = self.__getitem__(idx)
            self._append_sample(data, label, data_, label_)
        return self._finalize_batch(data, label)

    def read(self, sampleidx, initx, inity):
        '''
        This method reads a batch of patches.
        :param sampleidx: The index of the sample to be read. If it is -1, then the sample will be chosen by random.
        :param initx: The x-coordinate of the left-upper point of the crop in the image.
        :param inity: The y-coordinate of the left-upper point of the crop in the image.
        :return: (data, label) -- dicts keyed by data_name / label_name.
        '''
        data, label = self._new_batch()
        for _ in range(self.batchsize):
            sample = self.readerStore.getOneSample(sampleidx)
            data_, label_ = self.read_img(sample, initx, inity)
            self._append_sample(data, label, data_, label_)
        return self._finalize_batch(data, label)

    def read_img(self, sample, initx=None, inity=None):
        '''
        Reads a single crop and wraps it into single-key dicts using the
        first data/label name.
        '''
        img, label = super(MIMOSegReader, self).read_img(sample, initx, inity)
        return dict([(self.data_name[0], img)]), dict([(self.label_name[0], label)])
# ChenKQ/rsreader | rsreader/netreader/MIMOSegReader.py | MIMOSegReader.py | py | 4,046 | python | en | code | 2 | github-code | 13
69901044499 | # -*- coding: utf-8 -*-
'''
____ _____ ______ _____
/ __ \| __ \| ____| __ \ /\
| | | | |__) | |__ | |__) | / \
| | | | ___/| __| | _ / / /\ \
| |__| | | | |____| | \ \ / ____ \
\____/|_| |______|_| \_\/_/ \_\
@author: VMware Korea CMP TF
'''
#===============================================================================
# Import
#===============================================================================
from psycopg import AsyncConnection
from pydantic import BaseModel, PrivateAttr
from typing import Any
#===============================================================================
# Abstract
#===============================================================================
class Table(BaseModel):
    """Async PostgreSQL table accessor with pre-built CRUD query templates.

    NOTE(review): queries are built with str.format and naive quoting,
    which is vulnerable to SQL injection for untrusted values; consider
    psycopg server-side parameters. Left as-is apart from the WHERE-join
    fix in getRecords.
    """
    hostname: str
    hostport: int
    database: str
    username: str
    password: str
    table: str
    primaryKey: str
    fieldKeys: list
    fieldTypes: list
    querySelect: str
    queryInsert: str
    queryUpdate: str
    queryDelete: str
    _conn_: Any = PrivateAttr()

    class Cursor:
        """Thin async context-manager wrapper around a psycopg cursor."""

        def __init__(self, table):
            self.table = table

        async def __aenter__(self):
            self.cursor = self.table._conn_.cursor()
            return self

        async def __aexit__(self, *args):
            await self.cursor.close()

        async def execute(self, query, **kargs):
            await self.cursor.execute(query, kargs)
            return self

        async def commit(self):
            await self.table._conn_.commit()
            return self

        async def fetchAll(self):
            return await self.cursor.fetchall()

        async def fetchOne(self):
            return await self.cursor.fetchone()

        async def getRecords(self, **conditions):
            """Select rows matching all keyword conditions (ANDed together).

            Returns a list of dicts keyed by the table's field names.
            """
            if conditions:
                where = []
                for k, v in conditions.items():
                    if isinstance(v, int): where.append("{}={}".format(k, int(v)))
                    else: where.append("{}='{}'".format(k, str(v)))
                # BUG FIX: the original joined conditions with ',' which is
                # not valid SQL in a WHERE clause; they must be ANDed.
                where = ' WHERE {}'.format(' AND '.join(where))
            else: where = ''
            results = []
            await self.execute(self.table.querySelect.format(where))
            for record in await self.fetchAll():
                # Zip the row's columns with the field names declared at init.
                result = {}
                kidx = 0
                for column in record:
                    result[self.table.fieldKeys[kidx]] = column
                    kidx += 1
                results.append(result)
            return results

        async def createRecord(self, **record):
            """Insert one row; caller must commit()."""
            await self.execute(self.table.queryInsert.format(**record))
            return self

        async def updateRecord(self, **record):
            """Update one row by primary key; caller must commit()."""
            await self.execute(self.table.queryUpdate.format(**record))
            return self

        async def deleteRecord(self, **record):
            """Delete one row by primary key; caller must commit()."""
            await self.execute(self.table.queryDelete.format(**record))
            return self

    def cursor(self): return Table.Cursor(self)

    @classmethod
    async def initialize(cls, config, table, fields):
        """Build the CRUD query templates from the field spec and connect.

        ``fields`` is a list of (name, kind) pairs where kind is one of
        'int', 'char', 'pkey-int', 'pkey-char', 'pkey-default'. Exactly
        one pkey-* entry is expected; with none, primaryKey/primaryKeyType
        stay unbound and a NameError is raised below.
        """
        hostname = config['psql']['hostname']
        hostport = int(config['psql']['hostport'])
        database = config['psql']['database']
        username = config['cmp']['username']
        password = config['cmp']['password']
        fieldKeys = [f[0] for f in fields]
        fieldTypes = [f[1] for f in fields]
        insertParams = []
        updateParams = []
        for field in fields:
            k, t = field
            if t == 'int':
                insertParams.append("{%s}" % k)
                updateParams.append("%s={%s}" % (k, k))
            elif t == 'char':
                insertParams.append("'{%s}'" % k)
                updateParams.append("%s='{%s}'" % (k, k))
            elif t == 'pkey-char':
                primaryKeyType = 'string'
                insertParams.append("'{%s}'" % k)
                primaryKey = k
            elif t == 'pkey-int':
                primaryKeyType = 'number'
                insertParams.append("{%s}" % k)
                primaryKey = k
            elif t == 'pkey-default':
                # Let the database assign the key (e.g. a serial column).
                primaryKeyType = 'number'
                insertParams.append("DEFAULT")
                primaryKey = k
        if primaryKeyType == 'string':
            querySelect = 'SELECT * FROM %s{};' % table
            queryInsert = 'INSERT INTO %s VALUES(%s);' % (table, ','.join(insertParams))
            queryUpdate = "UPDATE %s SET %s WHERE %s='{%s}';" % (table, ','.join(updateParams), primaryKey, primaryKey)
            queryDelete = "DELETE FROM %s WHERE %s='{%s}';" % (table, primaryKey, primaryKey)
        elif primaryKeyType == 'number':
            querySelect = 'SELECT * FROM %s{};' % table
            queryInsert = 'INSERT INTO %s VALUES(%s);' % (table, ','.join(insertParams))
            queryUpdate = 'UPDATE %s SET %s WHERE %s={%s};' % (table, ','.join(updateParams), primaryKey, primaryKey)
            queryDelete = 'DELETE FROM %s WHERE %s={%s};' % (table, primaryKey, primaryKey)
        # logging
        # NOTE(review): LOG is not defined in this module; it appears to be
        # injected as a global by the application bootstrap -- confirm.
        LOG.INFO('Init Table')
        LOG.INFO(LOG.KEYVAL('hostname', hostname))
        LOG.INFO(LOG.KEYVAL('hostport', hostport))
        LOG.INFO(LOG.KEYVAL('database', database))
        LOG.INFO(LOG.KEYVAL('username', username))
        LOG.INFO(LOG.KEYVAL('password', password))
        LOG.INFO(LOG.KEYVAL('table', table))
        LOG.INFO(LOG.KEYVAL('primaryKey', primaryKey))
        LOG.INFO(LOG.KEYVAL('querySelect', querySelect))
        LOG.INFO(LOG.KEYVAL('queryInsert', queryInsert))
        LOG.INFO(LOG.KEYVAL('queryUpdate', queryUpdate))
        LOG.INFO(LOG.KEYVAL('queryDelete', queryDelete))
        return await (cls(
            hostname=hostname,
            hostport=hostport,
            database=database,
            username=username,
            password=password,
            table=table,
            primaryKey=primaryKey,
            fieldKeys=fieldKeys,
            fieldTypes=fieldTypes,
            querySelect=querySelect,
            queryInsert=queryInsert,
            queryUpdate=queryUpdate,
            queryDelete=queryDelete
        )).connect()

    async def connect(self):
        """Open the async psycopg connection; logs and re-raises on failure."""
        try:
            self._conn_ = await AsyncConnection.connect(host=self.hostname, port=self.hostport, dbname=self.database, user=self.username, password=self.password)
            LOG.INFO(f'Table[{self.table}] Connected [{self.hostname}:{self.hostport}/{self.database}]')
        except Exception as e:
            LOG.INFO(f'Table[{self.table}] Disconnected [{self.hostname}:{self.hostport}/{self.database}]')
            raise e
        return self
| vmware-cmbu-seak/opera | src/drivers/postgresql.py | postgresql.py | py | 6,829 | python | en | code | 0 | github-code | 13 |
73141314259 | import numpy as np
from tqdm import tqdm, trange
from random import random, randint
from environment import KArmsBandit
import matplotlib.pyplot as plt
import math
class EGreedyPolicy:
    """Epsilon-greedy action selection with optimistic initial values (Q=5)."""

    def __init__(self, K, epsilon=0.1):
        self.K = K  # size of the action space
        self.Q = [5 for _ in range(K)]  # estimated value of each action
        self.epsilon = epsilon  # exploration probability
        self.N = [0 for _ in range(K)]  # times each action was picked
        self.count = 0  # total number of selections
        self.total_reward = 0  # cumulative reward
        self.attack_q_star = 0  # times the optimal action was chosen

    def get_action(self, q_star):
        """Pick an action; q_star is the index of the true best action,
        used only for hit-rate bookkeeping."""
        self.count += 1
        if random() < self.epsilon:
            # Explore: uniform random action. BUG FIX: the original used
            # the module-level K instead of self.K.
            action = randint(0, self.K - 1)
        else:
            # Exploit: greedy action (first index holding the max estimate).
            action = self.Q.index(max(self.Q))
        self.N[action] += 1
        if action == q_star:
            self.attack_q_star += 1
        return action

    def update_Q(self, action, reward):
        """Incremental sample-average update of the action-value estimate."""
        self.total_reward += reward
        self.Q[action] = self.Q[action] + 1 / self.N[action] * (reward - self.Q[action])
class UCBPolicy:
    """Upper-Confidence-Bound (UCB1) action selection."""

    def __init__(self, K, c=2):
        self.K = K  # size of the action space
        self.Q = [5 for _ in range(K)]  # estimated value of each action
        self.c = c  # exploration strength
        self.N = [0 for _ in range(K)]  # times each action was picked
        self.count = 0  # total number of selections
        self.total_reward = 0  # cumulative reward
        self.attack_q_star = 0  # times the optimal action was chosen

    def get_action(self, q_star):
        """Pick the action maximising Q + c * sqrt(ln(t) / N)."""
        self.count += 1
        # BUG FIX: the original computed sqrt(log(count / N)) * c, i.e. the
        # division was wrongly inside the log; UCB1 is Q + c*sqrt(log(t)/N).
        # The 1e-8 keeps unvisited arms (N == 0) strongly favoured.
        scores = [
            self.Q[idx] + self.c * math.sqrt(math.log(self.count) / (self.N[idx] + 1e-8))
            for idx in range(self.K)
        ]
        action = scores.index(max(scores))
        self.N[action] += 1
        if action == q_star:
            self.attack_q_star += 1
        return action

    def update_Q(self, action, reward):
        """Incremental sample-average update of the action-value estimate."""
        self.total_reward += reward
        self.Q[action] = self.Q[action] + 1 / self.N[action] * (reward - self.Q[action])
# 10-armed testbed: average the per-step reward of an epsilon-greedy policy
# over 2000 independent bandit instances, 1000 steps each.
K = 10
rewards = []
for i in trange(2000):
    bandit = KArmsBandit(K)
    policy_e_greedy_0 = EGreedyPolicy(K, 0.1)
    rewards.append([])
    for j in range(1000):
        action = policy_e_greedy_0.get_action(bandit.q_star)
        # NOTE(review): update_Q is never called in this loop, so the policy
        # never learns from the observed rewards — presumably unintended; confirm.
        rewards[-1].append(bandit.get_reward(action))
# mean reward per time step across all runs
plt.plot(np.mean(np.array(rewards), axis=0))
plt.show()
# policy0 = EGreedyPolicy(K, 0)
# policy1 = EGreedyPolicy(K, 0.1)
# policy2 = EGreedyPolicy(K, 0.01)
# UCB_policy = UCBPolicy(K, 2)
#
# mean_reward_list0 = []
# best_action_rate0 = []
# mean_reward_list1 = []
# best_action_rate1 = []
# mean_reward_list2 = []
# best_action_rate2 = []
# mean_reward_list_ucb = []
# best_action_rate_ucb = []
# for i in tqdm(range(1000)):
# action = policy0.get_action(bandit.q_star)
# reward = bandit.get_reward(action)
# policy0.update_Q(action, reward)
# mean_reward_list0.append(policy0.total_reward / policy0.count)
# best_action_rate0.append(policy0.attack_q_star / policy0.count)
#
# action = policy1.get_action(bandit.q_star)
# reward = bandit.get_reward(action)
# policy1.update_Q(action, reward)
# mean_reward_list1.append(policy1.total_reward / policy1.count)
# best_action_rate1.append(policy1.attack_q_star / policy1.count)
#
# action = policy2.get_action(bandit.q_star)
# reward = bandit.get_reward(action)
# policy2.update_Q(action, reward)
# mean_reward_list2.append(policy2.total_reward / policy2.count)
# best_action_rate2.append(policy2.attack_q_star / policy2.count)
#
# action = UCB_policy.get_action(bandit.q_star)
# reward = bandit.get_reward(action)
# UCB_policy.update_Q(action, reward)
# mean_reward_list_ucb.append(UCB_policy.total_reward / UCB_policy.count)
# best_action_rate_ucb.append(UCB_policy.attack_q_star / UCB_policy.count)
#
# plt.title('mean reward')
# plt.plot(mean_reward_list0, label='e=0')
# plt.plot(mean_reward_list1, label='e=0.1')
# plt.plot(mean_reward_list2, label='e=0.01')
# plt.plot(mean_reward_list_ucb, label='ucb c=2')
# plt.legend()
# plt.show()
#
# plt.title('best action rate')
# plt.plot(best_action_rate0, label='e=0')
# plt.plot(best_action_rate1, label='e=0.1')
# plt.plot(best_action_rate2, label='e=0.01')
# plt.plot(best_action_rate_ucb, label='ucb c=2')
# plt.legend()
# plt.show() | dourgey/Reinforcement-Learning-Implements-With-PyTorch | bandits/policy.py | policy.py | py | 4,472 | python | en | code | 0 | github-code | 13 |
26739170419 | # import the function that will return an instance of a connection
from flask_app.config.mysqlconnection import connectToMySQL
from flask_app.models import dojo
# model the class after the user table from our database
class Ninja:
    def __init__(self,data):
        """Build a Ninja from a DB row dict.

        When the row contains a dojo_id, the owning Dojo is eagerly loaded
        (one extra query per ninja).
        """
        self.id = data['id']
        self.first_name = data['first_name']
        self.last_name = data['last_name']
        self.age = data['age']
        if "dojo_id" in data:
            self.dojo = dojo.Dojo.get_one({'id' : data['dojo_id']})
        self.created_at = data['created_at']
        self.updated_at = data['updated_at']
    @classmethod
    def create(cls,data):
        """INSERT a new ninja row from *data* (first_name, last_name, age, dojo_id).

        Returns whatever query_db returns — presumably the new row id; confirm
        against the mysqlconnection helper.
        """
        query = """
            INSERT INTO ninjas (first_name, last_name, age, dojo_id, created_at, updated_at)
            VALUES (%(first_name)s,%(last_name)s,%(age)s,%(dojo_id)s, NOW(), NOW());
        """
        result = connectToMySQL('dojos_and_ninjas').query_db(query,data)
        return result
@classmethod
def get_all(cls):
query = "SELECT * FROM ninjas;"
results = connectToMySQL('dojos_and_ninjas').query_db(query)
ninjas = []
for ninja in results:
ninjas.append(cls(ninja))
return ninjas
    @classmethod
    def get_one(cls,data):
        """Fetch a single ninja; *data* must contain 'id'.

        NOTE(review): indexes result[0] without checking — an unknown id will
        raise (IndexError if query_db returns an empty list); confirm intended.
        """
        query = "SELECT * FROM ninjas WHERE id = %(id)s;"
        result = connectToMySQL('dojos_and_ninjas').query_db(query,data)
        return cls(result[0])
    @classmethod
    def update(cls,data):
        """UPDATE an existing ninja row; *data* must supply every column plus 'id'."""
        query = """
            UPDATE ninjas SET first_name = %(first_name)s, last_name = %(last_name)s, age = %(age)s, dojo_id = %(dojo_id)s, updated_at = NOW()
            WHERE id = %(id)s;
        """
        result = connectToMySQL('dojos_and_ninjas').query_db(query,data)
        return result
@classmethod
def delete(cls,data):
query = "DELETE FROM ninjas WHERE id=%(id)s;"
results = connectToMySQL('dojos_and_ninjas').query_db(query,data)
return results | ChristianQ98/coding_dojo | python/Flask_MySQL/DB_Connection/dojos_and_ninjas/flask_app/models/ninja.py | ninja.py | py | 1,975 | python | en | code | 0 | github-code | 13 |
31868567218 | from Test.TestBase import *
import datetime
from Worker import Worker
from Repair import Repair
class TestRepair(MockTest):
    """Integration test: scheduling a simple repair updates both DB tables."""
    def testSimpleSchedule(self):
        """A worker handling a simple repair marks it scheduled and frees the worker.

        Builds a repair and a busy worker from dict fixtures, runs the simple
        scheduling path, then checks the persisted rows match the mutated
        expectation dicts.
        """
        repair_dct = {'repair_id': 12, 'repair_time': datetime.datetime(2022, 12, 29, 19, 50, 50), 'repair_state': '调度中', 'fault_name': '下水道',
                      'user_id': 1, 'source': 'phone', 'repair_content': '测试用例', 'complex_repair': 0, 'remaining_step': 0, }
        repair = Repair(**repair_dct)
        worker_dct = {'worker_id': 3, 'fault_name': '下水道', 'is_free': 0, }
        worker = Worker(**worker_dct)
        worker.handle_schedule_simple(repair=repair)
        # after scheduling, the repair row should carry the scheduled state
        repair_dct['repair_state'] = '已调度'
        self.assertEqual(self.instance.get_dict_data_select("""select * from repair where repair_id = %d;""" % repair_dct['repair_id']),
                         [repair_dct])
        # ...and the worker should be marked free with no active schedule
        worker_dct['is_free'] = 1
        worker_dct['schedule_id'] = None
        self.assertEqual(self.instance.get_dict_data_select("""select * from worker where worker_id = %d;""" % worker_dct['worker_id']),
                         [worker_dct])
| renke999/ooad-lab2 | Test/TestWorker.py | TestWorker.py | py | 1,138 | python | en | code | 3 | github-code | 13 |
27706494503 | import json
import os
import numpy
import datetime
import DataUtility
from DataUtility import DataSetFormat, DataSetType
import Constants as Constant
def get_number_of_arrays_for_sensor(sensor):
    """Return how many data arrays *sensor* produces, or None for unknown sensors."""
    counts = {
        DataUtility.Sensor.EMG: Constant.NUMBER_OF_EMG_ARRAYS,
        DataUtility.Sensor.ACC: Constant.NUMBER_OF_ACC_ARRAYS,
        DataUtility.Sensor.GYR: Constant.NUMBER_OF_GYR_ARRAYS,
        DataUtility.Sensor.ORI: Constant.NUMBER_OF_ORI_ARRAYS,
    }
    return counts.get(sensor)
def get_frequency_of_sensor(sensor):
    """Return the sampling frequency of *sensor*, or None for unknown sensors."""
    frequencies = {
        DataUtility.Sensor.EMG: Constant.FREQUENCY_EMG,
        DataUtility.Sensor.ACC: Constant.FREQUENCY_ACC,
        DataUtility.Sensor.GYR: Constant.FREQUENCY_GYR,
        DataUtility.Sensor.ORI: Constant.FREQUENCY_ORI,
    }
    return frequencies.get(sensor)
def get_length_of_arrays_for_sensor(sensor):
    """Return the fixed data-array length for *sensor*, or None for unknown sensors."""
    lengths = {
        DataUtility.Sensor.EMG: Constant.DATA_LENGTH_EMG,
        DataUtility.Sensor.ACC: Constant.DATA_LENGTH_ACC,
        DataUtility.Sensor.GYR: Constant.DATA_LENGTH_GYR,
        DataUtility.Sensor.ORI: Constant.DATA_LENGTH_ORI,
    }
    return lengths.get(sensor)
def get_json_array_name_for_sensor(sensor):
    """Return the JSON key holding *sensor*'s data, or None for unknown sensors."""
    json_names = {
        DataUtility.Sensor.EMG: Constant.JSON_EMG_ARRAY_NAME,
        DataUtility.Sensor.ACC: Constant.JSON_ACC_ARRAY_NAME,
        DataUtility.Sensor.GYR: Constant.JSON_GYR_ARRAY_NAME,
        DataUtility.Sensor.ORI: Constant.JSON_ORI_ARRAY_NAME,
    }
    return json_names.get(sensor)
# Function: get_json_data_from_file
# ----------------------------
# Open JSON-file
#
# file : JSON-file to open
#
# returns : JSON-data from file
#
def get_json_data_from_file(file):
    """Open the JSON file wrapped by *file* (must expose get_file_path()) and return the parsed data."""
    with open(file.get_file_path()) as json_file:
        json_data = json.load(json_file)
    return json_data
# Function: is_file_already_compressed
# ----------------------------
# Check if file already are compressed
#
# file : JSON-file to compress
# data_set_type : Training or test data set
#
# returns : true if file exist in compressed folder, false else
#
def is_file_already_compressed(file, data_set_type):
    """Return True if *file* already exists in the compressed folder for *data_set_type*."""
    compressed_file_path = DataUtility.get_data_set_path(DataSetFormat.COMPRESSED, data_set_type) + file.filename
    return os.path.exists(compressed_file_path)
# Function: compress_json_file
# ----------------------------
# compress input json file
#
# file : JSON-file to compress
# data_set_type : Training or test data set
#
def compress_json_file(file, data_set_type):
    """'Compress' *file* by truncating each sensor's data table to its fixed length.

    Writes the result under the same filename in the compressed folder for
    *data_set_type*.  Despite the variable name, the data is only truncated,
    not transposed — see the commented-out transpose branch below.
    """
    print("Compressing file: " + file.filename)
    raw_data = get_json_data_from_file(file)
    compressed_data = {}
    # the two lists are index-aligned: array name i is cut to data length i
    json_array_name_list = [Constant.JSON_EMG_ARRAY_NAME, Constant.JSON_ACC_ARRAY_NAME, Constant.JSON_GYR_ARRAY_NAME, Constant.JSON_ORI_ARRAY_NAME]
    data_length_list = [Constant.DATA_LENGTH_EMG, Constant.DATA_LENGTH_ACC, Constant.DATA_LENGTH_GYR, Constant.DATA_LENGTH_ORI]
    for json_array_name, data_length in zip(json_array_name_list, data_length_list):
        compressed_data[json_array_name] = {}
        # if file.is_recorded:
        #     transposed_raw_data = numpy.transpose(raw_data[json_array_name][Constant.JSON_ARRAY_DATA_TABLE_NAME][:data_length]).tolist()
        # else:
        #     transposed_raw_data = raw_data[json_array_name][Constant.JSON_ARRAY_DATA_TABLE_NAME][:data_length]
        transposed_raw_data = raw_data[json_array_name][Constant.JSON_ARRAY_DATA_TABLE_NAME][:data_length]
        compressed_data[json_array_name][Constant.JSON_ARRAY_DATA_TABLE_NAME] = transposed_raw_data
    compressed_file_path = DataUtility.get_data_set_path(DataSetFormat.COMPRESSED, data_set_type) + file.filename
    with open(compressed_file_path, 'w') as outfile:
        json.dump(compressed_data, outfile)
def NormalizeArray(array):
    """Return *array* scaled to unit Euclidean (L2) norm."""
    norm = numpy.linalg.norm(array)
    return array / norm
def date_to_string(day, month, year):
    """Return the date as ``'{year}-{MM}-{DD}'`` with day and month zero-padded.

    The year is rendered as-is (not padded), matching the original behaviour.
    The original mutated day/month into prefixed strings when below 10; the
    ``:02d`` format specifiers express the same padding directly.
    """
    return '{}-{:02d}-{:02d}'.format(year, month, day)
def is_int_input(i):
    """Return True when *i* can be converted to int; print a warning and return False otherwise."""
    try:
        int(i)
    except ValueError:
        print("That's not an int!")
        return False
    else:
        return True
def is_float_input(i):
    """Return True when *i* can be converted to float; print a warning and return False otherwise."""
    try:
        float(i)
    except ValueError:
        print("That's not a float!")
        return False
    else:
        return True
def second_to_HMS(current_time):
    """Split a duration in seconds into an (hours, minutes, seconds) tuple."""
    hours, remainder = divmod(current_time, 3600)
    minutes, seconds = divmod(remainder, 60)
    return (hours, minutes, seconds)
def mean_absolute_value(values):
    """Return the mean of the absolute values of *values* (the MAV feature)."""
    return numpy.mean(numpy.absolute(values))
def root_mean_square(values):
    """Return sqrt(mean(values**2)) — the RMS of the input sequence."""
    squared = numpy.square(values)
    total = numpy.sum(squared)
    return numpy.sqrt((1 / squared.size) * total)
def waveform_length(values):
    """Return the waveform length: the sum of absolute first differences."""
    return numpy.sum(numpy.absolute(numpy.diff(values)))
| Tonychausan/MyoArmbandPython | src/Utility.py | Utility.py | py | 5,376 | python | en | code | 3 | github-code | 13 |
42035419631 | import argparse
from PIL import Image
import os.path
def put_center(size, color, img_path, out_path):
    """Paste the image at *img_path* centered on a new *size* canvas and save '<out_path>.jpg'.

    When *color* is falsy, the canvas color defaults to the source image's
    top-left pixel so the background blends for solid-background inputs.
    """
    im2 = Image.open(img_path)
    if not color:
        color = im2.getpixel((0, 0))
    im1 = Image.new("RGB" ,size , color=color)
    im1_width, im1_height = im1.size
    im2_width, im2_height = im2.size
    back_im = im1.copy()
    # integer-truncated offsets center the source image on the canvas
    back_im.paste(im2, (int((im1_width/2)-(im2_width/2)), int((im1_height/2)-(im2_height/2))))
    back_im.save(out_path + '.jpg', quality=100)
if __name__ == '__main__':
    # CLI: center an image on a colored canvas and save it as a JPEG.
    parser = argparse.ArgumentParser()
    parser.add_argument('path', help='path', type=str)
    parser.add_argument('--size', '-s', help='size', type=str)
    parser.add_argument('--color', '-c', help='color', type=str)
    parser.add_argument('--out_path', '-o', help='out_path', type=str)
    args = parser.parse_args()
    path = args.path
    # defaults: full-HD canvas, color sampled from the image, output 'output.jpg'
    size = (1920, 1080)
    color = None
    out_path = 'output'
    if not os.path.isfile(args.path):
        raise Exception('File not exist or wrong path')
    if args.size:
        # expected form 'WIDTHxHEIGHT', e.g. '1920x1080'
        pos = args.size.find('x')
        if pos == -1 :
            raise Exception('Wrong size arg')
        size = (int(args.size[:pos]), int(args.size[pos+1:]))
    if args.color:
        # a missing leading '#' is added; a present one is excluded from the length
        color_len = len(args.color)
        if args.color.find('#') == -1:
            args.color = '#' + args.color
        else:
            color_len -= 1
        # NOTE(review): this range also accepts 4- and 5-digit hex strings;
        # CSS hex colors are 3 or 6 digits — confirm intended.
        if color_len < 3 or color_len > 6:
            raise Exception('Wrong hex color len')
        color = args.color
    if args.out_path:
        # NOTE(review): rejects any dot in the output path, which also forbids
        # directory components like './out' — confirm intended.
        if args.out_path.find('.') != -1:
            raise Exception('.')
        out_path = args.out_path
    put_center(size, color, path, out_path)
| simhisancak/wp_gen | main.py | main.py | py | 1,687 | python | en | code | 0 | github-code | 13 |
2167120520 | from aiohttp import ClientSession
from genie_common.utils import create_client_session, build_authorization_headers
from spotipyio.logic.authentication.spotify_session import SpotifySession
class SessionsComponentFactory:
    """Builds pre-configured HTTP sessions for the external services used here."""
    @staticmethod
    def get_spotify_session() -> SpotifySession:
        """Return a fresh SpotifySession wrapper (no credentials attached here)."""
        return SpotifySession()
    @staticmethod
    def get_client_session() -> ClientSession:
        """Return a plain aiohttp session that sends and accepts JSON."""
        headers = {
            "Accept": "application/json",
            "Content-Type": "application/json"
        }
        return create_client_session(headers)
    @staticmethod
    def get_genius_session(bearer_token: str) -> ClientSession:
        """Return an aiohttp session carrying bearer auth for the Genius API."""
        headers = {
            "Accept": "application/json",
            "User-Agent": "CompuServe Classic/1.22",
            "Host": "api.genius.com",
            "Authorization": f"Bearer {bearer_token}"
        }
        return create_client_session(headers)
    @staticmethod
    def get_openai_session(api_key: str) -> ClientSession:
        """Return an aiohttp session with an authorization header built from *api_key*."""
        headers = build_authorization_headers(api_key)
        return create_client_session(headers)
    @staticmethod
    def get_google_geocoding_session(api_key: str) -> ClientSession:
        """Return an aiohttp session for the RapidAPI Google-Maps-Geocoding proxy."""
        headers = {
            "X-RapidAPI-Key": api_key,
            "X-RapidAPI-Host": "google-maps-geocoding.p.rapidapi.com"
        }
        return create_client_session(headers)
| nirgodin/radio-stations-data-collection | data_collectors/components/sessions_component_factory.py | sessions_component_factory.py | py | 1,352 | python | en | code | 0 | github-code | 13 |
25901084676 | from qgis.core import QgsExpressionNode, QgsExpression, QgsExpressionNodeBinaryOperator
class UnsupportedExpressionException(Exception):
    """Raised when a QGIS expression node cannot be translated by this converter."""
    pass
binaryOps = [
"Or",
"And",
"PropertyIsEqualTo",
"PropertyIsNotEqualTo",
"PropertyIsLessThanOrEqualTo",
"PropertyIsGreaterThanOrEqualTo",
"PropertyIsLessThan",
"PropertyIsGreaterThan",
None,
"PropertyIsLike",
None,
None,
None,
None,
None,
"Add",
"Sub",
"Mul",
"Div",
None,
None,
None,
None,
]
unaryOps = ["Not", "Sub"]
functions = {
"radians": "toRadians",
"degrees": "toDegrees",
"floor": "floor",
"ceil": "ceil",
"area": "area",
"buffer": "buffer",
"centroid": "centroid",
"if": "if_then_else",
"bounds": "envelope",
"distance": "distance",
"convex_hull": "convexHull",
"end_point": "endPoint",
"start_point": "startPoint",
"x": "getX",
"y": "getY",
"concat": "Concatenate",
"substr": "strSubstr",
"lower": "strToLower",
"upper": "strToUpper",
"replace": "strReplace",
"exterior_ring": "exteriorRing",
"intersects": "intersects",
"overlaps": "overlaps",
"touches": "touches",
"within": "within",
"relates": "relates",
"crosses": "crosses",
"disjoint": "disjoint",
"geom_from_wkt": "geomFromWKT",
"perimeter": "geomLength",
"union": "union",
"acos": "acos",
"asin": "asin",
"atan": "atan",
"atan2": "atan2",
"sin": "sin",
"cos": "cos",
"tan": "tan",
"ln": "log",
"title": "strCapitalize",
"translate": "offset",
"min": "min",
"max": "max",
} # TODO
def walkExpression(node, layer):
    """Recursively convert a QgsExpression node tree into a nested-list form.

    Dispatches on node type; *layer* is used to resolve column references to
    canonical field names.  Raises UnsupportedExpressionException for node
    kinds this converter does not handle.
    """
    if node.nodeType() == QgsExpressionNode.ntBinaryOperator:
        exp = handleBinary(node, layer)
    elif node.nodeType() == QgsExpressionNode.ntUnaryOperator:
        exp = handleUnary(node, layer)
    elif node.nodeType() == QgsExpressionNode.ntInOperator:
        exp = handle_in(node, layer)
    elif node.nodeType() == QgsExpressionNode.ntFunction:
        exp = handleFunction(node, layer)
    elif node.nodeType() == QgsExpressionNode.ntLiteral:
        exp = handleLiteral(node)
    elif node.nodeType() == QgsExpressionNode.ntColumnRef:
        exp = handleColumnRef(node, layer)
    else:
        exp = None
    # elif node.nodeType() == QgsExpression.ntCondition:
    #     filt = handle_condition(nod)
    if exp is None:
        raise UnsupportedExpressionException(
            "Unsupported operator in expression: '%s'" % str(node)
        )
    return exp
# handle IN expression
# convert to a series of (A='a') OR (B='b')
def handle_in(node, layer):
    """Rewrite ``col IN (a, b, ...)`` as OR-chained PropertyIsEqualTo comparisons.

    Only the simple form (a column reference compared against literals) is
    supported; NOT IN, a non-column left side, an empty list, or non-literal
    members raise UnsupportedExpressionException.
    """
    if node.isNotIn():
        raise UnsupportedExpressionException("expression NOT IN is unsupported")
    # convert this expression to another (equivalent) expression
    if node.node().nodeType() != QgsExpressionNode.ntColumnRef:
        raise UnsupportedExpressionException("expression IN doesn't ref column!")
    if node.list().count() == 0:
        raise UnsupportedExpressionException(
            "expression IN doesn't have anything inside the IN"
        )
    colRef = handleColumnRef(node.node(), layer)
    propEqualsExprs = []  # one equality comparison per literal in the IN list
    for item in node.list().list():
        if item.nodeType() != QgsExpressionNode.ntLiteral:
            raise UnsupportedExpressionException("expression IN isn't literal")
        # equals_expr = QgsExpressionNodeBinaryOperator(2,colRef,item) #2 is "="
        equals_expr = [binaryOps[2], colRef, handleLiteral(item)]  # 2 is "="
        propEqualsExprs.append(equals_expr)
    # build into a single left-nested OR expression
    if len(propEqualsExprs) == 1:
        return propEqualsExprs[0]  # handle 1 item in the list
    accum = [binaryOps[0], propEqualsExprs[0], propEqualsExprs[1]]  # 0="OR"
    for idx in range(2, len(propEqualsExprs)):
        accum = [binaryOps[0], accum, propEqualsExprs[idx]]  # 0="OR"
    return accum
def handleBinary(node, layer):
    """Convert a binary-operator node into [opName, leftExpr, rightExpr]."""
    operator_name = binaryOps[node.op()]
    left_part = walkExpression(node.opLeft(), layer)
    right_part = walkExpression(node.opRight(), layer)
    return [operator_name, left_part, right_part]
def handleUnary(node, layer):
    """Convert a unary-operator node; unary minus becomes a binary Sub from 0."""
    operator_name = unaryOps[node.op()]
    operand_part = walkExpression(node.operand(), layer)
    if operator_name == "Sub":
        # there is no unary minus in the output form, so emit (0 - operand)
        return [operator_name, 0, operand_part]
    return [operator_name, operand_part]
def handleLiteral(node):
    """Return the Python value of a literal node, normalised for output.

    Strings get embedded newlines escaped; None becomes the string "null";
    every other value (numbers, booleans) passes through unchanged.
    """
    val = node.value()
    # Bug fix: the original tested isinstance(val, basestring), which raises
    # NameError on Python 3 (this module already relies on str.casefold, a
    # Python-3-only method).  The unused `quote` variable is removed as well.
    if isinstance(val, str):
        val = val.replace("\n", "\\n")
    elif val is None:
        val = "null"
    return val
def handleColumnRef(node, layer):
    """Map a column-reference node to ["PropertyName", fieldName].

    When a layer is supplied, the reference is matched case-insensitively
    against the layer's fields so the canonical field name is emitted;
    otherwise (or on no match) the node's own name is used verbatim.
    """
    attr_name = node.name()
    if layer is None:
        return ["PropertyName", attr_name]
    wanted = attr_name.casefold()
    for field in layer.fields():
        if field.name().casefold() == wanted:
            return ["PropertyName", field.name()]
    return ["PropertyName", attr_name]
def handleFunction(node, layer):
    """Translate a QGIS function-call node into [mappedName, arg1, arg2, ...].

    ``$geometry`` becomes a PropertyName reference to 'geom'.  Functions not
    present in the module-level ``functions`` mapping raise
    UnsupportedExpressionException.
    """
    fnIndex = node.fnIndex()
    func = QgsExpression.Functions()[fnIndex].name()
    if func == "$geometry":
        return ["PropertyName", "geom"]
    elif func in functions:
        elems = [functions[func]]
        args = node.args()
        if args is not None:
            args = args.list()
            for arg in args:
                elems.append(walkExpression(arg, layer))
        return elems
    else:
        raise UnsupportedExpressionException(
            "Unsupported function in expression: '%s'" % func
        )
| tomchadwin/qgis2web | qgis2web/bridgestyle/qgis/expressions.py | expressions.py | py | 5,642 | python | en | code | 494 | github-code | 13 |
42428851051 | import boto3
class GLOBAL_CONFIG:
client = boto3.client('ssm')
LANGUAGES = {
'ar': 'Arabic',
'zh': 'Chinese',
'en': 'English',
'fr': 'French',
'ru': 'Russian',
'es': 'Spanish'
}
GLOBAL_KWARGS = {
'lang': 'en',
'site_available_languages': ['ar','zh','en','fr','ru','es']
}
CACHE_KEY = client.get_parameter(Name='metadata_cache_key')['Parameter']['Value']
CACHE_SERVERS = [client.get_parameter(Name='ElastiCacheServer')['Parameter']['Value']] | dag-hammarskjold-library/metadata-un-org | metadata/config.py | config.py | py | 554 | python | en | code | 0 | github-code | 13 |
12779256791 | # #############################################################################
# RISClientDEA.py
# This module provides a wrapper for Requests HTTP Verbs, and additional functions for interface with RIS
#
# #############################################################################
# The information contained herein is subject to change without notice.
# The only warranties for HP products and services are set forth in the
# express warranty statements accompanying such products and services.
# Nothing herein should be construed as constituting an additional warranty.
# HP shall not be liable for technical or editorial errors or omissions
# contained herein.
#
# #############################################################################
import requests
from requests.packages import urllib3
from requests.adapters import HTTPAdapter
from RoboGalaxyLibrary.utilitylib import logging as logger
import pprint
import json
class PERISClient(object):
    """Thin wrapper around a requests.Session for talking to a RIS host.

    Holds the target host, default JSON headers, the X-Auth-Token session id,
    and exposes the usual HTTP verbs.  SSL verification is deliberately
    disabled (self-signed appliance certs) and urllib3's warning suppressed.
    """
    def __init__(self, host=None, proxy=None, http=False):
        self._http = requests.Session()
        self._http.mount('http://', HTTPAdapter(max_retries=3))  # requires Python 2.7.4+ and Requests 1.2.3 +
        self._http.mount('https://', HTTPAdapter(max_retries=3))  # requires Python 2.7.4+ and Requests 1.2.3 +
        self._sessionID = None
        self._cred = None
        self._headers = {'Accept': 'application/json, */*',
                         'Accept-language': 'en_US',
                         'Content-Type': 'application/json'}
        self._host = host
        # leaving below here in case we need to test more than 1 active sessions
        # self._active_sessions = {}
        # self.session_uri = []
        self._session_uri = None
        self._session_index = None
        self._base_url = 'https://'
        if http:
            self._base_url = 'http://'
        if proxy:
            self._http.proxies = proxy
        else:
            # no proxy: also ignore proxy settings from the environment
            self._http.proxies = None
            self._http.trust_env = False
        # Disable the one-time warning thrown by urllib3 when bypassing SSL cert
        urllib3.disable_warnings()
    def set_host(self, host):
        """Set the target host (name or IP) used to build request URIs."""
        self._host = host
    def get_host(self):
        """Return the currently configured target host."""
        return self._host
    def get_user(self):
        """Return the stored credential user name, or None if no credentials are set."""
        if self._cred is not None:
            return self._cred['UserName']
    def get_password(self):
        """Return the stored credential password, or None if no credentials are set."""
        if self._cred is not None:
            return self._cred['Password']
    def clear_token(self):
        """Drop the session token (requests will carry X-Auth-Token: None)."""
        self._sessionID = None
        self._headers['X-Auth-Token'] = self._sessionID
    def get_token(self):
        """Return the current X-Auth-Token session id."""
        return self._sessionID
    def set_token(self, tokenID):
        """Store *tokenID* and use it as the X-Auth-Token header from now on."""
        self._sessionID = tokenID
        self._headers['X-Auth-Token'] = self._sessionID
    def update_headers(self, key, value):
        """Set/overwrite a default header sent with every request."""
        self._headers[key] = value
    def update_index(self, value):
        """Remember the session index reported by the service."""
        self._session_index = value
    def set_base_url(self, base_url):
        """Set the URL scheme ('http' or 'https') used to build request URIs."""
        self._base_url = base_url + '://'
    def close_session(self):
        """Close the underlying requests.Session and its pooled connections."""
        self._http.close()
    # leaving below here in case we need to test more than 1 active sessions
    # def get_active_sessions(self):
    #     return self._active_sessions
    def _request(self, op, uri, headers=None, data=None, stream=False, etag=None, if_none_match=None, legacy=False, xauthtoken=None, username=None, password=None, timeout=180):
        """Issue *op* (HTTP verb) against *uri* on the configured host.

        *headers* may be a dict (X-Auth-Token is injected into it) or one of
        several magic strings used by tests to select canned header sets
        ("no_auth_token", "Staging", "Dummy1", "long_headers", ...).  Dict
        *data* is JSON-encoded for legacy callers.  Any transport error is
        re-raised wrapped in a generic Exception.

        NOTE(review): etag, if_none_match, legacy, xauthtoken, username and
        password are accepted but never used in this body — confirm whether
        they are still needed by callers.
        """
        if headers == "no_auth_token":
            headers = {'Accept': 'application/json, */*',
                       'Accept-language': 'en_US',
                       'Content-Type': 'application/json'}
        elif headers == "no_auth_token_with_secret":
            headers = {'Accept': 'application/json, */*',
                       'Accept-language': 'en_US',
                       'Content-Type': 'application/json',
                       'X-Secret': 'secret'}
        elif headers == "Staging":
            headers = {'Accept': 'application/json, */*',
                       'Accept-language': 'en_US',
                       'Content-Type': 'application/octet-stream',
                       'X-Stage-Only': 1}
            headers['X-Auth-Token'] = self._sessionID
        elif headers == "Dummy1":
            headers = {'Accept': 'application/json, */*',
                       'Accept-language': 'en_US',
                       'Content-Type': 'application/octet-stream',
                       'X-Dummy': 1}
            headers['X-Auth-Token'] = self._sessionID
        elif headers == "Dummy2":
            headers = {'Accept': 'application/json, */*',
                       'Accept-language': 'en_US',
                       'Content-Type': 'application/octet-stream',
                       'X-Dummy': 1,
                       'X-Stage-Only': 1}
            headers['X-Auth-Token'] = self._sessionID
        elif headers == "more_than_four_headers":
            headers = {'Accept': 'application/json, */*',
                       'Accept-language': 'en_US',
                       'Content-Type': 'application/octet-stream',
                       'X-Dummy1': 1,
                       'X-Stage-Only': 1,
                       'X-Dummy2': 1,
                       'X-Dummy3': 1,
                       'X-Dummy4': 1}
            headers['X-Auth-Token'] = self._sessionID
        elif headers == "long_headers":
            # pad the header name out to 1024 characters to exercise limits
            header_name = "X-Stage"
            padchar = 'A'
            current_length = len(header_name)
            if current_length < 1024:
                for i in range(1024 - current_length):
                    header_name = header_name + padchar
            headers = {'Accept': 'application/json, */*',
                       'Accept-language': 'en_US',
                       'Content-Type': 'application/octet-stream',
                       header_name: 1,
                       'X-Stage-Only': 1
                       }
            headers['X-Auth-Token'] = self._sessionID
        elif headers:
            headers['X-Auth-Token'] = self._sessionID
        else:
            headers = self._headers
        logger._debug('uri %s' % uri)
        logger._debug('base %s' % self._base_url)
        logger._debug('host %s' % self._host)
        uri = self._base_url + self._host + uri
        # Below check for legacy support of some existing calls made to HPCIManager which did not encode the data.
        if isinstance(data, dict):
            data = json.dumps(data)
        try:
            logger._debug('\n%s %s\nRequest Header: %s\nRequest Body: %s\n' % (op, uri, pprint.PrettyPrinter().pformat(headers), data))
            resp = self._http.request(op, uri, data=data, headers=headers, verify=False, stream=stream, timeout=timeout)
            logger._debug('\nStatus: %d' % resp.status_code)
            logger._debug('\nResp Header: %s' % resp.headers)
            # Below code for debugging purposes. Won't work for calls to Diags since that returns raw text instead of json
            # TODO: add condition to check for call to Diags and print raw text instead of json
            # if resp.status_code == 200 and op == 'GET' and stream == False:
            #     logger._debug('\nBody: %s' % resp.json())
        except Exception as e:
            msg = "Exception occurred while attempting to %s: %s" % (op, uri)
            raise Exception(msg, e)
        return resp
    def delete(self, uri, headers=None):
        """Send a DELETE request to *uri*."""
        return self._request('DELETE', uri, headers=headers)
    def get(self, uri, headers=None, stream=False):
        """Send a GET request to *uri*; set *stream* to defer body download."""
        return self._request('GET', uri, headers=headers, stream=stream)
    def post(self, uri, data=None, headers=None, stream=False, timeout=180):
        """Send a POST request to *uri* with optional body *data*."""
        return self._request('POST', uri, data=data, headers=headers, stream=stream, timeout=timeout)
    def patch(self, uri, data=None, headers=None):
        """Send a PATCH request to *uri* with optional body *data*."""
        return self._request('PATCH', uri, data=data, headers=headers)
    def put(self, uri, data=None, headers=None):
        """Send a PUT request to *uri* with optional body *data*."""
        return self._request('PUT', uri, data=data, headers=headers)
| richa92/Jenkin_Regression_Testing | robo4.2/fusion/tests/DEA/resource/iLO/PERISClient.py | PERISClient.py | py | 7,945 | python | en | code | 0 | github-code | 13 |
73607397136 | from typing import Iterable, Optional, TypeVar
import torch
from torcheval.metrics.functional.classification.f1_score import (
_binary_f1_score_update,
_f1_score_compute,
_f1_score_param_check,
_f1_score_update,
)
from torcheval.metrics.metric import Metric
TF1Score = TypeVar("TF1Score")
TBinaryF1Score = TypeVar("TBinaryF1Score")
class MulticlassF1Score(Metric[torch.Tensor]):
    """
    Compute f1 score, which is defined as the harmonic mean of precision and recall.
    We convert NaN to zero when f1 score is NaN. This happens when either precision
    or recall is NaN or when both precision and recall are zero.
    Its functional version is :func:`torcheval.metrics.functional.multi_class_f1_score`.
    See also :class:`BinaryF1Score <BinaryF1Score>`
    Args:
        num_classes (int):
            Number of classes.
        average (str, Optional):
            - ``'micro'`` [default]: Calculate the metrics globally.
            - ``'macro'``: Calculate metrics for each class separately, and return their unweighted mean.
                Classes with 0 true and predicted instances are ignored.
            - ``'weighted'``" Calculate metrics for each class separately, and return their weighted sum.
                Weights are defined as the proportion of occurrences of each class in "target".
                Classes with 0 true and predicted instances are ignored.
            - ``None``: Calculate the metric for each class separately, and return
                the metric for every class.
    Examples::
        >>> import torch
        >>> from torcheval.metrics import MulticlassF1Score
        >>> metric = MulticlassF1Score(num_classes=4)
        >>> input = torch.tensor([0, 2, 1, 3])
        >>> target = torch.tensor([0, 1, 2, 3])
        >>> metric.update(input, target)
        >>> metric.compute()
        tensor(0.5000)
        >>> metric = MulticlassF1Score(average=None, num_classes=4)
        >>> input = torch.tensor([0, 2, 1, 3])
        >>> target = torch.tensor([0, 1, 2, 3])
        >>> metric.update(input, target)
        >>> metric.compute()
        tensor([1., 0., 0., 1.])
        >>> metric = MulticlassF1Score(average="macro", num_classes=2)
        >>> input = torch.tensor([0, 0, 1, 1, 1])
        >>> target = torch.tensor([0, 0, 0, 0, 1])
        >>> metric.update(input, target)
        >>> metric.compute()
        tensor(0.5833)
        >>> metric = MulticlassF1Score(num_classes=4)
        >>> input = torch.tensor([[0.9, 0.1, 0, 0], [0.1, 0.2, 0.4, 0.3], [0, 1.0, 0, 0], [0, 0, 0.2, 0.8]])
        >>> target = torch.tensor([0, 1, 2, 3])
        >>> metric.update(input, target)
        >>> metric.compute()
        tensor(0.5)
    """
    def __init__(
        self: TF1Score,
        *,
        num_classes: Optional[int] = None,
        average: Optional[str] = "micro",
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__(device=device)
        _f1_score_param_check(num_classes, average)
        self.num_classes = num_classes
        self.average = average
        # micro averaging only needs global scalar counts; every other mode
        # keeps per-class count vectors of length num_classes
        if average == "micro":
            self._add_state("num_tp", torch.tensor(0.0, device=self.device))
            self._add_state("num_label", torch.tensor(0.0, device=self.device))
            self._add_state(
                "num_prediction",
                torch.tensor(0.0, device=self.device),
            )
        else:
            # num_classes has been verified as a positive integer. Add this line to bypass pyre.
            assert isinstance(
                num_classes, int
            ), f"num_classes must be a integer, but got {num_classes}"
            self._add_state(
                "num_tp",
                torch.zeros(num_classes, device=self.device),
            )
            self._add_state(
                "num_label",
                torch.zeros(num_classes, device=self.device),
            )
            self._add_state(
                "num_prediction",
                torch.zeros(num_classes, device=self.device),
            )
    @torch.inference_mode()
    # pyre-ignore[14]: inconsistent override on *_:Any, **__:Any
    def update(self: TF1Score, input: torch.Tensor, target: torch.Tensor) -> TF1Score:
        """
        Update states with the ground truth labels and predictions.
        Args:
            input (Tensor): Tensor of label predictions.
                It could be the predicted labels, with shape of (n_sample, ).
                It could also be probabilities or logits with shape of (n_sample, n_class).
                ``torch.argmax`` will be used to convert input into predicted labels.
            target (Tensor): Tensor of ground truth labels with shape of (n_sample, ).
        """
        input = input.to(self.device)
        target = target.to(self.device)
        # counts are accumulated per batch; the f1 score itself is computed
        # lazily in compute()
        num_tp, num_label, num_prediction = _f1_score_update(
            input, target, self.num_classes, self.average
        )
        self.num_tp += num_tp
        self.num_label += num_label
        self.num_prediction += num_prediction
        return self
    @torch.inference_mode()
    def compute(self: TF1Score) -> torch.Tensor:
        """
        Return the f1 score.
        0 is returned if no calls to ``update()`` are made before ``compute()`` is called.
        """
        return _f1_score_compute(
            self.num_tp, self.num_label, self.num_prediction, self.average
        )
    @torch.inference_mode()
    def merge_state(self: TF1Score, metrics: Iterable[TF1Score]) -> TF1Score:
        # merging is a plain sum of counts, so it is exact for every averaging mode
        for metric in metrics:
            self.num_tp += metric.num_tp.to(self.device)
            self.num_label += metric.num_label.to(self.device)
            self.num_prediction += metric.num_prediction.to(self.device)
        return self
class BinaryF1Score(MulticlassF1Score):
    """
    Compute binary f1 score, which is defined as the harmonic mean of precision and recall.
    We convert NaN to zero when f1 score is NaN. This happens when either precision
    or recall is NaN or when both precision and recall are zero.
    Its functional version is :func:``torcheval.metrics.functional.binary_f1_score``.
    See also :class:`MulticlassF1Score <MulticlassF1Score>`
    Args:
        threshold (float, optional) : Threshold for converting input into predicted labels for each sample.
            ``torch.where(input < threshold, 0, 1)`` will be applied to the ``input``.
    Example::
        >>> import torch
        >>> from torcheval.metrics import BinaryF1Score
        >>> metric = BinaryF1Score()
        >>> input = torch.tensor([0, 1, 1, 0])
        >>> target = torch.tensor([0, 1, 0, 1])
        >>> metric.update(input, target)
        >>> metric.compute()
        tensor(0.5000)
        >>> metric = BinaryF1Score(threshold=0.7)
        >>> input = torch.tensor([.2, .8, .7, .6])
        >>> target = torch.tensor([0, 1, 0, 1])
        >>> metric.update(input, target)
        >>> metric.compute()
        tensor(0.5000)
        >>> input2 = torch.tensor([.9, .5, .1, .7])
        >>> target2 = torch.tensor([0, 1, 1, 1])
        >>> metric.update(input2, target2)
        >>> metric.compute()
        tensor(0.4444)
    """
    def __init__(
        self: TBinaryF1Score,
        *,
        threshold: float = 0.5,
        device: Optional[torch.device] = None,
    ) -> None:
        # binary f1 is the multiclass metric with micro averaging plus a
        # probability threshold applied in update()
        super().__init__(average="micro", device=device)
        self.threshold = threshold
    @torch.inference_mode()
    def update(
        self: TBinaryF1Score, input: torch.Tensor, target: torch.Tensor
    ) -> TBinaryF1Score:
        """
        Update states with the ground truth labels and predictions.
        Args:
            input (Tensor): Tensor of label predictions with shape of (n_sample,).
                ``torch.where(input < threshold, 0, 1)`` will be applied to the input.
            target (Tensor): Tensor of ground truth labels with shape of (n_sample,).
        """
        input = input.to(self.device)
        target = target.to(self.device)
        num_tp, num_label, num_prediction = _binary_f1_score_update(
            input, target, self.threshold
        )
        self.num_tp += num_tp
        self.num_label += num_label
        self.num_prediction += num_prediction
        return self
| pytorch/torcheval | torcheval/metrics/classification/f1_score.py | f1_score.py | py | 8,264 | python | en | code | 155 | github-code | 13 |
6558771590 | import os
import requests
from pathlib import Path
import argparse
import codecs
# Directory names that hold the seed JSON payloads, one directory per type.
exchanges = "exchanges"
timeSeriesValues = "timeSeriesValues"
websites = "websites"
countryCurrencies = "countryCurrencies"
# Local API endpoints each payload type is POSTed to (one URL per directory).
exchangeUrl = "http://127.0.0.1:8080/assets/crypto-currency-exchange-complete"
timeSeriesValuesUrl = "http://127.0.0.1:8080/assets/time-series-value"
websitesUrl = "http://127.0.0.1:8080/assets/website-data-list"
countryCurrencyUrl = "http://127.0.0.1:8080/assets/country-currency-list"
def readFilesFromDir(dir):
    """Return the names of all entries inside the given directory."""
    entries = os.listdir(dir)
    return entries
def readFile(path):
    """Read a text file as UTF-8 and return its contents re-encoded to bytes.

    Uses a context manager so the handle is always closed; the original
    leaked the file descriptor if read() raised.

    :param path: path of the file to read
    :return: bytes, the UTF-8 encoded file contents
    """
    with codecs.open(path, "r", "utf-8") as file:
        return file.read().encode('utf-8')
def sendPost(url, data):
    """POST the given JSON payload bytes to url and return the response."""
    # Explicit User-Agent kept from the original; presumably the target
    # server filters some default clients - TODO confirm it is still needed.
    headers = {'Content-Type': 'application/json', 'Accept': 'text/plain', 'User-Agent': 'python-requests/2.4.3 Python/3.5.0'}
    return requests.post(url, data=data, headers=headers)
def init():
    """Seed the local API with every file from the four data directories.

    Uploads stop at the first non-201 response; the inner for/else/break
    ladder aborts the outer directory loop once an upload fails.
    """
    dirs = [exchanges, timeSeriesValues, websites, countryCurrencies]
    urls = [exchangeUrl, timeSeriesValuesUrl, websitesUrl, countryCurrencyUrl]
    code = 0
    if (len(dirs) == len(urls)):
        for idx, dirr in enumerate(dirs):
            files = readFilesFromDir(dirr)
            url = urls[idx]
            for file in files:
                data = readFile(dirr + '/' + file)
                print("Post: ", file, "to url: ", url)
                response = sendPost(url, data)
                print("Response code:", response.status_code)
                print("Response content: ", response.text)
                print("===================================")
                print("\n")
                code = response.status_code
                if (response.status_code != 201):
                    # abort this directory on the first failure
                    break
            else:
                # all files of this directory uploaded fine: next directory
                continue
            # the inner loop broke, so stop uploading entirely
            break
    if(code == 201):
        print("Init Saved complete")
    else:
        print("Files not saved")
def saveFiles(path, url):
    """Upload a single file, or every file inside a directory, to the URL.

    Fixes: the directory branch built paths with ``path.name + '/' + file``,
    which drops any parent directories (e.g. "data/exchanges" silently became
    "exchanges/<file>"); it now joins against the full path. The duplicated
    upload/report logic is shared by a local helper.

    :param path: file or directory containing JSON payload(s)
    :param url: endpoint the payload(s) are POSTed to
    """
    path = Path(path)

    def _post(file_path, display_name):
        # Shared upload-and-report logic for both branches.
        data = readFile(str(file_path))
        print("Post: ", display_name, "to url: ", url)
        response = sendPost(url, data)
        print("Response code:", response.status_code)
        print("Response content: ", response.text)
        if (response.status_code != 201):
            print("File not saved: ", display_name)
        else:
            print("File saved: ", display_name)

    if (path.is_file()):
        _post(path, path.name)
        print("===================================")
    elif (path.is_dir()):
        for file in readFilesFromDir(str(path)):
            # join against the full path, not just the final component
            _post(path / file, file)
            print("===================================\n")
    else:
        print("Parameter data not found: ", path.name, "\n")
def main():
    """CLI entry point: upload one dataset, or seed everything.

    Each of --e/--t/--w/--c takes a file or directory path; the first flag
    provided wins. With no flags, init() uploads every bundled directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--e", help="Create one or more exchanges")
    parser.add_argument("--t", help="Create one or more timeSeriesValues")
    parser.add_argument("--w", help="Create one or more websites")
    parser.add_argument("--c", help="Create one or more country courrencies")
    args = parser.parse_args()
    if (args.e):
        saveFiles(args.e, exchangeUrl)
    elif (args.t):
        saveFiles(args.t, timeSeriesValuesUrl)
    elif (args.w):
        saveFiles(args.w, websitesUrl)
    elif (args.c):
        saveFiles(args.c, countryCurrencyUrl)
    else:
        init()
if __name__ == "__main__":
main()
| 43ndr1k/Mappinng-Cryptocurrencies-with-News | backend/cryptoSkript/Main.py | Main.py | py | 3,840 | python | en | code | 0 | github-code | 13 |
34536891391 | #!/usr/bin/python
import numpy as np
from pprint import pprint
import csv
import math, time
import random
LEARNING_RATE = 1  # NOTE(review): unused - the update rule adds points unscaled
num_iterations = 1000000  # upper bound on perceptron training epochs
def get_perceptron(features, truth):
    """Run the perceptron learning rule until the data is separated.

    Expects every row of ``features`` to already be multiplied by its label
    (see flip()), so a point is misclassified exactly when w . x <= 0.
    Prints the final weight vector and returns once an epoch classifies
    every point; ``truth`` is unused because labels are folded into
    ``features``.
    """
    w = np.zeros(features.shape[1])
    #w[-1] = 1
    w = np.matrix(w)
    for i in range(num_iterations):
        misclassified_points = 0
        for index, point in enumerate(features):
            # misclassified: add the point to the weight vector
            if np.inner(point.A1, w.A1) <= 0.0:
                misclassified_points += 1
                #pprint(("adding point " + str(index), np.inner(point.A1, w.A1), point))
                w = np.matrix(w.A1 + point.A1)
        pprint("iteration: " + str(i))
        pprint("num_misclassified: " + str(misclassified_points))
        if misclassified_points == 0:
            pprint("final w")
            pprint(w.A1)
            return
# For every row whose final entry (the label) is -1, negate the whole row
# so the perceptron can treat every example as positive (w . x > 0).
def flip(old_X):
    """Return a copy of old_X with each row ending in -1 multiplied by -1."""
    X = old_X.copy()
    pprint(X.shape)
    flipped = 0
    for row in range(X.shape[0]):
        if X[row, -1] == -1:
            flipped += 1
            X[row, :] = X[row, :] * -1
    pprint("number flipped: " + str(flipped))
    return X
def read_csv_as_numpy_matrix(filename):
    """Parse a comma-separated file into a float numpy matrix.

    Opens the file in text mode (the original used "rb", which makes the
    csv module raise under Python 3) and closes the handle deterministically.
    """
    with open(filename, "r", newline='') as handle:
        return np.matrix(list(csv.reader(handle, delimiter=','))).astype('float')
import unittest
data_dir = "./data/"
class TestLinearReg(unittest.TestCase):
    """unittest checks for flip() and a previously-learned hyperplane.

    NOTE(review): several tests call ``inner``, which is not defined anywhere
    in this file (presumably a debug shim around np.inner), and end with
    ``self.assertTrue(False)`` - they look like leftover debugging scaffolding
    rather than real assertions. Confirm before relying on this suite.
    """

    def test_flip(self):
        # flip() must negate only the row whose last entry is -1.
        data = np.matrix('1 1 -1; 1 1 1')
        new_data = flip(data)
        self.assertEqual(new_data[0,2], 1)
        self.assertEqual(new_data[1,2], 1)
        self.assertEqual(new_data[0,0], -1)
        self.assertEqual(new_data[1,0], 1)

    def test_inner(self):
        # Debug probe: prints an inner product, then fails unconditionally.
        x = [ -0.3852046 , -0.18301087, -0.54516589, -0.59832594, 1. ]
        w = [ 0.57642699, 0.23646118, 0.3197695 , 0.19114307, 2. ]
        pprint(inner(x, w))
        self.assertTrue(False)

    def test_final_w(self):
        # Checks a stored weight vector against the flipped data set.
        w = np.array([-0.05679759, -0.02521043, -0.01362577, -0.00960582, 2.0])
        spam_filename = data_dir + "perceptron.txt"
        data = read_csv_as_numpy_matrix(spam_filename)
        data = flip(data)
        for point in data:
            self.assertTrue(inner(point.A1, w) > 0)
            if inner(point.A1, w) <= 0:
                #print failing points
                pprint(inner(point.A1, w))
                pprint(point.A1)
                self.assertTrue(False)

    def test_all(self):
        # Counts misclassified points (with a bias column appended), then
        # fails unconditionally so the count is always printed.
        spam_filename = data_dir + "perceptron.txt"
        data = read_csv_as_numpy_matrix(spam_filename)
        data = flip(data)
        features = data[:,:4]
        features = np.hstack((features,
            np.matrix(np.ones(features.shape[0])).T))
        w = np.array([-0.05679759, -0.02521043, -0.01362577, -0.00960582, 2.0])
        wrong_count = 0
        for i, point in enumerate(features):
            if inner(point.A1, w) <= 0:
                pprint((i, inner(point.A1, w), point.A1))
                wrong_count += 1
        pprint(wrong_count)
        self.assertTrue(False)
def test_perceptron():
    """Load perceptron.txt, fold labels into the data, and train.

    Pipeline: append a bias column of ones to the features, re-attach the
    labels, flip label -1 rows (so w . x > 0 means correct), then run
    get_perceptron on the flipped, bias-augmented features.
    """
    spam_filename = data_dir + "perceptron.txt"
    data = read_csv_as_numpy_matrix(spam_filename)
    pprint(data)
    features = data[:,:4]
    truth = data[:,4]
    #add in bias
    features = np.hstack((features,
        np.matrix(np.ones(features.shape[0])).T))
    data = np.hstack((features, truth))
    data = flip(data)
    pprint(data)
    #pprint("data")
    #pprint(data)
    features = data[:,:5]
    truth = data[:,5]
    plane = get_perceptron(features, truth)
#add bias before flipping hyperplane
if __name__ == "__main__":
test_perceptron()
| ohnorobo/machine-learning | Perceptron.py | Perceptron.py | py | 3,389 | python | en | code | 1 | github-code | 13 |
19502373792 | import pyttsx3
# Initialise the Windows SAPI5 text-to-speech engine once at import time.
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# Voice index mapping per the author's machine: Ravi 1, David 2, zira 3, hetal 0.
engine.setProperty('voice',voices[1].id ) # Ravi 1 , David - 2 , zira - 3 , hetal - 0
#print(voices[1])
engine.setProperty('rate', 170)  # speaking rate (words per minute)
def Say(Text):
    """Print the assistant's reply and speak it via the module-level engine.

    Blocks until the text has been fully spoken (runAndWait).
    """
    print(" ")
    print(f" Mark_IV : {Text}")
    engine.say(Text)
    engine.runAndWait()
    print(" ")
| Himanshu6453/Artificial-Intelligent-Assistant-Mark- | Speak.py | Speak.py | py | 372 | python | en | code | 1 | github-code | 13 |
23176690330 | import numpy as np
import pandas as pd
import re
import phonenumbers
import warnings
warnings.simplefilter('ignore')
# set the max columns to none
pd.set_option('display.max_columns', None)
country_details = {
'India': {'code': 'IN', 'code_number': 91, 'len_with_code': 12, 'len_without_code': 10},
'South Africa': {'code': 'ZA', 'code_number': 27, 'len_with_code': 11, 'len_without_code': 9},
'UAE': {'code': 'AE', 'code_number': 971, 'len_with_code': 12, 'len_without_code': 9},
'Pakistan': {'code': 'PK', 'code_number': 92, 'len_with_code': 12, 'len_without_code': 10},
'Singapore': {'code': 'SG', 'code_number': 65, 'len_with_code': 11, 'len_without_code': 8},
'Egypt': {'code': 'EG', 'code_number': 20, 'len_with_code': 11, 'len_without_code': 10},
'Nigeria': {'code': 'NG', 'code_number': 234, 'len_with_code': 13, 'len_without_code': 11},
'Kuwait': {'code': 'KW', 'code_number': 965, 'len_with_code': 12, 'len_without_code': 8},
'Kenya': {'code': 'KE', 'code_number': 254, 'len_with_code': 12, 'len_without_code': 9},
'Australia': {'code': 'AU', 'code_number': 61, 'len_with_code': 11, 'len_without_code': 9},
'Qatar': {'code': 'QA', 'code_number': 974, 'len_with_code': 12, 'len_without_code': 8},
'United States': {'code': 'US', 'code_number': 1, 'len_with_code': 12, 'len_without_code': 10},
'Canada': {'code': 'CA', 'code_number': 1, 'len_with_code': 12, 'len_without_code': 10},
'United Kingdom': {'code': 'GB', 'code_number': 44, 'len_with_code': 12, 'len_without_code': 10},
'Malaysia': {'code': 'MY', 'code_number': 60, 'len_with_code': 11, 'len_without_code': 9},
'Philippines': {'code': 'PH', 'code_number': 63, 'len_with_code': 12, 'len_without_code': 10},
'New Zealand': {'code': 'NZ', 'code_number': 64, 'len_with_code': 11, 'len_without_code': 9},
'Sri Lanka': {'code': 'LK', 'code_number': 94, 'len_with_code': 12, 'len_without_code': 9},
'Indonesia': {'code': 'ID', 'code_number': 62, 'len_with_code': 12, 'len_without_code': 10},
'Germany': {'code': 'DE', 'code_number': 49, 'len_with_code': 12, 'len_without_code': 10},
'France': {'code': 'FR', 'code_number': 33, 'len_with_code': 12, 'len_without_code': 9},
'Brazil': {'code': 'BR', 'code_number': 55, 'len_with_code': 12, 'len_without_code': 10},
'Bangladesh': {'code': 'BD', 'code_number': 880, 'len_with_code': 13, 'len_without_code': 11},
'Hong Kong': {'code': 'HK', 'code_number': 852, 'len_with_code': 12, 'len_without_code': 8},
'Thailand': {'code': 'TH', 'code_number': 66, 'len_with_code': 11, 'len_without_code': 9},
'Netherlands': {'code': 'NL', 'code_number': 31, 'len_with_code': 11, 'len_without_code': 9},
'Italy': {'code': 'IT', 'code_number': 39, 'len_with_code': 12, 'len_without_code': 10},
'Spain': {'code': 'ES', 'code_number': 34, 'len_with_code': 12, 'len_without_code': 9},
'Turkey': {'code': 'TR', 'code_number': 90, 'len_with_code': 12, 'len_without_code': 10},
'Greece': {'code': 'GR', 'code_number': 30, 'len_with_code': 12, 'len_without_code': 10},
'Sweden': {'code': 'SE', 'code_number': 46, 'len_with_code': 11, 'len_without_code': 9},
'Norway': {'code': 'NO', 'code_number': 47, 'len_with_code': 11, 'len_without_code': 8},
'Portugal': {'code': 'PT', 'code_number': 351, 'len_with_code': 12, 'len_without_code': 9},
'Russia': {'code': 'RU', 'code_number': 7, 'len_with_code': 12, 'len_without_code': 10},
'Switzerland': {'code': 'CH', 'code_number': 41, 'len_with_code': 12, 'len_without_code': 9},
'Belgium': {'code': 'BE', 'code_number': 32, 'len_with_code': 11, 'len_without_code': 9},
'Poland': {'code': 'PL', 'code_number': 48, 'len_with_code': 12, 'len_without_code': 9},
'Ireland': {'code': 'IE', 'code_number': 353, 'len_with_code': 12, 'len_without_code': 9},
'Ukraine': {'code': 'UA', 'code_number': 380, 'len_with_code': 12, 'len_without_code': 9},
'Argentina': {'code': 'AR', 'code_number': 54, 'len_with_code': 12, 'len_without_code': 10},
'Mexico': {'code': 'MX', 'code_number': 52, 'len_with_code': 12, 'len_without_code': 10},
'Japan': {'code': 'JP', 'code_number': 81, 'len_with_code': 12, 'len_without_code': 10},
'China': {'code': 'CN', 'code_number': 86, 'len_with_code': 13, 'len_without_code': 11},
'South Korea': {'code': 'KR', 'code_number': 82, 'len_with_code': 12, 'len_without_code': 10},
'Vietnam': {'code': 'VN', 'code_number': 84, 'len_with_code': 12, 'len_without_code': 10},
'Israel': {'code': 'IL', 'code_number': 972, 'len_with_code': 12, 'len_without_code': 9}
}
def process_phone_numbers(row):
    """Validate the two phone columns of a dataframe row.

    For each of 'Phone 1.1' and 'Phone 2.1', find a country whose dialing
    prefix matches and whose expected length fits, then validate with the
    phonenumbers library. Valid numbers are collected as strings, invalid
    candidates as None.

    Fixes: the original duplicated the whole matching loop for each column;
    the logic is now shared by a single helper (behavior unchanged).

    :param row: pandas row with 'Phone 1.1' and 'Phone 2.1' string columns
    :return: list of validated numbers (None entries for failed candidates),
        or None when no candidate matched at all
    """
    valid_numbers = []

    def is_valid_phone_number(phone, country):
        parsed_number = phonenumbers.parse(phone, country)
        return phonenumbers.is_valid_number(parsed_number)

    def add_valid_number(phone, country):
        valid_numbers.append(phone if is_valid_phone_number(phone, country) else None)

    def check_phone(phone):
        if phone is None:
            return
        for details in country_details.values():
            code = '+' + str(details['code_number'])
            if phone.startswith(code):
                if len(phone) == details['len_with_code']:
                    add_valid_number(phone, details['code'])
                elif len(phone) == details['len_without_code']:
                    add_valid_number(code + phone, details['code'])

    check_phone(row['Phone 1.1'])
    check_phone(row['Phone 2.1'])
    return valid_numbers if len(valid_numbers) > 0 else None
def convert_number_format(phone_number):
    """Normalise a raw phone-number cell to a digits-only string.

    Fixes two defects in the original:
    - the ``'E' in digits_only`` branch was dead code - after ``\\D`` strips
      non-digits the string can never contain an E;
    - float cells (spreadsheet exports) gained a spurious trailing "0" from
      the ".0" suffix (e.g. 919876543210.0 -> "9198765432100").

    :param phone_number: raw cell value (str, float, or NaN)
    :return: digits-only string, or None for missing values
    """
    if pd.isna(phone_number):
        return None
    # Floats come from spreadsheet exports; go through int so neither the
    # trailing ".0" nor scientific notation leaks extra digits.
    if isinstance(phone_number, float):
        phone_number = int(phone_number)
    text = str(phone_number)
    # Some exports store numbers in scientific notation as *text*.
    if re.fullmatch(r'[0-9.]+[eE]\+?[0-9]+', text.strip()):
        text = '{:.0f}'.format(float(text))
    # Remove non-digit characters
    return re.sub(r'\D', '', text)
# Load the raw export (latin-1 because the source file is not UTF-8 clean).
df =pd.read_csv(r"C:\Users\ADITYA PC\Downloads\Part 1.csv",encoding='latin-1')
# Assuming the phone numbers are in a pandas DataFrame column called 'Phone Numbers'
df['Phone 1.1'] = df['Phone 1.1'].apply(convert_number_format)
df['Phone 2.1'] = df['Phone 2.1'].apply(convert_number_format)
df['Valid Phone Numbers'] = df.apply(process_phone_numbers, axis=1)
# Drop the None placeholders; rows with no candidates get the string 'None'.
df['Valid Phone Numbers'] = df['Valid Phone Numbers'].apply(lambda x: [number for number in x if number is not None] if x is not None else 'None')
# NOTE(review): this count is computed but never stored or printed - dead statement.
df['Valid Phone Numbers'][df['Valid Phone Numbers'] != "None"].count()
file_path = r"C:\Users\ADITYA PC\Downloads\CSV_1.csv"
# Save the concatenated data frame to the specified path
df.to_csv(file_path, index=False)
28081762870 | # 문제 설명
# 정수가 담긴 리스트 num_list가 주어질 때, num_list의 원소 중 짝수와 홀수의 개수를 담은 배열을 return 하도록 solution 함수를 완성해보세요.
# 제한사항 : 1 ≤ num_list의 길이 ≤ 100, 0 ≤ num_list의 원소 ≤ 1,000
# 호출 결과 : num_list = [1, 2, 3, 4, 5], sum_result = [2, 3]
def solution(num_list):
    """Count even and odd elements of num_list and return [evens, odds].

    Elements are only counted while the stated constraints hold
    (1 <= len(num_list) <= 100 and 0 <= element <= 1000).
    """
    size = len(num_list)
    evens = 0
    odds = 0
    for value in num_list:
        # mirror the problem constraints before counting
        if 1 <= size <= 100 and 0 <= value <= 1000:
            if value % 2 == 0:
                evens += 1
            elif value % 2 == 1:
                odds += 1
    return [evens, odds]
# Improvement 1: count parity with a two-slot accumulator.
def solution(num_list):
    """Return [even_count, odd_count] for num_list."""
    tally = [0, 0]
    for value in num_list:
        tally[value % 2] += 1
    return tally
# Explanation: value % 2 indexes slot 0 for evens and slot 1 for odds.
# Example output. solution() returns a list, so it must be stringified
# before concatenation - the original raised
# "TypeError: can only concatenate str (not "list") to str".
print("출력 : " + str(solution([1, 2, 3, 4, 5])))
print("출력 : " + str(solution([1, 2, 3, 4, 5, 7, 8, 8])))
13102793764 | import wx
from automata.organism import Organism
from layout import spring_layout
import itertools
import support
WIN_WIDTH = 800 # Main window width
WIN_HEIGHT = 800 # Main window height
FORCE_FQ = 100 # The frequency of a force-directed algorithm updates in ms
ITERATION_FQ = 1000 # The frequency of organism's iterations in ms
LAYOUT_ITERATIONS = 10 # The number of iterations in a force-directed algorithm per update
FRAME_WIDTH = 600 # Graph frame width
FRAME_HEIGHT = 600 # Graph frame height
class GraphVisualizerFrame(wx.Frame):
    """Top-level window that renders an Organism's graph with a spring layout.

    Two wx timers drive the display: one advances the force-directed layout,
    the other advances the organism simulation; drawing is double-buffered.
    """

    def __init__(self, parent, title, organism):
        self.organism = organism
        super(GraphVisualizerFrame, self).__init__(parent, title=title, size=(WIN_WIDTH, WIN_HEIGHT))
        self.InitUI()
        self.Show(True)

    def InitUI(self):
        """
        Initializes main UI elements: panel, toolbar, bitmap buffer.
        """
        self.panel = wx.Panel(self)
        self.SetMenuBar(wx.MenuBar())
        toolbar = self.CreateToolBar()
        toolbar.Realize()
        # Off-screen bitmap used for double-buffered drawing.
        self.buffer = wx.EmptyBitmap(WIN_WIDTH, WIN_HEIGHT)
        self.draw(None)

    def draw(self, event):
        """
        Initializes timers related to displaying the organism.
        """
        # Layout updates run much more often than simulation iterations.
        self.force_timer = wx.Timer(self)
        self.force_timer.Start(FORCE_FQ)
        self.iterate_timer = wx.Timer(self)
        self.iterate_timer.Start(ITERATION_FQ)
        self.Bind(wx.EVT_TIMER, self.update_layout, self.force_timer)
        self.Bind(wx.EVT_TIMER, self.update_organism, self.iterate_timer)
        dc = wx.BufferedDC(wx.ClientDC(self.panel), self.buffer)
        spring_layout(self.organism.graph, width=FRAME_WIDTH, height=FRAME_HEIGHT, iterations=LAYOUT_ITERATIONS, c=0.2)
        # Generate a color for each state
        states = self.organism.genome.states()
        self.colors = dict(zip(states, support.distinct_colors(len(states))))
        self.draw_graph(dc)

    def update_layout(self, event):
        """
        Updates layout by calling force-directed algorithm.
        """
        # spring_layout returns falsy once converged; stop the layout timer.
        if not spring_layout(self.organism.graph, width=FRAME_WIDTH, height=FRAME_HEIGHT, iterations=LAYOUT_ITERATIONS, c=0.2):
            self.force_timer.Destroy()
        self.update(event)

    def update_organism(self, event):
        """
        Updates organism by performing one iteration.
        """
        # iterate() returning falsy means the organism is done evolving.
        if not self.organism.iterate():
            self.iterate_timer.Destroy()

    def update(self, event):
        """
        Updates bitmap buffer.
        """
        if self.force_timer.IsRunning() or self.iterate_timer.IsRunning():
            dc = wx.BufferedDC(wx.ClientDC(self.panel), self.buffer)
            self.draw_graph(dc)

    def draw_graph(self, dc):
        """
        Draws graph in bitmap buffer, called by update()
        """
        dc.Clear()
        dc.SetBrush(wx.Brush('#000000'))
        dc.DrawRectangle(0, 0, WIN_WIDTH, WIN_HEIGHT)
        # Draw an edge for every connected pair, colored by the parent's state.
        for pair in itertools.combinations(self.organism.graph.keys(), 2):
            edge_state = None
            if pair[0] in self.organism.graph[pair[1]]:
                if pair[0] in pair[1].imediate_parents:
                    edge_state = pair[0].state
                elif pair[1] in pair[0].imediate_parents:
                    edge_state = pair[1].state
                # NOTE(review): if neither node is the other's parent,
                # edge_state stays None and self.colors[None] would raise -
                # confirm every connected pair has a parent relationship.
                dc.SetPen(wx.Pen(self.colors[edge_state]))
                # Translate layout coordinates into centered window coordinates.
                x1 = int(pair[0].pos['x']) + FRAME_WIDTH/2 + (WIN_WIDTH - FRAME_WIDTH)/2
                y1 = int(pair[0].pos['y']) + FRAME_HEIGHT/2 + (WIN_HEIGHT - FRAME_HEIGHT)/2
                x2 = int(pair[1].pos['x']) + FRAME_WIDTH/2 + (WIN_WIDTH - FRAME_WIDTH)/2
                y2 = int(pair[1].pos['y']) + FRAME_HEIGHT/2 + (WIN_HEIGHT - FRAME_HEIGHT)/2
                dc.DrawLine(x1, y1, x2, y2)
# Build the wx app, grow an organism from its genome string, and display it.
app = wx.App()
code = 'A|2|3|++|A'
organism = Organism(code)
frame = GraphVisualizerFrame(None, 'Organism', organism)
frame.Show()  # the frame already calls Show(True) in __init__; harmless here
app.MainLoop()
app.Destroy()
| olya-d/growing-graph | screen.py | screen.py | py | 3,937 | python | en | code | 0 | github-code | 13 |
38834254970 | import logging
import mock
from dining_philosophers.constants import PhilosopherState
from dining_philosophers.philosophers import Philosopher
from dining_philosophers.forks import Fork
class TestPhilosophers:
    """Unit tests for the Philosopher thread.

    Relies on pytest fixtures defined elsewhere (``philosopher``,
    ``mock_sleep``, ``caplog``) and uses mock.patch to keep the threading
    and timing out of the tests.
    """

    def test_create_philosopher(self):
        # A freshly created philosopher starts out thinking.
        ID = 0
        left_fork = Fork(0)
        right_fork = Fork(1)
        philosopher = Philosopher(ID, (left_fork, right_fork))
        assert philosopher.id == ID
        assert philosopher.state == PhilosopherState.THINKING

    def test_run_philosopher_thread(self):
        # start() must delegate to threading.Thread.start exactly once.
        ID = 0
        left_fork = Fork(0)
        right_fork = Fork(1)
        philosopher = Philosopher(ID, (left_fork, right_fork))
        with mock.patch(
            'dining_philosophers.philosophers.threading.Thread.start'
        ) as mock_start_thread:
            philosopher.start()
            mock_start_thread.assert_called_once()

    def test_run_philosopher_thread_with_philosopher_already_full_should_log_and_return(  # noqa
        self, philosopher: Philosopher, caplog
    ):
        expected_log = f'{str(philosopher)} is full'
        # full == 3 means the philosopher has already eaten enough.
        philosopher.full = 3
        with caplog.at_level(logging.INFO):
            philosopher.run()
        # NOTE(review): bare ``caplog.records`` is a no-op statement.
        caplog.records
        log_messages = [log.message for log in caplog.records]
        assert expected_log in log_messages

    def test_run_philosopher_thread_with_philosopher_should_eat_until_he_is_hungry(  # noqa
        self, philosopher: Philosopher, caplog
    ):
        expected_log = f'{str(philosopher)} is full'
        # Patch out eat/think so run() terminates without real waiting.
        with caplog.at_level(logging.INFO), mock.patch(
            'dining_philosophers.philosophers.Philosopher.eat'
        ), mock.patch(
            'dining_philosophers.philosophers.Philosopher.think'
        ):
            philosopher.run()
        # NOTE(review): bare ``caplog.records`` is a no-op statement.
        caplog.records
        log_messages = [log.message for log in caplog.records]
        assert expected_log in log_messages

    def test_eat_as_owner_of_both_forks_should_set_state_to_eat(
        self, philosopher: Philosopher, mock_sleep
    ):
        assert philosopher.state == PhilosopherState.THINKING
        # Grant ownership of both forks directly.
        for fork in philosopher.forks:
            fork._owner = philosopher
        philosopher.eat()
        assert philosopher.state == PhilosopherState.EATING

    def test_eat_with_missing_ownership_of_forks_should_request_to_both_neighbors(  # noqa
        self, philosopher: Philosopher, mock_sleep
    ):
        with mock.patch("dining_philosophers.forks.Fork.request"):
            philosopher.eat()
            for fork in philosopher.forks:
                fork.request.assert_called_with(philosopher)

    def test_think_should_set_state_to_thinking_and_done_eating_with_the_forks(  # noqa
        self, philosopher: Philosopher, mock_sleep
    ):
        philosopher.state = PhilosopherState.EATING
        with mock.patch("dining_philosophers.forks.Fork.done"):
            philosopher.think()
            for fork in philosopher.forks:
                assert fork.done.called
        assert philosopher.state == PhilosopherState.THINKING
| lievi/dining_philosophers | tests/test_philosophers.py | test_philosophers.py | py | 3,045 | python | en | code | 4 | github-code | 13 |
42158005520 | import sys
import pandas as pd
import numpy as np
import re
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
import os
from src.data import load_save_data
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import precision_recall_fscore_support
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier
# these are required for the tokenize function to work. Download them once at the start here.
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
def select_inputs_labels(data_frame):
    """Split the raw dataframe into model inputs and category labels.

    :param data_frame: dataframe with "id", "message", "original", "genre"
        columns plus one column per output category
    :return: (messages series, labels dataframe)
    """
    messages = data_frame["message"]
    categories = data_frame.drop(columns=["id", "message", "original", "genre"], axis=1)
    return messages, categories
def replace_urls(string_input: str, replace_by: str = "URL"):
    """
    Replace url's in a string by replace_by

    Fixes the original pattern, whose character class contained a literal
    space (``[!*\\(\\), ]``): a match could run across spaces and swallow
    the text following a URL.

    :param string_input: string input
    :param replace_by: string, what we want to replace the url with
    :return: string, with urls replaced by replaced_by
    """
    return re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', replace_by,
                  string_input)
def remove_punctuation(text):
    """Map every character outside [a-zA-Z0-9] to a single space."""
    cleaned = []
    for ch in text:
        if 'a' <= ch <= 'z' or 'A' <= ch <= 'Z' or '0' <= ch <= '9':
            cleaned.append(ch)
        else:
            cleaned.append(' ')
    return ''.join(cleaned)
def tokenize(text: str):
    """
    tokenize some input text. We convert to lower case, replace urls, remove punctuation,
    remove stop words, lemmatize, etc..

    Fixes the ordering of the cleaning steps: remove_punctuation deletes
    "://" and dots, so in the original order replace_urls could never match
    a URL. URLs are now replaced before punctuation is stripped.

    :param text: string, some text we want to tokenize
    :return: a list of strings, the tokens in the original text
    """
    # lowercase
    text = text.lower()
    # replace url's BEFORE stripping punctuation (see docstring)
    text = replace_urls(text)
    # remove punctuation
    text = remove_punctuation(text)
    # remove numbers, replace with space (they don't really add much)
    text = re.sub("\d", " ", text)
    # tokenize into words
    tokens = word_tokenize(text)
    # lemmatize and remove stopwords
    stop_words = stopwords.words("english")
    lemmatizer = WordNetLemmatizer()
    tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]
    tokens = [lemmatizer.lemmatize(word, pos="v") for word in tokens]
    return tokens
def build_model(do_gridsearch: bool = True):
    """
    Create a pipeline and define parameters to search over. GridSearchCV will find the best set of parameter
    using cross validation.
    If we do not want to use a grid search (it may take long), just set do_gridsearch to False
    :param do_gridsearch: boolean, if True then return a model which will perform a grid search, otherwise the model
    just consistent of the steps in the pipeline.
    :return: model
    """
    # bag-of-words -> tf-idf -> one random forest per output category
    pipeline = Pipeline([
        ("count_vec", CountVectorizer(tokenizer=tokenize)),
        ("tfidf", TfidfTransformer()),
        ("classifier", MultiOutputClassifier(RandomForestClassifier(n_estimators=100))),
    ])
    # Parameter names follow the "<step>__<param>" pipeline convention.
    parameters = {"count_vec__max_df": [0.95, 0.99, 1.0],
                  #"count_vec__min_df": [0.005, 0.01, 1],
                  "classifier__estimator__n_estimators": [50, 100],
                  "classifier__estimator__max_features": ["sqrt", "log2"]
                  }
    if do_gridsearch:
        # 5-fold cross validation over the grid, 4 worker processes.
        model = GridSearchCV(pipeline, parameters, cv=5, n_jobs=4, verbose=2)
    else:
        model = pipeline
    return model
def evaluate_model(model, inputs_test, labels_test, category_names):
    """
    Given our model and some input test data with known labels, we evaluate how well the model performs.
    We return a dataframe with the precision, recall and F1 score for each of our output categories.

    Fixes: the docstring promised the scores back but the function returned
    None; it now returns score_df (callers that ignore the value are
    unaffected). Also uses numeric_only in mean() so the string "category"
    column does not raise a TypeError on pandas >= 2.0.

    :param model: sklearn estimator, the trained model which has a .predict() function
    :param inputs_test: test input, should be e.g. a pandas series of strings
    :param labels_test: pandas dataframe, the known outputs for our inputs, should have same number of rows as
    inputs_test and has multiple column as we're predicting multiple categories
    :param category_names: list of strings, names of our output categories
    :return: pandas dataframe with scores for every output category, each category is a row.
    """
    y_hat = model.predict(inputs_test)
    score_df = pd.DataFrame({"category": category_names, "precision": np.nan, "recall": np.nan, "F1 score": np.nan})
    for ii, col_name in enumerate(category_names):
        # weighted average accounts for label imbalance within each category
        pre, rec, score, support = precision_recall_fscore_support(labels_test.iloc[:, ii], y_hat[:, ii], average="weighted")
        score_df.loc[score_df["category"] == col_name, "precision"] = pre
        score_df.loc[score_df["category"] == col_name, "recall"] = rec
        score_df.loc[score_df["category"] == col_name, "F1 score"] = score
    print(score_df)
    print(score_df.mean(numeric_only=True))
    return score_df
def main():
    """Train-and-save entry point.

    Usage: python train_classifier.py <database_filepath> <model_filepath>.
    Loads messages from the database, fits the pipeline (grid search is
    disabled here for speed), prints evaluation scores, and pickles the
    trained model.
    """
    if len(sys.argv) == 3:
        database_filepath, model_filepath = sys.argv[1:]
        print(f"Loading data...\n    DATABASE: {database_filepath}")
        print(f"current directory {os.getcwd()}")
        df = load_save_data.load_data_from_database(database_filepath)
        inputs, labels = select_inputs_labels(df)
        category_names = labels.columns
        # 80/20 train/test split
        inputs_train, inputs_test, labels_train, labels_test = train_test_split(inputs, labels, test_size=0.2)
        print("Building model...")
        model = build_model(do_gridsearch=False)
        print("Training model...")
        model.fit(inputs_train, labels_train)
        # print(f"Parameters used are {model.best_params_}")
        print("Evaluating model...")
        evaluate_model(model, inputs_test, labels_test, category_names)
        print(f"Saving model...\n    MODEL: {model_filepath}")
        load_save_data.pickle_dump(model, model_filepath)
        print("Trained model saved!")
    else:
        print("Please provide the filepath of the disaster messages database as the first argument and the filepath "
              "of the pickle file to save the model to as the second argument. \n\nExample: "
              "python src/models/train_classifier.py disaster_response.db models/classifier.pkl")
if __name__ == '__main__':
main()
| Hannemit/disaster_response | src/models/train_classifier.py | train_classifier.py | py | 6,419 | python | en | code | 0 | github-code | 13 |
34988894699 | #coding=utf-8
'''
F(n) = F(1, n) + F(2, n) + ... + F(n, n).
Optimal Substructure:
Given a sequence 1…n, we pick a number i out of the sequence as the root,
then the number of unique BST with the specified root F(i),
is the cartesian product of the number of BST for its left and right subtrees.
Overlapping Subproblems:
the result only related to the number of the nodes, no need to care about the
specific value of each node (1, 2 and 3, 4 share the same number)
'''
class Solution(object):
    def numTrees(self, n):
        """Count structurally unique BSTs storing values 1..n.

        dp[i] is the number of unique BSTs with i nodes; picking node j as
        the root splits the sequence into j-1 left and i-j right nodes, so
        dp[i] = sum(dp[j-1] * dp[i-j]) (the Catalan recurrence).

        Fixes: ``xrange`` is Python-2-only (NameError under Python 3);
        ``range`` works on both. Also guards n < 1, which previously
        raised an IndexError (the empty tree counts as one arrangement).

        :type n: int
        :rtype: int
        """
        if n < 1:
            return 1
        dp = [0] * (n + 1)
        dp[0] = 1
        dp[1] = 1
        for i in range(2, n + 1):
            for j in range(1, i + 1):
                dp[i] += dp[j - 1] * dp[i - j]
        return dp[n]
73871228499 | """Define the Transport layer between AioWeb3 client and the Web3 server
This file includes 3 implementations of the Transport layer:
- IPCTransport: for IPC connection
- WebsocketTransport: for WebSocket connection
- HTTPTransport: for HTTP connection
They share a common interface defined by `BaseTransport`.
"""
import abc
import asyncio
import itertools
import json
import logging
from typing import Any, Dict, Literal, Optional, Tuple, Type, Union
import aiohttp
import pydantic
from aiohttp.payload import BytesPayload
from websockets.legacy.client import WebSocketClientProtocol, connect
from .endpoints import RPCMethod
from .exceptions import Web3APIError, Web3TimeoutError
class Subscription:
    """A handle for one Web3 pub/sub subscription.

    Pairs the server-assigned subscription id with the queue that the
    transport's listener fills with notifications; supports ``async for``.
    """

    def __init__(self, subscription_id: str, queue: asyncio.Queue):
        self.subscription_id = subscription_id
        self.queue = queue

    @property
    def id(self) -> str:
        """The server-assigned subscription ID."""
        return self.subscription_id

    def __aiter__(self):
        # The subscription acts as its own async iterator.
        return self

    async def __anext__(self):
        # Block until the listener task enqueues the next notification.
        item = await self.queue.get()
        return item
class RequestMessage(pydantic.BaseModel):
    """Representing a Web3 request"""

    jsonrpc: Literal["2.0"]  # JSON-RPC protocol version, always "2.0"
    method: str  # RPC method name, e.g. "eth_subscribe"
    params: Any  # positional parameters for the method
    id: int  # client-chosen id used to match the response
class ResponseMessage(pydantic.BaseModel):
    """Representing a Web3 response"""

    jsonrpc: Literal["2.0"]  # JSON-RPC protocol version, always "2.0"
    error: Any  # error payload from the server, falsy on success
    result: Any  # result payload, meaningful when error is falsy
    id: int  # id of the request this response answers
class NotificationParams(pydantic.BaseModel):
    """Representing a Web3 notification"""

    subscription: str  # subscription id, e.g., "0xcd0c3e8af590364c09d0fa6a1210faf5"
    result: Any  # the notification payload for that subscription
class NotificationMessage(pydantic.BaseModel):
    """Representing a Web3 notification message

    Doc: https://geth.ethereum.org/docs/rpc/pubsub
    """

    jsonrpc: Literal["2.0"]  # JSON-RPC protocol version, always "2.0"
    method: Literal["eth_subscription"]  # distinguishes notifications from responses
    params: NotificationParams  # subscription id plus payload
class BaseTransport(abc.ABC):
    """Base class for the transportation layer

    AioWeb3 uses this instance to connect to a Web3 server.
    """

    def __init__(self, uri: str):
        self.uri = uri  # server endpoint (http(s)://, ws(s)://, or an IPC path)
        self.logger = logging.getLogger(__name__)
        # Monotonically increasing counter used for JSON-RPC request ids.
        self._rpc_counter = itertools.count(1)

    @abc.abstractmethod
    async def close(self) -> None:
        """Gracefully close the transport

        Subclass should implement this method.
        """

    async def send_request(self, method: str, params: Any = None, timeout: float = 60) -> Any:
        """Send a Web3 request and return the response

        This method may raise Web3APIError if we got an error response from the server. It may also
        raise Web3TimeoutError if the request timed out.
        """
        request_id = next(self._rpc_counter)
        rpc_dict = {
            "jsonrpc": "2.0",
            "method": method,
            "params": params or [],
            "id": request_id,
        }
        request = RequestMessage(**rpc_dict)
        try:
            response = await asyncio.wait_for(self._send_request(request), timeout=timeout)
            if response.error:
                # Only asyncio.TimeoutError is caught below, so this
                # Web3APIError propagates to the caller as intended.
                raise Web3APIError(f"Received error response {response} for request {request}")
        except asyncio.TimeoutError as exc:
            raise Web3TimeoutError(
                f"Timeout after {timeout} seconds for request {request}"
            ) from exc
        return response.result

    @abc.abstractmethod
    async def _send_request(self, request: RequestMessage) -> ResponseMessage:
        """Actual implementation for `send_request`"""

    async def subscribe(self, params: Any) -> Subscription:
        """Make a new subscription

        Note that only TwoWayTransport (WebSocket and IPC) supports subscriptions.
        """
        raise NotImplementedError

    async def unsubscribe(self, subscription: Subscription) -> None:
        """Unsubscribe from a subscription

        Note that only TwoWayTransport (WebSocket and IPC) supports subscriptions.
        """
        raise NotImplementedError

    def _parse_message(self, msg: bytes) -> Union[ResponseMessage, NotificationMessage]:
        """Parse the response message from Web3 server"""
        self.logger.debug("inbound: %s", msg.decode().rstrip("\n"))
        try:
            j = json.loads(msg)
            # Notification messages carry a "method" field; responses do not.
            if "method" in j:
                return NotificationMessage(**j)
            else:
                return ResponseMessage(**j)
        except Exception as exc:
            raise Web3APIError("Failed to parse message {!r}".format(msg)) from exc
class PersistentListener:
    """Helps TwoWayTransport continuously listen to new messages from the Web3 server

    Each time we send a new request to the server, we will check if the listening task is still
    alive. If it is not, this class will restart the listening task. This class is useful so that an
    one-off expection does not make the TwoWayTransport class disfunctional for future requests.
    """

    def __init__(self, listen_func) -> None:
        self.listen_func = listen_func  # coroutine function that reads the socket forever
        # Set by the listener (via is_ready) once it can accept responses.
        self.is_listening: Optional[asyncio.Event] = None
        self.task: Optional[asyncio.Task] = None

    async def __aenter__(self) -> None:
        # (Re)start the listener if it has never run or has died.
        if self.task is None or self.task.done():
            self.is_listening = asyncio.Event()
            self.task = asyncio.create_task(self.listen_func())
        # make sure that we started listening before proceeding
        await self.is_listening.wait()

    async def __aexit__(
        self, exc_type: Type[BaseException], exc_val: BaseException, exc_tb
    ) -> None:
        # On error, tear the listener down so the next request restarts it.
        if exc_val is not None:
            try:
                if self.task is not None:
                    self.task.cancel()
            except Exception:  # pylint: disable=broad-except
                pass
            self.task = None

    def is_ready(self) -> None:
        """Callback for the listening function to signal that it is ready to accept responses"""
        if self.is_listening is not None:
            self.is_listening.set()

    def close(self):
        """Close this listener"""
        if self.task is not None:
            self.task.cancel()
            self.task = None
class TwoWayTransport(BaseTransport, metaclass=abc.ABCMeta):
    """Shared base class for WebSocketTransport and IPCTransport

    Adds request/response correlation (keyed by request id) and pub/sub
    routing on top of the raw byte stream provided by the subclass.
    """

    def __init__(self, uri: str):
        super().__init__(uri)
        # Restarts the background listen() task whenever a request is sent.
        self.listener = PersistentListener(self.listen)
        # In-flight requests: request id -> future resolved by the listener.
        self._requests: Dict[int, asyncio.Future[ResponseMessage]] = {}
        # Active subscriptions: subscription id -> queue of notifications.
        self._subscriptions: Dict[str, asyncio.Queue[Any]] = {}

    async def _send_request(self, request: RequestMessage) -> ResponseMessage:
        """Send one JSON-RPC request and await its correlated response."""
        data = json.dumps(request.dict(), separators=(",", ":")).encode("utf-8")
        fut = asyncio.get_event_loop().create_future()
        self._requests[request.id] = fut
        try:
            self.logger.debug("outbound: %s", data.decode())
            async with self.listener:
                await self.send(data)
                result = await fut
        finally:
            # whether we got an error or not, we're done with this request
            del self._requests[request.id]
        return result

    async def subscribe(self, params: Any) -> Subscription:
        """Make a new subscription

        Documentation: https://geth.ethereum.org/docs/rpc/pubsub
        """
        subscription_id = await self.send_request(RPCMethod.eth_subscribe, params)
        queue: asyncio.Queue = asyncio.Queue()
        self._subscriptions[subscription_id] = queue
        return Subscription(subscription_id, queue)

    async def unsubscribe(self, subscription: Subscription) -> None:
        """Unsubscribe from a subscription"""
        assert isinstance(subscription, Subscription)
        response = await self.send_request(RPCMethod.eth_unsubscribe, [subscription.id])
        assert response
        queue = self._subscriptions[subscription.id]
        del self._subscriptions[subscription.id]
        # NOTE(review): asyncio.Queue.task_done() raises ValueError when no
        # matching get() preceded it -- confirm this call is intended.
        queue.task_done()

    @abc.abstractmethod
    async def send(self, data: bytes):
        """Send binary data to the Web3 server"""

    @abc.abstractmethod
    async def receive(self) -> bytes:
        """Receive binary data from the Web3 server"""

    @abc.abstractmethod
    async def close(self):
        """Close the transport

        Subclass implementation should call `super().close()` to close the listener.
        """
        self.listener.close()

    def _handle_response_message(self, response: ResponseMessage):
        # Resolve the future of the in-flight request this response answers.
        if response.id in self._requests:
            self._requests[response.id].set_result(response)
        else:
            self.logger.warning("Unsolicitated response message: %s", response)

    def _handle_notification_message(self, notification: NotificationMessage):
        # Route a pub/sub notification to its subscription's queue.
        sub_id = notification.params.subscription
        if sub_id in self._subscriptions:
            self._subscriptions[sub_id].put_nowait(notification.params.result)
        else:
            self.logger.warning("Unsolicitated notification message: %s", notification)

    async def listen(self):
        """Listening to Web3 server for responses

        The listener is shared across multiple requests. The `PersistentListener` class will make
        sure that the listener is running for new requests.
        """
        self.logger.info("Starting listening for messages %s", self.uri)
        # Dispatch on the concrete message type returned by _parse_message.
        handlers = {
            ResponseMessage: self._handle_response_message,
            NotificationMessage: self._handle_notification_message,
        }
        while True:
            msg = await self.receive()
            parsed = self._parse_message(msg)
            handlers[type(parsed)](parsed)
class PersistentSocket:
    """Lazily-opened, reusable UNIX-socket connection used by IPCTransport.

    The connection is established on first ``async with`` use, reused on
    later entries, and dropped whenever the guarded block raises so the
    next use reconnects from scratch.
    """

    def __init__(self, ipc_path: str) -> None:
        self.ipc_path = ipc_path
        self.reader_writer: Optional[Tuple[asyncio.StreamReader, asyncio.StreamWriter]] = None

    async def __aenter__(self) -> Tuple[asyncio.StreamReader, asyncio.StreamWriter]:
        # Connect on demand, then hand back the cached stream pair.
        if self.reader_writer is None:
            self.reader_writer = await asyncio.open_unix_connection(self.ipc_path)
        return self.reader_writer

    async def __aexit__(
        self, exc_type: Type[BaseException], exc_val: BaseException, exc_tb
    ) -> None:
        if exc_val is None:
            return
        # An error occurred inside the block: best-effort close, then forget
        # the connection so a fresh one is opened next time.
        try:
            if self.reader_writer is not None:
                self.reader_writer[1].close()
        except Exception:  # pylint: disable=broad-except
            pass
        self.reader_writer = None

    async def close(self):
        """Close the socket connection"""
        conn, self.reader_writer = self.reader_writer, None
        if conn is not None:
            conn[1].close()
class IPCTransport(TwoWayTransport):
    """Transport via UNIX Socket"""

    def __init__(self, local_ipc_path: str):
        super().__init__(local_ipc_path)
        # Lazily-opened, reusable stream connection to the local node.
        self.socket = PersistentSocket(local_ipc_path)

    async def send(self, data: bytes):
        async with self.socket as (_, writer):
            writer.write(data)
            await writer.drain()

    async def receive(self) -> bytes:
        async with self.socket as (reader, _):
            # Signal readiness only after the connection has been opened.
            self.listener.is_ready()
            # readuntil() defaults to b"\n" -- messages are line-delimited.
            msg = await reader.readuntil()
            return msg

    async def close(self) -> None:
        await super().close()  # first stop listening
        await self.socket.close()  # then stop connection
class PersistentWebSocket:
    """Helps WebSocketTransport to establish a persistent socket connection to the Web3 server"""

    def __init__(self, endpoint_uri: str, websocket_kwargs: Any) -> None:
        # The connection is opened lazily on first __aenter__.
        self.ws: Optional[WebSocketClientProtocol] = None
        self.endpoint_uri = endpoint_uri
        # Extra keyword arguments forwarded to websockets.connect().
        self.websocket_kwargs = websocket_kwargs

    async def __aenter__(self) -> WebSocketClientProtocol:
        if self.ws is None:
            self.ws = await connect(uri=self.endpoint_uri, **self.websocket_kwargs)
        return self.ws

    async def __aexit__(
        self, exc_type: Type[BaseException], exc_val: BaseException, exc_tb
    ) -> None:
        # On error, drop the connection so the next use reconnects.
        if exc_val is not None:
            try:
                if self.ws is not None:
                    await self.ws.close()
            except Exception:  # pylint: disable=broad-except
                pass
            self.ws = None

    async def close(self):
        """Close the WebSocket connection"""
        if self.ws is not None:
            await self.ws.close()
            self.ws = None
class WebsocketTransport(TwoWayTransport):
    """Transport via WebSocket"""

    def __init__(self, websocket_uri: str, websocket_kwargs: Optional[Any] = None):
        super().__init__(websocket_uri)
        self.websocket_uri = websocket_uri
        # None means "no extra connect() options".
        if websocket_kwargs is None:
            websocket_kwargs = {}
        self.conn = PersistentWebSocket(websocket_uri, websocket_kwargs)

    async def send(self, data: bytes):
        """Forward raw bytes over the (lazily opened) WebSocket."""
        async with self.conn as ws:
            await ws.send(data)

    async def receive(self) -> bytes:
        """Wait for the next frame, normalising text frames to bytes."""
        async with self.conn as ws:
            self.listener.is_ready()
            frame = await ws.recv()
            return frame.encode() if isinstance(frame, str) else frame

    async def close(self) -> None:
        # Stop the listener first, then tear down the connection.
        await super().close()
        await self.conn.close()
class PersistentHTTPSession:
    """Helps HTTPTransport to establish a persistent HTTP session to the Web3 server"""

    def __init__(self):
        # The session is created lazily on first __aenter__.
        self.session: Optional[aiohttp.ClientSession] = None

    async def __aenter__(self) -> aiohttp.ClientSession:
        if self.session is None:
            self.session = aiohttp.ClientSession()
        return self.session

    async def __aexit__(
        self, exc_type: Type[BaseException], exc_val: BaseException, exc_tb
    ) -> None:
        # On error, drop the session so the next use starts a fresh one.
        if exc_val is not None:
            try:
                if self.session is not None:
                    await self.session.close()
            except Exception:  # pylint: disable=broad-except
                pass
            self.session = None

    async def close(self):
        """Close the HTTP session"""
        if self.session is not None:
            await self.session.close()
            self.session = None
class HTTPTransport(BaseTransport):
    """Transport via HTTP"""

    def __init__(self, http_uri: str):
        super().__init__(http_uri)
        self._http_uri = http_uri
        # Lazily-created aiohttp session reused across requests.
        self.session = PersistentHTTPSession()

    async def _send_request(self, request: RequestMessage) -> ResponseMessage:
        """POST one JSON-RPC request and parse the HTTP response body."""
        data = json.dumps(request.dict(), separators=(",", ":")).encode("utf-8")
        self.logger.debug("outbound: %s", data.decode())
        payload = BytesPayload(data, content_type="application/json")
        async with self.session as session:
            async with session.post(self._http_uri, data=payload) as resp:
                res = await resp.read()
        # HTTP is request/response only, so a notification is never expected.
        parsed = self._parse_message(res)
        assert isinstance(parsed, ResponseMessage)
        return parsed

    async def close(self):
        await self.session.close()
def get_transport(uri: str) -> BaseTransport:
    """Return the proper transport implementation based on uri

    ``ws(s)://`` selects WebSocket, ``http(s)://`` selects HTTP, and any
    other value is treated as a local IPC socket path.
    """
    if uri.startswith(("ws://", "wss://")):
        return WebsocketTransport(uri)
    if uri.startswith(("http://", "https://")):
        return HTTPTransport(uri)
    return IPCTransport(uri)
| desktable/aioweb3 | aioweb3/transport.py | transport.py | py | 15,686 | python | en | code | 0 | github-code | 13 |
def triangular_number_prompt():
    """Prompt the user for the ``n`` used in a triangular-number calculation.

    Re-prompts until a positive integer is entered.

    .. note::
        This function is designed to work in conjunction with
        triangular_number() from this same module.

    :return: the positive integer n chosen by the user.
    :rtype: int
    """
    print("A triangular number is the sum of the first n positive integers.")
    while True:
        raw = input("Enter a value for n: ").strip()
        try:
            value = int(raw)
        except ValueError:
            print(" n must be a positive integer.")
            continue
        if value >= 1:
            return value
        print(" n must be a positive integer.")
def triangular_number(n=None):
    """Calculate the triangular number (the sum 1 + 2 + ... + n) for n.

    For more information: https://en.wikipedia.org/wiki/Triangular_number

    :param n:
        If None, the user is prompted via triangular_number_prompt().
        A value that is not a positive int triggers an interactive offer
        to re-enter it; declining marks the call invalid.
    :type n: int or None
    :return:
        ``(triangular_number, n)`` on success, or ``(-1, n)`` when no
        valid n was obtained.
    :rtype: tuple[int, int]
    """
    if n is None:
        n = triangular_number_prompt()
    elif (type(n) is not int) or (n < 1):
        # Reject bools/floats/strings and non-positive ints, then offer a redo.
        print(f" {n} is not a valid, i.e., a positive integer.")
        while True:
            answer = input("Would you like to change n? (y/n): ").lower().strip()
            if answer in ("y", "yes"):
                n = triangular_number_prompt()
                break
            if answer in ("n", "no"):
                # -1 flags invalidity; n is echoed back for a uniform tuple.
                return -1, n
            print(f" {answer} is not a valid response.")
    # Accumulate 1..n one term at a time (per the assignment's spirit).
    total = 0
    for value in range(1, n + 1):
        total += value
    return total, n
def prime_number_checker(num=None):
    """Check if a number is prime.

    :param num:
        An integer to check for primality. If None, the user is prompted
        for an integer to check.
    :type num: int or None
    :except ValueError:
        If the given value for num cannot be converted to an int, the
        second and third elements of the return are marked False.
    :return:
        A tuple with three elements: the value assigned to num (converted
        to int when possible), a bool indicating whether it is an int, and
        a bool indicating whether it is prime.
    :rtype: tuple[any, bool, bool]
    """
    num_is_int = True
    num_is_prime = True
    if num is None:
        num = input("Enter an integer to check if it a prime number: ")\
            .strip()
    try:
        num = int(num)
    except ValueError:
        num_is_int, num_is_prime = False, False
    else:
        if num <= 1:
            num_is_prime = False
        else:
            # PERF: trial division only needs candidates up to sqrt(num) --
            # any composite has a divisor no larger than its square root.
            # (Previously the loop ran all the way to num - 1.)
            i = 2
            while i * i <= num:
                if num % i == 0:
                    num_is_prime = False
                    break
                i += 1
    return num, num_is_int, num_is_prime
def odds_between_two_integers():
    """Identify the odd integers between two user-defined integers.

    Prompts for an integer, then for a greater integer, and collects the
    odd integers strictly between them.

    :except ValueError:
        Results in a return message indicating that integers are the
        only valid inputs.
    :return:
        A list of the odd integers between the two inputs; or, when the
        inputs are invalid or no odd integers exist between them, a list
        containing a single explanatory str.
    """
    odd_nums = []
    first_num = input("Enter an integer: ").strip()
    try:
        first_num = int(first_num)
    except ValueError:
        odd_nums.append("Integers are the only valid inputs for this "
                        "function.")
    else:
        second_num = input("Enter a greater integer: ").strip()
        try:
            second_num = int(second_num)
        except ValueError:
            odd_nums.append("Integers are the only valid inputs for this "
                            "function.")
        else:
            if first_num >= second_num:
                odd_nums.append("The second integer must be greater than the "
                                "first.")
            else:
                # Collect the odd values strictly between the two inputs.
                odd_nums = [num for num in range(first_num + 1, second_num)
                            if num % 2 == 1]
    if len(odd_nums) == 0:
        # BUG FIX: this message previously read "There are odd integers
        # ... andthe second integer." -- missing the word "no" and the
        # space between "and" and "the".
        odd_nums.append("There are no odd integers between the first integer "
                        "and the second integer.")
    return odd_nums
def print_this_string():
    """Print the characters of "CSCI161L" that precede the first "1".

    Iterates the fixed string one character per line and stops (via the
    assignment-mandated ``break``) as soon as "1" is reached.

    :return: None
    """
    for char in "CSCI161L":
        if char == "1":
            break
        print(char)
def print_these_numbers():
    """Print the numbers 1 to 20 inclusive, skipping 5, 10, and 15.

    Multiples of 5 are skipped with the assignment-mandated ``continue``,
    except 20 itself, which is still printed.

    :return: None
    """
    for num in range(1, 21):
        if num % 5 == 0 and num != 20:
            continue
        print(num)
def main():
    """Run the five assignment tasks in order, printing labelled output."""
    print("TASK 1")
    tri_num, n = triangular_number()
    # -1 is the sentinel triangular_number() returns for an invalid n.
    if tri_num == -1:
        print("No valid value given for n. Triangular number not calculated.")
    else:
        print(f"{tri_num} is the triangular number for the first {n} "
              f"positive integers.")
    print("\nTASK 2")
    num_to_check, num_is_int, num_is_prime = prime_number_checker()
    if not num_is_int:
        print(f"{num_to_check} is not an integer, thus, it is not a prime "
              f"number.")
    elif num_is_prime:
        print(f"{num_to_check} is a prime number.")
    else:
        print(f"{num_to_check} is NOT a prime number.")
    print("\nTASK 3")
    # for my money, a for loop would be better than a while loop
    # odds = odds_between_two_integers()
    # for i in odds:
    #     print(i)
    odds = odds_between_two_integers()
    # working copy implemented should original data need to be preserved
    working_copy_odds = odds.copy()
    len_of_loop = len(working_copy_odds)
    # the while loop specified by the assignment: print-and-consume the copy
    while len_of_loop > 0:
        print(working_copy_odds[0])
        del working_copy_odds[0]
        len_of_loop -= 1
    print("\nTASK 4")
    print_this_string()
    print("\nTASK 5")
    print_these_numbers()


if __name__ == "__main__":
    main()

# All work and no play makes Jack a dull boy.
| smallpythoncode/csci161 | assignments/assignment03/jahnke_kenneth_3.py | jahnke_kenneth_3.py | py | 9,692 | python | en | code | 0 | github-code | 13 |
42135326136 | import signal
import sys
import ssl
from SimpleWebSocketServer import WebSocket, SimpleWebSocketServer, SimpleSSLWebSocketServer
from optparse import OptionParser
import json
clients = []
class SimpleEcho(WebSocket):
    """WebSocket handler relaying each incoming message to all other clients."""

    def handleMessage(self):
        """Broadcast the received payload to every connected client except the sender."""
        tab_data = json.loads(self.data)
        # import ipdb; ipdb.set_trace();
        print(tab_data)
        for client in clients:
            # BUG FIX: was `client.__dict__ != self.__dict__`, which compares
            # state for equality and would also skip a *different* client whose
            # attributes happened to match. Identity is the correct
            # "not the sender" test.
            if client is not self:
                client.sendMessage(self.data)

    def handleConnected(self):
        """Register a newly connected client."""
        print("{0} connected!".format(self.address))
        clients.append(self)

    def handleClose(self):
        """Deregister a client when it disconnects."""
        clients.remove(self)
if __name__ == "__main__":
    # Command-line options for host/port and (declared but see note) SSL.
    parser = OptionParser(usage="usage: %prog [options]", version="%prog 1.0")
    parser.add_option("--host", default='', type='string', action="store", dest="host", help="hostname (localhost)")
    parser.add_option("--port", default=8000, type='int', action="store", dest="port", help="port (8000)")
    parser.add_option("--example", default='echo', type='string', action="store", dest="example", help="echo, chat")
    parser.add_option("--ssl", default=0, type='int', action="store", dest="ssl", help="ssl (1: on, 0: off (default))")
    parser.add_option("--cert", default='./cert.pem', type='string', action="store", dest="cert", help="cert (./cert.pem)")
    parser.add_option("--ver", default=ssl.PROTOCOL_TLSv1, type=int, action="store", dest="ver", help="ssl version")

    (options, args) = parser.parse_args()

    cls = SimpleEcho
    # NOTE(review): the --ssl/--cert/--ver options are parsed but never
    # consulted -- SimpleWebSocketServer (plain) is always used here.
    server = SimpleWebSocketServer(options.host, options.port, cls)

    def close_sig_handler(signal, frame):
        # Shut the server down cleanly on Ctrl-C.
        server.close()
        sys.exit()

    signal.signal(signal.SIGINT, close_sig_handler)

    server.serveforever()
| domspad/synchropazzo | synchropazzo_server.py | synchropazzo_server.py | py | 1,731 | python | en | code | 1 | github-code | 13 |
6576240536 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import networkx as nx
import matplotlib.pyplot as plt
from graph_utility import *
#-------------------------------------------------------------------------------
def plot_degree_dist (graph, path):
    """Plot log-log degree distribution of the graph and save the figure
    at the given path. On X-axis we have degrees and on Y-axis we have
    the percentage of nodes that have that degree"""
    # NOTE(review): assumes networkx 1.x, where graph.degree() returns a
    # dict of node -> degree; on networkx >= 2 this is a DegreeView and
    # the lookups below would need adapting -- confirm the pinned version.
    node_to_degree = graph.degree()
    N = float(graph.order())
    degree_to_percent = {}

    # calculate percentages of nodes with certain degree
    for node in node_to_degree:
        degree_to_percent[node_to_degree[node]] = 1 + degree_to_percent.get(node_to_degree[node], 0)
    for degree in degree_to_percent:
        degree_to_percent[degree] = degree_to_percent[degree] / N * 100

    # Degrees on x, percentage of nodes on y, both on log axes.
    x = sorted(degree_to_percent.keys(), reverse = True)
    y = [degree_to_percent[i] for i in x]

    plt.loglog(x, y, 'b-', marker = '.')
    plt.title("Degree Distribution")
    plt.ylabel("Log percentage")
    plt.xlabel("Log degree")
    plt.axis('tight')
    plt.savefig(path)
#-------------------------------------------------------------------------------
def plot_clustering_spectrum (graph, path):
    """Plot the clustering spectrum of the graph and save the figure
    at the given path. On X-axis we have degrees and on Y-axis we have
    average clustering coefficients of the nodes that have that degree"""
    # NOTE(review): assumes networkx 1.x dict-style return values from
    # graph.degree() and nx.clustering() -- confirm before running on nx >= 2.
    node_to_degree = graph.degree()
    node_to_clustering = nx.clustering(graph)
    degree_to_clustering = {}

    # calculate average clustering coefficients for nodes with certain degree
    for node in node_to_degree:
        deg = node_to_degree[node]
        tmp = degree_to_clustering.get(deg, [])
        tmp.append(node_to_clustering[node])
        degree_to_clustering[deg] = tmp
    # Reduce each degree's list of coefficients to its mean.
    for degree in degree_to_clustering:
        tmp = degree_to_clustering[degree]
        degree_to_clustering[degree] = float(sum(tmp)) / len(tmp)

    x = sorted(degree_to_clustering.keys(), reverse = True)
    y = [degree_to_clustering[i] for i in x]

    plt.loglog(x, y, 'b-', marker = '.')
    plt.title("Clustering Spectrum")
    plt.ylabel("Average clustering coefficient")
    plt.xlabel("Degree")
    plt.axis('tight')
    plt.savefig(path)
#-------------------------------------------------------------------------------
def plot_shortest_path_spectrum (graph, path, paths_data):
    """Plot distribution of shortest paths of the graph and save the figure
    at the given path. On X-axis we have distance values and on Y-axis we
    have percentage of node pairs that have that distance value"""
    # NOTE(review): Python 2 code (xrange). Distances are read from eight
    # sharded files '<paths_data>_0' .. '<paths_data>_7'; each line appears
    # to be "node1 node2 distance" -- confirm the file format.
    diameter = graph_diameter(paths_data)
    # Number of unordered node pairs, used to normalise counts to percent.
    pairs = graph.order() * (graph.order()-1) * 0.5
    distances_count = [0 for i in xrange(diameter + 1)]

    for i in xrange(8):
        with open('%s_%d' % (paths_data, i), 'r') as in_file:
            for line in in_file:
                tokens = line.split()
                distances_count[int(tokens[2])] += 1
    for i in xrange(diameter + 1):
        distances_count[i] *= (100.0 / pairs)

    y = distances_count
    plt.loglog(y, 'b-', marker = '.')
    plt.title("Shortest Paths Spectrum")
    plt.ylabel("Percent of pairs")
    plt.xlabel("Distance")
    plt.axis('tight')
    plt.savefig(path)
#-------------------------------------------------------------------------------
def plot_closeness_dist (graph, path):
    """Plot distribution of closeness centrality of the graph and save the figure
    at the given path. On X-axis we have closeness centrality values and on
    Y-axis we have percentage of the nodes that have that closeness value"""
    N = float(graph.order())
    node_to_closeness = nx.closeness_centrality(graph)
    closeness_to_percent = {}

    # calculate percentages of nodes with certain closeness value
    for node in node_to_closeness:
        closeness_to_percent[node_to_closeness[node]] = 1 + \
            closeness_to_percent.get(node_to_closeness[node], 0)
    for c in closeness_to_percent:
        closeness_to_percent[c] = closeness_to_percent[c] / N * 100

    x = sorted(closeness_to_percent.keys(), reverse = True)
    y = [closeness_to_percent[i] for i in x]

    plt.loglog(x, y, 'b-', marker = '.')
    plt.title("Closeness Centrality Distribution")
    plt.ylabel("Percentage")
    plt.xlabel("Closeness value")
    plt.axis('tight')
    plt.savefig(path)
#-------------------------------------------------------------------------------
def plot_betweenness_dist (graph, path):
    """Plot distribution of betweenness centrality of the graph and save the figure
    at the given path. On X-axis we have betweenness centrality values and on
    Y-axis we have percentage of the nodes that have that betweenness value."""
    # NOTE(review): the original docstring mentioned a sampling parameter k,
    # but no such parameter exists -- the exact (unsampled) centrality is
    # computed here.
    N = float(graph.order())
    node_to_betweenness = nx.betweenness_centrality(graph)
    betweenness_to_percent = {}

    # calculate percentages of nodes with certain betweeness value
    for node in node_to_betweenness:
        betweenness_to_percent[node_to_betweenness[node]] = 1 + \
            betweenness_to_percent.get(node_to_betweenness[node], 0)
    for c in betweenness_to_percent:
        betweenness_to_percent[c] = betweenness_to_percent[c] / N * 100

    x = sorted(betweenness_to_percent.keys(), reverse = True)
    y = [betweenness_to_percent[i] for i in x]

    plt.loglog(x, y, 'b-', marker = '.')
    plt.title("Betweenness Centrality Distribution")
    plt.ylabel("Percentage")
    plt.xlabel("Betweenness value")
    plt.axis('tight')
    plt.savefig(path)
#-------------------------------------------------------------------------------
def plot_proteins_sharing_function(id_to_protein, \
        annotation_file, distance_file, path):
    """Plot histogram of proteins sharing at least one common function depending
    on the distance between them and save the figure at the given path.
    On X-axis we have the distance and on Y-axis we have percentage of pairs
    that have at least one common function.

    id_to_protein: dictionary where each node in the graph maps to a protein
    annotation_file: path to the file that cointains proteins and their functions
    distance_file: path to the file that contains shortest paths between the nodes"""
    # read_in_annotations() and common_elements() come from graph_utility.
    protein_to_functions = read_in_annotations(annotation_file)
    distance_to_count = {}
    distance_to_common = {}
    # Distances are sharded across 8 files '<distance_file>_0'.._7.
    for i in xrange(8):
        with open('%s_%d' % (distance_file, i), 'r') as in_file:
            for line in in_file:
                tokens = line.split()
                p1 = id_to_protein[int(tokens[0])]
                p2 = id_to_protein[int(tokens[1])]
                d = int(tokens[2])
                distance_to_count[d] = 1 + distance_to_count.get(d, 0)
                # Count the pair only when both proteins are annotated and
                # share at least one function.
                if p1 in protein_to_functions and \
                   p2 in protein_to_functions and \
                   common_elements(protein_to_functions[p1], protein_to_functions[p2]):
                    distance_to_common[d] = 1 + distance_to_common.get(d, 0)

    # Normalise each distance bucket to a percentage of all pairs at it.
    for d in distance_to_common:
        distance_to_common[d] *= (100.0 / distance_to_count[d])

    # Plotting
    diameter = graph_diameter(distance_file)
    x = range(0, diameter + 1)
    y = [distance_to_common.get(i, 0) for i in x]
    plt.bar(x, y, width = 1, color = 'b')
    plt.title("Proteins sharing common functions\n depending on the distance between them")
    plt.ylabel("Percent of pairs sharing common functions")
    plt.xlabel("Distance")
    plt.axis('tight')
    plt.savefig(path)
#-------------------------------------------------------------------------------
def plot_function_first_appearance(id_to_protein, annotation_file, \
        distance_file, path, diameter):
    """ Plot histogram of percentage of function annotations of a protein,
    appearing for the first time in other proteins of distance k from the
    given protein. Save the histogram at the given path.
    On X-axis we have the distance and on Y-axis we have normalized number
    of function appearances at distance d.

    id_to_protein: dictionary where each node in the graph maps to a protein
    annotation_file: path to the file that cointains proteins and their functions
    distance_file: path to the file that contains shortest paths between the nodes
    diameter: the diameter of the graph
    """
    protein_to_functions = read_in_annotations(annotation_file)
    distance_to_appearance = {}
    with open(distance_file, 'r') as in_file:
        for line in in_file:
            tokens = line.split()
            p1 = int(tokens[0])
            pp1 = id_to_protein[p1]
            p2 = int(tokens[1])
            pp2 = id_to_protein[p2]
            d = int(tokens[2])
            if pp1 in protein_to_functions and pp2 in protein_to_functions:
                # A shared function is credited to this distance and then
                # removed from both proteins so it is only counted once.
                # NOTE(review): "first appearance" therefore depends on the
                # order of lines in distance_file -- presumably sorted by
                # ascending distance; confirm with the file's producer.
                intersection = protein_to_functions[pp1].intersection(protein_to_functions[pp2])
                protein_to_functions[pp1].difference_update(intersection)
                protein_to_functions[pp2].difference_update(intersection)
                distance_to_appearance[d] = distance_to_appearance.get(d, 0) + \
                    (2 * len(intersection))

    normalizer = float(sum(distance_to_appearance.values()))
    # Plotting
    x = range(0, diameter + 1)
    y = [distance_to_appearance.get(i, 0) / normalizer for i in x]
    plt.bar(x, y, width = 1, color = 'b')
    plt.title("Number of functions first appearing at given distance")
    plt.ylabel("Normalized number of functions")
    plt.xlabel("Distance")
    plt.axis('tight')
    plt.savefig(path)
73395768978 | import numpy as np
import matplotlib.pyplot as plt
import cv2
import glob
# Camera calibration from chessboard images, followed by two
# distortion-correction variants (comments translated to English).

# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Width of one chessboard square, in mm
square_size = 27.5
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((9 * 6, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2) * square_size
# objp[:,2:3]=10
# Arrays to store object points and image points from all the images.
objpoints = []  # 3d point in real world space
imgpoints = []  # 2d points in image plane.
images = glob.glob('./frame_name/*.png')
index = 0
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
    print(ret)
    # If found, add object points, image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners2)
        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (9, 6), corners2, ret)
        cv2.imshow('img' + str(index), img)
        index = index + 1

ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
print(ret)
print(mtx)
print(dist)
print(len(rvecs), rvecs[-1])
print(len(tvecs), tvecs[-1])

# Reprojection error: project the object points back through the estimated
# parameters and compare against the detected corners.
tot_error = 0
for i in range(len(imgpoints)):
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
    error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
    tot_error += error
average_error = (tot_error / len(imgpoints)) ** 0.5
print(ret, average_error)

# Distortion-correction section, method 1: cv2.undistort with ROI cropping
# (original comment was in Chinese)
for fname in images:
    img = cv2.imread(fname)
    rows, cols = img.shape[:2]
    newcamera_mtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (cols, rows), 0)
    img_undistort = cv2.undistort(img, mtx, dist, None, newcamera_mtx)
    x, y, cols, rows = roi
    img_undistort = img_undistort[y:y + rows, x:x + cols]
    print(roi)
    plt.subplot(121), plt.imshow(img), plt.title("img")
    plt.subplot(122), plt.imshow(img_undistort), plt.title("img_undistort")
    plt.show()

# Distortion-correction section, method 2: remap via initUndistortRectifyMap
# (original comment was in Chinese)
for fname in images:
    img = cv2.imread(fname)
    rows, cols = img.shape[:2]
    newcamera_mtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (cols, rows), 1)
    img_undistort = cv2.undistort(img, mtx, dist, None)
    map_x, map_y = cv2.initUndistortRectifyMap(mtx, dist, None, newcamera_mtx, (cols, rows), 5)
    img_undistort = cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR)
    print(map_x.shape)
    print(map_y)
    plt.subplot(121), plt.imshow(img), plt.title("img")
    plt.subplot(122), plt.imshow(img_undistort), plt.title("img_undistort")
    plt.show()

# Keep windows open up to 60 s; press 'q' to close them.
if cv2.waitKey(1000 * 60) & 0xFF == ord('q'):
    cv2.destroyAllWindows()
| lijian103/demo_py_opencv | CalibarateCamera.py | CalibarateCamera.py | py | 2,993 | python | en | code | 6 | github-code | 13 |
32799121801 | import json
import os
from datetime import datetime
import mock
import pytest
from dallinger.experiment import Experiment
from dallinger.models import Participant
from dallinger.mturk import MTurkQualificationRequirements, MTurkQuestions
class TestModuleFunctions(object):
    """Tests for the module-level helpers in dallinger.recruiters."""

    @pytest.fixture
    def mod(self):
        # Imported lazily so configuration fixtures take effect first.
        from dallinger import recruiters

        return recruiters

    def test__get_queue(self, mod):
        from rq import Queue

        assert isinstance(mod._get_queue(), Queue)

    def test_for_experiment(self, mod):
        # for_experiment() should hand back the experiment's own recruiter.
        mock_exp = mock.MagicMock(spec=Experiment)
        mock_exp.recruiter = mock.sentinel.some_object
        assert mod.for_experiment(mock_exp) is mock_exp.recruiter

    def test_by_name_with_valid_name(self, mod):
        assert isinstance(mod.by_name("CLIRecruiter"), mod.CLIRecruiter)

    def test_by_name_with_valid_nickname(self, mod):
        assert isinstance(mod.by_name("bots"), mod.BotRecruiter)

    def test_by_name_with_invalid_name(self, mod):
        assert mod.by_name("blah") is None

    def test_for_debug_mode(self, mod, stub_config):
        # Debug mode (the stub default) selects HotAirRecruiter.
        r = mod.from_config(stub_config)
        assert isinstance(r, mod.HotAirRecruiter)

    def test_recruiter_config_value_used_if_not_debug(self, mod, stub_config):
        stub_config.extend({"mode": "sandbox", "recruiter": "CLIRecruiter"})
        r = mod.from_config(stub_config)
        assert isinstance(r, mod.CLIRecruiter)

    def test_debug_mode_trumps_recruiter_config_value(self, mod, stub_config):
        stub_config.extend({"recruiter": "CLIRecruiter"})
        r = mod.from_config(stub_config)
        assert isinstance(r, mod.HotAirRecruiter)

    def test_bot_recruiter_trumps_debug_mode(self, mod, stub_config):
        stub_config.extend({"recruiter": "bots"})
        r = mod.from_config(stub_config)
        assert isinstance(r, mod.BotRecruiter)

    def test_default_is_mturk_recruiter_if_not_debug(self, mod, active_config):
        active_config.extend({"mode": "sandbox"})
        r = mod.from_config(active_config)
        assert isinstance(r, mod.MTurkRecruiter)

    def test_replay_setting_dictates_recruiter(self, mod, active_config):
        active_config.extend(
            {"replay": True, "mode": "sandbox", "recruiter": "CLIRecruiter"}
        )
        r = mod.from_config(active_config)
        assert isinstance(r, mod.HotAirRecruiter)

    def test_unknown_recruiter_name_raises(self, mod, stub_config):
        stub_config.extend({"mode": "sandbox", "recruiter": "bogus"})
        with pytest.raises(NotImplementedError):
            mod.from_config(stub_config)
class TestRecruiter(object):
    """Tests for the abstract Recruiter base class's default behaviour."""

    @pytest.fixture
    def recruiter(self):
        from dallinger.recruiters import Recruiter

        return Recruiter()

    def test_open_recruitment(self, recruiter):
        with pytest.raises(NotImplementedError):
            recruiter.open_recruitment()

    def test_recruit(self, recruiter):
        with pytest.raises(NotImplementedError):
            recruiter.recruit()

    def test_close_recruitment(self, recruiter):
        with pytest.raises(NotImplementedError):
            recruiter.close_recruitment()

    def test_compensate_worker(self, recruiter):
        with pytest.raises(NotImplementedError):
            recruiter.compensate_worker()

    def test_reward_bonus(self, recruiter):
        with pytest.raises(NotImplementedError):
            recruiter.reward_bonus(None, 0.01, "You're great!")

    def test_external_submission_url(self, recruiter):
        assert recruiter.external_submission_url is None

    def test_rejects_questionnaire_from_returns_none(self, recruiter):
        dummy = mock.NonCallableMock()
        assert recruiter.rejects_questionnaire_from(participant=dummy) is None

    def test_notify_duration_exceeded_logs_only(self, recruiter):
        # Base implementation is a no-op aside from logging.
        recruiter.notify_duration_exceeded(participants=[], reference_time=None)

    def test_backward_compat(self, recruiter):
        # Calling a recruiter instance returns itself (legacy interface).
        assert recruiter() is recruiter

    def test_normalize_entry_information(self, recruiter):
        # camelCase keys are mapped to snake_case; unknown keys are folded
        # into "entry_information".
        normalized = recruiter.normalize_entry_information(
            {"assignmentId": "A", "workerId": "W", "hitId": "H", "extra_info": "E"}
        )
        assert normalized == {
            "assignment_id": "A",
            "worker_id": "W",
            "hit_id": "H",
            "entry_information": {"extra_info": "E"},
        }

        normalized = recruiter.normalize_entry_information(
            {"assignment_id": "A", "worker_id": "W", "hit_id": "H"}
        )
        assert normalized == {
            "assignment_id": "A",
            "worker_id": "W",
            "hit_id": "H",
        }
@pytest.mark.usefixtures("active_config")
class TestCLIRecruiter(object):
@pytest.fixture
def recruiter(self):
from dallinger.recruiters import CLIRecruiter
yield CLIRecruiter()
def test_recruit_recruits_one_by_default(self, recruiter):
result = recruiter.recruit()
assert len(result) == 1
def test_recruit_results_are_urls(self, recruiter):
assert "/ad?recruiter=cli&assignmentId=" in recruiter.recruit()[0]
def test_recruit_multiple(self, recruiter):
assert len(recruiter.recruit(n=3)) == 3
def test_open_recruitment_recruits_one_by_default(self, recruiter):
result = recruiter.open_recruitment()
assert len(result["items"]) == 1
def test_open_recruitment_describes_how_it_works(self, recruiter):
result = recruiter.open_recruitment()
assert 'Search for "New participant requested:"' in result["message"]
def test_open_recruitment_multiple(self, recruiter):
result = recruiter.open_recruitment(n=3)
assert len(result["items"]) == 3
def test_open_recruitment_results_are_urls(self, recruiter):
result = recruiter.open_recruitment()
assert "/ad?recruiter=cli&assignmentId=" in result["items"][0]
def test_open_recruitment_with_zero(self, recruiter):
result = recruiter.open_recruitment(n=0)
assert result["items"] == []
def test_close_recruitment(self, recruiter):
recruiter.close_recruitment()
def test_approve_hit(self, recruiter):
assert recruiter.approve_hit("any assignment id")
def test_reward_bonus(self, a, recruiter):
p = a.participant()
recruiter.reward_bonus(p, 0.01, "You're great!")
def test_open_recruitment_uses_configured_mode(self, recruiter, active_config):
active_config.extend({"mode": "new_mode"})
result = recruiter.open_recruitment()
assert "mode=new_mode" in result["items"][0]
def test_returns_standard_submission_event_type(self, recruiter):
assert recruiter.on_completion_event() == "AssignmentSubmitted"
@pytest.mark.usefixtures("active_config")
class TestHotAirRecruiter(object):
@pytest.fixture
def recruiter(self):
from dallinger.recruiters import HotAirRecruiter
yield HotAirRecruiter()
def test_recruit_recruits_one_by_default(self, recruiter):
result = recruiter.recruit()
assert len(result) == 1
def test_recruit_results_are_urls(self, recruiter):
assert "/ad?recruiter=hotair&assignmentId=" in recruiter.recruit()[0]
def test_recruit_multiple(self, recruiter):
assert len(recruiter.recruit(n=3)) == 3
def test_open_recruitment_recruits_one_by_default(self, recruiter):
result = recruiter.open_recruitment()
assert len(result["items"]) == 1
def test_open_recruitment_describes_how_it_works(self, recruiter):
result = recruiter.open_recruitment()
assert "requests will open browser windows" in result["message"]
def test_open_recruitment_multiple(self, recruiter):
result = recruiter.open_recruitment(n=3)
assert len(result["items"]) == 3
def test_open_recruitment_results_are_urls(self, recruiter):
result = recruiter.open_recruitment()
assert "/ad?recruiter=hotair&assignmentId=" in result["items"][0]
def test_close_recruitment(self, recruiter):
recruiter.close_recruitment()
def test_approve_hit(self, recruiter):
assert recruiter.approve_hit("any assignment id")
def test_reward_bonus(self, a, recruiter):
recruiter.reward_bonus(a.participant(), 0.01, "You're great!")
def test_open_recruitment_ignores_configured_mode(self, recruiter, active_config):
active_config.extend({"mode": "new_mode"})
result = recruiter.open_recruitment()
assert "mode=debug" in result["items"][0]
def test_returns_standard_submission_event_type(self, recruiter):
assert recruiter.on_completion_event() == "AssignmentSubmitted"
class TestSimulatedRecruiter(object):
    """Tests for the SimulatedRecruiter, which recruits no one."""

    @pytest.fixture
    def recruiter(self):
        from dallinger.recruiters import SimulatedRecruiter

        return SimulatedRecruiter()

    def test_recruit_returns_empty_result(self, recruiter):
        assert recruiter.recruit() == []

    def test_recruit_multiple_returns_empty_result(self, recruiter):
        assert recruiter.recruit(n=3) == []

    def test_open_recruitment_returns_empty_result(self, recruiter):
        assert recruiter.open_recruitment()["items"] == []

    def test_open_recruitment_multiple_returns_empty_result(self, recruiter):
        assert recruiter.open_recruitment(n=3)["items"] == []

    def test_returns_standard_submission_event_type(self, recruiter):
        assert recruiter.on_completion_event() == "AssignmentSubmitted"

    def test_close_recruitment(self, recruiter):
        assert recruiter.close_recruitment() is None
class TestBotRecruiter(object):
    """Tests for the BotRecruiter, which runs bot participants locally."""

    @pytest.fixture
    def recruiter(self):
        """A BotRecruiter with its queue, base URL, and bot factory mocked."""
        from dallinger.recruiters import BotRecruiter

        with mock.patch.multiple(
            "dallinger.recruiters", _get_queue=mock.DEFAULT, get_base_url=mock.DEFAULT
        ) as mocks:
            mocks["get_base_url"].return_value = "fake_base_url"
            r = BotRecruiter()
            r._get_bot_factory = mock.Mock()
            yield r

    def test_recruit_returns_list(self, recruiter):
        result = recruiter.recruit(n=2)
        assert len(result) == 2

    def test_recruit_returns_urls(self, recruiter):
        result = recruiter.recruit()
        assert result[0].startswith("fake_base_url")

    def test_open_recruitment_returns_list(self, recruiter):
        result = recruiter.open_recruitment(n=2)
        assert len(result["items"]) == 2

    def test_open_recruitment_returns_urls(self, recruiter):
        result = recruiter.open_recruitment()
        assert result["items"][0].startswith("fake_base_url")

    def test_open_recruitment_describes_how_it_works(self, recruiter):
        # "Mock" is the name of the mocked bot factory class.
        result = recruiter.open_recruitment()
        assert "recruitment started using Mock" in result["message"]

    def test_close_recruitment(self, recruiter):
        # Should be a no-op and not raise.
        recruiter.close_recruitment()

    def test_approve_hit(self, recruiter):
        assert recruiter.approve_hit("any assignment id")

    def test_reward_bonus(self, a, recruiter):
        # Should not raise even though bots earn no real money.
        recruiter.reward_bonus(a.participant(), 0.01, "You're great!")

    def test_returns_specific_submission_event_type(self, recruiter):
        assert recruiter.on_completion_event() == "BotAssignmentSubmitted"

    def test_notify_duration_exceeded_rejects_participants(self, a, recruiter):
        """Bots that exceed the allowed duration are rejected outright."""
        bot = a.participant(recruiter_id="bots")
        recruiter.notify_duration_exceeded([bot], datetime.now())
        assert bot.status == "rejected"
@pytest.fixture
def notifies_admin():
    """An autospecced mock standing in for NotifiesAdmin."""
    from dallinger.notifications import NotifiesAdmin

    yield mock.create_autospec(NotifiesAdmin)
@pytest.fixture
def mailer():
    """An autospecced mock standing in for the SMTP mailer."""
    from dallinger.notifications import SMTPMailer

    yield mock.create_autospec(SMTPMailer)
@pytest.fixture
def prolific_config(active_config):
    """The active configuration extended with Prolific-specific settings."""
    prolific_extensions = {
        "prolific_api_token": "fake Prolific API token",
        "prolific_api_version": "v1",
        "prolific_estimated_completion_minutes": 5,
        "prolific_reward_cents": 10,
        # Extra study parameters are passed as a JSON-encoded blob.
        "prolific_recruitment_config": json.dumps(
            {"peripheral_requirements": ["audio", "microphone"]}
        ),
    }
    active_config.extend(prolific_extensions)
    return active_config
@pytest.fixture
def prolificservice(prolific_config, fake_parsed_prolific_study):
    """An autospecced ProlificService returning a canned parsed study."""
    from dallinger.prolific import ProlificService

    service = mock.create_autospec(
        ProlificService,
        api_token=prolific_config.get("prolific_api_token"),
        api_version=prolific_config.get("prolific_api_version"),
    )
    service.published_study.return_value = fake_parsed_prolific_study
    service.add_participants_to_study.return_value = fake_parsed_prolific_study
    return service
@pytest.mark.usefixtures("prolific_config")
class TestProlificRecruiter(object):
@pytest.fixture
def recruiter(self, mailer, notifies_admin, prolificservice, hit_id_store):
from dallinger.recruiters import ProlificRecruiter
with mock.patch.multiple(
"dallinger.recruiters", os=mock.DEFAULT, get_base_url=mock.DEFAULT
) as mocks:
mocks["get_base_url"].return_value = "http://fake-domain"
mocks["os"].getenv.return_value = "fake-host-domain"
r = ProlificRecruiter(store=hit_id_store)
r.notifies_admin = notifies_admin
r.mailer = mailer
r.prolificservice = prolificservice
return r
def test_open_recruitment_with_valid_request(self, recruiter):
result = recruiter.open_recruitment(n=5)
assert result["message"] == "Study now published on Prolific"
def test_open_recruitment_raises_if_study_already_in_progress(self, recruiter):
from dallinger.recruiters import ProlificRecruiterException
recruiter.open_recruitment()
with pytest.raises(ProlificRecruiterException):
recruiter.open_recruitment()
def test_open_recruitment_raises_if_running_on_localhost(self, recruiter):
from dallinger.recruiters import ProlificRecruiterException
recruiter.study_domain = None
with pytest.raises(ProlificRecruiterException) as ex_info:
recruiter.open_recruitment(n=1)
assert ex_info.match("Can't run a Prolific Study from localhost")
def test_normalize_entry_information_standardizes_participant_data(self, recruiter):
prolific_format = {
"STUDY_ID": "some study ID",
"PROLIFIC_PID": "some worker ID",
"SESSION_ID": "some session ID",
}
dallinger_format = recruiter.normalize_entry_information(prolific_format)
assert dallinger_format == {
"hit_id": "some study ID",
"worker_id": "some worker ID",
"assignment_id": "some session ID",
"entry_information": prolific_format,
}
def test_defers_assignment_submission_via_null_on_completion_event(self, recruiter):
assert recruiter.on_completion_event() is None
@pytest.mark.usefixtures("experiment_dir_merged")
def test_exit_page_includes_submission_prolific_button(self, a, webapp, recruiter):
p = a.participant(recruiter_id="prolific")
response = webapp.get(f"/recruiter-exit?participant_id={p.id}")
assert recruiter.external_submission_url in response.data.decode("utf-8")
def test_reward_bonus_passes_only_whats_needed(self, a, recruiter):
participant = a.participant(assignment_id="some assignement")
recruiter.reward_bonus(
participant=participant,
amount=2.99,
reason="well done!",
)
recruiter.prolificservice.pay_session_bonus.assert_called_once_with(
study_id=recruiter.current_study_id,
worker_id=participant.worker_id,
amount=2.99,
)
def test_reward_bonus_logs_exception(self, a, recruiter):
from dallinger.prolific import ProlificServiceException
recruiter.prolificservice.pay_session_bonus.side_effect = (
ProlificServiceException("Boom!")
)
with mock.patch("dallinger.recruiters.logger") as mock_logger:
recruiter.reward_bonus(
participant=a.participant(),
amount=2.99,
reason="well done!",
)
mock_logger.exception.assert_called_once_with("Boom!")
def test_approve_hit(self, recruiter):
fake_id = "fake assignment id"
recruiter.approve_hit(fake_id)
recruiter.prolificservice.approve_participant_session.assert_called_once_with(
session_id=fake_id
)
def test_approve_hit_logs_exception(self, recruiter):
from dallinger.prolific import ProlificServiceException
recruiter.prolificservice.approve_participant_session.side_effect = (
ProlificServiceException("Boom!")
)
with mock.patch("dallinger.recruiters.logger") as mock_logger:
recruiter.approve_hit("fake-hit-id")
mock_logger.exception.assert_called_once_with("Boom!")
def test_recruit_calls_add_participants_to_study(self, recruiter):
recruiter.open_recruitment()
recruiter.recruit(n=1)
recruiter.prolificservice.add_participants_to_study.assert_called_once_with(
study_id="abcdefghijklmnopqrstuvwx", number_to_add=1
)
def test_submission_listener_enqueues_assignment_submitted_notification(
self, queue, webapp
):
exit_form_submission = {
"assignmentId": "some assignment ID",
"participantId": "some participant ID",
"somethingElse": "blah... whatever",
}
response = webapp.post(
"/prolific-submission-listener", data=exit_form_submission
)
assert response.status_code == 200
queue.enqueue.assert_called_once_with(
mock.ANY, "AssignmentSubmitted", "some assignment ID", "some participant ID"
),
def test_clean_qualification_attributes(self, recruiter):
json_path = os.path.join(
os.path.dirname(__file__), "datasets", "example_prolific_details.json"
)
with open(json_path, "r") as f:
details = json.load(f)
cleaned_details = recruiter.clean_qualification_attributes(details)
assert details.keys() == cleaned_details.keys(), "Keys should be the same"
requirements = cleaned_details["eligibility_requirements"]
assert requirements == [
{
"type": "select",
"attributes": [
{"label": "Spain", "name": "Spain", "value": True, "index": 5}
],
"query": {
"id": "54bef0fafdf99b15608c504e",
"title": "Current Country of Residence",
},
"_cls": "web.eligibility.models.SelectAnswerEligibilityRequirement",
},
{
"type": "select",
"attributes": [
{"label": "Spain", "name": "Spain", "value": True, "index": 5}
],
"query": {"id": "54ac6ea9fdf99b2204feb896", "title": "Nationality"},
"_cls": "web.eligibility.models.SelectAnswerEligibilityRequirement",
},
{
"type": "select",
"attributes": [
{"label": "Spain", "name": "Spain", "value": True, "index": 5}
],
"query": {
"id": "54ac6ea9fdf99b2204feb895",
"title": "Country of Birth",
},
"_cls": "web.eligibility.models.SelectAnswerEligibilityRequirement",
},
{
"type": "select",
"attributes": [
{"label": "Spanish", "name": "Spanish", "value": True, "index": 59}
],
"query": {"id": "54ac6ea9fdf99b2204feb899", "title": "First Language"},
"_cls": "web.eligibility.models.SelectAnswerEligibilityRequirement",
},
{
"type": "select",
"attributes": [
{
"label": "I was raised with my native language only",
"name": "I was raised with my native language only",
"value": True,
"index": 0,
}
],
"query": {
"id": "59c2434b5364260001dc4b0a",
"title": "Were you raised monolingual?",
},
"_cls": "web.eligibility.models.SelectAnswerEligibilityRequirement",
},
]
class TestMTurkRecruiterMessages(object):
    """Tests for the whimsical and plain MTurk notification messages."""

    @pytest.fixture
    def summary(self, a, stub_config):
        """A ParticipationTime for a participant one minute over the limit."""
        from datetime import timedelta

        from dallinger.recruiters import ParticipationTime

        p = a.participant()
        one_min_over = 60 * stub_config.get("duration") + 1
        return ParticipationTime(
            participant=p,
            reference_time=p.creation_time + timedelta(minutes=one_min_over),
            config=stub_config,
        )

    @pytest.fixture
    def whimsical(self, summary, stub_config):
        from dallinger.recruiters import WhimsicalMTurkHITMessages

        return WhimsicalMTurkHITMessages(summary)

    @pytest.fixture
    def nonwhimsical(self, summary, stub_config):
        from dallinger.recruiters import MTurkHITMessages

        return MTurkHITMessages(summary)

    def test_resubmitted_msg_whimsical(self, whimsical):
        data = whimsical.resubmitted_msg()
        body = data["body"].replace("\n", " ")
        assert data["subject"] == "A matter of minor concern."
        assert "a full 1 minutes over" in body

    def test_resubmitted_msg_nonwhimsical(self, nonwhimsical):
        data = nonwhimsical.resubmitted_msg()
        body = data["body"].replace("\n", " ")
        assert data["subject"] == "Dallinger automated email - minor error."
        assert "Dallinger has auto-corrected the problem" in body

    def test_hit_cancelled_msg_whimsical(self, whimsical):
        data = whimsical.hit_cancelled_msg()
        body = data["body"].replace("\n", " ")
        assert data["subject"] == "Most troubling news."
        assert "a full 1 minutes over" in body

    def test_hit_cancelled_msg_nonwhimsical(self, nonwhimsical):
        data = nonwhimsical.hit_cancelled_msg()
        body = data["body"].replace("\n", " ")
        assert data["subject"] == "Dallinger automated email - major error."
        assert "Dallinger has paused the experiment" in body
# Flask route that receives AWS SNS notifications about MTurk worker events.
SNS_ROUTE_PATH = "/mturk-sns-listener"
@pytest.mark.usefixtures(
    "experiment_dir"
)  # Needed because @before_request loads the exp
class TestSNSListenerRoute(object):
    """Tests for the web route that handles AWS SNS POSTs for MTurk."""

    @pytest.fixture
    def recruiter(self, active_config):
        """A mocked MTurkRecruiter as it would be seen by the route."""
        active_config.extend({"mode": "sandbox"})  # MTurkRecruiter invalid if debug
        with mock.patch("dallinger.recruiters.MTurkRecruiter") as klass:
            instance = klass.return_value
            yield instance

    def test_answers_subscription_confirmation_request(self, webapp, recruiter):
        """A SubscriptionConfirmation POST is forwarded to the recruiter so
        it can confirm the SNS subscription."""
        post_data = {
            "Type": "SubscriptionConfirmation",
            "MessageId": "165545c9-2a5c-472c-8df2-7ff2be2b3b1b",
            "Token": "some-long-token",
            "TopicArn": "arn:aws:sns:us-west-2:123456789012:MyTopic",
            "Message": "You have chosen to subscribe to the topic arn:aws:sns:us-west-2:123456789012:MyTopic.\nTo confirm the subscription, visit the SubscribeURL included in this message.",
            "SubscribeURL": "https://some-confirmation-url-at-amazon",
            "Timestamp": "2012-04-26T20:45:04.751Z",
            "SignatureVersion": "1",
            "Signature": "very-long-base64-encoded-string-i-think",
            "SigningCertURL": "https://sns.us-west-2.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",
        }
        resp = webapp.post(SNS_ROUTE_PATH, data=json.dumps(post_data))
        assert resp.status_code == 200
        recruiter._confirm_sns_subscription.assert_called_once_with(
            token="some-long-token", topic="arn:aws:sns:us-west-2:123456789012:MyTopic"
        )

    def test_routes_worker_event_notifications(self, webapp, recruiter):
        """A Notification POST has its JSON "Events" payload decoded and
        passed on to the recruiter for processing."""
        post_data = {
            "Type": "Notification",
            "MessageId": "6af5c15c-64a3-54d1-94fb-949b81bf2019",
            "TopicArn": "arn:aws:sns:us-east-1:047991105548:some-experiment-id",
            "Subject": "1565385436809",
            "Message": '{"Events":[{"EventType":"AssignmentSubmitted","EventTimestamp":"2019-08-09T21:17:16Z","HITId":"12345678901234567890","AssignmentId":"1234567890123456789012345678901234567890","HITTypeId":"09876543210987654321"},{"EventType":"AssignmentSubmitted","EventTimestamp":"2019-08-09T21:17:16Z","HITId":"12345678901234567890","AssignmentId":"1234567890123456789012345678900987654321","HITTypeId":"09876543210987654321"}],"EventDocId":"9928a491605538bb160590bb57b0596a9fbbcbba","SourceAccount":"047991105548","CustomerId":"AUYKYIHQXG6XR","EventDocVersion":"2014-08-15"}',
            "Timestamp": "2019-08-09T21:17:16.848Z",
            "SignatureVersion": "1",
            "Signature": "very-long-base64-encoded-string-i-think",
            "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-6aad65c2f9911b05cd53efda11f913f9.pem",
            "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:047991105548:some-experiment-id:fd8f816c-7e93-4815-922b-ad1d1f8cb98b",
        }
        resp = webapp.post(SNS_ROUTE_PATH, data=json.dumps(post_data))
        assert resp.status_code == 200
        recruiter._report_event_notification.assert_called_once_with(
            [
                {
                    "EventType": "AssignmentSubmitted",
                    "EventTimestamp": "2019-08-09T21:17:16Z",
                    "HITId": "12345678901234567890",
                    "AssignmentId": "1234567890123456789012345678901234567890",
                    "HITTypeId": "09876543210987654321",
                },
                {
                    "EventType": "AssignmentSubmitted",
                    "EventTimestamp": "2019-08-09T21:17:16Z",
                    "HITId": "12345678901234567890",
                    "AssignmentId": "1234567890123456789012345678900987654321",
                    "HITTypeId": "09876543210987654321",
                },
            ]
        )
class TestRedisStore(object):
    """Smoke tests for the redis-backed key/value store."""

    @pytest.fixture
    def redis_store(self):
        from dallinger.recruiters import RedisStore

        rs = RedisStore()
        yield rs
        # Clean up redis keys so tests don't leak state into each other.
        rs.clear()

    def test_that_its_a_store(self, redis_store):
        assert redis_store.get("some key") is None
        redis_store.set("some key", "some value")
        assert redis_store.get("some key") == "some value"
@pytest.fixture
def queue():
    """Patch the module-level queue getter and yield the fake rq Queue."""
    from rq import Queue

    fake_queue = mock.Mock(spec=Queue)
    with mock.patch("dallinger.recruiters._get_queue") as mock_q:
        mock_q.return_value = fake_queue
        yield fake_queue
@pytest.fixture
def requests():
    """Patch the requests module as used inside dallinger.recruiters."""
    with mock.patch("dallinger.recruiters.requests", autospec=True) as patched:
        yield patched
@pytest.fixture
def mturkservice(active_config, fake_parsed_hit):
    """An autospecced MTurkService with canned HIT/qualification responses."""
    from dallinger.mturk import MTurkService

    mturk = mock.create_autospec(
        MTurkService,
        aws_key=active_config.get("aws_access_key_id"),
        aws_secret=active_config.get("aws_secret_access_key"),
        region_name=active_config.get("aws_region"),
        is_sandbox=active_config.get("mode") != "live",
    )

    # Echo the requested name/description so tests can assert on them.
    def create_qual(name, description):
        return {"id": "QualificationType id", "name": name, "description": description}

    mturk.check_credentials.return_value = True
    mturk.create_hit.return_value = fake_parsed_hit
    mturk.create_qualification_type.side_effect = create_qual
    mturk.get_hits.return_value = iter([])
    return mturk
@pytest.fixture
def hit_id_store():
    """An in-memory stand-in for RedisStore, so tests don't need redis.

    Replicates the get/set/clear interface of dallinger's RedisStore.
    """

    class PrimitiveHITIDStore(object):
        def __init__(self):
            self._data = {}

        def set(self, key, value):
            self._data[key] = value

        def get(self, key):
            return self._data.get(key)

        def clear(self):
            self._data = {}

    return PrimitiveHITIDStore()
@pytest.mark.usefixtures("active_config", "requests", "queue")
class TestMTurkRecruiter(object):
    @pytest.fixture
    def recruiter(
        self, active_config, notifies_admin, mailer, mturkservice, hit_id_store
    ):
        """An MTurkRecruiter in sandbox mode with collaborators mocked."""
        from dallinger.recruiters import MTurkRecruiter

        with mock.patch.multiple(
            "dallinger.recruiters", os=mock.DEFAULT, get_base_url=mock.DEFAULT
        ) as mocks:
            mocks["get_base_url"].return_value = "http://fake-domain"
            mocks["os"].getenv.return_value = "fake-host-domain"
            active_config.extend({"mode": "sandbox"})
            r = MTurkRecruiter(store=hit_id_store)
            r.notifies_admin = notifies_admin
            r.mailer = mailer
            r.mturkservice = mturkservice
            return r
    def test_instantiation_fails_with_invalid_mode(self, active_config):
        """Only debug/sandbox/live are valid modes for MTurkRecruiter."""
        from dallinger.recruiters import MTurkRecruiter, MTurkRecruiterException

        active_config.extend({"mode": "nonsense"})
        with pytest.raises(MTurkRecruiterException) as ex_info:
            MTurkRecruiter()
        assert ex_info.match('"nonsense" is not a valid mode')

    def test_config_passed_to_constructor_sandbox(self, recruiter):
        assert recruiter.config.get("title") == "fake experiment title"

    def test_external_submission_url_sandbox(self, recruiter):
        assert "workersandbox.mturk.com" in recruiter.external_submission_url

    def test_external_submission_url_live(self, recruiter):
        recruiter.config.set("mode", "live")
        assert "www.mturk.com" in recruiter.external_submission_url
    def test_open_recruitment_returns_one_item_recruitments_list(self, recruiter):
        # A single HIT covers all n assignments, so only one URL is returned.
        result = recruiter.open_recruitment(n=2)
        assert len(result["items"]) == 1

    def test_open_recruitment_describes_how_it_works(self, recruiter):
        result = recruiter.open_recruitment()
        assert "HIT now published to Amazon Mechanical Turk" in result["message"]

    def test_open_recruitment_returns_urls(self, recruiter):
        url = recruiter.open_recruitment(n=1)["items"][0]
        assert url == "http://the-hit-url"

    def test_open_recruitment_raises_if_no_external_hit_domain_configured(
        self, recruiter
    ):
        from dallinger.recruiters import MTurkRecruiterException

        recruiter.hit_domain = None
        with pytest.raises(MTurkRecruiterException):
            recruiter.open_recruitment(n=1)

    def test_open_recruitment_check_creds_before_calling_create_hit(self, recruiter):
        recruiter.open_recruitment(n=1)
        recruiter.mturkservice.check_credentials.assert_called_once()
    def test_open_recruitment_single_recruitee_builds_hit(self, recruiter):
        """Verify the full set of keyword arguments used to build the HIT."""
        recruiter.open_recruitment(n=1)
        recruiter.mturkservice.create_hit.assert_called_once_with(
            question=MTurkQuestions.external(
                ad_url="http://fake-domain/ad?recruiter=mturk"
            ),
            description="fake HIT description",
            duration_hours=1.0,
            experiment_id="TEST_EXPERIMENT_UID",
            keywords=["kw1", "kw2", "kw3"],
            lifetime_days=1,
            max_assignments=1,
            notification_url="http://fake-domain{}".format(SNS_ROUTE_PATH),
            reward=0.01,
            title="fake experiment title (dlgr-TEST_EXPERIMENT_UI)",
            annotation="TEST_EXPERIMENT_UID",
            qualifications=[
                MTurkQualificationRequirements.min_approval(95),
                MTurkQualificationRequirements.restrict_to_countries(["US"]),
            ],
        )
    def test_open_recruitment_creates_no_qualifications_if_so_configured(
        self, recruiter
    ):
        recruiter.config.set("group_name", "some group name")
        recruiter.config.set("assign_qualifications", False)
        recruiter.open_recruitment(n=1)
        recruiter.mturkservice.create_qualification_type.assert_not_called()

    def test_open_recruitment_when_qualification_already_exists(self, recruiter):
        # A pre-existing qualification name should not block HIT creation.
        from dallinger.mturk import DuplicateQualificationNameError

        mturk = recruiter.mturkservice
        mturk.create_qualification_type.side_effect = DuplicateQualificationNameError
        recruiter.open_recruitment(n=1)
        recruiter.mturkservice.create_hit.assert_called_once()
    def test_open_recruitment_with_blocklist(self, recruiter):
        """Each blocklisted qualification name adds a must_not_have rule."""
        recruiter.config.set("mturk_qualification_blocklist", "foo, bar")
        # Our fake response will always return the same QualificationType ID
        recruiter.mturkservice.get_qualification_type_by_name.return_value = {
            "id": "fake id"
        }
        recruiter.open_recruitment(n=1)
        recruiter.mturkservice.create_hit.assert_called_once_with(
            question=MTurkQuestions.external(
                ad_url="http://fake-domain/ad?recruiter=mturk"
            ),
            description="fake HIT description",
            duration_hours=1.0,
            experiment_id="TEST_EXPERIMENT_UID",
            lifetime_days=1,
            keywords=["kw1", "kw2", "kw3"],
            max_assignments=1,
            notification_url="http://fake-domain{}".format(SNS_ROUTE_PATH),
            reward=0.01,
            title="fake experiment title (dlgr-TEST_EXPERIMENT_UI)",
            annotation="TEST_EXPERIMENT_UID",
            qualifications=[
                MTurkQualificationRequirements.min_approval(95),
                MTurkQualificationRequirements.restrict_to_countries(["US"]),
                # Two entries with the same id because the mocked lookup
                # returns "fake id" for both "foo" and "bar".
                MTurkQualificationRequirements.must_not_have("fake id"),
                MTurkQualificationRequirements.must_not_have("fake id"),
            ],
        )
    def test_open_recruitment_with_explicit_qualifications(self, recruiter):
        """JSON-configured requirements are appended verbatim to the HIT's
        default qualification list."""
        recruiter.config.set(
            "mturk_qualification_requirements",
            """
            [
                {
                    "QualificationTypeId":"789RVWYBAZW00EXAMPLE",
                    "Comparator":"In",
                    "IntegerValues":[10, 20, 30]
                }
            ]
            """,
        )
        recruiter.open_recruitment(n=1)
        recruiter.mturkservice.create_hit.assert_called_once_with(
            question=MTurkQuestions.external(
                ad_url="http://fake-domain/ad?recruiter=mturk"
            ),
            description="fake HIT description",
            duration_hours=1.0,
            experiment_id="TEST_EXPERIMENT_UID",
            lifetime_days=1,
            keywords=["kw1", "kw2", "kw3"],
            max_assignments=1,
            notification_url="http://fake-domain{}".format(SNS_ROUTE_PATH),
            reward=0.01,
            title="fake experiment title (dlgr-TEST_EXPERIMENT_UI)",
            annotation="TEST_EXPERIMENT_UID",
            qualifications=[
                MTurkQualificationRequirements.min_approval(95),
                MTurkQualificationRequirements.restrict_to_countries(["US"]),
                {
                    "QualificationTypeId": "789RVWYBAZW00EXAMPLE",
                    "Comparator": "In",
                    "IntegerValues": [10, 20, 30],
                },
            ],
        )
    def test_open_recruitment_raises_error_if_hit_already_in_progress(
        self, fake_parsed_hit, recruiter
    ):
        from dallinger.recruiters import MTurkRecruiterException

        recruiter.open_recruitment()
        with pytest.raises(MTurkRecruiterException):
            recruiter.open_recruitment()

    def test_supresses_assignment_submitted(self, recruiter):
        # None: submission handling is deferred rather than immediate.
        assert recruiter.on_completion_event() is None

    def test_current_hit_id_with_active_experiment(self, recruiter, fake_parsed_hit):
        recruiter.open_recruitment()
        assert recruiter.current_hit_id() == fake_parsed_hit["id"]

    def test_current_hit_id_with_no_active_experiment(self, recruiter):
        assert recruiter.current_hit_id() is None
    def test_recruit_auto_recruit_on_recruits_for_current_hit(
        self, fake_parsed_hit, recruiter
    ):
        """With auto_recruit on, recruit() extends the currently open HIT."""
        recruiter.open_recruitment()
        recruiter.recruit()
        recruiter.mturkservice.extend_hit.assert_called_once_with(
            fake_parsed_hit["id"], number=1, duration_hours=1.0
        )

    def test_recruit_auto_recruit_off_does_not_extend_hit(
        self, fake_parsed_hit, recruiter
    ):
        recruiter.config["auto_recruit"] = False
        recruiter.open_recruitment()
        recruiter.recruit()
        assert not recruiter.mturkservice.extend_hit.called

    def test_recruit_no_current_hit_does_not_extend_hit(self, recruiter):
        recruiter.recruit()
        assert not recruiter.mturkservice.extend_hit.called

    def test_recruit_extend_hit_error_is_logged_politely(self, recruiter):
        """MTurk service failures during recruit are logged, not raised."""
        from dallinger.mturk import MTurkServiceException

        recruiter.open_recruitment()
        recruiter.mturkservice.extend_hit.side_effect = MTurkServiceException("Boom!")
        with mock.patch("dallinger.recruiters.logger") as mock_logger:
            recruiter.recruit()
        mock_logger.exception.assert_called_once_with("Boom!")
    def test_reward_bonus_passes_only_whats_needed(self, a, recruiter):
        participant = a.participant()
        recruiter.reward_bonus(
            participant=participant,
            amount=2.99,
            reason="well done!",
        )
        recruiter.mturkservice.grant_bonus.assert_called_once_with(
            assignment_id=participant.assignment_id, amount=2.99, reason="well done!"
        )

    def test_reward_bonus_logs_exception(self, a, recruiter):
        """Service failures are logged rather than propagated."""
        from dallinger.mturk import MTurkServiceException

        participant = a.participant()
        recruiter.mturkservice.grant_bonus.side_effect = MTurkServiceException("Boom!")
        with mock.patch("dallinger.recruiters.logger") as mock_logger:
            recruiter.reward_bonus(participant, 2.99, "fake reason")
        mock_logger.exception.assert_called_once_with("Boom!")
    def test_approve_hit(self, recruiter):
        fake_id = "fake assignment id"
        recruiter.approve_hit(fake_id)
        recruiter.mturkservice.approve_assignment.assert_called_once_with(fake_id)

    def test_approve_hit_logs_exception(self, recruiter):
        """Service failures are logged rather than propagated."""
        from dallinger.mturk import MTurkServiceException

        recruiter.mturkservice.approve_assignment.side_effect = MTurkServiceException(
            "Boom!"
        )
        with mock.patch("dallinger.recruiters.logger") as mock_logger:
            recruiter.approve_hit("fake-hit-id")
        mock_logger.exception.assert_called_once_with("Boom!")

    @pytest.mark.xfail
    def test_close_recruitment(self, recruiter):
        # NOTE(review): marked xfail — the assertion expects the literal
        # "fake HIT id" while open_recruitment registers the fixture HIT's
        # id; presumably the expected failure. Confirm before un-xfailing.
        fake_parsed_hit_id = "fake HIT id"
        recruiter.open_recruitment()
        recruiter.close_recruitment()
        recruiter.mturkservice.expire_hit.assert_called_once_with(fake_parsed_hit_id)
    def test_compensate_worker(self, fake_parsed_hit, recruiter):
        """Compensation creates a HIT, a qualification, and a notice email."""
        result = recruiter.compensate_worker(
            worker_id="XWZ", email="w@example.com", dollars=10
        )
        assert result == {
            "hit": fake_parsed_hit,
            "qualification": {
                "description": (
                    "You have received a qualification to allow you to complete "
                    "a compensation HIT from Dallinger for $10."
                ),
                "id": "QualificationType id",
                "name": mock.ANY,
            },
            "email": {
                "subject": "Dallinger Compensation HIT",
                "sender": "test@example.com",
                "recipients": ["w@example.com"],
                "body": mock.ANY,  # Avoid overspecification
            },
        }
    def test__assign_experiment_qualifications_creates_nonexistent_qualifications(
        self, recruiter
    ):
        # Rationale for testing a "private" method is that it does all the actual
        # work behind an async call from the public method.
        recruiter._assign_experiment_qualifications(
            "some worker id",
            [
                {"name": "One", "description": "Description of One"},
                {"name": "Two", "description": "Description of Two"},
            ],
        )
        # Each qualification is first created...
        assert recruiter.mturkservice.create_qualification_type.call_args_list == [
            mock.call("One", "Description of One"),
            mock.call("Two", "Description of Two"),
        ]
        # ...then its score is incremented for the worker.
        assert recruiter.mturkservice.increment_qualification_score.call_args_list == [
            mock.call(
                "QualificationType id",
                "some worker id",
            ),
            mock.call(
                "QualificationType id",
                "some worker id",
            ),
        ]
    def test__assign_experiment_qualifications_assigns_existing_qualifications(
        self, recruiter
    ):
        # Rationale for testing a "private" method is that it does all the actual
        # work behind an async call from the public method.
        from dallinger.mturk import DuplicateQualificationNameError

        recruiter.mturkservice.create_qualification_type.side_effect = (
            DuplicateQualificationNameError
        )
        recruiter._assign_experiment_qualifications(
            "some worker id",
            [
                {"name": "One", "description": "Description of One"},
                {"name": "Two", "description": "Description of Two"},
            ],
        )
        # Falls back to incrementing by name when creation reports a duplicate.
        assert (
            recruiter.mturkservice.increment_named_qualification_score.call_args_list
            == [mock.call("One", "some worker id"), mock.call("Two", "some worker id")]
        )
    def test_assign_experiment_qualifications_enques_work(self, recruiter, queue):
        """The public method enqueues the async worker rather than calling
        MTurk directly."""
        from dallinger.recruiters import _run_mturk_qualification_assignment

        qualification_params = [
            "some worker id",
            [
                {"name": "One", "description": "Description of One"},
            ],
        ]
        recruiter.assign_experiment_qualifications(*qualification_params)
        queue.enqueue.assert_called_once_with(
            _run_mturk_qualification_assignment, *qualification_params
        )
def test_rejects_questionnaire_from_returns_none_if_working(self, recruiter):
participant = mock.Mock(spec=Participant, status="working")
assert recruiter.rejects_questionnaire_from(participant) is None
def test_rejects_questionnaire_from_returns_error_if_already_submitted(
self, recruiter
):
participant = mock.Mock(spec=Participant, status="submitted")
rejection = recruiter.rejects_questionnaire_from(participant)
assert "already sumbitted their HIT" in rejection
#
# Begin notify_duration_exceeded tests
#
    def test_sets_participant_status_if_approved(self, a, recruiter):
        """An MTurk "Approved" assignment marks the participant approved."""
        recruiter.mturkservice.get_assignment.return_value = {"status": "Approved"}
        participants = [a.participant()]
        recruiter.notify_duration_exceeded(participants, datetime.now())
        assert participants[0].status == "approved"
    def test_sets_participant_status_if_rejected(self, a, recruiter):
        """An MTurk "Rejected" assignment marks the participant rejected."""
        recruiter.mturkservice.get_assignment.return_value = {"status": "Rejected"}
        participants = [a.participant()]
        recruiter.notify_duration_exceeded(participants, datetime.now())
        assert participants[0].status == "rejected"
    def test_sends_replacement_mturk_notification_if_resubmitted(
        self, a, recruiter, queue
    ):
        """A resubmitted assignment re-enqueues the AssignmentSubmitted event."""
        recruiter.mturkservice.get_assignment.return_value = {"status": "Submitted"}
        participants = [a.participant()]
        from dallinger.recruiters import worker_function
        recruiter.notify_duration_exceeded(participants, datetime.now())
        # The replacement notification is delivered through the worker queue.
        queue.enqueue.assert_called_once_with(
            worker_function, "AssignmentSubmitted", participants[0].assignment_id, None
        )
        recruiter.notifies_admin.send.assert_called_once()
    def test_notifies_researcher_if_resubmitted(self, a, recruiter):
        """A resubmitted assignment triggers exactly one admin notification."""
        recruiter.mturkservice.get_assignment.return_value = {"status": "Submitted"}
        participants = [a.participant()]
        recruiter.notify_duration_exceeded(participants, datetime.now())
        recruiter.notifies_admin.send.assert_called_once()
    def test_shuts_down_recruitment_if_no_status_from_mturk(
        self, a, recruiter, requests
    ):
        """With no MTurk status, auto_recruit is switched off via the Heroku API."""
        recruiter.mturkservice.get_assignment.return_value = {"status": None}
        participants = [a.participant()]
        recruiter.notify_duration_exceeded(participants, datetime.now())
        assert requests.patch.call_args[1]["data"] == '{"auto_recruit": "false"}'
    def test_treats_mturk_exception_as_status_none(self, a, recruiter):
        """Errors when querying MTurk are treated the same as a missing status."""
        recruiter.mturkservice.get_assignment.side_effect = Exception("Boom!")
        assert recruiter._mturk_status_for(mock.Mock()) is None
    def test_sends_notification_missing_if_no_status_from_mturk(
        self, a, recruiter, queue
    ):
        """With no MTurk status, a NotificationMissing event is enqueued."""
        recruiter.mturkservice.get_assignment.return_value = {"status": None}
        participants = [a.participant()]
        from dallinger.recruiters import worker_function
        recruiter.notify_duration_exceeded(participants, datetime.now())
        queue.enqueue.assert_called_once_with(
            worker_function, "NotificationMissing", participants[0].assignment_id, None
        )
    def test_notifies_researcher_when_hit_cancelled(self, a, recruiter):
        """With no MTurk status (e.g. cancelled HIT) the admin is notified."""
        recruiter.mturkservice.get_assignment.return_value = {"status": None}
        participants = [a.participant()]
        recruiter.notify_duration_exceeded(participants, datetime.now())
        recruiter.notifies_admin.send.assert_called_once()
    def test_no_assignment_on_mturk_expires_hit(self, a, recruiter):
        """With no MTurk status, the participant's HIT is expired."""
        recruiter.mturkservice.get_assignment.return_value = {"status": None}
        participants = [a.participant()]
        recruiter.notify_duration_exceeded(participants, datetime.now())
        recruiter.mturkservice.expire_hit.assert_called_once_with(
            participants[0].hit_id
        )
    def test_flag_prevents_disabling_autorecruit(self, a, recruiter, requests):
        """disable_when_duration_exceeded=False keeps auto_recruit untouched."""
        recruiter.mturkservice.get_assignment.return_value = {"status": None}
        participants = [a.participant()]
        recruiter.config.set("disable_when_duration_exceeded", False)
        recruiter.notify_duration_exceeded(participants, datetime.now())
        requests.patch.assert_not_called()
    def test_flag_prevents_expiring_hit(self, a, recruiter):
        """disable_when_duration_exceeded=False also prevents HIT expiry."""
        recruiter.mturkservice.get_assignment.return_value = {"status": None}
        participants = [a.participant()]
        recruiter.config.set("disable_when_duration_exceeded", False)
        recruiter.notify_duration_exceeded(participants, datetime.now())
        recruiter.mturkservice.expire_hit.assert_not_called()
class TestRedisTally(object):
    """Tests for the Redis-backed recruitment counter."""
    @pytest.fixture
    def redis_tally(self):
        # Import deferred so collection works without the recruiters module.
        from dallinger.recruiters import RedisTally
        return RedisTally()
    def test_that_its_a_counter(self, redis_tally):
        """increment() adds to the running total exposed by .current."""
        assert redis_tally.current == 0
        redis_tally.increment(3)
        assert redis_tally.current == 3
@pytest.mark.usefixtures("active_config")
class TestMTurkLargeRecruiter(object):
    """Tests for MTurkLargeRecruiter's over-recruitment pool behavior."""
    @pytest.fixture
    def counter(self):
        # We don't want to depend on redis in these tests.
        class PrimitiveCounter(object):
            # In-memory stand-in for the Redis tally.
            _count = 0
            def increment(self, count):
                self._count += count
            @property
            def current(self):
                return self._count
        return PrimitiveCounter()
    @pytest.fixture
    def recruiter(self, active_config, counter, mturkservice, hit_id_store):
        """An MTurkLargeRecruiter wired to fake services and sandbox config."""
        from dallinger.recruiters import MTurkLargeRecruiter
        with mock.patch.multiple(
            "dallinger.recruiters", os=mock.DEFAULT, get_base_url=mock.DEFAULT
        ) as mocks:
            mocks["get_base_url"].return_value = "http://fake-domain"
            mocks["os"].getenv.return_value = "fake-host-domain"
            active_config.extend({"mode": "sandbox"})
            r = MTurkLargeRecruiter(counter=counter, store=hit_id_store)
            r.mturkservice = mturkservice
            return r
    def test_open_recruitment_raises_error_if_experiment_in_progress(
        self, fake_parsed_hit, recruiter
    ):
        """A second open_recruitment on the same experiment must fail."""
        from dallinger.recruiters import MTurkRecruiterException
        recruiter.open_recruitment()
        with pytest.raises(MTurkRecruiterException):
            recruiter.open_recruitment()
    def test_open_recruitment_ignores_participants_from_other_recruiters(
        self, a, recruiter
    ):
        """Participants recruited by other recruiters don't block recruitment."""
        a.participant(recruiter_id="bot")
        result = recruiter.open_recruitment(n=1)
        assert len(result["items"]) == 1
        recruiter.mturkservice.check_credentials.assert_called_once()
    def test_open_recruitment_single_recruitee_actually_overrecruits(self, recruiter):
        """Asking for 1 participant still creates a HIT with pool_size (10) slots."""
        recruiter.open_recruitment(n=1)
        recruiter.mturkservice.create_hit.assert_called_once_with(
            question=MTurkQuestions.external(
                ad_url="http://fake-domain/ad?recruiter=mturklarge"
            ),
            description="fake HIT description",
            duration_hours=1.0,
            experiment_id="TEST_EXPERIMENT_UID",
            keywords=["kw1", "kw2", "kw3"],
            lifetime_days=1,
            max_assignments=10,
            notification_url="http://fake-domain{}".format(SNS_ROUTE_PATH),
            reward=0.01,
            title="fake experiment title (dlgr-TEST_EXPERIMENT_UI)",
            annotation="TEST_EXPERIMENT_UID",
            qualifications=[
                MTurkQualificationRequirements.min_approval(95),
                MTurkQualificationRequirements.restrict_to_countries(["US"]),
            ],
        )
    def test_open_recruitment_with_more_than_pool_size_uses_requested_count(
        self, recruiter
    ):
        """Requests above pool_size are passed through unchanged."""
        num_recruits = recruiter.pool_size + 1
        recruiter.open_recruitment(n=num_recruits)
        recruiter.mturkservice.create_hit.assert_called_once_with(
            question=MTurkQuestions.external(
                ad_url="http://fake-domain/ad?recruiter=mturklarge"
            ),
            description="fake HIT description",
            duration_hours=1.0,
            experiment_id="TEST_EXPERIMENT_UID",
            keywords=["kw1", "kw2", "kw3"],
            lifetime_days=1,
            max_assignments=num_recruits,
            notification_url="http://fake-domain{}".format(SNS_ROUTE_PATH),
            reward=0.01,
            title="fake experiment title (dlgr-TEST_EXPERIMENT_UI)",
            annotation="TEST_EXPERIMENT_UID",
            qualifications=[
                MTurkQualificationRequirements.min_approval(95),
                MTurkQualificationRequirements.restrict_to_countries(["US"]),
            ],
        )
    def test_recruit_draws_on_initial_pool_before_extending_hit(
        self, fake_parsed_hit, recruiter
    ):
        """Extra recruits first consume the over-recruited pool, then extend."""
        recruiter.open_recruitment(n=recruiter.pool_size - 1)
        recruiter.recruit(n=1)
        recruiter.mturkservice.extend_hit.assert_not_called()
        recruiter.recruit(n=1)
        recruiter.mturkservice.extend_hit.assert_called_once_with(
            fake_parsed_hit["id"], duration_hours=1.0, number=1
        )
    def test_recruits_more_immediately_if_initial_recruitment_exceeds_pool_size(
        self, fake_parsed_hit, recruiter
    ):
        """With the pool already exhausted, recruit() extends the HIT at once."""
        recruiter.open_recruitment(n=recruiter.pool_size + 1)
        recruiter.recruit(n=5)
        recruiter.mturkservice.extend_hit.assert_called_once_with(
            fake_parsed_hit["id"], duration_hours=1.0, number=5
        )
    def test_recruit_auto_recruit_off_does_not_extend_hit(self, recruiter):
        """auto_recruit=False disables HIT extension entirely."""
        recruiter.config["auto_recruit"] = False
        recruiter.recruit()
        assert not recruiter.mturkservice.extend_hit.called
@pytest.mark.usefixtures("active_config", "db_session")
class TestMultiRecruiter(object):
    """Tests for MultiRecruiter's batch-wise delegation to sub-recruiters."""
    @pytest.fixture
    def recruiter(self, active_config):
        """A MultiRecruiter configured for 2 cli then 1 hotair recruit."""
        from dallinger.recruiters import MultiRecruiter
        active_config.extend({"recruiters": "cli: 2, hotair: 1"})
        return MultiRecruiter()
    def test_parse_spec(self, recruiter):
        """The config string is parsed into (name, count) pairs."""
        assert recruiter.spec == [("cli", 2), ("hotair", 1)]
    def test_pick_recruiter(self, recruiter):
        """recruiters(n) yields sub-recruiters with their allotted counts."""
        recruiters = list(recruiter.recruiters(3))
        assert len(recruiters) == 2
        subrecruiter, count = recruiters[0]
        assert subrecruiter.nickname == "cli"
        assert count == 2
        subrecruiter, count = recruiters[1]
        assert subrecruiter.nickname == "hotair"
        assert count == 1
    def test_open_recruitment(self, recruiter):
        """Recruitment URLs follow the configured batch order."""
        result = recruiter.open_recruitment(n=3)
        assert len(result["items"]) == 3
        assert result["items"][0].startswith("http://localhost:5000/ad?recruiter=cli")
        assert result["items"][1].startswith("http://localhost:5000/ad?recruiter=cli")
        assert result["items"][2].startswith(
            "http://localhost:5000/ad?recruiter=hotair"
        )
    def test_open_recruitment_over_recruit(self, recruiter):
        """Requests beyond the spec total are capped at the spec total (3)."""
        result = recruiter.open_recruitment(n=5)
        assert len(result["items"]) == 3
        assert result["items"][0].startswith("http://localhost:5000/ad?recruiter=cli")
        assert result["items"][1].startswith("http://localhost:5000/ad?recruiter=cli")
        assert result["items"][2].startswith(
            "http://localhost:5000/ad?recruiter=hotair"
        )
    def test_open_recruitment_twice(self, recruiter):
        """A second call continues where the first left off in the spec."""
        result = recruiter.open_recruitment(n=1)
        assert len(result["items"]) == 1
        assert result["items"][0].startswith("http://localhost:5000/ad?recruiter=cli")
        result2 = recruiter.open_recruitment(n=3)
        assert len(result2["items"]) == 2
        assert result2["items"][0].startswith("http://localhost:5000/ad?recruiter=cli")
        assert result2["items"][1].startswith(
            "http://localhost:5000/ad?recruiter=hotair"
        )
    def test_recruit(self, recruiter):
        """recruit() produces URLs in the same batch order as open_recruitment."""
        result = recruiter.recruit(n=3)
        assert len(result) == 3
        assert result[0].startswith("http://localhost:5000/ad?recruiter=cli")
        assert result[1].startswith("http://localhost:5000/ad?recruiter=cli")
        assert result[2].startswith("http://localhost:5000/ad?recruiter=hotair")
    def test_over_recruit(self, recruiter):
        """recruit() is also capped at the spec total."""
        result = recruiter.recruit(n=5)
        assert len(result) == 3
        assert result[0].startswith("http://localhost:5000/ad?recruiter=cli")
        assert result[1].startswith("http://localhost:5000/ad?recruiter=cli")
        assert result[2].startswith("http://localhost:5000/ad?recruiter=hotair")
    def test_recruit_partial(self, recruiter):
        """recruit() resumes from the spec position and stops when exhausted."""
        result = recruiter.open_recruitment(n=1)
        assert len(result["items"]) == 1
        assert result["items"][0].startswith("http://localhost:5000/ad?recruiter=cli")
        result2 = recruiter.recruit(n=3)
        assert len(result2) == 2
        assert result2[0].startswith("http://localhost:5000/ad?recruiter=cli")
        assert result2[1].startswith("http://localhost:5000/ad?recruiter=hotair")
        result3 = recruiter.recruit(n=2)
        assert len(result3) == 0
    def test_recruit_batches(self, active_config):
        """Repeated recruiter names in the spec form independent batches."""
        from dallinger.recruiters import MultiRecruiter
        active_config.extend({"recruiters": "cli: 2, hotair: 1, cli: 3, hotair: 2"})
        recruiter = MultiRecruiter()
        result = recruiter.recruit(n=10)
        assert len(result) == 8
        assert result[0].startswith("http://localhost:5000/ad?recruiter=cli")
        assert result[1].startswith("http://localhost:5000/ad?recruiter=cli")
        assert result[2].startswith("http://localhost:5000/ad?recruiter=hotair")
        assert result[3].startswith("http://localhost:5000/ad?recruiter=cli")
        assert result[4].startswith("http://localhost:5000/ad?recruiter=cli")
        assert result[5].startswith("http://localhost:5000/ad?recruiter=cli")
        assert result[6].startswith("http://localhost:5000/ad?recruiter=hotair")
        assert result[7].startswith("http://localhost:5000/ad?recruiter=hotair")
    def test_close_recruitment(self, recruiter):
        """Closing recruitment closes every configured sub-recruiter."""
        patch1 = mock.patch("dallinger.recruiters.CLIRecruiter.close_recruitment")
        patch2 = mock.patch("dallinger.recruiters.HotAirRecruiter.close_recruitment")
        with patch1 as f1, patch2 as f2:
            recruiter.close_recruitment()
            f1.assert_called_once()
            f2.assert_called_once()
| Dallinger/Dallinger | tests/test_recruiters.py | test_recruiters.py | py | 57,547 | python | en | code | 113 | github-code | 13 |
34483489516 | #!/usr/bin/env python3
import json
import os
import shutil
import subprocess
import sys
import appdirs
import click
from termcolor import cprint, colored
PROGRAMS_FILE = os.path.join(
appdirs.user_config_dir("engi", "PurpleMyst"), "programs.json"
)
def choose(programs):
    """Interactively prompt the user to pick one entry from *programs*.

    Prints a 1-based numbered menu and loops until the user enters a valid
    index, then returns the chosen program dict.
    """
    cprint("Choose a program to install", "blue")
    for i, program in enumerate(programs):
        cprint(f"{i + 1}. ", "yellow", end="")
        print(program["name"])
    while True:
        try:
            idx = int(input(colored("> ", "green")))
        except ValueError:
            cprint("Please enter a valid number.", "red")
            continue
        if not (0 < idx <= len(programs)):
            # BUG FIX: the original omitted the comma before "red", so the
            # color name was concatenated into the message string instead of
            # being passed as cprint's color argument.
            cprint(f"Please enter a number between 1 and {len(programs)}.", "red")
            continue
        return programs[idx - 1]
def repo_path(program):
    """Return the local clone directory for *program* inside the user cache."""
    name = program["name"].lower()
    return os.path.join(appdirs.user_cache_dir("engi", "PurpleMyst"), name)
def download(program):
    """Clone or update *program*'s git repository in the local cache.

    Returns True when new content was fetched (a fresh clone, or a pull
    that brought changes) and False when the existing clone was already up
    to date.  Exits if the cache path exists but is not a git repository.
    """
    cprint("Checking ", "blue", end="")
    print(program["name"])
    repo = program["url"]
    path = repo_path(program)
    if os.path.exists(path):
        if not os.path.exists(os.path.join(path, ".git")):
            sys.exit(
                colored(
                    f"{path} already exists but is not a git repository.",
                    "red",
                )
            )
        proc = subprocess.run(["git", "-C", path, "pull"], stdout=subprocess.PIPE)
        if b"up to date" in proc.stdout.lower():
            print(program["name"], end="")
            cprint(" is up to date", "green")
            return False
        # BUG FIX: the original fell through to ``git clone`` after a pull
        # that brought changes, and cloning into an existing non-empty
        # directory always fails.  A pull with updates counts as a download.
        return True
    cprint("Downloading ", "blue", end="")
    print(program["name"])
    proc = subprocess.run(["git", "clone", repo, path])
    proc.check_returncode()
    cprint("Downloaded ", "green", end="")
    print(program["name"])
    return True
def install(program):
    """Run *program*'s install commands inside its checked-out repository.

    The commands are joined with ``&&`` and run in one shell so a failure
    short-circuits.  On failure the clone is removed so the next attempt
    starts from a fresh checkout.
    """
    stow_dir = "/usr/local/stow/"
    package_name = program["name"].lower()
    cprint("Installing ", "blue", end="")
    print(program["name"])
    os.chdir(repo_path(program))
    # Format the placeholders into a local list instead of rewriting the
    # caller's ``program["commands"]`` in place, as the original did.
    commands = [
        cmd.format(stow_dir=stow_dir, package_name=package_name)
        for cmd in program["commands"]
    ]
    proc = subprocess.run(["sh", "-c", " && ".join(commands)])
    if proc.returncode != 0:
        # Remove the broken checkout before surfacing the failure.
        shutil.rmtree(repo_path(program))
        proc.check_returncode()
    cprint("Installed ", "green", end="")
    print(program["name"])
@click.group()
def cli():
    """Ensure the config directory and programs file exist before any command."""
    os.makedirs(os.path.dirname(PROGRAMS_FILE), exist_ok=True)
    if not os.path.exists(PROGRAMS_FILE):
        # Use a context manager so the handle is closed deterministically
        # (the original relied on garbage collection to close the file).
        with open(PROGRAMS_FILE, "w") as f:
            f.write("[]")
@cli.command("install")
def cli_install():
with open(PROGRAMS_FILE) as f:
programs = json.load(f)
cprint("Enter the name of the program.", "magenta")
name = input(colored("> ", "green"))
cprint("Enter the URL of the program.", "magenta")
url = input(colored("> ", "green"))
cprint(
"Enter the commands you want to run to install the program, terminated by an empty line.",
"magenta",
)
commands = []
while True:
cmd = input(colored("> ", "green"))
if cmd:
commands.append(cmd)
else:
break
programs.append({"name": name, "url": url, "commands": commands})
with open(PROGRAMS_FILE, "w") as f:
json.dump(programs, f, indent=4)
@cli.command()
def upgrade():
    """Re-download every registered program and reinstall the ones that changed."""
    with open(PROGRAMS_FILE) as f:
        programs = json.load(f)
    for program in programs:
        # download() returns True only when new content was fetched.
        if download(program):
            install(program)
if __name__ == "__main__":
cli()
| PurpleMyst/engi | engi.py | engi.py | py | 3,755 | python | en | code | 0 | github-code | 13 |
34736561829 | # coding: utf-8
'''
Created on Jun 14, 2011
FP-Growth FP means frequent pattern
the FP-Growth algorithm needs:
1. FP-tree (class treeNode)
2. header table (use dict)
This finds frequent itemsets similar to apriori but does not
find association rules.
@author: Peter
'''
class treeNode:
    """A node of an FP-tree: item name, count, header-table link, parent, children."""
    def __init__(self, nameValue, numOccur, parentNode):
        # Item name this node represents.
        self.name = nameValue
        # Occurrence count along this path.
        self.count = numOccur
        # Link to the next tree node carrying the same item (header-table chain).
        self.nodeLink = None
        # Parent node; None only for the root.
        self.parent = parentNode #needs to be updated
        # Child nodes keyed by item name.
        self.children = {}
    # Increase this node's occurrence count.
    def inc(self, numOccur):
        self.count += numOccur
    # Print this node and its subtree, indenting one space per level.
    def disp(self, ind=1):
        # Own name and count first ...
        print(' '*ind, self.name, ' ', self.count)
        # ... then each child, one level deeper.
        for child in self.children.values():
            child.disp(ind+1)
# Build an FP-tree from a dataset of {frozenset(transaction): count}.
#
# Pass 1 tallies item frequencies into headerTable, then items below
# minSup are dropped.  The header table is then extended so each entry is
# [count, head-of-node-link-list]; the node links chain together all tree
# nodes holding the same item.  Pass 2 re-reads each transaction, keeps
# only frequent items, sorts them by descending global frequency, and
# feeds the ordered list to updateTree() so more frequent items sit
# closer to the root.
# Create FP-tree from dataset but don't mine
def createTree(dataSet, minSup=3): # minSup=1):
    headerTable = {}
    # Pass 1: count how often each item occurs across all transactions.
    for trans in dataSet:#first pass counts frequency of occurance
        for item in trans:
            headerTable[item] = headerTable.get(item, 0) + dataSet[trans]
    print("1 --- headerTable : ", headerTable)
    # Drop items that do not reach the minimum support.
    #remove items not meeting minSup
    # (list() copies the keys so we can delete while iterating)
    for k in list(headerTable.keys()):
        if headerTable[k] < minSup:
            del(headerTable[k])
    # Only items meeting minSup remain at this point.
    print("2 --- headerTable : ", headerTable)
    freqItemSet = set(headerTable.keys())
    # Nothing frequent: no tree to build.
    if len(freqItemSet) == 0:
        return None, None #if no items meet min support -->get out
    # Extend each header entry from a plain count to [count, node-link head],
    # e.g. {'r': 3} -> {'r': [3, None]}.
    for k in headerTable:
        #reformat headerTable to use Node link
        headerTable[k] = [headerTable[k], None]
    print('3 --- headerTable: ',headerTable)
    # Root of the FP-tree holds the empty set.
    retTree = treeNode('Null Set', 1, None) #create tree
    # Pass 2: insert each transaction's frequent items into the tree.
    for tranSet, count in dataSet.items(): #go through dataset 2nd time
        localD = {}
        # Keep only frequent items, remembering their global counts.
        for item in tranSet: #put transaction items in order
            if item in freqItemSet:
                localD[item] = headerTable[item][0]
        if len(localD) > 0:
            # Sort items by descending global frequency before insertion.
            orderedItems = [v[0] for v in sorted(localD.items(),
                            key=lambda p: p[1], reverse=True)]
            print("orderedItems : ", orderedItems)
            # populate tree with ordered freq itemset
            updateTree(orderedItems, retTree, headerTable, count)
    return retTree, headerTable #return tree and header table
# Grow the FP-tree by one transaction.
#
# items      : frequent items of one transaction, sorted by descending
#              global frequency
# inTree     : node to attach to (root on the first call)
# headerTable: {item: [count, node-link head]} to keep chains updated
# count      : how many times this transaction occurred
def updateTree(items, inTree, headerTable, count):
    # If the first item is already a child, just bump its count ...
    if items[0] in inTree.children:#check if orderedItems[0] in retTree.children
        inTree.children[items[0]].inc(count) #incrament count
    # ... otherwise create a new child node.
    else: #add items[0] to inTree.children
        inTree.children[items[0]] = treeNode(items[0], count, inTree)
        # Keep the header table's node-link chain pointing at every
        # instance of this item in the tree.
        if headerTable[items[0]][1] == None: #update header table
            # First occurrence: the header entry points straight at it.
            headerTable[items[0]][1] = inTree.children[items[0]]
        else:
            # Append the new node to the end of the existing chain.
            updateHeader(headerTable[items[0]][1], inTree.children[items[0]])
    # Recurse with the remaining items, one level deeper.
    if len(items) > 1:#call updateTree() with remaining ordered items
        updateTree(items[1::], inTree.children[items[0]], headerTable, count)
# Append targetNode to the end of a header-table node-link chain so the
# chain reaches every instance of the item in the tree.
# this version does not use recursion
def updateHeader(nodeToTest, targetNode):
    # Walk the chain to its last node ...
    # Do not use recursion to traverse a linked list!
    while (nodeToTest.nodeLink != None):
        nodeToTest = nodeToTest.nodeLink
    # ... and hang the new node off the end.
    nodeToTest.nodeLink = targetNode
# With the FP-tree built, frequent itemsets can be extracted.
# Climb from leafNode to the root, ...
def ascendTree(leafNode, prefixPath): #ascends from leaf node to root
    if leafNode.parent != None:
        # ... collecting the name of every node passed on the way up.
        prefixPath.append(leafNode.name)
        ascendTree(leafNode.parent, prefixPath)
# Collect the conditional pattern base for one item: every prefix path
# that ends at an instance of the item, mapped to that instance's count.
#
# basePat : item name (unused in the body, kept for readability)
# treeNode: head of the item's node-link chain from the header table
#
# For each node in the chain we ascend to the root, record the path names,
# and store path-minus-the-item-itself as a frozenset key.
def findPrefixPath(basePat, treeNode): #treeNode comes from header table
    # Dict keyed by frozenset so identical prefix paths collapse into one entry.
    condPats = {}
    # Walk the node-link chain until it ends.
    while treeNode != None:
        prefixPath = []
        # Ascend to the root, collecting node names (item itself first).
        ascendTree(treeNode, prefixPath)
        print("prefixPath : ", prefixPath)
        # Paths of length 1 are just the item itself -- no prefix to record.
        if len(prefixPath) > 1:
            print("frozenset(prefixPath[1:]) : ", frozenset(prefixPath[1:]))
            print("treeNode : ", treeNode.count)
            condPats[frozenset(prefixPath[1:])] = treeNode.count
        # Next node carrying the same item.
        treeNode = treeNode.nodeLink
    return condPats
# Mine frequent itemsets by recursively building conditional FP-trees.
#
# inTree      : FP-tree (unused directly; mining works off headerTable)
# headerTable : {item: [count, node-link head]} of the current tree
# minSup      : minimum support; must be re-checked inside each
#               conditional tree, not just in the global one
# preFix      : set of items conditioning the current tree
# freqItemList: output list, grows one frequent itemset per visited item
def mineTree(inTree, headerTable, minSup, preFix, freqItemList):
    #(sort header table)
    # NOTE(review): the sort key str(p[1]) orders items by the string form
    # of [count, node-link]; the classic implementation sorts by the count
    # p[1][0] (ascending, i.e. least frequent first) -- confirm this
    # ordering is intended before relying on the traversal order.
    bigL = [v[0] for v in sorted(headerTable.items(), key=lambda p: str(p[1]))]
    print("bigL : ", bigL)
    # 1. start from bottom of header table
    for basePat in bigL:
        # Extend the current prefix by this item -> one new frequent itemset.
        newFreqSet = preFix.copy()
        newFreqSet.add(basePat)
        print('finalFrequent Item: ',newFreqSet) #append to set
        freqItemList.append(newFreqSet)
        # Conditional pattern base for this item.
        condPattBases = findPrefixPath(basePat, headerTable[basePat][1])
        print('condPattBases :',basePat, condPattBases)
        # 2. construct cond FP-tree from cond. pattern base
        # The pattern base is treated as a new dataset.
        myCondTree, myHead = createTree(condPattBases, minSup)
        print('head from conditional tree: ', myHead)
        # 3. mine cond. FP-tree
        # myHead is None when nothing in the pattern base meets minSup;
        # otherwise keep mining the conditional tree recursively.
        if myHead != None:
            print('conditional tree for: ',newFreqSet)
            myCondTree.disp(1)
            mineTree(myCondTree, myHead, minSup, newFreqSet, freqItemList)
def loadSimpDat():
    """Return the small hard-coded transaction dataset used for demos."""
    transactions = [
        ['r', 'z', 'h', 'j', 'p'],
        ['z', 'y', 'x', 'w', 'v', 'u', 't', 's'],
        ['z'],
        ['r', 'x', 'n', 'o', 's'],
        ['y', 'r', 'x', 'z', 'q', 't', 'p'],
        ['y', 'z', 'x', 'e', 'q', 's', 't', 'm'],
    ]
    return transactions
def createInitSet(dataSet):
    """Convert a list of transactions into {frozenset(transaction): count}.

    BUG FIX: the original assigned ``1`` unconditionally, so duplicate
    transactions collapsed to a count of 1 and their support was lost.
    Counting occurrences preserves the true support that createTree()
    expects from its input mapping.
    """
    retDict = {}
    for trans in dataSet:
        key = frozenset(trans)
        retDict[key] = retDict.get(key, 0) + 1
    return retDict
'''
import twitter
from time import sleep
import re
def textParse(bigString):
urlsRemoved = re.sub('(http:[/][/]|www.)([a-z]|[A-Z]|[0-9]|[/.]|[~])*', '', bigString)
listOfTokens = re.split(r'\W*', urlsRemoved)
return [tok.lower() for tok in listOfTokens if len(tok) > 2]
def getLotsOfTweets(searchStr):
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_TOKEN_KEY = ''
ACCESS_TOKEN_SECRET = ''
api = twitter.Api(consumer_key=CONSUMER_KEY, consumer_secret=CONSUMER_SECRET,
access_token_key=ACCESS_TOKEN_KEY,
access_token_secret=ACCESS_TOKEN_SECRET)
#you can get 1500 results 15 pages * 100 per page
resultsPages = []
for i in range(1,15):
print("fetching page %d" % i)
searchResults = api.GetSearch(searchStr, per_page=100, page=i)
resultsPages.append(searchResults)
sleep(6)
return resultsPages
def mineTweets(tweetArr, minSup=5):
parsedList = []
for i in range(14):
for j in range(100):
parsedList.append(textParse(tweetArr[i][j].text))
initSet = createInitSet(parsedList)
myFPtree, myHeaderTab = createTree(initSet, minSup)
myFreqList = []
mineTree(myFPtree, myHeaderTab, minSup, set([]), myFreqList)
return myFreqList
'''
#minSup = 3
#simpDat = loadSimpDat()
#initSet = createInitSet(simpDat)
#myFPtree, myHeaderTab = createTree(initSet, minSup)
#myFPtree.disp()
#myFreqList = []
#mineTree(myFPtree, myHeaderTab, minSup, set([]), myFreqList)
| lucelujiaming/luceluMachineLearingInAction | Ch12/fpGrowth.py | fpGrowth.py | py | 15,240 | python | zh | code | 0 | github-code | 13 |
20761621885 | import pygame
import random
# Window size and frame rate.
WIDTH = 800
HEIGHT = 600
FPS = 30
# RGB color constants.
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
# Initialize pygame, the mixer, the window, and the frame clock.
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("SPACE INSANITY BETA")
clock = pygame.time.Clock()
class Player(pygame.sprite.Sprite):
    """The player's ship: moves horizontally at the screen bottom and shoots."""
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # Sprite art, scaled to 50x54 px.
        self.image = pygame.transform.scale(pygame.image.load('alien.png'), (50, 54))
        self.rect = self.image.get_rect()
        # Start centered, 10 px above the bottom edge.
        self.rect.centerx = WIDTH/2
        self.rect.bottom = HEIGHT - 10
        self.speedx = 0
    def update(self):
        """Move according to held arrow keys, clamped to the screen."""
        self.speedx = 0
        keystate = pygame.key.get_pressed()
        # If both arrows are held, left wins (assigned last).
        if keystate[pygame.K_RIGHT]:
            self.speedx = 10
        if keystate[pygame.K_LEFT]:
            self.speedx = -10
        self.rect.x += self.speedx
        # Keep the ship fully on screen.
        if self.rect.left < 0:
            self.rect.left = 0
        if self.rect.right > WIDTH:
            self.rect.right = WIDTH
    def shoot(self):
        """Spawn a bullet at the ship's nose and register it with the groups."""
        bullet = Bullet(self.rect.centerx, self.rect.top)
        all_sprites.add(bullet)
        bullets.add(bullet)
class Enemy(pygame.sprite.Sprite):
    """A falling enemy that respawns above the screen after leaving it."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # Load the sprite art directly; the original also created an unused
        # local ``count`` and a green placeholder Surface that was
        # immediately overwritten -- both dead code, removed here.
        self.image = pygame.transform.scale(pygame.image.load('enemy.png'), (50, 54))
        self.rect = self.image.get_rect()
        self._respawn()

    def _respawn(self):
        """Place the enemy at a random spot above the screen with a new speed."""
        self.rect.x = random.randrange(WIDTH - self.rect.width)
        self.rect.y = random.randrange(-100, -40)
        self.speedy = random.randrange(1, 8)

    def update(self):
        """Fall by speedy; respawn once fully below the bottom edge."""
        self.rect.y += self.speedy
        if self.rect.top > HEIGHT + 10:
            self._respawn()
class Bullet(pygame.sprite.Sprite):
    """A red projectile that travels straight up and despawns off-screen."""

    def __init__(self, x, y):
        super().__init__()
        surface = pygame.Surface((5, 20))
        surface.fill(RED)
        self.image = surface
        self.rect = surface.get_rect()
        self.rect.centerx = x
        self.rect.bottom = y
        self.speedy = -20

    def update(self):
        """Move upward a fixed step; remove the sprite once fully above the screen."""
        self.rect.y += self.speedy
        if self.rect.bottom < 0:
            self.kill()
# Sprite groups: everything drawable, plus bullets/enemies for collisions.
all_sprites = pygame.sprite.Group()
bullets = pygame.sprite.Group()
enemies = pygame.sprite.Group()
player = Player()
all_sprites.add(player)
# Spawn the initial wave of enemies.
for i in range(200):
    e = Enemy()
    all_sprites.add(e)
    enemies.add(e)
# Main game loop: events -> update -> collisions -> draw.
running = True
while running:
    clock.tick(FPS)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE:
                player.shoot()
    all_sprites.update()
    # Bullet/enemy collisions. dokill=True already removes the hit enemies
    # from every group; the explicit remove() calls below are redundant
    # but harmless (Group.remove ignores absent sprites).
    for bullet in bullets:
        hit = pygame.sprite.spritecollide(bullet, enemies, True)
        for enemy in hit:
            bullets.remove(bullet)
            all_sprites.remove(bullet)
            enemies.remove(enemy)
            all_sprites.remove(enemy)
    # Any enemy touching the player ends the game.
    hits = pygame.sprite.spritecollide(player, enemies, False)
    if hits:
        running = False
    screen.fill(BLACK)
    all_sprites.draw(screen)
    pygame.display.flip()
pygame.quit()
| colgoo21/AWESOMENESS | Cole_Demo3.py | Cole_Demo3.py | py | 3,352 | python | en | code | 0 | github-code | 13 |
43263442292 | n, m = map(int, input().split())
MOD = 10 ** 9 + 7
# factorials[k] = k! mod MOD, precomputed up to max(n, m).
factorials = [1]
for k in range(1, max(n, m) + 1):
    factorials.append(factorials[-1] * k % MOD)
gap = abs(n - m)
if gap >= 2:
    # Alternating arrangements are impossible when counts differ by 2+.
    print(0)
elif gap == 1:
    # The larger group must start and end the sequence: n! * m! ways.
    print(factorials[n] * factorials[m] % MOD)
else:
    # Equal counts: either group may start, hence the extra factor of 2.
    print(2 * factorials[n] * factorials[m] % MOD)
| Shirohi-git/AtCoder | arc058-/arc076_a.py | arc076_a.py | py | 281 | python | en | code | 2 | github-code | 13 |
11928331595 | from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
url = "https://www.basketball-reference.com/leagues/NBA_2020_per_game.html".format()
html = urlopen(url)
soup = BeautifulSoup(html, features="html.parser")
soup.findAll('tr', limit=2)
headers = [th.getText() for th in soup.findAll('tr', limit=2)
[0].findAll('th')]
headers = headers[1:]
rows = soup.findAll('tr')[1:]
player_stats = [[td.getText() for td in rows[i].findAll('td')]
for i in range(len(rows))]
stats = pd.DataFrame(player_stats, columns=headers)
print(stats)
| ShCHewitt/DFSAlgo | old_code/dataScrape.py | dataScrape.py | py | 588 | python | en | code | 1 | github-code | 13 |
15340273984 | """Receipt class to handle a receipt with filtering and parsing"""
import logging
from pathlib import Path
import imghdr
import numpy as np
import pandas as pd
from PIL import Image
import matplotlib.pyplot as plt
import pytesseract as ocr
import pypdfium2 as pdfium
from skimage.color import rgb2gray
from skimage.transform import rotate
from . import image_filters
from .configs import config
from .configs import constants
from . import parsers
logger = logging.getLogger(__package__)
def _type_check(retrieved_data):
"""Ensures that the data type in each view column is correct"""
try:
retrieved_data = retrieved_data.astype(
{'PricePerUnit': 'float', 'Price': 'float', 'TaxClass': 'int', 'ArtNr': 'int'})
except ValueError:
logger.warning('Using float instead of int in some cols due to NaN are left')
retrieved_data = retrieved_data.astype(
{'PricePerUnit': 'float', 'Price': 'float', 'TaxClass': 'float', 'ArtNr': 'int'})
return retrieved_data
class _BaseReceipt():
"""
Base Receipt holds methods and attributes that are valid for either a pdf
or an image based receipt. Should not be called directly!
"""
def __init__(self):
self._type = None
self._gs_image = None
self._data_extracted = False
self._raw_text = ''
self._data = None
self._lang = 'deu'
self._vendor = None
self._patident = None
self._patset = None
self._fig = None
self.disp_ax = None
@property
def raw_text(self):
"""Raw extracted text from the receipt"""
if self._data_extracted:
return self._raw_text
else:
logger.warning('No valid data extracted (yet)')
return None
@property
def type(self):
"""Receipt type, image or pdf"""
return self._type
@property
def valid_data(self):
"""Returns if data has been extracted from the receipt"""
if self._data_extracted:
return self._data
else:
logger.warning('No valid data extracted (yet)')
return None
@property
def parsing_patterns(self):
"""Returns the current set of regexp parsing patterns"""
return self._patset
@property
def vendor(self):
"""Returns vendor"""
return self._vendor
# Template to allow chaining if receipt type not known beforehand
def filter_image(self, **kwargs):
"""Template, dont use directly"""
return self
def _create_figure(self):
"""Figure convenience function for high level use"""
self._fig, self._ax = plt.subplots(1, 2, sharex=True, sharey=True)
def parse_vendor(self, lang=config.options['lang']):
"""
Tries to extract the vendor from the receipt. Call this after
extract_data to get a meaningful result
"""
self._vendor, self._patident = parsers.get_vendor(self.raw_text)
if self._vendor == 'General':
logger.warning(
'No vendor found, set to General. Please add for best '
'parsing results using Receipt.set_vendor')
return self.set_vendor(self._vendor, lang)
def set_vendor(self, vendor, lang=config.options['lang']):
"""Manually set vendor if auto detect failed"""
self._vendor = vendor
self._patident = config.receipt_types.get(self._vendor, 'gen')
self._patset = parsers.get_patterns(self._patident, lang)
self._lang = lang
return self._vendor
    def parse_data(self, fill=True):
        """
        Parses extracted data into articles and prices - this is where the most
        complicated functions are being called!

        Parameters
        ----------
        fill : `bool`
            Fill missing and nans with some basic math. Defaults to `True`.

        Returns
        -------
        retrieved_data, total_price : tuple, or None
            Parsed article data and the total price; None when data was not
            extracted or no vendor is set.
        """
        # Both preconditions must hold: text extracted and a vendor selected
        if not self._data_extracted:
            logger.info('Please extract data first')
            return None
        if self.vendor is None:
            logger.info('Please set a vendor first')
            return None
        parsing_func = parsers.select_parser(self._patident, lang=self._lang)
        retrieved_data, total_price = parsing_func(
            self.valid_data, self._patset, self._patident, self.disp_ax)
        # Fill
        if fill:
            retrieved_data = parsers.fill_missing_data(retrieved_data)
        # Type check
        retrieved_data = _type_check(retrieved_data)
        # Tax Class corrections: some vendors encode tax classes swapped
        if self.vendor in config.needs_tax_switch.keys():
            sw_a, sw_b = config.needs_tax_switch[self.vendor]
            logger.info(f'Switching Tax Classes {sw_a} and {sw_b}')
            retrieved_data = parsers._flip_tax_class(
                retrieved_data, sw_a, sw_b)
        return retrieved_data, total_price
def parse_date(self):
"""Retrieves date from raw text. Call after extract_data"""
if 'date_pattern' in self._patset:
date = parsers.get_date(self._raw_text, self._patset['date_pattern'])
else:
logger.warning('No date matching pattern available')
date = None
return date
class ImgReceipt(_BaseReceipt):
    """
    A receipt based on an image, this could be used solo but is wrapped in a
    user class for handling all types of receipts
    """
    def __init__(self, filepath):
        _BaseReceipt.__init__(self)
        self._type = 'img'
        self._file = None
        self.file = filepath  # validated and loaded via the `file` setter
        self._rotation = 0  # accumulated display rotation in degrees
        self._has_rotation = False
        self._is_filtered = False
        self._proc_img = None  # filtered grayscale image
        self._bin_img = None  # binarized image handed to the OCR backend
    @property
    def file(self):
        """Holds the file path of the underlying image file"""
        return self._file
    @file.setter
    def file(self, filepath):
        filepath = Path(filepath)
        if not filepath.is_file() or not filepath.exists() or imghdr.what(filepath) is None:
            error = 'File does not exist or no valid image'
            logger.error(error)
            raise FileNotFoundError(error)
        self._file = filepath
        self._gs_image = image_filters.load_image(self._file)
        # Reset all derived state when a new file is assigned
        self._proc_img = None
        self._rotation = 0
        self._has_rotation = False
        self._is_filtered = False
        self._patset = None
    @property
    def rotation(self):
        """Returns current image rotation in degrees, None if unrotated"""
        if not self._has_rotation:
            return None
        return self._rotation
    @rotation.setter
    def rotation(self, inc):
        """Add `inc` degrees to the accumulated rotation."""
        self._rotation += inc
        self._has_rotation = True
        if self._rotation == 0:
            # Bug fix: this used to read `self._has_rotation + False`, a
            # no-op expression - a net-zero rotation was never cleared.
            self._has_rotation = False
    @property
    def valid_filter(self):
        """Returns the state of the image filter"""
        return self._is_filtered
    @property
    def image(self):
        """Returns the (filtered if available) grayscale image, rotated if a
        rotation is set"""
        if not self._is_filtered:
            logger.warning('Image is not filtered - using base grayscale')
            ref_img = self._gs_image
        else:
            ref_img = self._proc_img
        if self._has_rotation:
            return rotate(ref_img, self._rotation, resize=True)
        else:
            return ref_img
    @property
    def bin_img(self):
        """Returns the binary filtered image if available"""
        if not self._is_filtered:
            error = 'Binary image is not filtered yet'
            logger.error(error)
            raise RuntimeError(error)
        if self._has_rotation:
            return rotate(self._bin_img, self._rotation, resize=True)
        else:
            return self._bin_img
    def filter_image(self, **kwargs):
        """
        Filters the receipt using the filter function defined in library. Any
        kwargs are passed to `image_filters.preprocess_image()` so look there
        for more information."""
        self._proc_img, self._bin_img = image_filters.preprocess_image(
            self._gs_image, **kwargs)
        self._is_filtered = True
        if self._fig is not None:
            self.disp_ax = self._fig.axes[0]
        # Chaining support
        return self
    def show_receipt(self):
        """Creates a plot with the receipt and its filtered view"""
        if not self.valid_filter:
            logger.warning('Please filter first')
            return
        self._create_figure()
        self._ax[0].imshow(self.image)
        self._ax[1].imshow(self.bin_img)
        self.disp_ax = self._ax[0]
        # Chaining support
        return self
    def extract_data(self, lang=config.options['lang']):
        """
        Extracts text **and** converts to dataframe. Uses tesseract as backend
        with the given language.

        Parameters
        ----------
        lang : `str`, optional
            tesseract base language for text extraction, by default the
            current default value from the config file.

        Returns
        -------
        self : `Receipt`
            Self for chaining support
        """
        tess_in = Image.fromarray(self.bin_img.astype(bool))
        tess_in.format = 'TIFF'
        logger.debug(f'Tesseract with lang: {lang}')
        try:
            data = ocr.image_to_data(tess_in, lang=lang, output_type='data.frame',
                                     config=constants._TESS_OPTIONS).dropna(
                subset=['text']).reset_index()
        except (ocr.TesseractError, ocr.TesseractNotFoundError) as tess_e:
            logger.exception(
                'Tesseract nor found or failure. This has to be '
                f'resolved on system level: {tess_e}')
            return self
        data['height_plus_top'] = data['height'] + data['top']
        data['width_plus_left'] = data['width'] + data['left']
        # Collapse into single lines
        data_by_line = data.groupby('line_num')
        data_combined = pd.concat((
            data_by_line['text'].apply('_'.join),
            data_by_line['top'].min(),
            data_by_line['left'].min(),
            data_by_line['height_plus_top'].max(),
            data_by_line['width_plus_left'].max()),
            axis=1).reset_index()
        # Make BBox format for MPL
        data_combined['width'] = data_combined['width_plus_left'] - data_combined['left']
        data_combined['height'] = data_combined['height_plus_top'] - data_combined['top']
        # Bug fix: DataFrame.drop is not in-place; the result was discarded
        # before, leaving the helper columns in the output frame.
        data_combined = data_combined.drop(
            ['height_plus_top', 'width_plus_left'], axis=1)
        # Re-Get raw text instead of tesseract twice
        self._raw_text = '\n'.join(data_combined.text)
        self._data = data_combined
        self._data_extracted = True
        # Chaining support
        return self
    def reset_rotation(self):
        """Resets current rotation"""
        self._rotation = 0
        self._has_rotation = False
class PdfReceipt(_BaseReceipt):
    """
    A Receipt based on a pdf. This **must** contain valid text and not just
    images. Currently, only single page is supported with page 1 being parsed!
    """
    def __init__(self, filepath):
        _BaseReceipt.__init__(self)
        self._type = 'pdf'
        self._file = None
        self.file = filepath  # validated via the `file` setter
    @property
    def file(self):
        """Holds the file path of the underlying pdf file"""
        return self._file
    @file.setter
    def file(self, filepath):
        filepath = Path(filepath)
        if not filepath.is_file() or not filepath.exists() or not filepath.suffix == '.pdf':
            error = 'File does not exist or no valid PDF'
            logger.error(error)
            raise FileNotFoundError(error)
        self._file = filepath
        # Reset derived state when a new file is assigned
        self._gs_image = None
        self._data_extracted = False
    @property
    def image(self):
        """
        Provides a simple image for plotting extracted from the pdf. Only use
        this for plotting purposes!"""
        if not self._data_extracted:
            error = 'Image is not extracted yet'
            logger.error(error)
            raise RuntimeError(error)
        else:
            return self._gs_image
    def show_receipt(self):
        """Helper function to display the extracted image"""
        if not self._data_extracted:
            logger.warning('Please extract data first')
            return
        self._create_figure()
        self._ax[0].imshow(self.image)
        self.disp_ax = self._ax[0]
        return self
    def extract_data(self, page=0, lang=None):
        """
        Extracts text **and** converts to dataframe. lang is unused here in
        case of pdf and is solely used for standardization of function
        signatures.

        Parameters
        ----------
        page : `int`, optional
            Page to parse, by default 0
        lang : `str`, optional
            Placeholder, by default None

        Returns
        -------
        self : `Receipt`
            Self for chaining support
        """
        # Split line-wise
        pdf = pdfium.PdfDocument(self._file)
        pagedata = pdf.get_page(page)
        txt = pagedata.get_textpage().get_text_range().split('\n')
        txt = [line.strip() for line in txt if line.strip()]
        # Remove many spaces, dont need the layout
        txt = [' '.join(line.split()) for line in txt]
        # Spaces to underscore, better visibility
        txt = [line.replace(' ', '_') for line in txt]
        # Create raw and parse the rest into the DataFrame format which is used
        # in the main text parser
        raw_text = '\n'.join(txt)
        data = pd.DataFrame(columns=['line_num', 'text'])
        data['text'] = txt
        data['line_num'] = [i + 1 for i in range(len(txt))]
        # Render a reference image at the target DPI for an 80mm wide receipt
        scale = constants._TARGET_DPI / pagedata.get_width() * (80 / 25.4)
        ref_img = rgb2gray(pagedata.render(scale=scale).to_numpy())
        # Text BB
        # NOTE(review): this assumes count_rects() yields exactly one rect per
        # non-empty text line (same length/order as txt) -- confirm for
        # multi-column or wrapped pdfs.
        txtpage = pagedata.get_textpage()
        rects = np.array([txtpage.get_rect(i) for i in range(txtpage.count_rects())])
        # Now this is left, bottom, right and top in pdf, so scale, invert y
        # and convert for MPL
        data['left'] = rects[:, 0] * scale
        data['top'] = ref_img.shape[0] - rects[:, 3] * scale
        data['width'] = (rects[:, 2] - rects[:, 0]) * scale
        data['height'] = (rects[:, 3] - rects[:, 1]) * scale
        self._data = data
        self._raw_text = raw_text
        self._gs_image = ref_img
        self._data_extracted = True
        return self
def Receipt(file):
    """
    Factory dispatching to the matching receipt implementation.

    Parameters
    ----------
    file : `Path`
        Receipt image or pdf path.

    Returns
    -------
    Receipt : `ImgReceipt` or `PdfReceipt`
        The receipt class instance

    Raises
    ------
    FileNotFoundError
        If the given path does not point to an existing file.
    IOError
        If the file is neither a readable image nor a pdf.
    """
    file = Path(file)
    if not (file.is_file() and file.exists()):
        error = 'File does not exist'
        logger.error(error)
        raise FileNotFoundError(error)
    # Image detection first, pdf by suffix second
    if imghdr.what(file) is not None:
        logger.debug('Creating Image based receipt')
        return ImgReceipt(file)
    if file.suffix == '.pdf':
        logger.debug('Creating PDF based receipt')
        return PdfReceipt(file)
    raise IOError('Only image files and pdf are supported!')
| max3-2/pybudgetbook | pybudgetbook/receipt.py | receipt.py | py | 15,406 | python | en | code | 0 | github-code | 13 |
36322900563 | from turtle import *
from math import *
def draw(a, n, end):
    """Trace the rose curve r = a*sin(n*t) for t in [0, end] (step 0.01)."""
    angle = 0
    while angle <= end:
        radius = a * sin(n * angle)
        goto(radius * cos(angle), radius * sin(angle))
        angle += 0.01
# draw(100,3/2,12.56)
def draw_heart():
    """Trace the cardioid r = 100*(1 - sin(t)) for a full revolution."""
    up()
    scale = 100
    angle = 0
    while angle < 2 * pi:
        radius = scale * (1 - sin(angle))
        goto(radius * cos(angle), radius * sin(angle))
        # Pen drops after the very first move, so only the initial jump is invisible
        down()
        angle += 0.01
# draw_heart()
def draw_peach():
    """Trace the classic parametric heart ("peach") curve, scaled by 10."""
    scale = 10
    angle = 0
    up()
    while angle <= 2 * pi:
        px = scale * 15 * sin(angle) ** 3
        py = scale * (15 * cos(angle) - 5 * cos(2 * angle)
                      - 2 * cos(3 * angle) - cos(4 * angle))
        goto(px, py)
        down()
        angle += 0.01
# draw_peach()
def draw_butterfly():
    """Trace Fay's butterfly curve with a progress-based color sweep and a
    fill segment per step."""
    a,t=60,0
    b=24*pi  # full parameter range of the butterfly curve
    while t<=b:
        print(t)  # progress output on every step
        begin_fill();
        # Map progress t/b onto a 24-bit hex color, zero-padded to 6 digits
        col = str(hex(int((t * 256 / b) * 65535))[2:])
        col = '#' + (6 - len(col)) * '0' + col
        color(col)
        # Fay's butterfly: r = e^cos(t) - 2*cos(4t) + sin(t/12)^5
        p=e**cos(t)-2*cos(4*t)+sin(t/12)**5
        x=a*sin(t)*p
        y=a*cos(t)*p
        goto(x, y)
        end_fill()
        down()
        t += 0.1
draw_butterfly()  # render the curve on import/run
35189584840 | import os
from collections import defaultdict
from flask import render_template, redirect, url_for, flash, send_from_directory
from flask_login import current_user, login_required
from app.crud import *
from app.models import *
from app import app, login_manager
# Supported game identifiers.
# NOTE(review): GAMES is not referenced in this view module -- confirm it is
# used elsewhere before removing.
GAMES = ("dota2", "overwatch", "csgo")
def unauthorized():
    """Flash a login-required notice and redirect to the login page."""
    flash("Для доступа к странице требуется авторизация")
    return redirect(url_for('auth.login'))
# Replace Flask-Login's default unauthorized handler with ours
login_manager.unauthorized = unauthorized
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve a session user id to a User row."""
    return User.query.get(user_id)
@app.route('/favicon.ico')
def favicon():
    """Serve the site favicon from the static folder."""
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route('/')
def index():
    """Render the landing page with the full game list."""
    return render_template("index.html", games=Game.query.all())
@app.route('/games/<game_name>')
@login_required
def games(game_name):
    """Render the page for a single game, or answer 404 for an unknown URL."""
    game = get_game_by_url(game_name)
    if game is None:
        # Bug fix: the view used to fall through and implicitly return None
        # for an unknown game, which makes Flask raise a 500 error.
        from flask import abort  # local import keeps the module header untouched
        abort(404)
    return render_template("game.html", game=game, games=Game.query.all())
@app.route("/profile")
@login_required
def profile():
    """Show the current user's profile data grouped into display sections."""
    # Bug fix: defaultdict calls its factory with NO arguments; the original
    # `lambda e: ...` raised TypeError on any missing key. Keep the same
    # fallback text with a zero-argument factory.
    me = defaultdict(lambda: "Не указано", current_user.get_me())
    groups = [
        {
            "name": "Личные данные",
            "ФИО": me["last_name"] + " " + me["first_name"] + " " + me["middle_name"],
            "Дата рождения": me["birthday"],
            "Пол": "Мужской" if me["sex"] == 'm' else "Женский",
            "Адрес": me["address"]
        },
        {
            "name": "О себе",
            "О себе": me["about"]
        }
    ]
    return render_template("profile.html", groups=groups)
@app.route("/methodology")
def methodology():
    """Render the static methodology page."""
    return render_template("methodology.html")
@app.route("/check-level")
@login_required
def check_level():
    """Render the level-check page with the full game list."""
    return render_template("check_level.html", games=Game.query.all())
| kerniee/kruzhok-games-front | app/views/all.py | all.py | py | 1,986 | python | en | code | 0 | github-code | 13 |
13375105153 | import tensorflow as tf
class NetModel(tf.keras.Model):
    """Small CNN classifier: two strided conv+ReLU blocks, flatten, 10-way dense.

    Parameters
    ----------
    feature_size : int
        Number of filters in the first conv layer (doubled in the second).
    """
    def __init__(self, feature_size):
        super(NetModel, self).__init__()
        self.feature_size = feature_size
        layers = [
            tf.keras.layers.Conv2D(filters=self.feature_size, kernel_size=3, strides=2, padding='SAME', use_bias=True),
            tf.keras.layers.Activation(tf.keras.activations.relu),
            tf.keras.layers.Conv2D(filters=self.feature_size * 2, kernel_size=3, strides=2, padding='SAME',
                                   use_bias=True),
            tf.keras.layers.Activation(tf.keras.activations.relu),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(units=10, use_bias=True),
        ]
        # Fix: the original wrapped the layer list in Sequential twice
        # (Sequential(Sequential([...]))); build the model once.
        self.model = tf.keras.Sequential(layers)
    def call(self, x):
        """Forward pass: return 10 logits per input image."""
        x = self.model(x)
        return x
40890036461 | #!/usr/bin/python3.7
# Machine indices: 0=KathyUbuntu, 1=westteam, 2=local
def get_settings(machine):
    """Return {chain, url3, url4} for a known machine index, else None."""
    presets = {
        0: (24442, "http://78.47.206.255:18003",
            "http://78.47.206.255:18004/jsonrpc"),
        1: (4810, "http://westteam.nulstar.com:18003",
            "http://westteam.nulstar.com:18004/jsonrpc"),
        # NOTE(review): '127.2.0.1' looks like a typo for 127.0.0.1 -- confirm
        2: (2, "http://127.2.0.1:18003",
            "http://127.0.0.1:18004/jsonrpc"),
    }
    if machine in presets:
        chain, url3, url4 = presets[machine]
        return {"chain": chain, "url3": url3, "url4": url4}
| nmschorr/nulspy-requests | src/user_inputs/settings_main.py | settings_main.py | py | 798 | python | en | code | 0 | github-code | 13 |
14890177801 | from collections import deque
import parameters as pt
import utils as ut
# Option 1-A: naively pair with whoever is next in line
def get_next_user(waiting_queue, user_id, grades, matched):
    """Pop the next waiting user, mark them matched; -1 when nobody waits."""
    if not waiting_queue:
        return -1
    partner = waiting_queue.popleft()
    matched.add(partner)
    return partner
# Option 1-B: pair with the user whose grade differs the most
def get_nearest_user(waiting_queue, user_id, grades, matched):
    """
    :param waiting_queue: waiting queue of user ids (1, 2, 4, 10...)
    :param user_id: user to pair up
    :param grades: mapping user id -> grade
    :param matched: set of already matched users
    :return: partner id for user_id, or -1 when nobody is available
    """
    max_abs = -1
    nearest_user = -1
    # NOTE: the queue is only iterated, not popped -- candidates stay queued
    for partner in waiting_queue:
        # Skip partners that are already matched or the user themselves
        if partner in matched or partner == user_id:
            continue
        new_abs = abs(grades[user_id] - grades[partner])
        if max_abs < new_abs:
            max_abs = new_abs
            nearest_user = partner
    # Bug fix: only mark a partner matched when one was actually found;
    # the original unconditionally added the sentinel -1 to the matched set.
    if nearest_user != -1:
        matched.add(nearest_user)
    return nearest_user
# Option 2-A: min-max normalization of the level difference.
# This maps any value into the target range's minimum..maximum; here a level
# difference is converted into a grade difference, so the result lands
# between the minimum and maximum grade.
def min_max_normalize(x):
    """Map a level difference onto the grade scale via min-max scaling."""
    return int(((x - pt.LEVEL_MIN) / (pt.LEVEL_MAX - pt.LEVEL_MIN)) * pt.GRADE_MAX)
# Option 2-B: z-score normalization of the level difference.
# The problem statement provides the level mean and standard deviation.
# To use them, the grade mean is assumed to be 5000 and the grade std 2500
# (a rough guess for the 0..9999 range), and the level z-score is divided by
# 8 because the grade mean is 1/8 of the level mean (40000). The original
# author notes this reasoning is not actually sound: the grade z-score
# cannot be derived from the level z-score.
def z_score_normalize(x):
    """Map a level difference onto the grade scale via a (heuristic) z-score,
    clamped to [GRADE_MIN, GRADE_MAX]."""
    z_score = (x - pt.LEVEL_MEAN) / pt.LEVEL_STD
    return int(min(max(pt.GRADE_MIN, 5000 + 2500 * (z_score / 8)), pt.GRADE_MAX))
# Option 3-A: split the grade gap in half -- winner gains it, loser loses it
def add_and_subtract_half(winner_grade, loser_grade, grade_diff):
    """Transfer half the grade gap from loser to winner, clamped to 0..9999."""
    delta = grade_diff // 2
    # Grades can never exceed 9999 or drop below 0
    return min(winner_grade + delta, 9999), max(loser_grade - delta, 0)
# Option 3-B: award the entire grade gap to the winner
def add_all_to_winner(winner_grade, loser_grade, grade_diff):
    """Add the whole grade gap to the winner (capped at 9999); loser unchanged."""
    boosted = winner_grade + grade_diff
    if boosted > 9999:
        boosted = 9999
    return boosted, loser_grade
# Option 4: if the loser turns out to be an abuser, swap the two grades
def swap_when_abused(winner_grade, loser_grade, grade_diff):
    """Swap winner and loser grades as an abuse penalty (grade_diff unused)."""
    penalized = (loser_grade, winner_grade)
    return penalized
# Build the list of users whose grade must be updated from the game results
# and the abuse probabilities.
def naive_grade_changing(results, grades, abuse_rate, methods):
    """
    :param results: game results
    :param grades: per-user grades -> {id: grade}
    :param abuse_rate: per-user abuse probability counters
    :param methods: strategy functions to use
    :return: list of {"id", "grade"} entries to apply
    """
    to_change = []
    for result in results:
        # Estimate the level difference from the game duration
        level_diff = ut.get_approx_level_diff(result["taken"])
        # Current grades of winner and loser
        winner_grade = grades[result["win"]]
        loser_grade = grades[result["lose"]]
        # Convert the level difference into a grade difference
        grade_diff = methods["normalize"](level_diff)
        loser_id = result["lose"]
        # Problem 2: if the loser is flagged as a likely abuser (>= 80%)
        if pt.PROBLEM == 2 and abuse_rate[loser_id][1] != 0 and ut.get_probability(abuse_rate[loser_id]) >= 0.8:
            # Apply the abuser-specific grade logic
            new_winner_grade, new_loser_grade = methods["method_for_abuser"](winner_grade, loser_grade, grade_diff)
        else:
            # Otherwise apply the regular grade-revision logic
            new_winner_grade, new_loser_grade = methods["method_for_grade_revise"](winner_grade, loser_grade, grade_diff)
        # Queue both updated grades
        to_change += [
            {"id": result["win"], "grade": new_winner_grade},
            {"id": result["lose"], "grade": new_loser_grade}
        ]
    return to_change
def make_pairs(waiting_line, grades, methods):
    """
    Build the list of match-ups from the waiting line.

    :param waiting_line: raw waiting line -> [{"from": arrival time, "id": user id}, ...]
    :param grades: per-user grades -> {id: grade}
    :param methods: strategy functions to use
    :return: list of sorted [id, id] pairs to match against each other
    """
    # Arrival-ordered queue of user ids
    queue = get_waiting_queue(waiting_line)
    matched = set()
    pairs = []
    while queue:
        current = queue.popleft()
        # Pick an opponent for `current` using the configured strategy
        opponent = methods["pick_partner"](queue, current, grades, matched)
        if opponent != -1:
            pairs.append(sorted([current, opponent]))
            matched.add(current)
    return pairs
def get_waiting_queue(waiting_line):
    """
    :param waiting_line: waiting line -> [{"from": arrival time, "id": user id}, ...]
    :return: deque of user ids ordered by arrival time (earliest first)
    """
    # Bug fix: the original sorted ascending by "from" but then pop()ed from
    # the END of the list, so users were enqueued latest-first -- the opposite
    # of the documented arrival order that popleft() consumers expect.
    ordered = sorted(waiting_line, key=lambda entry: entry["from"])
    return deque(entry["id"] for entry in ordered)
def get_rate(results, grades, abuse_rate):
    """
    Update per-user abuse counters in place.

    :param results: game results
    :param grades: per-user grades -> {id: grade}
    :param abuse_rate: mapping id -> [suspicious games, eligible games]
    """
    # Bug fix: the original ran a first, author-marked-wrong counting block
    # AND the corrected one below, double-counting every upset game. Only the
    # corrected logic is kept.
    for result in results:
        winner_id = result["win"]
        loser_id = result["lose"]
        # A lower-graded winner beating a higher-graded loser is an upset;
        # it counts as suspicious for the loser when the game was also fast.
        if grades[winner_id] < grades[loser_id]:
            if result["taken"] <= 10:
                abuse_rate[loser_id][0] += 1
            abuse_rate[loser_id][1] += 1
| jkjan/PS | Kakao_2022_2/algorithms.py | algorithms.py | py | 6,834 | python | ko | code | 0 | github-code | 13 |
22209818252 | import requests
import sys
import argparse
from bs4 import BeautifulSoup
# Command-line interface: positional `type` selects what to scrape; --outfile
# defaults to stdout.
parser = argparse.ArgumentParser(description='Retrieve and Tabularize Bluetooth GATT Characteristics or Services')
parser.add_argument('type', choices=['characteristics', 'services', 'all'], help='Whether to retrieve characteristics, services, or both')
# parser.add_argument('--format', default='md', choices=['md', 'csv']) # TODO: Output CSV
parser.add_argument('--outfile', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
# Constants. Change if URLs change
root = 'https://www.bluetooth.com'
# Browser-like user agent; the site rejects the default requests UA
headers = {'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'}
svc_table = '/specifications/gatt/services/'
chr_table = '/specifications/gatt/characteristics/'
# Accumulators filled by the scraping loops below
characteristics = []
services = []
# Scrape the characteristics table: one row per characteristic, each row
# linking to an XML spec that carries the description and field list.
if args.type == 'characteristics' or args.type == 'all':
    page = requests.get(root+chr_table, headers=headers)
    soup = BeautifulSoup(page.text, 'html.parser')
    for row in soup.find_all('tr'):
        # Skip header
        if row.find('th'):
            continue
        link = row.find('a')
        if link:
            chr_name = link.text
            # Set up xml parsing
            xml_url = link['href'].split('src=')[0]
            xml = requests.get(xml_url, headers=headers)
            x_soup = BeautifulSoup(xml.text, 'lxml')
            xml_name = x_soup.find('characteristic')['name']
            if xml_name != chr_name:
                print('Differing names: xml: {}, link: {}'.format(xml_name, chr_name), file=sys.stderr)
            # Remove newlines, replace bullet points with hyphens.
            description = ''
            if x_soup.find('informativetext'):
                description = x_soup.find('informativetext').get_text(' ', strip=True)
                description = ''.join(description.splitlines())
                description = description.replace('â\x80¢', '-')
                # NOTE(review): the next two replace() calls are byte-identical,
                # so the second is a no-op -- presumably they once targeted two
                # different mojibake'd curly quotes; confirm against the
                # original source encoding.
                description = description.replace('â', '"')
                description = description.replace('â', '"')
            else:
                print('No description for {}'.format(chr_name), file=sys.stderr)
            fields = []
            for field in x_soup.find_all('field'):
                fields.append(field['name'])
            characteristic = {}
            characteristic['name'] = chr_name
            characteristic['description'] = description
            characteristic['fields'] = fields
            characteristics.append(characteristic)
        else:
            # Row without a spec link: record the name only
            chr_name = row.contents[1].get_text()
            print('No link for {}'.format(chr_name), file=sys.stderr)
            characteristic = {}
            characteristic['name'] = chr_name
            characteristics.append(characteristic)
# Scrape the services table: mirrors the characteristics loop above, but the
# XML also splits referenced characteristics into mandatory vs optional.
if args.type == 'services' or args.type == 'all':
    page = requests.get(root+svc_table, headers=headers)
    soup = BeautifulSoup(page.text, 'html.parser')
    for row in soup.find_all('tr'):
        # Skip header
        if row.find('th'):
            continue
        link = row.find('a')
        if link:
            service_name = link.text
            # Set up xml parsing (service hrefs are site-relative here)
            xml_url = link['href']
            xml = requests.get(root+xml_url, headers=headers)
            x_soup = BeautifulSoup(xml.text, 'lxml')
            xml_name = x_soup.find('service')['name']
            if xml_name != service_name:
                print('Differing names: xml: {}, link: {}'.format(xml_name, service_name), file=sys.stderr)
            # Remove newlines, replace bullet points with hyphens.
            description = x_soup.find('informativetext').get_text(' ', strip=True)
            description = ''.join(description.splitlines())
            description = description.replace('â\x80¢', '-')
            # NOTE(review): duplicated identical replace(), see the
            # characteristics loop -- likely lost curly-quote variants.
            description = description.replace('â', '"')
            description = description.replace('â', '"')
            mandatory = []
            optional = []
            for characteristic in x_soup.find_all('characteristic'):
                if characteristic.requirement.text == 'Mandatory':
                    mandatory.append(characteristic['name'])
                else:
                    optional.append(characteristic['name'])
            service = {}
            service['name'] = service_name
            service['description'] = description
            service['mandatory'] = mandatory
            service['optional'] = optional
            services.append(service)
        else:
            # Row without a spec link: record the name only
            svc_name = row.contents[1].get_text()
            print('No link for {}'.format(svc_name), file=sys.stderr)
            service = {}
            service['name'] = svc_name
            services.append(service)
# Emit the collected data as markdown tables to the chosen output file.
if args.type == 'characteristics' or args.type == 'all':
    args.outfile.write('| Characteristic Name | Description | Fields\n')
    args.outfile.write('|---\n')
    for characteristic in characteristics:
        args.outfile.write(u'| {} | {} | {}\n'.format(
            characteristic.get('name', 'Not Available'),
            characteristic.get('description', 'Not Available'),
            ', '.join(characteristic.get('fields', ['Not Available']))
        ))
if args.type == 'services' or args.type == 'all':
    args.outfile.write('| Service Name | Description | Mandatory Characteristics | Optional Characteristics\n')
    args.outfile.write('|---\n')
    for service in services:
        args.outfile.write(u'| {} | {} | {} | {}\n'.format(
            service.get('name', 'Not Available'),
            service.get('description', 'Not Available'),
            ', '.join(service.get('mandatory', ['Not Available'])),
            ', '.join(service.get('optional', ['Not Available']))
        ))
| linkoep/gatt_scrape | main.py | main.py | py | 5,810 | python | en | code | 0 | github-code | 13 |
6422942814 | from django.shortcuts import render
from .models import Setting
from Product_app.models import Product
# Create your views here.
def HomePage(request):
    """Render the landing page with site settings and a two-product slider."""
    context = {}
    if Setting.objects.exists():
        # NOTE(review): assumes the Setting row with pk=1 exists whenever the
        # table is non-empty -- confirm, .first() may be safer.
        setting = Setting.objects.get(id=1)
        context={'setting':setting}
        # NOTE(review): products are only shown when a Setting row exists,
        # because this check is nested inside the Setting branch -- confirm
        # that is intended.
        if Product.objects.exists():
            prod_slide_img = Product.objects.all().order_by('id')[:2]
            context={'setting':setting,'prod_slide_img':prod_slide_img}
    return render(request, 'ecommerceApp/home.html',context)
def sortear(* num):
    """Append 5 random integers (1-10) to the list given as first argument.

    Bug fix: the original ignored its parameter and always mutated the
    global ``lista``; the passed-in list is now used when one is given
    (falling back to the global for backward compatibility). The unused
    ``time.sleep`` import was dropped.
    """
    from random import randint
    alvo = num[0] if num else lista
    for c in range(1, 6):
        alvo.append(randint(1, 10))
    print(f'A lista sorteada foi {alvo}')
def somapar(* valores):
    """Print the even values of the list given as first argument and their sum.

    Bug fix: the original iterated the global ``lista`` instead of its
    argument; the passed-in list is now used when one is given. The computed
    sum is also returned (the original returned None, which no caller used).
    """
    alvo = valores[0] if valores else lista
    soma = 0
    print('Os valores pares na lista é: ', end='')
    for v in alvo:
        if v % 2 == 0:
            print(f'{v} ', end='')
            soma += v
    print()
    print(f'A soma dos números pares é {soma}.')
    return soma
lista = []  # shared list: filled with 5 random draws, then analyzed
sortear(lista)  # draw 5 random numbers into the list
somapar(lista)  # report the even values and their sum
| lucassale/python | Revisão para certificado/ex100 FUNÇÃO - sortear e somar.py | ex100 FUNÇÃO - sortear e somar.py | py | 481 | python | pt | code | 0 | github-code | 13 |
27964586360 | #%matplotlib inline
# useful additional packages
#import math tools
import numpy as np
# We import the tools to handle general Graphs
import networkx as nx
# We import plotting tools
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
# importing Qiskit
from qiskit import Aer, IBMQ
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute
from qiskit.providers.ibmq import least_busy
from qiskit.tools.monitor import job_monitor
from qiskit.visualization import plot_histogram
# Generating the butterfly graph with 5 nodes; edges are (u, v, weight) triples
n = 5
V = np.arange(0,n,1)
E =[(0,1,1.0),(0,2,1.0),(1,2,1.0),(3,2,1.0),(3,4,1.0),(4,2,1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
# Generate plot of the Graph
colors = ['r' for node in G.nodes()]
default_axes = plt.axes(frameon=True)
pos = nx.spring_layout(G)
nx.draw_networkx(G, node_color=colors, node_size=600, alpha=1, ax=default_axes, pos=pos)
# Evaluate the analytic p=1 expectation value F1(gamma, beta) on a grid
step_size = 0.1;
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma,a_beta)
F1 = 3-(np.sin(2*a_beta)**2*np.sin(2*a_gamma)**2-0.5*np.sin(4*a_beta)*np.sin(4*a_gamma))*(1+np.cos(4*a_gamma)**2)
# Grid search for the maximizing variables (np.amax below)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0],result[1]))[0]
gamma = a[0]*step_size;
beta = a[1]*step_size;
# Plot the expectation value F1
fig = plt.figure()
# NOTE(review): fig.gca(projection='3d') was removed in Matplotlib >= 3.6;
# newer versions need fig.add_subplot(projection='3d') -- confirm the pinned
# Matplotlib version.
ax = fig.gca(projection='3d')
surf = ax.plot_surface(a_gamma, a_beta, F1, cmap=cm.coolwarm, linewidth=0, antialiased=True)
ax.set_zlim(1,4)
ax.zaxis.set_major_locator(LinearLocator(3))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
plt.show()
plt.clf()
# The optimal parameters and the expectation can be extracted
print('\n --- OPTIMAL PARAMETERS --- \n')
print('The maximal expectation value is: M1 = %.03f' % np.amax(F1))
print('This is attained for gamma = %.03f and beta = %.03f' % (gamma,beta))
# Prepare the quantum and classical registers (one of each per graph node)
QAOA = QuantumCircuit(len(V), len(V))
# apply the layer of Hadamard gates to all qubits
QAOA.h(range(len(V)))
QAOA.barrier()
# apply the Ising type gates with angle gamma along the edges in E
# NOTE(review): cu1/u1 are long-deprecated Qiskit gate aliases (cp/p in
# current releases) -- confirm the pinned Qiskit version still ships them.
for edge in E:
    k = edge[0]
    l = edge[1]
    QAOA.cu1(-2*gamma, k, l)
    QAOA.u1(gamma, k)
    QAOA.u1(gamma, l)
# then apply the single qubit X - rotations with angle beta to all qubits
QAOA.barrier()
QAOA.rx(2*beta, range(len(V)))
# Finally measure the result in the computational basis
QAOA.barrier()
QAOA.measure(range(len(V)),range(len(V)))
### draw the circuit for comparison
QAOA.draw(output='mpl')
# Compute the value of the cost function
def cost_function_C(x, G):
    """Return the weighted Max-Cut objective C(x) for bitstring x on graph G.

    An edge contributes its weight when its endpoints lie on opposite sides
    of the cut. Returns NaN when len(x) does not match the node count.
    """
    if len(x) != len(G.nodes()):
        return np.nan
    total = 0
    for u, v in G.edges():
        w = G[u][v]['weight']
        total = total + w * x[u] * (1 - x[v]) + w * x[v] * (1 - x[u])
    return total
# run on local simulator and save the raw measurement histogram
backend = Aer.get_backend("qasm_simulator")
shots = 10000
simulate = execute(QAOA, backend=backend, shots=shots)
QAOA_results = simulate.result()
plot_histogram(QAOA_results.get_counts(),figsize = (8,6),bar_labels = False)
plt.savefig('Simulator_counts_1.png')
plt.clf()
# Evaluate the data from the simulator.
# NOTE: this block is duplicated below for the hardware run -- a shared helper
# would be a good refactor.
counts = QAOA_results.get_counts()
avr_C = 0
max_C = [0,0]
hist = {}
# Pre-seed the energy histogram with all possible cut values (0..|E|)
for k in range(len(G.edges())+1):
    hist[str(k)] = hist.get(str(k),0)
for sample in list(counts.keys()):
    # use sampled bit string x to compute C(x)
    # NOTE(review): Qiskit bitstrings are little-endian (qubit 0 rightmost);
    # confirm the index order matches the graph's node numbering.
    x = [int(num) for num in list(sample)]
    tmp_eng = cost_function_C(x,G)
    # compute the expectation value and energy distribution
    avr_C = avr_C + counts[sample]*tmp_eng
    hist[str(round(tmp_eng))] = hist.get(str(round(tmp_eng)),0) + counts[sample]
    # save best bit string
    if( max_C[1] < tmp_eng):
        max_C[0] = sample
        max_C[1] = tmp_eng
M1_sampled = avr_C/shots
print('\n --- SIMULATION RESULTS ---\n')
print('The sampled mean value is M1_sampled = %.02f while the true value is M1 = %.02f \n' % (M1_sampled,np.amax(F1)))
print('The approximate solution is x* = %s with C(x*) = %d \n' % (max_C[0],max_C[1]))
print('The cost function is distributed as: \n')
plot_histogram(hist,figsize = (8,6),bar_labels = False)
plt.savefig('Simulator_counts_2.png')
plt.clf()
# Use the IBMQ essex device (requires a stored IBMQ account)
provider = IBMQ.load_account()
backend = provider.get_backend('ibmq_essex')
shots = 2048
job_exp = execute(QAOA, backend=backend, shots=shots)
job_monitor(job_exp)  # blocks until the queued job completes
exp_results = job_exp.result()
plot_histogram(exp_results.get_counts(),figsize = (10,8),bar_labels = False)
plt.savefig('Essex_counts_1.png')
plt.clf()
# Evaluate the data from the experiment (same procedure as the simulator
# evaluation above, on hardware counts).
counts = exp_results.get_counts()
avr_C = 0
max_C = [0,0]
hist = {}
# Pre-seed the energy histogram with all possible cut values (0..|E|)
for k in range(len(G.edges())+1):
    hist[str(k)] = hist.get(str(k),0)
for sample in list(counts.keys()):
    # use sampled bit string x to compute C(x)
    x = [int(num) for num in list(sample)]
    tmp_eng = cost_function_C(x,G)
    # compute the expectation value and energy distribution
    avr_C = avr_C + counts[sample]*tmp_eng
    hist[str(round(tmp_eng))] = hist.get(str(round(tmp_eng)),0) + counts[sample]
    # save best bit string
    if( max_C[1] < tmp_eng):
        max_C[0] = sample
        max_C[1] = tmp_eng
M1_sampled = avr_C/shots
print('\n --- EXPERIMENTAL RESULTS ---\n')
print('The sampled mean value is M1_sampled = %.02f while the true value is M1 = %.02f \n' % (M1_sampled,np.amax(F1)))
print('The approximate solution is x* = %s with C(x*) = %d \n' % (max_C[0],max_C[1]))
print('The cost function is distributed as: \n')
plot_histogram(hist,figsize = (8,6),bar_labels = False)
plt.savefig('Essex_counts_2.png')
plt.clf()
| codecrap/QIProject | CopyPaste.py | CopyPaste.py | py | 5,965 | python | en | code | 3 | github-code | 13 |
74043927056 | # -*- coding: utf-8 -*-
"""Parametric Spatial Audio (PARSA).
.. plot::
:context: reset
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['axes.grid'] = True
import spaudiopy as spa
N_sph = 3
# Three sources
x_nm = spa.sph.src_to_sh(np.random.randn(3, 10000),
[np.pi/2, -np.pi/4, np.pi/3],
[np.pi/3, np.pi/2, 2/3 * np.pi], N_sph)
# Diffuse noise
x_nm += np.sqrt(16/(4*np.pi)) * np.random.randn(16, 10000)
spa.plot.sh_rms_map(x_nm, title="Input SHD Signal")
**Memory cached functions**
.. autofunction:: spaudiopy.parsa.pseudo_intensity(ambi_b, win_len=33, f_bp=None, smoothing_order=5, jobs_count=1)
.. autofunction:: spaudiopy.parsa.render_bsdm(sdm_p, sdm_phi, sdm_theta, hrirs, jobs_count=None)
"""
from itertools import repeat
from warnings import warn
import logging
import numpy as np
from joblib import Memory
import multiprocessing
from scipy import signal
from . import utils, sph
from . import process as pcs
# Prepare Caching
cachedir = './.spa_cache_dir'  # joblib cache directory, created on demand
memory = Memory(cachedir)
# Module-level state used by the multiprocessing-enabled helpers; the lock
# guards access to the shared array across workers.
shared_array = None
lock = multiprocessing.RLock()
def sh_beamformer_from_pattern(pattern, N_sph, azi_steer, zen_steer):
    """Get spherical harmonics domain (SHD) beamformer coefficients.

    Parameters
    ----------
    pattern : string , or (N+1, ) array_like
        Pattern description, e.g. `'cardioid'` or modal weights.
    N_sph : int
        SH order.
    azi_steer : (J,) array_like
        Azimuth steering directions.
    zen_steer : (J,) array_like
        Zenith/colatitude steering directions.

    Returns
    -------
    w_nm : (J, (N+1)**2) numpy.ndarray
        SHD Beamformer weights.

    Examples
    --------
    See :py:func:`spaudiopy.parsa.sh_beamform`.
    """
    if isinstance(pattern, str):
        if pattern.lower() in ['hypercardioid', 'max_di']:
            c_n = sph.hypercardioid_modal_weights(N_sph)
        elif pattern.lower() in ['cardioid', 'inphase']:
            c_n = sph.cardioid_modal_weights(N_sph)
        elif pattern.lower() in ['max_re', 'maxre']:
            c_n = sph.maxre_modal_weights(N_sph)
        else:
            raise ValueError("Pattern not available: " + pattern)
    else:
        # Fix: use the correctly named utils helper (asarray_1d, as used
        # elsewhere in this module); `asarray1d` does not exist.
        c_n = utils.asarray_1d(pattern)
    # Fix: format the array into the message instead of concatenating
    # str + ndarray, which itself raised TypeError when the assert fired.
    assert len(c_n) == (N_sph+1), f"Input not matching: {c_n}"
    w_nm = sph.repeat_per_order(c_n)
    Y_steer = sph.sh_matrix(N_sph, azi_steer, zen_steer, sh_type='real')
    return w_nm * Y_steer
def sh_beamform(w_nm, sig_nm):
    """Apply spherical harmonics domain (SHD) beamformer(s).

    Parameters
    ----------
    w_nm : ((N+1)**2,) array_like, or (J, (N+1)**2) np.ndarray
        SHD beamformer weights (for `J` beamformers).
    sig_nm : ((N+1)**2, l) np.ndarray
        SHD signal of length l.

    Returns
    -------
    y : (J, l) np.ndarray
        Beamformer output signals.

    Examples
    --------
    See :py:func:`spaudiopy.parsa.sh_beamformer_from_pattern` for obtaining
    the weights `w_nm`.
    """
    weights = np.atleast_2d(w_nm)
    sig = np.asarray(sig_nm)
    # a single SH coefficient vector is treated as one time frame
    if sig.ndim == 1:
        sig = sig[:, np.newaxis]
    return weights @ sig
def estimate_num_sources(cov_x, a=None, w=None):
    """Active source count estimate from signal covariance.

    Based on the ratio of consecutive eigenvalues.

    Parameters
    ----------
    cov_x : (L, L) numpy.2darray
        Signal covariance.
    a : float, optional
        Threshold condition (ratio), defaults to `1 + 2/len(cov_x)`
    w : (L,) array_like, optional
        Eigenvalues in ascending order; if given, `cov_x` is not used.

    Returns
    -------
    num_src_est : int
        Number of active sources estimate.

    Examples
    --------
    See :py:func:`spaudiopy.parsa.sh_music`.
    """
    eig_vals = np.linalg.eigvalsh(cov_x) if w is None else utils.asarray_1d(w)
    if a is None:
        a = 1 + 2 / len(eig_vals)
    # (near) equal eigenvalues: no dominant signal subspace present
    if np.var(eig_vals) < a:
        return 0
    # the first large jump between consecutive (ascending) eigenvalues
    # marks the noise / signal subspace split
    ratios = eig_vals[1:] / (eig_vals[:-1] + 10e-8)
    first_jump = np.argmax(ratios > a)
    return len(eig_vals) - 1 - first_jump
def separate_cov(cov_x, num_cut=None):
    """Separate Covariance matrix in signal and noise components.

    Parameters
    ----------
    cov_x : (L, L) numpy.2darray
        Covariance.
    num_cut : int, optional
        Split point of Eigenvalues, default: `parsa.estimate_num_sources()`.

    Returns
    -------
    S_pp : (L, L) numpy.2darray
        Signal covariance.
    S_nn : (L, L) numpy.2darray
        Noise (residual) covariance.

    Notes
    -----
    Signal model is :math:`S_x = S_p + S_n` .
    """
    assert cov_x.shape[0] == cov_x.shape[1]
    w, v = np.linalg.eigh(cov_x)  # eigenvalues in ascending order
    if num_cut is None:
        num_cut = estimate_num_sources([], w=w)
    if num_cut <= 0:
        # No active sources: everything is noise.
        # (fix: previously 'w_nn[-0:] = w_r' clobbered *all* eigenvalues
        # and 'v[:, -0:]' selected all columns for num_cut == 0)
        return np.zeros_like(cov_x), 1. * cov_x
    assert num_cut < len(w), "num_cut must leave a noise eigenvalue"
    # Flatten the num_cut largest eigenvalues down to the largest remaining
    # noise eigenvalue; the removed excess forms the signal covariance.
    w_nn = 1. * w
    w_r = w[-(num_cut+1)]
    w_nn[-num_cut:] = w_r
    S_nn = v @ np.diag(w_nn) @ v.T
    S_pp = v[:, -num_cut:] @ (np.diag(w[-num_cut:] - w_r)) @ v[:, -num_cut:].T
    return S_pp, S_nn
def sh_music(cov_x, num_src, dirs_azi, dirs_zen):
    """SH domain / Eigenbeam Multiple Signal Classification (EB-MUSIC).

    Parameters
    ----------
    cov_x : (L, L) numpy.2darray
        SH signal covariance.
    num_src : int
        Number of sources.
    dirs_azi : (g,) array_like
    dirs_zen : (g,) array_like

    Returns
    -------
    P_music : (g,) array_like
        MUSIC (pseudo-) spectrum evaluated on the given grid.
    """
    assert cov_x.shape[0] == cov_x.shape[1]
    order = int(np.sqrt(cov_x.shape[0]) - 1)
    azi = utils.asarray_1d(dirs_azi)
    zen = utils.asarray_1d(dirs_zen)
    steering = sph.sh_matrix(order, azi, zen, sh_type='real')
    # noise subspace: eigenvectors of the (L - num_src) smallest eigenvalues
    _, eig_vecs = np.linalg.eigh(cov_x)
    noise_space = eig_vecs[:, :-num_src]
    # project every steering vector onto the noise subspace; the spectrum
    # peaks where this projection (the denominator) vanishes
    proj = noise_space.T @ steering.T
    return 1 / (np.sum(proj * proj, 0) + 10e-12)
def sh_mvdr(cov_x, dirs_azi, dirs_zen):
    """Spherical Harmonics domain MVDR beamformer.

    SH / Eigenbeam domain minimum variance distortionless response (EB-MVDR).
    Often employed on signal `cov_x = S_xx`, instead of noise `cov_x = S_nn`,
    then called minimum power distortionless response (MPDR) beamformer.

    Parameters
    ----------
    cov_x : (L, L) numpy.2darray
        SH signal (noise) covariance.
    dirs_azi : (g,) array_like
    dirs_zen : (g,) array_like

    Returns
    -------
    W_nm : (g, L) numpy.2darray
        MVDR beampattern weights, one row per steering direction.

    References
    ----------
    Rafaely, B. (2015). Fundamentals of Spherical Array Processing. Springer.
    ch. 7.2.
    """
    assert cov_x.shape[0] == cov_x.shape[1]
    order = int(np.sqrt(cov_x.shape[0]) - 1)
    azi = utils.asarray_1d(dirs_azi)
    zen = utils.asarray_1d(dirs_zen)
    steering = sph.sh_matrix(order, azi, zen, sh_type='real')
    cov_inv = np.linalg.inv(cov_x)
    # w = (R^-1 y) / (y^H R^-1 y), evaluated for all directions at once
    num = steering @ cov_inv  # (g, L)
    denom = np.diag(steering @ cov_inv @ steering.conj().T)
    return num / denom[:, np.newaxis]
def sh_lcmv(cov_x, dirs_azi_c, dirs_zen_c, c_gain):
    """Spherical Harmonics domain LCMV beamformer.

    SH / Eigenbeam domain Linearly Constrained Minimum Variance (LCMV)
    beamformer.
    Often employed on signal `cov_x = S_xx`, instead of noise `cov_x = S_nn`,
    then called linearly constrained minimum power (LCMP) beamformer.

    Parameters
    ----------
    cov_x : (L, L) numpy.2darray
        SH signal (noise) covariance.
    dirs_azi_c : (g,) array_like
    dirs_zen_c : (g,) array_like
    c_gain : (g,) array_like
        Constraints (gain) on points `[dirs_azi_c, dirs_zen_c]`.

    Returns
    -------
    w_nm : (L,) array_like
        LCMV beampattern weights.

    References
    ----------
    Rafaely, B. (2015). Fundamentals of Spherical Array Processing. Springer.
    ch. 7.5.
    """
    assert cov_x.shape[0] == cov_x.shape[1]
    azi_c = utils.asarray_1d(dirs_azi_c)
    zen_c = utils.asarray_1d(dirs_zen_c)
    gains = utils.asarray_1d(c_gain)
    assert len(azi_c) == len(zen_c)
    assert len(azi_c) == len(gains)
    order = int(np.sqrt(cov_x.shape[0]) - 1)
    # constraint matrix V: one steering vector per column
    V = sph.sh_matrix(order, azi_c, zen_c, sh_type='real').T
    cov_inv = np.linalg.inv(cov_x)
    VtSi = V.T @ cov_inv
    # w = g^T (V^T R^-1 V)^-1 V^T R^-1
    return gains.T @ np.linalg.inv(VtSi @ V) @ VtSi
def sh_sector_beamformer(A_nm):
    """Get sector pressure and intensity beamformers.

    Parameters
    ----------
    A_nm : (J, (N+1)**2), np.ndarray
        SH beamformer matrix, see spa.sph.design_sph_filterbank().

    Returns
    -------
    A_wxyz : ((4*J), (N+2)**2)
        SH sector pattern beamformers, interleaved as (w, x, y, z) rows
        per sector.
    """
    num_sec = A_nm.shape[0]
    # multiplying with a first-order pattern raises the SH order by one
    n_coeffs_out = int(np.sqrt(A_nm.shape[1]) + 1)**2
    A_wxyz = np.zeros((4 * num_sec, n_coeffs_out))
    # first-order SH coefficients: omni (w) and the three dipoles x, y, z
    patterns = [np.sqrt(4*np.pi) * np.array([1., 0., 0., 0.]),
                np.sqrt(4/3*np.pi) * np.array([0., 0., 0., 1.]),
                np.sqrt(4/3*np.pi) * np.array([0., 1., 0., 0.]),
                np.sqrt(4/3*np.pi) * np.array([0., 0., 1., 0.])]
    for idx_s in range(num_sec):
        for idx_p, p_nm in enumerate(patterns):
            A_wxyz[4*idx_s + idx_p, :] = sph.sh_mult(p_nm, A_nm[idx_s, :],
                                                     'real')
    return A_wxyz
# part of parallel pseudo_intensity:
def _intensity_sample(i, W, X, Y, Z, win):
    """Worker: write the windowed pseudo-intensity vector for sample i.

    Runs inside a multiprocessing.Pool child; writes into the module-global
    'shared_array' set up by _init_shared_array(). Each call writes a
    distinct row, so no lock is required here.
    """
    buf = len(win)
    # global shared_array
    # I = p*v: integrate the windowed pressure-velocity products around i
    shared_array[int(i + buf // 2), :] = np.asarray(
        [np.trapz(win * W[i:i + buf] * X[i:i + buf]),
         np.trapz(win * W[i:i + buf] * Y[i:i + buf]),
         np.trapz(win * W[i:i + buf] * Z[i:i + buf])])
@memory.cache
def pseudo_intensity(ambi_b, win_len=33, f_bp=None, smoothing_order=5,
                     jobs_count=1):
    """Direction of arrival (DOA) for each time sample from pseudo-intensity.

    Results are disk-cached via joblib (module-level `memory`).

    Parameters
    ----------
    ambi_b : sig.AmbiBSignal
        Input signal, B-format.
    win_len : int, optional
        Sliding window length; must be odd so the window has a center.
    f_bp : tuple(f_lo, f_hi), optional
        Cutoff frequencies for bandpass, 'None' to disable.
    smoothing_order : int, optional
        Apply hanning(smoothing_order) smoothing to output; must be odd.
    jobs_count : int or None, optional
        Number of parallel jobs, 'None' employs 'cpu_count'.

    Returns
    -------
    I_azi, I_colat, I_r : array_like
        Pseudo intensity vector for each time sample.
    """
    # WIP
    if jobs_count is None:
        jobs_count = multiprocessing.cpu_count()
    assert(win_len % 2)
    win = np.hanning(win_len)
    fs = ambi_b.fs
    # Z_0 = 413.3
    # T_int = 1/fs * win_len
    # a = 1 / (np.sqrt(2) * T_int * Z_0)
    # get first order signals
    W = utils.asarray_1d(ambi_b.W)
    X = utils.asarray_1d(ambi_b.X)
    Y = utils.asarray_1d(ambi_b.Y)
    Z = utils.asarray_1d(ambi_b.Z)
    # Bandpass signals
    if f_bp is not None:
        f_lo = f_bp[0]
        f_hi = f_bp[1]
        b, a = signal.butter(N=2, Wn=(f_lo / (fs / 2), f_hi / (fs / 2)),
                             btype='bandpass')
        # zero-phase filtering keeps W/X/Y/Z time-aligned with each other
        W = signal.filtfilt(b, a, W)
        X = signal.filtfilt(b, a, X)
        Y = signal.filtfilt(b, a, Y)
        Z = signal.filtfilt(b, a, Z)
    # Initialize intensity vector (n x 3, Cartesian components)
    I_vec = np.c_[np.zeros(len(ambi_b)),
                  np.zeros(len(ambi_b)), np.zeros(len(ambi_b))]
    if jobs_count == 1:
        # I = p*v for each sample
        for i in range(len(ambi_b) - win_len):
            I_vec[int(i + win_len // 2), :] = np.asarray(
                [np.trapz(win * W[i:i + win_len] * X[i:i + win_len]),
                 np.trapz(win * W[i:i + win_len] * Y[i:i + win_len]),
                 np.trapz(win * W[i:i + win_len] * Z[i:i + win_len])])
    else:
        logging.info("Using %i processes..." % jobs_count)
        # preparation: workers write rows of a shared (n, 3) buffer
        shared_array_shape = np.shape(I_vec)
        _arr_base = _create_shared_array(shared_array_shape)
        _arg_itr = zip(range(len(ambi_b) - win_len),
                       repeat(W), repeat(X), repeat(Y), repeat(Z),
                       repeat(win))
        # execute
        with multiprocessing.Pool(processes=jobs_count,
                                  initializer=_init_shared_array,
                                  initargs=(_arr_base,
                                            shared_array_shape,)) as pool:
            pool.starmap(_intensity_sample, _arg_itr)
        # reshape (numpy view on the shared buffer, no copy)
        I_vec = np.frombuffer(_arr_base.get_obj()).reshape(
                                shared_array_shape)
    if smoothing_order > 0:
        assert(smoothing_order % 2)
        # smooth each Cartesian component over time
        I_vec = np.apply_along_axis(signal.convolve, 0, I_vec,
                                    np.hanning(smoothing_order), 'same')
    I_azi, I_colat, I_r = utils.cart2sph(I_vec[:, 0], I_vec[:, 1],
                                         I_vec[:, 2], steady_colat=True)
    return I_azi, I_colat, I_r
def render_stereo_sdm(sdm_p, sdm_phi, sdm_theta):
    """Stereophonic SDM Render IR, with a cos(phi) panning law.

    This is only meant for quick testing.

    Parameters
    ----------
    sdm_p : (n,) array_like
        Pressure p(t).
    sdm_phi : (n,) array_like
        Azimuth phi(t).
    sdm_theta : (n,) array_like
        Colatitude theta(t). Not used by this panning law; kept for a
        consistent SDM renderer interface.

    Returns
    -------
    ir_l : array_like
        Left impulse response.
    ir_r : array_like
        Right impulse response.
    """
    sdm_p = np.asarray(sdm_p)
    sdm_phi = np.asarray(sdm_phi)
    # cos(phi) law (unit gain at +/-90 deg azimuth), vectorized over all
    # samples instead of the per-sample Python loop; identical math.
    ir_l = sdm_p * (0.5 * (1 + np.cos(sdm_phi - np.pi/2)))
    ir_r = sdm_p * (0.5 * (1 + np.cos(sdm_phi + np.pi/2)))
    return ir_l, ir_r
# part of parallel render_bsdm:
def _render_bsdm_sample(i, p, phi, theta, hrirs):
    """Worker: overlap-add the scaled nearest HRIR pair at sample i.

    Runs inside a multiprocessing.Pool child; accumulates into the
    module-global (n_out, 2) 'shared_array' set up by _init_shared_array().
    """
    h_l, h_r = hrirs.nearest_hrirs(phi, theta)
    # global shared_array
    with lock:  # synchronize access, operator += needs lock!
        shared_array[i:i + len(h_l), 0] += p * h_l
        shared_array[i:i + len(h_r), 1] += p * h_r
@memory.cache
def render_bsdm(sdm_p, sdm_phi, sdm_theta, hrirs, jobs_count=1):
    """Binaural SDM Render.

    Convolves each sample with corresponding hrir. No Post-EQ.
    Results are disk-cached via joblib (module-level `memory`).

    Parameters
    ----------
    sdm_p : (n,) array_like
        Pressure p(t).
    sdm_phi : (n,) array_like
        Azimuth phi(t).
    sdm_theta : (n,) array_like
        Colatitude theta(t).
    hrirs : sig.HRIRs
    jobs_count : int or None, optional
        Number of parallel jobs, 'None' employs 'cpu_count'.

    Returns
    -------
    bsdm_l : array_like
        Left binaural impulse response.
    bsdm_r : array_like
        Right binaural impulse response.
    """
    if jobs_count is None:
        jobs_count = multiprocessing.cpu_count()
    # output is input length plus HRIR tail (full convolution length)
    bsdm_l = np.zeros(len(sdm_p) + len(hrirs) - 1)
    bsdm_r = np.zeros_like(bsdm_l)
    if jobs_count == 1:
        # sample-wise render: scale the nearest HRIR pair by the pressure
        # sample and overlap-add it at the sample position
        for i, (p, phi, theta) in enumerate(zip(sdm_p, sdm_phi, sdm_theta)):
            h_l, h_r = hrirs.nearest_hrirs(phi, theta)
            # convolve
            bsdm_l[i:i + len(h_l)] += p * h_l
            bsdm_r[i:i + len(h_r)] += p * h_r
    else:
        logging.info("Using %i processes..." % jobs_count)
        # preparation: workers accumulate into a shared (n_out, 2) buffer
        _shared_array_shape = np.shape(np.c_[bsdm_l, bsdm_r])
        _arr_base = _create_shared_array(_shared_array_shape)
        _arg_itr = zip(range(len(sdm_p)), sdm_p, sdm_phi, sdm_theta,
                       repeat(hrirs))
        # execute
        with multiprocessing.Pool(processes=jobs_count,
                                  initializer=_init_shared_array,
                                  initargs=(_arr_base,
                                            _shared_array_shape,)) as pool:
            pool.starmap(_render_bsdm_sample, _arg_itr)
        # reshape (numpy view on the shared buffer, no copy)
        _result = np.frombuffer(_arr_base.get_obj()).reshape(
            _shared_array_shape)
        bsdm_l = _result[:, 0]
        bsdm_r = _result[:, 1]
    return bsdm_l, bsdm_r
def render_binaural_loudspeaker_sdm(sdm_p, ls_gains, ls_setup, fs,
                                    post_eq_func='default', **kwargs):
    """Render sdm signal on loudspeaker setup as binaural synthesis.

    Parameters
    ----------
    sdm_p : (n,) array_like
        Pressure p(t).
    ls_gains : (n, l)
        Loudspeaker (l) gains.
    ls_setup : decoder.LoudspeakerSetup
    fs : int
    post_eq_func : None, 'default' or function
        Post EQ applied to the loudspeaker signals. 'default' calls
        'parsa.post_equalization', 'None' disables (not recommended).
        You can also provide your custom post-eq-function with the signature
        `post_eq_func(ls_sigs, sdm_p, fs, ls_setup, **kwargs)`.

    Returns
    -------
    ir_l : array_like
        Left binaural impulse response.
    ir_r : array_like
        Right binaural impulse response.
    """
    gains = np.atleast_2d(ls_gains)
    assert len(sdm_p) == gains.shape[0]
    # panned loudspeaker signals driven by the pressure signal
    ls_sigs = ls_setup.loudspeaker_signals(ls_gains=gains, sig_in=sdm_p)
    # post EQ compensates the spectral whitening of the SDM render
    if post_eq_func is None:
        warn("No post EQ applied!")
    elif post_eq_func == 'default':
        ls_sigs = post_equalization(ls_sigs, sdm_p, fs, ls_setup, **kwargs)
    else:  # user defined function
        ls_sigs = post_eq_func(ls_sigs, sdm_p, fs, ls_setup, **kwargs)
    return ls_setup.binauralize(ls_sigs, fs)
def post_equalization(ls_sigs, sdm_p, fs, ls_setup, soft_clip=True):
    """Post equalization to compensate spectral whitening.

    Block-wise STFT processing with band-dependent block sizes: per block,
    the loudspeaker signals' magnitude sum is compared against the
    reference pressure spectrum and the ratio is applied as a gain.

    Parameters
    ----------
    ls_sigs : (L, S) np.ndarray
        Input loudspeaker signals.
    sdm_p : array_like
        Reference (sdm) pressure signal.
    fs : int
    ls_setup : decoder.LoudspeakerSetup
    soft_clip : bool, optional
        Limit the compensation boost to +6dB.

    Returns
    -------
    ls_sigs_compensated : (L, S) np.ndarray
        Compensated loudspeaker signals.

    References
    ----------
    Tervo, S., et. al. (2015).
    Spatial Analysis and Synthesis of Car Audio System and Car Cabin Acoustics
    with a Compact Microphone Array. Journal of the Audio Engineering Society.
    """
    ls_distance = ls_setup.d  # ls distance
    a = ls_setup.a  # distance attenuation exponent
    CHECK_SANITY = False
    # prepare filterbank
    filter_gs, ff = pcs.frac_octave_filterbank(n=1, N_out=2**16,
                                               fs=fs, f_low=62.5, f_high=16000,
                                               mode='amplitude')
    # band dependent block size
    band_blocksizes = np.zeros(ff.shape[0])
    # proposed by Tervo (about 7 periods of the band's low edge frequency)
    band_blocksizes[1:] = np.round(7 / ff[1:, 0] * fs)
    band_blocksizes[0] = np.round(7 / ff[0, 1] * fs)
    # make sure they are even
    band_blocksizes = (np.ceil(band_blocksizes / 2) * 2).astype(int)
    padsize = band_blocksizes.max()
    ntaps = padsize // 2 - 1
    assert(ntaps % 2), "N does not produce uneven number of filter taps."
    # linear-phase FIR per band, designed from the filterbank magnitudes
    irs = np.zeros([filter_gs.shape[0], ntaps])
    for ir_idx, g_b in enumerate(filter_gs):
        irs[ir_idx, :] = signal.firwin2(ntaps, np.linspace(0, 1, len(g_b)),
                                        g_b)
    # prepare Input (zero pad both ends so every block fits)
    pad = np.zeros([ls_sigs.shape[0], padsize])
    x_padded = np.hstack([pad, ls_sigs, pad])
    p_padded = np.hstack([np.zeros(padsize), sdm_p, np.zeros(padsize)])
    ls_sigs_compensated = np.hstack([pad, np.zeros_like(x_padded), pad])
    # per-band accumulation buffer, summed over bands at the end
    ls_sigs_band = np.zeros([ls_sigs_compensated.shape[0],
                             ls_sigs_compensated.shape[1],
                             irs.shape[0]])
    assert(len(p_padded) == x_padded.shape[1])
    for band_idx in range(irs.shape[0]):
        blocksize = band_blocksizes[band_idx]
        hopsize = blocksize // 2
        # periodic Hann window for 50% overlap-add
        win = np.hanning(blocksize + 1)[0: -1]
        start_idx = 0
        while (start_idx + blocksize) <= x_padded.shape[1]:
            if CHECK_SANITY:
                dirac = np.zeros_like(irs)
                dirac[:, blocksize // 2] = np.sqrt(1/(irs.shape[0]))
            # blocks
            block_p = win * p_padded[start_idx: start_idx + blocksize]
            block_sdm = win[np.newaxis, :] * x_padded[:, start_idx:
                                                      start_idx + blocksize]
            # block spectra
            nfft = blocksize + blocksize - 1
            H_p = np.fft.fft(block_p, nfft)
            H_sdm = np.fft.fft(block_sdm, nfft, axis=1)
            # distance (refer loudspeaker signals back to the origin)
            spec_in_origin = np.diag(1 / ls_distance**a) @ H_sdm
            # magnitude difference by spectral division
            sdm_mag_incoherent = np.sqrt(np.sum(np.abs(spec_in_origin)**2,
                                                axis=0))
            sdm_mag_coherent = np.sum(np.abs(spec_in_origin), axis=0)
            # Coherent addition in the lows, incoherent in the highs,
            # cross-faded over the two transition bands
            if band_idx == 0:
                mag_diff = np.abs(H_p) / \
                    np.clip(sdm_mag_coherent, 10e-10, None)
            elif band_idx == 1:
                mag_diff = np.abs(H_p) / \
                    (0.5 * np.clip(sdm_mag_coherent, 10e-10, None) +
                     0.5 * np.clip(sdm_mag_incoherent, 10e-10, None))
            elif band_idx == 2:
                mag_diff = np.abs(H_p) / \
                    (0.25 * np.clip(sdm_mag_coherent, 10e-10, None) +
                     0.75 * np.clip(sdm_mag_incoherent, 10e-10, None))
            else:
                mag_diff = np.abs(H_p) / np.clip(sdm_mag_incoherent, 10e-10,
                                                 None)
            # soft clip gain
            if soft_clip:
                mag_diff = pcs.gain_clipping(mag_diff, 1)
            # apply to ls input
            Y = H_sdm * mag_diff[np.newaxis, :]
            # inverse STFT
            X = np.real(np.fft.ifft(Y, axis=1))
            # Zero Phase
            assert(np.mod(X.shape[1], 2))
            # delay (center the zero-phase response)
            zp_delay = X.shape[1] // 2
            X = np.roll(X, zp_delay, axis=1)
            # overlap add
            ls_sigs_band[:, padsize + start_idx - zp_delay:
                         padsize + start_idx - zp_delay + nfft,
                         band_idx] += X
            # increase pointer
            start_idx += hopsize
        # apply filter (restrict this band's contribution to its band)
        for ls_idx in range(ls_sigs.shape[0]):
            ls_sigs_band[ls_idx, :, band_idx] = signal.convolve(ls_sigs_band[
                                                                ls_idx, :,
                                                                band_idx],
                                                                irs[band_idx],
                                                                mode='same')
    # sum over bands
    ls_sigs_compensated = np.sum(ls_sigs_band, axis=2)
    # restore shape
    out_start_idx = int(2 * padsize)
    out_end_idx = int(-(2 * padsize))
    # NOTE(review): '[:, -out_end_idx]' below checks only the single column
    # at index 2*padsize; '[:, out_end_idx:]' (the trailing pad region)
    # looks intended -- confirm before changing.
    if np.any(np.abs(ls_sigs_compensated[:, :out_start_idx]) > 10e-5) or \
            np.any(np.abs(ls_sigs_compensated[:, -out_end_idx]) > 10e-5):
        warn('Truncated valid signal, consider more zero padding.')
    ls_sigs_compensated = ls_sigs_compensated[:, out_start_idx: out_end_idx]
    assert(ls_sigs_compensated.shape == ls_sigs.shape)
    return ls_sigs_compensated
def post_equalization2(ls_sigs, sdm_p, fs, ls_setup,
                       blocksize=4096, smoothing_order=5):
    """Post equalization to compensate spectral whitening. This alternative
    version works on fixed blocksizes with octave band gain smoothing.

    Sonically, this seems not the preferred version, but it can gain some
    insight through the band gains which are returned.

    Parameters
    ----------
    ls_sigs : (L, S) np.ndarray
        Input loudspeaker signals.
    sdm_p : array_like
        Reference (sdm) pressure signal.
    fs : int
    ls_setup : decoder.LoudspeakerSetup
    blocksize : int
    smoothing_order : int
        Block smoothing, increasing Hanning window up to this order.

    Returns
    -------
    ls_sigs_compensated : (L, S) np.ndarray
        Compensated loudspeaker signals.
    band_gains_list : list
        Each element contains the octave band gain applied as post eq.
    """
    ls_distance = ls_setup.d  # ls distance
    a = ls_setup.a  # distance attenuation exponent
    CHECK_SANITY = False
    hopsize = blocksize // 2
    # periodic Hann window for 50% overlap-add
    win = np.hanning(blocksize + 1)[0: -1]
    # prepare Input (zero pad both ends so every block fits)
    pad = np.zeros([ls_sigs.shape[0], blocksize])
    x_padded = np.hstack([pad, ls_sigs, pad])
    p_padded = np.hstack([np.zeros(blocksize), sdm_p, np.zeros(blocksize)])
    ls_sigs_compensated = np.hstack([pad, np.zeros_like(x_padded), pad])
    assert(len(p_padded) == x_padded.shape[1])
    # prepare filterbank
    filter_gs, ff = pcs.frac_octave_filterbank(n=1, N_out=blocksize//2 + 1,
                                               fs=fs, f_low=62.5, f_high=16000)
    ntaps = blocksize+1
    assert(ntaps % 2), "N does not produce uneven number of filter taps."
    # linear-phase FIR per band, designed from the filterbank magnitudes
    irs = np.zeros([filter_gs.shape[0], ntaps])
    for ir_idx, g_b in enumerate(filter_gs):
        irs[ir_idx, :] = signal.firwin2(ntaps, np.linspace(0, 1, len(g_b)),
                                        g_b)
    band_gains_list = []
    start_idx = 0
    while (start_idx + blocksize) <= x_padded.shape[1]:
        if CHECK_SANITY:
            dirac = np.zeros_like(irs)
            dirac[:, blocksize//2] = np.sqrt(1/(irs.shape[0]))
        # blocks
        block_p = win * p_padded[start_idx: start_idx + blocksize]
        block_sdm = win[np.newaxis, :] * x_padded[:, start_idx:
                                                  start_idx + blocksize]
        # block mags
        p_mag = np.sqrt(np.abs(np.fft.rfft(block_p))**2)
        # refer loudspeaker spectra back to the origin (distance gain)
        sdm_H = np.diag(1 / ls_distance**a) @ np.fft.rfft(block_sdm, axis=1)
        sdm_mag_incoherent = np.sqrt(np.sum(np.abs(sdm_H)**2, axis=0))
        sdm_mag_coherent = np.sum(np.abs(sdm_H), axis=0)
        assert(len(p_mag) == len(sdm_mag_incoherent) == len(sdm_mag_coherent))
        # get gains (per-octave-band levels of reference vs. render)
        L_p = pcs.subband_levels(filter_gs * p_mag, ff[:, 2] - ff[:, 0], fs)
        L_sdm_incoherent = pcs.subband_levels(filter_gs * sdm_mag_incoherent,
                                              ff[:, 2] - ff[:, 0], fs)
        L_sdm_coherent = pcs.subband_levels(filter_gs * sdm_mag_coherent,
                                            ff[:, 2] - ff[:, 0], fs)
        with np.errstate(divide='ignore', invalid='ignore'):
            band_gains_incoherent = L_p / L_sdm_incoherent
            band_gains_coherent = L_p / L_sdm_coherent
        # silent bands produce 0/0 -> leave them unchanged (gain 1)
        band_gains_incoherent[np.isnan(band_gains_incoherent)] = 1
        band_gains_coherent[np.isnan(band_gains_coherent)] = 1
        # clip gains (attenuation only, no boost)
        gain_clip = 1
        band_gains_incoherent = np.clip(band_gains_incoherent, None, gain_clip)
        band_gains_coherent = np.clip(band_gains_coherent, None, gain_clip)
        # attenuate lows (coherent); cross-fade to incoherent in the highs
        band_gains = np.zeros_like(band_gains_coherent)
        band_gains[0] = band_gains_coherent[0]
        band_gains[1] = 0.5 * band_gains_coherent[1] + \
            0.5 * band_gains_incoherent[1]
        band_gains[2] = 0.25 * band_gains_coherent[2] + \
            0.75 * band_gains_incoherent[2]
        band_gains[3:] = band_gains_incoherent[3:]
        # gain smoothing over blocks
        if len(band_gains_list) > 0:
            # half-sided window, increasing in size
            current_order = min(smoothing_order, len(band_gains_list))
            w = np.hanning(current_order * 2 + 1)[-(current_order + 1): -1]
            # normalize
            w = w / w.sum()
            band_gains_smoothed = w[0] * band_gains  # current
            for order_idx in range(1, current_order):
                band_gains_smoothed += w[order_idx] * \
                    band_gains_list[-order_idx]
        else:
            band_gains_smoothed = band_gains
        band_gains_list.append(band_gains_smoothed)
        for ls_idx in range(ls_sigs.shape[0]):
            # prepare output (room for forward + backward filter tails)
            X = np.zeros([irs.shape[0], blocksize + 2 * (irs.shape[1] - 1)])
            # Transform (split the block into bands)
            for band_idx in range(irs.shape[0]):
                if not CHECK_SANITY:
                    X[band_idx, :blocksize + irs.shape[1] - 1] = \
                        signal.convolve(block_sdm[ls_idx, :], irs[band_idx, :])
                else:
                    X[band_idx, :blocksize + irs.shape[1] - 1] = \
                        signal.convolve(block_sdm[ls_idx, :],
                                        dirac[band_idx, :])
            # Apply gains
            if not CHECK_SANITY:
                X = band_gains[:, np.newaxis] * X
            else:
                X = X
            # Inverse, with zero phase (filter again time-reversed)
            for band_idx in range(irs.shape[0]):
                if not CHECK_SANITY:
                    X[band_idx, :] = np.flip(signal.convolve(
                        np.flip(X[band_idx, :blocksize + irs.shape[1] - 1]),
                        irs[band_idx, :]))
                else:
                    X[band_idx, :] = np.flip(signal.convolve(
                        np.flip(X[band_idx, :blocksize + irs.shape[1] - 1]),
                        dirac[band_idx, :]))
            # overlap add
            ls_sigs_compensated[ls_idx,
                                start_idx + blocksize - (irs.shape[1] - 1):
                                start_idx + 2 * blocksize +
                                (irs.shape[1] - 1)] += np.sum(X, axis=0)
        # increase pointer
        start_idx += hopsize
    # restore shape
    out_start_idx = 2 * blocksize
    out_end_idx = -(2 * blocksize)
    # NOTE(review): '[:, -out_end_idx]' below sums only the single column at
    # index 2*blocksize; '[:, out_end_idx:]' (the trailing pad region) looks
    # intended -- confirm before changing.
    if (np.sum(np.abs(ls_sigs_compensated[:, :out_start_idx])) +
            np.sum(np.abs(ls_sigs_compensated[:, -out_end_idx]))) > 10e-3:
        warn('Truncated valid signal, consider more zero padding.')
    ls_sigs_compensated = ls_sigs_compensated[:, out_start_idx: out_end_idx]
    assert(ls_sigs_compensated.shape == ls_sigs.shape)
    return ls_sigs_compensated, band_gains_list[2:-2]
# Parallel worker stuff -->
def _create_shared_array(shared_array_shape, d_type='d'):
"""Allocate ctypes array from shared memory with lock."""
shared_array_base = multiprocessing.Array(d_type, shared_array_shape[0] *
shared_array_shape[1])
return shared_array_base
def _init_shared_array(shared_array_base, shared_array_shape):
    """Make 'shared_array' available to child processes.

    Used as a multiprocessing.Pool initializer: wraps the shared ctypes
    buffer as a numpy view (no copy) and publishes it as the module-global
    'shared_array' inside each worker process.
    """
    global shared_array
    shared_array = np.frombuffer(shared_array_base.get_obj())
    shared_array = shared_array.reshape(shared_array_shape)
# < --Parallel worker stuff
| chris-hld/spaudiopy | spaudiopy/parsa.py | parsa.py | py | 33,008 | python | en | code | 118 | github-code | 13 |
20203559763 | from typing import *
import os
import json
def is_facade_path(name: str) -> bool:
    """Return True if NNStructure/<name> is a directory containing facade.py."""
    candidate = os.path.join("NNStructure", name)
    if not os.path.isdir(candidate):
        return False
    return "facade.py" in os.listdir(candidate)
return False
def find_facades() -> List[str]:
    """Return all NNStructure entries that look like facade packages."""
    entries = os.listdir("NNStructure")
    return [entry for entry in entries if is_facade_path(entry)]
# map player names to paths to config files
def find_players() -> Dict[str, str]:
    """Scan Models/* for config.json files and map player name -> folder.

    Folders without a readable/valid config.json or without a truthy
    'name' entry are skipped silently (best-effort scan, as before).

    Raises
    ------
    FileNotFoundError
        If the 'Models' directory itself does not exist (unchanged).
    """
    result: Dict[str, str] = {}
    for folder in os.listdir("Models"):
        folder_path = os.path.join("Models", folder)
        if not os.path.isdir(folder_path):
            continue
        try:
            # 'with' guarantees the handle is closed even on parse errors;
            # json.load replaces the read + ''.join + loads round trip
            with open(os.path.join(folder_path, "config.json"), 'r') as config:
                data = json.load(config)
            name = data['name']
        except (OSError, ValueError, KeyError, TypeError):
            # missing/unreadable config, malformed JSON, or no 'name' key;
            # narrow exceptions replace the bare 'except' which also
            # swallowed KeyboardInterrupt/SystemExit
            continue
        if name:
            result[name] = folder
    return result
| KennelTeam/Tic-Tac-Toe-Player | utils/nn_iterator.py | nn_iterator.py | py | 1,018 | python | en | code | 0 | github-code | 13 |
8671032304 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from ProMP import ProMP,ProMPTuner
# ----------- Import position and orientation trajectories -----#
# Load a recorded demonstration; 'latin1' decodes Python-2 era pickles.
# NOTE(review): pickle.loads on an external file executes arbitrary code if
# the file is untrusted -- acceptable here only for local recordings.
with open('/root/catkin_ws/MP/MP.txt', 'rb') as handle_1:
    data = handle_1.read()
data = pickle.loads(data,encoding='latin1')
# assumes data['pos'] is (nt, >=2) and data['ori'] is (nt, >=1) -- verify
position = np.array(data['pos'])
orientation = np.array(data['ori'])
nt = position.shape[0]
X_coord = position[:,0]
Y_coord = position[:,1]
angle = orientation[:,0]
# ----------- Plot original trajectories --------- #
# fig, axarr = plt.subplots(1, 3, figsize=(8, 3))
# axarr[0].tick_params(axis='both', labelsize=5)
# axarr[1].tick_params(axis='both', labelsize=5)
# axarr[2].tick_params(axis='both', labelsize=5)
# fig.suptitle('X,Y,angle before MP ', fontweight="bold")
# plt.sca(axarr[0])
# plt.plot(X_coord, 'c', label='X', linewidth=1.5)
# plt.legend(loc=1, fontsize='x-small')
# plt.grid()
# plt.sca(axarr[1])
# plt.plot(Y_coord, 'c', label='Y', linewidth=1.5)
# plt.legend(loc=1, fontsize='x-small')
# plt.grid()
# plt.sca(axarr[2])
# plt.plot(angle, 'c', label='angle', linewidth=1.5)
# plt.legend(loc=1, fontsize='x-small')
# plt.grid()
#plt.show()
# ----------- MP TUNER --------- #
# Sweep the number of basis functions per dimension; the N_BASIS_* values
# below are hard-coded from inspecting the tuner output.
N_T = nt
N_DOF = 1
N_BASIS = 10
promp = ProMP(N_BASIS, N_DOF, N_T)
print(' Tuner X coordinate')
promp_tuner = ProMPTuner(np.expand_dims(np.expand_dims(X_coord, axis = 0).T, axis=0), promp)
promp_tuner.tune_n_basis(min=2, max=20, step=1)
N_BASIS_X = 9
print(' Tuner Y coordinate')
promp_tuner = ProMPTuner(np.expand_dims(np.expand_dims(Y_coord, axis = 0).T, axis=0), promp)
promp_tuner.tune_n_basis(min=2, max=20, step=1)
N_BASIS_Y = 10
print(' Tuner angle coordinate')
promp_tuner = ProMPTuner(np.expand_dims(np.expand_dims(angle, axis = 0).T, axis=0), promp)
promp_tuner.tune_n_basis(min=2, max=20, step=1)
N_BASIS_angle = 17
# ----------- MP fit --------- #
# Compute ProMP weights and reconstruct the trajectory : X
promp_X = ProMP(N_BASIS_X, N_DOF, N_T)
weights_X = promp_X.weights_from_trajectory(np.expand_dims(X_coord, axis = 0).T)
X_coord_MP = promp_X.trajectory_from_weights(weights_X)
# Compute ProMP weights and reconstruct the trajectory : Y
promp_Y = ProMP(N_BASIS_Y, N_DOF, N_T)
weights_Y = promp_Y.weights_from_trajectory(np.expand_dims(Y_coord, axis = 0).T)
Y_coord_MP = promp_Y.trajectory_from_weights(weights_Y)
# Compute ProMP weights and reconstruct the trajectory : angle
promp_angle = ProMP(N_BASIS_angle, N_DOF, N_T)
weights_angle = promp_angle.weights_from_trajectory(np.expand_dims(angle, axis = 0).T)
angle_MP = promp_angle.trajectory_from_weights(weights_angle)
# ----------- MP original and reconstructed Plot --------- #
fig, axarr = plt.subplots(1, 3, figsize=(8, 3))
axarr[0].tick_params(axis='both', labelsize=5)
axarr[1].tick_params(axis='both', labelsize=5)
axarr[2].tick_params(axis='both', labelsize=5)
fig.suptitle('X,Y,angle after MP ', fontweight="bold")
# NOTE(review): time axis hard-codes 200 samples -- assumes nt == 200;
# confirm, otherwise the plots below would fail or mislabel the x axis
t = np.linspace(0, 200, 200)
plt.sca(axarr[0])
plt.plot(t, X_coord_MP, 'r', label='X MP', linewidth=1.5)
plt.plot(t, X_coord, 'c', label='X', linewidth=1.5)
plt.legend(loc=1, fontsize='x-small')
plt.grid()
plt.sca(axarr[1])
plt.plot(t, Y_coord_MP, 'r', label='Y MP', linewidth=1.5)
plt.plot(t, Y_coord, 'c', label='Y', linewidth=1.5)
plt.legend(loc=1, fontsize='x-small')
plt.grid()
plt.sca(axarr[2])
plt.plot(t, angle_MP, 'r', label='angle MP', linewidth=1.5)
plt.plot(t, angle, 'c', label='angle', linewidth=1.5)
plt.legend(loc=1, fontsize='x-small')
plt.grid()
plt.show()
# ----------- Weights and basis functions Plot --------- #
# top row: the basis functions per dimension; bottom row: fitted weights
fig, axarr = plt.subplots(2, 3, figsize=(20, 10))
plt.sca(axarr[0,0])
all_phi_X = promp_X.all_phi()
for i in range(N_BASIS_X):
    plt.plot(t, all_phi_X[:, i])
plt.grid()
plt.sca(axarr[0,1])
all_phi_Y = promp_Y.all_phi()
for i in range(N_BASIS_Y):
    plt.plot(t, all_phi_Y[:, i])
plt.grid()
plt.sca(axarr[0,2])
all_phi_angle = promp_angle.all_phi()
for i in range(N_BASIS_angle):
    plt.plot(t, all_phi_angle[:, i])
plt.grid()
plt.sca(axarr[1,0])
x = np.linspace(1, N_T, num=N_BASIS_X)
plt.bar(x, weights_X, align='center', alpha=0.8, ecolor='red', color=(1, 0, 0, .4), capsize=5)
plt.grid()
plt.sca(axarr[1,1])
x = np.linspace(1, N_T, num=N_BASIS_Y)
plt.bar(x, weights_Y, align='center', alpha=0.8, ecolor='red', color=(1, 0, 0, .4), capsize=5)
plt.grid()
plt.sca(axarr[1,2])
x = np.linspace(1, N_T, num=N_BASIS_angle)
plt.bar(x, weights_angle, align='center', alpha=0.8, ecolor='red', color=(1, 0, 0, .4), capsize=5)
plt.grid()
plt.show()
| TAFFI98/Real2Sim_ROS_Doosan | Projects/MP/MP.py | MP.py | py | 4,500 | python | en | code | 0 | github-code | 13 |
6427574903 | #!/bin/env python3
import numpy as np
import sys
sys.path.insert(0, '../src')
# import own modules
import complexes
import reactions
import datareader
import evaluator
# read experimental data
# (oligomer/EDC concentration time series; *_err are measurement errors)
times_exp, map_oligos_exp, c_oligos_exp, c_oligos_exp_err, c_EDC_exp, c_EDC_exp_err = \
        datareader.read_experimental_data_T25_EDC10()
# set system parameters
# maximum length of considered oligomer
n_max = 7
# length of template
Lt = 0
# maximum number of oligomers hybridized to template
degree_total = 0
# maximum number of O-Acylisourea oligomers hybridized to template
degree_O = 0
# maximum number of N-Acylisourea oligomers hybridized to template
degree_N = 0
# initialize table listing all chemical compounds of interest
table_sol = complexes.generate_table_full_complexity(n_max, Lt, \
        degree_total, degree_O, degree_N)
# list all chemical reactions in solution
acts_sol, acts_sol_humanreadable = reactions.list_activations_solution(n_max, table_sol)
ligs_sol, ligs_sol_humanreadable = reactions.list_ligations_solution(n_max, table_sol)
losses_sol, losses_sol_humanreadable = reactions.list_losses_solution(n_max, table_sol)
hydros_sol, hydros_sol_humanreadable = reactions.list_hydrolysis_solution(n_max, table_sol)
cuts_sol, cuts_sol_humanreadable = reactions.list_cleavages_solution(n_max, table_sol)
# initial concentration
# (vector over all species in table_sol; the last entry is EDC)
c_full_initial = np.zeros(len(table_sol)+1)
# monomer concentration
c_full_initial[table_sol['1,_,0']] = 25.
# EDC concentration
c_full_initial[-1] = 10.
# read reaction rate constant obtained via curve fit
ks = np.loadtxt('./rate_constants.txt')
# compute the time-evolution
# (np.log2(ks): the model is parameterized in log2 rate constants,
# matching the curve-fit output -- verify against evaluator)
times_theo, c_oligos_theo, map_oligos_theo, c_EDC_theo, res = \
        evaluator.model_solution_scalar(np.log2(ks), c_full_initial, times_exp, \
        c_oligos_exp, map_oligos_exp, c_EDC_exp, acts_sol, ligs_sol, losses_sol, \
        hydros_sol, cuts_sol, table_sol, n_max, True, False)
# plot the time-evolution
evaluator.plot_trajectories_solution(times_exp, c_oligos_exp, map_oligos_exp, c_oligos_exp_err, \
        c_EDC_exp, c_EDC_exp_err, times_theo, c_oligos_theo, map_oligos_theo, c_EDC_theo, \
        True, "./timeevolution.pdf")
| gerland-group/ChemicallyFueledOligomers | without_template__length-independent_rate_constants/compute_timeevolution.py | compute_timeevolution.py | py | 2,147 | python | en | code | 0 | github-code | 13 |
32294897253 | """
Task
Given two integers a and b, find their least common multiple.
Input Format: The two integers a and b are given in the same line separated by space.
Constraints: 1 ≤ a, b ≤ 10**7.
Output Format: Output the least common multiple of a and b.
"""
def simple_numbers__iterator(stop=2):
    """Yield the prime ("simple") numbers up to and including *stop*.

    Trial division against the primes found so far.

    Bug fixed: the original never appended to ``remembered`` and used
    ``return`` (which terminates the whole generator) instead of skipping
    a single composite candidate, so it actually yielded every odd number
    rather than only primes.
    """
    if stop >= 2:
        yield 2
    remembered = [2]  # primes found so far, used as trial divisors
    for candidate in range(3, stop + 1, 2):
        if all(candidate % p != 0 for p in remembered):
            remembered.append(candidate)
            yield candidate
# def multipliers__iterator(n):
# current = n
# for simple_number in simple_numbers__iterator(n):
# while current % simple_number == 0:
# yield simple_number
# current /= simple_number
def lcm(a, b):
    """Return the least common multiple of positive integers *a* and *b*.

    Uses the identity lcm(a, b) = a * b // gcd(a, b), which runs in
    O(log min(a, b)) time.

    Fixes vs. the original trial-division loop: it used float division
    (``current_a /= simple_number``), risking precision loss, and scanned
    every odd number up to max(a, b) — far too slow for the stated input
    bound of 10**7.
    """
    from math import gcd
    return a * b // gcd(a, b)
if __name__ == '__main__':
    # Read two integers from one line of stdin and print their LCM.
    a, b = map(int, input().split())
    print(lcm(a, b))
| boloninanajulia/challanges | lcm.py | lcm.py | py | 1,489 | python | en | code | 0 | github-code | 13 |
4320896031 | ##############################################################################
# Copyright (C) 2018, 2019, 2020 Dominic O'Kane
##############################################################################
from .error import FinError
from .date import Date
from .calendar import (Calendar, CalendarTypes)
from .calendar import (BusDayAdjustTypes, DateGenRuleTypes)
from .frequency import (annual_frequency, FrequencyTypes)
from .helpers import label_to_string
from .helpers import check_argument_types
###############################################################################
# TODO: Start and end date to allow for long stubs
###############################################################################
class Schedule:
    """ A schedule is a set of dates generated according to ISDA standard
    rules which starts on the next date after the effective date and runs up to
    a termination date. Dates are adjusted to a provided calendar. The zeroth
    element is the previous coupon date (PCD) and the first element is the
    Next Coupon Date (NCD). We reference ISDA 2006."""

    def __init__(self,
                 effective_date: Date,  # Also known as the start date
                 # This is UNADJUSTED (set flag to adjust it)
                 termination_date: Date,
                 freq_type: FrequencyTypes = FrequencyTypes.ANNUAL,
                 calendar_type: CalendarTypes = CalendarTypes.WEEKEND,
                 bus_day_adjust_type: BusDayAdjustTypes = BusDayAdjustTypes.FOLLOWING,
                 date_gen_rule_type: DateGenRuleTypes = DateGenRuleTypes.BACKWARD,
                 adjust_termination_date: bool = True,  # Default is to adjust
                 end_of_month: bool = False,  # All flow dates are EOM if True
                 first_date=None,  # First coupon date
                 next_to_last_date=None):  # Penultimate coupon date
        """ Create Schedule object which calculates a sequence of dates
        following the ISDA convention for fixed income products, mainly swaps.

        If the date gen rule type is FORWARD we get the unadjusted dates by stepping
        forward from the effective date in steps of months determined by the period
        tenor - i.e. the number of months between payments. We stop before we go past the
        termination date.

        If the date gen rule type is BACKWARD we get the unadjusted dates by
        stepping backward from the termination date in steps of months determined by
        the period tenor - i.e. the number of months between payments. We stop
        before we go past the effective date.

        - If the EOM flag is false, and the start date is on the 31st then the
        the unadjusted dates will fall on the 30 if a 30 is a previous date.
        - If the EOM flag is false and the start date is 28 Feb then all
        unadjusted dates will fall on the 28th.
        - If the EOM flag is false and the start date is 28 Feb then all
        unadjusted dates will fall on their respective EOM.

        We then adjust all of the flow dates if they fall on a weekend or holiday
        according to the calendar specified. These dates are adjusted in
        accordance with the business date adjustment.

        The effective date is never adjusted as it is not a payment date.
        The termination date is not automatically business day adjusted in a
        swap - assuming it is a holiday date. This must be explicitly stated in
        the trade confirm. However, it is adjusted in a CDS contract as standard.

        Inputs first_date and next_to_last_date are for managing long payment stubs
        at the start and end of the swap but *have not yet been implemented*. All
        stubs are currently short, either at the start or end of swap. """

        check_argument_types(self.__init__, locals())

        if effective_date >= termination_date:
            raise FinError("Effective date must be before termination date.")

        self._effective_date = effective_date
        self._termination_date = termination_date

        if first_date is None:
            self._first_date = effective_date
        else:
            # Chained comparison is equivalent to the original pair of tests.
            if effective_date < first_date < termination_date:
                self._first_date = first_date
                print("FIRST DATE NOT IMPLEMENTED")  # TODO
            else:
                raise FinError("First date must be after effective date and" +
                               " before termination date")

        if next_to_last_date is None:
            self._next_to_last_date = termination_date
        else:
            if effective_date < next_to_last_date < termination_date:
                self._next_to_last_date = next_to_last_date
                print("NEXT TO LAST DATE NOT IMPLEMENTED")  # TODO
            else:
                raise FinError("Next to last date must be after effective date and" +
                               " before termination date")

        self._freq_type = freq_type
        self._calendar_type = calendar_type
        self._bus_day_adjust_type = bus_day_adjust_type
        self._date_gen_rule_type = date_gen_rule_type

        self._adjust_termination_date = adjust_termination_date

        # check_argument_types already guarantees a bool here; the original
        # `if end_of_month is True: ... else: ...` branch was redundant.
        self._end_of_month = bool(end_of_month)

        self._adjusted_dates = None

        self._generate()

###############################################################################

    def schedule_dates(self):
        """ Returns a list of the schedule of Dates. """

        # Lazily (re)generate if the schedule has not been built yet.
        if self._adjusted_dates is None:
            self._generate()

        return self._adjusted_dates

###############################################################################

    def _generate(self):
        """ Generate schedule of dates according to specified date generation
        rules and also adjust these dates for holidays according to the
        specified business day convention and the specified calendar. """

        calendar = Calendar(self._calendar_type)
        frequency = annual_frequency(self._freq_type)
        num_months = int(12 / frequency)

        unadjusted_schedule_dates = []
        self._adjusted_dates = []

        if self._date_gen_rule_type == DateGenRuleTypes.BACKWARD:

            next_date = self._termination_date
            flow_num = 0

            while next_date > self._effective_date:
                unadjusted_schedule_dates.append(next_date)
                tot_num_months = num_months * (1 + flow_num)
                next_date = self._termination_date.add_months(-tot_num_months)

                if self._end_of_month:
                    next_date = next_date.eom()

                flow_num += 1

            # Add on the Previous Coupon Date
            unadjusted_schedule_dates.append(next_date)
            flow_num += 1

            # reverse order and holiday adjust dates
            # the first date is not adjusted as this was provided
            dt = unadjusted_schedule_dates[flow_num - 1]
            self._adjusted_dates.append(dt)

            # We adjust all flows after the effective date and before the
            # termination date to fall on business days according to their cal
            for i in range(1, flow_num - 1):
                dt = calendar.adjust(unadjusted_schedule_dates[flow_num - i - 1],
                                     self._bus_day_adjust_type)

                self._adjusted_dates.append(dt)

            self._adjusted_dates.append(self._termination_date)

        elif self._date_gen_rule_type == DateGenRuleTypes.FORWARD:

            # This needs checking
            next_date = self._effective_date
            flow_num = 0

            unadjusted_schedule_dates.append(next_date)
            flow_num = 1

            while next_date < self._termination_date:
                unadjusted_schedule_dates.append(next_date)
                tot_num_months = num_months * (flow_num)
                next_date = self._effective_date.add_months(tot_num_months)
                flow_num = flow_num + 1

            # The effective date is not adjusted as it is given
            for i in range(1, flow_num):
                dt = calendar.adjust(unadjusted_schedule_dates[i],
                                     self._bus_day_adjust_type)

                self._adjusted_dates.append(dt)

            self._adjusted_dates.append(self._termination_date)

        if self._adjusted_dates[0] < self._effective_date:
            self._adjusted_dates[0] = self._effective_date

        # The market standard for swaps is not to adjust the termination date
        # unless it is specified in the contract. It is standard for CDS.
        # We change it if the adjust_termination_date flag is True.
        if self._adjust_termination_date:
            self._termination_date = calendar.adjust(self._termination_date,
                                                     self._bus_day_adjust_type)

            self._adjusted_dates[-1] = self._termination_date

        #######################################################################
        # Check the resulting schedule to ensure that no two dates are the
        # same in which case we remove the duplicate and that they are
        # monotonic - this should never happen but ...
        #######################################################################

        if len(self._adjusted_dates) < 2:
            raise FinError("Schedule has two dates only.")

        prev_dt = self._adjusted_dates[0]
        for dt in self._adjusted_dates[1:]:
            # if the first date lands on the effective date then remove it
            # NOTE(review): this always pops index 0, wherever the duplicate
            # is found - looks intentional for the PCD case but verify before
            # relying on it for mid-schedule duplicates.
            if dt == prev_dt:
                self._adjusted_dates.pop(0)

            if dt < prev_dt:  # Dates must be ordered
                raise FinError("Dates are not monotonic")

            prev_dt = dt

        #######################################################################

        return self._adjusted_dates

##############################################################################

    def __repr__(self):
        """ Print out the details of the schedule and the actual dates. This
        can be used for providing transparency on schedule calculations. """

        s = label_to_string("OBJECT TYPE", type(self).__name__)
        s += label_to_string("EFFECTIVE DATE", self._effective_date)
        s += label_to_string("END DATE", self._termination_date)
        s += label_to_string("FREQUENCY", self._freq_type)
        s += label_to_string("CALENDAR", self._calendar_type)
        s += label_to_string("BUSDAYRULE", self._bus_day_adjust_type)
        s += label_to_string("DATEGENRULE", self._date_gen_rule_type)
        s += label_to_string("ADJUST TERM DATE", self._adjust_termination_date)
        s += label_to_string("END OF MONTH", self._end_of_month, "")

        # An `if 1 == 0:` block that appended the adjusted dates used to live
        # here; it could never execute and has been removed as dead code.

        return s

###############################################################################

    def _print(self):
        """ Print out the details of the schedule and the actual dates. This
        can be used for providing transparency on schedule calculations. """
        print(self)
###############################################################################
| domokane/FinancePy | financepy/utils/schedule.py | schedule.py | py | 11,970 | python | en | code | 1,701 | github-code | 13 |
1434351263 | #3.写一个狗类。产生10条狗(姓名,攻击力(默认5),防御力
#(默认3),血量(默认100))。然后随机从10条狗中选2条狗打架,狗的血量初始值都为100.,
# 当血量为0的时候,这条狗,死亡,清出狗的队伍。
#.直到最后一条狗,输出获胜狗的编号
import random
# Define a dog class with attributes: name, attack, defense, HP (with defaults).
# NOTE(review): 'list' shadows the builtin list type; it is the module-level
# registry of Gou instances read elsewhere in this script - rename with care.
list=[]
class Gou():
    """A fighting dog with a name, randomized attack/defense stats and HP."""
    def __init__(self, name=None, shanghai=0, fangyu=0, HP=100):
        """Create a dog. *shanghai*/*fangyu* are accepted for interface
        compatibility but, as in the original, the stats are randomized.

        Bug fixed: the original body sat inside ``while True:`` with no
        break, so the constructor never returned.  The loop is removed.
        """
        s = random.randint(1, 5)   # attack value, 1..5
        f = random.randint(1, 3)   # defense value, 1..3
        self.name = name
        # NOTE(review): attribute name 'shanghia' keeps the original typo
        # (presumably 'shanghai') so any external readers keep working.
        self.shanghia = f'{s}'     # stored as str, matching the original
        self.fangyu = f"{f}"
        self.HP = HP
def xuangou(pool=None):
    """Pick two distinct entries at random.

    Generalized: *pool* lets callers pass the sequence to draw from; it
    defaults to the module-level ``list1`` for backward compatibility with
    the zero-argument call sites.  Like the original, this loops forever
    if the pool holds fewer than two distinct values.
    """
    if pool is None:
        pool = list1  # original behaviour: draw from the global registry
    while True:
        gou1 = random.choice(pool)
        gou2 = random.choice(pool)
        if gou1 != gou2:
            return gou1, gou2
def pk(gou1=None, gou2=None):
    """Fight dog number *gou1* against dog number *gou2* (1-based indexes
    into the module-level ``list`` of Gou objects) until one dies.

    Each round a random side attacks; damage is the attacker's attack minus
    the defender's defense, but at least 1.  Returns the number of the dog
    that died.

    Bug fixed: the original body was ``while True: s = random.randint(0, 1)``
    - an infinite loop that also accepted no arguments, although the script
    below calls it with two dog numbers.
    """
    d1, d2 = list[gou1 - 1], list[gou2 - 1]
    while d1.HP > 0 and d2.HP > 0:
        if random.randint(0, 1) == 0:
            d2.HP -= max(int(d1.shanghia) - int(d2.fangyu), 1)
        else:
            d1.HP -= max(int(d2.shanghia) - int(d1.fangyu), 1)
    return gou1 if d1.HP <= 0 else gou2
# Create ten dogs: `list` holds the Gou objects, `list1` their 1-based numbers.
list=[]
list1=[]
for t in range(0,10):
    list.append(Gou())
    list1.append(t+1)
# Pick two dogs to fight each round, for ten rounds, until one dog remains.
# NOTE(review): xuangou() returns dog *numbers* drawn from list1, yet GO1/GO2
# are then used both as list indexes (list1[GO1]) and as 1-based ids (GO1+1).
# After deletions the numbers and list positions drift apart, so this loop
# can raise IndexError or delete the wrong dog. TODO: fix the indexing.
for i in range(1,10):
    GO1,GO2=xuangou()
    print(f"第{i}回合:编号{list1[GO1]}VS编号{list1[GO2]}")
    # NOTE(review): verify pk's signature accepts two dog numbers and
    # returns the number of the losing dog as assumed here.
    death=pk(GO1+1,GO2+1)
    print(list1)
    print(f"删除狗{list1[death-1]}")
    del list1[death-1]
    del list[death-1]
print(f"狗{list1[0]}胜利,剩余血量{list[0].HP}HP")
39352843935 | # @Time : 2022/4/6 14:54
# @Author : PEIWEN PAN
# @Email : 121106022690@njust.edu.cn
# @File : metric.py
# @Software: PyCharm
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from skimage import measure
class SigmoidMetric():
    """Accumulates pixel accuracy and mean IoU over batches.

    Raw (pre-sigmoid) outputs are thresholded at ``score_thresh``; labels
    are treated as binary masks (foreground where > 0).
    """
    def __init__(self, score_thresh=0):
        self.score_thresh = score_thresh
        self.reset()

    def update(self, pred, labels):
        """Fold one batch of predictions/labels into the running totals."""
        n_correct, n_labeled = self.batch_pix_accuracy(pred, labels)
        n_inter, n_union = self.batch_intersection_union(pred, labels)
        self.total_correct += n_correct
        self.total_label += n_labeled
        self.total_inter += n_inter
        self.total_union += n_union

    def get(self):
        """Return (pixel accuracy, mean IoU) over everything seen so far."""
        eps = np.spacing(1)
        pixAcc = 1.0 * self.total_correct / (eps + self.total_label)
        IoU = 1.0 * self.total_inter / (eps + self.total_union)
        return pixAcc, IoU.mean()

    def reset(self):
        """Clear all accumulated counts."""
        self.total_inter = 0
        self.total_union = 0
        self.total_correct = 0
        self.total_label = 0

    def batch_pix_accuracy(self, output, target):
        """Return (#correctly-predicted foreground pixels, #foreground pixels)."""
        assert output.shape == target.shape
        out_np = output.cpu().detach().numpy()
        tgt_np = target.cpu().detach().numpy()
        predict = (out_np > self.score_thresh).astype('int64')   # P
        pixel_labeled = np.sum(tgt_np > 0)                       # T
        pixel_correct = np.sum((predict == tgt_np) * (tgt_np > 0))  # TP
        assert pixel_correct <= pixel_labeled
        return pixel_correct, pixel_labeled

    def batch_intersection_union(self, output, target):
        """Return (intersection, union) pixel-count histograms (single class)."""
        lo, hi, nbins = 1, 1, 1
        predict = (output.cpu().detach().numpy() > self.score_thresh).astype('int64')  # P
        tgt_np = target.cpu().numpy().astype('int64')                                  # T
        intersection = predict * (predict == tgt_np)                                   # TP
        # histogram over value 1 simply counts the foreground pixels
        area_inter = np.histogram(intersection, bins=nbins, range=(lo, hi))[0]
        area_pred = np.histogram(predict, bins=nbins, range=(lo, hi))[0]
        area_lab = np.histogram(tgt_np, bins=nbins, range=(lo, hi))[0]
        area_union = area_pred + area_lab - area_inter
        assert (area_inter <= area_union).all()
        return area_inter, area_union
class SamplewiseSigmoidMetric():
    # Per-sample IoU metric: unlike SigmoidMetric, this applies a sigmoid to
    # the raw outputs before thresholding, and keeps one intersection/union
    # entry per sample so `get` can return the whole IoU distribution.
    def __init__(self, nclass, score_thresh=0.5):
        self.nclass = nclass
        self.score_thresh = score_thresh
        self.reset()
    def update(self, preds, labels):
        """Updates the internal evaluation result."""
        inter_arr, union_arr = self.batch_intersection_union(preds, labels,
                                                             self.nclass, self.score_thresh)
        # np.append grows the per-sample arrays; fine here since batches are few
        self.total_inter = np.append(self.total_inter, inter_arr)
        self.total_union = np.append(self.total_union, union_arr)
    def get(self):
        """Gets the current evaluation result."""
        # Returns (per-sample IoU array, mean IoU over samples)
        IoU = 1.0 * self.total_inter / (np.spacing(1) + self.total_union)
        mIoU = IoU.mean()
        return IoU, mIoU
    def reset(self):
        """Resets the internal evaluation result to initial state."""
        self.total_inter = np.array([])
        self.total_union = np.array([])
        self.total_correct = np.array([])
        self.total_label = np.array([])
    def batch_intersection_union(self, output, target, nclass, score_thresh):
        """mIoU"""
        # inputs are tensor
        # the category 0 is ignored class, typically for background / boundary
        mini = 1
        maxi = 1  # nclass
        nbins = 1  # nclass
        # threshold sigmoid probabilities, then count per-sample areas
        predict = (F.sigmoid(output).cpu().detach().numpy() > score_thresh).astype('int64')  # P
        target = target.cpu().detach().numpy().astype('int64')  # T
        intersection = predict * (predict == target)  # TP
        num_sample = intersection.shape[0]
        area_inter_arr = np.zeros(num_sample)
        area_pred_arr = np.zeros(num_sample)
        area_lab_arr = np.zeros(num_sample)
        area_union_arr = np.zeros(num_sample)
        for b in range(num_sample):
            # areas of intersection and union
            area_inter, _ = np.histogram(intersection[b], bins=nbins, range=(mini, maxi))
            area_inter_arr[b] = area_inter
            area_pred, _ = np.histogram(predict[b], bins=nbins, range=(mini, maxi))
            area_pred_arr[b] = area_pred
            area_lab, _ = np.histogram(target[b], bins=nbins, range=(mini, maxi))
            area_lab_arr[b] = area_lab
            area_union = area_pred + area_lab - area_inter
            area_union_arr[b] = area_union
            assert (area_inter <= area_union).all()
        return area_inter_arr, area_union_arr
class ROCMetric():
    """Accumulates TP/FP/TN/FN counts over ``bins + 1`` score thresholds to
    build ROC and precision-recall curves.

    Bug fixes vs. the original:
    * ``get`` mixed threshold bins in the F1 denominator
      (``precision[4]`` instead of ``precision[5]``);
    * ``reset`` always allocated arrays of length 11 regardless of ``bins``.
    """
    def __init__(self, nclass, bins):  # bins sets how many thresholds sample the ROC curve
        super(ROCMetric, self).__init__()
        self.nclass = nclass
        self.bins = bins
        self.tp_arr = np.zeros(self.bins + 1)
        self.pos_arr = np.zeros(self.bins + 1)
        self.fp_arr = np.zeros(self.bins + 1)
        self.neg_arr = np.zeros(self.bins + 1)
        self.class_pos = np.zeros(self.bins + 1)
    def update(self, preds, labels):
        """Accumulate confusion counts at every threshold iBin / bins."""
        for iBin in range(self.bins + 1):
            score_thresh = (iBin + 0.0) / self.bins
            i_tp, i_pos, i_fp, i_neg, i_class_pos = cal_tp_pos_fp_neg(preds, labels, self.nclass, score_thresh)
            self.tp_arr[iBin] += i_tp
            self.pos_arr[iBin] += i_pos
            self.fp_arr[iBin] += i_fp
            self.neg_arr[iBin] += i_neg
            self.class_pos[iBin] += i_class_pos
    def get(self):
        """Return (tpr, fpr, recall, precision, f1_score).

        NOTE: the F1 score is evaluated at bin index 5, so ``bins`` must be
        at least 5 for ``get`` to be meaningful (unchanged from the original).
        """
        tp_rates = self.tp_arr / (self.pos_arr + 0.00001)
        fp_rates = self.fp_arr / (self.neg_arr + 0.00001)
        recall = self.tp_arr / (self.pos_arr + 0.00001)
        precision = self.tp_arr / (self.class_pos + 0.00001)
        # Fixed: the denominator previously used precision[4].
        f1_score = (2.0 * recall[5] * precision[5]) / (recall[5] + precision[5] + 0.00001)
        return tp_rates, fp_rates, recall, precision, f1_score
    def reset(self):
        """Zero all accumulators (length bins + 1, matching __init__)."""
        self.tp_arr = np.zeros(self.bins + 1)
        self.pos_arr = np.zeros(self.bins + 1)
        self.fp_arr = np.zeros(self.bins + 1)
        self.neg_arr = np.zeros(self.bins + 1)
        self.class_pos = np.zeros(self.bins + 1)
class PD_FA():
    # Probability-of-Detection / False-Alarm metric for small-target detection.
    # Connected components of the thresholded prediction are matched to
    # ground-truth components by centroid distance (< 3 px) at each of
    # bins + 1 thresholds over the 0..255 score range.
    def __init__(self, nclass, bins, cfg):
        super(PD_FA, self).__init__()
        self.nclass = nclass
        self.bins = bins
        self.image_area_total = []
        self.image_area_match = []
        self.FA = np.zeros(self.bins + 1)   # false-alarm pixel area per threshold
        self.PD = np.zeros(self.bins + 1)   # detected-target count per threshold
        self.target = np.zeros(self.bins + 1)  # ground-truth target count per threshold
        self.cfg = cfg  # assumes cfg.data['crop_size'] gives the square image size - TODO confirm
    def update(self, preds, labels):
        # NOTE(review): assumes preds/labels hold exactly one crop_size x
        # crop_size image (reshape would fail otherwise) - confirm with caller.
        for iBin in range(self.bins + 1):
            score_thresh = iBin * (255 / self.bins)
            predits = np.array((preds > score_thresh).cpu()).astype('int64')
            predits = np.reshape(predits, (self.cfg.data['crop_size'], self.cfg.data['crop_size']))
            labelss = np.array((labels).cpu()).astype('int64')  # P
            labelss = np.reshape(labelss, (self.cfg.data['crop_size'], self.cfg.data['crop_size']))
            # label connected components (8-connectivity) in prediction and GT
            image = measure.label(predits, connectivity=2)
            coord_image = measure.regionprops(image)
            label = measure.label(labelss, connectivity=2)
            coord_label = measure.regionprops(label)
            self.target[iBin] += len(coord_label)
            self.image_area_total = []
            self.image_area_match = []
            self.distance_match = []
            self.dismatch = []
            for K in range(len(coord_image)):
                area_image = np.array(coord_image[K].area)
                self.image_area_total.append(area_image)
            # greedily match each GT component to the first predicted
            # component whose centroid lies within 3 pixels
            for i in range(len(coord_label)):
                centroid_label = np.array(list(coord_label[i].centroid))
                for m in range(len(coord_image)):
                    centroid_image = np.array(list(coord_image[m].centroid))
                    distance = np.linalg.norm(centroid_image - centroid_label)
                    area_image = np.array(coord_image[m].area)
                    if distance < 3:
                        self.distance_match.append(distance)
                        self.image_area_match.append(area_image)
                        del coord_image[m]  # each prediction matches at most once
                        break
            # unmatched predicted components count as false-alarm area
            self.dismatch = [x for x in self.image_area_total if x not in self.image_area_match]
            self.FA[iBin] += np.sum(self.dismatch)
            self.PD[iBin] += len(self.distance_match)
    def get(self, img_num):
        # Normalize: FA by total pixels seen, PD by ground-truth target count.
        Final_FA = self.FA / ((self.cfg.data['crop_size'] * self.cfg.data['crop_size']) * img_num)
        Final_PD = self.PD / self.target
        return Final_FA, Final_PD
    def reset(self):
        # NOTE(review): only FA/PD are cleared; self.target keeps accumulating.
        self.FA = np.zeros([self.bins + 1])
        self.PD = np.zeros([self.bins + 1])
def cal_tp_pos_fp_neg(output, target, nclass, score_thresh):
    """Return (tp, pos, fp, neg, class_pos) confusion counts as 0-d tensors.

    *output* holds raw logits; predictions are ``sigmoid(output) > score_thresh``.
    *target* may be (N, H, W) or (N, C, H, W); a channel axis is inserted in
    the 3-d case.  *nclass* is unused and kept only for interface
    compatibility.

    Bug fixed: the 3-d branch used ``np.expand_dims`` on a torch tensor,
    silently converting it to a NumPy array and breaking the tensor
    arithmetic below; it now uses ``Tensor.unsqueeze``.
    """
    predict = (torch.sigmoid(output) > score_thresh).float()
    if len(target.shape) == 3:
        target = target.float().unsqueeze(1)  # add channel axis -> (N, 1, H, W)
    elif len(target.shape) == 4:
        target = target.float()
    else:
        raise ValueError("Unknown target dimension")
    intersection = predict * ((predict == target).float())  # true-positive mask
    tp = intersection.sum()
    fp = (predict * ((predict != target).float())).sum()
    tn = ((1 - predict) * ((predict == target).float())).sum()
    fn = (((predict != target).float()) * (1 - predict)).sum()
    pos = tp + fn        # all ground-truth positives
    neg = fp + tn        # all ground-truth negatives
    class_pos = tp + fp  # all predicted positives
    return tp, pos, fp, neg, class_pos
if __name__ == '__main__':
    # Smoke test: feed random 8x1x512x512 batches through both metrics.
    pred = torch.rand(8, 1, 512, 512)
    target = torch.rand(8, 1, 512, 512)
    m1 = SigmoidMetric()
    m2 = SamplewiseSigmoidMetric(nclass=1, score_thresh=0.5)
    m1.update(pred, target)
    m2.update(pred, target)
    pixAcc, mIoU = m1.get()
    _, nIoU = m2.get()
| PANPEIWEN/Infrared-Small-Target-Segmentation-Framework | utils/metric.py | metric.py | py | 10,083 | python | en | code | 20 | github-code | 13 |
38787330719 | import time
import json
from flask import Flask, request
from pipeline import load_pipeline, HaystackEncoder
from haystack.nodes import PromptTemplate
application = Flask(__name__)
# Load the Haystack pipeline once at import time so every request reuses it.
pipe = load_pipeline("data/mcare/")
@application.route('/', methods=['GET'])
@application.route('/index', methods=['GET'])
@application.route('/ping', methods=['GET'])
def ping():
    """Health-check endpoint: always returns HTTP 200 with a fixed body."""
    return 'all good in the hood', 200
@application.route('/invocations', methods=['POST'])
def invocations():
    """Run the QA pipeline on a JSON payload.

    Expected body: {"query": str, "generator_kwargs": dict,
    "retriever_kwargs": dict}.  Returns the pipeline result serialized with
    HaystackEncoder, 400 when no query is given, or 500 on pipeline errors.
    """
    start = time.time()
    payload = request.get_json(force=True)
    query = payload.get('query', None)
    generator_kwargs = payload.get('generator_kwargs', {})
    retriever_kwargs = payload.get('retriever_kwargs', {})
    # If the caller passed a raw prompt string, wrap it in a PromptTemplate.
    # NOTE(review): this mutates the deserialized payload dict in place.
    if 'invocation_context' in generator_kwargs and 'prompt_template' in generator_kwargs['invocation_context']:
        generator_kwargs['invocation_context']['prompt_template'] = PromptTemplate(
            name="question-answering",
            prompt_text=generator_kwargs['invocation_context']['prompt_template']
        )
    if query is None:
        return 'No query provided', 400
    try:
        response = pipe.run(
            query=query,
            params={
                "Retriever": retriever_kwargs,
                "Generator": generator_kwargs
            }
        )
    except Exception as e:
        # NOTE(review): returning str(e) leaks internal error details to the
        # client - consider logging instead and returning a generic message.
        return str(e), 500
    print(time.time() - start)  # request latency in seconds (debug output)
    return json.dumps(response, cls=HaystackEncoder)
if __name__ == '__main__':
    # Dev entry point; in production this app is served by a WSGI server.
    application.run()
74461854097 | import datetime
from behave import *
from selenium.webdriver.common.by import By
@then(u'I should see my rides sorted by {parameter} {order}')
def step_impl(context, parameter, order):
    """Behave step: verify the listed ride cards are sorted by *parameter*
    ('date' or 'duration') in *order* ('decreasing', anything else means
    increasing)."""
    # Each ride card is located via this (brittle) CSS-in-JS class selector.
    rides = context.driver.find_elements(By.CSS_SELECTOR, "div[class*=css-109v0wb]")
    rides_params = []
    for ride in rides:
        if parameter == 'date':
            # The second <h4> on the card holds the date as dd.mm.yyyy.
            ride_elem = ride.find_elements(By.TAG_NAME, 'h4')[1]
            param = datetime.datetime.strptime(ride_elem.text, "%d.%m.%Y").date()
        elif parameter == 'duration':
            # The first <span> holds the duration formatted like '2h 30 min'.
            ride_duration = ride.find_elements(By.TAG_NAME, 'span')[0]
            param = datetime.datetime.strptime(ride_duration.text, "%Hh %M min").time()
        else:
            assert False  # unsupported sort parameter in the feature file
        rides_params.append(param)
    if order == 'decreasing':
        assert rides_params == sorted(rides_params, reverse=True)
    else:
        assert rides_params == sorted(rides_params)
| LeviSforza/TraWell | TraWell-tests/features/steps/sorting_my_rides.py | sorting_my_rides.py | py | 952 | python | en | code | 0 | github-code | 13 |
17376283683 | import numpy as np
from Beam import *
import matplotlib.pyplot as plt
class Warp:
def __init__(self,type,par,wn,k, MAXITE):
'''
Name Description
------- --------------
nDim Number of Spatial Dimensions
nNodes Number of nodes in the mesh
nElements Number of elements in the mesh
nNodesElement Number of nodes per element
nDoF Number of DoF per node
nEquations Number of equations to solve
elements list contains nElements beam element class
ID Array of global eq. numbers, destination array (ID)
EBC EBC = 1 if DOF is on Essential B.C.
IEN Array of global node numbers
LM Array of global eq. numbers, location matrix (LM)
C Material Constant at each element
f Distributed Load at each node, an array(nDof, nNodes)
g Essential B.C. Value at each node is an array(nDoF, nNodes)
h Natural B.C. Value at each node
'''
self.type = type
self.par = par
#Penalty parameters
self.wn = wn
self.MAXITE = MAXITE
self.k = k
self.nDim = nDim = 2
self.nDoF = nDoF = 3
self.nNodesElement = 2
#build elements
if(type == 'straight beam'):
self._straight_beam_data( )
elif(type == 'sine beam0'):
self._sine_beam_data0( )
elif(type == 'sine beam'):
self._sine_beam_data( )
self.nEquations = (self.EBC == 0).sum()
#construct element nodes array
#IEM(i,e) is the global node id of element e's node i
self.IEM = np.zeros([self.nNodesElement, self.nElements], dtype = 'int')
self.IEM[0,:] = np.arange(self.nElements)
self.IEM[1,:] = np.arange(1, self.nElements + 1)
#construct destination array
#ID(d,n) is the global equation number of node n's dth freedom, -1 means no freedom
self.ID = np.zeros([self.nDoF, self.nNodes],dtype = 'int') - 1
eq_id = 0
for i in range(self.nNodes):
for j in range(self.nDoF):
if(self.EBC[j,i] == 0):
self.ID[j,i] = eq_id
eq_id += 1
#construct Local matrix
#LM(d,e) is the global equation number of element e's d th freedom
self.LM = np.zeros([self.nNodesElement*self.nDoF, self.nElements],dtype = 'int')
for i in range(self.nDoF):
for j in range(self.nNodesElement):
for k in range(self.nElements):
self.LM[j*self.nDoF + i, k] = self.ID[i,self.IEM[j,k]]
#contact information
self.contact_dist = contact_info = np.empty(self.nElements)
def _straight_beam_data(self ):
'''
g is dirichlet boundary condition
f is the internal force
'''
nDoF = self.nDoF
nDim = self.nDim
self.nElements = nElements = 5
self.elements = elements = []
self.nNodes = nNodes = self.nElements + 1
E = 1.0e4
r = 0.1
self.Coord = Coord = np.zeros([nDoF, nNodes])
Coord[0,:] = np.linspace(0,1.0,nNodes)
Coord[1,:] = np.linspace(0,1.0,nNodes)
for e in range(nElements):
Xa0,Xb0 = np.array([Coord[0,e],Coord[1,e],Coord[2,e]]),np.array([Coord[0,e+1],Coord[1,e+1],Coord[2,e]])
elements.append(LinearEBBeam(e, Xa0, Xb0,E,r))
# Essential bounary condition
self.g = np.zeros([nDoF, nNodes])
self.EBC = np.zeros([nDoF,nNodes],dtype='int')
self.EBC[:,0] = 1
# Force
fx,fy,m = 0.0,1, 0.0
self.f = np.zeros([nDoF, nNodes])
self.f[:, -1] = fx, fy, m
# Weft info
self.nWeft = nWeft = 1
self.wefts = wefts = np.zeros([nDim+1, nWeft]) # (x,y,r)
wefts[:,0] = 0.4,0.22,0.1
    def _sine_beam_data0(self ):
        '''
        Build the 'sine beam0' preset: a 50-element sinusoidal beam clamped
        at both ends, with no wefts.

        g is dirichlet boundary condition
        f is the internal force
        '''
        nDoF = self.nDoF
        nDim = self.nDim
        self.nElements = nElements = 50
        self.elements = elements = []
        self.nNodes = nNodes = self.nElements + 1
        #Young's module
        E = 1.0e8
        #beam radius
        self.r = r = 0.02
        #The curve is h*sin(k*x - pi/2.0)
        # NOTE(review): k is hard-coded to 3 here and ignores self.k,
        # unlike _sine_beam_data - confirm this is intentional.
        k = 3
        h = 0.1
        self.Coord = Coord = np.zeros([nDoF, nNodes])
        Coord[0, :] = np.linspace(0, 2*np.pi, nNodes) # x
        Coord[1, :] = h*np.sin(k*Coord[0, :] - np.pi/2.0) + h # y
        Coord[2, :] = h*k*np.cos(k*Coord[0, :] - np.pi/2.0) # rotation theta
        for e in range(nElements):
            Xa0,Xb0 = np.array([Coord[0,e],Coord[1,e],Coord[2, e]]),np.array([Coord[0,e+1],Coord[1,e+1], Coord[2, e+1]])
            elements.append(LinearEBBeam(e,Xa0, Xb0,E,r))
        # no wefts
        self.nWeft = nWeft = 0
        self.wefts = np.zeros([nDim + 1, nWeft]) # (x,y,r)
        # Penalty parameters
        # NOTE(review): this overrides the wn value passed to the constructor.
        self.wn = 1e7
        # Essential boundary condition: clamp both end nodes
        self.g = np.zeros([nDoF, nNodes])
        self.EBC = np.zeros([nDoF,nNodes],dtype='int')
        self.EBC[:,0] = 1
        self.EBC[:,-1] = 1
        # prescribed displacement of the last node
        self.g[:,-1] = self.par
        # Force
        #fx,fy,m = 0.1, -0.1, 0.0
        self.f = np.zeros([nDoF, nNodes])
        #self.f[:, -1] = fx, fy, m
    def _sine_beam_data(self ):
        '''
        Build the 'sine beam' preset: a 50-element sinusoidal beam clamped
        at both ends, with 2k-1 wefts placed at the curve's inflection
        points along y = h.

        g is dirichlet boundary condition
        f is the internal force
        '''
        nDoF = self.nDoF
        nDim = self.nDim
        self.nElements = nElements = 50
        self.elements = elements = []
        self.nNodes = nNodes = self.nElements + 1
        #Young's module
        E = 1.0e8
        #beam radius
        self.r = r = 0.02
        #The curve is h*sin(k*x - pi/2.0)
        k = self.k
        h = 0.1
        self.Coord = Coord = np.zeros([nDoF, nNodes])
        Coord[0, :] = np.linspace(0, 2*np.pi, nNodes) # x
        Coord[1, :] = h*np.sin(k*Coord[0, :] - np.pi/2.0) + h # y
        Coord[2, :] = h*k*np.cos(k*Coord[0, :] - np.pi/2.0) # rotation theta
        for e in range(nElements):
            Xa0,Xb0 = np.array([Coord[0,e],Coord[1,e],Coord[2, e]]),np.array([Coord[0,e+1],Coord[1,e+1], Coord[2, e+1]])
            elements.append(LinearEBBeam(e, Xa0, Xb0,E,r))
        # Weft info: obstacles of radius rWeft at x = pi*(i+1)/k, y = h
        rWeft = r
        self.nWeft = nWeft = 2*k-1
        self.wefts = wefts = np.zeros([nDim + 1, nWeft]) # (x,y,r)
        for i in range(nWeft):
            wefts[:,i] = np.pi*(i+1.0)/k, h, rWeft
        # Essential boundary condition: clamp both end nodes
        self.g = np.zeros([nDoF, nNodes])
        self.EBC = np.zeros([nDoF,nNodes],dtype='int')
        self.EBC[:,0] = 1
        self.EBC[:,-1] = 1
        # prescribed displacement of the last node
        self.g[:,-1] = self.par
        # Force
        #fx,fy,m = 0.1, -0.1, 0.0
        self.f = np.zeros([nDoF, nNodes])
        #self.f[:, -1] = fx, fy, m
        #self.f[:, nElements//2] = fx, fy, m
def reset_par(self,par):
self.par = par
self.g[:,-1] = self.par
    def assembly(self,d):
        '''
        Assemble the gradient and Hessian of the total potential for the
        Newton solve, including penalty contributions from weft contact.

        :param d: displacement vector of the free DoFs (length nEquations)
        :return: dPi and ddPi
                 dPi  = Ku - F + \sum f_c^i   (gradient)
                 ddPi = K + \sum df_c^i       (Hessian)

        Side effect: refreshes self.contact_dist with the closest weft
        distance found per element.
        '''
        #Step 1: Access required global variables
        nNodes = self.nNodes
        nElements = self.nElements
        nEquations = self.nEquations
        nDoF = self.nDoF
        nWeft = self.nWeft
        ID = self.ID
        LM = self.LM
        EBC = self.EBC
        g = self.g
        elements = self.elements
        wn = self.wn
        wefts = self.wefts
        #Step 2: Allocate K, F, dP and ddP
        K = np.zeros([nEquations,nEquations])
        F = np.zeros(nEquations);
        #Step 3: Assemble K and F
        for e in range(nElements):
            [k_e,f_e,f_g] = self._linear_beam_arrays(e);
            #Step 3b: Get Global equation numbers
            P = LM[:,e]
            #Step 3c: Eliminate Essential DOFs
            I = (P >= 0);
            P = P[I];
            #Step 3d: Insert k_e, f_e, f_g, f_h
            K[np.ix_(P,P)] += k_e[np.ix_(I,I)]
            F[P] += f_e[I] + f_g[I]
        # Expand d into the full (nDoF, nNodes) displacement table, filling
        # Dirichlet DoFs with their prescribed values from g.
        disp = np.empty([nDoF, nNodes])
        for i in range(nNodes):
            for j in range(nDoF):
                disp[j,i] = d[ID[j,i]] if EBC[j,i] == 0 else g[j,i]
        #Step 4: Allocate dP and ddP
        dP = np.zeros(nEquations)
        ddP = np.zeros([nEquations,nEquations])
        #Step 5: Assemble contact penalty contributions
        contact_dist = self.contact_dist
        contact_dist.fill(np.inf)
        g_min = np.inf
        for i in range(nWeft):
            xm, rm = wefts[0:2, i], wefts[2, i]
            closest_e = -1
            # find the element closest to weft i among all contact candidates
            for e in range(nElements):
                ele = elements[e]
                da, db = disp[:, e], disp[:, e + 1]
                contact, penalty, info = ele.penalty_term(da, db, xm, rm, wn)
                if (contact and info[1] < g_min):
                    closest_e, g_min = e, info[1]
                    #print('closest_e is ' , closest_e, 'g_min is ', g_min,' penalty is ', penalty)
            # only the closest element receives this weft's penalty force
            if (closest_e >= 0):
                ele = elements[closest_e]
                da, db = disp[:, closest_e], disp[:, closest_e + 1]
                contact, penalty, info = ele.penalty_term(da, db, xm, rm, wn)
                if(info[1] < contact_dist[closest_e]):
                    contact_dist[closest_e] = info[1]
                print('Weft ', i , ' contacts element', closest_e, ' local coordinate is ',
                      info[0], ' distance is ', info[1], ' side is ',info[2])
                #print('closest_e is ' , closest_e, 'info is ', info,' penalty is ', penalty)
                _, f_contact, k_contact = penalty
                # Step 3b: Get Global equation numbers
                P = LM[:, closest_e]
                # Step 3c: Eliminate Essential DOFs
                I = (P >= 0)
                P = P[I]
                # Step 3d: Insert k_e, f_e, f_g, f_h
                ddP[np.ix_(P, P)] += k_contact[np.ix_(I, I)]
                dP[P] += f_contact[I]
        dPi = np.dot(K,d) - F + dP
        ddPi = K + ddP
        return dPi, ddPi
    def compute_force(self,d):
        '''
        Compute the reaction forces at the Dirichlet (constrained) DoFs,
        assembling over ALL DoFs (not only the free equations).

        :param d: displacement vector of the free DoFs (length nEquations)
        :return: the force at each Dirichlet freedom
                 F_total = Ku - F + \sum f_c^i

        Side effect: refreshes self.contact_dist, like assembly().
        '''
        #Step 1: Access required global variables
        nNodes = self.nNodes
        nElements = self.nElements
        nEquations = self.nEquations
        nDoF = self.nDoF
        nWeft = self.nWeft
        ID = self.ID
        LM = self.LM
        EBC = self.EBC
        g = self.g
        elements = self.elements
        wn = self.wn
        wefts = self.wefts
        #Step 2: Allocate K, F, dP and ddP (sized over all DoFs here)
        K = np.zeros([nDoF*nNodes,nDoF*nNodes])
        F = np.zeros(nDoF*nNodes);
        #Step 3: Assemble K and F
        for e in range(nElements):
            [k_e,f_e,f_g] = self._linear_beam_arrays(e);
            #Step 3b: Get Global equation numbers
            P = np.arange(e*nDoF,(e+2)*nDoF)
            #Step 3d: Insert k_e, f_e, f_g, f_h
            K[np.ix_(P,P)] += k_e
            #Step 3b: Get Global equation numbers
            #Step 3c: Eliminate Essential DOFs
            I = (LM[:,e] >= 0);
            P = P[I];
            # NOTE(review): only f_e is inserted here (no f_g) because the
            # Dirichlet values enter through disp below - confirm intent.
            F[P] += f_e[I]
        # Expand d into the full (nDoF, nNodes) displacement table, filling
        # Dirichlet DoFs with their prescribed values from g.
        disp = np.empty([nDoF, nNodes])
        for i in range(nNodes):
            for j in range(nDoF):
                disp[j,i] = d[ID[j,i]] if EBC[j,i] == 0 else g[j,i]
        #Step 4: Allocate dP and ddP
        dP = np.zeros(nDoF*nNodes)
        #Step 5: Assemble contact penalty contributions
        contact_dist = self.contact_dist
        contact_dist.fill(np.inf)
        g_min = np.inf
        for i in range(nWeft):
            xm, rm = wefts[0:2, i], wefts[2, i]
            closest_e = -1
            # find the element closest to weft i among all contact candidates
            for e in range(nElements):
                ele = elements[e]
                da, db = disp[:, e], disp[:, e + 1]
                contact, penalty, info = ele.penalty_term(da, db, xm, rm, wn)
                if (contact and info[1] < g_min):
                    closest_e, g_min = e, info[1]
                    #print('closest_e is ' , closest_e, 'g_min is ', g_min,' penalty is ', penalty)
            # only the closest element receives this weft's penalty force
            if (closest_e >= 0):
                ele = elements[closest_e]
                da, db = disp[:, closest_e], disp[:, closest_e + 1]
                contact, penalty, info = ele.penalty_term(da, db, xm, rm, wn)
                if(info[1] < contact_dist[closest_e]):
                    contact_dist[closest_e] = info[1]
                print('Weft ', i , ' contacts element', closest_e, ' local coordinate is ',
                      info[0], ' distance is ', info[1], ' side is ',info[2])
                #print('closest_e is ' , closest_e, 'info is ', info,' penalty is ', penalty)
                _, f_contact, k_contact = penalty
                # Step 3b: Get Global equation numbers
                P = np.arange(closest_e*nDoF,(closest_e+2)*nDoF)
                # Step 3d: Insert k_e, f_e, f_g, f_h
                dP[P] += f_contact
        # Total nodal force vector; return only the constrained components.
        F_total = np.dot(K,disp.flatten('F')) - F + dP
        return F_total[(EBC==1).flatten('F')]
    def _linear_beam_arrays(self,e):
        '''
        Build the element-level arrays for beam element e.

        :param e: element index
        :return: k_e stiffmatrix, f_e f_g
            k_e -- element stiffness matrix
            f_e -- point-force vector gathered from the global load table f
            f_g -- equivalent load induced by prescribed Dirichlet values g
        '''
        nNodesElement = self.nNodesElement
        nDoF = self.nDoF
        g = self.g
        f = self.f
        IEM = self.IEM
        ele = self.elements[e]
        k_e = ele.stiffmatrix()
        #Point force (column-major gather over the element's nodes)
        f_e = np.reshape(f[:,IEM[:,e]], (nNodesElement*nDoF), order='F')
        #Dirichlet boundary
        g_e = np.reshape(g[:,IEM[:,e]], (nNodesElement*nDoF), order='F')
        f_g = -np.dot(k_e,g_e)
        return k_e, f_e, f_g
def compute_gap_lower_bound(self):
nEquations = self.nEquations
nElements = self.nElements
nNodesElements = self.nNodesElement
nDoF = self.nDoF
r = self.r
LM = self.LM
gap_lower_bound = np.empty(nEquations)
gap_lower_bound.fill(r)
contact_dist = self.contact_dist
for e in range(nElements):
if contact_dist[e] < 2*r:
e_dist = 2*r - contact_dist[e]
for i in range(nNodesElements):
for j in range(nDoF):
eq_id = LM[i*nDoF + j,e]
if eq_id >= 0:
gap_lower_bound[eq_id] = min(gap_lower_bound[eq_id], e_dist)
return gap_lower_bound
    def fem_calc(self):
        '''
        Solve the nonlinear system with damped Newton iterations.

        Starting from u = 0, repeatedly assembles residual dPi and tangent
        ddPi, solves for the Newton direction and takes a damped step
        u <- u - dt*du until the residual drops below tolerance.

        :return: (u, res) -- free-DoF displacements and final residual norm
        '''
        nEquations = self.nEquations
        nDoF = self.nDoF
        u = np.zeros(nEquations)
        dPi,ddPi = self.assembly(u)
        res0 = np.linalg.norm(dPi)
        MAXITE = self.MAXITE
        EPS = 1e-8
        found = False
        dt_max = 0.5
        T = 0
        for ite in range(MAXITE):
            dPi,ddPi = self.assembly(u)
            res = np.linalg.norm(dPi)
            du = np.linalg.solve(ddPi,dPi)
            ################################
            # Time stepping
            ###############################
            # per-node step magnitude, repeated per DoF; 1e-12 avoids /0 below
            du_abs = np.repeat(np.sqrt(du[0::nDoF]**2 + du[1::nDoF]**2 + du[2::nDoF]**2) + 1e-12, nDoF)
            # NOTE(review): gap_lower_bound is computed but never used here --
            # TODO confirm whether dt was meant to respect it.
            gap_lower_bound = self.compute_gap_lower_bound()
            dt = min(dt_max, self.r/np.max(du_abs)/10.0 )
            # linear search
            # NOTE(review): the backtracking search only activates after
            # iteration 1950 (magic threshold) -- TODO confirm intended.
            if(ite > 1950):
                for subite in range(50):
                    dPi, _ = self.assembly(u - dt*du)
                    if(np.linalg.norm(dPi) < res):
                        break
                    else:
                        dt /= 10
            u = u - dt*du
            print('Ite/MAXITE: ', ite, ' /', MAXITE, 'In fem_calc res is', res,' dt is ', dt )
            # converge on absolute residual, relative residual, or tiny step
            if(res < EPS or res < EPS*res0 or np.max(du_abs) < EPS):
                found = True
                break
            T += dt
        if(not found):
            print("Newton cannot converge in fem_calc")
        print('T is ', T)
        return u,res
    def visualize_result(self, u, k=2):
        '''
        Plot reference vs. current configuration of all beam elements,
        together with the weft positions.

        :param u: displacement of all free freedoms
        :param k: visualize points for each beam elements
        '''
        ID = self.ID
        nDim = self.nDim
        nNodes = self.nNodes
        nDoF = self.nDoF
        nElements = self.nElements
        elements = self.elements
        EBC = self.EBC
        g = self.g
        # expand free-DoF vector u to the full nodal field (g at EBC freedoms)
        disp = np.empty([nDoF, nNodes])
        for i in range(nNodes):
            for j in range(nDoF):
                disp[j,i] = u[ID[j,i]] if EBC[j,i] == 0 else g[j,i]
        # adjacent elements share one sample point, hence (k-1)*nElements + 1
        coord_ref, coord_cur = np.empty([nDim,(k - 1)*nElements + 1]), np.empty([nDim,(k - 1)*nElements + 1])
        for e in range(nElements):
            ele = elements[e]
            X0 , X = ele.visualize(disp[:,e],disp[:,e+1], k, fig = 0)
            coord_ref[:, (k-1)*e:(k-1)*(e+1) + 1] = X0
            coord_cur[:, (k-1)*e:(k-1)*(e+1) + 1] = X
        plt.plot(coord_ref[0,:], coord_ref[1,:], '-o', label='ref',markersize = 2)
        plt.plot(coord_cur[0,:], coord_cur[1,:],'-o', label='current',markersize = 2)
        wefts = self.wefts
        plt.plot(wefts[0, :], wefts[1, :], 'o', label='weft',markersize = 2)
        plt.axis('equal')
        plt.xlabel('x')
        plt.ylabel('y')
        plt.legend()
        plt.show()
        return
if __name__ == "__main__":
    # Dirichlet displacements applied at the boundary: u_x, u_y and rotation
    u_x,u_y,theta = 0.1,0.1,0.1
    # penalty weight for weft contact -- assumed; TODO confirm units
    wn = 1e6
    MAXITE = 2000
    k = 3
    warp = Warp('sine beam',[u_x,u_y,theta],wn,k, MAXITE)
    #warp.assembly()
    d,res = warp.fem_calc()
    f = warp.compute_force(d)
    print('Dirichlet freedom force is ', f)
    warp.visualize_result(d,2)
| Zhengyu-Huang/Warp_and_Weft | Warp.py | Warp.py | py | 18,069 | python | en | code | 1 | github-code | 13 |
20758891532 | import logging
import time
import click
from odahuflow.cli.utils import click_utils
from odahuflow.cli.utils.click_utils import auth_options
from odahuflow.cli.utils.client import pass_obj
from odahuflow.cli.utils.error_handler import check_id_or_file_params_present, TIMEOUT_ERROR_MESSAGE, \
IGNORE_NOT_FOUND_ERROR_MESSAGE
from odahuflow.cli.utils.output import DEFAULT_OUTPUT_FORMAT, format_output, validate_output_format
from odahuflow.cli.utils.verifiers import positive_number
from odahuflow.sdk.clients.api import EntityAlreadyExists, WrongHttpStatusCode, RemoteAPIClient
from odahuflow.sdk.clients.api_aggregated import parse_resources_file_with_one_item
from odahuflow.sdk.clients.deployment import ModelDeployment, ModelDeploymentClient, READY_STATE, \
FAILED_STATE
# Seconds to sleep between status polls in the wait_* helpers below.
DEFAULT_WAIT_TIMEOUT = 5
# 20 minutes
DEFAULT_DEPLOYMENT_TIMEOUT = 20 * 60
LOGGER = logging.getLogger(__name__)
@click.group(cls=click_utils.BetterHelpGroup)
@auth_options
@click.pass_context
def deployment(ctx: click.core.Context, api_client: RemoteAPIClient):
    """
    Allow you to perform actions on deployments.\n
    Alias for the command is dep.
    """
    # Share one deployment client with every subcommand via the click context
    # (subcommands receive it through @pass_obj).
    ctx.obj = ModelDeploymentClient.construct_from_other(api_client)
@deployment.command()
@click.option('--md-id', '--id', help='Model deployment ID')
@click.option('--output-format', '-o', 'output_format', help='Output format [json|table|yaml|jsonpath]',
              default=DEFAULT_OUTPUT_FORMAT, callback=validate_output_format)
@pass_obj
def get(client: ModelDeploymentClient, md_id: str, output_format: str):
    """
    \b
    Get deployments.
    The command without id argument retrieve all deployments.
    \b
    Get all deployments in json format:
        odahuflowctl dep get --output-format json
    \b
    Get deployment with "git-repo" id:
        odahuflowctl dep get --id model-wine
    \b
    Using jsonpath:
        odahuflowctl dep get -o 'jsonpath=[*].spec.reference'
    \f
    :param client: Model deployment HTTP client
    :param md_id: Model deployment ID
    :param output_format: Output format
    :return:
    """
    # One specific deployment when an ID was given, otherwise everything.
    if md_id:
        deployments = [client.get(md_id)]
    else:
        deployments = client.get_all()
    format_output(deployments, output_format)
@deployment.command()
@click.option('--md-id', '--id', help='Replace model deployment ID from manifest')
@click.option('--file', '-f', type=click.Path(), required=True, help='Path to the file with deployment')
@click.option('--wait/--no-wait', default=True,
              help='no wait until scale will be finished')
@click.option('--timeout', default=DEFAULT_DEPLOYMENT_TIMEOUT, type=int, callback=positive_number,
              help='timeout in seconds. for wait (if no-wait is off)')
@click.option('--image', type=str, help='Override Docker image from file')
@click.option('--ignore-if-exists', is_flag=True,
              help='Ignore if entity is already exists on API server. Return success status code')
@pass_obj
def create(client: ModelDeploymentClient, md_id: str, file: str, wait: bool, timeout: int, image: str,
           ignore_if_exists: bool):
    """
    \b
    Create a deployment.
    You should specify a path to file with a deployment. The file must contain only one deployment.
    For now, CLI supports YAML and JSON file formats.
    If you want to create multiple deployments, you should use "odahuflowctl bulk apply" instead.
    If you provide the deployment id parameter, it will override before sending to API server.
    \b
    Usage example:
        * odahuflowctl dep create -f dep.yaml --id examples-git
    \f
    :param timeout: timeout in seconds. for wait (if no-wait is off)
    :param wait: no wait until deployment will be finished
    :param client: Model deployment HTTP client
    :param md_id: Model deployment ID
    :param file: Path to the file with only one deployment
    :param image: Override Docker image from file
    :param ignore_if_exists: Return success status code if entity is already exists
    """
    # Parse the manifest; exactly one ModelDeployment is expected per file.
    manifest = parse_resources_file_with_one_item(file).resource
    if not isinstance(manifest, ModelDeployment):
        raise ValueError(f'Model deployment expected, but {type(manifest)} provided')
    # Command-line overrides win over the manifest contents.
    if md_id:
        manifest.id = md_id
    if image:
        manifest.spec.image = image
    try:
        click.echo(client.create(manifest))
    except EntityAlreadyExists as e:
        if not ignore_if_exists:
            raise
        # Caller asked us to treat "already exists" as success.
        LOGGER.debug(f'--ignore-if-exists was passed: {e} will be suppressed')
        click.echo('Deployment already exists')
        return
    wait_deployment_finish(timeout, wait, manifest.id, client)
@deployment.command()
@click.option('--md-id', '--id', help='Replace model deployment ID from manifest')
@click.option('--file', '-f', type=click.Path(), required=True, help='Path to the file with deployment')
@click.option('--wait/--no-wait', default=True,
              help='no wait until scale will be finished')
@click.option('--timeout', default=DEFAULT_DEPLOYMENT_TIMEOUT, type=int, callback=positive_number,
              help='timeout in seconds. for wait (if no-wait is off)')
@click.option('--image', type=str, help='Override Docker image from file')
@pass_obj
def edit(client: ModelDeploymentClient, md_id: str, file: str, wait: bool, timeout: int, image: str):
    """
    \b
    Update a deployment.
    You should specify a path to file with a deployment. The file must contain only one deployment.
    For now, CLI supports YAML and JSON file formats.
    If you want to update multiple deployments, you should use "odahuflowctl bulk apply" instead.
    If you provide the deployment id parameter, it will override before sending to API server.
    \b
    Usage example:
        * odahuflowctl dep edit -f dep.yaml --id examples-git
    \f
    :param client: Model deployment HTTP client
    :param md_id: Model deployment ID
    :param file: Path to the file with only one deployment
    :param timeout: timeout in seconds. for wait (if no-wait is off)
    :param wait: no wait until edit will be finished
    :param image: Override Docker image from file
    """
    md = parse_resources_file_with_one_item(file).resource
    if not isinstance(md, ModelDeployment):
        raise ValueError(f'Model deployment expected, but {type(md)} provided')
    # Command-line overrides win over the manifest contents.
    if md_id:
        md.id = md_id
    if image:
        md.spec.image = image
    click.echo(client.edit(md))
    wait_deployment_finish(timeout, wait, md.id, client)
@deployment.command()
@click.option('--md-id', '--id', help='Model deployment ID')
@click.option('--file', '-f', type=click.Path(), help='Path to the file with deployment')
@click.option('--wait/--no-wait', default=True,
              help='no wait until scale will be finished')
@click.option('--timeout', default=DEFAULT_DEPLOYMENT_TIMEOUT, type=int, callback=positive_number,
              help='timeout in seconds. for wait (if no-wait is off)')
@click.option('--ignore-not-found/--not-ignore-not-found', default=False,
              help='ignore if Model Deployment is not found')
@pass_obj
def delete(client: ModelDeploymentClient, md_id: str, file: str, ignore_not_found: bool,
           wait: bool, timeout: int):
    """
    \b
    Delete a deployment.
    For this command, you must provide a deployment ID or path to file with one deployment.
    The file must contain only one deployment.
    If you want to delete multiple deployments, you should use "odahuflowctl bulk delete" instead.
    For now, CLI supports YAML and JSON file formats.
    The command will fail if you provide both arguments.
    \b
    Usage example:
        * odahuflowctl dep delete --id examples-git
        * odahuflowctl dep delete -f dep.yaml
    \f
    :param timeout: timeout in seconds. for wait (if no-wait is off)
    :param wait: no wait until deletion will be finished
    :param client: Model deployment HTTP client
    :param md_id: Model deployment ID
    :param file: Path to the file with only one deployment
    :param ignore_not_found: ignore if Model Deployment is not found
    """
    # Exactly one of --id / --file must be supplied.
    check_id_or_file_params_present(md_id, file)
    if file:
        # Take the ID from the manifest instead of the command line.
        md = parse_resources_file_with_one_item(file).resource
        if not isinstance(md, ModelDeployment):
            raise ValueError(f'Model deployment expected, but {type(md)} provided')
        md_id = md.id
    try:
        message = client.delete(md_id)
        wait_delete_operation_finish(timeout, wait, md_id, client)
        click.echo(message)
    except WrongHttpStatusCode as e:
        # 404 is tolerated only when --ignore-not-found was requested.
        if e.status_code != 404 or not ignore_not_found:
            raise e
        click.echo(IGNORE_NOT_FOUND_ERROR_MESSAGE.format(kind=ModelDeployment.__name__, id=md_id))
def wait_delete_operation_finish(timeout: int, wait: bool, md_id: str, md_client: ModelDeploymentClient):
    """
    Wait until a model deployment is actually gone from the API server.

    Polls the deployment by ID until the server answers 404 (deleted) or the
    timeout expires.
    :param timeout: timeout in seconds. for wait (if no-wait is off)
    :param wait: no wait until deletion will be finished
    :param md_id: Model Deployment name
    :param md_client: Model Deployment Client
    :return: None
    """
    if not wait:
        return
    # Validate before starting the clock (mirrors wait_deployment_finish).
    if timeout <= 0:
        raise Exception('Invalid --timeout argument: should be positive integer')
    start = time.time()
    while True:
        elapsed = time.time() - start
        if elapsed > timeout:
            # Consistency fix: use the shared timeout message like
            # wait_deployment_finish does, instead of an ad-hoc string.
            raise Exception(TIMEOUT_ERROR_MESSAGE)
        try:
            md_client.get(md_id)
        except WrongHttpStatusCode as e:
            # 404 means the deletion is complete.
            if e.status_code == 404:
                return
            LOGGER.info('Callback have not confirmed completion of the operation')
        print(f'Model deployment {md_id} is still being deleted...')
        time.sleep(DEFAULT_WAIT_TIMEOUT)
def wait_deployment_finish(timeout: int, wait: bool, md_id: str, md_client: ModelDeploymentClient):
    """
    Wait for deployment to finish according to command line arguments
    :param timeout: timeout in seconds. for wait (if no-wait is off)
    :param wait: no wait until deletion will be finished
    :param md_id: Model Deployment name
    :param md_client: Model Deployment Client
    :return: None
    """
    if not wait:
        return
    start = time.time()
    if timeout <= 0:
        raise Exception('Invalid --timeout argument: should be positive integer')
    while True:
        elapsed = time.time() - start
        if elapsed > timeout:
            raise Exception(TIMEOUT_ERROR_MESSAGE)
        try:
            md: ModelDeployment = md_client.get(md_id)
            if md.status.state == READY_STATE:
                # Ready counts as done only once enough replicas are up.
                if md.spec.min_replicas <= md.status.available_replicas:
                    print(f'Model {md_id} was deployed. '
                          f'Deployment process took {round(time.time() - start)} seconds')
                    return
                else:
                    print(f'Model {md_id} was deployed. '
                          f'Number of available pods is {md.status.available_replicas}/{md.spec.min_replicas}')
            elif md.status.state == FAILED_STATE:
                raise Exception(f'Model deployment {md_id} was failed')
            elif md.status.state == "":
                # State not reported yet; keep polling.
                print(f"Can't determine the state of {md.id}. Sleeping...")
            else:
                print(f'Current deployment state is {md.status.state}. Sleeping...')
        except WrongHttpStatusCode:
            LOGGER.info('Callback have not confirmed completion of the operation')
        LOGGER.debug('Sleep before next request')
        time.sleep(DEFAULT_WAIT_TIMEOUT)
| odahu/odahu-flow | packages/cli/odahuflow/cli/parsers/deployment.py | deployment.py | py | 11,525 | python | en | code | 12 | github-code | 13 |
def main():
    """Read T test cases; for each, print the minimum required total.

    Each case supplies a jar count and the per-jar amounts; merging n jars
    saves (n - 1) units, so the answer is sum(amounts) - (n - 1).
    """
    test_cases = int(input())
    for _ in range(test_cases):
        jar_count = int(input())
        amounts = [int(token) for token in input().split()]
        print(sum(amounts) - (jar_count - 1))


if __name__ == '__main__':
    main()
24839328224 | """
Created on Sat Feb 24 16:20:17 2022
@author: mike_
"""
import pandas as pd
import matplotlib.pyplot as plt
# load rankings data here:
steel_rankings = pd.read_csv('Golden_Ticket_Award_Winners_Steel.csv')
wood_rankings = pd.read_csv('Golden_Ticket_Award_Winners_Wood.csv')
# print(steel_rankings.head(), wood_rankings.head())
# NOTE(review): rankings and ranking_year are never used below -- candidates
# for removal.
rankings = []
ranking_year = []
# write function to plot rankings over time for 1 roller coaster here:
def one_roller_customer_rank(roller_coaster, roller_coaster_park, material):
    """Plot one coaster's Golden Ticket rank for every year it was ranked.

    Parameters
    ----------
    roller_coaster : str
        Coaster name as it appears in the rankings' 'Name' column.
    roller_coaster_park : str
        Park name ('Park' column); disambiguates same-named coasters.
    material : str
        'wood' or 'steel' -- selects which rankings table to search.
    """
    if material == 'wood':
        roller_coaster_rank = wood_rankings[(wood_rankings['Name'] ==
            roller_coaster) & (wood_rankings['Park'] == roller_coaster_park)]
    elif material == 'steel':
        # Bug fix: build the masks from steel_rankings' own columns.  The
        # original filtered steel_rankings with wood_rankings masks, which
        # selects the wrong (misaligned) rows.
        roller_coaster_rank = steel_rankings[(steel_rankings['Name'] ==
            roller_coaster) & (steel_rankings['Park'] == roller_coaster_park)]
    else:
        # Bug fix: return early; the original fell through and raised a
        # NameError on roller_coaster_rank below.
        print(roller_coaster + ' is not ranked.')
        return
    x_values = roller_coaster_rank['Year of Rank']
    y_values = roller_coaster_rank['Rank']
    ax_1 = plt.subplot()
    plt.plot(range(len(x_values)), y_values)
    ax_1.invert_yaxis()  # rank 1 belongs at the top
    ax_1.set_xticks(range(len(x_values)))
    ax_1.set_xticklabels(x_values)
    plt.xlabel('Years')
    plt.ylabel('Rank')
    plt.title(roller_coaster + ' Rank by Year')
    plt.show()


one_roller_customer_rank('El Toro', 'Six Flags Great Adventure','wood')
plt.clf()
# 4
# Create a function that compares Roller Coasters in the same graph
def two_coaster(coaster_1, park_1, coaster_2, park_2, material):
    """Plot two coasters' yearly Golden Ticket ranks on one set of axes.

    Parameters
    ----------
    coaster_1, park_1 : str
        First coaster and its park ('Name' / 'Park' columns).
    coaster_2, park_2 : str
        Second coaster and its park.
    material : str
        'wood' uses the wooden rankings; anything else falls back to the
        steel rankings (preserving the original behaviour).
    """
    if material == 'wood':
        roller_coaster_1_rank = wood_rankings[(wood_rankings['Name'] ==
            coaster_1) & (wood_rankings['Park'] == park_1)]
        roller_coaster_2_rank = wood_rankings[(wood_rankings['Name'] ==
            coaster_2) & (wood_rankings['Park'] == park_2)]
    else:
        # Bug fix: build the masks from steel_rankings' own columns.  The
        # original filtered steel_rankings with wood_rankings masks, which
        # selects the wrong (misaligned) rows.
        roller_coaster_1_rank = steel_rankings[(steel_rankings['Name'] ==
            coaster_1) & (steel_rankings['Park'] == park_1)]
        roller_coaster_2_rank = steel_rankings[(steel_rankings['Name'] ==
            coaster_2) & (steel_rankings['Park'] == park_2)]
    #print(roller_coaster_1_rank, roller_coaster_2_rank)
    # Find values for x and y
    coaster_1_x_values = roller_coaster_1_rank['Year of Rank']
    coaster_1_y_values = roller_coaster_1_rank['Rank']
    coaster_2_x_values = roller_coaster_2_rank['Year of Rank']
    coaster_2_y_values = roller_coaster_2_rank['Rank']
    # plot coasters
    plt.plot(range(len(coaster_1_x_values)), coaster_1_y_values, '-', color='red', label=coaster_1)
    ax_1 = plt.subplot()
    ax_1.invert_yaxis()
    ax_1.set_xticks(range(len(coaster_1_x_values)))
    ax_1.set_xticklabels(coaster_1_x_values)
    plt.xlabel('Years')
    plt.ylabel('Rank')
    plt.title(coaster_1 + " and " + coaster_2 + ' Ranked by Year')
    plt.plot(range(len(coaster_2_x_values)), coaster_2_y_values, '--', color='blue', label=coaster_2)
    plt.legend()
    plt.show()


two_coaster('El Toro', 'Six Flags Great Adventure', 'Boulder Dash',
            'Lake Compounce', 'wood')
plt.clf()
# write function to plot top n rankings over time here:
def ranking_coasters(n, material):
    """
    Plot the rank history of every coaster that ever reached the top n.

    Parameters
    ----------
    n : int
        Rank cut-off; rows with Rank <= n are kept.
    material : str
        'wood' selects the wooden table, anything else the steel table.
    """
    if material == 'wood':
        top_nth_ranks = wood_rankings[wood_rankings['Rank'] <= n]
    else:
        top_nth_ranks = steel_rankings[steel_rankings['Rank'] <= n]
    #print(top_nth_ranks)
    ax= plt.subplot()
    # one line per distinct coaster name
    for coaster in set(top_nth_ranks['Name']):
        coaster_rankings = top_nth_ranks[top_nth_ranks['Name'] == coaster]
        ax.plot(coaster_rankings['Year of Rank'],coaster_rankings['Rank'], label=coaster)
    plt.show()
    #x_values = top_nth_rank['Years in Rank']
ranking_coasters(5, 'wood')
plt.clf()
# 6
# load roller coaster data
roller_coasters_data = pd.read_csv('roller_coasters.csv')
# write function to plot histogram of column values here:
def roller_coaster_hist(df,column):
    """
    Histogram one numeric column of a coaster DataFrame (NaNs dropped).

    Parameters
    ----------
    df : pandas.DataFrame
        Coaster data with a numeric `column`.
    column : str
        Name of the column to histogram.
    """
    # NOTE(review): 'Quanity' below is a typo in the axis label text.
    plt.hist(df[column].dropna())
    plt.title('Roller Coaster Data')
    plt.xlabel(column)
    plt.ylabel('Quanity')
    plt.show()
    plt.clf()
# Create histogram of roller coaster speed
roller_coaster_hist(roller_coasters_data,'speed')
# Create histogram of roller coaster length
roller_coaster_hist(roller_coasters_data,'length')
# Create histogram of roller coaster number of inversions
roller_coaster_hist(roller_coasters_data,'num_inversions')
# Create a function to plot histogram of height values
# Heights above 140 are excluded as outliers before plotting.
heights = roller_coasters_data[roller_coasters_data['height'] <= 140]
roller_coaster_hist(heights,'height')
# write function to plot inversions by coaster at a park here:
def park_num_inversions(df,park):
    """
    Bar-less line plot of the inversion count for every coaster at `park`.
    """
    number_inversions = df[df['park'] == park]
    y_values = number_inversions['num_inversions']
    x_values = number_inversions['name']
    ax_1 = plt.subplot()
    plt.plot(range(len(x_values)), y_values)
    plt.title('Number of Inversions for ' + park)
    plt.xlabel('Roller Coaster')
    plt.ylabel('Number of Inversions')
    ax_1.set_xticks(range(len(x_values)))
    # rotate coaster names so the long labels stay readable
    ax_1.set_xticklabels(x_values,rotation=45,ha='right')
    plt.show()
    plt.clf()
park_num_inversions(roller_coasters_data, 'Port Aventura')
# write function to plot pie chart of operating status here:
def operating_status(df):
    """
    Pie chart of operating vs. definitely-closed coasters in `df`.
    Rows with other status values are ignored.
    """
    pie_values = [len(df[df['status']=='status.operating']),len(df[df['status']=='status.closed.definitely'])]
    pie_labels = ['Operating','Closed Definitely']
    ax_1 = plt.subplot()
    plt.pie(pie_values, labels=pie_labels, autopct='%0.1f%%')
    plt.title('Operation Status')
    # equal aspect keeps the pie circular
    ax_1.set_aspect('equal')
    plt.show()
# Create pie chart of roller coasters
operating_status(roller_coasters_data)
plt.clf()
# write function to create scatter plot of any two numeric columns here:
def coaster_scatter_plot(df, column_1, column_2):
    """Scatter one numeric column of `df` against another."""
    x_vals = df[column_1]
    y_vals = df[column_2]
    plt.scatter(x_vals, y_vals, color='blue')
    plt.title(column_1 + ' vs. ' + column_2)
    plt.xlabel(column_1)
    plt.ylabel(column_2)
    plt.show()
coaster_scatter_plot(roller_coasters_data,'speed','num_inversions')
plt.clf()
| gobr2005/codecademy | roller_coaster_starting/script.py | script.py | py | 6,816 | python | en | code | 0 | github-code | 13 |
12003291233 | from Path import Path
from Parameters import *
from MyFunctions import f
def get_curve_name(latex=False, rad_on=True, base_x=base_x, base_y=base_y, base_curve_coeffs=base_curve_coeffs,
                   curls_on=True, curls_x=curls_x, curls_y=curls_y, curls_curve_coeffs=curls_curve_coeffs,
                   radius_curve_coeffs=radius_curve_coeffs, speed=speed, q=q, rad_f=rad_f,
                   ORTHOGONAL_WAVES=ORTHOGONAL_WAVES, NORMALISE_WAVES=NORMALISE_WAVES, C=C):
    """Build a human-readable (optionally LaTeX) formula string describing the
    configured spirograph curve from its coefficient dictionaries."""
    if latex:
        prod_char = ' \cdot '
        prod_char2 = '\cdot '
        pi_str = '\__pi'.replace('__', '')
    else:
        prod_char = ' '
        prod_char2 = ' '
        pi_str = 'pi'

    # Symbolic product: multiplies numbers numerically, otherwise builds a
    # string, folding the special factors 0, 1 and -1 away.
    def str_mult(a, b, prod_char=' \cdot '):
        if not isinstance(b, str) and not isinstance(a, str):
            return a * b
        elif 0 in [a, b]:
            return 0
        else:
            if 1 in [a, b]:
                if a == 1:
                    a, b = b, a
                return a
            elif -1 in [a, b]:
                if a == -1:
                    a, b = b, a
                if a[0] == '-':
                    return a[1:]
                return f'-{a}'
            swapped = False
            if isinstance(a, str):
                a, b = b, a
                swapped = True
            if not isinstance(a, str):
                # round the numeric factor to the fewest decimals that keep it exact
                n = 4
                while n > 0 and a == round(a, n - 1):
                    n -= 1
                if n == 0:
                    a = str(round(a))
                else:
                    a = str(round(a, n))
            if swapped:
                a, b = b, a
            return f'{a}{prod_char}{b}'

    # True when the (numeric or string) expression is non-negative.
    def str_sign(expr):
        if not isinstance(expr, str):
            return expr >= 0
        return expr.strip()[0] != '-'

    # Symbolic sum with 0-elision and "a - b" formatting for negative b.
    def str_add(a, b):
        if not isinstance(a, str) and not isinstance(b, str):
            return a + b
        elif b == 0:
            return a
        elif a == 0:
            return b
        elif not str_sign(b):
            if not isinstance(b, str):
                return f'{a} - {-b}'
            return f'{a} - {b[1:]}'
        return f'{a} + {b}'

    # Negate a numeric or string expression.
    def change_sign(expr):
        if not isinstance(expr, str):
            return -expr
        if expr.strip()[0] == '-':
            return expr[1:]
        return '-' + expr

    # Render one term A*ff(a*t + b) from a coefficient dict.
    def func_val_calc(coeffs, A='A', a='a', b='b', ff=base_x):
        inner_prod_char = ''
        t = str_mult(coeffs[a], 't', inner_prod_char)
        if t == 0:
            t = coeffs[b]
            ff = f[ff](t)
        else:
            sign = str_sign(t)
            # print(t, sign, ff, str_mult((-1) ** (sign + 1) * round(coeffs[b] / pi, 2), pi_str), end=' ')
            t = str_add(t[1 - sign:], str_mult((-1) ** (sign + 1) * round(coeffs[b] / pi, 2), pi_str, inner_prod_char))
            # print(t)
            ff = f"{ff}({t})"
        # NOTE(review): `sign` is unbound when the t == 0 branch is taken --
        # TODO confirm that branch is unreachable in practice.
        if not sign and ff[:3] not in ['cos', 'coz']:
            return str_mult(change_sign(coeffs[A]), ff, prod_char=prod_char)
        return str_mult(coeffs[A], ff, prod_char=prod_char)

    # base_x_str = str_mult(base_curve_coeffs['A'], base_x + '('+ str_add(str_mult(base_curve_coeffs['a'], 't'), base_curve_coeffs['b'])+')')
    x_str = ''
    y_str = ''

    # Wrap an expression in the colour markup of its curve type (LaTeX only).
    def latexify(expr='', curve_type='base'):
        if latex:
            # if not isinstance(expr, str):
            #     return expr
            if not str_sign(expr):
                expr = f'-$:{curve_cols[curve_type]}[${change_sign(expr)}$]$'
            else:
                expr = f'$:{curve_cols[curve_type]}[${expr}$]$'
        return expr

    base_x_str = func_val_calc(base_curve_coeffs, ff=base_x)
    base_y_str = func_val_calc(base_curve_coeffs, A='B', a='c', b='d', ff=base_y)
    base_x_str = latexify(base_x_str, curve_type='base')
    base_y_str = latexify(base_y_str, curve_type='base')
    if curls_on:
        # rescale the curl coefficients: frequencies by -speed, amplitudes by 1/rad_ratio
        curls_curve_coeffs2 = {key: (-curls_curve_coeffs[key] * speed if key in 'ac' else (
            curls_curve_coeffs[key] / rad_ratio if key in 'AB' else curls_curve_coeffs[key])) for key in
                               curls_curve_coeffs}
        # print(curls_curve_coeffs2)
        curls_x_str = func_val_calc(curls_curve_coeffs2, ff=curls_x)
        curls_y_str = func_val_calc(curls_curve_coeffs2, A='B', a='c', b='d', ff=curls_y)
        if latex:
            # render the 1/rad_ratio amplitude as a \frac in LaTeX mode
            neg = not str_sign(curls_x_str)
            if not isinstance(curls_x_str, str):
                curls_curve_coeffs2['A'] = (-1) ** (neg) * curls_curve_coeffs['A']
                curls_x_str = func_val_calc(curls_curve_coeffs2, ff=curls_x)
                curls_x_str = ('-' if neg else '') + '\__frac{' + str(curls_x_str) + '}{' + str(rad_ratio) + '}'
            else:
                curls_curve_coeffs2['A'] = '\__frac{' + str(curls_curve_coeffs['A']) + '}{' + str(rad_ratio) + '}'
                curls_x_str = func_val_calc(curls_curve_coeffs2, ff=curls_x)
            neg = not str_sign(curls_y_str)
            if not isinstance(curls_y_str, str):
                curls_curve_coeffs2['B'] = (-1) ** (neg) * curls_curve_coeffs['B']
                curls_y_str = func_val_calc(curls_curve_coeffs2, A='B', a='c', b='d', ff=curls_y)
                curls_y_str = ('-' if neg else '') + '\__frac{' + str(curls_y_str) + '}{' + str(rad_ratio) + '}'
            else:
                curls_curve_coeffs2['B'] = '\__frac{' + str(curls_curve_coeffs['B']) + '}{' + str(rad_ratio) + '}'
                curls_y_str = func_val_calc(curls_curve_coeffs2, A='B', a='c', b='d', ff=curls_y)
        curls_x_str = latexify(curls_x_str, 'curls')
        curls_y_str = latexify(curls_y_str, 'curls')
        x_str = str_add(base_x_str, curls_x_str)
        y_str = str_add(base_y_str, curls_y_str)
    else:
        x_str = base_x_str
        y_str = base_y_str
    if not rad_on:
        return f'R {prod_char2}({x_str}; {y_str})'
    if ORTHOGONAL_WAVES:
        name = ' -- R(t, x(t), y(t)) = R(x(t) + r_x(t), y(t) + r_y(t))'
        # wave amplitudes are the second derivatives of the base curve, scaled by (1 - C)
        my_coeffs = {key: base_curve_coeffs[key] for key in base_curve_coeffs.keys()}
        my_coeffs['A'] = -(1 - C) * base_curve_coeffs['A'] * base_curve_coeffs['a'] ** 2
        my_coeffs['B'] = (1 - C) * base_curve_coeffs['B'] * base_curve_coeffs['c'] ** 2
        my_coeffs['q'] = q
        my_coeffs['rad_b'] = radius_curve_coeffs['b']
        my_coeffs['rx'] = func_val_calc(my_coeffs, ff=base_x)
        my_coeffs['ry'] = func_val_calc(my_coeffs, A='B', a='c', b='d', ff=base_y)
        rad_x_str = func_val_calc(my_coeffs, A='rx', a='q', b='rad_b', ff=rad_f)
        if rad_x_str != 0:
            rad_x_str = ('normed({})' if NORMALISE_WAVES else '{}').format(rad_x_str)
            # + (' div sqrt(square(d_2 base_x) + square(d_2 base_y))' if NORMALISE_WAVES else '')
            rad_x_str = latexify(rad_x_str, 'rad')
        rad_y_str = func_val_calc(my_coeffs, A='ry', a='q', b='rad_b', ff=rad_f)
        if rad_y_str != 0:
            rad_y_str = ('normed({})' if NORMALISE_WAVES else '{}').format(rad_y_str)
            # + (' div sqrt(square(d_2 base_x) + square(d_2 base_y))' if NORMALISE_WAVES else '')
            rad_y_str = latexify(rad_y_str, 'rad')
        x_str = str_add(x_str, rad_x_str)
        y_str = str_add(y_str, rad_y_str)
        rad_f_str = 'R'  # str(round(R))
    else:
        name = ' -- R(t, x(t), y(t)) = R(t)(x(t), y(t))'
        my_coeffs = {'A': (1 - C), 'q': q, 'b': radius_curve_coeffs['b']}
        rad_f_str = str_add(func_val_calc(my_coeffs, a="q", ff=rad_f), C)
        rad_f_str = latexify(rad_f_str, 'rad')
        if isinstance(rad_f_str, str):
            rad_f_str = f'R{prod_char}({rad_f_str})'
        else:
            rad_f_str = f'{rad_f_str}R'
        # str_mult(round(R), f'({str_add(func_val_calc(my_coeffs, a="q", ff=rad_f), C)})')
    name = f'{rad_f_str} {prod_char2}({x_str}, {y_str})'  # + name
    if latex:
        name = name.replace(', ', '; \quad ').replace('__', '')
        name = '$' + name + '$'
        # name = name.replace('(', '\left(')
        # name = name.replace(')', '\__right)').replace('__', '')
    while '  ' in name:
        print('double space', name.index('  '), name)
        name = name.replace('  ', ' ')
    return name
class Name:
    """Builds output image file names under a dated PATH directory."""

    def __init__(self, path=None):
        # path: optional pre-built Path; a fresh Path() is created otherwise
        if path is not None:
            self.PATH = path
        else:
            self.PATH = Path()

    def get_name(self, name=None, stage=0, final_save=False):
        """Return (file_name, next_stage) for the given save stage.

        'temp' maps to the scratch image; the first final save uses a
        timestamped name; otherwise a zero-padded stage prefix is used.
        """
        if name is None:
            name = ''
        stage += 1
        stage_len = len(str(stage))
        if name == 'temp':
            name = 'Images/temp.png'
            # if self.st_res:
            #     self.st_im.save('Images/temp_st.png')
            # temp saves do not consume a stage number
            stage -= 1
        elif final_save and stage == 1:
            name = self.PATH + '/' + self.PATH.instant() + ' ' + name + '.png'
        else:
            if not final_save:
                # NOTE(review): these two bare string literals are no-op
                # statements -- presumably leftovers; TODO confirm and remove.
                'Images/temp.png'
                'Images/temp_st.png'
            # len == 17 marks a freshly created PATH -- assumed; TODO confirm
            if len(self.PATH) == 17:
                path = self.PATH.instant() + ' - ' + name
                self.PATH.update(path)
            name = self.PATH + '/' + '000'[:3 - stage_len] + str(stage) + ' ' + self.PATH.instant() + '.png'
        return name, stage
| tkepes/spirograph | Name.py | Name.py | py | 9,120 | python | en | code | 0 | github-code | 13 |
10688750103 |
def date_boundary(filename, week):
    """Return True when the date embedded in *filename* falls in *week*.

    The filename must contain a ``201808DD`` stamp; the two digits after
    ``201808`` are taken as the day of the month.  Week "1" covers days
    1-17, week "2" days 18-24 and week "3" day 25 onwards.  Any other
    *week* value returns None (original behaviour preserved).
    """
    date = int(filename[filename.find("201808") + 6: filename.find("201808") + 8])
    if week == "1":
        return bool(date < 18)
    if week == "2":
        # Bug fix: include day 18 -- the original `18 < date < 25` left
        # day 18 assigned to no week at all.
        return bool(18 <= date < 25)
    if week == "3":
        # Bug fix: include day 25 -- the original `date > 25` left day 25
        # assigned to no week at all.
        return bool(date >= 25)
20192078946 | # -*- coding: utf-8 -*-
"""
Verification: 验证爬来下的ip是否可用, 取出文本/SSDB/Redis 中的ip进行分布验证, 为1个进程, 6个进行验证的线程, 1个进行取出的线程
_check_proxy: 将传入的proxy值进行验证, 通过bool值返回
verify_ip: 验证方法, 同时启动四个线程来使用, 加快验证的时间
get_txt_ip: 将ip从文本中一个一个拿出来
main: 为该class的主控函数
UsableIP:
"""
import sys
sys.path.append('..')
import time
import requests
import threading
from multiprocessing import Queue, Process
from Logger.log import get_logger, get_folder
_logger = get_logger(__name__)
_file_path = get_folder()
class Verification(object):
    """Reads proxies from proxy_pool.txt into queue_a (one reader thread) and
    verifies them with six worker threads, pushing good ones to queue_b."""

    def __init__(self, queue_a, queue_b):
        # queue_a: raw proxies to verify; queue_b: verified proxies
        self.queue_a = queue_a
        self.queue_b = queue_b
        # set to True once the input is exhausted, stopping the workers
        self.check = False
        self.main()

    # Check whether a single proxy is usable.
    def _check_proxy(self, proxy):
        if isinstance(proxy, bytes):
            proxy = proxy.decode('utf-8')
        proxies = {"http": "http://%s" % proxy}
        try:
            # verify=False: TLS certificate checking is deliberately skipped
            # for the probe request -- assumed intentional; TODO confirm.
            request = requests.get('http://httpbin.org/ip', proxies=proxies, timeout=10, verify=False)
            if request.status_code == 200:
                _logger.info('%s is ok' % proxy)
                return True
        # NOTE(review): a non-200 response falls through and returns None
        # (falsy), which the caller treats the same as False.
        except Exception as e:
            _logger.warning(e)
            return False

    # Read proxies out of the text file one line at a time.
    def get_txt_ip(self, queue_a):
        path = _file_path + "/proxy_pool.txt"
        with open(path, 'r') as file:
            while True:
                _proxy = file.readline()
                time.sleep(1)
                if not _proxy:
                    break
                _proxy = _proxy.replace("\n", "")
                if queue_a.full():
                    _logger.info('The queue is full for 5s')
                    time.sleep(5)
                queue_a.put(_proxy)
            file.close()
        _logger.info("%s file read finished" % path)

    # Verification worker: common interface for text / SSDB / Redis sources.
    def verify_ip(self, queue_a, queue_b):
        while True:
            if self.check:
                break
            _proxy = queue_a.get(True)
            if self._check_proxy(_proxy):
                queue_b.put(_proxy)

    def main(self):
        # start six verifier threads plus one file-reader thread
        verify_list = []
        for x in range(6):
            _verify = threading.Thread(target=self.verify_ip, args=(self.queue_a, self.queue_b), name="Verify %s" % x)
            verify_list.append(_verify)
        for x in verify_list:
            x.start()
        _logger.info("All Verification Thread Started")
        get_ip = threading.Thread(target=self.get_txt_ip, args=(self.queue_a, ), name='Get Txt IP')
        get_ip.start()
        _logger.info("Get IP Thread Started")
        get_ip.join()
        _logger.info("Get IP Thread Out")
        # busy-wait until the workers drain the queue, then signal shutdown
        while True:
            if self.queue_a.empty():
                self.check = True
                break
        for x in verify_list:
            x.join()
        _logger.info("All Verification Thread Out")
class UsableIP(object):
    """Drains verified proxies from a queue into usable_proxy_pool.txt."""

    def __init__(self, queue):
        self.queue = queue
        self.main()

    # Pop the next value from the queue.
    def _get_queue(self, queue):
        _proxy = queue.get(True)
        if _proxy == 'sort':
            # 'sort' is the producer's end-of-stream sentinel
            _proxy = False
        return _proxy

    # Append the queued proxies to the target text file / store.
    def save_usable_IP(self, queue):
        path = _file_path + "/usable_proxy_pool.txt"
        with open(path, "a+") as file:
            while True:
                _proxy = self._get_queue(queue)
                if not _proxy:
                    break
                file.write(str(_proxy) + "\n")
            file.close()
        _logger.info("usable_proxy_pool.txt close")

    def main(self):
        save_ip = threading.Thread(target=self.save_usable_IP, args=(self.queue, ), name="Save Usable IP")
        save_ip.start()
        _logger.info("Usable IP Thread Started")
        save_ip.join()
        _logger.info("Usable IP Thread Out")
def main():
    """Spawn the verifier and writer processes and wait for them to finish."""
    raw_proxies = Queue()
    good_proxies = Queue()
    verifier = Process(target=Verification, args=(raw_proxies, good_proxies), name='Verification Proxy')
    writer = Process(target=UsableIP, args=(good_proxies, ), name='Usable IP')
    verifier.start()
    writer.start()
    _logger.info('Verification Proxy and Usable IP Start')
    verifier.join()
    # 'sort' sentinel tells the writer that nothing more is coming.
    good_proxies.put("sort")
    writer.join()
    _logger.info('Verification Proxy and Usable IP Out')


if __name__ == '__main__':
    main()
| Eason-Chen0452/MyProject | ProxyPackage/VerificationProxy.py | VerificationProxy.py | py | 4,528 | python | en | code | 0 | github-code | 13 |
10548098166 | """
Contains base classes for Orders etc.
"""
from .const import GENERIC_PAYLOAD, HEADERS, NEXT_DAY_TIMESTAMP
import requests
from enum import Enum
# Exchange on which the order is routed.
class Exchange(Enum):
    NSE = "N"
    BSE = "B"
    MCX = "M"
# Market segment within the exchange.
class ExchangeSegment(Enum):
    CASH = "C"
    DERIVATIVE = "D"
    CURRENCY = "U"
# Whether the request places, modifies or cancels an order.
class OrderFor(Enum):
    PLACE = "P"
    MODIFY = "M"
    CANCEL = "C"
class OrderType(Enum):
    BUY = "BUY"
    SELL = "SELL"
# How long the order stays valid on the exchange.
class OrderValidity(Enum):
    DAY = 0
    GTD = 1
    GTC = 2
    IOC = 3
    EOS = 4
    FOK = 6
# Whether the order was placed after market close.
class AHPlaced(Enum):
    AFTER_MARKET_CLOSED = "Y"
    NORMAL_ORDER = "N"
# Short codes for the different API request types.
class RequestType(Enum):
    ORDER_PLACE="OP"
    ORDER_CANCEL="OC"
    ORDER_MODIFY="OM"
    ORDER_STATUS="OS"
    TRADE_INFO="TI"
    MARKET_FEED="MF"
    MARKET_DEPTH="MD"
    TRADE_BOOK="TB"
    MARKET_STATUS="MS"
    MARKET_HISTORY="MH"
    GET_BASKET="GB"
    BRACKET_ORDER="BO"
    BRACKET_MODIFY="BM"
    CREATE_BASKET="CB"
class Order:
def __init__(self, order_type: str, quantity: int, exchange: str,
exchange_segment: str, price: float ,is_intraday: bool ,
remote_order_id: str = "", scrip_code: int=0, exch_order_id: int = 0,
stoploss_price: float = 0, is_stoploss_order: bool = False, ioc_order: bool = False,scripdata: str='',
order_id: int = 0,vtd: str = f"/Date({NEXT_DAY_TIMESTAMP})/",
ahplaced: str= 'N',IsGTCOrder:bool =False,IsEOSOrder:bool =False):
self.exchange = exchange
self.exchange_segment = exchange_segment
self.price = price
self.order_id = order_id
self.order_type = order_type
self.quantity = quantity
self.scrip_code = scrip_code
self.remote_order_id = remote_order_id
self.exch_order_id = exch_order_id
self.disqty = quantity
self.stoploss_price = stoploss_price
self.is_stoploss_order = is_stoploss_order
self.ioc_order = ioc_order
self.is_intraday = is_intraday
self.vtd = vtd
self.ahplaced = ahplaced
self.scripData=scripdata
self.IsGTCOrder=IsGTCOrder
self.IsEOSOrder=IsEOSOrder
class Bo_co_order:
def __init__(self,scrip_code: int, Qty: int,LimitPriceInitialOrder:float,TriggerPriceInitialOrder:float
,LimitPriceProfitOrder:float,BuySell:str,Exch: str,ExchType: str,RequestType: str,LimitPriceForSL:float,
TriggerPriceForSL:float,TrailingSL:int=0,StopLoss:int=0,
LocalOrderIDNormal:int=0,LocalOrderIDSL:int=0,LocalOrderIDLimit:int=0,
public_ip: str = '192.168.1.1',traded_qty: int = 0,
order_for: str="S",
DisQty: int=0,ExchOrderId:str="0",AtMarket: bool = False,UniqueOrderIDNormal:str="",
UniqueOrderIDSL:str="",UniqueOrderIDLimit:str=""):
self.order_for = order_for
self.Exch = Exch
self.ExchType = ExchType
self.RequestType=RequestType
self.BuySell=BuySell
self.scrip_code=scrip_code
self.DisQty=DisQty
self.LimitPriceInitialOrder=LimitPriceInitialOrder
self.LimitPriceForSL=LimitPriceForSL
self.TriggerPriceInitialOrder=TriggerPriceInitialOrder
self.LimitPriceProfitOrder=LimitPriceProfitOrder
self.AtMarket=AtMarket
self.TriggerPriceForSL=TriggerPriceForSL
self.TrailingSL=TrailingSL
self.StopLoss=StopLoss
self.UniqueOrderIDNormal=UniqueOrderIDNormal
self.UniqueOrderIDSL=UniqueOrderIDSL
self.UniqueOrderIDLimit=UniqueOrderIDLimit
self.LocalOrderIDNormal=LocalOrderIDNormal
self.LocalOrderIDSL=LocalOrderIDSL
self.LocalOrderIDLimit=LocalOrderIDLimit
self.public_ip=public_ip
self.ExchOrderId=ExchOrderId
self.traded_qty =traded_qty
self.Qty=Qty
if LimitPriceProfitOrder==0:
self.order_for="C"
class Basket_order:
def __init__(self,Exchange:str,ExchangeType:str,Price:float,OrderType:str,Qty:int,ScripCode:str,DelvIntra:str,AtMarket:bool= False,StopLossPrice:float=0,
IsStopLossOrder:bool =False,IOCOrder: bool =False,IsIntraday:bool = False,AHPlaced:str='N',PublicIP:str='0.0.0.0',DisQty:int=0,iOrderValidity:float=0):
self.Exchange = Exchange
self.ExchangeType = ExchangeType
self.Price = Price
self.OrderType=OrderType
self.Qty=Qty
self.ScripCode=ScripCode
self.DelvIntra=DelvIntra
self.IsIntraday = IsIntraday
self.AtMarket=AtMarket
self.StopLossPrice=StopLossPrice
self.IsStopLossOrder=IsStopLossOrder
self.IOCOrder=IOCOrder
self.AHPlaced=AHPlaced
self.PublicIP=PublicIP
self.DisQty=DisQty
self.iOrderValidity=iOrderValidity
if DelvIntra == 'I':
self.IsIntraday=True
| OpenApi-5p/py5paisa | py5paisa/order.py | order.py | py | 4,949 | python | en | code | 73 | github-code | 13 |
4162457612 | from netCDF4 import Dataset
import numpy as np
import xarray as xr
def mask_plainnetcdf():
with Dataset(mask_file, 'r') as mask, Dataset(input_file, 'a') as to_mask:
for var in to_mask.variables:
if len(to_mask[var].shape) == 4: # The dimensions are time,depth,lat,lon
for i in range(0, to_mask[var].shape[0]):
to_mask[var][i, :, :, :] = ma.masked_where(
np.logical_not(np.array(mask['tmask'][0, :, :, :], dtype=bool)),
np.array(to_mask[var][i, :, :, :]))[:]
def mask_xarray(var, landseamask_var='tmask'):
with xr.open_dataset(mask_file) as m_f, xr.open_dataset(input_file) as i_f:
mask = m_f[landseamask_var][0,:].values
#data = i_f[var].where(mask)
i_f[var] = i_f[var].where(mask)
mask_file = "WFD-EI-LandFraction2d_1x1_updated.nc"
#input_file = "WFDEI_global_dyn.2d.monthly_timevar_latfix.nc"
input_file = "CARDAMOM_2001_2010_GPP_Mean_monthly_dayssince2001.nc"
data = mask_xarray("gpp_gb", "lsmask")
#outfile = Dataset(data, 'w')
| GCEL/netcdf-utils | maskvariablenetcdf.py | maskvariablenetcdf.py | py | 1,079 | python | en | code | 0 | github-code | 13 |
42582140616 | import matplotlib.pyplot as plt
import networkx as nx
from manim import *
#reference: https://github.com/nipunramk/Reducible
class GraphNode:
def __init__(self, name, position, radius=0.5, font_size=1):
#geometric properties
self.center = position
self.radius = radius
self.circle = Circle(radius=radius)
self.circle.move_to(position)
#node label
self.name_key = name
self.name = Text(str(name))
self.name.scale(font_size) #text size
self.name.move_to(position)
#list of neighbours
self.neighbours = []
#useful for visit (TODO: move out to make this class more general)
self.visited = False
self.from_where = ''
def connect(self, other, arrow=False):
#line between the current node and its neighbour ('other')
line_center = Line(self.center, other.center)
#now the problem is that the line connects the centers of the nodes
#here we get the direction of the line and the point 'start' and 'end'
direction = line_center.get_unit_vector()
start, end = line_center.get_start_and_end()
#now we move 'start' and 'end' by the value of the radius along this direction
new_start = start + direction * self.radius
new_end = end - direction * self.radius
line = Line(new_start, new_end)
if arrow:
line.add_tip()
#add 'other' node to the list of neighbours of the current node
self.neighbours.append(other)
return line
def node_layout(edges_input, layout = 'kamada_kawai_layout'):
#we use the library NETWORKX, we create a graph and add the edges
#https://networkx.org/documentation/stable/reference/drawing.html?highlight=layout#module-networkx.drawing.layout
G = nx.DiGraph()
G.add_edges_from(edges_input)
try:
layout_function = eval(f'nx.{layout}') #f-string
#in 'pos' we have each node label with (x,y) coordinates
pos = layout_function(G)
labels = list(pos.keys())
#we want to give as output something in the form
# {'0': array([-1.6, 0.1, 0. ]), '1': array([ 0.4, -1.8 , 0. ])}
#we use (x,y) coordinates from 'pos' and edit them in order to fit the space properly
#the following coefficient indicates how much we want the nodes to be spaced out
#we compute the ratio between the available space and the space taken by the graph in order to scale it
x = [x for x, y in pos.values()]
y = [y for x, y in pos.values()]
coeff_x = config.frame_x_radius/(abs(max(x)-min(x)))
coeff_y = config.frame_y_radius/(abs(max(y)-min(y)))
#here we save the scaled positions
positions = []
for label in labels:
positions.append( np.array([pos.get(label)[0]*coeff_x, pos.get(label)[1]*coeff_y, 0]) )
#the following is the output in the desired shape
nodes_and_positions = dict(zip(labels, positions))
return nodes_and_positions
except:
print('Layout not available')
def make_graph_given_positions(nodes_pos_input, edges_input, undirected=True, arrow=False, radius=0.5, font_size=1):
nodes = {}
edges = {}
#from the input we read the label and the position, then we create a 'GraphNode'
for node_label in nodes_pos_input.keys():
pos = nodes_pos_input[node_label]
nodes[node_label] = GraphNode(node_label, position=pos, radius=radius, font_size=font_size)
#now we add edges to the dictionary 'edges', where the key is the pair (u, v)
#first we create the pair (first, second) reading from the input 'edges_input'
#then we call the function 'connect' on each edge
for edge in edges_input:
first, second = edge
edges[edge] = nodes[ first ].connect(nodes[ second ], arrow=arrow)
#if the graph is undirected we add also the pair (v, u)
if undirected:
first, second = edge
edge = second, first
edges[edge] = nodes[ second ].connect(nodes[ first ], arrow=arrow)
return nodes, edges
def set_graph_visual_properites(nodes, edges,
node_color=LIGHT_GREY, stroke_color=WHITE, data_color=WHITE, edge_color=LIGHT_GREY,
scale_factor=1):
n = []
e = []
#here we set visual properties of each node
for node in nodes.values():
node.circle.set_fill(color=node_color, opacity=0.5)
node.circle.set_stroke(color=stroke_color)
node.name.set_color(color=data_color)
#add node to the list
n.append(VGroup(node.circle, node.name))
#here we set visual properties of each edge
for edge in edges.values():
edge.set_stroke(width=7*scale_factor)
edge.set_color(color=edge_color)
if edge.has_tip():
edge.get_tip().set_stroke(width=1)
e.append(edge)
#this function returns a graph with all the colors/opacity/distances defined
return VGroup( VGroup(*n), VGroup(*e) )
def highlight_node(node, color=RED, scale_factor=1):
#here we create a (red) circle with the same radius and opacity 0
highlighted_node = Circle(radius=node.circle.radius * scale_factor)
highlighted_node.move_to(node.circle.get_center())
highlighted_node.set_stroke(width=8 * scale_factor)
highlighted_node.set_color(color)
highlighted_node.set_fill(opacity=0)
return highlighted_node
def highlight_edge(edges, u, v, color=RED, scale_factor=1, arrow=False):
#edge that we want to highlight
edge = edges[(u, v)]
#new line, same as the one already in the graph
highlighted_edge = Line(edge.get_start(), edge.get_end())
highlighted_edge.set_stroke(width=16*scale_factor)
highlighted_edge.set_color(color)
if arrow:
highlighted_edge.add_tip()
highlighted_edge.get_tip().set_color(color)
return highlighted_edge
def dfs(nodes, start):
#we want this function to return the order in which the nodes are visited: 'dfs_order'
dfs_order = []
#when visiting a node we also want to keep track of from which node we are coming from
#this in necessary for the animation, because we need to know which edge to highlight
#we add the first node to the stack
stack = [ nodes[start] ]
while len(stack) > 0:
node = stack.pop()
if not node.visited:
node.visited = True
#when a node is visited, we add its name to 'dfs_order'
dfs_order.append( node.name_key )
#now we check its neighbours
for neighbour in node.neighbours:
if not neighbour.visited:
#if a neighbour has never been visited we save that we are coming from the current node
neighbour.from_where = node.name_key
#then we add it to the stack for it to be visited
stack.append(neighbour)
return dfs_order
| martina-battisti/manim-rb-trees | graph_library.py | graph_library.py | py | 7,334 | python | en | code | 0 | github-code | 13 |
14951953458 | from django import forms
from .models import Comment ,Author ,Post
class TagForm(forms.Form):
name=forms.CharField(max_length=25, min_length=6)
class AuthForm(forms.ModelForm):
class Meta:
model=Author
fields=['name']
class PostForm(forms.ModelForm):
class Meta:
model=Post
fields=['title', 'article', 'author', ]
widgets={
'title' : forms.TextInput(attrs={'class': 'h-full-width' , 'placeholder':'Title'}),
'author' : forms.TextInput(attrs={'class': 'h-full-width' , 'placeholder':'Author'}),
'article' : forms.Textarea(attrs={'class': 'h-full-width' , 'placeholder':'Type your Articles here'})
}
class CommentForm(forms.ModelForm):
class Meta:
model=Comment
fields=['name', 'email', 'comment']
widgets={
'name' : forms.TextInput(attrs={' class ': 'h-full-width' , 'placeholder':'your name', 'id': 'cName'}),
'email' : forms.TextInput(attrs={' class ': 'h-full-width' , 'placeholder':'your email', 'id': 'cEmail'}),
'comment' : forms.Textarea(attrs={' class ': 'h-full-width' , 'placeholder':'your comment', 'id': 'cMessage'})
}
| Voidblocker/First-Blog-Project | my_app/forms.py | forms.py | py | 1,219 | python | en | code | 0 | github-code | 13 |
17324057524 | from ckeditor.fields import RichTextField
from django.db import models
from django.utils.translation import gettext as _
from phonenumber_field.modelfields import PhoneNumberField
class Direction(models.Model):
name = models.CharField(max_length=125, verbose_name=_("Name"))
date_create = models.DateTimeField(auto_now_add=True, verbose_name=_("Date create"))
def __str__(self):
return self.name
class Country(models.Model):
name = models.CharField(max_length=125, verbose_name=_("Country"))
flag = models.ImageField(verbose_name=_("Photo flag"), upload_to="location")
def __str__(self):
return self.name
class Career(models.Model):
direction = models.ForeignKey(
Direction, verbose_name=_("Direction"), on_delete=models.CASCADE
)
name = models.CharField(max_length=125, verbose_name=_("Vacancy name"))
country = models.ForeignKey(
Country,
verbose_name=_("Country"),
on_delete=models.CASCADE,
related_name="country",
)
short_description = models.TextField(verbose_name=_("Short description"))
description = RichTextField(verbose_name=_("Description"))
remote = models.BooleanField(default=False, verbose_name=_("Remote"))
office = models.BooleanField(default=False, verbose_name=_("Office"))
relocation = models.BooleanField(default=False, verbose_name=_("Relocation"))
date_create = models.DateTimeField(auto_now_add=True, verbose_name=_("Date create"))
archived = models.BooleanField(verbose_name=_("Archived"), default=False)
def __str__(self):
return self.name
class Status(models.Model):
name = models.CharField(max_length=125, verbose_name=_("Status"))
def __str__(self):
return self.name
class CV(models.Model):
career = models.ForeignKey(
Career, verbose_name=_("Vacancy"), on_delete=models.DO_NOTHING
)
status = models.ForeignKey(
Status,
verbose_name=_("Status"),
null=True,
blank=True,
on_delete=models.DO_NOTHING,
)
name = models.CharField(max_length=125, verbose_name=_("Name"))
surname = models.CharField(max_length=125, verbose_name=_("Surname"))
phone_number = PhoneNumberField(verbose_name=_("Phone number"))
email = models.EmailField(verbose_name=_("Email"))
cv_file = models.FileField(verbose_name=_("Summary PartnerCV"), upload_to="CV_file")
date_create = models.DateTimeField(
verbose_name=_("Date"), auto_now_add=True, null=True, blank=True
)
def __str__(self):
return self.name
| xislam/Zeon | career/models.py | models.py | py | 2,585 | python | en | code | 0 | github-code | 13 |
17043947824 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenIotmbsFacecheckSendModel(object):
def __init__(self):
self._dev_id = None
self._face_id = None
self._floor_num = None
self._out_request_id = None
self._phone_no = None
self._project_id = None
self._sn_list = None
@property
def dev_id(self):
return self._dev_id
@dev_id.setter
def dev_id(self, value):
self._dev_id = value
@property
def face_id(self):
return self._face_id
@face_id.setter
def face_id(self, value):
self._face_id = value
@property
def floor_num(self):
return self._floor_num
@floor_num.setter
def floor_num(self, value):
self._floor_num = value
@property
def out_request_id(self):
return self._out_request_id
@out_request_id.setter
def out_request_id(self, value):
self._out_request_id = value
@property
def phone_no(self):
return self._phone_no
@phone_no.setter
def phone_no(self, value):
self._phone_no = value
@property
def project_id(self):
return self._project_id
@project_id.setter
def project_id(self, value):
self._project_id = value
@property
def sn_list(self):
return self._sn_list
@sn_list.setter
def sn_list(self, value):
if isinstance(value, list):
self._sn_list = list()
for i in value:
self._sn_list.append(i)
def to_alipay_dict(self):
params = dict()
if self.dev_id:
if hasattr(self.dev_id, 'to_alipay_dict'):
params['dev_id'] = self.dev_id.to_alipay_dict()
else:
params['dev_id'] = self.dev_id
if self.face_id:
if hasattr(self.face_id, 'to_alipay_dict'):
params['face_id'] = self.face_id.to_alipay_dict()
else:
params['face_id'] = self.face_id
if self.floor_num:
if hasattr(self.floor_num, 'to_alipay_dict'):
params['floor_num'] = self.floor_num.to_alipay_dict()
else:
params['floor_num'] = self.floor_num
if self.out_request_id:
if hasattr(self.out_request_id, 'to_alipay_dict'):
params['out_request_id'] = self.out_request_id.to_alipay_dict()
else:
params['out_request_id'] = self.out_request_id
if self.phone_no:
if hasattr(self.phone_no, 'to_alipay_dict'):
params['phone_no'] = self.phone_no.to_alipay_dict()
else:
params['phone_no'] = self.phone_no
if self.project_id:
if hasattr(self.project_id, 'to_alipay_dict'):
params['project_id'] = self.project_id.to_alipay_dict()
else:
params['project_id'] = self.project_id
if self.sn_list:
if isinstance(self.sn_list, list):
for i in range(0, len(self.sn_list)):
element = self.sn_list[i]
if hasattr(element, 'to_alipay_dict'):
self.sn_list[i] = element.to_alipay_dict()
if hasattr(self.sn_list, 'to_alipay_dict'):
params['sn_list'] = self.sn_list.to_alipay_dict()
else:
params['sn_list'] = self.sn_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenIotmbsFacecheckSendModel()
if 'dev_id' in d:
o.dev_id = d['dev_id']
if 'face_id' in d:
o.face_id = d['face_id']
if 'floor_num' in d:
o.floor_num = d['floor_num']
if 'out_request_id' in d:
o.out_request_id = d['out_request_id']
if 'phone_no' in d:
o.phone_no = d['phone_no']
if 'project_id' in d:
o.project_id = d['project_id']
if 'sn_list' in d:
o.sn_list = d['sn_list']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayOpenIotmbsFacecheckSendModel.py | AlipayOpenIotmbsFacecheckSendModel.py | py | 4,166 | python | en | code | 241 | github-code | 13 |
72554938578 | # 1. вывести главное окно по центру
# 2. отключить от него resize
# 3. после главной кнопки появляется три новые кнопки ( через toplevel)
# фейерверк у главного окна по кнопке. кнопка которая отключает
# через 15, 30, 45 секунд с обратным отсчетом.
# через это время фейерверк заканчивается, форма закрывается
from tkinter import*
root = Tk()
width = 350
height = 150
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
x = int((screen_width/2) - (width/2))
y = int((screen_height/2) - (height/2))
root.geometry(str(width) + "x" + str(height) + "+" + str(x) + "+" + str(y))
root.title("lab10")
button = Button(root, height = 1, width = 7, text = "Старт", command = lambda: click_button(button))
root.resizable(False, False)
root.mainloop()
| yanooomm/lab10-2 | lab10.py | lab10.py | py | 1,016 | python | ru | code | 0 | github-code | 13 |
42304360539 | import random
class Point:
def __init__(self,x,y):
self.x = x
self.y = y
def __str__(self):
return str(self.x) + ' - ' + str(self.y)
class EllipticCurveCryptography:
def __init__(self,a,b):
self.a = a
self.b = b
def _point_addition(self, P, Q):
x1, y1 = P.x, P.y
x2, y2 = Q.x, Q.y
if (x1 == x2 and y1 == y2):
m = (3 * x1 * x1 + self.a) / (2 * y1)
else:
m = (y2-y1) / (x2 - x1)
x3 = m*m - x1 - x2
y3 = m*(x1 - x3) - y1
return Point(x3,y3)
def double_and_add(self,n,P):
temp_point = Point(P.x,P.y)
binary = bin(n)[3:]
for binary_char in binary:
temp_point = self._point_addition(temp_point, temp_point)
if binary_char == '1':
temp_point = self._point_addition(temp_point, P)
return temp_point
if __name__ == '__main__':
ecc = EllipticCurveCryptography(-2,2)
generator_point = Point(-2,-1)
alice_random = random.randint(2, 1e4)
bob_random = random.randint(2, 1e4)
alice_public = ecc.double_and_add(alice_random,generator_point)
bob_public = ecc.double_and_add(bob_random,generator_point)
alice_secret_key = ecc.double_and_add(alice_random,bob_public)
bob_secret_key = ecc.double_and_add(bob_random,alice_public)
print(alice_secret_key)
print(bob_secret_key)
| ucadena07/Cryptography | ECC/EllipticCurveCrytography.py | EllipticCurveCrytography.py | py | 1,517 | python | en | code | 0 | github-code | 13 |
36205861546 | from telegram.ext.callbackcontext import CallbackContext
from message_generator import MessageGenerator
from image_generator import ImageGenerator
import logging
import time
from database import Database
import telegram
from telegram.ext import Updater, CommandHandler
from settings import *
class Bot:
def __init__(self, token: str, messageGenerator: MessageGenerator, imageGenerator: ImageGenerator) -> None:
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
self.logger = logging.getLogger("LOG")
self.logger.info("Starting BOT.")
self.updater = Updater(token)
self.dispatcher = self.updater.dispatcher
self.messageGenerator = messageGenerator
self.imageGenerator = imageGenerator
self.messageGenerator.update()
# messageGenerator.generate()
self.message = self.messageGenerator.get_message()
self.job = self.updater.job_queue
self.job_daily = self.job.run_daily(callback=self.send_daily_message, time=DAILY_TIME, days=(0,1,2,3,4,5,6), context=None, name=None)
start_handler = CommandHandler("start", self.send_start)
self.dispatcher.add_handler(start_handler)
help_handler = CommandHandler("help", self.send_help)
self.dispatcher.add_handler(help_handler)
enable_handler = CommandHandler("enable", self.send_enable)
self.dispatcher.add_handler(enable_handler)
disable_handler = CommandHandler("disable", self.send_disable)
self.dispatcher.add_handler(disable_handler)
chart_handler = CommandHandler("grafico", self.send_chart)
self.dispatcher.add_handler(chart_handler)
message_handler = CommandHandler("news", self.send_message)
self.dispatcher.add_handler(message_handler)
# force_handler = CommandHandler("force", self.force)
# self.dispatcher.add_handler(force_handler)
# daily_handler = CommandHandler("daily", self.send_daily)
# self.dispatcher.add_handler(daily_handler)
# message to send when the bot is started
def send_start(self, chatbot, update) -> None:
welcome_message = '*Ciao, sono il bot che tiene traccia dei vaccini!*\n\n'
welcome_message += '✔ Digita: /enable per ricevere informazioni giornaliere riguardo lo stato delle vaccinazioni in italia!\n\n'
welcome_message += '❌ Digita: /disable per non ricevere più le informazioni giornaliere.\n\n'
welcome_message += '📰 Digita: /news per visualizzare lo stato attuale.\n\n'
welcome_message += '⚙ Digita: /help per ulteriori informazioni.'
chatbot.message.reply_text(welcome_message, parse_mode = telegram.ParseMode.MARKDOWN)
# message to send when /help is received
def send_help(self, chatbot, update) -> None:
help_message = 'Author: @Simon761\n'
help_message += 'Gli aggiornamenti giornalieri avvengono alle ore 18:00\n'
help_message += 'Fonte dei dati: https://github.com/italia/covid19-opendata-vaccini/blob/master/dati'
chatbot.message.reply_text(help_message, parse_mode = telegram.ParseMode.MARKDOWN)
# message to send when /enable is received
def send_enable(self, chatbot, update) -> None:
# write the chat id in the database
chat_id = chatbot.message.chat_id
db = Database()
db.add_user(chat_id)
db.close()
# send the confermation message
enable_message = 'Riceverai informazioni ogni giorno alle 18:00!'
chatbot.message.reply_text(enable_message)
# message to send when /disable is received
def send_disable(self, chatbot, update) -> None:
# remove chat id from the database
chat_id = chatbot.message.chat_id
db = Database()
db.rem_user(chat_id)
db.close()
# send the confermation message
disable_message = 'Non riceverai più messaggi dal bot.'
chatbot.message.reply_text(disable_message)
def send_chart(self, chatbot, update: CallbackContext) -> None:
chat_id = chatbot.message.chat_id
update.bot.send_photo(chat_id, photo=open(IMG_FILE, 'br'))
def send_message(self, chatbox, update):
chatbox.message.reply_text(self.message, parse_mode = telegram.ParseMode.MARKDOWN)
# send the daily message to the subscribed users
def send_daily_message(self, chatbot) -> None:
# update the message
self.update_message()
# get subscribers chat_ids
db = Database()
users = db.get_users()
db.close()
# send updated message to subscribers
for user in users:
try:
chat_id = user[0]
chatbot.bot.send_message(chat_id, self.message, parse_mode = telegram.ParseMode.MARKDOWN)
# sleep 35 millisecond to prevent ban for spam
time.sleep(0.035)
except Exception:
logging.warning(f"Error sending message for chat_id: {chat_id}.")
# update the image TODO could have been done better
self.update_image()
def update_image(self) -> None:
self.imageGenerator.generate()
# update the message to send daily
def update_message(self) -> None:
self.messageGenerator.update()
self.message = messageGenerator.get_message()
# def force(self, chatbot, update):
# chat_id = chatbot.message.chat_id
# #if chat_id == "40136672":
# self.send_daily_message(chatbot)
# start the bot
def run(self) -> int:
self.logger.info("Polling BOT.")
self.updater.start_polling()
# Run the BOT until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the BOT gracefully.
self.updater.idle()
return 0
if __name__ == "__main__":
TOKEN = get_token()
messageGenerator = MessageGenerator()
imageGenerator = ImageGenerator()
BOT = Bot(TOKEN, messageGenerator, imageGenerator)
BOT.run()
| Endex761/Vacciniamoci | bot.py | bot.py | py | 6,211 | python | en | code | 0 | github-code | 13 |
42865685449 | from PyQt5.QtWidgets import QFrame
from qfluentwidgets import ComboBox
from ..layout.inputLabel import InputLabel
class Select(QFrame):
def __init__(self, label:str, items: list, parent):
inputLabel = InputLabel(label, parent)
self.comboBox = ComboBox(inputLabel)
self.comboBox.addItems(items)
self.comboBox.setCurrentIndex(0)
self.comboBox.move(200, 200)
inputLabel.addWidget(self.comboBox)
parent.addWidget(inputLabel)
def onChange(self, slot):
return self.comboBox.currentTextChanged.connect(slot) | raherygino/python-gui-like-windows-11 | app/components/input/Select.py | Select.py | py | 579 | python | en | code | 4 | github-code | 13 |
25105807793 | import matplotlib.pyplot as plt
import geopandas as geo
#equivalent to import pandas as pd
pd = geo.pd
EARTH = geo.read_file(geo.datasets.get_path('naturalearth_lowres'))
crs={'init':'epsg:4326'}
EEZbounds = geo.read_file('World_EEZ_v11_20191118_gpkg/eez_boundaries_v11.gpkg')
EEZ = geo.read_file('World_EEZ_v11_20191118_gpkg/eez_v11.gpkg')
def main():
fig, ax = plt.subplots(figsize=(10,10))
EARTH.plot(ax=ax, color = 'green')
#Example EEZ lines (UK)
#drawEEZ('United Kingdom', ax = ax, color = 'blue')
country_names = pd.read_csv('catches2Country.csv')['country']
drawMany(country_names, ax = ax, color = 'blue')
#Plot points last as they are smaller and will be covered
#example coordinates (-50, -50) to (50, 50) increasing by (2,2)
coords = Coords().add(range(-50,50, 2), range(-50,50, 2))
geodata = coords.geoDataFrame()
geodata.plot(ax=ax, color ='red', markersize= 5)
plt.show()
#Plots EEZ
def drawEEZ(country_name, boundaries = False, **plotArgs):
if boundaries:
toDraw = EEZbounds[EEZbounds['SOVEREIGN1'] == country_name]
else:
toDraw = EEZ[EEZ['SOVEREIGN1'] == country_name]
if not toDraw.empty:
toDraw.plot(**plotArgs)
def drawMany(country_names, boundaries = False, **plotArgs):
if boundaries:
toDraw = EEZbounds[EEZbounds['SOVEREIGN1'].isin(country_names)]
else:
toDraw = EEZ[EEZ['SOVEREIGN1'].isin(country_names)]
if not toDraw.empty:
toDraw.plot(**plotArgs)
#requires longs and lats, creates columns otherwise
class Coords(pd.DataFrame):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not 'latitude' in self.columns:
self['latitude'] = []
if not 'longitude' in self.columns:
self['longitude'] = []
def append(self, lat, long):
return Coords(super().append(pd.DataFrame({'latitude':[lat], 'longitude':[long]})))
def add(self, lats, longs):
return Coords(super().append(pd.DataFrame({'latitude': lats, 'longitude': longs})))
def geoDataFrame(self):
geometry = geo.points_from_xy(self['latitude'], self['longitude'], crs=crs)
return geo.GeoDataFrame(geometry = geometry, crs = crs)
if __name__ == '__main__':
main()
| intwhcom/Small-Cetaceans-Gap-Analysis | spacialDataMaps.py | spacialDataMaps.py | py | 2,321 | python | en | code | 0 | github-code | 13 |
38258831881 | import numpy as np
from sklearn.datasets import load_diabetes
from sklearn.decomposition import PCA
dataset = load_diabetes()
x = dataset.data
y = dataset.target
# print(x.shape,y.shape) (442, 10) (442,)
pca = PCA(n_components=8)
x2 = pca.fit_transform(x)
print(x2)
# print(x2.shape) (442, 7)
pca_EVR = pca.explained_variance_ratio_
print(pca_EVR)
print(sum(pca_EVR))
# 7개 0.9479436357350414
# 8개 0.9913119559917797
## 압축률!
cumsum = np.cumsum(pca.explained_variance_ratio_)
print('cumsum : ', cumsum)
d = np.argmax(cumsum >= 0.95)+1
print('cumsum >= 0.95', cumsum>=0.95)
print('d : ', d)
import matplotlib.pyplot as plt
plt.plot(cumsum)
plt.grid()
plt.show() | dongjaeseo/study | ml/m29_pca2_1_diabetes.py | m29_pca2_1_diabetes.py | py | 676 | python | en | code | 2 | github-code | 13 |
43728242103 | import cv2
import numpy as np
import os
# ================================= Warp Prespective =================================
'''
the perspective transformation is associated with the change in the viewpoint.
This type of transformation does not preserve parallelism, length, and angle. But they do preserve collinearity and incidence.
This means that the straight lines will remain straight even after the transformation.
'''
import cv2
import numpy as np
# Read image
image = cv2.imread('./Magazine.jpg')
# Define width and height of the image
width, height = 250, 350
# Define the 4 corner points of the image
pts1 = np.float32([[1050, 270], [1542, 543], [420, 740], [927, 1090]]) # These points are the 4 corner points of the image
# Define the 4 corner points of the output image
pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
# Compute the perspective transform matrix
matrix = cv2.getPerspectiveTransform(pts1, pts2)
# Apply the perspective transformation to the image
output = cv2.warpPerspective(image, matrix, (width, height))
# Draw the points on the image
for x in range(0, len(pts1)):
cv2.circle(image, (int(pts1[x][0]), int(pts1[x][1])), 5, (0, 0, 255), -1) # Corrected the center points to integers
# Display the image
cv2.imshow('image', image)
cv2.imshow('output', output)
cv2.waitKey(0)
cv2.destroyAllWindows()
| ahmadSoliman94/Computer-Vision | Image Processing/Transformations/Warp_prespective.py | Warp_prespective.py | py | 1,373 | python | en | code | 0 | github-code | 13 |
23371381768 | from turtle import *
speed(-1)
def draw_star(x,y,length):
for i in range (5):
forward(length)
right(144)
draw_star(1,1,100)
input()
speed(0)
color('blue')
for i in range(100):
import random
x = random.randint(-300, 300)
y = random.randint(-300, 300)
length = random.randint(3, 10)
draw_star(x, y, length)
## random.radint(): Return a random integer N such that a <= N <= b.
## Alias for randrange(a, b+1).
| Hailinh146/btvn-hailinh | Session 5/Turtle_circle_3_4.py | Turtle_circle_3_4.py | py | 458 | python | en | code | 0 | github-code | 13 |
11073952624 | import json
import logging
import os.path
import asyncio
import os
import subprocess
import sys
import time
from asyncio.subprocess import PIPE
import git
from git import Repo, InvalidGitRepositoryError
from clickhouse import DataType, RepoClickHouseClient
from datetime import datetime
ON_POSIX = 'posix' in sys.builtin_module_names
def connect_repo(repo_name: str, repo_folder: str):
logging.info(f'connecting to repo {repo_name} at {repo_folder}')
if os.path.exists(repo_folder):
if not os.path.isdir(repo_folder):
return Exception(f'[{repo_folder}] is not a folder')
try:
return Repo(repo_folder)
except InvalidGitRepositoryError:
# clean up dir and re-clone
logging.error(f'unable to connect to repository [{repo_name}]')
os.rmdir(repo_folder)
logging.info(f'cloning repo [{repo_name}] to [{repo_folder}]')
return git.Repo.clone_from(f'git@github.com:{repo_name}', repo_folder)
def update_repo(data_cache: str, repo_name: str):
    """Ensure a local clone of ``repo_name`` under ``data_cache`` exists and
    is up to date (git pull); return the clone's path."""
    repo_folder = os.path.join(data_cache, repo_name)
    repo = connect_repo(repo_name, repo_folder)
    status = repo.git.status()
    # Bug fix: the original tested `if not None:`, which is always true; the
    # intent was to log the status output only when there is any.
    if status:
        logging.info(status)
    repo.git.pull()
    return repo_folder
async def read_stream_and_display(stream, display):
"""Read from stream line by line until EOF, display
"""
output = []
while True:
line = await stream.readline()
if not line:
break
output.append(line)
display(line) # assume it doesn't block
return b''.join(output)
async def read_and_display(*cmd, cwd=os.getcwd(), stdin=None):
    """Capture cmd's stdout, stderr while displaying them as they arrive
    (line by line).  Returns (returncode, stdout_bytes, stderr_bytes).
    """
    # NOTE(review): `cwd=os.getcwd()` is evaluated once at import time, so the
    # default is the process's directory *when this module was loaded*.
    # start process
    process = await asyncio.create_subprocess_exec(*cmd, stdout=PIPE, stderr=PIPE, stdin=stdin, cwd=cwd)
    # read child's stdout/stderr concurrently (capture and display)
    try:
        stdout, stderr = await asyncio.gather(
            read_stream_and_display(process.stdout, sys.stdout.buffer.write),
            read_stream_and_display(process.stderr, sys.stderr.buffer.write))
    except Exception:
        # On any failure while reading, kill the child before re-raising.
        process.kill()
        raise
    finally:
        # wait for the process to exit (runs on both success and failure)
        rc = await process.wait()
    return rc, stdout, stderr
def is_valid_repo(repo_name):
    """Return True if ``git@github.com:<repo_name>`` is a reachable remote.

    Probes the remote heads with ``git ls-remote`` without cloning.
    """
    g = git.cmd.Git()
    try:
        g.ls_remote('-h', f'git@github.com:{repo_name}')
    except Exception:
        # Bug fix: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt; narrowed so Ctrl-C still interrupts the probe.
        return False
    return True
def git_import(repo_path, custom_params=[]):
    """Run ``clickhouse git-import`` inside ``repo_path``; True on exit 0.

    NOTE(review): ``custom_params`` is accepted but never forwarded to the
    command -- confirm whether it should be appended to the argv.  Its
    mutable default is harmless only because it is unused.
    """
    logging.info(f'generating git history at {repo_path}')
    # NOTE(review): asyncio.get_event_loop() is deprecated for this use on
    # Python 3.10+; asyncio.run() is the modern equivalent.
    loop = asyncio.get_event_loop()
    rc, _, _ = loop.run_until_complete(read_and_display('clickhouse', 'git-import', cwd=repo_path))
    return rc == 0
def clickhouse_import(client: RepoClickHouseClient, repo_path: str, repo_name: str, data_type: DataType):
    """Stream one git-import data type for ``repo_name`` into ClickHouse.

    Pipes a ``clickhouse local`` query over the git-import output in
    ``repo_path`` straight into a ``clickhouse client`` INSERT, importing
    only rows newer than the table's current max(time) for this repo.
    Returns the INSERT process's exit code (0 on success).
    """
    logging.info(f'handling {data_type.name} for {repo_name}')
    # Incremental import: only rows newer than what is already stored go in.
    max_time = client.query_row(statement=f"SELECT max(time) FROM {data_type.table} WHERE repo_name='{repo_name}'")[0]
    logging.info(f'max time for {data_type.name} is {max_time}')
    logging.info(f'importing {data_type.name} for {repo_name}')
    client_args = ['clickhouse', 'client', '--host', client.config.host, '--user',
                   client.config.username, '--password', client.config.password,
                   '--port', str(client.config.native_port), '--throw_if_no_data_to_insert', '0']
    if client.config.secure:
        client_args.append('--secure')
    # The INSERT reads Native-format rows from its stdin, which is fed by the
    # `clickhouse local` process below.
    client_insert = subprocess.Popen(client_args + ['--query',
                                                    f'INSERT INTO {data_type.table} FORMAT Native'],
                                     stdin=subprocess.PIPE)
    # NOTE(review): `ps` is never waited on or checked -- a failure of the
    # producer side is invisible to the caller; confirm this is acceptable.
    ps = subprocess.Popen(('clickhouse', 'local', '--query', f"{data_type.statement.format(repo_name=repo_name)} "
                                                             f"WHERE time > '{max_time}' FORMAT Native"),
                          stdout=client_insert.stdin, cwd=repo_path)
    client_insert.communicate()
    return client_insert.returncode
def _remove_file(file_path):
if not os.path.exists(file_path):
logging.warning(f'[{file_path}] does not exist. Cannot remove.')
try:
os.remove(file_path)
logging.info(f'removed file [{file_path}]')
except:
logging.exception(f'unable to remove [{file_path}]')
def import_repo(client: RepoClickHouseClient, repo_name: str, data_cache: str, types: list[DataType], keep_files=False):
    """End-to-end import of one repository.

    Validates the remote, clones/updates the local copy, runs
    ``clickhouse git-import``, then loads every requested data type into
    ClickHouse.  Raises on any failed stage.  Unless ``keep_files`` is set,
    each intermediate ``<type>.tsv`` produced by git-import is deleted after
    its import.
    """
    if not is_valid_repo(repo_name):
        raise Exception(f'cannot find remote repo [{repo_name}]')
    repo_path = update_repo(data_cache, repo_name)
    if not git_import(repo_path, []):
        raise Exception(f'unable to git-import [{repo_name}]')
    for data_type in types:
        if clickhouse_import(client, repo_path, repo_name, data_type) != 0:
            raise Exception(f'unable to import [{data_type.name}] for [{repo_name}] to ClickHouse')
        if not keep_files:
            _remove_file(os.path.join(repo_path, f'{data_type.name}.tsv'))
def _claim_job(client: RepoClickHouseClient, worker_id: str, task_table: str, retries=2):
    """Try to claim one unassigned job from ``task_table`` for ``worker_id``.

    Fetches up to ``retries`` candidate repos and attempts to stamp each with
    this worker's id; returns the claimed repo name, or None if every attempt
    was lost to another worker.
    """
    # find highest priority, oldest job thats not assigned - grab retries
    jobs = client.query_rows(f"SELECT repo_name FROM {task_table} WHERE worker_id = '' ORDER BY priority DESC, "
                             f"started_time ASC LIMIT {retries}")
    for job in jobs:
        repo_name = job[0]
        logging.info(f'attempting to claim {repo_name}')
        scheduled_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        try:
            # keeper map doesn't allow two threads to set here
            client.query_row(f"ALTER TABLE {task_table} UPDATE worker_id = '{worker_id}', "
                             f"started_time = '{scheduled_time}' WHERE repo_name = '{repo_name}' AND worker_id = ''")
            # this may either throw an exception if another worker gets there first OR return 0 rows if the
            # job has already been processed and deleted or claimed successfully. So we check we have set and claimed.
            assigned_worker_id = client.query_row(f"SELECT worker_id FROM {task_table} WHERE repo_name = '{repo_name}'")
            if assigned_worker_id[0] == worker_id:
                logging.info(f'[{worker_id}] claimed repo [{repo_name}]')
                return repo_name
            else:
                logging.info(f'unable to claim repo [{repo_name}]. maybe already claimed.')
        except:
            # Claim races are expected; log and move on to the next candidate.
            logging.exception(f'unable to claim repo [{repo_name}]. maybe already claimed.')
    return None
def worker_process(client: RepoClickHouseClient, data_cache: str, task_table: str, worker_id: str,
                   types: list[DataType], sleep_time=10, keep_files=False):
    """Run a worker loop forever: claim a job, import the repo, delete the
    job row, sleep ``sleep_time`` seconds, repeat.

    Import failures are logged but never abort the loop; the job row is
    always deleted afterwards so a failed repo can be rescheduled.
    """
    logging.info(f'starting worker {worker_id}')
    while True:
        logging.info(f'{worker_id} polling for messages')
        repo_name = _claim_job(client, worker_id, task_table)
        if repo_name is not None:
            try:
                import_repo(client, repo_name, data_cache, types, keep_files=keep_files)
            except Exception:
                logging.exception(f'[{str(worker_id)}] failed on repo [{repo_name}]')
            try:
                logging.info(f'cleaning up job [{repo_name}]')
                # always release the job so it can be scheduled
                client.query_row(f"DELETE FROM {task_table} WHERE repo_name='{repo_name}'")
            except:
                logging.exception(f'unable to clean up job [{repo_name}]. Manually clean.')
        logging.info(f'{worker_id} sleeping {sleep_time}s till next poll')
        time.sleep(sleep_time)
| ClickHouse/clickhub | repo/importer.py | importer.py | py | 7,714 | python | en | code | 12 | github-code | 13 |
35623546480 | import numpy as np
import matplotlib.pyplot as mpl
import ga
# Chosen equation:
#   Y = w1x1 + w2x2 + w3x3 + w4x4 + w5x5
# (x1,x2,x3,x4,x5) = (6,-4,5.7,7,-13,-6.9)
# The equation has 5 inputs and 5 weights.
# NOTE(review): the header says 5 inputs, but the list below holds SIX
# values -- confirm which is intended.
# Equation inputs
entradas_eq = [6,-4,5.7,7,-13,-6.9]
# Number of weights
pesosQtd = len(entradas_eq) # in this case 5 inputs (see NOTE above)
# Solutions per population
solPop = 8
num_parents_mating = 4
# Population size: solPop chromosomes, each with pesosQtd genes
popTam = (solPop, pesosQtd)
# Create the initial population at random
novaPop = np.random.uniform(low=-4.0, high=4.0, size=popTam)
print(novaPop)
melhoresSaidas = []
numGeracoes = 1000
for generation in range(numGeracoes):
    print("Geração : ", generation)
    # Measure the fitness of every chromosome in the population
    fitness = ga.cal_pop_fitness(entradas_eq, novaPop)
    print("Fitness")
    print(fitness)
    melhoresSaidas.append(np.max(np.sum(novaPop*entradas_eq, axis=1)))
    # Best output of the current iteration
    print("Melhor Resultado : ", np.max(np.sum(novaPop*entradas_eq, axis=1)))
    # Select the best parents of the population for mating
    parents = ga.select_mating_pool(novaPop, fitness, num_parents_mating)
    print("Parents")
    print(parents)
    # Generate the next generation via crossover
    offspring_crossover = ga.crossover(parents, offspring_size=(popTam[0]-parents.shape[0], pesosQtd))
    print("Crossover")
    print(offspring_crossover)
    # Add some variation to the offspring via mutation
    offspring_mutation = ga.mutation(offspring_crossover, num_mutations=2)
    print("Mutação")
    print(offspring_mutation)
    # Build the new population from the parents and the offspring
    novaPop[0:parents.shape[0], :] = parents
    novaPop[parents.shape[0]:, :] = offspring_mutation
# After all generations, pick the best solution:
# first compute the fitness of every solution in the final generation,
fitness = ga.cal_pop_fitness(entradas_eq, novaPop)
# then take the index of the solution with the best fitness.
melhorIdx = np.where(fitness == np.max(fitness))
print("Melhor Resultado : ", novaPop[melhorIdx, :])
print("Melhor Resultado fitness : ", fitness[melhorIdx])
mpl.plot(melhoresSaidas)
mpl.xlabel("Iteration")
mpl.ylabel("Fitness")
mpl.show()
30227683554 | from crispy_forms.helper import FormHelper
from django import forms
from salesapp.models import Item, Receipt, TrackSetting, ItemStocking
class ItemForm(forms.ModelForm):
    """Model form for ``Item`` exposing all fields; stock count read-only."""
    class Meta:
        model = Item
        fields = "__all__"
    def __init__(self, *args, **kwargs):
        super(ItemForm, self).__init__(*args, **kwargs)
        # Presumably stock is maintained by stocking/receipt records, so it
        # must not be edited directly -- rendered read-only here (confirm).
        self.fields['number_in_stock'].widget.attrs['readonly'] = True
        self.helper = FormHelper()  # crispy-forms rendering helper
class TrackSettingForm(forms.ModelForm):
    """Model form for ``TrackSetting``; date fields use HTML5 date pickers."""
    class Meta:
        model = TrackSetting
        fields = "__all__"
        widgets = {
            "start_date": forms.DateInput(attrs={"type": "date"}),
            "end_date": forms.DateInput(attrs={"type": "date"})
        }
class ReceiptForm(forms.ModelForm):
    """Model form for ``Receipt``; the date field uses an HTML5 date picker."""
    class Meta:
        model = Receipt
        fields = "__all__"
        widgets = {
            "date": forms.DateInput(attrs={"type": "date"})
        }
class ItemStockingForm(forms.ModelForm):
    """Model form for ``ItemStocking``; the date field uses an HTML5 date picker."""
    class Meta:
        model = ItemStocking
        fields = "__all__"
        widgets = {
            "date": forms.DateInput(attrs={"type": "date"})
        }
| brightkan/sales | salesapp/forms.py | forms.py | py | 1,121 | python | en | code | 0 | github-code | 13 |
5911549514 | class Solution:
def eraseOverlapIntervals(self, intervals):
def get_second(interval): # helper function for the sort() to return the end time of each interval
return interval[1]
intervals.sort(key = get_second) # sort the interval using the endtime of each interval as the key
n = len(intervals)
prev = 0
count = 1
for i in range(1, n):
if intervals[i][0] >= intervals[prev][1]: # compare the next interval with the previous one checking if the begining time of the next is at the same time or after the ending time of the previous.
prev = i # update previous if times are not overlapping
count += 1 # increment count to show that one interval is okay
return n - count # return number of intervals that cannot be attention by minusing total intervals by the count of intervals with non overlapping times | collinsakuma/LeetCode | Problems/435. Non-overlapping intervals/non_overlapping_intervals.py | non_overlapping_intervals.py | py | 942 | python | en | code | 0 | github-code | 13 |
3108386496 | """
The program displays the FIRST 10 lines of a FILE
whose NAME is provided as a COMMAND-LINE ARGUMENT,
CATCHING and HANDLING any EXCEPTIONS.
"""
# The system module must be imported to ACCESS the command-line ARGUMENTS
import sys
# Declaration of the CONSTANTS
NUM_LINES = 10
try:
    # Exactly one argument (the file name) must follow the script name.
    if len(sys.argv) != 2:
        raise Exception
    # Opening the file name (sysargv[1]) in read mode
    with open(sys.argv[1], "r") as f_name_opened:
        # Reading and displaying the first 10 lines of the opened file
        print("************************************* " +
              "FIRST 10 LINES of the FILE \"{}\"".format(sys.argv[1]) +
              " *************************************")
        for i in range(NUM_LINES):
            # NOTE(review): the latin-1 -> utf-8 round trip assumes the file
            # is UTF-8 text that was decoded with a Latin-1 default; confirm
            # the input encoding, otherwise this can raise UnicodeDecodeError.
            print(f_name_opened.readline().rstrip().encode(
                "latin-1").decode("utf-8"))
# Exception -> file not found
except FileNotFoundError:
    print("Warning, the file \"{}\" wasn't found.".format(sys.argv[1]))
    quit()
# All other exceptions, including the missing-argument Exception raised above
except:
    print("Warning, at least one file name must be provided as a command-line argument.")
    quit()
| aleattene/python-workbook | chap_07/exe_149_display_head_file.py | exe_149_display_head_file.py | py | 1,108 | python | en | code | 1 | github-code | 13 |
42105980918 | import sys
from collections import Counter
sys.setrecursionlimit(10 ** 8)
ini = lambda: int(sys.stdin.readline())
inl = lambda: [int(x) for x in sys.stdin.readline().split()]
ins = lambda: sys.stdin.readline().rstrip()
debug = lambda *a, **kw: print("\033[33m", *a, "\033[0m", **dict(file=sys.stderr, **kw))
def solve():
    """Read n, m and n votes from stdin; return the value held by a strict
    majority (> n // 2) of entries, or "?" when no such value exists."""
    n, _ = inl()
    votes = inl()
    winner, count = Counter(votes).most_common(1)[0]
    return winner if count > n // 2 else "?"
print(solve())
| keijak/comp-pub | vcon/asa20200818/C/main.py | main.py | py | 474 | python | en | code | 0 | github-code | 13 |
40406124291 | # Ejercicio 15
# A school principal is organising a study trip and needs to determine how
# much to charge each student and how much to pay the bus company.
# Pricing: 100+ students -> 65 EUR each; 50-99 -> 70 EUR; 30-49 -> 95 EUR;
# fewer than 30 -> a flat 4000 EUR bus rental split among the students.
# This script computes the per-student price and the total bus payment.
alumnos = int(input("Introduzca el número de alumnos: "))

# Bug fix: validate the head count *before* pricing.  Previously a value of
# 0 reached `4000 / alumnos` and crashed with ZeroDivisionError, and
# negative values computed a nonsensical price before being rejected.
if alumnos <= 0:
    print("El numero de alumnos debe de ser un valor positivo")
else:
    if alumnos >= 100:
        precio_x_alumno = 65
    elif 50 <= alumnos <= 99:
        precio_x_alumno = 70
    elif 30 <= alumnos <= 49:
        precio_x_alumno = 95
    else:
        # Fewer than 30 students: split the flat bus rental evenly.
        precio_x_alumno = 4000 / alumnos
    precio_autobus = alumnos * precio_x_alumno
    print("El precio por alumno es de: ", precio_x_alumno)
    print("El precio del autobus es de: ", precio_autobus)
| mavb86/ejercicios-python | seccion4/if/ejercicio15.py | ejercicio15.py | py | 1,165 | python | es | code | 0 | github-code | 13 |
7895742022 | from chat.schatclient import SChatClient
import pytest
from time import time
from lib.settings import COMMAND, ONLINE, TIMESTAMP, USER, ACCOUNT_NAME, ERROR, RESPONSE
# Canned protocol messages used as fixtures / expected values in the tests
# below.  TIMESTAMP is left empty here and filled in at comparison time.
ONLINE_MESSAGE = {
    COMMAND: ONLINE,
    TIMESTAMP: '',
    USER: {
        ACCOUNT_NAME: 'guest'
    }
}
ONLINE_USER_MESSAGE = {
    COMMAND: ONLINE,
    TIMESTAMP: '',
    USER: {
        ACCOUNT_NAME: 'test_user'
    }
}
# Expected server replies: 200 = OK, 400 = bad request with an error text.
CORRECT_SERVER_RESPONSE = {
    RESPONSE: 200
}
ERROR_SERVER_RESPONSE = {
    RESPONSE: 400,
    ERROR: 'Bad request'
}
# setting up tests
@pytest.fixture
def init():
    """Yield a fresh SChatClient (host "", port 7777) to each test and drop
    the reference afterwards."""
    try:
        sut = SChatClient("", 7777)
        print("SChatClient instance created.")
        yield sut
    finally:
        print("SChatClient instance deleted.")
        del sut
def test_make_online(init):
    """make_online() with no argument must build the guest ONLINE message."""
    result = init.make_online()
    # The timestamp is generated inside make_online, so align both sides on
    # one value before comparing the full message dicts.
    result[TIMESTAMP] = ONLINE_MESSAGE[TIMESTAMP] = time()
    print("starting assertion")
    assert result == ONLINE_MESSAGE, "Incorrect ONLINE message"
def test_make_online_user(init):
    """make_online(user) must build an ONLINE message for that account."""
    user = ONLINE_USER_MESSAGE[USER][ACCOUNT_NAME] = 'test_user'
    result = init.make_online(user)
    # Align the generated timestamp on both sides before comparing.
    result[TIMESTAMP] = ONLINE_USER_MESSAGE[TIMESTAMP] = time()
    assert result == ONLINE_USER_MESSAGE, "Incorrect argument in function make_online"
def test_parse_correct_response(init):
    """parse_server_answer() must format a 200 reply as a success message."""
    check_message = f'Correct message with response {CORRECT_SERVER_RESPONSE[RESPONSE]}.'
    assert init.parse_server_answer(CORRECT_SERVER_RESPONSE) == check_message, 'Invalid correct server response'
def test_parse_error_response(init):
    """parse_server_answer() must format a 400 reply as a bad-response
    message that includes the server's error text."""
    check_message = f'Bad response. {ERROR_SERVER_RESPONSE[RESPONSE]}: {ERROR_SERVER_RESPONSE[ERROR]}'
    print(check_message)
    assert init.parse_server_answer(ERROR_SERVER_RESPONSE) == check_message, 'Invalid incorrect server response'
| Solda-git/CS | test/test_client.py | test_client.py | py | 2,171 | python | en | code | 0 | github-code | 13 |
35114321411 | import re
def text_to_query(text):
    """Split *text* into sentence fragments suitable for search queries.

    Sentences are cut on terminal punctuation; any fragment of 23 or more
    characters is further chopped into 23-word chunks by
    split_by_nbr_word().  Empty fragments are dropped.
    """
    chunk_size = 23
    queries = []
    for fragment in re.split('[.!?›«»—]', text):
        if fragment == "":
            continue
        if len(fragment) < chunk_size:
            queries.append(fragment)
        else:
            queries.extend(split_by_nbr_word(chunk_size, fragment))
    return queries
def split_by_nbr_word(n, sentence):
    """Split *sentence* into chunks of at most *n* space-separated words.

    Returns a list of strings; an empty sentence yields an empty list.
    """
    # Renamed the local from `list` (which shadowed the builtin) and replaced
    # the count-then-slice loop with direct stride slicing; trailing empty
    # slices no longer need to be filtered out.
    words = sentence.split()
    return [" ".join(words[i:i + n]) for i in range(0, len(words), n)]
# text = "Le marketing Business to Business (B to B) est le marketing des entreprises qui vendent des biens ou des services à d’autres professionnels. Le marketing B to B est parfois appelé en français marketing d’entreprise à entreprise, marketing industriel, marketing professionnel, ou encore marketing d’affaires. Le marketing B to B n’est, à priori, pas une matière que l’on pourrait imaginer passionnante, or en l’étudiant de plus près, on s e rend compte que l’on a beaucoup à apprendre et combien cela peut être enrichissant."
# print(text_to_query(text))
| iliassaoufi/Plagiarism-check-algorithm__Python | getQuery.py | getQuery.py | py | 1,364 | python | fr | code | 1 | github-code | 13 |
31201129908 | from st2common import log as logging
from st2common.exceptions.triggers import TriggerDoesNotExistException
from st2common.models.api.reactor import (TriggerAPI, TriggerTypeAPI)
from st2common.models.system.common import ResourceReference
from st2common.persistence.reactor import (Trigger, TriggerType)
__all__ = [
'get_trigger_db_by_ref',
'get_trigger_db_given_type_and_params',
'get_trigger_type_db',
'create_trigger_db',
'create_trigger_type_db',
'create_or_update_trigger_db',
'create_or_update_trigger_type_db'
]
LOG = logging.getLogger(__name__)
def get_trigger_db_given_type_and_params(type=None, parameters=None):
    """Look up a TriggerDB by trigger type ref and parameters dict.

    Returns the first match, or None when nothing matches or the lookup
    raises ValueError (logged at debug).  NOTE: the ``type`` parameter
    shadows the builtin of the same name.
    """
    try:
        parameters = parameters or {}
        trigger_db = Trigger.query(type=type,
                                   parameters=parameters).first()

        if not parameters and not trigger_db:
            # We need to do double query because some TriggeDB objects without
            # parameters have "parameters" attribute stored in the db and others
            # don't
            trigger_db = Trigger.query(type=type, parameters=None).first()

        return trigger_db
    except ValueError as e:
        LOG.debug('Database lookup for type="%s" parameters="%s" resulted ' +
                  'in exception : %s.', type, parameters, e, exc_info=True)
        return None
def get_trigger_db_by_ref(ref):
    """
    Returns the trigger object from db given a string ref.

    Unlike get_trigger_type_db(), lookup errors are not caught here and
    propagate to the caller.

    :param ref: Reference to the trigger db object.
    :type ref: ``str``

    :rtype trigger_type: ``object``
    """
    return Trigger.get_by_ref(ref)
def _get_trigger_db(trigger):
    """Resolve a trigger dict to its TriggerDB: by pack+name ref when both
    keys are present, otherwise by type + parameters.  Raises for any
    non-dict input."""
    # TODO: This method should die in a fire
    # XXX: Do not make this method public.
    if isinstance(trigger, dict):
        name = trigger.get('name', None)
        pack = trigger.get('pack', None)

        if name and pack:
            ref = ResourceReference.to_string_reference(name=name, pack=pack)
            return get_trigger_db_by_ref(ref)
        return get_trigger_db_given_type_and_params(type=trigger['type'],
                                                    parameters=trigger.get('parameters', {}))
    else:
        raise Exception('Unrecognized object')
def get_trigger_type_db(ref):
    """
    Returns the trigger type object from db given a string ref.

    Returns None when the lookup raises ValueError (logged at debug).

    :param ref: Reference to the trigger type db object.
    :type ref: ``str``

    :rtype trigger_type: ``object``
    """
    try:
        return TriggerType.get_by_ref(ref)
    except ValueError as e:
        LOG.debug('Database lookup for ref="%s" resulted ' +
                  'in exception : %s.', ref, e, exc_info=True)
        return None
def _get_trigger_dict_given_rule(rule):
    """Build a trigger dict (pack, type ref, parameters) from the trigger
    reference stored on *rule*."""
    trigger = rule.trigger
    trigger_dict = {}
    triggertype_ref = ResourceReference.from_string_reference(trigger.get('type'))
    # trigger_dict is empty at this point, so the .get() default (the pack
    # parsed from the type ref) is always used.
    trigger_dict['pack'] = trigger_dict.get('pack', triggertype_ref.pack)
    trigger_dict['type'] = triggertype_ref.ref
    trigger_dict['parameters'] = rule.trigger.get('parameters', {})

    return trigger_dict
def create_trigger_db(trigger_api):
    """Persist *trigger_api* if no TriggerDB with the same pack+name exists
    yet; return the new or the already-existing model."""
    # TODO: This is used only in trigger API controller. We should get rid of this.
    trigger_ref = ResourceReference.to_string_reference(name=trigger_api.name,
                                                        pack=trigger_api.pack)
    trigger_db = get_trigger_db_by_ref(trigger_ref)
    if not trigger_db:
        trigger_db = TriggerAPI.to_model(trigger_api)
        LOG.debug('Verified trigger and formulated TriggerDB=%s', trigger_db)
        trigger_db = Trigger.add_or_update(trigger_db)
    return trigger_db
def create_or_update_trigger_db(trigger):
    """
    Create a new TriggerDB model if one doesn't exist yet or update existing
    one.

    :param trigger: Trigger info.
    :type trigger: ``dict``
    """
    assert isinstance(trigger, dict)

    existing_trigger_db = _get_trigger_db(trigger)

    if existing_trigger_db:
        is_update = True
    else:
        is_update = False

    trigger_api = TriggerAPI(**trigger)
    trigger_db = TriggerAPI.to_model(trigger_api)

    if is_update:
        # Reuse the stored document's id so add_or_update overwrites the
        # existing record instead of inserting a duplicate.
        trigger_db.id = existing_trigger_db.id

    trigger_db = Trigger.add_or_update(trigger_db)

    if is_update:
        LOG.audit('Trigger updated. Trigger=%s', trigger_db)
    else:
        LOG.audit('Trigger created. Trigger=%s', trigger_db)

    return trigger_db
def create_trigger_db_from_rule(rule):
    """Ensure the TriggerDB referenced by *rule* exists and return it.

    Parameterized triggers are created on demand; a missing parameterless
    trigger is a runtime error because it should have been created when its
    trigger type was registered.
    """
    trigger_dict = _get_trigger_dict_given_rule(rule)
    existing_trigger_db = _get_trigger_db(trigger_dict)
    # For simple triggertypes (triggertype with no parameters), we create a trigger when
    # registering triggertype. So if we hit the case that there is no trigger in db but
    # parameters is empty, then this case is a run time error.
    if not trigger_dict.get('parameters', {}) and not existing_trigger_db:
        raise TriggerDoesNotExistException(
            'A simple trigger should have been created when registering '
            'triggertype. Cannot create trigger: %s.' % (trigger_dict))

    if not existing_trigger_db:
        return create_or_update_trigger_db(trigger_dict)

    return existing_trigger_db
def create_trigger_type_db(trigger_type):
    """
    Creates a trigger type db object in the db given trigger_type definition as dict.

    If a trigger type with the same pack+name already exists, it is returned
    unchanged (no update is performed).

    :param trigger_type: Trigger type model.
    :type trigger_type: ``dict``

    :rtype: ``object``
    """
    trigger_type_api = TriggerTypeAPI(**trigger_type)
    ref = ResourceReference.to_string_reference(name=trigger_type_api.name,
                                                pack=trigger_type_api.pack)
    trigger_type_db = get_trigger_type_db(ref)

    if not trigger_type_db:
        trigger_type_db = TriggerTypeAPI.to_model(trigger_type_api)
        LOG.debug('verified trigger and formulated TriggerDB=%s', trigger_type_db)
        trigger_type_db = TriggerType.add_or_update(trigger_type_db)
    return trigger_type_db
def create_or_update_trigger_type_db(trigger_type):
    """
    Create or update a trigger type db object in the db given trigger_type definition as dict.

    :param trigger_type: Trigger type model.
    :type trigger_type: ``dict``

    :rtype: ``object``
    """
    assert isinstance(trigger_type, dict)

    # Lift the input dict to an API object, then to a DB model.
    api_obj = TriggerTypeAPI(**trigger_type)
    db_model = TriggerTypeAPI.to_model(api_obj)

    ref = ResourceReference.to_string_reference(name=db_model.name,
                                                pack=db_model.pack)
    existing = get_trigger_type_db(ref)
    updating = existing is not None

    if updating:
        # Reuse the stored document's id so add_or_update overwrites it.
        db_model.id = existing.id

    saved = TriggerType.add_or_update(db_model)

    if updating:
        LOG.audit('TriggerType updated. TriggerType=%s', saved)
    else:
        LOG.audit('TriggerType created. TriggerType=%s', saved)

    return saved
| gtmanfred/st2 | st2common/st2common/services/triggers.py | triggers.py | py | 7,035 | python | en | code | null | github-code | 13 |
16515911396 | file = open("input.txt","r")
# Advent of Code 2023 day 13: for each grid pattern, locate its mirror line.
# Part 1 (total1) scores perfect reflections; part 2 (total2) scores
# reflections with exactly one mismatched cell (the "smudge").
patterns = file.read().split("\n\n")
file.close()  # Bug fix: the input handle was previously never closed.

total1 = 0
total2 = 0
for pattern in patterns:
    lines = pattern.split("\n")
    # Horizontal mirror between rows i-1 and i: count mismatches over every
    # reflected row pair that still fits inside the grid.
    for i in range(1, len(lines)):
        cnt = 0
        for j in range(1, min(len(lines) - i, i) + 1):
            for k in range(len(lines[0])):
                if lines[i - j][k] != lines[i + j - 1][k]:
                    cnt += 1
        if cnt == 0:
            total1 += 100 * i
        elif cnt == 1:
            total2 += 100 * i
    # Vertical mirror between columns i-1 and i.
    # Bug fix: this loop previously started at 0; i == 0 is not a valid
    # mirror position (it only ever contributed 0, but was still wrong).
    for i in range(1, len(lines[0])):
        cnt = 0
        for j in range(1, min(len(lines[0]) - i, i) + 1):
            for k in range(len(lines)):
                if lines[k][i - j] != lines[k][i + j - 1]:
                    cnt += 1
        if cnt == 0:
            total1 += i
        elif cnt == 1:
            total2 += i

print(total1)
print(total2)
86594509010 | #!/usr/bin/python
#-*-coding:utf-8-*-
import cStringIO
import codecs
import re
from xml.dom import minidom
from httplibExt import *
import codecs
class LivebosObject():
    """One Livebos export object parsed from an XML file.

    An instance is populated either from a known system file (looked up in
    __objDict__), from a workflow/menu file (recognised by file-name
    convention), or by parsing the XML of a generic business object.
    """
    object = None
    type = None
    actionType = None
    objectId = None
    version=None
    modifyDate=None #"2011.04.20 14:25:30"
    createDate=None #"2011.04.20 14:22:30"
    creator=None
    modifier=None
    package=None
    describe=None
    #object=None #for workflow
    # Class-level default kept for backward compatibility; each instance gets
    # its own list in __init__ (see the bug-fix note there).
    files=[] #(name,url) in it
    # Known system files and the attributes they imply.
    __objDict__ = {
        'functionPermission.xml':{ # function permission tree
            #'type': u'function permission tree',
            'actionType':'-16',
            'package':'U1NP'
        },
        'scope-factor.xml':{ # data permission partition
            #'type': u'data permission partition',
            'actionType':'CreateScopeFactor'
        },
        'meta-column.xml':{ # metadata
            #'type': u'metadata',
            'object':'',
            'actionType':'CreateMetaColumn'
        },
        'script-variable.xml':{ # system variables
            #'type': u'system variables',
            'actionType':'CreateScriptVeriable'
        },
        'services.xml':{ # system services
            #'type': u'system services',
            'actionType':'-9'
        },
        'portlet-defines.xml':{ # portlet configuration
            #'type': u'portlet configuration',
            'actionType':'-14'
        },
        'mobile.xml':{
            #'type': u'mobile UI configuration',
            'actionType':'-13'
        },
        'resource.xml':{
            #'type': u'data source',
            'actionType':'-10'
        },
        'sysparam.xml':{
            #'type': u'system parameters',
            'actionType':'CreateSysParam'
        },
        'dictionary.xml':{
            #'type': u'data dictionary',
            'object':'',
            'actionType':'CreateDict'
        },
        #'SYSTEM.xml':{ # system default scheme  TODO: two files
        #    'type': u'system default scheme',
        #    'typeId':'-9'
        #},
    }
    def __init__(self, file=None):
        # Bug fix: `files` used to exist only as a class-level list shared by
        # every instance (update() mutated it in place), so objects leaked
        # file entries into one another.  Give each instance its own list.
        self.files = []
        if(file!=None):
            self.update(file)
    def __createBusinessObject__(self,file): # generic business object
        # Parse a business-object XML (gb2312 encoded after the first line)
        # and copy its attributes onto this instance.
        f = codecs.open(file)
        f.readline()
        raw_text = f.read().decode('gb2312')  # renamed from `str` (shadowed the builtin)
        f.close()
        xmlStr = raw_text.encode('utf-8')
        #print xmlStr
        xmldoc = minidom.parseString(xmlStr)
        root = xmldoc.documentElement
        #self.type = root.nodeName
        self.actionType = root.attributes['type'].value.encode('gb2312')
        self.object = root.attributes['name'].value.encode('gb2312')
        self.objectId = root.attributes['object-id'].value.encode('gb2312')
        #self.version = root.attributes['name'].value.encode('gb2312')
        #self.modifyDate = root.attributes['modify-date'].value.encode('gb2312')
        #self.createDate = root.attributes['create-date'].value.encode('gb2312')
        #self.creator = root.attributes['creator'].value.encode('gb2312')
        #self.modifier = root.attributes['modifier'].value.encode('gb2312')
        self.package = root.attributes['package'].value.encode('gb2312')
        self.describe = root.childNodes[1].firstChild.nodeValue.encode('gb2312')
    def update(self,file):
        # Re-populate this object from `file`, resetting the file list first.
        del self.files[:]
        fileName = file.split('/')[-1]
        self.files.append((fileName,file))
        if fileName in self.__objDict__: #system object
            self.__dict__.update(self.__objDict__[fileName])
            return
        if fileName.startswith('WF_'): #workflow
            self.actionType = '6'
            self.object = fileName[0:-4]
            self.files.append(('workflowdes',file))
            self.files.append(('layout',file[0:-4]+'_layout.xml'))
            self.files.append(('image',file[0:-4]+'.jpeg'))
            return
        if fileName=='SYSTEM.xml' or fileName.startswith('UP'): # scheme / menu
            #self.files.append((fileName[0:-4],file))
            #find menu file
            f = open(file)
            f.readline()
            raw_text = f.read().decode('gb2312')  # renamed from `str` (shadowed the builtin)
            f.close()  # Bug fix: the handle was previously left open.
            xmldoc = minidom.parseString(raw_text.encode('utf-8'))
            fileNodes = xmldoc.getElementsByTagName('file')
            if len(fileNodes)>0:
                menuFile = fileNodes[0].attributes['href'].value
                menuFile = file[0:-len(fileName)]+menuFile
                self.files.append(('menu',menuFile.encode("ascii")))
            self.actionType = '-12'
            self.object = fileName[0:-4]
            return
        self.__createBusinessObject__(file) # generic business object
| chyangfather/envadmin | docs/commitassistant/models.py | models.py | py | 4,952 | python | en | code | 0 | github-code | 13 |
8932970963 | import os, sys, time, glob
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Input, Dense, Masking, GRU, TimeDistributed
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
def get_weights_file(checkpoint_path, file_name=None):
    """Return the most recently created ``weights*`` file in *checkpoint_path*.

    ``file_name`` is accepted for call-site compatibility but unused.
    Raises ValueError when the folder holds no matching file.
    """
    candidates = glob.glob(os.path.join(checkpoint_path, "weights*"))
    return max(candidates, key=os.path.getctime)
class SimpleRNN():
    """Small Keras GRU regression model with checkpointed training.

    Maps variable-length (batch, time, in_dim) sequences (zero rows are
    masked) to an (batch, out_dim) output via one 16-unit GRU and a linear
    Dense layer.
    """
    def __init__(self, batch_size, in_dim=3, out_dim=3, initial_epoch=0, directories="./pred/simpleRNN", model_name="test", load=False):
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.num_units = 16          # GRU hidden size
        self.batch_size = batch_size
        self.directories = directories
        self.model_name = model_name
        self.load = load             # whether training() restores a checkpoint
        self.initial_epoch = initial_epoch
        self._build_model()
    def _build_model(self):
        '''
        Build and compile the Masking -> GRU -> Dense model.
        :return:
        '''
        t = time.time()
        print('Begin to build the simple rnn model...')
        self.model = Sequential()
        # mask_value=0.0: all-zero timesteps are skipped (used for padding)
        self.model.add(Masking(mask_value=0.0, input_shape=(None, self.in_dim)))
        # self.model.add(GRU(self.num_units, activation='tanh', return_sequences=True))
        # self.model.add(GRU(self.num_units, activation='tanh', return_sequences=False))
        self.model.add(GRU(self.num_units))
        self.model.add(Dense(self.out_dim, activation="linear"))
        self.model.compile(optimizer='RMSprop', loss='mean_squared_error')
        print('Completed simple rnn model compilation in %.3f seconds' % (time.time() - t))
    def training(self, X, Y, epochs):
        '''
        Fit the model with TensorBoard logging and periodic checkpoints.
        Optionally restores the latest checkpoint first (self.load).
        :param X: input
        :param Y: output
        :param epochs: joint training epochs
        :return:
        '''
        modelDir = os.path.join(self.directories, self.model_name)
        # Keras fills in the {epoch}/{val_loss} placeholders when saving.
        weights_name = "weights-{epoch:02d}-{val_loss:.2f}.hdf5"
        tfDir = os.path.join(self.directories, self.model_name)
        print("tensorboard directory")
        print(tfDir)
        print("modelDir")
        print(modelDir)
        if self.load:
            try:
                filename = get_weights_file(modelDir, weights_name)
                self.model.load_weights(filename)
                print("load model {} successfully".format(filename))
            except:
                # Missing/corrupt checkpoints fall back to fresh weights.
                print(
                    "failed to load model, please check the checkpoint directory... use default initialization setting")
        tbCb = TensorBoard(log_dir=tfDir, histogram_freq=1,
                           write_graph=True, write_images=True)
        # NOTE(review): the `period` argument was removed in newer Keras
        # (replaced by save_freq) -- confirm the pinned TF/Keras version.
        saveCb = ModelCheckpoint(os.path.join(modelDir, weights_name), monitor='val_loss', verbose=0,
                                 save_best_only=False,
                                 save_weights_only=False, mode='auto', period=2)
        # Perform batch training with epochs
        t = time.time()
        self.model.fit(X, Y,
                       batch_size=self.batch_size,
                       epochs=epochs + self.initial_epoch,
                       initial_epoch=self.initial_epoch,
                       validation_split=0.2, verbose=1, callbacks=[tbCb, saveCb])
        averageTime = (time.time() - t) / epochs
        print('Total time:', time.time() - t, ', Average time per epoch:', averageTime)
    def predict(self, X, Y = None):
        # Y is unused here; kept for a symmetric call signature with training().
        predict_result = self.model.predict(X, batch_size=self.batch_size)
        # print("X: ")
        # print(X)
        # if Y is not None:
        #     print("Y:")
        #     print(Y)
        # print("predict result")
        # print(predict_result)
        # The second element is a placeholder, presumably matching an
        # interface that also returns an uncertainty/state -- confirm.
        return predict_result, np.zeros(1)
    def load_model(self):
        # Restore the latest checkpoint into self.model (best effort).
        modelDir = os.path.join(self.directories, self.model_name)
        weights_name = "weights-{epoch:02d}-{val_loss:.2f}.hdf5"
        try:
            filename = get_weights_file(modelDir, weights_name)
            self.model.load_weights(filename)
            print("load model {} successfully".format(filename))
        except:
            print("failed to load model, please check the checkpoint directory {}... use default initialization setting".format(modelDir))
# for testing
def CreateSeqs(batch_size):
    """Generate random (x, y) arrays for smoke-testing the RNN.

    Returns x of shape (batch_size*10, 3, 3) and y of shape
    (batch_size*10, 1, 3), uniformly sampled in [0, 1).
    """
    sample_count = batch_size * 10
    inputs = np.random.random(size=(sample_count, 3, 3))
    targets = np.random.random(size=(sample_count, 1, 3))
    return inputs, targets
def main():
    """Smoke test: build a SimpleRNN, train it briefly on random sequences,
    then run one prediction.  Returns 0."""
    batch_size=1
    in_dim = 3
    out_dim = 3
    my_rnn = SimpleRNN(batch_size, in_dim, out_dim)
    x, y = CreateSeqs(batch_size)
    my_rnn.training(x,y,10)
    my_rnn.predict(x,y)
    return 0
if __name__ == '__main__':
main()
| MzXuan/fetch_plan | baselines/baselines/ppo2/keras_simpleRNN.py | keras_simpleRNN.py | py | 4,893 | python | en | code | 0 | github-code | 13 |
35028098760 | # 알파벳 찾기
import sys
ipt = sys.stdin.readline

# BOJ 10809: for every lowercase letter a-z, print the index of its first
# occurrence in the input word, or -1 when it does not occur.
word = ipt().rstrip()
# str.find already returns the first index, or -1 when the character is
# missing -- exactly the value the problem asks for.
positions = [str(word.find(chr(code))) for code in range(97, 123)]
print(' '.join(positions))
38173644774 | import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import numpy as np
import pickle
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dropout, Dense
def mask_layer_outputs(unit_mask, layer_outputs):
    """Zero out selected units of a layer's output by elementwise multiply.

    ``unit_mask`` (array-like of 0/1 values -- assumed broadcastable to
    ``layer_outputs``; TODO confirm the shape contract) is lifted to a
    float32 TF constant and multiplied into the outputs.
    """
    unit_mask_tensor = tf.constant(unit_mask, dtype = "float32")
    feature_map = layer_outputs * unit_mask_tensor
    return feature_map
class VGG16(keras.models.Sequential):
    """Sequential VGG16 with helpers for probing and patching intermediate layers.

    Beyond the standard convolutional stack, this class can build:
      * ``intermediate_layer_model`` -- maps inputs to a chosen layer's output,
      * ``predict_model``            -- maps that layer's output to class scores,
    and can zero out individual convolution filters in place
    (``change_intermediate_weights``).
    """
    def __init__(self,
                 input_shape = (224, 224, 3),
                 bn = False):
        # bn: insert BatchNormalization after each fully-connected layer.
        self.bn = bn
        super().__init__()
        self.build(input_shape)
    def build_intermediate_model(self, layer_name):
        """Build a model that maps this network's input to ``layer_name``'s output."""
        self.intermediate_layer_model = keras.models.Model(inputs=self.input, outputs=self.get_layer(layer_name).output)
    def build_predict_model(self, layer_name = "block5_conv3"):
        """Build ``predict_model``: runs every layer *after* ``layer_name``.

        Raises a plain ``Exception`` when ``layer_name`` is not in the stack.
        """
        target_layer_index = -1
        for i, l in enumerate(self.layers):
            if l.name == layer_name:
                target_layer_index = i
                input_shape = l.output_shape[1:]  # drop the batch dimension
        if target_layer_index == -1:
            raise Exception("Layer name not found!")
        inputs = tf.keras.layers.Input(input_shape)
        # Chain all remaining layers on top of the fresh input tensor.
        x = self.layers[target_layer_index + 1](inputs)
        for l in self.layers[target_layer_index + 2::]:
            x = l(x)
        # x = self.get_layer("block5_pool")(inputs)
        # x = self.get_layer("flatten")(x)
        # x = self.get_layer("fc1")(x)
        # if self.bn: x = self.get_layer("bn1")(x)
        # x = self.get_layer("rl1")(x)
        # x = self.get_layer("fc2")(x)
        # if self.bn: x = self.get_layer("bn2")(x)
        # x = self.get_layer("rl2")(x)
        # x = self.get_layer("predictions")(x)
        self.predict_model = keras.models.Model(inputs, x, name="predict_model")
    def build(self, input_shape):
        """Assemble the VGG16 stack: conv blocks 1-5 plus the classifier head."""
        self.add(Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1', input_shape=input_shape))
        self.add(Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2'))
        self.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))
        #Block 2
        self.add(Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1'))
        self.add(Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2'))
        self.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))
        #Block 3
        self.add(Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1'))
        self.add(Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2'))
        self.add(Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3'))
        self.add(MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))
        #Block 4
        self.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1'))
        self.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2'))
        self.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3'))
        self.add(MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))
        #Block 5
        self.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1'))
        self.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2'))
        self.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3'))
        self.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))
        #Fully connected
        # self.add(Flatten(name="flatten"))
        # # self.add(Dropout(0.3))
        # self.add(Dense(4096,activation= "relu", name='fc1'))
        # self.add(keras.layers.Activation("relu"))
        # self.add(Dropout(0.5))
        # self.add(Dense(4096, activation= "relu", name='fc2'))
        # # self.add(keras.layers.Activation("relu"))
        # self.add(Dropout(0.5))
        self.add(Flatten(name="flatten"))
        # self.add(Dropout(0.3))
        self.add(Dense(4096, name='fc1'))
        if self.bn: self.add(keras.layers.BatchNormalization(name = "bn1"))
        self.add(keras.layers.Activation("relu", name = "rl1"))
        # self.add(Dropout(0.5))
        self.add(Dense(4096, name='fc2'))
        if self.bn: self.add(keras.layers.BatchNormalization(name = "bn2"))
        self.add(keras.layers.Activation("relu", name = "rl2"))
        # self.add(Dropout(0.5))
        self.add(Dense(1000, activation="softmax", name='predictions'))
    def get_intermediate_layer_output(self, ds):
        """Return the intermediate model's activations for ``ds``.

        ``ds`` is a (images, labels) tuple; inputs larger than 100 images are
        processed in chunks of 100 and stacked.
        """
        img_num = ds[0].shape[0]
        # avoid of OOM
        if img_num > 100:
            i = 0
            while True:
                sub_ds = (ds[0][100 * i : 100 * i + 100, :, :, :], ds[1][100 * i : 100 * i + 100, :])
                layer_outputs_temp = self.output_layers(self.intermediate_layer_model, sub_ds)
                if i == 0:
                    layer_outputs = layer_outputs_temp
                else:
                    layer_outputs = np.vstack((layer_outputs, layer_outputs_temp))
                i += 1
                if i >= img_num//100:
                    # NOTE(review): when img_num is an exact multiple of 100 this
                    # final slice is empty -- confirm the model tolerates an
                    # empty batch here.
                    sub_ds = (ds[0][100 * i :, :, :, :], ds[1][100 * i :, :])
                    layer_outputs_temp = self.output_layers(self.intermediate_layer_model, sub_ds)
                    layer_outputs = np.vstack((layer_outputs, layer_outputs_temp))
                    return layer_outputs
        else:
            layer_outputs = self.output_layers(self.intermediate_layer_model, ds)
            return layer_outputs.numpy()
    @tf.function(experimental_relax_shapes=True)
    def output_layers(self, model, x):
        """Run ``model`` on ``x`` inside a shape-relaxed ``tf.function``."""
        layer_outs = model(x)
        return layer_outs
    def change_intermediate_weights(self, filterList = [], layer_name = "block5_conv3"):
        """Zero the kernel slice and bias of each listed filter in ``layer_name``.

        NOTE(review): mutable default argument; harmless here since the list
        is only read, but consider a ``None`` default.
        """
        assert isinstance(filterList, list)
        # weights = np.array(self.get_weights())
        weights = self.get_layer(name = layer_name).get_weights()
        zero_filter = np.zeros((3, 3, 1))
        for filter_idx in filterList:
            # (3, 3, 1) broadcasts over the filter's input channels.
            weights[0][:, :, :, filter_idx] = zero_filter
            weights[1][filter_idx] = 0
        self.get_layer(name = layer_name).set_weights(weights)
    def compile(self):
        """Compile with SGD + categorical cross-entropy, tracking acc and top-k acc."""
        super().compile(loss="categorical_crossentropy", optimizer="sgd", metrics=["acc", keras.metrics.top_k_categorical_accuracy])
    def init_model(self, fname, layer_name = "block5_conv3"):
        """Compile, load weights from ``fname``, and build both helper models."""
        self.compile()
        self.build_intermediate_model(layer_name=layer_name)
        self.load_weights(fname)
        self.build_predict_model(layer_name = layer_name)
        self.predict_model.compile(loss="categorical_crossentropy", optimizer="sgd", metrics=["acc", tf.keras.metrics.top_k_categorical_accuracy])
if __name__ == "__main__":
    # Ad-hoc driver: load trained weights, sample one ImageNet class folder,
    # and extract block5_conv3 feature maps. Paths are machine-specific.
    # NOTE(review): relative imports under __main__ only resolve when run
    # with `python -m`; confirm how this script is invoked.
    from .process_dataset import *
    from .data_loader import ImageDataGenerator_Modify
    vggModel_path = "/media/workstation/zy/model/new_vgg_2_22/weights.01.hdf5"
    # vggModel_path = "/home/workstation/.keras/models/vgg16_weights_tf_dim_ordering_tf_kernels.h5"
    ds_path = "/media/workstation/zy/cal_results/imagenet_sample/"
    wnid = sorted(os.listdir(ds_path))[1]  # second class directory (WordNet id)
    ds_path = os.path.join(ds_path, wnid)
    vggModel = VGG16()
    vggModel.load_weights(vggModel_path)
    vggModel.compile()
    ds = load_directory(ds_path, sample_number=50,
                        ImageNetLabel=True, VGGPretrainedProcess=True)
    # vggModel.evaluate(ds, steps = 1)
    vggModel.build_intermediate_model("block5_conv3")
    layer_output = vggModel.intermediate_layer_model.predict(ds, steps = 2)
    # with open("/home/workstation/zy/paper_image/nips/introduction/%s_fmaps_new_01.pkl"%(wnid), "wb") as p:
    #     pickle.dump(layer_output, p)
| liyueqiao/feature-entropy | fe/vgg16.py | vgg16.py | py | 7,786 | python | en | code | 0 | github-code | 13 |
71648264657 | import logging
from javalang.tree import MethodInvocation
from qark.issue import Issue, Severity
from qark.plugins.webview.helpers import webview_default_vulnerable, valid_set_method_bool
from qark.scanner.plugin import CoroutinePlugin, ManifestPlugin
log = logging.getLogger(__name__)
SET_ALLOW_UNIVERSAL_ACCESS_FROM_FILE_URLS_DESCRIPTION = (
"JavaScript running in a file scheme context can access content from any origin. This is an insecure default "
"value for minSdkVersion < 16 or may have been overridden (setAllowUniversalAccessFromFileURLs) in later versions. "
"To validate this vulnerability, load the following local file in this WebView: "
"file://qark/poc/html/UNIV_FILE_WARNING.html"
)
class SetAllowUniversalAccessFromFileURLs(CoroutinePlugin, ManifestPlugin):
    """Flags WebViews that grant JavaScript universal access from file URLs.

    Reports when `setAllowUniversalAccessFromFileURLs` is called with `true`,
    or when the insecure default applies (minSdkVersion < 16).
    """

    def __init__(self):
        super(SetAllowUniversalAccessFromFileURLs, self).__init__(category="webview",
                                                                  name="Webview enables universal access for JavaScript",
                                                                  description=SET_ALLOW_UNIVERSAL_ACCESS_FROM_FILE_URLS_DESCRIPTION)
        self.severity = Severity.WARNING
        self.java_method_name = "setAllowUniversalAccessFromFileURLs"

    def can_run_coroutine(self):
        if self.min_sdk > 15:
            return True
        # API < 16: the default itself is vulnerable -- record issues and
        # skip the per-node scan entirely.
        self.issues.extend(webview_default_vulnerable(self.java_ast, method_name=self.java_method_name,
                                                      issue_name=self.name, description=self.description,
                                                      file_object=self.file_path, severity=self.severity))
        return False

    def run_coroutine(self):
        # Receive AST nodes one at a time; report each vulnerable setter call.
        while True:
            _, node = (yield)
            if isinstance(node, MethodInvocation) and valid_set_method_bool(
                    node, str_bool="true", method_name=self.java_method_name):
                self.issues.append(Issue(category=self.category, name=self.name, severity=self.severity,
                                         description=self.description, line_number=node.position,
                                         file_object=self.file_path))


plugin = SetAllowUniversalAccessFromFileURLs()
| linkedin/qark | qark/plugins/webview/set_allow_universal_access_from_file_urls.py | set_allow_universal_access_from_file_urls.py | py | 2,499 | python | en | code | 3,071 | github-code | 13 |
5885262730 | from __future__ import print_function
import json
import logging
import numpy
import os
import subprocess
import sys
from sawtooth.cli.admin_sub.genesis_common import genesis_info_file_name
from txnintegration.exceptions import ExitError
from txnintegration.matrices import NodeController
from txnintegration.matrices import EdgeController
from txnintegration.netconfig import NetworkConfig
from txnintegration.utils import find_executable
LOGGER = logging.getLogger(__name__)
class ValidatorNetworkManager(object):
    """Drives a validator network via a NodeController and an EdgeController.

    Node liveness and point-to-point connectivity are both expressed as
    ``n_mag x n_mag`` matrices; in the default ``same_matrix`` mode a single
    matrix serves both purposes (see ``__init__``).
    """
    def __init__(self, n_mag, same_matrix=True):
        '''
        Args:
            n_mag (int): number of nodes for your node_controller, and,
                correspondingly, the number of rows and columns in the
                adjacency matrix for controlling point-to-point network
                connectivity in your edge_controller.
            same_matrix (bool): use the same matrix for nodes and edges. In
                this case, the diagonal for the edge_matrix can be overloaded
                to also activate and deactivate nodes. Quite convenient for
                testing scenarios, but harder to discuss mathematically.
                Overloading the diagonal of the edge matrix to 'be' the node
                matrix is tempting because it's generally uninteresting to
                prohibit a node from talking to itself on the network.
        '''
        self.n_mag = n_mag
        self.node_controller = None
        self.edge_controller = None
        self.overload_matrices = same_matrix
        self._initialized = False
    def initialize(self, net_config, node_controller, edge_controller):
        """Bind config and controllers; must be called before any other method."""
        assert isinstance(net_config, NetworkConfig)
        assert isinstance(node_controller, NodeController)
        assert isinstance(edge_controller, EdgeController)
        assert node_controller.get_mag() == edge_controller.get_mag()
        self.net_config = net_config
        self.node_controller = node_controller
        self.edge_controller = edge_controller
        self._initialized = True
    def do_genesis(self, do_genesis_validator_idx=0, **kwargs):
        """Create the genesis block via the ``sawtooth admin *-genesis`` CLI.

        Overrides the chosen validator's config so it can publish in
        isolation, runs the CLI as a subprocess, then prints the resulting
        genesis block id read from the genesis info file.
        """
        assert self._initialized
        cfg = self.get_configuration(do_genesis_validator_idx)
        overrides = {
            "InitialConnectivity": 0,
            "DevModePublisher": True,
        }
        cfg.update(overrides)
        self.set_configuration(do_genesis_validator_idx, cfg)
        config_file = self.write_configuration(do_genesis_validator_idx)
        cfg = self.get_configuration(do_genesis_validator_idx)
        ledger_type = cfg.get('LedgerType', 'poet0')
        # validate user input to Popen
        assert ledger_type in ['dev_mode', 'poet0', 'poet1']
        assert os.path.isfile(config_file)
        alg_name = ledger_type
        if ledger_type == 'dev_mode':
            alg_name = 'dev-mode'
        cli_args = 'admin %s-genesis --config %s' % (alg_name, config_file)
        try:
            executable = find_executable('sawtooth')
        except ExitError:
            # Fall back to the 'sawtooth' binary next to txnvalidator.
            path = os.path.dirname(self.node_controller.txnvalidator)
            executable = os.path.join(path, 'sawtooth')
            assert os.path.isfile(executable)
        cmd = '%s %s %s' % (sys.executable, executable, cli_args)
        proc = subprocess.Popen(cmd.split())
        proc.wait()
        if proc.returncode != 0:
            return
        # Get genesis block id
        gblock_file = genesis_info_file_name(cfg['DataDirectory'])
        assert os.path.exists(gblock_file) is True
        genesis_dat = None
        with open(gblock_file, 'r') as f:
            genesis_dat = json.load(f)
        assert 'GenesisId' in genesis_dat.keys()
        head = genesis_dat['GenesisId']
        print('created genesis block: %s' % head)
    def launch(self, **kwargs):
        """Bring up all nodes and all edges at once (all-ones matrices)."""
        assert self._initialized
        print('launching network')
        mat = numpy.ones(shape=(self.n_mag, self.n_mag))
        self.update(node_mat=mat, edge_mat=mat, **kwargs)
    def staged_launch(self, stage_chunk_size=8, **kwargs):
        '''
        Quick and dirty function to spread out initializations. Most re-draws
        are effectively NOPs due to the delta matrix. Each round, the ledger
        url becomes the zeroth index of the round.
        Args:
            stage_chunk_size (int): max number of nodes to launch per round
        Returns:
            None
        '''
        assert self._initialized
        if stage_chunk_size < self.n_mag:
            print('launching network in segments of %s' % stage_chunk_size)
        mat = numpy.zeros(shape=(self.n_mag, self.n_mag))
        idx = 0
        while idx < self.n_mag:
            # Enable the leading n x n sub-matrix this round.
            n = min(idx + stage_chunk_size, self.n_mag)
            for i in range(n):
                for j in range(n):
                    mat[i][j] = 1
            self.update(node_mat=mat, edge_mat=mat, **kwargs)
            idx += stage_chunk_size
    def update(self, node_mat=None, edge_mat=None, **kwargs):
        """Animate the controllers toward the given matrices.

        In overloaded (same_matrix) mode, a missing matrix defaults to the
        other one, and the two are checked for agreement afterwards.
        """
        assert self._initialized
        if self.overload_matrices is True:
            if node_mat is None:
                node_mat = edge_mat
            if edge_mat is None:
                edge_mat = node_mat
        if edge_mat is not None:
            self.edge_controller.animate(edge_mat, **kwargs)
        if node_mat is not None:
            self.node_controller.animate(node_mat, **kwargs)
        if self.overload_matrices is True:
            nm = self.node_controller.get_mat()
            em = self.edge_controller.get_mat()
            try:
                # NOTE(review): this compares the two reduced .all() booleans,
                # not elementwise equality -- likely meant numpy.array_equal;
                # confirm intent.
                assert nm.all() == em.all()
            except AssertionError:
                msg = "You've chose to overrload the edge matrix, but your"
                msg += " node and edge matrices differ..."
                print(msg)
    def get_configuration(self, idx):
        """Return the configuration dict for validator ``idx``."""
        assert self._initialized
        return self.net_config.get_node_cfg(idx)
    def set_configuration(self, idx, cfg):
        """Store ``cfg`` as the configuration for validator ``idx``."""
        assert self._initialized
        return self.net_config.set_node_cfg(idx, cfg)
    def write_configuration(self, idx, path=None):
        """Write validator ``idx``'s configuration to disk; returns the path."""
        assert self._initialized
        return self.net_config.write_node_cfg(idx, path)
    def urls(self):
        """Return the URLs of the managed validators."""
        assert self._initialized
        return self.node_controller.urls()
    def shutdown(self, **kwargs):
        """Shut down both controllers and the config provider, if initialized."""
        if self._initialized:
            self.node_controller.shutdown(**kwargs)
            self.edge_controller.shutdown(**kwargs)
            if self.net_config.provider is not None:
                self.net_config.provider.shutdown()
    def activate_node(self, idx, **kwargs):
        """Turn node ``idx`` on by setting its diagonal entry to 1."""
        mat = self.node_controller.get_mat()
        mat[idx][idx] = 1
        self.update(node_mat=mat, **kwargs)
    def deactivate_node(self, idx, **kwargs):
        """Turn node ``idx`` off by clearing its diagonal entry."""
        mat = self.node_controller.get_mat()
        mat[idx][idx] = 0
        self.update(node_mat=mat, **kwargs)
    def connect_edge(self, src, dst, **kwargs):
        """Allow network traffic from ``src`` to ``dst``."""
        mat = self.edge_controller.get_mat()
        mat[src][dst] = 1
        self.update(edge_mat=mat, **kwargs)
    def sever_edge(self, src, dst, **kwargs):
        """Block network traffic from ``src`` to ``dst``."""
        mat = self.edge_controller.get_mat()
        mat[src][dst] = 0
        self.update(edge_mat=mat, **kwargs)
def get_default_vnm(num_nodes,
                    txnvalidator=None,
                    overrides=None,
                    log_config=None,
                    data_dir=None,
                    block_chain_archive=None,
                    http_port=None,
                    udp_port=None,
                    host=None,
                    endpoint_host=None):
    """Build a ValidatorNetworkManager wired with the default controllers.

    Uses a ValidatorCollectionController for nodes and a no-op edge
    controller, both driven by a default NetworkConfig.
    """
    # Imported lazily to avoid circular imports at module load time.
    from txnintegration.netconfig import get_default_network_config_obj
    from txnintegration.matrices import NopEdgeController
    from txnintegration.validator_collection_controller import \
        ValidatorCollectionController
    network_config = get_default_network_config_obj(
        num_nodes,
        overrides=overrides,
        data_dir=data_dir,
        block_chain_archive=block_chain_archive,
        http_port=http_port,
        udp_port=udp_port,
        host=host,
        endpoint_host=endpoint_host)
    manager = ValidatorNetworkManager(num_nodes)
    manager.initialize(network_config,
                       ValidatorCollectionController(network_config,
                                                     txnvalidator=txnvalidator),
                       NopEdgeController(network_config))
    return manager
| gabykyei/GC_BlockChain_T_Rec | validator/txnintegration/validator_network_manager.py | validator_network_manager.py | py | 8,556 | python | en | code | 1 | github-code | 13 |
26335124240 | def url_suffix(request):
"""
Calculate any required url suffix to be appended
"""
ans = ""
# Forward 'webid'
if hasattr(request, 'webid'):
ans += "webid=%s" % request.webid
elif 'webid' in request.GET:
ans += "webid=%s" % request.GET['webid']
# Return url suffix
return ans
def context(request, **extra):
    """
    Common context generator for all templates below
    """
    # 'webid' comes from the request when middleware attached it, else empty.
    webid = getattr(request, 'webid', "")
    base = {
        'url_suffix': url_suffix(request),
        'webid': webid
    }
    # Caller-supplied keys override the defaults.
    base.update(extra)
    return base
| wavesoft/creditpiggy | creditpiggy-server/creditpiggy/frontend/views/__init__.py | __init__.py | py | 568 | python | en | code | 0 | github-code | 13 |
11177081844 | from flask import Flask
import os
import redis
import json
app = Flask(__name__)
# Get port from environment variable or choose 8080 as local default
port = int(os.getenv('PORT', 8080))
# Local defaults, used when no Cloud Foundry service binding is present.
redis_config = dict(host='localhost', port=6379, password='')
# Get Redis credentials from CF service
if 'VCAP_SERVICES' in os.environ:
    services = json.loads(os.getenv('VCAP_SERVICES'))
    redis_credentials = services['aws-elasticache-redis'][0]['credentials']
    redis_config['host'] = redis_credentials['host']
    redis_config['port'] = int(redis_credentials['port'])
    redis_config['password'] = redis_credentials['password']
    # Enable TLS for the bound service, without certificate verification.
    redis_config['ssl'] = True
    redis_config['ssl_cert_reqs'] = None
# Connect to redis
# NOTE(review): redis.Redis() connects lazily, so ConnectionError is normally
# raised by the first command rather than here -- confirm this fallback fires.
try:
    client = redis.Redis(**redis_config)
except redis.ConnectionError:
    client = None
@app.route('/')
def keys():
    """Increment the hit counter and list every key in the database."""
    try:
        hit_count = client.incr('hits')
        all_keys = client.keys('*')
        return f'Hits: {hit_count}\nKeys: {all_keys}'
    except Exception as exc:
        print(exc)
        return 'Error'
@app.route('/<key>')
def get_current_values(key):
    """Return the value(s) stored at *key*."""
    try:
        values = client.mget(key)
        return 'Values: ' + str(values)
    except Exception as exc:
        print(exc)
        return 'Error'
@app.route('/<key>/<value>')
def add_value(key, value):
    """Append *value* to the string stored at *key*."""
    try:
        client.append(key, value)
        return f'Added {value} to {key}.'
    except Exception as exc:
        print(exc)
        return 'Error'
@app.route('/delete')
def delete():
    """Flush every key from the Redis database."""
    try:
        client.flushall()
        # Plain string literal: the original used an f-string with no
        # placeholders (useless f-prefix).
        return 'Deleted'
    except Exception as error:
        print(error)
        return 'Error'
# Script entry point: serve on all interfaces at the configured port.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=port)
| cloud-gov/aws-redis-example | python/app.py | app.py | py | 1,734 | python | en | code | 8 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.