id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1685247 | <reponame>ironss/micropython-lib<gh_stars>0
"""
Watchdog monitor
Various services must report in periodically. If all of them have reported in, the watchdog
monitor feeds the hardware watchdog. Otherwise, the monitor does not feed the hardware watchdog
and eventually the system restarts.
"""
import ulogging
import utime
import machine
_logger = ulogging.getLogger('eng.sw.wdog')
_logger.setLevel(ulogging.INFO)
class TaskWatchdog:
    """Tracks when a single named task last reported in ("fed").

    ``step()`` compares ``feed_time`` against ``timeout`` to decide
    whether the task is late.
    """
    #pylint: disable=too-few-public-methods

    def __init__(self, name, timeout):
        self.name = name          # task identifier, used in log messages
        self.timeout = timeout    # maximum seconds allowed between feeds
        self.feed_time = utime.time()

    def feed(self):
        """Record that this task has reported in."""
        # Fix: the original logged '{}: feed' without supplying the
        # argument, so the placeholder was never filled in.
        _logger.debug('{}: feed', self.name)
        self.feed_time = utime.time()
# Registry of per-task watchdogs, keyed by task name.
_twdogs = {}
# Hardware watchdog instance; created lazily by init().
_wdt = None
def init(timeout):
    """Create the hardware watchdog once (idempotent).

    :param timeout: timeout passed to ``machine.WDT`` (milliseconds on
        most MicroPython ports -- confirm for the target board).
    """
    global _wdt #pylint: disable=global-statement
    if _wdt is None:
        _wdt = machine.WDT(timeout=timeout)
def get(name, timeout):
    """Return the task watchdog registered under *name*, creating it on
    first use with the given *timeout*."""
    if name not in _twdogs:
        _twdogs[name] = TaskWatchdog(name, timeout)
    return _twdogs[name]
def step():
    """Check every registered task and feed the hardware watchdog only
    if all tasks have fed their own watchdog within their timeout."""
    time_now = utime.time()
    task_is_late = False
    for twdog in _twdogs.values():
        feed_delay = time_now - twdog.feed_time
        if feed_delay >= twdog.timeout:
            # Fix: the original referenced the undefined name 'wdog',
            # raising NameError whenever a task was actually late.
            _logger.warning('{}: is late', twdog.name)
            task_is_late = True
    if not task_is_late:
        if _wdt:
            _wdt.feed()
| StarcoderdataPython |
1777096 | <gh_stars>0
# Generated by Django 3.0.1 on 2020-12-20 15:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds a page-view counter to the profile model."""

    dependencies = [
        ('users', '0005_auto_20201220_1616'),
    ]

    operations = [
        migrations.AddField(
            model_name='prof',
            name='prof_views',
            # Existing rows are backfilled with 0 views.
            field=models.IntegerField(default=0),
        ),
    ]
| StarcoderdataPython |
98003 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 24 13:09:04 2020
@author: 766810
"""
tab = []
loop = 'loop'
# tab will hold all Kaprekar numbers found
# loop is just for better wording
def asc(n):
    """Return *n* with its decimal digits rearranged in ascending order."""
    digits = sorted(str(n))
    return int("".join(digits))
def desc(n):
    """Return *n* with its decimal digits rearranged in descending order."""
    digits = sorted(str(n), reverse=True)
    return int("".join(digits))
n = input("Specify a number: ")
try:
    n = int(n)
except ValueError:
    # Fixes: bare `except` narrowed to ValueError; the fallback was the
    # string "2020" instead of an int; the comment disagreed with the
    # message (2016 vs 2020).
    print("\nInvalid number specified!!!\nAssuming n = 2020.")
    n = 2020
print("\nTransforming", str(n) + ":")
while True:
    # One Kaprekar step: digits-descending minus digits-ascending.
    print(desc(n), "-", asc(n), "=", desc(n) - asc(n))
    n = desc(n) - asc(n)
    if n not in tab:
        # First time this value appears; keep iterating.
        tab.append(n)
    else:
        if tab.index(n) == len(tab) - 1:
            # Repeated immediately: n is a fixed point (constant).
            tab = [n]
            loop = 'constant'
        else:
            # Repeated earlier: we are in a cycle; drop the lead-in values.
            tab = tab[tab.index(n):]
        break
print('Kaprekar', loop, 'reached:', tab)
1605051 | <filename>cvchallenge1/predict.py
from datetime import datetime
import logging
import os
from urllib.request import urlopen
from PIL import Image
import tensorflow as tf
import numpy as np
from tensorflow.keras import backend as K
from urllib.request import urlopen
from tensorflow.keras import backend as K
from tensorflow.keras.preprocessing import image
from PIL import Image, ImageFont, ImageDraw
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input, decode_predictions
from tensorflow.keras.models import load_model
import time
import io
import json, base64
tf.compat.v1.disable_eager_execution()
def _log_msg(msg):
logging.info("{}: {}".format(datetime.now(),msg))
# Lazily-initialised Keras model; loaded by _initialize() on first use.
model = None
# Resolve the weights file relative to this script's own directory.
scriptpath = os.path.abspath(__file__)
scriptdir = os.path.dirname(scriptpath)
filename = os.path.join(scriptdir, 'mobilenetv2.h5')
# Colour stops for the activation heatmap: (position in [0, 1], RGB).
# Only the black -> green ramp is active; the intermediate stops are
# kept commented out for tuning.
heatmap_scema = [
    [0.0, (0, 0, 0)],
    #[0.20, (0, 0, 0)],
    #[0.30, (0, 0, 0.3)],
    #[0.50, (0, 0.5, 0)],
    #[0.70, (0, .7, 0.2)],
    [1.00, (0, 1.0, 0)],
]
def pixel(x, width=256, map=(), spread=1):
    """Map a scalar *x* to an RGB colour using Gaussian colour stops.

    :param x: value to colourise (same scale as *width*).
    :param width: total width of the colour axis.
    :param map: sequence of ``(position, (r, g, b))`` stops with position
        in [0, 1].  (The name shadows the builtin, but is kept because
        callers pass it by keyword.)
    :param spread: narrows (>1) or widens (<1) each Gaussian.
    :return: ``(r, g, b)`` with each channel clamped to at most 1.0.
    """
    # Fixes: the default was a mutable list (an empty tuple is equivalent
    # since it is never mutated); the per-stop sigma was recomputed three
    # times per stop inside the comprehensions and is now hoisted.
    width = float(width)
    r = g = b = 0.0
    if map:
        sigma = width / (spread * len(map))
        for pos, (sr, sg, sb) in map:
            center = pos * width
            r += gaussian(x, sr, center, sigma)
            g += gaussian(x, sg, center, sigma)
            b += gaussian(x, sb, center, sigma)
    return min(1.0, r), min(1.0, g), min(1.0, b)

def gaussian(x, a, b, c, d=0):
    """Gaussian with amplitude *a*, mean *b*, std-dev *c*, offset *d*."""
    return a * np.exp(-(x - b)**2 / (2 * c**2)) + d
def _initialize():
    """Load MobileNetV2 into the module-level ``model`` (load-once).

    :return: a timing message string (also emitted as a warning log).
    """
    global model
    start_time = time.time()
    if not model:
        model = load_model(filename)
    logging.warning(f"Initializing weights: @ {time.time() - start_time :.2f} sec")
    return f"Initializing weights: @ {time.time() - start_time :.2f} sec"
def _predict_image(img_org):
    """Classify *img_org* with MobileNetV2, overlay a Grad-CAM heatmap
    plus the top-3 predictions, and return the annotated image.

    :param img_org: PIL image (any size; resized to 224x224 for inference).
    :return: dict with keys ``prediction0``..``prediction2`` (label and
        confidence strings) and ``img`` (base64-encoded annotated JPEG).
    """
    start_time = time.time()
    result = {}
    response = "Computer Vision challenge\r\n#AIApril\r\n\r\nTop 3 predictions of MobileNet_V2 \r\ntrained on ImageNet dataset:\r\n"
    # Standard MobileNetV2 preprocessing: resize, add batch axis, scale.
    img = img_org.resize((224, 224))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)
    preds = model.predict(img)
    for i in range(3):
        res = decode_predictions(preds, top=3)[0][i]
        response += f"{res[1]} - {res[2]*100:.2f}% \r\n"
        logging.info(f"Prediction: {res[1]} - {res[2]*100:.1f}%")
        result["prediction"+str(i)] = f"{res[1]} - {res[2]*100:.1f}%"
    logging.info(f"\r\nImage load + Predictions: @ {time.time() - start_time :.2f} sec")
    # Grad-CAM: take the gradient of the winning class score w.r.t. the
    # last convolutional feature map.
    ind = np.argmax(preds[0])
    vector = model.output[:, ind]
    # 'Conv_1_bn' is the batch-norm after the final conv in MobileNetV2;
    # per the model summary it outputs (None, 7, 7, 1280).
    last_conv_layer = model.get_layer('Conv_1_bn')
    # Gradient of the class score with regard to the output feature map.
    grads = K.gradients(vector, last_conv_layer.output)[0]
    # Shape (1280,): mean gradient intensity per feature-map channel.
    pooled_grads = K.mean(grads, axis=(0, 1, 2))
    # Evaluate pooled gradients and the conv feature map for this image.
    iterate = K.function([model.input], [pooled_grads, last_conv_layer.output[0]])
    pooled_grads_value, conv_layer_output_value = iterate([img])
    # Weight each channel by its importance for the predicted class.
    for i in range(1280):
        conv_layer_output_value[:, :, i] *= pooled_grads_value[i]
    logging.info(f"Activation layers: @ {time.time() - start_time :.2f} sec")
    # Channel-wise mean is the class-activation heatmap; clamp negatives
    # and normalise to [0, 1].
    heatmap = np.mean(conv_layer_output_value, axis=-1)
    heatmap = np.maximum(heatmap, 0)
    heatmap /= np.max(heatmap)
    # Colourise the scalar 7x7 heatmap via the gradient colour stops.
    RGBheat = []
    for line in heatmap:
        RGBheat.append([])
        for x in line:
            r, g, b = pixel(x, width=1, map=heatmap_scema)
            r, g, b = [int(256*v) for v in (r, g, b)]
            pix = (r, g, b)
            RGBheat[-1].append(pix)
    heatmap = np.array(RGBheat)
    heatmap = np.uint8(heatmap)
    logging.info(f"HeatMap created: @ {time.time() - start_time :.2f} sec")
    # Upscale the heatmap to the original image size and blend it in.
    heatmap = Image.fromarray(heatmap)
    heatmap = heatmap.resize( img_org.size)
    heatmap = np.uint8(heatmap)
    superimposed_img = heatmap * 0.8 + img_org
    result_img = image.array_to_img(superimposed_img)
    # Draw the prediction text onto the blended image.
    draw = ImageDraw.Draw(result_img)
    font = ImageFont.load_default()
    logging.info(f"\r\nTotal execution time: {time.time() - start_time :.2f} sec")
    draw.text( (10,10), response, (55, 25, 255), font=font)
    # Serialise the annotated image as a base64-encoded JPEG.
    img_byte_arr = io.BytesIO()
    result_img.save(img_byte_arr, format='JPEG', quality=100)
    result['img'] = base64.encodebytes(img_byte_arr.getvalue()).decode("utf-8")
    return result
def predict_image_from_url(image_url):
    """Download *image_url*, run the prediction pipeline on it and
    return the result dict; returns ``None`` when *image_url* is falsy.
    """
    start_time = time.time()
    logging.info("Predicting from url: " + str(image_url))
    # Ensure the model is loaded; the returned timing string is unused.
    ttt = _initialize()
    if image_url:
        with urlopen(image_url) as testImage:
            # Predict while the response is still open (PIL loads lazily).
            image_org = Image.open(testImage)
            logging.info(f"\r\nTotal execution time: {time.time() - start_time :.2f} sec")
            return _predict_image(image_org)
    return
| StarcoderdataPython |
47367 | from conans.model import Generator
"""
PC FILE EXAMPLE:
prefix=/usr
exec_prefix=${prefix}
libdir=${exec_prefix}/lib
includedir=${prefix}/include
Name: my-project
Description: Some brief but informative description
Version: 1.2.3
Libs: -L${libdir} -lmy-project-1 -linkerflag
Cflags: -I${includedir}/my-project-1
Requires: glib-2.0 >= 2.40 gio-2.0 >= 2.42 nice >= 0.1.6
Requires.private: gthread-2.0 >= 2.40
"""
def concat_if_not_empty(groups):
    """Flatten *groups* (an iterable of iterables of strings) into one
    space-separated string, skipping falsy and whitespace-only entries."""
    parts = []
    for group in groups:
        for param in group:
            if param and param.strip():
                parts.append(param)
    return " ".join(parts)
def single_pc_file_contents(name, cpp_info):
    """Render the contents of one pkg-config (.pc) file for a dependency.

    :param name: package name, used for the Name:/Description: fields.
    :param cpp_info: Conan cpp_info object (rootpath, libdirs,
        includedirs, libs, link flags, defines, version, public_deps).
    :return: the .pc file text, newline-terminated.
    """
    lines = ['prefix=%s' % cpp_info.rootpath.replace("\\", "/")]
    libdir_vars = []
    for i, libdir in enumerate(cpp_info.libdirs):
        # NOTE(review): later dirs are named libdir3, libdir4, ... --
        # (i + 2) skips "libdir2".  Looks odd; confirm it is intentional
        # before changing, since generated .pc files may depend on it.
        varname = "libdir" if i == 0 else "libdir%d" % (i + 2)
        lines.append("%s=${prefix}/%s" % (varname, libdir))
        libdir_vars.append(varname)
    include_dir_vars = []
    for i, includedir in enumerate(cpp_info.includedirs):
        # Same (i + 2) numbering scheme as libdirs, for consistency.
        varname = "includedir" if i == 0 else "includedir%d" % (i + 2)
        lines.append("%s=${prefix}/%s" % (varname, includedir))
        include_dir_vars.append(varname)
    lines.append("")
    lines.append("Name: %s" % name)
    description = cpp_info.description or "Conan package: %s" % name
    lines.append("Description: %s" % description)
    lines.append("Version: %s" % cpp_info.version)
    libdirs_flags = ["-L${%s}" % name for name in libdir_vars]
    # NOTE(review): the trailing space in "-l%s " is preserved verbatim.
    libnames_flags = ["-l%s " % name for name in cpp_info.libs]
    shared_flags = cpp_info.sharedlinkflags + cpp_info.exelinkflags
    lines.append("Libs: %s" % concat_if_not_empty([libdirs_flags, libnames_flags, shared_flags]))
    include_dirs_flags = ["-I${%s}" % name for name in include_dir_vars]
    lines.append("Cflags: %s" % concat_if_not_empty(
        [include_dirs_flags,
         cpp_info.cppflags,
         cpp_info.cflags,
         ["-D%s" % d for d in cpp_info.defines]]))
    if cpp_info.public_deps:
        public_deps = " ".join(cpp_info.public_deps)
        lines.append("Requires: %s" % public_deps)
    return "\n".join(lines) + "\n"
class PkgConfigGenerator(Generator):
    """Conan generator producing one pkg-config (.pc) file per dependency."""

    @property
    def filename(self):
        # Multiple output files are produced, so no single filename applies.
        pass

    @property
    def content(self):
        """Return a mapping of '<depname>.pc' -> rendered file contents."""
        ret = {}
        for depname, cpp_info in self.deps_build_info.dependencies:
            ret["%s.pc" % depname] = single_pc_file_contents(depname, cpp_info)
        return ret
| StarcoderdataPython |
1603377 | <reponame>DonaldKBrown/Keybase-SSH-Auth
"""
Keybase SSH Authentication Server
Copyright 2019 - <NAME>, <EMAIL>
Published under the MIT License
This module is the main module in this project. It
runs the Flask server responsible for requesting,
checking, and reporting SSH authentication requests.
"""
from flask import Flask, jsonify, request
from models import Request
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from dotenv import load_dotenv
from os import path, environ
from functions import channel, bot_name, send_auth_request, find_first_reaction, send_decision
from datetime import datetime
load_dotenv()
db_path = path.dirname(path.abspath(__file__)) + '/data.db'
db_uri = 'sqlite:///' + db_path
if not path.exists(db_path):
from models import init_db
init_db(db_uri)
engine = create_engine(db_uri)
Session = sessionmaker(bind=engine)
app = Flask(__name__)
app.flask_env = environ['FLASK_ENV']
@app.before_request
def check_auth():
    """
    This function checks each request for the auth
    key to be present in the request before
    allowing it to continue.
    """
    # Shared-secret check: returning a response from a before_request
    # handler short-circuits the request cycle.
    if request.values.get('token') != environ['AUTH_TOKEN']:
        return "Unauthorized", 401
@app.route('/request', methods=['POST'])
def new_request():
    """
    This function receives a request from the PAM script and
    returns a request ID used for later checking.
    """
    user = request.values.get('user')
    server = request.values.get('server')
    remote = request.values.get('remote')
    now = datetime.now()
    # The Keybase chat message id doubles as the request id that is
    # stored in the database and returned to the PAM script.
    msg_id = send_auth_request(server, remote, user)
    conn = Session()
    new_req = Request(
        user = user,
        host = server,
        msg_id = msg_id,
        request_ts = now,
        remote = remote,
        decider = None,     # filled in when someone reacts
        decision = None,    # 'approved' / 'denied' / 'timed_out'
        decision_ts = None
    )
    conn.add(new_req)
    conn.commit()
    conn.close()
    return str(msg_id)
@app.route('/check/<path:msg_id>', methods=['GET'])
def check_status(msg_id):
    """
    This function checks the current status
    of a request based on msg_id and returns
    pending, approved, or denied as a string
    to the PAM script to decide whether to
    continue checking, send a time-out request,
    or exit with the appropriate status code.
    """
    decision, decider = find_first_reaction(int(msg_id))
    if not decision:
        return 'pending'
    else:
        conn = Session()
        # NOTE(review): assumes a matching row exists; a stale/unknown
        # msg_id would make saved_req None and raise AttributeError below.
        saved_req = conn.query(Request).filter(Request.msg_id == int(msg_id)).first()
        saved_req.decision_ts = datetime.now()
        saved_req.decision = decision
        saved_req.decider = decider
        conn.commit()
        # First argument False: this was an explicit human decision,
        # not an automatic timeout.
        send_decision(False, decider, decision, saved_req.user, saved_req.host, saved_req.remote, saved_req.msg_id)
        conn.close()
        return decision
@app.route('/timeout', methods=['POST'])
def timeout():
    """
    This function records a timeout from the PAM
    script. Saves it in the DB as a timeout and
    sends a chat message letting you know that
    it was timed out and automatically denied.
    """
    msg_id = int(request.values.get('msg_id'))
    conn = Session()
    # NOTE(review): assumes a matching row exists; an unknown msg_id
    # would make saved_req None and raise AttributeError below.
    saved_req = conn.query(Request).filter(Request.msg_id == int(msg_id)).first()
    saved_req.decision_ts = datetime.now()
    saved_req.decision = 'timed_out'
    saved_req.decider = 'automatic - time_out'
    conn.commit()
    # First argument True: flag this as an automatic timeout denial.
    send_decision(True, saved_req.decider, saved_req.decision, saved_req.user, saved_req.host, saved_req.remote, saved_req.msg_id)
    conn.close()
    return 'OK'
if __name__ == '__main__':
    # Development entry point; listen on all interfaces.
    app.run(host="0.0.0.0")
1759694 | <reponame>ecmwf/pyeccodes<filename>pyeccodes/defs/grib2/tables/4/4_2_3_1_table.py
def load(h):
    """Return the entries of GRIB2 code table 4.2.3.1 as a tuple of
    dicts.  *h* is unused but kept for the loader's uniform signature."""
    entries = [
        {'abbr': 0, 'code': 0, 'title': 'Estimated precipitation', 'units': 'kg m-2'},
        {'abbr': 1, 'code': 1, 'title': 'Instantaneous rain rate', 'units': 'kg m-2 s-1'},
        {'abbr': 2, 'code': 2, 'title': 'Cloud top height', 'units': 'm'},
        {'abbr': 3, 'code': 3, 'title': 'Cloud top height quality indicator', 'units': 'Code table 4.219'},
        {'abbr': 4, 'code': 4, 'title': 'Estimated u component of wind', 'units': 'm s-1'},
        {'abbr': 5, 'code': 5, 'title': 'Estimated v component of wind', 'units': 'm s-1'},
        {'abbr': None, 'code': 255, 'title': 'Missing'},
    ]
    return tuple(entries)
| StarcoderdataPython |
74079 | from .cache import * # will also import some globals like `britive`
def test_create(cached_security_policy):
    # The fixture performs the create; here we only verify its shape.
    assert isinstance(cached_security_policy, dict)
def test_list(cached_security_policy):
    """Listing returns all policies, including the fixture's."""
    policies = britive.security_policies.list()
    assert isinstance(policies, list)
    assert cached_security_policy['id'] in [p['id'] for p in policies]
def test_get(cached_security_policy):
    """A single policy can be fetched by id."""
    policy = britive.security_policies.get(security_policy_id=cached_security_policy['id'])
    assert isinstance(policy, dict)
def test_disable(cached_security_policy):
    """Disabling returns None and flips the status to Inactive."""
    response = britive.security_policies.disable(security_policy_id=cached_security_policy['id'])
    assert response is None
    policy = britive.security_policies.get(security_policy_id=cached_security_policy['id'])
    assert policy['status'] == 'Inactive'
def test_enable(cached_security_policy):
    """Enabling returns None and flips the status back to Active."""
    response = britive.security_policies.enable(security_policy_id=cached_security_policy['id'])
    assert response is None
    policy = britive.security_policies.get(security_policy_id=cached_security_policy['id'])
    assert policy['status'] == 'Active'
def test_update(cached_security_policy):
    """Updating the IP condition persists the new value."""
    response = britive.security_policies.update(
        security_policy_id=cached_security_policy['id'],
        ips=['2.2.2.2']
    )
    assert response is None
    policy = britive.security_policies.get(security_policy_id=cached_security_policy['id'])
    assert policy['conditions'][0]['values'] == ['2.2.2.2']
def test_delete(cached_security_policy):
    """Deletion returns None; cleanup drops the cached fixture."""
    response = britive.security_policies.delete(security_policy_id=cached_security_policy['id'])
    assert response is None
    cleanup('security-policy')
| StarcoderdataPython |
145139 | import os
from ocr import OCR
class receiptParser:
    """Extracts the total cost from receipt images via OCR.

    NOTE(review): ``ocr`` and ``raw_tickets`` are class attributes, so
    all instances share one OCR engine and one ticket cache.
    """
    ocr = OCR()
    raw_tickets = None

    def __init__(self, image_folder_path):
        # NOTE(review): only the bare .jpg file names are kept, not full
        # paths; this works only if the OCR engine resolves them relative
        # to the current working directory -- confirm against OCR().
        self.image_files = [f for f in os.listdir(image_folder_path) if bool(os.path.isfile(os.path.join(image_folder_path, f)) and '.jpg' in f)]
        print(self.image_files)

    def scrape_tickets(self):
        """Run OCR over all image files and cache the raw text."""
        self.raw_tickets = self.ocr(self.image_files)

    def format_num(self, num):
        """Normalise ',' and '-' decimal separators to '.'."""
        return num.replace(',','.').replace('-','.')

    def determine_num(self, num):
        """Return True when *num* looks like a money amount.

        Accepts $-prefixed values with one separator, or plain XX.YY
        with exactly two decimal digits.
        """
        num = self.format_num(num)
        if num.count('.')==1:
            vals = num.split('.')
            if ('$' in num):
                return True #on basis of it being in the form $XX.YY
            elif len(vals[1])==2 and vals[0].isdigit() and vals[1].isdigit(): #In form XX.YY
                return True
            else:
                return False
        else:
            return False

    def parse_tickets(self):
        """Return the largest money amount found on each ticket.

        Tickets with no recognisable amount are omitted from the
        returned list entirely.
        """
        if not self.raw_tickets:
            self.scrape_tickets()
        final_costs = []
        count = 1
        for raw_ticket in self.raw_tickets:
            ticket_data = raw_ticket.split()
            nums = [float(self.format_num(num.replace('$',''))) for num in ticket_data if self.determine_num(num)]
            if nums:
                # Heuristic: the grand total is the largest amount printed.
                total = max(nums)
                final_costs.append(total)
            else:
                total = "[UNDEFINED]"
            #print("The total cost of receipt #{} is: ${}".format(count,total))
            count+=1
        return final_costs
if __name__=="__main__":
    # Manual smoke test against a local 'images/' folder.
    rp = receiptParser('images/')
    print(rp.parse_tickets())
| StarcoderdataPython |
1725976 | <filename>misc/trans_writer.py
"""
This script writes continuous transformation information to file
"""
import numpy
import re
SAVE_PATH = '/home/bioprober/gh/3d-converter/tests/data/'
class TransWriter:
    """Builds a sequence of 4x4 homogeneous transforms (rotation about an
    arbitrary axis plus translation) and writes them to a text file, one
    step per line, as '<id> <16 matrix values>'."""

    def __init__(self, filename):
        self.filename = filename  # output path for save_to_file()
        self.info = ''            # accumulated rows, one per step

    def config(self, axis, degree, steps=65, move=(0, 0, 0)):
        """
        Configs self.info string by given parameters
        @param axis: <Tuple> formed by a tuple of two coordinates ((x, y, z), (x, y, z)), axis to be rotate about
        @param degree: <float> final rotation along the axis
        @param steps <int> total steps it takes, same with the number of rows in position file
        @param move: <Tuple> final movement along x, y, z axis, (x, y, z)
        """
        print('Configuring position information...')
        # get id array for first element
        id_arr = numpy.array(list(range(int(steps))))
        # Default every per-step series to zeros; overwritten below when
        # the corresponding motion is non-trivial.
        move_x_arr = move_y_arr = move_z_arr = radian_arr = numpy.zeros(len(id_arr)).tolist()
        # NOTE(review): the int(.../steps) != 0 guards mean any movement
        # or rotation smaller than `steps` units is silently ignored
        # (treated as zero) -- confirm this is intended.
        if int(move[0]/steps) != 0:
            move_x_arr = list(numpy.arange(0, move[0], move[0]/steps))
        if int(move[1]/steps) != 0:
            move_y_arr = list(numpy.arange(0, move[1], move[1]/steps))
        if int(move[2]/steps) != 0:
            move_z_arr = list(numpy.arange(0, move[2], move[2]/steps))
        # get radian arrays
        if int(degree/steps) != 0:
            degrees = list(numpy.arange(0, degree, degree/steps))
            radian_arr = numpy.radians(degrees)
        # For each step, compose: translate axis to origin (T), align axis
        # with z (Rx, Ry), rotate (Rz), undo the alignment, then apply the
        # per-step translation.
        for i, x, y, z, theta in zip(id_arr, move_x_arr, move_y_arr, move_z_arr, radian_arr):
            move_matrix = self._get_move_matrix(x, y, z)
            x_1, y_1, z_1, x_2, y_2, z_2 = axis[0][0], axis[0][1], axis[0][2], axis[1][0], axis[1][1], axis[1][2]
            t, t_inverse = self._get_t_and_inverse(x_1, y_1, z_1)
            rx, rx_inverse, ry, ry_inverse = self._get_r_x_y_and_inverse(x_1, y_1, z_1, x_2, y_2, z_2)
            rz = self._get_r_z(theta)
            trans = move_matrix.dot(t_inverse).dot(rx_inverse).dot(ry_inverse).dot(rz).dot(ry).dot(rx).dot(t)
            # Flatten the 4x4 matrix into a space-separated row.
            s = ' '.join(str(item) for item in trans)
            s = re.sub(r'[\[\]]', '', s)
            self.info += str(i) + ' ' + s + '\n'
        print('-Done configuring position information')

    def save_to_file(self):
        """
        Save the string in self.info to file
        """
        print('writing to file {}...'.format(self.filename))
        file = open(self.filename, 'w')
        file.write(self.info)
        print('-Done writing to file')

    @staticmethod
    def _calculate_unit_vector_elements(x_1, y_1, z_1, x_2, y_2, z_2):
        """
        Calculates the elements in unit vector
        @return: <Tuple> unit x, y, z
        """
        # Coincident endpoints: fall back to the y axis.
        if (x_2 - x_1) == (y_2 - y_1) == (z_2 - z_1) == 0:
            return 0, 1, 0
        mag = numpy.sqrt((x_2 - x_1) ** 2 + (y_2 - y_1) ** 2 + (z_2 - z_1) ** 2)
        # NOTE(review): two suspect spots here, left untouched:
        # 1) `(y_2 - y_1 ** 2)` binds as y_2 - (y_1**2); the magnitude
        #    above uses (y_2 - y_1)**2, so this looks like a precedence
        #    typo for (y_2 - y_1).
        # 2) the `else 1` forces the y component to 1 whenever it is 0,
        #    which un-normalises the "unit" vector (possibly a guard
        #    against d == 0 in _get_r_x_y_and_inverse). Confirm intent
        #    before changing -- output files depend on it.
        return (x_2 - x_1) / mag, (y_2 - y_1 ** 2) / mag if (y_2 - y_1 ** 2) / mag != 0 else 1, (z_2 - z_1) / mag

    @staticmethod
    def _get_move_matrix(x, y, z):
        """
        Gets move matrix
        @return: move matrix
        """
        return numpy.array([[1, 0, 0, x],
                            [0, 1, 0, y],
                            [0, 0, 1, z],
                            [0, 0, 0, 1]])

    def _get_r_x_y_and_inverse(self, x_1, y_1, z_1, x_2, y_2, z_2):
        """
        Gets the Rx, Rx inverse, Ry, Ry inverse matrices
        @return: <numpy.array> Rx, Rx inverse, Ry, Ry inverse matrices
        """
        a, b, c = self._calculate_unit_vector_elements(x_1, y_1, z_1, x_2, y_2, z_2)
        # d is the projection of the axis direction onto the y-z plane;
        # d == 0 (axis along x) would divide by zero here.
        d = numpy.sqrt(b**2 + c**2)
        rx = numpy.array([[1, 0, 0, 0],
                          [0, c/d, -b/d, 0],
                          [0, b/d, c/d, 0],
                          [0, 0, 0, 1]])
        rx_inverse = numpy.array([[1, 0, 0, 0],
                                  [0, c/d, b/d, 0],
                                  [0, -b/d, c/d, 0],
                                  [0, 0, 0, 1]])
        ry = numpy.array([[d, 0, -a, 0],
                          [0, 1, 0, 0],
                          [a, 0, d, 0],
                          [0, 0, 0, 1]])
        ry_inverse = numpy.array([[d, 0, a, 0],
                                  [0, 1, 0, 0],
                                  [-a, 0, d, 0],
                                  [0, 0, 0, 1]])
        return rx, rx_inverse, ry, ry_inverse

    @staticmethod
    def _get_r_z(theta):
        """
        Gets Rz matrix
        @param theta: theta radian of rotation
        @return: Rz matrix
        """
        return numpy.array([[numpy.cos(theta), -numpy.sin(theta), 0, 0],
                            [numpy.sin(theta), numpy.cos(theta), 0, 0],
                            [0, 0, 1, 0],
                            [0, 0, 0, 1]])

    @staticmethod
    def _get_t_and_inverse(x, y, z):
        """
        Gets T and T inverse matrices
        @param: x
        @param: y
        @param: z
        @return: <Tuple> T and T inverse
        """
        t = numpy.array([[1, 0, 0, -x],
                         [0, 1, 0, -y],
                         [0, 0, 1, -z],
                         [0, 0, 0, 1]])
        t_inverse = numpy.array([[1, 0, 0, x],
                                 [0, 1, 0, y],
                                 [0, 0, 1, z],
                                 [0, 0, 0, 1]])
        return t, t_inverse
def run():
    """Interactive CLI: prompt for axis, rotation, step and move
    parameters, then configure a TransWriter and write the file."""
    print('---Transformation Information CLI---\n')
    print('Please enter following information...\n')
    x_1 = float(input('axis parameter x1: '))
    y_1 = float(input('axis parameter y1: '))
    z_1 = float(input('axis parameter z1: '))
    x_2 = float(input('axis parameter x2: '))
    y_2 = float(input('axis parameter y2: '))
    z_2 = float(input('axis parameter z2: '))
    degree = float(input('rotation degree: '))
    steps = int(input('steps: '))
    move_x = int(input('move along x: '))
    move_y = int(input('move along y: '))
    move_z = int(input('move along z: '))
    filename = input('Please enter the file name to save file: ')
    # Output is always placed under the hard-coded SAVE_PATH directory.
    filename = SAVE_PATH + filename
    axis = ((x_1, y_1, z_1), (x_2, y_2, z_2))
    move = (move_x, move_y, move_z)
    writer = TransWriter(filename)
    writer.config(axis, degree, steps, move)
    writer.save_to_file()
| StarcoderdataPython |
147869 | import pyxel
import math
# 200x200 canvas cleared to palette colour 7 (white).
pyxel.init(200, 200)
pyxel.cls(7)
for i in range(0, 360, 1):
    iRadian = math.radians(i)
    # Map the angle 0..359 onto palette colour indices 0..6.
    lineColor = int(i * 7 / 360)
    # Lissajous-style fan from the centre: sine at 2x and cosine at 3x
    # the sweep angle.
    pyxel.line(100, 100, 100 + 100 * math.sin(iRadian * 2),
               100 + 100 * math.cos(iRadian * 3), lineColor)
pyxel.show()
| StarcoderdataPython |
3371204 | import json
from core_data_modules.data_models import Scheme
def _open_scheme(filename):
    """Load a coding scheme from ``code_schemes/<filename>``.

    :param filename: JSON file name inside the code_schemes directory.
    :return: Scheme built from the file's Firebase map.
    """
    # Fix: the path f-string did not interpolate `filename`, so every
    # call tried to open the same non-existent literal path.
    with open(f"code_schemes/{filename}", "r") as f:
        firebase_map = json.load(f)
    return Scheme.from_firebase_map(firebase_map)
class CodeSchemes(object):
    """Namespace of all coding schemes, loaded once at import time."""
    INTERNET = _open_scheme("internet_working.json")
    INTERNET_YES_NO = _open_scheme("yes_no_internet.json")
    WATER = _open_scheme("water_filter_working.json")
    WATER_YES_NO = _open_scheme("yes_no_water.json")
    WASTE = _open_scheme("waste_disposal_improvement.json")
    WASTE_YES_NO = _open_scheme("yes_no_waste.json")
    GENDER = _open_scheme("gender.json")
    AGE = _open_scheme("age.json")
    LOCATION = _open_scheme("location.json")
    WS_CORRECT_DATASET = _open_scheme("ws_correct_dataset.json")
| StarcoderdataPython |
1632906 | from djtools.socialnetworks.models import SocialNetwork
from django.test import TestCase
class SocialNetworkTestCase(TestCase):
    """Unit tests for the SocialNetwork model's choice/lookup helpers."""

    def setUp(self):
        # Two accounts sharing the same account id on different networks.
        self.twitter = SocialNetwork.objects.create(
            social_network='twitter',
            account_id='test'
        )
        self.github = SocialNetwork.objects.create(
            social_network='github',
            account_id='test'
        )

    def test_get_choices(self):
        """The supported networks and their display names, in order."""
        choices = SocialNetwork.get_choices()
        expected_choices = [
            ('facebook', "Facebook"),
            ('instagram', "Instagram"),
            ('twitter', "Twitter"),
            ('youtube', "YouTube"),
            ('vimeo', "Vimeo"),
            ('github', "GitHub"),
        ]
        self.assertEqual(choices, expected_choices)

    def test_get_social_networks(self):
        self.assertListEqual(
            list(SocialNetwork.get_social_networks()),
            [self.twitter, self.github]
        )

    def test_info(self):
        """info exposes the network's static metadata (name/link/icon)."""
        self.assertDictEqual(
            self.github.info,
            {
                'name': 'GitHub',
                'link': 'https://github.com/',
                'icon': 'github',
            }
        )

    def test_get_name(self):
        self.assertEqual(self.github.get_name(), "GitHub")

    def test_get_link(self):
        # The account id is appended to the network's base URL.
        self.assertEqual(self.github.get_link(), "https://github.com/test")

    def test_get_icon(self):
        self.assertEqual(self.github.get_icon(), "github")

    def test_add_non_existing_social_network(self):
        # Unknown networks should yield None from the lookup helpers.
        test = SocialNetwork.objects.create(
            social_network='test',
            account_id='test'
        )
        self.assertEqual(test.get_name(), None)
| StarcoderdataPython |
1753747 | <filename>src/skmultiflow/trees/nodes/anytime_split_node.py
import numpy as np
from skmultiflow.trees.attribute_split_suggestion import AttributeSplitSuggestion
from skmultiflow.trees.attribute_observer import NominalAttributeClassObserver
from skmultiflow.trees.attribute_observer import NumericAttributeClassObserverGaussian
from skmultiflow.trees.nodes import SplitNode
class AnyTimeSplitNode(SplitNode):
""" Node that splits the data in a Hoeffding Anytime Tree.
Parameters
----------
split_test: InstanceConditionalTest
Split test.
class_observations: dict (class_value, weight) or None
Class observations
attribute_observers : dict (attribute id, AttributeClassObserver)
Attribute Observers
"""
def __init__(self, split_test, class_observations, attribute_observers):
""" AnyTimeSplitNode class constructor."""
super().__init__(split_test, class_observations)
self._attribute_observers = attribute_observers
self._weight_seen_at_last_split_reevaluation = 0
def get_null_split(self, criterion):
""" Compute the null split (don't split).
Parameters
----------
criterion: SplitCriterion
The splitting criterion to be used.
Returns
-------
list
Split candidates.
"""
pre_split_dist = self._observed_class_distribution
null_split = AttributeSplitSuggestion(
None, [{}], criterion.get_merit_of_split(pre_split_dist, [pre_split_dist])
)
return null_split
def get_best_split_suggestions(self, criterion, ht):
""" Find possible split candidates without taking into account the the
null split.
Parameters
----------
criterion: SplitCriterion
The splitting criterion to be used.
ht: HoeffdingTree
Hoeffding Tree.
Returns
-------
list
Split candidates.
"""
best_suggestions = []
pre_split_dist = self._observed_class_distribution
for i, obs in self._attribute_observers.items():
best_suggestion = obs.get_best_evaluated_split_suggestion(
criterion, pre_split_dist, i, ht.binary_split
)
if best_suggestion is not None:
best_suggestions.append(best_suggestion)
return best_suggestions
@staticmethod
def find_attribute(id_att, split_suggestions):
""" Find the attribute given the id.
Parameters
----------
id_att: int.
Id of attribute to find.
split_suggestions: list
Possible split candidates.
Returns
-------
AttributeSplitSuggestion
Found attribute.
"""
# return current attribute as AttributeSplitSuggestion
x_current = None
for attSplit in split_suggestions:
selected_id = attSplit.split_test.get_atts_test_depends_on()[0]
if selected_id == id_att:
x_current = attSplit
return x_current
def learn_from_instance(self, X, y, weight, ht):
""" Update the node with the provided instance.
Parameters
----------
X: numpy.ndarray of length equal to the number of features.
Instance attributes for updating the node.
y: int
Instance class.
weight: float
Instance weight.
ht: HoeffdingTree
Hoeffding Tree to update.
"""
# Update attribute_observers
try:
self._observed_class_distribution[y] += weight
except KeyError:
self._observed_class_distribution[y] = weight
for i in range(len(X)):
try:
obs = self._attribute_observers[i]
except KeyError:
if i in ht.nominal_attributes:
obs = NominalAttributeClassObserver()
else:
obs = NumericAttributeClassObserverGaussian()
self._attribute_observers[i] = obs
obs.observe_attribute_class(X[i], int(y), weight)
def get_weight_seen(self):
    """ Calculate the total weight seen by the node.

    Returns
    -------
    float
        Total weight seen (sum over the observed class distribution).
    """
    return sum(w for w in self._observed_class_distribution.values())
def get_attribute_observers(self):
    """ Get attribute observers at this node.

    Returns
    -------
    dict (attribute id, AttributeClassObserver)
        The node's live observer mapping (not a copy).
    """
    return self._attribute_observers
def get_weight_seen_at_last_split_reevaluation(self):
    """ Get the weight seen at the last split reevaluation.

    Returns
    -------
    float
        Total weight seen at last split reevaluation.
    """
    return self._weight_seen_at_last_split_reevaluation
def update_weight_seen_at_last_split_reevaluation(self):
    """ Snapshot the current total observed weight as the weight seen at the
    last split reevaluation. """
    total = sum(self._observed_class_distribution.values())
    self._weight_seen_at_last_split_reevaluation = total
def count_nodes(self):
    """ Calculate the number of split nodes and leaves of the subtree rooted
    at this node.

    Returns
    -------
    numpy.ndarray of shape (2,)
        [number of split nodes, number of leaf nodes].
    """
    # This node counts as one split node; children contribute recursively.
    totals = np.array([1, 0])
    children = (self.get_child(i) for i in range(self.num_children()))
    for child in children:
        if child is not None:
            totals += child.count_nodes()
    return totals
| StarcoderdataPython |
136582 | #
# Copyright (c) 2018 CNRS INRIA
#
## In this file, are reported some deprecated functions that are still maintained until the next important future releases ##
from __future__ import print_function
import warnings as _warnings
from . import libpinocchio_pywrap as pin
from .deprecation import deprecated, DeprecatedWarning
# --- Deprecated dynamics / sample-model helpers -----------------------------

@deprecated("This function has been renamed to impulseDynamics for consistency with the C++ interface. Please change for impulseDynamics.")
def impactDynamics(model, data, q, v_before, J, r_coeff=0.0, update_kinematics=True):
    # Thin forwarding shim: identical signature and semantics to impulseDynamics.
    return pin.impulseDynamics(model, data, q, v_before, J, r_coeff, update_kinematics)

@deprecated("This function has been deprecated. Please use buildSampleModelHumanoid or buildSampleModelHumanoidRandom instead.")
def buildSampleModelHumanoidSimple(usingFF=True):
    # usingFF: whether the sample humanoid is rooted on a free-flyer joint.
    return pin.buildSampleModelHumanoidRandom(usingFF)

@deprecated("Static method Model.BuildHumanoidSimple has been deprecated. Please use function buildSampleModelHumanoid or buildSampleModelHumanoidRandom instead.")
def _Model__BuildHumanoidSimple(usingFF=True):
    return pin.buildSampleModelHumanoidRandom(usingFF)

# Re-attach the deprecated shim as the static method it used to be.
pin.Model.BuildHumanoidSimple = staticmethod(_Model__BuildHumanoidSimple)

@deprecated("Static method Model.BuildEmptyModel has been deprecated. Please use the empty Model constructor instead.")
def _Model__BuildEmptyModel():
    return pin.Model()

pin.Model.BuildEmptyModel = staticmethod(_Model__BuildEmptyModel)
# deprecate Model.neutralConfiguration. Since: 19 feb 2019
def _Model__neutralConfiguration(self):
    # Getter shim: warn, then read the renamed backing attribute.
    message = "Using deprecated instance variable Model.neutralConfiguration. Please use Model.referenceConfigurations instead."
    _warnings.warn(message, DeprecatedWarning, stacklevel=2)
    return self._neutralConfiguration

pin.Model.neutralConfiguration = property(_Model__neutralConfiguration)

def _Model__set_neutralConfiguration(self,q):
    # Setter shim: warn, then write the renamed backing attribute.
    message = "Using deprecated instance variable Model.neutralConfiguration. Please use Model.referenceConfigurations instead."
    _warnings.warn(message, DeprecatedWarning, stacklevel=2)
    self._neutralConfiguration = q

# Rebuild the property with the deprecated setter attached.
pin.Model.neutralConfiguration = pin.Model.neutralConfiguration.setter(_Model__set_neutralConfiguration)
@deprecated("This function has been renamed updateFramePlacements when taking two arguments, and framesForwardKinematics when taking three. Please change your code to the appropriate method.")
def framesKinematics(model,data,q=None):
    # Arity dispatch: without q only refresh frame placements; with q run the
    # full frames forward kinematics from the configuration.
    if q is None:
        pin.updateFramePlacements(model,data)
    else:
        pin.framesForwardKinematics(model,data,q)

@deprecated("This function has been renamed computeJointJacobians and will be removed in future releases of Pinocchio. Please change for new computeJointJacobians.")
def computeJacobians(model,data,q=None):
    # Arity dispatch mirroring the binding: with q, recompute from the
    # configuration; without q, reuse the kinematics already stored in data.
    if q is None:
        return pin.computeJointJacobians(model,data)
    else:
        return pin.computeJointJacobians(model,data,q)
# This function is only deprecated when using a specific signature. Therefore, it needs special care
# Marked as deprecated on 19 Feb 2019
def jointJacobian(model, data, q, jointId, *args):
    # Legacy 6-argument signature: (model, data, q, jointId, rf, update_kinematics).
    if len(args)==2:
        message = ("This function signature has been deprecated and will be removed in future releases of Pinocchio. "
                   "Please change for the new signature of jointJacobian or use computeJointJacobian + getJointJacobian.")
        _warnings.warn(message, category=DeprecatedWarning, stacklevel=2)
        rf = args[0]
        update_kinematics = args[1]
        if update_kinematics:
            pin.computeJointJacobians(model,data,q)
        return pin.getJointJacobian(model,data,jointId,rf)
    else:
        # Modern signature: forward straight to the binding.
        return pin.jointJacobian(model,data,q,jointId)

# Append the legacy signature to the binding's docstring so help() documents it.
jointJacobian.__doc__ = (
    pin.jointJacobian.__doc__
    + '\n\njointJacobian( (Model)Model, (Data)Data, (object)Joint configuration q (size Model::nq), (Index)jointId, ReferenceFrame rf, (bool)updateKinematics) -> object :'
    + '\n This function signature has been deprecated and will be removed in future releases of Pinocchio.'
)
# This function is only deprecated when using a specific signature. Therefore, it needs special care
# Marked as deprecated on 19 Feb 2019
def frameJacobian(model, data, q, frameId, *args):
    # Legacy 5-argument signature: (model, data, q, frameId, rf).
    if len(args)==1:
        message = ("This function signature has been deprecated and will be removed in future releases of Pinocchio. "
                   "Please change for the new signature of frameJacobian or use computeJointJacobian + updateFramePlacements + getFrameJacobian.")
        _warnings.warn(message, category=DeprecatedWarning, stacklevel=2)
        rf = args[0]
        # Recompute joint Jacobians and frame placements before reading the
        # frame Jacobian in the requested reference frame.
        pin.computeJointJacobians(model,data,q)
        pin.updateFramePlacements(model,data)
        return pin.getFrameJacobian(model, data, frameId, rf);
    else:
        # Modern signature: forward straight to the binding.
        return pin.frameJacobian(model,data,q,frameId)

# Append the legacy signature to the binding's docstring so help() documents it.
frameJacobian.__doc__ = (
    pin.frameJacobian.__doc__
    + '\n\nframeJacobian( (Model)Model, (Data)Data, (object)Joint configuration q (size Model::nq), (Index)frameId, ReferenceFrame rf) -> object :'
    + '\n This function signature has been deprecated and will be removed in future releases of Pinocchio.'
)
@deprecated("This function has been renamed jointJacobian and will be removed in future releases of Pinocchio. Please change for new jointJacobian function.")
def jacobian(model,data,q,jointId,local,update_kinematics):
    """Deprecated alias of jointJacobian.

    local: True -> LOCAL reference frame, False -> WORLD.
    update_kinematics: recompute the joint Jacobians from q before reading.
    """
    rf = pin.ReferenceFrame.LOCAL if local else pin.ReferenceFrame.WORLD
    if update_kinematics:
        # Bug fix: the bare name `computeJointJacobians` is not defined in
        # this module (only the deprecated alias `computeJacobians` and
        # `pin.computeJointJacobians` exist), so this path raised NameError.
        pin.computeJointJacobians(model,data,q)
    return pin.getJointJacobian(model,data,jointId,rf)
@deprecated("This function has been renamed getJointJacobian and will be removed in future releases of Pinocchio. Please change for new getJointJacobian function.")
def getJacobian(model,data,jointId,local):
    """Deprecated alias of getJointJacobian; `local` selects LOCAL vs WORLD frame."""
    frame = pin.ReferenceFrame.LOCAL if local else pin.ReferenceFrame.WORLD
    return pin.getJointJacobian(model,data,jointId,frame)
@deprecated("This function has been renamed computeJointJacobiansTimeVariation and will be removed in future releases of Pinocchio. Please change for new computeJointJacobiansTimeVariation.")
def computeJacobiansTimeVariation(model,data,q,v):
    """Deprecated alias of computeJointJacobiansTimeVariation.

    Bug fix: the deprecation message previously named the deprecated function
    itself ("computeJacobiansTimeVariation") as the replacement.
    """
    return pin.computeJointJacobiansTimeVariation(model,data,q,v)
@deprecated("This function has been renamed getJointJacobianTimeVariation and will be removed in future releases of Pinocchio. Please change for new getJointJacobianTimeVariation function.")
def getJacobianTimeVariation(model,data,jointId,local):
    """Deprecated alias of getJointJacobianTimeVariation; `local` selects LOCAL vs WORLD."""
    frame = pin.ReferenceFrame.LOCAL if local else pin.ReferenceFrame.WORLD
    return pin.getJointJacobianTimeVariation(model,data,jointId,frame)
@deprecated("This function has been renamed difference and will be removed in future releases of Pinocchio. Please change for new difference function.")
def differentiate(model,q0,q1):
    # Configuration-space difference of q1 w.r.t. q0 on the model's Lie group.
    return pin.difference(model,q0,q1)

@deprecated("This function has been renamed loadReferenceConfigurations and will be removed in future releases of Pinocchio. Please change for new loadReferenceConfigurations function.")
def getNeutralConfigurationFromSrdf(model, filename, verbose):
    # Loads all reference configurations from the SRDF file, then keeps the
    # legacy behaviour of exposing "half_sitting" as neutralConfiguration.
    pin.loadReferenceConfigurations(model,filename,verbose)
    model.neutralConfiguration = model.referenceConfigurations["half_sitting"]
    return model.referenceConfigurations["half_sitting"]

@deprecated("This function has been renamed loadReferenceConfigurations and will be removed in future releases of Pinocchio. Please change for new loadReferenceConfigurations function.")
def getNeutralConfiguration(model, filename, verbose):
    # Identical to getNeutralConfigurationFromSrdf; kept for the older name.
    pin.loadReferenceConfigurations(model,filename,verbose)
    model.neutralConfiguration = model.referenceConfigurations["half_sitting"]
    return model.referenceConfigurations["half_sitting"]
@deprecated("This function has been renamed loadRotorParams and will be removed in future releases of Pinocchio. Please change for new loadRotorParams function.")
def loadRotorParamsFromSrdf(model, filename, verbose):
    """Deprecated alias of loadRotorParams.

    Bug fix: the deprecation message was copy-pasted from `differentiate` and
    wrongly claimed the function was "renamed difference".
    """
    return pin.loadRotorParams(model,filename,verbose)
@deprecated("This function has been renamed removeCollisionPairs and will be removed in future releases of Pinocchio. Please change for new removeCollisionPairs function.")
def removeCollisionPairsFromSrdf(model, geomModel, filename, verbose):
    """Deprecated alias of removeCollisionPairs.

    Bug fix: the deprecation message was copy-pasted from `differentiate` and
    wrongly claimed the function was "renamed difference".
    """
    return pin.removeCollisionPairs(model,geomModel,filename,verbose)
# This function is only deprecated when using a specific signature. Therefore, it needs special care
def jacobianCenterOfMass(model, data, *args):
    # Legacy 5-argument signature: (model, data, q, computeSubtreeComs, updateKinematics).
    if len(args)==3:
        message = "This function signature has been deprecated and will be removed in future releases of Pinocchio. Please change for one of the new signatures of the jacobianCenterOfMass function."
        _warnings.warn(message, category=DeprecatedWarning, stacklevel=2)
        q = args[0]
        computeSubtreeComs = args[1]
        updateKinematics = args[2]
        if updateKinematics:
            return pin.jacobianCenterOfMass(model,data,q,computeSubtreeComs)
        else:
            return pin.jacobianCenterOfMass(model,data,computeSubtreeComs)
    else:
        # Modern signatures: forward straight to the binding.
        return pin.jacobianCenterOfMass(model,data,*args)

# Append the legacy signature to the binding's docstring so help() documents it.
jacobianCenterOfMass.__doc__ = (
    pin.jacobianCenterOfMass.__doc__
    + '\n\njacobianCenterOfMass( (Model)Model, (Data)Data, (object)Joint configuration q (size Model::nq), (bool)computeSubtreeComs, (bool)updateKinematics) -> object :'
    + '\n This function signature has been deprecated and will be removed in future releases of Pinocchio.'
)
@deprecated("This function will be removed in future releases of Pinocchio. You can use exp or exp6.")
def exp6FromMotion(motion):
    # SE(3) exponential of a spatial Motion.
    return pin.exp6(motion)

@deprecated("This function will be removed in future releases of Pinocchio. You can build a Motion object from a 6D vector and use the standard exp function to recover the same behavior.")
def exp6FromVector(vector6):
    # Wrap the 6D vector into a Motion before taking the exponential.
    v = pin.Motion(vector6)
    return pin.exp6(v)

@deprecated("This function will be removed in future releases of Pinocchio. You can use log or log6.")
def log6FromSE3(transform):
    # SE(3) logarithm of an SE3 placement.
    return pin.log6(transform)

@deprecated("This function will be removed in future releases of Pinocchio. You can use log or log6.")
def log6FromMatrix(matrix4):
    # SE(3) logarithm of a 4x4 homogeneous matrix.
    return pin.log6(matrix4)
| StarcoderdataPython |
'''
Task 10
Count how many times the digit 5 occurs in a number.
'''
value = 0
count = 0
integer_number = 291341555
# Peel off the last decimal digit each pass until the number is exhausted.
while integer_number > 0:
    integer_number, value = divmod(integer_number, 10)
    if value == 5:
        count += 1
print(count)
| StarcoderdataPython |
3285594 | <reponame>Kirpich1812/amino_service
import random
class DeviceGenerator:
    """Builds a pseudo-random Android device identity for Amino API requests."""

    def __init__(self):
        # Unpack a freshly generated identity onto the instance.
        info = self.generate_device_info()
        self.user_agent = info["user_agent"]
        self.device_id = info["device_id"]
        self.device_id_sig = info["device_id_sig"]

    @staticmethod
    def generate_device_info():
        """Return a dict with a fixed device id/signature and a randomized user agent."""
        return {
            "device_id": "01C2A4FBDFFE4EF0FA865FBE1C2E01A3AE74547DDB227F60840680E90AA9EF709FBA294E6FAF7EDEEF",
            "device_id_sig": "Aa0ZDPOEgjt1EhyVYyZ5FgSZSqJt",
            "user_agent": f"Dalvik/2.1.0 (Linux; U; Android 8.0; SM-A{random.randint(100, 900)}x Build/greatltexx-user {random.randint(5, 9)}.0 NMF{random.randint(20, 50)}X {random.randint(2, 4)}00190{random.randint(10, 999)} release-keys; com.narvii.amino.master/2.0.2{random.randint(2000, 9000)})"
        }
| StarcoderdataPython |
67859 | <filename>sudokuless/load.py
import os
import sys
from .exceptions import FormatError
def from_text(txt):
    """Create a list structure from a special format of text.

    Args:
        txt: a 9-line string; 'x' marks a blank cell, spaces are ignored.

    Returns:
        A 9x9 2d list where blank cells are None and givens are ints.

    Raises:
        FormatError: if the input is missing, not a string, or malformed.
    """
    if txt is None or txt == '':
        raise FormatError('Nothing passed in')
    if not isinstance(txt, str):
        raise FormatError('Expect a string value')
    rows = [line for line in txt.splitlines() if line != '']
    if len(rows) != 9:
        raise FormatError('Row number is {} which is not equal to 9'.format(len(rows)))
    grid = []
    for row_no, raw in enumerate(rows, 1):
        # Normalize: strip whitespace, drop inner spaces, map 'x' to digit 0.
        cleaned = raw.strip().replace(' ', '').replace('x', '0')
        try:
            cells = [None if d == 0 else d for d in (int(c) for c in cleaned)]
        except ValueError as err:
            # Extract the offending quoted token from int()'s error message.
            msg = err.args[0]
            bad = msg[msg.index('\''):msg.rindex('\'') + 1]
            raise FormatError('Row-{} has an error when parsing, {} is not an number'.format(row_no, bad))
        if len(cells) < 9:
            raise FormatError('Col-{} has cells less than 9'.format(row_no))
        if len(cells) > 9:
            raise FormatError('Col-{} has cells more than 9'.format(row_no))
        grid.append(cells)
    return grid
def from_string(string):
    """Create a list structure from a flat string of 81 characters.

    Args:
        string: 81 digits, e.g. "020000080568179234...".

    Returns:
        A 2d list object (see from_text).
    """
    if len(string) != 81:
        raise FormatError('string does not have precise 81 numbers')
    # Slice the flat string into nine 9-character rows for from_text.
    rows = [string[pos:pos + 9] for pos in range(0, 81, 9)]
    return from_text('\n'.join(rows) + '\n')
def from_file(filename):
    """Create a list structure from a special format of file.

    Args:
        filename: path to a file containing the formatted grid.

    Returns:
        A 2d list object (see from_text).
    """
    with open(filename, 'r') as handle:
        return from_text(handle.read())
def from_input():
    """Create a list structure from standard input.

    Input ends at the first blank line.

    Returns:
        A 2d list object (see from_text).
    """
    lines = iter(sys.stdin.readline, '\n')
    return from_text(''.join(lines))
| StarcoderdataPython |
1620942 | <filename>slue_toolkit/text_ner/reformat_pipeline.py<gh_stars>0
import glob
import os
import sys

import fire

from slue_toolkit.generic_utils import read_lst, write_to_file
def prep_data(model_type, asr_data_dir, asr_model_dir, out_data_dir, eval_set, lm="nolm"):
    """
    Create tsv files for pipeline evaluation from the decoded ASR transcripts.

    Writes one "<word>\tO" line per token with a blank line between sentences
    (CoNLL-style NER input, all tags initialised to "O").
    """
    # Any LM other than "nolm" maps to the single decoding configuration used
    # in the pipeline experiments.
    if "nolm" not in lm:
        lm = "t3-b500-lw2-ws-1"
    manifest_data_fn = os.path.join(asr_data_dir, eval_set + ".wrd")
    decoded_data_dir = os.path.join(asr_model_dir, "decode", eval_set, lm)
    # Bug fix: the original referenced the undefined names `eval_subset` and
    # `model_name`; the parameters are `eval_set` and `model_type`.
    out_fn = f"{eval_set}-{model_type}-asr-{lm}"
    out_fn = os.path.join(out_data_dir, out_fn)
    sent_lst = get_correct_order(decoded_data_dir, manifest_data_fn)
    out_str = ""
    for sent in sent_lst:
        for wrd in sent.split(" "):
            out_str += wrd + "\tO\n"
        out_str += "\n"
    write_to_file(out_str, out_fn)
    print("Data prepared for model %s and lm %s" % (model_type, lm))
def get_correct_order(decoded_data_dir, manifest_data_fn):
    """
    Reorder decoded sentences to match the original manifest order.

    Bug fix: this module-level function erroneously declared a `self`
    parameter; its only caller (prep_data) passes two arguments.

    Requires `glob` and `sys` to be imported at module level.
    """
    if not os.path.exists(decoded_data_dir):
        print("Decoded data %s not found" % (decoded_data_dir))
        sys.exit()
    fname = glob.glob(decoded_data_dir + "/ref.word*")
    assert len(fname) == 1
    decoded_sent_lst_gt = read_lst(fname[0])
    fname = glob.glob(decoded_data_dir + "/hypo.word*")
    assert len(fname) == 1
    decoded_sent_lst_hyp = read_lst(fname[0])
    manifest_sent_lst = read_lst(manifest_data_fn)
    assert len(decoded_sent_lst_gt) == len(manifest_sent_lst)
    assert len(decoded_sent_lst_hyp) == len(decoded_sent_lst_gt)
    # Strip the trailing " (None-<idx>)" utterance-id suffix emitted by the
    # decoder from both hypothesis and reference lines.
    decoded_sent_lst_hyp_select = [line.split(" (None-")[0] for line in decoded_sent_lst_hyp]
    decoded_sent_lst_gt = [line.split(" (None-")[0] for line in decoded_sent_lst_gt]
    decoded_sent_lst_reordered = [None] * len(manifest_sent_lst)
    for idx, line in enumerate(decoded_sent_lst_gt):
        assert line != -1
        idx_new = manifest_sent_lst.index(line)
        manifest_sent_lst[idx_new] = -1  # to ensure that it's not chosen again
        decoded_sent_lst_reordered[idx_new] = decoded_sent_lst_hyp_select[idx]
    return decoded_sent_lst_reordered
if __name__ == '__main__':
    # Expose this module's functions as a command-line interface via python-fire.
    fire.Fire()
| StarcoderdataPython |
1616001 | #!/usr/bin/python3
import glob
import os
from pathlib import Path, PurePosixPath
files = glob.glob('/home/bkk/Pictures' + '/**/*.*', recursive=True)
filesToDelete = []
# detect files for deletion
for f in files:
if (PurePosixPath(f).suffix == '.jpg') and str(PurePosixPath(f).with_suffix('.HEIC')) in files:
print(f + '\n')
filesToDelete.append(f)
if (PurePosixPath(f).suffix == '.MOV') and str(PurePosixPath(f).with_stem(PurePosixPath(f).stem + '_HEVC')) in files:
print(f + '\n')
filesToDelete.append(f)
print(str(len(filesToDelete)) + ' files to delete in total\n')
# delete files
for f in filesToDelete:
print('deleting' + f + '\n')
os.remove(f)
| StarcoderdataPython |
3268973 | <filename>src/proxy_data.py
# Copyright 2017 NeuStar, Inc.All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .check_json import CheckJSON
class ProxyData:
    """Accessor for the 'ProxyData' section of an IP-information payload."""

    def __init__(self, ip_info):
        # Validate and extract the ProxyData sub-object once, up front.
        self.proxy_data = CheckJSON('ProxyData', ip_info).key_valid()

    def _lookup(self, key):
        # Shared helper: validated lookup of a field inside the proxy data.
        return CheckJSON(key, self.proxy_data).key_valid()

    def info(self):
        """Return all proxy data as an object"""
        return self.proxy_data

    def proxy_level(self):
        """Returns proxy level"""
        return self._lookup('proxy_level')

    def proxy_type(self):
        """Returns proxy type"""
        return self._lookup('proxy_type')

    def proxy_last_detected(self):
        """Returns when a proxy was last detected"""
        return self._lookup('proxy_last_detected')
# Read a length in inches from stdin and print the equivalent in centimeters.
inches = float(input())
centimeters = inches * 2.54
print(centimeters)
1654000 | <gh_stars>1-10
import numpy as np
def sigmoide(Z):
    """Logistic sigmoid 1 / (1 + e^-Z), elementwise for array inputs."""
    return 1 / (1 + np.exp(-Z))
def one_hot(y, et):
    """One-hot encode the 1-based label array `y` into `et` columns."""
    m = len(y)
    shifted = y - 1  # labels are 1-based; columns are 0-based
    encoded = np.zeros((m, et))
    for row in range(m):
        encoded[row][int(shifted[row])] = 1
    return encoded
| StarcoderdataPython |
1694393 | <gh_stars>10-100
# JN 2016-01-12
"""
manage spikes for data viewer
"""
import os
from .tools import debug
class SpikeManager(object):
    """
    Represent spikes for the data viewer.

    Tracks, per entity name ("key"): the raw spike h5 file, an optional
    sorted-session folder, lazily opened file handles, loaded spike
    times/waveforms, and the (begin, end) index window currently displayed.

    NOTE(review): this class uses `tables`, `np` (numpy) and `Combinato`,
    which are not imported in the visible part of this module -- confirm the
    imports exist in the original file.
    """
    def __init__(self, sign, label):
        self.fnames = {}        # key -> path of raw spike h5 file
        self.openfiles = {}     # key -> open h5 file handle
        self.times = {}         # key -> spike time array (loaded copy)
        self.spikes = {}        # key -> spike waveform node (lazy, on-disk)
        self.beg_end = {}       # key -> (begin, end) index window
        self.sortednames = {}   # key -> h5 path for keys with sorted data
        self.sortedfiles = {}   # key -> Combinato session of sorted spikes
        self.label = label      # sorting-session label (subfolder name)
        self.sign = sign        # spike sign selector used in h5 node paths

    def __del__(self):
        # Close every h5 handle that was lazily opened.
        for fid in self.openfiles.values():
            fid.close()

    def check_add(self, key, entname):
        """Register entity `entname` if `key/data_<key>.h5` exists; also note
        whether a sorted-session folder `key/<label>` is present."""
        sppath = os.path.join(key, 'data_{}.h5'.format(key))
        sorted_path = os.path.join(key, self.label)
        if os.path.isdir(sorted_path):
            self.sortednames[entname] = sppath
        if os.path.exists(sppath):
            self.fnames[entname] = sppath
        debug(self.sortednames)

    def h5f_by_key(self, key):
        """Open (once) the spike file for `key` and return (times, spikes),
        or None if no spike file is registered for `key`."""
        if key in self.sortednames:
            print('loading sorted spikes for ' + key)
            self.sortedfiles[key] = Combinato(self.sortednames[key],
                                              self.sign, self.label)
        if key not in self.fnames:
            debug('No spike file for ' + key)
            return
        if key not in self.openfiles:
            fid = tables.open_file(self.fnames[key], 'r')
            self.openfiles[key] = fid
            # warning! times get loaded as copies
            # because we have to search them
            print('Loading times for ' + key)
            t = fid.get_node('/' + self.sign + '/times')[:]
            print('Done')
            self.times[key] = t
            s = fid.get_node('/' + self.sign + '/spikes')
            self.spikes[key] = s
        else:
            t = self.times[key]
            s = self.spikes[key]
        return t, s

    def set_beg_end(self, key, start, stop, sign='pos'):
        """Set the displayed index window of `key` to cover [start, stop].

        `sign` is kept for backward compatibility but is unused; the sign is
        fixed at construction time.
        """
        # Bug fix: h5f_by_key() takes a single argument; the original passed
        # `sign` as a second positional argument, raising TypeError.
        ret = self.h5f_by_key(key)
        if ret is None:
            return
        t = self.times[key]
        # Pad the window by one index on each side, clamped to the valid range.
        beg = max(t.searchsorted(start) - 1, 0)
        end = min(t.searchsorted(stop) + 1, t.shape[0])
        self.beg_end[key] = (beg, end)
        print(self.beg_end[key])

    def get_sorted_data(self, key, start, stop):
        """Return {cluster: {'times': array}} restricted to [start, stop] for
        a key with sorted data, else an empty array."""
        ret = {}
        ea = np.array([])
        if key in self.sortedfiles:
            print(start, stop)
            clu = self.sortedfiles[key].get_groups_joined()
            for c in clu:
                times = clu[c]['times']
                idx = (times >= start) & (times <= stop)
                print(times[0], times[-1], start, stop)
                if idx.any():
                    t = times[idx]
                else:
                    t = ea
                ret[c] = {'times': t}
                # Duplicate debug print removed (the original printed the
                # cluster's spike count twice per iteration).
                print(c, idx.sum())
            return ret
        else:
            return np.array([])

    def get_sp_data(self, key, which='times'):
        """Return the `which` ('times' or 'spikes') slice of `key` inside the
        window previously set by set_beg_end, or an empty array."""
        key = str(key)
        if key in self.beg_end:
            beg = self.beg_end[key][0]
            end = self.beg_end[key][1]
        else:
            return np.array([])
        if which == 'times':
            retv = self.times[key]
        elif which == 'spikes':
            retv = self.spikes[key]
        return retv[beg:end]
| StarcoderdataPython |
3379191 | <reponame>AbdulFMS/lanedet<gh_stars>100-1000
from .detector import Detector
| StarcoderdataPython |
3383660 | """
Requirements.
$ pip3 install bokeh
"""
from MotionDetector import data_frames
from bokeh.plotting import figure, show, output_file
from bokeh.models import HoverTool, ColumnDataSource
# ColumnDataSource - standardized way of providing data to bokeh plot
data_frames["Start_string"] = data_frames["Start"].dt.strftime("%Y-%m-%d %H:%M:%S")
data_frames["End_string"] = data_frames["End"].dt.strftime("%Y-%m-%d %H:%M:%S")
cds = ColumnDataSource(data_frames)
p = figure(x_axis_type='datetime', height=100, width=500, title="Motion Graph", sizing_mode='scale_width')
p.yaxis.minor_tick_line_color = None
# import hover and implement capabilities here and later add 'ColumnDataSource'
hover=HoverTool(tooltips=[("Start", "@Start_string"),("End", "@End_string")])
p.add_tools(hover)
q = p.quad(left="Start",right="End",bottom=0,top=1,color="green", source=cds)
output_file("Graph.html")
show(p) | StarcoderdataPython |
4840337 | <reponame>AKhodus/adcm<filename>python/api/component/serializers.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=redefined-builtin
from rest_framework import serializers
from rest_framework.reverse import reverse
from api.api_views import hlink, filter_actions, get_api_url_kwargs, CommonAPIURL
from api.action.serializers import ActionShort
from cm import issue
from cm import status_api
from cm.models import Action
class ComponentObjectUrlField(serializers.HyperlinkedIdentityField):
def get_url(self, obj, view_name, request, format):
kwargs = get_api_url_kwargs(obj, request, True)
return reverse(view_name, kwargs=kwargs, request=request, format=format)
class ComponentSerializer(serializers.Serializer):
id = serializers.IntegerField(read_only=True)
cluster_id = serializers.IntegerField(read_only=True)
service_id = serializers.IntegerField(read_only=True)
name = serializers.CharField(read_only=True)
display_name = serializers.CharField(read_only=True)
description = serializers.CharField(read_only=True)
state = serializers.CharField(read_only=True)
prototype_id = serializers.IntegerField(required=True, help_text='id of component prototype')
url = ComponentObjectUrlField(read_only=True, view_name='component-details')
class ComponentDetailSerializer(ComponentSerializer):
constraint = serializers.JSONField(read_only=True)
requires = serializers.JSONField(read_only=True)
bound_to = serializers.JSONField(read_only=True)
bundle_id = serializers.IntegerField(read_only=True)
monitoring = serializers.CharField(read_only=True)
status = serializers.SerializerMethodField()
issue = serializers.SerializerMethodField()
action = CommonAPIURL(read_only=True, view_name='object-action')
config = CommonAPIURL(read_only=True, view_name='object-config')
prototype = hlink('component-type-details', 'prototype_id', 'prototype_id')
def get_issue(self, obj):
return issue.aggregate_issues(obj)
def get_status(self, obj):
return status_api.get_component_status(obj.id)
class ComponentUISerializer(ComponentDetailSerializer):
actions = serializers.SerializerMethodField()
version = serializers.SerializerMethodField()
def get_actions(self, obj):
act_set = Action.objects.filter(prototype=obj.prototype)
self.context['object'] = obj
self.context['component_id'] = obj.id
actions = filter_actions(obj, act_set)
acts = ActionShort(actions, many=True, context=self.context)
return acts.data
def get_version(self, obj):
return obj.prototype.version
| StarcoderdataPython |
123725 | """Read yaml configuration files."""
import yaml
class Config:
"""Configuration file reader."""
def __init__(self, fh):
"""Initialize the configuration from a file handle."""
self._config = yaml.load(fh)
fh.close()
def get(self, *args):
"""Get a key from a path given as multiple strings from the config file."""
value = self._config
for segment in args:
if value is None:
return None
value = value.get(segment)
return value
| StarcoderdataPython |
1726215 | <filename>bot.py
from telegram.ext import Updater, MessageHandler, CommandHandler, Filters
from watson_developer_cloud import ConversationV1
import json
from dbhelper import DBHelper
db=DBHelper()
context = None
# Define a few command handlers. These usually take the two arguments bot and
# update. Error handlers also receive the raised TelegramError object in error.
def start(bot, update):
print('Received /start command')
update.message.reply_text('Hi!')
def help(bot, update):
print('Received /help command')
update.message.reply_text('Help!')
def message(bot, update):
print('Received an update')
global context
conversation = ConversationV1(username='<PASSWORD>',
password='<PASSWORD>',
version='2018-02-16')
# get response from watson
response = conversation.message(
workspace_id='00433057-81db-4e1e-ac1e-ae6076790f6e',
input={'text': update.message.text},
context=context)
print(json.dumps(response, indent=2))
context = response['context']
try:
if ((context['Temperature']=='yes' and context['Fatigue']=='yes' and context['Chill']=='yes') or (context['Temperature']=='yes')) :
update.message.reply_text('You may have fever.')
m="You can take "+db.get_med('Fever')
update.message.reply_text(m)
del context['Temperature']
del context['Fatigue']
del context['Chill']
except Exception, e:
print('Exception',e)
#try:
#if ((context['cold']=='yes' and context['Chill']=='yes')or(context['cold']=='yes')) :
#update.message.reply_text('You may have cold.')
#m="You can take "+db.get_med('Cold')
#update.message.reply_text(m)
#except Exception, e:
#print('Exception',e)
#try:
#if((context['anemia1']=='yes') or (context['anemia_symptoms']=='yes')):
#update.message.reply_text('You may be suffering from Anemia.')
#m="You can take "+db.get_med('Anemia')
#update.message.reply_text(m)
#except Exception, e:
#print('Exception',e)
try:
if(context['diarrhoea']=='yes' and context['vomiting']=='yes' and context['Fatigue']=='yes'):
update.message.reply_text('You may be suffering from Foodpoisoning.')
m="You can take "+db.get_med('Food poisoning')
update.message.reply_text(m)
except Exception, e:
print('Exception',e)
try:
if (context['skin_allergy']=='yes' or context['skin_dis']=='yes'):
update.message.reply_text('You may have Skin Allery.')
m="You can take "+db.get_med('Skin Allergy')
update.message.reply_text(m)
except Exception, e:
print('Exception',e)
# build response
resp = ''
for text in response['output']['text']:
resp += text
update.message.reply_text(resp)
def main():
#print(db.get_med('FEVER'))
#print(db.get_med('Fever'))
# Create the Updater and pass it your bot's token.
updater = Updater('623887564:AAHtNb41KPMA77e27zkvfrXFBxp9kRSKzSs')
# Get the dispatcher to register handlers
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help))
# on noncommand i.e message - echo the message on Telegram
dp.add_handler(MessageHandler(Filters.text, message))
# Start the Bot
updater.start_polling()
# Block until the user presses Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()from telegram.ext import Updater, MessageHandler, CommandHandler, Filters
from watson_developer_cloud import ConversationV1
import json
from dbhelper import DBHelper
db=DBHelper()
context = None
# Define a few command handlers. These usually take the two arguments bot and
# update. Error handlers also receive the raised TelegramError object in error.
def start(bot, update):
print('Received /start command')
update.message.reply_text('Hi!')
def help(bot, update):
print('Received /help command')
update.message.reply_text('Help!')
def message(bot, update):
print('Received an update')
global context
conversation = ConversationV1(username='8a8eac1c-f2e1-4888-8408-c9aa338fb439',
password='<PASSWORD>',
version='2018-02-16')
# get response from watson
response = conversation.message(
workspace_id='00433057-81db-4e1e-ac1e-ae6076790f6e',
input={'text': update.message.text},
context=context)
print(json.dumps(response, indent=2))
context = response['context']
try:
if ((context['Temperature']=='yes' and context['Fatigue']=='yes' and context['Chill']=='yes') or (context['Temperature']=='yes')) :
update.message.reply_text('You may have fever.')
m="You can take "+db.get_med('Fever')
update.message.reply_text(m)
del context['Temperature']
del context['Fatigue']
del context['Chill']
except Exception, e:
print('Exception',e)
#try:
#if ((context['cold']=='yes' and context['Chill']=='yes')or(context['cold']=='yes')) :
#update.message.reply_text('You may have cold.')
#m="You can take "+db.get_med('Cold')
#update.message.reply_text(m)
#except Exception, e:
#print('Exception',e)
#try:
#if((context['anemia1']=='yes') or (context['anemia_symptoms']=='yes')):
#update.message.reply_text('You may be suffering from Anemia.')
#m="You can take "+db.get_med('Anemia')
#update.message.reply_text(m)
#except Exception, e:
#print('Exception',e)
try:
if(context['diarrhoea']=='yes' and context['vomiting']=='yes' and context['Fatigue']=='yes'):
update.message.reply_text('You may be suffering from Foodpoisoning.')
m="You can take "+db.get_med('Food poisoning')
update.message.reply_text(m)
except Exception, e:
print('Exception',e)
try:
if (context['skin_allergy']=='yes' or context['skin_dis']=='yes'):
update.message.reply_text('You may have Skin Allery.')
m="You can take "+db.get_med('Skin Allergy')
update.message.reply_text(m)
except Exception, e:
print('Exception',e)
# build response
resp = ''
for text in response['output']['text']:
resp += text
update.message.reply_text(resp)
def main():
#print(db.get_med('FEVER'))
#print(db.get_med('Fever'))
# Create the Updater and pass it your bot's token.
updater = Updater('623887564:AAHtNb41KPMA77e27zkvfrXFBxp9kRSKzSs')
# Get the dispatcher to register handlers
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help))
# on noncommand i.e message - echo the message on Telegram
dp.add_handler(MessageHandler(Filters.text, message))
# Start the Bot
updater.start_polling()
# Block until the user presses Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
| StarcoderdataPython |
3345099 | <reponame>c-bik/pretalx<gh_stars>1-10
from decimal import Decimal
from functools import partial
from django import forms
from django.core.files.uploadedfile import UploadedFile
from django.utils.translation import gettext_lazy as _
from pretalx.common.forms.utils import get_help_text, validate_field_length
from pretalx.common.phrases import phrases
from pretalx.common.templatetags.rich_text import rich_text
class ReadOnlyFlag:
    """Form mixin: when constructed with read_only=True, every field is
    disabled and any attempt to submit changed data is rejected in clean()."""

    def __init__(self, *args, read_only=False, **kwargs):
        super().__init__(*args, **kwargs)
        self.read_only = read_only
        if not read_only:
            return
        for bound_field in self.fields.values():
            bound_field.disabled = True

    def clean(self):
        """Reject submissions on read-only forms; otherwise defer to the parent."""
        if not self.read_only:
            return super().clean()
        raise forms.ValidationError(_("You are trying to change read-only data."))
class PublicContent:
    """Form mixin: appends a "this will be public" notice to the help text of
    every field listed in Meta.public_fields."""

    public_fields = []

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for field_name in self.Meta.public_fields:
            field = self.fields.get(field_name)
            if not field:
                continue
            # Remember the two help-text components separately so other mixins
            # (e.g. length hints) can re-compose them.
            field.original_help_text = getattr(field, "original_help_text", "")
            field.added_help_text = getattr(field, "added_help_text", "") + str(
                phrases.base.public_content
            )
            field.help_text = field.original_help_text + " " + field.added_help_text
class RequestRequire:
    """Form mixin that applies the event's CfP settings to the fields listed in
    Meta.request_require.

    For each key: if the field is neither requested nor required it is removed;
    otherwise its ``required`` flag and (when lengths are counted in characters)
    its min/max length widget attributes, validators, and help text are set from
    the event settings.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        count_chars = self.event.settings.cfp_count_length_in == "chars"
        for key in self.Meta.request_require:
            request = self.event.settings.get(f"cfp_request_{key}")
            require = self.event.settings.get(f"cfp_require_{key}")
            if not request and not require:
                # Field is not part of this event's CfP at all.
                self.fields.pop(key)
            else:
                field = self.fields[key]
                field.required = require
                min_value = self.event.settings.get(f"cfp_{key}_min_length")
                max_value = self.event.settings.get(f"cfp_{key}_max_length")
                if min_value or max_value:
                    # Browser-side hints only make sense for character counts;
                    # word counts are enforced by the validator below.
                    # (Fixed: these keys were needless f-strings without
                    # placeholders.)
                    if min_value and count_chars:
                        field.widget.attrs["minlength"] = min_value
                    if max_value and count_chars:
                        field.widget.attrs["maxlength"] = max_value
                    field.validators.append(
                        partial(
                            validate_field_length,
                            min_length=min_value,
                            max_length=max_value,
                            count_in=self.event.settings.cfp_count_length_in,
                        )
                    )
                    field.original_help_text = getattr(field, "original_help_text", "")
                    field.added_help_text = get_help_text(
                        "",
                        min_value,
                        max_value,
                        self.event.settings.cfp_count_length_in,
                    )
                    field.help_text = (
                        field.original_help_text + " " + field.added_help_text
                    )
class QuestionFieldsMixin:
    """Form mixin that turns pretalx Question objects into Django form fields
    and persists the submitted values back into Answer objects."""

    def get_field(self, *, question, initial, initial_object, readonly):
        """Build the form field matching ``question.variant``.

        :param question: the Question model instance to render
        :param initial: previously stored raw value (string), if any
        :param initial_object: previously stored Answer object, used for
            (multi-)choice initials
        :param readonly: disable the field when True
        :return: a configured ``forms.Field`` or None for unknown variants
        """
        from pretalx.submission.models import QuestionVariant

        original_help_text = question.help_text
        help_text = rich_text(question.help_text)
        if question.is_public:
            help_text += " " + str(phrases.base.public_content)
        count_chars = self.event.settings.cfp_count_length_in == "chars"
        if question.variant == QuestionVariant.BOOLEAN:
            # For some reason, django-bootstrap4 does not set the required attribute
            # itself.
            widget = (
                forms.CheckboxInput(attrs={"required": "required", "placeholder": ""})
                if question.required
                else forms.CheckboxInput()
            )
            field = forms.BooleanField(
                disabled=readonly,
                help_text=help_text,
                label=question.question,
                required=question.required,
                widget=widget,
                # Stored answers are strings, so compare against "True".
                initial=(initial == "True")
                if initial
                else bool(question.default_answer),
            )
            field.original_help_text = original_help_text
            return field
        if question.variant == QuestionVariant.NUMBER:
            field = forms.DecimalField(
                disabled=readonly,
                help_text=help_text,
                label=question.question,
                required=question.required,
                min_value=Decimal("0.00"),
                initial=initial,
            )
            field.original_help_text = original_help_text
            field.widget.attrs["placeholder"] = ""  # XSS
            return field
        if question.variant == QuestionVariant.STRING:
            field = forms.CharField(
                disabled=readonly,
                help_text=get_help_text(
                    help_text,
                    question.min_length,
                    question.max_length,
                    self.event.settings.cfp_count_length_in,
                ),
                label=question.question,
                required=question.required,
                initial=initial,
                # Browser-side limits only work for character counts.
                min_length=question.min_length if count_chars else None,
                max_length=question.max_length if count_chars else None,
            )
            field.original_help_text = original_help_text
            field.widget.attrs["placeholder"] = ""  # XSS
            field.validators.append(
                partial(
                    validate_field_length,
                    min_length=question.min_length,
                    max_length=question.max_length,
                    count_in=self.event.settings.cfp_count_length_in,
                )
            )
            return field
        if question.variant == QuestionVariant.TEXT:
            field = forms.CharField(
                label=question.question,
                required=question.required,
                widget=forms.Textarea,
                disabled=readonly,
                help_text=get_help_text(
                    help_text,
                    question.min_length,
                    question.max_length,
                    self.event.settings.cfp_count_length_in,
                ),
                initial=initial,
                min_length=question.min_length if count_chars else None,
                max_length=question.max_length if count_chars else None,
            )
            field.validators.append(
                partial(
                    validate_field_length,
                    min_length=question.min_length,
                    max_length=question.max_length,
                    count_in=self.event.settings.cfp_count_length_in,
                )
            )
            field.original_help_text = original_help_text
            field.widget.attrs["placeholder"] = ""  # XSS
            return field
        if question.variant == QuestionVariant.FILE:
            field = forms.FileField(
                label=question.question,
                required=question.required,
                disabled=readonly,
                help_text=help_text,
                initial=initial,
            )
            field.original_help_text = original_help_text
            field.widget.attrs["placeholder"] = ""  # XSS
            return field
        if question.variant == QuestionVariant.CHOICES:
            choices = question.options.all()
            field = forms.ModelChoiceField(
                queryset=choices,
                label=question.question,
                required=question.required,
                empty_label=None,
                initial=initial_object.options.first()
                if initial_object
                else question.default_answer,
                disabled=readonly,
                help_text=help_text,
                # Radio buttons for small option sets, default select otherwise.
                widget=forms.RadioSelect if len(choices) < 4 else None,
            )
            field.original_help_text = original_help_text
            field.widget.attrs["placeholder"] = ""  # XSS
            return field
        if question.variant == QuestionVariant.MULTIPLE:
            field = forms.ModelMultipleChoiceField(
                queryset=question.options.all(),
                label=question.question,
                required=question.required,
                widget=forms.CheckboxSelectMultiple,
                initial=initial_object.options.all()
                if initial_object
                else question.default_answer,
                disabled=readonly,
                help_text=help_text,
            )
            field.original_help_text = original_help_text
            field.widget.attrs["placeholder"] = ""  # XSS
            return field
        return None

    def save_questions(self, k, v):
        """Receives a key and value from cleaned_data.

        Creates, updates, or deletes the matching Answer: an empty value
        deletes an existing answer; a non-empty value updates the cached
        answer or creates a new one targeted at the reviewer, submission,
        or speaker depending on the question's target.
        """
        from pretalx.submission.models import Answer, QuestionTarget

        field = self.fields[k]
        if field.answer:
            # We already have a cached answer object, so we don't
            # have to create a new one
            if v == "" or v is None:
                field.answer.delete()
            else:
                self._save_to_answer(field, field.answer, v)
                field.answer.save()
        elif v != "" and v is not None:
            answer = Answer(
                review=self.review
                if field.question.target == QuestionTarget.REVIEWER
                else None,
                submission=self.submission
                if field.question.target == QuestionTarget.SUBMISSION
                else None,
                person=self.speaker
                if field.question.target == QuestionTarget.SPEAKER
                else None,
                question=field.question,
            )
            self._save_to_answer(field, answer, v)
            answer.save()

    def _save_to_answer(self, field, answer, value):
        """Write ``value`` into ``answer`` according to the field type.

        Choice fields also maintain the answer's m2m ``options``; the answer
        must be saved once before options can be attached (hence the
        ``answer.pk`` checks).
        """
        if isinstance(field, forms.ModelMultipleChoiceField):
            answstr = ", ".join([str(o) for o in value])
            if not answer.pk:
                answer.save()
            else:
                answer.options.clear()
            answer.answer = answstr
            if value:
                answer.options.add(*value)
        elif isinstance(field, forms.ModelChoiceField):
            if not answer.pk:
                answer.save()
            else:
                answer.options.clear()
            if value:
                answer.options.add(value)
                answer.answer = value.answer
            else:
                answer.answer = ""
        elif isinstance(field, forms.FileField):
            if isinstance(value, UploadedFile):
                answer.answer_file.save(value.name, value)
                answer.answer = "file://" + value.name
            value = answer.answer
        else:
            answer.answer = value
| StarcoderdataPython |
145872 | <reponame>pykulytsky/demando
from typing import Type
from fastapi import Depends
from base.database import get_db
from sqlalchemy.orm import Session
from base.manager import BaseManager
class ItemManager(BaseManager):
    """Manager for item-like models whose ``author``/``owner`` fields are
    given as user primary keys and must be resolved to user instances."""

    def __init__(
        self,
        klass: Type,
        user_model: Type,
        db: Session = Depends(get_db),
    ) -> None:
        self.user_model = user_model
        super().__init__(klass, db=db)

    def create(self, **fields):
        """Create an instance from ``name`` plus optional ``author``/``owner`` pks."""
        payload = {'name': fields['name']}
        for relation in ('author', 'owner'):
            pk = fields.get(relation, False)
            if pk:
                payload[relation] = self.user_model.manager(self.db).get(pk=pk)
        return super().create(disable_check=True, **payload)
class ItemManagerModel():
    """Mixin exposing an ItemManager bound to a database session."""

    @classmethod
    def manager(cls, db):
        # NOTE(review): ItemManager.__init__ takes (klass, user_model, db); this
        # call passes ``db`` positionally into the ``user_model`` slot and leaves
        # ``db`` at its FastAPI default. Looks like a latent bug — confirm.
        return ItemManager(cls, db)
| StarcoderdataPython |
3300019 | import math
import time
from phalski_ledshim import app, client, chart
def value():
t = time.time()
return (math.sin(t) + 1) / 2
if __name__ == '__main__':
a = app.App()
a.configure_worker(0.1, chart.Factory.red_blue_bar_chart_source(a.pixels, value, lambda: 1 - value()))
a.exec()
| StarcoderdataPython |
1637283 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
isoenum_webgui.routes
~~~~~~~~~~~~~~~~~~~~~
Isotopic enumerator web interface routes.
"""
import csv
import io
import json
from flask import render_template
from flask import request
from flask import redirect
from flask import url_for
from flask import jsonify
from flask import flash
from pyexcel.exceptions import FileTypeNotSupported
from isoenum_webgui import app
from isoenum_webgui.forms import FileForm
from isoenum_webgui.proc import generate_table
from isoenum_webgui.proc import generate_nmr
from isoenum_webgui.proc import update_record
from isoenum_webgui.proc import create_initial_record
from isoenum_webgui.proc import create_empty_record
from . import RECORDS
from . import HEADER
from . import CSV_HEADER
from . import NMR_TYPES
from . import EXAMPLE_PROJECT
@app.route("/", methods=["GET", "POST"])
@app.route("/home", methods=["GET", "POST"])
def home():
"""Home."""
file_input_form = FileForm()
if file_input_form.validate_on_submit():
if RECORDS:
RECORDS.clear()
try:
json_str = request.files["file"].read()
RECORDS.update(json.loads(json_str))
except json.decoder.JSONDecodeError:
try:
table_data = request.get_array(field_name="file")
if len(table_data[0]) == 1 and table_data[0][0].lower() != "base identifier":
table_header = ["Base Identifier"]
else:
table_header = table_data.pop(0)
for table_row in table_data:
record = create_initial_record(header=table_header, row=table_row)
RECORDS[record["record_id"]] = record
for i in generate_table(records=RECORDS):
progress_percentage = "{0:.0%}".format(i / len(RECORDS))
except FileTypeNotSupported:
flash("Invalid file", "danger")
return render_template("home.html", file_input_form=file_input_form)
except Exception:
flash("Invalid file", "danger")
return render_template("home.html", file_input_form=file_input_form)
return redirect(url_for("table"))
return render_template("home.html", file_input_form=file_input_form)
@app.route("/example", methods=["GET", "POST"])
def example_project():
"""Example project."""
if RECORDS:
RECORDS.clear()
RECORDS.update(EXAMPLE_PROJECT)
return redirect(url_for("table"))
@app.route("/table", methods=["GET", "POST"])
def table():
"""Display base and representative InChI."""
if request.method == "POST" and request.form.get("nmr-inchi-table-data"):
nmr_experiment_type = request.form.get("select-nmr-experiment")
generate_nmr(
nmr_experiment_type=nmr_experiment_type, records=RECORDS
)
return redirect(url_for("nmrtable", nmr_type=nmr_experiment_type))
return render_template("table.html", table_header=HEADER, table_data=RECORDS)
@app.route("/nmrtable", methods=["GET", "POST"])
def nmrtable():
"""Display NMR-specific InChI."""
nmr_experiment_type = request.args.get("nmr_type", "1D-1H")
nmr_experiment_type = NMR_TYPES[nmr_experiment_type]
return render_template(
"nmrtable.html",
table_header=HEADER,
table_data=RECORDS,
nmr_experiment_type=nmr_experiment_type)
@app.route("/update_record", methods=["POST"])
def update():
"""Update record."""
record_id = request.form.get("record_id", "")
new_record = update_record(record=request.form)
RECORDS[record_id].update(new_record)
return jsonify(RECORDS[record_id])
@app.route("/add_record", methods=["POST"])
def add():
"""Add empty record."""
record = create_empty_record()
RECORDS[record["record_id"]] = record
return jsonify({"record_id": record["record_id"]})
@app.route("/remove_record", methods=["POST"])
def remove():
"""Remove record."""
record_id = request.form.get("record_id", "")
if record_id:
try:
RECORDS.pop(record_id)
except KeyError:
raise KeyError
else:
raise KeyError
return jsonify({"record_id": record_id, "success": True})
@app.route("/molfile/<record_id>/<record_type>", methods=["GET"])
def display_molfile(record_id, record_type):
"""Display Molfile."""
if record_type == "repr":
svg = RECORDS[record_id]["Repr SVG"]
molfile = RECORDS[record_id]["Repr Molfile"]
elif record_type == "base":
svg = RECORDS[record_id]["Base SVG"]
molfile = RECORDS[record_id]["Base Molfile"]
else:
raise ValueError("Unknown record type")
return render_template("molfile.html", svg=svg, molfile=molfile)
@app.route("/export_json", methods=["GET"])
def export_json():
"""Export as JSON."""
response = app.response_class(
response=json.dumps(RECORDS, indent=4),
status=201,
mimetype="application/json",
headers={"Content-Disposition": "attachment;filename=records.json"},
)
return response
@app.route("/export_csv", methods=["GET"])
def export_csv():
"""Export as CSV."""
textio = io.StringIO()
csv_writer = csv.writer(textio)
header = CSV_HEADER["table"]
csv_writer.writerow(header)
for record in RECORDS.values():
row = [record[title] for title in header]
csv_writer.writerow(row)
response = app.response_class(
response=textio.getvalue(),
status=201,
mimetype="text/csv",
headers={"Content-Disposition": "attachment;filename=records.csv"},
)
return response
@app.route("/export_nmr_csv", methods=["GET", "POST"])
def export_nmr_csv():
"""Export NMR-specific InChI as CSV."""
nmr_experiment_type = request.args.get("nmr_type")
selected_rows = request.form.get("nmr-csv-data")
if selected_rows:
row_ids = selected_rows.split(",")
textio = io.StringIO()
csv_writer = csv.writer(textio)
header = CSV_HEADER["nmrtable"]
csv_writer.writerow(header)
for row_id in row_ids:
record_id, _ = row_id.split("_")
record_name = RECORDS[record_id]["Name"]
base_inchi = RECORDS[record_id]["Base Identifier"]
repr_inchi = RECORDS[record_id]["Repr Identifier"]
descr = " + ".join(RECORDS[record_id]["NMR"][nmr_experiment_type][repr_inchi][row_id]["descr"])
nmr_inchi = RECORDS[record_id]["NMR"][nmr_experiment_type][repr_inchi][row_id]["inchi"]
row = [record_name, base_inchi, repr_inchi, descr, nmr_inchi]
csv_writer.writerow(row)
response = app.response_class(
response=textio.getvalue(),
status=201,
mimetype="text/csv",
headers={"Content-Disposition": "attachment;filename=records.csv"},
)
return response
else:
flash("Please select NMR-specific InChI", "danger")
return redirect(url_for("nmrtable", nmr_type=nmr_experiment_type))
@app.route("/about")
def about():
"""About page."""
return render_template("about.html")
@app.after_request
def after_request(response):
    """Disable client caching on every response so edits are always visible."""
    response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate, max-age=0"
    return response
| StarcoderdataPython |
1759723 | <filename>2020/cassava-leaf-disease-classification/util/metrics.py
import numpy as np
from sklearn.metrics import accuracy_score
#from sklearn.metrics import f1_score
def get_cate_acc(true: list, pred: list, logger=None):
    """Per-class (categorical) accuracy.

    Returns a float array of shape (max(true) + 1,) where entry ``c`` is the
    accuracy over samples whose true label is ``c``; labels that never occur
    in ``true`` get 0.0. Returns 0.0 when the inputs differ in length
    (preserving the original contract).

    Fixes: the per-class arrays were previously sized by ``len(set(true))``,
    which raised IndexError for non-contiguous label sets (e.g. {0, 2}), and
    absent classes caused 0/0 divisions.
    """
    if len(true) != len(pred):
        return 0.0
    true_arr = np.asarray(true)
    pred_arr = np.asarray(pred)
    # Size by the largest label value so any non-negative label indexes safely.
    num_labels = int(true_arr.max()) + 1 if true_arr.size else 0
    acc_d = np.bincount(true_arr, minlength=num_labels).astype(float)
    acc_n = np.bincount(true_arr[true_arr == pred_arr], minlength=num_labels).astype(float)
    # Avoid 0/0 for labels absent from `true`: report 0.0 for them.
    cate_acc = np.divide(acc_n, acc_d, out=np.zeros_like(acc_n), where=acc_d > 0)
    if logger:
        logger.debug(f'cate_acc : {cate_acc}')
    return cate_acc
def get_acc(true: list, pred: list, logger=None):
    """Overall prediction accuracy, optionally logged at debug level."""
    score = accuracy_score(true, pred)
    if logger:
        logger.debug(f'acc : {score}')
    return score
# return f1_score(true, pred, average='macro')
if __name__ == "__main__":
true = [0,1,1,1,1,1]
pred = [0,1,2,1,1,3]
acc = get_acc_score(true, pred)
print(acc)
#f1 = get_f1_score(true, pred)
#print(f1)
| StarcoderdataPython |
# Train a small grayscale CNN to classify frames into two action classes.
import keras
from keras.models import load_model, Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPool2D
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn import preprocessing
import cv2
# Fixed seed for reproducible numpy-side shuffling/initialization.
np.random.seed(3)
X = []  # NOTE: rebound to the image ndarray below; this initial list is unused.
y = []
# One label per line; line i corresponds to frame_i.jpg.
with open('actions.csv', 'r') as f:
    for line in f:
        y.append(line.rstrip())
all_images = []
img_num = 0
# Load all 3711 frames as grayscale, downscale to 30%, add a channel axis.
while img_num < 3711:
    img = cv2.imread(r'./dataset/frame_{0}.jpg'.format(img_num), cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img, (0, 0), fx=0.3, fy=0.3)
    img = img[:, :, np.newaxis]
    all_images.append(img)
    img_num += 1
X = np.array(all_images)
print(X[0].shape)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=5)
# Input dimensions after the 0.3x resize.
# NOTE(review): assumes every source frame scales to exactly 90x498 — confirm
# against the printed shape above.
img_x, img_y = 90, 498
input_shape = (img_x, img_y, 1)
classifications = 2
# One-hot encode the string labels (must be "0"/"1") for softmax output.
y_train = keras.utils.to_categorical(y_train, classifications)
y_test = keras.utils.to_categorical(y_test, classifications)
model = Sequential()
model.add(Conv2D(100, kernel_size=(2, 2), strides=(2, 2), activation='relu', input_shape=input_shape))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(250, activation='relu'))
model.add(Dense(classifications, activation='softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# TensorBoard logs go to ./Graph for later inspection.
tbCallback = keras.callbacks.TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)
model.fit(X_train, y_train, batch_size=250, epochs=13, validation_data=(X_test, y_test), callbacks=[tbCallback])
model.save('model.h5')
4806551 | import itertools
import os
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Literal
from typing import Optional
from typing import Union, List, Callable, Tuple, Dict, Any, NamedTuple
import fitz
import pdfminer
import unicodedata
from pdfminer.high_level import extract_pages
from pdfminer.image import ImageWriter
from pdfminer.layout import LTChar
from pydantic import confloat
from pdfscraper.utils import (
group_objs_y,
get_leftmost,
get_rightmost,
get_topmost,
get_bottommost,
)
ImageSource = Literal["pdfminer", "mupdf"]
@dataclass
class Color:
    """RGB color with channels in [0, 1]; equality compares rounded channels."""

    r: confloat(ge=0, le=1)
    g: confloat(ge=0, le=1)
    b: confloat(ge=0, le=1)

    def __eq__(self, other, decimals=1):
        # Two colors match when every channel agrees after rounding.
        return (
            round(self.r, decimals) == round(other.r, decimals)
            and round(self.g, decimals) == round(other.g, decimals)
            and round(self.b, decimals) == round(other.b, decimals)
        )
@dataclass
class Point:
    """A 2-D point in page coordinates."""

    x: float
    y: float
class Bbox(NamedTuple):
    """Axis-aligned bounding box as (x0, y0, x1, y1).

    Equality is approximate: coordinates are compared after rounding (see
    ``__eq__``), which makes boxes from different PDF backends comparable.
    """

    x0: float
    y0: float
    x1: float
    y1: float

    def __str__(self):
        return (
            f"Bbox(x0={self.x0:.2f},y0={self.y0:.2f},x1={self.x1:.2f},y1={self.y1:.2f})"
        )

    def __eq__(self, other, decimals=1, n=4):
        """Compare the first *n* coordinates rounded to *decimals* places."""
        return [round(i, ndigits=decimals) for i in self[:n]] == [
            round(i, ndigits=decimals) for i in other[:n]
        ]

    @property
    def height(self):
        return abs(self.y0 - self.y1)

    @property
    def width(self):
        return abs(self.x0 - self.x1)

    @classmethod
    def from_coords(cls, coords, invert_y=False, page_height=None):
        """Build a Bbox from a 4-sequence, optionally flipping the y-axis.

        Fix: ``coords`` is now unpacked unconditionally; previously the
        unpacking only happened under ``invert_y``, so calls with
        ``invert_y=False`` raised NameError.
        """
        x0, y0, x1, y1 = coords
        if invert_y:
            y0, y1 = page_height - y1, page_height - y0
        return cls(x0, y0, x1, y1)
class Word:
    """A single word of text plus its bounding box and font metadata."""

    __slots__ = ("text", "bbox", "font", "size", "color")

    def __init__(
        self,
        text: str = "",
        bbox: Union[Bbox, List[int], Tuple[int]] = None,
        font: str = "",
        size: str = "",
        color=None,
        normalize_text=False,
    ):
        if normalize_text:
            # Turn soft hyphens into plain dashes, then compatibility-normalize.
            text = unicodedata.normalize("NFKD", text.replace("\xad", "-"))
        self.text = text
        self.bbox = bbox
        self.font = font
        self.size = size
        self.color = color

    def __repr__(self):
        return f'Word(text="{self.text}",bbox={self.bbox})'

    def __eq__(self, other):
        return (self.text, self.bbox) == (other.text, other.bbox)

    def __str__(self):
        return self.text
class Span:
    """A run of Word objects sharing one bounding box; its text is the words
    concatenated without separators (space runs are stored as Words too)."""

    __slots__ = ("words", "bbox")

    def __init__(self, words: List[Word] = None, bbox: Bbox = None):
        self.words = words
        self.bbox = bbox

    @property
    def text(self):
        return "".join(word.text for word in self.words)

    def __repr__(self):
        return "Span <%s> %s" % ([round(i) for i in self.bbox], self.words)
class Line:
    """A visual line of text composed of spans; its text joins the span texts
    with single spaces."""

    __slots__ = ("bbox", "spans")

    def __init__(self, bbox: List[float], spans):
        self.bbox = bbox
        self.spans = spans

    def __repr__(self):
        return "Line: %s" % self.spans

    @property
    def text(self):
        return " ".join(span.text for span in self.spans)
class Block:
    """A paragraph-level group: a bounding box plus the lines inside it."""

    __slots__ = ("bbox", "lines")

    def __init__(self, bbox: Bbox, lines):
        self.bbox = bbox
        self.lines = lines

    def __repr__(self):
        return "Block: %s" % self.lines
@dataclass
class Drawing:
    """Base record for a vector drawing: bounding box plus fill/stroke flags
    and their colors (None when the respective operation is absent)."""

    bbox: Bbox
    fill_color: Optional[Color]
    stroke_color: Optional[Color]
    fill: bool
    stroke: bool
    # height: PositiveFloat
    # width: PositiveFloat
@dataclass
class RectShape(Drawing):
    """Rectangle drawing; the processors currently pass ``points=None``."""

    points: Optional[Tuple[Point, Point, Point, Point]]
@dataclass
class LineShape(Drawing):
    """Straight-line drawing; the processors currently pass ``points=None``."""

    points: Optional[Tuple[Point, Point]]
@dataclass
class CurveShape(Drawing):
    """Curve/path drawing; the processors currently pass ``points=None``."""

    points: Optional[Tuple[Point, Point, Point, Point]]
from pdfminer.layout import LTRect, LTLine, LTCurve
def get_pts(drawing: Dict):
    """Collect the points of every path item in a MuPDF drawing dict.

    Rect entries contribute their bottom-left and bottom-right corners;
    every other entry is taken as a point itself.
    """
    points = []
    for item in drawing["items"]:
        for part in item[1:]:
            if isinstance(part, fitz.fitz.Rect):
                points.extend((part.bl, part.br))
            else:
                points.append(part)
    return points
def process_pdfminer_drawing(drawing: Union[LTRect, LTLine, LTCurve], orientation):
    """Convert a pdfminer curve object into the matching Drawing dataclass.

    :param drawing: pdfminer LTRect/LTLine/LTCurve instance
    :param orientation: page y-axis convention used to normalize the bbox
    """
    fill = drawing.fill
    fill_color = Color(*drawing.non_stroking_color) if fill else None
    stroke = drawing.stroke
    stroke_color = Color(*drawing.stroking_color) if stroke else None
    # pdfminer has bottom as y-zero
    if orientation.bottom_is_zero:
        bbox = Bbox(*drawing.bbox)
    else:
        bbox = Bbox.from_coords(
            coords=drawing.bbox, invert_y=True, page_height=orientation.page_height
        )
    pts = None  # drawing.pts
    args = {
        "fill": fill,
        "fill_color": fill_color,
        "stroke": stroke,
        "stroke_color": stroke_color,
        "bbox": bbox,
        "points": pts,
    }
    # Order matters: LTRect and LTLine are subclasses of LTCurve, so the most
    # specific checks come first.
    if isinstance(drawing, LTRect):
        return RectShape(**args)
    elif isinstance(drawing, LTLine):
        return LineShape(**args)
    elif isinstance(drawing, LTCurve):
        return CurveShape(**args)
def process_mupdf_drawing(drawing: Dict, orientation):
    """Convert a PyMuPDF ``page.get_drawings()`` dict into a Drawing dataclass.

    Classifies by the drawing commands: a single "l" is a line, a single "re"
    a rectangle, anything else a curve/path.
    """
    items = drawing["items"]
    # Type string contains "f" for fill and/or "s" for stroke.
    fill = "f" in drawing["type"]
    fill_color = Color(*drawing["fill"]) if fill else None
    stroke = "s" in drawing["type"]
    stroke_color = Color(*drawing["color"]) if stroke else None
    # mupdf has top as y-zero
    if orientation.bottom_is_zero:
        bbox = Bbox.from_coords(
            coords=drawing["rect"], invert_y=True, page_height=orientation.page_height
        )
    else:
        bbox = Bbox(*drawing["rect"])
    pts = None  # get_pts(drawing)
    args = {
        "fill": fill,
        "fill_color": fill_color,
        "stroke": stroke,
        "stroke_color": stroke_color,
        "bbox": bbox,
        "points": pts,
    }
    drawing_commands = [item[0] for item in items]
    if len(drawing_commands) == 1:
        if drawing_commands[0] == "l":
            return LineShape(**args)
        if drawing_commands[0] == "re":
            return RectShape(**args)
        else:
            return CurveShape(**args)
    else:
        return CurveShape(**args)
@dataclass
class PageVerticalOrientation:
    """Vertical-axis convention for a page: whether y=0 is at the bottom,
    plus the page height used when coordinates need to be flipped."""

    bottom_is_zero: bool
    page_height: float
@contextmanager
def attr_as(obj, field: str, value) -> None:
    """Temporarily set ``obj.field`` to ``value``, restoring the old value on exit.

    Fix: restoration now happens in a ``finally`` block, so the original
    attribute value is put back even when the managed block raises.
    """
    old_value = getattr(obj, field)
    setattr(obj, field, value)
    try:
        yield
    finally:
        setattr(obj, field, old_value)
@dataclass
class Image:
    """An image placed on a PDF page, normalized across the pdfminer and
    PyMuPDF backends. ``bbox`` is the placement box on the page;
    ``source_width``/``source_height`` are the embedded image's pixel size."""

    bbox: Bbox
    width: float
    height: float
    source_width: float
    source_height: float
    colorspace_name: str
    bpc: int
    xref: int
    name: str
    source: ImageSource
    raw_object: Any = None
    parent_object: Any = None
    colorspace_n: Optional[int] = None

    class Config:
        # NOTE(review): pydantic-style inner Config on a stdlib dataclass —
        # it has no effect here unless pydantic's dataclass is used upstream.
        arbitrary_types_allowed = True

    def _save_pdfminer(self, path: str):
        """Export via pdfminer's ImageWriter, temporarily renaming the stream
        so the output file matches *path*'s basename."""
        path, ext = os.path.splitext(path)
        folder, name = os.path.split(path)
        im = self.raw_object
        with attr_as(im, "name", name):
            return ImageWriter(folder).export_image(im)

    def _save_mupdf(self, path: str):
        """Write the raw extracted image bytes from the parent document."""
        with open(path, "wb") as f:
            f.write(self.parent_object.extract_image(self.xref)["image"])

    def save(self, path: str):
        """Save the image to *path*, dispatching on the originating backend."""
        if self.source == "pdfminer":
            self._save_pdfminer(path)
        elif self.source == "mupdf":
            self._save_mupdf(path)

    @classmethod
    def from_pdfminer(
        cls, image: pdfminer.layout.LTImage, orientation: PageVerticalOrientation
    ):
        """Build an Image from a pdfminer LTImage."""
        if orientation.bottom_is_zero:
            bbox = Bbox(*image.bbox)
        else:
            bbox = Bbox.from_coords(
                coords=image.bbox, invert_y=True, page_height=orientation.page_height
            )
        bpc = image.bits
        # The colorspace may be a named object or an indirect reference that
        # must be resolved to find a named entry.
        if hasattr(image.colorspace[0], "name"):
            colorspace_name = image.colorspace[0].name
        else:
            objs = image.colorspace[0].resolve()
            colorspaces = [i for i in objs if hasattr(i, "name")]
            colorspace_name = colorspaces[0].name
        name = image.name
        source_width, source_height = image.srcsize
        width, height = image.width, image.height
        xref = image.stream.objid
        return cls(
            bbox=bbox,
            width=width,
            height=height,
            source_width=source_width,
            source_height=source_height,
            colorspace_name=colorspace_name,
            bpc=bpc,
            xref=xref,
            name=name,
            raw_object=image,
            source="pdfminer",
        )

    @classmethod
    def from_mupdf(
        cls, image: Dict, doc: fitz.fitz.Document, orientation: PageVerticalOrientation
    ):
        """Build an Image from a dict produced by get_images_from_mupdf_page."""
        bbox = image.get("bbox")
        if orientation.bottom_is_zero:
            bbox = Bbox.from_coords(
                coords=bbox, invert_y=True, page_height=orientation.page_height
            )
        else:
            bbox = Bbox(*bbox)
        bpc = image.get("bpc")
        colorspace_name = image.get("colorspace_name")
        name = image.get("name")
        source_width, source_height = (
            image.get("source_width"),
            image.get("source_height"),
        )
        # Placement size comes from the bbox; source size from the stream.
        width, height = bbox.width, bbox.height
        xref = image.get("xref")
        return cls(
            bbox=bbox,
            width=width,
            height=height,
            source_width=source_width,
            source_height=source_height,
            colorspace_name=colorspace_name,
            bpc=bpc,
            xref=xref,
            name=name,
            raw_object=image,
            source="mupdf",
            parent_object=doc,
        )
def get_images_from_mupdf_page(page):
    """Yield one metadata dict per image placed on a PyMuPDF page.

    Each dict carries the xref, mask xref, source pixel size, bits per
    component, colorspace name, image name, decode filter, and the
    placement bbox looked up by name.
    """
    for entry in page.get_images():
        (xref, smask, source_width, source_height, bpc,
         colorspace, alt_colorspace, name, decode_filter) = entry
        yield {
            "xref": xref,
            "mask_xref": smask,
            "source_width": source_width,
            "source_height": source_height,
            "bpc": bpc,
            "colorspace_name": colorspace,
            "name": name,
            "decode_filter": decode_filter,
            "bbox": page.get_image_bbox(name),
        }
def process_span_fitz(span: dict, move=None):
    """Convert a PyMuPDF rawdict span into a Span of Words.

    Characters are grouped into runs by whether they are spaces; note that
    space runs become Words too, so ``Span.text`` (which joins without a
    separator) reconstructs the original spacing.

    :param span: one span dict from ``page.get_text("rawdict")``
    :param move: optional offset added to both y-coordinates
    """
    words = [
        list(g)
        for k, g in (
            itertools.groupby(span["chars"], key=lambda x: x["c"] not in (" ", "\xa0"))
        )
    ]
    new_words = []
    coords = []
    for word in words:
        # Word box spans from the first char's top-left to the last char's
        # bottom-right.
        x0, y0 = get_leftmost(word[0]["bbox"]), get_topmost(word[0]["bbox"])
        x1, y1 = get_rightmost(word[-1]["bbox"]), get_bottommost(word[-1]["bbox"])
        if move:
            y0 += move
            y1 += move
        coords.append([x0, y0, x1, y1])
        text = "".join([c["c"] for c in word])
        new_words.append(
            Word(
                **{
                    "text": text,
                    "bbox": Bbox(x0=x0, y0=y0, x1=x1, y1=y1),
                    "font": span["font"],
                    "size": span["size"],
                    "color": span["color"],
                },
                normalize_text=True,
            )
        )
    bbox = get_span_bbox(new_words)
    ret = Span(words=new_words, bbox=bbox)
    return ret
def process_span_pdfminer(
    span: List[LTChar], move: float = None, height: float = 0
) -> Span:
    """
    Convert a list of pdfminer characters into a Span.

    Split a list by space into Words. As in the fitz variant, space runs are
    kept as Words so the span text reconstructs spacing.

    @param span: list of characters
    @param move: add this value to y-coordinates
    @param height: page height (used to flip the y-axis to top-zero)
    """
    words = [
        list(g)
        for k, g in (
            itertools.groupby(span, key=lambda x: x.get_text() not in (" ", "\xa0"))
        )
    ]
    new_words = []
    coords = []
    for word in words:
        # NOTE(review): ``word`` is always a list produced by groupby, never an
        # LTAnno, so this guard appears dead — confirm before removing.
        if type(word) == pdfminer.layout.LTAnno:
            continue
        # reversing y-coordinates: in pdfminer the zero is the bottom of the page
        # make it top
        x0, y0 = word[0].x0, word[0].y1
        x1, y1 = word[-1].x1, word[-1].y0
        if move:
            y0 += move
            y1 += move
        y0 = height - y0
        y1 = height - y1
        coords.append([x0, y0, x1, y1])
        text = "".join([c.get_text() for c in word])
        font = word[0].fontname
        size = word[0].size
        new_words.append(
            Word(
                **{
                    "text": text,
                    "bbox": Bbox(x0=x0, y0=y0, x1=x1, y1=y1),
                    "font": font,
                    "size": size,
                    "color": None,
                },
                normalize_text=True,
            )
        )
    bbox = get_span_bbox(new_words)
    ret = Span(words=new_words, bbox=bbox)
    return ret
def get_image(layout_object):
    """Depth-first search for the first LTImage inside a pdfminer layout object.

    Returns the image, or None when the subtree holds no image.

    Fix: the loop previously returned after recursing into the *first* child
    only, so images located in any later child were never found.
    """
    if isinstance(layout_object, pdfminer.layout.LTImage):
        return layout_object
    if isinstance(layout_object, pdfminer.layout.LTContainer):
        for child in layout_object:
            found = get_image(child)
            if found is not None:
                return found
    return None
class Page:
    """A parsed PDF page: its words, vector drawings, and images, all in
    top-zero page coordinates, plus the backend object they came from."""

    def __init__(self, words, drawings, images, raw_object):
        self.words = words
        self.drawings = drawings
        self.images = images
        self.raw_object = raw_object

    def __repr__(self):
        return "Page: %s" % "".join([repr(i) + "\n" for i in self.words])

    def select(self, condition: Callable):
        """
        Return a new Page holding only the content matching ``condition``.

        Fix: the filtered Page is now constructed with all required
        arguments; previously ``images`` and ``raw_object`` were omitted,
        so every call raised TypeError. Images are filtered with the same
        condition for consistency with words and drawings.
        """
        words = [i for i in self.words if condition(i)]
        drawings = [i for i in self.drawings if condition(i)]
        images = [i for i in self.images if condition(i)]
        return Page(
            words=words, drawings=drawings, images=images, raw_object=self.raw_object
        )

    @property
    def sorted(self) -> "List[List[Word]]":
        """Words grouped into visual lines by their y-coordinate."""
        return group_objs_y(self.words)

    @classmethod
    def from_mupdf(cls, page: "fitz.fitz.Page"):
        """Build a Page from a PyMuPDF page."""
        blocks = page.get_text("rawdict", flags=3)["blocks"]
        # Convert raw spans -> Span, then raw lines -> Line, raw blocks -> Block.
        for block in blocks:
            for line in block["lines"]:
                for j, span in enumerate(line["spans"]):
                    line["spans"][j] = process_span_fitz(span)
        for block in blocks:
            for k, line in enumerate(block["lines"]):
                block["lines"][k] = Line(bbox=(line["bbox"]), spans=line["spans"])
        for n, block in enumerate(blocks):
            blocks[n] = Block(bbox=(block["bbox"]), lines=block["lines"])
        drawings = sorted(page.get_drawings(), key=lambda x: x["rect"][1])
        orientation = PageVerticalOrientation(
            bottom_is_zero=False, page_height=Bbox(*page.rect).height
        )
        drawings = [process_mupdf_drawing(i, orientation) for i in drawings]
        words = [
            word
            for block in blocks
            for line in block.lines
            for span in line.spans
            for word in span.words
        ]
        drawings = sorted(drawings, key=get_topmost)
        images = get_images_from_mupdf_page(page)
        images = [Image.from_mupdf(image, page.parent, orientation) for image in images]
        return Page(words=words, drawings=drawings, images=images, raw_object=page)

    @classmethod
    def from_pdfminer(cls, page: "pdfminer.layout.LTPage") -> "Page":
        """Build a Page from a pdfminer LTPage."""
        blocks = []
        text_boxes = [i for i in page if hasattr(i, "get_text")]
        for text_box in text_boxes:
            # get text lines
            lines = [
                text_line for text_line in text_box if hasattr(text_line, "get_text")
            ]
            # convert lines into spans
            lines = [
                process_span_pdfminer(
                    [i for i in line if type(i) != pdfminer.layout.LTAnno],
                    height=page.height,
                )
                for line in lines
            ]
            # make a block out of spans
            blocks.append(Block(bbox=Bbox(*text_box.bbox), lines=lines))
        words = [
            word for block in blocks for line in block.lines for word in line.words
        ]
        drawings = [i for i in page if issubclass(type(i), pdfminer.layout.LTCurve)]
        orientation = PageVerticalOrientation(
            bottom_is_zero=False, page_height=page.height
        )
        drawings = [process_pdfminer_drawing(i, orientation) for i in drawings]
        drawings = sorted(drawings, key=get_topmost)
        images = filter(bool, map(get_image, page))
        images = [Image.from_pdfminer(image, orientation) for image in images]
        return Page(words=words, images=images, drawings=drawings, raw_object=page)
def get_span_bbox(span: List) -> Bbox:
    """Return the Bbox covering every word in *span*.

    The minimum over all x/y coordinates becomes the left/top edge and the
    maximum the right/bottom edge, regardless of each box's own orientation.
    """
    boxes = [word.bbox for word in span]
    x_values = [v for box in boxes for v in (box.x0, box.x1)]
    y_values = [v for box in boxes for v in (box.y0, box.y1)]
    return Bbox(
        x0=min(x_values), y0=min(y_values), x1=max(x_values), y1=max(y_values)
    )
def line2str(line: List[Word]) -> str:
    """Render a line of words as space-separated text."""
    return " ".join(str(word) for word in line)
#
# path = r"C:\projects\test2.pdf"
# doc = fitz.open(path)
# fitz_page = doc[0]
#
# pdfminer_page = list(extract_pages(path))[0]
# print(
# list(
# zip(
# Page.from_mupdf(fitz_page).drawings,
# Page.from_pdfminer(pdfminer_page).drawings,
# )
# )
# )
| StarcoderdataPython |
3329473 | <reponame>torbjornvatn/powerline-shell
from subprocess import Popen, PIPE
from shlex import split
def add_gcloud_segment(powerline):
    """Append the active gcloud project's third dash-separated token to the prompt.

    Shells out to ``gcloud config list`` and extracts the third ``-``-separated
    component of the configured project name (e.g. ``my-team-prod`` -> ``prod``).
    Silently does nothing if the command cannot be started.
    """
    try:
        cmd = "gcloud config list| grep project | tr '=' ' ' | tr -s ' ' ' ' | cut -d' ' -f2 | cut -d'-' -f3 | tr -d '\n'"
        # Pass the command string directly; the previous "'%s' % cmd" was a no-op.
        output = Popen(cmd, stdout=PIPE, shell=True).communicate()[0]
    except OSError:
        # gcloud (or a shell) is not available; skip the segment entirely.
        return
    # NOTE(review): `Color` is injected into this module by powerline-shell at
    # runtime — confirm before refactoring. Also, on Python 3 `output` is bytes
    # and `output + ' - '` would raise TypeError; verify the target interpreter.
    powerline.append(output + ' - ', Color.JOBS_FG, Color.JOBS_BG)
| StarcoderdataPython |
3339211 | """
CoaT architecture.
Paper: Co-Scale Conv-Attentional Image Transformers - https://arxiv.org/abs/2104.06399
Official CoaT code at: https://github.com/mlpc-ucsd/CoaT
Modified from timm/models/vision_transformer.py
"""
from copy import deepcopy
from functools import partial
from typing import Tuple, List
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
# Public factory functions exported by this module (only the CoaT-Lite
# variants are exposed; the non-Lite configs below are kept for reference).
__all__ = [
    "coat_lite_tiny",
    "coat_lite_mini",
    "coat_lite_small"
]
def _cfg_coat(url='', **kwargs):
    """Build the default pretrained-weights config dict for a CoaT variant.

    Any keyword argument overrides the corresponding default entry.
    """
    cfg = {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'patch_embed1.proj', 'classifier': 'head',
    }
    cfg.update(kwargs)
    return cfg
# Pretrained checkpoint registry, keyed by variant name. The URLs point to the
# timm release assets; `_create_coat` looks configs up here by variant.
default_cfgs = {
    'coat_tiny': _cfg_coat(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_tiny-473c2a20.pth'
    ),
    'coat_mini': _cfg_coat(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_mini-2c6baf49.pth'
    ),
    'coat_lite_tiny': _cfg_coat(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_lite_tiny-461b07a7.pth'
    ),
    'coat_lite_mini': _cfg_coat(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_lite_mini-d7842000.pth'
    ),
    'coat_lite_small': _cfg_coat(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_lite_small-fea1d5a1.pth'
    ),
}
def masked_sin_pos_encoding(x, mask, num_pos_feats, temperature=10000, scale=2 * math.pi):
    """ Masked Sinusoidal Positional Encoding
    Parameters:
        x: [PATCH] tokens (only consulted for its device)
        mask: boolean padding mask for [PATCH] tokens; True marks padded cells
        num_pos_feats: channel dimension of the returned encoding
        temperature: frequency temperature
        scale: normalization scale for the cumulative positions
    Returns:
        pos: sinusoidal positional encodings of shape [B, H, W, num_pos_feats]
    """
    half = num_pos_feats // 2
    valid = ~mask
    # Cumulative counts of valid cells give normalized row/column positions.
    row_pos = valid.cumsum(1, dtype=torch.float32)
    col_pos = valid.cumsum(2, dtype=torch.float32)
    eps = 1e-6
    row_pos = row_pos / (row_pos[:, -1:, :] + eps) * scale
    col_pos = col_pos / (col_pos[:, :, -1:] + eps) * scale
    # Geometric frequency ladder, paired so sin/cos share a frequency.
    freq = torch.arange(half, dtype=torch.float32, device=x.device)
    freq = temperature ** (2 * (freq // 2) / half)
    col_angles = col_pos[:, :, :, None] / freq
    row_angles = row_pos[:, :, :, None] / freq

    def interleave(angles):
        # Interleave sin on even channels with cos on odd channels.
        return torch.stack(
            (angles[:, :, :, 0::2].sin(), angles[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)

    return torch.cat((interleave(row_angles), interleave(col_angles)), dim=3)
class Mlp(nn.Module):
    """Two-layer feed-forward block (Linear -> activation -> drop -> Linear -> drop),
    as used in Vision Transformer, MLP-Mixer and related networks."""
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Hidden/output widths default to the input width when not given.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # Apply the pipeline in order; dropout is shared between both stages.
        for stage in (self.fc1, self.act, self.drop, self.fc2, self.drop):
            x = stage(x)
        return x
class ConvRelPosEnc(nn.Module):
    """ Convolutional relative position encoding. """
    def __init__(self, Ch, h, window):
        """
        Initialization.
        Ch: Channels per head.
        h: Number of heads.
        window: Window size(s) in convolutional relative positional encoding. It can have two forms:
            1. An integer of window size, which assigns all attention heads with the same window s
                size in ConvRelPosEnc.
            2. A dict mapping window size to #attention head splits (
                e.g. {window size 1: #attention head split 1, window size 2: #attention head split 2})
                It will apply different window size to the attention head splits.
        """
        super().__init__()
        if isinstance(window, int):
            # Set the same window size for all attention heads.
            window = {window: h}
            self.window = window
        elif isinstance(window, dict):
            self.window = window
        else:
            raise ValueError()
        # One depthwise conv per window size; each covers `cur_head_split` heads.
        self.conv_list = nn.ModuleList()
        self.head_splits = []
        for cur_window, cur_head_split in window.items():
            dilation = 1
            # Determine padding size so the spatial size is preserved.
            # Ref: https://discuss.pytorch.org/t/how-to-keep-the-shape-of-input-and-output-same-when-dilation-conv/14338
            padding_size = (cur_window + (cur_window - 1) * (dilation - 1)) // 2
            cur_conv = nn.Conv2d(cur_head_split * Ch, cur_head_split * Ch,
                                 kernel_size=(cur_window, cur_window),
                                 padding=(padding_size, padding_size),
                                 dilation=(dilation, dilation),
                                 groups=cur_head_split * Ch,
                                 )
            self.conv_list.append(cur_conv)
            self.head_splits.append(cur_head_split)
        # Channel counts per split, used to slice the concatenated value map.
        self.channel_splits = [x * Ch for x in self.head_splits]

    def forward(self, q, v, size: Tuple[int, int]):
        """Compute the conv relative-position term; q/v are [B, h, N, Ch] with
        N == 1 + H*W (a leading CLS token that receives a zero encoding)."""
        B, h, N, Ch = q.shape
        H, W = size
        assert N == 1 + H * W
        # Convolutional relative position encoding (CLS token excluded).
        q_img = q[:, :, 1:, :]  # [B, h, H*W, Ch]
        v_img = v[:, :, 1:, :]  # [B, h, H*W, Ch]
        v_img = v_img.transpose(-1, -2).reshape(B, h * Ch, H, W)
        v_img_list = torch.split(v_img, self.channel_splits, dim=1)  # Split according to channels
        conv_v_img_list = []
        for i, conv in enumerate(self.conv_list):
            conv_v_img_list.append(conv(v_img_list[i]))
        conv_v_img = torch.cat(conv_v_img_list, dim=1)
        conv_v_img = conv_v_img.reshape(B, h, Ch, H * W).transpose(-1, -2)
        # Query-gated conv values form the relative-position bias term.
        EV_hat = q_img * conv_v_img
        # Re-insert a zero row for the CLS position.
        EV_hat = F.pad(EV_hat, (0, 0, 1, 0, 0, 0))  # [B, h, N, Ch].
        return EV_hat
class RonfiguredAttentionModule(nn.Module):
    """ Factorized attention with convolutional relative position encoding class.

    Reconfigured Attention Module (RAM; class name kept as-is for checkpoint
    compatibility). [PATCH] tokens use factorized attention + conv relative
    position encoding; [DET] tokens use standard attention, optionally also
    cross-attending over the [PATCH] tokens when `cross_attn` is True.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., shared_crpe=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        # Single projection produces Q, K, V for both token kinds at once.
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)  # Note: attn_drop is actually not used.
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        # Shared convolutional relative position encoding.
        self.crpe = shared_crpe

    def forward(self, x, size: Tuple[int, int], det=None, cross_attn=False, cross_attn_mask=None):
        """When `cross_attn` is False, `x` is the [B, N, C] patch sequence;
        otherwise `x` is a pair (patch sequence, spatial patch map [B, H, W, C])
        used as extra key/value context for the [DET] tokens."""
        # Joint QKV projection over [PATCH] (+ optional cross [PATCH]) and [DET] tokens.
        if not cross_attn:
            B, N, C = x.shape
            x = torch.cat([x, det], dim=1)
            full_qkv = self.qkv(x)
            patch_qkv, det_qkv = full_qkv[:, :N, :], full_qkv[:, N:, :]
        else:
            B, N, C = x[0].shape
            _, ori_H, ori_W, _ = x[1].shape
            ori_N = ori_H * ori_W
            shifted_x = x[0]
            cross_x = x[1].view(B, ori_N, C)
            x = torch.cat([shifted_x, cross_x, det], dim=1)
            full_qkv = self.qkv(x)
            patch_qkv, cross_patch_qkv, det_qkv = \
                full_qkv[:, :N, :], full_qkv[:, N:N + ori_N, :], full_qkv[:, N + ori_N:, :]
        # [PATCH x PATCH] self-attention
        # Generate Q, K, V.
        patch_qkv = patch_qkv.reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        patch_q, patch_k, patch_v = patch_qkv[0], patch_qkv[1], patch_qkv[2]  # [B, h, N, Ch]
        # Factorized attention: softmax over keys first, then two matmuls
        # (linear in N rather than quadratic).
        k_softmax = patch_k.softmax(dim=2)
        factor_att = k_softmax.transpose(-1, -2) @ patch_v
        factor_att = patch_q @ factor_att
        # Convolutional relative position encoding.
        crpe = self.crpe(patch_q, patch_v, size=size)  # [B, h, N, Ch]
        # Merge and reshape.
        patch_x = self.scale * factor_att + crpe
        patch_x = patch_x.transpose(1, 2).reshape(B, N, C)  # [B, h, N, Ch] -> [B, N, h, Ch] -> [B, N, C]
        # extract qkv for [DET] tokens
        det_qkv = det_qkv.view(B, -1, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        det_q, det_k, det_v = det_qkv[0], det_qkv[1], det_qkv[2]
        # if cross-attention is activated
        if cross_attn:
            # reconstruct the spatial form of [PATCH] tokens for global [DET x PATCH] attention
            cross_patch_qkv = cross_patch_qkv.view(B, ori_H, ori_W, 3, self.num_heads, C // self.num_heads)
            patch_kv = cross_patch_qkv[:, :, :, 1:, :, :].permute(3, 0, 4, 1, 2, 5).contiguous()
            patch_kv = patch_kv.view(2, B, self.num_heads, ori_H * ori_W, -1)
            # extract "key and value" of [PATCH] tokens for cross-attention
            cross_patch_k, cross_patch_v = patch_kv[0], patch_kv[1]
            # bind key and value of [PATCH] and [DET] tokens for [DET X [PATCH, DET]] attention
            det_k, det_v = torch.cat([cross_patch_k, det_k], dim=2), torch.cat([cross_patch_v, det_v], dim=2)
        # [DET x DET] self-attention or binded [DET x [PATCH, DET]] attention
        det_q = det_q * self.scale
        det_attn = (det_q @ det_k.transpose(-2, -1))
        # apply cross-attention mask if available (masks padded patch positions)
        if cross_attn_mask is not None:
            det_attn = det_attn + cross_attn_mask
        det_attn = det_attn.softmax(dim=-1)
        det_x = (det_attn @ det_v).transpose(1, 2).reshape(B, -1, C)
        # projection for outputs from multi-head
        x = torch.cat([patch_x.view(B, N, C), det_x], dim=1)
        # Output projection.
        x = self.proj(x)
        x = self.proj_drop(x)
        # decompose after the projection into [PATCH] and [DET] tokens
        patch_x = x[:, :N, :]
        det_x = x[:, N:, :]
        return patch_x, det_x
class ConvPosEnc(nn.Module):
""" Convolutional Position Encoding.
Note: This module is similar to the conditional position encoding in CPVT.
"""
def __init__(self, dim, k=3):
super(ConvPosEnc, self).__init__()
self.proj = nn.Conv2d(dim, dim, k, 1, k // 2, groups=dim)
def forward(self, x, size: Tuple[int, int]):
B, N, C = x.shape
H, W = size
assert N == 1 + H * W
# Extract CLS token and image tokens.
cls_token, img_tokens = x[:, :1], x[:, 1:] # [B, 1, C], [B, H*W, C]
# Depthwise convolution.
feat = img_tokens.transpose(1, 2).view(B, C, H, W)
x = self.proj(feat) + feat
x = x.flatten(2).transpose(1, 2)
# Combine with CLS token.
x = torch.cat((cls_token, x), dim=1)
return x
class SerialBlock(nn.Module):
    """ Serial block class.
    Note: In this implementation, each serial block only contains a conv-attention and a FFN (MLP) module. """
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, shared_cpe=None, shared_crpe=None):
        super().__init__()
        # Conv-Attention.
        self.cpe = shared_cpe
        self.norm1 = norm_layer(dim)
        self.factoratt_crpe = RonfiguredAttentionModule(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, shared_crpe=shared_crpe)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        # MLP.
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x, size: Tuple[int, int],
                # additional inputs for RAM
                pos, cross_attn, cross_attn_mask):
        """Run one attention + MLP step over the concatenated
        [CLS, PATCH..., DET...] sequence `x`.

        NOTE: `self.det_token_num` and `self.det_pos_linear` are attached to
        each block externally by CoaT.finetune_det() before any forward call.
        """
        B, L, C = x.shape
        shortcut = x
        H, W = size
        x = self.norm1(x)
        # Split the sequence into [CLS+PATCH] tokens and trailing [DET] tokens.
        x, det = x[:, :-self.det_token_num, :], x[:, -self.det_token_num:, :]
        orig_x = x[:, 1:, :].view(B, H, W, C)
        # projects det positional encoding: make the channel size suitable for the current layer
        patch_pos, det_pos = pos
        det_pos = self.det_pos_linear(det_pos)
        # prepare cross-attn and add positional encodings
        if cross_attn:
            # patch token (for cross-attention) + Sinusoidal pos encoding
            cross_x = orig_x + patch_pos
            # det token + learnable pos encoding
            det = det + det_pos
            x = (self.cpe(x, size), cross_x)  # (x, cross_x)
        else:
            # if cross_attn is deactivated, only [PATCH] and [DET] self-attention are performed
            det = det + det_pos
            x = self.cpe(x, size)
        # Reconfigured Conv-Attention (RAM)
        x, det = self.factoratt_crpe(x, size,
                                     # additional parameters
                                     det=det,
                                     cross_attn=cross_attn,
                                     cross_attn_mask=cross_attn_mask)
        # Re-join, then apply the residual and the MLP residual.
        x = torch.cat([x, det], dim=1)
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding
    Parameters:
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """
    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        patch_size = to_2tuple(patch_size)
        self.patch_size = patch_size
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        # Non-overlapping conv projection: one output pixel per patch.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer is not None else None

    def forward(self, x):
        """Project an image batch to a patch-feature map, zero-padding H and W
        up to multiples of the patch size first."""
        _, _, height, width = x.size()
        pad_w = (-width) % self.patch_size[1]
        pad_h = (-height) % self.patch_size[0]
        if pad_w:
            x = F.pad(x, (0, pad_w))
        if pad_h:
            x = F.pad(x, (0, 0, 0, pad_h))
        x = self.proj(x)  # B C Wh Ww
        if self.norm is not None:
            # LayerNorm operates over channels, so flatten to tokens and back.
            grid_h, grid_w = x.size(2), x.size(3)
            x = self.norm(x.flatten(2).transpose(1, 2))
            x = x.transpose(1, 2).view(-1, self.embed_dim, grid_h, grid_w)
        return x
class CoaT(nn.Module):
    """ CoaT class.

    CoaT-Lite backbone extended for ViDT: after construction, call
    finetune_det() to attach the [DET] tokens and per-stage machinery that
    forward() relies on.
    """
    def __init__(
            self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=(0, 0, 0, 0),
            serial_depths=(0, 0, 0, 0), parallel_depth=0, num_heads=0, mlp_ratios=(0, 0, 0, 0), qkv_bias=True,
            drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6),
            return_interm_layers=True, out_features=None, crpe_window=None, **kwargs):
        super().__init__()
        crpe_window = crpe_window or {3: 2, 5: 3, 7: 3}
        self.return_interm_layers = return_interm_layers
        self.out_features = out_features
        self.embed_dims = embed_dims
        self.num_features = embed_dims[-1]
        self.num_classes = num_classes
        # Patch embeddings (stage 1 downsamples by `patch_size`, later stages by 2).
        img_size = to_2tuple(img_size)
        self.patch_embed1 = PatchEmbed(
            patch_size=patch_size, in_chans=in_chans,
            embed_dim=embed_dims[0], norm_layer=nn.LayerNorm)
        self.patch_embed2 = PatchEmbed(
            patch_size=2, in_chans=embed_dims[0],
            embed_dim=embed_dims[1], norm_layer=nn.LayerNorm)
        self.patch_embed3 = PatchEmbed(
            patch_size=2, in_chans=embed_dims[1],
            embed_dim=embed_dims[2], norm_layer=nn.LayerNorm)
        self.patch_embed4 = PatchEmbed(
            patch_size=2, in_chans=embed_dims[2],
            embed_dim=embed_dims[3], norm_layer=nn.LayerNorm)
        # Class tokens (one per stage).
        self.cls_token1 = nn.Parameter(torch.zeros(1, 1, embed_dims[0]))
        self.cls_token2 = nn.Parameter(torch.zeros(1, 1, embed_dims[1]))
        self.cls_token3 = nn.Parameter(torch.zeros(1, 1, embed_dims[2]))
        self.cls_token4 = nn.Parameter(torch.zeros(1, 1, embed_dims[3]))
        # Convolutional position encodings.
        self.cpe1 = ConvPosEnc(dim=embed_dims[0], k=3)
        self.cpe2 = ConvPosEnc(dim=embed_dims[1], k=3)
        self.cpe3 = ConvPosEnc(dim=embed_dims[2], k=3)
        self.cpe4 = ConvPosEnc(dim=embed_dims[3], k=3)
        # Convolutional relative position encodings.
        self.crpe1 = ConvRelPosEnc(Ch=embed_dims[0] // num_heads, h=num_heads, window=crpe_window)
        self.crpe2 = ConvRelPosEnc(Ch=embed_dims[1] // num_heads, h=num_heads, window=crpe_window)
        self.crpe3 = ConvRelPosEnc(Ch=embed_dims[2] // num_heads, h=num_heads, window=crpe_window)
        self.crpe4 = ConvRelPosEnc(Ch=embed_dims[3] // num_heads, h=num_heads, window=crpe_window)
        # Disable stochastic depth.
        dpr = drop_path_rate
        assert dpr == 0.0
        # Serial blocks 1.
        self.serial_blocks1 = nn.ModuleList([
            SerialBlock(
                dim=embed_dims[0], num_heads=num_heads, mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer,
                shared_cpe=self.cpe1, shared_crpe=self.crpe1
            )
            for _ in range(serial_depths[0])]
        )
        # Serial blocks 2.
        self.serial_blocks2 = nn.ModuleList([
            SerialBlock(
                dim=embed_dims[1], num_heads=num_heads, mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer,
                shared_cpe=self.cpe2, shared_crpe=self.crpe2
            )
            for _ in range(serial_depths[1])]
        )
        # Serial blocks 3.
        self.serial_blocks3 = nn.ModuleList([
            SerialBlock(
                dim=embed_dims[2], num_heads=num_heads, mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer,
                shared_cpe=self.cpe3, shared_crpe=self.crpe3
            )
            for _ in range(serial_depths[2])]
        )
        # Serial blocks 4.
        self.serial_blocks4 = nn.ModuleList([
            SerialBlock(
                dim=embed_dims[3], num_heads=num_heads, mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer,
                shared_cpe=self.cpe4, shared_crpe=self.crpe4
            )
            for _ in range(serial_depths[3])]
        )
        # Classification head(s).
        self.norm2 = self.norm3 =self.norm4 = None
        if not self.return_interm_layers:
            self.norm2 = self.norm3 = None
            self.norm4 = norm_layer(embed_dims[3])
        # CoaT-Lite series: Use feature of last scale for classification.
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
        # Initialize weights.
        trunc_normal_(self.cls_token1, std=.02)
        trunc_normal_(self.cls_token2, std=.02)
        trunc_normal_(self.cls_token3, std=.02)
        trunc_normal_(self.cls_token4, std=.02)
        self.apply(self._init_weights)
        # Plain-list handles for per-stage access (not registered as submodules;
        # the attributes above already own the parameters).
        self.stages = [self.serial_blocks1, self.serial_blocks2, self.serial_blocks3, self.serial_blocks4]
        self.patch_embeds = [self.patch_embed1, self.patch_embed2, self.patch_embed3, self.patch_embed4]
        self.cls_tokens = [self.cls_token1, self.cls_token2, self.cls_token3, self.cls_token4]

    def _init_weights(self, m):
        """Truncated-normal init for Linear layers, unit LayerNorm; applied recursively."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameters excluded from weight decay by the optimizer factory.
        return {'cls_token1', 'cls_token2', 'cls_token3', 'cls_token4', 'det_pos_embed', 'det_token'}

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        """Replace the classification head for a new number of classes."""
        self.num_classes = num_classes
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()

    def insert_cls(self, x, cls_token):
        """ Insert CLS token. """
        cls_tokens = cls_token.expand(x.shape[0], -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        return x

    def remove_cls(self, x):
        """ Remove CLS token. """
        return x[:, 1:, :]

    # added for ViDT with Coat
    def finetune_det(self, method, det_token_num=100, pos_dim=256, cross_indices=[3]):
        """ A function to add the necessary (learnable) variables to the backbone for object detection
        Parameters:
            method: detection method name ('vidt' adds the extra neck-free patch embedding)
            det_token_num: the number of object to detect, i.e., number of object queries
            pos_dim: the channel dimension of positional encodings for [DET] and [PATCH] tokens
            cross_indices: the indices where to use the [DET X PATCH] cross-attention
                there are four possible stages in [0, 1, 2, 3]. 3 indicates Stage 4 in the ViDT paper.
        """
        # NOTE(review): `cross_indices=[3]` is a mutable default argument; safe
        # here only because it is never mutated.
        # which method?
        self.method = method
        # how many object we detect?
        self.det_token_num = det_token_num
        self.det_token = nn.Parameter(torch.zeros(1, det_token_num, self.embed_dims[0]))
        self.det_token = trunc_normal_(self.det_token, std=.02)
        # dim size of pos encoding
        self.pos_dim = pos_dim
        # learnable positional encoding for detection tokens
        det_pos_embed = torch.zeros(1, det_token_num, pos_dim)
        det_pos_embed = trunc_normal_(det_pos_embed, std=.02)
        self.det_pos_embed = torch.nn.Parameter(det_pos_embed)
        # info for detection
        self.num_channels = [self.embed_dims[i+1] for i in range(len(self.embed_dims)-1)]
        if method == 'vidt':
            self.num_channels.append(self.pos_dim)  # default: 256 (same to the default pos_dim)
        self.cross_indices = cross_indices
        # divisor to reduce the spatial size of the mask
        self.mask_divisor = 2 ** (len(self.embed_dims) - len(self.cross_indices))
        # projection matrix for det pos encoding in each stage (there are 4 blocks)
        for stage_id, stage in enumerate(self.stages):
            for block in stage:
                block.det_token_num = det_token_num
                block.det_pos_linear = nn.Linear(pos_dim, self.embed_dims[stage_id])
            # det channel expansion
            if stage_id > 0:
                det_exp = nn.Linear(self.embed_dims[stage_id-1], self.embed_dims[stage_id], bias=False)
                trunc_normal_(det_exp.weight, std=.02)
                det_exp_name = f'det_exp_{stage_id}'
                self.add_module(det_exp_name, det_exp)
                det_exp_norm = nn.LayerNorm(self.embed_dims[stage_id])
                nn.init.constant_(det_exp_norm.bias, 0)
                nn.init.constant_(det_exp_norm.weight, 1.0)
                det_exp_norm_name = f'det_exp_norm_{stage_id}'
                self.add_module(det_exp_norm_name, det_exp_norm)
        self.det_exps = [self.det_exp_1, self.det_exp_2, self.det_exp_3]
        self.det_exp_norms = [self.det_exp_norm_1, self.det_exp_norm_2, self.det_exp_norm_3]
        # neck-free model do not require downsamling at the last stage
        if method == 'vidt':
            self.patch_embed5 = PatchEmbed(
                patch_size=2, in_chans=self.embed_dims[-1],
                embed_dim=pos_dim, norm_layer=nn.LayerNorm)
            self.patch_embeds.append(self.patch_embed5)

    def forward_stage(self, x, H, W, stage_fn, det_pos, input_mask, cross_attn, dim):
        """Run one stage's serial blocks over the [CLS, PATCH..., DET...] sequence.

        Returns the spatial [PATCH] map (B, C, H, W), the [DET] tokens, and H/W.
        """
        B = x.shape[0]
        # compute sinusoidal pos encoding and cross-attn mask here to avoid redundant computation
        if cross_attn:
            _H, _W = input_mask.shape[1:]
            if not (_H == H and _W == W):
                input_mask = F.interpolate(input_mask[None].float(), size=(H, W)).to(torch.bool)[0]
            # sinusoidal pos encoding for [PATCH] tokens used in cross-attention
            patch_pos = masked_sin_pos_encoding(x, input_mask, dim)
            # attention padding mask due to the zero padding in inputs
            # the zero (padded) area is masked by 1.0 in 'input_mask'
            cross_attn_mask = input_mask.float()
            cross_attn_mask = cross_attn_mask.masked_fill(cross_attn_mask != 0.0, float(-100.0)). \
                masked_fill(cross_attn_mask == 0.0, float(0.0))
            # pad for detection token (this padding is required to process the binded [PATCH, DET] attention
            cross_attn_mask = cross_attn_mask.view(B, H * W).unsqueeze(1).unsqueeze(2)
            cross_attn_mask = F.pad(cross_attn_mask, (0, self.det_token_num), value=0)
        else:
            patch_pos = None
            cross_attn_mask = None
        # zip pos encodings
        pos = (patch_pos, det_pos)
        for blk in stage_fn:
            # for selective cross-attention
            if cross_attn:
                _cross_attn = True
                _cross_attn_mask = cross_attn_mask
                _pos = pos  # i.e., (patch_pos, det_pos)
            else:
                _cross_attn = False
                _cross_attn_mask = None
                _pos = (None, det_pos)
            # attention operations with RAM
            x = blk(x, size=(H, W),
                    # additional inputs
                    pos=_pos,
                    cross_attn=_cross_attn,
                    cross_attn_mask=_cross_attn_mask)
        # Split off the [DET] tokens, drop CLS, and restore the spatial layout.
        x, det = x[:, :H * W + 1, :], x[:, H * W + 1:, :]
        x = self.remove_cls(x)
        x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
        return x, det, H, W

    def forward(self, x, mask):
        """Backbone forward for detection.

        Parameters:
            x: image batch (B, C, H, W)
            mask: padding mask (B, H, W); nonzero marks padded area
        Returns:
            patch_outs: list of multi-scale [PATCH] feature maps
            det_tgt: [DET] tokens, channel-first (B, C, det_token_num)
            det_pos: learnable [DET] positional encoding, channel-first
        """
        B, H, W = x.shape[0], x.shape[2], x.shape[3]
        # expand det_token for all examples in the batch
        det_token = self.det_token.expand(B, -1, -1)
        # det pos encoding -> will be projected in each block
        det_pos = self.det_pos_embed
        # prepare a mask for cross attention
        mask = F.interpolate(mask[None].float(),
                             size=(H // self.mask_divisor, W // self.mask_divisor)).to(torch.bool)[0]
        # multi-scale [PATCH] tokens
        patch_outs = []
        for stage in range(len(self.embed_dims)):
            # whether to use cross-attention
            cross_attn = True if stage in self.cross_indices else False
            x = self.patch_embeds[stage](x)
            H, W = x.size(2), x.size(3)
            x = x.flatten(2).transpose(1, 2)
            x = self.insert_cls(x, self.cls_tokens[stage])
            # merge with det token after det token expansion
            if stage > 0:
                det_token = self.det_exps[stage-1](det_token)
                det_token = self.det_exp_norms[stage-1](det_token)
            x = torch.cat([x, det_token], dim=1)
            x, det_token, H, W = self.forward_stage(x, H, W,
                                                    self.stages[stage],
                                                    # additional input for VIDT
                                                    input_mask=mask,
                                                    det_pos=det_pos,
                                                    cross_attn=cross_attn,
                                                    dim=self.embed_dims[stage])
            if stage > 0:
                patch_outs.append(x)
        # Neck-free ViDT: one extra downsampling patch embed for the last scale.
        if self.method == 'vidt':
            patch_outs.append(self.patch_embeds[-1](x))
        det_tgt = det_token.permute(0, 2, 1)
        det_pos = det_pos.permute(0, 2, 1)
        return patch_outs, det_tgt, det_pos
def checkpoint_filter_fn(state_dict):
    """Filter a pretrained checkpoint for ViDT: drop the classifier head and
    the stray norm layers the original model carried but this one omits."""
    return {
        key: value
        for key, value in state_dict.items()
        if not key.startswith(('norm', 'head'))
    }
def _create_coat(variant, pretrained=False, default_cfg=None, **kwargs):
    """Instantiate a CoaT variant via timm's builder, filtering pretrained weights."""
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')
    # Look the config up by variant name; checkpoint_filter_fn strips the head.
    return build_model_with_cfg(
        CoaT, variant, pretrained,
        default_cfg=default_cfgs[variant],
        pretrained_filter_fn=checkpoint_filter_fn,
        **kwargs)
@register_model
def coat_lite_tiny(pretrained=None, **kwargs):
    """CoaT-Lite Tiny; returns (backbone, channel dim of the last stage)."""
    model_cfg = dict(
        patch_size=4, embed_dims=[64, 128, 256, 320], serial_depths=[2, 2, 2, 2], parallel_depth=0,
        num_heads=8, mlp_ratios=[8, 8, 4, 4], **kwargs)
    load_imagenet = (pretrained == 'imagenet')
    model = _create_coat('coat_lite_tiny', pretrained=load_imagenet, **model_cfg)
    return model, 320
@register_model
def coat_lite_mini(pretrained=None, **kwargs):
    """CoaT-Lite Mini; returns (backbone, channel dim of the last stage)."""
    model_cfg = dict(
        patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[2, 2, 2, 2], parallel_depth=0,
        num_heads=8, mlp_ratios=[8, 8, 4, 4], **kwargs)
    load_imagenet = (pretrained == 'imagenet')
    model = _create_coat('coat_lite_mini', pretrained=load_imagenet, **model_cfg)
    return model, 512
@register_model
def coat_lite_small(pretrained=None, **kwargs):
    """CoaT-Lite Small; returns (backbone, channel dim of the last stage)."""
    model_cfg = dict(
        patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[3, 4, 6, 3], parallel_depth=0,
        num_heads=8, mlp_ratios=[8, 8, 4, 4], **kwargs)
    load_imagenet = (pretrained == 'imagenet')
    model = _create_coat('coat_lite_small', pretrained=load_imagenet, **model_cfg)
    return model, 512
| StarcoderdataPython |
3271944 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import threading
import time
import random
tickets_disponibles = 1000
class VendeurTickets(threading.Thread):
    """Seller thread: repeatedly takes one ticket from the shared pool until
    the pool is empty, synchronizing through the object it is given."""
    # Per-seller sales counter. The class attribute is only a default:
    # `self.tickets_vendus += 1` below rebinds it as an instance attribute.
    tickets_vendus = 0
    def __init__(self, semaphore):
        super().__init__()
        self.sem = semaphore
        print('Le vendeur {} de ticket à commencé son travail'.format(self.getName()))
    def run(self):
        global tickets_disponibles
        running = True
        while running:
            self.random_delay()
            # NOTE(review): the check-then-decrement below is only safe if the
            # object passed as `semaphore` grants exclusive access (one permit).
            self.sem.acquire()
            if tickets_disponibles <= 0:
                # Pool exhausted: exit after releasing.
                running = False
            else:
                self.tickets_vendus += 1
                tickets_disponibles -= 1
                print('Le vendeur {} a vendu un ticket ({} restants)'.format(self.getName(), tickets_disponibles))
            self.sem.release()
        print('Le vendeur {} a vendu {} ticket(s) '.format(self.getName(), self.tickets_vendus))
    def random_delay(self):
        # Sleep 0 or 1 second to interleave the sellers.
        time.sleep(random.randint(0, 1))
def main():
    """Run 50 concurrent sellers until the shared ticket pool is empty.

    Bug fix: the original passed threading.BoundedSemaphore(50), which lets
    all 50 threads enter the critical section simultaneously, so the
    check-then-decrement on the shared counter raced and could drive it
    negative. A Lock (same acquire()/release() interface, one holder at a
    time) restores mutual exclusion without changing VendeurTickets.
    """
    lock = threading.Lock()
    vendeurs = []
    for _ in range(50):
        vendeur = VendeurTickets(lock)
        vendeur.start()
        vendeurs.append(vendeur)
    # Wait for every seller to finish before returning.
    for vendeur in vendeurs:
        vendeur.join()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
154676 | # Copyright © 2014 <NAME>
# [This program is licensed under the "MIT License"]
# Please see the file COPYING in the source
# distribution of this software for license terms.
# The += operator on lists extends the left-hand operand in
# place with the contents of the right-hand operand. This
# makes z += y different from z = z + y.
# First, note that the + operator makes a (shallow)
# copy of both lists.
x = [[1]]
y = [[2]]
z = x + y
print(z)  # Prints [[1], [2]]
# The inner lists are shared, so mutating them shows through z.
x[0][0] = 3
y[0][0] = 4
print(z)  # Prints [[3], [4]]
# Rebinding a slot of x or y does not affect z's own slots.
x[0] = 3
y[0] = 4
print(z)  # Prints [[3], [4]]
print()
# Now, note that the append-assignment operator (+=) copies only the
# contents of the right-hand list, mutating the left list in place.
x = [1]
z = x
y = [2]
z += y
print(z)  # Prints [1, 2]
# z and x are still the same object, so mutating x shows through z.
x[0] = 3
print(z)  # Prints [3, 2]
y[0] = 3
print(z)  # Prints [3, 2]
print()
# The identity z = z + y === z += y does not hold.
x = [1]
z = x
y = [2]
z = z + y
print(z)  # Prints [1, 2]
# z is now a new list, detached from both x and y.
x[0] = 3
print(z)  # Prints [1, 2]
y[0] = 3
print(z)  # Prints [1, 2]
| StarcoderdataPython |
1693547 | <reponame>craftslab/gerritstats
# -*- coding: utf-8 -*-
import pprint
import requests
from gerritstats.querier.querier import Querier, QuerierException
def test_exception():
    """QuerierException should stringify to the message it was built with."""
    message = "exception"
    err = QuerierException(message)
    assert str(err) == message
def test_querier():
    """Exercise Querier construction, _build on a sample change, and _fetch."""
    # Minimal Gerrit configuration pointing at the public AOSP review host.
    config = {
        "gerrit": {
            "host": "https://android-review.googlesource.com",
            "pass": "",
            "query": {"option": ["CURRENT_REVISION"]},
            "user": "",
        }
    }
    querier = Querier(config)
    assert querier is not None
    # Sample change payload mimicking a Gerrit REST /changes/ response.
    buf = {
        "project": "platform/build/soong",
        "branch": "master",
        "subject": "dex_import that isn't available for platform isn't installed",
        "updated": "Mon Jun 08 19:24:09 2020 +0900",
        "insertions": 4,
        "deletions": 2,
        "_number": 1325676,
        "owner": {
            "name": "<NAME>",
            "email": "<EMAIL>",
        },
        "labels": {
            "Code-Review": {
                "all": [
                    {
                        "value": 0,
                        "_account_id": 1000000,
                        "name": "<NAME>",
                        "username": "jiyong",
                    },
                    {
                        "value": 2,
                        "date": "2020-06-08 19:24:09.000000000",
                        "permitted_voting_range": {"min": 2, "max": 2},
                        "_account_id": 1000000,
                        "name": "<NAME>",
                        "email": "<EMAIL>",
                        "username": "jiyong",
                    },
                ],
            },
        },
    }
    buf = querier._build(buf)
    assert buf is not None
    # _fetch performs a real HTTP query; tolerate an unreachable/odd scheme.
    # NOTE(review): only InvalidSchema is caught — other network failures
    # (timeouts, DNS errors) would fail the test; confirm this is intended.
    try:
        buf = querier._fetch("change:1325676", 0)
    except requests.exceptions.InvalidSchema:
        buf = None
    if buf is not None:
        pprint.pprint(buf)
1760169 | #!/usr/bin/env python
#########################################################################
# Reinforcement Learning with PGPE on the ShipSteering Environment
#
# Requirements: pylab (for plotting only). If not available, comment the
# last 3 lines out
# Author: <NAME>, <EMAIL>
#########################################################################
__author__ = "<NAME>, <NAME>"
__version__ = '$Id$'
from pybrain.tools.example_tools import ExTools
from pybrain.structure.modules.tanhlayer import TanhLayer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.environments.shipsteer import ShipSteeringEnvironment
from pybrain.rl.environments.shipsteer import GoNorthwardTask
from pybrain.rl.agents import OptimizationAgent
from pybrain.optimization import PGPE
from pybrain.rl.experiments import EpisodicExperiment
batch = 1    # number of samples per learning step
prnts = 50   # number of learning steps after results are printed
# Floor division keeps `epis` an int: plain / yields a float on Python 3,
# which would break range(epis) below (and is identical on Python 2).
epis = 2000 // batch // prnts  # number of roleouts
numbExp = 10  # number of experiments
et = ExTools(batch, prnts)  # tool for printing and plotting
env = None
for runs in range(numbExp):
    # create environment
    # Options: Bool(OpenGL), Bool(Realtime simu. while client is connected), ServerIP(default:localhost), Port(default:21560)
    if env is not None:
        env.closeSocket()
    env = ShipSteeringEnvironment()
    # create task
    task = GoNorthwardTask(env, maxsteps=500)
    # create controller network
    net = buildNetwork(task.outdim, task.indim, outclass=TanhLayer)
    # create agent with controller and learner (and its options)
    agent = OptimizationAgent(net, PGPE(learningRate=0.3,
                                        sigmaLearningRate=0.15,
                                        momentum=0.0,
                                        epsilon=2.0,
                                        rprop=False,
                                        storeAllEvaluations=True))
    et.agent = agent
    # create experiment
    experiment = EpisodicExperiment(task, agent)
    # Do the experiment: print a windowed average of recent evaluations.
    for updates in range(epis):
        for i in range(prnts):
            experiment.doEpisodes(batch)
        et.printResults((agent.learner._allEvaluations)[-50:-1], runs, updates)
    et.addExps()
et.showExps()
# To view what the simulation is doing at the moment set the environment with True, go to pybrain/rl/environments/ode/ and start viewer.py (python-openGL musst be installed, see PyBrain documentation)
| StarcoderdataPython |
80000 | <reponame>SafetyGuardians/SafetyGuardiansApp
# Generated by Django 2.0 on 2018-01-08 11:41
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames ChatSession.url to
    # ChatSession.uri. A pure column rename — no data transformation needed.
    dependencies = [
        ('chat', '0003_auto_20180108_1123'),
    ]
    operations = [
        migrations.RenameField(
            model_name='chatsession',
            old_name='url',
            new_name='uri',
        ),
    ]
| StarcoderdataPython |
1785455 | <reponame>keakon/Doodle
# -*- coding: utf-8 -*-
import logging
import os
doodle_env = os.getenv('DOODLE_ENV')
try:
if doodle_env == 'PRODUCTION':
from .production import ProductionConfig as CONFIG
logging.info('Production config loaded.')
elif doodle_env == 'TEST':
from .test import TestConfig as CONFIG
logging.info('Test config loaded.')
else:
from .development import DevelopmentConfig as CONFIG
logging.info('Development config loaded.')
except ImportError:
logging.warning('Loading config for %s environment failed, use default config instead.', doodle_env or 'unspecified')
from .default import Config as CONFIG
| StarcoderdataPython |
# Change block: replace the block the player is standing on.
# API setup
from picraft import Vector
from picraft import World, Block
world = World()
#-------Your Code-------#
# Get the tile directly below the player's feet.
position = world.player.tile_pos
position -= Vector(y=1)
# Write block id 1 (data value 0) at that position.
world.blocks[position] = Block(1, 0)
| StarcoderdataPython |
124911 | import json
import errno
# Default configuration values (file sizes stored as strings).
config = {
    'first_file_filesize' : '1024',
    'second_file_filesize' : '128'
}


def write_config():
    """Persist the module-level ``config`` dict to ``config.json``."""
    with open('config.json', 'w') as config_file:
        json.dump(config, config_file)


def read_config():
    """Load the configuration from ``config.json`` and return it.

    If the file does not exist yet, the defaults are written to disk and a
    copy of them is returned. (The original version discarded the loaded
    dict into a local variable, leaked the file handle, and silently
    swallowed every exception; other I/O or parse errors now propagate.)
    """
    try:
        with open('config.json', 'r') as config_file:
            return json.load(config_file)
    except FileNotFoundError:
        # Config file doesn't exist: create one with defaults.
        write_config()
        return dict(config)
| StarcoderdataPython |
163303 | from django.template import Template, Context
from django.template.loader import render_to_string
from django.conf import settings
def parse(kwargs, template_name="shortcodes/vimeo.html"):
    """Render the vimeo shortcode template for the given attributes.

    ``kwargs`` must contain an ``id``; when it is missing or falsy, None is
    returned. Optional ``width``/``height`` attributes fall back to the
    SHORTCODES_VIMEO_WIDTH/HEIGHT settings (480 x 385 by default).
    """
    video_id = kwargs.get('id')
    if not video_id:
        return None
    default_width = getattr(settings, 'SHORTCODES_VIMEO_WIDTH', 480)
    default_height = getattr(settings, 'SHORTCODES_VIMEO_HEIGHT', 385)
    context = {
        'video_id': video_id,
        'width': int(kwargs.get('width', default_width)),
        'height': int(kwargs.get('height', default_height)),
    }
    return render_to_string(template_name, context)
| StarcoderdataPython |
4824815 | # Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The reader that reads Open-IE extractions data into data pack.
Format:
https://github.com/gabrielStanovsky/oie-benchmark/tree/master/oie_corpus
"""
import logging
import os
from typing import Iterator, List
from forte.common.configuration import Config
from forte.common.exception import ProcessorConfigError
from forte.common.resources import Resources
from forte.data.data_pack import DataPack
from forte.data.data_utils_io import dataset_path_iterator
from forte.data.base_reader import PackReader
from ft.onto.base_ontology import Sentence, RelationLink, EntityMention
__all__ = [
"OpenIEReader"
]
class OpenIEReader(PackReader):
    r""":class:`OpenIEReader` is designed to read in the Open IE dataset used
    by Open Information Extraction task. The related paper can be found
    `here
    <https://gabrielstanovsky.github.io/assets/papers/emnlp16a/paper.pdf>`__.
    The related source code for generating this dataset can be found
    `here
    <https://github.com/gabrielStanovsky/oie-benchmark>`__.
    To use this Reader, you must follow the dataset format. Each line in
    the dataset should contain following fields:
    .. code-block:: none
        <sentence>\t<predicate_head>\t<full_predicate>\t<arg1>\t<arg2>....
    You can also find the dataset format `here
    <https://github.com/gabrielStanovsky/oie-benchmark/tree/master/oie_corpus>`__.
    """

    def initialize(self, resources: Resources, configs: Config):
        """Validate that the required ``oie_file_extension`` config is set."""
        super().initialize(resources, configs)
        if configs.oie_file_extension is None:
            raise ProcessorConfigError(
                "Configuration oie_file_extension not provided.")

    def _collect(self, *args, **kwargs) -> Iterator[str]:
        # pylint: disable = unused-argument
        r"""Should be called with param ``oie_directory`` which is a path to a
        folder containing json files.
        Args:
            args: args[0] is the directory to the open ie files.
            kwargs:
        Returns: Iterator over files in the path with oie extensions.
        """
        oie_directory: str = args[0]
        oie_file_extension: str = self.configs.oie_file_extension
        logging.info("Reading dataset from %s with extension %s",
                     oie_directory, oie_file_extension)
        return dataset_path_iterator(oie_directory, oie_file_extension)

    def _cache_key_function(self, oie_file: str) -> str:
        # Cache entries are keyed by the file's base name only.
        return os.path.basename(oie_file)

    def _parse_pack(self, file_path: str) -> Iterator[DataPack]:
        """Parse one tab-separated Open IE file into a single DataPack."""
        pack: DataPack = DataPack()
        text: str = ""
        offset: int = 0
        with open(file_path, "r", encoding="utf8") as f:
            for line in f:
                line = line.strip()
                if line != "":
                    oie_component: List[str] = line.split("\t")
                    # Add sentence.
                    sentence = oie_component[0]
                    text += sentence + "\n"
                    # NOTE(review): the Sentence entry is presumably attached
                    # to the pack by its constructor side effect -- confirm
                    # against the forte ontology API.
                    Sentence(pack, offset, offset + len(sentence))
                    # Find argument 1.
                    # NOTE(review): str.find returns -1 when the argument text
                    # is not a substring of the sentence, which would yield a
                    # bogus span -- confirm the dataset guarantees containment.
                    arg1_begin = sentence.find(oie_component[3]) + offset
                    arg1_end = arg1_begin + len(oie_component[3])
                    arg1: EntityMention = EntityMention(
                        pack, arg1_begin, arg1_end)
                    # Find argument 2.
                    arg2_begin = sentence.find(oie_component[4]) + offset
                    arg2_end = arg2_begin + len(oie_component[4])
                    arg2: EntityMention = EntityMention(
                        pack, arg2_begin, arg2_end)
                    # Link arg1 -> arg2, typed with the full predicate field.
                    head_relation = RelationLink(pack, arg1, arg2)
                    head_relation.rel_type = oie_component[2]
                    # +1 accounts for the newline appended after the sentence.
                    offset += len(sentence) + 1
        self.set_text(pack, text)
        pack.pack_name = os.path.basename(file_path)
        yield pack

    @classmethod
    def default_configs(cls):
        """Extend parent defaults with ``oie_file_extension`` (default 'oie')."""
        config: dict = super().default_configs()
        # Add OIE dataset file extension. The default is '.oie'
        config.update({
            'oie_file_extension': 'oie'
        })
        return config
| StarcoderdataPython |
3318993 | <reponame>TMarquet/speech_recognition
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 22 11:02:28 2021
@author: kahg8
"""
import webrtcvad
import os
# Helper libraries
import numpy as np
from scipy.io import wavfile
import matplotlib.pyplot as plt
labels = ["yes", "no", "up", "down", "left",
"right", "on", "off", "stop", "go", "zero", "one", "two", "three", "four",
"five", "six", "seven", "eight", "nine",'silence','unknown']
unknown_labels = ["bed", "bird", "cat", "dog", "happy", "house", "marvin", "sheila",
"tree","wow"]
all_labels = labels[:len(labels)-2] + unknown_labels
import collections
import contextlib
import sys
import wave
def read_wave(path):
    """Read a mono 16-bit PCM .wav file.

    Returns a ``(pcm_bytes, sample_rate)`` tuple. Asserts that the file is
    mono, 16-bit, and uses one of the sample rates WebRTC VAD supports.
    """
    with contextlib.closing(wave.open(path, 'rb')) as wav_file:
        assert wav_file.getnchannels() == 1
        assert wav_file.getsampwidth() == 2
        rate = wav_file.getframerate()
        assert rate in (8000, 16000, 32000, 48000)
        frames = wav_file.readframes(wav_file.getnframes())
    return frames, rate
def write_wave(path, audio, sample_rate):
    """Write raw PCM ``audio`` bytes to ``path`` as a mono 16-bit .wav file."""
    with contextlib.closing(wave.open(path, 'wb')) as wav_file:
        wav_file.setnchannels(1)
        wav_file.setsampwidth(2)
        wav_file.setframerate(sample_rate)
        wav_file.writeframes(audio)
def autolabel(rects,ax,test = False):
    """Attach a text label above each bar in *rects*, displaying its height."""
    # Test-set labels are lifted higher so they do not collide with the
    # annotations of the neighbouring bar groups.
    up = 3
    if test:
        up = 15
    for rect in rects:
        height = rect.get_height()
        ax.annotate('{}'.format(height),
                    xy=(rect.get_x() + rect.get_width() / 2, height),
                    xytext=(0, up), # 'up' points vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom')
class Frame(object):
"""Represents a "frame" of audio data."""
def __init__(self, bytes, timestamp, duration):
self.bytes = bytes
self.timestamp = timestamp
self.duration = duration
def frame_generator(frame_duration_ms, audio, sample_rate):
"""Generates audio frames from PCM audio data.
Takes the desired frame duration in milliseconds, the PCM data, and
the sample rate.
Yields Frames of the requested duration.
"""
n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
offset = 0
timestamp = 0.0
duration = (float(n) / sample_rate) / 2.0
while offset + n < len(audio):
yield Frame(audio[offset:offset + n], timestamp, duration)
timestamp += duration
offset += n
def get_minimal_window(signal,sample_rate):
    """Run WebRTC VAD (aggressiveness 1) over ``signal`` in 10 ms frames.

    NOTE(review): the per-frame speech decisions are computed and then
    discarded, so this function currently has no observable result --
    confirm whether it was meant to return the trimmed speech window.
    """
    vad = webrtcvad.Vad(1)
    frame_duration = 10 # ms
    frames = frame_generator(frame_duration, signal, sample_rate)
    frames = list(frames)
    for frame in frames:
        vad.is_speech(frame.bytes, sample_rate)
def vad_collector(sample_rate, frame_duration_ms,
                  padding_duration_ms, vad, frames):
    """Filters out non-voiced audio frames.
    Given a webrtcvad.Vad and a source of audio frames, yields only
    the voiced audio.
    Uses a padded, sliding window algorithm over the audio frames.
    When more than 90% of the frames in the window are voiced (as
    reported by the VAD), the collector triggers and begins yielding
    audio frames. Then the collector waits until 90% of the frames in
    the window are unvoiced to detrigger.
    The window is padded at the front and back to provide a small
    amount of silence or the beginnings/endings of speech around the
    voiced frames.
    Arguments:
    sample_rate - The audio sample rate, in Hz.
    frame_duration_ms - The frame duration in milliseconds.
    padding_duration_ms - The amount to pad the window, in milliseconds.
    vad - An instance of webrtcvad.Vad.
    frames - a source of audio frames (sequence or generator).
    Returns: A generator that yields PCM audio data.

    Note: a '1'/'0' character per frame (plus '+(t)'/'-(t)' trigger marks)
    is written to stdout as a debug trace.
    """
    num_padding_frames = int(padding_duration_ms / frame_duration_ms)
    # We use a deque for our sliding window/ring buffer.
    ring_buffer = collections.deque(maxlen=num_padding_frames)
    # We have two states: TRIGGERED and NOTTRIGGERED. We start in the
    # NOTTRIGGERED state.
    triggered = False
    voiced_frames = []
    for frame in frames:
        is_speech = vad.is_speech(frame.bytes, sample_rate)
        sys.stdout.write('1' if is_speech else '0')
        if not triggered:
            ring_buffer.append((frame, is_speech))
            num_voiced = len([f for f, speech in ring_buffer if speech])
            # If we're NOTTRIGGERED and more than 90% of the frames in
            # the ring buffer are voiced frames, then enter the
            # TRIGGERED state.
            if num_voiced > 0.9 * ring_buffer.maxlen:
                triggered = True
                sys.stdout.write('+(%s)' % (ring_buffer[0][0].timestamp,))
                # We want to yield all the audio we see from now until
                # we are NOTTRIGGERED, but we have to start with the
                # audio that's already in the ring buffer.
                for f, s in ring_buffer:
                    voiced_frames.append(f)
                ring_buffer.clear()
        else:
            # We're in the TRIGGERED state, so collect the audio data
            # and add it to the ring buffer.
            voiced_frames.append(frame)
            ring_buffer.append((frame, is_speech))
            num_unvoiced = len([f for f, speech in ring_buffer if not speech])
            # If more than 90% of the frames in the ring buffer are
            # unvoiced, then enter NOTTRIGGERED and yield whatever
            # audio we've collected.
            if num_unvoiced > 0.9 * ring_buffer.maxlen:
                sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))
                triggered = False
                yield b''.join([f.bytes for f in voiced_frames])
                ring_buffer.clear()
                voiced_frames = []
    if triggered:
        sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))
    sys.stdout.write('\n')
    # If we have any leftover voiced audio when we run out of input,
    # yield it.
    if voiced_frames:
        yield b''.join([f.bytes for f in voiced_frames])
def observe_training_data():
    """Count usable training files per label.

    Walks each label folder and counts, per label, the files that are
    exactly 16000 samples long and appear in neither the validation nor the
    test list (``count``), alongside the total number of files (``count2``).
    Returns ``(count, count2)`` as lists ordered by sorted label name, with
    the known labels first and the 'unknown' words after them.

    NOTE(review): the absolute '/lists/...' paths and the bare label
    directories assume a specific working directory -- confirm.
    """
    all_labels = labels[:len(labels)-2]
    test_files = np.loadtxt('/lists/testing_list.txt', dtype=str)
    validation_files = np.loadtxt('/lists/validation_list.txt', dtype=str)
    total = 0
    count = []
    count2 = []
    # Tracks the shortest clip seen across the whole dataset.
    min_audio = 99999999
    for label in sorted(all_labels):
        all_files = os.listdir(label)
        count_temp = 0
        count_temp2 = 0
        for file in all_files:
            path = label+'/'+file
            sample_rate, audio = wavfile.read(path)
            min_audio = min(min_audio,len(audio))
            count_temp2 += 1
            if not file in validation_files and not file in test_files and len(audio) == 16000:
                count_temp += 1
                total += 1
        count.append(count_temp)
        count2.append(count_temp2)
    for label in sorted(unknown_labels):
        all_files = os.listdir(label)
        count_temp = 0
        count_temp2 = 0
        for file in all_files:
            path = label+'/'+file
            sample_rate, audio = wavfile.read(path)
            min_audio = min(min_audio,len(audio))
            count_temp2 += 1
            if not file in validation_files and not file in test_files and len(audio) == 16000:
                count_temp += 1
                total += 1
        count.append(count_temp)
        count2.append(count_temp2)
    print(count)
    print(total)
    print(min_audio)
    width = 5  # NOTE(review): unused leftover
    return count, count2
def observe_validation_data():
    """Count validation-list files per label.

    Returns ``(count_l, count_l2)``: per label (known words first, then the
    'unknown' words), the number of validation files of exactly 16000
    samples and the total number of validation files.
    """
    all_labels = labels[:len(labels)-2]
    validation_files = np.loadtxt('/lists/validation_list.txt', dtype=str)
    count = {}
    total = 0
    count_2 = {}
    for file in validation_files:
        # Paths in the list look like '<label>/<filename>'.
        label = file.split("/")[0]
        if not label in count:
            count[label] = 0
            count_2[label] = 0
        sample_rate, audio = wavfile.read(file)
        count_2[label] +=1
        if len(audio) == 16000:
            count[label] += 1
            total += 1
    print(count)
    print(total)
    width = 5  # NOTE(review): unused leftover
    # Flatten the dicts into lists following the canonical label order.
    count_l = []
    count_l2 = []
    r_label = all_labels + unknown_labels
    for l in r_label:
        count_l.append(count[l])
        count_l2.append(count_2[l])
    return count_l, count_l2
def observe_test_data():
    """Count testing-list files per label (same shape as the validation scan).

    NOTE(review): the local variable is named ``validation_files`` but the
    file loaded is the *testing* list -- the name is misleading.
    """
    all_labels = labels[:len(labels)-2]
    validation_files = np.loadtxt('/lists/testing_list.txt', dtype=str)
    count = {}
    total = 0
    count_2 = {}
    for file in validation_files:
        label = file.split("/")[0]
        if not label in count:
            count[label] = 0
            count_2[label] = 0
        sample_rate, audio = wavfile.read(file)
        count_2[label] +=1
        if len(audio) == 16000:
            count[label] += 1
            total += 1
    print(count)
    print(total)
    # Flatten the dicts into lists following the canonical label order.
    count_l = []
    count_l2 = []
    r_label = all_labels + unknown_labels
    for l in r_label:
        count_l.append(count[l])
        count_l2.append(count_2[l])
    return count_l,count_l2
def print_hist():
    """Plot two grouped bar charts of the dataset statistics.

    Top axes: number of exactly-1 s files per label; bottom axes: total
    files per label; each with training/validation/test bars side by side.
    """
    count_t1, count_t2 = observe_training_data()
    count_v1, count_v2 = observe_validation_data()
    count_tt1, count_tt2 = observe_test_data()
    all_labels = labels[:len(labels)-2]
    fig, ax = plt.subplots(2,1,sharey=True)
    # One bar-group slot every 40 units along the x axis.
    x = np.arange(0,len(all_labels+ unknown_labels)*40,step = 40)
    # the histogram of the data
    width = 10
    ax[0].set_xlabel('Labels')
    ax[0].set_xticks(x)
    ax[0].set_xticklabels(all_labels + unknown_labels)
    # Add some text for labels, title and custom x-axis tick labels, etc.
    ax[0].set_ylabel('Number of examples')
    ax[0].set_title('Histogram of the number of file of 1s per label')
    ax[1].set_xlabel('Labels')
    # Offset training/validation/test bars by one bar width around each tick.
    rectst1 = ax[0].bar(x-width, count_t1, width,label = 'training set',align = 'center')
    rectst2 = ax[1].bar(x-width, count_t2, width,label = 'training set',align = 'center')
    rectsv1 = ax[0].bar(x, count_v1, width,label = 'validation set',align = 'center')
    rectsv2 = ax[1].bar(x, count_v2, width,label = 'validation set',align = 'center')
    rectstt1 = ax[0].bar(x+width, count_tt1, width,label = 'test set',align = 'center')
    rectstt2 = ax[1].bar(x+width, count_tt2, width,label = 'test set',align = 'center')
    ax[1].set_xticks(x)
    ax[1].set_xticklabels(all_labels + unknown_labels)
    # Add some text for labels, title and custom x-axis tick labels, etc.
    ax[1].set_ylabel('Number of examples')
    ax[1].set_ylim(top = 2750)
    ax[1].set_title('Histogram of the number of file available per label')
    ax[0].legend()
    ax[1].legend()
    autolabel(rectst1,ax[0])
    autolabel(rectst2,ax[1])
    autolabel(rectsv1,ax[0],test = True)
    autolabel(rectsv2,ax[1],test= True)
    autolabel(rectstt1,ax[0])
    autolabel(rectstt2,ax[1])
# observe_training_data()
# i= 4
# j = 1
# fig, ax = plt.subplots(i,j)
# count = 0
# count_r = 0
# label = 'stop'
# for k in range(45,45+i*j):
# print((count//j,count%j))
# path= os.listdir(label)[k]
# file =label+'/'+path
# sample_rate, audio = wavfile.read(file)
# mfcc_data = ssc(audio)
# mfcc_data = np.subtract(mfcc_data,np.mean(mfcc_data))
# mfcc_data = np.divide(mfcc_data,mfcc_data.std())
# mfcc_data= np.swapaxes(mfcc_data, 0 ,1)
# get_minimal_window(audio, sample_rate)
# cax = ax[count%i].imshow(mfcc_data, interpolation='nearest', cmap=cm.coolwarm, origin='lower')
# # audio = np.subtract(audio,np.mean(audio))
# # audio = np.divide(audio,audio.std())
# # signal = list(audio)
# # if len(audio) < 16000:
# # signal += [0]*(16000 - len(audio))
# # ax[count // j][count%j].plot(range(16000),signal)
# count+=1
# plt.show()
# def main():
# for label in all_labels:
# print('Writing label :',label)
# os.mkdir(label+'_cut/')
# for file in os.listdir(label):
# audio, sample_rate = read_wave(label+'/'+file)
# vad = webrtcvad.Vad(int(3))
# frames = frame_generator(30, audio, sample_rate)
# frames = list(frames)
# segments = vad_collector(sample_rate, 30, 300, vad, frames)
# for i, segment in enumerate(segments):
# path = label+'_cut/'+file
# write_wave(path, segment, sample_rate)
# if i == 1:
# print('Attention double morceau',path)
# main() | StarcoderdataPython |
175061 | from pathlib import Path
from tempfile import TemporaryDirectory
import numpy as np
import torch
from agent import DqnAgent
from model import DqnModel
from replay_buffer import ReplayBuffer
from strategy import EpsilonGreedyStrategy
from torch import nn
import pytest
# Number of transitions per synthetic training batch.
BATCH_SIZE = 5


@pytest.fixture
def agent():
    """Provide a DqnAgent wired to a tiny linear model and small buffer."""
    model = nn.Linear(10, 2)
    memory = ReplayBuffer(10)
    agent = DqnAgent(model, memory, EpsilonGreedyStrategy(1.0))
    yield agent


def create_batch():
    """Build one random batch of (s, a, r, s', done) arrays of BATCH_SIZE."""
    states = np.random.randn(BATCH_SIZE, 10)
    actions = np.random.randint(0, 2, (BATCH_SIZE, 1))
    rewards = np.random.randint(0, 10, (BATCH_SIZE, 1))
    next_states = np.random.randn(BATCH_SIZE, 10)
    dones = np.random.randint(0, 2, (BATCH_SIZE, 1))
    batch = states, actions, rewards, next_states, dones
    return batch


def test_initialize():
    """Agent initialization in train mode should not raise."""
    model = DqnModel(10, 2, (5,))
    agent = DqnAgent(model, None, EpsilonGreedyStrategy(1.0))
    agent.initialize(True)


def test_store_load(agent):
    """Round-trip the agent's state through a temporary directory."""
    with TemporaryDirectory() as dir:
        path = Path(dir)
        agent.store(path)
        agent.load(path)


def test_get_action(agent):
    """Actions must lie in the two-action space in both modes."""
    for train_mode in [False, True]:
        agent.initialize(train_mode)
        for i in range(5):
            action = agent.get_action(np.random.randn(10))
            assert 0 <= action <= 1


def test_make_tensor(agent):
    """make_tensor converts one or several numpy arrays to torch tensors."""
    arr1 = np.random.randn(3, 5)
    res = agent.make_tensor(arr1)
    assert isinstance(res, torch.Tensor)
    arr2 = np.random.randn(4, 6)
    res1, res2 = agent.make_tensor(arr1, arr2)
    assert isinstance(res1, torch.Tensor)
    assert isinstance(res2, torch.Tensor)


def test_train_model_dqn(agent):
    """A training step on a random batch should complete (plain DQN)."""
    agent.initialize(True)
    batch = create_batch()
    agent.train_model(batch)


def test_train_model_ddqn(agent):
    """A training step on a random batch should complete (double DQN)."""
    agent.ddqn = True
    agent.initialize(True)
    batch = create_batch()
    agent.train_model(batch)


def test_state_dict(agent):
    """state_dict() should be retrievable without error."""
    res = agent.state_dict()
    print(res)  # NOTE(review): leftover debug output, not an assertion
1789408 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Compare all messages within a tree with those stored in a date tree (yyyy/mm)"""
import KmdMbox
import KmdCmd
import KmdFiles
import mailbox
import time
import os
import logging
class KmdMboxMergeDateTree(KmdCmd.KmdCommand):
    """Merge every mbox found under a source tree into a yyyy/mm date tree,
    routing duplicate messages to a separate mbox and undatable ones to
    '_misc_'."""

    def extendParser(self):
        """Add the srctree/datetree positional arguments to the CLI parser."""
        super(KmdMboxMergeDateTree, self).extendParser()
        #Extend parser
        self.parser.add_argument('srctree', metavar='</path/to/srctree>', nargs=1, help='Path to merge')
        self.parser.add_argument('datetree', metavar='</path/to/datetree>', nargs=1, help='Root of a mbox date tree /aaaa/mm')
        # self.parser.add_argument('duptree', metavar='</path/to/datetree>', nargs=1, help='Root of a mbox date tree for duplicates /aaaa/mm')
        #FIXME : option for misc storage, default in /path/to/datetree/misc

    def run(self):
        """Walk srctree, hashing every message and appending unseen ones to
        the matching yyyy/mm mbox (duplicates go to the temp duplicates box).
        With --doit unset this is a dry run: nothing is written."""
        srctree = self.args.srctree[0]
        datetree = self.args.datetree[0]
        # Destination layout: <year>.sbd/<year>-<month>
        filedestdatepattern = "%Y.sbd/%Y-%m"
        messagehashs = [] # will hold the list of known message hashes
        mboxfiles = {} # dict path => mbox object
        doit = self.args.doit
        dupmboxname = "~duplicates.tmp"
        if os.path.exists(os.path.join(datetree, dupmboxname)):
            #Rename duplicates mbox to avoid huge overhead at runtime
            KmdFiles.fileMoveRename(os.path.join(datetree, dupmboxname), os.path.join(datetree, dupmboxname), doit)
        for root, _, files in os.walk(srctree):
            #For each folder in the src tree
            for name in files:
                dupcount, miscount, keepcount = 0,0,0
                #for each file in the folder
                p = os.path.join(root, name)
                if os.path.islink(p):
                    continue
                #Is it an mbox with meta datas ?
                if not KmdMbox.isFileMbox(p):
                    logging.debug("File %s is not a MBOX file", p)
                    continue
                srcbox = mailbox.mbox(p) #Open Mbox file
                logging.info("Opening MBOX %s", p)
                for m in srcbox : #Walk into Mbox
                    duplicate = False
                    #for each message
                    h = KmdMbox.messageHash(m) #hashmessage
                    if h in messagehashs :
                        #already known message
                        dupcount += 1
                        duplicate = True
                    vdate = KmdMbox.messageGetDate(m)
                    #Is the date valid
                    if vdate == None :
                        mboxname = os.path.join(datetree, "_misc_")
                        miscount += 1
                    else :
                        try :
                            mboxname = os.path.join(datetree, time.strftime(filedestdatepattern, vdate))
                            #Good, go on
                        except :
                            mboxname = os.path.join(datetree, "_misc_")
                            miscount += 1
                    if duplicate:
                        #current mbox : duplicates
                        mboxname = os.path.join(datetree, dupmboxname)
                    if mboxname not in mboxfiles:
                        #File not already open
                        head, _ = os.path.split(mboxname)
                        if not os.path.exists(head) :
                            #new tree
                            if doit :
                                os.makedirs(head)
                                logging.debug("Creating %s tree", head)
                        #create or open mbox
                        if not os.path.exists(mboxname):
                            logging.info("Creating MBOX %s", mboxname)
                            if doit :
                                mboxfiles[mboxname] = mailbox.mbox(mboxname)
                            else :
                                mboxfiles[mboxname] = "foobar" #to fool mbox opening test
                        else :
                            logging.info("Opening MBOX %s", mboxname)
                            mboxfiles[mboxname] = mailbox.mbox(mboxname)
                        #walk the mbox to append newhash
                        if not duplicate :
                            for m2 in mboxfiles[mboxname]:
                                messagehashs.append(KmdMbox.messageHash(m2))
                        #test against the newhashs
                        if not duplicate and h in messagehashs :
                            mboxname = os.path.join(datetree, dupmboxname)
                            dupcount += 1
                            if mboxname not in mboxfiles:
                                mboxfiles[mboxname] = mailbox.mbox(mboxname)
                    if doit :
                        KmdMbox.messageAddToMbox(mboxfiles[mboxname], m)
                    keepcount += 1
                    messagehashs.append(h)
                srcbox.close()
                logging.info("Closing MBOX %s - Duplicates: %d - Kept : %d (Misc : %d)", p, dupcount, keepcount, miscount)
if __name__ == "__main__":
cmd = KmdMboxMergeDateTree(__doc__)
cmd.run()
| StarcoderdataPython |
128261 | from typing import List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as scs
from scipy.optimize import minimize
class DistrManager:
    """Builds a noisy linear sample y = 2 + 2x + N(0, 1) over a grid and
    fits it with least squares (MNK) and least absolute deviations (MNM)."""

    def __init__(
        self, left_border: float = -1.8, right_border: float = 2, step: float = 0.2
    ) -> None:
        self._left_border = left_border
        self._right_border = right_border
        self._step = step

    def get_range(self) -> np.ndarray:
        """Sample grid [left_border, right_border) with the configured step."""
        return np.arange(self._left_border, self._right_border, self._step)

    def eval_x(self, x: float) -> float:
        """The true, noise-free relation: y = 2 + 2x."""
        return 2 + 2 * x

    def get_relation(self, x: List[float]) -> List[float]:
        """True relation plus standard normal noise at each grid point."""
        return [self.eval_x(x_i) + scs.norm.rvs(0, 1) for x_i in x]

    def mess_relation(self, y: List[float]) -> List[float]:
        """Perturb the endpoints in place to create two gross outliers."""
        print("\nAdd error to relation\n")
        y[0] += 10
        y[-1] -= 10
        return y

    def _get_mnk_params(self, x: float, y: float) -> Tuple[float, float]:
        # Closed-form simple linear regression (ordinary least squares).
        beta_1 = (np.mean(x * y) - np.mean(x) * np.mean(y)) / (
            np.mean(x ** 2) - np.mean(x) ** 2
        )
        beta_0 = np.mean(y) - beta_1 * np.mean(x)
        return beta_0, beta_1

    def mnk(self, x: float, y: float) -> List[float]:
        """Least-squares fit; returns the predicted y for each x."""
        beta_0, beta_1 = self._get_mnk_params(x, y)
        print(f"MNK:\t beta_0 = {beta_0}\t beta_1 = {beta_1}")
        return [beta_0 + beta_1 * element for element in x]

    def _minimize_mnm(self, x_0: Tuple[float, float], x: float, y: float) -> float:
        # L1 loss: sum of absolute residuals for candidate (intercept, slope).
        return sum(abs(y[idx] - x_0[0] - x_0[1] * x_val) for idx, x_val in enumerate(x))

    def _get_mnm_params(self, x: float, y: float) -> Tuple[float, float]:
        # Start from the least-squares solution and minimise the L1 loss.
        beta_0, beta_1 = self._get_mnk_params(x, y)
        minimized = minimize(
            self._minimize_mnm, [beta_0, beta_1], args=(x, y), method="SLSQP"
        )
        return minimized.x[0], minimized.x[1]

    def mnm(self, x: float, y: float) -> List[float]:
        """Least-absolute-deviations fit; returns the predicted y for each x."""
        beta_0, beta_1 = self._get_mnm_params(x, y)
        print(f"MNM:\t beta_0 = {beta_0}\t beta_1 = {beta_1}")
        return [beta_0 + beta_1 * element for element in x]

    def draw(self, x: float, y: float, name: str) -> None:
        """Plot the sample, ideal line and both fits; print fit distances
        (squared error for MNK, absolute error for MNM)."""
        y_mnk = self.mnk(x, y)
        y_mnm = self.mnm(x, y)
        mnk_dist = sum((self.eval_x(x)[i] - y_mnk[i]) ** 2 for i in range(len(y)))
        mnm_dist = sum(abs(self.eval_x(x)[i] - y_mnm[i]) for i in range(len(y)))
        print(f"MNK distance = {mnk_dist}\t MNM distance = {mnm_dist}")
        plt.plot(x, self.eval_x(x), color="red", label="Ideal")
        plt.plot(x, y_mnk, color="green", label="MNK")
        plt.plot(x, y_mnm, color="orange", label="MNM")
        plt.scatter(x, y, c="blue", label="Sample")
        plt.xlim([self._left_border, self._right_border])
        plt.grid()
        plt.legend()
        plt.title(name)
        plt.show()
| StarcoderdataPython |
3350433 | import numpy as np
import bpy
from mathutils import Matrix, Vector
import bmesh
import mathutils
import copy
def normalise_vector_batch(vector):
    '''
    Normalise each row vector so that (a**2 + b**2 + c**2 == 1**2).
    '''
    lengths = np.sqrt(np.sum(vector ** 2, axis=1))
    return vector / lengths.reshape(-1, 1)
def axis_angle_to_quaternion(angle, vector):
    '''
    Convert axis-angle (theta, axis) rows to quaternion rows (w, x, y, z):
    (w, x, y, z) = (cos(theta/2), v1*sin(theta/2), v2*sin(theta/2), v3*sin(theta/2))
    '''
    half_angle = angle / 2
    return np.hstack((np.cos(half_angle), np.sin(half_angle) * vector))
def angle_between_two_vectors_batch(vec1, vec2):
    """Angle (radians) between each row of ``vec1`` and the vector ``vec2``.

    Assumes both inputs are unit vectors. The dot products are rounded to
    5 decimals because floating-point drift can push them just above 1,
    which would make arccos return NaN.
    """
    cosines = np.round(np.dot(vec1, vec2), 5)
    return np.arccos(cosines)
def generate_quaternion_from_3d_point(points, target):
    """Quaternions rotating each (normalised) row of ``points`` onto ``target``.

    points: (N, 3) array; target: 3-vector (assumed unit length).
    Returns an (N, 4) array of (w, x, y, z) quaternions.
    """
    points = normalise_vector_batch(points)
    # Rotation axis: perpendicular to each point/target pair.
    axis_vector = np.cross(points, target)
    axis_vector = normalise_vector_batch(axis_vector)
    # Rotation magnitude: angle between each point and the target.
    angles = angle_between_two_vectors_batch(points, target)
    angles = angles.reshape(angles.shape[0], 1)
    # Negative angle so the rotation brings the point onto the target.
    angles = -angles
    # create quaternion
    quat = axis_angle_to_quaternion(angles, axis_vector)
    return quat
def quats_to_rotation_mats(quats, invert=False):
    ''' Convert an iterable of quaternions into 3x3 rotation matrices (numpy
    output). When ``invert`` is True each matrix is inverted before being
    collected. '''
    R_list = []
    for quat in quats:
        R = mathutils.Quaternion(quat)
        R = R.to_matrix()
        if invert:
            R.invert()
        R_list.append(R)
    return np.array(R_list)
def rotate_batch(points, R_batch):
    ''' Rotate each point by its matching matrix: out[i] = points[i] @ R_batch[i] (numpy input). '''
    return np.array([np.matmul(point, rotation)
                     for point, rotation in zip(points, R_batch)])
def rotate_points(points, R):
    ''' Rotate every point by the single matrix R: out[i] = points[i] @ R (numpy input). '''
    return np.array([np.matmul(point, R) for point in points])
def generate_quaternion_batch(p1, p2):
    ''' generate quaternion where point1 defines up and point2 defines object's twist '''
    # First rotation: align each p1 with the world up axis (+Z).
    target = np.array([0, 0, 1])
    quats = generate_quaternion_from_3d_point(p1, target)
    # Apply that rotation to p2 and flatten it onto the XY plane (z = 0)
    # so only the twist around the up axis remains.
    R_batch = quats_to_rotation_mats(quats)
    p2 = rotate_batch(p2, R_batch)
    p2[:, 2] = 0
    # Second rotation: align the flattened p2 with +X (the twist).
    target = np.array([1, 0, 0])
    quats2 = generate_quaternion_from_3d_point(p2, target)
    # Compose: turn first, then twist.
    final_quats = []
    for q1, q2 in zip(quats, quats2):
        turn = mathutils.Quaternion(q1)
        twist = mathutils.Quaternion(q2)
        final_quat = turn @ twist
        final_quats.append(final_quat)
    return np.array(final_quats)
def create_mesh_animation(animation_data, object_name):
    ''' input shape ---> frames/vert/coor '''
    # Re-order to vert/frame/coor so we can emit one F-curve set per vertex.
    animation_data = np.transpose(animation_data, (1, 0, 2))
    obj = bpy.data.objects[object_name]
    mesh = obj.data
    action = bpy.data.actions.new("meshAnimation")
    mesh.animation_data_create()
    mesh.animation_data.action = action
    # loop over verts
    for idx, vert in enumerate(animation_data):
        # One F-curve per coordinate channel (x, y, z) of this vertex.
        fcurves = [action.fcurves.new(f'vertices[{idx}].co', index=i) for i in range(3)]
        for frame, frame_data in enumerate(vert):
            fcurves[0].keyframe_points.insert(frame, frame_data[0], options={'FAST'}) # x
            fcurves[1].keyframe_points.insert(frame, frame_data[1], options={'FAST'}) # y
            fcurves[2].keyframe_points.insert(frame, frame_data[2], options={'FAST'}) # z
def offset_quat_by_first_index(quats):
    """Re-express every quaternion relative to the first one in the list.

    NOTE(review): returns a plain list while ``offset_quat`` returns a numpy
    array -- confirm whether the asymmetry is intentional.
    """
    # offset rotation
    offsets = []
    for quat in quats:
        offset = mathutils.Quaternion(quat) @ mathutils.Quaternion(quats[0]).inverted()
        offsets.append(offset)
    return offsets
def offset_quat(quats, offset):
    """Right-multiply every quaternion by the inverse of ``offset``."""
    # Copy so the caller's offset object is not mutated by inversion.
    change = copy.copy(offset)
    # offset rotation
    change = mathutils.Quaternion(change).inverted()
    offsets = []
    for quat in quats:
        offset = mathutils.Quaternion(quat) @ change
        offsets.append(offset)
    return np.array(offsets)
def apply_rotation_to_rig_quat(rotation, obj, inverse=True):
    """Key the 'head' bone of ``obj`` with one quaternion per frame.

    rotation: iterable of (w, x, y, z) values, one entry per frame.
    inverse: when True (the default), the inverted quaternion is applied.
    """
    # apply to test model
    obj.pose.bones['head'].rotation_mode = 'QUATERNION'
    for i, r in enumerate(rotation):
        if inverse:
            r = mathutils.Quaternion(r).inverted()
        obj.pose.bones['head'].rotation_quaternion = r
        obj.pose.bones['head'].keyframe_insert(data_path="rotation_quaternion", frame=i)
# -----------------------------------------------------------------------------------------------------
# clean this
def apply_quaternion_to_bone(rotation, obj, bone_name, inverse=False):
    """Key ``bone_name`` on ``obj`` with one quaternion per frame.

    rotation: iterable of (w, x, y, z) values, one entry per frame.
    inverse: when True, the inverted quaternion is applied instead.
    (Leftover per-frame debug prints removed -- see the 'clean this' note.)
    """
    obj.pose.bones[bone_name].rotation_mode = 'QUATERNION'
    for frame, quat in enumerate(rotation):
        if inverse:
            quat = mathutils.Quaternion(quat).inverted()
        obj.pose.bones[bone_name].rotation_quaternion = quat
        obj.pose.bones[bone_name].keyframe_insert(
            data_path="rotation_quaternion", frame=frame)
def add_rotation_q_to_bone(rotation, obj, bone_name, inverse=False):
    """Compose each frame's existing bone rotation with an extra quaternion.

    For every frame i the scene is stepped to that frame, the bone's current
    rotation is read and right-multiplied by rotation[i], and the result is
    re-keyed. ``inverse=True`` applies the inverted quaternion instead.
    (Leftover per-frame debug prints removed -- see the 'clean this' note.)
    """
    obj.pose.bones[bone_name].rotation_mode = 'QUATERNION'
    for frame, quat in enumerate(rotation):
        if inverse:
            quat = mathutils.Quaternion(quat).inverted()
        # Step the scene so the bone's evaluated rotation matches this frame.
        bpy.context.scene.frame_set(frame)
        current = obj.pose.bones[bone_name].rotation_quaternion
        obj.pose.bones[bone_name].rotation_quaternion = current @ mathutils.Quaternion(quat)
        obj.pose.bones[bone_name].keyframe_insert(
            data_path="rotation_quaternion", frame=frame)
def slope_angle_2d(p1, p2, target):
    """Quaternions rotating each 2D slope direction (p2 - p1) onto ``target``."""
    # Translate so p1 sits at the origin; rows become direction vectors.
    points = p2 - p1
    # Lift the 2D directions into 3D with z = 0.
    points = np.concatenate((points, np.zeros((points.shape[0], 1))), axis=1)
    quats = generate_quaternion_from_3d_point(points, target)
    return quats
def get_middle_point_2d(point2, point1):
    """Return the 2D midpoint of each pair of rows in ``point2``/``point1``.

    Both inputs are (N, 2) arrays; the result is an (N, 2) array.
    """
    mid_x = (point2[:, 0] - point1[:, 0]) * .5 + point1[:, 0]
    mid_y = (point2[:, 1] - point1[:, 1]) * .5 + point1[:, 1]
    return np.stack((mid_x, mid_y), axis=1)
| StarcoderdataPython |
3249687 | # -*- coding: utf-8 -*-
# This CORS implementation was ported from Jupyter
# notebook.base.handlers.{IPythonHandler, APIHandler}.
#
# notebook's license is as follows:
#
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import, division, print_function
from six.moves.urllib.parse import urlparse
from tornado import web
from tornado.log import app_log
class CORSRequestHandler(web.RequestHandler):
@property
def allow_origin(self):
"""Normal Access-Control-Allow-Origin"""
return self.settings.get('allow_origin', '')
@property
def allow_origin_pat(self):
"""Regular expression version of allow_origin"""
return self.settings.get('allow_origin_pat', None)
@property
def allow_credentials(self):
"""Whether to set Access-Control-Allow-Credentials"""
return self.settings.get('allow_credentials', False)
def set_default_headers(self):
"""Add CORS headers, if defined"""
super(CORSRequestHandler, self).set_default_headers()
if self.allow_origin:
self.set_header("Access-Control-Allow-Origin", self.allow_origin)
elif self.allow_origin_pat:
origin = self.get_origin()
if origin and self.allow_origin_pat.match(origin):
self.set_header("Access-Control-Allow-Origin", origin)
if self.allow_credentials:
self.set_header("Access-Control-Allow-Credentials", 'true')
def get_origin(self):
# Handle WebSocket Origin naming convention differences
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if "Origin" in self.request.headers:
origin = self.request.headers.get("Origin")
else:
origin = self.request.headers.get("Sec-Websocket-Origin", None)
return origin
# origin_to_satisfy_tornado is present because tornado requires
# check_origin to take an origin argument, but we don't use it
def check_origin(self, origin_to_satisfy_tornado=""):
"""Check Origin for cross-site API requests, including websockets
Copied from WebSocket with changes:
- allow unspecified host/origin (e.g. scripts)
- allow token-authenticated requests
"""
if self.allow_origin == '*':
return True
host = self.request.headers.get("Host")
origin = self.request.headers.get("Origin")
# If no header is provided, assume it comes from a script/curl.
# We are only concerned with cross-site browser stuff here.
if origin is None or host is None:
return True
origin = origin.lower()
origin_host = urlparse(origin).netloc
# OK if origin matches host
if origin_host == host:
return True
# Check CORS headers
if self.allow_origin:
allow = self.allow_origin == origin
elif self.allow_origin_pat:
allow = bool(self.allow_origin_pat.match(origin))
else:
# No CORS headers deny the request
allow = False
if not allow:
app_log.warning(
'Blocking Cross Origin API request for %s. Origin: %s, Host: %s',
self.request.path, origin, host,
)
return allow
    def prepare(self):
        """Reject disallowed cross-origin requests before any handler runs.

        NOTE(review): a 404 (rather than 403) is raised -- presumably so a
        blocked origin cannot distinguish "forbidden" from "missing";
        confirm before changing the status code.
        """
        if not self.check_origin():
            raise web.HTTPError(404)
        return super(CORSRequestHandler, self).prepare()
    @property
    def content_security_policy(self):
        """Restrictive CSP for API responses: extend the inherited policy
        with ``default-src 'none'`` so no sub-resources may be loaded."""
        csp = '; '.join([
            super(CORSRequestHandler, self).content_security_policy,
            "default-src 'none'",
        ])
        return csp
    def options(self, *args, **kwargs):
        """Answer CORS preflight (OPTIONS) requests.

        Advertises the allowed request headers and methods; the matching
        Access-Control-Allow-Origin header is added by
        set_default_headers().
        """
        self.set_header('Access-Control-Allow-Headers',
                        'accept, content-type, authorization')
        self.set_header('Access-Control-Allow-Methods',
                        'GET, PUT, POST, PATCH, DELETE, OPTIONS')
        self.finish()
| StarcoderdataPython |
# SECURITY: executes arbitrary code read from tmp.txt -- never run this on
# an untrusted file.  Also note exec() always returns None, so `a` is None.
a = exec(open("tmp.txt").read())
| StarcoderdataPython |
77889 | <reponame>AviKalPython/self.py
# exc. 7.1.4
def squared_numbers(start, stop):
    """Print the square of every integer from start to stop, inclusive.

    Parameters
    ----------
    start, stop : int
        Inclusive bounds; nothing is printed when start > stop.
    """
    # range() expresses the inclusive walk more idiomatically than the
    # original manual while-loop with a hand-incremented counter.
    for n in range(start, stop + 1):
        print(n ** 2)
def main():
    """Demo driver: print the squares of the integers from -3 to 3."""
    start = -3
    stop = 3
    squared_numbers(start, stop)
if __name__ == "__main__":
main() | StarcoderdataPython |
class BaseStorage(object):
    """Abstract interface for measurement storage backends.

    Subclasses must override every method below; this base class only
    defines the contract.
    """

    def __init__(self):
        super(BaseStorage, self).__init__()

    def filter(self, criteria):
        """Return measurements matching *criteria*."""
        # NotImplementedError is the conventional marker for an abstract
        # method (the original raised a bare Exception with a typo'd
        # message).  Callers catching Exception still catch this.
        raise NotImplementedError

    def getSummary(self, criteria):
        """Return an aggregate summary of measurements matching *criteria*."""
        raise NotImplementedError

    def insert(self, measurement):
        """Persist a single measurement."""
        raise NotImplementedError

    def delete(self, measurementId):
        """Remove the measurement with the given id."""
        raise NotImplementedError

    def truncate(self):
        """Remove all stored measurements."""
        raise NotImplementedError
| StarcoderdataPython |
1658182 | # Author: <NAME>, <NAME>
# with some functions borrowed from https://github.com/SeanNaren/deepspeech.pytorch
import json
import librosa
import numpy as np
import os
import os.path
import scipy.signal
import torch
import torch.nn.functional
import torchvision.transforms as transforms
from PIL import Image
from torch.utils.data import Dataset
from dataloaders.utils import compute_spectrogram
class ImageCaptionDataset(Dataset):
    # Torch Dataset yielding (image tensor, audio spectrogram, n_frames)
    # triples for paired image/spoken-caption data.
    def __init__(self, dataset_json_file, audio_conf=None, image_conf=None):
        """
        Dataset that manages a set of paired images and audio recordings
        :param dataset_json_file
        :param audio_conf: Dictionary containing the sample rate, window and
        the window length/stride in seconds, and normalization to perform (optional)
        :param image_transform: torchvision transform to apply to the images (optional)
        """
        with open(dataset_json_file, 'r') as fp:
            data_json = json.load(fp)
        self.data = data_json['data']
        # NOTE(review): the configurable base paths below were replaced by
        # hard-coded Colab paths -- restore the commented lines for use
        # outside that environment.
        # self.image_base_path = data_json.get('image_base_path', '')
        # self.audio_base_path = data_json.get('audio_base_path', '')
        self.image_base_path = "/content/flickr8k_spoken_captions/imgs"
        self.audio_base_path = "/content/flickr8k_spoken_captions/wavs"

        self.audio_conf = audio_conf if audio_conf else {}
        self.image_conf = image_conf if image_conf else {}

        # image transforms
        crop_size = self.image_conf.get('crop_size', 224)
        center_crop = self.image_conf.get('center_crop', False)
        if center_crop:
            # Deterministic eval-style transform.
            self.image_resize_and_crop = transforms.Compose(
                [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()])
        else:
            # Random crop acts as training-time augmentation.
            self.image_resize_and_crop = transforms.Compose(
                [transforms.RandomResizedCrop(crop_size), transforms.ToTensor()])
        # Defaults are the standard ImageNet normalization statistics.
        RGB_mean = self.image_conf.get('RGB_mean', [0.485, 0.456, 0.406])
        RGB_std = self.image_conf.get('RGB_std', [0.229, 0.224, 0.225])
        self.image_normalize = transforms.Normalize(mean=RGB_mean, std=RGB_std)

    def _LoadAudio(self, path):
        """Load a wav file and return (log-spectrogram, n_frames)."""
        y, sr = librosa.load(path, None)
        logspec, n_frames = compute_spectrogram(y, sr, self.audio_conf)
        return logspec, n_frames

    def _LoadImage(self, impath):
        """Load an image, apply resize/crop and normalization transforms."""
        img = Image.open(impath).convert('RGB')
        img = self.image_resize_and_crop(img)
        img = self.image_normalize(img)
        return img

    def __getitem__(self, index):
        """
        returns: image, audio, nframes
        where image is a FloatTensor of size (3, H, W)
        audio is a FloatTensor of size (N_freq, N_frames) for spectrogram, or (N_frames) for waveform
        nframes is an integer
        """
        datum = self.data[index]
        audio, nframes = self._LoadAudio(os.path.join(self.audio_base_path, datum['wav']))
        image = self._LoadImage(os.path.join(self.image_base_path, datum['image']))
        return image, audio, nframes

    def __len__(self):
        return len(self.data)
1783040 | from histogram import Histogram
from metric import metric_decorated
class Meter(Histogram):
    """A Histogram that counts events via mark(); usable as a context
    manager that records one event on entry."""

    def __init__(self, name):
        Histogram.__init__(self, name)

    def mark(self):
        """Record a single event."""
        self.update()

    def __enter__(self):
        self.mark()
        # BUG FIX: the original returned None, so `with meter as m:` bound
        # m to None.  Returning self follows context-manager convention and
        # is backward compatible for `with meter:` uses.
        return self

    def __exit__(self, *unused):
        # Returning a falsy value lets exceptions propagate, same as the
        # original's bare `pass`.
        return False
def metered(target=None, **options):
    """Decorator that marks a Meter each time the wrapped callable runs.

    Delegates to metric_decorated, which attaches a Meter to the call
    record and invokes *before* prior to each call.
    """
    def before(record):
        # Count one event per invocation of the decorated callable.
        record.meter.mark()
        return

    return metric_decorated(
        target,
        Meter,
        metered,
        before=before,
        **options
    )
| StarcoderdataPython |
178737 | #!/usr/bin/env python3
import pytest
import subprocess as sub
from bin import get_dust
def test_main():
    # Smoke test: run the get_dust script through the shell and make sure
    # it does not crash (the exit status is deliberately ignored).
    sub.call(get_dust.__file__, shell=True)
if __name__ == '__main__':
pytest.main()
| StarcoderdataPython |
1707880 | <reponame>ypix/TeTueTwitchBot
from random import choice, randint
from time import time
from .. import db
# Module-level state for the heist mini-game (not implemented in this
# chunk): `heist` holds the active heist, `heist_lock` the start timestamp.
heist = None
heist_lock = time()
def coinflip(bot, user, side=None, *args):
    """Chat command: guess a coin side; a correct guess awards 50 coins."""
    if side is None:
        bot.send_message("You need to guess which side the coin will land!")
        return
    side = side.lower()
    valid_sides = ("h", "t", "heads", "tails")
    if side not in valid_sides:
        bot.send_message("Enter one of the following as the side: " + ", ".join(valid_sides))
        return
    result = choice(("heads", "tails"))
    # Comparing first letters makes "h" match "heads" and "t" match "tails".
    if side[0] == result[0]:
        db.execute("UPDATE users SET Coins = Coins + 50 WHERE UserID = ?", user.get_id())
        bot.send_message(f"It landed on {result}! You won 50 coins!")
    else:
        bot.send_message(f"Too bad - it landed on {result}. You didn't win anything!")
| StarcoderdataPython |
3288857 | <filename>backend/api/serializers.py
from rest_framework import serializers
from api.models import *
# So a user can be a user
class UserSerializer(serializers.ModelSerializer):
    """Expose only the public identifying fields of a User."""
    class Meta:
        model = User
        fields = ('id', 'username')
# Converts the flashcard data as needed in order to be passed
class DeckInlineSerializer(serializers.ModelSerializer):
    """Compact Deck representation for embedding inside other resources."""
    class Meta:
        model = Deck
        fields = ('id', 'name', 'description', 'flashcards')
# Converts the flashcard data as needed in order to be passed
class FlashcardSerializer(serializers.ModelSerializer):
    """Serialize a Flashcard together with its parent deck reference."""
    class Meta:
        model = Flashcard
        fields = ('id', 'term', 'definition', 'parentdeck')
# Profile for a user to have decks
class ProfileSerializer(serializers.ModelSerializer):
    """Profile with its nested (read-only) user; deck embedding is
    currently disabled (see commented lines)."""
    user = UserSerializer(read_only=True)
    # decks = DeckInlineSerializer(read_only = True, many = True)
    class Meta:
        model = Profile
        fields = ('id', 'user',)
        # fields = ('id', 'user', 'deck')
    class JSONAPIMeta:
        included_resources = ['user']
        # included_resources = ['user', 'decks']
# included_resources = ['user', 'decks']
# Converts the deck data as needed in order to be passed
class DeckSerializer(serializers.ModelSerializer):
    """Deck with its flashcards nested read-only."""
    flashcards = FlashcardSerializer(read_only=True, many=True)
    class Meta:
        model = Deck
        fields = ('id', 'name', 'description', 'flashcards')
# Not used right now
class EventSerializer(serializers.ModelSerializer):
    """Serialize every field of Event (currently unused)."""
    class Meta:
        model = Event
        fields = "__all__"
# class DeckInlineSerializer(serializers.ModelSerializer):
# class Meta:
# model = Deck
# fields = ('id', 'name', 'description', 'flashcards', 'profile')
# class DeckSerializer(serializers.ModelSerializer):
# flashcards = FlashcardSerializer(read_only=True, many=True)
# included_serializers = {'profile': ProfileSerializer, }
# class Meta:
# model = Deck
# fields = ('id', 'name', 'description', 'flashcards', 'profile')
#
# class JSONAPIMeta:
# included_resources = ['profile']
# Converts the flashcard data as needed in order to be passed
# class FlashcardSerializer(serializers.ModelSerializer):
# parentdeck = DeckInlineSerializer(read_only=True, many=True)
# # parentdeck = DeckSerializer()
# class Meta:
# model = Flashcard
# fields = ('id', 'term', 'definition', 'parentdeck')
# class FlashcardSerializer(serializers.ModelSerializer):
# parentdeck = DeckInlineSerializer()
# class Meta:
# model = Flashcard
# fields = ('id', 'term', 'definition', 'parentdeck')
# def create(self, validated_data):
# parentdeck_parsed = validated_data.pop('parentdeck')
# flashcard = Flashcard.objects.create(**validated_data)
# Deck.objects.create(flashcard=flashcard, **parentdeck_parsed)
# return flashcard
# def create(self, validated_data):
# parentdeck_parsed = validated_data.pop('parentdeck')
# parentdeck = Deck.get_object_or_create(id=parentdeck_parsed.id)
# flashcard = Flashcard.objects.create(parentdeck=parentdeck, **validated_data)
# return flashcard
#
# def update(self, instance, validated_data):
# parentdeck_parsed = validated_data.pop('parentdeck')
# parentdeck = instance.parentdeck
#
# instance.term = validated_data.get('term', instance.term)
# instance.definition = validated_data.get('definition', instance.definition)
# instance.save()
# profile.is_premium_member = profile_data.get(
# 'is_premium_member',
# profile.is_premium_member
# )
# profile.has_support_contract = profile_data.get(
# 'has_support_contract',
# profile.has_support_contract
# )
# profile.save()
#
# return instance
# class DeckSerializer(serializers.ModelSerializer):
# flashcards = FlashcardSerializer()
# class Meta:
# model = Deck
# fields = ('id', 'name', 'description', 'flashcards')
# def create(self, validated_data):
# flashcards_parsed = validated_data.pop('flashcards')
# deck = Deck.objects.create(**validated_data)
# Flashcard.objects.create(deck=deck, **flashcards_parsed)
# return deck
| StarcoderdataPython |
1799888 | # coding=utf-8
from machine import Pin, Timer
# GPIO pins on the WiPy that can drive PWM output.
VALID_GP_PINS = [9, 10, 11, 24]
# Hardware timer number used for PWM on each GP pin.
PIN_PWM_TIMER = {
    9: 2,
    10: 3,
    11: 3,
    24: 0,
}
# Timer channel (A/B) used for PWM on each GP pin.
PIN_PWM_CHANNEL = {
    9: Timer.B,
    10: Timer.A,
    11: Timer.B,
    24: Timer.A,
}
# Pin alternate-function index selecting the PWM mux for each GP pin.
PIN_PWM_ALT = {
    9: 3,
    10: 3,
    11: 3,
    24: 5,
}
PERC_100 = 10000 # in 100% * 100
class Servo:
    """
    WiPy servo object
    Sets up Timer and Channel and performs calculation so servo angle is automatically converted to duty cycle.
    """
    def __init__(self, gp_pin, frequency, full_range100, pulse_min, pulse_max):
        """
        :param gp_pin: GPIO pin
        :param frequency: in Hz
        :param full_range100: in deg * 100
        :param pulse_min: in µs
        :param pulse_max: in µs
        :return:
        """
        if not gp_pin in VALID_GP_PINS:
            # NOTE(review): invalid pins are only reported, then the dict
            # lookups below raise KeyError anyway -- consider raising here.
            print('invalid GP pin:', gp_pin)

        # Get WiPy PWM configuration constants
        pin_alt = PIN_PWM_ALT[gp_pin]
        timer_nr = PIN_PWM_TIMER[gp_pin]
        timer_channel = PIN_PWM_CHANNEL[gp_pin]

        # Configure PWM timer to pin flow
        Pin('GP' + str(gp_pin), mode=Pin.ALT, alt=pin_alt)
        timer = Timer(timer_nr, mode=Timer.PWM)
        self.channel = timer.channel(timer_channel, freq=frequency)

        # Store object properties
        self.PWM_frame = 1000000 // frequency # in µs
        self.full_range100 = full_range100
        self.pulse_min = pulse_min
        self.pulse_diff = pulse_max - pulse_min

    def angle(self, angle100):
        """
        Set timer duty cycle to specified angle
        :param angle100: angle in deg * 100
        :return:
        """
        # Integer math throughout: fractions are scaled by PERC_100 (100%*100)
        # to avoid floats on the microcontroller.
        angle_fraction = PERC_100 * angle100 // self.full_range100    # in 100% * 100 format
        pulse_width = self.pulse_min + angle_fraction * self.pulse_diff // PERC_100 # in µs
        duty_cycle = PERC_100 * pulse_width // self.PWM_frame # in 100% * 100 format
        self.channel.duty_cycle(duty_cycle)
| StarcoderdataPython |
132009 | import numpy as np
import torch
import ptan
def unpack_batch_a2c(batch, net, last_val_gamma, device="cpu"):
    """
    Convert batch into training tensors
    :param batch: iterable of experience steps with .state, .action,
        .reward and .last_state (None when the episode terminated)
    :param net: value network used to bootstrap non-terminal transitions
    :param last_val_gamma: discount factor applied to the bootstrap value
    :param device: torch device string for the resulting tensors
    :return: states variable, actions tensor, reference values variable
    """
    states = []
    actions = []
    rewards = []
    not_done_idx = []
    last_states = []
    for idx, exp in enumerate(batch):
        states.append(exp.state)
        actions.append(exp.action)
        rewards.append(exp.reward)
        if exp.last_state is not None:
            # Non-terminal step: remember where to add the bootstrap value.
            not_done_idx.append(idx)
            last_states.append(exp.last_state)
    states_v = ptan.agent.float32_preprocessor(states).to(device)
    actions_v = torch.FloatTensor(actions).to(device)

    # handle rewards: bootstrap non-terminal transitions with the critic's
    # discounted value of the last observed state.
    rewards_np = np.array(rewards, dtype=np.float32)
    if not_done_idx:
        last_states_v = ptan.agent.float32_preprocessor(last_states).to(device)
        last_vals_v = net(last_states_v)
        last_vals_np = last_vals_v.data.cpu().numpy()[:, 0]
        rewards_np[not_done_idx] += last_val_gamma * last_vals_np

    ref_vals_v = torch.FloatTensor(rewards_np).to(device)
    return states_v, actions_v, ref_vals_v
| StarcoderdataPython |
3365363 | <filename>main.py
# encoding: utf-8
import re
from sys import argv #在cmd中读入文件所用的库
from Exceptions import NoTextError
from Exceptions import SameTextError
from Algorithm import lcs
from Algorithm import ld
from zhon.hanzi import punctuation #这是去除中文标点所用的库
def readin(textpath):
    """Return the entire contents of an already-open text file object."""
    content = textpath.read()
    return content
def check(file1, file2):
    """Read both open files and return their contents.

    Raises NoTextError when either file is empty.
    """
    maintext = readin(file1)
    comparedtext = readin(file2)
    # BUG FIX: the original condition `len(a)==0|len(b)==0` parses as
    # `len(a) == (0 | len(b))` because `|` binds tighter than `==`, i.e. it
    # raised whenever the two texts had EQUAL length.  Use `or` instead.
    if len(maintext) == 0 or len(comparedtext) == 0:
        raise NoTextError
    return maintext, comparedtext
file1 = open(argv[1], 'rt', encoding='utf-8')
file2 = open(argv[2], 'rt', encoding='utf-8')
# BUG FIX: the original discarded check()'s return value and then called
# str() on the *file objects*, so the similarity was computed on strings
# like "<_io.TextIOWrapper ...>" rather than the documents' text.
maintext, comparedtext = check(file1, file2)
file1.close()
file2.close()
# Strip Chinese punctuation before comparing.
maintext = re.sub("[%s]+" % punctuation, "", maintext)
comparedtext = re.sub("[%s]+" % punctuation, "", comparedtext)
AnsLcs = float(lcs(maintext, comparedtext))  # longest-common-subsequence length
AnsLd = float(ld(maintext, comparedtext))    # edit (Levenshtein) distance; see Algorithm.py
percentage = AnsLcs / (AnsLd + AnsLcs)
if (percentage == 1.00):
    raise SameTextError
else:
    ans = open(argv[3], 'w', encoding='utf-8')  # overwrite the answer file
    ans.write(str("%.2f" % percentage))  # write with two decimal places
    ans.close()
| StarcoderdataPython |
128238 | """Tests for the serializers of the drf_auth app."""
from django.test import TestCase
from mixer.backend.django import mixer
from .. import serializers
class LoginSerializerTestCase(TestCase):
    """Validation behaviour of LoginSerializer (not password checking)."""
    longMessage = True

    def test_serializer(self):
        user = mixer.blend('auth.User')
        user.set_password('<PASSWORD>')
        data = {}
        s = serializers.LoginSerializer(data=data)
        self.assertFalse(s.is_valid(), msg=(
            'Should be invalid if no data is given'))

        data = {'email': '<EMAIL>', 'password': '<PASSWORD>'}
        s = serializers.LoginSerializer(data=data)
        self.assertFalse(s.is_valid(), msg=(
            'Should be invalid if email does not exist'))

        data = {'email': user.email, 'password': '<PASSWORD>'}
        s = serializers.LoginSerializer(data=data)
        self.assertTrue(s.is_valid(), msg=(
            'Should be valid if all data is given and email does exist.'
            ' Note: Serializer does not check if password is correct. It'
            ' merely checks if the given data makes sense.'))
class FinishSignupSerializerTestCase(TestCase):
    """Uniqueness validation of the email field on FinishSignupSerializer."""
    longMessage = True

    def test_serializer(self):
        user = mixer.blend('auth.User')
        data = {}
        s = serializers.FinishSignupSerializer(data=data)
        self.assertFalse(s.is_valid(), msg=(
            'Should be invalid if no data is given'))

        data = {'email': user.email}
        s = serializers.FinishSignupSerializer(data=data)
        self.assertFalse(s.is_valid(), msg=(
            'Should be invalid if the email already exists'))

        data = {'email': '<EMAIL>'}
        s = serializers.FinishSignupSerializer(data=data)
        self.assertTrue(s.is_valid(), msg=(
            'Should be valid if new unique email is given'))
| StarcoderdataPython |
1714652 | import os
import pytest
from ray.serve.storage.kv_store import (RayInternalKVStore, RayLocalKVStore,
RayS3KVStore)
def test_ray_internal_kv(serve_instance):  # noqa: F811
    """Type validation plus basic put/get/overwrite on RayInternalKVStore."""
    with pytest.raises(TypeError):
        RayInternalKVStore(namespace=1)
    RayInternalKVStore(namespace=b"")

    kv = RayInternalKVStore()

    # Keys must be str, values must be bytes.
    with pytest.raises(TypeError):
        kv.put(1, b"1")
    with pytest.raises(TypeError):
        kv.put("1", 1)
    with pytest.raises(TypeError):
        kv.put("1", "1")

    kv.put("1", b"2")
    assert kv.get("1") == b"2"
    kv.put("2", b"4")
    assert kv.get("2") == b"4"
    # Overwriting one key must not disturb another.
    kv.put("1", b"3")
    assert kv.get("1") == b"3"
    assert kv.get("2") == b"4"
def test_ray_internal_kv_collisions(serve_instance):  # noqa: F811
    """Keys in different namespaces must not collide."""
    kv1 = RayInternalKVStore()
    kv1.put("1", b"1")
    assert kv1.get("1") == b"1"

    kv2 = RayInternalKVStore("namespace")
    assert kv2.get("1") is None
    kv2.put("1", b"-1")
    assert kv2.get("1") == b"-1"
    assert kv1.get("1") == b"1"
def _test_operations(kv_store):
    """Shared contract test: get/put/overwrite/delete on any KV backend."""
    # Trival get & put
    kv_store.put("1", b"1")
    assert kv_store.get("1") == b"1"
    kv_store.put("2", b"2")
    assert kv_store.get("1") == b"1"
    assert kv_store.get("2") == b"2"

    # Overwrite same key
    kv_store.put("1", b"-1")
    assert kv_store.get("1") == b"-1"

    # Get non-existing key
    assert kv_store.get("3") is None

    # Delete existing key
    kv_store.delete("1")
    kv_store.delete("2")
    assert kv_store.get("1") is None
    assert kv_store.get("2") is None

    # Delete non-existing key
    kv_store.delete("3")
def test_external_kv_local_disk():
    """Run the shared KV contract against the SQLite-backed local store."""
    kv_store = RayLocalKVStore("namespace", "test_kv_store.db")
    _test_operations(kv_store)
@pytest.mark.skip(reason="Need to figure out credentials for testing")
def test_external_kv_aws_s3():
    """Run the shared KV contract against S3 (skipped: needs credentials)."""
    kv_store = RayS3KVStore(
        "namespace",
        bucket="jiao-test",
        s3_path="/checkpoint",
        aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID", None),
        aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY", None),
        aws_session_token=os.environ.get("AWS_SESSION_TOKEN", None),
    )
    _test_operations(kv_store)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| StarcoderdataPython |
3362873 | """Data and commands for REPL"""
__all__ = ["CMDS", "MOVES", "ERRS", "META", "TITLE"]

# ASCII-art banner shown when the game starts.
TITLE = r"""Welcome to...
__/\\\\\\\\\\\\__________/\\\\\_________/\\\\\\\\\______/\\\________/\\\_
_\/\\\////////\\\______/\\\///\\\_____/\\\///////\\\___\/\\\_____/\\\//__
_\/\\\______\//\\\___/\\\/__\///\\\__\/\\\_____\/\\\___\/\\\__/\\\//_____
_\/\\\_______\/\\\__/\\\______\//\\\_\/\\\\\\\\\\\/____\/\\\\\\//\\\_____
_\/\\\_______\/\\\_\/\\\_______\/\\\_\/\\\//////\\\____\/\\\//_\//\\\____
_\/\\\_______\/\\\_\//\\\______/\\\__\/\\\____\//\\\___\/\\\____\//\\\___
_\/\\\_______/\\\___\///\\\__/\\\____\/\\\_____\//\\\__\/\\\_____\//\\\__
_\/\\\\\\\\\\\\/______\///\\\\\/_____\/\\\______\//\\\_\/\\\______\//\\\_
_\////////////__________\/////_______\///________\///__\///________\///__
...A game of mystery and intrigue, but most importantly, memes!"""

# Movement aliases: each maps to a handler name plus a direction argument.
MOVES = {
    "n": ["_move", "north"],
    "s": ["_move", "south"],
    "e": ["_move", "east"],
    "w": ["_move", "west"],
    "north": ["_move", "north"],
    "south": ["_move", "south"],
    "east": ["_move", "east"],
    "west": ["_move", "west"]
}

# In-game commands: verbs either delegate to MOVES or name a handler.
CMDS = {
    "go": MOVES,
    "move": MOVES,
    "walk": MOVES,
    "travel": MOVES,
    "run": MOVES,
    "head": MOVES,
    "look": ["_look"],
    "i": ["_inventory"],
    "inv": ["_inventory"],
    "inventory": ["_inventory"],
    "examine": ["_examine"],
    "add": ["_take_item"],
    "grab": ["_take_item"],
    "take": ["_take_item"],
    "loot": ["_take_item"],
    "drop": ["_drop_item"],
    "toss": ["_drop_item"],
}

# Out-of-game ("meta") commands, all prefixed with a dot.
META = {
    ".new": ["_start_over"],
    ".load": ["_start_over"],
    ".save": ["_save_game"],
    ".rq": ["_gtfo"],
    ".z": ["_zork"],
    ".m": ["_draw_maze"],
    ".v": ["_toggle_verbose"],
}

# Canned error responses, keyed by error condition.
ERRS = {
    "u": ["_repl_error", "Sorry, I don't know that one."],
    "?": ["_repl_error", "Huh? Can you speak up?"],
    "which way": ["_repl_error", "Um. Where are you trying to go?"],
    "twice": ["_repl_error", "A command so nice you said it twice!\n...idiot"]
}
| StarcoderdataPython |
171350 | from __future__ import division
from __future__ import print_function
import json
import pdb
import math
# Load experiment configuration and rescale time-based parameters by the
# sample rate.
all_params = json.load(open('config.json'))

dataset_name = all_params['dataset_name']

# At module level locals() is globals(), so these inject the config's
# experiment-setup and dataset-specific keys (tcn_params, rl_params,
# sample_rate, ...) as module globals.
locals().update(all_params['experiment_setup'])
locals().update(all_params[dataset_name])

# Kernel sizes are specified in raw time steps; convert to sampled steps.
tcn_params['model_params']['encoder_params']['kernel_size'] //= sample_rate
tcn_params['model_params']['decoder_params']['kernel_size'] //= sample_rate
tcn_params['model_params']['mid_lstm_params'] = None

# Rescale RL step horizons to the sampled time base (rounding up).
temp = []
for k in rl_params['k_steps']:
    temp.append(math.ceil(k / sample_rate))
rl_params['k_steps'] = temp

temp = []
for k in rl_params['glimpse']:
    temp.append(math.ceil(k / sample_rate))
rl_params['glimpse'] = temp
#tcn_params['train_params'] = None | StarcoderdataPython |
189879 | <filename>Convolutional-Neural-Networks/cnn_classification.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 02 13:40:22 2020
@author: ls616
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import imutils
import os
from os import listdir
from distutils.dir_util import copy_tree
from shutil import rmtree
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Input, ZeroPadding2D, BatchNormalization, Activation, MaxPooling2D, Flatten, Dense, GlobalAveragePooling2D
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.utils import shuffle
import warnings
warnings.filterwarnings("ignore")
## set wd ##
os.chdir('/Users/ls616/Google Drive/MPE CDT/ML Projects/Projects/CNN/')
### DATA AUGMENTATION ###
## exploratory analysis of data ##
## fn. to count no. of samples (original OR augmented OR both) ##
def count_samples(dir_list, aug_dir_list, sample_type="original"):
    """Count image samples per class directory and print a summary.

    Parameters
    ----------
    dir_list : list[str]
        Directories of original samples, ordered [with-tumour, without].
    aug_dir_list : list[str] or None
        Directories of augmented samples (same order); may be None when
        sample_type == "original".
    sample_type : {"original", "augmented", "both"}
        Which counts to compute and return.

    Returns
    -------
    tuple
        (per-class counts, total) for "original"/"augmented"; for "both",
        (orig counts, orig total, aug counts, aug total, all counts,
        all total).
    """
    if sample_type == "original" or sample_type == "both":
        n_samples = []
        for i in dir_list:
            # Ignore hidden files such as .DS_Store.
            n_samples.append(len([name for name in listdir(i) if not name.startswith(".")]))
        n_total_samples = np.sum(n_samples)
        print(f"\nThere are {n_total_samples} original samples in total")
        print(f"There are {n_samples[0]} ({round(n_samples[0]/n_total_samples*100,2)} %) original samples with tumours")
        print(f"There are {n_samples[1]} ({round(n_samples[1]/n_total_samples*100,2)} %) original samples without tumours\n")
        if sample_type == "original":
            return n_samples, n_total_samples

    if sample_type == "augmented" or sample_type == "both":
        n_aug_samples = []
        for i in aug_dir_list:
            n_aug_samples.append(len([name for name in listdir(i) if not name.startswith(".")]))
        n_total_aug_samples = np.sum(n_aug_samples)
        print(f"\nThere are {n_total_aug_samples} augmented samples in total")
        print(f"There are {n_aug_samples[0]} ({round(n_aug_samples[0]/n_total_aug_samples*100,2)} %) augmented samples with tumours")
        print(f"There are {n_aug_samples[1]} ({round(n_aug_samples[1]/n_total_aug_samples*100,2)} %) augmented samples without tumours\n")
        if sample_type == "augmented":
            # BUG FIX: the original returned the undefined name
            # `n_aug_total_samples`, raising NameError on this path.
            return n_aug_samples, n_total_aug_samples

        if sample_type == "both":
            n_all_samples = [sum(x) for x in zip(n_samples, n_aug_samples)]
            n_all_total_samples = n_total_samples + n_total_aug_samples
            print(f"\nThere are {n_all_total_samples} samples in total")
            print(f"There are {n_all_samples[0]} ({round(n_all_samples[0]/n_all_total_samples*100,2)} %) samples with tumours")
            print(f"There are {n_all_samples[1]} ({round(n_all_samples[1]/n_all_total_samples*100,2)} %) samples without tumours\n")
            # BUG FIX: same undefined-name problem here in the original.
            return n_samples, n_total_samples, n_aug_samples, n_total_aug_samples, n_all_samples, n_all_total_samples
## count samples ##
dir_list = ['og_data/yes','og_data/no']
n_samples, n_total_samples = count_samples(dir_list=dir_list,aug_dir_list=None,sample_type="original")
## fn. to augment data ##
def augment_data(dir_list, n_new_samples, aug_dir_list):
    """Generate augmented copies of every image in dir_list.

    For each class directory dir_list[i], writes up to n_new_samples[i]
    randomly transformed variants of each image into aug_dir_list[i]
    (which is wiped first).
    """
    data_gen = ImageDataGenerator(rotation_range=10,
                                  width_shift_range=0.1,
                                  height_shift_range=0.1,
                                  shear_range=0.1,
                                  brightness_range=(0.3, 1.0),
                                  horizontal_flip=True,
                                  vertical_flip=True,
                                  fill_mode='nearest'
                                  )

    for i in range(len(dir_list)):
        ## delete any existing files in target
        rmtree(aug_dir_list[i])
        os.mkdir(aug_dir_list[i])
        for j in listdir(dir_list[i]):
            # load image
            img = cv2.imread(dir_list[i]+'/'+j)
            # reshape image to a batch of one, as flow() expects
            img = img.reshape((1,)+img.shape)
            # save directory
            save_to_dir = aug_dir_list[i]
            # save prefixed; j[:-4] strips the ".jpg" extension
            save_prefix = 'aug_' + j[:-4]
            # generate 'n_generated_samples' new samples
            count = 1
            for batch in data_gen.flow(x=img, batch_size=1,
                                       save_to_dir=save_to_dir,
                                       save_prefix=save_prefix,
                                       save_format='jpg'):
                count += 1
                if count > n_new_samples[i]:
                    break
## augment data ##
## to balance data, we generate:
## -> 1 new samples for each image with a tumour
## -> 2 new samples for each image without a tumour
n_new_samples = [12,19]
aug_dir_list = ['aug_data/yes','aug_data/no']
augment_data(dir_list,n_new_samples,aug_dir_list)
## count no. of augmented samples ##
n_aug_samples, n_aug_total_samples = count_samples(dir_list,aug_dir_list,sample_type="augmented")
## count no. of all (original and augmented) samples ##
n_samples, n_total_samples, n_aug_samples, n_aug_total_samples, n_all_samples, n_all_total_samples = count_samples(dir_list,aug_dir_list,"both")
## merge all data in single folder
all_dir_list = ['all_data/yes','all_data/no']
for i in range(2):
## delete any existing files in target
rmtree(all_dir_list[i])
os.mkdir(all_dir_list[i])
## copy original data
copy_tree(dir_list[i], all_dir_list[i])
## copy augmented data
copy_tree(aug_dir_list[i],all_dir_list[i])
### MODEL FITTING ###
## fn. to crop images ##
## see https://www.pyimagesearch.com/2016/04/11/finding-extreme-points-in-contours-with-opencv/ ##
def crop_img(img, plot=False):
    """Crop an MRI image to the bounding box of the brain region.

    Thresholds the blurred grayscale image, finds the largest contour and
    crops to its extreme points.  With plot=True, shows original and
    cropped images side by side.
    """
    # Convert image to grayscale, add blur
    bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    bw = cv2.GaussianBlur(bw, (5, 5), 0)

    # Threshold image, use erosions + dilations to remove regions of noise
    thrsh = cv2.threshold(bw.copy(), 45, 255, cv2.THRESH_BINARY)[1]
    thrsh = cv2.erode(thrsh, None, iterations=2)
    thrsh = cv2.dilate(thrsh, None, iterations=2)

    # Find contours in thresholded image, select the largest contour
    cnts = cv2.findContours(thrsh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)

    # Find extreme points
    ext_left = tuple(c[c[:, :, 0].argmin()][0])
    ext_right = tuple(c[c[:, :, 0].argmax()][0])
    ext_top = tuple(c[c[:, :, 1].argmin()][0])
    ext_bot = tuple(c[c[:, :, 1].argmax()][0])

    # Crop the new image out of original image using the extreme points
    new_img = img[ext_top[1]:ext_bot[1], ext_left[0]:ext_right[0]]

    if plot:
        plt.figure()
        plt.subplot(1, 2, 1)
        plt.imshow(img)
        plt.tick_params(axis='both', which='both',
                        top=False, bottom=False, left=False, right=False,
                        labelbottom=False, labeltop=False, labelleft=False, labelright=False)
        plt.title('Original Image')
        plt.subplot(1, 2, 2)
        plt.imshow(new_img)
        plt.tick_params(axis='both', which='both',
                        top=False, bottom=False, left=False, right=False,
                        labelbottom=False, labeltop=False, labelleft=False, labelright=False)
        plt.title('Cropped Image')
        plt.show()

    return new_img
## test on single image ##
img_test = cv2.imread('all_data/yes/Y1.jpg')
crop_img(img_test,plot=True)
## fn. to import images ##
def import_data(img_size, dir_list):
    """Load, crop, resize and normalize all images under dir_list.

    Directory names ending in "yes"/"no" determine the binary label.
    Returns shuffled arrays X (N, H, W, 3) with values in [0, 1] and y (N,).
    """
    ## initialise inputs and outputs
    X, y = [], []
    for i in dir_list:
        for j in listdir(i):
            ## load image
            img = cv2.imread(i+"/"+j)
            ## crop image ##
            img = crop_img(img)
            ## resize image
            img = cv2.resize(img, dsize=img_size, interpolation=cv2.INTER_CUBIC)
            ## normalise values to [0, 1]
            img = img/255
            ## append to X
            X.append(img)
            ## append classification value to y (directory suffix encodes it)
            if i[-3:] == "yes":
                y.append(1)
            if i[-2:] == "no":
                y.append(0)

    ## convert to np array
    X = np.array(X)
    y = np.array(y)

    ## shuffle data (keeps X/y aligned)
    X, y = shuffle(X, y)

    ## summarise data
    print(f'The number of samples is {len(X)}')
    print(f'The size of X is {X.shape}')
    print(f'The size of y is {y.shape}')

    return X, y
## import data ##
img_size = (224,224); dir_list = ['all_data/yes','all_data/no']
X,y = import_data(img_size,dir_list)
## fn. to plot several images ##
def plot_images(X, y, n=8):
    """Display the first n images of each class (y==0 and y==1) in a grid."""
    ## first n imgs with y=0,1 ##
    for y_label in [0, 1]:
        imgs = X[np.argwhere(y == y_label)]
        n_imgs = imgs[:n]

        ## arrange images in a 4-column grid
        col_n = 4
        row_n = int(n/col_n)
        plt.figure(figsize=(col_n*2, row_n*2+.1))
        counter = 1 # current plot
        for image in n_imgs:
            plt.subplot(row_n, col_n, counter)
            # argwhere adds a leading axis, hence image[0]
            plt.imshow(image[0])
            # remove ticks
            plt.tick_params(axis='both', which='both',
                            top=False, bottom=False, left=False, right=False,
                            labelbottom=False, labeltop=False, labelleft=False,
                            labelright=False)
            counter += 1
        label_to_str = lambda x: "" if x == 1 else "No"
        plt.suptitle(f"{label_to_str(y_label)} Brain Tumor")
        plt.show()
## plot images ##
plot_images(X,y,8)
## training and test data ##
def train_test_val_split(X, y, val_size, test_size):
    """Split X/y into train, validation and test partitions.

    val_size and test_size are fractions of the full data set; whatever
    remains becomes the training partition.
    """
    holdout_frac = val_size + test_size
    # First carve off the combined validation+test pool...
    X_train, X_pool, y_train, y_pool = train_test_split(
        X, y, test_size=holdout_frac)
    # ...then divide that pool between validation and test.
    X_val, X_test, y_val, y_test = train_test_split(
        X_pool, y_pool, test_size=test_size / holdout_frac)

    print(f"\nThe total no. of samples is {X.shape[0]}")
    print(f"The no. of training samples is {X_train.shape[0]}")
    print(f"The no. of validation samples is {X_val.shape[0]}")
    print(f"The no. of test samples is {X_test.shape[0]}\n")

    return X_train, X_val, X_test, y_train, y_val, y_test
X_train,X_val,X_test,y_train,y_val,y_test = train_test_val_split(X,y,val_size=0.15,test_size=0.15)
## fn. to build model ##
def build_model(input_shape):
    """Build the tumour-classification ConvNet.

    Architecture: zero-pad -> 7x7 conv (32 filters) -> batch-norm ->
    ReLU -> two 4x4 max-pools -> flatten -> single sigmoid unit.
    Prints a summary and returns the (uncompiled) Keras model.
    """
    ## initialise model
    model = Sequential()

    ## zero padding
    model.add(ZeroPadding2D((2,2), input_shape = input_shape))

    ## conv2d layer
    model.add(Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0'))

    ## batch normalisation layer
    model.add(BatchNormalization(axis = 3, name = 'bn0'))

    ## activation
    model.add(Activation('relu')) # shape=(?, 238, 238, 32)

    # max pooling
    model.add(MaxPooling2D((4, 4), name='max_pool0')) # shape=(?, 59, 59, 32)

    # max pooling
    model.add(MaxPooling2D((4, 4), name='max_pool1')) # shape=(?, 14, 14, 32)

    # flatten
    model.add(Flatten()) # shape=(?, 6272)

    # dense: single sigmoid output for binary classification
    model.add(Dense(1, activation='sigmoid', name='fc')) # shape=(?, 1)

    print(model.summary())

    return model
## build model
input_shape = X_train.shape[1:]
model = build_model(input_shape)
## compile model
model.compile(optimizer="adam",loss="binary_crossentropy",metrics=["accuracy"])
## train model
history = model.fit(x=X_train, y=y_train, batch_size=32, epochs=50,
validation_data=(X_val, y_val),verbose=1)
| StarcoderdataPython |
89613 | <filename>notion_extensions/base/props/block/link_to_page.py<gh_stars>1-10
import sys
from typing import Dict, Union
if sys.version_info >= (3, 8): # "from typing" in Python 3.9 and earlier
from typing import Literal
else:
from typing_extensions import Literal
from .block import Block
from ...utils import parse_id
__all__ = [
"LinkToPage",
]
class LinkToPage(Block):
    """
    LinkToPage
    LinkToPage property values of block

    Attributes
    ----------
    type_ : str
        Type of this link to page object. Possible values are: "page", "database"
    id_ : str
        Identifier for a page or a database page

    Methods
    -------
    clear()
        Clear data of title
    json()
        Return this class as dictionary
    """

    TEMPLATE: Dict[str, Union[str, Dict]] = {
        "type": "link_to_page",
        "link_to_page": {},
    }

    def __init__(
        self,
        type_: Literal["page", "database"],
        id_: str,
    ):
        """
        Parameters
        ----------
        type_ : 'page' or 'database'
            Type of this link to page object. Possible values are: "page", "database"
        id_ : str
            Identifier for a page or a database page, URL style is ok.
        """
        super().__init__()
        if type_ not in ("page", "database"):
            raise ValueError("type_ must be `page` or `database`")
        # parse_id accepts raw ids or full Notion URLs.
        id_ = parse_id(id_=id_, type_=type_)
        # Notion's payload uses "page_id" / "database_id" as the key name.
        type_ = f"{type_}_id"
        self["link_to_page"]["type"] = type_
        self["link_to_page"][type_] = id_

    @property
    def type_(self) -> str:
        # Stored discriminator key: "page_id" or "database_id".
        return self["link_to_page"]["type"]

    @property
    def id_(self) -> str:
        # The target page/database id, keyed by the discriminator.
        return self["link_to_page"][self.type_]
| StarcoderdataPython |
66599 | from django.utils.crypto import get_random_string
from google.appengine.ext import ndb
class AppConfig(ndb.Model):
    # Persisted Django SECRET_KEY for this App Engine application.
    secret_key = ndb.StringProperty()

    @classmethod
    def get(cls):
        """Singleton configuration to store the Django secret key."""
        # A fresh key is generated on every call, but get_or_insert only
        # persists it the first time; afterwards the stored entity wins.
        chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
        secret_key = get_random_string(50, chars)
        return cls.get_or_insert('config', secret_key=secret_key)
| StarcoderdataPython |
1634163 | <reponame>campovski/hash20
class Reader:
    """Parse a Hash Code 2020 book-scanning input file.

    Attributes
    ----------
    B, L, D : int
        Number of libraries, number of distinct books, number of days.
    scores : list[int]
        Score of each book.
    libraries : list[dict]
        One dict per library with keys 'signup', 'canShip' and the sorted
        list 'books'.
    """

    def __init__(self, filename):
        self.libraries = []
        with open(filename, 'r') as f:
            first_line = f.readline().split()
            self.B = int(first_line[0])
            self.L = int(first_line[1])
            self.D = int(first_line[2])
            # BUG FIX: the original stored a one-shot map object; a list is
            # reusable and indexable.
            self.scores = [int(x) for x in f.readline().split()]
            for _ in range(self.B):
                try:
                    # BUG FIX: map objects are not subscriptable in
                    # Python 3, so the original's `first_line_l[1]` raised
                    # TypeError on every library -- silently swallowed by a
                    # bare `except: pass`, leaving self.libraries empty.
                    header = [int(x) for x in f.readline().split()]
                    library = {
                        'signup': header[1],
                        'canShip': header[2],
                        'books': sorted(int(x) for x in f.readline().split()),
                    }
                    self.libraries.append(library)
                except (ValueError, IndexError):
                    # Tolerate short/malformed trailing lines, preserving
                    # the original's best-effort behaviour.
                    pass
if __name__ == "__main__":
Reader('testcases/example.txt')
| StarcoderdataPython |
1731630 | '''Trains a convolutional neural network on sample images from the environment
using neuroevolution to maximize the ability to discriminate between input
images.
Reference:
Koutnik, Jan, <NAME>, and <NAME>. "Evolving deep
unsupervised convolutional networks for vision-based reinforcement
learning." Proceedings of the 2014 conference on Genetic and
evolutionary computation. ACM, 2014.
'''
import random
import numpy as np
from cnn import create_cnn, calculate_cnn_output, calculate_fitness
from nn_utilities import update_model_weights
from datasets import load_images_torcs_4
from visualization import plot_feature_vectors
from deap import algorithms, base, creator, tools
from operator import attrgetter
from matplotlib import pyplot as plt
# Set the following parameters:
OUTPUT_DIR = 'experiments/train_cnn_ga_11/'

# Training images sampled from the TORCS environment.
images = load_images_torcs_4()

# Create the ConvNet and load the training set
model = create_cnn()

# DEAP setup: single-objective maximisation; each individual is a flat
# list of floats interpreted as ConvNet weights.
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)

# Genome length — presumably the ConvNet's parameter count; TODO confirm
# against create_cnn().
INDIVIDUAL_SIZE = 993

toolbox = base.Toolbox()
# Genes are uniform random floats in [-1.5, 1.5].
toolbox.register("attr_float", random.uniform, -1.5, 1.5)
toolbox.register("individual",
                 tools.initRepeat,
                 creator.Individual,
                 toolbox.attr_float,
                 n=INDIVIDUAL_SIZE)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
def ga_fitness(individual):
    """Score one genome: load its weights into the ConvNet and measure how
    well the resulting feature vectors discriminate the sample images.

    Returns a 1-tuple, as required by DEAP fitness evaluation.
    """
    weights = np.asarray(individual)
    update_model_weights(model, weights)
    feature_vectors = calculate_cnn_output(model, images)
    return (calculate_fitness(feature_vectors),)
# GA operators: fitness evaluation, two-point crossover, Gaussian mutation
# (per-gene probability 0.05) and tournament selection.
toolbox.register("evaluate", ga_fitness)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1.5, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=10) # Optimize this hyperparameter
# This is a modified version of the eaSimple algorithm included with DEAP here:
# https://github.com/DEAP/deap/blob/master/deap/algorithms.py#L84
def eaSimpleModified(population, toolbox, cxpb, mutpb, ngen, stats=None,
                     halloffame=None, verbose=__debug__):
    """Variant of DEAP's eaSimple that additionally returns the best
    individual from every generation.

    Parameters mirror deap.algorithms.eaSimple: population (list of
    individuals), toolbox (with evaluate/mate/mutate/select registered),
    crossover probability cxpb, mutation probability mutpb, number of
    generations ngen, optional Statistics, optional HallOfFame, verbose flag.

    Returns (population, logbook, best) where best[g] is the fittest
    individual of generation g (index 0 is the initial population).
    """
    logbook = tools.Logbook()
    logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in population if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit
    if halloffame is not None:
        halloffame.update(population)
    record = stats.compile(population) if stats else {}
    logbook.record(gen=0, nevals=len(invalid_ind), **record)
    if verbose:
        print(logbook.stream)
    # Track the fittest individual of each generation (the eaSimple extension).
    best = []
    best_ind = max(population, key=attrgetter("fitness"))
    best.append(best_ind)
    # Begin the generational process
    for gen in range(1, ngen + 1):
        # Select the next generation individuals
        offspring = toolbox.select(population, len(population))
        # Vary the pool of individuals (crossover then mutation)
        offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        # Save the best individual from the generation
        best_ind = max(offspring, key=attrgetter("fitness"))
        best.append(best_ind)
        # Update the hall of fame with the generated individuals
        if halloffame is not None:
            halloffame.update(offspring)
        # Replace the current population by the offspring
        population[:] = offspring
        # Append the current generation statistics to the logbook
        record = stats.compile(population) if stats else {}
        logbook.record(gen=gen, nevals=len(invalid_ind), **record)
        if verbose:
            print(logbook.stream)
    return population, logbook, best
def run(num_gen=10,
        n=100,
        mutpb=0.8,
        cxpb=0.5):
    """Run one seeded GA experiment.

    Returns (population, logbook, hall_of_fame, history, best_per_generation).
    """
    np.random.seed(0)

    # Record genealogy of the variation operators.
    history = tools.History()
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)

    population = toolbox.population(n=n)
    history.update(population)

    hall_of_fame = tools.HallOfFame(1)

    # Per-generation fitness statistics.
    fitness_stats = tools.Statistics(lambda ind: ind.fitness.values)
    fitness_stats.register("avg", np.mean)
    fitness_stats.register("std", np.std)
    fitness_stats.register("min", np.min)
    fitness_stats.register("max", np.max)

    population, logbook, best = eaSimpleModified(population,
                                                 toolbox,
                                                 cxpb=cxpb,
                                                 mutpb=mutpb,
                                                 ngen=num_gen,
                                                 stats=fitness_stats,
                                                 halloffame=hall_of_fame,
                                                 verbose=True)
    return population, logbook, hall_of_fame, history, best
def plot_results(filename,
                 gen,
                 fitness_maxs,
                 fitness_avgs):
    """Plot maximum and average fitness per generation and save to *filename*."""
    fig, axis = plt.subplots()
    max_line = axis.plot(gen, fitness_maxs, "r-", label="Maximum Fitness")
    avg_line = axis.plot(gen, fitness_avgs, "b-", label="Average Fitness")
    handles = max_line + avg_line
    labels = [handle.get_label() for handle in handles]
    axis.legend(handles, labels, loc="lower right")
    axis.set_xlabel('Generation')
    axis.set_ylabel('Fitness')
    plt.savefig('{}'.format(filename))
def run_experiments(output_dir):
    """Sweep mutation probabilities, saving fitness plots, the overall best
    genome, and per-generation feature-vector plots into *output_dir*."""
    POPULATION_SIZE = 100
    NUM_GENERATIONS = 100
    CROSSOVER_PROB = 0.5
    MUTATION_PROBS = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50]
    for mutation_prob in MUTATION_PROBS:
        pop, log, hof, history, best_per_gen = run(num_gen=NUM_GENERATIONS,
                                                   n=POPULATION_SIZE,
                                                   cxpb=CROSSOVER_PROB,
                                                   mutpb=mutation_prob)
        # Persist the hall-of-fame genome and the fitness curves for this run.
        best = np.asarray(hof)
        gen = log.select("gen")
        fitness_maxs = log.select("max")
        fitness_avgs = log.select("avg")
        plot_results(filename='{}train_cnn_ga_mutpb_{}.png'.
                     format(output_dir,
                            str(mutation_prob).replace('.', '_')),
                     gen=gen,
                     fitness_maxs=fitness_maxs,
                     fitness_avgs=fitness_avgs)
        np.savetxt('{}train_cnn_ga_mutpb_{}.out'.
                   format(output_dir,
                          str(mutation_prob).replace('.', '_')), best)
        # Plot the feature vectors produced by the best individual from each
        # generation
        for gen in range(len(best_per_gen)):
            update_model_weights(model, np.asarray(best_per_gen[gen]))
            feature_vectors = calculate_cnn_output(model, images)
            plot_feature_vectors(feature_vectors, filename='{}feature_vectors_{}__{}.png'.\
                                 format(output_dir, str(mutation_prob).replace('.', '_'), gen))
if __name__ == "__main__":
    # Seed both RNGs so experiment runs are reproducible.
    np.random.seed(0)
    random.seed(0)
    run_experiments(output_dir=OUTPUT_DIR)
| StarcoderdataPython |
3374274 | """
Read in a process Dee Bore data
Author: jpolton
Date: 26 Sept 2020
Conda environment:
coast + requests,
(E.g. workshop_env w/ requests)
### Build python environment:
## Create an environment with coast installed
yes | conda env remove --name workshop_env
yes | conda create --name workshop_env python=3.8
conda activate workshop_env
yes | conda install -c bodc coast=1.2.7
# enforce the GSW package number (something fishy with the build process bumped up this version number)
yes | conda install -c conda-forge gsw=3.3.1
# install cartopy, not part of coast package
yes | conda install -c conda-forge cartopy=0.20.1
## install request for shoothill server requests
conda install requests
Example usage:
python deebore.py
To do:
* Smooth data before finding flood ebb
* Workflow for updating all measured data
"""
import os
import sys
import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
import sklearn.metrics as metrics
import pytz
import pickle
if(0): # Use the COAsT files, in e.g. coast_dev
coastdir = os.path.dirname('/Users/jeff/GitHub/COAsT/coast')
sys.path.insert(0, coastdir)
from coast.tidegauge import Tidegauge
from coast.general_utils import day_of_week
from coast.stats_util import find_maxima
else: # Use the COAsT package in e.g. workshop_env
#from coast.tidegauge import Tidegauge
from shoothill_api.shoothill_api import GAUGE
from coast.general_utils import day_of_week
from coast.stats_util import find_maxima
import scipy.signal # find_peaks
import logging
# Write debug-level log output to bore.log, truncating any previous run's log.
logging.basicConfig(filename='bore.log', filemode='w+')
logging.getLogger().setLevel(logging.DEBUG)
################################################################################
class OpenWeather:
    """
    Class to load in an export OpenWeather history file at Hawarden Airport into
    an xarray dataset.
    """

    def __init__(self):
        self.dataset = None

    #%% Load method
    @classmethod
    def read_openweather_to_xarray(cls, fn_openweather, date_start=None, date_end=None):
        """
        For reading from a single OpenWeather csv history file into an
        xarray dataset.
        If no data lies between the specified dates, a dataset is still created
        containing information on the gauge, but the time dimension will
        be empty.

        The data takes the form:
        dt,dt_iso,timezone,city_name,lat,lon,temp,feels_like,temp_min,temp_max,pressure,sea_level,grnd_level,humidity,wind_speed,wind_deg,rain_1h,rain_3h,snow_1h,snow_3h,clouds_all,weather_id,weather_main,weather_description,weather_icon
        1104537600,2005-01-01 00:00:00 +0000 UTC,0,hawarden airport,53.176908,-2.978784,7.63,6.95,7.54,7.74,1024,,,99,1.5,150,,,,,75,803,Clouds,broken clouds,04n
        1104541200,2005-01-01 01:00:00 +0000 UTC,0,hawarden airport,53.176908,-2.978784,4.83,2.61,4.54,7.54,1023,,,99,2.6,170,,,,,28,802,Clouds,scattered clouds,03n
        ...

        Parameters
        ----------
        fn_openweather (str) : path to OpenWeather location file
        date_start (datetime) : start date for returning data
        date_end (datetime) : end date for returning data

        Returns
        -------
        xarray.Dataset object.
        E.g.
        Coordinates:
          * time         (time) datetime64[ns] 2005-01-01 ... 2021-11-08T23:00:00
        Data variables:
            wind_speed   (time) float64 1.5 2.6 4.6 4.1 5.1 ... 3.6 4.12 0.89 4.02 2.68
            wind_deg     (time) int64 150 170 200 220 210 200 ... 180 190 210 117 239 226
            longitude    float64 -2.979
            latitude     float64 53.18
            site_name    object 'hawarden airport'
        """
        try:
            dataset = cls.read_openweather_data(fn_openweather, date_start, date_end)
        except Exception as err:
            # Chain the cause so the underlying parse error is not lost.
            raise Exception("Problem reading OpenWeather file: " + fn_openweather) from err
        # Attributes.
        # BUGFIX: longitude was previously taken from the 'lat' column and
        # latitude from 'lon'. The csv stores latitude (~53.18N) under 'lat'
        # and longitude (~-2.98E) under 'lon'; assign them correctly.
        dataset["longitude"] = float(dataset["lon"][0])
        dataset["latitude"] = float(dataset["lat"][0])
        dataset["site_name"] = str(dataset["city_name"][0])
        dataset = dataset.drop_vars(["lon", "lat", "city_name"])
        return dataset

    @classmethod
    def read_openweather_data(cls, filnam, date_start=None, date_end=None):
        """
        Reads NRW data from a csv file.

        Parameters
        ----------
        filnam (str) : path to OpenWeather file
        date_start (np.datetime64) : start date for returning data.
        date_end (np.datetime64) : end date for returning data.

        Returns
        -------
        xarray.Dataset containing times, wind_speed, wind_deg, lat, lon, city_name
        """
        import datetime

        # Initialise empty dataset and lists
        dataset = xr.Dataset()
        # Parse timestamps of the form "2005-01-01 00:00:00 +0000 UTC"
        custom_date_parser = lambda x: datetime.datetime.strptime(x, "%Y-%m-%d %H:%M:%S +0000 UTC")
        data = pd.read_csv(filnam, delimiter=',', parse_dates=['dt_iso'], date_parser=custom_date_parser)
        data.rename(columns={'dt_iso': 'time'}, inplace=True)
        data.set_index('time', inplace=True)
        # Keep only wind variables and site metadata.
        data.drop(columns=['dt', 'timezone', 'temp',
                           'feels_like', 'temp_min', 'temp_max', 'pressure', 'sea_level',
                           'grnd_level', 'humidity', 'rain_1h',
                           'rain_3h', 'snow_1h', 'snow_3h', 'clouds_all', 'weather_id',
                           'weather_main', 'weather_description', 'weather_icon'], inplace=True)
        dataset = data.to_xarray()
        # NOTE: where() masks values outside the window rather than dropping
        # the timestamps themselves.
        if date_start is not None:
            dataset = dataset.where(dataset.time >= date_start)
        if date_end is not None:
            dataset = dataset.where(dataset.time <= date_end)
        # Assign local dataset to object-scope dataset
        return dataset
#%% ############################################################################
class Controller():
"""
This is where the main things happen.
Where user input is managed and methods are launched
"""
############################################################################
#%% Initialising and Finishing methods
############################################################################
def __init__(self):
    """
    Look for pickle file. If exists load it.
    Initialise main controller and start the interactive loop.
    """
    # BUGFIX: initialise the flag *before* loading. load_databucket() sets
    # load_bore_flag = True when a pickle is restored; the old code reset it
    # to False afterwards, discarding that state and forcing a CSV reload.
    self.load_bore_flag = False
    self.load_databucket()
    logging.info("run interface")
    self.run_interface()
def load_databucket(self):
    """Restore self.bore from the pickle file, when that file exists.

    Sets self.load_bore_flag = True on a successful load.
    """
    logging.info("Auto load databucket from pickle file if it exists")
    print("Add to pickle file, if it exists")
    try:
        if not os.path.exists(DATABUCKET_FILE):
            print("... %s does not exist" % DATABUCKET_FILE)
            return
        print("...Loading (%s)" % DATABUCKET_FILE)
        with open(DATABUCKET_FILE, 'rb') as file_object:
            self.bore = pickle.load(file_object)
        self.load_bore_flag = True
    except KeyError:
        print('ErrorA ')
    except (IOError, RuntimeError):
        print('ErrorB ')
def pickle_bore(self):
    """Serialise self.bore to DATABUCKET_FILE, replacing any existing file."""
    print('Pickle data.')
    # Portable replacement for the old `os.system('rm -f ...')` shell call;
    # also removes the dead `if(1)/else` branch around the dump.
    if os.path.exists(DATABUCKET_FILE):
        os.remove(DATABUCKET_FILE)
    with open(DATABUCKET_FILE, 'wb') as file_object:
        pickle.dump(self.bore, file_object)
def export_to_csv(self):
    """
    Export the bore xr.Dataset to a CSV file for sharing
    """
    # Placeholder: the export step has not been implemented yet.
    print('Export data to csv. NOT IMPLEMENTED')
    pass
def run_interface(self):
    """
    Application's main loop
    Get user input and respond

    Commands: q=quit (optionally pickling), i=help, all=full pipeline,
    0=load obs csv, h/hrec/b/a=load a tide-data source, r=river data,
    m=met data, 2=show dataset, 3/4=plots, d1-d5=dev tools, 6=predict,
    x=export, rm=delete pickle file.
    """
    print(INSTRUCTIONS)
    while True:
        command = input("What do you want to do? ")
        if command == "q":
            print("run_interface: quit")
            logging.info("quit")  # Function call.
            ans = input('Save as pickle file?[Y/n]')
            if ans == "n":
                break
            else:
                self.pickle_bore()
                break
        elif command == "i":
            print(INSTRUCTIONS)
        elif command == "all":
            # Full pipeline: observations, every tide-data source, rivers.
            print('load and process all data')
            self.load_csv()
            print('load and process measured (bodc) data')
            self.load_and_process(source="bodc", HLW_list=["FW", "HW", "LW"])
            #self.load_and_process(source="bodc", HLW="LW")
            #self.load_and_process(source="bodc", HLW="FW")
            print('load and process measured (API) data')
            self.load_and_process(source="api", HLW_list=["HW", "LW", "FW"])
            #self.load_and_process(source="api", HLW="LW")
            #self.load_and_process(source="api", HLW="FW")
            print('load and process CTR data. Obs + API')
            self.get_river_data(HLW_list=["LW"])
            print('load and process harmonic data')
            self.load_and_process(source="harmonic", HLW_list=["HW", "LW"])
            #self.load_and_process(source="harmonic", HLW="LW")
            print('load and process harmonic reconstructed data')
            self.load_and_process(source="harmonic_rec", HLW_list=["HW", "LW"])
            #self.load_and_process(source="harmonic_rec", HLW="LW")
        elif command == "0":
            print('load bore observations')
            self.load_csv()
        elif command == "h":
            print('load and process harmonic data')
            # Ensure the observations are loaded before processing a source.
            if not self.load_bore_flag: self.load_csv()
            self.load_and_process(source="harmonic")
        elif command == "hrec":
            print('load and process harmonic reconstructed data')
            if not self.load_bore_flag: self.load_csv()
            self.load_and_process(source="harmonic_rec")
        elif command == "b":
            print('load and process measured (bodc) data')
            if not self.load_bore_flag: self.load_csv()
            self.load_and_process(source="bodc")
        elif command == "a":
            print('load and process measured (API) data')
            if not self.load_bore_flag: self.load_csv()
            self.load_and_process(source="api")
        elif command == "r":
            print('load and process measured (API) river data')
            if not self.load_bore_flag: self.load_csv()
            self.get_river_data()
        elif command == "m":
            print("load and process met data")
            if not self.load_bore_flag: self.load_csv()
            self.get_met_data()
        elif command == "2":
            print('show bore dataset')
            self.show()
        elif command == "3":
            print('plot bore data (lag vs tidal height')
            plt.close('all');self.plot_lag_vs_height('bodc')
            plt.close('all');self.plot_lag_vs_height('bodc', HLW="FW")
            plt.close('all');self.plot_lag_vs_height('all')
            plt.close('all');self.plot_lag_vs_height('harmonic')
            plt.close('all');self.plot_lag_vs_height('harmonic_rec')
            plt.close('all');self.plot_lag_vs_height('api')
            plt.close('all');self.plot_lag_vs_height('api', HLW="FW")
        elif command == "4":
            print('plot difference between predicted and measured (lag vs tidal height)')
            plt.close('all');self.plot_surge_effect('api')
            plt.close('all');self.plot_surge_effect('bodc')
        elif command == "d1":
            print('load and plot HLW data')
            self.load_and_plot_hlw_data()
        elif command == "d2":
            print("shoothill dev")
            self.shoothill()
        elif command == "d3":
            print('Explore combinations of HLW times and heights for best fit')
            self.fits_to_data(qc_flag=True)
            self.fits_to_data(qc_flag=False)
        elif command == "d4":
            print('Plot combinations of HLW times, heights and rivers')
            self.combinations_lag_hlw_river()
        elif command == "d5":
            print('Explore how rivers affect bore timing')
            self.river_lag_timing()
        elif command == "6":
            self.predict_bore()
        elif command == "x":
            print('Export data')
            self.export_to_csv()
        elif command == "rm":
            print('Remove pickle file)')
            if os.path.exists(DATABUCKET_FILE):
                os.remove(DATABUCKET_FILE)
            else:
                print("Can not delete the pickle file as it doesn't exists")
            #self.load_databucket()
        else:
            template = "run_interface: I don't recognise (%s)"
            print(template%command)
############################################################################
#%% Load and process methods
############################################################################
def load_and_process(self, source: str = "harmonic", HLW_list=None):
    """
    Performs sequential steps to build into the bore object.
    1. Load Gladstone Dock data (though this might also be loaded from the obs logs)
    2. Calculate the time lag between Gladstone and Saltney events.
    3. Perform a linear fit to the time lag.

    Inputs:
    source: 'harmonic' [default] - load HLW from harmonic prediction
            'harmonic_rec' - reconstruct time series from harmonic constants
            'bodc' - measured and processed data
            'api' - load recent, un processed data from shoothill API
    HLW_list: list of event types to process, defaults to ["HW"].
    """
    # BUGFIX: avoid a shared mutable default argument.
    if HLW_list is None:
        HLW_list = ["HW"]
    print('loading '+source+' tide data')
    self.get_Glad_data(source=source, HLW_list=HLW_list)
    #self.compare_Glad_HLW()
    print('Calculating the Gladstone to Saltney time difference')
    self.calc_Glad_Saltney_time_lag(source=source, HLW_list=HLW_list)
    print('Process linear fit. Calc and save')
    self.process_fit(source=source, HLW_list=HLW_list)
def process_fit(self, source: str = "harmonic", HLW_list=None):
    """Fit, store and apply a linear lag-vs-height model per event type.

    Stores the fitted weights and rmse in self.bore.attrs and the modelled
    lag in self.bore['linfit_lag_<HLW>_<source>'].
    """
    # BUGFIX: avoid a shared mutable default argument.
    if HLW_list is None:
        HLW_list = ["HW"]
    for HLW in HLW_list:
        # Get linear fit with rmse
        self.bore.attrs['weights_'+HLW+'_'+source], self.bore.attrs['rmse_'+HLW+'_'+source] = self.linearfit(
            self.bore['liv_height_'+HLW+'_'+source],
            self.bore['Saltney_lag_'+HLW+'_'+source]
        )
        # Apply linear model
        self.bore['linfit_lag_'+HLW+'_'+source] = self.bore.attrs['weights_'+HLW+'_'+source](self.bore['liv_height_'+HLW+'_'+source])
def load_csv(self):
    """
    Load observed bore data from text file.
    Load as a dataframe and save to bore:xr.DataSet
    """
    logging.info('Load bore data from csv file')
    self.load_bore_flag = True
    df = pd.read_csv('data/master-Table 1.csv')
    # Drop the unused/duplicate columns from the spreadsheet export.
    df.drop(columns=['date + logged time','Unnamed: 14',
                     'Unnamed: 15','Unnamed: 16'],
            inplace=True)
    df.rename(columns={"date + logged time (GMT)":"time"}, inplace=True)
    df.rename(columns={"wind_deg (from)":"wind_deg"}, inplace=True)
    df.rename(columns={"wind_speed (m/s)":"wind_speed"}, inplace=True)
    df['time'] = pd.to_datetime(df['time'], format="%d/%m/%Y %H:%M")
    #df['time'] = pd.to_datetime(df['time'], utc=True, format="%d/%m/%Y %H:%M")
    #df.set_index(['time'], inplace=True)

    # Coerce each timestamp to numpy.datetime64 (treated as UTC).
    for index, row in df.iterrows():
        df.loc[index,'time'] = np.datetime64( df.at[index,'time'] ) # numpy.datetime64 in UTC
    bore = xr.Dataset()
    bore = df.to_xarray()

    # Set the t_dim to be a dimension and 'time' to be a coordinate
    bore = bore.rename_dims( {'index':'t_dim'} ).assign_coords( time=("t_dim", bore.time.data))
    bore = bore.swap_dims( {'t_dim':'time'} )
    self.bore = bore
    logging.info('Bore data loaded')
def get_river_data(self, HLW_list=None):
    """
    Get Chester weir data. Consolidate CTR data.
    Data from the table takes precedence. Gaps are filled by the API.

    Only HLW_list == ["LW"] is supported.
    """
    # BUGFIX: avoid a shared mutable default argument.
    if HLW_list is None:
        HLW_list = ["LW"]
    if HLW_list != ["LW"]:
        print('Not expecting that possibility here')
    else:
        # Obtain CTR data for LW for the observations times.
        self.get_Glad_data(source='ctr', HLW_list=["LW"])
        # NOTE(review): multiplying by np.NaN makes every element of alph NaN,
        # so the merge below always falls back to the API values (beta) —
        # confirm whether the table column was meant to be kept as-is.
        alph = self.bore['Chester Weir height: CHESTER WEIR 15 MIN SG'] * np.NaN
        beta = self.bore['ctr_height_LW_ctr']
        #print( self.bore['ctr_height_LW_ctr'][0:10] )
        self.bore['ctr_height_LW'] = alph
        # Elementwise merge: take alph where finite, otherwise beta.
        self.bore['ctr_height_LW'].values = [alph[i].values if np.isfinite(alph[i].values)
                                             else beta[i].values for i in range(len(alph))]
        # 2015-06-20T12:16:00 has a -ve value. Only keep +ve values
        self.bore['ctr_height_LW'] = self.bore['ctr_height_LW'].where( self.bore['ctr_height_LW'].values>0)
        #plt.plot( ctr_h_csv, 'b+' )
        #plt.plot( self.bore['ctr_height_LW_ctr'], 'ro')
        #plt.plot( self.bore['ctr_height_LW'], 'g.')
        del self.bore['ctr_height_LW_ctr'], self.bore['ctr_time_LW_ctr']
def get_met_data(self): #, HLW:str="HW"):
    """
    Get the met data time matching the observation.
    Met data from OpenWeather history download.
    For each bore observation, finds the nearest met sample within a
    +/- `winsize` hour window and stores the result in self.met.
    This can then be exported into the obs table:
    c.met.to_pandas().to_csv('met.csv')
    """
    fn_openweather = "data/met/openweather_2005-01-01_2021-11-08.csv"
    met = OpenWeather()
    met.dataset = met.read_openweather_to_xarray(fn_openweather)

    winsize = 6 #4h for HW, 6h for LW. +/- search distance for nearest extreme value

    self.met = xr.Dataset()
    for measure_var in ['wind_speed', 'wind_deg']:
        met_var = []
        met_time = []
        for i in range(len(self.bore.time)):
            try:
                met_ds = None
                obs_time = self.bore.time[i].values

                # Find nearest met observation
                dt = np.abs(met.dataset['time'] - obs_time)
                index = np.argsort(dt).values
                if winsize is not None: # if search window trucation exists
                    if np.timedelta64(dt[index[0]].values, "m").astype("int") <= 60 * winsize: # compare in minutes
                        #print(f"dt:{np.timedelta64(dt[index[0]].values, 'm').astype('int')}")
                        #print(f"winsize:{winsize}")
                        met_ds = met.dataset[measure_var][index[0]]
                    else:
                        # return a NaN in an xr.Dataset
                        # The rather odd trailing zero is to remove the array layer
                        # on both time and measurement, and to match the other
                        # alternative for a return
                        met_ds = xr.DataArray( [np.NaN], coords={'time': [obs_time]})
                        #met_ds = xr.Dataset({measure_var: ('time', [np.NaN])}, coords={'time': [obs_time]})
                else: # give the closest without window search truncation
                    met_ds = met.dataset[measure_var][index[0]]

                #print("time,HW:",obs_time, HW.values)
                if type(met_ds) is xr.DataArray:
                    #print(f"met: {met_ds.values}")
                    met_var.append( float(met_ds.values) )
                    #print('len(met_var)', len(met_var))
                    met_time.append( met_ds.time.values )
                    #print('len(met_time)', len(met_time))
                    #self.bore['LT_h'][i] = HLW.dataset.sea_level[HLW.dataset['sea_level'].argmin()]
                    #self.bore['LT_t'][i] = HLW.dataset.time[HLW.dataset['sea_level'].argmin()]
                    #ind.append(i)
                    #print(f"i:{i}, {met_time[-1].astype('M8[ns]').astype('M8[ms]').item()}" )
                    #print(met_time[-1].astype('M8[ns]').astype('M8[ms]').item().strftime('%Y-%m-%d'))

                    ## Make timeseries plot around the highwater maxima to check
                    # values are being extracted as expected.
                    if (i % 12) == 0:
                        fig = plt.figure()

                    # Axis limit depends on the variable being plotted.
                    if measure_var == "wind_speed":
                        ymax = 15
                    if measure_var == "wind_deg":
                        ymax = 360
                    plt.subplot(3,4,(i%12)+1)
                    plt.plot(met.dataset.time, met.dataset[measure_var])
                    plt.plot( met_time[-1], met_var[-1], 'r+' )
                    plt.plot( [self.bore.time[i].values,self.bore.time[i].values],[0,ymax],'k')
                    plt.xlim([met_time[-1] - np.timedelta64(5,'h'),
                              met_time[-1] + np.timedelta64(5,'h')])
                    #plt.ylim([0,11])
                    plt.text( met_time[-1]-np.timedelta64(5,'h'),ymax*0.9, self.bore.location[i].values)
                    plt.text( met_time[-1]-np.timedelta64(5,'h'),ymax*0.1, met_time[-1].astype('M8[ns]').astype('M8[ms]').item().strftime('%Y-%m-%d'))
                    # Turn off tick labels
                    plt.gca().axes.get_xaxis().set_visible(False)
                    #plt.xaxis_date()
                    #plt.autoscale_view()
                    # Save the panel figure once 12 subplots are filled.
                    if (i%12) == 12-1:
                        plt.savefig('figs/check_get_'+measure_var+'_times_'+str(i//12).zfill(2)+'.png')
                        plt.close('all')

                else:
                    logging.info(f"Did not find a met time near this guess {obs_time}")
                    print(f"Did not find a met time near this guess {obs_time}")

            # NOTE(review): bare except hides real errors — consider narrowing.
            except:
                logging.warning('Issue with appending met data')
                print('Issue with appending met data')

        try: # Try and print the last observation timeseries
            plt.savefig('figs/check_get_'+measure_var+'_times_'+str(i//12).zfill(2)+'.png')
            plt.close('all')
        except:
            logging.info(f"Did not have any extra panels to plot")
            print(f"Did not have any extra panels to plot")

        # Save a xarray objects
        coords = {'time': (('time'), self.bore.time.values)}
        #print("number of obs:",len(self.bore.time))
        #print("length of time", len(self.bore.time.values))
        #print("length of data:", len(np.array(met_var)) )
        self.met[measure_var] = xr.DataArray( np.array(met_var), coords=coords, dims=['time'])
def get_Glad_data(self, source:str='harmonic', HLW_list=["HW"]):
#def get_Glad_data(self, source:str='harmonic', HLW:str="HW"):
"""
Get Gladstone HLW data from external source
These data are reported in the bore.csv file but not consistently and it
is laborous to find old values.
It was considered a good idea to automate this step.
inputs:
source: 'harmonic' [default] - load HLW from harmonic prediction
'harmonic_rec' - reconstruct time series from harmonic constants
'bodc' - measured and processed data
'api' - load recent, un processed data from shoothill API
HLW_list: ["LW","HW","FW","EW"] - the data is either processed for High or Low water
events, or Flood or Ebb (inflection) events
"""
loc = "liv" # default location - Liverpool
logging.info("Get Gladstone HLW data")
if source == "harmonic": # Load tidetable data from files
filnam1 = '/Users/jeff/GitHub/DeeBore/data/Liverpool_2005_2014_HLW.txt'
filnam2 = '/Users/jeff/GitHub/DeeBore/data/Liverpool_2015_2020_HLW.txt'
filnam3 = '/Users/jeff/GitHub/DeeBore/data/Liverpool_2021_2022_HLW.txt'
tg = GAUGE()
tg1 = GAUGE()
tg2 = GAUGE()
tg3 = GAUGE()
tg1.dataset = tg1.read_hlw_to_xarray(filnam1)#, self.bore.time.min().values, self.bore.time.max().values)
tg2.dataset = tg2.read_hlw_to_xarray(filnam2)#, self.bore.time.min().values, self.bore.time.max().values)
tg3.dataset = tg3.read_hlw_to_xarray(filnam3)#, self.bore.time.min().values, self.bore.time.max().values)
tg.dataset = xr.concat([ tg1.dataset, tg2.dataset, tg3.dataset], dim='time')
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
tg_HLW = tg.find_high_and_low_water(var_str='sea_level')
elif source == "bodc": # load full 15min data from BODC files, extract HLW
dir = '/Users/jeff/GitHub/DeeBore/data/BODC_processed/'
filelist = ['2005LIV.txt',
'2006LIV.txt', '2007LIV.txt',
'2008LIV.txt', '2009LIV.txt',
'2010LIV.txt', '2011LIV.txt',
'2012LIV.txt', '2013LIV.txt',
'2014LIV.txt', '2015LIV.txt',
'2016LIV.txt', '2017LIV.txt',
'2018LIV.txt', '2019LIV.txt',
'2020LIV.txt',
'LIV2101.txt', 'LIV2102.txt',
'LIV2103.txt', 'LIV2104.txt',
'LIV2105.txt', 'LIV2106.txt',
'LIV2107.txt', 'LIV2108.txt',
'LIV2109.txt', 'LIV2110.txt']
tg = GAUGE()
for file in filelist:
tg0=GAUGE()
tg0.dataset = tg0.read_bodc_to_xarray(dir+file)
if tg.dataset is None:
tg.dataset = tg0.dataset
else:
tg.dataset = xr.concat([ tg.dataset, tg0.dataset], dim='time')
# Use QC to drop null values
#tg.dataset['sea_level'] = tg.dataset.sea_level.where( np.logical_or(tg.dataset.qc_flags=='', tg.dataset.qc_flags=='T'), drop=True)
tg.dataset['sea_level'] = tg.dataset.sea_level.where( tg.dataset.qc_flags!='N', drop=True)
# Fix some attributes (others might not be correct for all data)
tg.dataset['start_date'] = tg.dataset.time.min().values
tg.dataset['end_date'] = tg.dataset.time.max().values
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
#tg_HLW = tg.find_high_and_low_water(var_str='sea_level',method='cubic') #'cubic')
elif source == "api": # load full tidal signal from shoothill, extract HLW
date_start=np.datetime64('2005-04-01')
date_end=np.datetime64('now','D')
fn_archive = "liv" # File head for netcdf archive of api call
# Load timeseries from local file if it exists
try:
tg1 = GAUGE()
tg2 = GAUGE()
tg = GAUGE()
# Load local file. Created with archive_shoothill.py
dir = "archive_shoothill/"
tg1.dataset = xr.open_mfdataset(dir + fn_archive + "_????.nc") # Tidal port Gladstone Dock, Liverpool
tg1.dataset = tg1.dataset.sel(time=slice(date_start, date_end))
print(f"{len(tg1.dataset.time)} pts loaded from netcdf")
if (tg1.dataset.time[-1].values < date_end):
tg2 = GAUGE()
tg2.dataset = tg2.read_shoothill_to_xarray(date_start=tg1.dataset.time[-1].values, date_end=date_end)
tg.dataset = xr.concat([ tg1.dataset, tg2.dataset], dim='time')
print(f"{len(tg2.dataset.time)} pts loaded from API")
else:
tg = tg1
except:
tg.dataset = tg.read_shoothill_to_xarray(date_start=date_start, date_end=date_end)
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
#tg_HLW = tg.find_high_and_low_water(var_str='sea_level',method='cubic') #'cubic')
elif source == "ctr": # use api to load chester weir. Reset loc variable
loc = "ctr"
tg = GAUGE()
date_start=np.datetime64('2014-01-01')
date_end=np.datetime64('now','D')
#station_id = 7900 # below weir
station_id = 7899 # above weir
fn_archive = "ctr" # File head for netcdf archive of api call
station_id = 968
fn_archive = "iron"
# Load timeseries from local file if it exists
try:
tg1 = GAUGE()
tg2 = GAUGE()
tg = GAUGE()
# Load local file. Created with archive_shoothill.py
dir = "archive_shoothill/"
tg1.dataset = xr.open_mfdataset(dir + fn_archive + "_????.nc") # Tidal port Gladstone Dock, Liverpool
tg1.dataset = tg1.dataset.sel(time=slice(date_start, date_end))
print(f"{len(tg1.dataset.time)} pts loaded from netcdf")
if (tg1.dataset.time[-1].values < date_end):
tg2 = GAUGE()
tg2.dataset = tg2.read_shoothill_to_xarray(station_id=station_id, date_start=tg1.dataset.time[-1].values, date_end=date_end)
tg.dataset = xr.concat([ tg1.dataset, tg2.dataset], dim='time')
print(f"{len(tg2.dataset.time)} pts loaded from API")
else:
tg = tg1
except:
tg.dataset = tg.read_shoothill_to_xarray(station_id=station_id ,date_start=date_start, date_end=date_end)
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
tg_HLW = tg.find_high_and_low_water(var_str='sea_level')
elif source == 'harmonic_rec': # load full tidal signal using anyTide code, extract HLW
tg = GAUGE()
#date_start=np.datetime64('now')
#ndays = 5
#tg.dataset = tg.anyTide_to_xarray(date_start=date_start, ndays=5)
date_start=np.datetime64('2005-04-01')
date_end=np.datetime64('now','D')
tg.dataset = tg.anyTide_to_xarray(date_start=date_start, date_end=date_end)
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
tg_HLW = tg.find_high_and_low_water(var_str='sea_level')
else:
logging.debug(f"Did not expect this eventuality...")
self.tg = tg
## Process the *_highs or *_lows
for HLW in HLW_list:
print(f"HLW: {HLW}")
#time_var = 'time_highs'
#measure_var = 'sea_level_highs'
#ind = [] # list of indices in the obs bore data where gladstone data is found
if HLW == 'HW':
time_var = 'time_highs'
measure_var = 'sea_level_highs'
elif HLW == 'LW':
time_var = 'time_lows'
measure_var = 'sea_level_lows'
elif HLW == 'FW':
time_var = 'time_flood'
measure_var = 'sea_level_flood'
elif HLW == 'EW':
time_var = 'time_ebb'
measure_var = 'sea_level_ebb'
else:
print('This should not have happened...')
HT_h = [] # Extrema - height
HT_t = [] # Extrema - time
winsize = 6 #4h for HW, 6h for LW. +/- search distance for nearest extreme value
for i in range(len(self.bore.time)):
if(1): #try:
HW = None
LW = None
obs_time = self.bore.time[i].values
# Extracting the highest and lowest value with a cubic spline is
# very memory costly. Only need to use the cubic method for the
# bodc and api sources, so compute the high and low waters in a
# piecewise approach around observations times.
if source == "bodc" or source == "api":
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
win = GAUGE()
win.dataset = tg.dataset.sel( time=slice(obs_time - np.timedelta64(winsize, "h"), obs_time + np.timedelta64(winsize, "h")) )
#if HLW == "LW":
# print(f"win.dataset {win.dataset}")
#print(i," win.dataset.time.size", win.dataset.time.size)
if win.dataset.time.size == 0:
tg_HLW = GAUGE()
tg_HLW.dataset = xr.Dataset({measure_var: (time_var, [np.NaN])}, coords={time_var: [obs_time]})
else:
if HLW == "FW" or HLW == "EW":
tg_HLW = win.find_flood_and_ebb_water(var_str='sea_level',method='cubic')
#print(f"inflection point time: {tg_HLW.dataset[time_var]}")
print(f"inflection points: {len(tg_HLW.dataset[time_var])}")
elif HLW == "HW" or HLW == "LW":
tg_HLW = win.find_high_and_low_water(var_str='sea_level',method='cubic')
print(f"max points: {len(tg_HLW.dataset[time_var])}")
else:
print(f"This should not have happened... HLW:{HLW}")
HW = tg_HLW.get_tide_table_times(
time_guess=obs_time,
time_var=time_var,
measure_var=measure_var,
method='nearest_1',
winsize=winsize ) #4h for HW, 6h for LW
#print("time,HW:",obs_time, HW.values)
if type(HW) is xr.DataArray: ## Actually I think they are alway xr.DataArray with time, but the height can be nan.
#print(f"HW: {HW}")
HT_h.append( HW.values )
#print('len(HT_h)', len(HT_h))
HT_t.append( HW[time_var].values )
#print('len(HT_t)', len(HT_t))
#self.bore['LT_h'][i] = HLW.dataset.sea_level[HLW.dataset['sea_level'].argmin()]
#self.bore['LT_t'][i] = HLW.dataset.time[HLW.dataset['sea_level'].argmin()]
#ind.append(i)
#print(f"i:{i}, {HT_t[-1].astype('M8[ns]').astype('M8[ms]').item()}" )
#print(HT_t[-1].astype('M8[ns]').astype('M8[ms]').item().strftime('%Y-%m-%d'))
## Make timeseries plot around the highwater maxima to check
# values are being extracted as expected.
if (i % 12) == 0:
fig = plt.figure()
plt.subplot(3,4,(i%12)+1)
plt.plot(self.tg.dataset.time, self.tg.dataset.sea_level)
plt.plot( HT_t[-1], HT_h[-1], 'r+' )
plt.plot( [self.bore.time[i].values,self.bore.time[i].values],[0,11],'k')
plt.xlim([HT_t[-1] - np.timedelta64(5,'h'),
HT_t[-1] + np.timedelta64(5,'h')])
plt.ylim([0,11])
plt.text( HT_t[-1]-np.timedelta64(5,'h'),10, self.bore.location[i].values)
plt.text( HT_t[-1]-np.timedelta64(5,'h'),1, HT_t[-1].astype('M8[ns]').astype('M8[ms]').item().strftime('%Y-%m-%d'))
# Turn off tick labels
plt.gca().axes.get_xaxis().set_visible(False)
#plt.xaxis_date()
#plt.autoscale_view()
if (i%12) == 12-1:
plt.savefig('figs/check_get_tidetabletimes_'+str(i//12).zfill(2)+'_'+HLW+'_'+source+'.png')
plt.close('all')
else:
logging.info(f"Did not find a high water near this guess")
print(f"Did not find a high water near this guess")
if(0):#except:
logging.warning('Issue with appending HLW data')
print('Issue with appending HLW data')
try: # Try and print the last observation timeseries
plt.savefig('figs/check_get_tidetabletimes_'+str(i//12).zfill(2)+'_'+HLW+'_'+source+'.png')
plt.close('all')
except:
logging.info(f"Did not have any extra panels to plot")
print(f"Did not have any extra panels to plot")
# Save a xarray objects
coords = {'time': (('time'), self.bore.time.values)}
#print("number of obs:",len(self.bore.time))
#print("length of time", len(self.bore.time.values))
#print("length of data:", len(np.array(HT_h)) )
self.bore[loc+'_height_'+HLW+'_'+source] = xr.DataArray( np.array(HT_h), coords=coords, dims=['time'])
self.bore[loc+'_time_'+HLW+'_'+source] = xr.DataArray( np.array(HT_t), coords=coords, dims=['time'])
print('There is a supressed plot.scatter here')
#self.bore.plot.scatter(x='liv_time', y='liv_height'); plt.show()
logging.debug(f"len(self.bore[loc+'_time_'{HLW}'_'{source}]): {len(self.bore[loc+'_time_'+HLW+'_'+source])}")
#logging.info(f'len(self.bore.liv_time)', len(self.bore.liv_time))
logging.debug(f"type(HT_t): {type(HT_t)}")
logging.debug(f"type(HT_h): {type(HT_h)}")
if loc=='liv':
logging.debug('log time, orig tide table, new tide table lookup')
for i in range(len(self.bore.time)):
logging.debug( f"{self.bore.time[i].values}, {self.bore['Liv (Gladstone Dock) HT time (GMT)'][i].values}, {self.bore['liv_time_'+HLW+'_'+source][i].values}")
#print('log time, orig tide table, new tide table lookup')
#for i in range(len(self.bore.time)):
# print( self.bore.time[i].values, self.bore['Liv (Gladstone Dock) HT time (GMT)'][i].values, self.bore['liv_time'][i].values)
def calc_Glad_Saltney_time_lag(self, source:str="harmonic", HLW_list=["HW"]):
"""
Compute lag (obs - tide) for arrival at Saltney relative to Glastone HT
Store lags as integer (minutes) since np.datetime64 and
np.timedelta64 objects are problematic with polyfitting.
inputs:
source: 'harmonic' [default] - load HLW from harmonic prediction
'bodc' - measured and processed data
'api' - load recent, un processed data from shoothill API
HLW: [LW/HW] - the data is either processed for High or Low water events
"""
for HLW in HLW_list:
logging.info('calc_Glad_Saltney_time_diff')
nt = len(self.bore.time)
lag = (self.bore['time'].values - self.bore['liv_time_'+HLW+'_'+source].values).astype('timedelta64[m]')
# convert to integers so nans can be applied
lag = [ lag[i].astype('int') if np.isfinite(self.bore['liv_height_'+HLW+'_'+source].values)[i] else np.NaN for i in range(nt) ]
# Pick out FB and Blue bridge
Saltney_lag = [ lag[i] if self.bore.location.values[i] == 'bridge' else np.NaN for i in range(nt) ]
bluebridge_lag = [ lag[i] if self.bore.location.values[i] == 'blue bridge' else np.NaN for i in range(nt) ]
#Saltney_lag = [ lag[i].astype('int') if self.bore.location.values[i] == 'bridge' else np.NaN for i in range(nt) ]
#bluebridge_lag = [ lag[i].astype('int') if self.bore.location.values[i] == 'blue bridge' else np.NaN for i in range(nt) ]
# Save a xarray objects
coords = {'time': (('time'), self.bore.time.values)}
self.bore['lag_'+HLW+'_'+source] = xr.DataArray( lag, coords=coords, dims=['time'])
self.bore['Saltney_lag_'+HLW+'_'+source] = xr.DataArray( Saltney_lag, coords=coords, dims=['time'])
self.bore['bluebridge_lag_'+HLW+'_'+source] = xr.DataArray( bluebridge_lag, coords=coords, dims=['time'])
def linearfit(self, X, Y):
"""
Linear regression. Calculates linear fit weights and RMSE
Is used after computing the lag between Gladstone and Saltney events,
during load_and_process(), to find a fit between Liverpool heights
and Saltney arrival lag.
Returns polynomal function for linear fit that can be used:
E.g.
X=range(10)
np.poly1d(weights)( range(10) )
Also returns RMSE
"""
idx = np.isfinite(X).values & np.isfinite(Y).values
weights = np.polyfit( X[idx], Y[idx], 1)
logging.debug("weights: {weights}")
#self.linfit = np.poly1d(weights)
#self.bore['linfit_lag'] = self.linfit(X)
#self.bore.attrs['weights'] = np.poly1d(weights)
#self.bore.attrs['weights'](range(10))
Y_fit = np.poly1d(weights)(X)
rmse = '{:4.1f} mins'.format( np.sqrt(np.nanmean((Y.values - Y_fit)**2)) )
return np.poly1d(weights), rmse
############################################################################
#%% Presenting data
############################################################################
def show(self):
""" Show xarray dataset """
print( self.bore )
    def plot_lag_vs_height(self, source:str="harmonic", HLW:str="HW"):
        """
        Plot bore lag (obs time - Gladstone tide time) against
        Gladstone extreme water height (m).
        Separate colours for Saltney, Bluebridge, Chester.

        Saves figs/SaltneyArrivalLag_vs_LivHeight_<HLW>_<source>.png

        inputs:
        source: 'harmonic' [default] - load HLW from harmonic prediction
                'harmonic_rec' - data from harmonic reconstruction
                'bodc' - measured and processed data
                'api' - load recent, un processed data from shoothill API
                'all' - Use bodc + api data
        HLW: [LW/HW] - the data is either processed for High or Low water events
        """
        # Boolean mask of first-hand ("Quality" == "A") observations.
        I = self.bore['Quality'] == "A"
        if source == "all":
            # Combine sources: bodc (QCd) values take precedence; api values
            # are only used where the bodc height is NaN.
            Yliv = self.bore['liv_height_'+HLW+'_bodc']
            Xsalt = self.bore['Saltney_lag_'+HLW+'_bodc']
            Xblue = self.bore['bluebridge_lag_'+HLW+'_bodc']
            Yliv_api = self.bore['liv_height_'+HLW+'_api'].where( np.isnan(self.bore['liv_height_'+HLW+'_bodc']))
            Xsalt_api = self.bore['Saltney_lag_'+HLW+'_api'].where( np.isnan(self.bore['liv_height_'+HLW+'_bodc']))
            Xblue_api = self.bore['bluebridge_lag_'+HLW+'_api'].where( np.isnan(self.bore['liv_height_'+HLW+'_bodc']))
            Xfit = self.bore['linfit_lag_'+HLW+'_bodc']
            # Most recent api point, highlighted separately below.
            # NOTE(review): xr.ufuncs was removed in xarray >= 0.17; this line
            # would need np.isfinite on modern xarray -- confirm pinned version.
            Xsalt_api_latest = Xsalt_api.where( xr.ufuncs.isfinite(Xsalt_api), drop=True)[0]
            Yliv_api_latest = Yliv_api.where( xr.ufuncs.isfinite(Xsalt_api), drop=True)[0]
            plt.plot( Xsalt,Yliv, 'r.', label='Saltney: rmse '+'{:4.1f}'.format(self.stats('bodc'))+'mins')
            plt.plot( Xsalt[I],Yliv[I], 'k+', label='1st hand')
            plt.plot( Xblue,Yliv, 'b.', label='Bluebridge')
            plt.plot( Xfit,Yliv, 'k-')
            plt.plot( Xsalt_api,Yliv_api, 'ro', label='Saltney API')
            plt.plot( Xblue_api,Yliv_api, 'bo', label='Bluebridge API')
            plt.plot( Xsalt_api_latest,Yliv_api_latest, 'go', label='Saltney latest')
            plt.plot( Xsalt_api[I],Yliv_api[I], 'k+')
        else:
            # Single-source plot.
            Yliv = self.bore['liv_height_'+HLW+'_'+source]
            Xsalt = self.bore['Saltney_lag_'+HLW+'_'+source]
            Xblue = self.bore['bluebridge_lag_'+HLW+'_'+source]
            Xfit = self.bore['linfit_lag_'+HLW+'_'+source]
            plt.plot( Xsalt,Yliv, 'r.', label='Saltney: rmse '+'{:4.1f}'.format(self.stats(source,HLW))+'mins')
            plt.plot( Xsalt[I],Yliv[I], 'k+', label='1st hand')
            plt.plot( Xblue,Yliv, 'b.', label='Bluebridge')
            plt.plot( Xfit,Yliv, 'k-')
            # NOTE(review): xr.ufuncs was removed in xarray >= 0.17 (see above).
            Xsalt_latest = Xsalt.where( xr.ufuncs.isfinite(Xsalt), drop=True)[0]
            Yliv_latest = Yliv.where( xr.ufuncs.isfinite(Xsalt), drop=True)[0]
            # Highlight recent data (2021 onwards) with filled markers.
            Yliv = self.bore['liv_height_'+HLW+'_'+source].where( self.bore.time > np.datetime64('2021-01-01') )
            Xsalt = self.bore['Saltney_lag_'+HLW+'_'+source].where( self.bore.time > np.datetime64('2021-01-01') )
            Xblue = self.bore['bluebridge_lag_'+HLW+'_'+source].where( self.bore.time > np.datetime64('2021-01-01') )
            #Yliv = self.bore['liv_height_'+HLW+'_'+source].where( np.isnan(self.bore['liv_height_'+HLW+'_bodc']))
            #Xsalt = self.bore['Saltney_lag_'+HLW+'_'+source].where( np.isnan(self.bore['liv_height_'+HLW+'_bodc']))
            #Xblue = self.bore['bluebridge_lag_'+HLW+'_'+source].where( np.isnan(self.bore['liv_height_'+HLW+'_bodc']))
            plt.plot( Xsalt,Yliv, 'ro', label='Saltney 2021')
            plt.plot( Xblue,Yliv, 'bo', label='Bluebridge 2021')
            plt.plot( Xsalt_latest,Yliv_latest, 'go', label='Saltney latest')
            plt.plot( Xsalt[I],Yliv[I], 'k+')
            #plt.plot( Xblue[0],Yliv[0], 'b+', label='Bluebridge recent')
        plt.ylabel('Liv (Gladstone Dock) '+HLW+' (m)')
        plt.xlabel('Arrival time (mins) relative to Liv '+HLW)
        # NOTE(review): the local name 'str' shadows the builtin for the rest
        # of this function -- harmless here, but worth renaming.
        if source =='harmonic': str='tide table predicted'
        if source =='harmonic_rec': str='harmonic reconstructed'
        if source =='all': str='all measured'
        if source =='bodc': str='measured only QCd'
        if source == 'api': str='measured w/o QC'
        plt.title(f"Bore arrival time at <NAME> ({str} data)")
        #plt.xlim([-125, -40]) # minutes
        #plt.ylim([8.2, 10.9]) # metres
        plt.legend()
        #plt.show()
        plt.savefig('figs/SaltneyArrivalLag_vs_LivHeight_'+HLW+'_'+source+'.png')
    def plot_surge_effect(self, source:str='bodc', HLW:str="HW"):
        """
        Compare harmonic predicted HLW+lag with measured HLW+lag.
        Plot quiver (line segments) between harmonic and measured values.
        NB should probably have linfit predicted lag instead of
        Saltney_lag_*_harmonic for the predicted value.

        Saves figs/SaltneyArrivalLag_vs_LivHeight_shift_<HLW>_<source>.png

        inputs:
        source:
            'bodc' - measured and processed data
            'api' - load recent, un processed data from shoothill API
        HLW: [LW/HW] - the data is either processed for High or Low water events
        """
        # Example plot
        from matplotlib.collections import LineCollection
        from matplotlib import colors as mcolors
        import matplotlib.dates as mdates
        if source=='api':
            # Only use api events more recent than the last QCd bodc event,
            # so the two sources do not overlap.
            last_bodc_time = self.bore['liv_time_'+HLW+'_bodc']\
                .where(np.isfinite(self.bore['liv_height_'+HLW+'_bodc'].values))\
                .dropna('time')\
                .max().values
            I = self.bore['liv_time_'+HLW+'_api'] > last_bodc_time + np.timedelta64(1,'D') #np.datetime64('2020-09-01')
            nval = sum(I).values
        else:
            nval = min( len(self.bore['linfit_lag_'+HLW+'_harmonic']), len(self.bore['linfit_lag_'+HLW+'_bodc']) )
            I = np.arange(nval)
        # One line segment per event: point 0 = measured, point 1 = harmonic.
        segs_h = np.zeros((nval,2,2)) # line, pointA/B, t/z
        #convert dates to numbers first
        segs_h[:,0,1] = self.bore['liv_height_'+HLW+'_'+source][I]
        segs_h[:,1,1] = self.bore['liv_height_'+HLW+'_harmonic'][I]
        segs_h[:,0,0] = self.bore['Saltney_lag_'+HLW+'_'+source][I]
        segs_h[:,1,0] = self.bore['Saltney_lag_'+HLW+'_harmonic'][I]
        if source=='api':
            print('liv_height_'+HLW+'_'+source, segs_h[:,0,1])
            print('liv_height_'+HLW+'_harmonic', segs_h[:,1,1])
            print('Saltney_lag_'+HLW+'_'+source, segs_h[:,0,0])
            print('Saltney_lag_'+HLW+'_harmonic', segs_h[:,1,0])
        # First-hand observations get larger markers below.
        II = self.bore['Quality'][I] == "A"
        #segs_h[:,0,0] = self.bore.liv_height_bodc[:nval]
        #segs_h[:,1,0] = self.bore.liv_height_harmonic[:nval]
        #segs_h[:,0,1] = self.bore.Saltney_lag_bodc[:nval]
        #segs_h[:,1,1] = self.bore.Saltney_lag_harmonic[:nval]
        fig, ax = plt.subplots()
        ax.set_ylim(np.nanmin(segs_h[:,:,1]), np.nanmax(segs_h[:,:,1]))
        line_segments_HW = LineCollection(segs_h, cmap='plasma', linewidth=1)
        ax.add_collection(line_segments_HW)
        ax.scatter(segs_h[:,1,0],segs_h[:,1,1], c='red', s=4, label='predicted') # harmonic predictions
        ax.scatter(segs_h[:,0,0],segs_h[:,0,1], c='green', s=4, label='measured') # harmonic predictions
        ax.scatter(segs_h[II,0,0],segs_h[II,0,1], c='green', s=16) # 1st hand
        ax.set_title('Harmonic prediction with quiver to measured high waters')
        plt.ylabel('Liv (Gladstone Dock) '+HLW+' (m)')
        plt.xlabel('Arrival time (mins relative to LiV '+HLW+')')
        plt.title('Bore arrival time at Saltney Ferry. Harmonic prediction cf measured')
        plt.legend()
        #plt.xlim([-125, -40]) # minutes
        #plt.ylim([8.2, 10.9]) # metres
        plt.savefig('figs/SaltneyArrivalLag_vs_LivHeight_shift_'+HLW+'_'+source+'.png')
        plt.close('all')
    def plot_scatter_river(self, source:str='bodc', HLW:str="HW"):
        """
        Scatter of Saltney lag vs Gladstone height, coloured by river height
        (ctr_height_LW). First-hand ("Quality" == "A") points are larger and
        annotated with their date.

        Saves figs/SaltneyArrivalLag_vs_LivHeight_river_<HLW>_<source>.png

        inputs:
        source: 'harmonic' / 'bodc' / 'api' - which processed dataset to use
        HLW: HW/LW plot that event type; 'dLW'/'dHW' plot the HW-LW height
             range against the LW/HW lag; 'XX' plots LW height vs HW lag.
        """
        plt.close('all')
        fig = plt.figure(figsize=(8, 6), dpi=120)
        # Select the (lag, height) combination to plot.
        if HLW=="dLW":
            X = self.bore['Saltney_lag_LW_'+source]
            Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
        elif HLW=="dHW":
            X = self.bore['Saltney_lag_HW_'+source]
            Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
        elif HLW=="XX":
            X = self.bore['Saltney_lag_HW_'+source]
            Y = self.bore['liv_height_LW_'+source]
        else:
            X = self.bore['Saltney_lag_'+HLW+'_'+source]
            Y = self.bore['liv_height_'+HLW+'_'+source]
        # Marker size: 40 for first-hand obs, 5 otherwise.
        S = [40 if self.bore['Quality'][i] == "A" else 5 for i in range(len(self.bore['Quality']))]
        # Date labels only for first-hand obs.
        lab = [ self.bore.time[i].values.astype('datetime64[D]').astype(object).strftime('%d%b%y') if self.bore['Quality'][i] == "A" else "" for i in range(len(self.bore['Quality']))]
        ss= plt.scatter( X, Y, \
            c=self.bore['ctr_height_LW'],
            s=S,
            #cmap='magma',
            cmap='jet',
            vmin=4.4,
            vmax=5.5, # 4.6
            label="RMSE:"+self.bore.attrs['rmse_'+HLW+'_'+source]
            )
        cbar = plt.colorbar(ss)
        for ind in range(len(self.bore['Quality'])):
            # zip joins x and y coordinates in pairs
            plt.annotate(lab[ind], # this is the text
                (X[ind],Y[ind]), # this is the point to label
                textcoords="offset points", # how to position the text
                xytext=(0,6), # distance from text to points (x,y)
                ha='center', # horizontal alignment can be left, right or center
                fontsize=4)
        plt.legend()
        # Linear fit
        #x = self.df['Liv (Gladstone Dock) HT height (m)']
        #plt.plot( x, self.df['linfit_lag'], '-' )
        cbar.set_label('River height (m)')
        plt.title('Bore arrival time at Saltney Ferry')
        plt.xlabel('Arrival time (mins) relative to Liv '+HLW)
        plt.ylabel('Liv (Gladstone Dock) '+HLW+' height (m)')
        plt.savefig('figs/SaltneyArrivalLag_vs_LivHeight_river_'+HLW+'_'+source+'.png')
############################################################################
#%% DIAGNOSTICS
############################################################################
    def predict_bore(self, source:str='harmonic', HLW:str="HW"):
        """
        Predict the bore timing at Saltney for a requested input date (given
        interactively as days relative to now).
        Implements a linear fit model to predicted tides.
        Can select which linear fit model (weights) to use by specifying
        'source' and 'HLW'.

        INPUTS: which define the weights used.
        -------
        source: 'harmonic' [default] - from harmonic prediction
                'bodc' - from measured and processed data
                'api' - from recent, un processed data from shoothill API
        HLW: [LW/HW] - processed from either High or Low water events

        Requested parameters
        --------------------
        day : number of days from today to predict for, read from stdin.
        """
        print('Predict bore event for date')
        # NOTE(review): hardcoded absolute user path; elsewhere in this file
        # relative 'data/...' paths are used -- confirm and make consistent.
        filnam = '/Users/jeff/GitHub/DeeBore/data/Liverpool_2021_2022_HLW.txt'
        nd = input('Make predictions for N days from hence (int):?')
        day = np.datetime64('now', 'D') + np.timedelta64(int(nd), 'D')
        dayp1 = day + np.timedelta64(24, 'h')
        # Branch hardwired to the tide-table path; the else-branch (harmonic
        # reconstruction) is kept for when no tide table data is available.
        if(1): # np.datetime64('now', 'Y') < np.datetime64('2021'): # year 2020
            print("predict_bore(): should check is table data is available. If not use harm reconstructed data")
            tg = GAUGE()
            tg.dataset = tg.read_hlw_to_xarray(filnam, day, dayp1)
            # Keep only high waters above 7m -- smaller tides do not bore.
            HT = tg.dataset['sea_level'].where(tg.dataset['sea_level']\
                                    .values > 7).dropna('time') #, drop=True)
        else: # year 2021 (no tide table data)
            source = 'harmonic_rec'
            print('source=',source)
            tg = GAUGE()
            tg_tmp = GAUGE()
            tg_tmp.dataset = tg_tmp.anyTide_to_xarray(date_start=day, date_end=dayp1)
            tg = tg_tmp.find_high_and_low_water(var_str='sea_level')
            #tg.dataset = tg.get_Glad_data(source='harmonic_rec',date_start=day, date_end=dayp1)
            HT = tg.dataset['sea_level_highs'].where(tg.dataset['sea_level_highs']\
                                    .values > 7).dropna('time_highs')\
                                    .rename({'time_highs':'time'})
        #plt.plot( HT.time, HT,'.' );plt.show()
        #lag_pred = self.linfit(HT)
        # Apply the stored linear-fit polynomial to get lag (minutes).
        lag_pred = self.bore.attrs['weights_'+HLW+'_'+source](HT)
        #lag_pred = lag_pred[np.isfinite(lag_pred)] # drop nans
        # Predicted Saltney arrival = Gladstone event time + fitted lag.
        Saltney_time_pred = [HT.time[i].values
                             + np.timedelta64(int(round(lag_pred[i])), 'm')
                             for i in range(len(lag_pred))]
        # Iterate over high tide events to print useful information
        print(f"Predictions based on fit to {source} {HLW} data")
        for i in range(len(lag_pred)):
            #print( "Gladstone HT", np.datetime_as_string(HT.time[i], unit='m',timezone=pytz.timezone('UTC')),"(GMT). Height: {:.2f} m".format( HT.values[i]))
            #print(" Saltney arrival", np.datetime_as_string(Saltney_time_pred[i], unit='m', timezone=pytz.timezone('Europe/London')),"(GMT/BST). Lag: {:.0f} mins".format( lag_pred[i] ))
            print("Predictions for ", day_of_week(Saltney_time_pred[i]), Saltney_time_pred[i].astype('datetime64[s]').astype(datetime.datetime).strftime('%Y/%m/%d') )
            print("Saltney FB:", np.datetime_as_string(Saltney_time_pred[i], unit='m', timezone=pytz.timezone('Europe/London')) )
            # NOTE(review): bare except below silently drops tide-table lookup
            # failures; consider catching the specific exception and logging.
            try:
                Glad_HLW = tg.get_tide_table_times( Saltney_time_pred[i], method='nearest_2' )
                # Extract the High Tide value
                print('Liv HT: ', np.datetime_as_string(Glad_HLW[ np.argmax(Glad_HLW.values) ].time.values, unit='m', timezone=pytz.timezone('Europe/London')), Glad_HLW[ np.argmax(Glad_HLW.values) ].values, 'm' )
                # Extract the Low Tide value
                print('Liv LT: ', np.datetime_as_string(Glad_HLW[ np.argmin(Glad_HLW.values) ].time.values, unit='m', timezone=pytz.timezone('Europe/London')), Glad_HLW[ np.argmin(Glad_HLW.values) ].values, 'm' )
            except:
                pass
            print("")
        #plt.scatter( Saltney_time_pred, HT ,'.');plt.show()
        # problem with time stamp
def stats(self, source:str='harmonic', HLW:str="HW"):
"""
root mean square error
"""
rmse = np.sqrt(np.nanmean((self.bore['Saltney_lag_'+HLW+'_'+source].values - self.bore['linfit_lag_'+HLW+'_'+source].values)**2))
print(f"{source}: Root mean square error = {rmse}")
return rmse
############################################################################
#%% SECTION
############################################################################
def load_timeseries(self):
fn_tidegauge = '../COAsT/example_files/tide_gauges/lowestoft-p024-uk-bodc'
date0 = datetime.datetime(2007,1,10)
date1 = datetime.datetime(2007,1,12)
tidegauge = GAUGE(fn_tidegauge, date_start = date0, date_end = date1)
print(tidegauge.dataset)
############################################################################
#%% Development / Misc methods
############################################################################
def load_and_plot_hlw_data(self):
""" Simply load HLW file and plot """
filnam = 'data/Liverpool_2015_2020_HLW.txt'
date_start = datetime.datetime(2020, 1, 1)
date_end = datetime.datetime(2020, 12, 31)
tg = GAUGE()
tg.dataset = tg.read_hlw_to_xarray(filnam, date_start, date_end)
# Exaple plot
plt.figure()
tg.dataset.plot.scatter(x="time", y="sea_level")
plt.savefig('figs/Liverpool_HLW.png')
plt.close('all')
print(f"stats: mean {tg.time_mean('sea_level')}")
print(f"stats: std {tg.time_std('sea_level')}")
    def shoothill(self):
        """
        Three-part demo/check routine for September 2020:
        1) Extract the Shoothill API timeseries for a period, find the
           extrema, and plot the timeseries with highs and lows overlaid.
        2) Compare harmonic (tide table) predicted highs with API-measured
           highs, drawn as quiver-like line segments.
        3) Compare QCd BODC measured highs with API highs (reference level
           check), also as line segments.
        Saves three figures under figs/.
        """
        date_start = np.datetime64('2020-09-01')
        date_end = np.datetime64('2020-09-30')
        # E.g Liverpool (Gladstone Dock station_id="13482", which is read by default.
        # Load in data from the Shoothill API
        sg = GAUGE()
        sg.dataset = sg.read_shoothill_to_xarray(date_start=date_start, date_end=date_end)
        #sg = GAUGE(startday=date_start, endday=date_end) # create modified Tidegauge object
        sg_HLW = sg.find_high_and_low_water(var_str='sea_level', method='cubic')
        #g.dataset
        #g_HLW.dataset
        plt.figure()
        sg.dataset.plot.scatter(x="time", y="sea_level")
        sg_HLW.dataset.plot.scatter(x="time_highs", y="sea_level_highs")
        sg_HLW.dataset.plot.scatter(x="time_lows", y="sea_level_lows")
        plt.savefig('figs/Liverpool_shoothill.png')
        plt.close('all')
        """
        Compare harmonic predicted highs with measured highs
        """
        # Compare tide predictions with measured HLW
        filnam = 'data/Liverpool_2015_2020_HLW.txt'
        tg = GAUGE()
        tg.dataset = tg.read_hlw_to_xarray(filnam, date_start, date_end)
        tg_HLW = tg.find_high_and_low_water(var_str='sea_level')
        sg = GAUGE()
        sg.dataset = sg.read_shoothill_to_xarray(date_start=date_start, date_end=date_end)
        # Measured highs nearest to each tide-table high time.
        sg_HW = sg.find_nearby_high_and_low_water(var_str='sea_level', target_times=tg_HLW.dataset.time_highs, method='cubic', extrema="max")
        # Example plot
        from matplotlib.collections import LineCollection
        from matplotlib import colors as mcolors
        import matplotlib.dates as mdates
        nval = min( len(sg_HLW.dataset.time_highs), len(tg_HLW.dataset.time_highs) )
        # One segment per high water: point 0 = table, point 1 = measured.
        segs_h = np.zeros((nval,2,2)) # line, pointA/B, t/z
        #convert dates to numbers first
        segs_h[:,0,0] = mdates.date2num( tg_HLW.dataset.time_highs[:nval].astype('M8[ns]').astype('M8[ms]') )
        segs_h[:,1,0] = mdates.date2num( sg_HW.dataset.time_highs[:nval].astype('M8[ns]').astype('M8[ms]') )
        segs_h[:,0,1] = tg_HLW.dataset.sea_level_highs[:nval]
        segs_h[:,1,1] = sg_HW.dataset.sea_level_highs[:nval]
        fig, ax = plt.subplots()
        ax.set_ylim(segs_h[:,:,1].min(), segs_h[:,:,1].max())
        line_segments_HW = LineCollection(segs_h, cmap='plasma', linewidth=1)
        ax.add_collection(line_segments_HW)
        ax.scatter(segs_h[:,0,0],segs_h[:,0,1], c='green', s=2) # harmonic predictions
        ax.set_title('Harmonic prediction with quiver to measured high waters')
        ax.xaxis_date()
        ax.autoscale_view()
        plt.savefig('figs/Liverpool_shoothill_vs_table.png')
        plt.close('all')
        """
        Compare QC's BODC measured highs with API highs (check reference levels)
        """
        bg=GAUGE()
        bg.dataset = bg.read_bodc_to_xarray("data/BODC_processed/2020LIV.txt")
        # Use QC to drop null values
        bg.dataset['sea_level'] = bg.dataset.sea_level.where( bg.dataset.qc_flags!='N', drop=True)
        # Trim dataset
        bg.dataset = bg.dataset.sel(time=slice(date_start, date_end))
        # Fix some attributes (others might not be correct for all data)
        bg.dataset['start_date'] = bg.dataset.time.min().values
        bg.dataset['end_date'] = bg.dataset.time.max().values
        # This produces an xr.dataset with sea_level_highs and sea_level_lows
        # with time variables time_highs and time_lows.
        bg_HW = bg.find_nearby_high_and_low_water(var_str='sea_level', target_times=tg_HLW.dataset.time_highs, method='cubic', extrema="max")
        #bg_HLW = bg.find_high_and_low_water(var_str='sea_level',method='cubic') #'cubic')
        nval = min( len(sg_HW.dataset.time_highs), len(bg_HW.dataset.time_highs) )
        # Segments again: point 0 = BODC QCd, point 1 = API measured.
        segs_h = np.zeros((nval,2,2)) # line, pointA/B, t/z
        #convert dates to numbers first
        segs_h[:,0,0] = mdates.date2num( bg_HW.dataset.time_highs[:nval].astype('M8[ns]').astype('M8[ms]') )
        segs_h[:,1,0] = mdates.date2num( sg_HW.dataset.time_highs[:nval].astype('M8[ns]').astype('M8[ms]') )
        segs_h[:,0,1] = bg_HW.dataset.sea_level_highs[:nval]
        segs_h[:,1,1] = sg_HW.dataset.sea_level_highs[:nval]
        fig, ax = plt.subplots()
        ax.set_ylim(segs_h[:,:,1].min(), segs_h[:,:,1].max())
        line_segments_HW = LineCollection(segs_h, cmap='plasma', linewidth=1)
        ax.add_collection(line_segments_HW)
        ax.scatter(segs_h[:,0,0],segs_h[:,0,1], c='green', s=2) # harmonic predictions
        ax.set_title('BODC QCd quiver to API measured high waters')
        ax.xaxis_date()
        ax.autoscale_view()
        plt.savefig('figs/Liverpool_shoothill_vs_bodc.png')
        plt.close('all')
    def fits_to_data(self, source:str="bodc", qc_flag:bool=False):
        """
        Explore different combinations of HW and LW times and heights to
        find the best fit to the data.

        For each combination a linear fit is computed (via self.linearfit)
        and its weights (np.poly1d) and RMSE string are stored as
        self.bore.attrs['weights_<HLW>_<source>'] / ['rmse_<HLW>_<source>'].

        qc_flag: if True, only fit bore['Quality'] == "A" data, else fit all data
        """
        # Build the list of fit configurations; each dict names the x/y data
        # and the attrs keys to store the resulting weights and RMSE under.
        args_list = []
        self.bore.attrs['weights_HW_'+source] = []
        self.bore.attrs['rmse_HW_'+source] = []
        args_list.append( {"HLW":"HW",
                "source":source,
                'xvar':self.bore['liv_height_HW_'+source],
                'yvar':self.bore['Saltney_lag_HW_'+source],
                'label':'height(HW), time(HW)',
                'wvar':'weights_HW'+'_'+source,
                'rvar':'rmse_HW'+'_'+source}
                )
        self.bore.attrs['weights_dHW_'+source] = []
        self.bore.attrs['rmse_dHW_'+source] = []
        args_list.append( {"HLW":"dHW",
                "source":source,
                'xvar':self.bore['liv_height_HW_'+source]-self.bore['liv_height_LW_'+source],
                'yvar':self.bore['Saltney_lag_HW_'+source],
                'label':'height(HW-LW), time(HW)',
                'wvar':'weights_dHW_'+source,
                'rvar':'rmse_dHW'+'_'+source}
                )
        self.bore.attrs['weights_dLW_'+source] = []
        self.bore.attrs['rmse_dLW_'+source] = []
        args_list.append( {"HLW":"dLW",
                "source":source,
                'xvar':self.bore['liv_height_HW_'+source]-self.bore['liv_height_LW_'+source],
                'yvar':self.bore['Saltney_lag_LW_'+source],
                'label':'height(HW-LW), time(LW)',
                'wvar':'weights_dLW'+'_'+source,
                'rvar':'rmse_dLW'+'_'+source}
                )
        self.bore.attrs['weights_LW_'+source] = []
        self.bore.attrs['rmse_LW_'+source] = []
        args_list.append( {"HLW":"LW",
                "source":source,
                'xvar':self.bore['liv_height_LW_'+source],
                'yvar':self.bore['Saltney_lag_LW_'+source],
                'label':'height(LW), time(LW)',
                'wvar':'weights_LW'+'_'+source,
                'rvar':'rmse_LW'+'_'+source}
                )
        #self.bore.attrs['weights_XX_'+source] = []
        #self.bore.attrs['rmse_XX_'+source] = []
        args_list.append( {"HLW":"XX",
                "source":source,
                'xvar':self.bore['liv_height_LW_'+source],
                'yvar':self.bore['Saltney_lag_HW_'+source],
                'label':'height(LW), time(HW)',
                'wvar':'weights_XX'+'_'+source,
                'rvar':'rmse_XX'+'_'+source}
                )
        # Fit each configuration (optionally restricted to Quality "A" data)
        # and store the weights and RMSE in the dataset attrs.
        for args in args_list:
            self.bore.attrs[args['wvar']] = []
            self.bore.attrs[args['rvar']] = []
            if qc_flag:
                weights,rmse = self.linearfit( args['xvar'].where( self.bore['Quality'].values=="A"),
                        args['yvar'].where( self.bore['Quality'].values=="A" ) )
                print(f"{source} class A| {args['label']}: {rmse}")
                self.bore.attrs[args['wvar']] = weights
                self.bore.attrs[args['rvar']] = rmse
            else:
                weights,rmse = self.linearfit( args['xvar'], args['yvar'] )
                print(f"{source}| {args['label']}: {rmse}")
                self.bore.attrs[args['wvar']] = weights
                self.bore.attrs[args['rvar']] = rmse
###
def combinations_lag_hlw_river(self):
"""
Plot different combinations of Lag,HLW w/ rivers
"""
self.plot_scatter_river(source='harmonic', HLW="HW")
self.plot_scatter_river(source='bodc', HLW="HW")
self.plot_scatter_river(source='bodc', HLW="LW")
self.plot_scatter_river(source='bodc', HLW="dLW")
self.plot_scatter_river(source='bodc', HLW="dHW")
self.plot_scatter_river(source='bodc', HLW="XX")
self.plot_scatter_river(source='bodc', HLW="FW")
self.plot_scatter_river(source='api', HLW="HW")
self.plot_scatter_river(source='api', HLW="FW")
self.plot_scatter_date(source='api', HLW="HW")
self.plot_scatter_date(source='bodc', HLW="HW")
self.plot_scatter_date(source='bodc', HLW="FW")
self.plot_scatter_date(source='harmonic', HLW="HW")
self.plot_scatter_wind(source='api', HLW="HW")
self.plot_scatter_wind(source='bodc', HLW="HW")
self.plot_scatter_wind(source='bodc', HLW="FW")
self.plot_scatter_wind(source='harmonic', HLW="HW")
    def river_lag_timing(self, HLW="HW", source="api"):
        """
        Explore how rivers affect bore timing: scatter of prediction timing
        error (fitted lag minus observed lag) against river height,
        coloured by Gladstone height. First-hand obs are larger + labelled.

        Saves figs/SaltneyArrivalLag_vs_river_LivHeight<HLW>_<source>.png
        """
        plt.close('all')
        fig = plt.figure(figsize=(8, 6), dpi=120)
        if HLW=="dLW":
            X = self.bore['Saltney_lag_LW_'+source]
            Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
        elif HLW=="dHW":
            X = self.bore['Saltney_lag_HW_'+source]
            Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
        elif HLW=="XX":
            X = self.bore['Saltney_lag_HW_'+source]
            Y = self.bore['liv_height_LW_'+source]
        else:
            # Default: timing error of the stored linear-fit prediction
            # (predicted lag - observed lag) vs river height.
            Y = self.bore['ctr_height_LW']
            lag_pred = self.bore.attrs['weights_'+HLW+'_'+source](self.bore['liv_height_HW_'+source])
            X = lag_pred - self.bore['Saltney_lag_'+HLW+'_'+source]
        # Marker size: 40 for first-hand ("A") obs, 5 otherwise; labels only
        # for first-hand obs.
        S = [40 if self.bore['Quality'][i] == "A" else 5 for i in range(len(self.bore['Quality']))]
        lab = [ self.bore.time[i].values.astype('datetime64[D]').astype(object).strftime('%d%b%y') if self.bore['Quality'][i] == "A" else "" for i in range(len(self.bore['Quality']))]
        ss= plt.scatter( X, Y, \
            c=self.bore['liv_height_HW_'+source], # - self.bore['liv_height_HW_harmonic'],
            s=S,
            #cmap='magma',
            cmap='jet',
            #vmin=8.5,
            #vmax=10.5,
            label="RMSE:"+self.bore.attrs['rmse_'+HLW+'_'+source]
            )
        cbar = plt.colorbar(ss)
        for ind in range(len(self.bore['Quality'])):
            # zip joins x and y coordinates in pairs
            plt.annotate(lab[ind], # this is the text
                (X[ind],Y[ind]), # this is the point to label
                textcoords="offset points", # how to position the text
                xytext=(0,6), # distance from text to points (x,y)
                ha='center', # horizontal alignment can be left, right or center
                fontsize=4)
        plt.legend()
        # Linear fit
        #x = self.df['Liv (Gladstone Dock) HT height (m)']
        #plt.plot( x, self.df['linfit_lag'], '-' )
        cbar.set_label('Liv (Gladstone Dock) '+HLW+' height (m)')
        plt.title('Bore arrival time at Saltney Ferry')
        plt.xlabel('Timing error (mins) on prediction relative to '+HLW)
        plt.ylabel('River height (m)')
        plt.savefig('figs/SaltneyArrivalLag_vs_river_LivHeight'+HLW+'_'+source+'.png')
    def plot_scatter_date(self, source:str='bodc', HLW:str="HW"):
        """
        Scatter of Saltney lag vs Gladstone height, coloured by observation
        date. First-hand ("Quality" == "A") points get larger markers; all
        points are labelled with month/year.

        Saves figs/SaltneyArrivalLag_vs_LivHeight_date_<HLW>_<source>.png

        inputs:
        source: 'harmonic' / 'bodc' / 'api' - which processed dataset to use
        HLW: HW/LW plot that event type; 'dLW'/'dHW' plot the HW-LW height
             range against the LW/HW lag; 'XX' plots LW height vs HW lag.
        """
        plt.close('all')
        fig = plt.figure(figsize=(8, 6), dpi=120)
        # Select the (lag, height) combination to plot.
        if HLW=="dLW":
            X = self.bore['Saltney_lag_LW_'+source]
            Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
        elif HLW=="dHW":
            X = self.bore['Saltney_lag_HW_'+source]
            Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
        elif HLW=="XX":
            X = self.bore['Saltney_lag_HW_'+source]
            Y = self.bore['liv_height_LW_'+source]
        else:
            X = self.bore['Saltney_lag_'+HLW+'_'+source]
            Y = self.bore['liv_height_'+HLW+'_'+source]
        S = [40 if self.bore['Quality'][i] == "A" else 10 for i in range(len(self.bore['Quality']))]
        lab = [ self.bore.time[i].values.astype('datetime64[D]').astype(object).strftime('%b%y') for i in range(len(self.bore['Quality']))]
        ss= plt.scatter( X, Y, \
            c=self.bore.time, #self.bore['ctr_height_LW'],
            s=S,
            #cmap='magma',
            cmap='jet',
            #vmin=4.4,
            #vmax=5.5, # 4.6
            label="RMSE:"+self.bore.attrs['rmse_'+HLW+'_'+source]
            )
        cbar = plt.colorbar(ss)
        for ind in range(len(self.bore['Quality'])):
            # zip joins x and y coordinates in pairs
            plt.annotate(lab[ind], # this is the text
                (X[ind],Y[ind]), # this is the point to label
                textcoords="offset points", # how to position the text
                xytext=(0,6), # distance from text to points (x,y)
                ha='center', # horizontal alignment can be left, right or center
                fontsize=4)
        plt.legend()
        # Linear fit
        #x = self.df['Liv (Gladstone Dock) HT height (m)']
        #plt.plot( x, self.df['linfit_lag'], '-' )
        cbar.set_label('Date')
        plt.title('Bore arrival time at <NAME>')
        plt.xlabel('Arrival time (mins) relative to Liv '+HLW)
        plt.ylabel('Liv (Gladstone Dock) '+HLW+' height (m)')
        plt.savefig('figs/SaltneyArrivalLag_vs_LivHeight_date_'+HLW+'_'+source+'.png')
def plot_scatter_wind(self, source:str='bodc', HLW:str="HW"):
    """Scatter plot of bore arrival lag vs Liverpool tide height, coloured by
    the along- or across-estuary wind speed (one saved figure per direction).

    source: tide-gauge data source label used to select columns.
    HLW: tidal event combination -- "HW", "LW", "dLW", "dHW" or "XX".
    """
    for dirn in ["along", "across"]:
        plt.close('all')
        fig = plt.figure(figsize=(8, 6), dpi=120)
        # Choose the x/y columns for the requested tidal-event combination.
        if HLW=="dLW":
            X = self.bore['Saltney_lag_LW_'+source]
            Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
        elif HLW=="dHW":
            X = self.bore['Saltney_lag_HW_'+source]
            Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
        elif HLW=="XX":
            X = self.bore['Saltney_lag_HW_'+source]
            Y = self.bore['liv_height_LW_'+source]
        else:
            X = self.bore['Saltney_lag_'+HLW+'_'+source]
            Y = self.bore['liv_height_'+HLW+'_'+source]
        # Larger markers for quality-"A" sightings; month/year label per point.
        S = [40 if self.bore['Quality'][i] == "A" else 10 for i in range(len(self.bore['Quality']))]
        lab = [ self.bore.time[i].values.astype('datetime64[D]').astype(object).strftime('%b%y') for i in range(len(self.bore['Quality']))]
        # Project the wind vector onto an axis at bearing 315 deg --
        # presumably the estuary orientation; TODO confirm.
        if dirn == "along":
            spd = self.bore.wind_speed * np.cos((315 - self.bore.wind_deg)*np.pi/180.)
        elif dirn == "across":
            spd = self.bore.wind_speed * np.sin((315 - self.bore.wind_deg)*np.pi/180.)
        else:
            print(f"{dirn}: did not expect that direction option")
        ss= plt.scatter( X, Y, \
            c=spd, #self.bore['ctr_height_LW'],
            s=S,
            cmap='Spectral',
            vmin=-7,
            vmax=7, # 4.6
            label="RMSE:"+self.bore.attrs['rmse_'+HLW+'_'+source]
            )
        cbar = plt.colorbar(ss)
        for ind in range(len(self.bore['Quality'])):
            # zip joins x and y coordinates in pairs
            plt.annotate(lab[ind], # this is the text
                        (X[ind],Y[ind]), # this is the point to label
                        textcoords="offset points", # how to position the text
                        xytext=(0,6), # distance from text to points (x,y)
                        ha='center', # horizontal alignment can be left, right or center
                        fontsize=4)
        plt.legend()
        # Linear fit
        #x = self.df['Liv (Gladstone Dock) HT height (m)']
        #plt.plot( x, self.df['linfit_lag'], '-' )
        cbar.set_label(dirn+' estuary wind (m/s), from Hawarden/Connahs Quay')
        plt.title('Bore arrival time at Saltney Ferry')
        plt.xlabel('Arrival time (mins) relative to Liv '+HLW)
        plt.ylabel('Liv (Gladstone Dock) '+HLW+' height (m)')
        plt.savefig('figs/SaltneyArrivalLag_vs_LivHeight_'+dirn+'_wind_'+HLW+'_'+source+'.png')
################################################################################
################################################################################
#%% Main Routine
################################################################################
################################################################################
if __name__ == "__main__":
    #### Initialise logging
    now_str = datetime.datetime.now().strftime("%d%b%y %H:%M")
    logging.info(f"-----{now_str}-----")
    #### Constants
    # Pickle file used to persist the bore dataset between sessions.
    DATABUCKET_FILE = "deebore.pkl"
    # Interactive menu; each key maps to an action handled by the Controller.
    INSTRUCTIONS = """
    Choose Action:
    all     load and process all data
    0       load bore observations
    h       load and process harmonic data
    hrec    load and process harmonic reconstructed data
    b       load and process measured (bodc) data
    a       load and process measured (API) data
    r       load and process measured (API) river data
    m       load and process met data
    2       show bore dataset
    3       plot bore data (lag vs tidal height)
    4       plot difference between predicted and measured (lag vs tidal height)
    6       Predict bore event for date
    x       Export data to csv. NOT IMPLEMENTED
    rm      Remove pickle file
    i       to show these instructions
    q       to quit (and pickle bore)
    ---
    DEV:
    d1     load and plot HLW data
    d2     shoothill dev
    d3     Explore different RMSE fits to the data
    d4     Plot different combinations of Lag,HLW w/ rivers
    d5     Explore how rivers affect bore timing
    """
    ## Do the main program
    c = Controller()
| StarcoderdataPython |
1655527 | <reponame>mrinaald/flower
from typing import Tuple, Union, List
import numpy as np
from sklearn.linear_model import LogisticRegression
import openml
# Type aliases shared by the federated-learning utilities below.
XY = Tuple[np.ndarray, np.ndarray]  # (features, labels) pair
Dataset = Tuple[XY, XY]  # (train split, test split)
LogRegParams = Union[XY, Tuple[np.ndarray]]  # (coef_, intercept_) or (coef_,)
XYList = List[XY]  # list of (features, labels) partitions
def get_model_parameters(model: LogisticRegression) -> LogRegParams:
    """Return the parameters of a sklearn LogisticRegression model.

    Yields (coef_, intercept_) when the model fits an intercept,
    otherwise the one-element tuple (coef_,).
    """
    if not model.fit_intercept:
        return (model.coef_,)
    return (model.coef_, model.intercept_)
def set_model_params(
    model: LogisticRegression, params: LogRegParams
) -> LogisticRegression:
    """Copy *params* onto a sklearn LogisticRegression model and return it.

    params[0] is the coefficient matrix; params[1] (when the model was
    configured with an intercept) is the intercept vector.
    """
    model.coef_ = params[0]
    if not model.fit_intercept:
        return model
    model.intercept_ = params[1]
    return model
def set_initial_params(model: LogisticRegression):
    """
    Sets initial parameters as zeros.

    Required since model params are uninitialized until model.fit is called,
    but the federated server asks clients for initial parameters at launch.

    Refer to sklearn.linear_model.LogisticRegression documentation
    for more information.
    """
    n_classes = 10  # MNIST has 10 classes
    n_features = 784  # Number of features in dataset (28x28 flattened pixels)
    # Use the named constant consistently (the original hard-coded 10 here)
    # and np.arange instead of a list-comprehension round trip.
    model.classes_ = np.arange(n_classes)
    model.coef_ = np.zeros((n_classes, n_features))
    if model.fit_intercept:
        model.intercept_ = np.zeros((n_classes,))
def load_mnist() -> Dataset:
    """
    Loads the MNIST dataset using OpenML (network call; openml caches locally).

    Dataset link: https://www.openml.org/d/554

    Returns ((x_train, y_train), (x_test, y_test)) using the conventional
    60000/10000 train/test split.
    """
    mnist_openml = openml.datasets.get_dataset(554)
    Xy, _, _, _ = mnist_openml.get_data(dataset_format="array")
    X = Xy[:, :-1]  # the last column contains labels
    y = Xy[:, -1]
    # First 60000 samples consist of the train set
    x_train, y_train = X[:60000], y[:60000]
    x_test, y_test = X[60000:], y[60000:]
    return (x_train, y_train), (x_test, y_test)
def shuffle(X: np.ndarray, y: np.ndarray) -> XY:
    """Return X and y shuffled in unison with a shared random permutation."""
    order = np.random.default_rng().permutation(len(X))
    return X[order], y[order]
def partition(X: np.ndarray, y: np.ndarray, num_partitions: int) -> XYList:
    """Split X and y into `num_partitions` aligned (X_chunk, y_chunk) pairs."""
    X_chunks = np.array_split(X, num_partitions)
    y_chunks = np.array_split(y, num_partitions)
    return list(zip(X_chunks, y_chunks))
| StarcoderdataPython |
103450 | <gh_stars>100-1000
# Best-effort pre-build step: compile and install device-tree overlays when
# the optional ``overlays`` package is available; packaging must proceed
# without it (e.g. when building off-target).
try:
    from overlays import builder
    builder.compile()
    builder.copy()
except Exception:
    # Deliberately broad (best-effort), but no longer a bare ``except``,
    # which would also swallow SystemExit/KeyboardInterrupt.
    pass
import distribute_setup
import io
import sys
import platform
distribute_setup.use_setuptools()
from setuptools import setup, Extension, find_packages
open_as_utf8 = lambda x: io.open(x, encoding='utf-8')
kernel = platform.release()
if kernel >= '4.1.0':
kernel41 = [('BBBVERSION41', None)]
else:
kernel41 = None
CFLAGS = ['-Wall']
# Trove classifiers describing supported platforms and Python versions.
classifiers = ['Development Status :: 3 - Alpha',
               'Operating System :: POSIX :: Linux',
               'License :: OSI Approved :: MIT License',
               'Intended Audience :: Developers',
               'Programming Language :: Python :: 2.7',
               'Programming Language :: Python :: 3',
               'Topic :: Software Development',
               'Topic :: Home Automation',
               'Topic :: System :: Hardware']

# Keyword arguments shared by every C extension declared in setup() below.
extension_args = {
    'include_dirs': ['source/include/'],
    'extra_compile_args': CFLAGS,
    'define_macros': kernel41
}
# Package definition: each Adafruit_BBIO submodule (GPIO/PWM/ADC/SPI/UART)
# is a separate C extension sharing the common source files.
setup(name = 'Adafruit_BBIO',
      version = '1.2.0',
      author = '<NAME>',
      author_email = '<EMAIL>',
      description = 'A module to control BeagleBone IO channels',
      long_description = open_as_utf8('README.md').read() + open_as_utf8('CHANGELOG.md').read(),
      long_description_content_type = 'text/markdown',
      license = 'MIT',
      keywords = 'Adafruit BeagleBone IO GPIO PWM ADC',
      url = 'https://github.com/adafruit/adafruit-beaglebone-io-python/',
      classifiers = classifiers,
      packages = find_packages(),
      py_modules = ['Adafruit_I2C'],
      ext_modules = [Extension('Adafruit_BBIO.GPIO', ['source/py_gpio.c', 'source/event_gpio.c', 'source/c_pinmux.c', 'source/constants.c', 'source/common.c'], **extension_args),
                     Extension('Adafruit_BBIO.PWM', ['source/py_pwm.c', 'source/c_pwm.c', 'source/c_pinmux.c', 'source/constants.c', 'source/common.c'], **extension_args),
                     Extension('Adafruit_BBIO.ADC', ['source/py_adc.c', 'source/c_adc.c', 'source/constants.c', 'source/common.c'], **extension_args),
                     Extension('Adafruit_BBIO.SPI', ['source/spimodule.c', 'source/c_pinmux.c', 'source/constants.c', 'source/common.c'], **extension_args),
                     Extension('Adafruit_BBIO.UART', ['source/py_uart.c', 'source/c_pinmux.c', 'source/c_uart.c', 'source/constants.c', 'source/common.c'], **extension_args)] )
| StarcoderdataPython |
3379217 | # -*- coding: utf-8 -*-
"""Test data integrity."""
import re
import unittest
import bioregistry
import pandas as pd
from compath_resources.resources import (
get_decopath_df, get_kegg_reactome_df, get_kegg_wikipathways_df, get_pathbank_kegg_df, get_pathbank_reactome_df,
get_pathbank_wikipathways_df, get_reactome_hierarchy_df, get_special_mappings_df, get_wikipathways_reactome_df,
)
from compath_resources.sync import (
_import_decopath_df, _import_kegg_reactome_df, _import_kegg_wikipathways_df, _import_pathbank_kegg_df,
_import_pathbank_reactome_df, _import_pathbank_wikipathways_df, _import_reactome_hierarchy_df,
_import_special_mappings_df, _import_wikipathways_reactome_df,
)
# (importer, getter) pairs: each importer reads the source spreadsheet and each
# getter reads the exported TSV; the tests below check the two stay in sync.
DATA = [
    (_import_decopath_df, get_decopath_df),
    (_import_kegg_reactome_df, get_kegg_reactome_df),
    (_import_pathbank_reactome_df, get_pathbank_reactome_df),
    (_import_pathbank_kegg_df, get_pathbank_kegg_df),
    (_import_wikipathways_reactome_df, get_wikipathways_reactome_df),
    (_import_reactome_hierarchy_df, get_reactome_hierarchy_df),
    (_import_special_mappings_df, get_special_mappings_df),
    (_import_pathbank_wikipathways_df, get_pathbank_wikipathways_df),
    (_import_kegg_wikipathways_df, get_kegg_wikipathways_df),
]
class TestIntegrity(unittest.TestCase):
    """Test case for checking data integrity."""

    def test_curies(self):
        """Test correct prefixes and identifiers.

        Every (prefix, identifier) pair in every mapping dataframe must use a
        known, non-deprecated bioregistry prefix and match the MIRIAM regex
        for that prefix when one is defined.
        """
        registry = dict(bioregistry.read_registry())
        registry['decopath'] = {}  # TODO decopath needs a real resource and an entry in the bioregistry
        # Pre-compile the MIRIAM identifier regex for every prefix that has one.
        miriam_patterns = {
            k: re.compile(entry['miriam']['pattern'])
            for k, entry in registry.items()
            if 'miriam' in entry
        }
        # Load every mapping dataframe, keyed by a short name derived from its getter.
        dataframes = {
            getter.__name__.removeprefix('get_').removesuffix('_df'): getter()
            for _, getter in DATA
        }
        rows = ['Source Resource', 'Source ID', 'Target Resource', 'Target ID']
        for name, df in dataframes.items():
            with self.subTest(name=name):
                for i, (source_prefix, source_id, target_prefix, target_id) in enumerate(df[rows].values):
                    self.assertIn(source_prefix, registry.keys())
                    self.assertNotEqual(source_prefix, 'kegg')
                    self.assertFalse(
                        bioregistry.is_deprecated(source_prefix),
                        msg=f'[{name}, row {i}] deprecated source prefix: {source_prefix}',
                    )
                    if source_regex := miriam_patterns.get(source_prefix):
                        self.assertRegex(
                            source_id, source_regex,
                            msg=f'[{name}, row {i}] source prefix: {source_prefix}',
                        )
                    self.assertIn(target_prefix, registry.keys())
                    self.assertNotEqual(target_prefix, 'kegg')
                    self.assertFalse(
                        bioregistry.is_deprecated(target_prefix),
                        msg=f'[{name}, row {i}] deprecated target prefix: {target_prefix}',
                    )
                    if target_regex := miriam_patterns.get(target_prefix):
                        self.assertRegex(
                            target_id, target_regex,
                            msg=f'[{name}, row {i}] target prefix: {target_prefix}',
                        )

    def test_import(self):
        """Test the exported data is the same as the excel."""
        for importer, getter in DATA:
            with self.subTest(name=importer.__name__):
                xlsx_df = importer()
                self.assertIsInstance(xlsx_df, pd.DataFrame)
                tsv_df = getter()
                self.assertIsInstance(tsv_df, pd.DataFrame)
                # Element-wise comparison; requires identical shapes and order.
                self.assertTrue(
                    (xlsx_df.values == tsv_df.values).all(),
                    msg='\nFiles are out of sync.\nrun `python -m compath_resources.sync`',
                )
| StarcoderdataPython |
1619136 | <filename>kydavra/LassoSelector.py
'''
Created with love by Sigmoid
@Author - <NAME> - <EMAIL>
'''
# Importing all needed libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, LassoCV
def warn(*args, **kwargs):
    """No-op stand-in for warnings.warn."""
    pass
import warnings
# HACK: silences every warning process-wide by monkey-patching the stdlib.
# Consider warnings.filterwarnings('ignore') scoped to this module instead.
warnings.warn = warn
class LassoSelector:
    """Feature selector based on cross-validated Lasso regularisation:
    keeps the columns whose Lasso coefficients stay above a threshold."""

    def __init__(self, alpha_start : float = 0, alpha_finish : float = 2, n_alphas : int = 300, extend_step : float = 20, power : int = 2) -> None:
        '''
        Setting the algorithm
        :param alpha_start: float
            The starting point in the greedy search of coefficients
        :param alpha_finish: float
            The finish point in the greedy search of coefficients
        :param n_alphas: integer
            The number of points in greedy search
        :param extend_step: integer
            The quantity with which the :param alpha_start and :param alpha_finish will be updated
        :param power: integer
            Used to set a threshold in finding the best coefficients
        '''
        self.n_alphas = n_alphas
        self.alpha_start = alpha_start
        self.alpha_finish = alpha_finish
        self.extend_step = extend_step
        self.power = power

    def select(self, dataframe : 'pd.DataFrame', y_column : str, cv : int = 5) -> list:
        '''
        Selecting the most important columns
        :param dataframe: pandas DataFrame
            Data Frame on which the algorithm is applied
        :param y_column: string
            The column name of the value that we what to predict
        :param cv: integer
            Determines the cross-validation splitting strategy
        :return: list
            The list of columns selected by algorithm
        '''
        self.dataframe = dataframe
        self.y_column = y_column
        self.X_columns = [col for col in self.dataframe.columns if col != self.y_column]
        # Converting the data frame into numpy arrays
        X = self.dataframe[self.X_columns].values
        y = self.dataframe[self.y_column].values
        # Searching for the optimal value of alpha.
        # NOTE(review): this loop only terminates once the CV-optimal alpha
        # falls strictly inside the search interval; pathological data could
        # keep extending the interval indefinitely.
        while True:
            # Generating the log search space for alpha
            self.alphas = np.logspace(self.alpha_start, self.alpha_finish, self.n_alphas, endpoint=True)
            # Creating and fitting up the lasso CV
            self.lasso_cv = LassoCV(cv=cv, alphas=self.alphas, random_state=0, tol=0.01)
            self.lasso_cv.fit(X, y)
            # Deciding if we need to expand the search space of not
            if self.lasso_cv.alpha_ == self.alphas[0]:
                self.alpha_start -= self.extend_step
            elif self.lasso_cv.alpha_ == self.alphas[-1]:
                self.alpha_finish += self.extend_step
            else:
                # Selecting the columns with the coefficients bigger then 10e-power
                self.choosed_cols = [self.X_columns[i] for i in range(len(self.lasso_cv.coef_)) if abs(self.lasso_cv.coef_[i])>10 **(-self.power)]
                return self.choosed_cols

    def plot_process(self, eps : float = 5e-3, title : str = "Lasso coef Plot", save : bool =False, file_path : str = None) -> None:
        '''
        Ploting the process of finding the best features
        :param eps: float
            Length of the path
        :param title string
            The title of the plot
        :param save boolean, default = False
            If the this parameter is set to False that the model will not save the model
            If it is set to True the plot will be saved using :param file_path
        :param file_path: string, default = None
            The file path where the plot will be saved
            If the :param save is set to False the it is not used
        :return:
            Plots the process of the algorithm
        '''
        # Must be called after select(); relies on self.lasso_cv/self.choosed_cols.
        # Converting the data frame into numpy arrays
        X = self.dataframe[self.X_columns].values
        y = self.dataframe[self.y_column].values
        # Generating the line space for the X axis
        alphas = np.linspace(self.lasso_cv.alpha_-0.1, self.lasso_cv.alpha_+0.1, self.n_alphas, endpoint=True)
        # Generating the alphas and coefficients
        alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False, alphas=alphas)
        neg_log_alphas_lasso = alphas_lasso
        max_coef = coefs_lasso[0][0]
        min_coef = coefs_lasso[0][0]
        # Plotting the lasso coefficients: solid lines for selected columns,
        # dashed for the rest; track min/max to size the vertical marker.
        for i in range(len(coefs_lasso)):
            line_style = lambda col : '-' if col in self.choosed_cols else '--'
            plt.plot(neg_log_alphas_lasso, coefs_lasso[i], line_style(self.X_columns[i]), label=self.X_columns[i])
            if max(coefs_lasso[i]) > max_coef:
                max_coef = max(coefs_lasso[i])
            if min(coefs_lasso[i]) < min_coef:
                min_coef = min(coefs_lasso[i])
        plt.vlines(self.lasso_cv.alpha_, min_coef, max_coef, linestyles='dashed')
        plt.xlabel('-Log(alpha)')
        plt.ylabel('coefficients')
        plt.title(title)
        plt.axis('tight')
        plt.legend()
        if save:
            plt.savefig(file_path)
        plt.show()
1662038 | """Plot utilities for the DESI ETC.
Requires that matplotlib is installed.
"""
import datetime
import copy # for shallow copies of matplotlib colormaps
try:
import DOSlib.logger as logging
except ImportError:
# Fallback when we are not running as a DOS application.
import logging
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines
import matplotlib.patheffects
import matplotlib.dates
import desietc.util
def plot_colorhist(D, ax, imshow, mode='reverse', color='w', alpha=0.75):
    """Draw a hybrid colorbar and histogram.

    D: pixel data whose value distribution is histogrammed.
    ax: axis to fill (its frame is hidden).
    imshow: the AxesImage whose colormap and limits define the gradient.
    mode: how histogram bars are rendered over the gradient -- 'color'
        alpha-blends a fixed color, 'reverse' uses the reversed colormap,
        'complement' uses the RGB complement. Raises ValueError otherwise.
    """
    ax.axis('off')
    # Extract parameters of the original imshow.
    cmap = imshow.get_cmap()
    vmin, vmax = imshow.get_clim()
    # Get the pixel dimension of the axis to fill.
    fig = plt.gcf()
    bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    width, height = int(round(bbox.width * fig.dpi)), int(round(bbox.height * fig.dpi))
    # Draw the colormap gradient.
    img = np.zeros((height, width, 3))
    xgrad = np.linspace(0, 1, width)
    img[:] = cmap(xgrad)[:, :-1]
    # Superimpose a histogram of pixel values (one bin per pixel column).
    counts, _ = np.histogram(D.reshape(-1), bins=np.linspace(vmin, vmax, width + 1))
    # Normalize bar heights by the max count excluding the two edge bins.
    hist_height = ((height - 1) * counts / counts[1:-1].max()).astype(int)
    mask = np.arange(height).reshape(-1, 1) < hist_height
    if mode == 'color':
        img[mask] = (1 - alpha) * img[mask] + alpha * np.asarray(matplotlib.colors.to_rgb(color))
    elif mode == 'reverse':
        cmap_r = cmap.reversed()
        for i, x in enumerate(xgrad):
            img[mask[:, i], i] = cmap_r(x)[:-1]
    elif mode == 'complement':
        # https://stackoverflow.com/questions/40233986/
        # python-is-there-a-function-or-formula-to-find-the-complementary-colour-of-a-rgb
        hilo = np.amin(img, axis=2, keepdims=True) + np.amax(img, axis=2, keepdims=True)
        img[mask] = hilo[mask] - img[mask]
    else:
        raise ValueError('Invalid mode "{0}".'.format(mode))
    ax.imshow(img, interpolation='none', origin='lower')
def plot_pixels(D, label=None, colorhist=False, zoom=1, masked_color='cyan',
                imshow_args={}, text_args={}, colorhist_args={}):
    """Plot pixel data at 1:1 scale with an optional label and colorhist.

    Returns (fig, ax). Masked (NaN) pixels render in *masked_color*.
    NOTE(review): the dict default arguments are shared across calls; they
    are only copied/read here, never mutated in place.
    """
    dpi = 100 # value only affects metadata in an output file, not appearance on screen.
    ny, nx = D.shape
    width, height = zoom * nx, zoom * ny
    if colorhist:
        colorhist_height = 32
        height += colorhist_height
    fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi, frameon=False)
    ax = plt.axes((0, 0, 1, zoom * ny / height))
    # Merge caller-provided imshow options with our defaults.
    args = dict(imshow_args)
    for name, default in dict(interpolation='none', origin='lower', cmap='plasma_r').items():
        if name not in args:
            args[name] = default
    # Set the masked color in the specified colormap.
    cmap = copy.copy(matplotlib.cm.get_cmap(args['cmap']))
    cmap.set_bad(color=masked_color)
    args['cmap'] = cmap
    # Draw the image.
    I = ax.imshow(D, **args)
    ax.axis('off')
    if label:
        args = dict(text_args)
        for name, default in dict(color='w', fontsize=18).items():
            if name not in args:
                args[name] = default
        # Outline the text so it stays visible over any pixel values.
        outline = [
            matplotlib.patheffects.Stroke(linewidth=1, foreground='k'),
            matplotlib.patheffects.Normal()]
        text = ax.text(0.01, 0.01 * nx / ny, label, transform=ax.transAxes, **args)
        text.set_path_effects(outline)
    if colorhist:
        axcb = plt.axes((0, zoom * ny / height, 1, colorhist_height / height))
        plot_colorhist(D, axcb, I, **colorhist_args)
    return fig, ax
def plot_data(D, W, downsampling=4, zoom=1, label=None, colorhist=False, stamps=[],
              preprocess_args={}, imshow_args={}, text_args={}, colorhist_args={}):
    """Plot weighted image data using downsampling, optional preprocessing, and decorators.

    D, W: image data and weights; stamps is a list of (yslice, xslice, ...)
    regions to outline and number on the plot. Returns (fig, ax).
    """
    # Downsample the input data.
    D, W = desietc.util.downsample_weighted(D, W, downsampling)
    # Preprocess the data for display.
    D = desietc.util.preprocess(D, W, **preprocess_args)
    ny, nx = D.shape
    # Display the image.
    args = dict(imshow_args)
    if 'extent' not in args:
        # Use the input pixel space for the extent, without downsampling.
        args['extent'] = [-0.5, nx * downsampling - 0.5, -0.5, ny * downsampling - 0.5]
    fig, ax = plot_pixels(D, zoom=zoom, label=label, colorhist=colorhist,
                          imshow_args=args, text_args=text_args, colorhist_args=colorhist_args)
    # Outline text so stamp numbers stay visible over any pixels.
    outline = [
        matplotlib.patheffects.Stroke(linewidth=1, foreground='k'),
        matplotlib.patheffects.Normal()]
    for k, stamp in enumerate(stamps):
        yslice, xslice = stamp[:2]
        xlo, xhi = xslice.start, xslice.stop
        ylo, yhi = yslice.start, yslice.stop
        rect = plt.Rectangle((xlo, ylo), xhi - xlo, yhi - ylo, fc='none', ec='w', lw=1)
        ax.add_artist(rect)
        # Place the stamp number on whichever side keeps it inside the image.
        if xhi < nx // 2:
            xtext, halign = xhi, 'left'
        else:
            xtext, halign = xlo, 'right'
        text = ax.text(
            xtext, 0.5 * (ylo + yhi), str(k), fontsize=12, color='w', va='center', ha=halign)
        text.set_path_effects(outline)
    return fig, ax
def save_acquisition_summary(
        mjd, exptag, psf_model, psf_stack, fwhm, ffrac, nstars, badfit, noisy, path,
        show_north=True, show_fiber=True, zoom=5, dpi=128, cmap='magma', masked_color='gray'):
    """Save a two-row summary image of a GFA acquisition: stacked PSF data
    (top row) and fitted PSF models (bottom row), one column per guide camera,
    with timestamp, FWHM/fiber-fraction and per-camera quality annotations.

    Returns None; silently returns early if no stack images are present.
    """
    # Get the size of the PSF model and stack images.
    shapes = [D.shape for D in psf_stack.values() if D is not None]
    if len(shapes) == 0:
        return
    size = shapes[0][1]
    # Get the number of expected in-focus GFAs.
    names = desietc.gfa.GFACamera.guide_names
    ngfa = len(names)
    # Initialize the figure.
    width = size * zoom * ngfa
    height = size * zoom * 2
    fig, axes = plt.subplots(2, ngfa, figsize=(width / dpi, height / dpi), dpi=dpi, frameon=False)
    plt.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)
    # Prepare a colormap with our custom ivar=0 color.
    cmap = copy.copy(matplotlib.cm.get_cmap(cmap))
    cmap.set_bad(color=masked_color)
    # Get the colormap scale to use for all images.
    model_sum = {name: psf_model[name].sum() for name in psf_model}
    model_max = np.median([psf_model[camera].max() / model_sum[camera] for camera in psf_model])
    vmin, vmax = -0.1 * model_max, 1.0 * model_max
    # Calculate the image extent.
    # Outline text to ensure that it is visible whatever pixels are below.
    outline = [
        matplotlib.patheffects.Stroke(linewidth=1, foreground='k'),
        matplotlib.patheffects.Normal()]
    # Calculate the fiber diameter to overlay in GFA pixels.
    fiber_diam_um = 107
    pixel_size_um = 15
    radius = 0.5 * fiber_diam_um / pixel_size_um
    center = ((size - 1) / 2, (size - 1) / 2)
    # Loop over cameras.
    default_norm = np.median([s for s in model_sum.values()])
    for i, name in enumerate(names):
        for ax in axes[:,i]:
            ax.axis('off')
            ax.add_artist(plt.Rectangle([0,0], 1, 1, fc='k', ec='none', transform=ax.transAxes, zorder=-1))
        if name in psf_stack and psf_stack[name] is not None:
            data = psf_stack[name][0].copy()
            norm = model_sum.get(name, default_norm)
            data /= norm
            # do not show ivar=0 pixels
            data[psf_stack[name][1] == 0] = np.nan
            axes[0, i].imshow(data, vmin=vmin, vmax=vmax, cmap=cmap, interpolation='none', origin='lower')
            if show_north:
                # Draw an arrow point north in this GFA's pixel basis.
                n = int(name[5])
                angle = np.deg2rad(36 * (n - 2))
                xc, yc = 0.5 * size, 0.16 * size
                du = 0.02 * size * np.cos(angle)
                dv = 0.02 * size * np.sin(angle)
                xpt = np.array([-4 * du, dv, du, -dv, -4 * du])
                ypt = np.array([4 * dv, du, -dv, -du, 4 * dv])
                axes[0, i].add_line(matplotlib.lines.Line2D(xpt + xc, ypt + yc, c='c', lw=1, ls='-'))
        if name in psf_model and psf_model[name] is not None:
            data = psf_model[name]
            # NOTE(review): this divides psf_model[name] in place (no copy) --
            # confirm callers do not reuse the dict afterwards.
            data /= model_sum[name]
            axes[1, i].imshow(psf_model[name], vmin=vmin, vmax=vmax, cmap=cmap,
                              interpolation='bicubic', origin='lower')
            if show_fiber:
                # Draw an outline of the fiber.
                fiber = matplotlib.patches.Circle(center, radius, color='c', ls='-', lw=1, alpha=0.7, fill=False)
                axes[1,i].add_artist(fiber)
    # Generate a text overlay.
    ax = plt.axes((0, 0, 1, 1))
    ax.axis('off')
    if mjd is not None:
        night = desietc.util.mjd_to_night(mjd)
        localtime = desietc.util.mjd_to_date(mjd, utc_offset=-7)
        center = localtime.strftime('%H:%M:%S') + ' (UTC-7)'
    else:
        night = 'YYYYMMDD'
        center = 'HH:MM:SS (UTC-7)'
    left = f'{night}/{exptag}'
    right = f'FWHM={fwhm:.2f}" ({100*ffrac:.1f}%)'
    for (x, ha, label) in zip((0, 0.5, 1), ('left', 'center', 'right'), (left, center, right)):
        text = ax.text(x, 0, label, color='w', ha=ha, va='bottom', size=10, transform=ax.transAxes)
        text.set_path_effects(outline)
    # Add per-GFA labels.
    xtext = (np.arange(ngfa) + 0.5) / ngfa
    for x, name in zip(xtext, names):
        text = ax.text(x, 0.5, name, color='w', ha='center', va='center', size=8, transform=ax.transAxes)
        text.set_path_effects(outline)
        nstar = nstars[name]
        label = f'{nstar} star'
        if nstar > 1: label += 's'
        text = ax.text(x, 0.45, label, color='w', ha='center', va='center', size=7, transform=ax.transAxes)
        text.set_path_effects(outline)
        warn_args = dict(size=10, color='c', fontweight='bold')
        if nstar == 0:
            text = ax.text(x, 0.92, 'NO STARS?', ha='center', va='top', transform=ax.transAxes, **warn_args)
            text.set_path_effects(outline)
        elif name in badfit:
            text = ax.text(x, 0.92, 'BAD PSF?', ha='center', va='top', transform=ax.transAxes, **warn_args)
            text.set_path_effects(outline)
        if name in noisy:
            text = ax.text(x, 1, 'NOISY?', ha='center', va='top', transform=ax.transAxes, **warn_args)
            text.set_path_effects(outline)
    # Save the image.
    plt.savefig(path)
    plt.close(fig)
def plot_measurements(buffer, mjd1, mjd2, ymin=0, resolution=1, label=None, ax=None):
    """Plot measurements spanning (mjd1, mjd2) in the specified buffer.

    Shows buffered samples with error bars, the interpolated value over the
    interval, the recent samples used for the trend, and the extrapolated
    trend beyond mjd2.
    """
    ax = ax or plt.gca()
    # Convert from MJD to minutes after mjd1.
    # NOTE(review): 720 is minutes per *half* day; the axis label says
    # minutes (1440/day) -- confirm the intended scale factor.
    minutes = lambda mjd: (mjd - mjd1) * 720
    # Plot measurements covering (mjd1, mjd2) with some extra padding.
    xlo, xhi = mjd1 - 3 * buffer.padding, mjd2 + 3 * buffer.padding
    padded = buffer.inside(xlo, xhi)
    used = buffer.inside(mjd1 - buffer.padding, mjd2 + buffer.padding)
    extra = padded & ~used
    for sel, color in ((extra, 'lightgray'), (used, 'b')):
        x = 0.5 * (buffer.entries['mjd1'][sel] + buffer.entries['mjd2'][sel])
        dx = 0.5 * (buffer.entries['mjd2'][sel] - buffer.entries['mjd1'][sel])
        y = buffer.entries['value'][sel]
        dy = buffer.entries['error'][sel]
        ax.errorbar(minutes(x), y, xerr=dx * 720, yerr=dy, fmt='.', color=color, ms=2, lw=1)
    # Draw the linear interpolation through the selected points.
    ngrid = int(np.ceil((mjd2 - mjd1) / (resolution / buffer.SECS_PER_DAY)))
    x_grid = np.linspace(mjd1, mjd2, ngrid)
    y_grid = buffer.sample_grid(x_grid)
    ax.fill_between(minutes(x_grid), ymin, y_grid, color='b', lw=0, alpha=0.2)
    # Highlight samples used for the trend.
    sel = buffer.inside(mjd2 - buffer.recent, mjd2)
    x = 0.5 * (buffer.entries['mjd1'][sel] + buffer.entries['mjd2'][sel])
    y = buffer.entries['value'][sel]
    ax.plot(minutes(x), y, 'r.', ms=4, zorder=10)
    # Extrapolate the trend.
    offset, slope = buffer.trend(mjd2)
    x = np.array([mjd2, xhi])
    y = offset + slope * (x - mjd2)
    ax.fill_between(minutes(x), ymin, y, color='r', lw=0, alpha=0.2)
    # Draw vertical lines to show the (mjd1, mjd2) interval.
    for xv in (mjd1, mjd2):
        ax.axvline(minutes(xv), c='b', ls='--')
    ax.set_xlim(minutes(xlo), minutes(xhi))
    ax.set_ylim(ymin, None)
    ax.set_xlabel(f'Minutes relative to MJD {mjd1:.6f}')
    if label is not None:
        ax.set_ylabel(label)
def mjd_plot(mjd, *args, axis=None, utc_offset=-7, date_format='%H:%M', **kwargs):
    """Plot *mjd* values on a date axis, interpreting MJD as a local datetime.

    Drop-in replacement for ``plt.plot`` / ``axis.plot`` when the x values
    are Modified Julian Dates; returns the axis used.
    """
    if axis is None:
        axis = plt.gca()
    # MJD epoch (1858-11-17 UTC) shifted into the requested local time zone.
    mjd_origin = datetime.datetime(1858, 11, 17) + datetime.timedelta(hours=utc_offset)
    plot_epoch = matplotlib.dates.date2num(mjd_origin)
    axis.plot_date(mjd + plot_epoch, *args, **kwargs)
    formatter = matplotlib.dates.DateFormatter(date_format)
    axis.xaxis.set_major_formatter(formatter)
    return axis
3233453 | # Generated by Django 2.0.13 on 2021-08-08 15:30
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.13: tightens the Posta column definitions
    # and renames the model to Letters. Do not hand-edit applied migrations.

    dependencies = [
        ("ddcz", "0101_remove_taverntablenoticeboard_id"),
    ]

    operations = [
        migrations.AlterModelOptions(
            name="posta",
            options={},
        ),
        migrations.AlterField(
            model_name="posta",
            name="datum",
            field=models.DateTimeField(db_column="datum"),
        ),
        migrations.AlterField(
            model_name="posta",
            name="odesilatel",
            field=models.CharField(db_column="odesilatel", max_length=25),
        ),
        migrations.AlterField(
            model_name="posta",
            name="prijemce",
            field=models.CharField(db_column="prijemce", max_length=25),
        ),
        migrations.AlterField(
            model_name="posta",
            name="viditelnost",
            field=models.CharField(db_column="viditelnost", max_length=1),
        ),
        migrations.RenameModel(
            old_name="Posta",
            new_name="Letters",
        ),
    ]
| StarcoderdataPython |
160695 | <filename>src/lib/imaplib.py<gh_stars>1-10
# Stub module: Skulpt does not ship an imaplib implementation yet, so any
# import of this module fails loudly instead of silently doing nothing.
raise NotImplementedError("imaplib is not yet implemented in Skulpt")
| StarcoderdataPython |
1690805 | <reponame>andrinelo/lego<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-08 16:51
from __future__ import unicode_literals
import re
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.4: restricts the primary-key `tag` field
    # to lowercase slugs. Do not hand-edit applied migrations.

    dependencies = [
        ('tags', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tag',
            name='tag',
            field=models.CharField(
                max_length=64, primary_key=True, serialize=False, validators=[
                    django.core.validators.RegexValidator(
                        re.compile('^[-a-z0-9_]+\\Z', 32),
                        "Enter a valid 'tag' consisting of letters, numbers, underscores or hyphens.",
                        'invalid'
                    )
                ]
            ),
        ),
    ]
| StarcoderdataPython |
3232369 | from sqlalchemy import (
Column,
Integer,
SmallInteger,
String,
DateTime,
Float,
Boolean,
Text,
ARRAY,
)
from sqlalchemy.ext.declarative import declarative_base
from database_pg.utils import Mixin
DeclarativeBase = declarative_base(cls=Mixin)
def create_table(engine):
    """Create every table registered on DeclarativeBase, skipping existing ones."""
    metadata = DeclarativeBase.metadata
    metadata.create_all(engine, metadata.tables.values(), checkfirst=True)
class ApartmentTable(DeclarativeBase):
    # ORM model for a scraped apartment listing (prices in UAH and USD,
    # location, dimensions and metadata).
    __tablename__ = "apartment"

    id = Column(Integer, primary_key=True, autoincrement=True, nullable=False)
    url = Column(String(240), nullable=False)
    title = Column(String(480))
    # Pricing
    price_uah = Column(Integer, nullable=False)
    price_usd = Column(Integer, nullable=False)
    verified_price = Column(Boolean)
    verified_apartment = Column(Boolean)
    description = Column(Text)
    # Location
    region = Column(String(240))
    city = Column(String(240))
    district = Column(String(240))
    street = Column(String(240))
    building_number = Column(String(40))
    latitude = Column(Float)
    longitude = Column(Float)
    # Dimensions and layout
    total_square = Column(Float, nullable=False)
    living_square = Column(Float, nullable=False)
    kitchen_square = Column(Float, nullable=False)
    room_count = Column(SmallInteger, nullable=False)
    floor = Column(SmallInteger, nullable=False)
    floor_count = Column(SmallInteger, nullable=False)
    # Building / listing metadata
    walls_material = Column(String(240))
    heating = Column(String(240))
    construction_year = Column(String(40))
    apartment_type = Column(String(120))
    selling_type = Column(String(120))
    creation_date = Column(DateTime)
    apartment_condition = Column(String(240))
    centre_distance = Column(String(120))
    centre_distance_type = Column(String(120))
    # One-dimensional array of image URLs (PostgreSQL ARRAY column).
    images = Column("images", ARRAY(String(240), dimensions=1))
| StarcoderdataPython |
116206 | <filename>MFSDA/Resources/Libraries/stat_lpks_wob.py
"""
Local linear kernel smoothing for optimal bandwidth selection.
Author: <NAME> (<EMAIL>)
Last update: 2017-08-14
"""
from __future__ import division
import numpy as np
from numpy.linalg import inv
from stat_kernel import ep_kernel
"""
installed all the libraries above
"""
def lpks_wob(coord_mat, x_design, y_design):
    """
    Local linear kernel smoothing for optimal bandwidth selection.

    Searches ``nh`` log-spaced candidate bandwidths with k-fold
    cross-validation and returns, for each response dimension, the index of
    the bandwidth minimising the GCV (mean squared prediction error).

    Args:
        coord_mat (matrix): common coordinate matrix (l*d)
        x_design (matrix): design matrix (n*p)
        y_design (matrix): shape data (response matrix, n*l*m, m=d in MFSDA)

    Returns:
        ndarray: optimal bandwidth index (into the candidate grid) per
        response dimension (length m).
    """
    # Set up
    n, p = x_design.shape
    l, d = coord_mat.shape
    m = y_design.shape[2]
    efity_design = y_design * 0  # cross-validated fitted values
    nh = 50  # the number of candidate bandwidths
    k = 5  # number of cross-validation folds
    k_ind = np.floor(np.linspace(1, n, k + 1))  # fold boundaries (1-based)
    gcv = np.zeros((nh, m))  # GCV performance function
    # e1 selector: picks the intercept (the fitted value) out of the local
    # linear coefficient vector [beta0, beta1, ..., beta_d].
    # BUGFIX: the original ``w[0] = 1`` set the entire first *row* to ones
    # (numpy row assignment), summing all local coefficients instead.
    w = np.zeros((1, d + 1))
    w[0, 0] = 1
    t_mat0 = np.zeros((l, l, d + 1))  # L x L x d + 1 local design tensor
    # BUGFIX: the intercept slice is index 0. The original wrote the ones
    # into slice 1, which the loop below immediately overwrites, leaving
    # slice 0 identically zero (a degenerate local design matrix).
    t_mat0[:, :, 0] = np.ones((l, l))
    for dii in range(d):
        # Pairwise coordinate differences s_i - s_j for dimension dii.
        t_mat0[:, :, dii + 1] = np.dot(np.atleast_2d(coord_mat[:, dii]).T, np.ones((1, l))) \
            - np.dot(np.ones((l, 1)), np.atleast_2d(coord_mat[:, dii]))
    for nhii in range(nh):
        # Product Epanechnikov kernel over all coordinate dimensions.
        k_mat = np.ones((l, l))
        for dii in range(d):
            coord_range = np.ptp(coord_mat[:, dii])
            h_min = 0.01  # minimum bandwidth
            h_max = 0.5 * coord_range  # maximum bandwidth
            vh = np.logspace(np.log10(h_min), np.log10(h_max), nh)  # candidate bandwidths
            h = vh[nhii]
            k_mat = k_mat * ep_kernel(t_mat0[:, :, dii + 1] / h, h)  # Epanechnikov kernel smoothing function
        t_mat = np.transpose(t_mat0, [0, 2, 1])  # L x d+1 x L matrix
        for mii in range(m):
            for lii in range(l):
                kx = np.dot(np.atleast_2d(k_mat[:, lii]).T, np.ones((1, d + 1)))*t_mat[:, :, lii]  # L0 x d+1 matrix
                for kii in range(k):
                    ind_beg = int(k_ind[kii]-1)
                    ind_end = int(k_ind[kii+1])
                    # BUGFIX: rebuild the mask for every fold. The original
                    # zeroed folds cumulatively in one shared ``indx``, so
                    # later folds trained on ever-shrinking data and the last
                    # fold trained on an empty set.
                    indx = np.ones((1, n))
                    indx[0, ind_beg:ind_end] = 0
                    # Fit on the complement of the current fold (ridge-stabilized).
                    x_design0 = x_design[np.nonzero(indx == 1)[0], :]
                    hat_mat = np.dot(inv(np.dot(x_design0.T, x_design0)+np.eye(p)*0.0001), x_design0.T)
                    hat_y_design0 = np.dot(hat_mat, y_design[np.nonzero(indx == 1)[0], :, mii])
                    # Local linear smoothing weight at vertex lii.
                    sm_weight = np.dot(np.dot(w, inv(np.dot(kx.T, t_mat[:, :, lii])+np.eye(d+1)*0.0001)), kx.T)
                    efit_beta = np.dot(hat_y_design0, sm_weight.T)
                    # Predict the held-out fold.
                    efity_design[ind_beg:ind_end, lii, mii] = np.squeeze(np.dot(x_design[ind_beg:ind_end, :], efit_beta))
            gcv[nhii, mii] = np.mean((y_design[:, :, mii]-efity_design[:, :, mii])**2)
    flag = np.argmin(gcv, axis=0)  # optimal bandwidth index per response dim
    return flag
| StarcoderdataPython |
1691986 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import
# def get_package_data():
# """Get Package Data for utilipy.math"""
# return {'utilipy.math': ['data/*']}
| StarcoderdataPython |
32055 | <reponame>kthy/wren
# -*- coding: utf-8 -*-
"""Gettext manipulation methods."""
from os import remove
from os.path import exists
from pathlib import Path
from shutil import copyfile, copystat
from typing import Sequence
from filehash import FileHash
from polib import MOFile, POFile, mofile
from wren.change import Change
def apply_changes(mo_file: MOFile, changelist: Sequence[Change]) -> None:
    """Apply every change from *changelist*, in order, to *mo_file*.

    Each entry delegates to its own ``Change.apply`` implementation.
    """
    for pending_change in changelist:
        pending_change.apply(mo_file)
def backup_original_mo(wowsdir: str, locale: str) -> None:
    """Create `global.mo.original` as a checksum-verified copy of `global.mo`."""
    _copyfile_and_checksum(
        _global_mo_path(wowsdir, locale),
        _backup_mo_path(wowsdir, locale),
    )
def convert_mo_to_po(wowsdir: str, locale: str, outputdir: str) -> None:
    """Save the MO file for the given locale in PO format.

    The PO file is written to ``<outputdir>/global_<locale>.po``.
    Note: nothing is returned; the result is the file on disk.

    Raises:
        OSError: if the MO file for *locale* does not exist.
    """
    mofile_path = Path(_global_mo_path(wowsdir, locale))
    if not exists(mofile_path):
        raise OSError(f"MO file for locale {locale} not found")
    mof = mofile(mofile_path)
    # polib parses the binary MO and serializes it as a text PO catalogue.
    mof.save_as_pofile(f"{outputdir}/{mofile_path.stem}_{locale}.po")
def get_mo(wowsdir: str, locale: str) -> MOFile:
    """Open and return the global MO file for *locale* under *wowsdir*.

    Parsing is delegated to :func:`polib.mofile`.
    """
    return mofile(_global_mo_path(wowsdir, locale))
def restore_original_mo(wowsdir: str, locale: str) -> None:
    """Put the pristine `global.mo` back in place from its `.original` backup.

    Does nothing when no backup file exists.
    """
    backup_path = _backup_mo_path(wowsdir, locale)
    if not exists(backup_path):
        return
    _copyfile_and_checksum(backup_path, _global_mo_path(wowsdir, locale))
    remove(backup_path)
def _copyfile_and_checksum(from_path: str, to_path: str) -> None:
    """Copy a file from from_path to to_path.
    Raises OSError if the new file's checksum doesn't match the original."""
    copyfile(from_path, to_path)
    # Also replicate permission bits / timestamps of the source file.
    copystat(from_path, to_path)
    # MD5 is used purely as an integrity check of the copy, not for security.
    hasher = FileHash("md5")
    if hasher.hash_file(from_path) != hasher.hash_file(to_path):
        raise OSError("Copy failed, hash mismatch detected")
def _backup_mo_path(wowsdir: str, locale: str) -> str:
    """Return the path of the backup copy (`global.mo.original`)."""
    return _global_mo_path(wowsdir, locale) + ".original"
def _global_mo_path(wowsdir: str, locale: str) -> str:
return f"{wowsdir}/res/texts/{locale}/LC_MESSAGES/global.mo"
| StarcoderdataPython |
3384141 | <reponame>PacktPublishing/GettingStartedwithPythonfortheInternetofThings-
import cv2
import numpy as np

# Load the Haar cascade describing frontal faces.
frontalface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')

# Fail fast if the cascade file could not be loaded.
if frontalface_cascade.empty():
    raise IOError('Unable to load the face cascade classifier xml file')

# Initialize video capture object (default camera).
capture = cv2.VideoCapture(0)

# Downscale frames before detection to speed things up.
scale_factor = 0.5

# Loop until you hit the Esc key
while True:
    # Capture current frame; stop cleanly if the camera produced no frame
    # (the original crashed in cv2.resize on a None frame).
    ret, frame = capture.read()
    if not ret:
        break
    frame = cv2.resize(frame, None, fx=scale_factor, fy=scale_factor,
            interpolation=cv2.INTER_AREA)

    # Haar cascades operate on grayscale images.
    gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Run the face detector (scaleFactor=1.3, minNeighbors=5).
    face_rectangle = frontalface_cascade.detectMultiScale(gray_image, 1.3, 5)

    # Draw a green rectangle around every detected face.
    for (x, y, w, h) in face_rectangle:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)

    # Display the annotated frame.
    cv2.imshow('Face Detector', frame)

    # BUG FIX: the loop is documented to stop on Esc, whose key code is 27;
    # the original compared against 10 (Enter / line feed). Mask to the low
    # byte because waitKey can carry modifier bits on some platforms.
    if cv2.waitKey(1) & 0xFF == 27:
        break

# Release the video capture object and close all windows
capture.release()
cv2.destroyAllWindows()
| StarcoderdataPython |
93241 | <reponame>andrrizzi/tfep-revisited-2021
#!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Test objects and function in functions.transformer.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import numpy as np
import pytest
import torch
import torch.autograd
from ..functions.transformer import (
affine_transformer, affine_transformer_inv,
sos_polynomial_transformer, neural_spline_transformer,
mobius_transformer, unit_cube_to_inscribed_sphere
)
from ..utils import generate_block_sizes
# =============================================================================
# UTILITY FUNCTIONS
# =============================================================================
def create_random_input(batch_size, n_features, gate, n_parameters,
                        seed=0, x_func=torch.randn, par_func=torch.randn):
    """Build a deterministic random (x, parameters, gate) triple for tests.

    Parameters
    ----------
    gate : bool
        If False, the returned gate will be None.
    """
    # A dedicated, seeded generator keeps every call reproducible.
    rng = torch.Generator()
    rng.manual_seed(seed)

    x = x_func(batch_size, n_features, generator=rng,
               dtype=torch.double, requires_grad=True)
    parameters = par_func(batch_size, n_parameters, n_features, generator=rng,
                          dtype=torch.double, requires_grad=True)

    if not gate:
        return x, parameters, None

    gate = torch.rand(batch_size, generator=rng,
                      dtype=torch.double, requires_grad=True)
    # Pin the first two batch elements to the gate's extreme values
    # (gate None/1 and gate 0 behavior are then always exercised).
    gate.data[0] = 1.0
    gate.data[1] = 0.0
    return x, parameters, gate
# =============================================================================
# REFERENCE IMPLEMENTATIONS
# =============================================================================
def reference_log_det_J(x, y):
    """Compute the log(abs(det(J))) of dy/dx with autograd and numpy."""
    batch_size, n_features = x.shape

    # Build the Jacobian one output column at a time with autograd.
    jacobian = np.empty((batch_size, n_features, n_features))
    for col in range(n_features):
        torch.sum(y[:, col]).backward(retain_graph=True)
        jacobian[:, col] = x.grad.detach().numpy()
        # Clear the accumulated gradient before the next column.
        x.grad.data.zero_()

    # log|det| of every per-batch Jacobian.
    return np.array([np.log(np.abs(np.linalg.det(jacobian[b])))
                     for b in range(batch_size)])
def reference_sos_polynomial_transformer(x, coefficients, gate):
    """Reference implementation of SOSPolynomialTransformer for testing.

    Transforms each feature through the integral of a sum of squared
    degree-1 polynomials (plus constant a_0), blended with the identity
    by the per-batch gate: y = gate*SOS(x) + (1-gate)*x.
    Returns (y, log|det J|) as numpy arrays.
    """
    x = x.detach().numpy()
    coefficients = coefficients.detach().numpy()
    batch_size, n_coefficients, n_features = coefficients.shape
    # Layout: coefficients[:, 0] is a_0; each polynomial uses 2 coefficients.
    n_polynomials = (n_coefficients - 1) // 2
    if gate is None:
        gate = np.ones(batch_size, dtype=x.dtype)
    else:
        gate = gate.detach().numpy()
    # This is the returned value.
    y = np.empty(shape=x.shape)
    det_J = np.ones(batch_size)
    for batch_idx in range(batch_size):
        for i in range(n_features):
            x_i = x[batch_idx, i]
            coefficients_i = coefficients[batch_idx, :, i]
            # Compute all squared polynomials.
            squared_polynomials = []
            for k in range(n_polynomials):
                a_k0 = coefficients_i[1 + k*2]
                a_k1 = coefficients_i[2 + k*2]
                poly = np.poly1d([a_k1, a_k0])
                squared_polynomials.append(np.polymul(poly, poly))
            # Sum the squared polynomials.
            sum_of_squares_poly = squared_polynomials[0]
            for poly in squared_polynomials[1:]:
                sum_of_squares_poly = np.polyadd(sum_of_squares_poly, poly)
            # The integrand is the derivative w.r.t. the input, so the
            # (diagonal) Jacobian is the gated SOS polynomial itself.
            det_J[batch_idx] *= gate[batch_idx]*np.polyval(sum_of_squares_poly, x_i) + (1 - gate[batch_idx])
            # Integrate and sum constant term.
            a_0 = coefficients_i[0]
            sum_of_squares_poly = np.polyint(sum_of_squares_poly, k=a_0)
            y[batch_idx, i] = gate[batch_idx]*np.polyval(sum_of_squares_poly, x_i) + (1 - gate[batch_idx])*x_i
    return y, np.log(np.abs(det_J))
def reference_neural_spline(x, x0, y0, widths, heights, slopes):
    """Reference implementation of neural_spline_transformer for testing.

    Implements the monotonic rational-quadratic spline of Durkan et al.
    ("Neural Spline Flows") bin by bin with plain numpy loops.
    Returns (y, log|det J|) as numpy arrays.
    """
    x = x.detach().numpy()
    x0 = x0.detach().numpy()
    y0 = y0.detach().numpy()
    widths = widths.detach().numpy()
    heights = heights.detach().numpy()
    slopes = slopes.detach().numpy()
    batch_size, n_bins, n_features = widths.shape
    # Knot positions are the cumulative sums of the bin widths/heights.
    knots_x = np.empty((batch_size, n_bins+1, n_features), dtype=x.dtype)
    knots_x[:, 0] = x0
    knots_x[:, 1:] = x0 + np.cumsum(widths, axis=1)
    knots_y = np.empty((batch_size, n_bins+1, n_features), dtype=x.dtype)
    knots_y[:, 0] = y0
    knots_y[:, 1:] = y0 + np.cumsum(heights, axis=1)
    y = np.empty_like(x)
    log_det_J = np.zeros(batch_size, dtype=x.dtype)
    for batch_idx in range(batch_size):
        for feat_idx in range(n_features):
            # Locate the bin containing this input value.
            bin_idx = np.digitize(x[batch_idx, feat_idx], knots_x[batch_idx, :, feat_idx], right=False) - 1
            xk = knots_x[batch_idx, bin_idx, feat_idx]
            xk1 = knots_x[batch_idx, bin_idx+1, feat_idx]
            yk = knots_y[batch_idx, bin_idx, feat_idx]
            yk1 = knots_y[batch_idx, bin_idx+1, feat_idx]
            # Boundary knots get slope 1 so the spline matches the identity's
            # derivative at the edges of the domain.
            if bin_idx == 0:
                deltak = 1
            else:
                deltak = slopes[batch_idx, bin_idx-1, feat_idx]
            if bin_idx == n_bins-1:
                deltak1 = 1
            else:
                deltak1 = slopes[batch_idx, bin_idx, feat_idx]
            sk = (yk1 - yk) / (xk1 - xk)
            # epsilon is the relative position inside the bin, in [0, 1).
            epsilon = (x[batch_idx, feat_idx] - xk) / (xk1 - xk)
            numerator = (yk1 - yk) * (sk * epsilon**2 + deltak * epsilon * (1 - epsilon))
            denominator = sk + (deltak1 + deltak - 2*sk) * epsilon * (1 - epsilon)
            y[batch_idx, feat_idx] = yk + numerator / denominator
            numerator = sk**2 * (deltak1 * epsilon**2 + 2*sk*epsilon*(1 - epsilon) + deltak*(1 - epsilon)**2)
            # NOTE: "deltak + - 2*sk" below is equivalent to "deltak - 2*sk".
            denominator = (sk + (deltak1 + deltak + - 2*sk) * epsilon * (1 - epsilon))**2
            log_det_J[batch_idx] += np.log(numerator / denominator)
    return y, log_det_J
def reference_mobius_transformer(x, w, blocks, gate):
    """Reference implementation of MobiusTransformer for testing.

    Applies a (gated) Mobius transformation independently to each block of
    features; the transform preserves the norm of each block. Returns
    (y, log|det J|, per-block input norms, per-block output norms).
    """
    x = x.detach().numpy()
    w = w.detach().numpy()
    if gate is not None:
        gate = gate.detach().numpy()
    batch_size, n_features = x.shape
    # Blocks can be an int, in which case x is to be divided in blocks of equal size.
    if isinstance(blocks, int):
        assert n_features % blocks == 0
        blocks = [blocks] * int(n_features / blocks)
    # If there is no gate, set it to 1.
    if gate is None:
        gate = np.ones((batch_size, 1), dtype=x.dtype)
    else:
        # Add fake dimension for broadcasting.
        gate = gate[:, None]
    # Initialize the output array.
    y = np.empty_like(x)
    log_det_J = np.zeros(batch_size, dtype=x.dtype)
    # We return also the norm of the input and output.
    x_norm = np.empty(shape=(batch_size, len(blocks)))
    y_norm = np.empty(shape=(batch_size, len(blocks)))
    # The start of the next block.
    block_pointer = 0
    for block_idx, block_size in enumerate(blocks):
        # The input and parameters for the block.
        x_block = x[:, block_pointer:block_pointer+block_size]
        w_block = gate * w[:, block_pointer:block_pointer+block_size]
        # Move the x vector on the unit sphere. Keep the number
        # of dimensions so that broadcasting works.
        x_norm_block = np.linalg.norm(x_block, axis=1, keepdims=True)
        x_normalized_block = x_block / x_norm_block
        # We'll need these terms for the Jacobian as well.
        xw_block = x_normalized_block + w_block
        w_norm = np.linalg.norm(w_block, axis=1, keepdims=True)
        xw_norm = np.linalg.norm(xw_block, axis=1, keepdims=True)
        diff_w_norm = 1 - w_norm**2
        xw_norm_squared = xw_norm**2
        # Compute the output for the block. The result stays on the unit
        # sphere and is rescaled back to the original norm.
        y_normalized_block = diff_w_norm / xw_norm_squared * xw_block + w_block
        y_block = x_norm_block * y_normalized_block
        y[:, block_pointer:block_pointer+block_size] = y_block
        x_norm[:, block_idx] = x_norm_block[:, 0]
        y_norm[:, block_idx] = np.linalg.norm(y_block, axis=1)
        # Compute dxnormalized_i/dx_j.
        dxnormalized_dx = np.empty((batch_size, block_size, block_size))
        for batch_idx in range(batch_size):
            for i in range(block_size):
                for j in range(block_size):
                    dxnormalized_dx[batch_idx, i, j] = - x_block[batch_idx, i] * x_block[batch_idx, j] / x_norm_block[batch_idx, 0]**3
                    if i == j:
                        dxnormalized_dx[batch_idx, i, j] += 1 / x_norm_block[batch_idx, 0]
        # Compute the block Jacobian dy_i/dx_j.
        jacobian = np.empty((batch_size, block_size, block_size), dtype=x.dtype)
        for batch_idx in range(batch_size):
            for i in range(block_size):
                for j in range(block_size):
                    # The first term is d||x||/dx_j * y_normalized, with (d||x||/dx_j)_i = x_j/||x||.
                    jacobian[batch_idx, i, j] = y_normalized_block[batch_idx, i] * x_normalized_block[batch_idx, j]
                    # This is the constant factor in front of the second term.
                    factor = x_norm_block[batch_idx, 0] * diff_w_norm[batch_idx, 0] / xw_norm_squared[batch_idx, 0]
                    # First and second additive terms in the numerator.
                    first_term = dxnormalized_dx[batch_idx, i, j]
                    second_term = 2 / xw_norm_squared[batch_idx, 0] * xw_block[batch_idx, i] * np.dot(xw_block[batch_idx], dxnormalized_dx[batch_idx, :, j])
                    jacobian[batch_idx, i, j] += factor * (first_term - second_term)
        # Compute the log determinant.
        for batch_idx in range(batch_size):
            log_det_J[batch_idx] += np.log(np.abs(np.linalg.det(jacobian[batch_idx])))
        # Point to next block.
        block_pointer += block_size
    return y, log_det_J, x_norm, y_norm
# =============================================================================
# TESTS
# =============================================================================
@pytest.mark.parametrize('batch_size', [2, 5])
@pytest.mark.parametrize('n_features', [2, 5, 8])
@pytest.mark.parametrize('gate', [False, True])
def test_affine_transformer_round_trip(batch_size, n_features, gate):
    """Make sure the forward + inverse composition of affine transformers is equal to the identity."""
    x, coefficients, gate = create_random_input(batch_size, n_features, gate,
                                                n_parameters=2, seed=0)
    shift, log_scale = coefficients[:, 0], coefficients[:, 1]
    # Check that a round trip gives the identity function.
    y, log_det_J_y = affine_transformer(x, shift, log_scale, gate=gate)
    x_inv, log_det_J_x_inv = affine_transformer_inv(y, shift, log_scale, gate=gate)
    assert torch.allclose(x, x_inv)
    # The log det Jacobians of inverse transformations must cancel out.
    assert torch.allclose(log_det_J_y + log_det_J_x_inv, torch.zeros_like(log_det_J_y))
@pytest.mark.parametrize('batch_size', [2, 5])
@pytest.mark.parametrize('n_features', [2, 5, 8])
@pytest.mark.parametrize('gate', [False, True])
@pytest.mark.parametrize('func', [affine_transformer, affine_transformer_inv])
def test_affine_transformer_log_det_J(batch_size, n_features, gate, func):
    """Check that the log_det_J of the gated affine transformer is correct."""
    x, coefficients, gate = create_random_input(batch_size, n_features, gate,
                                                n_parameters=2, seed=0)
    shift, log_scale = coefficients[:, 0], coefficients[:, 1]
    # Check the log(abs(det(J))) against a numerical autograd reference.
    y, log_det_J = func(x, shift, log_scale, gate)
    log_det_J_ref = reference_log_det_J(x, y)
    assert np.allclose(log_det_J.detach().numpy(), log_det_J_ref)
@pytest.mark.parametrize('batch_size', [2, 5])
@pytest.mark.parametrize('n_features', [2, 5, 8])
@pytest.mark.parametrize('n_polynomials', [2, 3, 5])
@pytest.mark.parametrize('gate', [False, True])
def test_sos_polynomial_transformer_reference(batch_size, n_features, n_polynomials, gate):
    """Compare PyTorch and reference implementation of sum-of-squares transformer."""
    x, coefficients, gate = create_random_input(batch_size, n_features, gate=gate,
                                                n_parameters=1+2*n_polynomials, seed=0)
    ref_y, ref_log_det_J = reference_sos_polynomial_transformer(x, coefficients, gate)
    torch_y, torch_log_det_J = sos_polynomial_transformer(x, coefficients, gate)
    assert np.allclose(ref_y, torch_y.detach().numpy())
    assert np.allclose(ref_log_det_J, torch_log_det_J.detach().numpy())
    # Compute the reference log_det_J also with autograd and numpy.
    ref_log_det_J2 = reference_log_det_J(x, torch_y)
    assert np.allclose(ref_log_det_J2, torch_log_det_J.detach().numpy())
@pytest.mark.parametrize('batch_size', [2, 5])
@pytest.mark.parametrize('n_features', [2, 5, 8])
@pytest.mark.parametrize('n_polynomials', [2, 3, 5])
@pytest.mark.parametrize('gate', [False, True])
def test_sos_polynomial_transformer_gradcheck(batch_size, n_features, n_polynomials, gate):
    """Run autograd.gradcheck on the SOS polynomial transformer."""
    x, coefficients, gate = create_random_input(batch_size, n_features, gate=gate,
                                                n_parameters=1+2*n_polynomials, seed=0)
    # Verify the analytic gradients against finite differences.
    result = torch.autograd.gradcheck(
        func=sos_polynomial_transformer,
        inputs=[x, coefficients, gate]
    )
    assert result
@pytest.mark.parametrize('batch_size', [2, 5])
@pytest.mark.parametrize('n_features', [2, 5, 8])
@pytest.mark.parametrize('x0', [-2, -1])
@pytest.mark.parametrize('y0', [1, 2])
@pytest.mark.parametrize('n_bins', [2, 3, 5])
def test_neural_spline_transformer_reference(batch_size, n_features, x0, y0, n_bins):
    """Compare PyTorch and reference implementation of neural spline transformer."""
    # Determine the first and final knots of the spline. We
    # arbitrarily set the domain of the first dimension to 0.0
    # to test different dimensions for different features.
    x0 = torch.full((n_features,), x0, dtype=torch.double, requires_grad=False)
    xf = -x0
    xf[0] = 0
    y0 = torch.full((n_features,), y0, dtype=torch.double, requires_grad=False)
    yf = y0 + xf - x0
    # Create widths, heights, and slopes of the bins.
    # softmax guarantees widths/heights are positive and sum to the domain
    # size; softplus guarantees strictly positive slopes (monotonicity).
    n_parameters = 3*n_bins - 1
    x, parameters, _ = create_random_input(batch_size, n_features, gate=False,
                                           n_parameters=n_parameters, seed=0,
                                           x_func=torch.rand)
    widths = torch.nn.functional.softmax(parameters[:, :n_bins], dim=1) * (xf - x0)
    heights = torch.nn.functional.softmax(parameters[:, n_bins:2*n_bins], dim=1) * (yf - y0)
    slopes = torch.nn.functional.softplus(parameters[:, 2*n_bins:])
    # x is now between 0 and 1 but it must be between x0 and xf. We detach
    # to make the new x a leaf variable and reset requires_grad.
    x = x.detach() * (xf - x0) + x0
    x.requires_grad = True
    ref_y, ref_log_det_J = reference_neural_spline(x, x0, y0, widths, heights, slopes)
    torch_y, torch_log_det_J = neural_spline_transformer(x, x0, y0, widths, heights, slopes)
    assert np.allclose(ref_y, torch_y.detach().numpy())
    assert np.allclose(ref_log_det_J, torch_log_det_J.detach().numpy())
    # Check y0, yf boundaries are satisfied
    assert torch.all(y0 < torch_y)
    assert torch.all(torch_y < yf)
    # Compute the reference log_det_J also with autograd and numpy.
    ref_log_det_J2 = reference_log_det_J(x, torch_y)
    assert np.allclose(ref_log_det_J2, torch_log_det_J.detach().numpy())
@pytest.mark.parametrize('n_features,blocks', [
    (3, 3),
    (6, 3),
    (6, 2),
    (5, [3, 2]),
    (7, [3, 2, 2]),
    (8, [3, 2, 2, 1])
])
def test_unit_cube_to_inscribed_sphere(n_features, blocks):
    """Test the mapping from unit cube to its inscribed sphere."""
    # Create a bunch of points within the hypercube with half-side = radius.
    radius = 1
    batch_size = 256
    generator = torch.Generator()
    generator.manual_seed(0)
    w = radius - 2 * radius * torch.rand(batch_size, n_features, generator=generator, dtype=torch.double)
    # In the last two batches we set two cube vertices.
    w[-1] = radius * torch.ones_like(w[-1])
    w[-2] = -radius * torch.ones_like(w[-2])
    # In the third to last batch we try to map the origin.
    w[-3] = torch.zeros_like(w[-3])
    # After the mapping, all points should be within the unit sphere.
    w_mapped = unit_cube_to_inscribed_sphere(w, blocks, shorten_last_block=True)
    blocks = generate_block_sizes(n_features, blocks, shorten_last_block=True)
    block_pointer = 0
    for block_size in blocks:
        # Collect the per-block norms before (norms[0]) and after (norms[1]).
        norms = []
        for x in [w, w_mapped]:
            x_block = x[:, block_pointer:block_pointer+block_size]
            norms.append((x_block**2).sum(dim=1).sqrt())
        # The test is more meaningful if some of the initial vectors
        # started outside the hypersphere. Exclude the vertices since
        # those are always outside the sphere.
        if block_size > 1:
            assert (norms[0][:-2] > radius).any()
        assert (norms[1] <= radius).all()
        # The cube vertices should be mapped exactly on the sphere surface.
        assert torch.allclose(norms[1][-2:], radius * torch.ones_like(norms[1][-2:]))
        # And the zero should be mapped to zero.
        zero_block = w_mapped[-3, block_pointer:block_pointer+block_size]
        assert torch.all(zero_block == torch.zeros_like(zero_block))
        block_pointer += block_size
@pytest.mark.parametrize('batch_size', [2, 5])
@pytest.mark.parametrize('n_features,blocks', [
    (3, 3),
    (6, 3),
    (6, 2),
    (5, [3, 2]),
    (7, [3, 2, 2]),
    (8, [3, 2, 2, 1])
])
@pytest.mark.parametrize('gate', [False, True])
def test_mobius_transformer_reference(batch_size, n_features, blocks, gate):
    """Compare PyTorch and reference implementation of the Mobius transformer."""
    x, w, gate = create_random_input(batch_size, n_features, gate=gate,
                                     n_parameters=1, seed=0, par_func=torch.rand)
    # Map w from [0, 1) into (-1, 1] so that its blocks can lie inside the unit sphere.
    w = 1 - 2 * w[:, 0]
    # Compare PyTorch and reference.
    ref_y, ref_log_det_J, ref_x_norm, ref_y_norm = reference_mobius_transformer(x, w, blocks, gate)
    torch_y, torch_log_det_J = mobius_transformer(x, w, blocks, gate)
    assert np.allclose(ref_y, torch_y.detach().numpy())
    assert np.allclose(ref_log_det_J, torch_log_det_J.detach().numpy())
    # Make sure the transform doesn't alter the distance from the center of the sphere.
    assert np.allclose(ref_x_norm, ref_y_norm)
    # Compute the reference log_det_J also with autograd and numpy.
    ref_log_det_J2 = reference_log_det_J(x, torch_y)
    assert np.allclose(ref_log_det_J2, torch_log_det_J.detach().numpy())
@pytest.mark.parametrize('batch_size', [2, 5])
@pytest.mark.parametrize('n_features,blocks', [
    (3, 3),
    (6, 3),
    (6, 2),
    (5, [3, 2]),
    (7, [3, 2, 2]),
    (8, [3, 2, 2, 1])
])
@pytest.mark.parametrize('gate', [False, True])
def test_mobius_transformer_gradcheck(batch_size, n_features, blocks, gate):
    """Run autograd.gradcheck on the Mobius transformer."""
    x, w, gate = create_random_input(batch_size, n_features, gate=gate,
                                     n_parameters=1, seed=0, par_func=torch.rand)
    # Map w from [0, 1) into (-1, 1].
    w = 1 - 2 * w[:, 0]
    # Verify the analytic gradients against finite differences.
    result = torch.autograd.gradcheck(
        func=mobius_transformer,
        inputs=[x, w, blocks, gate]
    )
    assert result
| StarcoderdataPython |
92778 | <reponame>djgroen/flee-release
import flee.flee as flee
import datamanager.handle_refugee_data as handle_refugee_data
import numpy as np
import outputanalysis.analysis as a

"""
Generation 1 code. Incorporates only distance, travel always takes one day.
"""

if __name__ == "__main__":
  print("Testing basic data handling and simulation kernel.")

  # Fix min and max speed to the same value so every journey completes in
  # exactly one step, as per the Generation 1 model above.
  flee.SimulationSettings.MinMoveSpeed=5000.0
  flee.SimulationSettings.MaxMoveSpeed=5000.0
  flee.SimulationSettings.MaxWalkSpeed=5000.0

  end_time = 10
  e = flee.Ecosystem()

  # Conflict origin: agents spawn here and may move (movechance=0.3).
  l1 = e.addLocation("A", movechance=0.3)

  # Destination camps: agents that arrive stay put (movechance=0.0).
  l2 = e.addLocation("B", movechance=0.0)
  l3 = e.addLocation("C", movechance=0.0)
  l4 = e.addLocation("D", movechance=0.0)

  # Route distances in km from the origin to each camp.
  e.linkUp("A","B","834.0")
  e.linkUp("A","C","1368.0")
  e.linkUp("A","D","536.0")

  d = handle_refugee_data.RefugeeTable(csvformat="generic", data_directory="test_data", start_date="2010-01-01", data_layout="data_layout.csv")

  for t in range(0,end_time):
    new_refs = d.get_new_refugees(t)

    # Insert refugee agents
    for i in range(0, new_refs):
      e.addAgent(location=l1)

    # Propagate the model by one time step.
    e.evolve()

    # Columns: step, total agents, then the per-location agent counts.
    print(t, l1.numAgents+l2.numAgents+l3.numAgents+l4.numAgents, l1.numAgents, l2.numAgents, l3.numAgents, l4.numAgents)

  assert t==9
  # Regression check: total agent count expected from the test_data inputs.
  assert l1.numAgents+l2.numAgents+l3.numAgents+l4.numAgents==635 # This includes refugee counts from Fassala as well

  # NOTE(review): the numbers below look like reference per-location counts
  # from an earlier run — purpose unconfirmed.
  #79 746 24601 14784 38188

  print("Test successful!")
| StarcoderdataPython |
1613763 | <gh_stars>0
import dynet as dy
import numpy as np
import numbers
from typing import Any, List, Sequence, Union
import xnmt.batchers as batchers
import xnmt.event_trigger as event_trigger
import xnmt.events as events
import xnmt.input_readers as input_readers
import xnmt.search_strategies as search_strategies
import xnmt.sent as sent
import xnmt.vocabs as vocabs
from xnmt.settings import settings
from xnmt.modelparts import attenders, decoders, embedders
from xnmt.transducers import recurrent, base as transducers_base
from xnmt.persistence import serializable_init, Serializable, bare
from xnmt.reports import Reportable
from xnmt.losses import LossExpr
from .auto_regressive import AutoRegressiveTranslator
class DefaultTranslator(AutoRegressiveTranslator, Serializable, Reportable):
  """
  A default translator based on attentional sequence-to-sequence models.

  Args:
    src_reader: A reader for the source side.
    trg_reader: A reader for the target side.
    src_embedder: A word embedder for the input language
    encoder: An encoder to generate encoded inputs
    attender: An attention module
    decoder: A decoder
    truncate_dec_batches: if True, truncate fully-masked trailing batch
                          elements instead of multiplying losses by the mask
  """

  yaml_tag = '!DefaultTranslator'

  @events.register_xnmt_handler
  @serializable_init
  def __init__(self,
               src_reader: input_readers.InputReader,
               trg_reader: input_readers.InputReader,
               src_embedder: embedders.Embedder = bare(embedders.LookupEmbedder),
               encoder: transducers_base.SeqTransducer = bare(recurrent.BiLSTMSeqTransducer),
               attender: attenders.Attender = bare(attenders.MlpAttender),
               decoder: decoders.Decoder = bare(decoders.AutoRegressiveDecoder),
               truncate_dec_batches: bool = False) -> None:
    super().__init__(src_reader=src_reader, trg_reader=trg_reader)
    self.src_embedder = src_embedder
    self.encoder = encoder
    self.attender = attender
    self.decoder = decoder
    self.truncate_dec_batches = truncate_dec_batches

  def shared_params(self):
    """Declare dimensions that must agree across sub-components (xnmt config sharing)."""
    return [{".src_embedder.emb_dim", ".encoder.input_dim"},
            {".encoder.hidden_dim", ".attender.input_dim", ".decoder.input_dim"},
            {".attender.state_dim", ".decoder.rnn.hidden_dim"}]

  def _initial_state(self, src: Union[batchers.Batch, sent.Sentence]):
    """Embed and encode *src*, prime attender/decoder, and return the initial decoder state."""
    # Encode sentence and initiate decoder state
    embeddings = self.src_embedder.embed_sent(src)
    encoding = self.encoder.transduce(embeddings)
    final_state = self.encoder.get_final_states()
    self.attender.init_sent(encoding)
    self.decoder.init_sent(encoding)
    # The decoder is seeded with the start-of-sentence symbol.
    ss = batchers.mark_as_batch([vocabs.Vocab.SS] * src.batch_size()) if batchers.is_batched(src) else vocabs.Vocab.SS
    initial_state = self.decoder.initial_state(final_state, ss)
    return initial_state

  def eog_symbol(self):
    """Return the decoder's end-of-generation symbol."""
    return self.decoder.eog_symbol()

  def finish_generating(self, output, dec_state):
    """Delegate the stop-generation decision to the decoder."""
    return self.decoder.finish_generating(output, dec_state)

  def calc_nll(self,
               src: Union[batchers.Batch, sent.Sentence],
               trg: Union[batchers.Batch, sent.Sentence]) -> LossExpr:
    """Compute the negative log-likelihood of *trg* given *src* (teacher forcing)."""
    if isinstance(src, batchers.CompoundBatch):
      src = src.batches[0]
    # Encode the sentence
    initial_state = self._initial_state(src)
    dec_state = initial_state
    trg_mask = trg.mask if batchers.is_batched(trg) else None
    cur_losses = []
    seq_len = trg.sent_len()

    # Sanity check if requested
    if settings.CHECK_VALIDITY and batchers.is_batched(src):
      for j, single_trg in enumerate(trg):
        # assert consistent length
        assert single_trg.sent_len() == seq_len
        # assert exactly one unmasked ES token
        assert 1 == len([i for i in range(seq_len) if (trg_mask is None or
                         trg_mask.np_arr[j,i]==0) and single_trg[i]==vocabs.Vocab.ES])

    input_word = None
    for i in range(seq_len):
      ref_word = self._select_ref_words(trg, i, truncate_masked=self.truncate_dec_batches)
      # Feed the previous reference word before scoring the current one.
      if input_word is not None:
        dec_state = self.decoder.add_input(dec_state, input_word)
      rnn_output = dec_state.as_vector()
      dec_state.context = self.attender.calc_context(rnn_output)
      word_loss = self.decoder.calc_loss(dec_state, ref_word)
      # Zero out loss contributions of masked (padding) positions.
      if not self.truncate_dec_batches and batchers.is_batched(src) and trg_mask is not None:
        word_loss = trg_mask.cmult_by_timestep_expr(word_loss, i, inverse=True)
      cur_losses.append(word_loss)
      input_word = ref_word

    units = [t.len_unpadded() for t in trg]
    return LossExpr(dy.esum(cur_losses), units)

  def _select_ref_words(self, sentence, index, truncate_masked = False):
    """Pick the reference words at *index*; optionally drop trailing masked batch items."""
    if truncate_masked:
      mask = sentence.mask if batchers.is_batched(sentence) else None
      if not batchers.is_batched(sentence):
        return sentence[index]
      else:
        ret = []
        found_masked = False
        for (j, single_trg) in enumerate(sentence):
          if mask is None or mask.np_arr[j, index] == 0 or np.sum(mask.np_arr[:, index]) == mask.np_arr.shape[0]:
            # Masked items must all come after unmasked ones for truncation to work.
            assert not found_masked, "sentences must be sorted by decreasing target length"
            ret.append(single_trg[index])
          else:
            found_masked = True
        return batchers.mark_as_batch(ret)
    else:
      return sentence[index] if not batchers.is_batched(sentence) else \
             batchers.mark_as_batch([single_trg[index] for single_trg in sentence])

  def generate_search_output(self,
                             src: batchers.Batch,
                             search_strategy: search_strategies.SearchStrategy) -> List[search_strategies.SearchOutput]:
    """
    Takes in a batch of source sentences and outputs a list of search outputs.

    Args:
      src: The source sentences
      search_strategy: The strategy with which to perform the search
    Returns:
      A list of search outputs including scores, etc.
    """
    if src.batch_size() != 1:
      raise NotImplementedError("batched decoding not implemented for DefaultTranslator. "
                                "Specify inference batcher with batch size 1.")
    if isinstance(src, batchers.CompoundBatch):
      src = src.batches[0]
    search_outputs = search_strategy.generate_output(self,
                                                     self._initial_state(src),
                                                     src_length=src.sent_len())
    return search_outputs

  def generate(self,
               src: batchers.Batch,
               search_strategy: search_strategies.SearchStrategy) -> Sequence[sent.Sentence]:
    """
    Takes in a batch of source sentences and outputs a list of search outputs.

    Args:
      src: The source sentences
      search_strategy: The strategy with which to perform the search
    Returns:
      A list of search outputs including scores, etc.
    """
    assert src.batch_size() == 1
    event_trigger.start_sent(src)
    search_outputs = self.generate_search_output(src, search_strategy)
    if isinstance(src, batchers.CompoundBatch): src = src.batches[0]
    # Best-scoring hypothesis first.
    sorted_outputs = sorted(search_outputs, key=lambda x: x.score[0], reverse=True)
    assert len(sorted_outputs) >= 1
    outputs = []
    for curr_output in sorted_outputs:
      output_actions = [x for x in curr_output.word_ids[0]]
      attentions = [x for x in curr_output.attentions[0]]
      score = curr_output.score[0]
      out_sent = self._emit_translation(src, output_actions, score)
      # Wrap as an n-best entry only when more than one hypothesis exists.
      if len(sorted_outputs) == 1:
        outputs.append(out_sent)
      else:
        outputs.append(sent.NbestSentence(base_sent=out_sent, nbest_id=src[0].idx))
    if self.is_reporting():
      # NOTE(review): this reports the attentions of the last-iterated
      # hypothesis together with the top-ranked output — confirm intended.
      attentions = np.concatenate([x.npvalue() for x in attentions], axis=1)
      self.report_sent_info({"attentions": attentions,
                             "src": src[0],
                             "output": outputs[0]})
    return outputs

  def _emit_translation(self, src, output_actions, score):
    """Build the output sentence object; overridden by subclasses for other output types."""
    return sent.SimpleSentence(idx=src[0].idx,
                               words=output_actions,
                               vocab=getattr(self.trg_reader, "vocab", None),
                               output_procs=self.trg_reader.output_procs,
                               score=score)

  def add_input(self, word: Any, state: decoders.AutoRegressiveDecoderState) -> AutoRegressiveTranslator.Output:
    """Advance the decoder by one step with *word* and return the new state plus attention."""
    if word is not None:
      # Normalize scalar / list inputs into a batch.
      if type(word) == int:
        word = [word]
      if type(word) == list or type(word) == np.ndarray:
        word = batchers.mark_as_batch(word)
    next_state = self.decoder.add_input(state, word) if word is not None else state
    attention = self.attender.calc_attention(next_state.as_vector())
    next_state.context = self.attender.calc_context(next_state.as_vector(), attention=attention)
    return AutoRegressiveTranslator.Output(next_state, attention)

  def best_k(self, state: decoders.AutoRegressiveDecoderState, k: numbers.Integral, normalize_scores: bool = False):
    """Return the decoder's k best next words and their scores."""
    best_words, best_scores = self.decoder.best_k(state, k, normalize_scores)
    return best_words, best_scores

  def sample(self, state: decoders.AutoRegressiveDecoderState, n: numbers.Integral, temperature: float = 1.0):
    """Sample n next words from the decoder's distribution at the given temperature."""
    return self.decoder.sample(state, n, temperature)
class TreeTranslator(DefaultTranslator, Serializable):
  """DefaultTranslator variant that emits dependency-tree (RNNG action) sentences."""

  yaml_tag = "!TreeTranslator"

  def _emit_translation(self, src, output_actions, score):
    # Same hook as the parent class, but the generated action sequence is
    # interpreted as an RNNG derivation over surface/NT/edge vocabularies.
    return sent.DepTreeRNNGSequenceSentence(idx=src[0].idx,
                                            score=score,
                                            actions=output_actions,
                                            surface_vocab=getattr(self.trg_reader, "surface_vocab", None),
                                            nt_vocab=getattr(self.trg_reader, "nt_vocab", None),
                                            edge_vocab=getattr(self.trg_reader, "edge_vocab", None),
                                            output_procs=self.trg_reader.output_procs)
| StarcoderdataPython |
31501 | import re, requests, bs4, unicodedata
from datetime import timedelta, date, datetime
from time import time
# Constants
root = 'https://www.fanfiction.net'
# REGEX MATCHES
# STORY REGEX
_STORYID_REGEX = r"var\s+storyid\s*=\s*(\d+);"
_CHAPTER_REGEX = r"var\s+chapter\s*=\s*(\d+);"
_CHAPTERS_REGEX = r"Chapters:\s*(\d+)\s*"
_WORDS_REGEX = r"Words:\s*([\d,]+)\s*"
_TITLE_REGEX = r"var\s+title\s*=\s*'(.+)';"
_DATEP_REGEX = r"Published:\s*<span.+?='(\d+)'>"
_DATEU_REGEX = r"Updated:\s*<span.+?='(\d+)'>"
# USER REGEX
_USERID_REGEX = r"var\s+userid\s*=\s*(\d+);"
_USERID_URL_EXTRACT = r".*/u/(\d+)"
_USERNAME_REGEX = r"<link rel=\"canonical\" href=\"//www.fanfiction.net/u/\d+/(.+)\">"
_USER_STORY_COUNT_REGEX = r"My Stories\s*<span class=badge>(\d+)<"
_USER_FAVOURITE_COUNT_REGEX = r"Favorite Stories\s*<span class=badge>(\d+)<"
_USER_FAVOURITE_AUTHOR_COUNT_REGEX = r"Favorite Authors\s*<span class=badge>(\d+)<"
# Useful for generating a review URL later on
_STORYTEXTID_REGEX = r"var\s+storytextid\s*=\s*storytextid=(\d+);"
# REGEX that used to parse reviews page
_REVIEW_COMPLETE_INFO_REGEX = r"img class=.*?</div"
_REVIEW_USER_NAME_REGEX = r"> *([^< ][^<]*)<"
_REVIEW_CHAPTER_REGEX = r"<small style=[^>]*>([^<]*)<"
_REVIEW_TIME_REGEX = r"<span data[^>]*>([^<]*)<"
_REVIEW_TEXT_REGEX = r"<div[^>]*>([^<]*)<"
# Used to parse the attributes which aren't directly contained in the
# JavaScript and hence need to be parsed manually
_NON_JAVASCRIPT_REGEX = r'Rated:(.+?)</div>'
_HTML_TAG_REGEX = r'<.*?>'
# Needed to properly decide if a token contains a genre or a character name
_GENRES = [
'General', 'Romance', 'Humor', 'Drama', 'Poetry', 'Adventure', 'Mystery',
'Horror', 'Parody', 'Angst', 'Supernatural', 'Suspense', 'Sci-Fi',
'Fantasy', 'Spiritual', 'Tragedy', 'Western', 'Crime', 'Family', 'Hurt',
'Comfort', 'Friendship'
]
# TEMPLATES
_STORY_URL_TEMPLATE = 'https://www.fanfiction.net/s/%d'
_CHAPTER_URL_TEMPLATE = 'https://www.fanfiction.net/s/%d/%d'
_USERID_URL_TEMPLATE = 'https://www.fanfiction.net/u/%d'
_DATE_COMPARISON = date(1970, 1, 1)
_DATE_FORMAT = '%Y%m%d'
def _parse_string(regex, source):
"""Returns first group of matched regular expression as string."""
return re.search(regex, source).group(1)
def _parse_integer(regex, source):
"""Returns first group of matched regular expression as integer."""
match = re.search(regex, source).group(1)
match = match.replace(',', '')
return int(match)
def _parse_date(regex, source):
    """Return the first capture group (a Unix timestamp in seconds) as a date."""
    seconds_since_epoch = _parse_integer(regex, source)
    return _DATE_COMPARISON + timedelta(seconds=seconds_since_epoch)
def _unescape_javascript_string(string_):
"""Removes JavaScript-specific string escaping characters."""
return string_.replace("\\'", "'").replace('\\"', '"').replace('\\\\', '\\')
def _visible_filter(element):
    """bs4 text-node filter: keep only nodes that render as visible text.

    Drops text living inside non-rendered tags and (attempts to drop) HTML
    comments.
    """
    if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:
        return False
    # NOTE(review): `element` is re-bound here to ASCII-encoded *bytes*, so
    # str(element) below yields the bytes repr ("b'...'"); re.match anchors at
    # the start, so the comment check likely never fires — confirm intent.
    element = unicodedata.normalize('NFKD', element).encode('ascii', 'ignore')
    if re.match(r'<!--.*-->', str(element)):
        return False
    return True
def _get_int_value_from_token(token, prefix):
if not token.startswith(prefix):
raise ValueError("int token doesn't starts with given prefix")
else:
return int(token[len(prefix):].replace(',', ''))
def _get_date_value_from_token(token, prefix):
if not token.startswith(prefix):
raise ValueError("date token doesn't starts with given prefix")
else:
try:
return datetime.strptime(token[len(prefix):], '%m/%d/%Y')
except ValueError:
return datetime.today()
def _get_key_of_first_positive(f, d):
"""
returns key k of first item in l for which f(k) == True
or None
"""
for key, value in d.items():
if f(key) == True:
return key
return None
class Story(object):
    # Attribute names included by default in get_json_dump / print_info.
    SERIALIZED_ATTRS = [
        'title',
        'id',
        'timestamp',
        'description',
        'fandoms',
        'author_id',
        'chapter_count',
        'word_count',
        'date_published',
        'date_updated',
        'rated',
        'language',
        'genre',
        'characters',
        'reviews',
        'favs',
        'followers',
        'complete'
    ]
    # Attributes serialized through strftime(_DATE_FORMAT).
    DATE_ATTRS = [
        'timestamp',
        'date_published',
        'date_updated'
    ]

    def __init__(self, url=None, id=None):
        """ A story on fanfiction.net
        If both url, and id are provided, url is used.
        :type id: int
        :param url: The url of the story.
        :param id: The story id of the story.
        Attributes:
            id (int): The story id.
            description (str): The text description of the story
            timestamp: The timestamp of moment when data was consistent with site
            fandoms [str]: The fandoms to which the story belongs
            chapter_count (int); The number of chapters.
            word_count (int): The number of words.
            author_id (int): The user id of the author.
            title (str): The title of the story.
            date_published (date): The date the story was published.
            date_updated (date): The date of the most recent update.
            rated (str): The story rating.
            language (str): The story language.
            genre [str]: The genre(s) of the story.
            characters [str]: The character(s) of the story.
            reviews (int): The number of reviews of the story.
            favs (int): The number of user which has this story in favorite list
            followers (int): The number of users who follow the story
            complete (bool): True if the story is complete, else False.
        """
        self.inited = False
        self.id = id
        self.url = url
        if id is None:
            if url is None:
                raise ValueError("There must be a url or an id.")
            else:
                # BUGFIX: extract the numeric id from the story URL
                # (https://www.fanfiction.net/s/<id>[/...]). The previous code
                # referenced an undefined `source` variable and raised
                # NameError whenever only a URL was supplied.
                self.id = _parse_integer(r"/s/(\d+)", url)
        else:
            self.url = _STORY_URL_TEMPLATE % int(self.id)
            self.id = int(self.id)

    def download_data(self, timeout=5):
        """Fetch the story page and populate all metadata attributes."""
        self.timestamp = datetime.now()
        source = requests.get(self.url, timeout=timeout)
        source = source.text
        soup = bs4.BeautifulSoup(source, 'html.parser')
        self.author_id = _parse_integer(_USERID_REGEX, source)
        self.title = _unescape_javascript_string(_parse_string(_TITLE_REGEX, source).replace('+', ' '))
        # The last link in the pre-story header names the fandom(s); crossover
        # pages append the word "Crossover", which we strip before splitting.
        fandom_chunk = soup.find('div', id='pre_story_links').find_all('a')[-1].get_text().replace('Crossover', '')
        self.fandoms = [fandom.strip() for fandom in fandom_chunk.split('+')]
        self.description = soup.find('div', {'style': 'margin-top:2px'}).get_text()
        # Tokens of information that aren't directly contained in the
        # JavaScript, need to manually parse and filter those
        tags = re.search(_NON_JAVASCRIPT_REGEX, source.replace('\n', ' ')).group(0)
        tokens = [token.strip() for token in
                  re.sub(_HTML_TAG_REGEX, '', tags).split('-')]
        self._parse_tags(tokens)
        self.inited = True

    def _parse_tags(self, tokens):
        """
        parse desription of story such as 'Rated: T - English - Humor/Adventure - Chapters: 2 - Words: 131,097 - Reviews: 537 - Favs: 2,515 - Follows: 2,207 - Updated: Jul 27, 2016 - Published: Dec 17, 2009 - <NAME>.'
        splitted into tokens list by '-' character
        This functions fill all field of the self object except: id, author_id, title, fandoms, timestamp
        """
        # skipping tokens 'Crossover' and token which contains fandoms
        while not tokens[0].startswith('Rated:'):
            tokens = tokens[1:]
        # Both tokens are constant and always available
        self.rated = tokens[0].replace('Rated:', '').replace('Fiction', '').strip()
        self.language = tokens[1]
        tokens = tokens[2:]
        # there can be token with the list of genres
        if tokens[0] in _GENRES or '/' in tokens[0] and all(token in _GENRES for token in tokens[0].split('/')):
            self.genre = tokens[0].split('/')
            tokens = tokens[1:]
        else:
            self.genre = []
        # deleting useless 'id: ...' token
        if tokens[-1].startswith('id:'):
            tokens = tokens[:-1]
        # and if story is complete the last token contain 'Complete'
        if 'Complete' in tokens[-1]:
            self.complete = True
            tokens = tokens[:-1]
        else:
            self.complete = False
        # except those there are 4 possible kind of tokens: tokens with int data, tokens with date data, story id token,
        # and token with characters/pairings
        int_tokens = {'Chapters: ': 'chapter_count', 'Words: ': 'word_count', 'Reviews: ': 'reviews',
                      'Favs: ': 'favs', 'Follows: ': 'followers'}
        date_tokens = {'Updated: ': 'date_updated', 'Published: ': 'date_published'}
        for token in tokens:
            int_k = _get_key_of_first_positive(lambda s: token.startswith(s), int_tokens)
            date_k = _get_key_of_first_positive(lambda s: token.startswith(s), date_tokens)
            if int_k is not None:
                setattr(self, int_tokens[int_k], _get_int_value_from_token(token, int_k))
            elif date_k is not None:
                setattr(self, date_tokens[date_k], _get_date_value_from_token(token, date_k))
            else:
                # Anything unrecognized is the characters/pairings token;
                # brackets mark pairings and are stripped.
                self.characters = [c.translate(str.maketrans('', '', '[]')).strip() for c in token.split(',')]
        # now we have to fill field which could be left empty
        if not hasattr(self, 'chapter_count'):
            self.chapter_count = 1
        for field in int_tokens.values():
            if not hasattr(self, field):
                setattr(self, field, 0)
        if not hasattr(self, 'date_updated'):
            self.date_updated = self.date_published
        if not hasattr(self, 'characters'):
            self.characters = []

    def _parse_from_storylist_format(self, story_chunk, author_id):
        """
        Parse story from html chunk
        """
        if author_id:
            self.author_id = author_id
        else:
            self.author_id = _parse_integer(_USERID_URL_EXTRACT, str(story_chunk))
        self.timestamp = datetime.now()
        self.fandoms = [s.strip() for s in story_chunk.get('data-category').split('&')]
        self.title = story_chunk.get('data-title')
        self.description = str(story_chunk.find('div', {'class': 'z-indent z-padtop'}))
        # save only parts between div tags
        self.description = self.description[self.description.find('>') + 1:]
        self.description = self.description[:self.description.find('<div', 4)]
        tags = story_chunk.find('div', {'class': 'z-padtop2 xgray'}).get_text()
        self._parse_tags([token.strip() for token in tags.split('-')])
        self.inited = True

    def get_chapters(self):
        """
        A generator for all chapters in the story.
        :return: A generator to fetch chapter objects.
        """
        for number in range(1, self.chapter_count + 1):
            yield Chapter(story_id=self.id, chapter=number)

    def get_user(self):
        """
        :return: The user object of the author of the story.
        """
        return User(id=self.author_id)

    def get_json_dump(self, attrs=None):
        """Return a JSON-serializable dict of the requested attributes."""
        result = {}
        for attr in attrs or self.SERIALIZED_ATTRS:
            if attr in self.DATE_ATTRS:
                result[attr] = getattr(self, attr).strftime(_DATE_FORMAT)
            else:
                result[attr] = getattr(self, attr)
        return result

    def print_info(self, attrs=None):
        """
        Print information held about the story.
        :param attrs: A list of attribute names to print information for.
        :return: void
        """
        assert self.inited
        if not attrs:
            attrs = self.SERIALIZED_ATTRS
        for attr in attrs:
            print("%12s\t%s" % (attr, getattr(self, attr)))

    def get_reviews(self):
        """
        A generator for all reviews in the story.
        :return: A generator to fetch reviews.
        """
        return ReviewsGenerator(self.id)

    # Method alias which allows the user to treat the get_chapters method like
    # a normal property if no manual opener is to be specified.
    chapters = property(get_chapters)
class ReviewsGenerator(object):
    """
    Class that generates review in chronological order
    Attributes:
        base_url (int): storys review url without specified page number
        page_number (int): number of current review page
        reviews_cache List(str): list of already downloaded (and partially processed) reviews
        skip_reviews_number (int): length of already processed review from review_cache
    """

    def __init__(self, story_id, chapter=0):
        """
        If chapter unspecified then generator generates review for all chapters
        """
        self.story_id = story_id
        self.base_url = root + '/r/' + str(story_id) + '/' + str(chapter) + '/'

    def __iter__(self):
        # Reset pagination state so the object can be iterated repeatedly.
        self.page_number = 0
        self.reviews_cache = []
        self.skip_reviews_number = 0
        return self

    def __next__(self):
        self.skip_reviews_number += 1
        # Serve from the cached page while unconsumed reviews remain.
        if len(self.reviews_cache) >= self.skip_reviews_number:
            return Review(self.story_id, self.reviews_cache[self.skip_reviews_number - 1])
        # Cache exhausted: fetch the next review page and refill.
        self.page_number += 1
        page = self._downloadReviewPage(self.page_number)
        self.reviews_cache = re.findall(_REVIEW_COMPLETE_INFO_REGEX, page, re.DOTALL)
        if len(self.reviews_cache) == 0:
            # An empty page means there are no more reviews.
            raise StopIteration
        self.skip_reviews_number = 1
        return Review(self.story_id, self.reviews_cache[0])

    def _downloadReviewPage(self, page_number):
        # Review pages are 1-indexed: <base_url><page>/
        url = self.base_url + str(page_number) + '/'
        return requests.get(url).text
class Review(object):
    """
    A single review of fanfiction story, on fanfiction.net
    Attributes:
        story_id (int): story ID
        user_id (int): ID of user who submited review (may be None if review is anonymous)
        user_name (str): user name (or pseudonym for anonymous review)
        chapter (str): chapter name
        time_ago (str): how much time passed since review submit (format may be inconsistent with what you see in browser just because fanfiction.net sends different pages depend on do you download page from browser or from console/that library
        text (str): review text
    """

    def __init__(self, story_id, unparsed_info):
        """
        That method should not be invoked outside of Story and Chapter classes
        :param story_id (int): story ID
        :param unparsed_info (int): string that contain the rest info
        """
        self.story_id = story_id
        self.user_name = _parse_string(_REVIEW_USER_NAME_REGEX, unparsed_info)
        self.chapter = _parse_string(_REVIEW_CHAPTER_REGEX, unparsed_info)
        self.text = _parse_string(_REVIEW_TEXT_REGEX, unparsed_info)
        self.time_ago = _parse_string(_REVIEW_TIME_REGEX, unparsed_info)
        # fanfiction.net provide strange format, instead of '8 hours ago' it show '8h'
        # so let's add ' ago' suffix if review submitted hours or minutes ago
        if self.time_ago[-1] == 'h' or self.time_ago[-1] == 'm':
            self.time_ago += ' ago'
        # Anonymous reviews carry no profile link, hence no numeric user id.
        if re.search(_USERID_URL_EXTRACT, unparsed_info) == None:
            self.user_id = None
        else:
            self.user_id = _parse_integer(_USERID_URL_EXTRACT, unparsed_info)
class Chapter(object):
    def __init__(self, url=None, story_id=None, chapter=None):
        """ A single chapter in a fanfiction story, on fanfiction.net
        :param url: The url of the chapter.
        :param story_id: The story id of the story of the chapter.
        :param chapter: The chapter number of the story.
        Attributes:
            story_id (int): Story ID
            number (int): Chapter number
            story_text_id (int): ?
            title (str): Title of the chapter, or title of the story.
            raw_text (str): The raw HTML of the story.
            text_list List(str): List of unicode strings for each paragraph.
            text (str): Visible text of the story.
        """
        if url is None:
            if story_id is None:
                raise Exception('A URL or story id must be entered.')
            elif chapter is None:
                raise Exception('Both a story id and chapter number must be provided')
            elif story_id and chapter:
                url = _CHAPTER_URL_TEMPLATE % (story_id, chapter)
        source = requests.get(url)
        source = source.text
        self.story_id = _parse_integer(_STORYID_REGEX, source)
        self.number = _parse_integer(_CHAPTER_REGEX, source)
        self.story_text_id = _parse_integer(_STORYTEXTID_REGEX, source)
        soup = bs4.BeautifulSoup(source, 'html.parser')
        select = soup.find('select', {'name': 'chapter'})
        if select:
            # There are multiple chapters available, use chapter's title
            self.title = select.find('option', selected=True).string.split(None, 1)[1]
        else:
            # No multiple chapters, one-shot or only a single chapter released
            # until now; for the lack of a proper chapter title use the story's.
            # BUGFIX: _parse_string already returns a str under Python 3, so
            # the former trailing .decode() raised AttributeError.
            self.title = _unescape_javascript_string(_parse_string(_TITLE_REGEX, source))
        soup = soup.find('div', id='storytext')
        # Try to remove AddToAny share buttons
        try:
            soup.find('div', {'class': lambda class_: class_ and 'a2a_kit' in class_}).extract()
        except AttributeError:
            # No share-button widget on this page — nothing to strip.
            pass
        # Normalize HTML tag attributes
        for hr in soup('hr'):
            del hr['size']
            del hr['noshade']
        self.raw_text = soup.decode()
        texts = soup.findAll(text=True)
        self.text_list = list(filter(_visible_filter, texts))
        self.text = '\n'.join(self.text_list)

    def get_reviews(self):
        """
        A generator for all reviews for that chapter
        :return: A generator to fetch reviews.
        """
        return ReviewsGenerator(self.story_id, self.number)
class User(object):
    def __init__(self, url=None, id=None):
        """ A user page on fanfiction.net
        :param url: The url of the user profile.
        :param id: The id of the user profile.
        Attributes:
            id (int): User id
            timestamp (int): Timestamp of last update of downloaded profile
            stories [Story]: The list of stories written by user
            favorite_stories [Story]: The list of user favorite stories
            favorite_authors [User]: The list of user favorite authors
            username (str):
        """
        self.id = id
        self.url = url
        if id is None:
            if url is None:
                raise ValueError("There must be a url or an id.")
            else:
                # Derive the numeric id from the profile URL (.../u/<id>).
                self.id = _parse_integer(_USERID_URL_EXTRACT, url)
        else:
            self.url = _USERID_URL_TEMPLATE % int(self.id)
            self.id = int(self.id)

    def download_data(self, timeout=5):
        """Fetch the profile page and populate username, stories and favorites."""
        self.timestamp = datetime.now()
        source = requests.get(self.url, timeout=timeout)
        source = source.text
        soup = bs4.BeautifulSoup(source, 'html.parser')
        self.username = _parse_string(_USERNAME_REGEX, source)
        self.stories = self._get_stories_from_profile(soup, fav_stories=False)
        self.favorite_stories = self._get_stories_from_profile(soup, fav_stories=True)
        self.favorite_authors = self._get_favorite_authors(soup)

    def get_json_dump(self):
        """Return a JSON-serializable summary; related objects become id lists."""
        return {
            'id': self.id,
            'timestamp': self.timestamp.strftime(_DATE_FORMAT),
            'username': self.username,
            'stories': [story.id for story in self.stories],
            'favorite_stories': [story.id for story in self.favorite_stories],
            'favorite_authors': [user.id for user in self.favorite_authors]
        }

    def _get_stories_from_profile(self, soup, fav_stories=True):
        # Own stories and favorites share markup, differing only in CSS class.
        if fav_stories:
            target_class = 'favstories'
        else:
            target_class = 'mystories'
        favourite_stories = soup.findAll('div', {'class': target_class})
        result = []
        for story_chunk in favourite_stories:
            story = Story(id=story_chunk.get('data-storyid'))
            story._parse_from_storylist_format(story_chunk, author_id=None if fav_stories else self.id)
            result.append(story)
        return result

    def _get_favorite_authors(self, soup):
        # Favorite-author links live inside the profile's table columns;
        # each /u/<id>/ href becomes a lazily-populated User object.
        result = []
        for column in soup.findAll('td', {'style': 'line-height:150%'}):
            for author_tag in column.findAll('a', href=re.compile(r".*/u/(\d+)/.*")):
                author_url = author_tag.get('href')
                author_url = root + author_url
                result.append(User(author_url))
        return result
| StarcoderdataPython |
3206865 | <gh_stars>1-10
from BaseScouting.views.base_views import BaseSingleMatchView
from Scouting2013.model.reusable_models import Match
from Scouting2013.model.models2013 import ScoreResult
class SingleMatchView2013(BaseSingleMatchView):
    """Single-match scouting view for the 2013 game."""

    def __init__(self):
        # Bind the 2013 Match model and its template to the generic base view.
        BaseSingleMatchView.__init__(self, Match, 'Scouting2013/view_match.html')

    def get_metrics(self, sr):
        """Flatten a ScoreResult into (display name, value) pairs.

        The team number comes first, followed by every declared ScoreResult
        field in declaration order.
        """
        output = []
        output.append(('teamNumber', sr.team.teamNumber))
        sr_fields = ScoreResult.get_fields()
        for key in sr_fields:
            sr_field = sr_fields[key]
            output.append((sr_field.display_name, getattr(sr, key)))
        return output
3302690 | <filename>handler.py
from dataclasses import dataclass
import asyncio
from . import types
class Handler:
    """Registry of async event handlers with attribute-equality filters."""

    def __init__(self):
        # Registered HandlerObj records, in registration order.
        self.handlers = []

    def register(self, handler, kwargs):
        """Register *handler* (an async callable) together with its filter dict."""
        record = Handler.HandlerObj(handler=handler, filters=kwargs)
        self.handlers.append(record)

    async def check_filters(self):
        # Placeholder: filter evaluation currently happens inline in notify().
        pass

    async def notify(self, event):
        """Dispatch *event* to every handler whose filters match its attributes.

        NOTE(review): a handler is awaited once per matching filter key, so a
        handler registered with several matching filters fires multiple times
        for one event — confirm whether "all filters must match, fire once"
        was the intended semantics.
        """
        update = event.__dict__
        for handler in self.handlers:
            for key, value in handler.filters.items():
                if key in update.keys():
                    if update[key] == value:
                        await handler.handler(event)

    @dataclass
    class HandlerObj:
        # Pairing of the async callable with the filter dict it registered.
        handler: callable
        filters: dict
| StarcoderdataPython |
171294 | #!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides image augmentation functions.
All functions expect float-encoded images, with values between 0 and 1, but
do not clip their outputs.
"""
import chex
from distribution_shift_framework.core.pix import color_conversion
import jax
import jax.numpy as jnp
def _auto_contrast(image: chex.Array, cutoff: int = 0) -> chex.Array:
  """The auto contrast transform: remove top/bottom % and rescale histogram.

  Args:
    image: an RGB image given as a float tensor in [0, 1].
    cutoff: what % of higher/lower pixels to remove

  Returns:
    The new image with auto contrast applied.
  """
  im_rgbs = []
  indices = jnp.arange(0, 256, 1)
  # Each channel is stretched independently against its own 256-bin histogram.
  for rgb in range(0, image.shape[2]):
    im_rgb = image[:, :, rgb:rgb + 1]
    hist = jnp.histogram(im_rgb, bins=256, range=(0, 1))[0]
    hist_cumsum = hist.cumsum()
    # Determine % samples
    cut_lower = hist_cumsum[-1] * cutoff // 100
    cut_higher = hist_cumsum[-1] * (100 - cutoff) // 100
    # The lower offset
    offset_lo = (hist_cumsum < cut_lower) * indices
    offset_lo = offset_lo.max() / 256.
    # The higher offset
    offset_hi = (hist_cumsum <= cut_higher) * indices
    offset_hi = offset_hi.max() / 256.
    # Remove cutoff% samples from low/hi end
    im_rgb = (im_rgb - offset_lo).clip(0, 1) + offset_lo
    im_rgb = (im_rgb + 1 - offset_hi).clip(0, 1) - (1 - offset_hi)
    # And renormalize
    # `offset` guards against division by zero when offset_hi == offset_lo.
    offset = (offset_hi - offset_lo) < 1 / 256.
    im_rgb = (im_rgb - offset_lo) / (offset_hi - offset_lo + offset)
    # And return
    im_rgbs.append(im_rgb)
  return jnp.concatenate(im_rgbs, axis=2)
def auto_contrast(image: chex.Array, cutoff: chex.Array) -> chex.Array:
  """Batched wrapper around _auto_contrast; rank >= 4 is vmapped over the leading axis."""
  if len(image.shape) < 4:
    return _auto_contrast(image, cutoff)
  else:
    return jax.vmap(_auto_contrast)(image, cutoff.astype(jnp.int32))
def _equalize(image: chex.Array) -> chex.Array:
  """The equalize transform: make histogram cover full scale.

  Args:
    image: an RGB image given as a float tensor in [0, 1].

  Returns:
    The equalized image.
  """
  im_rgbs = []
  # Work on integer pixel values in [0, 255]; each channel is equalized
  # independently.
  im = (image * 255).astype(jnp.int32).clip(0, 255)
  for rgb in range(0, im.shape[2]):
    im_rgb = im[:, :, rgb:rgb + 1]
    hist = jnp.histogram(im_rgb, bins=256, range=(0, 256))[0]
    # Count of pixels above each intensity; zero entries are replaced with the
    # maximum so they never win the min() below.
    last_nonzero_value = hist.sum() - hist.cumsum()
    last_nonzero_value = last_nonzero_value + last_nonzero_value.max() * (
        last_nonzero_value == 0)
    step = (hist.sum() - last_nonzero_value.min()) // 255
    n = step // 2
    im_rgb_new = jnp.zeros((im_rgb.shape), dtype=im_rgb.dtype)
    def for_loop(i, values):
      # Map every pixel with intensity i to its equalized level n // step,
      # accumulating the histogram mass as i advances.
      (im, n, hist, step, im_rgb) = values
      im = im + (n // step) * (im_rgb == i)
      return (im, n + hist[i], hist, step, im_rgb)
    result, _, _, _, _ = jax.lax.fori_loop(0, 256, for_loop,
                                           (im_rgb_new, n, hist, step, im_rgb))
    im_rgbs.append(result.astype(jnp.float32) / 255.)
  return jnp.concatenate(im_rgbs, 2)
def equalize(image: chex.Array, unused_cutoff: chex.Array) -> chex.Array:
  """Batched wrapper around _equalize; rank >= 4 is vmapped over the leading axis."""
  if len(image.shape) < 4:
    return _equalize(image)
  else:
    return jax.vmap(_equalize)(image)
def _posterize(image: chex.Array, bits: chex.Array) -> chex.Array:
  """The posterize transform: remove least significant bits.

  Args:
    image: an RGB image given as a float tensor in [0, 1].
    bits: how many bits to ignore.

  Returns:
    The posterized image.
  """
  # Bitmask keeping only the top (8 - bits) bits of each 8-bit channel value.
  mask = ~(2**(8 - bits) - 1)
  image = (image * 255).astype(jnp.int32).clip(0, 255)
  image = jnp.bitwise_and(image, mask)
  return image.astype(jnp.float32) / 255.
def posterize(image: chex.Array, bits: chex.Array) -> chex.Array:
  """Batched wrapper around _posterize; rank >= 4 is vmapped over the leading axis."""
  if len(image.shape) < 4:
    return _posterize(image, bits)
  else:
    return jax.vmap(_posterize)(image, bits.astype(jnp.uint8))
def _solarize(image: chex.Array, threshold: chex.Array) -> chex.Array:
  """The solarization transformation: pixels > threshold are inverted.

  Args:
    image: an RGB image given as a float tensor in [0, 1].
    threshold: the threshold in [0, 1] above which to invert the image.

  Returns:
    The solarized image.
  """
  # Branch-free select: invert where image >= threshold, keep otherwise.
  image = (1 - image) * (image >= threshold) + image * (image < threshold)
  return image
def solarize(image: chex.Array, threshold: chex.Array) -> chex.Array:
  """Batched wrapper around _solarize; rank >= 4 is vmapped over the leading axis."""
  if len(image.shape) < 4:
    return _solarize(image, threshold)
  else:
    return jax.vmap(_solarize)(image, threshold)
def adjust_color(image: chex.Array,
                 factor: chex.Numeric,
                 channel: int = 0,
                 channel_axis: int = -1) -> chex.Array:
  """Shifts one channel of an RGB image by a given additive amount.

  (The former docstring said "multiplicative", but the code adds *factor*.)

  Args:
    image: an RGB image, given as a float tensor in [0, 1].
    factor: the (additive) amount to shift the chosen channel by.
    channel: the RGB channel to manipulate (0=red, 1=green, 2=blue).
    channel_axis: the index of the channel axis.

  Returns:
    The color adjusted image, clipped back into [0, 1].
  """
  red, green, blue = color_conversion.split_channels(image, channel_axis)
  if channel == 0:
    red = jnp.clip(red + factor, 0., 1.)
  elif channel == 1:
    green = jnp.clip(green + factor, 0., 1.)
  else:
    blue = jnp.clip(blue + factor, 0., 1.)
  return jnp.stack((red, green, blue), axis=channel_axis)
| StarcoderdataPython |
1683941 | <gh_stars>0
# Site-wide display/upload settings.
IMAGE_UPLOAD_SIZE = (1600, 1200)  # uploaded image dimensions (width, height)
IMAGE_QUALITY = 85  # encoder quality used when saving images (0-100)
AMOUNT_ARTICLES_HOME_PAGE = 5  # number of news articles shown on the home page
| StarcoderdataPython |
3240151 | #!/usr/bin/env python3
from os import walk
from os.path import join, splitext
from re import compile
# Matches project headers included with angle brackets: #include <ieompp/...hpp>
regex_bad_include = compile(r"^\s*#\s*include\s*<(ieompp/.*\.hpp)>\s*$")

if __name__ == "__main__":
    # Walk every .hpp under include/ieompp and rewrite angle-bracket includes
    # of project headers into quoted form (#include "ieompp/...") in place.
    for root, _, files in walk("include/ieompp"):
        for file in files:
            path = join(root, file)
            if splitext(path)[1] != ".hpp":
                continue
            with open(path) as f:
                contents = f.readlines()
            print(path + ":")
            for idx, line in enumerate(contents):
                m = regex_bad_include.match(line)
                if not m:
                    continue
                new_line = "#include \"" + m.group(1) + "\""
                # Report each rewrite as "<line index>: old -> new".
                print("{}:\t{} -> {}".format(idx, line.strip(), new_line))
                contents[idx] = new_line + "\n"
            with open(path, "w") as f:
                f.writelines(contents)
| StarcoderdataPython |
1796910 | <filename>segelectri/data_loader/utils/parse_img_op.py
# coding=utf-8
import tensorflow as tf
import tensorflow_io as tfio
def decode_image(path: tf.Tensor):
    """decode fn for tiff, png, jpg, bmp, giff format

    Args:
        path (tf.Tensor): path for this image

    Raises:
        RuntimeError: when no available decoder can parse the file contents.
    """
    decode_fns = [tf.image.decode_image, tfio.experimental.image.decode_tiff]
    raw_content = tf.io.read_file(path)
    for fn in decode_fns:
        try:
            return fn(raw_content)
        except Exception:
            # Deliberate best-effort: try the next decoder in the list.
            continue
    raise RuntimeError('failed to decode image {}'.format(path.numpy()))
def parse_img_and_mask(origin_img_path: tf.Tensor, mask_img_path: tf.Tensor):
    """parse img and mask according to path that provided

    Args:
        origin_img_path (tf.Tensor): origin image path
        mask_img_path (tf.Tensor): mask image path

    Returns:
        Tuple of (origin image tensor, mask tensor limited to 3 channels).
    """
    origin_img = decode_image(origin_img_path)
    # Keep only the first 3 channels of the mask (drops e.g. a TIFF alpha channel).
    mask_img = decode_image(mask_img_path)[:, :, :3]
    # assert origin_img.shape[:2] == mask_img.shape[:2], \
    #     'mask image and origin image shape doesn\'t match'
    return origin_img, mask_img
def save_img(img: tf.Tensor, img_path: str):
    """save image

    Args:
        img (tf.Tensor): img content
        img_path (str): img path

    Returns:
        The write-file op; the image is always PNG-encoded regardless of extension.
    """
    return tf.io.write_file(img_path, tf.image.encode_png(img))
| StarcoderdataPython |
3357839 |
def patch():
    """Apply gevent monkey-patching and export the public lymph API.

    Idempotent: guarded by the patch._initialized flag so repeated calls are
    no-ops.
    """
    if patch._initialized:
        return
    patch._initialized = True
    import gevent.monkey
    gevent.monkey.patch_all()
    import sys
    if sys.version_info.major < 3:
        _py2_patches()
    _export()
# Initialize the guard flag so the first patch() call can read it.
patch._initialized = False
def _export():
    """Re-export the public API names onto the top-level ``lymph`` package."""
    import lymph
    lymph.__version__ = '0.8.0-dev'
    from lymph.exceptions import RpcError, LookupFailure, Timeout
    from lymph.core.decorators import rpc, raw_rpc, event
    from lymph.core.interfaces import Interface
    from lymph.core.declarations import proxy
    # Attach each object under its own name, e.g. lymph.rpc, lymph.Interface.
    for obj in (RpcError, LookupFailure, Timeout, rpc, raw_rpc, event, Interface, proxy):
        setattr(lymph, obj.__name__, obj)
def _py2_patches():
    """Patches that only apply under Python 2."""
    import monotime  # NOQA - imported for its side effect (monotonic clock backport)
| StarcoderdataPython |
55575 | <gh_stars>0
class Solution:
    """LeetCode 1579: remove the maximum number of edges while keeping the
    graph fully traversable by both Alice (types 1 and 3) and Bob (types 2
    and 3)."""

    def maxNumEdgesToRemove(self, n: int, edges: List[List[int]]) -> int:
        """Return the maximum number of removable edges, or -1 when full
        traversal by both players is impossible.

        Greedy argument: shared (type 3) edges are processed first since
        keeping one is never worse than keeping a single-player edge; an edge
        is removable iff it connects two already-connected vertices in the
        relevant player's forest.

        The original implementation merged explicit membership lists (O(E*n)
        worst case); this uses union-find with path halving and union by
        size, O(E * alpha(n)).
        """

        class _DSU:
            # Disjoint-set union over vertices 1..n (index 0 unused).
            def __init__(self, n):
                self.parent = list(range(n + 1))
                self.size = [1] * (n + 1)
                self.components = n  # components among vertices 1..n

            def find(self, x):
                # Path halving keeps trees shallow without recursion.
                while self.parent[x] != x:
                    self.parent[x] = self.parent[self.parent[x]]
                    x = self.parent[x]
                return x

            def union(self, a, b):
                # Returns True iff a merge happened (i.e. the edge was useful).
                ra, rb = self.find(a), self.find(b)
                if ra == rb:
                    return False
                if self.size[ra] < self.size[rb]:
                    ra, rb = rb, ra
                self.parent[rb] = ra
                self.size[ra] += self.size[rb]
                self.components -= 1
                return True

        alice, bob = _DSU(n), _DSU(n)
        removable = 0

        # Pass 1: shared edges — useful if they merge either player's forest.
        for t, u, v in edges:
            if t == 3:
                used_by_alice = alice.union(u, v)
                used_by_bob = bob.union(u, v)
                if not (used_by_alice or used_by_bob):
                    removable += 1

        # Pass 2: player-specific edges.
        for t, u, v in edges:
            if t == 1:
                if not alice.union(u, v):
                    removable += 1
            elif t == 2:
                if not bob.union(u, v):
                    removable += 1

        # Both players must end with a single connected component.
        if alice.components != 1 or bob.components != 1:
            return -1
        return removable
return answer | StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.