code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import time
import json
from modules.DateGenerator import DateGenerator
from modules.Config import APILoader, Network
from modules.VaxFactory import VaxFactory
from modules.Device import UserAgents
from modules.Logger import Logger
class AppointmentBooking:
    """Plain data holder for a single confirmed appointment record.

    Copies the expected fields out of the *data* mapping onto attributes of
    the same name.  A missing key raises ``KeyError`` (same contract as the
    original attribute-by-attribute copy, and in the same key order).
    """

    def __init__(self, data):
        for key in ("center_id", "name", "address", "state_name",
                    "district_name", "block_name", "pincode", "dose",
                    "appointment_id", "session_id", "date", "slot"):
            setattr(self, key, data[key])
class Appointment:
    """Polls the CoWIN appointment APIs for available vaccination slots.

    Two lookup modes are supported: by PIN code (:meth:`seek`) and by
    district (:meth:`seek_area`).  In cron mode each ``seek*`` call performs
    exactly one polling pass; otherwise it loops forever, sleeping
    ``freq_s`` seconds between passes.
    """

    # Well-known district identifiers (Bangalore region).
    BANGALORE_URBAN = "265"
    BBMP = "294"

    def __init__(self, seek=3, pin_codes=None,
                 freq_s=60, mode_cron=True, token='',
                 district_codes=None
                 ) -> None:
        """
        :param seek: number of consecutive days to scan in PIN-code mode
        :param pin_codes: PIN codes to query (defaults to ``["560029"]``)
        :param freq_s: seconds to sleep between requests / polling passes
        :param mode_cron: if truthy, run a single pass per ``seek*`` call
        :param token: bearer token used in the Authorization header
        :param district_codes: district ids to query (defaults to ``["265"]``)
        """
        # BUG FIX: the original used mutable list defaults, which are shared
        # across all instances; replaced with None sentinels.
        self.attempts = 0
        self.days_seek = seek
        self.pin_codes = ["560029"] if pin_codes is None else pin_codes
        self.freq_s = freq_s
        self.mode_cron = bool(mode_cron)
        self.token = token
        self.district_codes = ["265"] if district_codes is None else district_codes
        # Daily operation window defaults to effectively the whole day.
        self.operation_window_start_hr = 0
        self.operation_window_start_min = 0
        self.operation_window_end_hr = 23
        self.operation_window_end_min = 58
        # Last HTTP status seen; 101 is a sentinel meaning "no request yet".
        self.last_status_code = 101

    def __cycle(self):
        # In cron mode every completed pass consumes the one-attempt budget.
        if self.mode_cron:
            self.attempts += 1

    def operation_window(self, start_hr=0, start_min=0, end_hr=23, end_min=58):
        """Configure the daily window; negative components fall back to defaults."""
        self.operation_window_start_hr = start_hr if start_hr >= 0 else 0
        self.operation_window_end_hr = end_hr if end_hr >= 0 else 23
        self.operation_window_start_min = start_min if start_min >= 0 else 0
        self.operation_window_end_min = end_min if end_min >= 0 else 58

    def can_continue(self):
        """Cron mode allows exactly one attempt; otherwise loop forever."""
        return not self.mode_cron or self.attempts < 1

    def reset(self):
        """Restore the attempt budget so a new polling run can start."""
        self.attempts = 0

    def __perform(self, perform, and_then=None):
        """Run *perform* (then optional *and_then*) until :meth:`can_continue` is False."""
        self.reset()
        DateGenerator.seed()
        while self.can_continue():
            perform()
            if and_then is not None:
                and_then()
            Logger.log("Sleeping for", self.freq_s)
            time.sleep(self.freq_s)
            self.__cycle()

    def __perform_seek(self):
        """One polling pass over the next ``days_seek`` days, by PIN code.

        Returns True if at least one request succeeded (HTTP < 300).
        NOTE(review): only ``pin_codes[0]`` is ever queried -- confirm whether
        support for multiple PIN codes was intended.
        """
        base_api_url, method = APILoader.appointment_by_pin()
        success = False
        for day in range(self.days_seek):
            date_to_check = DateGenerator.format_date_from_days(day)
            pincode = self.pin_codes[0]
            headers = Network.headers_json()
            headers['User-Agent'] = UserAgents.android()
            headers["Authorization"] = "Bearer " + self.token
            api_url = base_api_url + "?pincode=" + pincode + "&date=" + date_to_check
            resp = method(
                url=api_url,
                headers=headers
            )
            self.last_status_code = int(resp.status_code)
            status = resp.status_code
            # NOTE(review): original indentation was lost in the source dump;
            # the failure branch is assumed to cover non-OK responses.
            if resp:
                if int(status) < 300:
                    success = True
                    data = resp.json()
                    Logger.log("(Pin API)", date_to_check,
                               "[", status, "]", pincode, "::", data)
            else:
                Logger.log("(Pin API)", date_to_check,
                           "[", status, "]", pincode, " X Failed", resp.content.decode())
            time.sleep(self.freq_s)
        return success

    def __perform_seek_area(self):
        """One polling pass per configured district; aggregates found centers.

        Returns True if at least one district request succeeded (HTTP < 300).
        """
        base_api_url, method = APILoader.appointment_by_district()
        success = False
        for district in self.district_codes:
            api_url = (base_api_url + "?district_id=" + str(district)
                       + "&date=" + DateGenerator.format_date_from_days(0))
            headers = Network.headers_json()
            headers['User-Agent'] = UserAgents.android()
            headers['Authorization'] = "Bearer " + self.token
            resp = method(url=api_url, headers=headers)
            self.last_status_code = int(resp.status_code)
            if self.last_status_code < 300:
                success = True
                self.aggregate_centers(
                    centers=json.loads(resp.content.decode()))
            else:
                Logger.log("(District Cal API)", resp.status_code,
                           resp.content.decode())
        return success

    def seek(self):
        """Poll by PIN code until the mode allows no further attempts."""
        return self.__perform(self.__perform_seek)

    def seek_area(self, and_then=None):
        """Poll by district, optionally running *and_then* after each pass."""
        return self.__perform(self.__perform_seek_area, and_then)

    def aggregate_centers(self, centers):
        """Flatten the API ``centers`` payload into ``VaxFactory`` entries.

        Accepts either the raw response dict (with a ``"centers"`` key) or
        the list of center dicts directly.
        """
        centers = centers["centers"] if "centers" in centers else centers
        for center in centers:
            for session in center["sessions"]:
                VaxFactory.add_center(
                    center["center_id"], center["name"], center["address"],
                    center["block_name"], center["pincode"],
                    center["lat"], center["long"],
                    session["session_id"], session["date"],
                    session["min_age_limit"], session["vaccine"],
                    session["available_capacity"],
                    session["available_capacity_dose1"],
                    session["available_capacity_dose2"],
                    session["slots"],
                )
) | modules/Appointment.py | import time
import json
from modules.DateGenerator import DateGenerator
from modules.Config import APILoader, Network
from modules.VaxFactory import VaxFactory
from modules.Device import UserAgents
from modules.Logger import Logger
class AppointmentBooking:
def __init__(self, data):
self.center_id = data["center_id"]
self.name = data["name"]
self.address = data["address"]
self.state_name = data["state_name"]
self.district_name = data["district_name"]
self.block_name = data["block_name"]
self.pincode = data["pincode"]
self.dose = data["dose"]
self.appointment_id = data["appointment_id"]
self.session_id = data["session_id"]
self.date = data["date"]
self.slot = data["slot"]
class Appointment:
BANGALORE_URBAN = "265"
BBMP = "294"
def __init__(self, seek=3, pin_codes=["560029"],
freq_s=60, mode_cron=True, token='',
district_codes=["265"]
) -> None:
self.attempts = 0
self.days_seek = seek
self.pin_codes = pin_codes
self.freq_s = freq_s
self.mode_cron = True if mode_cron else False
self.token = token
self.district_codes = district_codes
self.operation_window_start_hr = 0
self.operation_window_start_min = 0
self.operation_window_end_hr = 23
self.operation_window_end_min = 58
self.last_status_code = 101
def __cycle(self):
if self.mode_cron:
self.attempts += 1
def operation_window(self, start_hr=0, start_min=0, end_hr=23, end_min=58):
self.operation_window_start_hr = start_hr if start_hr >= 0 else 0
self.operation_window_end_hr = end_hr if end_hr >= 0 else 23
self.operation_window_start_min = start_min if start_min >= 0 else 0
self.operation_window_end_min = end_min if end_min >= 0 else 58
def can_continue(self):
return (self.mode_cron and self.attempts < 1) or not self.mode_cron
def reset(self):
self.attempts = 0
def __perform(self, perform, and_then=None):
self.reset()
DateGenerator.seed()
while self.can_continue():
perform()
if and_then is not None:
and_then()
Logger.log("Sleeping for", self.freq_s)
time.sleep(self.freq_s)
self.__cycle()
def __perform_seek(self):
base_api_url, method = APILoader.appointment_by_pin()
success = False
for i in range(0, self.days_seek):
date_to_check = DateGenerator.format_date_from_days(i)
pincode = self.pin_codes[0]
headers = Network.headers_json()
headers['User-Agent'] = UserAgents.android()
headers["Authorization"] = "Bearer " + self.token
api_url = base_api_url + "?pincode=" + pincode + "&date=" + date_to_check
resp = method(
url=api_url,
headers=headers
)
self.last_status_code = int(resp.status_code)
status = resp.status_code
if resp:
if int(status) < 300:
success = True
data = resp.json()
Logger.log("(Pin API)", date_to_check,
"[", status, "]", pincode, "::", data)
else:
Logger.log("(Pin API)", date_to_check,
"[", status, "]", pincode, " X Failed", resp.content.decode())
time.sleep(self.freq_s)
return success
def __perform_seek_area(self):
base_api_url, method = APILoader.appointment_by_district()
success = False
for districts in self.district_codes:
api_url = base_api_url + "?district_id=" + \
str(districts) + "&date=" + \
DateGenerator.format_date_from_days(0)
headers = Network.headers_json()
headers['User-Agent'] = UserAgents.android()
headers['Authorization'] = "Bearer " + self.token
resp = method(url=api_url, headers=headers)
self.last_status_code = int(resp.status_code)
if self.last_status_code < 300:
success = True
self.aggregate_centers(
centers=json.loads(resp.content.decode()))
else:
Logger.log("(District Cal API)", resp.status_code,
resp.content.decode())
return success
def seek(self):
return self.__perform(self.__perform_seek)
def seek_area(self, and_then=None):
return self.__perform(self.__perform_seek_area, and_then)
def aggregate_centers(self, centers):
centers = centers if "centers" not in centers else centers["centers"]
for each_center in centers:
id = each_center["center_id"]
name = each_center["name"]
addr = each_center["address"]
block = each_center["block_name"]
pincode = each_center["pincode"]
lati = each_center["lat"]
longi = each_center["long"]
sessions = each_center["sessions"]
for each_session in sessions:
session_id = each_session["session_id"]
session_date = each_session["date"]
min_age_limit = each_session["min_age_limit"]
vax = each_session["vaccine"]
cap_avail = each_session["available_capacity"]
cap_avail_dose1 = each_session["available_capacity_dose1"]
cap_avail_dose2 = each_session["available_capacity_dose2"]
slots = each_session["slots"]
VaxFactory.add_center(
id, name, addr, block, pincode, lati, longi,
session_id, session_date, min_age_limit, vax,
cap_avail, cap_avail_dose1, cap_avail_dose2, slots
) | 0.251188 | 0.199444 |
import os
from django.core.mail import send_mail
from django.template.loader import render_to_string
def feedback_mail(message, user):
    """Email a user's feedback *message* to the configured admin address."""
    body = f"{message}\n\nPosted by {user.fname}, {user.email}"
    subject = "FRCShirt Feedback"
    # Debug output: show where the feedback mail is headed.
    print(os.getenv("ADMIN_EMAIL"))
    send_mail(subject, body, "FRCShirt<<EMAIL>>", [os.getenv("ADMIN_EMAIL")])
def pass_reset_mail(user, key):
    """Send *user* a password-reset link parameterized by *key*."""
    body = (
        f"Hey {user.fname}!\n\nTo reset your password, click on the following link: "
        f"https://frcshirt.trade/login/forgot/?key={key}"
    )
    subject = "FRCShirt: Reset Password"
    send_mail(subject, body, "FRCShirt<<EMAIL>>", [user.email])
# TODO: Fix html message not displaying in gmail
def trade_mail(trade):
    """Notify the owner of the requested item that a trade has been offered."""
    ctx = {'trade': trade}
    # Rendered but not yet attached to the mail -- see module TODO about
    # the HTML version not displaying in gmail.
    html = render_to_string('trade/email/trade_notification.html', ctx)
    body = (
        f"Hey {trade.take.owner.fname}, you have a trade offer!\n\n"
        f"{trade.give.owner} offered you their {trade.give} for your {trade.take}. "
        "Go to https://frcshirt.trade/, log in, and select 'My Items' to view or accept the offer."
    )
    subject = f"Trade offer for your {trade.take}"
    send_mail(subject, body, "FRCShirt<<EMAIL>>", [trade.take.owner.email])
def trade_accepted_mail(trade):
    """Tell the offering user that their trade offer was accepted."""
    ctx = {'trade': trade}
    # TODO: Find meetup and insert into ctx
    # Rendered but not yet attached -- see module TODO about gmail HTML.
    html = render_to_string('trade/email/trade_accepted.html', ctx)
    body = (
        f"Hey {trade.give.owner.fname}!\n\n"
        f"{trade.take.owner} accepted your trade offer! They want to trade your "
        f"{trade.give} for their {trade.take}. You can get in touch with them at "
        f"{trade.take.owner.email} to coordinate the trade. "
    )
    subject = "Trade offer accepted!"
    send_mail(subject, body, "FRCShirt<<EMAIL>>", [trade.give.owner.email])
def trade_cancelled_by_giver(trade):
    """Tell the taker that the giver cancelled the trade."""
    ctx = {'trade': trade}
    # Placeholder until the HTML email template is wired up.
    html = None
    body = (
        f"Hey {trade.take.owner.fname},\n\n"
        f"{trade.give.owner} just marked the trade of your {trade.take} for their "
        f"{trade.give} as cancelled. Reach out to them if you think this is an error. "
        f"If you'd like to relist your {trade.take}, go to frcshirt.trade, "
        "click My Items and click relist on its item page"
    )
    subject = "Trade offer cancelled"
    send_mail(subject, body, "FRCShirt<<EMAIL>>", [trade.take.owner.email])
def trade_cancelled_by_taker(trade):
    """Tell the giver that the taker cancelled the trade."""
    ctx = {'trade': trade}
    # Placeholder until the HTML email template is wired up.
    html = None
    body = (
        f"Hey {trade.give.owner.fname},\n\n"
        f"{trade.take.owner} just marked the trade of your {trade.give} for their "
        f"{trade.take} as cancelled. Reach out to them if you think this is an error. "
        f"If you'd like to relist your {trade.give}, go to frcshirt.trade, "
        "click My Items and click relist on its item page"
    )
    subject = "Trade offer cancelled"
    send_mail(subject, body, "FRCShirt<<EMAIL>>", [trade.give.owner.email])
from django.core.mail import send_mail
from django.template.loader import render_to_string
def feedback_mail(message, user):
plain = message + "\n\nPosted by %s, %s" % (user.fname, user.email)
subject = "FRCShirt Feedback"
print(os.getenv("ADMIN_EMAIL"))
send_mail(subject, plain, "FRCShirt<<EMAIL>>", [os.getenv("ADMIN_EMAIL")])
def pass_reset_mail(user, key):
plain = "Hey %s!\n\nTo reset your password, click on the following link: " \
"https://frcshirt.trade/login/forgot/?key=%s" % (
user.fname, key
)
subject = "FRCShirt: Reset Password"
send_mail(subject, plain, "FRCShirt<<EMAIL>>", [user.email])
# TODO: Fix html message not displaying in gmail
def trade_mail(trade):
ctx = {'trade': trade}
html = render_to_string('trade/email/trade_notification.html', ctx)
plain = "Hey %s, you have a trade offer!\n\n%s offered you their %s for your %s. Go to https://frcshirt.trade/, " \
"log in, and select 'My Items' to view or accept the offer." % (
trade.take.owner.fname, trade.give.owner, trade.give, trade.take
)
subject = "Trade offer for your %s" % trade.take
send_mail(subject, plain, "FRCShirt<<EMAIL>>", [trade.take.owner.email])
def trade_accepted_mail(trade):
ctx = {'trade': trade}
# TODO: Find meetup and insert into ctx
html = render_to_string('trade/email/trade_accepted.html', ctx)
plain = "Hey %s!\n\n%s accepted your trade offer! They want to trade your %s for their %s. You can get in touch " \
"with them at %s to coordinate the trade. " % (
trade.give.owner.fname, trade.take.owner, trade.give, trade.take, trade.take.owner.email
)
subject = "Trade offer accepted!"
send_mail(subject, plain, "FRCShirt<<EMAIL>>", [trade.give.owner.email])
def trade_cancelled_by_giver(trade):
ctx = {'trade': trade}
html = None
plain = "Hey %s,\n\n%s just marked the trade of your %s for their %s as cancelled. Reach out to them if you think " \
"this is an error. If you'd like to relist your %s, go to frcshirt.trade, click My Items and click " \
"relist on its item page" % (
trade.take.owner.fname, trade.give.owner, trade.take, trade.give, trade.take
)
subject = "Trade offer cancelled"
send_mail(subject, plain, "FRCShirt<<EMAIL>>", [trade.take.owner.email])
def trade_cancelled_by_taker(trade):
ctx = {'trade': trade}
html = None
plain = "Hey %s,\n\n%s just marked the trade of your %s for their %s as cancelled. Reach out to them if you think " \
"this is an error. If you'd like to relist your %s, go to frcshirt.trade, click My Items and click " \
"relist on its item page" % (
trade.give.owner.fname, trade.take.owner, trade.give, trade.take, trade.give
)
subject = "Trade offer cancelled"
send_mail(subject, plain, "FRCShirt<<EMAIL>>", [trade.give.owner.email]) | 0.131703 | 0.125226 |
import collections
import collections.abc
import warnings

import torch

from reid_evaluation.metric import evaluate, compute_distances
from utils import MetricTracker, SharedStorage
class ActiveMetric:
    """Metric class that actively interacts with MetricTracker and SharedStorage to track metrics,
    during end-of-step and end-of-epoch callbacks.

    Both hooks are no-ops here; subclasses override whichever they need.
    """

    # BUG FIX: ``collections.Mapping`` is a deprecated alias that was removed
    # in Python 3.10; the annotation must use ``collections.abc.Mapping``.
    def on_step_end(self, items: collections.abc.Mapping, tracker: MetricTracker, storage: SharedStorage):
        """
        :param items: output from data loader and model during a single step
        :param tracker: metric tracker to write metrics to
        :param storage: storage to interact with. Note that writing data to storage should be handled by
            the trainer. A common use-case would be to write to the metadata dictionary.
        :return:
        """
        pass

    def on_epoch_end(self, tracker: MetricTracker, storage: SharedStorage):
        """Called once per epoch; override to emit epoch-level metrics."""
        pass
def _accuracy(output, target):
with torch.no_grad():
pred = torch.argmax(output, dim=1)
assert pred.shape[0] == len(target)
correct = 0
correct += torch.sum(pred == target).item()
return correct / len(target)
def _top_k_acc(output, target, k=3):
warnings.warn("This metric isn't adapted to the current project. You'll probably get an error")
with torch.no_grad():
pred = torch.topk(output, k, dim=1)[1]
assert pred.shape[0] == len(target)
correct = 0
for i in range(k):
correct += torch.sum(pred[:, i] == target).item()
return correct / len(target)
class Accuracy(ActiveMetric):
    """Step-level classification accuracy metric.

    Reads predictions and targets out of the per-step ``items`` mapping and
    pushes the batch accuracy into the tracker under this metric's name.
    """

    def __init__(self, output_key="preds", target_key="targets", name="accuracy"):
        """
        :param output_key: key in the step items mapping holding the logits
        :param target_key: key holding the ground-truth labels
        :param name: name under which the metric is reported
        :raises TypeError: if *name* is not a string
        """
        if not isinstance(name, str):
            raise TypeError("name must be a valid string")
        self.__name__ = name
        self.output_key = output_key
        self.target_key = target_key

    def on_step_end(self, items: collections.abc.Mapping, tracker: MetricTracker, storage: SharedStorage):
        output, target = items[self.output_key], items[self.target_key]
        value = _accuracy(output, target)
        # BUG FIX: previously hard-coded "accuracy", ignoring the configured
        # ``name`` -- two Accuracy instances would have overwritten each other.
        tracker.update(self.__name__, value, n=output.size(0))
class ReidMetric(ActiveMetric):
    """Computes rank-1 / mAP / mINP re-id metrics from the query-gallery distances."""

    def __init__(self):
        pass

    def on_epoch_end(self, tracker: MetricTracker, storage: SharedStorage):
        qpids = storage.get_data("qpids")
        gpids = storage.get_data("gpids")
        qcamids = storage.get_data("qcamids")
        gcamids = storage.get_data("gcamids")
        distmat = storage.get_data("distmat")
        if distmat is None:
            # Compute once and cache so sibling metrics can reuse it.
            qf = storage.get_data("qf")
            gf = storage.get_data("gf")
            distmat = compute_distances(qf, gf)
            storage.set_data("distmat", distmat)
        all_cmc, all_AP, all_INP = evaluate(distmat, qpids, gpids, qcamids, gcamids)
        tracker.update("r1", all_cmc[0].item())
        tracker.update("mAP", all_AP.mean().item())
        tracker.update("mINP", all_INP.mean().item())
class ReidGlobalDistanceHistogram(ActiveMetric):
    """Tracks means and histograms of pairwise distances, split into
    positive/negative identity pairs and same/different camera pairs.
    """

    def __init__(self, train=False):
        # When train=True, query and gallery are the same feature set, so
        # identical-instance pairs must be masked out of the positives.
        self.train = train

    def on_epoch_end(self, tracker: MetricTracker, storage: SharedStorage):
        if self.train:
            qf = gf = storage.get_data("features")
            qpids = gpids = storage.get_data("pids")
            qcamids = gcamids = storage.get_data("camids")
            prefix = ""
        else:
            qf, gf = storage.get_data("qf"), storage.get_data("gf")
            qpids, gpids = storage.get_data("qpids"), storage.get_data("gpids")
            qcamids, gcamids = storage.get_data("qcamids"), storage.get_data("gcamids")
            prefix = "valid_"
        distmat = storage.get_data("distmat")
        if distmat is None:
            # Compute once and cache for reuse by other metrics.
            distmat = compute_distances(qf, gf)
            storage.set_data("distmat", distmat)
        # Pair masks over the (num_query, num_gallery) distance matrix.
        same_pid = gpids.eq(qpids.reshape(-1, 1))
        same_cam = gcamids.eq(qcamids.reshape(-1, 1))
        negative: torch.Tensor = ~same_pid
        positive: torch.Tensor = same_pid
        positive_same_cam = torch.logical_and(same_pid, same_cam)
        positive_diff_cam = torch.logical_and(same_pid, ~same_cam)
        if self.train:
            # filter out identical instances from positive distances
            same_image = torch.diagflat(torch.ones(qf.size(0), dtype=torch.bool, device=qf.device))
            positive.logical_and_(~same_image)
            positive_same_cam.logical_and_(~same_image)
        tracker.update(prefix + "global_dist_pos_same_cam_mean", distmat[positive_same_cam].mean().item())
        tracker.update(prefix + "global_dist_pos_diff_cam_mean", distmat[positive_diff_cam].mean().item())
        tracker.update(prefix + "global_dist_pos_mean", distmat[positive].mean().item())
        tracker.update(prefix + "global_dist_neg_mean", distmat[negative].mean().item())
        tracker.append_histogram(prefix + "global_dist_pos_same_cam", distmat[positive_same_cam])
        tracker.append_histogram(prefix + "global_dist_pos_diff_cam", distmat[positive_diff_cam])
        tracker.append_histogram(prefix + "global_dist_pos", distmat[positive])
        tracker.append_histogram(prefix + "global_dist_neg", distmat[negative])
import warnings
import torch
from reid_evaluation.metric import evaluate, compute_distances
from utils import MetricTracker, SharedStorage
class ActiveMetric:
"""Metric class that actively interacts with MetricTracker and SharedStorage to track metrics,
during end-of-step and end-of-epoch callbacks.
"""
def on_step_end(self, items: collections.Mapping, tracker: MetricTracker, storage: SharedStorage):
"""
:param items: output from data loader and model during a single step
:param tracker: metric tracker to write metrics to
:param storage: storage to interact with. Note that writing data to storage should be handled by
the trainer. A common use-case would be to write to the metadata dictionary.
:return:
"""
pass
def on_epoch_end(self, tracker: MetricTracker, storage: SharedStorage):
pass
def _accuracy(output, target):
with torch.no_grad():
pred = torch.argmax(output, dim=1)
assert pred.shape[0] == len(target)
correct = 0
correct += torch.sum(pred == target).item()
return correct / len(target)
def _top_k_acc(output, target, k=3):
warnings.warn("This metric isn't adapted to the current project. You'll probably get an error")
with torch.no_grad():
pred = torch.topk(output, k, dim=1)[1]
assert pred.shape[0] == len(target)
correct = 0
for i in range(k):
correct += torch.sum(pred[:, i] == target).item()
return correct / len(target)
class Accuracy(ActiveMetric):
def __init__(self, output_key="preds", target_key="targets", name="accuracy"):
if type(name) is not str:
raise Exception("name must be a valid string")
self.__name__ = str(name)
self.output_key = output_key
self.target_key = target_key
def on_step_end(self, items: collections.Mapping, tracker: MetricTracker, storage: SharedStorage):
output, target = items[self.output_key], items[self.target_key]
value = _accuracy(output, target)
tracker.update("accuracy", value, n=output.size(0))
class ReidMetric(ActiveMetric):
def __init__(self):
pass
def on_epoch_end(self, tracker: MetricTracker, storage: SharedStorage):
qpids = storage.get_data("qpids")
gpids = storage.get_data("gpids")
qcamids = storage.get_data("qcamids")
gcamids = storage.get_data("gcamids")
distmat = storage.get_data("distmat")
if distmat is None:
qf = storage.get_data("qf")
gf = storage.get_data("gf")
distmat = compute_distances(qf, gf)
storage.set_data("distmat", distmat)
all_cmc, all_AP, all_INP = evaluate(distmat, qpids, gpids, qcamids, gcamids)
r1 = all_cmc[0].item()
mAP = all_AP.mean().item()
mINP = all_INP.mean().item()
tracker.update("r1", r1)
tracker.update("mAP", mAP)
tracker.update("mINP", mINP)
class ReidGlobalDistanceHistogram(ActiveMetric):
def __init__(self, train=False):
self.train = train
def on_epoch_end(self, tracker: MetricTracker, storage: SharedStorage):
if self.train:
qf = gf = storage.get_data("features")
qpids = gpids = storage.get_data("pids")
qcamids = gcamids = storage.get_data("camids")
prefix = ""
else:
qf = storage.get_data("qf")
gf = storage.get_data("gf")
qpids = storage.get_data("qpids")
gpids = storage.get_data("gpids")
qcamids = storage.get_data("qcamids")
gcamids = storage.get_data("gcamids")
prefix = "valid_"
distmat = storage.get_data("distmat")
if distmat is None:
distmat = compute_distances(qf, gf)
storage.set_data("distmat", distmat)
same_pid = gpids.eq(qpids.reshape(-1, 1))
same_cam = gcamids.eq(qcamids.reshape(-1, 1))
negative: torch.Tensor = ~same_pid
positive: torch.Tensor = same_pid
positive_same_cam = torch.logical_and(same_pid, same_cam)
positive_diff_cam = torch.logical_and(same_pid, ~same_cam)
if self.train:
# filter out identical instances from positive distances
same_image = torch.diagflat(torch.ones(qf.size(0), dtype=torch.bool, device=qf.device))
positive.logical_and_(~same_image)
positive_same_cam.logical_and_(~same_image)
tracker.update(prefix + "global_dist_pos_same_cam_mean", distmat[positive_same_cam].mean().item())
tracker.update(prefix + "global_dist_pos_diff_cam_mean", distmat[positive_diff_cam].mean().item())
tracker.update(prefix + "global_dist_pos_mean", distmat[positive].mean().item())
tracker.update(prefix + "global_dist_neg_mean", distmat[negative].mean().item())
tracker.append_histogram(prefix + "global_dist_pos_same_cam", distmat[positive_same_cam])
tracker.append_histogram(prefix + "global_dist_pos_diff_cam", distmat[positive_diff_cam])
tracker.append_histogram(prefix + "global_dist_pos", distmat[positive])
tracker.append_histogram(prefix + "global_dist_neg", distmat[negative]) | 0.816333 | 0.58166 |
from mvsnet import preprocess as pp
import imageio
import argparse
import json
import utils
import os
"""
Converts DTU depth data from the format that is consumed by the original MVSNet
to the mvs-training format that is created by export_densify_frames
"""
def convert_dtu(dtu_dir, output_dir):
    """Convert DTU depth data from the original-MVSNet layout to mvs-training.

    For every scan and each of its 7 lighting conditions, a session directory
    is created under *output_dir* containing rescaled camera JSONs, PNG depth
    maps, JPEG images and a covisibility.json derived from pair.txt.
    """
    camera_base = os.path.join(dtu_dir, 'Cameras')
    camera_dir = os.path.join(dtu_dir, 'Cameras')
    depths_base = os.path.join(dtu_dir, 'Depths')
    images_base = os.path.join(dtu_dir, 'Rectified')
    pair_path = os.path.join(camera_base, 'pair.txt')
    num_scans = len(utils.list_no_hidden(images_base))
    print("Number of scans = ", num_scans)
    for index, scan in enumerate(sorted(utils.list_no_hidden(images_base))):
        # NOTE(review): hard-coded resume point -- scans 0..43 are skipped,
        # presumably from an interrupted earlier run; confirm before reuse.
        if index > 43:
            print("Processing scan", index)
            # For each dtu scan session there are 7 different lighting settings
            for lighting in range(7):
                session_dir = os.path.join(
                    output_dir, 'dtu_scan_{}_lighting_{}'.format(index, lighting))
                os.makedirs(session_dir)
                session_images = os.path.join(session_dir, 'images')
                session_depths = os.path.join(session_dir, 'depths')
                session_cams = os.path.join(session_dir, 'cameras')
                os.makedirs(session_images)
                os.makedirs(session_depths)
                os.makedirs(session_cams)
                covis_path = os.path.join(session_dir, 'covisibility.json')
                depths_dir = os.path.join(depths_base, scan)
                images_dir = os.path.join(images_base, scan)
                utils.pair_to_covisibility(pair_path, covis_path)
                # DTU has 49 camera views per scan.
                for cam_idx in range(49):
                    txt_path = os.path.join(camera_dir, utils.cam_name(cam_idx))
                    json_path = os.path.join(session_cams, '{}.json'.format(cam_idx))
                    # cams need to be rescaled due to image resizing
                    rescale = 512.0 / 1200.0
                    utils.cam_to_json(txt_path, json_path, scale_factor=rescale)
                for view_idx in range(49):
                    png_path = os.path.join(session_depths, '{}.png'.format(view_idx))
                    pfm_path = os.path.join(depths_dir, utils.depth_name(view_idx))
                    utils.depth_pfm_to_png(pfm_path, png_path)
                    image_path = os.path.join(images_dir, utils.image_name(view_idx, lighting))
                    final_image_path = os.path.join(session_images, '{}.jpg'.format(view_idx))
                    img = imageio.imread(image_path)
                    imageio.imwrite(final_image_path, img)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # BUG FIX: help text typo "Diretory" -> "Directory".
    parser.add_argument('dtu_dir', type=str,
                        help="Directory where dtu data is")
    parser.add_argument('output_dir', type=str,
                        help="Directory to output the converted data")
    args = parser.parse_args()
    convert_dtu(args.dtu_dir, args.output_dir)
import imageio
import argparse
import json
import utils
import os
"""
Converts DTU depth data from the format that is consumed by the original MVSNet
to the mvs-training format that is created by export_densify_frames
"""
def convert_dtu(dtu_dir, output_dir):
camera_base = os.path.join(dtu_dir, 'Cameras')
camera_dir = os.path.join(dtu_dir, 'Cameras')
depths_base = os.path.join(dtu_dir, 'Depths')
images_base = os.path.join(dtu_dir, 'Rectified')
pair_path = os.path.join(camera_base, 'pair.txt')
num_scans = len((utils.list_no_hidden(images_base)))
print("Number of scans = ", num_scans)
for index, scan in enumerate(sorted(utils.list_no_hidden(images_base))):
if index > 43:
print("Processing scan", index)
# For each dtu scan session there are 7 different lighting settings
for l in range(7):
session_dir = os.path.join(
output_dir, 'dtu_scan_{}_lighting_{}'.format(index, l))
os.makedirs(session_dir)
session_images = os.path.join(session_dir, 'images')
session_depths = os.path.join(session_dir, 'depths')
session_cams = os.path.join(session_dir, 'cameras')
os.makedirs(session_images)
os.makedirs(session_depths)
os.makedirs(session_cams)
covis_path = os.path.join(session_dir, 'covisibility.json')
depths_dir = os.path.join(depths_base, scan)
images_dir = os.path.join(images_base, scan)
utils.pair_to_covisibility(pair_path, covis_path)
for i in range(49):
txt_path = os.path.join(camera_dir, utils.cam_name(i))
json_path = os.path.join(session_cams, '{}.json'.format(i))
# cams need to be rescaled due to image resizing
rescale = 512.0 / 1200.0
utils.cam_to_json(txt_path, json_path,
scale_factor=rescale)
for j in range(49):
png_path = os.path.join(session_depths, '{}.png'.format(j))
pfm_path = os.path.join(depths_dir, utils.depth_name(j))
utils.depth_pfm_to_png(pfm_path, png_path)
image_path = os.path.join(
images_dir, utils.image_name(j, l))
final_image_path = os.path.join(
session_images, '{}.jpg'.format(j))
img = imageio.imread(image_path)
imageio.imwrite(final_image_path, img)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dtu_dir', type=str,
help="Diretory where dtu data is")
parser.add_argument('output_dir', type=str,
help="Directory to output the converted data")
args = parser.parse_args()
convert_dtu(args.dtu_dir, args.output_dir) | 0.378 | 0.23865 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
import matplotlib.patches as patches
from LucasKanadeBasis import *
from LucasKanade import *
from TemplateCorrection import *
import time
def copyRect(rect):
    """Return a shallow copy of *rect* so later in-place updates don't alias it."""
    return [element for element in rect]
# write your script here, we recommend the above libraries for making your animation
bases = np.load('../data/sylvbases.npy')
frames = np.load('../data/sylvseq.npy')
seq_len = frames.shape[2]
frame0 = frames[:, :, 0]
# Initial tracking window [x1, y1, x2, y2] for both trackers.
rect = [101, 61, 155, 107]
rect_baseline = [101, 61, 155, 107]
width = rect[3] - rect[1]
length = rect[2] - rect[0]
# NOTE(review): these lists are seeded with copyRect(frame0) -- a copy of the
# image rows, not a rect. Entry 0 is never read below, but confirm the intent
# was not copyRect(rect).
rectList = [copyRect(frame0)]
rectList_baseline = [copyRect(frame0)]
time_total = 0
# since template drifting uses only the first ever frame
# lots of things can be pre-computed here
rows_img, cols_img = frame0.shape
x1, y1, x2, y2 = rect[0], rect[1], rect[2], rect[3]
rows_rect, cols_rect = x2 - x1, y2 - y1
y = np.arange(0, rows_img, 1)
x = np.arange(0, cols_img, 1)
c = np.linspace(x1, x2, cols_rect)
r = np.linspace(y1, y2, rows_rect)
cc, rr = np.meshgrid(c, r)
spline = RectBivariateSpline(y, x, frame0)
T = spline.ev(rr, cc)
# Apply LucasKanadeWithTemplateCorrection Algorithm (baseline tracker)
for i in range(seq_len):
    if i == 0:
        continue
    It = frames[:, :, i - 1]
    It1 = frames[:, :, i]
    p_baseline = LucasKanade(It, It1, rect_baseline)
    rect_baseline[0] += p_baseline[0]
    rect_baseline[1] += p_baseline[1]
    rect_baseline[2] += p_baseline[0]
    rect_baseline[3] += p_baseline[1]
    TemplateCorrection(T, It1, rect_baseline)
    rectList_baseline.append(copyRect(rect_baseline))
# Apply LucasKanadeBasis Algorithm (appearance-basis tracker), timing each frame
for i in range(seq_len):
    if i == 0:
        continue
    print("Processing frame %d" % i)
    start = time.time()
    It = frames[:, :, i - 1]
    It1 = frames[:, :, i]
    p = LucasKanadeBasis(It, It1, rect, bases)
    rect[0] += p[0]
    rect[1] += p[1]
    rect[2] += p[0]
    rect[3] += p[1]
    end = time.time()
    time_total += end - start
    rectList.append(copyRect(rect))
    # Overlay both trackers' boxes on a handful of reference frames.
    if i == 1 or i == 100 or i == 200 or i == 300 or i == 350 or i == 400:
        plt.figure()
        plt.imshow(frames[:, :, i], cmap='gray')
        bbox1 = patches.Rectangle((int(rectList[i][0]), int(rectList[i][1])), length, width,
                                  fill=False, edgecolor='blue', linewidth=2)
        plt.gca().add_patch(bbox1)
        bbox0 = patches.Rectangle((int(rectList_baseline[i][0]), int(rectList_baseline[i][1])), length, width,
                                  fill=False, edgecolor='red', linewidth=2)
        plt.gca().add_patch(bbox0)
        plt.title('frame %d' % i)
        plt.show()
np.save('Sylvseqrects.npy', rectList)
print('Finished, the tracking frequency is %.4f' % (seq_len / time_total))
import matplotlib.pyplot as plt
from matplotlib import animation
import matplotlib.patches as patches
from LucasKanadeBasis import *
from LucasKanade import *
from TemplateCorrection import *
import time
def copyRect(rect):
rect_new = []
for ele in rect:
rect_new += [ele]
return rect_new
# write your script here, we recommend the above libraries for making your animation
bases = np.load('../data/sylvbases.npy')
frames = np.load('../data/sylvseq.npy')
seq_len = frames.shape[2]
frame0 = frames[:,:,0]
rect = [101, 61, 155, 107]
rect_baseline = [101, 61, 155, 107]
width = rect[3] - rect[1]
length = rect[2] - rect[0]
rectList = [copyRect(frame0)]
rectList_baseline = [copyRect(frame0)]
time_total = 0
# since template driftingb uses only the first ever frame
# lots of things can be pre-computed here
rows_img, cols_img = frame0.shape
x1, y1, x2, y2 = rect[0], rect[1], rect[2], rect[3]
rows_rect, cols_rect = x2 - x1, y2 - y1
y = np.arange(0, rows_img, 1)
x = np.arange(0, cols_img, 1)
c = np.linspace(x1, x2, cols_rect)
r = np.linspace(y1, y2, rows_rect)
cc, rr = np.meshgrid(c, r)
spline = RectBivariateSpline(y, x, frame0)
T = spline.ev(rr, cc)
# Apply LucasKanadeWithTemplateCorrection algorithm (baseline, red boxes).
# range(1, seq_len) replaces the range(seq_len)+continue pattern.
for i in range(1, seq_len):
    It = frames[:, :, i - 1]
    It1 = frames[:, :, i]
    p_baseline = LucasKanade(It, It1, rect_baseline)
    rect_baseline[0] += p_baseline[0]
    rect_baseline[1] += p_baseline[1]
    rect_baseline[2] += p_baseline[0]
    rect_baseline[3] += p_baseline[1]
    # Drift correction against the first-frame template T (mutates rect_baseline).
    TemplateCorrection(T, It1, rect_baseline)
    rectList_baseline.append(copyRect(rect_baseline))
# Apply LucasKanadeBasis algorithm (appearance-basis tracker, blue boxes).
for i in range(1, seq_len):
    print("Processing frame %d" % i)
    start = time.time()
    It = frames[:, :, i - 1]
    It1 = frames[:, :, i]
    p = LucasKanadeBasis(It, It1, rect, bases)
    rect[0] += p[0]
    rect[1] += p[1]
    rect[2] += p[0]
    rect[3] += p[1]
    end = time.time()
    time_total += end - start
    rectList.append(copyRect(rect))
    # Show both trackers' boxes on a few representative frames.
    if i in (1, 100, 200, 300, 350, 400):
        plt.figure()
        plt.imshow(frames[:, :, i], cmap='gray')
        bbox1 = patches.Rectangle((int(rectList[i][0]), int(rectList[i][1])), length, width,
                                  fill=False, edgecolor='blue', linewidth=2)
        plt.gca().add_patch(bbox1)
        bbox0 = patches.Rectangle((int(rectList_baseline[i][0]), int(rectList_baseline[i][1])), length, width,
                                  fill=False, edgecolor='red', linewidth=2)
        plt.gca().add_patch(bbox0)
        plt.title('frame %d' % i)
        plt.show()
np.save('Sylvseqrects.npy', rectList)
# Only the basis tracker's per-frame compute time is accumulated above.
print('Finished, the tracking frequency is %.4f' % (seq_len / time_total))
from monitor_server import SERVER
if SERVER.config['SQLALCHEMY_DATABASE_URI'].startswith('sqlite:'):
from sqlalchemy.dialects.sqlite.json import JSON
else:
from sqlalchemy.dialects.postgresql.json import JSON
class MetricModel(SERVER.DB.Model):
    """One row per executed test item: identity, location and resource metrics.

    Linked to the test session (TEST_SESSIONS) it ran in and to the
    execution context (EXECUTION_CONTEXTS) it ran under.
    """
    __tablename__ = 'TEST_METRICS'
    # Surrogate primary key.
    test_id = SERVER.DB.Column('ITEM_PK', SERVER.DB.Integer, primary_key=True)
    # Hash identifying the session this metric belongs to.
    session_h = SERVER.DB.Column('SESSION_H', SERVER.DB.ForeignKey('TEST_SESSIONS.SESSION_H'))
    # Hash identifying the machine/python environment the test ran under.
    ctx_h = SERVER.DB.Column('CONTEXT_H', SERVER.DB.ForeignKey('EXECUTION_CONTEXTS.ENV_H'))
    item_start_time = SERVER.DB.Column('ITEM_START_TIME', SERVER.DB.String(64), nullable=False)
    item_path = SERVER.DB.Column('ITEM_PATH', SERVER.DB.String(4096), nullable=False)
    item = SERVER.DB.Column('ITEM', SERVER.DB.String(2048), nullable=False)
    item_variant = SERVER.DB.Column('ITEM_VARIANT', SERVER.DB.String(2048), nullable=False)
    item_fs_loc = SERVER.DB.Column('ITEM_FS_LOC', SERVER.DB.String(2048), nullable=False)
    kind = SERVER.DB.Column('KIND', SERVER.DB.String(64), nullable=False)
    component = SERVER.DB.Column('COMPONENT', SERVER.DB.String(512), nullable=True)
    # NOTE: attribute is wall_time but the column is named TOTAL_TIME.
    wall_time = SERVER.DB.Column('TOTAL_TIME', SERVER.DB.Float, nullable=False)
    user_time = SERVER.DB.Column('USER_TIME', SERVER.DB.Float, nullable=False)
    krnl_time = SERVER.DB.Column('KERNEL_TIME', SERVER.DB.Float, nullable=False)
    cpu_usage = SERVER.DB.Column('CPU_USAGE', SERVER.DB.Float, nullable=False)
    mem_usage = SERVER.DB.Column('MEM_USAGE', SERVER.DB.Float, nullable=False)
class ExecutionContextModel(SERVER.DB.Model):
    """Hardware/OS/Python environment a set of test metrics was collected on.

    Keyed by an environment hash so identical machines share one row.
    """
    __tablename__ = 'EXECUTION_CONTEXTS'
    # Environment hash; MetricModel.ctx_h points here.
    h = SERVER.DB.Column('ENV_H', SERVER.DB.String(64), primary_key=True, nullable=False)
    cpu_count = SERVER.DB.Column('CPU_COUNT', SERVER.DB.Integer, nullable=False)
    cpu_freq = SERVER.DB.Column('CPU_FREQUENCY_MHZ', SERVER.DB.Integer, nullable=False)
    cpu_type = SERVER.DB.Column('CPU_TYPE', SERVER.DB.String(64), nullable=False)
    cpu_vendor = SERVER.DB.Column('CPU_VENDOR', SERVER.DB.String(256), nullable=True)
    ram_total = SERVER.DB.Column('RAM_TOTAL_MB', SERVER.DB.Integer, nullable=False)
    mac_node = SERVER.DB.Column('MACHINE_NODE', SERVER.DB.String(512), nullable=False)
    mac_type = SERVER.DB.Column('MACHINE_TYPE', SERVER.DB.String(32), nullable=False)
    mac_arch = SERVER.DB.Column('MACHINE_ARCH', SERVER.DB.String(16), nullable=False)
    sys_info = SERVER.DB.Column('SYSTEM_INFO', SERVER.DB.String(256), nullable=False)
    py_info = SERVER.DB.Column('PYTHON_INFO', SERVER.DB.String(512), nullable=False)
    # Reverse relationship: all metrics recorded under this context.
    ctx_h_rel = SERVER.DB.relationship('MetricModel', backref='exec_ctx', lazy=True)
class SessionModel(SERVER.DB.Model):
    """One test-session run: when it ran, what SCM revision, free-form tags."""
    __tablename__ = 'TEST_SESSIONS'
    # Session hash; MetricModel.session_h points here.
    h = SERVER.DB.Column('SESSION_H', SERVER.DB.String(64), primary_key=True, nullable=False)
    run_date = SERVER.DB.Column('RUN_DATE', SERVER.DB.String(64), nullable=False)
    scm_ref = SERVER.DB.Column('SCM_REF', SERVER.DB.String(128), nullable=True)
    # Arbitrary JSON payload describing the session.
    description = SERVER.DB.Column('DESCRIPTION', SERVER.DB.JSON(), nullable=True)
    # Reverse relationship: all metrics recorded in this session.
    session_h_rel = SERVER.DB.relationship('MetricModel', backref='sessions_ctx', lazy=True)
from monitor_server import SERVER
if SERVER.config['SQLALCHEMY_DATABASE_URI'].startswith('sqlite:'):
from sqlalchemy.dialects.sqlite.json import JSON
else:
from sqlalchemy.dialects.postgresql.json import JSON
class MetricModel(SERVER.DB.Model):
__tablename__ = 'TEST_METRICS'
test_id = SERVER.DB.Column('ITEM_PK', SERVER.DB.Integer, primary_key=True)
session_h = SERVER.DB.Column('SESSION_H', SERVER.DB.ForeignKey('TEST_SESSIONS.SESSION_H'))
ctx_h = SERVER.DB.Column('CONTEXT_H', SERVER.DB.ForeignKey('EXECUTION_CONTEXTS.ENV_H'))
item_start_time = SERVER.DB.Column('ITEM_START_TIME', SERVER.DB.String(64), nullable=False)
item_path = SERVER.DB.Column('ITEM_PATH', SERVER.DB.String(4096), nullable=False)
item = SERVER.DB.Column('ITEM', SERVER.DB.String(2048), nullable=False)
item_variant = SERVER.DB.Column('ITEM_VARIANT', SERVER.DB.String(2048), nullable=False)
item_fs_loc = SERVER.DB.Column('ITEM_FS_LOC', SERVER.DB.String(2048), nullable=False)
kind = SERVER.DB.Column('KIND', SERVER.DB.String(64), nullable=False)
component = SERVER.DB.Column('COMPONENT', SERVER.DB.String(512), nullable=True)
wall_time = SERVER.DB.Column('TOTAL_TIME', SERVER.DB.Float, nullable=False)
user_time = SERVER.DB.Column('USER_TIME', SERVER.DB.Float, nullable=False)
krnl_time = SERVER.DB.Column('KERNEL_TIME', SERVER.DB.Float, nullable=False)
cpu_usage = SERVER.DB.Column('CPU_USAGE', SERVER.DB.Float, nullable=False)
mem_usage = SERVER.DB.Column('MEM_USAGE', SERVER.DB.Float, nullable=False)
class ExecutionContextModel(SERVER.DB.Model):
__tablename__ = 'EXECUTION_CONTEXTS'
h = SERVER.DB.Column('ENV_H', SERVER.DB.String(64), primary_key=True, nullable=False)
cpu_count = SERVER.DB.Column('CPU_COUNT', SERVER.DB.Integer, nullable=False)
cpu_freq = SERVER.DB.Column('CPU_FREQUENCY_MHZ', SERVER.DB.Integer, nullable=False)
cpu_type = SERVER.DB.Column('CPU_TYPE', SERVER.DB.String(64), nullable=False)
cpu_vendor = SERVER.DB.Column('CPU_VENDOR', SERVER.DB.String(256), nullable=True)
ram_total = SERVER.DB.Column('RAM_TOTAL_MB', SERVER.DB.Integer, nullable=False)
mac_node = SERVER.DB.Column('MACHINE_NODE', SERVER.DB.String(512), nullable=False)
mac_type = SERVER.DB.Column('MACHINE_TYPE', SERVER.DB.String(32), nullable=False)
mac_arch = SERVER.DB.Column('MACHINE_ARCH', SERVER.DB.String(16), nullable=False)
sys_info = SERVER.DB.Column('SYSTEM_INFO', SERVER.DB.String(256), nullable=False)
py_info = SERVER.DB.Column('PYTHON_INFO', SERVER.DB.String(512), nullable=False)
ctx_h_rel = SERVER.DB.relationship('MetricModel', backref='exec_ctx', lazy=True)
class SessionModel(SERVER.DB.Model):
__tablename__ = 'TEST_SESSIONS'
h = SERVER.DB.Column('SESSION_H', SERVER.DB.String(64), primary_key=True, nullable=False)
run_date = SERVER.DB.Column('RUN_DATE', SERVER.DB.String(64), nullable=False)
scm_ref = SERVER.DB.Column('SCM_REF', SERVER.DB.String(128), nullable=True)
description = SERVER.DB.Column('DESCRIPTION', SERVER.DB.JSON(), nullable=True)
session_h_rel = SERVER.DB.relationship('MetricModel', backref='sessions_ctx', lazy=True) | 0.304972 | 0.057388 |
import tempfile
import os
from sqlite3 import OperationalError
import pytest
import hypothesis.strategies as hst
from hypothesis import given
import unicodedata
import qcodes as qc
import qcodes.dataset.sqlite_base as mut # mut: module under test
from qcodes.dataset.database import initialise_database
from qcodes.dataset.param_spec import ParamSpec
_unicode_categories = ('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nd', 'Pc', 'Pd', 'Zs')
@pytest.fixture(scope="function")
def empty_temp_db():
    """Point qcodes at a fresh, initialised database in a temp directory.

    The TemporaryDirectory (and with it the db file) is removed when the
    fixture finalises after each test.
    """
    # create a temp database for testing
    with tempfile.TemporaryDirectory() as tmpdirname:
        qc.config["core"]["db_location"] = os.path.join(tmpdirname, 'temp.db')
        qc.config["core"]["db_debug"] = True
        initialise_database()
        yield
@pytest.fixture(scope='function')
def experiment(empty_temp_db):
    """Yield a new experiment in the temporary db; close its connection after."""
    e = qc.new_experiment("test-experiment", sample_name="test-sample")
    yield e
    e.conn.close()
def test_one_raises(experiment):
    """mut.one must raise when asked for a column that does not exist."""
    conn = experiment.conn
    with pytest.raises(RuntimeError):
        mut.one(conn.cursor(), column='Something_you_dont_have')
def test_atomic_transaction_raises(experiment):
    """atomic_transaction must propagate sqlite errors for invalid SQL."""
    conn = experiment.conn
    bad_sql = '""'  # syntactically invalid statement
    with pytest.raises(OperationalError):
        mut.atomic_transaction(conn, bad_sql)
def test_atomic_raises(experiment):
    """A failing transaction inside mut.atomic must raise out of the context."""
    conn = experiment.conn
    bad_sql = '""'  # syntactically invalid statement
    # it seems that the type of error raised differs between python versions
    # 3.6.0 (OperationalError) and 3.6.3 (RuntimeError)
    # -strange, huh?
    with pytest.raises((OperationalError, RuntimeError)):
        with mut.atomic(conn):
            mut.transaction(conn, bad_sql)
def test_insert_many_values_raises(experiment):
    """Rows of unequal length must be rejected with ValueError."""
    conn = experiment.conn
    with pytest.raises(ValueError):
        mut.insert_many_values(conn, 'some_string', ['column1'],
                               values=[[1], [1, 3]])
@given(table_name=hst.text(max_size=50))
def test__validate_table_raises(table_name):
    """_validate_table_name must reject names containing any character
    outside the allowed unicode categories, and accept everything else.
    """
    # any() replaces the original manual flag-and-break loop.
    should_raise = any(
        unicodedata.category(char) not in _unicode_categories
        for char in table_name
    )
    if should_raise:
        with pytest.raises(RuntimeError):
            mut._validate_table_name(table_name)
    else:
        assert mut._validate_table_name(table_name)
def test_get_dependents(experiment):
    """get_dependents must return the layout ids of exactly the parameters
    that depend on others, in both a simple and a multi-dependency run.
    """
    x = ParamSpec('x', 'numeric')
    t = ParamSpec('t', 'numeric')
    # y is the only dependent parameter in the first run.
    y = ParamSpec('y', 'numeric', depends_on=['x', 't'])
    # Make a dataset
    (_, run_id, _) = mut.create_run(experiment.conn,
                                    experiment.exp_id,
                                    name='testrun',
                                    parameters=[x, t, y])
    deps = mut.get_dependents(experiment.conn, run_id)
    layout_id = mut.get_layout_id(experiment.conn,
                                  'y', run_id)
    assert deps == [layout_id]
    # more parameters, more complicated dependencies
    x_raw = ParamSpec('x_raw', 'numeric')
    x_cooked = ParamSpec('x_cooked', 'numeric', inferred_from=['x_raw'])
    z = ParamSpec('z', 'numeric', depends_on=['x_cooked'])
    (_, run_id, _) = mut.create_run(experiment.conn,
                                    experiment.exp_id,
                                    name='testrun',
                                    parameters=[x, t, x_raw,
                                                x_cooked, y, z])
    # Only y and z have depends_on; x_cooked is inferred, not dependent.
    deps = mut.get_dependents(experiment.conn, run_id)
    expected_deps = [mut.get_layout_id(experiment.conn, 'y', run_id),
                     mut.get_layout_id(experiment.conn, 'z', run_id)]
    assert deps == expected_deps
import tempfile
import os
from sqlite3 import OperationalError
import pytest
import hypothesis.strategies as hst
from hypothesis import given
import unicodedata
import qcodes as qc
import qcodes.dataset.sqlite_base as mut # mut: module under test
from qcodes.dataset.database import initialise_database
from qcodes.dataset.param_spec import ParamSpec
_unicode_categories = ('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nd', 'Pc', 'Pd', 'Zs')
@pytest.fixture(scope="function")
def empty_temp_db():
# create a temp database for testing
with tempfile.TemporaryDirectory() as tmpdirname:
qc.config["core"]["db_location"] = os.path.join(tmpdirname, 'temp.db')
qc.config["core"]["db_debug"] = True
initialise_database()
yield
@pytest.fixture(scope='function')
def experiment(empty_temp_db):
e = qc.new_experiment("test-experiment", sample_name="test-sample")
yield e
e.conn.close()
def test_one_raises(experiment):
conn = experiment.conn
with pytest.raises(RuntimeError):
mut.one(conn.cursor(), column='Something_you_dont_have')
def test_atomic_transaction_raises(experiment):
conn = experiment.conn
bad_sql = '""'
with pytest.raises(OperationalError):
mut.atomic_transaction(conn, bad_sql)
def test_atomic_raises(experiment):
conn = experiment.conn
bad_sql = '""'
# it seems that the type of error raised differs between python versions
# 3.6.0 (OperationalError) and 3.6.3 (RuntimeError)
# -strange, huh?
with pytest.raises((OperationalError, RuntimeError)):
with mut.atomic(conn):
mut.transaction(conn, bad_sql)
def test_insert_many_values_raises(experiment):
conn = experiment.conn
with pytest.raises(ValueError):
mut.insert_many_values(conn, 'some_string', ['column1'],
values=[[1], [1, 3]])
@given(table_name=hst.text(max_size=50))
def test__validate_table_raises(table_name):
should_raise = False
for char in table_name:
if unicodedata.category(char) not in _unicode_categories:
should_raise = True
break
if should_raise:
with pytest.raises(RuntimeError):
mut._validate_table_name(table_name)
else:
assert mut._validate_table_name(table_name)
def test_get_dependents(experiment):
x = ParamSpec('x', 'numeric')
t = ParamSpec('t', 'numeric')
y = ParamSpec('y', 'numeric', depends_on=['x', 't'])
# Make a dataset
(_, run_id, _) = mut.create_run(experiment.conn,
experiment.exp_id,
name='testrun',
parameters=[x, t, y])
deps = mut.get_dependents(experiment.conn, run_id)
layout_id = mut.get_layout_id(experiment.conn,
'y', run_id)
assert deps == [layout_id]
# more parameters, more complicated dependencies
x_raw = ParamSpec('x_raw', 'numeric')
x_cooked = ParamSpec('x_cooked', 'numeric', inferred_from=['x_raw'])
z = ParamSpec('z', 'numeric', depends_on=['x_cooked'])
(_, run_id, _) = mut.create_run(experiment.conn,
experiment.exp_id,
name='testrun',
parameters=[x, t, x_raw,
x_cooked, y, z])
deps = mut.get_dependents(experiment.conn, run_id)
expected_deps = [mut.get_layout_id(experiment.conn, 'y', run_id),
mut.get_layout_id(experiment.conn, 'z', run_id)]
assert deps == expected_deps | 0.384334 | 0.46794 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# NOTE: this module is protoc-generated output (see the
# @@protoc_insertion_point markers); do not edit by hand —
# regenerate from pogoprotos/data/friends/friend.proto instead.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='pogoprotos/data/friends/friend.proto',
  package='pogoprotos.data.friends',
  syntax='proto3',
  serialized_pb=_b('\n$pogoprotos/data/friends/friend.proto\x12\x17pogoprotos.data.friends\"\x85\x01\n\x06\x46riend\x12\x11\n\tplayer_id\x18\x01 \x01(\t\x12\x10\n\x08\x63odename\x18\x02 \x01(\t\x12\x0c\n\x04team\x18\x03 \x01(\t\x12\r\n\x05score\x18\x04 \x01(\x05\x12\x14\n\x0c\x64\x61ta_with_me\x18\x05 \x01(\x0c\x12\x0f\n\x07version\x18\x06 \x01(\x03\x12\x12\n\ncreated_ms\x18\x07 \x01(\x03\x62\x06proto3')
)
_FRIEND = _descriptor.Descriptor(
name='Friend',
full_name='pogoprotos.data.friends.Friend',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='player_id', full_name='pogoprotos.data.friends.Friend.player_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='codename', full_name='pogoprotos.data.friends.Friend.codename', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='team', full_name='pogoprotos.data.friends.Friend.team', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score', full_name='pogoprotos.data.friends.Friend.score', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data_with_me', full_name='pogoprotos.data.friends.Friend.data_with_me', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='pogoprotos.data.friends.Friend.version', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='created_ms', full_name='pogoprotos.data.friends.Friend.created_ms', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=66,
serialized_end=199,
)
DESCRIPTOR.message_types_by_name['Friend'] = _FRIEND
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Friend = _reflection.GeneratedProtocolMessageType('Friend', (_message.Message,), dict(
DESCRIPTOR = _FRIEND,
__module__ = 'pogoprotos.data.friends.friend_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.data.friends.Friend)
))
_sym_db.RegisterMessage(Friend)
# @@protoc_insertion_point(module_scope) | pgoapi/protos/pogoprotos/data/friends/friend_pb2.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/data/friends/friend.proto',
package='pogoprotos.data.friends',
syntax='proto3',
serialized_pb=_b('\n$pogoprotos/data/friends/friend.proto\x12\x17pogoprotos.data.friends\"\x85\x01\n\x06\x46riend\x12\x11\n\tplayer_id\x18\x01 \x01(\t\x12\x10\n\x08\x63odename\x18\x02 \x01(\t\x12\x0c\n\x04team\x18\x03 \x01(\t\x12\r\n\x05score\x18\x04 \x01(\x05\x12\x14\n\x0c\x64\x61ta_with_me\x18\x05 \x01(\x0c\x12\x0f\n\x07version\x18\x06 \x01(\x03\x12\x12\n\ncreated_ms\x18\x07 \x01(\x03\x62\x06proto3')
)
_FRIEND = _descriptor.Descriptor(
name='Friend',
full_name='pogoprotos.data.friends.Friend',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='player_id', full_name='pogoprotos.data.friends.Friend.player_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='codename', full_name='pogoprotos.data.friends.Friend.codename', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='team', full_name='pogoprotos.data.friends.Friend.team', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score', full_name='pogoprotos.data.friends.Friend.score', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data_with_me', full_name='pogoprotos.data.friends.Friend.data_with_me', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='pogoprotos.data.friends.Friend.version', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='created_ms', full_name='pogoprotos.data.friends.Friend.created_ms', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=66,
serialized_end=199,
)
DESCRIPTOR.message_types_by_name['Friend'] = _FRIEND
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Friend = _reflection.GeneratedProtocolMessageType('Friend', (_message.Message,), dict(
DESCRIPTOR = _FRIEND,
__module__ = 'pogoprotos.data.friends.friend_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.data.friends.Friend)
))
_sym_db.RegisterMessage(Friend)
# @@protoc_insertion_point(module_scope) | 0.16248 | 0.112162 |
import os
import time
import yaml
from pathlib import Path
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from gammapy.modeling.models import (
SkyModel,
ExpCutoffPowerLawSpectralModel,
PointSpatialModel,
)
from gammapy.spectrum import FluxPointsEstimator
from gammapy.modeling import Fit
from gammapy.data import DataStore
from gammapy.maps import MapAxis, WcsGeom
from gammapy.cube import MapDatasetMaker, MapDataset, SafeMaskMaker
N_OBS = int(os.environ.get("GAMMAPY_BENCH_N_OBS", 10))
def data_prep():
    """Build one MapDataset per observation for the joint-fit benchmark.

    Loads CTA-1DC run 110380 repeated N_OBS times, bins it onto a Galactic
    WCS geometry with a log energy axis, and reduces each dataset's IRFs to
    the source position. Returns the list of datasets.
    """
    data_store = DataStore.from_dir("$GAMMAPY_DATA/cta-1dc/index/gps/")
    OBS_ID = 110380
    # The same observation repeated N_OBS times scales the benchmark size.
    obs_ids = OBS_ID * np.ones(N_OBS)
    observations = data_store.get_observations(obs_ids)
    energy_axis = MapAxis.from_bounds(
        0.1, 10, nbin=10, unit="TeV", name="energy", interp="log"
    )
    geom = WcsGeom.create(
        skydir=(0, 0),
        binsz=0.02,
        width=(10, 8),
        coordsys="GAL",
        proj="CAR",
        axes=[energy_axis],
    )
    src_pos = SkyCoord(0, 0, unit="deg", frame="galactic")
    offset_max = 4 * u.deg
    maker = MapDatasetMaker(offset_max=offset_max)
    safe_mask_maker = SafeMaskMaker(methods=["offset-max"], offset_max="4 deg")
    stacked = MapDataset.create(geom=geom)
    datasets = []
    for obs in observations:
        dataset = maker.run(stacked, obs)
        dataset = safe_mask_maker.run(dataset, obs)
        # Reduce IRFs to the source position: 1D energy dispersion and a
        # PSF kernel truncated at 0.3 deg keep the joint fit tractable.
        dataset.edisp = dataset.edisp.get_energy_dispersion(
            position=src_pos, e_reco=energy_axis.edges
        )
        dataset.psf = dataset.psf.get_psf_kernel(
            position=src_pos, geom=geom, max_radius="0.3 deg"
        )
        datasets.append(dataset)
    return datasets
def write(datasets):
    """Write each dataset to dataset-<i>.fits in the working directory."""
    for ind, dataset in enumerate(datasets):
        dataset.write(f"dataset-{ind}.fits", overwrite=True)
def read():
    """Read the datasets back from disk and attach the sky model to each.

    A single SkyModel instance (point source with exponential-cutoff power
    law, named "gc-source") is shared across all datasets, which ties their
    parameters together for the joint fit. Returns the list of datasets.
    """
    datasets = []
    spatial_model = PointSpatialModel(
        lon_0="-0.05 deg", lat_0="-0.05 deg", frame="galactic"
    )
    spectral_model = ExpCutoffPowerLawSpectralModel(
        index=2,
        amplitude=3e-12 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1.0 * u.TeV,
        lambda_=0.1 / u.TeV,
    )
    model = SkyModel(
        spatial_model=spatial_model, spectral_model=spectral_model, name="gc-source"
    )
    for ind in range(N_OBS):
        dataset = MapDataset.read(f"dataset-{ind}.fits")
        dataset.model = model
        datasets.append(dataset)
    return datasets
def data_fit(datasets):
    """Run the joint likelihood fit over all datasets and return the result.

    The fit result was previously assigned to an unused local and thrown
    away; returning it lets callers inspect convergence, while existing
    callers that ignore the return value are unaffected.
    """
    fit = Fit(datasets)
    return fit.run()
def flux_point(datasets):
    """Estimate flux points for "gc-source" in three bins over 0.3-10 TeV."""
    e_edges = [0.3, 1, 3, 10] * u.TeV
    fpe = FluxPointsEstimator(datasets=datasets, e_edges=e_edges, source="gc-source")
    fpe.run()
def run_benchmark():
    """Time each pipeline stage and dump the timings to bench.yaml.

    Uses time.perf_counter() rather than time.time(): perf_counter is
    monotonic and has the highest available resolution, which is what a
    benchmark needs (time.time can jump with wall-clock adjustments).
    """
    info = {"n_obs": N_OBS}
    t = time.perf_counter()
    datasets = data_prep()
    info["data_preparation"] = time.perf_counter() - t
    t = time.perf_counter()
    write(datasets)
    info["writing"] = time.perf_counter() - t
    t = time.perf_counter()
    datasets = read()
    info["reading"] = time.perf_counter() - t
    t = time.perf_counter()
    data_fit(datasets)
    info["data_fitting"] = time.perf_counter() - t
    t = time.perf_counter()
    flux_point(datasets)
    info["flux_point"] = time.perf_counter() - t
    Path("bench.yaml").write_text(yaml.dump(info, sort_keys=False, indent=4))
if __name__ == "__main__":
run_benchmark() | benchmarks/analysis_3d_joint.py | import os
import time
import yaml
from pathlib import Path
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from gammapy.modeling.models import (
SkyModel,
ExpCutoffPowerLawSpectralModel,
PointSpatialModel,
)
from gammapy.spectrum import FluxPointsEstimator
from gammapy.modeling import Fit
from gammapy.data import DataStore
from gammapy.maps import MapAxis, WcsGeom
from gammapy.cube import MapDatasetMaker, MapDataset, SafeMaskMaker
N_OBS = int(os.environ.get("GAMMAPY_BENCH_N_OBS", 10))
def data_prep():
data_store = DataStore.from_dir("$GAMMAPY_DATA/cta-1dc/index/gps/")
OBS_ID = 110380
obs_ids = OBS_ID * np.ones(N_OBS)
observations = data_store.get_observations(obs_ids)
energy_axis = MapAxis.from_bounds(
0.1, 10, nbin=10, unit="TeV", name="energy", interp="log"
)
geom = WcsGeom.create(
skydir=(0, 0),
binsz=0.02,
width=(10, 8),
coordsys="GAL",
proj="CAR",
axes=[energy_axis],
)
src_pos = SkyCoord(0, 0, unit="deg", frame="galactic")
offset_max = 4 * u.deg
maker = MapDatasetMaker(offset_max=offset_max)
safe_mask_maker = SafeMaskMaker(methods=["offset-max"], offset_max="4 deg")
stacked = MapDataset.create(geom=geom)
datasets = []
for obs in observations:
dataset = maker.run(stacked, obs)
dataset = safe_mask_maker.run(dataset, obs)
dataset.edisp = dataset.edisp.get_energy_dispersion(
position=src_pos, e_reco=energy_axis.edges
)
dataset.psf = dataset.psf.get_psf_kernel(
position=src_pos, geom=geom, max_radius="0.3 deg"
)
datasets.append(dataset)
return datasets
def write(datasets):
for ind, dataset in enumerate(datasets):
dataset.write(f"dataset-{ind}.fits", overwrite=True)
def read():
datasets = []
spatial_model = PointSpatialModel(
lon_0="-0.05 deg", lat_0="-0.05 deg", frame="galactic"
)
spectral_model = ExpCutoffPowerLawSpectralModel(
index=2,
amplitude=3e-12 * u.Unit("cm-2 s-1 TeV-1"),
reference=1.0 * u.TeV,
lambda_=0.1 / u.TeV,
)
model = SkyModel(
spatial_model=spatial_model, spectral_model=spectral_model, name="gc-source"
)
for ind in range(N_OBS):
dataset = MapDataset.read(f"dataset-{ind}.fits")
dataset.model = model
datasets.append(dataset)
return datasets
def data_fit(datasets):
fit = Fit(datasets)
result = fit.run()
def flux_point(datasets):
e_edges = [0.3, 1, 3, 10] * u.TeV
fpe = FluxPointsEstimator(datasets=datasets, e_edges=e_edges, source="gc-source")
fpe.run()
def run_benchmark():
info = {"n_obs": N_OBS}
t = time.time()
datasets = data_prep()
info["data_preparation"] = time.time() - t
t = time.time()
write(datasets)
info["writing"] = time.time() - t
t = time.time()
datasets = read()
info["reading"] = time.time() - t
t = time.time()
data_fit(datasets)
info["data_fitting"] = time.time() - t
t = time.time()
flux_point(datasets)
info["flux_point"] = time.time() - t
Path("bench.yaml").write_text(yaml.dump(info, sort_keys=False, indent=4))
if __name__ == "__main__":
run_benchmark() | 0.662796 | 0.471467 |
from ast import Pass
import sqlite3
from pyrsistent import v
from utilities.exceptions import DBCursorError, DatabaseConnectionError, QueryError
class BaseService(object):
    """Shared SQLite query helper for the word services.

    Builds a SELECT against the Turkish ("madde") or English ("entries")
    word table, executes it, and post-filters the rows by required /
    forbidden letters.
    """

    def __init__(self):
        """Start with an empty query; build_query_params fills it in."""
        super().__init__()
        self.query = ""

    def execute_query_fetchall(self, **character_check: dict) -> dict:
        """Execute the previously built query and return the filtered rows.

        :param character_check: expects the keys
            language as string ("tr" selects the Turkish db, else English)
            include as list[str] (letters every word must contain)
            exclude as list[str] (letters no word may contain)
        :return: result_set_json as dict, {"result": [...]} with words
            upper-cased and single-character / multi-word entries removed.
        :raises DatabaseConnectionError: if the sqlite file cannot be opened.
        :raises DBCursorError: if a cursor cannot be created.
        :raises QueryError: if the query fails to execute.
        """
        # Bare excepts narrowed to sqlite3.Error with cause chaining, so
        # programming errors (e.g. a missing dict key) are no longer
        # misreported as database failures.
        try:
            if character_check["language"] == "tr":
                conn = sqlite3.connect('database/gtsTR.sqlite3.db')
            else:
                conn = sqlite3.connect('database/gtsEN.sqlite3.db')
        except sqlite3.Error as err:
            raise DatabaseConnectionError("Can not connect to sqlite db.") from err
        try:
            # Each fetched row is a 1-tuple; unwrap it to the bare string.
            conn.row_factory = lambda cursor, row: row[0]
            cursor = conn.cursor()
        except sqlite3.Error as err:
            raise DBCursorError("Can not initialize cursor.") from err
        try:
            cursor.execute(self.query)
            result_set_json = {"result": cursor.fetchall()}
        except sqlite3.Error as err:
            raise QueryError("Can not execute desired query.") from err
        finally:
            # Always release the cursor and connection, even on failure.
            cursor.close()
            conn.close()
        if character_check["include"]:
            result_set_json = self.include_control(character_check["include"], result_set_json)
        if character_check["exclude"]:
            result_set_json = self.exclude_control(character_check["exclude"], result_set_json)
        # Upper-case and drop entries containing spaces or of length one.
        result_set_json["result"] = [word.upper() for word in result_set_json["result"]
                                     if (" " not in word) and (len(word) > 1)]
        return result_set_json

    def build_query_params(self, length: str, word: str, language: str) -> None:
        """Normalize *word* and build the language-specific query.

        Spaces become '_' (the SQL single-character wildcard).

        :param length: exact word length to match, or 0 for any length
        :param word: search pattern
        :param language: "tr" builds against the Turkish table, else English
        """
        # str.replace replaces the original manual accumulation loop.
        generated_word = word.replace(" ", "_")
        if language == "tr":
            return self.build_query_tr(length, generated_word)
        return self.build_query_en(length, generated_word)

    def build_query_tr(self, length: str, word: str) -> None:
        """Build the Turkish-table query.

        SQL-injection hardening: single quotes in *word* are doubled so a
        user-supplied pattern cannot break out of the string literal, and
        *length* is coerced to int before interpolation.
        """
        try:
            safe_length = int(length)
            safe_word = word.replace("'", "''")
            if safe_length == 0:
                self.query = "SELECT madde FROM madde WHERE madde LIKE '%{0}%' order by madde".format(safe_word)
            else:
                self.query = "SELECT madde FROM madde WHERE LENGTH(madde)={0} and madde LIKE '%{1}%' order by madde".format(safe_length, safe_word)
        except (TypeError, ValueError) as err:
            raise QueryError("Failed to fetch items. Check query parameters!") from err

    def build_query_en(self, length: str, word: str) -> None:
        """Build the English-table query.

        Same injection hardening as build_query_tr: quotes doubled, length
        coerced to int.
        """
        try:
            safe_length = int(length)
            safe_word = word.replace("'", "''")
            if safe_length == 0:
                self.query = "SELECT word FROM entries WHERE word LIKE '%{0}%' order by word".format(safe_word)
            else:
                self.query = "SELECT word FROM entries WHERE LENGTH(word)={0} and word LIKE '%{1}%' order by word".format(safe_length, safe_word)
        except (TypeError, ValueError) as err:
            raise QueryError("Failed to fetch items. Check query parameters!") from err

    def include_control(self, char_list: list, result_set_json: dict) -> dict:
        """Keep only words containing every letter in *char_list*.

        Letters are lower-cased before matching. BUG FIX: the previous
        implementation compared the intersection size against
        len(char_list), so duplicated letters made the filter
        unsatisfiable; subset testing handles duplicates correctly.
        """
        required = {char.lower() for char in char_list}
        result_set_json["result"] = [word for word in result_set_json["result"]
                                     if required.issubset(word)]
        return result_set_json

    def exclude_control(self, char_list: list, result_set_json: dict) -> dict:
        """Drop words containing any letter from *char_list*.

        Single linear pass replacing the old counter/hash-map scheme; a
        word survives only when every listed letter is absent. An empty
        char_list now leaves the result untouched (previously it emptied
        it). NOTE(review): letters are matched as-given against
        word.lower(), unlike include_control which lower-cases the
        letters first — confirm whether upper-case letters should match.
        """
        result_set_json["result"] = [word for word in result_set_json["result"]
                                     if all(char not in word.lower() for char in char_list)]
        return result_set_json
import sqlite3
from pyrsistent import v
from utilities.exceptions import DBCursorError, DatabaseConnectionError, QueryError
class BaseService(object):
    def __init__(self):
        """Initialise with an empty query string.

        The query is built later by build_query_params; there is no
        cursor parameter (the previous docstring was stale).
        """
        super().__init__()
        self.query = ""
def execute_query_fetchall(self, **character_check: dict) -> dict:
"""
Executes the built query and then fetches all the resulting rows from the db.
Returns the whole result set.
:param character_check: a dict that includes
language as string
include as list[str]
exclude as list[str]
:return: result_set_json as dict.
"""
try:
if character_check["language"] == "tr":
conn = sqlite3.connect('database/gtsTR.sqlite3.db')
else:
conn = sqlite3.connect('database/gtsEN.sqlite3.db')
except:
raise DatabaseConnectionError("Can not connect to sqlite db.")
try:
conn.row_factory = lambda cursor, row: row[0]
cursor = conn.cursor()
except:
raise DBCursorError("Can not initialize cursor.")
try:
cursor.execute(self.query)
result_set = cursor.fetchall()
result_set_json = {"result": result_set}
except:
raise QueryError("Can not execute desired query.")
finally:
cursor.close()
conn.close()
if character_check["include"]:
result_set_json = self.include_control(character_check["include"], result_set_json)
if character_check["exclude"]:
result_set_json = self.exclude_control(character_check["exclude"], result_set_json)
result_set_json["result"] = [word.upper() for word in result_set_json["result"] if (" " not in word) and (len(word) > 1)]
return result_set_json
def build_query_params(self, length: str, word: str, language: str) -> None:
"""
Builds query parameters. Removes blank lines,
Generates query.
:param: length as str,
word as str,
language as str
:return:
"""
generated_word = ""
for letter in word:
if letter == " ":
generated_word += "_"
else:
generated_word += letter
if language == "tr":
return self.build_query_tr(length, generated_word)
else:
return self.build_query_en(length, generated_word)
def build_query_tr(self, length: str, word: str) -> None:
"""
Builds query with given parameters.
:param: length as str, word as str.
"""
try:
if length == 0:
self.query = "SELECT madde FROM madde WHERE madde LIKE '%{0}%' order by madde".format(word)
else:
self.query = "SELECT madde FROM madde WHERE LENGTH(madde)={0} and madde LIKE '%{1}%' order by madde".format(length, word)
except:
raise QueryError("Failed to fetch items. Check query parameters!")
def build_query_en(self, length: str, word: str) -> None:
"""
Builds query with given parameters.
:param: length as str, word as str.
"""
try:
if length == 0:
self.query = "SELECT word FROM entries WHERE word LIKE '%{0}%' order by word".format(word)
else:
self.query = "SELECT word FROM entries WHERE LENGTH(word)={0} and word LIKE '%{1}%' order by word".format(length, word)
except:
raise QueryError("Failed to fetch items. Check query parameters!")
def include_control(self, char_list: list, result_set_json: dict) -> dict:
"""
Executes include letter control.
:param: char_list as list,
result_set_json as dict
:return: result_set_json as dict
"""
include_list = []
char_list_set = set([char.lower() for char in char_list])
for word in result_set_json["result"]:
set_word = set(word)
if len(char_list_set.intersection(set_word)) == len(char_list):
include_list.append(word)
result_set_json["result"] = include_list
return result_set_json
def exclude_control(self, char_list: list, result_set_json: dict) -> dict:
"""
Executes exclude letter control.
:param: char_list as list,
result_set_json as dict
:return: result_set_json as dict
"""
# TODO : Which faster algorithm can be used here?
hash_map = {}
exclude_list = []
for char in char_list:
for word in result_set_json["result"]:
if char not in word.lower() and word not in hash_map.keys():
hash_map[word] = 1
elif char not in word.lower() and word in hash_map.keys():
hash_map[word] += 1
for word, counter in hash_map.items():
if counter == len(char_list):
exclude_list.append(word)
result_set_json["result"] = exclude_list
return result_set_json | 0.367384 | 0.159119 |
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# Square canvas with the axes hidden: this script draws a schematic
# (transcript -> text spans -> relations -> assembled flowchart) purely
# with text boxes and arrows, then saves it as an image.
plt.figure(figsize=(10,10))
plt.axis('off')
# Shared RGB colour palette for the boxes drawn below.
cz = (0.3, 0.3, 0.3)
cy = (0.7, 0.4, 0.12)
ci = (0.1, 0.3, 0.5)
ct = (0.7, 0.2, 0.1)
def ln_func(text, x, y, color, mode=None):
    """Draw a rounded text box at (x, y).

    mode=None -> transparent box, text drawn in `color`;
    mode='i'  -> inverted: white text on a `color`-filled box;
    mode='b'  -> bordered: `color` text and border on a white box.
    """
    if mode is None:
        color_font = b_color = color
        alpha_bg = 0
    elif mode == 'i':
        color_font = 'white'
        b_color = color
        alpha_bg = 1
    elif mode == 'b':
        color_font = b_color = color
        color = 'white'
        alpha_bg = 1
    else:
        # Bug fix: an unknown mode previously fell through and raised a
        # confusing NameError on the undefined locals used below.
        raise ValueError("mode must be None, 'i' or 'b', got {!r}".format(mode))
    plt.text(x, y, text, ha="left", va="top", color=color_font,
             bbox=dict(boxstyle="round", alpha=alpha_bg, ec=b_color, fc=color))
# ---- Right column: the assembled flowchart ----------------------------
ty = 1
tx = 0.8
ln_func("Flowchart", tx-0.03, ty, ct, 'i')
ty -= 0.03
# "<if textspan_a>" rendered as three adjacent boxes.
ln_func('<if', tx+0.0, ty, cz)
ln_func('$\\it{textspan\_a}$', tx+0.03, ty, cy)
ln_func('>', tx+0.13, ty, cz)
# Curved arrow labelled "else" from the condition down to textspan_c.
plt.gca().add_patch(patches.FancyArrowPatch((tx+0.13, ty-0.015), (tx+0.1, ty-0.075), connectionstyle="arc3,rad=-.7", color=cz, arrowstyle="Simple,head_width=3,head_length=6"))
ln_func('else', tx+0.12, ty-0.0325, cz, 'b')
# Curved arrow labelled "then" from the condition down to textspan_b.
plt.gca().add_patch(patches.FancyArrowPatch((tx, ty-0.015), (tx, ty-0.135), connectionstyle="arc3,rad=.7", color=cz, arrowstyle="Simple,head_width=3,head_length=6"))
ln_func('then', tx-0.0625, ty-0.065, cz, 'b')
ty-= 0.06
ln_func('$\\it{textspan\_b}$', tx+0.0, ty, ci)
ty -= 0.06
ln_func('$\\it{textspan\_c}$', tx+0.0, ty, ct)
# ---- Left column: the raw transcript ----------------------------------
ty = 1
tx = 0.0
ln_func("TRANSCRIPT", tx-0.03, ty, cz, 'i')
ty -= 0.05
ln_func("sentence_a", tx+0.00, ty, ci)
ty -= 0.05
ln_func("sentence_b", tx+0.00, ty, ct)
ty -= 0.05
ln_func("sentence_c", tx+0.00, ty, ct)
# ---- Pipeline-stage arrows between the columns ------------------------
plt.text(0.16, 0.9, "Extract", color='w', ha="center", va="center", rotation=0, size=10, bbox={'boxstyle':"rarrow", 'fc':'dodgerblue', 'ec':'dodgerblue'})
plt.text(0.38, 0.9, "Classify", color='w', ha="center", va="center", rotation=0, size=10, bbox={'boxstyle':"rarrow", 'fc':'dodgerblue', 'ec':'dodgerblue'})
plt.text(0.665, 0.9, "Assemble", color='w', ha="center", va="center", rotation=0, size=10, bbox={'boxstyle':"rarrow", 'fc':'dodgerblue', 'ec':'dodgerblue'})
# ---- Second column: extracted text spans ------------------------------
ty = 1.0
tx = 0.22
ln_func("Text Spans", tx-0.03, ty, ci, 'i')
ty -= 0.05
ln_func("$\\it{textspan\_a}$", tx+0, ty, cy)
ty -= 0.05
ln_func("$\\it{textspan\_b}$", tx+0, ty, ci)
ty -= 0.05
ln_func("$\\it{textspan\_c}$", tx+0, ty, ct)
# ---- Third column: classified pairwise relations ----------------------
ty = 1
tx = 0.45
ln_func("Relations", tx-0.03, ty, ci, 'i')
ty -= 0.03
ln_func("$\\it{textspan\_a}$", tx+0.00, ty, cy)
ln_func("$\\it{textspan\_b}$", tx+0.0, ty-0.02, ci)
ln_func('<next>', tx+0.11, ty-0.01, cz, 'b')
ty -= 0.06
ln_func("$\\it{textspan\_a}$", tx+0.00, ty, cy)
ln_func("$\\it{textspan\_c}$", tx+0.0, ty-0.02, ct)
ln_func('<if>', tx+0.11, ty-0.01, cz, 'b')
ty -= 0.06
ln_func("$\\it{textspan\_b}$", tx+0.00, ty, ci)
ln_func("$\\it{textspan\_c}$", tx+0.0, ty-0.02, ct)
ln_func('<none>', tx+0.11, ty-0.01, cz, 'b')
# High-resolution export for the paper figure.
plt.savefig('figurepredict', dpi=1500)
#plt.show() | paper/figure_predict.py | import matplotlib.pyplot as plt
import matplotlib.patches as patches
plt.figure(figsize=(10,10))
plt.axis('off')
cz = (0.3, 0.3, 0.3)
cy = (0.7, 0.4, 0.12)
ci = (0.1, 0.3, 0.5)
ct = (0.7, 0.2, 0.1)
def ln_func(text, x, y, color, mode=None):
if mode is None:
color_font = b_color = color
alpha_bg = 0
elif mode == 'i':
color_font = 'white'
b_color = color
alpha_bg = 1
elif mode == 'b':
color_font = b_color = color
color = 'white'
alpha_bg = 1
plt.text(x, y, text, ha="left", va="top", color=color_font, bbox=dict(boxstyle="round", alpha=alpha_bg, ec=b_color, fc=color))
ty = 1
tx = 0.8
ln_func("Flowchart", tx-0.03, ty, ct, 'i')
ty -= 0.03
ln_func('<if', tx+0.0, ty, cz)
ln_func('$\\it{textspan\_a}$', tx+0.03, ty, cy)
ln_func('>', tx+0.13, ty, cz)
plt.gca().add_patch(patches.FancyArrowPatch((tx+0.13, ty-0.015), (tx+0.1, ty-0.075), connectionstyle="arc3,rad=-.7", color=cz, arrowstyle="Simple,head_width=3,head_length=6"))
ln_func('else', tx+0.12, ty-0.0325, cz, 'b')
plt.gca().add_patch(patches.FancyArrowPatch((tx, ty-0.015), (tx, ty-0.135), connectionstyle="arc3,rad=.7", color=cz, arrowstyle="Simple,head_width=3,head_length=6"))
ln_func('then', tx-0.0625, ty-0.065, cz, 'b')
ty-= 0.06
ln_func('$\\it{textspan\_b}$', tx+0.0, ty, ci)
ty -= 0.06
ln_func('$\\it{textspan\_c}$', tx+0.0, ty, ct)
ty = 1
tx = 0.0
ln_func("TRANSCRIPT", tx-0.03, ty, cz, 'i')
ty -= 0.05
ln_func("sentence_a", tx+0.00, ty, ci)
ty -= 0.05
ln_func("sentence_b", tx+0.00, ty, ct)
ty -= 0.05
ln_func("sentence_c", tx+0.00, ty, ct)
plt.text(0.16, 0.9, "Extract", color='w', ha="center", va="center", rotation=0, size=10, bbox={'boxstyle':"rarrow", 'fc':'dodgerblue', 'ec':'dodgerblue'})
plt.text(0.38, 0.9, "Classify", color='w', ha="center", va="center", rotation=0, size=10, bbox={'boxstyle':"rarrow", 'fc':'dodgerblue', 'ec':'dodgerblue'})
plt.text(0.665, 0.9, "Assemble", color='w', ha="center", va="center", rotation=0, size=10, bbox={'boxstyle':"rarrow", 'fc':'dodgerblue', 'ec':'dodgerblue'})
ty = 1.0
tx = 0.22
ln_func("Text Spans", tx-0.03, ty, ci, 'i')
ty -= 0.05
ln_func("$\\it{textspan\_a}$", tx+0, ty, cy)
ty -= 0.05
ln_func("$\\it{textspan\_b}$", tx+0, ty, ci)
ty -= 0.05
ln_func("$\\it{textspan\_c}$", tx+0, ty, ct)
ty = 1
tx = 0.45
ln_func("Relations", tx-0.03, ty, ci, 'i')
ty -= 0.03
ln_func("$\\it{textspan\_a}$", tx+0.00, ty, cy)
ln_func("$\\it{textspan\_b}$", tx+0.0, ty-0.02, ci)
ln_func('<next>', tx+0.11, ty-0.01, cz, 'b')
ty -= 0.06
ln_func("$\\it{textspan\_a}$", tx+0.00, ty, cy)
ln_func("$\\it{textspan\_c}$", tx+0.0, ty-0.02, ct)
ln_func('<if>', tx+0.11, ty-0.01, cz, 'b')
ty -= 0.06
ln_func("$\\it{textspan\_b}$", tx+0.00, ty, ci)
ln_func("$\\it{textspan\_c}$", tx+0.0, ty-0.02, ct)
ln_func('<none>', tx+0.11, ty-0.01, cz, 'b')
plt.savefig('figurepredict', dpi=1500)
#plt.show() | 0.412412 | 0.474692 |
import argparse, os, readline, sys
require date, taskfile, help
__dir__ = os.path.join(*os.path.split(__file__)[:-1]) \
if os.path.basename(__file__)!=__file__ else "."
# Command Line Argument Validation
operations = "list add edit delete move do fail help report".split()  # recognised sub-commands
ap = argparse.ArgumentParser(description="A Command Line ToDoList Manager", add_help=False)  # add_help off: __main prints its own help text
ap.add_argument("data", nargs="*", default=[])                        # [operation] [group / task words...]
ap.add_argument("-h","--help", action="store_true", default=False)
ap.add_argument("-f","--file", default="./todolist.txt")              # task-file path
ap.add_argument("-n","--nosave", action="store_true", default=False)  # never write the file back
ap.add_argument("--date", type=Date, default="today")
ap.add_argument("--nodeadline", action="store_true", default=False)
# User Interaction Functions
def confirm(msg="Are you sure?"):
    """Prompt until the user types exactly "yes" or "no"; return a bool."""
    while True:
        x = raw_input(msg+" (yes/no) ")
        if x=="yes": return True
        elif x=="no": return False
    print  # NOTE(review): unreachable -- the loop only exits via return
def prompt(prompt, prefill=""):
    """Read a line with *prefill* pre-inserted as editable text (readline)."""
    readline.set_startup_hook(lambda: readline.insert_text(prefill))
    try:
        data = raw_input(prompt)
        print
        return data
    finally: readline.set_startup_hook()  # always clear the hook again
# Convenience Functions
def __relocate(taskfile,task,name):
    """Move *task* into the group called *name*; return False if absent.

    NOTE(review): when the target group does not exist, the task has
    already been removed from its old group by the time we return False,
    so the task is silently lost -- verify callers always pass a valid
    group name (the "move" operation pre-validates via taskfile.group).
    """
    if task.group:
        task.group.task_remove(task)
        taskfile.update(task.group)
    taskgroup = taskfile.group(name)
    if not taskgroup: return False
    taskgroup.task_add(task)
    task.group = taskgroup
    taskfile.update(taskgroup)
    return True
def __main():
print
args, unknown = ap.parse_known_args()
if len(unknown)>0:
print help.basic
sys.exit(0)
if len(args.data) and args.data[0] in operations:
operation = args.data[0]
args.data.pop(0)
else: operation = "list"
if args.help or operation=="help":
print help.full
sys.exit(0)
realdate = args.date.date==datetime.date.today()
taskfile = TaskFile(args.file,args.date,args.nodeadline)
if operation=="add":
if len(args.data)>0:
group = taskfile.group(args.data[0])
if group: args.data.pop(0)
else: group = taskfile.group("today")
else: group = taskfile.group("today")
if len(args.data)>0:
line = " ".join(args.data)
else:
while True:
line = prompt("Add Task: ")
if line.strip()!="": break
task = Task(line,group,args.date,args.nodeadline)
group.task_add(task)
taskfile.update(group)
print group.tabulate()
today = taskfile.group("today")
task = task.periodic(today)
if task:
today.task_add(task)
taskfile.update(today)
else:
if len(args.data)==0:
group = taskfile.group("today")
else:
group = taskfile.select(args.data[0], args.data[1:])
if not group:
group = taskfile.select("today", args.data)
if operation not in ("list","report"):
tasks = group.task_list()
if len(tasks)==0:
raise Exception("No Matching Task")
elif len(tasks)==1:
task = tasks[0]
else:
print group.tabulate(True)
while True:
index = prompt("Select Task by Index: ")
try: task = tasks[int(index)]
except ValueError, IndexError: continue
break
del tasks
if operation in ("edit","delete","move"):
print TaskGroup([task]).tabulate()
if operation=="list":
print group.tabulate()
elif operation=="report":
print group.report()
elif operation=="edit":
while True:
line = prompt("Edit Task: ",str(task))
if line!="": break
task.update(line)
taskfile.update(task.group)
elif operation=="delete":
task.group.task_remove(task)
taskfile.update(task.group)
elif operation=="move":
while True:
name = prompt("Enter Destination Date: ")
try: group = taskfile.group(name)
except: continue
break
__relocate(taskfile,task,group.name)
elif operation=="do":
task.tag_remove("failed")
task.tag_remove("impossible")
task.tag_add("done")
taskfile.update(task.group)
elif operation=="fail":
task.tag_add("failed")
task.tag_remove("impossible")
task.tag_remove("done")
taskfile.update(task.group)
if operation not in ("list","delete","report"):
print TaskGroup([task]).tabulate()
if args.nosave or not realdate:
pass
elif operation in ("list","report"):
taskfile.save()
elif confirm():
taskfile.save()
print "Saved updates to file."
print
def main():
    """Entry point: run __main, mapping Ctrl-C to exit status 1."""
    try:
        __main()
    except KeyboardInterrupt:
        print "^SIGINT\n"
        sys.exit(1)
    except Exception as e:
        # NOTE(review): e.message is deprecated even in late Python 2;
        # str(e) would be the portable spelling. Also note the process
        # exits 0 after an error here -- confirm that is intended.
        print "Error:", e.message, "\n"
exports["main"] = main | src/cli.py | import argparse, os, readline, sys
require date, taskfile, help
__dir__ = os.path.join(*os.path.split(__file__)[:-1]) \
if os.path.basename(__file__)!=__file__ else "."
# Command Line Argument Validation
operations = "list add edit delete move do fail help report".split()
ap = argparse.ArgumentParser(description="A Command Line ToDoList Manager", add_help=False)
ap.add_argument("data", nargs="*", default=[])
ap.add_argument("-h","--help", action="store_true", default=False)
ap.add_argument("-f","--file", default="./todolist.txt")
ap.add_argument("-n","--nosave", action="store_true", default=False)
ap.add_argument("--date", type=Date, default="today")
ap.add_argument("--nodeadline", action="store_true", default=False)
# User Interaction Functions
def confirm(msg="Are you sure?"):
while True:
x = raw_input(msg+" (yes/no) ")
if x=="yes": return True
elif x=="no": return False
print
def prompt(prompt, prefill=""):
readline.set_startup_hook(lambda: readline.insert_text(prefill))
try:
data = raw_input(prompt)
print
return data
finally: readline.set_startup_hook()
# Convenience Functions
def __relocate(taskfile,task,name):
if task.group:
task.group.task_remove(task)
taskfile.update(task.group)
taskgroup = taskfile.group(name)
if not taskgroup: return False
taskgroup.task_add(task)
task.group = taskgroup
taskfile.update(taskgroup)
return True
def __main():
print
args, unknown = ap.parse_known_args()
if len(unknown)>0:
print help.basic
sys.exit(0)
if len(args.data) and args.data[0] in operations:
operation = args.data[0]
args.data.pop(0)
else: operation = "list"
if args.help or operation=="help":
print help.full
sys.exit(0)
realdate = args.date.date==datetime.date.today()
taskfile = TaskFile(args.file,args.date,args.nodeadline)
if operation=="add":
if len(args.data)>0:
group = taskfile.group(args.data[0])
if group: args.data.pop(0)
else: group = taskfile.group("today")
else: group = taskfile.group("today")
if len(args.data)>0:
line = " ".join(args.data)
else:
while True:
line = prompt("Add Task: ")
if line.strip()!="": break
task = Task(line,group,args.date,args.nodeadline)
group.task_add(task)
taskfile.update(group)
print group.tabulate()
today = taskfile.group("today")
task = task.periodic(today)
if task:
today.task_add(task)
taskfile.update(today)
else:
if len(args.data)==0:
group = taskfile.group("today")
else:
group = taskfile.select(args.data[0], args.data[1:])
if not group:
group = taskfile.select("today", args.data)
if operation not in ("list","report"):
tasks = group.task_list()
if len(tasks)==0:
raise Exception("No Matching Task")
elif len(tasks)==1:
task = tasks[0]
else:
print group.tabulate(True)
while True:
index = prompt("Select Task by Index: ")
try: task = tasks[int(index)]
except ValueError, IndexError: continue
break
del tasks
if operation in ("edit","delete","move"):
print TaskGroup([task]).tabulate()
if operation=="list":
print group.tabulate()
elif operation=="report":
print group.report()
elif operation=="edit":
while True:
line = prompt("Edit Task: ",str(task))
if line!="": break
task.update(line)
taskfile.update(task.group)
elif operation=="delete":
task.group.task_remove(task)
taskfile.update(task.group)
elif operation=="move":
while True:
name = prompt("Enter Destination Date: ")
try: group = taskfile.group(name)
except: continue
break
__relocate(taskfile,task,group.name)
elif operation=="do":
task.tag_remove("failed")
task.tag_remove("impossible")
task.tag_add("done")
taskfile.update(task.group)
elif operation=="fail":
task.tag_add("failed")
task.tag_remove("impossible")
task.tag_remove("done")
taskfile.update(task.group)
if operation not in ("list","delete","report"):
print TaskGroup([task]).tabulate()
if args.nosave or not realdate:
pass
elif operation in ("list","report"):
taskfile.save()
elif confirm():
taskfile.save()
print "Saved updates to file."
print
def main():
try:
__main()
except KeyboardInterrupt:
print "^SIGINT\n"
sys.exit(1)
except Exception as e:
print "Error:", e.message, "\n"
exports["main"] = main | 0.069954 | 0.087994 |
from collections import OrderedDict
import tensorflow as tf
from tensorflow.keras.layers import ReLU, LayerNormalization
from tensorflow.keras.layers import UpSampling2D
from .adain import AdaptiveInstanceNormalization
from .linear_blocks import linear_block
from .conv_blocks import res_block
from .conv_blocks import conv_block
from .conv_blocks import res_block_adain
from .conv_blocks import conv_block_adain
from .norm import InstanceNorm, AdaptiveInstanceNorm, LayerNorm
def _adain_params_iter(adain_config, adain_params):
slices = []
curr_slice = 0
for _, dim in adain_config.items():
slices.append(adain_params[:, curr_slice: curr_slice + dim])
curr_slice += dim
return slices.__iter__()
def _get_adain_layer_params(adain_params, slices_iter):
    """Slice one (gamma, beta) pair out of `adain_params`.

    NOTE(review): this expects `slices_iter` to yield (left, right) index
    pairs, which is inconsistent with `_adain_params_iter` in this module
    (that one yields tensor slices, not index pairs). It is not called
    anywhere in this file -- verify before relying on it.
    """
    beta_left, beta_right = next(slices_iter)
    gamma_left, gamma_right = next(slices_iter)
    beta = adain_params[:, beta_left:beta_right]
    gamma = adain_params[:, gamma_left:gamma_right]
    return gamma, beta
def _adain_net(inputs, dim=64, output_dim=3860):
    """Style MLP: two hidden linear+ReLU layers, then a linear projection
    of width `output_dim` (the concatenated AdaIN parameter vector)."""
    hidden = linear_block(inputs, dim, activation=ReLU)
    hidden = linear_block(hidden, dim, activation=ReLU)
    return linear_block(hidden, output_dim, activation=None)
def _body(inputs, adain_params_iter, num_res_blocks, dim):
    """Apply `num_res_blocks` AdaIN residual blocks, consuming four
    parameter slices per block from `adain_params_iter`.

    NOTE(review): the adain_config built in `Decoder` lists each entry's
    beta before its gamma, but the slices are unpacked here as
    (gamma, beta). Since every slice comes from the same learned
    projection the swap is likely harmless, but confirm the intent.
    """
    norm = AdaptiveInstanceNorm
    output = inputs
    for _ in range(num_res_blocks):
        gamma1, beta1 = next(adain_params_iter), next(adain_params_iter)
        gamma2, beta2 = next(adain_params_iter), next(adain_params_iter)
        res_block_inputs = (output, gamma1, beta1, gamma2, beta2)
        output = res_block_adain(res_block_inputs, dim, norm = norm)
    # The iterator is returned so the upsampling stage can keep consuming it.
    return output, adain_params_iter
def _upsample_postprocess(inputs, skip_tensors, adain_params_iter, skip_dim=5, dim = 192):
    """Upsample once per skip tensor, fusing each skip connection, then
    project down to the 6-channel output.

    :param inputs: bottleneck feature map from `_body`
    :param skip_tensors: encoder skips, ordered coarse -> fine
    :param adain_params_iter: yields one (gamma, beta) slice pair per skip
    :param skip_dim: channel count of each AdaIN-processed skip
    :param dim: current channel count; halved after every fusion
    :return: tensor with 6 output channels
    """
    outputs = inputs
    norm = AdaptiveInstanceNorm
    for skip_tensor in skip_tensors:
        outputs = UpSampling2D(interpolation='bilinear')(outputs)
        gamma, beta = next(adain_params_iter), next(adain_params_iter)
        # Normalise the raw skip with AdaIN before concatenating it.
        skip_outputs = conv_block_adain(
            skip_tensor, gamma, beta, filters=skip_dim, kernel_size=7,
            padding=3, stride=1, norm=norm, activation=ReLU)
        outputs = tf.concat([outputs, skip_outputs], -1)
        # Fuse and halve the channel count (debug shape prints removed).
        outputs = conv_block(
            outputs, filters=dim // 2, kernel_size=7, padding=3, stride=1,
            norm=LayerNorm, activation=ReLU,
            norm_kwargs={}, activation_kwargs={})
        dim //= 2
    # Final projection to 6 channels with no norm or activation.
    outputs = conv_block(
        outputs, filters=6, kernel_size=9, padding=4, stride=1,
        norm=None, activation=None, norm_kwargs={}, activation_kwargs={})
    return outputs
def decoder(
        content_input, skip_tensors, style_input, adain_config, \
        num_upsamples=2, num_res_blocks=5, dim=192):
    """Full decoder graph: style MLP -> AdaIN residual blocks ->
    upsampling with skip fusion -> 6-channel output."""
    params = _adain_net(style_input)
    params_iter = _adain_params_iter(adain_config, params)
    features, params_iter = _body(content_input, params_iter, num_res_blocks, dim)
    return _upsample_postprocess(features, skip_tensors, params_iter, dim=dim)
def Decoder(
        input_shape=(64, 64, 192),
        skip2_shape=(128, 128, 5),
        skip1_shape=(256, 256, 5),
        style_shape=(3,),
        num_res_blocks=5, dim=192,
        num_upsamples=2,
        skip_dim=5):
    """Build the Keras decoder model.

    Inputs: [content, skip2, skip1, style]; output: a 6-channel tensor.
    `adain_config` records, in consumption order, the width of every
    AdaIN parameter slice the style network must produce.
    """
    adain_config = OrderedDict()
    # Each residual block consumes two (beta, gamma) pairs, `dim` wide.
    for i in range(num_res_blocks * 2):
        adain_config['res_block_{}_beta'.format(i)] = dim
        adain_config['res_block_{}_gamma'.format(i)] = dim
    # Each upsample/skip fusion consumes one pair, `skip_dim` wide.
    for i in range(num_upsamples):
        adain_config['upsample_block_{}_beta'.format(i)] = skip_dim
        adain_config['upsample_block_{}_gamma'.format(i)] = skip_dim
    content_inputs = tf.keras.Input(input_shape)
    skip2_inputs = tf.keras.Input(skip2_shape)
    skip1_inputs = tf.keras.Input(skip1_shape)
    style_inputs = tf.keras.Input(style_shape)
    outputs = decoder(
        content_inputs, [skip2_inputs, skip1_inputs], style_inputs, adain_config,
        num_upsamples=num_upsamples, num_res_blocks=num_res_blocks, dim=dim)
    # Debug shape print removed.
    return tf.keras.models.Model(
        inputs=[content_inputs, skip2_inputs, skip1_inputs, style_inputs],
        outputs=outputs)
import tensorflow as tf
from tensorflow.keras.layers import ReLU, LayerNormalization
from tensorflow.keras.layers import UpSampling2D
from .adain import AdaptiveInstanceNormalization
from .linear_blocks import linear_block
from .conv_blocks import res_block
from .conv_blocks import conv_block
from .conv_blocks import res_block_adain
from .conv_blocks import conv_block_adain
from .norm import InstanceNorm, AdaptiveInstanceNorm, LayerNorm
def _adain_params_iter(adain_config, adain_params):
slices = []
curr_slice = 0
for _, dim in adain_config.items():
slices.append(adain_params[:, curr_slice: curr_slice + dim])
curr_slice += dim
return slices.__iter__()
def _get_adain_layer_params(adain_params, slices_iter):
beta_left, beta_right = next(slices_iter)
gamma_left, gamma_right = next(slices_iter)
beta = adain_params[:, beta_left:beta_right]
gamma = adain_params[:, gamma_left:gamma_right]
return gamma, beta
def _adain_net(inputs, dim=64, output_dim=3860):
outputs = linear_block(inputs, dim, activation=ReLU)
outputs = linear_block(outputs, dim, activation=ReLU)
outputs = linear_block(outputs, output_dim, activation=None)
return outputs
def _body(inputs, adain_params_iter, num_res_blocks, dim):
norm = AdaptiveInstanceNorm
output = inputs
for _ in range(num_res_blocks):
gamma1, beta1 = next(adain_params_iter), next(adain_params_iter)
gamma2, beta2 = next(adain_params_iter), next(adain_params_iter)
res_block_inputs = (output, gamma1, beta1, gamma2, beta2)
output = res_block_adain(res_block_inputs, dim, norm = norm)
return output, adain_params_iter
def _upsample_postprocess(inputs, skip_tensors, adain_params_iter, skip_dim=5, dim = 192):
outputs = inputs
norm = AdaptiveInstanceNorm
for skip_tensor in skip_tensors:
outputs = UpSampling2D(interpolation = 'bilinear')(outputs)
gamma, beta = next(adain_params_iter), next(adain_params_iter)
skip_outputs = conv_block_adain(
skip_tensor, gamma, beta, filters = skip_dim, kernel_size = 7, padding = 3, stride = 1, \
norm = norm, activation = ReLU)
print(outputs.shape, skip_outputs.shape)
outputs = tf.concat([outputs, skip_outputs], -1)
print(outputs.shape)
outputs = conv_block( outputs, filters = dim // 2, kernel_size = 7, \
padding=3, stride=1, norm=LayerNorm, activation=ReLU, \
norm_kwargs={}, activation_kwargs={})
dim //= 2
outputs = conv_block( outputs, filters = 6, kernel_size = 9, \
padding=4, stride=1, norm=None, activation=None, \
norm_kwargs={}, activation_kwargs={})
return outputs
def decoder(
content_input, skip_tensors, style_input, adain_config, \
num_upsamples=2, num_res_blocks=5, dim=192):
adain_params = _adain_net(style_input)
adain_params_iter = _adain_params_iter(adain_config, adain_params)
outputs, adain_params_iter = _body(
content_input, adain_params_iter, num_res_blocks, dim)
outputs = _upsample_postprocess(outputs, skip_tensors, adain_params_iter, dim = dim)
return outputs
def Decoder(
input_shape=(64, 64, 192),
skip2_shape=(128, 128, 5),
skip1_shape=(256, 256, 5),
style_shape=(3,),
num_res_blocks=5, dim=192,
num_upsamples=2,
skip_dim=5):
adain_config = OrderedDict()
for i in range(num_res_blocks * 2):
adain_config['res_block_{}_beta'.format(i)] = dim
adain_config['res_block_{}_gamma'.format(i)] = dim
for i in range(num_upsamples):
adain_config['upsample_block_{}_beta'.format(i)] = skip_dim
adain_config['upsample_block_{}_gamma'.format(i)] = skip_dim
content_inputs = tf.keras.Input(input_shape)
skip2_inputs = tf.keras.Input(skip2_shape)
skip1_inputs = tf.keras.Input(skip1_shape)
style_inputs = tf.keras.Input(style_shape)
outputs = decoder(
content_inputs, [skip2_inputs, skip1_inputs], style_inputs, adain_config, \
num_upsamples=num_upsamples, num_res_blocks=num_res_blocks, dim=dim)
print(outputs.shape)
model = tf.keras.models.Model(inputs=[content_inputs, skip2_inputs, skip1_inputs, style_inputs], outputs=outputs)
return model | 0.804828 | 0.45944 |
import argparse
from jinja2 import Environment, PackageLoader
def main(properties):
    """Render the ESP Kubernetes config template with *properties*.

    Writes the rendered output to properties['out']; the 'out' key itself
    is not exposed to the template.
    """
    env = Environment(loader=PackageLoader('templates'), trim_blocks=True, lstrip_blocks=True)
    template = env.get_template('esp_template.jinja')
    # Bug fix: work on a copy -- the original `del properties['out']`
    # mutated the caller's dict as a side effect.
    properties = dict(properties)
    out_path = properties.pop('out')
    with open(out_path, 'w') as out_file:
        out_file.write(template.render(properties=properties))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Generate a Kubernetes config for your Endpoints API')
# Required
parser.add_argument(
'--service-name',
help='The hostname of your service. Usually \"my-project-id.appspot.com\".',
required=True
)
parser.add_argument(
'--service-version',
help='The generation id of your service. Run gcloud alpha service-management service describe <service-name> and look for the \"generation\" field',
required=True
)
parser.add_argument(
'--api-image',
required=True,
help='The docker image that serves your API traffic'
)
# Optional
parser.add_argument(
'--out',
default='esp_config.yaml',
help='Output path for your config'
)
parser.add_argument(
'--proxy-port',
default=8080,
type=int,
help='The port on which traffic will be served by the endpoints server proxy'
)
parser.add_argument(
'--ssl',
type=bool,
default=False,
help='Whether to use SSL termination. If true you must have a secret in your cluster named \"nginx-ssl\" which provides certs and secrets'
)
parser.add_argument(
'--ssl-port',
type=int,
default=443,
help='If --ssl is False has no effect. Customizes the port the nginx proxy serves SSL traffic on'
)
parser.add_argument(
'--api-port',
type=int,
default=8081,
help='The port that nginx proxies to, and your API image to serve traffic on'
)
parser.add_argument(
'--custom-nginx-config',
type=bool,
default=False,
help='Whether or not you provide a custom configuration for the nginx proxy. If true you must havea configmap in your cluster named \"nginx-config\"'
)
parser.add_argument(
'--replicas',
type=int,
default=1,
help='Number of replicas or your API container to maintain in the cluster.'
)
parsed = parser.parse_args()
main(vars(parsed)) | k8s/render.py |
import argparse
from jinja2 import Environment, PackageLoader
def main(properties):
env = Environment(loader=PackageLoader('templates'), trim_blocks=True, lstrip_blocks=True)
template = env.get_template('esp_template.jinja')
out_path = properties['out']
del properties['out']
with open(out_path, 'w') as out_file:
out_file.write(template.render(properties=properties))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Generate a Kubernetes config for your Endpoints API')
# Required
parser.add_argument(
'--service-name',
help='The hostname of your service. Usually \"my-project-id.appspot.com\".',
required=True
)
parser.add_argument(
'--service-version',
help='The generation id of your service. Run gcloud alpha service-management service describe <service-name> and look for the \"generation\" field',
required=True
)
parser.add_argument(
'--api-image',
required=True,
help='The docker image that serves your API traffic'
)
# Optional
parser.add_argument(
'--out',
default='esp_config.yaml',
help='Output path for your config'
)
parser.add_argument(
'--proxy-port',
default=8080,
type=int,
help='The port on which traffic will be served by the endpoints server proxy'
)
parser.add_argument(
'--ssl',
type=bool,
default=False,
help='Whether to use SSL termination. If true you must have a secret in your cluster named \"nginx-ssl\" which provides certs and secrets'
)
parser.add_argument(
'--ssl-port',
type=int,
default=443,
help='If --ssl is False has no effect. Customizes the port the nginx proxy serves SSL traffic on'
)
parser.add_argument(
'--api-port',
type=int,
default=8081,
help='The port that nginx proxies to, and your API image to serve traffic on'
)
parser.add_argument(
'--custom-nginx-config',
type=bool,
default=False,
help='Whether or not you provide a custom configuration for the nginx proxy. If true you must havea configmap in your cluster named \"nginx-config\"'
)
parser.add_argument(
'--replicas',
type=int,
default=1,
help='Number of replicas or your API container to maintain in the cluster.'
)
parsed = parser.parse_args()
main(vars(parsed)) | 0.699049 | 0.112747 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ListenerArgs', 'Listener']
@pulumi.input_type
class ListenerArgs:
def __init__(__self__, *,
accelerator_id: pulumi.Input[str],
port_ranges: pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]],
certificates: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]] = None,
client_affinity: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
proxy_protocol: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a Listener resource.
:param pulumi.Input[str] accelerator_id: The accelerator id.
:param pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]] port_ranges: The portRanges of the listener.
:param pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]] certificates: The certificates of the listener.
:param pulumi.Input[str] client_affinity: The clientAffinity of the listener. Default value is `NONE`. Valid values:
`NONE`: client affinity is not maintained, that is, connection requests from the same client cannot always be directed to the same terminal node.
`SOURCE_IP`: maintain client affinity. When a client accesses a stateful application, all requests from the same client can be directed to the same terminal node, regardless of the source port and protocol.
:param pulumi.Input[str] description: The description of the listener.
:param pulumi.Input[str] name: The name of the listener. The length of the name is 2-128 characters. It starts with uppercase and lowercase letters or Chinese characters. It can contain numbers and underscores and dashes.
:param pulumi.Input[str] protocol: Type of network transport protocol monitored. Default value is `TCP`. Valid values: `TCP`, `UDP`, `HTTP`, `HTTPS`.
:param pulumi.Input[bool] proxy_protocol: The proxy protocol of the listener. Default value is `false`. Valid value:
`true`: Turn on the keep client source IP function. After it is turned on, the back-end service is supported to view the original IP address of the client.
`false`: keep client source IP function is not turned on.
"""
pulumi.set(__self__, "accelerator_id", accelerator_id)
pulumi.set(__self__, "port_ranges", port_ranges)
if certificates is not None:
pulumi.set(__self__, "certificates", certificates)
if client_affinity is not None:
pulumi.set(__self__, "client_affinity", client_affinity)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if proxy_protocol is not None:
pulumi.set(__self__, "proxy_protocol", proxy_protocol)
    @property
    @pulumi.getter(name="acceleratorId")
    def accelerator_id(self) -> pulumi.Input[str]:
        """
        ID of the Global Accelerator (GA) instance the listener is created on. Required.
        """
        return pulumi.get(self, "accelerator_id")
    @accelerator_id.setter
    def accelerator_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "accelerator_id", value)
    @property
    @pulumi.getter(name="portRanges")
    def port_ranges(self) -> pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]]:
        """
        Port ranges on which the listener accepts traffic. Required.
        """
        return pulumi.get(self, "port_ranges")
    @port_ranges.setter
    def port_ranges(self, value: pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]]):
        pulumi.set(self, "port_ranges", value)
    @property
    @pulumi.getter
    def certificates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]]:
        """
        Certificates attached to the listener.
        """
        return pulumi.get(self, "certificates")
    @certificates.setter
    def certificates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]]):
        pulumi.set(self, "certificates", value)
    @property
    @pulumi.getter(name="clientAffinity")
    def client_affinity(self) -> Optional[pulumi.Input[str]]:
        """
        Client-affinity mode of the listener. Default value is `NONE`. Valid values:
        `NONE`: affinity is not maintained — requests from the same client may reach different endpoints.
        `SOURCE_IP`: all requests from one client are directed to the same endpoint, regardless of source port and protocol.
        """
        return pulumi.get(self, "client_affinity")
    @client_affinity.setter
    def client_affinity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_affinity", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Free-form description of the listener.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the listener: 2-128 characters, starting with a letter or Chinese character;
        digits, underscores and dashes are also allowed.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """
        Transport protocol the listener monitors. Default value is `TCP`. Valid values: `TCP`, `UDP`, `HTTP`, `HTTPS`.
        """
        return pulumi.get(self, "protocol")
    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter(name="proxyProtocol")
    def proxy_protocol(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to preserve the client source IP (proxy protocol). Default value is `false`.
        When `true`, backend services can see the client's original IP address.
        """
        return pulumi.get(self, "proxy_protocol")
    @proxy_protocol.setter
    def proxy_protocol(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "proxy_protocol", value)
@pulumi.input_type
class _ListenerState:
    """State-input type used to look up and filter existing Listener resources."""

    def __init__(__self__, *,
                 accelerator_id: Optional[pulumi.Input[str]] = None,
                 certificates: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]] = None,
                 client_affinity: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 proxy_protocol: Optional[pulumi.Input[bool]] = None,
                 status: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Listener resources.

        :param pulumi.Input[str] accelerator_id: The accelerator id.
        :param pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]] certificates: The certificates of the listener.
        :param pulumi.Input[str] client_affinity: Client-affinity mode. Default value is `NONE`. Valid values:
               `NONE`: affinity is not maintained. `SOURCE_IP`: all requests from the same
               client go to the same endpoint, regardless of source port and protocol.
        :param pulumi.Input[str] description: The description of the listener.
        :param pulumi.Input[str] name: Listener name: 2-128 characters, starting with a letter or Chinese
               character; digits, underscores and dashes are allowed.
        :param pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]] port_ranges: The portRanges of the listener.
        :param pulumi.Input[str] protocol: Transport protocol. Default value is `TCP`. Valid values: `TCP`, `UDP`, `HTTP`, `HTTPS`.
        :param pulumi.Input[bool] proxy_protocol: Whether to preserve the client source IP. Default value is `false`.
        :param pulumi.Input[str] status: The status of the listener.
        """
        # Record only properties the caller actually supplied; absent state
        # fields must stay unset so lookups can filter on them.
        candidates = (
            ("accelerator_id", accelerator_id),
            ("certificates", certificates),
            ("client_affinity", client_affinity),
            ("description", description),
            ("name", name),
            ("port_ranges", port_ranges),
            ("protocol", protocol),
            ("proxy_protocol", proxy_protocol),
            ("status", status),
        )
        for prop_name, prop_value in candidates:
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="acceleratorId")
    def accelerator_id(self) -> Optional[pulumi.Input[str]]:
        """ID of the Global Accelerator (GA) instance."""
        return pulumi.get(self, "accelerator_id")

    @accelerator_id.setter
    def accelerator_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "accelerator_id", value)

    @property
    @pulumi.getter
    def certificates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]]:
        """Certificates attached to the listener."""
        return pulumi.get(self, "certificates")

    @certificates.setter
    def certificates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]]):
        pulumi.set(self, "certificates", value)

    @property
    @pulumi.getter(name="clientAffinity")
    def client_affinity(self) -> Optional[pulumi.Input[str]]:
        """
        Client-affinity mode. Default value is `NONE`; `SOURCE_IP` pins all
        requests from one client to the same endpoint, regardless of source
        port and protocol.
        """
        return pulumi.get(self, "client_affinity")

    @client_affinity.setter
    def client_affinity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_affinity", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """Free-form description of the listener."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Listener name: 2-128 characters, starting with a letter or Chinese
        character; digits, underscores and dashes are allowed.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="portRanges")
    def port_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]]]:
        """Port ranges on which the listener accepts traffic."""
        return pulumi.get(self, "port_ranges")

    @port_ranges.setter
    def port_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]]]):
        pulumi.set(self, "port_ranges", value)

    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """Transport protocol: `TCP` (default), `UDP`, `HTTP` or `HTTPS`."""
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter(name="proxyProtocol")
    def proxy_protocol(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to preserve the client source IP. Default value is `false`;
        when `true`, backend services can see the client's original IP.
        """
        return pulumi.get(self, "proxy_protocol")

    @proxy_protocol.setter
    def proxy_protocol(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "proxy_protocol", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """The status of the listener."""
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
class Listener(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 accelerator_id: Optional[pulumi.Input[str]] = None,
                 certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]]] = None,
                 client_affinity: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 proxy_protocol: Optional[pulumi.Input[bool]] = None,
                 __props__=None):
        """
        Provides a Global Accelerator (GA) Listener resource.

        For information about Global Accelerator (GA) Listener and how to use it, see
        [What is Listener](https://help.aliyun.com/document_detail/153253.html).

        > **NOTE:** Available in v1.111.0+.

        ## Example Usage

        Basic Usage

        ```python
        import pulumi
        import pulumi_alicloud as alicloud

        example_accelerator = alicloud.ga.Accelerator("exampleAccelerator",
            duration=1,
            auto_use_coupon=True,
            spec="1")
        de_bandwidth_package = alicloud.ga.BandwidthPackage("deBandwidthPackage",
            bandwidth=100,
            type="Basic",
            bandwidth_type="Basic",
            payment_type="PayAsYouGo",
            billing_type="PayBy95",
            ratio=30)
        de_bandwidth_package_attachment = alicloud.ga.BandwidthPackageAttachment("deBandwidthPackageAttachment",
            accelerator_id=example_accelerator.id,
            bandwidth_package_id=de_bandwidth_package.id)
        example_listener = alicloud.ga.Listener("exampleListener",
            accelerator_id=example_accelerator.id,
            port_ranges=[alicloud.ga.ListenerPortRangeArgs(
                from_port=60,
                to_port=70,
            )],
            opts=pulumi.ResourceOptions(depends_on=[de_bandwidth_package_attachment]))
        ```

        ## Import

        Ga Listener can be imported using the id, e.g.

        ```sh
        $ pulumi import alicloud:ga/listener:Listener example <id>
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] accelerator_id: The accelerator id.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]] certificates: The certificates of the listener.
        :param pulumi.Input[str] client_affinity: Client-affinity mode. Default value is `NONE`. Valid values:
               `NONE`: affinity is not maintained. `SOURCE_IP`: all requests from the same
               client go to the same endpoint, regardless of source port and protocol.
        :param pulumi.Input[str] description: The description of the listener.
        :param pulumi.Input[str] name: Listener name: 2-128 characters, starting with a letter or Chinese
               character; digits, underscores and dashes are allowed.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]] port_ranges: The portRanges of the listener.
        :param pulumi.Input[str] protocol: Transport protocol. Default value is `TCP`. Valid values: `TCP`, `UDP`, `HTTP`, `HTTPS`.
        :param pulumi.Input[bool] proxy_protocol: Whether to preserve the client source IP. Default value is `false`.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ListenerArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Global Accelerator (GA) Listener resource, constructed from a
        `ListenerArgs` bag. See the keyword-argument ``__init__`` overload above
        for a full usage example and property documentation.

        :param str resource_name: The name of the resource.
        :param ListenerArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (resource_name, args, opts) form and the fully
        # keyword-expanded form, then delegate to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(ListenerArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 accelerator_id: Optional[pulumi.Input[str]] = None,
                 certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]]] = None,
                 client_affinity: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 proxy_protocol: Optional[pulumi.Input[bool]] = None,
                 __props__=None):
        # Shared constructor body: validates options, builds the property bag
        # and registers the resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: required inputs must be present unless
            # an URN is supplied (resource is being read from an existing URN).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ListenerArgs.__new__(ListenerArgs)

            if accelerator_id is None and not opts.urn:
                raise TypeError("Missing required property 'accelerator_id'")
            __props__.__dict__["accelerator_id"] = accelerator_id
            __props__.__dict__["certificates"] = certificates
            __props__.__dict__["client_affinity"] = client_affinity
            __props__.__dict__["description"] = description
            __props__.__dict__["name"] = name
            if port_ranges is None and not opts.urn:
                raise TypeError("Missing required property 'port_ranges'")
            __props__.__dict__["port_ranges"] = port_ranges
            __props__.__dict__["protocol"] = protocol
            __props__.__dict__["proxy_protocol"] = proxy_protocol
            # Output-only property; populated by the provider after creation.
            __props__.__dict__["status"] = None
        super(Listener, __self__).__init__(
            'alicloud:ga/listener:Listener',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            accelerator_id: Optional[pulumi.Input[str]] = None,
            certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]]] = None,
            client_affinity: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]]] = None,
            protocol: Optional[pulumi.Input[str]] = None,
            proxy_protocol: Optional[pulumi.Input[bool]] = None,
            status: Optional[pulumi.Input[str]] = None) -> 'Listener':
        """
        Get an existing Listener resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] accelerator_id: The accelerator id.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]] certificates: The certificates of the listener.
        :param pulumi.Input[str] client_affinity: Client-affinity mode. Default value is `NONE`. Valid values:
               `NONE`: affinity is not maintained. `SOURCE_IP`: all requests from the same
               client go to the same endpoint, regardless of source port and protocol.
        :param pulumi.Input[str] description: The description of the listener.
        :param pulumi.Input[str] name: Listener name: 2-128 characters, starting with a letter or Chinese
               character; digits, underscores and dashes are allowed.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]] port_ranges: The portRanges of the listener.
        :param pulumi.Input[str] protocol: Transport protocol. Default value is `TCP`. Valid values: `TCP`, `UDP`, `HTTP`, `HTTPS`.
        :param pulumi.Input[bool] proxy_protocol: Whether to preserve the client source IP. Default value is `false`.
        :param pulumi.Input[str] status: The status of the listener.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _ListenerState.__new__(_ListenerState)

        __props__.__dict__["accelerator_id"] = accelerator_id
        __props__.__dict__["certificates"] = certificates
        __props__.__dict__["client_affinity"] = client_affinity
        __props__.__dict__["description"] = description
        __props__.__dict__["name"] = name
        __props__.__dict__["port_ranges"] = port_ranges
        __props__.__dict__["protocol"] = protocol
        __props__.__dict__["proxy_protocol"] = proxy_protocol
        __props__.__dict__["status"] = status
        return Listener(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="acceleratorId")
    def accelerator_id(self) -> pulumi.Output[str]:
        """ID of the Global Accelerator (GA) instance."""
        return pulumi.get(self, "accelerator_id")

    @property
    @pulumi.getter
    def certificates(self) -> pulumi.Output[Optional[Sequence['outputs.ListenerCertificate']]]:
        """Certificates attached to the listener."""
        return pulumi.get(self, "certificates")

    @property
    @pulumi.getter(name="clientAffinity")
    def client_affinity(self) -> pulumi.Output[Optional[str]]:
        """
        Client-affinity mode. Default value is `NONE`; `SOURCE_IP` pins all
        requests from one client to the same endpoint, regardless of source
        port and protocol.
        """
        return pulumi.get(self, "client_affinity")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """Free-form description of the listener."""
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Listener name: 2-128 characters, starting with a letter or Chinese
        character; digits, underscores and dashes are allowed.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="portRanges")
    def port_ranges(self) -> pulumi.Output[Sequence['outputs.ListenerPortRange']]:
        """Port ranges on which the listener accepts traffic."""
        return pulumi.get(self, "port_ranges")

    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Output[Optional[str]]:
        """Transport protocol: `TCP` (default), `UDP`, `HTTP` or `HTTPS`."""
        return pulumi.get(self, "protocol")

    @property
    @pulumi.getter(name="proxyProtocol")
    def proxy_protocol(self) -> pulumi.Output[Optional[bool]]:
        """
        Whether to preserve the client source IP. Default value is `false`;
        when `true`, backend services can see the client's original IP.
        """
        return pulumi.get(self, "proxy_protocol")

    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """The status of the listener."""
        # NOTE(review): the original line carried dataset residue
        # ("| sdk/python/pulumi_alicloud/ga/listener.py") fused after the
        # return expression; it has been removed.
        return pulumi.get(self, "status")
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ListenerArgs', 'Listener']
@pulumi.input_type
class ListenerArgs:
    """The set of arguments for constructing a Listener resource."""

    def __init__(__self__, *,
                 accelerator_id: pulumi.Input[str],
                 port_ranges: pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]],
                 certificates: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]] = None,
                 client_affinity: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 proxy_protocol: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a Listener resource.

        :param pulumi.Input[str] accelerator_id: The accelerator id.
        :param pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]] port_ranges: The portRanges of the listener.
        :param pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]] certificates: The certificates of the listener.
        :param pulumi.Input[str] client_affinity: Client-affinity mode. Default value is `NONE`. Valid values:
               `NONE`: affinity is not maintained. `SOURCE_IP`: all requests from the same
               client go to the same endpoint, regardless of source port and protocol.
        :param pulumi.Input[str] description: The description of the listener.
        :param pulumi.Input[str] name: Listener name: 2-128 characters, starting with a letter or Chinese
               character; digits, underscores and dashes are allowed.
        :param pulumi.Input[str] protocol: Transport protocol. Default value is `TCP`. Valid values: `TCP`, `UDP`, `HTTP`, `HTTPS`.
        :param pulumi.Input[bool] proxy_protocol: Whether to preserve the client source IP. Default value is `false`.
        """
        # Required arguments are always recorded.
        pulumi.set(__self__, "accelerator_id", accelerator_id)
        pulumi.set(__self__, "port_ranges", port_ranges)
        # Optional arguments are recorded only when the caller supplied them.
        optional = (
            ("certificates", certificates),
            ("client_affinity", client_affinity),
            ("description", description),
            ("name", name),
            ("protocol", protocol),
            ("proxy_protocol", proxy_protocol),
        )
        for prop_name, prop_value in optional:
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="acceleratorId")
    def accelerator_id(self) -> pulumi.Input[str]:
        """ID of the Global Accelerator (GA) instance. Required."""
        return pulumi.get(self, "accelerator_id")

    @accelerator_id.setter
    def accelerator_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "accelerator_id", value)

    @property
    @pulumi.getter(name="portRanges")
    def port_ranges(self) -> pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]]:
        """Port ranges on which the listener accepts traffic. Required."""
        return pulumi.get(self, "port_ranges")

    @port_ranges.setter
    def port_ranges(self, value: pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]]):
        pulumi.set(self, "port_ranges", value)

    @property
    @pulumi.getter
    def certificates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]]:
        """Certificates attached to the listener."""
        return pulumi.get(self, "certificates")

    @certificates.setter
    def certificates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]]):
        pulumi.set(self, "certificates", value)

    @property
    @pulumi.getter(name="clientAffinity")
    def client_affinity(self) -> Optional[pulumi.Input[str]]:
        """
        Client-affinity mode. Default value is `NONE`; `SOURCE_IP` pins all
        requests from one client to the same endpoint, regardless of source
        port and protocol.
        """
        return pulumi.get(self, "client_affinity")

    @client_affinity.setter
    def client_affinity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_affinity", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """Free-form description of the listener."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Listener name: 2-128 characters, starting with a letter or Chinese
        character; digits, underscores and dashes are allowed.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """Transport protocol: `TCP` (default), `UDP`, `HTTP` or `HTTPS`."""
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter(name="proxyProtocol")
    def proxy_protocol(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to preserve the client source IP. Default value is `false`;
        when `true`, backend services can see the client's original IP.
        """
        return pulumi.get(self, "proxy_protocol")

    @proxy_protocol.setter
    def proxy_protocol(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "proxy_protocol", value)
@pulumi.input_type
class _ListenerState:
def __init__(__self__, *,
accelerator_id: Optional[pulumi.Input[str]] = None,
certificates: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]] = None,
client_affinity: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]]] = None,
protocol: Optional[pulumi.Input[str]] = None,
proxy_protocol: Optional[pulumi.Input[bool]] = None,
status: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Listener resources.
:param pulumi.Input[str] accelerator_id: The accelerator id.
:param pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]] certificates: The certificates of the listener.
:param pulumi.Input[str] client_affinity: The clientAffinity of the listener. Default value is `NONE`. Valid values:
`NONE`: client affinity is not maintained, that is, connection requests from the same client cannot always be directed to the same terminal node.
`SOURCE_IP`: maintain client affinity. When a client accesses a stateful application, all requests from the same client can be directed to the same terminal node, regardless of the source port and protocol.
:param pulumi.Input[str] description: The description of the listener.
:param pulumi.Input[str] name: The name of the listener. The length of the name is 2-128 characters. It starts with uppercase and lowercase letters or Chinese characters. It can contain numbers and underscores and dashes.
:param pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]] port_ranges: The portRanges of the listener.
:param pulumi.Input[str] protocol: Type of network transport protocol monitored. Default value is `TCP`. Valid values: `TCP`, `UDP`, `HTTP`, `HTTPS`.
:param pulumi.Input[bool] proxy_protocol: The proxy protocol of the listener. Default value is `false`. Valid value:
`true`: Turn on the keep client source IP function. After it is turned on, the back-end service is supported to view the original IP address of the client.
`false`: keep client source IP function is not turned on.
:param pulumi.Input[str] status: The status of the listener.
"""
if accelerator_id is not None:
pulumi.set(__self__, "accelerator_id", accelerator_id)
if certificates is not None:
pulumi.set(__self__, "certificates", certificates)
if client_affinity is not None:
pulumi.set(__self__, "client_affinity", client_affinity)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if port_ranges is not None:
pulumi.set(__self__, "port_ranges", port_ranges)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if proxy_protocol is not None:
pulumi.set(__self__, "proxy_protocol", proxy_protocol)
if status is not None:
pulumi.set(__self__, "status", status)
    @property
    @pulumi.getter(name="acceleratorId")
    def accelerator_id(self) -> Optional[pulumi.Input[str]]:
        """
        The accelerator id.
        """
        # Value is stored in the pulumi-managed property bag; "acceleratorId"
        # is the wire (camelCase) name, "accelerator_id" the Python name.
        return pulumi.get(self, "accelerator_id")

    @accelerator_id.setter
    def accelerator_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "accelerator_id", value)
    @property
    @pulumi.getter
    def certificates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]]:
        """
        The certificates of the listener.
        """
        # Delegates storage to pulumi's property bag for this args object.
        return pulumi.get(self, "certificates")

    @certificates.setter
    def certificates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]]):
        pulumi.set(self, "certificates", value)
    @property
    @pulumi.getter(name="clientAffinity")
    def client_affinity(self) -> Optional[pulumi.Input[str]]:
        """
        The clientAffinity of the listener. Default value is `NONE`. Valid values:
        `NONE`: client affinity is not maintained, that is, connection requests from the same client cannot always be directed to the same terminal node.
        `SOURCE_IP`: maintain client affinity. When a client accesses a stateful application, all requests from the same client can be directed to the same terminal node, regardless of the source port and protocol.
        """
        # Delegates storage to pulumi's property bag for this args object.
        return pulumi.get(self, "client_affinity")

    @client_affinity.setter
    def client_affinity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_affinity", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the listener.
        """
        # Delegates storage to pulumi's property bag for this args object.
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the listener. The length of the name is 2-128 characters. It starts with uppercase and lowercase letters or Chinese characters. It can contain numbers and underscores and dashes.
        """
        # Delegates storage to pulumi's property bag for this args object.
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="portRanges")
    def port_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]]]:
        """
        The portRanges of the listener.
        """
        # Wire name is camelCase "portRanges"; Python name is snake_case.
        return pulumi.get(self, "port_ranges")

    @port_ranges.setter
    def port_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]]]):
        pulumi.set(self, "port_ranges", value)
    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """
        Type of network transport protocol monitored. Default value is `TCP`. Valid values: `TCP`, `UDP`, `HTTP`, `HTTPS`.
        """
        # Delegates storage to pulumi's property bag for this args object.
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter(name="proxyProtocol")
    def proxy_protocol(self) -> Optional[pulumi.Input[bool]]:
        """
        The proxy protocol of the listener. Default value is `false`. Valid value:
        `true`: Turn on the keep client source IP function. After it is turned on, the back-end service is supported to view the original IP address of the client.
        `false`: keep client source IP function is not turned on.
        """
        # Delegates storage to pulumi's property bag for this args object.
        return pulumi.get(self, "proxy_protocol")

    @proxy_protocol.setter
    def proxy_protocol(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "proxy_protocol", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of the listener.
        """
        # Presumably an output-only attribute reported by the provider
        # (it is settable here because this is the *state* args class).
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
class Listener(pulumi.CustomResource):
    """Pulumi resource for an Alibaba Cloud Global Accelerator (GA) Listener.

    Generated-SDK style resource: construction is dispatched through two typed
    ``@overload`` stubs into ``_internal_init``, and existing resources can be
    rehydrated with the static ``get``.
    """

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 accelerator_id: Optional[pulumi.Input[str]] = None,
                 certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]]] = None,
                 client_affinity: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 proxy_protocol: Optional[pulumi.Input[bool]] = None,
                 __props__=None):
        """
        Provides a Global Accelerator (GA) Listener resource.

        For information about Global Accelerator (GA) Listener and how to use it, see [What is Listener](https://help.aliyun.com/document_detail/153253.html).

        > **NOTE:** Available in v1.111.0+.

        ## Example Usage

        Basic Usage

        ```python
        import pulumi
        import pulumi_alicloud as alicloud

        example_accelerator = alicloud.ga.Accelerator("exampleAccelerator",
            duration=1,
            auto_use_coupon=True,
            spec="1")
        de_bandwidth_package = alicloud.ga.BandwidthPackage("deBandwidthPackage",
            bandwidth=100,
            type="Basic",
            bandwidth_type="Basic",
            payment_type="PayAsYouGo",
            billing_type="PayBy95",
            ratio=30)
        de_bandwidth_package_attachment = alicloud.ga.BandwidthPackageAttachment("deBandwidthPackageAttachment",
            accelerator_id=example_accelerator.id,
            bandwidth_package_id=de_bandwidth_package.id)
        example_listener = alicloud.ga.Listener("exampleListener",
            accelerator_id=example_accelerator.id,
            port_ranges=[alicloud.ga.ListenerPortRangeArgs(
                from_port=60,
                to_port=70,
            )],
            opts=pulumi.ResourceOptions(depends_on=[de_bandwidth_package_attachment]))
        ```

        ## Import

        Ga Listener can be imported using the id, e.g.

        ```sh
        $ pulumi import alicloud:ga/listener:Listener example <id>
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] accelerator_id: The accelerator id.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]] certificates: The certificates of the listener.
        :param pulumi.Input[str] client_affinity: The clientAffinity of the listener. Default value is `NONE`. Valid values:
               `NONE`: client affinity is not maintained, that is, connection requests from the same client cannot always be directed to the same terminal node.
               `SOURCE_IP`: maintain client affinity. When a client accesses a stateful application, all requests from the same client can be directed to the same terminal node, regardless of the source port and protocol.
        :param pulumi.Input[str] description: The description of the listener.
        :param pulumi.Input[str] name: The name of the listener. The length of the name is 2-128 characters. It starts with uppercase and lowercase letters or Chinese characters. It can contain numbers and underscores and dashes.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]] port_ranges: The portRanges of the listener.
        :param pulumi.Input[str] protocol: Type of network transport protocol monitored. Default value is `TCP`. Valid values: `TCP`, `UDP`, `HTTP`, `HTTPS`.
        :param pulumi.Input[bool] proxy_protocol: The proxy protocol of the listener. Default value is `false`. Valid value:
               `true`: Turn on the keep client source IP function. After it is turned on, the back-end service is supported to view the original IP address of the client.
               `false`: keep client source IP function is not turned on.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ListenerArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Global Accelerator (GA) Listener resource.

        For information about Global Accelerator (GA) Listener and how to use it, see [What is Listener](https://help.aliyun.com/document_detail/153253.html).

        > **NOTE:** Available in v1.111.0+.

        ## Example Usage

        Basic Usage

        ```python
        import pulumi
        import pulumi_alicloud as alicloud

        example_accelerator = alicloud.ga.Accelerator("exampleAccelerator",
            duration=1,
            auto_use_coupon=True,
            spec="1")
        de_bandwidth_package = alicloud.ga.BandwidthPackage("deBandwidthPackage",
            bandwidth=100,
            type="Basic",
            bandwidth_type="Basic",
            payment_type="PayAsYouGo",
            billing_type="PayBy95",
            ratio=30)
        de_bandwidth_package_attachment = alicloud.ga.BandwidthPackageAttachment("deBandwidthPackageAttachment",
            accelerator_id=example_accelerator.id,
            bandwidth_package_id=de_bandwidth_package.id)
        example_listener = alicloud.ga.Listener("exampleListener",
            accelerator_id=example_accelerator.id,
            port_ranges=[alicloud.ga.ListenerPortRangeArgs(
                from_port=60,
                to_port=70,
            )],
            opts=pulumi.ResourceOptions(depends_on=[de_bandwidth_package_attachment]))
        ```

        ## Import

        Ga Listener can be imported using the id, e.g.

        ```sh
        $ pulumi import alicloud:ga/listener:Listener example <id>
        ```

        :param str resource_name: The name of the resource.
        :param ListenerArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Resolve which of the two overloads the caller used (args-object vs.
        # keyword form) and forward to the single real initializer.
        resource_args, opts = _utilities.get_resource_args_opts(ListenerArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       accelerator_id: Optional[pulumi.Input[str]] = None,
                       certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]]] = None,
                       client_affinity: Optional[pulumi.Input[str]] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]]] = None,
                       protocol: Optional[pulumi.Input[str]] = None,
                       proxy_protocol: Optional[pulumi.Input[bool]] = None,
                       __props__=None):
        # Standard pulumi resource bootstrap: normalize opts, then build the
        # property bag and register with the engine via the base class.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # __props__ is only meaningful when rehydrating an existing
            # resource by id (see get() below).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ListenerArgs.__new__(ListenerArgs)

            # accelerator_id and port_ranges are required unless the engine is
            # resolving the resource from an existing URN.
            if accelerator_id is None and not opts.urn:
                raise TypeError("Missing required property 'accelerator_id'")
            __props__.__dict__["accelerator_id"] = accelerator_id
            __props__.__dict__["certificates"] = certificates
            __props__.__dict__["client_affinity"] = client_affinity
            __props__.__dict__["description"] = description
            __props__.__dict__["name"] = name
            if port_ranges is None and not opts.urn:
                raise TypeError("Missing required property 'port_ranges'")
            __props__.__dict__["port_ranges"] = port_ranges
            __props__.__dict__["protocol"] = protocol
            __props__.__dict__["proxy_protocol"] = proxy_protocol
            # status is output-only: populated by the provider, never by callers.
            __props__.__dict__["status"] = None
        super(Listener, __self__).__init__(
            'alicloud:ga/listener:Listener',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            accelerator_id: Optional[pulumi.Input[str]] = None,
            certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]]] = None,
            client_affinity: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]]] = None,
            protocol: Optional[pulumi.Input[str]] = None,
            proxy_protocol: Optional[pulumi.Input[bool]] = None,
            status: Optional[pulumi.Input[str]] = None) -> 'Listener':
        """
        Get an existing Listener resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] accelerator_id: The accelerator id.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]] certificates: The certificates of the listener.
        :param pulumi.Input[str] client_affinity: The clientAffinity of the listener. Default value is `NONE`. Valid values:
               `NONE`: client affinity is not maintained, that is, connection requests from the same client cannot always be directed to the same terminal node.
               `SOURCE_IP`: maintain client affinity. When a client accesses a stateful application, all requests from the same client can be directed to the same terminal node, regardless of the source port and protocol.
        :param pulumi.Input[str] description: The description of the listener.
        :param pulumi.Input[str] name: The name of the listener. The length of the name is 2-128 characters. It starts with uppercase and lowercase letters or Chinese characters. It can contain numbers and underscores and dashes.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]] port_ranges: The portRanges of the listener.
        :param pulumi.Input[str] protocol: Type of network transport protocol monitored. Default value is `TCP`. Valid values: `TCP`, `UDP`, `HTTP`, `HTTPS`.
        :param pulumi.Input[bool] proxy_protocol: The proxy protocol of the listener. Default value is `false`. Valid value:
               `true`: Turn on the keep client source IP function. After it is turned on, the back-end service is supported to view the original IP address of the client.
               `false`: keep client source IP function is not turned on.
        :param pulumi.Input[str] status: The status of the listener.
        """
        # Attach the provider id to opts so the engine performs a lookup
        # instead of creating a new resource.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _ListenerState.__new__(_ListenerState)

        __props__.__dict__["accelerator_id"] = accelerator_id
        __props__.__dict__["certificates"] = certificates
        __props__.__dict__["client_affinity"] = client_affinity
        __props__.__dict__["description"] = description
        __props__.__dict__["name"] = name
        __props__.__dict__["port_ranges"] = port_ranges
        __props__.__dict__["protocol"] = protocol
        __props__.__dict__["proxy_protocol"] = proxy_protocol
        __props__.__dict__["status"] = status
        return Listener(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="acceleratorId")
    def accelerator_id(self) -> pulumi.Output[str]:
        """
        The accelerator id.
        """
        return pulumi.get(self, "accelerator_id")

    @property
    @pulumi.getter
    def certificates(self) -> pulumi.Output[Optional[Sequence['outputs.ListenerCertificate']]]:
        """
        The certificates of the listener.
        """
        return pulumi.get(self, "certificates")

    @property
    @pulumi.getter(name="clientAffinity")
    def client_affinity(self) -> pulumi.Output[Optional[str]]:
        """
        The clientAffinity of the listener. Default value is `NONE`. Valid values:
        `NONE`: client affinity is not maintained, that is, connection requests from the same client cannot always be directed to the same terminal node.
        `SOURCE_IP`: maintain client affinity. When a client accesses a stateful application, all requests from the same client can be directed to the same terminal node, regardless of the source port and protocol.
        """
        return pulumi.get(self, "client_affinity")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The description of the listener.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the listener. The length of the name is 2-128 characters. It starts with uppercase and lowercase letters or Chinese characters. It can contain numbers and underscores and dashes.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="portRanges")
    def port_ranges(self) -> pulumi.Output[Sequence['outputs.ListenerPortRange']]:
        """
        The portRanges of the listener.
        """
        return pulumi.get(self, "port_ranges")

    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Output[Optional[str]]:
        """
        Type of network transport protocol monitored. Default value is `TCP`. Valid values: `TCP`, `UDP`, `HTTP`, `HTTPS`.
        """
        return pulumi.get(self, "protocol")

    @property
    @pulumi.getter(name="proxyProtocol")
    def proxy_protocol(self) -> pulumi.Output[Optional[bool]]:
        """
        The proxy protocol of the listener. Default value is `false`. Valid value:
        `true`: Turn on the keep client source IP function. After it is turned on, the back-end service is supported to view the original IP address of the client.
        `false`: keep client source IP function is not turned on.
        """
        return pulumi.get(self, "proxy_protocol")

    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        The status of the listener.
        """
        return pulumi.get(self, "status")
import re
import time
from ..base.account import BaseAccount
from ..helpers import parse_html_form, set_cookie
class TurbobitNet(BaseAccount):
    """Account plugin for the turbobit.net one-click hoster."""

    __name__ = "TurbobitNet"
    __type__ = "account"
    __version__ = "0.13"
    __status__ = "testing"

    __pyload_version__ = "0.5"

    __description__ = """TurbobitNet account plugin"""
    __license__ = "GPLv3"
    __authors__ = [
        ("zoidberg", "<EMAIL>"),
        ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
    ]

    LOGIN_FAIL_PATTERN = r">(?:E-Mail address appears to be invalid\. Please try again|Incorrect login or password)</div>"

    #: Single source for the captcha error text (was duplicated, and
    #: misspelled "captcha" as "catcha", in the original).
    CAPTCHA_NOT_SUPPORTED = (
        "Logging in with captcha is not supported, "
        "please disable captcha in turbobit's account settings"
    )

    def grab_info(self, user, password, data):
        """Return premium flag and expiry timestamp for the logged-in account."""
        html = self.load("https://turbobit.net/")

        m = re.search(r">Turbo access till ([\d.]+)<", html)
        if m is not None:
            premium = True
            # Site renders the expiry as dd.mm.YYYY; convert to epoch seconds.
            validuntil = time.mktime(time.strptime(m.group(1), "%d.%m.%Y"))
        else:
            premium = False
            validuntil = -1

        return {"premium": premium, "trafficleft": -1, "validuntil": validuntil}

    def signin(self, user, password, data):
        """Log into turbobit.net, failing early if a captcha is required."""
        # Force English pages so the literal text checks below match.
        set_cookie(self.req.cj, "turbobit.net", "user_lang", "en")

        self.data = self.load("https://turbobit.net/login")
        if "<a href='/user/logout'" in self.data:
            self.skip_login()

        # Form action URL is not needed; only the input fields are reused.
        _action, inputs = parse_html_form(
            'class="form-horizontal login mail"', self.data
        )
        if not inputs:
            self.fail_login(self._("Login form not found"))

        inputs["user[login]"] = user
        inputs["user[pass]"] = password
        inputs["user[submit]"] = "Sign in"

        if inputs.get("user[captcha_type]"):
            self.fail_login(self._(self.CAPTCHA_NOT_SUPPORTED))

        self.data = self.load("https://turbobit.net/user/login", post=inputs)
        if "<a href='/user/logout'" in self.data:
            self.log_debug("Login successful")

        elif re.search(self.LOGIN_FAIL_PATTERN, self.data):
            self.fail_login()

        elif ">Please enter the captcha code.</div>" in self.data:
            self.fail_login(self._(self.CAPTCHA_NOT_SUPPORTED))

        else:
            self.fail_login(self._("Unknown response"))
import time
from ..base.account import BaseAccount
from ..helpers import parse_html_form, set_cookie
class TurbobitNet(BaseAccount):
    """Account plugin for the turbobit.net one-click hoster."""

    __name__ = "TurbobitNet"
    __type__ = "account"
    __version__ = "0.13"
    __status__ = "testing"

    __pyload_version__ = "0.5"

    __description__ = """TurbobitNet account plugin"""
    __license__ = "GPLv3"
    __authors__ = [
        ("zoidberg", "<EMAIL>"),
        ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
    ]

    LOGIN_FAIL_PATTERN = r">(?:E-Mail address appears to be invalid\. Please try again|Incorrect login or password)</div>"

    #: Single source for the captcha error text (was duplicated, and
    #: misspelled "captcha" as "catcha", in the original).
    CAPTCHA_NOT_SUPPORTED = (
        "Logging in with captcha is not supported, "
        "please disable captcha in turbobit's account settings"
    )

    def grab_info(self, user, password, data):
        """Return premium flag and expiry timestamp for the logged-in account."""
        html = self.load("https://turbobit.net/")

        m = re.search(r">Turbo access till ([\d.]+)<", html)
        if m is not None:
            premium = True
            # Site renders the expiry as dd.mm.YYYY; convert to epoch seconds.
            validuntil = time.mktime(time.strptime(m.group(1), "%d.%m.%Y"))
        else:
            premium = False
            validuntil = -1

        return {"premium": premium, "trafficleft": -1, "validuntil": validuntil}

    def signin(self, user, password, data):
        """Log into turbobit.net, failing early if a captcha is required."""
        # Force English pages so the literal text checks below match.
        set_cookie(self.req.cj, "turbobit.net", "user_lang", "en")

        self.data = self.load("https://turbobit.net/login")
        if "<a href='/user/logout'" in self.data:
            self.skip_login()

        # Form action URL is not needed; only the input fields are reused.
        _action, inputs = parse_html_form(
            'class="form-horizontal login mail"', self.data
        )
        if not inputs:
            self.fail_login(self._("Login form not found"))

        inputs["user[login]"] = user
        inputs["user[pass]"] = password
        inputs["user[submit]"] = "Sign in"

        if inputs.get("user[captcha_type]"):
            self.fail_login(self._(self.CAPTCHA_NOT_SUPPORTED))

        self.data = self.load("https://turbobit.net/user/login", post=inputs)
        if "<a href='/user/logout'" in self.data:
            self.log_debug("Login successful")

        elif re.search(self.LOGIN_FAIL_PATTERN, self.data):
            self.fail_login()

        elif ">Please enter the captcha code.</div>" in self.data:
            self.fail_login(self._(self.CAPTCHA_NOT_SUPPORTED))

        else:
            self.fail_login(self._("Unknown response"))
from cnn_simple import *
from utils import *
import os
import numpy as np
import argparse
import time
# Echo which GPU(s) this process is pinned to (debug aid on shared machines);
# runs once at import time.
os.system('echo $CUDA_VISIBLE_DEVICES')

PATIENCE = 5  # epochs without validation improvement before early stopping
def main():
    """Parse CLI options, load the pickled 48x48 emotion dataset, start training.

    The pickles hold, per instance, a space-separated pixel string and a class
    index in 0-6. The validation split is converted up front; the training
    split is converted batch-by-batch inside train().
    """
    parser = argparse.ArgumentParser(prog='train.py')
    parser.add_argument('--epoch', type=int, default=1)
    parser.add_argument('--batch', type=int, default=64)
    # Fix: argparse's type=bool treats ANY non-empty string (even "False") as
    # True, so parse the value explicitly.
    parser.add_argument('--pretrain',
                        type=lambda s: s.lower() in ('1', 'true', 'yes'),
                        default=False)
    parser.add_argument('--save_every', type=int, default=1)
    parser.add_argument('--model_name', type=str, default='model/model-1')
    args = parser.parse_args()

    # training data
    train_pixels = load_pickle('../train_pixels.pkl')
    train_labels = load_pickle('../train_labels.pkl')
    print('# of training instances: ' + str(len(train_labels)))

    # validation data
    valid_pixels = load_pickle('../valid_pixels.pkl')
    valid_labels = load_pickle('../valid_labels.pkl')
    print('# of validation instances: ' + str(len(valid_labels)))

    # Reshape each pixel string into a 48x48x1 image and one-hot encode the
    # label so it matches the 7-way softmax output of the Keras model.
    for i in range(len(valid_labels)):
        # np.fromstring(text) is deprecated; split the string instead.
        valid_pixels[i] = np.array(valid_pixels[i].split(),
                                   dtype=float).reshape((48, 48, 1))
        # dtype=float, not np.float: the np.float alias was removed in NumPy 1.20+.
        onehot = np.zeros((7, ), dtype=float)
        onehot[int(valid_labels[i])] = 1.
        valid_labels[i] = onehot

    # start training
    train(args.batch, args.epoch, args.pretrain, args.save_every,
          train_pixels, train_labels,
          np.asarray(valid_pixels), np.asarray(valid_labels),
          args.model_name)
def train(batch_size, num_epoch, pretrain, save_every, train_pixels, train_labels, val_pixels, val_labels, model_name=None):
    """Train (or fine-tune) the CNN with manual mini-batching and early stopping.

    One epoch is a full pass over the training data; each iteration updates the
    model on `batch_size` instances. After every epoch the model is evaluated
    on the validation set; training stops once validation accuracy has not
    improved for PATIENCE consecutive epochs, and a checkpoint is written
    every `save_every` epochs.
    """
    if not pretrain:
        model = build_model()
    else:
        model = load_model(model_name)

    # Pre-compute the index cut-offs of every batch within one epoch.
    num_instances = len(train_labels)
    iter_per_epoch = int(num_instances / batch_size) + 1
    batch_cutoff = [0]
    for i in range(iter_per_epoch - 1):
        batch_cutoff.append(batch_size * (i + 1))
    batch_cutoff.append(num_instances)

    total_start_t = time.time()
    best_metrics = 0.0
    early_stop_counter = 0
    for e in range(num_epoch):
        # Shuffle data in every epoch.
        rand_idxs = np.random.permutation(num_instances)
        print('#######')
        print('Epoch ' + str(e + 1))
        print('#######')
        start_t = time.time()

        for i in range(iter_per_epoch):
            if i % 50 == 0:
                print('Iteration ' + str(i + 1))
            X_batch = []
            Y_batch = []

            # Fill the batch: decode each pixel string into a 48x48x1 image
            # and one-hot encode its label on the fly.
            for n in range(batch_cutoff[i], batch_cutoff[i + 1]):
                X_batch.append(train_pixels[rand_idxs[n]])
                # dtype=float, not np.float (alias removed in NumPy 1.20+).
                Y_batch.append(np.zeros((7, ), dtype=float))
                # np.fromstring(text) is deprecated; split the string instead.
                X_batch[-1] = np.array(X_batch[-1].split(),
                                       dtype=float).reshape((48, 48, 1))
                Y_batch[-1][int(train_labels[rand_idxs[n]])] = 1.

            model.train_on_batch(np.asarray(X_batch), np.asarray(Y_batch))

        # One epoch done; check performance on the validation set.
        loss_and_metrics = model.evaluate(val_pixels, val_labels, batch_size)
        print('\nloss & metrics:')
        print(loss_and_metrics)

        # Early-stopping bookkeeping (guards against overfitting).
        if loss_and_metrics[1] >= best_metrics:
            best_metrics = loss_and_metrics[1]
            print("save best score!! " + str(loss_and_metrics[1]))
            early_stop_counter = 0
        else:
            early_stop_counter += 1

        # Fix: the original referenced undefined names here (startt,
        # saveevery, earlystopcounter) -- the underscores had been mangled,
        # which raised NameError at the end of the first epoch.
        print('Elapsed time in epoch ' + str(e + 1) + ': ' +
              str(time.time() - start_t))

        if (e + 1) % save_every == 0:
            model.save('model/model-%d.h5' % (e + 1))
            print('Saved model %s!' % str(e + 1))

        if early_stop_counter >= PATIENCE:
            print('Stop by early stopping')
            print('Best score: ' + str(best_metrics))
            break

    print('Elapsed time in total: ' + str(time.time() - total_start_t))
# Fix: the original guard read `if __name=='__main':`, which raises NameError
# (__name is undefined) -- the dunder underscores were lost in transit.
if __name__ == '__main__':
    main()
from utils import *
import os
import numpy as np
import argparse
import time
# Echo which GPU(s) this process is pinned to (debug aid on shared machines);
# runs once at import time.
os.system('echo $CUDA_VISIBLE_DEVICES')

PATIENCE = 5  # epochs without validation improvement before early stopping
def main():
    """Parse CLI options, load the pickled 48x48 emotion dataset, start training.

    The pickles hold, per instance, a space-separated pixel string and a class
    index in 0-6. The validation split is converted up front; the training
    split is converted batch-by-batch inside train().
    """
    parser = argparse.ArgumentParser(prog='train.py')
    parser.add_argument('--epoch', type=int, default=1)
    parser.add_argument('--batch', type=int, default=64)
    # Fix: argparse's type=bool treats ANY non-empty string (even "False") as
    # True, so parse the value explicitly.
    parser.add_argument('--pretrain',
                        type=lambda s: s.lower() in ('1', 'true', 'yes'),
                        default=False)
    parser.add_argument('--save_every', type=int, default=1)
    parser.add_argument('--model_name', type=str, default='model/model-1')
    args = parser.parse_args()

    # training data
    train_pixels = load_pickle('../train_pixels.pkl')
    train_labels = load_pickle('../train_labels.pkl')
    print('# of training instances: ' + str(len(train_labels)))

    # validation data
    valid_pixels = load_pickle('../valid_pixels.pkl')
    valid_labels = load_pickle('../valid_labels.pkl')
    print('# of validation instances: ' + str(len(valid_labels)))

    # Reshape each pixel string into a 48x48x1 image and one-hot encode the
    # label so it matches the 7-way softmax output of the Keras model.
    for i in range(len(valid_labels)):
        # np.fromstring(text) is deprecated; split the string instead.
        valid_pixels[i] = np.array(valid_pixels[i].split(),
                                   dtype=float).reshape((48, 48, 1))
        # dtype=float, not np.float: the np.float alias was removed in NumPy 1.20+.
        onehot = np.zeros((7, ), dtype=float)
        onehot[int(valid_labels[i])] = 1.
        valid_labels[i] = onehot

    # start training
    train(args.batch, args.epoch, args.pretrain, args.save_every,
          train_pixels, train_labels,
          np.asarray(valid_pixels), np.asarray(valid_labels),
          args.model_name)
def train(batch_size, num_epoch, pretrain, save_every, train_pixels, train_labels, val_pixels, val_labels, model_name=None):
    """Train (or fine-tune) the CNN with manual mini-batching and early stopping.

    One epoch is a full pass over the training data; each iteration updates the
    model on `batch_size` instances. After every epoch the model is evaluated
    on the validation set; training stops once validation accuracy has not
    improved for PATIENCE consecutive epochs, and a checkpoint is written
    every `save_every` epochs.
    """
    if not pretrain:
        model = build_model()
    else:
        model = load_model(model_name)

    # Pre-compute the index cut-offs of every batch within one epoch.
    num_instances = len(train_labels)
    iter_per_epoch = int(num_instances / batch_size) + 1
    batch_cutoff = [0]
    for i in range(iter_per_epoch - 1):
        batch_cutoff.append(batch_size * (i + 1))
    batch_cutoff.append(num_instances)

    total_start_t = time.time()
    best_metrics = 0.0
    early_stop_counter = 0
    for e in range(num_epoch):
        # Shuffle data in every epoch.
        rand_idxs = np.random.permutation(num_instances)
        print('#######')
        print('Epoch ' + str(e + 1))
        print('#######')
        start_t = time.time()

        for i in range(iter_per_epoch):
            if i % 50 == 0:
                print('Iteration ' + str(i + 1))
            X_batch = []
            Y_batch = []

            # Fill the batch: decode each pixel string into a 48x48x1 image
            # and one-hot encode its label on the fly.
            for n in range(batch_cutoff[i], batch_cutoff[i + 1]):
                X_batch.append(train_pixels[rand_idxs[n]])
                # dtype=float, not np.float (alias removed in NumPy 1.20+).
                Y_batch.append(np.zeros((7, ), dtype=float))
                # np.fromstring(text) is deprecated; split the string instead.
                X_batch[-1] = np.array(X_batch[-1].split(),
                                       dtype=float).reshape((48, 48, 1))
                Y_batch[-1][int(train_labels[rand_idxs[n]])] = 1.

            model.train_on_batch(np.asarray(X_batch), np.asarray(Y_batch))

        # One epoch done; check performance on the validation set.
        loss_and_metrics = model.evaluate(val_pixels, val_labels, batch_size)
        print('\nloss & metrics:')
        print(loss_and_metrics)

        # Early-stopping bookkeeping (guards against overfitting).
        if loss_and_metrics[1] >= best_metrics:
            best_metrics = loss_and_metrics[1]
            print("save best score!! " + str(loss_and_metrics[1]))
            early_stop_counter = 0
        else:
            early_stop_counter += 1

        # Fix: the original referenced undefined names here (startt,
        # saveevery, earlystopcounter) -- the underscores had been mangled,
        # which raised NameError at the end of the first epoch.
        print('Elapsed time in epoch ' + str(e + 1) + ': ' +
              str(time.time() - start_t))

        if (e + 1) % save_every == 0:
            model.save('model/model-%d.h5' % (e + 1))
            print('Saved model %s!' % str(e + 1))

        if early_stop_counter >= PATIENCE:
            print('Stop by early stopping')
            print('Best score: ' + str(best_metrics))
            break

    print('Elapsed time in total: ' + str(time.time() - total_start_t))
# Fix: the original guard read `if __name=='__main':`, which raises NameError
# (__name is undefined) -- the dunder underscores were lost in transit.
if __name__ == '__main__':
    main()
import json
import logging
import os
import shutil
import time
from copy import deepcopy
import certifi
import requests
import yaml
from assemblyline.common import log as al_log
from assemblyline.common.digests import get_sha256_for_file
from assemblyline.common.isotime import iso_to_epoch
# Configure assemblyline logging once at import time; all messages from this
# updater go through the 'assemblyline.updater.service' logger.
al_log.init_logging('service_updater')
LOGGER = logging.getLogger('assemblyline.updater.service')

# Paths are injected via environment variables; presumably set by the service
# deployment -- only UPDATE_OUTPUT_PATH has a baked-in fallback.
UPDATE_CONFIGURATION_PATH = os.environ.get('UPDATE_CONFIGURATION_PATH', None)
UPDATE_OUTPUT_PATH = os.environ.get('UPDATE_OUTPUT_PATH', "/tmp/updater_output")
def test_file(_):
return True
def url_update(test_func=test_file) -> None:
    """
    Using an update configuration file as an input, which contains a list of sources, download all the file(s) which
    have been modified since the last update.

    Reads YAML configuration from UPDATE_CONFIGURATION_PATH, downloads every
    source whose content changed since the configured `previous_update` /
    `previous_hash`, and writes the files plus a `response.yaml` (holding the
    new hash map as a JSON string) into UPDATE_OUTPUT_PATH.

    :param test_func: validator called with each downloaded file path; files
        for which it returns a falsy value are discarded.
    """
    update_config = {}
    # Load configuration
    if UPDATE_CONFIGURATION_PATH and os.path.exists(UPDATE_CONFIGURATION_PATH):
        with open(UPDATE_CONFIGURATION_PATH, 'r') as yml_fh:
            update_config = yaml.safe_load(yml_fh)
    else:
        LOGGER.warning("Could not find update configuration file.")
        exit(1)

    # Cleanup output path
    if os.path.exists(UPDATE_OUTPUT_PATH):
        if os.path.isdir(UPDATE_OUTPUT_PATH):
            shutil.rmtree(UPDATE_OUTPUT_PATH)
        else:
            os.unlink(UPDATE_OUTPUT_PATH)
    os.makedirs(UPDATE_OUTPUT_PATH)

    # Get sources
    sources = update_config.get('sources', None)

    # Exit if no update sources given
    if not sources:
        exit()

    # Parse updater configuration
    previous_update = update_config.get('previous_update', None)
    previous_hash = update_config.get('previous_hash', None) or {}
    if previous_hash:
        # previous_hash is stored as a JSON string in the configuration
        previous_hash = json.loads(previous_hash)
    if isinstance(previous_update, str):
        previous_update = iso_to_epoch(previous_update)

    # Create a requests session
    session = requests.Session()

    files_sha256 = {}

    # Go through each source and download file
    for source in sources:
        uri = source['uri']
        name = source['name']
        if not uri or not name:
            LOGGER.warning(f"Invalid source: {source}")
            continue

        LOGGER.info(f"Downloading file '{name}' from uri '{uri}' ...")
        username = source.get('username', None)
        password = source.get('password', None)
        auth = (username, password) if username and password else None
        ca_cert = source.get('ca_cert', None)
        ignore_ssl_errors = source.get('ssl_ignore_errors', False)
        # BUGFIX: copy the headers so the If-Modified-Since entry added below
        # does not leak back into the source configuration dict.
        headers = dict(source.get('headers', None) or {})

        if ca_cert:
            # Add certificate to requests
            cafile = certifi.where()
            with open(cafile, 'a') as ca_editor:
                ca_editor.write(f"\n{ca_cert}")

        session.verify = not ignore_ssl_errors

        try:
            # Check the response header for the last modified date
            response = session.head(uri, auth=auth, headers=headers)
            last_modified = response.headers.get('Last-Modified', None)
            if last_modified:
                # Convert the last modified time to epoch
                last_modified = time.mktime(time.strptime(last_modified, "%a, %d %b %Y %H:%M:%S %Z"))

                # Compare the last modified time with the last updated time
                if previous_update is not None and last_modified <= previous_update:
                    # File has not been modified since last update, do nothing
                    LOGGER.info("File has not changed since last time, Skipping...")
                    continue

            if previous_update is not None:
                # BUGFIX: build the header value locally instead of overwriting
                # previous_update with a formatted string, which broke the
                # numeric comparison above (float <= str) for every source
                # after the first one.
                headers['If-Modified-Since'] = time.strftime("%a, %d %b %Y %H:%M:%S %Z",
                                                             time.gmtime(previous_update))

            response = session.get(uri, auth=auth, headers=headers)

            # Check the response code
            if response.status_code == requests.codes['not_modified']:
                # File has not been modified since last update, do nothing
                LOGGER.info("File has not changed since last time, Skipping...")
                continue
            elif response.ok:
                file_path = os.path.join(UPDATE_OUTPUT_PATH, name)
                with open(file_path, 'wb') as f:
                    f.write(response.content)

                if not test_func(file_path):
                    os.unlink(file_path)
                    LOGGER.warning("The downloaded file was invalid. It will not be part of this update...")
                    continue

                # Append the SHA256 of the file to a list of downloaded files
                sha256 = get_sha256_for_file(file_path)
                if previous_hash.get(name, None) != sha256:
                    files_sha256[name] = sha256
                else:
                    LOGGER.info("File has the same hash as last time. Skipping...")

                LOGGER.info("File successfully downloaded!")
        except requests.Timeout:
            LOGGER.warning(f"Cannot find the file for source {name} with url {uri} - (Timeout)")
            continue
        except Exception as e:
            # Catch all other types of exceptions such as ConnectionError, ProxyError, etc.
            LOGGER.warning(f"Source {name} failed with error: {str(e)}")

    if files_sha256:
        new_hash = deepcopy(previous_hash)
        new_hash.update(files_sha256)

        # Check if the new update hash matches the previous update hash
        if new_hash == previous_hash:
            # Update file(s) not changed, delete the downloaded files and exit
            shutil.rmtree(UPDATE_OUTPUT_PATH, ignore_errors=True)
            exit()

        # Create the response yaml
        with open(os.path.join(UPDATE_OUTPUT_PATH, 'response.yaml'), 'w') as yml_fh:
            yaml.safe_dump(dict(
                hash=json.dumps(new_hash),
            ), yml_fh)

        LOGGER.info("Service update file(s) successfully downloaded")

    # Close the requests session
    session.close()
if __name__ == '__main__':
url_update() | assemblyline_core/updater/url_update.py | import json
import logging
import os
import shutil
import time
from copy import deepcopy
import certifi
import requests
import yaml
from assemblyline.common import log as al_log
from assemblyline.common.digests import get_sha256_for_file
from assemblyline.common.isotime import iso_to_epoch
al_log.init_logging('service_updater')
LOGGER = logging.getLogger('assemblyline.updater.service')
UPDATE_CONFIGURATION_PATH = os.environ.get('UPDATE_CONFIGURATION_PATH', None)
UPDATE_OUTPUT_PATH = os.environ.get('UPDATE_OUTPUT_PATH', "/tmp/updater_output")
def test_file(_):
return True
def url_update(test_func=test_file) -> None:
    """
    Using an update configuration file as an input, which contains a list of sources, download all the file(s) which
    have been modified since the last update.

    Reads YAML configuration from UPDATE_CONFIGURATION_PATH, downloads every
    source whose content changed since the configured `previous_update` /
    `previous_hash`, and writes the files plus a `response.yaml` (holding the
    new hash map as a JSON string) into UPDATE_OUTPUT_PATH.

    :param test_func: validator called with each downloaded file path; files
        for which it returns a falsy value are discarded.
    """
    update_config = {}
    # Load configuration
    if UPDATE_CONFIGURATION_PATH and os.path.exists(UPDATE_CONFIGURATION_PATH):
        with open(UPDATE_CONFIGURATION_PATH, 'r') as yml_fh:
            update_config = yaml.safe_load(yml_fh)
    else:
        LOGGER.warning("Could not find update configuration file.")
        exit(1)

    # Cleanup output path
    if os.path.exists(UPDATE_OUTPUT_PATH):
        if os.path.isdir(UPDATE_OUTPUT_PATH):
            shutil.rmtree(UPDATE_OUTPUT_PATH)
        else:
            os.unlink(UPDATE_OUTPUT_PATH)
    os.makedirs(UPDATE_OUTPUT_PATH)

    # Get sources
    sources = update_config.get('sources', None)

    # Exit if no update sources given
    if not sources:
        exit()

    # Parse updater configuration
    previous_update = update_config.get('previous_update', None)
    previous_hash = update_config.get('previous_hash', None) or {}
    if previous_hash:
        # previous_hash is stored as a JSON string in the configuration
        previous_hash = json.loads(previous_hash)
    if isinstance(previous_update, str):
        previous_update = iso_to_epoch(previous_update)

    # Create a requests session
    session = requests.Session()

    files_sha256 = {}

    # Go through each source and download file
    for source in sources:
        uri = source['uri']
        name = source['name']
        if not uri or not name:
            LOGGER.warning(f"Invalid source: {source}")
            continue

        LOGGER.info(f"Downloading file '{name}' from uri '{uri}' ...")
        username = source.get('username', None)
        password = source.get('password', None)
        auth = (username, password) if username and password else None
        ca_cert = source.get('ca_cert', None)
        ignore_ssl_errors = source.get('ssl_ignore_errors', False)
        # BUGFIX: copy the headers so the If-Modified-Since entry added below
        # does not leak back into the source configuration dict.
        headers = dict(source.get('headers', None) or {})

        if ca_cert:
            # Add certificate to requests
            cafile = certifi.where()
            with open(cafile, 'a') as ca_editor:
                ca_editor.write(f"\n{ca_cert}")

        session.verify = not ignore_ssl_errors

        try:
            # Check the response header for the last modified date
            response = session.head(uri, auth=auth, headers=headers)
            last_modified = response.headers.get('Last-Modified', None)
            if last_modified:
                # Convert the last modified time to epoch
                last_modified = time.mktime(time.strptime(last_modified, "%a, %d %b %Y %H:%M:%S %Z"))

                # Compare the last modified time with the last updated time
                if previous_update is not None and last_modified <= previous_update:
                    # File has not been modified since last update, do nothing
                    LOGGER.info("File has not changed since last time, Skipping...")
                    continue

            if previous_update is not None:
                # BUGFIX: build the header value locally instead of overwriting
                # previous_update with a formatted string, which broke the
                # numeric comparison above (float <= str) for every source
                # after the first one.
                headers['If-Modified-Since'] = time.strftime("%a, %d %b %Y %H:%M:%S %Z",
                                                             time.gmtime(previous_update))

            response = session.get(uri, auth=auth, headers=headers)

            # Check the response code
            if response.status_code == requests.codes['not_modified']:
                # File has not been modified since last update, do nothing
                LOGGER.info("File has not changed since last time, Skipping...")
                continue
            elif response.ok:
                file_path = os.path.join(UPDATE_OUTPUT_PATH, name)
                with open(file_path, 'wb') as f:
                    f.write(response.content)

                if not test_func(file_path):
                    os.unlink(file_path)
                    LOGGER.warning("The downloaded file was invalid. It will not be part of this update...")
                    continue

                # Append the SHA256 of the file to a list of downloaded files
                sha256 = get_sha256_for_file(file_path)
                if previous_hash.get(name, None) != sha256:
                    files_sha256[name] = sha256
                else:
                    LOGGER.info("File has the same hash as last time. Skipping...")

                LOGGER.info("File successfully downloaded!")
        except requests.Timeout:
            LOGGER.warning(f"Cannot find the file for source {name} with url {uri} - (Timeout)")
            continue
        except Exception as e:
            # Catch all other types of exceptions such as ConnectionError, ProxyError, etc.
            LOGGER.warning(f"Source {name} failed with error: {str(e)}")

    if files_sha256:
        new_hash = deepcopy(previous_hash)
        new_hash.update(files_sha256)

        # Check if the new update hash matches the previous update hash
        if new_hash == previous_hash:
            # Update file(s) not changed, delete the downloaded files and exit
            shutil.rmtree(UPDATE_OUTPUT_PATH, ignore_errors=True)
            exit()

        # Create the response yaml
        with open(os.path.join(UPDATE_OUTPUT_PATH, 'response.yaml'), 'w') as yml_fh:
            yaml.safe_dump(dict(
                hash=json.dumps(new_hash),
            ), yml_fh)

        LOGGER.info("Service update file(s) successfully downloaded")

    # Close the requests session
    session.close()
# Script entry point: run the URL updater when executed directly.
if __name__ == '__main__':
    url_update()
# In[1]:
# %pip install tensorflow==2.4.1
# %pip install transformers
# %pip install pyarrow
# %pip install tensorflow-addons
# In[1]:
import tensorflow as tf
import pandas as pd
import pickle
import os
import tensorflow_addons as tfa
from transformers import RobertaTokenizer, RobertaTokenizerFast, TFRobertaModel, TFAlbertModel
AUTO = tf.data.experimental.AUTOTUNE
# In[2]:
model_iteration = 'iteration_1'
# In[3]:
tf.config.list_physical_devices()
# In[4]:
with open(f"./{model_iteration}/vocab/topics_vocab.pkl", "rb") as f:
target_vocab = pickle.load(f)
with open(f"./{model_iteration}/vocab/doc_type_vocab.pkl", "rb") as f:
doc_vocab = pickle.load(f)
with open(f"./{model_iteration}/vocab/journal_name_vocab.pkl", "rb") as f:
journal_vocab = pickle.load(f)
# In[5]:
encoding_layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
max_tokens=len(target_vocab)+1, output_mode="binary", sparse=False)
# loss_fn = tf.keras.losses.CategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE)
loss_fn = tfa.losses.SigmoidFocalCrossEntropy(alpha=0.25, gamma=2.0,
reduction=tf.keras.losses.Reduction.NONE)
metric_1 = tf.keras.metrics.CategoricalAccuracy()
metric_2 = tf.keras.metrics.Recall()
metric_3 = tf.keras.metrics.Precision()
metric_4 = tf.keras.metrics.TopKCategoricalAccuracy(k=10)
# Eventually will use with focal loss
# In[6]:
class CustomModel(tf.keras.Model):
    """Keras model with custom train/test steps for ragged multi-label input.

    Both steps truncate the ragged title token ids/masks to 512 positions,
    densify them, and multi-hot encode the integer label lists through the
    module-level `encoding_layer`. The loss (`loss_fn`) and the four metric
    objects are module-level globals shared between training and evaluation.
    """

    def train_step(self, inputs):
        """Run one optimization step and return the updated running metrics."""
        old_features, labels = inputs
        # Strip the zero padding from the fixed-size (20,) target vectors.
        labels = tf.RaggedTensor.from_tensor(labels, padding=0)
        paper_titles = old_features[0][:, :512].to_tensor(shape=[None, 512])
        paper_masks = old_features[1][:, :512].to_tensor(shape=[None, 512])
        features = (paper_titles, paper_masks, old_features[2], old_features[3])
        labels = encoding_layer(labels)
        with tf.GradientTape() as tape:
            predictions = self(features, training=True)
            loss = loss_fn(labels, predictions)
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        metric_1.update_state(labels, predictions)
        metric_2.update_state(labels, predictions)
        metric_3.update_state(labels, predictions)
        metric_4.update_state(labels, predictions)
        # NOTE(review): the "topK15" key reports a metric built with k=10 —
        # confirm which k is actually intended.
        return {"loss": loss,
                "accuracy": metric_1.result(),
                "recall": metric_2.result(),
                "precision": metric_3.result(),
                "topK15": metric_4.result()}

    def test_step(self, inputs):
        """Run one evaluation step (same preprocessing as train_step)."""
        old_features, labels = inputs
        labels = tf.RaggedTensor.from_tensor(labels, padding=0)
        paper_titles = old_features[0][:, :512].to_tensor(shape=[None, 512])
        paper_masks = old_features[1][:, :512].to_tensor(shape=[None, 512])
        features = (paper_titles, paper_masks, old_features[2], old_features[3])
        labels = encoding_layer(labels)
        # BUGFIX: dropped the GradientTape — evaluation needs no gradients and
        # recording the forward pass only wasted memory.
        predictions = self(features, training=False)
        loss = loss_fn(labels, predictions)
        metric_1.update_state(labels, predictions)
        metric_2.update_state(labels, predictions)
        metric_3.update_state(labels, predictions)
        metric_4.update_state(labels, predictions)
        return {"loss": loss,
                "accuracy": metric_1.result(),
                "recall": metric_2.result(),
                "precision": metric_3.result(),
                "topK15": metric_4.result()}

    @property
    def metrics(self):
        # BUGFIX: metric_4 was missing here, so Keras never reset it between
        # epochs and its value accumulated across the whole run.
        return [metric_1, metric_2, metric_3, metric_4]
# In[7]:
def _parse_function(example_proto):
    """Decode one serialized Example into ((title, mask, doc_type, journal), targets)."""
    spec = {
        'paper_title': tf.io.RaggedFeature(tf.int64),
        'paper_mask': tf.io.RaggedFeature(tf.int64),
        'journal': tf.io.FixedLenFeature((1,), tf.int64),
        'doc_type': tf.io.FixedLenFeature((1,), tf.int64),
        'targets': tf.io.FixedLenFeature((20,), tf.int64),
    }
    parsed = tf.io.parse_single_example(example_proto, spec)
    features = (parsed['paper_title'], parsed['paper_mask'],
                parsed['doc_type'], parsed['journal'])
    return features, parsed['targets']
# In[8]:
def get_dataset(path, data_type='train'):
    """Build a batched, shuffled, prefetched dataset from the tfrecord shards."""
    shard_dir = f"{path}{data_type}/"
    shards = sorted(shard_dir + fname for fname in os.listdir(shard_dir)
                    if fname.endswith('tfrecord'))
    # Only the first 25 shards are read — presumably a dev-time subsample;
    # TODO confirm this cap is intentional.
    ds = tf.data.TFRecordDataset(shards[:25], num_parallel_reads=AUTO)
    ds = ds.map(_parse_function, num_parallel_calls=AUTO)
    ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(256, drop_remainder=True))
    return ds.shuffle(1024).prefetch(AUTO)
# In[9]:
file_path = f'./{model_iteration}/tfrecords/'
# In[10]:
train_ds = get_dataset(file_path, 'train')
val_ds = get_dataset(file_path, 'val')
# In[11]:
mirrored_strategy = tf.distribute.MirroredStrategy()
# In[12]:
with mirrored_strategy.scope():
# model = TFAlbertModel.from_pretrained('albert-base-v2')
# model.layers[0].trainable = False
# Model Inputs
paper_title_input_ids = tf.keras.layers.Input((512,), dtype=tf.int64, name='paper_title_ids')
paper_title_att_mask = tf.keras.layers.Input((512,), dtype=tf.int64, name='paper_title_mask')
doc_type_id = tf.keras.layers.Input((1,), dtype=tf.int64, name='doc_type_id')
journal_id = tf.keras.layers.Input((1,), dtype=tf.int64, name='journal_id')
# Using HF Model for Title Representation
# paper_title_embs = model(input_ids = paper_title_input_ids,
# attention_mask=paper_title_att_mask,
# output_hidden_states=True,
# training=False).last_hidden_state
# Embedding Layers
paper_title_embs = tf.keras.layers.Embedding(input_dim=30001,
output_dim=512,
mask_zero=False,
trainable=True,
name="title_embedding")(paper_title_input_ids)
doc_embs = tf.keras.layers.Embedding(input_dim=len(doc_vocab)+1,
output_dim=32,
mask_zero=False,
name="doc_type_embedding")(doc_type_id)
journal_embs = tf.keras.layers.Embedding(input_dim=len(journal_vocab)+1,
output_dim=128,
mask_zero=False,
name="journal_embedding")(journal_id)
# First layer
dense_output = tf.keras.layers.Dense(1024, activation='relu',
kernel_regularizer='L2', name="dense_1")(paper_title_embs)
dense_output = tf.keras.layers.Dropout(0.20, name="dropout_1")(dense_output)
dense_output = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="layer_norm_1")(dense_output)
dense_output_flat = tf.keras.layers.GlobalAveragePooling1D(name="title_pooling_layer")(dense_output)
doc_flat = tf.keras.layers.GlobalAveragePooling1D(name="doc_pooling_layer")(doc_embs)
journal_flat = tf.keras.layers.GlobalAveragePooling1D(name="journal_pooling_layer")(journal_embs)
concat_output = tf.concat(values=[dense_output_flat, journal_flat, doc_flat], axis=1)
# Second layer
dense_output = tf.keras.layers.Dense(1024, activation='relu',
kernel_regularizer='L2', name="dense_2")(concat_output)
dense_output = tf.keras.layers.Dropout(0.20, name="dropout_2")(dense_output)
dense_output = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="layer_norm_2")(dense_output)
# Third Layer
dense_output = tf.keras.layers.Dense(256, activation='relu',
kernel_regularizer='L2', name="dense_3")(dense_output)
dense_output = tf.keras.layers.Dropout(0.20, name="dropout_3")(dense_output)
dense_output = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="layer_norm_3")(dense_output)
# dense_output_flat = tf.keras.layers.GlobalAveragePooling1D(name="title_pooling_layer")(dense_output)
# Output Layer
final_output = tf.keras.layers.Dense(len(target_vocab)+1, activation="sigmoid",
name="cls")(dense_output)
test_model = CustomModel(inputs=[paper_title_input_ids, paper_title_att_mask, doc_type_id, journal_id],
outputs=final_output, name='test_model')
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# In[13]:
test_model.compile(optimizer=optimizer)
# In[14]:
test_model.summary()
# In[15]:
callbacks = [tf.keras.callbacks.ModelCheckpoint(f'./models/{model_iteration}/{model_iteration}_first_try',
save_best_only=False, save_weights_only=False)]
# ## First try (with all variables and Albert model output)
# In[ ]:
history = test_model.fit(train_ds, epochs=1, validation_data=val_ds, verbose=1, callbacks=callbacks)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# ## ARCHIVE: Baseline Second Try (trainable embeddings)
# In[23]:
history = test_model.fit(train_ds, epochs=5, validation_data=val_ds, verbose=1, callbacks=callbacks)
# In[ ]:
# In[ ]:
# In[ ]: | POC/mag_model_iteration_1.py |
# In[1]:
# %pip install tensorflow==2.4.1
# %pip install transformers
# %pip install pyarrow
# %pip install tensorflow-addons
# In[1]:
import tensorflow as tf
import pandas as pd
import pickle
import os
import tensorflow_addons as tfa
from transformers import RobertaTokenizer, RobertaTokenizerFast, TFRobertaModel, TFAlbertModel
AUTO = tf.data.experimental.AUTOTUNE
# In[2]:
model_iteration = 'iteration_1'
# In[3]:
tf.config.list_physical_devices()
# In[4]:
with open(f"./{model_iteration}/vocab/topics_vocab.pkl", "rb") as f:
target_vocab = pickle.load(f)
with open(f"./{model_iteration}/vocab/doc_type_vocab.pkl", "rb") as f:
doc_vocab = pickle.load(f)
with open(f"./{model_iteration}/vocab/journal_name_vocab.pkl", "rb") as f:
journal_vocab = pickle.load(f)
# In[5]:
encoding_layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
max_tokens=len(target_vocab)+1, output_mode="binary", sparse=False)
# loss_fn = tf.keras.losses.CategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE)
loss_fn = tfa.losses.SigmoidFocalCrossEntropy(alpha=0.25, gamma=2.0,
reduction=tf.keras.losses.Reduction.NONE)
metric_1 = tf.keras.metrics.CategoricalAccuracy()
metric_2 = tf.keras.metrics.Recall()
metric_3 = tf.keras.metrics.Precision()
metric_4 = tf.keras.metrics.TopKCategoricalAccuracy(k=10)
# Eventually will use with focal loss
# In[6]:
class CustomModel(tf.keras.Model):
    """Keras model with custom train/test steps for ragged multi-label input.

    Both steps truncate the ragged title token ids/masks to 512 positions,
    densify them, and multi-hot encode the integer label lists through the
    module-level `encoding_layer`. The loss (`loss_fn`) and the four metric
    objects are module-level globals shared between training and evaluation.
    """

    def train_step(self, inputs):
        """Run one optimization step and return the updated running metrics."""
        old_features, labels = inputs
        # Strip the zero padding from the fixed-size (20,) target vectors.
        labels = tf.RaggedTensor.from_tensor(labels, padding=0)
        paper_titles = old_features[0][:, :512].to_tensor(shape=[None, 512])
        paper_masks = old_features[1][:, :512].to_tensor(shape=[None, 512])
        features = (paper_titles, paper_masks, old_features[2], old_features[3])
        labels = encoding_layer(labels)
        with tf.GradientTape() as tape:
            predictions = self(features, training=True)
            loss = loss_fn(labels, predictions)
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        metric_1.update_state(labels, predictions)
        metric_2.update_state(labels, predictions)
        metric_3.update_state(labels, predictions)
        metric_4.update_state(labels, predictions)
        # NOTE(review): the "topK15" key reports a metric built with k=10 —
        # confirm which k is actually intended.
        return {"loss": loss,
                "accuracy": metric_1.result(),
                "recall": metric_2.result(),
                "precision": metric_3.result(),
                "topK15": metric_4.result()}

    def test_step(self, inputs):
        """Run one evaluation step (same preprocessing as train_step)."""
        old_features, labels = inputs
        labels = tf.RaggedTensor.from_tensor(labels, padding=0)
        paper_titles = old_features[0][:, :512].to_tensor(shape=[None, 512])
        paper_masks = old_features[1][:, :512].to_tensor(shape=[None, 512])
        features = (paper_titles, paper_masks, old_features[2], old_features[3])
        labels = encoding_layer(labels)
        # BUGFIX: dropped the GradientTape — evaluation needs no gradients and
        # recording the forward pass only wasted memory.
        predictions = self(features, training=False)
        loss = loss_fn(labels, predictions)
        metric_1.update_state(labels, predictions)
        metric_2.update_state(labels, predictions)
        metric_3.update_state(labels, predictions)
        metric_4.update_state(labels, predictions)
        return {"loss": loss,
                "accuracy": metric_1.result(),
                "recall": metric_2.result(),
                "precision": metric_3.result(),
                "topK15": metric_4.result()}

    @property
    def metrics(self):
        # BUGFIX: metric_4 was missing here, so Keras never reset it between
        # epochs and its value accumulated across the whole run.
        return [metric_1, metric_2, metric_3, metric_4]
# In[7]:
def _parse_function(example_proto):
    """Decode one serialized Example into ((title, mask, doc_type, journal), targets)."""
    spec = {
        'paper_title': tf.io.RaggedFeature(tf.int64),
        'paper_mask': tf.io.RaggedFeature(tf.int64),
        'journal': tf.io.FixedLenFeature((1,), tf.int64),
        'doc_type': tf.io.FixedLenFeature((1,), tf.int64),
        'targets': tf.io.FixedLenFeature((20,), tf.int64),
    }
    parsed = tf.io.parse_single_example(example_proto, spec)
    features = (parsed['paper_title'], parsed['paper_mask'],
                parsed['doc_type'], parsed['journal'])
    return features, parsed['targets']
# In[8]:
def get_dataset(path, data_type='train'):
    """Build a batched, shuffled, prefetched dataset from the tfrecord shards."""
    shard_dir = f"{path}{data_type}/"
    shards = sorted(shard_dir + fname for fname in os.listdir(shard_dir)
                    if fname.endswith('tfrecord'))
    # Only the first 25 shards are read — presumably a dev-time subsample;
    # TODO confirm this cap is intentional.
    ds = tf.data.TFRecordDataset(shards[:25], num_parallel_reads=AUTO)
    ds = ds.map(_parse_function, num_parallel_calls=AUTO)
    ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(256, drop_remainder=True))
    return ds.shuffle(1024).prefetch(AUTO)
# In[9]:
file_path = f'./{model_iteration}/tfrecords/'
# In[10]:
train_ds = get_dataset(file_path, 'train')
val_ds = get_dataset(file_path, 'val')
# In[11]:
mirrored_strategy = tf.distribute.MirroredStrategy()
# In[12]:
with mirrored_strategy.scope():
# model = TFAlbertModel.from_pretrained('albert-base-v2')
# model.layers[0].trainable = False
# Model Inputs
paper_title_input_ids = tf.keras.layers.Input((512,), dtype=tf.int64, name='paper_title_ids')
paper_title_att_mask = tf.keras.layers.Input((512,), dtype=tf.int64, name='paper_title_mask')
doc_type_id = tf.keras.layers.Input((1,), dtype=tf.int64, name='doc_type_id')
journal_id = tf.keras.layers.Input((1,), dtype=tf.int64, name='journal_id')
# Using HF Model for Title Representation
# paper_title_embs = model(input_ids = paper_title_input_ids,
# attention_mask=paper_title_att_mask,
# output_hidden_states=True,
# training=False).last_hidden_state
# Embedding Layers
paper_title_embs = tf.keras.layers.Embedding(input_dim=30001,
output_dim=512,
mask_zero=False,
trainable=True,
name="title_embedding")(paper_title_input_ids)
doc_embs = tf.keras.layers.Embedding(input_dim=len(doc_vocab)+1,
output_dim=32,
mask_zero=False,
name="doc_type_embedding")(doc_type_id)
journal_embs = tf.keras.layers.Embedding(input_dim=len(journal_vocab)+1,
output_dim=128,
mask_zero=False,
name="journal_embedding")(journal_id)
# First layer
dense_output = tf.keras.layers.Dense(1024, activation='relu',
kernel_regularizer='L2', name="dense_1")(paper_title_embs)
dense_output = tf.keras.layers.Dropout(0.20, name="dropout_1")(dense_output)
dense_output = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="layer_norm_1")(dense_output)
dense_output_flat = tf.keras.layers.GlobalAveragePooling1D(name="title_pooling_layer")(dense_output)
doc_flat = tf.keras.layers.GlobalAveragePooling1D(name="doc_pooling_layer")(doc_embs)
journal_flat = tf.keras.layers.GlobalAveragePooling1D(name="journal_pooling_layer")(journal_embs)
concat_output = tf.concat(values=[dense_output_flat, journal_flat, doc_flat], axis=1)
# Second layer
dense_output = tf.keras.layers.Dense(1024, activation='relu',
kernel_regularizer='L2', name="dense_2")(concat_output)
dense_output = tf.keras.layers.Dropout(0.20, name="dropout_2")(dense_output)
dense_output = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="layer_norm_2")(dense_output)
# Third Layer
dense_output = tf.keras.layers.Dense(256, activation='relu',
kernel_regularizer='L2', name="dense_3")(dense_output)
dense_output = tf.keras.layers.Dropout(0.20, name="dropout_3")(dense_output)
dense_output = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="layer_norm_3")(dense_output)
# dense_output_flat = tf.keras.layers.GlobalAveragePooling1D(name="title_pooling_layer")(dense_output)
# Output Layer
final_output = tf.keras.layers.Dense(len(target_vocab)+1, activation="sigmoid",
name="cls")(dense_output)
test_model = CustomModel(inputs=[paper_title_input_ids, paper_title_att_mask, doc_type_id, journal_id],
outputs=final_output, name='test_model')
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# In[13]:
test_model.compile(optimizer=optimizer)
# In[14]:
test_model.summary()
# In[15]:
callbacks = [tf.keras.callbacks.ModelCheckpoint(f'./models/{model_iteration}/{model_iteration}_first_try',
save_best_only=False, save_weights_only=False)]
# ## First try (with all variables and Albert model output)
# In[ ]:
history = test_model.fit(train_ds, epochs=1, validation_data=val_ds, verbose=1, callbacks=callbacks)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# ## ARCHIVE: Baseline Second Try (trainable embeddings)
# In[23]:
history = test_model.fit(train_ds, epochs=5, validation_data=val_ds, verbose=1, callbacks=callbacks)
# In[ ]:
# In[ ]:
# In[ ]: | 0.741955 | 0.31457 |
from collections import deque
from time import time
import numpy as np
class LoopTracker(object):
    """Timekeeping context manager.

    Tracks two durations over a sliding window of `length` samples:
    1) time spent inside the `with` block (enter -> exit);
    2) loop time between one `exit` and the next `exit`.
    """

    def __init__(self, length):
        # Bounded histories so long-running loops keep a moving average.
        self.with_time_list = deque(maxlen=length)
        self.loop_time_list = deque(maxlen=length)
        self.loop_point = None  # wall-clock time of the previous exit

    def __enter__(self):
        self.start = time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.end = time()
        self.with_time_list.append(self.end - self.start)
        if not self.loop_point:
            # First exit: nothing to measure a loop against yet.
            self.loop_point = self.end
        else:
            self.loop_time_list.append(self.end - self.loop_point)
            self.loop_point = self.end

    def average(self, time_name):
        """Mean time in milliseconds of the `with` body ("enter") or of the
        exit-to-exit loop ("loop"); np.nan for unknown names or no samples."""
        # BUGFIX: guard empty histories (mirrors SingleTracker) so numpy does
        # not emit a "Mean of empty slice" RuntimeWarning before returning nan.
        if time_name == "enter":
            if not self.with_time_list:
                return np.nan
            return np.nanmean(self.with_time_list) * 1000
        elif time_name == "loop":
            if not self.loop_time_list:
                return np.nan
            return np.nanmean(self.loop_time_list) * 1000
        else:
            return np.nan
class SingleTracker(object):
    """Minimal timer: records only the duration of each `with` block."""

    def __init__(self, length):
        # Bounded history so the average tracks the most recent samples.
        self.with_time_list = deque(maxlen=length)
        self.start = time()

    def __enter__(self):
        self.start = time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed = time() - self.start
        self.with_time_list.append(elapsed)

    def average(self):
        """Mean `with`-block duration in milliseconds, or nan when empty."""
        samples = self.with_time_list
        return np.nanmean(samples) * 1000 if samples else np.nan
class PredictStats(object):
    """Predictor status records.

    Accumulates the observation-wait and inference time of the predictor.
    """

    def __init__(self):
        """init with default value"""
        self.obs_wait_time = 0.0
        self.inference_time = 0.0
        self.iters = 0.0

    def get(self):
        """Return mean wait/inference times in ms, then clear the buffer."""
        # BUGFIX: avoid ZeroDivisionError when get() is called before any
        # iteration has been recorded (AgentGroupStats guards this case too).
        iters = self.iters or 1
        ret = {
            "mean_predictor_wait_ms": self.obs_wait_time * 1000 / iters,
            "mean_predictor_infer_ms": self.inference_time * 1000 / iters,
        }
        self.reset()
        return ret

    def reset(self):
        """Clear accumulators back to their defaults."""
        self.obs_wait_time = 0.0
        self.inference_time = 0.0
        self.iters = 0.0
class AgentStats(object):
    """Agent status records.

    Accumulates the env.step and inference time of a single Agent.
    """

    def __init__(self):
        """init with default value"""
        self.env_step_time = 0.0
        self.inference_time = 0.0
        self.iters = 0.0

    def get(self):
        """Return mean step/inference times in ms plus the iteration count,
        then clear the buffer."""
        # BUGFIX: avoid ZeroDivisionError when get() is called before any
        # iteration has been recorded (AgentGroupStats guards this case too).
        iters = self.iters or 1
        ret = {
            "mean_env_step_time_ms": self.env_step_time * 1000 / iters,
            "mean_inference_time_ms": self.inference_time * 1000 / iters,
            "iters": self.iters,
        }
        self.reset()
        return ret

    def reset(self):
        """reset buffer"""
        self.env_step_time = 0.0
        self.inference_time = 0.0
        self.iters = 0
class AgentGroupStats(object):
    """AgentGroup status records.

    Aggregates env.step and inference timings across the agents of one group.
    The values are only meaningful within a single explore; a logger (or
    similar) is expected to gather them afterwards.
    """

    def __init__(self, n_agents, env_type):
        """init with default value"""
        self.env_step_time = 0.0
        self.inference_time = 0.0
        self.iters = 0
        self.explore_time_in_epi = 0.0
        self.wait_model_time = 0.0
        self.restore_model_time = 0.0
        self.n_agents = n_agents
        self.env_api_type = env_type
        self._stats = dict()

    def update_with_agent_stats(self, agent_stats: list):
        """update agent status to agent group"""
        step_times = [item["mean_env_step_time_ms"] for item in agent_stats]
        infer_times = [item["mean_inference_time_ms"] for item in agent_stats]
        iter_counts = [item["iters"] for item in agent_stats]
        self._stats["mean_env_step_ms"] = np.nanmean(step_times)
        self._stats["mean_inference_ms"] = np.nanmean(infer_times)
        self._stats["iters"] = np.max(iter_counts)

    def get(self):
        """get the newest one-explore-status of agent group"""
        self._stats["explore_ms"] = self.explore_time_in_epi * 1000
        self._stats["wait_model_ms"] = self.wait_model_time * 1000
        self._stats["restore_model_ms"] = self.restore_model_time * 1000
        # With the unified api the agent group records the interaction count
        # itself, so the per-iteration means can be derived here.
        if self.iters > 1e-1:
            self._stats["mean_env_step_time_ms"] = self.env_step_time * 1000 / self.iters
            self._stats["mean_inference_time_ms"] = self.inference_time * 1000 / self.iters
            self._stats["iters"] = self.iters
        self.reset()
        return self._stats

    def reset(self):
        """reset buffer."""
        self.env_step_time = 0.0
        self.inference_time = 0.0
        self.iters = 0
        self.explore_time_in_epi = 0.0
        self.wait_model_time = 0.0
        self.restore_model_time = 0.0
from time import time
import numpy as np
class LoopTracker(object):
    """Timekeeping context manager.

    Tracks two durations over a sliding window of `length` samples:
    1) time spent inside the `with` block (enter -> exit);
    2) loop time between one `exit` and the next `exit`.
    """

    def __init__(self, length):
        # Bounded histories so long-running loops keep a moving average.
        self.with_time_list = deque(maxlen=length)
        self.loop_time_list = deque(maxlen=length)
        self.loop_point = None  # wall-clock time of the previous exit

    def __enter__(self):
        self.start = time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.end = time()
        self.with_time_list.append(self.end - self.start)
        if not self.loop_point:
            # First exit: nothing to measure a loop against yet.
            self.loop_point = self.end
        else:
            self.loop_time_list.append(self.end - self.loop_point)
            self.loop_point = self.end

    def average(self, time_name):
        """Mean time in milliseconds of the `with` body ("enter") or of the
        exit-to-exit loop ("loop"); np.nan for unknown names or no samples."""
        # BUGFIX: guard empty histories (mirrors SingleTracker) so numpy does
        # not emit a "Mean of empty slice" RuntimeWarning before returning nan.
        if time_name == "enter":
            if not self.with_time_list:
                return np.nan
            return np.nanmean(self.with_time_list) * 1000
        elif time_name == "loop":
            if not self.loop_time_list:
                return np.nan
            return np.nanmean(self.loop_time_list) * 1000
        else:
            return np.nan
class SingleTracker(object):
    """Minimal timer: records only the duration of each `with` block."""

    def __init__(self, length):
        # Bounded history so the average tracks the most recent samples.
        self.with_time_list = deque(maxlen=length)
        self.start = time()

    def __enter__(self):
        self.start = time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed = time() - self.start
        self.with_time_list.append(elapsed)

    def average(self):
        """Mean `with`-block duration in milliseconds, or nan when empty."""
        samples = self.with_time_list
        return np.nanmean(samples) * 1000 if samples else np.nan
class PredictStats(object):
    """Predictor status records.

    Accumulates the observation-wait and inference time of the predictor.
    """

    def __init__(self):
        """init with default value"""
        self.obs_wait_time = 0.0
        self.inference_time = 0.0
        self.iters = 0.0

    def get(self):
        """Return mean wait/inference times in ms, then clear the buffer."""
        # BUGFIX: avoid ZeroDivisionError when get() is called before any
        # iteration has been recorded (AgentGroupStats guards this case too).
        iters = self.iters or 1
        ret = {
            "mean_predictor_wait_ms": self.obs_wait_time * 1000 / iters,
            "mean_predictor_infer_ms": self.inference_time * 1000 / iters,
        }
        self.reset()
        return ret

    def reset(self):
        """Clear accumulators back to their defaults."""
        self.obs_wait_time = 0.0
        self.inference_time = 0.0
        self.iters = 0.0
class AgentStats(object):
    """Agent status records.

    Accumulates the env.step and inference time of a single Agent.
    """

    def __init__(self):
        """init with default value"""
        self.env_step_time = 0.0
        self.inference_time = 0.0
        self.iters = 0.0

    def get(self):
        """Return mean step/inference times in ms plus the iteration count,
        then clear the buffer."""
        # BUGFIX: avoid ZeroDivisionError when get() is called before any
        # iteration has been recorded (AgentGroupStats guards this case too).
        iters = self.iters or 1
        ret = {
            "mean_env_step_time_ms": self.env_step_time * 1000 / iters,
            "mean_inference_time_ms": self.inference_time * 1000 / iters,
            "iters": self.iters,
        }
        self.reset()
        return ret

    def reset(self):
        """reset buffer"""
        self.env_step_time = 0.0
        self.inference_time = 0.0
        self.iters = 0
class AgentGroupStats(object):
""" AgentGroup status records
handle the env.step and inference time of AgentGroup
the status could been make sence within once explore
There should been gather by logger or others"""
def __init__(self, n_agents, env_type):
"""init with default value"""
self.env_step_time = 0.0
self.inference_time = 0.0
self.iters = 0
self.explore_time_in_epi = 0.0
self.wait_model_time = 0.0
self.restore_model_time = 0.0
self.n_agents = n_agents
self.env_api_type = env_type
self._stats = dict()
def update_with_agent_stats(self, agent_stats: list):
"""update agent status to agent group"""
_steps = [sta["mean_env_step_time_ms"] for sta in agent_stats]
_infers = [sta["mean_inference_time_ms"] for sta in agent_stats]
_iters = [sta["iters"] for sta in agent_stats]
self._stats.update(
{
"mean_env_step_ms": np.nanmean(_steps),
"mean_inference_ms": np.nanmean(_infers),
"iters": np.max(_iters),
}
)
def get(self):
"""get the newest one-explore-status of agent group"""
self._stats.update(
{
"explore_ms": self.explore_time_in_epi * 1000,
"wait_model_ms": self.wait_model_time * 1000,
"restore_model_ms": self.restore_model_time * 1000,
}
)
# use unified api, agent group will record the iteraction times.
if self.iters > 1e-1:
self._stats.update(
{
"mean_env_step_time_ms": self.env_step_time * 1000 / self.iters,
"mean_inference_time_ms": self.inference_time * 1000 / self.iters,
"iters": self.iters,
}
)
self.reset()
return self._stats
def reset(self):
"""reset buffer."""
self.env_step_time = 0.0
self.inference_time = 0.0
self.iters = 0
self.explore_time_in_epi = 0.0
self.wait_model_time = 0.0
self.restore_model_time = 0.0 | 0.777046 | 0.32126 |
from . import db,login_manager
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from datetime import datetime
#Added this code to solve the Exception: Missing user_loader or request_loader.
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255))
email = db.Column(db.String(255),unique = True,index = True)
bio = db.Column(db.String(255))
profile_pic_path = db.Column(db.String())
pass_secure = db.Column(db.String(255))
blog = db.relationship('Blog',backref = 'user',lazy = "dynamic")
comment = db.relationship('Comment',backref = 'user',lazy = "dynamic")
# upvote = db.relationship('Like',backref='user',lazy='dynamic')
# downvote = db.relationship('Dislike',backref='user',lazy='dynamic')
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.pass_secure = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.pass_secure,password)
def __repr__(self):
return f'User {self.username}'
class Blog(db.Model):
_tablename_ = 'blogs'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(255))
category = db.Column(db.String)
description = db.Column(db.Text)
date_posted = db.Column(db.DateTime, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
comment = db.relationship('Comment', backref='blog', lazy='dynamic')
# save blog
def save_blog(self):
db.session.add(self)
db.session.commit()
#Delete blog
def delete_blog(self):
db.session.delete(self)
db.session.commit()
# get blog by id
@classmethod
def get_blog(cls, id):
blog = Blog.query.filter_by(id=id).first()
return blog
@classmethod
def get_blogs(cls,id):
blogs =Blog.query.filter_by(blog_id=id).all()
return blogs
def _repr_(self):
return f'Blog {self.title}'
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer, primary_key=True)
comment = db.Column(db.String(255))
date_posted = db.Column(db.DateTime, default=datetime.utcnow)
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
blog_id = db.Column(db.Integer,db.ForeignKey("blog.id"))
def save_comment(self):
db.session.add(self)
db.session.commit()
def delete_comment(self):
db.session.delete(self)
db.session.commit()
@classmethod
def get_comments(cls,id):
comments = Comment.query.filter_by(blog_id=id).all()
return comments
def _repr_(self):
return f'Comment {self.comment}'
class Quote:
"""
Qoute blueprint
"""
def __init__(self,quote, author):
self.quote = quote
self.author = author
class Subscriber(db.Model):
__tablename__ = "subscribers"
id=db.Column(db.Integer,primary_key=True)
email = db.Column(db.String(255),index=True)
def save_subscriber(self):
db.session.add(self)
db.session.commit()
def __repr__(self):
return f'Subscriber:{self.email}' | app/models.py | from . import db,login_manager
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from datetime import datetime
#Added this code to solve the Exception: Missing user_loader or request_loader.
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255))
email = db.Column(db.String(255),unique = True,index = True)
bio = db.Column(db.String(255))
profile_pic_path = db.Column(db.String())
pass_secure = db.Column(db.String(255))
blog = db.relationship('Blog',backref = 'user',lazy = "dynamic")
comment = db.relationship('Comment',backref = 'user',lazy = "dynamic")
# upvote = db.relationship('Like',backref='user',lazy='dynamic')
# downvote = db.relationship('Dislike',backref='user',lazy='dynamic')
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.pass_secure = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.pass_secure,password)
def __repr__(self):
return f'User {self.username}'
class Blog(db.Model):
_tablename_ = 'blogs'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(255))
category = db.Column(db.String)
description = db.Column(db.Text)
date_posted = db.Column(db.DateTime, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
comment = db.relationship('Comment', backref='blog', lazy='dynamic')
# save blog
def save_blog(self):
db.session.add(self)
db.session.commit()
#Delete blog
def delete_blog(self):
db.session.delete(self)
db.session.commit()
# get blog by id
@classmethod
def get_blog(cls, id):
blog = Blog.query.filter_by(id=id).first()
return blog
@classmethod
def get_blogs(cls,id):
blogs =Blog.query.filter_by(blog_id=id).all()
return blogs
def _repr_(self):
return f'Blog {self.title}'
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer, primary_key=True)
comment = db.Column(db.String(255))
date_posted = db.Column(db.DateTime, default=datetime.utcnow)
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
blog_id = db.Column(db.Integer,db.ForeignKey("blog.id"))
def save_comment(self):
db.session.add(self)
db.session.commit()
def delete_comment(self):
db.session.delete(self)
db.session.commit()
@classmethod
def get_comments(cls,id):
comments = Comment.query.filter_by(blog_id=id).all()
return comments
def _repr_(self):
return f'Comment {self.comment}'
class Quote:
"""
Qoute blueprint
"""
def __init__(self,quote, author):
self.quote = quote
self.author = author
class Subscriber(db.Model):
__tablename__ = "subscribers"
id=db.Column(db.Integer,primary_key=True)
email = db.Column(db.String(255),index=True)
def save_subscriber(self):
db.session.add(self)
db.session.commit()
def __repr__(self):
return f'Subscriber:{self.email}' | 0.372277 | 0.058051 |
from fractions import Fraction
from random import randint
NUMBER_OF_ITERATIONS = 10_000
FLOUR = True
SUGAR = False
def main(iterations: int = NUMBER_OF_ITERATIONS) -> None:
"""
print the percentage of the iterations where the selected person had the
required item
"""
times_person_A_has_extra_flour = 0
times_person_A_does_not_have_extra_flour = 0
times_person_B_has_extra_flour = 0
times_person_B_does_not_have_extra_flour = 0
# local redefinition to make things slightly faster
FLOUR = True
SUGAR = False
for _ in range(iterations):
first_item = FLOUR if randint(0, 1) else SUGAR
second_item = FLOUR if randint(0, 1) else SUGAR
# only choose pairs that person A could have, based on their statement
# that one of the two items they have is flour
if first_item or second_item:
# person A has extra flour if both items are flour
if first_item == second_item == FLOUR:
times_person_A_has_extra_flour += 1
else:
times_person_A_does_not_have_extra_flour += 1
# only choose pairs that person B could have, based on their statement
# that the first item they bought is flour
if first_item:
# person B has extra flour if both items are flour
if first_item == second_item == FLOUR:
times_person_B_has_extra_flour += 1
else:
times_person_B_does_not_have_extra_flour += 1
print(__doc__)
print(
f"""If they visit the store {iterations:,} times, and you pick someone at random
each time they respond as in the scenario:
"""
)
total_valid_person_A_scenarios = (
times_person_A_has_extra_flour + times_person_A_does_not_have_extra_flour
)
percent_of_times_person_A_has_flour = (
times_person_A_has_extra_flour / total_valid_person_A_scenarios
)
print(f"person A had flour {percent_of_times_person_A_has_flour:.1%} of the time")
total_valid_person_B_scenarios = (
times_person_B_has_extra_flour + times_person_B_does_not_have_extra_flour
)
percent_of_times_person_B_has_flour = (
times_person_B_has_extra_flour / total_valid_person_B_scenarios
)
print(f"person B had flour {percent_of_times_person_B_has_flour:.1%} of the time")
person_A_fraction = Fraction(
times_person_A_has_extra_flour, iterations
).limit_denominator(iterations)
person_B_fraction = Fraction(
times_person_B_has_extra_flour, iterations
).limit_denominator(iterations)
print(
f"""
If you don't wait for a time when their answers are correct,
{person_A_fraction.numerator:,} times out of every {person_A_fraction.denominator:,} visits to the store ({float(person_A_fraction):.1%}) person A will have flour,
and person B will have it {person_B_fraction.numerator:,} times out of {person_B_fraction.denominator:,} visits ({float(person_B_fraction):.1%})
This is because out of {iterations:,} scenarios, person A's statement was true in {total_valid_person_A_scenarios / iterations:.1%} of the scenarios,
while person B's statement was true in only {total_valid_person_B_scenarios / iterations:.1%} of the scenarios.
"""
)
if __name__ == "__main__":
main() | other/two_children_problem.py | from fractions import Fraction
from random import randint
NUMBER_OF_ITERATIONS = 10_000
FLOUR = True
SUGAR = False
def main(iterations: int = NUMBER_OF_ITERATIONS) -> None:
"""
print the percentage of the iterations where the selected person had the
required item
"""
times_person_A_has_extra_flour = 0
times_person_A_does_not_have_extra_flour = 0
times_person_B_has_extra_flour = 0
times_person_B_does_not_have_extra_flour = 0
# local redefinition to make things slightly faster
FLOUR = True
SUGAR = False
for _ in range(iterations):
first_item = FLOUR if randint(0, 1) else SUGAR
second_item = FLOUR if randint(0, 1) else SUGAR
# only choose pairs that person A could have, based on their statement
# that one of the two items they have is flour
if first_item or second_item:
# person A has extra flour if both items are flour
if first_item == second_item == FLOUR:
times_person_A_has_extra_flour += 1
else:
times_person_A_does_not_have_extra_flour += 1
# only choose pairs that person B could have, based on their statement
# that the first item they bought is flour
if first_item:
# person B has extra flour if both items are flour
if first_item == second_item == FLOUR:
times_person_B_has_extra_flour += 1
else:
times_person_B_does_not_have_extra_flour += 1
print(__doc__)
print(
f"""If they visit the store {iterations:,} times, and you pick someone at random
each time they respond as in the scenario:
"""
)
total_valid_person_A_scenarios = (
times_person_A_has_extra_flour + times_person_A_does_not_have_extra_flour
)
percent_of_times_person_A_has_flour = (
times_person_A_has_extra_flour / total_valid_person_A_scenarios
)
print(f"person A had flour {percent_of_times_person_A_has_flour:.1%} of the time")
total_valid_person_B_scenarios = (
times_person_B_has_extra_flour + times_person_B_does_not_have_extra_flour
)
percent_of_times_person_B_has_flour = (
times_person_B_has_extra_flour / total_valid_person_B_scenarios
)
print(f"person B had flour {percent_of_times_person_B_has_flour:.1%} of the time")
person_A_fraction = Fraction(
times_person_A_has_extra_flour, iterations
).limit_denominator(iterations)
person_B_fraction = Fraction(
times_person_B_has_extra_flour, iterations
).limit_denominator(iterations)
print(
f"""
If you don't wait for a time when their answers are correct,
{person_A_fraction.numerator:,} times out of every {person_A_fraction.denominator:,} visits to the store ({float(person_A_fraction):.1%}) person A will have flour,
and person B will have it {person_B_fraction.numerator:,} times out of {person_B_fraction.denominator:,} visits ({float(person_B_fraction):.1%})
This is because out of {iterations:,} scenarios, person A's statement was true in {total_valid_person_A_scenarios / iterations:.1%} of the scenarios,
while person B's statement was true in only {total_valid_person_B_scenarios / iterations:.1%} of the scenarios.
"""
)
if __name__ == "__main__":
main() | 0.523177 | 0.362236 |
import os
import time
from glob import glob
from multiprocessing import Pool, cpu_count
import numpy as np
import tensorflow as tf
from absl import app, logging
from absl.flags import argparse_flags
from tqdm import auto as tqdm
import lm.config
import lm.encoders
import lm.examples
args = None
def readlines_txt(src):
with open(src) as fd:
if not args.by_line:
return [fd.read()]
else:
return fd.readlines()
LINE_READER = {
".txt": readlines_txt,
".tsv": readlines_txt,
}
def readlines(src):
_, ext = os.path.splitext(src)
f = LINE_READER.get(ext, None)
if f is None:
logging.warning("no readlines for file %s", src)
return
return f(src)
# Helper functions and classes
def sizechunks(l, n):
out = []
chunk = []
sz = 0
pbar = tqdm.tqdm(l)
pbar.set_description("Measuring size...")
for fpath in pbar:
chunk.append(fpath)
sz += tf.io.gfile.stat(fpath).length
if sz >= n:
out.append(chunk)
sz = 0
chunk = []
if chunk:
out.append(chunk)
return out
def parallel(src_dst_list, total):
count = args.nproc or cpu_count()
pool = Pool(processes=count) if count > 1 else None
mapper = pool.imap if count > 1 else map
if args.format == "tfrecord":
transformer = lm.examples.transform_many_and_write_one_tfrecord
elif args.format in ["tok16", "tok32"]:
transformer = lm.examples.transform_many_and_write_one_tok16_or_tok32
else:
raise ValueError("Unknown --format {}".format(args.format))
token_total = 0
example_total = 0
for token_count, example_count in tqdm.tqdm(
mapper(transformer, src_dst_list),
total=total,
):
token_total += token_count
example_total += example_count
return token_total, example_total
def parse_args(args, parser):
parser.add_argument(
"input",
type=str,
help="A file containing a list of filenames. Each file will become a single training example (unless --by_line is set).",
)
parser.add_argument(
"output", type=str, default="output", help="Where to write tfrecords"
)
parser.add_argument(
"--size",
type=float,
default=165.0,
help="the size in MB of uncompressed text to add to each tfrecord file, default 165MB",
)
parser.add_argument(
"--name", type=str, default="dataset", help="prefix name for the output files."
)
parser.add_argument(
"--encoder", type=str, default="gpt2", help="Name or path of an encoder spec"
)
parser.add_argument(
"--by_line", action="store_true", help="encodes each line as a separate record"
)
parser.add_argument(
"--no-ftfy", action="store_true", help="Don't pass source text through ftfy.fix_text() (and don't replace unicode ellipses with '...')"
)
parser.add_argument(
"--nproc", type=int, default=0, help="the number of processes to use for multiprocess encoding (0=all CPUs, 1=disable multiprocessing)"
)
parser.add_argument(
"--format", type=str, default="tfrecord", help="""--format=tfrecord (the default) writes tokens as .tfrecord files; each document becomes a single tf.train.Example. --format=tok16 simply dumps uint16 tokens to disk; good for OpenAI GPT-2/GPT-3 tokenization (which is the default --encoder setting). --format=tok32 dumps int32 tokens to disk; suitable for advanced tokenization schemes that need to store negative-valued tokens or whose vocabulary is larger than 65535 elements."""
)
def is_integer(x):
return np.can_cast(x, np.int32)
def is_float(x):
return np.can_cast(x, np.float32)
def is_exact(x):
return is_integer(x) or is_float(x) and x == int(x)
def num(x, digits_after_decimal=2):
if is_integer(x):
spec = '{:,d}'
else:
spec = '{:,.%df}' % digits_after_decimal
return spec.format(x)
def local_parse_args(args):
parser = argparse_flags.ArgumentParser()
parse_args(args, parser)
return parser.parse_args(args[1:])
def main(argv):
global args
args = argv
txt_files = open(args.input).read().splitlines()
if not txt_files:
logging.error("no data files found")
return
os.makedirs(args.output, exist_ok=True)
if tf.io.gfile.exists(args.encoder):
enccfg = lm.config.load(args.encoder)
encoder = lm.encoders.from_config(enccfg)
else:
encoder = lm.encoders.from_config(dict(kind="hf", location=args.encoder))
megabytes_per_tfrecord = int(args.size * 1e6)
file_chunks = sizechunks(
txt_files, megabytes_per_tfrecord
) # Assign files_per file to a tfrecord file each
logging.info(
"Got %d files, divided into %d chunks.", len(txt_files), len(file_chunks)
)
def getdst(name, idx, total):
filename_format = '%s-%05d-of-%05d' # standard tensorflow shard naming convention: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/sharded-filename
if args.format == "tfrecord":
return os.path.join(args.output, (filename_format + ".tfrecord") % (name, idx, total))
elif args.format == "tok16":
return os.path.join(args.output, (filename_format + ".tok16") % (name, idx, total))
elif args.format == "tok32":
return os.path.join(args.output, (filename_format + ".tok32") % (name, idx, total))
else:
raise ValueError("Unknown --format {}".format(args.format))
jobs = list(
(encoder, chunks, getdst(args.name, idx, len(file_chunks)), args)
for idx, chunks in enumerate(file_chunks)
)
start = time.time()
token_total, example_total = parallel(jobs, total=len(file_chunks))
end = time.time()
elapsed = (end - start)
tokens_per_second = token_total / elapsed
tokens_per_file = token_total / len(jobs)
logging.info(
"finished in %ss: tokenized %d of %d files (%s tokens @ %s tokens/sec) into %d files (~%s tokens per file)",
num(elapsed), example_total, len(txt_files), num(token_total), num(tokens_per_second), len(jobs), num(tokens_per_file),
)
if __name__ == "__main__":
app.run(main, flags_parser=parse_args) | src/lm/cli/encode.py | import os
import time
from glob import glob
from multiprocessing import Pool, cpu_count
import numpy as np
import tensorflow as tf
from absl import app, logging
from absl.flags import argparse_flags
from tqdm import auto as tqdm
import lm.config
import lm.encoders
import lm.examples
args = None
def readlines_txt(src):
with open(src) as fd:
if not args.by_line:
return [fd.read()]
else:
return fd.readlines()
LINE_READER = {
".txt": readlines_txt,
".tsv": readlines_txt,
}
def readlines(src):
_, ext = os.path.splitext(src)
f = LINE_READER.get(ext, None)
if f is None:
logging.warning("no readlines for file %s", src)
return
return f(src)
# Helper functions and classes
def sizechunks(l, n):
out = []
chunk = []
sz = 0
pbar = tqdm.tqdm(l)
pbar.set_description("Measuring size...")
for fpath in pbar:
chunk.append(fpath)
sz += tf.io.gfile.stat(fpath).length
if sz >= n:
out.append(chunk)
sz = 0
chunk = []
if chunk:
out.append(chunk)
return out
def parallel(src_dst_list, total):
count = args.nproc or cpu_count()
pool = Pool(processes=count) if count > 1 else None
mapper = pool.imap if count > 1 else map
if args.format == "tfrecord":
transformer = lm.examples.transform_many_and_write_one_tfrecord
elif args.format in ["tok16", "tok32"]:
transformer = lm.examples.transform_many_and_write_one_tok16_or_tok32
else:
raise ValueError("Unknown --format {}".format(args.format))
token_total = 0
example_total = 0
for token_count, example_count in tqdm.tqdm(
mapper(transformer, src_dst_list),
total=total,
):
token_total += token_count
example_total += example_count
return token_total, example_total
def parse_args(args, parser):
parser.add_argument(
"input",
type=str,
help="A file containing a list of filenames. Each file will become a single training example (unless --by_line is set).",
)
parser.add_argument(
"output", type=str, default="output", help="Where to write tfrecords"
)
parser.add_argument(
"--size",
type=float,
default=165.0,
help="the size in MB of uncompressed text to add to each tfrecord file, default 165MB",
)
parser.add_argument(
"--name", type=str, default="dataset", help="prefix name for the output files."
)
parser.add_argument(
"--encoder", type=str, default="gpt2", help="Name or path of an encoder spec"
)
parser.add_argument(
"--by_line", action="store_true", help="encodes each line as a separate record"
)
parser.add_argument(
"--no-ftfy", action="store_true", help="Don't pass source text through ftfy.fix_text() (and don't replace unicode ellipses with '...')"
)
parser.add_argument(
"--nproc", type=int, default=0, help="the number of processes to use for multiprocess encoding (0=all CPUs, 1=disable multiprocessing)"
)
parser.add_argument(
"--format", type=str, default="tfrecord", help="""--format=tfrecord (the default) writes tokens as .tfrecord files; each document becomes a single tf.train.Example. --format=tok16 simply dumps uint16 tokens to disk; good for OpenAI GPT-2/GPT-3 tokenization (which is the default --encoder setting). --format=tok32 dumps int32 tokens to disk; suitable for advanced tokenization schemes that need to store negative-valued tokens or whose vocabulary is larger than 65535 elements."""
)
def is_integer(x):
return np.can_cast(x, np.int32)
def is_float(x):
return np.can_cast(x, np.float32)
def is_exact(x):
return is_integer(x) or is_float(x) and x == int(x)
def num(x, digits_after_decimal=2):
if is_integer(x):
spec = '{:,d}'
else:
spec = '{:,.%df}' % digits_after_decimal
return spec.format(x)
def local_parse_args(args):
parser = argparse_flags.ArgumentParser()
parse_args(args, parser)
return parser.parse_args(args[1:])
def main(argv):
global args
args = argv
txt_files = open(args.input).read().splitlines()
if not txt_files:
logging.error("no data files found")
return
os.makedirs(args.output, exist_ok=True)
if tf.io.gfile.exists(args.encoder):
enccfg = lm.config.load(args.encoder)
encoder = lm.encoders.from_config(enccfg)
else:
encoder = lm.encoders.from_config(dict(kind="hf", location=args.encoder))
megabytes_per_tfrecord = int(args.size * 1e6)
file_chunks = sizechunks(
txt_files, megabytes_per_tfrecord
) # Assign files_per file to a tfrecord file each
logging.info(
"Got %d files, divided into %d chunks.", len(txt_files), len(file_chunks)
)
def getdst(name, idx, total):
filename_format = '%s-%05d-of-%05d' # standard tensorflow shard naming convention: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/sharded-filename
if args.format == "tfrecord":
return os.path.join(args.output, (filename_format + ".tfrecord") % (name, idx, total))
elif args.format == "tok16":
return os.path.join(args.output, (filename_format + ".tok16") % (name, idx, total))
elif args.format == "tok32":
return os.path.join(args.output, (filename_format + ".tok32") % (name, idx, total))
else:
raise ValueError("Unknown --format {}".format(args.format))
jobs = list(
(encoder, chunks, getdst(args.name, idx, len(file_chunks)), args)
for idx, chunks in enumerate(file_chunks)
)
start = time.time()
token_total, example_total = parallel(jobs, total=len(file_chunks))
end = time.time()
elapsed = (end - start)
tokens_per_second = token_total / elapsed
tokens_per_file = token_total / len(jobs)
logging.info(
"finished in %ss: tokenized %d of %d files (%s tokens @ %s tokens/sec) into %d files (~%s tokens per file)",
num(elapsed), example_total, len(txt_files), num(token_total), num(tokens_per_second), len(jobs), num(tokens_per_file),
)
if __name__ == "__main__":
app.run(main, flags_parser=parse_args) | 0.522689 | 0.180215 |
import ddt
from deuceclient.tests import *
import httpretty
from deucevalere import vault_validate
from deucevalere.tests import *
from deucevalere.tests.client_base import TestValereClientBase
@ddt.ddt
@httpretty.activate
class TestConvenienceFunctionValidation(TestValereClientBase):
def setUp(self):
super().setUp()
self.project_id = create_project_name()
self.vault_id = create_vault_name()
self.generate_blocks(count=20)
self.generate_orphaned_blocks(count=10)
self.secondary_setup(manager_start=None,
manager_end=None)
def tearDown(self):
super().tearDown()
@ddt.data(True, False)
def test_vault_validate(self, value):
def metadata_listing_callback(request, uri, headers):
return self.metadata_block_listing_success(request,
uri,
headers)
def metadata_head_callback(request, uri, headers):
return self.metadata_block_head_success(request,
uri,
headers)
def storage_listing_callback(request, uri, headers):
return self.storage_block_listing_success(request,
uri,
headers)
def storage_head_callback(request, uri, headers):
return self.storage_block_head_success(request,
uri,
headers)
url = get_blocks_url(self.apihost, self.vault.vault_id)
httpretty.register_uri(httpretty.GET,
url,
body=metadata_listing_callback)
httpretty.register_uri(httpretty.HEAD,
self.get_metadata_block_pattern_matcher(),
body=metadata_head_callback)
surl = get_storage_blocks_url(self.apihost, self.vault.vault_id)
httpretty.register_uri(httpretty.GET,
surl,
body=storage_listing_callback)
httpretty.register_uri(httpretty.HEAD,
self.get_storage_block_pattern_matcher(),
body=storage_head_callback)
self.assertEqual(vault_validate(self.deuce_client,
self.vault,
self.manager,
head_storage_blocks=value),
0)
def test_vault_validate_fail_metadata_block_list(self):
def metadata_listing_callback(request, uri, headers):
return (404, headers, 'mock failure')
url = get_blocks_url(self.apihost, self.vault.vault_id)
httpretty.register_uri(httpretty.GET,
url,
body=metadata_listing_callback)
with self.assertRaises(RuntimeError):
vault_validate(self.deuce_client,
self.vault,
self.manager)
def test_vault_validate_fail_storage_block_list(self):
def metadata_listing_callback(request, uri, headers):
return self.metadata_block_listing_success(request,
uri,
headers)
def metadata_head_callback(request, uri, headers):
return self.metadata_block_head_success(request,
uri,
headers)
def storage_listing_callback(request, uri, headers):
return (404, headers, 'mock failure')
url = get_blocks_url(self.apihost, self.vault.vault_id)
httpretty.register_uri(httpretty.GET,
url,
body=metadata_listing_callback)
httpretty.register_uri(httpretty.HEAD,
self.get_metadata_block_pattern_matcher(),
body=metadata_head_callback)
surl = get_storage_blocks_url(self.apihost, self.vault.vault_id)
httpretty.register_uri(httpretty.GET,
surl,
body=storage_listing_callback)
with self.assertRaises(RuntimeError):
vault_validate(self.deuce_client,
self.vault,
self.manager) | deucevalere/tests/test_convenience_functions_validation.py | import ddt
from deuceclient.tests import *
import httpretty
from deucevalere import vault_validate
from deucevalere.tests import *
from deucevalere.tests.client_base import TestValereClientBase
@ddt.ddt
@httpretty.activate
class TestConvenienceFunctionValidation(TestValereClientBase):
def setUp(self):
super().setUp()
self.project_id = create_project_name()
self.vault_id = create_vault_name()
self.generate_blocks(count=20)
self.generate_orphaned_blocks(count=10)
self.secondary_setup(manager_start=None,
manager_end=None)
def tearDown(self):
super().tearDown()
@ddt.data(True, False)
def test_vault_validate(self, value):
def metadata_listing_callback(request, uri, headers):
return self.metadata_block_listing_success(request,
uri,
headers)
def metadata_head_callback(request, uri, headers):
return self.metadata_block_head_success(request,
uri,
headers)
def storage_listing_callback(request, uri, headers):
return self.storage_block_listing_success(request,
uri,
headers)
def storage_head_callback(request, uri, headers):
return self.storage_block_head_success(request,
uri,
headers)
url = get_blocks_url(self.apihost, self.vault.vault_id)
httpretty.register_uri(httpretty.GET,
url,
body=metadata_listing_callback)
httpretty.register_uri(httpretty.HEAD,
self.get_metadata_block_pattern_matcher(),
body=metadata_head_callback)
surl = get_storage_blocks_url(self.apihost, self.vault.vault_id)
httpretty.register_uri(httpretty.GET,
surl,
body=storage_listing_callback)
httpretty.register_uri(httpretty.HEAD,
self.get_storage_block_pattern_matcher(),
body=storage_head_callback)
self.assertEqual(vault_validate(self.deuce_client,
self.vault,
self.manager,
head_storage_blocks=value),
0)
def test_vault_validate_fail_metadata_block_list(self):
def metadata_listing_callback(request, uri, headers):
return (404, headers, 'mock failure')
url = get_blocks_url(self.apihost, self.vault.vault_id)
httpretty.register_uri(httpretty.GET,
url,
body=metadata_listing_callback)
with self.assertRaises(RuntimeError):
vault_validate(self.deuce_client,
self.vault,
self.manager)
def test_vault_validate_fail_storage_block_list(self):
def metadata_listing_callback(request, uri, headers):
return self.metadata_block_listing_success(request,
uri,
headers)
def metadata_head_callback(request, uri, headers):
return self.metadata_block_head_success(request,
uri,
headers)
def storage_listing_callback(request, uri, headers):
return (404, headers, 'mock failure')
url = get_blocks_url(self.apihost, self.vault.vault_id)
httpretty.register_uri(httpretty.GET,
url,
body=metadata_listing_callback)
httpretty.register_uri(httpretty.HEAD,
self.get_metadata_block_pattern_matcher(),
body=metadata_head_callback)
surl = get_storage_blocks_url(self.apihost, self.vault.vault_id)
httpretty.register_uri(httpretty.GET,
surl,
body=storage_listing_callback)
with self.assertRaises(RuntimeError):
vault_validate(self.deuce_client,
self.vault,
self.manager) | 0.511961 | 0.117876 |
"""Process road data from OSM extracts and create road network topology
"""
import os
from glob import glob
import fiona
import geopandas as gpd
import pandas as pd
from tqdm import tqdm
tqdm.pandas()
from utils import *
def get_road_condition_surface(x):
    """Infer ``(road_cond, material)`` for one OSM road row.

    Parameters
    ----------
    x : object with ``surface`` and ``highway`` attributes
        Typically a (Geo)DataFrame row produced by ``edges.progress_apply``.

    Returns
    -------
    tuple of (str, str)
        ``road_cond`` is ``'paved'`` or ``'unpaved'``; ``material`` is the
        surface material, defaulting to asphalt (paved) or gravel (unpaved).
    """
    # Major road classes are assumed paved when no surface tag is present.
    major_roads = ('motorway', 'motorway_link', 'trunk', 'trunk_link',
                   'primary', 'primary_link')
    # Treat None, empty string AND NaN as "surface tag missing".
    # BUGFIX: the previous `not x.surface` test missed NaN (NaN is truthy),
    # so untagged rows read as NaN fell through to the final branch and
    # returned ('unpaved', nan).
    if not x.surface or pd.isna(x.surface):
        if x.highway in major_roads:
            return 'paved', 'asphalt'
        return 'unpaved', 'gravel'
    if x.surface == 'paved':
        return x.surface, 'asphalt'
    if x.surface == 'unpaved':
        return x.surface, 'gravel'
    if x.surface in ('asphalt', 'concrete'):
        return 'paved', x.surface
    # Any other tagged material (dirt, cobblestone, ...) counts as unpaved.
    return 'unpaved', x.surface
def get_road_width(x, width, shoulder):
    """Estimate the total road width in metres for one OSM road row.

    Total width = lanes * carriageway width per lane + a shoulder on each side.

    Parameters
    ----------
    x : object with ``lanes`` and ``highway`` attributes
        Typically a (Geo)DataFrame row.
    width : float
        Carriageway width per lane, in metres.
    shoulder : float
        Shoulder width on one side, in metres (applied twice).

    Returns
    -------
    float
        Estimated total width in metres.
    """
    major_roads = ('motorway', 'motorway_link', 'trunk', 'trunk_link',
                   'primary', 'primary_link')
    lanes = x.lanes
    # Treat None, empty string AND NaN as "lane count missing".
    # BUGFIX: the previous `not x.lanes` test missed NaN (NaN is truthy),
    # so a NaN lane count produced float(nan)*width = nan widths.
    if not lanes or pd.isna(lanes):
        # Default: 2 lanes for major roads, 1 otherwise.
        n_lanes = 2.0 if x.highway in major_roads else 1.0
    else:
        n_lanes = float(lanes)  # OSM tags arrive as strings, e.g. '3'
    return n_lanes * width + 2.0 * shoulder
def get_road_lanes(x):
    """Return the lane count for one OSM road row.

    Parameters
    ----------
    x : object with ``lanes`` and ``highway`` attributes
        Typically a (Geo)DataFrame row.

    Returns
    -------
    int or raw ``lanes`` value
        Defaults to 2 (major roads) or 1 (other roads) when the tag is
        missing. NOTE(review): when the tag IS present the raw OSM value is
        returned unchanged — often a string like ``'3'`` — which is
        inconsistent with :func:`get_road_width`'s float conversion; confirm
        whether downstream consumers expect a numeric type before changing.
    """
    major_roads = ('motorway', 'motorway_link', 'trunk', 'trunk_link',
                   'primary', 'primary_link')
    lanes = x.lanes
    # Treat None, empty string AND NaN as "lane count missing".
    # BUGFIX: the previous `not x.lanes` test missed NaN (NaN is truthy),
    # so NaN lane counts were passed through unchanged.
    if not lanes or pd.isna(lanes):
        return 2 if x.highway in major_roads else 1
    return lanes
def main(config):
incoming_data_path = config['paths']['incoming_data']
data_path = config['paths']['data']
output_path = config['paths']['output']
scratch_path = config['paths']['scratch']
networks = os.path.join(scratch_path,'road')
# Extract date string
date="211101"
width = 6.5 # Default carriageway width in meters
shoulder = 1.5
# Extract rail features from .osm.pbf to .gpkg
countries=[
"kenya",
"tanzania",
"uganda",
"zambia"
]
summary_path = os.path.join(output_path,'summary_stats')
if os.path.exists(summary_path) == False:
os.mkdir(summary_path)
output_excel = os.path.join(summary_path,
'road_conditions_summary.xlsx',
)
output_wrtr = pd.ExcelWriter(output_excel)
for country in countries:
# Read the geopackage file that was converted from osm.pdf
edges = gpd.read_file(os.path.join(networks,f"{country}-road.gpkg"), layer = "lines")
# From the geopackage file extract relevant roads
highway_list = ['motorway','motorway_link',
'trunk','trunk_link',
'primary','primary_link',
'secondary','secondary_link',
'tertiary','tertiary_link']
edges = edges[edges.highway.isin(highway_list)]
# Add attributes
edges['surface_material'] = edges.progress_apply(lambda x:get_road_condition_surface(x),axis=1)
edges[['road_cond','material']] = edges['surface_material'].apply(pd.Series)
edges.drop('surface_material',axis=1,inplace=True)
edges['width_m'] = edges.progress_apply(lambda x:get_road_width(x,width,shoulder),axis=1)
edges['lanes'] = edges.progress_apply(lambda x:get_road_lanes(x),axis=1)
edges['highway'] = edges.progress_apply(lambda x: x.highway.replace('_link',''),axis=1)
processed_path = os.path.join(data_path,country,'networks')
if os.path.exists(processed_path) == False:
os.mkdir(processed_path)
out_fname = os.path.join(data_path,country,"networks","road.gpkg")
# Create network topology
network = create_network_from_nodes_and_edges(
None,
edges,
"road",
out_fname,
)
# Set projection systems find the actual road lengths in meters
# Length may be invalid for a geographic CRS using degrees as units; must project geometries to a planar CRS
# EPSG 32736 works for Burundi, Eswatini, Kenya, Malawi, Mozambique, Rwanda, South Africa, Tanzania, Uganda, Zambia, Zimbabwe
# Use https://epsg.io/ to find for other areas
network.edges = network.edges.set_crs(epsg=4326)
network.nodes = network.nodes.set_crs(epsg=4326)
network.edges = network.edges.to_crs(epsg=32736)
network.nodes = network.nodes.to_crs(epsg=32736)
network.edges['road_length_m'] = network.edges.progress_apply(lambda x:x.geometry.length,axis=1)
# Store the final road network in geopackage in the processed_path
network.edges.to_file(out_fname, layer='edges', driver='GPKG')
network.nodes.to_file(out_fname, layer='nodes', driver='GPKG')
# Generate summary statistics
sum_network = network.edges.groupby(['highway','road_cond'])[['road_length_m']].sum().reset_index()
print (sum_network) # length in m
sum_network2 = (sum_network.set_index(['highway']).pivot(
columns='road_cond'
)['road_length_m'].div(1000).reset_index().rename_axis(None, axis=1)).fillna(0)
print(sum_network2) # length converted to km
sum_network2.to_excel(output_wrtr,country, index=False)
output_wrtr.save()
if __name__ == '__main__':
CONFIG = load_config()
main(CONFIG) | scripts/preprocess/road/process_road.py | """Process road data from OSM extracts and create road network topology
"""
import os
from glob import glob
import fiona
import geopandas as gpd
import pandas as pd
from tqdm import tqdm
tqdm.pandas()
from utils import *
def get_road_condition_surface(x):
if not x.surface:
if x.highway in ('motorway','motorway_link','trunk','trunk_link','primary','primary_link'):
return 'paved','asphalt'
else:
return 'unpaved','gravel'
elif x.surface == 'paved':
return x.surface, 'asphalt'
elif x.surface == 'unpaved':
return x.surface, 'gravel'
elif x.surface in ('asphalt','concrete'):
return 'paved',x.surface
else:
return 'unpaved',x.surface
def get_road_width(x,width,shoulder):
if not x.lanes:
if x.highway in ('motorway','motorway_link','trunk','trunk_link','primary','primary_link'):
return 2.0*width + 2.0*shoulder
else:
return 1.0*width + 2.0*shoulder
else:
return float(x.lanes)*width + 2.0*shoulder
def get_road_lanes(x):
if not x.lanes:
if x.highway in ('motorway','motorway_link','trunk','trunk_link','primary','primary_link'):
return 2
else:
return 1
else:
return x.lanes
def main(config):
incoming_data_path = config['paths']['incoming_data']
data_path = config['paths']['data']
output_path = config['paths']['output']
scratch_path = config['paths']['scratch']
networks = os.path.join(scratch_path,'road')
# Extract date string
date="211101"
width = 6.5 # Default carriageway width in meters
shoulder = 1.5
# Extract rail features from .osm.pbf to .gpkg
countries=[
"kenya",
"tanzania",
"uganda",
"zambia"
]
summary_path = os.path.join(output_path,'summary_stats')
if os.path.exists(summary_path) == False:
os.mkdir(summary_path)
output_excel = os.path.join(summary_path,
'road_conditions_summary.xlsx',
)
output_wrtr = pd.ExcelWriter(output_excel)
for country in countries:
# Read the geopackage file that was converted from osm.pdf
edges = gpd.read_file(os.path.join(networks,f"{country}-road.gpkg"), layer = "lines")
# From the geopackage file extract relevant roads
highway_list = ['motorway','motorway_link',
'trunk','trunk_link',
'primary','primary_link',
'secondary','secondary_link',
'tertiary','tertiary_link']
edges = edges[edges.highway.isin(highway_list)]
# Add attributes
edges['surface_material'] = edges.progress_apply(lambda x:get_road_condition_surface(x),axis=1)
edges[['road_cond','material']] = edges['surface_material'].apply(pd.Series)
edges.drop('surface_material',axis=1,inplace=True)
edges['width_m'] = edges.progress_apply(lambda x:get_road_width(x,width,shoulder),axis=1)
edges['lanes'] = edges.progress_apply(lambda x:get_road_lanes(x),axis=1)
edges['highway'] = edges.progress_apply(lambda x: x.highway.replace('_link',''),axis=1)
processed_path = os.path.join(data_path,country,'networks')
if os.path.exists(processed_path) == False:
os.mkdir(processed_path)
out_fname = os.path.join(data_path,country,"networks","road.gpkg")
# Create network topology
network = create_network_from_nodes_and_edges(
None,
edges,
"road",
out_fname,
)
# Set projection systems find the actual road lengths in meters
# Length may be invalid for a geographic CRS using degrees as units; must project geometries to a planar CRS
# EPSG 32736 works for Burundi, Eswatini, Kenya, Malawi, Mozambique, Rwanda, South Africa, Tanzania, Uganda, Zambia, Zimbabwe
# Use https://epsg.io/ to find for other areas
network.edges = network.edges.set_crs(epsg=4326)
network.nodes = network.nodes.set_crs(epsg=4326)
network.edges = network.edges.to_crs(epsg=32736)
network.nodes = network.nodes.to_crs(epsg=32736)
network.edges['road_length_m'] = network.edges.progress_apply(lambda x:x.geometry.length,axis=1)
# Store the final road network in geopackage in the processed_path
network.edges.to_file(out_fname, layer='edges', driver='GPKG')
network.nodes.to_file(out_fname, layer='nodes', driver='GPKG')
# Generate summary statistics
sum_network = network.edges.groupby(['highway','road_cond'])[['road_length_m']].sum().reset_index()
print (sum_network) # length in m
sum_network2 = (sum_network.set_index(['highway']).pivot(
columns='road_cond'
)['road_length_m'].div(1000).reset_index().rename_axis(None, axis=1)).fillna(0)
print(sum_network2) # length converted to km
sum_network2.to_excel(output_wrtr,country, index=False)
output_wrtr.save()
if __name__ == '__main__':
CONFIG = load_config()
main(CONFIG) | 0.422266 | 0.382286 |
import config
import telebot
import sqlite3
bot = telebot.TeleBot(config.token)
user_markup = telebot.types.InlineKeyboardMarkup(row_width=2)
first_button = telebot.types.InlineKeyboardButton(text="Магазин", callback_data="shop")
second_button = telebot.types.InlineKeyboardButton(text="О нас", callback_data="about")
third_button = telebot.types.InlineKeyboardButton(text="Профиль", callback_data="prof_users")
fourth_button=telebot.types.InlineKeyboardButton(text="Корзина", callback_data="sale")
user_markup.add( first_button, second_button,third_button,fourth_button )
backboard = telebot.types.InlineKeyboardMarkup( row_width=2 )
backbutton = telebot.types.InlineKeyboardButton( text="В меню", callback_data="mainmenu" )
menubutton = telebot.types.InlineKeyboardButton( text="Вернутся к категориям", callback_data="shop" )
backboard.add( backbutton, menubutton )
user_markup = telebot.types.InlineKeyboardMarkup( row_width=2 )
user_markup.add( first_button, second_button, third_button, fourth_button )
shopboard = telebot.types.InlineKeyboardMarkup( row_width=3 )
cat1 = telebot.types.InlineKeyboardButton( text="Формы", callback_data="category_form" )
cat2 = telebot.types.InlineKeyboardButton( text="Жидкий силикон", callback_data="category_silic" )
cat3 = telebot.types.InlineKeyboardButton( text="Краски", callback_data="category_kras" )
cat4 = telebot.types.InlineKeyboardButton( text="Блёстки", callback_data="category_bles" )
cat5 = telebot.types.InlineKeyboardButton( text="Шприцы", callback_data="category_shpritc" )
cat6 = telebot.types.InlineKeyboardButton( text="Аттрактанты", callback_data="category_att" )
cat7 = telebot.types.InlineKeyboardButton( text="Приманки", callback_data="category_sp" )
cat8 = telebot.types.InlineKeyboardButton( text="Лодки", callback_data="category_ship" )
cat9 = telebot.types.InlineKeyboardButton( text="Одежда", callback_data="category_odej" )
shopboard.add( cat1, cat2, cat3, cat4, cat5, cat6, cat7, cat8, cat9, backbutton )
adoutboard = telebot.types.InlineKeyboardMarkup( row_width=2 )
adoutboard.add( backbutton )
profboard = telebot.types.InlineKeyboardMarkup( row_width=2 )
profboard.add( backbutton )
saleboard = telebot.types.InlineKeyboardMarkup( row_width=2 )
saleboard.add( backbutton )
def regist(call):
reg = []
def inputsurname(message):
text = message.text
reg.append( text )
msg = bot.send_message( message.chat.id, 'Введите Фамилию:' )
bot.register_next_step_handler( msg, inputphone )
def inputphone(message):
text = message.text
reg.append( text )
msg = bot.send_message( message.chat.id, 'Введите телефон:' )
bot.register_next_step_handler( msg, inputadress )
def inputadress(message):
text = message.text
reg.append( text )
msg = bot.send_message( message.chat.id, 'Введите адрес:' )
bot.register_next_step_handler( msg, allreg )
def allreg(message):
text = message.text
reg.append( text )
bot.send_message( message.chat.id, 'Регистрация завершена!' )
with sqlite3.connect( "picbd.db") as pic:
cur = pic.cursor()
cur.execute("INSERT INTO users (id, name, surname, phone, adress) VALUES (?, ?, ?, ?, ?)",(message.chat.id,reg[0],reg[1],reg[2],reg[3]))
pic.commit()
cur.close()
bot.send_message( chat_id=call.message.chat.id, text="Выберите действие", reply_markup=backboard )
msg = bot.send_message(call.message.chat.id, 'Введите имя' )
bot.register_next_step_handler( msg, inputsurname)
def sendphoto(bdname,call):
media=[]
with sqlite3.connect( "picbd.db" ) as pic:
cur = pic.cursor()
result = cur.execute( "SELECT price,path,name FROM "+bdname ).fetchall()
pic.close()
for i in result:
media.append(telebot.types.InputMediaPhoto(open( i[1], "rb" ),"Цена: " + str( i[0] ) + " грн." + "\n" + "Название: " + i[2]))
else:
bot.send_media_group( call.message.chat.id, media )
def profile(bdname,call):
with sqlite3.connect( "picbd.db" ) as pic:
cur = pic.cursor()
info = cur.execute("SELECT * FROM "+bdname+" WHERE id=?", (call.message.chat.id,))
if info.fetchone() is None:
cur.close()
regist(call)
else:
info = cur.execute( "SELECT * FROM users WHERE id=?", (call.message.chat.id,))
data=info.fetchone()
cur.close()
bot.send_message( call.message.chat.id,"Имя: "+data[1]+"\nФамилия: "+data[2]+"\nТелефон: "+data[3]+"\nАдрес доставки: "+data[4])
bot.send_message( chat_id=call.message.chat.id, text="Выберите действие", reply_markup=backboard )
@bot.message_handler(commands=["start"])
def start(commands):
bot.send_message( commands.chat.id, "Выберите категорию ",reply_markup=user_markup )
@bot.callback_query_handler(func=lambda call:True)
def callback_inline(call):
if call.data == "mainmenu":
bot.edit_message_text( chat_id=call.message.chat.id, message_id=call.message.message_id, text="Меню",reply_markup=user_markup )
if call.data == "shop":
bot.edit_message_text( chat_id=call.message.chat.id, message_id=call.message.message_id, text="Выберите категорию",reply_markup=shopboard )
elif call.data=="about":
bot.edit_message_text( chat_id=call.message.chat.id, message_id=call.message.message_id,text="Раздел 'О нас' в работе", reply_markup=adoutboard )
elif call.data == "sale":
bot.edit_message_text( chat_id=call.message.chat.id, message_id=call.message.message_id,text="Раздел 'Корзина' в работе", reply_markup=saleboard )
if call.data.split("_")[0]=="category":
sendphoto(call.data.split("_")[1],call)
bot.send_message( chat_id=call.message.chat.id, text="Выберите действие", reply_markup=backboard )
if call.data.split("_")[0]=="prof":
profile(call.data.split("_")[1],call)
bot.infinity_polling() | main.py | import config
import telebot
import sqlite3
bot = telebot.TeleBot(config.token)
user_markup = telebot.types.InlineKeyboardMarkup(row_width=2)
first_button = telebot.types.InlineKeyboardButton(text="Магазин", callback_data="shop")
second_button = telebot.types.InlineKeyboardButton(text="О нас", callback_data="about")
third_button = telebot.types.InlineKeyboardButton(text="Профиль", callback_data="prof_users")
fourth_button=telebot.types.InlineKeyboardButton(text="Корзина", callback_data="sale")
user_markup.add( first_button, second_button,third_button,fourth_button )
backboard = telebot.types.InlineKeyboardMarkup( row_width=2 )
backbutton = telebot.types.InlineKeyboardButton( text="В меню", callback_data="mainmenu" )
menubutton = telebot.types.InlineKeyboardButton( text="Вернутся к категориям", callback_data="shop" )
backboard.add( backbutton, menubutton )
user_markup = telebot.types.InlineKeyboardMarkup( row_width=2 )
user_markup.add( first_button, second_button, third_button, fourth_button )
shopboard = telebot.types.InlineKeyboardMarkup( row_width=3 )
cat1 = telebot.types.InlineKeyboardButton( text="Формы", callback_data="category_form" )
cat2 = telebot.types.InlineKeyboardButton( text="Жидкий силикон", callback_data="category_silic" )
cat3 = telebot.types.InlineKeyboardButton( text="Краски", callback_data="category_kras" )
cat4 = telebot.types.InlineKeyboardButton( text="Блёстки", callback_data="category_bles" )
cat5 = telebot.types.InlineKeyboardButton( text="Шприцы", callback_data="category_shpritc" )
cat6 = telebot.types.InlineKeyboardButton( text="Аттрактанты", callback_data="category_att" )
cat7 = telebot.types.InlineKeyboardButton( text="Приманки", callback_data="category_sp" )
cat8 = telebot.types.InlineKeyboardButton( text="Лодки", callback_data="category_ship" )
cat9 = telebot.types.InlineKeyboardButton( text="Одежда", callback_data="category_odej" )
shopboard.add( cat1, cat2, cat3, cat4, cat5, cat6, cat7, cat8, cat9, backbutton )
adoutboard = telebot.types.InlineKeyboardMarkup( row_width=2 )
adoutboard.add( backbutton )
profboard = telebot.types.InlineKeyboardMarkup( row_width=2 )
profboard.add( backbutton )
saleboard = telebot.types.InlineKeyboardMarkup( row_width=2 )
saleboard.add( backbutton )
def regist(call):
reg = []
def inputsurname(message):
text = message.text
reg.append( text )
msg = bot.send_message( message.chat.id, 'Введите Фамилию:' )
bot.register_next_step_handler( msg, inputphone )
def inputphone(message):
text = message.text
reg.append( text )
msg = bot.send_message( message.chat.id, 'Введите телефон:' )
bot.register_next_step_handler( msg, inputadress )
def inputadress(message):
text = message.text
reg.append( text )
msg = bot.send_message( message.chat.id, 'Введите адрес:' )
bot.register_next_step_handler( msg, allreg )
def allreg(message):
text = message.text
reg.append( text )
bot.send_message( message.chat.id, 'Регистрация завершена!' )
with sqlite3.connect( "picbd.db") as pic:
cur = pic.cursor()
cur.execute("INSERT INTO users (id, name, surname, phone, adress) VALUES (?, ?, ?, ?, ?)",(message.chat.id,reg[0],reg[1],reg[2],reg[3]))
pic.commit()
cur.close()
bot.send_message( chat_id=call.message.chat.id, text="Выберите действие", reply_markup=backboard )
msg = bot.send_message(call.message.chat.id, 'Введите имя' )
bot.register_next_step_handler( msg, inputsurname)
def sendphoto(bdname,call):
media=[]
with sqlite3.connect( "picbd.db" ) as pic:
cur = pic.cursor()
result = cur.execute( "SELECT price,path,name FROM "+bdname ).fetchall()
pic.close()
for i in result:
media.append(telebot.types.InputMediaPhoto(open( i[1], "rb" ),"Цена: " + str( i[0] ) + " грн." + "\n" + "Название: " + i[2]))
else:
bot.send_media_group( call.message.chat.id, media )
def profile(bdname,call):
with sqlite3.connect( "picbd.db" ) as pic:
cur = pic.cursor()
info = cur.execute("SELECT * FROM "+bdname+" WHERE id=?", (call.message.chat.id,))
if info.fetchone() is None:
cur.close()
regist(call)
else:
info = cur.execute( "SELECT * FROM users WHERE id=?", (call.message.chat.id,))
data=info.fetchone()
cur.close()
bot.send_message( call.message.chat.id,"Имя: "+data[1]+"\nФамилия: "+data[2]+"\nТелефон: "+data[3]+"\nАдрес доставки: "+data[4])
bot.send_message( chat_id=call.message.chat.id, text="Выберите действие", reply_markup=backboard )
@bot.message_handler(commands=["start"])
def start(commands):
bot.send_message( commands.chat.id, "Выберите категорию ",reply_markup=user_markup )
@bot.callback_query_handler(func=lambda call:True)
def callback_inline(call):
if call.data == "mainmenu":
bot.edit_message_text( chat_id=call.message.chat.id, message_id=call.message.message_id, text="Меню",reply_markup=user_markup )
if call.data == "shop":
bot.edit_message_text( chat_id=call.message.chat.id, message_id=call.message.message_id, text="Выберите категорию",reply_markup=shopboard )
elif call.data=="about":
bot.edit_message_text( chat_id=call.message.chat.id, message_id=call.message.message_id,text="Раздел 'О нас' в работе", reply_markup=adoutboard )
elif call.data == "sale":
bot.edit_message_text( chat_id=call.message.chat.id, message_id=call.message.message_id,text="Раздел 'Корзина' в работе", reply_markup=saleboard )
if call.data.split("_")[0]=="category":
sendphoto(call.data.split("_")[1],call)
bot.send_message( chat_id=call.message.chat.id, text="Выберите действие", reply_markup=backboard )
if call.data.split("_")[0]=="prof":
profile(call.data.split("_")[1],call)
bot.infinity_polling() | 0.063956 | 0.093719 |
import logging
from numbers import Real
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
import toml
from incognita.data import ons_pd
from incognita.logger import logger
from incognita.utility import config
from incognita.utility import deciles
from incognita.utility import root
from incognita.utility.timing import time_function
ons_postcode_directory_stub = ons_pd.ONSPostcodeDirectory(
fields=set(),
index_column="",
data_types={},
PUBLICATION_DATE="",
IMD_MAX={"England": 32844, "Wales": 1909, "Scotland": 6976, "Northern Ireland": 890},
COUNTRY_CODES={"E92000001": "England", "W92000004": "Wales", "S92000003": "Scotland", "N92000002": "Northern Ireland"},
)
def add(number1: Real, number2: Real) -> Real:
return number1 + number2
def test_calc_imd_decile():
data = {"row_1": [1, "E92000001", 32844], "row_2": [2, "W92000004", 1]}
frame = pd.DataFrame.from_dict(data, orient="index", columns=["id", "ctry", "imd"])
imd_decile_data: pd.Series = deciles.calc_imd_decile(frame["imd"], frame["ctry"], ons_postcode_directory_stub)
predicted_result = pd.Series(data=[10, 1], index=["row_1", "row_2"])
assert isinstance(imd_decile_data, pd.Series)
assert_series_equal(imd_decile_data, predicted_result, check_dtype=False)
def test_settings_are_accurate():
with open(root.PROJECT_ROOT.joinpath("incognita-config.toml"), "r") as read_file:
settings = toml.load(read_file)
assert config._SETTINGS_TOML == settings
def test_settings_model_is_accurate():
with open(root.PROJECT_ROOT.joinpath("incognita-config.toml"), "r") as read_file:
settings = toml.load(read_file)
assert config.SETTINGS == config._create_settings(settings)
class ExampleClassLogger:
@time_function
def add(self, number1: Real, number2: Real) -> Real:
logger.info("Example Function")
return number1 + number2
def test_time_function_wraps_function():
assert time_function(add)(2, 2) == add(2, 2)
def test_time_function_raises_exception_on_non_method_arguments():
with pytest.raises(ValueError):
time_function("not a function or method") # NoQA
def test_time_function_logger_output(caplog: pytest.LogCaptureFixture):
caplog.set_level(logging.INFO)
ExampleClassLogger().add(2, 2)
assert "Calling function add" in caplog.text
assert "add took 0.0" in caplog.text | tests/test_utility.py | import logging
from numbers import Real
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
import toml
from incognita.data import ons_pd
from incognita.logger import logger
from incognita.utility import config
from incognita.utility import deciles
from incognita.utility import root
from incognita.utility.timing import time_function
ons_postcode_directory_stub = ons_pd.ONSPostcodeDirectory(
fields=set(),
index_column="",
data_types={},
PUBLICATION_DATE="",
IMD_MAX={"England": 32844, "Wales": 1909, "Scotland": 6976, "Northern Ireland": 890},
COUNTRY_CODES={"E92000001": "England", "W92000004": "Wales", "S92000003": "Scotland", "N92000002": "Northern Ireland"},
)
def add(number1: Real, number2: Real) -> Real:
return number1 + number2
def test_calc_imd_decile():
data = {"row_1": [1, "E92000001", 32844], "row_2": [2, "W92000004", 1]}
frame = pd.DataFrame.from_dict(data, orient="index", columns=["id", "ctry", "imd"])
imd_decile_data: pd.Series = deciles.calc_imd_decile(frame["imd"], frame["ctry"], ons_postcode_directory_stub)
predicted_result = pd.Series(data=[10, 1], index=["row_1", "row_2"])
assert isinstance(imd_decile_data, pd.Series)
assert_series_equal(imd_decile_data, predicted_result, check_dtype=False)
def test_settings_are_accurate():
with open(root.PROJECT_ROOT.joinpath("incognita-config.toml"), "r") as read_file:
settings = toml.load(read_file)
assert config._SETTINGS_TOML == settings
def test_settings_model_is_accurate():
with open(root.PROJECT_ROOT.joinpath("incognita-config.toml"), "r") as read_file:
settings = toml.load(read_file)
assert config.SETTINGS == config._create_settings(settings)
class ExampleClassLogger:
@time_function
def add(self, number1: Real, number2: Real) -> Real:
logger.info("Example Function")
return number1 + number2
def test_time_function_wraps_function():
assert time_function(add)(2, 2) == add(2, 2)
def test_time_function_raises_exception_on_non_method_arguments():
with pytest.raises(ValueError):
time_function("not a function or method") # NoQA
def test_time_function_logger_output(caplog: pytest.LogCaptureFixture):
caplog.set_level(logging.INFO)
ExampleClassLogger().add(2, 2)
assert "Calling function add" in caplog.text
assert "add took 0.0" in caplog.text | 0.65368 | 0.445288 |
import os
import time
import torch
import random
import argparse
from tqdm import tqdm
from scorer import Scorer
from data_utils import load_data
from sagan_trainer import SAGAN_Trainer
from torchvision.utils import save_image
from torch.utils.tensorboard import SummaryWriter
class Instructor:
def __init__(self, args):
self.args = args
self._print_args()
def _print_args(self):
print('TRAINING ARGUMENTS:')
for arg in vars(self.args):
print(f">>> {arg}: {getattr(self.args, arg)}")
def train_sagan(self):
print('=> creating model...')
trainer = SAGAN_Trainer(args)
writer = SummaryWriter()
print('=> creating scorer...')
scorer = Scorer(device=args.device, resize=True)
print('=> loading data...')
dataloader = load_data(im_size=self.args.im_size,
batch_size=self.args.batch_size,
workers=self.args.num_workers,
dataset=self.args.dataset,
data_path=os.path.join(self.args.data_dir, self.args.dataset))
data_iter = iter(dataloader)
model_save_step = int(self.args.model_save_step * len(dataloader))
fixed_z = torch.randn(self.args.batch_size, self.args.z_dim).to(self.args.device)
real_images, _ = next(data_iter)
real_images = (real_images * 0.5 + 0.5).clamp(0, 1)
writer.add_images('real', real_images, 0)
save_image(real_images, os.path.join(self.args.sample_dir, self.args.timestamp, 'real.png'))
all_preds = list()
for inputs, _ in tqdm(dataloader):
inputs = inputs.to(self.args.device) * 0.5 + 0.5
all_preds.append(scorer.get_preds(inputs))
score, _ = scorer.compute_score(torch.cat(all_preds, dim=0), splits=10)
print(f"real inception score: {score:.4f}")
best_score = 0
for step in range(self.args.total_step):
''' train sagan model '''
trainer.D.train()
trainer.G.train()
try:
real_images, _ = next(data_iter)
except:
data_iter = iter(dataloader)
real_images, _ = next(data_iter)
real_images = real_images.to(self.args.device)
d_loss_real, d_loss_fake, g_loss_fake = trainer.train(real_images)
''' print info '''
if (step + 1) % self.args.log_step == 0:
print(f"step: {step + 1}/{self.args.total_step}, g_loss_fake: {g_loss_fake:.4f}")
writer.add_scalar('Loss/D_real', d_loss_real, step + 1)
writer.add_scalar('Loss/D_fake', d_loss_fake, step + 1)
writer.add_scalar('Loss/G_fake', g_loss_fake, step + 1)
writer.add_scalar('Score/G_attn1', trainer.G.attn1.gamma.mean().item(), step + 1)
writer.add_scalar('Score/D_attn1', trainer.D.attn1.gamma.mean().item(), step + 1)
''' compute inception score '''
if (step + 1) % self.args.eval_step == 0:
trainer.G.eval()
all_preds = list()
for i in tqdm(range(self.args.sample_num)):
z = torch.randn(self.args.batch_size, self.args.z_dim).to(self.args.device)
inputs = trainer.G(z) * 0.5 + 0.5
all_preds.append(scorer.get_preds(inputs))
score, _ = scorer.compute_score(torch.cat(all_preds, dim=0), splits=10)
best_score = score if score > best_score else best_score
print(f"fake inception score: {score:.4f}")
writer.add_scalar('Score/IS_fake', score, step + 1)
''' sample image '''
if (step + 1) % self.args.sample_step == 0:
trainer.G.eval()
fake_images = trainer.G(fixed_z)
fake_images = (fake_images * 0.5 + 0.5).clamp(0, 1)
writer.add_images('fake', fake_images, step + 1)
save_image(fake_images, os.path.join(self.args.sample_dir, self.args.timestamp, f"fake_{step + 1}.png"))
''' save model '''
if (step + 1) % model_save_step == 0:
torch.save(trainer.G.state_dict(), os.path.join(self.args.save_dir, self.args.timestamp, f"{step + 1}_G.pt"))
torch.save(trainer.D.state_dict(), os.path.join(self.args.save_dir, self.args.timestamp, f"{step + 1}_D.pt"))
writer.close()
print(f"best inception score: {best_score:.4f}")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
''' dataset '''
parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10'])
parser.add_argument('--data_dir', type=str, default='data')
parser.add_argument('--sample_dir', type=str, default='sample')
parser.add_argument('--save_dir', type=str, default='saves')
parser.add_argument('--num_workers', type=int, default=16)
''' model '''
parser.add_argument('--im_size', type=int, default=32)
parser.add_argument('--z_dim', type=int, default=128)
parser.add_argument('--g_conv_dim', type=int, default=64)
parser.add_argument('--d_conv_dim', type=int, default=64)
''' optimization '''
parser.add_argument('--total_step', type=int, default=200000)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--g_lr', type=float, default=0.0001)
parser.add_argument('--d_lr', type=float, default=0.0004)
parser.add_argument('--beta1', type=float, default=0.0)
parser.add_argument('--beta2', type=float, default=0.9)
parser.add_argument('--lambda_gp', type=float, default=10)
parser.add_argument('--adv_loss', type=str, default='wgan-gp', choices=['hinge', 'wgan-gp'])
''' environment'''
parser.add_argument('--device', type=str, default=None, choices=['cpu', 'cuda'])
parser.add_argument('--parallel', default=False, action='store_true')
parser.add_argument('--log_step', type=int, default=20)
parser.add_argument('--sample_step', type=int, default=200)
parser.add_argument('--eval_step', type=int, default=500)
parser.add_argument('--model_save_step', type=int, default=10)
parser.add_argument('--sample_num', type=int, default=100)
parser.add_argument('--timestamp', type=str, default=None)
args = parser.parse_args()
args.timestamp = args.timestamp if args.timestamp else str(int(time.time())) + format(random.randint(0, 999), '03')
args.device = torch.device(args.device) if args.device else torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.backends.cudnn.benchmark = True
for dir_name in [args.data_dir, args.sample_dir, args.save_dir]:
if not os.path.exists(dir_name):
os.mkdir(dir_name)
os.mkdir(os.path.join(args.sample_dir, args.timestamp))
os.mkdir(os.path.join(args.save_dir, args.timestamp))
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
ins = Instructor(args)
ins.train_sagan() | main.py | import os
import time
import torch
import random
import argparse
from tqdm import tqdm
from scorer import Scorer
from data_utils import load_data
from sagan_trainer import SAGAN_Trainer
from torchvision.utils import save_image
from torch.utils.tensorboard import SummaryWriter
class Instructor:
def __init__(self, args):
self.args = args
self._print_args()
def _print_args(self):
print('TRAINING ARGUMENTS:')
for arg in vars(self.args):
print(f">>> {arg}: {getattr(self.args, arg)}")
def train_sagan(self):
print('=> creating model...')
trainer = SAGAN_Trainer(args)
writer = SummaryWriter()
print('=> creating scorer...')
scorer = Scorer(device=args.device, resize=True)
print('=> loading data...')
dataloader = load_data(im_size=self.args.im_size,
batch_size=self.args.batch_size,
workers=self.args.num_workers,
dataset=self.args.dataset,
data_path=os.path.join(self.args.data_dir, self.args.dataset))
data_iter = iter(dataloader)
model_save_step = int(self.args.model_save_step * len(dataloader))
fixed_z = torch.randn(self.args.batch_size, self.args.z_dim).to(self.args.device)
real_images, _ = next(data_iter)
real_images = (real_images * 0.5 + 0.5).clamp(0, 1)
writer.add_images('real', real_images, 0)
save_image(real_images, os.path.join(self.args.sample_dir, self.args.timestamp, 'real.png'))
all_preds = list()
for inputs, _ in tqdm(dataloader):
inputs = inputs.to(self.args.device) * 0.5 + 0.5
all_preds.append(scorer.get_preds(inputs))
score, _ = scorer.compute_score(torch.cat(all_preds, dim=0), splits=10)
print(f"real inception score: {score:.4f}")
best_score = 0
for step in range(self.args.total_step):
''' train sagan model '''
trainer.D.train()
trainer.G.train()
try:
real_images, _ = next(data_iter)
except:
data_iter = iter(dataloader)
real_images, _ = next(data_iter)
real_images = real_images.to(self.args.device)
d_loss_real, d_loss_fake, g_loss_fake = trainer.train(real_images)
''' print info '''
if (step + 1) % self.args.log_step == 0:
print(f"step: {step + 1}/{self.args.total_step}, g_loss_fake: {g_loss_fake:.4f}")
writer.add_scalar('Loss/D_real', d_loss_real, step + 1)
writer.add_scalar('Loss/D_fake', d_loss_fake, step + 1)
writer.add_scalar('Loss/G_fake', g_loss_fake, step + 1)
writer.add_scalar('Score/G_attn1', trainer.G.attn1.gamma.mean().item(), step + 1)
writer.add_scalar('Score/D_attn1', trainer.D.attn1.gamma.mean().item(), step + 1)
''' compute inception score '''
if (step + 1) % self.args.eval_step == 0:
trainer.G.eval()
all_preds = list()
for i in tqdm(range(self.args.sample_num)):
z = torch.randn(self.args.batch_size, self.args.z_dim).to(self.args.device)
inputs = trainer.G(z) * 0.5 + 0.5
all_preds.append(scorer.get_preds(inputs))
score, _ = scorer.compute_score(torch.cat(all_preds, dim=0), splits=10)
best_score = score if score > best_score else best_score
print(f"fake inception score: {score:.4f}")
writer.add_scalar('Score/IS_fake', score, step + 1)
''' sample image '''
if (step + 1) % self.args.sample_step == 0:
trainer.G.eval()
fake_images = trainer.G(fixed_z)
fake_images = (fake_images * 0.5 + 0.5).clamp(0, 1)
writer.add_images('fake', fake_images, step + 1)
save_image(fake_images, os.path.join(self.args.sample_dir, self.args.timestamp, f"fake_{step + 1}.png"))
''' save model '''
if (step + 1) % model_save_step == 0:
torch.save(trainer.G.state_dict(), os.path.join(self.args.save_dir, self.args.timestamp, f"{step + 1}_G.pt"))
torch.save(trainer.D.state_dict(), os.path.join(self.args.save_dir, self.args.timestamp, f"{step + 1}_D.pt"))
writer.close()
print(f"best inception score: {best_score:.4f}")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
''' dataset '''
parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10'])
parser.add_argument('--data_dir', type=str, default='data')
parser.add_argument('--sample_dir', type=str, default='sample')
parser.add_argument('--save_dir', type=str, default='saves')
parser.add_argument('--num_workers', type=int, default=16)
''' model '''
parser.add_argument('--im_size', type=int, default=32)
parser.add_argument('--z_dim', type=int, default=128)
parser.add_argument('--g_conv_dim', type=int, default=64)
parser.add_argument('--d_conv_dim', type=int, default=64)
''' optimization '''
parser.add_argument('--total_step', type=int, default=200000)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--g_lr', type=float, default=0.0001)
parser.add_argument('--d_lr', type=float, default=0.0004)
parser.add_argument('--beta1', type=float, default=0.0)
parser.add_argument('--beta2', type=float, default=0.9)
parser.add_argument('--lambda_gp', type=float, default=10)
parser.add_argument('--adv_loss', type=str, default='wgan-gp', choices=['hinge', 'wgan-gp'])
''' environment'''
parser.add_argument('--device', type=str, default=None, choices=['cpu', 'cuda'])
parser.add_argument('--parallel', default=False, action='store_true')
parser.add_argument('--log_step', type=int, default=20)
parser.add_argument('--sample_step', type=int, default=200)
parser.add_argument('--eval_step', type=int, default=500)
parser.add_argument('--model_save_step', type=int, default=10)
parser.add_argument('--sample_num', type=int, default=100)
parser.add_argument('--timestamp', type=str, default=None)
args = parser.parse_args()
args.timestamp = args.timestamp if args.timestamp else str(int(time.time())) + format(random.randint(0, 999), '03')
args.device = torch.device(args.device) if args.device else torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.backends.cudnn.benchmark = True
for dir_name in [args.data_dir, args.sample_dir, args.save_dir]:
if not os.path.exists(dir_name):
os.mkdir(dir_name)
os.mkdir(os.path.join(args.sample_dir, args.timestamp))
os.mkdir(os.path.join(args.save_dir, args.timestamp))
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
ins = Instructor(args)
ins.train_sagan() | 0.586523 | 0.138928 |
from rest_framework import serializers
from .models import *
from rest_framework_simplejwt.tokens import RefreshToken
from django.contrib.auth import authenticate
from django.contrib.auth.models import update_last_login
class UserProfileSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = UserProfile
fields = ('id','email','first_name','last_name','age','gender','state','city','mobile')
extra_kwargs = {
'password': {
'write_only': True,
'style': {'input_type': 'password'}
}
}
class RailwayPassengerSerializer(serializers.ModelSerializer):
class Meta:
model = RailwayPassenger
fields = ('pnr','user','user_id','route_id','train_id','seat_amount')
class AuthUserRegistrationSerializer(serializers.ModelSerializer):
class Meta:
model = UserProfile
fields = ('id','email','password','first_name','last_name','age','gender','state','city','mobile')
def create(self, validated_data):
auth_user = UserProfile.objects.create_user(**validated_data)
return auth_user
class AuthUserLoginSerializer(serializers.Serializer):
email = serializers.EmailField()
password = serializers.CharField(max_length=128, write_only=True)
access = serializers.CharField(read_only=True)
refresh = serializers.CharField(read_only=True)
role = serializers.CharField(read_only=True)
def create(self, validated_date):
pass
def update(self, instance, validated_data):
pass
def validate(self, data):
email = data['email']
password = data['password']
user = authenticate(username=email, password=password)
if user is None:
raise serializers.ValidationError("No user exist with this email")
if not user.is_active:
raise serializers.ValidationError(
'This user has been deactivated.'
)
refresh = RefreshToken.for_user(user)
refresh_token = str(refresh)
access_token = str(refresh.access_token)
update_last_login(None, user)
validation = {
'access': access_token,
'refresh': refresh_token,
'email': user.email,
'full_name': user.get_name(),
'role': user.role,
}
return validation | railway_api/serializers.py | from rest_framework import serializers
from .models import *
from rest_framework_simplejwt.tokens import RefreshToken
from django.contrib.auth import authenticate
from django.contrib.auth.models import update_last_login
class UserProfileSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = UserProfile
fields = ('id','email','first_name','last_name','age','gender','state','city','mobile')
extra_kwargs = {
'password': {
'write_only': True,
'style': {'input_type': 'password'}
}
}
class RailwayPassengerSerializer(serializers.ModelSerializer):
class Meta:
model = RailwayPassenger
fields = ('pnr','user','user_id','route_id','train_id','seat_amount')
class AuthUserRegistrationSerializer(serializers.ModelSerializer):
class Meta:
model = UserProfile
fields = ('id','email','password','first_name','last_name','age','gender','state','city','mobile')
def create(self, validated_data):
auth_user = UserProfile.objects.create_user(**validated_data)
return auth_user
class AuthUserLoginSerializer(serializers.Serializer):
email = serializers.EmailField()
password = serializers.CharField(max_length=128, write_only=True)
access = serializers.CharField(read_only=True)
refresh = serializers.CharField(read_only=True)
role = serializers.CharField(read_only=True)
def create(self, validated_date):
pass
def update(self, instance, validated_data):
pass
def validate(self, data):
email = data['email']
password = data['password']
user = authenticate(username=email, password=password)
if user is None:
raise serializers.ValidationError("No user exist with this email")
if not user.is_active:
raise serializers.ValidationError(
'This user has been deactivated.'
)
refresh = RefreshToken.for_user(user)
refresh_token = str(refresh)
access_token = str(refresh.access_token)
update_last_login(None, user)
validation = {
'access': access_token,
'refresh': refresh_token,
'email': user.email,
'full_name': user.get_name(),
'role': user.role,
}
return validation | 0.529507 | 0.079603 |
' Argument parser '
import argparse
import pathlib
from . import(
constants,
custom_types
)
def valid_percent(value):
' Validate percentage values '
percent = float(value)
if 0 < percent <= 100:
return percent
raise argparse.ArgumentTypeError(f'{value} must be within 1 and 100')
def quoted_choices(choices):
' Return a string of quoted choices '
return ', '.join([f"'{choice}'" for choice in choices])
def str_to_bool(value):
' Validate boolean arguments '
token = value.lower()
true_values = ['t', 'true', '1']
if token in true_values:
return True
false_values = ['f', 'false', '0']
if token in false_values:
return False
choices = quoted_choices(true_values + false_values)
raise argparse.ArgumentTypeError(f"invalid choice '{value}' (choose from {choices})")
def get_arguments():
' Parse command line arguments '
parser = argparse.ArgumentParser(
prog=__package__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'ffprobe_file_path',
type=pathlib.Path,
help='Path to the ffprobe binary',
)
parser.add_argument(
'ffmpeg_file_path',
type=pathlib.Path,
help='Path to the ffmpeg binary',
)
parser.add_argument(
'input_folder_path',
type=pathlib.Path,
help='Path to the video folder containing timestamped folders',
)
parser.add_argument(
'output_folder_path',
type=pathlib.Path,
help='Path to the output folder containing both merged files and the temporary work folder',
)
parser.add_argument(
'--codec',
default='hevc_nvenc',
help='Codec to use for encoding',
choices=constants.CODEC_OPTIONS.keys(),
)
preset_token = '--preset'
parser.add_argument(
preset_token,
default='slow',
help='Codec\'s preset to use for encoding. See ffmpeg -h long for each codec\'s available presets',
)
parser.add_argument(
'--reduce',
default=constants.DONT_REDUCE,
help='Percent to reduce video to',
type=valid_percent,
)
parser.add_argument(
'--layout',
default='pyramid',
help='Camera layout',
choices=constants.LAYOUT_OFFSETS.keys(),
)
parser.add_argument(
'--keep_temp_folder',
default=False,
help='Keep temporary working folder after extraction',
type=str_to_bool,
)
parser.add_argument(
'--log_level',
default='info',
help=(
'Display log messages that matches or exceeds the severity '
f'of the specified level. Use "{constants.DISABLE_LOGGING}" '
'to disable messages'
),
choices=constants.LOG_LEVELS.keys()
)
args = parser.parse_args()
presets = constants.CODEC_OPTIONS[args.codec][0]
if args.preset not in presets:
choices = quoted_choices(presets)
parser.error(
f"argument {preset_token}: invalid choice: '{args.preset}' (choose from {choices})"
)
return (
constants.LOG_LEVELS[args.log_level],
(
custom_types.FFMpegPaths(
args.ffprobe_file_path,
args.ffmpeg_file_path,
),
custom_types.LayoutOptions(
args.codec,
args.preset,
args.layout,
args.reduce,
),
custom_types.BaseFolderPaths(
args.input_folder_path,
args.output_folder_path,
),
args.keep_temp_folder,
)
) | teslacam/arg_parser.py | ' Argument parser '
import argparse
import pathlib
from . import(
constants,
custom_types
)
def valid_percent(value):
' Validate percentage values '
percent = float(value)
if 0 < percent <= 100:
return percent
raise argparse.ArgumentTypeError(f'{value} must be within 1 and 100')
def quoted_choices(choices):
' Return a string of quoted choices '
return ', '.join([f"'{choice}'" for choice in choices])
def str_to_bool(value):
' Validate boolean arguments '
token = value.lower()
true_values = ['t', 'true', '1']
if token in true_values:
return True
false_values = ['f', 'false', '0']
if token in false_values:
return False
choices = quoted_choices(true_values + false_values)
raise argparse.ArgumentTypeError(f"invalid choice '{value}' (choose from {choices})")
def get_arguments():
' Parse command line arguments '
parser = argparse.ArgumentParser(
prog=__package__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'ffprobe_file_path',
type=pathlib.Path,
help='Path to the ffprobe binary',
)
parser.add_argument(
'ffmpeg_file_path',
type=pathlib.Path,
help='Path to the ffmpeg binary',
)
parser.add_argument(
'input_folder_path',
type=pathlib.Path,
help='Path to the video folder containing timestamped folders',
)
parser.add_argument(
'output_folder_path',
type=pathlib.Path,
help='Path to the output folder containing both merged files and the temporary work folder',
)
parser.add_argument(
'--codec',
default='hevc_nvenc',
help='Codec to use for encoding',
choices=constants.CODEC_OPTIONS.keys(),
)
preset_token = '--preset'
parser.add_argument(
preset_token,
default='slow',
help='Codec\'s preset to use for encoding. See ffmpeg -h long for each codec\'s available presets',
)
parser.add_argument(
'--reduce',
default=constants.DONT_REDUCE,
help='Percent to reduce video to',
type=valid_percent,
)
parser.add_argument(
'--layout',
default='pyramid',
help='Camera layout',
choices=constants.LAYOUT_OFFSETS.keys(),
)
parser.add_argument(
'--keep_temp_folder',
default=False,
help='Keep temporary working folder after extraction',
type=str_to_bool,
)
parser.add_argument(
'--log_level',
default='info',
help=(
'Display log messages that matches or exceeds the severity '
f'of the specified level. Use "{constants.DISABLE_LOGGING}" '
'to disable messages'
),
choices=constants.LOG_LEVELS.keys()
)
args = parser.parse_args()
presets = constants.CODEC_OPTIONS[args.codec][0]
if args.preset not in presets:
choices = quoted_choices(presets)
parser.error(
f"argument {preset_token}: invalid choice: '{args.preset}' (choose from {choices})"
)
return (
constants.LOG_LEVELS[args.log_level],
(
custom_types.FFMpegPaths(
args.ffprobe_file_path,
args.ffmpeg_file_path,
),
custom_types.LayoutOptions(
args.codec,
args.preset,
args.layout,
args.reduce,
),
custom_types.BaseFolderPaths(
args.input_folder_path,
args.output_folder_path,
),
args.keep_temp_folder,
)
) | 0.685423 | 0.285154 |
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import scipy.misc
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch
import torch.optim as optim
import torchvision
import numpy as np
import torch.utils.data as data_utils
import torch.nn.functional as F
from data_utils import *
# lr_scheduler() manages the learning rate according to given condition
def lr_scheduler(optimizer, init_lr, epoch):
for param_group in optimizer.param_groups:
if epoch == 150 or epoch == 225:
param_group['lr']=param_group['lr']/10
if epoch == 0:
param_group['lr']=init_lr
print('Current learning rate is {}'.format(param_group['lr']))
return optimizer
def train_model(cnn,optimizer_s,lrate,num_epochs,train_loader,test_loader,dataset_train_len,dataset_test_len, plotsFileName, csvFileName):
epochs= []
train_acc=[]
test_acc=[]
train_loss=[]
test_loss = []
train_error=[]
test_error =[]
for epoch in range(num_epochs):
cnn.train()
epochs.append(epoch)
optimizer = lr_scheduler(optimizer_s, lrate, epoch)
print('Epoch {}/{}'.format(epoch+1, num_epochs))
print('*' * 70)
running_loss = 0.0
running_corrects = 0.0
train_batch_ctr = 0.0
for i, (image, label) in enumerate(train_loader):
image,label = Variable(image.cuda(),requires_grad=True),Variable(label.cuda(),requires_grad=False)
optimizer.zero_grad()
outputs = cnn(image)
_, preds = torch.max(outputs.data, 1)
loss = F.nll_loss(outputs, label)
loss.backward()
optimizer.step()
train_batch_ctr = train_batch_ctr + 1
running_loss += loss.data[0]
running_corrects += torch.sum(preds == label.data)
epoch_acc = running_corrects / (dataset_train_len)
print ('Train corrects: {} Train samples: {} Train accuracy: {}' .format( running_corrects, (dataset_train_len),epoch_acc))
train_acc.append(epoch_acc)
train_loss.append(running_loss / train_batch_ctr)
train_error.append(((dataset_train_len)-running_corrects) / (dataset_train_len))
cnn.eval()
test_running_corrects = 0.0
test_batch_ctr = 0.0
test_running_loss = 0.0
test_total = 0.0
for image, label in test_loader:
image, label = Variable(image.cuda(),volatile=True), Variable(label.cuda())
test_outputs = cnn(image)
_, predicted_test = torch.max(test_outputs.data, 1)
loss = F.nll_loss(test_outputs, label)
test_running_loss += loss.data[0]
test_batch_ctr = test_batch_ctr+1
test_running_corrects += torch.sum(predicted_test == label.data)
test_epoch_acc = test_running_corrects / (dataset_test_len)
test_acc.append(test_epoch_acc)
test_loss.append(test_running_loss / test_batch_ctr)
test_error.append(((dataset_test_len)-test_running_corrects) / (dataset_test_len))
print('Test corrects: {} Test samples: {} Test accuracy {}' .format(test_running_corrects,(dataset_test_len),test_epoch_acc))
print('Train loss: {} Test loss: {}' .format(train_loss[epoch],test_loss[epoch]))
print('Train error: {} Test error {}' .format(train_error[epoch],test_error[epoch]))
print('*' * 70)
plots(epochs, train_acc, test_acc, train_loss, test_loss,train_error,test_error,plotsFileName)
write_csv(csvFileName, train_acc,test_acc,train_loss,test_loss,train_error,test_error,epoch)
'''
plots() and write_csv() are defined in data_utils.py. plots() updates the training plots with each epoch and
write_csv() updates training log with each epoch.
''' | train_utils.py | import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import scipy.misc
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch
import torch.optim as optim
import torchvision
import numpy as np
import torch.utils.data as data_utils
import torch.nn.functional as F
from data_utils import *
# lr_scheduler() manages the learning rate according to given condition
def lr_scheduler(optimizer, init_lr, epoch):
for param_group in optimizer.param_groups:
if epoch == 150 or epoch == 225:
param_group['lr']=param_group['lr']/10
if epoch == 0:
param_group['lr']=init_lr
print('Current learning rate is {}'.format(param_group['lr']))
return optimizer
def train_model(cnn,optimizer_s,lrate,num_epochs,train_loader,test_loader,dataset_train_len,dataset_test_len, plotsFileName, csvFileName):
epochs= []
train_acc=[]
test_acc=[]
train_loss=[]
test_loss = []
train_error=[]
test_error =[]
for epoch in range(num_epochs):
cnn.train()
epochs.append(epoch)
optimizer = lr_scheduler(optimizer_s, lrate, epoch)
print('Epoch {}/{}'.format(epoch+1, num_epochs))
print('*' * 70)
running_loss = 0.0
running_corrects = 0.0
train_batch_ctr = 0.0
for i, (image, label) in enumerate(train_loader):
image,label = Variable(image.cuda(),requires_grad=True),Variable(label.cuda(),requires_grad=False)
optimizer.zero_grad()
outputs = cnn(image)
_, preds = torch.max(outputs.data, 1)
loss = F.nll_loss(outputs, label)
loss.backward()
optimizer.step()
train_batch_ctr = train_batch_ctr + 1
running_loss += loss.data[0]
running_corrects += torch.sum(preds == label.data)
epoch_acc = running_corrects / (dataset_train_len)
print ('Train corrects: {} Train samples: {} Train accuracy: {}' .format( running_corrects, (dataset_train_len),epoch_acc))
train_acc.append(epoch_acc)
train_loss.append(running_loss / train_batch_ctr)
train_error.append(((dataset_train_len)-running_corrects) / (dataset_train_len))
cnn.eval()
test_running_corrects = 0.0
test_batch_ctr = 0.0
test_running_loss = 0.0
test_total = 0.0
for image, label in test_loader:
image, label = Variable(image.cuda(),volatile=True), Variable(label.cuda())
test_outputs = cnn(image)
_, predicted_test = torch.max(test_outputs.data, 1)
loss = F.nll_loss(test_outputs, label)
test_running_loss += loss.data[0]
test_batch_ctr = test_batch_ctr+1
test_running_corrects += torch.sum(predicted_test == label.data)
test_epoch_acc = test_running_corrects / (dataset_test_len)
test_acc.append(test_epoch_acc)
test_loss.append(test_running_loss / test_batch_ctr)
test_error.append(((dataset_test_len)-test_running_corrects) / (dataset_test_len))
print('Test corrects: {} Test samples: {} Test accuracy {}' .format(test_running_corrects,(dataset_test_len),test_epoch_acc))
print('Train loss: {} Test loss: {}' .format(train_loss[epoch],test_loss[epoch]))
print('Train error: {} Test error {}' .format(train_error[epoch],test_error[epoch]))
print('*' * 70)
plots(epochs, train_acc, test_acc, train_loss, test_loss,train_error,test_error,plotsFileName)
write_csv(csvFileName, train_acc,test_acc,train_loss,test_loss,train_error,test_error,epoch)
'''
plots() and write_csv() are defined in data_utils.py. plots() updates the training plots with each epoch and
write_csv() updates training log with each epoch.
''' | 0.723016 | 0.550849 |
# Este script toma un archivo CSV y lo geolocaliza, es decir crea un nuevo archivo _geo.csv con columnas lat y long.
# Utiliza el API de Google Maps
# Instrucciones en como obtener la API Key para Google Maps: https://github.com/slawek87/geolocation-python
from geolocation.main import GoogleMaps
from geolocation.distance_matrix.client import DistanceMatrixApiClient
import os
import argparse
import csv
import sys
import logging
import time
## SAVE THE FILE TO A GEOJSON
# the template. where data from the csv will be formatted to geojson
template = \
''' \
{ "type" : "Feature",
"geometry" : {
"type" : "Point",
"coordinates" : [%s, %s]},
"properties" : %s
},
'''
## GEOLOCALIZATION
# get address to geolocalize
# geolocalize
# save it in file_geo.csv
# environment variables:
# GOOGLE_MAPS_API_KEY
# COUNTRY
# arguments:
# COLUMN NAMES
def convert_row(row):
properties = {}
for i in row:
if i != 'lng' or i != 'lat':
properties[i] = row[i]
return template % (row['lng'], row['lat'], properties)
def get_environment_variables():
if not os.environ.has_key('COUNTRY') or not os.environ.has_key('GOOGLE_MAPS_API_KEY'):
sys.exit('Variables de ambiente COUNTRY o GOOGLE_MAPS_API_KEY no estan definidas.')
# look for environment variables
return {'country': os.environ['COUNTRY'], 'api_key': os.environ['GOOGLE_MAPS_API_KEY']}
def get_address(row, fields, country):
address = ', '.join(map(lambda x: row[x], fields.split(','))) + ', ' + country
return address
def main():
logging_file = 'geolocation_%s.log' % time.strftime("%H_%M_%S")
logging.basicConfig(filename=logging_file,level=logging.DEBUG)
parser = argparse.ArgumentParser(description='Geolocalizer un archivo CSV.')
parser.add_argument('--csv', help='nombre del archivo CSV a geolocalizar')
parser.add_argument('--columnas', help='las columnas (en orden) de la dirección')
args = parser.parse_args()
# get file name and column names from arguments
csv_file = args.csv
new_csv_file = '_'.join([csv_file.split('.')[0], 'geo.csv'])
new_geojson_file = '_'.join([csv_file.split('.')[0], '.geojson'])
# get the columns names where the address is
fields = args.columnas
# get api key and country from environment variables
env_variables = get_environment_variables()
# get an instance of google maps
google_maps = GoogleMaps(api_key=env_variables['api_key'])
# the head of the geojson file
output = \
''' \
{ "type" : "Feature Collection",
"features" : [
'''
with open(csv_file, 'rb') as csvfile:
reader = csv.DictReader(csvfile)
fieldnames = reader.fieldnames
fieldnames.append('lat')
fieldnames.append('lng')
with open(new_csv_file, 'wb') as newcsvfile:
writer = csv.DictWriter(newcsvfile, fieldnames=fieldnames)
for row in reader:
address = get_address(row, fields, env_variables['country'])
try:
location = google_maps.search(location=address) # sends search to Google Maps.
my_location = location.first() # returns only first location.
if my_location != None:
row['lat'] = my_location.lat
row['lng'] = my_location.lng
writer.writerow(row)
except:
e = sys.exc_info()[0]
logging.warning("<p>LOG: no pudo encontrar la dirección: '%s' . Error: %s</p>", address, e)
output += convert_row(row)
# convert new file into a geojson file
# the tail of the geojson file
output += \
''' \
]
}
'''
# opens an geoJSON file to write the output to
outFileHandle = open(new_geojson_file, "w")
outFileHandle.write(output)
outFileHandle.close()
if __name__ == '__main__':
main() | code/geolocalize.py |
# Este script toma un archivo CSV y lo geolocaliza, es decir crea un nuevo archivo _geo.csv con columnas lat y long.
# Utiliza el API de Google Maps
# Instrucciones en como obtener la API Key para Google Maps: https://github.com/slawek87/geolocation-python
from geolocation.main import GoogleMaps
from geolocation.distance_matrix.client import DistanceMatrixApiClient
import os
import argparse
import csv
import sys
import logging
import time
## SAVE THE FILE TO A GEOJSON
# the template. where data from the csv will be formatted to geojson
template = \
''' \
{ "type" : "Feature",
"geometry" : {
"type" : "Point",
"coordinates" : [%s, %s]},
"properties" : %s
},
'''
## GEOLOCALIZATION
# get address to geolocalize
# geolocalize
# save it in file_geo.csv
# environment variables:
# GOOGLE_MAPS_API_KEY
# COUNTRY
# arguments:
# COLUMN NAMES
def convert_row(row):
properties = {}
for i in row:
if i != 'lng' or i != 'lat':
properties[i] = row[i]
return template % (row['lng'], row['lat'], properties)
def get_environment_variables():
if not os.environ.has_key('COUNTRY') or not os.environ.has_key('GOOGLE_MAPS_API_KEY'):
sys.exit('Variables de ambiente COUNTRY o GOOGLE_MAPS_API_KEY no estan definidas.')
# look for environment variables
return {'country': os.environ['COUNTRY'], 'api_key': os.environ['GOOGLE_MAPS_API_KEY']}
def get_address(row, fields, country):
address = ', '.join(map(lambda x: row[x], fields.split(','))) + ', ' + country
return address
def main():
logging_file = 'geolocation_%s.log' % time.strftime("%H_%M_%S")
logging.basicConfig(filename=logging_file,level=logging.DEBUG)
parser = argparse.ArgumentParser(description='Geolocalizer un archivo CSV.')
parser.add_argument('--csv', help='nombre del archivo CSV a geolocalizar')
parser.add_argument('--columnas', help='las columnas (en orden) de la dirección')
args = parser.parse_args()
# get file name and column names from arguments
csv_file = args.csv
new_csv_file = '_'.join([csv_file.split('.')[0], 'geo.csv'])
new_geojson_file = '_'.join([csv_file.split('.')[0], '.geojson'])
# get the columns names where the address is
fields = args.columnas
# get api key and country from environment variables
env_variables = get_environment_variables()
# get an instance of google maps
google_maps = GoogleMaps(api_key=env_variables['api_key'])
# the head of the geojson file
output = \
''' \
{ "type" : "Feature Collection",
"features" : [
'''
with open(csv_file, 'rb') as csvfile:
reader = csv.DictReader(csvfile)
fieldnames = reader.fieldnames
fieldnames.append('lat')
fieldnames.append('lng')
with open(new_csv_file, 'wb') as newcsvfile:
writer = csv.DictWriter(newcsvfile, fieldnames=fieldnames)
for row in reader:
address = get_address(row, fields, env_variables['country'])
try:
location = google_maps.search(location=address) # sends search to Google Maps.
my_location = location.first() # returns only first location.
if my_location != None:
row['lat'] = my_location.lat
row['lng'] = my_location.lng
writer.writerow(row)
except:
e = sys.exc_info()[0]
logging.warning("<p>LOG: no pudo encontrar la dirección: '%s' . Error: %s</p>", address, e)
output += convert_row(row)
# convert new file into a geojson file
# the tail of the geojson file
output += \
''' \
]
}
'''
# opens an geoJSON file to write the output to
outFileHandle = open(new_geojson_file, "w")
outFileHandle.write(output)
outFileHandle.close()
if __name__ == '__main__':
main() | 0.338296 | 0.477067 |
# FIXME IMPORT!
import random
import math
import copy
import time
import socket
import pickle
from RULEngine.Util.Pose import Pose
from RULEngine.Util.Position import Position
from RULEngine.Util.constant import POSITION_DEADZONE
from ai.Algorithm.IntelligentModule import Pathfinder
from ai.Debug.debug_interface import COLOR_ID_MAP, DEFAULT_PATH_TIMEOUT
OBSTACLE_DEAD_ZONE = 700
TIME_TO_UPDATE = 1
class PathfinderRRT(Pathfinder):
"""
La classe hérite de IntelligentModule pour définir sa propriété state.
L'interface expose une méthode qui force le calcul de toutes les
trajectoires. Celles-ci sont enregistrés par effet de bords dans le
GameState.
Une méthode permet de récupérer la trajectoire d'un robot spécifique.
"""
def __init__(self, p_worldstate):
"""
Constructeur, appel le constructeur de la classe mère pour assigner
la référence sur l'InfoManager.
:param info_manager: référence sur l'InfoManager
"""
super().__init__(p_worldstate)
self.paths = {}
for i in range(6):
self.paths[i] = []
self.last_timestamp = self.ws.game_state.get_timestamp()
# Pour être conforme à la nouvelle interface à être changé
# éventuellement mgl 2016/12/23
# TODO(mgl): change this please!
def get_next_point(self, robot_id=None):
pass
def update(self):
pass
def draw_path(self, path, pid=0):
points = []
for path_element in path:
x = path_element.position.x
y = path_element.position.y
points.append((x,y))
self.debug_interface.add_multiple_points(points, COLOR_ID_MAP[pid], width=5, link="path - " + str(pid),
timeout=DEFAULT_PATH_TIMEOUT)
def get_path(self, pid=None, target=None):
"""
Retourne la trajectoire du robot.
:param pid: Identifiant du robot, 0 à 5.
:return: Une liste de Pose, [Pose]
"""
assert(isinstance(pid, int)), "Un pid doit être passé"
assert(isinstance(target, Pose)), "La cible doit être une Pose"
return self._compute_path(pid, target)
def _compute_path(self, pid, target):
"""
Cette méthode calcul la trajectoire pour un robot.
:param pid: L'identifiant du robot, 0 à 5.
:return: None
"""
# TODO mettre les buts dans les obstacles
list_of_pid = list(range(6))
list_of_other_team_pid = list(range(6))
list_of_pid.remove(pid)
obstacleList = []
for other_pid in list_of_pid:
# TODO info manager changer get_player_position
position = self.ws.game_state.get_player_pose(other_pid).position
obstacleList.append([position.x, position.y, OBSTACLE_DEAD_ZONE])
initial_position_of_main_player = self.ws.game_state.get_player_pose(pid).position
for pid in list_of_other_team_pid:
position = self.ws.game_state.get_player_pose(pid,False).position
obstacleList.append([position.x, position.y, OBSTACLE_DEAD_ZONE])
target_position_of_player = target.position
target_orientation_of_player = target.orientation
assert(isinstance(target_position_of_player, Position)), "La cible du joueur doit être une Position"
try :
target_position_of_player.x
target_position_of_player.y
except AttributeError:
target_position_of_player = self.ws.game_state.get_player_pose(pid).position
rrt = RRT(start=[initial_position_of_main_player.x,
initial_position_of_main_player.y],
goal=[target_position_of_player.x, target_position_of_player.y],
obstacleList=obstacleList,
# TODO Vérifier si le robot peut sortir du terrain
rand_area=[-4500, 4500],
expand_dis=get_expand_dis([initial_position_of_main_player.x,
initial_position_of_main_player.y],
[target_position_of_player.x, target_position_of_player.y]),
goal_sample_rate=get_goal_sample_rate([initial_position_of_main_player.x,
initial_position_of_main_player.y],
[target_position_of_player.x, target_position_of_player.y]))
not_smoothed_path = rrt.planning(obstacleList)
# Path smoothing
maxIter = 100
# Il faut inverser la liste du chemin lissé tout en retirant le point de départ
smoothed_path = path_smoothing(not_smoothed_path, maxIter, obstacleList)
smoothed_path = list(reversed(smoothed_path[:-1]))
return self._smoothed_path_to_pose_list(smoothed_path, target_orientation_of_player)
def _smoothed_path_to_pose_list(self, smoothed_path, target_orientation):
smoothed_poses = []
for point in smoothed_path:
smoothed_poses.append(Pose(Position(point[0], point[1]), target_orientation))
return smoothed_poses
class RRT():
"""
Classe principale du pathfinder, contient les fonctions principales
permettant de générer le path.
"""
def __init__(self, start, goal, obstacleList, rand_area, expand_dis, goal_sample_rate, max_iteration=50):
"""
Setting Parameter
start: Position de départ [x,y]
goal: Destination [x,y]
obstacleList: Position et taille des obstacles [[x,y,size],...]
randArea: Ramdom Samping Area [min,max]
expand_dis : Longueur des arêtes
goal_sample_rate : Probabilité d'obtenir directement le goal comme position.
Améliore la vitesse du RRT
max_iteration : Nombre d'itération du path smoother
"""
self.start = Node(start[0], start[1])
self.end = Node(goal[0], goal[1])
self.minrand = rand_area[0]
self.maxrand = rand_area[1]
self.expand_dis = expand_dis
self.goal_sample_rate = goal_sample_rate
self.max_iteration = max_iteration
def planning(self, obstacleList):
"""Fonction qui s'occupe de faire le path"""
initial_time = time.time()
self.node_list = [self.start]
#TODO changer le gros hack degueux pour la gestion de la loop infinie
while True and time.time()-initial_time < TIME_TO_UPDATE:
# Random Sampling
if random.randint(0, 100) > self.goal_sample_rate:
random_coordinates = [random.uniform(self.minrand, self.maxrand), random.uniform(self.minrand, self.maxrand)]
else:
random_coordinates = [self.end.x, self.end.y]
# Find nearest node
nind = self.get_nearest_list_index(self.node_list, random_coordinates)
# print(nind)
# expand tree
nearest_node = self.node_list[nind]
theta = math.atan2(random_coordinates[1] - nearest_node.y, random_coordinates[0] - nearest_node.x)
new_node = copy.deepcopy(nearest_node)
new_node.x += self.expand_dis * math.cos(theta)
new_node.y += self.expand_dis * math.sin(theta)
new_node.parent = nind
if not self.__collision_check(new_node, obstacleList):
continue
self.node_list.append(new_node)
# check goal
dx = new_node.x - self.end.x
dy = new_node.y - self.end.y
d = math.sqrt(dx * dx + dy * dy)
if d <= self.expand_dis:
break
path = [[self.end.x, self.end.y]]
last_index = len(self.node_list) - 1
while self.node_list[last_index].parent is not None:
node = self.node_list[last_index]
path.append([node.x, node.y])
last_index = node.parent
path.append([self.start.x, self.start.y])
# TODO fix gros hack sale
if time.time()-initial_time >=1 :
path = [[self.start.x, self.start.y],[self.start.x, self.start.y]]
return path
def get_nearest_list_index(self, node_list, rnd):
dlist = [(node.x - rnd[0]) ** 2 + (node.y - rnd[1]) ** 2 for node in node_list]
minind = dlist.index(min(dlist))
return minind
def __collision_check(self, node, obstacleList):
""" Permet de vérifier si le chemin passe à travers un obstacle"""
for (ox, oy, size) in obstacleList:
dx = ox - node.x
dy = oy - node.y
d = math.sqrt(dx * dx + dy * dy)
if d <= size:
return False # collision
return True # safe
class Node():
"""
RRT Node
"""
def __init__(self, x, y):
self.x = x
self.y = y
self.parent = None
def get_expand_dis(start, goal):
"""Modifie la distance entre 2 noeuds selon la distance entre le départ et le but.
Utile pour la précision et les performances."""
try :
dx = goal[0]-start[0]
dy = goal[1]-start[1]
d = math.sqrt(dx * dx + dy * dy)
# TODO voir comment on regle ça
except TypeError:
d = 0
if d < 600 :
expand_dis = d/2
else :
expand_dis = 300
return expand_dis
def get_goal_sample_rate(start, goal):
"""Modifie la probabilité d'obtenir directement le but comme point selon la distance entre le départ et le but.
Utile pour la précision et les performances."""
try :
dx = goal[0]-start[0]
dy = goal[1]-start[1]
d = math.sqrt(dx * dx + dy * dy)
except TypeError:
goal_sample_rate = 5
return goal_sample_rate
if d < 600 :
goal_sample_rate = (10-d/140)**2
else :
goal_sample_rate = 30
return goal_sample_rate
def get_path_length(path):
"""Donne la longueur du trajet"""
path_length = 0
try :
for i in range(len(path) - 1):
dx = path[i + 1][0] - path[i][0]
dy = path[i + 1][1] - path[i][1]
d = math.sqrt(dx * dx + dy * dy)
path_length += d
except TypeError:
pass
return path_length
def get_target_point(path, targetL):
l = 0
ti = 0
last_pair_len = 0
for i in range(len(path) - 1):
dx = path[i + 1][0] - path[i][0]
dy = path[i + 1][1] - path[i][1]
d = math.sqrt(dx * dx + dy * dy)
l += d
if l >= targetL:
ti = i-1
last_pair_len = d
break
try :
partRatio = (l - targetL) / last_pair_len
except ZeroDivisionError:
partRatio = 0
# print(partRatio)
# print((ti,len(path),path[ti],path[ti+1]))
x = path[ti][0] + (path[ti + 1][0] - path[ti][0]) * partRatio
y = path[ti][1] + (path[ti + 1][1] - path[ti][1]) * partRatio
# print((x,y))
return [x, y, ti]
def line_collision_check(first, second, obstacleList):
"""
Vérifie si la ligne entre 2 noeuds entre en collision avec un obstacle.
"""
# Line Equation
x1 = first[0]
y1 = first[1]
x2 = second[0]
y2 = second[1]
try:
a = y2-y1
b = -(x2-x1)
c = y2 * (x2-x1) - x2 * (y2-y1)
except ZeroDivisionError:
return False
# print(first)
# print(second)
for (ox, oy, size) in obstacleList:
d = abs(a*ox+b*oy+c)/(math.sqrt(a*a+b*b))
# print((ox,oy,size,d))
if d <= (size):
# print("NG")
return False
# print("OK")
return True # OK
def path_smoothing(path, maxIter, obstacleList):
# Elle ralentit légèrement le tout, voir si améliorable
"""Permet de rendre le trajet obtenu avec le RRT plus lisse"""
# print("PathSmoothing")
path_length = get_path_length(path)
for i in range(maxIter):
# Sample two points
pick_points = [random.uniform(0, path_length), random.uniform(0, path_length)]
pick_points.sort()
# print(pick_points)
first = get_target_point(path, pick_points[0])
# print(first)
second = get_target_point(path, pick_points[1])
# print(second)
if first[2] <= 0 or second[2] <= 0:
continue
if (second[2]+1) > len(path):
continue
if second[2] == first[2]:
continue
# collision check
if not line_collision_check(first, second, obstacleList):
continue
#Create New path
new_path = []
new_path.extend(path[:first[2]+1])
new_path.append([first[0], first[1]])
new_path.append([second[0], second[1]])
new_path.extend(path[second[2]+1:])
path = new_path
path_length = get_path_length(path)
return path
# taille terrain = 9000 x 6000 | ai/Algorithm/PathfinderRRT.py | # FIXME IMPORT!
import random
import math
import copy
import time
import socket
import pickle
from RULEngine.Util.Pose import Pose
from RULEngine.Util.Position import Position
from RULEngine.Util.constant import POSITION_DEADZONE
from ai.Algorithm.IntelligentModule import Pathfinder
from ai.Debug.debug_interface import COLOR_ID_MAP, DEFAULT_PATH_TIMEOUT
OBSTACLE_DEAD_ZONE = 700
TIME_TO_UPDATE = 1
class PathfinderRRT(Pathfinder):
    """
    RRT-based pathfinder.

    Inherits from Pathfinder (an IntelligentModule) to get access to the
    world state.  The interface exposes a method that computes the
    trajectory of a specific robot; trajectories are returned as lists of
    Pose objects.
    """

    def __init__(self, p_worldstate):
        """
        Constructor; calls the parent constructor to keep a reference on
        the world state / InfoManager.

        :param p_worldstate: reference to the world state (InfoManager)
        """
        super().__init__(p_worldstate)
        self.paths = {}
        for i in range(6):
            self.paths[i] = []
        self.last_timestamp = self.ws.game_state.get_timestamp()

    # Kept to conform to the new pathfinder interface; to be changed
    # eventually -- mgl 2016/12/23
    # TODO(mgl): change this please!
    def get_next_point(self, robot_id=None):
        pass

    def update(self):
        pass

    def draw_path(self, path, pid=0):
        # Sends the path points to the debug interface for visualisation.
        # NOTE(review): relies on self.debug_interface existing -- confirm
        # it is provided by a parent class.
        points = []
        for path_element in path:
            x = path_element.position.x
            y = path_element.position.y
            points.append((x,y))
        self.debug_interface.add_multiple_points(points, COLOR_ID_MAP[pid], width=5, link="path - " + str(pid),
                                                 timeout=DEFAULT_PATH_TIMEOUT)

    def get_path(self, pid=None, target=None):
        """
        Returns the trajectory of a robot.

        :param pid: robot identifier, 0 to 5.
        :param target: target Pose for the robot.
        :return: a list of Pose, [Pose]
        """
        assert(isinstance(pid, int)), "Un pid doit être passé"
        assert(isinstance(target, Pose)), "La cible doit être une Pose"

        return self._compute_path(pid, target)

    def _compute_path(self, pid, target):
        """
        Computes the trajectory for one robot with an RRT.

        :param pid: robot identifier, 0 to 5.
        :param target: target Pose.
        :return: a list of Pose, [Pose]
        """
        # TODO put the goals in the obstacle list
        list_of_pid = list(range(6))
        list_of_other_team_pid = list(range(6))
        list_of_pid.remove(pid)
        obstacleList = []
        for other_pid in list_of_pid:
            # TODO info manager: change to get_player_position
            position = self.ws.game_state.get_player_pose(other_pid).position
            obstacleList.append([position.x, position.y, OBSTACLE_DEAD_ZONE])

        initial_position_of_main_player = self.ws.game_state.get_player_pose(pid).position

        # BUGFIX: this loop previously reused the name `pid`, clobbering the
        # parameter; the fallback lookup below then used the wrong robot.
        for enemy_pid in list_of_other_team_pid:
            position = self.ws.game_state.get_player_pose(enemy_pid, False).position
            obstacleList.append([position.x, position.y, OBSTACLE_DEAD_ZONE])

        target_position_of_player = target.position
        target_orientation_of_player = target.orientation
        assert(isinstance(target_position_of_player, Position)), "La cible du joueur doit être une Position"
        try:
            target_position_of_player.x
            target_position_of_player.y
        except AttributeError:
            # Malformed target: fall back on the robot's current position.
            target_position_of_player = self.ws.game_state.get_player_pose(pid).position

        rrt = RRT(start=[initial_position_of_main_player.x,
                         initial_position_of_main_player.y],
                  goal=[target_position_of_player.x, target_position_of_player.y],
                  obstacleList=obstacleList,
                  # TODO check whether the robot may leave the field
                  rand_area=[-4500, 4500],
                  expand_dis=get_expand_dis([initial_position_of_main_player.x,
                                             initial_position_of_main_player.y],
                                            [target_position_of_player.x, target_position_of_player.y]),
                  goal_sample_rate=get_goal_sample_rate([initial_position_of_main_player.x,
                                                        initial_position_of_main_player.y],
                                                        [target_position_of_player.x, target_position_of_player.y]))

        not_smoothed_path = rrt.planning(obstacleList)

        # Path smoothing
        maxIter = 100
        # The smoothed path must be reversed while dropping the start point.
        smoothed_path = path_smoothing(not_smoothed_path, maxIter, obstacleList)
        smoothed_path = list(reversed(smoothed_path[:-1]))

        return self._smoothed_path_to_pose_list(smoothed_path, target_orientation_of_player)

    def _smoothed_path_to_pose_list(self, smoothed_path, target_orientation):
        # Wraps raw [x, y] points into Pose objects, all sharing the target
        # orientation.
        smoothed_poses = []
        for point in smoothed_path:
            smoothed_poses.append(Pose(Position(point[0], point[1]), target_orientation))
        return smoothed_poses
class RRT():
    """
    Main pathfinder class; holds the core routines that generate the path.
    """

    def __init__(self, start, goal, obstacleList, rand_area, expand_dis, goal_sample_rate, max_iteration=50):
        """
        Parameters
        ----------
        start : start position [x, y]
        goal : destination [x, y]
        obstacleList : positions and sizes of obstacles [[x, y, size], ...]
        rand_area : random sampling area [min, max]
        expand_dis : edge length between tree nodes
        goal_sample_rate : probability (%) of sampling the goal directly,
            which speeds up the RRT
        max_iteration : number of iterations of the path smoother
        """
        self.start = Node(start[0], start[1])
        self.end = Node(goal[0], goal[1])
        self.minrand = rand_area[0]
        self.maxrand = rand_area[1]
        self.expand_dis = expand_dis
        self.goal_sample_rate = goal_sample_rate
        self.max_iteration = max_iteration

    def planning(self, obstacleList):
        """Builds the tree and returns the path (goal first, start last)."""
        initial_time = time.time()
        self.node_list = [self.start]
        # TODO replace this time-based guard against infinite loops with
        # something cleaner (was `while True and ...`; the `True and` was
        # redundant and has been dropped).
        while time.time() - initial_time < TIME_TO_UPDATE:
            # Random sampling: with probability goal_sample_rate sample the
            # goal itself, otherwise a uniform random point.
            if random.randint(0, 100) > self.goal_sample_rate:
                random_coordinates = [random.uniform(self.minrand, self.maxrand), random.uniform(self.minrand, self.maxrand)]
            else:
                random_coordinates = [self.end.x, self.end.y]

            # Find the nearest existing node and expand the tree toward the
            # sampled point by one edge length.
            nind = self.get_nearest_list_index(self.node_list, random_coordinates)
            nearest_node = self.node_list[nind]
            theta = math.atan2(random_coordinates[1] - nearest_node.y, random_coordinates[0] - nearest_node.x)

            new_node = copy.deepcopy(nearest_node)
            new_node.x += self.expand_dis * math.cos(theta)
            new_node.y += self.expand_dis * math.sin(theta)
            new_node.parent = nind

            if not self.__collision_check(new_node, obstacleList):
                continue

            self.node_list.append(new_node)

            # Stop once the new node is within one edge length of the goal.
            dx = new_node.x - self.end.x
            dy = new_node.y - self.end.y
            d = math.sqrt(dx * dx + dy * dy)
            if d <= self.expand_dis:
                break

        # Walk back from the last node to the root to build the path.
        path = [[self.end.x, self.end.y]]
        last_index = len(self.node_list) - 1
        while self.node_list[last_index].parent is not None:
            node = self.node_list[last_index]
            path.append([node.x, node.y])
            last_index = node.parent
        path.append([self.start.x, self.start.y])

        # TODO fix this fallback properly.  On timeout, return a degenerate
        # path that keeps the robot in place.  CONSISTENCY FIX: the old code
        # hard-coded `>= 1` here while the loop guard used TIME_TO_UPDATE.
        if time.time() - initial_time >= TIME_TO_UPDATE:
            path = [[self.start.x, self.start.y], [self.start.x, self.start.y]]
        return path

    def get_nearest_list_index(self, node_list, rnd):
        # Index of the node closest (squared distance) to the sampled point.
        dlist = [(node.x - rnd[0]) ** 2 + (node.y - rnd[1]) ** 2 for node in node_list]
        minind = dlist.index(min(dlist))
        return minind

    def __collision_check(self, node, obstacleList):
        """Returns False if the node lies inside any obstacle, True if safe."""
        for (ox, oy, size) in obstacleList:
            dx = ox - node.x
            dy = oy - node.y
            d = math.sqrt(dx * dx + dy * dy)
            if d <= size:
                return False  # collision
        return True  # safe
class Node():
    """A single RRT tree node: a 2D point plus the index of its parent."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.parent = None
def get_expand_dis(start, goal):
    """Choose the RRT edge length from the start-to-goal distance.

    Short distances use half the distance as edge length (precision),
    long ones a fixed 300 (performance).  A malformed start/goal is
    treated as distance 0.
    """
    try:
        delta_x = goal[0] - start[0]
        delta_y = goal[1] - start[1]
        distance = math.sqrt(delta_x * delta_x + delta_y * delta_y)
    # TODO decide how this degenerate case should really be handled
    except TypeError:
        distance = 0
    return distance / 2 if distance < 600 else 300
def get_goal_sample_rate(start, goal):
    """Choose the goal-sampling probability from the start-to-goal distance.

    Closer goals are sampled more often (precision); far goals use a flat
    30% (performance).  A malformed start/goal yields a low 5%.
    """
    try:
        delta_x = goal[0] - start[0]
        delta_y = goal[1] - start[1]
        distance = math.sqrt(delta_x * delta_x + delta_y * delta_y)
    except TypeError:
        return 5
    if distance < 600:
        return (10 - distance / 140) ** 2
    return 30
def get_path_length(path):
    """Total Euclidean length of a path.

    A malformed entry (or a non-list path) silently ends the summation,
    returning whatever partial length was accumulated up to that point.
    """
    total = 0
    try:
        for prev, curr in zip(path, path[1:]):
            seg_dx = curr[0] - prev[0]
            seg_dy = curr[1] - prev[1]
            total += math.sqrt(seg_dx * seg_dx + seg_dy * seg_dy)
    except TypeError:
        pass
    return total
def get_target_point(path, targetL):
    """Return [x, y, ti]: the point at arc length ``targetL`` along ``path``
    plus a segment index used by the path smoother.

    NOTE(review): ``ti`` is set to ``i - 1`` when the target length falls in
    segment ``i``; the interpolation below then indexes with that offset
    (wrapping via negative indices).  Preserved as written -- confirm intent.
    """
    accumulated = 0
    ti = 0
    last_pair_len = 0
    for i in range(len(path) - 1):
        seg_dx = path[i + 1][0] - path[i][0]
        seg_dy = path[i + 1][1] - path[i][1]
        seg_len = math.sqrt(seg_dx * seg_dx + seg_dy * seg_dy)
        accumulated += seg_len
        if accumulated >= targetL:
            ti = i - 1
            last_pair_len = seg_len
            break

    try:
        partRatio = (accumulated - targetL) / last_pair_len
    except ZeroDivisionError:
        # Degenerate segment, or targetL beyond the path end: snap to node.
        partRatio = 0

    x = path[ti][0] + (path[ti + 1][0] - path[ti][0]) * partRatio
    y = path[ti][1] + (path[ti + 1][1] - path[ti][1]) * partRatio
    return [x, y, ti]
def line_collision_check(first, second, obstacleList):
    """
    Check whether the line through two nodes stays clear of every obstacle.

    Returns True when the line is at a safe distance from all obstacles,
    False on any collision or when the two points coincide (no line).
    """
    x1 = first[0]
    y1 = first[1]
    x2 = second[0]
    y2 = second[1]

    # Line equation a*x + b*y + c = 0 through the two points.
    a = y2 - y1
    b = -(x2 - x1)
    c = y2 * (x2 - x1) - x2 * (y2 - y1)

    norm = math.sqrt(a * a + b * b)
    if norm == 0:
        # BUGFIX: coincident points define no line.  The old code wrapped
        # the subtractions above in a try/except ZeroDivisionError that
        # could never fire; the actual division by zero happened in the
        # distance computation below and crashed uncaught.
        return False

    for (ox, oy, size) in obstacleList:
        # Perpendicular distance from the obstacle centre to the line
        # (norm hoisted out of the loop -- it is loop-invariant).
        d = abs(a * ox + b * oy + c) / norm
        if d <= size:
            return False  # collision
    return True  # OK
def path_smoothing(path, maxIter, obstacleList):
    # Slightly slows everything down; see whether it can be improved.
    """Smooths the raw RRT path: repeatedly samples two arc-length positions
    along the path and replaces the intermediate waypoints with a straight
    segment whenever that shortcut is collision-free."""
    # print("PathSmoothing")
    path_length = get_path_length(path)

    for i in range(maxIter):
        # Sample two arc-length positions along the current path, ordered.
        pick_points = [random.uniform(0, path_length), random.uniform(0, path_length)]
        pick_points.sort()
        # print(pick_points)
        first = get_target_point(path, pick_points[0])
        # print(first)
        second = get_target_point(path, pick_points[1])
        # print(second)

        # Skip degenerate picks: before the path start, past its end, or
        # both points on the same segment.
        if first[2] <= 0 or second[2] <= 0:
            continue

        if (second[2]+1) > len(path):
            continue

        if second[2] == first[2]:
            continue

        # Only shortcut when the straight line is collision-free.
        if not line_collision_check(first, second, obstacleList):
            continue

        # Build the new, shorter path with the shortcut spliced in.
        new_path = []
        new_path.extend(path[:first[2]+1])
        new_path.append([first[0], first[1]])
        new_path.append([second[0], second[1]])
        new_path.extend(path[second[2]+1:])
        path = new_path
        path_length = get_path_length(path)

    return path
# taille terrain = 9000 x 6000 | 0.158891 | 0.348008 |
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from api.utils import humanize_time
import datetime
import urllib
class ChatMessage(models.Model):
    """A chat message posted by a user, attached to a URL."""
    author = models.ForeignKey(User, related_name='author', null=False, blank=False)
    message = models.CharField(max_length=2000, blank=False, null=False)
    date = models.DateTimeField(auto_now_add=True)  # stamped once at creation
    url = models.URLField(max_length=300, blank=False, null=False)

    def __unicode__(self):
        return "Chat message item on %s by %s" % (self.date, self.author)
class FilterListItem(models.Model):
    """Abstract base for per-user URL filter lists (white/black lists)."""
    user = models.ForeignKey(User, null=False, blank=False)
    url = models.URLField(max_length=200, null=False, blank=False)
    # BUGFIX: pass the callable itself, not its result.  The old code used
    # default=datetime.datetime.utcnow(), which was evaluated once at import
    # time, stamping every row with the server start time instead of the
    # row creation time.
    date_created = models.DateTimeField(default=datetime.datetime.utcnow)

    class Meta:
        abstract = True
class WhiteListItem(FilterListItem):
    """URL explicitly allowed by a user; unique per (user, url)."""
    class Meta:
        unique_together = ('user','url')

    def __unicode__(self):
        return "Whitelist item %s for %s" % (self.url, self.user.username)
class BlackListItem(FilterListItem):
    """URL explicitly blocked by a user; unique per (user, url)."""
    class Meta:
        unique_together = ('user','url')

    def __unicode__(self):
        return "Blacklist item %s for %s" % (self.url, self.user.username)
class EyeHistoryRaw(models.Model):
    """Raw, un-merged page-visit record as reported by the client."""
    user = models.ForeignKey(User)
    src = models.CharField(max_length=40, default='')
    url = models.URLField(max_length=2000, default='')
    domain = models.URLField(max_length=2000, default='')
    favIconUrl = models.URLField(max_length=2000, default='')
    title = models.CharField(max_length=2000, default='')
    start_event = models.CharField(max_length=40, default='')
    start_time = models.DateTimeField()
    end_event = models.CharField(max_length=40, default='')
    end_time = models.DateTimeField()
    total_time = models.IntegerField()  # store in ms
    # store as human readable according to moment.js library: http://momentjs.com/docs/#/displaying/humanize-duration/
    humanize_time = models.CharField(max_length=200, default='')

    def __unicode__(self):
        return "EyeHistory item %s for %s on %s" % (self.url, self.user.username, self.start_time)
class EyeHistory(models.Model):
    """Consolidated page-visit record; duplicates are merged (see merge_histories)."""
    user = models.ForeignKey(User)
    src = models.CharField(max_length=40, default='')
    url = models.URLField(max_length=2000, default='')
    domain = models.URLField(max_length=2000, default='')
    favIconUrl = models.URLField(max_length=2000, default='')
    title = models.CharField(max_length=2000, default='')
    start_event = models.CharField(max_length=40, default='')
    start_time = models.DateTimeField()
    end_event = models.CharField(max_length=40, default='')
    end_time = models.DateTimeField()
    total_time = models.IntegerField()  # store in ms
    # store as human readable according to moment.js library: http://momentjs.com/docs/#/displaying/humanize-duration/
    humanize_time = models.CharField(max_length=200, default='')

    def __unicode__(self):
        return "EyeHistory item %s for %s on %s" % (self.url, self.user.username, self.start_time)

    def save(self, save_raw=True, *args, **kwargs):
        """Default the favicon to Google's favicon service when none was set.

        NOTE(review): ``save_raw`` is accepted but never used in this body
        -- confirm whether callers rely on it.
        """
        if self.favIconUrl.strip() == '':
            self.favIconUrl = "http://www.google.com/s2/favicons?domain_url=" + urllib.quote(self.url)
        super(EyeHistory, self).save(*args, **kwargs)
class EyeHistoryMessage(models.Model):
    """A message attached to an EyeHistory entry; ordered newest first."""
    message = models.CharField(max_length=300, default='')
    post_time = models.DateTimeField(auto_now_add=True)
    # SET_NULL keeps the message even if its history entry is deleted.
    eyehistory = models.ForeignKey(EyeHistory, blank=True, null=True, on_delete=models.SET_NULL)

    class Meta:
        ordering = ['-post_time']

    def __unicode__(self):
        return "Message %s on %s" % (self.message, self.post_time)
def save_raw_eyehistory(user, url, title, start_event, end_event, start_time, end_time, src, domain, favIconUrl):
    """Create (or fetch) an EyeHistoryRaw row for one page visit.

    ``total_time`` is the elapsed timedelta stored as whole milliseconds;
    ``humanize_time`` is its human-readable rendering.  A missing favicon
    falls back on Google's favicon service.
    """
    elapsed_time = end_time - start_time
    # timedelta -> whole milliseconds
    total_time = int(round((elapsed_time.microseconds / 1.0E3) + (elapsed_time.seconds * 1000) + (elapsed_time.days * 8.64E7)))
    hum_time = humanize_time(elapsed_time)
    if favIconUrl is None:  # idiom fix: identity test for None (was `== None`)
        favIconUrl = "http://www.google.com/s2/favicons?domain_url=" + urllib.quote(url)
    # Return value of get_or_create was previously bound but unused.
    EyeHistoryRaw.objects.get_or_create(user=user,
                                        url=url,
                                        title=title,
                                        start_event=start_event,
                                        end_event=end_event,
                                        start_time=start_time,
                                        end_time=end_time,
                                        src=src,
                                        domain=domain,
                                        favIconUrl=favIconUrl,
                                        total_time=total_time,
                                        humanize_time=hum_time)
def merge_histories(dup_histories, end_time, end_event):
    """Merge duplicate EyeHistory rows into the earliest-starting one.

    The earliest entry absorbs the combined time span, takes the given end
    event, adopts the messages of the others, and the rest are deleted.

    Returns the surviving EyeHistory instance.
    """
    earliest_start = timezone.now()
    earliest_eyehist = None
    dup_histories = list(dup_histories)
    for hist in dup_histories:
        if hist.start_time < earliest_start:
            earliest_start = hist.start_time
            earliest_eyehist = hist
    if earliest_eyehist is None:  # idiom fix: identity test (was `== None`)
        # Every start time was in the future; fall back on the first entry.
        earliest_eyehist = dup_histories[0]

    earliest_eyehist.end_time = end_time
    earliest_eyehist.end_event = end_event

    elapsed_time = earliest_eyehist.end_time - earliest_eyehist.start_time
    # timedelta -> whole milliseconds
    earliest_eyehist.total_time = int(round((elapsed_time.microseconds / 1.0E3) + (elapsed_time.seconds * 1000) + (elapsed_time.days * 8.64E7)))
    earliest_eyehist.humanize_time = humanize_time(elapsed_time)
    if earliest_eyehist.favIconUrl.strip() == '':
        earliest_eyehist.favIconUrl = "http://www.google.com/s2/favicons?domain_url=" + urllib.quote(earliest_eyehist.url)
    earliest_eyehist.save()

    if len(dup_histories) > 1:
        for item in dup_histories:
            if item != earliest_eyehist:
                # Re-point this duplicate's messages at the survivor before
                # deleting it.
                messages = EyeHistoryMessage.objects.filter(eyehistory=item)
                for message in messages:
                    message.eyehistory = earliest_eyehist
                    message.save()
                item.delete()
    return earliest_eyehist
from django.contrib.auth.models import User
from django.utils import timezone
from api.utils import humanize_time
import datetime
import urllib
class ChatMessage(models.Model):
author = models.ForeignKey(User, related_name='author', null=False, blank=False)
message = models.CharField(max_length=2000, blank=False, null=False)
date = models.DateTimeField(auto_now_add=True)
url = models.URLField(max_length=300, blank=False, null=False)
def __unicode__(self):
return "Chat message item on %s by %s" % (self.date, self.author)
class FilterListItem(models.Model):
user = models.ForeignKey(User, null=False, blank=False)
url = models.URLField(max_length=200, null=False, blank=False)
date_created = models.DateTimeField(default=datetime.datetime.utcnow())
class Meta:
abstract = True
class WhiteListItem(FilterListItem):
class Meta:
unique_together = ('user','url')
def __unicode__(self):
return "Whitelist item %s for %s" % (self.url, self.user.username)
class BlackListItem(FilterListItem):
class Meta:
unique_together = ('user','url')
def __unicode__(self):
return "Blacklist item %s for %s" % (self.url, self.user.username)
class EyeHistoryRaw(models.Model):
user = models.ForeignKey(User)
src = models.CharField(max_length=40, default='')
url = models.URLField(max_length=2000, default='')
domain = models.URLField(max_length=2000, default='')
favIconUrl = models.URLField(max_length=2000, default='')
title = models.CharField(max_length=2000, default='')
start_event = models.CharField(max_length=40, default='')
start_time = models.DateTimeField()
end_event = models.CharField(max_length=40, default='')
end_time = models.DateTimeField()
total_time = models.IntegerField() # store in ms
# store as human readable according to moment.js library: http://momentjs.com/docs/#/displaying/humanize-duration/
humanize_time = models.CharField(max_length=200, default='')
def __unicode__(self):
return "EyeHistory item %s for %s on %s" % (self.url, self.user.username, self.start_time)
class EyeHistory(models.Model):
user = models.ForeignKey(User)
src = models.CharField(max_length=40, default='')
url = models.URLField(max_length=2000, default='')
domain = models.URLField(max_length=2000, default='')
favIconUrl = models.URLField(max_length=2000, default='')
title = models.CharField(max_length=2000, default='')
start_event = models.CharField(max_length=40, default='')
start_time = models.DateTimeField()
end_event = models.CharField(max_length=40, default='')
end_time = models.DateTimeField()
total_time = models.IntegerField() # store in ms
# store as human readable according to moment.js library: http://momentjs.com/docs/#/displaying/humanize-duration/
humanize_time = models.CharField(max_length=200, default='')
def __unicode__(self):
return "EyeHistory item %s for %s on %s" % (self.url, self.user.username, self.start_time)
def save(self, save_raw=True, *args, **kwargs):
if self.favIconUrl.strip() == '':
self.favIconUrl = "http://www.google.com/s2/favicons?domain_url=" + urllib.quote(self.url)
super(EyeHistory, self).save(*args, **kwargs)
class EyeHistoryMessage(models.Model):
message = models.CharField(max_length=300, default='')
post_time = models.DateTimeField(auto_now_add=True)
eyehistory = models.ForeignKey(EyeHistory, blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
ordering = ['-post_time']
def __unicode__(self):
return "Message %s on %s" % (self.message, self.post_time)
def save_raw_eyehistory(user, url, title, start_event, end_event, start_time, end_time, src, domain, favIconUrl):
elapsed_time = end_time - start_time
total_time = int(round((elapsed_time.microseconds / 1.0E3) + (elapsed_time.seconds * 1000) + (elapsed_time.days * 8.64E7)))
hum_time = humanize_time(elapsed_time)
if favIconUrl == None:
favIconUrl = "http://www.google.com/s2/favicons?domain_url=" + urllib.quote(url)
raw, created = EyeHistoryRaw.objects.get_or_create(user=user,
url=url,
title=title,
start_event=start_event,
end_event=end_event,
start_time=start_time,
end_time=end_time,
src=src,
domain=domain,
favIconUrl=favIconUrl,
total_time=total_time,
humanize_time=hum_time)
def merge_histories(dup_histories, end_time, end_event):
earliest_start = timezone.now()
earliest_eyehist = None
dup_histories = list(dup_histories)
for hist in dup_histories:
if hist.start_time < earliest_start:
earliest_start = hist.start_time
earliest_eyehist = hist
if earliest_eyehist == None:
earliest_eyehist = dup_histories[0]
earliest_eyehist.end_time = end_time
earliest_eyehist.end_event = end_event
elapsed_time = earliest_eyehist.end_time - earliest_eyehist.start_time
earliest_eyehist.total_time = int(round((elapsed_time.microseconds / 1.0E3) + (elapsed_time.seconds * 1000) + (elapsed_time.days * 8.64E7)))
earliest_eyehist.humanize_time = humanize_time(elapsed_time)
if earliest_eyehist.favIconUrl.strip() == '':
earliest_eyehist.favIconUrl = "http://www.google.com/s2/favicons?domain_url=" + urllib.quote(earliest_eyehist.url)
earliest_eyehist.save()
if len(dup_histories) > 1:
for item in dup_histories:
if item != earliest_eyehist:
messages = EyeHistoryMessage.objects.filter(eyehistory=item)
for message in messages:
message.eyehistory = earliest_eyehist
message.save()
item.delete()
return earliest_eyehist | 0.5144 | 0.071559 |
import numpy as np
import math
from matplotlib import pyplot
import time
import sys
import numba
@numba.jit
def bilinear_interpolation(X, Y, f, x, y):
    """Returns the approximate value of f(x,y) using bilinear interpolation.

    Arguments
    ---------
    X, Y -- mesh grid (assumed square, N x N, uniformly spaced).
    f -- the sampled function values, an NxN matrix.
    x, y -- coordinates where to compute f(x,y); points at or beyond the
            grid edge are clamped to the nearest boundary sample.
    """
    N = np.shape(X[:, 0])[0]
    dx, dy = X[0, 1] - X[0, 0], Y[1, 0] - Y[0, 0]
    x_start, y_start = X[0, 0], Y[0, 0]

    # Indices of the cell containing (x, y): columns i1..i2, rows j1..j2.
    i1, i2 = int((x - x_start) / dx), int((x - x_start) / dx) + 1
    j1, j2 = int((y - y_start) / dy), int((y - y_start) / dy) + 1

    # Take care of boundaries by clamping to edge/corner samples.
    # 1. Right boundary
    if i1 >= N - 1 and j1 <= N - 1 and j1 >= 0:
        return f[j1, N - 1]
    if i1 >= N - 1 and j1 <= 0:
        return f[0, N - 1]
    if i1 >= N - 1 and j1 >= N - 1:
        return f[N - 1, N - 1]
    # 2. Left boundary
    if i1 <= 0 and j1 <= N - 1 and j1 >= 0:
        return f[j1, 0]
    if i1 <= 0 and j1 <= 0:
        return f[0, 0]
    if i1 <= 0 and j1 >= N - 1:
        return f[N - 1, 0]
    # 3. Top boundary
    if j1 >= N - 1 and i1 <= N - 1 and i1 >= 0:
        return f[N - 1, i1]
    if j1 >= N - 1 and i1 <= 0:
        return f[N - 1, 0]
    # 4. Bottom boundary (comment number fixed; was a second "3.")
    if j1 <= 0 and i1 <= N - 1 and i1 >= 0:
        return f[0, i1]
    if j1 <= 0 and i1 >= N - 1:
        # BUGFIX: the bottom-right corner is f[0, N-1]; the old code
        # returned f[N-1, 0] (the top-left corner).  The branch is
        # unreachable today because the right-boundary checks above already
        # cover i1 >= N-1, but it is kept correct for safety.
        return f[0, N - 1]

    # Interior: standard bilinear formula on the enclosing cell.
    x1, x2 = X[j1, i1], X[j2, i2]
    y1, y2 = Y[j1, i1], Y[j2, i2]
    f_interpolated = (
        1
        / (x2 - x1)
        * 1
        / (y2 - y1)
        * (
            f[j1, i1] * (x2 - x) * (y2 - y)
            + f[j1, i2] * (x - x1) * (y2 - y)
            + f[j2, i1] * (x2 - x) * (y - y1)
            + f[j2, i2] * (x - x1) * (y - y1)
        )
    )
    return f_interpolated
@numba.jit
def rk4(X, Y, x, y, f, h, dim):
    """Advance one coordinate by a single RK4-style step of size h.

    (Docstring fixed: the previous one was copy-pasted from
    bilinear_interpolation.)

    Arguments
    ---------
    X, Y -- mesh grid.
    x, y -- coordinates where to begin the evolution.
    f -- sampled velocity component (NxN matrix), evaluated via
         bilinear interpolation.
    h -- the time step (usually referred to this as dt.)
    dim -- 0 to advance x, 1 to advance y.

    NOTE(review): the intermediate stages offset x by h/2 and y by k/2,
    i.e. the two coordinates are advanced independently rather than as a
    coupled 2-D system -- preserved as written, confirm intent.
    Returns None (after printing a message) for any other ``dim``.
    """
    k1 = h * bilinear_interpolation(X, Y, f, x, y)
    k2 = h * bilinear_interpolation(X, Y, f, x + 0.5 * h, y + 0.5 * k1)
    k3 = h * bilinear_interpolation(X, Y, f, x + 0.5 * h, y + 0.5 * k2)
    k4 = h * bilinear_interpolation(X, Y, f, x + h, y + k3)
    if dim == 0:
        # Classic RK4 weighting: (k1 + 2*k2 + 2*k3 + k4) / 6.
        return x + 1.0 / 6 * k1 + 1.0 / 3 * k2 + 1.0 / 3 * k3 + 1.0 / 6 * k4
    elif dim == 1:
        return y + 1.0 / 6 * k1 + 1.0 / 3 * k2 + 1.0 / 3 * k3 + 1.0 / 6 * k4
    else:
        print("invalid dimension parameter passed to rk4, exiting")
        # sys.exit()
@numba.jit
def integrate(x_y, integration_time, dt, X, Y, u, v):
    """Advect the point ``x_y`` through the velocity field (u, v) for
    ``integration_time`` using steps of size ``dt``.

    NOTE(review): ``tr_x``/``tr_y`` accumulate the *sum* of all visited
    positions (seeded with the start point), not a trajectory list --
    preserved as written, confirm this is the intended output.
    """
    xs = x_y[0]
    ys = x_y[1]
    tr_x = xs
    tr_y = ys
    for k in range(0, int(integration_time / dt)):
        xs, ys = rk4(X, Y, xs, ys, u, dt, 0), rk4(X, Y, xs, ys, v, dt, 1)
        tr_x += xs
        tr_y += ys
    return [tr_x, tr_y]
def eigs(xt, yt, xo, yo):
    """Eigenvalues of the Cauchy-Green tensor F^T F, where F is the finite
    -difference gradient of advected points (xt, yt) w.r.t. their original
    positions (xo, yo).

    Returns the string "nan" when the gradient contains NaNs (callers test
    for that sentinel), otherwise the eigenvalue array.
    """
    ftlemat = np.zeros((2, 2))
    # Idiom fix: numpy tuple indexing instead of chained [i][j].
    dx0 = xo[1] - xo[0]
    dy0 = yo[1] - yo[0]
    ftlemat[0, 0] = (xt[1] - xt[0]) / dx0
    ftlemat[1, 0] = (yt[1] - yt[0]) / dx0
    ftlemat[0, 1] = (xt[3] - xt[2]) / dy0
    ftlemat[1, 1] = (yt[3] - yt[2]) / dy0
    if np.isnan(ftlemat).any():  # idiom fix for `True in np.isnan(...)`
        return "nan"
    ftlemat = np.dot(ftlemat.transpose(), ftlemat)
    w, v = np.linalg.eig(ftlemat)
    return w
import math
from matplotlib import pyplot
import time
import sys
import numba
@numba.jit
def bilinear_interpolation(X, Y, f, x, y):
"""Returns the approximate value of f(x,y) using bilinear interpolation.
Arguments
---------
X, Y -- mesh grid.
f -- the function f that should be an NxN matrix.
x, y -- coordinates where to compute f(x,y)
"""
N = np.shape(X[:, 0])[0]
dx, dy = X[0, 1] - X[0, 0], Y[1, 0] - Y[0, 0]
x_start, y_start = X[0, 0], Y[0, 0]
i1, i2 = int((x - x_start) / dx), int((x - x_start) / dx) + 1
j1, j2 = int((y - y_start) / dy), int((y - y_start) / dy) + 1
# Take care of boundaries
# 1. Right boundary
if i1 >= N - 1 and j1 <= N - 1 and j1 >= 0:
return f[j1, N - 1]
if i1 >= N - 1 and j1 <= 0:
return f[0, N - 1]
if i1 >= N - 1 and j1 >= N - 1:
return f[N - 1, N - 1]
# 2. Left boundary
if i1 <= 0 and j1 <= N - 1 and j1 >= 0:
return f[j1, 0]
if i1 <= 0 and j1 <= 0:
return f[0, 0]
if i1 <= 0 and j1 >= N - 1:
return f[N - 1, 0]
# 3. Top boundary
if j1 >= N - 1 and i1 <= N - 1 and i1 >= 0:
return f[N - 1, i1]
if j1 >= N - 1 and i1 <= 0:
return f[N - 1, 0]
# 3. Bottom boundary
if j1 <= 0 and i1 <= N - 1 and i1 >= 0:
return f[0, i1]
if j1 <= 0 and i1 >= N - 1:
return f[N - 1, 0]
x1, x2 = X[j1, i1], X[j2, i2]
y1, y2 = Y[j1, i1], Y[j2, i2]
f_interpolated = (
1
/ (x2 - x1)
* 1
/ (y2 - y1)
* (
f[j1, i1] * (x2 - x) * (y2 - y)
+ f[j1, i2] * (x - x1) * (y2 - y)
+ f[j2, i1] * (x2 - x) * (y - y1)
+ f[j2, i2] * (x - x1) * (y - y1)
)
)
return f_interpolated
@numba.jit
def rk4(X, Y, x, y, f, h, dim):
"""Returns the approximate value of f(x,y) using bilinear interpolation.
Arguments
---------
X, Y -- mesh grid.
x, y -- coordinates where to begin the evolution.
f -- the function f that will be evolved.
h -- the time step (usually referred to this as dt.)
dim -- 0 for x and 1 for y.
"""
k1 = h * bilinear_interpolation(X, Y, f, x, y)
k2 = h * bilinear_interpolation(X, Y, f, x + 0.5 * h, y + 0.5 * k1)
k3 = h * bilinear_interpolation(X, Y, f, x + 0.5 * h, y + 0.5 * k2)
k4 = h * bilinear_interpolation(X, Y, f, x + h, y + k3)
if dim == 0:
return x + 1.0 / 6 * k1 + 1.0 / 3 * k2 + 1.0 / 3 * k3 + 1.0 / 6 * k4
elif dim == 1:
return y + 1.0 / 6 * k1 + 1.0 / 3 * k2 + 1.0 / 3 * k3 + 1.0 / 6 * k4
else:
print("invalid dimension parameter passed to rk4, exiting")
# sys.exit()
@numba.jit
def integrate(x_y, integration_time, dt, X, Y, u, v):
xs = x_y[0]
ys = x_y[1]
tr_x = xs
tr_y = ys
for k in range(0, int(integration_time / dt)):
xs, ys = rk4(X, Y, xs, ys, u, dt, 0), rk4(X, Y, xs, ys, v, dt, 1)
tr_x += xs
tr_y += ys
return [tr_x, tr_y]
def eigs(xt, yt, xo, yo):
ftlemat = np.zeros((2, 2))
ftlemat[0][0] = (xt[1] - xt[0]) / (xo[1] - xo[0])
ftlemat[1][0] = (yt[1] - yt[0]) / (xo[1] - xo[0])
ftlemat[0][1] = (xt[3] - xt[2]) / (yo[1] - yo[0])
ftlemat[1][1] = (yt[3] - yt[2]) / (yo[1] - yo[0])
if True in np.isnan(ftlemat):
return "nan"
ftlemat = np.dot(ftlemat.transpose(), ftlemat)
w, v = np.linalg.eig(ftlemat)
return w | 0.471467 | 0.663471 |
from fastapi import FastAPI, Depends, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from sqlalchemy.orm import Session
from app.database import crud, models, schemas, SessionLocal, engine
from app.validate_wasm import validate_wasm
from typing import List
models.Base.metadata.create_all(bind=engine)  # creates DB tables for all declared models if missing

# Initialize the FastAPI instance
app = FastAPI(
    title="WASM Bots",
    description="A simple server for our LangSec project where we can upload base64 encoded wasm bots",
    version="1.0"
)

# Allow cross-origin requests from any origin (all methods and headers).
app.add_middleware(
    CORSMiddleware,
    allow_credentials=True,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
# Used to create a per-request DB session (FastAPI dependency).
def get_db():
    """Yield a database session and always close it afterwards.

    BUGFIX: the session is created *before* the try block.  Previously a
    failing SessionLocal() left ``db`` unbound, so the finally clause
    raised a masking NameError instead of the real error.
    """
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
@app.get("/bots", response_model=List[schemas.Bot])
def get_bots(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    """Returns all the wasm bots, paginated via ``skip``/``limit``."""
    bots = crud.get_bots(db, skip=skip, limit=limit)
    return bots
@app.get("/bots/{bot_id}", response_model=schemas.Bot)
def get_bot(bot_id: int, db: Session = Depends(get_db)):
    """Fetches the bot with the given id.

    NOTE(review): no explicit handling for an unknown id here -- behaviour
    depends on what crud.get_bot returns; a 404 may be warranted, confirm.
    """
    return crud.get_bot(db, bot_id)
@app.delete("/bots/{bot_id}")
def remove_bot(bot_id: int, db: Session = Depends(get_db)):
    """Removes the bot with the given id; responds 400 when it is missing.

    NOTE(review): 404 would be the conventional status for a missing
    resource, but changing it would affect existing clients.
    """
    if crud.remove_bot(db, bot_id):
        return
    else:
        raise HTTPException(status_code=400, detail=f"Bot with id #{bot_id} does not exist")
@app.post("/bots/get-by-name", response_model=schemas.Bot)
def get_bot_by_name(name: str, db: Session = Depends(get_db)):
    """Fetches the bot with the given name.

    NOTE(review): uses POST for a read operation -- kept for client
    compatibility.
    """
    return crud.get_bot_by_name(db, name)
@app.post("/bots", response_model=schemas.Bot)
def create_bot(bot: schemas.BotBase, db: Session = Depends(get_db)):
    """Creates a new bot.

    Rejects invalid wasm payloads and duplicate names with 400.
    NOTE(review): the name check and the insert are not atomic -- a unique
    constraint on the name column should back this up against races.
    """
    if not validate_wasm(bot.base64_encoded_bot):
        raise HTTPException(status_code=400, detail="Provided wasm file is invalid")
    db_bot = crud.get_bot_by_name(db, name=bot.name)
    if db_bot:
        # A bot with that name already exists
        raise HTTPException(status_code=400, detail=f"Bot with that name already exists: {bot.name}")
    return crud.create_bot(db, bot)
from fastapi.middleware.cors import CORSMiddleware
from sqlalchemy.orm import Session
from app.database import crud, models, schemas, SessionLocal, engine
from app.validate_wasm import validate_wasm
from typing import List
models.Base.metadata.create_all(bind=engine) # Dont know what this does
# Initialize the FastAPI instance
app = FastAPI(
title="WASM Bots",
description="A simple server for our LangSec project where we can upload base64 encoded wasm bots",
version="1.0"
)
app.add_middleware(
CORSMiddleware,
allow_credentials=True,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
)
# Used to create a connection to the db upon requests to the server
def get_db():
try:
db = SessionLocal()
yield db
finally:
db.close()
@app.get("/bots", response_model=List[schemas.Bot])
def get_bots(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
"""Returns all the wasm bots"""
bots = crud.get_bots(db, skip=skip, limit=limit)
return bots
@app.get("/bots/{bot_id}", response_model=schemas.Bot)
def get_bot(bot_id: int, db: Session = Depends(get_db)):
"""Fetched that particular bot"""
return crud.get_bot(db, bot_id)
@app.delete("/bots/{bot_id}")
def remove_bot(bot_id: int, db: Session = Depends(get_db)):
"""Removes that particular bot"""
if crud.remove_bot(db, bot_id):
return
else:
raise HTTPException(status_code=400, detail=f"Bot with id #{bot_id} does not exist")
@app.post("/bots/get-by-name", response_model=schemas.Bot)
def get_bot_by_name(name: str, db: Session = Depends(get_db)):
    """Fetch a single bot by its unique name.

    Raises:
        HTTPException(404): if no bot with that name exists.
    """
    db_bot = crud.get_bot_by_name(db, name)
    # assumes crud.get_bot_by_name returns None when absent — TODO confirm.
    if db_bot is None:
        raise HTTPException(status_code=404, detail=f"Bot with name {name} does not exist")
    return db_bot
@app.post("/bots", response_model=schemas.Bot)
def create_bot(bot: schemas.BotBase, db: Session = Depends(get_db)):
    """Create a new bot.

    Rejects an invalid wasm payload or a duplicate bot name with a 400.
    (Also strips dataset-dump residue that had been fused onto the final
    return line, which made the function a syntax error.)
    """
    if not validate_wasm(bot.base64_encoded_bot):
        raise HTTPException(status_code=400, detail="Provided wasm file is invalid")
    db_bot = crud.get_bot_by_name(db, name=bot.name)
    if db_bot:
        # A bot with that name already exists
        raise HTTPException(status_code=400, detail=f"Bot with that name already exists: {bot.name}")
    return crud.create_bot(db, bot)
from pyrsistent import pmap, thaw
from .event import EventBase
from .util import is_pmap, ms_from_dt
class TimeRangeEvent(EventBase):
"""
The creation of an TimeRangeEvent is done by combining two parts -
the timerange and the data.
To construct you specify a TimeRange, along with the data.
The first arg can be:
- a TimeRangeEvent instance (copy ctor)
- a pyrsistent.PMap, or
- a python tuple, list or pyrsistent.PVector object containing two
python datetime objects or ms timestamps - the args for the
TimeRange object.
To specify the data you can supply either:
- a python dict
- a pyrsistent.PMap, or
- a simple type such as an integer. In the case of the simple type
this is a shorthand for supplying {"value": v}.
Parameters
----------
instance_or_args : TimeRange, iterable, pyrsistent.pmap
See above
arg2 : dict, pmap, int, float, str, optional
See above.
"""
__slots__ = () # inheriting relevant slots, stil need this
def __init__(self, instance_or_args, arg2=None):
"""
Create a time range event.
"""
# pylint doesn't like self._d but be consistent w/original code.
# pylint: disable=invalid-name
if isinstance(instance_or_args, TimeRangeEvent):
super(TimeRangeEvent, self).__init__(instance_or_args._d) # pylint: disable=protected-access
return
elif is_pmap(instance_or_args):
super(TimeRangeEvent, self).__init__(instance_or_args)
return
rng = self.timerange_from_arg(instance_or_args)
data = self.data_from_arg(arg2)
super(TimeRangeEvent, self).__init__(pmap(dict(range=rng, data=data)))
# Query/accessor methods
def to_json(self):
"""
Returns the TimeRangeEvent as a JSON object, essentially
::
{timerange: tr, data: {key: value, ...}}
This is actually like json.loads(s) - produces the
actual data structure from the object internal data.
Returns
-------
dict
Dict representation of internals (timerange, data).
"""
return dict(
timerange=self.timerange().to_json(),
data=thaw(self.data()),
)
def key(self):
"""Returns a range string in the format 'begin,end' as expressed
as ms since the epoch.
Returns
-------
str
The begin and end of the timerange in ms since the epoch.
"""
return '{0},{1}'.format(ms_from_dt(self.begin()), ms_from_dt(self.end()))
def type(self): # pylint: disable=no-self-use
"""Return the type of this event type
Returns
-------
class
The class of this event type.
"""
return TimeRangeEvent
def to_point(self, cols=None):
"""
Returns a flat array starting with the timestamp, followed by the values.
Can be given an optional list of columns so the returned list will
have the values in order. Primarily for the TimeSeries wire format.
Parameters
----------
cols : list, optional
List of data columns to order the data points in so the
TimeSeries wire format lines up correctly. If not specified,
the points will be whatever order that dict.values() decides
to return it in.
Returns
-------
list
Epoch ms followed by points.
"""
points = [self.timerange().to_json()]
data = thaw(self.data())
if isinstance(cols, list):
points += [data.get(x, None) for x in cols]
else:
points += [x for x in list(data.values())]
return points
def timerange_as_utc_string(self):
"""The timerange of this data, in UTC time, as a string.
Returns
-------
str
Formatted time string
"""
return self.timerange().to_utc_string()
def timerange_as_local_string(self):
"""The timerange of this data, in Local time, as a string.
Returns
-------
str
Formatted time string.
"""
return self.timerange().to_local_string()
def timestamp(self):
"""The timestamp of this Event data. It's just the beginning
of the range in this case.
Returns
-------
datetime.datetime
Beginning of range.
"""
return self.begin()
def timerange(self):
"""The TimeRange of this data.
Returns
-------
TimeRange
The underlying time range object.
"""
return self._d.get('range')
def begin(self):
"""The begin time of this Event, which will be just the timestamp.
Returns
-------
datetime.datetime
Beginning of range.
"""
return self.timerange().begin()
def end(self):
"""The end time of this Event, which will be just the timestamp.
Returns
-------
datetime.datetime
End of range.
"""
return self.timerange().end()
# data setters, returns new object
def set_data(self, data):
"""Sets the data portion of the event and returns a new TimeRangeEvent.
:param data: The new data portion for this event object.
:type data: dict
:returns: TimeRangeEvent - a new TimeRangeEvent object.
Parameters
----------
data : dict
New payload to set as the data for this event.
Returns
-------
TimeRangeEvent
A new time range event object with new data payload.
"""
_dnew = self._d.set('data', self.data_from_arg(data))
return TimeRangeEvent(_dnew)
# Humanize
def humanize_duration(self):
"""Humanize the timerange.
Returns
-------
str
Humanized string of the time range.
"""
return self.timerange().humanize_duration() | pypond/timerange_event.py | from pyrsistent import pmap, thaw
from .event import EventBase
from .util import is_pmap, ms_from_dt
class TimeRangeEvent(EventBase):
    """
    The creation of an TimeRangeEvent is done by combining two parts -
    the timerange and the data.

    To construct you specify a TimeRange, along with the data.

    The first arg can be:

    - a TimeRangeEvent instance (copy ctor)
    - a pyrsistent.PMap, or
    - a python tuple, list or pyrsistent.PVector object containing two
      python datetime objects or ms timestamps - the args for the
      TimeRange object.

    To specify the data you can supply either:

    - a python dict
    - a pyrsistent.PMap, or
    - a simple type such as an integer. In the case of the simple type
      this is a shorthand for supplying {"value": v}.

    Parameters
    ----------
    instance_or_args : TimeRange, iterable, pyrsistent.pmap
        See above
    arg2 : dict, pmap, int, float, str, optional
        See above.
    """

    __slots__ = ()  # inheriting relevant slots, still need this

    def __init__(self, instance_or_args, arg2=None):
        """
        Create a time range event.
        """
        # pylint doesn't like self._d but be consistent w/original code.
        # pylint: disable=invalid-name
        if isinstance(instance_or_args, TimeRangeEvent):
            super(TimeRangeEvent, self).__init__(instance_or_args._d)  # pylint: disable=protected-access
            return
        elif is_pmap(instance_or_args):
            super(TimeRangeEvent, self).__init__(instance_or_args)
            return

        rng = self.timerange_from_arg(instance_or_args)
        data = self.data_from_arg(arg2)

        super(TimeRangeEvent, self).__init__(pmap(dict(range=rng, data=data)))

    # Query/accessor methods

    def to_json(self):
        """
        Returns the TimeRangeEvent as a JSON object, essentially
        ::

            {timerange: tr, data: {key: value, ...}}

        This is actually like json.loads(s) - produces the
        actual data structure from the object internal data.

        Returns
        -------
        dict
            Dict representation of internals (timerange, data).
        """
        return dict(
            timerange=self.timerange().to_json(),
            data=thaw(self.data()),
        )

    def key(self):
        """Returns a range string in the format 'begin,end' as expressed
        as ms since the epoch.

        Returns
        -------
        str
            The begin and end of the timerange in ms since the epoch.
        """
        return '{0},{1}'.format(ms_from_dt(self.begin()), ms_from_dt(self.end()))

    def type(self):  # pylint: disable=no-self-use
        """Return the type of this event type

        Returns
        -------
        class
            The class of this event type.
        """
        return TimeRangeEvent

    def to_point(self, cols=None):
        """
        Returns a flat array starting with the timestamp, followed by the values.

        Can be given an optional list of columns so the returned list will
        have the values in order. Primarily for the TimeSeries wire format.

        Parameters
        ----------
        cols : list, optional
            List of data columns to order the data points in so the
            TimeSeries wire format lines up correctly. If not specified,
            the points will be whatever order that dict.values() decides
            to return it in.

        Returns
        -------
        list
            Epoch ms followed by points.
        """
        points = [self.timerange().to_json()]

        data = thaw(self.data())

        if isinstance(cols, list):
            points += [data.get(x, None) for x in cols]
        else:
            points += [x for x in list(data.values())]

        return points

    def timerange_as_utc_string(self):
        """The timerange of this data, in UTC time, as a string.

        Returns
        -------
        str
            Formatted time string
        """
        return self.timerange().to_utc_string()

    def timerange_as_local_string(self):
        """The timerange of this data, in Local time, as a string.

        Returns
        -------
        str
            Formatted time string.
        """
        return self.timerange().to_local_string()

    def timestamp(self):
        """The timestamp of this Event data. It's just the beginning
        of the range in this case.

        Returns
        -------
        datetime.datetime
            Beginning of range.
        """
        return self.begin()

    def timerange(self):
        """The TimeRange of this data.

        Returns
        -------
        TimeRange
            The underlying time range object.
        """
        return self._d.get('range')

    def begin(self):
        """The begin time of this Event, which will be just the timestamp.

        Returns
        -------
        datetime.datetime
            Beginning of range.
        """
        return self.timerange().begin()

    def end(self):
        """The end time of this Event, which will be just the timestamp.

        Returns
        -------
        datetime.datetime
            End of range.
        """
        return self.timerange().end()

    # data setters, returns new object

    def set_data(self, data):
        """Sets the data portion of the event and returns a new TimeRangeEvent.

        Parameters
        ----------
        data : dict
            New payload to set as the data for this event.

        Returns
        -------
        TimeRangeEvent
            A new time range event object with new data payload.
        """
        _dnew = self._d.set('data', self.data_from_arg(data))
        return TimeRangeEvent(_dnew)

    # Humanize

    def humanize_duration(self):
        """Humanize the timerange.

        Returns
        -------
        str
            Humanized string of the time range.
        """
        return self.timerange().humanize_duration()
import datetime
import scrapelib
import pytz
import cachetools
class GovInfo(scrapelib.Scraper):
BASE_URL = 'https://api.govinfo.gov'
def __init__(self, *args, api_key='DEMO_KEY', **kwargs):
super().__init__(*args, **kwargs)
self.headers['X-Api-Key'] = api_key
def collections(self):
endpoint = '/collections'
response = self.get(self.BASE_URL + endpoint)
return response.json()
def _format_time(self, dt):
utc_time = dt.astimezone(pytz.utc)
time_str = dt.strftime('%Y-%m-%dT%H:%M:%SZ')
return time_str
def congressional_hearings(self, start_time=None, end_time=None):
if start_time is None:
# the earliest date for this collection
start_time = datetime.datetime(2018, 6, 5, 0, 0, tzinfo=pytz.utc)
start_time_str = self._format_time(start_time)
if end_time is None:
end_time = datetime.datetime.now(pytz.utc)
end_time_str = self._format_time(end_time)
partial = '/collections/CHRG/{start_time}'.format(start_time=start_time_str)
url_template = self.BASE_URL + partial + '/{end_time}'
seen = cachetools.LRUCache(30)
for page in self._pages(url_template, end_time_str):
for package in page['packages']:
package_id = package['packageId']
if package_id in seen:
continue
else:
# the LRUCache is like a dict, but all we care
# about is whether we've seen this package
# recently, so we just store None as the value
# associated with the package_id key
seen[package_id] = None
response = self.get(package['packageLink'])
yield response.json()
def _pages(self, url_template, end_time_str):
page_size = 100
params = {'offset': 0,
'pageSize': page_size}
url = url_template.format(end_time=end_time_str)
response = self.get(url, params=params)
data = response.json()
yield data
while len(data['packages']) == page_size:
# the API results are sorted in descending order by timestamp
# so we can paginate through results by making the end_time
# filter earlier and earlier
earliest_timestamp = data['packages'][-1]['lastModified']
url = url_template.format(end_time=earliest_timestamp)
response = self.get(url, params=params)
data = response.json()
yield data | govinfo/__init__.py | import datetime
import scrapelib
import pytz
import cachetools
class GovInfo(scrapelib.Scraper):
    """Minimal client for the govinfo.gov REST API."""

    BASE_URL = 'https://api.govinfo.gov'

    def __init__(self, *args, api_key='DEMO_KEY', **kwargs):
        super().__init__(*args, **kwargs)
        # Every request must carry the API key in this header.
        self.headers['X-Api-Key'] = api_key

    def collections(self):
        """Return the list of available collections, as parsed JSON."""
        endpoint = '/collections'
        response = self.get(self.BASE_URL + endpoint)
        return response.json()

    def _format_time(self, dt):
        """Format *dt* as an ISO-8601 UTC timestamp with a 'Z' suffix.

        Bug fix: the original converted to UTC (``utc_time``) but then
        formatted the *unconverted* ``dt``, so any non-UTC-aware input
        produced a non-UTC wall time mislabelled with 'Z'.
        """
        utc_time = dt.astimezone(pytz.utc)
        time_str = utc_time.strftime('%Y-%m-%dT%H:%M:%SZ')
        return time_str

    def congressional_hearings(self, start_time=None, end_time=None):
        """Yield the full JSON record of each congressional hearing package
        whose lastModified falls in [start_time, end_time]."""
        if start_time is None:
            # the earliest date for this collection
            start_time = datetime.datetime(2018, 6, 5, 0, 0, tzinfo=pytz.utc)
        start_time_str = self._format_time(start_time)

        if end_time is None:
            end_time = datetime.datetime.now(pytz.utc)
        end_time_str = self._format_time(end_time)

        partial = '/collections/CHRG/{start_time}'.format(start_time=start_time_str)
        url_template = self.BASE_URL + partial + '/{end_time}'

        # Pagination works by shrinking the end_time window (see _pages), so
        # packages sharing a boundary timestamp can reappear on the next page.
        # A small LRU cache of recently seen ids deduplicates them.
        seen = cachetools.LRUCache(30)

        for page in self._pages(url_template, end_time_str):
            for package in page['packages']:
                package_id = package['packageId']
                if package_id in seen:
                    continue
                else:
                    # the LRUCache is like a dict, but all we care
                    # about is whether we've seen this package
                    # recently, so we just store None as the value
                    # associated with the package_id key
                    seen[package_id] = None

                response = self.get(package['packageLink'])
                yield response.json()

    def _pages(self, url_template, end_time_str):
        """Yield successive result pages, paginating by narrowing end_time."""
        page_size = 100
        params = {'offset': 0,
                  'pageSize': page_size}

        url = url_template.format(end_time=end_time_str)
        response = self.get(url, params=params)
        data = response.json()
        yield data

        # A full page means there may be more results.
        while len(data['packages']) == page_size:
            # the API results are sorted in descending order by timestamp
            # so we can paginate through results by making the end_time
            # filter earlier and earlier
            earliest_timestamp = data['packages'][-1]['lastModified']
            url = url_template.format(end_time=earliest_timestamp)
            response = self.get(url, params=params)
            data = response.json()
            yield data
# Python modules
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import pyqtSignal
import colorsys
# Wizard gui modules
from wizard.gui import gui_utils
class color_picker(QtWidgets.QWidget):
validate_signal = pyqtSignal(str)
color_signal = pyqtSignal(str)
def __init__(self, color='#798fe8', parent=None):
super(color_picker, self).__init__(parent)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.ToolTip)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.build_ui()
self.connect_functions()
self.set_color(color)
def showEvent(self, event):
gui_utils.move_ui(self)
event.accept()
def set_color(self, hex):
h, s, v = self.hex_to_hsv(hex)
self.set_HSV(h, s, v)
def set_HSV(self, h, s, v):
self.hue_selector.move(0, (100 - h) * 1.85)
self.color_view.setStyleSheet(f"border-radius: 5px;background-color: qlineargradient(x1:1, x2:0, stop:0 hsl({h}%,100%,50%), stop:1 #fff);")
self.selector.move(s * 2 - 6, (200 - v * 2) - 6)
def hex_to_hsv(self, hex):
hex = hex.replace('#', '')
if len(hex) < 6: hex += "0"*(6-len(hex))
elif len(hex) > 6: hex = hex[0:6]
r,g,b = tuple(int(hex[i:i+2], 16) for i in (0,2,4))
h,s,v = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
return (h * 100, s * 100, v * 100)
def leaveEvent(self, event):
h, s, v = self.get_color()
self.validate_signal.emit(self.hsv_to_hex(h, s, v))
self.close()
def connect_functions(self):
self.hue.mouseMoveEvent = self.moveHueSelector
self.black_overlay.mouseMoveEvent = self.moveSVSelector
self.black_overlay.mousePressEvent = self.moveSVSelector
def moveSVSelector(self, event):
if event.buttons() == QtCore.Qt.LeftButton:
pos = event.pos()
if pos.x() < 0: pos.setX(0)
if pos.y() < 0: pos.setY(0)
if pos.x() > 200: pos.setX(200)
if pos.y() > 200: pos.setY(200)
self.selector.move(pos - QtCore.QPoint(6,6))
self.hsvChanged()
def moveHueSelector(self, event):
if event.buttons() == QtCore.Qt.LeftButton:
pos = event.pos().y()
if pos < 0: pos = 0
if pos > 185: pos = 185
self.hue_selector.move(QtCore.QPoint(0,pos))
self.hsvChanged()
def hsv_to_hex(self, h, s, v):
r,g,b = colorsys.hsv_to_rgb(h / 100.0, s / 100.0, v / 100.0)
hex = '#%02x%02x%02x' % (int(r*255),int(g*255),int(b*255))
return hex
def hsvChanged(self):
h, s, v = self.get_color()
self.color_signal.emit(self.hsv_to_hex(h,s,v))
self.color_view.setStyleSheet(f"border-radius: 5px;background-color: qlineargradient(x1:1, x2:0, stop:0 hsl({h}%,100%,50%), stop:1 #fff);")
def get_color(self):
h,s,v = (100 - self.hue_selector.y() / 1.85, (self.selector.x() + 6) / 2.0, (194 - self.selector.y()) / 2.0)
return h, s, v
def build_ui(self):
self.main_widget_layout = QtWidgets.QHBoxLayout()
self.main_widget_layout.setContentsMargins(12, 12, 12, 12)
self.setLayout(self.main_widget_layout)
self.main_widget = QtWidgets.QFrame()
self.main_widget.setMaximumWidth(300)
self.main_widget.setObjectName('black_round_frame')
self.main_layout = QtWidgets.QHBoxLayout()
self.main_layout.setSpacing(6)
self.main_widget.setLayout(self.main_layout)
self.main_widget_layout.addWidget(self.main_widget)
self.color_view = QtWidgets.QFrame(self)
self.color_view.setMinimumSize(QtCore.QSize(200, 200))
self.color_view.setMaximumSize(QtCore.QSize(200, 200))
self.color_view.setStyleSheet("/* ALL CHANGES HERE WILL BE OVERWRITTEN */;\n"
"background-color: qlineargradient(x1:1, x2:0, stop:0 hsl(0%,100%,50%), stop:1 rgba(255, 255, 255, 255));border-radius:6px;\n"
"\n"
"")
self.color_view.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.color_view.setFrameShadow(QtWidgets.QFrame.Raised)
self.color_view.setObjectName("color_view")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.color_view)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.black_overlay = QtWidgets.QFrame(self.color_view)
self.black_overlay.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(0, 0, 0, 0), stop:1 rgba(0, 0, 0, 255));;border-radius:4px;\n"
"\n"
"\n"
"")
self.black_overlay.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.black_overlay.setFrameShadow(QtWidgets.QFrame.Raised)
self.black_overlay.setObjectName("black_overlay")
self.selector = QtWidgets.QFrame(self.black_overlay)
self.selector.setGeometry(QtCore.QRect(194, 20, 12, 12))
self.selector.setMinimumSize(QtCore.QSize(12, 12))
self.selector.setMaximumSize(QtCore.QSize(12, 12))
self.selector.setStyleSheet("background-color:none;\n"
"border: 2px solid white;\n"
"border-radius: 6px;")
self.selector.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.selector.setFrameShadow(QtWidgets.QFrame.Raised)
self.selector.setObjectName("selector")
self.verticalLayout_2.addWidget(self.black_overlay)
self.main_layout.addWidget(self.color_view)
self.frame_2 = QtWidgets.QFrame(self)
self.frame_2.setObjectName('transparent_widget')
self.frame_2.setMinimumSize(QtCore.QSize(12, 0))
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.hue_bg = QtWidgets.QFrame(self.frame_2)
self.hue_bg.setGeometry(QtCore.QRect(0, 0, 12, 200))
self.hue_bg.setMinimumSize(QtCore.QSize(12, 200))
self.hue_bg.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 rgba(255, 0, 0, 255), stop:0.166 rgba(255, 255, 0, 255), stop:0.333 rgba(0, 255, 0, 255), stop:0.5 rgba(0, 255, 255, 255), stop:0.666 rgba(0, 0, 255, 255), stop:0.833 rgba(255, 0, 255, 255), stop:1 rgba(255, 0, 0, 255));\n"
"border-radius: 6px;")
self.hue_bg.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.hue_bg.setFrameShadow(QtWidgets.QFrame.Raised)
#self.hue_bg.setObjectName("hue_bg")
self.hue_selector = QtWidgets.QLabel(self.frame_2)
self.hue_selector.setGeometry(QtCore.QRect(0, 185, 0, 12))
self.hue_selector.setMinimumSize(QtCore.QSize(12, 0))
self.hue_selector.setStyleSheet("background-color: none;\n"
"border: 2px solid white;\n"
"border-radius: 6px;")
self.hue_selector.setText("")
self.hue_selector.setObjectName("hue_selector")
self.hue = QtWidgets.QFrame(self.frame_2)
self.hue.setGeometry(QtCore.QRect(0, 0, 12, 200))
self.hue.setMinimumSize(QtCore.QSize(12, 200))
self.hue.setStyleSheet("background-color: none;")
self.hue.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.hue.setFrameShadow(QtWidgets.QFrame.Raised)
self.hue.setObjectName("hue")
self.main_layout.addWidget(self.frame_2) | wizard/gui/color_picker.py |
# Python modules
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import pyqtSignal
import colorsys
# Wizard gui modules
from wizard.gui import gui_utils
class color_picker(QtWidgets.QWidget):
    """Frameless popup HSV color picker.

    Emits ``color_signal`` (hex string) while the user drags, and
    ``validate_signal`` (hex string) when the mouse leaves the widget.
    """

    validate_signal = pyqtSignal(str)
    color_signal = pyqtSignal(str)

    def __init__(self, color='#798fe8', parent=None):
        super(color_picker, self).__init__(parent)
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.ToolTip)
        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        self.build_ui()
        self.connect_functions()
        self.set_color(color)

    def showEvent(self, event):
        # Reposition next to the cursor/anchor whenever shown.
        gui_utils.move_ui(self)
        event.accept()

    def set_color(self, hex):
        """Set the current color from a '#rrggbb' hex string."""
        h, s, v = self.hex_to_hsv(hex)
        self.set_HSV(h, s, v)

    def set_HSV(self, h, s, v):
        """Position the selectors for the given h/s/v values (each 0-100)."""
        # int() casts: QWidget.move() requires ints; hex_to_hsv returns
        # floats, which raise TypeError on PyQt5 >= 5.15 (strict sip types).
        self.hue_selector.move(0, int((100 - h) * 1.85))
        self.color_view.setStyleSheet(f"border-radius: 5px;background-color: qlineargradient(x1:1, x2:0, stop:0 hsl({h}%,100%,50%), stop:1 #fff);")
        self.selector.move(int(s * 2 - 6), int((200 - v * 2) - 6))

    def hex_to_hsv(self, hex):
        """Convert '#rrggbb' to an (h, s, v) tuple scaled to 0-100."""
        hex = hex.replace('#', '')
        # Normalize to exactly 6 hex digits (pad short, truncate long).
        if len(hex) < 6: hex += "0"*(6-len(hex))
        elif len(hex) > 6: hex = hex[0:6]
        r,g,b = tuple(int(hex[i:i+2], 16) for i in (0,2,4))
        h,s,v = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
        return (h * 100, s * 100, v * 100)

    def leaveEvent(self, event):
        # Leaving the popup commits the current color and closes it.
        h, s, v = self.get_color()
        self.validate_signal.emit(self.hsv_to_hex(h, s, v))
        self.close()

    def connect_functions(self):
        # Route mouse events on the hue bar and SV square to our handlers.
        self.hue.mouseMoveEvent = self.moveHueSelector
        self.black_overlay.mouseMoveEvent = self.moveSVSelector
        self.black_overlay.mousePressEvent = self.moveSVSelector

    def moveSVSelector(self, event):
        """Drag handler for the saturation/value square."""
        if event.buttons() == QtCore.Qt.LeftButton:
            pos = event.pos()
            # Clamp to the 200x200 square.
            if pos.x() < 0: pos.setX(0)
            if pos.y() < 0: pos.setY(0)
            if pos.x() > 200: pos.setX(200)
            if pos.y() > 200: pos.setY(200)
            self.selector.move(pos - QtCore.QPoint(6,6))
            self.hsvChanged()

    def moveHueSelector(self, event):
        """Drag handler for the vertical hue bar."""
        if event.buttons() == QtCore.Qt.LeftButton:
            pos = event.pos().y()
            if pos < 0: pos = 0
            if pos > 185: pos = 185
            self.hue_selector.move(QtCore.QPoint(0,pos))
            self.hsvChanged()

    def hsv_to_hex(self, h, s, v):
        """Convert (h, s, v) each in 0-100 to a '#rrggbb' hex string."""
        r,g,b = colorsys.hsv_to_rgb(h / 100.0, s / 100.0, v / 100.0)
        hex = '#%02x%02x%02x' % (int(r*255),int(g*255),int(b*255))
        return hex

    def hsvChanged(self):
        """Recompute the color from selector positions and emit it live."""
        h, s, v = self.get_color()
        self.color_signal.emit(self.hsv_to_hex(h,s,v))
        self.color_view.setStyleSheet(f"border-radius: 5px;background-color: qlineargradient(x1:1, x2:0, stop:0 hsl({h}%,100%,50%), stop:1 #fff);")

    def get_color(self):
        """Read (h, s, v) in 0-100 back from the selector widget positions."""
        h,s,v = (100 - self.hue_selector.y() / 1.85, (self.selector.x() + 6) / 2.0, (194 - self.selector.y()) / 2.0)
        return h, s, v

    def build_ui(self):
        """Build the static widget hierarchy (SV square, hue bar, selectors)."""
        self.main_widget_layout = QtWidgets.QHBoxLayout()
        self.main_widget_layout.setContentsMargins(12, 12, 12, 12)
        self.setLayout(self.main_widget_layout)
        self.main_widget = QtWidgets.QFrame()
        self.main_widget.setMaximumWidth(300)
        self.main_widget.setObjectName('black_round_frame')
        self.main_layout = QtWidgets.QHBoxLayout()
        self.main_layout.setSpacing(6)
        self.main_widget.setLayout(self.main_layout)
        self.main_widget_layout.addWidget(self.main_widget)
        self.color_view = QtWidgets.QFrame(self)
        self.color_view.setMinimumSize(QtCore.QSize(200, 200))
        self.color_view.setMaximumSize(QtCore.QSize(200, 200))
        self.color_view.setStyleSheet("/* ALL CHANGES HERE WILL BE OVERWRITTEN */;\n"
            "background-color: qlineargradient(x1:1, x2:0, stop:0 hsl(0%,100%,50%), stop:1 rgba(255, 255, 255, 255));border-radius:6px;\n"
            "\n"
            "")
        self.color_view.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.color_view.setFrameShadow(QtWidgets.QFrame.Raised)
        self.color_view.setObjectName("color_view")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.color_view)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setSpacing(0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.black_overlay = QtWidgets.QFrame(self.color_view)
        self.black_overlay.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(0, 0, 0, 0), stop:1 rgba(0, 0, 0, 255));;border-radius:4px;\n"
            "\n"
            "\n"
            "")
        self.black_overlay.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.black_overlay.setFrameShadow(QtWidgets.QFrame.Raised)
        self.black_overlay.setObjectName("black_overlay")
        self.selector = QtWidgets.QFrame(self.black_overlay)
        self.selector.setGeometry(QtCore.QRect(194, 20, 12, 12))
        self.selector.setMinimumSize(QtCore.QSize(12, 12))
        self.selector.setMaximumSize(QtCore.QSize(12, 12))
        self.selector.setStyleSheet("background-color:none;\n"
            "border: 2px solid white;\n"
            "border-radius: 6px;")
        self.selector.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.selector.setFrameShadow(QtWidgets.QFrame.Raised)
        self.selector.setObjectName("selector")
        self.verticalLayout_2.addWidget(self.black_overlay)
        self.main_layout.addWidget(self.color_view)
        self.frame_2 = QtWidgets.QFrame(self)
        self.frame_2.setObjectName('transparent_widget')
        self.frame_2.setMinimumSize(QtCore.QSize(12, 0))
        self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
        self.hue_bg = QtWidgets.QFrame(self.frame_2)
        self.hue_bg.setGeometry(QtCore.QRect(0, 0, 12, 200))
        self.hue_bg.setMinimumSize(QtCore.QSize(12, 200))
        self.hue_bg.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 rgba(255, 0, 0, 255), stop:0.166 rgba(255, 255, 0, 255), stop:0.333 rgba(0, 255, 0, 255), stop:0.5 rgba(0, 255, 255, 255), stop:0.666 rgba(0, 0, 255, 255), stop:0.833 rgba(255, 0, 255, 255), stop:1 rgba(255, 0, 0, 255));\n"
            "border-radius: 6px;")
        self.hue_bg.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.hue_bg.setFrameShadow(QtWidgets.QFrame.Raised)
        #self.hue_bg.setObjectName("hue_bg")
        self.hue_selector = QtWidgets.QLabel(self.frame_2)
        self.hue_selector.setGeometry(QtCore.QRect(0, 185, 0, 12))
        self.hue_selector.setMinimumSize(QtCore.QSize(12, 0))
        self.hue_selector.setStyleSheet("background-color: none;\n"
            "border: 2px solid white;\n"
            "border-radius: 6px;")
        self.hue_selector.setText("")
        self.hue_selector.setObjectName("hue_selector")
        self.hue = QtWidgets.QFrame(self.frame_2)
        self.hue.setGeometry(QtCore.QRect(0, 0, 12, 200))
        self.hue.setMinimumSize(QtCore.QSize(12, 200))
        self.hue.setStyleSheet("background-color: none;")
        self.hue.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.hue.setFrameShadow(QtWidgets.QFrame.Raised)
        self.hue.setObjectName("hue")
        self.main_layout.addWidget(self.frame_2)
import datetime
import jwt
from api.models import (AccountDetails, AgentCoins, AgentTransactionHistory,
ContactUs, User, UserCoins, UserTrasactionHistory, otp)
from CustomCode import (autentication, fixed_var, password_functions, sms,
string_generator, validator)
from django.db.models import Sum
from rest_framework.decorators import api_view
from rest_framework.response import Response
from wasteCoin import settings
# Create your views here.
@api_view(['GET'])
def index_page(request):
    """Health-check endpoint: always reports success."""
    return Response({
        "error": "0",
        "message": "Successful",
    })
@api_view(["POST"])
def user_registration(request):
    """Register a new user, seed their wallet, and SMS them an OTP.

    Returns a JSON body with an ``error`` code ("0" success, "1" conflict or
    invalid email/phone, "2" missing field, "3" unexpected failure) and, on
    success, a JWT whose payload carries user_id/role/validated.
    """
    try:
        firstName = request.data.get('firstname', None)
        lastName = request.data.get('lastname', None)
        phoneNumber = request.data.get('phonenumber', None)
        email = request.data.get('email', None)
        gender = request.data.get('gender', None)
        password = request.data.get('password', None)
        address = request.data.get('address', None)
        lga = request.data.get('lga', None)
        state = request.data.get('state', None)
        country = request.data.get('country', None)
        # gender is deliberately optional: it is not part of reg_field.
        reg_field = [firstName, lastName, phoneNumber, email, password, address, lga, state, country]
        if not None in reg_field and not "" in reg_field:
            if User.objects.filter(user_phone=phoneNumber).exists() or User.objects.filter(email=email).exists():
                return_data = {
                    "error": "1",
                    "message": "User Exists"
                }
            elif validator.checkmail(email) == False or validator.checkphone(phoneNumber) == False:
                return_data = {
                    "error": "1",
                    "message": "Email or Phone number is Invalid"
                }
            else:
                # generate user_id
                userRandomId = string_generator.alphanumeric(20)
                miner_id = string_generator.numeric(7)
                transactionid = string_generator.alphanumeric(15)
                # encrypt password
                encryped_password = password_functions.generate_password_hash(password)
                # Save user_data.
                # Bug fix: the stored password was a redaction placeholder
                # (`<PASSWORD>`, a syntax error); the hash computed above is
                # the clearly intended value.
                new_userData = User(user_id=userRandomId, firstname=firstName, lastname=lastName,
                                    email=email, user_phone=phoneNumber, user_gender=gender,
                                    user_password=encryped_password, user_address=address,
                                    user_state=state, user_LGA=lga, user_country=country)
                new_userData.save()
                # Generate OTP
                code = string_generator.numeric(6)
                # Save OTP
                user_OTP = otp(user=new_userData, otp_code=code)
                user_OTP.save()
                # Generate default coins
                user_Coins = UserCoins(user=new_userData, minerID=miner_id, redeemedWasteCoin=0, minedCoins=0)
                user_Coins.save()
                # Save Transaction Details
                user_transaction = UserTrasactionHistory(user=new_userData, transaction_id=transactionid,
                                                         amount=0, coin_redeemed_amount=0, transaction="Credit")
                user_transaction.save()
                role = User.objects.get(user_id=userRandomId).role
                validated = otp.objects.get(user__user_id=userRandomId).validated
                # Generate token
                timeLimit = datetime.datetime.utcnow() + datetime.timedelta(minutes=1440)  # set duration for token
                payload = {"user_id": f"{userRandomId}",
                           "role": role,
                           "validated": validated,
                           "exp": timeLimit}
                # NOTE(review): token.decode() implies PyJWT 1.x (bytes token);
                # PyJWT 2.x already returns str — confirm the pinned version.
                token = jwt.encode(payload, settings.SECRET_KEY)
                message = f"Welcome to WasteCoin, your verification code is {code}"
                sms.sendsms(phoneNumber[1:], message)
                return_data = {
                    "error": "0",
                    "message": "The registration was successful, A verrification SMS has been sent",
                    "token": f"{token.decode('UTF-8')}",
                    "elapsed_time": f"{timeLimit}",
                }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameter"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
#User verfication
@api_view(["POST"])
@autentication.token_required
def user_verification(request, decrypedToken):
    """Verify a user's account against the OTP that was sent by SMS.

    The OTP is valid for 30 minutes from the time it was saved.
    """
    try:
        otp_entered = request.data.get("otp", None)
        if otp_entered != None and otp_entered != "":
            user_data = otp.objects.get(user__user_id=decrypedToken['user_id'])
            otpCode, date_added = user_data.otp_code, user_data.date_added
            date_now = datetime.datetime.now(datetime.timezone.utc)
            duration = float((date_now - date_added).total_seconds())
            timeLimit = 1800.0  # 30 mins interval
            if otp_entered == otpCode and duration < timeLimit:
                # validate user
                user_data.validated = True
                user_data.save()
                return_data = {
                    "error": "0",
                    "message": "User Verified"
                }
            elif duration >= timeLimit:
                # Bug fix: the original if/elif chain had no branch for a
                # *wrong* OTP that was *also* expired, so return_data stayed
                # unbound and the broad except turned it into error "3".
                # Expiry now wins regardless of whether the code matched.
                return_data = {
                    "error": "1",
                    "message": "OTP has expired"
                }
            else:
                return_data = {
                    "error": "1",
                    "message": "Incorrect OTP"
                }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameters"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
#resend OTP
@api_view(["POST"])
def resend_otp(request):
    """Regenerate a user's OTP, SMS it to them, and return a fresh JWT.

    Error codes: "0" sent, "1" unknown phone, "2" missing phone, "3" failure.
    """
    try:
        phone_number = request.data.get('phone_number',None)
        if phone_number != None and phone_number != "":
            if User.objects.filter(user_phone =phone_number).exists() == False:
                return_data = {
                    "error": "1",
                    "message": "User does not exist"
                }
            else:
                user_data = otp.objects.get(user__user_phone=phone_number)
                user = User.objects.get(user_phone=phone_number)
                #generate new otp
                code = string_generator.numeric(6)
                # Overwrite the previous code; only the newest OTP is valid.
                user_data.otp_code = code
                user_data.save()
                message = f"Welcome to WasteCoin, your verification code is {code}"
                # Strip the leading '+' before handing to the SMS gateway.
                sms.sendsms(phone_number[1:],message)
                timeLimit= datetime.datetime.utcnow() + datetime.timedelta(minutes=1440) #set limit for user
                payload = {"user_id": f'{user.user_id}',
                            "role": user.role,
                            "validated": user_data.validated,
                            "exp":timeLimit}
                # NOTE(review): token.decode() implies PyJWT 1.x (bytes token) — confirm.
                token = jwt.encode(payload,settings.SECRET_KEY)
                return_data = {
                    "error": "0",
                    "message": "OTP sent to phone number",
                    "token": token.decode('UTF-8')
                }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameters"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
#User login
@api_view(["POST"])
def user_login(request):
    """Authenticate a user by e-mail or phone number plus password.

    ``email_phone`` may be an e-mail address or a phone number;
    validator.checkmail / validator.checkphone decide which lookup branch
    runs.  A 24h JWT is issued on lookup success and is returned even when
    the account is not yet OTP-verified, so the client can re-verify.
    Error codes: 0 success, 1 unknown user / wrong password / unverified,
    2 invalid input, 3 unexpected server error.
    """
    try:
        email_phone = request.data.get("email_phone",None)
        password = request.data.get("password",None)
        field = [email_phone,password]
        if not None in field and not '' in field:
            validate_mail = validator.checkmail(email_phone)
            validate_phone = validator.checkphone(email_phone)
            if validate_mail == True:
                # --- e-mail branch ---
                if User.objects.filter(email =email_phone).exists() == False:
                    return_data = {
                        "error": "1",
                        "message": "User does not exist"
                    }
                else:
                    user_data = User.objects.get(email=email_phone)
                    is_valid_password = password_functions.check_password_match(password,user_data.user_password)
                    is_verified = otp.objects.get(user__user_phone=user_data.user_phone).validated
                    #Generate token
                    timeLimit= datetime.datetime.utcnow() + datetime.timedelta(minutes=1440) #set limit for user
                    payload = {"user_id": f'{user_data.user_id}',
                                "role": user_data.role,
                                "validated": is_verified,
                                "exp":timeLimit}
                    token = jwt.encode(payload,settings.SECRET_KEY)
                    if is_valid_password and is_verified:
                        return_data = {
                            "error": "0",
                            "message": "Successfull",
                            "token": token.decode('UTF-8'),
                            "token-expiration": f"{timeLimit}",
                            "user_details": [
                                {
                                    "firstname": f"{user_data.firstname}",
                                    "lastname": f"{user_data.lastname}",
                                    "email": f"{user_data.email}",
                                    "phone_number": f"{user_data.user_phone}",
                                    "gender": f"{user_data.user_gender}",
                                    "address": f"{user_data.user_address}",
                                    "state": f"{user_data.user_state}",
                                    "LGA": f"{user_data.user_LGA}",
                                    "country": f"{user_data.user_country}"
                                }
                            ]
                        }
                    elif is_verified == False:
                        # unverified users still get a token so they can finish OTP verification
                        return_data = {
                            "error" : "1",
                            "message": "User is not verified",
                            "token": token.decode('UTF-8')
                        }
                    else:
                        return_data = {
                            "error" : "1",
                            "message" : "Wrong Password"
                        }
            elif validate_phone == True:
                # --- phone branch (mirrors the e-mail branch above) ---
                if User.objects.filter(user_phone =email_phone).exists() == False:
                    return_data = {
                        "error": "1",
                        "message": "User does not exist"
                    }
                else:
                    user_data = User.objects.get(user_phone=email_phone)
                    is_verified = otp.objects.get(user__user_phone=user_data.user_phone).validated
                    is_valid_password = password_functions.check_password_match(password,user_data.user_password)
                    #Generate token
                    timeLimit= datetime.datetime.utcnow() + datetime.timedelta(minutes=1440) #set limit for user
                    payload = {"user_id": f'{user_data.user_id}',
                                "validated": is_verified,
                                "role": user_data.role,
                                "exp":timeLimit}
                    token = jwt.encode(payload,settings.SECRET_KEY)
                    if is_valid_password and is_verified:
                        return_data = {
                            "error": "0",
                            "message": "Successfull",
                            "token": token.decode('UTF-8'),
                            "token-expiration": f"{timeLimit}",
                            "user_details": [
                                {
                                    "firstname": f"{user_data.firstname}",
                                    "lastname": f"{user_data.lastname}",
                                    "email": f"{user_data.email}",
                                    "phone_number": f"{user_data.user_phone}",
                                    "gender": f"{user_data.user_gender}",
                                    "address": f"{user_data.user_address}",
                                    "state": f"{user_data.user_state}",
                                    "LGA": f"{user_data.user_LGA}",
                                    "country": f"{user_data.user_country}"
                                }
                            ]
                        }
                    elif is_verified == False:
                        return_data = {
                            "error" : "1",
                            "message": "User is not verified",
                            "token": token.decode('UTF-8')
                        }
                    else:
                        return_data = {
                            "error" : "1",
                            "message" : "Wrong Password"
                        }
            else:
                return_data = {
                    "error": "2",
                    "message": "Email or Phone Number is Invalid"
                }
        else:
            return_data = {
                "error" : "2",
                "message" : "Invalid Parameters"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
@api_view(["POST"])
def password_reset(request):
    """Start a password reset: SMS a 15-char reset code to the user.

    Expects ``phone_number`` in the POST body.  The code is stored on the
    user's otp row (``password_reset_code``) for password_change to check,
    and a fresh 24h JWT is returned for the follow-up request.
    Error codes: 0 sent, 1 unknown user, 2 missing parameter, 3 error.
    """
    try:
        phone_number = request.data.get('phone_number',None)
        if phone_number != None and phone_number != "":
            if User.objects.filter(user_phone =phone_number).exists() == False:
                return_data = {
                    "error": "1",
                    "message": "User does not exist"
                }
            else:
                user_data = otp.objects.get(user__user_phone=phone_number)
                user = User.objects.get(user_phone=phone_number)
                generate_pin = string_generator.alphanumeric(15)
                user_data.password_reset_code = generate_pin
                user_data.save()
                message = f"Welcome to WasteCoin, your password reset code is {generate_pin}"
                # [1:] drops the leading character — presumably a '+' prefix
                sms.sendsms(phone_number[1:],message)
                timeLimit= datetime.datetime.utcnow() + datetime.timedelta(minutes=1440) #set limit for user
                payload = {"user_id": f'{user.user_id}',
                            "role": user.role,
                            "validated": user_data.validated,
                            "exp":timeLimit}
                token = jwt.encode(payload,settings.SECRET_KEY)
                return_data = {
                    "error": "0",
                    "message": "Successful, reset code sent to Phone Number",
                    "token": token.decode('UTF-8')
                }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameter"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
#Change password
@api_view(["POST"])
@autentication.token_required
def password_change(request,decrypedToken):
    """Complete a password reset using the SMS reset code.

    Expects ``reset_code`` and ``new_password`` in the POST body; the user
    comes from the decoded JWT.  On a code match the new password hash is
    stored.  Error codes: 0 changed, 1 code mismatch, 2 missing
    parameters, 3 unexpected server error.
    """
    try:
        reset_code = request.data.get("reset_code",None)
        new_password = request.data.get("new_password",None)
        fields = [reset_code,new_password]
        if not None in fields and not "" in fields:
            #get user info
            user_data = User.objects.get(user_id=decrypedToken["user_id"])
            # debug print of the reset code removed: it leaked the secret to logs
            otp_reset_code = otp.objects.get(user__user_id=decrypedToken["user_id"]).password_reset_code
            if reset_code == otp_reset_code:
                # hash the new password and store the hash (bug fix: the hash
                # was never assigned to the user before saving)
                encryptpassword = password_functions.generate_password_hash(new_password)
                user_data.user_password = encryptpassword
                user_data.save()
                return_data = {
                    "error": "0",
                    "message": "Successfull, Password Changed"
                }
            elif reset_code != otp_reset_code:
                return_data = {
                    "error": "1",
                    "message": "Code does not Match"
                }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameters"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
@api_view(["GET"])
@autentication.token_required
def Dashboard(request,decrypedToken):
    """Return mining statistics for the dashboard screen.

    Reports total allocation, exchange rates, mined/unmined totals and
    percentages, plus a leaderboard built from distinct Credit amounts.
    Error codes: 0 success, 2 missing user id, 3 unexpected error (the
    exception text is echoed back in ``message``).
    """
    try:
        user_id = decrypedToken['user_id']
        if user_id != None and user_id != '':
            total_wastecoin = fixed_var.backallocation
            rate_exchange = fixed_var.exchange_rate
            rate_changed = fixed_var.changed_rate
            month = datetime.datetime.now().strftime('%B')
            # aggregate() yields None when there are no Credit rows yet; default
            # to 0 so the arithmetic below cannot raise a TypeError
            total_minedCoins = UserTrasactionHistory.objects.filter(transaction="Credit").aggregate(Sum('amount'))['amount__sum'] or 0
            total_unminedCoins = total_wastecoin - total_minedCoins
            #Get Percentage
            percent_of_Usermined_coins = round((total_minedCoins/(total_wastecoin))*100)
            percent_of_Userunmined_coins = round((total_unminedCoins/(total_wastecoin))*100)
            WasteCoinBoard = UserTrasactionHistory.objects.filter(transaction='Credit').distinct('amount').order_by('-amount')
            topCoinsMined = []
            for entry in WasteCoinBoard:
                topCoinsMined.append({
                    "miner_id": UserCoins.objects.get(user__user_id=entry.user.user_id).minerID,
                    "CoinMined": entry.amount
                })
            return_data = {
                "error": "0",
                "message": "Sucessfull",
                "data":
                    {
                        "allocatedWasteCoin": total_wastecoin,
                        "month": month,
                        "exchangeRate": rate_exchange,
                        "changedRate": rate_changed,
                        "totalWasteCoinMined": total_minedCoins,
                        "totalWasteCoinUnmined": total_unminedCoins,
                        "summary": {
                            "totalWasteCoinMinedPercentage": percent_of_Usermined_coins,
                            "totalWasteCoinUnMinedPercentage": percent_of_Userunmined_coins
                        },
                        "leaderBoard": topCoinsMined
                    }
            }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameter"
            }
    except Exception as e:
        return_data = {
            "error": "3",
            "message": str(e)
        }
    return Response(return_data)
#Check leaderboard
@api_view(["GET"])
def LeadBoard(request):
    """Public leaderboard: every user's miner id and mined-coin balance,
    ordered by mined coins descending.

    Error codes: 0 success, 3 unexpected server error.
    """
    try:
        WasteCoinBoard = UserCoins.objects.all().order_by('-minedCoins')
        topCoinsMined = []
        for coins in WasteCoinBoard:
            # each row already carries minedCoins; the old per-row
            # UserCoins.objects.get() re-fetched the same record (N extra queries)
            topCoinsMined.append({
                "miner_id": coins.minerID,
                "CoinMined": coins.minedCoins
            })
        return_data = {
            "error": "0",
            "message": "Successfull",
            "LeaderBoard": topCoinsMined
        }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
@api_view(["GET"])
@autentication.token_required
def user_profile(request,decrypedToken):
    """Return the caller's profile, coin balances and bank account details.

    The response shape depends on the JWT's ``role``: ordinary users get a
    ``user_coins`` section, everyone else an ``agent_coins`` value.
    Error codes: 0 success, 3 unexpected server error.
    """
    try:
        userID = decrypedToken['user_id']
        # NOTE(review): these two fetches are repeated inside both role branches
        # below; also this UserCoins lookup appears to run for agents too —
        # confirm agents always have a UserCoins row, otherwise this raises
        # and the request is reported as error 3.
        UserInfo = User.objects.get(user_id=userID)
        UserCoin = UserCoins.objects.get(user__user_id=userID)
        #verify if user have account
        account_info = AccountDetails.objects.filter(user__user_id=decrypedToken['user_id']).exists()
        if account_info == True:
            account = AccountDetails.objects.get(user__user_id=decrypedToken['user_id'])
            account_details = {
                "account_name": account.account_name,
                "account_number": account.account_number,
                "bank_name": account.bank_name
            }
        else:
            # no saved bank account: return an empty placeholder structure
            account_details = {
                "account_name": None,
                "account_number": None,
                "bank_name": None
            }
        if decrypedToken['role'] == 'user':
            UserInfo = User.objects.get(user_id=userID)
            UserCoin = UserCoins.objects.get(user__user_id=userID)
            return_data = {
                "error": "0",
                "message": "Successfull",
                "data": {
                    "user_details": {
                        "first_name": f"{UserInfo.firstname}",
                        "last_name": f"{UserInfo.lastname}",
                        "email": f"{UserInfo.email}",
                        "phone_number": f"{UserInfo.user_phone}",
                        "gender": f"{UserInfo.user_gender}",
                        "address": f"{UserInfo.user_address}",
                        "state": f"{UserInfo.user_state}",
                        "LGA": f"{UserInfo.user_LGA}",
                        "country": f"{UserInfo.user_country}",
                        "role": f"{UserInfo.role}"
                    }
                    ,
                    "user_coins": {
                        "miner_id": f"{UserCoin.minerID}",
                        "mined_coins": f"{UserCoin.minedCoins}",
                        "redeemed_coins": f"{UserCoin.redeemedWasteCoin}",
                    },
                    "account_information": account_details
                }
            }
        else:
            UserInfo = User.objects.get(user_id=userID)
            AgentCoin = AgentCoins.objects.get(agent__user_id=userID)
            return_data = {
                "error": "0",
                "message": "Successfull",
                "data": {
                    "user_details": {
                        "first_name": f"{UserInfo.firstname}",
                        "last_name": f"{UserInfo.lastname}",
                        "email": f"{UserInfo.email}",
                        "phone_number": f"{UserInfo.user_phone}",
                        "gender": f"{UserInfo.user_gender}",
                        "address": f"{UserInfo.user_address}",
                        "state": f"{UserInfo.user_state}",
                        "LGA": f"{UserInfo.user_LGA}",
                        "country": f"{UserInfo.user_country}",
                        "role": f"{UserInfo.role}"
                    },
                    "agent_coins": f"{AgentCoin.agentCoins}",
                    "account_information": account_details
                }
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
@api_view(["GET"])
@autentication.token_required
def wallet_details(request,decrypedToken):
    """Return the caller's coin balance and transaction history.

    Users get their UserTrasactionHistory, agents their
    AgentTransactionHistory; both are returned newest-first.
    Error codes: 0 success, 3 unexpected server error.
    """
    try:
        userID = decrypedToken['user_id']
        trasactions = []
        if decrypedToken["role"] == "user":
            i = 0
            transaction_history = UserTrasactionHistory.objects.filter(user__user_id=userID)
            numOfTransactions = len(transaction_history)
            user_coins = UserCoins.objects.get(user__user_id=userID)
            while i < numOfTransactions:
                perTransaction = {
                    "date": transaction_history[i].date_added.strftime("%Y-%m-%d"),
                    "amount": transaction_history[i].amount,
                    "transaction": transaction_history[i].transaction
                }
                trasactions.append(perTransaction)
                i += 1
            return_data = {
                "error": "0",
                "message": "Successfull",
                "data": {
                    "current_balance": f"{user_coins.minedCoins}",
                    # [1:] drops the oldest entry — presumably the zero-amount
                    # seed transaction created at registration; [::-1] shows
                    # newest first.  TODO confirm the slice is intentional.
                    "transaction_history": trasactions[1:][::-1]
                }
            }
        else:
            i = 0
            transaction_history = AgentTransactionHistory.objects.filter(agent__user_id=userID)
            numOfTransactions = len(transaction_history)
            agent_coins = AgentCoins.objects.get(agent__user_id=userID)
            while i < numOfTransactions:
                perTransaction = {
                    "date": transaction_history[i].date_added.strftime("%Y-%m-%d"),
                    "amount": transaction_history[i].amount,
                    "transaction" : transaction_history[i].transaction,
                    "miner_id": transaction_history[i].coin_allocated_to
                }
                trasactions.append(perTransaction)
                i +=1
            return_data = {
                "error": "0",
                "message": "Successfull",
                "data": {
                    "current_balance": f"{agent_coins.agentCoins}",
                    "transaction_history": trasactions[::-1]
                }
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
@api_view(["POST"])
@autentication.token_required
def redeemcoins(request,decrypedToken):
    """Redeem some of the caller's mined coins for Naira.

    Expects ``amount`` in the POST body.  Deducts the coins, records a
    Debit transaction and returns the transaction id plus the Naira value.
    Error codes: 0 success, 1 insufficient coins, 2 invalid amount,
    3 unexpected server error.
    """
    try:
        coins_amount = request.data.get("amount",None)
        if coins_amount != None and coins_amount != "":
            coins_amount = float(coins_amount)
            if coins_amount <= float(0):
                return_data = {
                    # bug fix: this endpoint returned the int 2 while every
                    # other endpoint uses string error codes
                    "error": "2",
                    "message": "Number is negative or zero"
                }
            else:
                user_coins = UserCoins.objects.get(user__user_id=decrypedToken["user_id"])
                exchange_rate = fixed_var.exchange_rate
                numofCoins = user_coins.minedCoins
                user_data = User.objects.get(user_id=decrypedToken["user_id"])
                if coins_amount > numofCoins:
                    return_data = {
                        "error": "1",
                        "message": "Not enough coins"
                    }
                else:
                    transactionid = string_generator.alphanumeric(15)
                    toNaira = exchange_rate * coins_amount
                    user_coins.minedCoins = numofCoins - coins_amount
                    # NOTE(review): this overwrites rather than accumulates the
                    # redeemed total — confirm intended semantics
                    user_coins.redeemedWasteCoin = coins_amount
                    user_coins.save()
                    #Save Transaction
                    transaction = UserTrasactionHistory(user=user_data,transaction_id=transactionid,
                                    amount=coins_amount,coin_redeemed_amount=toNaira,transaction="Debit")
                    transaction.save()
                    #Add coin to the coin repository
                    return_data = {
                        "error": "0",
                        "message": "Successful, Coin Mined",
                        "transaction_id": f"{transactionid}",
                        "amount": f"{toNaira}"
                    }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameter"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
@api_view(["POST"])
@autentication.token_required
def allocate_coins(request,decrypedToken):
    """Agent-only: transfer coins from the agent's balance to a user.

    Expects ``coins_allocated`` and ``miner_id`` in the POST body.  Debits
    the agent, credits the user, and records a transaction on each side.
    Error codes: 0 success, 1 unknown miner id, 2 unauthorized /
    insufficient coins / invalid parameters, 3 unexpected server error.
    """
    try:
        coins_allocated = request.data.get("coins_allocated",None)
        user_MinerID = request.data.get("miner_id",None)
        field = [coins_allocated,user_MinerID]
        if not None in field and not "" in field:
            # bug fix: the float() conversion used to run before the None/""
            # check, so a missing amount raised TypeError and was misreported
            # as error 3 instead of error 2
            coins_allocated = float(coins_allocated)
            if UserCoins.objects.filter(minerID=user_MinerID).exists() == False:
                return_data = {
                    "error": "1",
                    "message": "User does not exist"
                }
            elif User.objects.get(user_id= decrypedToken['user_id']).role != "agent":
                return_data = {
                    "error": "2",
                    "message": "Unauthorized User"
                }
            else:
                agent_coins = AgentCoins.objects.get(agent__user_id=decrypedToken["user_id"]).agentCoins
                if coins_allocated > agent_coins:
                    return_data = {
                        "error": "2",
                        "message": "Not enough coins"
                    }
                else:
                    wastecoin_user = UserCoins.objects.get(minerID=user_MinerID)
                    user = wastecoin_user.user
                    agent_user = User.objects.get(user_id= decrypedToken['user_id'])
                    agent_coins = AgentCoins.objects.get(agent__user_id=decrypedToken["user_id"])
                    user_coins = UserCoins.objects.get(user__user_id=user.user_id)
                    #allocate Coin to user
                    remaining_coins =agent_coins.agentCoins - coins_allocated
                    agent_coins.agentCoins = remaining_coins
                    #Debit_agent
                    withdrawl= AgentTransactionHistory(agent=agent_user,transaction_id=string_generator.alphanumeric(15),amount=coins_allocated,
                                coin_allocated_to=user_MinerID,transaction="Debit")
                    agent_coins.save()
                    withdrawl.save()
                    #credit User
                    add_coins = user_coins.minedCoins + coins_allocated
                    user_coins.minedCoins = add_coins
                    allocate = UserTrasactionHistory(user=user,transaction_id=string_generator.alphanumeric(15),
                                amount=coins_allocated,transaction="Credit")
                    user_coins.save()
                    allocate.save()
                    return_data = {
                        "error": "0",
                        "message": f"Successful,coins allocated to {user.firstname} {user.lastname}",
                        "current_balance": f"{remaining_coins}"
                    }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameters"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
@api_view(["POST"])
@autentication.token_required
def changepassword(request,decryptedToken):
    """Change the authenticated user's password (old password required).

    Expects ``old_password`` and ``new_password`` in the POST body.
    Error codes: 0 changed, 2 wrong old password / invalid parameters,
    3 unexpected server error.
    """
    try:
        old_password = request.data.get("old_password",None)
        new_password = request.data.get("new_password",None)
        field = [old_password,new_password]
        if not None in field and not "" in field:
            user_data = User.objects.get(user_id=decryptedToken["user_id"])
            is_valid_password = password_functions.check_password_match(old_password,user_data.user_password)
            if is_valid_password == False:
                return_data = {
                    "error": "2",
                    "message": "Password is Incorrect"
                }
            else:
                # hash the new password and store the hash (bug fix: the hash
                # was never assigned to the user before saving)
                encryptpassword = password_functions.generate_password_hash(new_password)
                user_data.user_password = encryptpassword
                user_data.save()
                return_data = {
                    "error": "0",
                    "message": "Successfull, Password Changed"
                }
        else:
            # bug fix: previously no else branch existed, so missing fields
            # caused a NameError that surfaced as error 3
            return_data = {
                "error": "2",
                "message": "Invalid Parameters"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
@api_view(["PUT"])
@autentication.token_required
def update_info(request,decryptedToken):
    """Update the authenticated user's address, state and LGA.

    Expects ``address``, ``state`` and ``lga`` in the request body and
    echoes the stored values back on success.
    Error codes: 0 updated, 2 missing parameters, 3 unexpected error.
    """
    try:
        address = request.data.get("address",None)
        state = request.data.get("state",None)
        user_lga = request.data.get("lga",None)
        field = [address,state,user_lga]
        if not None in field and not "" in field:
            # (debug print of the user id removed)
            user_data = User.objects.get(user_id=decryptedToken["user_id"])
            user_data.user_address = address
            user_data.user_state = state
            user_data.user_LGA = user_lga
            user_data.save()
            return_data = {
                "error": "0",
                "message": "Successfully Updated",
                "data": {
                    "address": address,
                    "state": state,
                    "lga": user_lga
                }
            }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameter"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
@api_view(["POST","PUT"])
@autentication.token_required
def account_details(request,decryptedToken):
    """Create or update the authenticated user's bank account details.

    Expects ``account_name``, ``account_number`` and ``bank_name``; the
    stored values are echoed back on success.
    Error codes: 0 saved, 2 missing parameters, 3 unexpected error.
    """
    try:
        accountName = request.data.get("account_name",None)
        accountNumber = request.data.get("account_number",None)
        bankName = request.data.get("bank_name",None)
        supplied = [accountName,accountNumber,bankName]
        if None not in supplied and "" not in supplied:
            owner = User.objects.get(user_id=decryptedToken['user_id'])
            # update in place if a record exists, otherwise create one
            if AccountDetails.objects.filter(user__user_id=decryptedToken['user_id']).exists():
                record = AccountDetails.objects.get(user__user_id=decryptedToken['user_id'])
                record.account_number = accountNumber
                record.account_name = accountName
                record.bank_name = bankName
            else:
                record = AccountDetails(user=owner,account_name=accountName,
                            account_number=accountNumber,bank_name=bankName)
            record.save()
            return_data = {
                "error": "0",
                "message": "Account saved successfully",
                "data": {
                    "account_name": accountName,
                    "account_number": accountNumber,
                    "bank_name": bankName
                }
            }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameter"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
@api_view(["POST"])
def contact_us(request):
    """Store a contact-us form submission; the phone number is optional.

    Requires ``full_name``, ``email`` and ``message``.
    Error codes: 0 saved, 2 missing parameters, 3 unexpected error.
    """
    try:
        fullName = request.data.get('full_name',None)
        Email = request.data.get('email',None)
        phoneNumber = request.data.get('phone_number',None)
        Message = request.data.get('message',None)
        required = [fullName,Email,Message]
        if None not in required and "" not in required:
            details = {"full_name": fullName, "email": Email, "message": Message}
            # only attach the phone number when one was actually supplied
            if phoneNumber not in (None, ""):
                details["phone_number"] = phoneNumber
            ContactUs(**details).save()
            return_data = {
                "error": "0",
                "message": "Your response have been saved successfully"
            }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameters"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
import datetime
import jwt
from api.models import (AccountDetails, AgentCoins, AgentTransactionHistory,
ContactUs, User, UserCoins, UserTrasactionHistory, otp)
from CustomCode import (autentication, fixed_var, password_functions, sms,
string_generator, validator)
from django.db.models import Sum
from rest_framework.decorators import api_view
from rest_framework.response import Response
from wasteCoin import settings
# Create your views here.
@api_view(['GET'])
def index_page(request):
    """Liveness probe: confirms the API is reachable."""
    return Response({
        "error" : "0",
        "message" : "Successful"
    })
@api_view(["POST"])
def user_registration(request):
    """Register a new user, seed their coin records and SMS an OTP.

    Creates the User row, an otp row, a zero-balance UserCoins row and a
    seed Credit transaction, then returns a 24h JWT.
    Error codes: 0 registered, 1 duplicate user / bad email or phone,
    2 missing parameters, 3 unexpected server error.
    """
    try:
        firstName = request.data.get('firstname',None)
        lastName = request.data.get('lastname',None)
        phoneNumber = request.data.get('phonenumber',None)
        email = request.data.get('email',None)
        gender = request.data.get('gender',None)
        password = request.data.get('password',None)
        address = request.data.get('address',None)
        lga = request.data.get('lga',None)
        state = request.data.get('state',None)
        country = request.data.get('country',None)
        # gender is deliberately absent here: it is optional
        reg_field = [firstName,lastName,phoneNumber,email,password,address,lga,state,country]
        if not None in reg_field and not "" in reg_field:
            if User.objects.filter(user_phone =phoneNumber).exists() or User.objects.filter(email =email).exists():
                return_data = {
                    "error": "1",
                    "message": "User Exists"
                }
            elif validator.checkmail(email) == False or validator.checkphone(phoneNumber)== False:
                return_data = {
                    "error": "1",
                    "message": "Email or Phone number is Invalid"
                }
            else:
                #generate user_id
                userRandomId = string_generator.alphanumeric(20)
                miner_id = string_generator.numeric(7)
                transactionid = string_generator.alphanumeric(15)
                #encrypt password
                encryped_password = password_functions.generate_password_hash(password)
                #Save user_data (bug fix: the password hash was never passed in,
                #a placeholder token was stored instead)
                new_userData = User(user_id=userRandomId,firstname=firstName,lastname=lastName,
                            email=email,user_phone=phoneNumber,user_gender=gender,
                            user_password=encryped_password,user_address=address,
                            user_state=state,user_LGA=lga,user_country=country)
                new_userData.save()
                #Generate OTP
                code = string_generator.numeric(6)
                #Save OTP
                user_OTP =otp(user=new_userData,otp_code=code)
                user_OTP.save()
                #Generate default coins
                user_Coins = UserCoins(user=new_userData,minerID=miner_id,redeemedWasteCoin=0,minedCoins=0)
                user_Coins.save()
                #Save Transaction Details
                user_transaction = UserTrasactionHistory(user=new_userData,transaction_id=transactionid,
                                    amount=0,coin_redeemed_amount=0,transaction="Credit")
                user_transaction.save()
                role = User.objects.get(user_id=userRandomId).role
                validated = otp.objects.get(user__user_id=userRandomId).validated
                #Generate token
                timeLimit= datetime.datetime.utcnow() + datetime.timedelta(minutes=1440) #set duration for token
                payload = {"user_id": f"{userRandomId}",
                            "role": role,
                            "validated": validated,
                            "exp":timeLimit}
                token = jwt.encode(payload,settings.SECRET_KEY)
                message = f"Welcome to WasteCoin, your verification code is {code}"
                # [1:] drops the leading character — presumably a '+' prefix
                sms.sendsms(phoneNumber[1:],message)
                return_data = {
                    "error": "0",
                    "message": "The registration was successful, A verrification SMS has been sent",
                    "token": f"{token.decode('UTF-8')}",
                    "elapsed_time": f"{timeLimit}",
                }
        else:
            return_data = {
                "error":"2",
                "message": "Invalid Parameter"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
#User verfication
@api_view(["POST"])
@autentication.token_required
def user_verification(request,decrypedToken):
    """Verify a registered user by checking the OTP they submit.

    Expects ``otp`` in the POST body; the user is identified from the
    decoded JWT (``decrypedToken['user_id']``).  The code is only accepted
    within 30 minutes of the otp row's ``date_added``.
    Error codes: 0 verified, 1 wrong or expired OTP, 2 missing parameter,
    3 unexpected server error.
    """
    try:
        otp_entered = request.data.get("otp",None)
        if otp_entered != None and otp_entered != "":
            user_data = otp.objects.get(user__user_id=decrypedToken['user_id'])
            otpCode,date_added = user_data.otp_code,user_data.date_added
            # aware UTC "now"; assumes date_added is stored timezone-aware — TODO confirm
            date_now = datetime.datetime.now(datetime.timezone.utc)
            duration = float((date_now - date_added).total_seconds())
            timeLimit = 1800.0 #30 mins interval
            if otp_entered == otpCode and duration < timeLimit:
                #validate user
                user_data.validated = True
                user_data.save()
                return_data = {
                    "error": "0",
                    "message":"User Verified"
                }
            elif otp_entered != otpCode and duration < timeLimit:
                return_data = {
                    "error": "1",
                    "message": "Incorrect OTP"
                }
            # NOTE(review): if the OTP is both wrong AND expired no branch below
            # assigns return_data; the resulting NameError is swallowed by the
            # except clause and reported as error 3.
            elif otp_entered == otpCode and duration > timeLimit:
                return_data = {
                    "error": "1",
                    "message": "OTP has expired"
                }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameters"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
#resend OTP
@api_view(["POST"])
def resend_otp(request):
    """Generate a fresh OTP for an existing user and SMS it to them.

    Expects ``phone_number`` in the POST body.  Also issues a new 24h JWT
    carrying the user's current verification status.
    Error codes: 0 sent, 1 unknown user, 2 missing parameter, 3 error.
    """
    try:
        phone_number = request.data.get('phone_number',None)
        if phone_number != None and phone_number != "":
            if User.objects.filter(user_phone =phone_number).exists() == False:
                return_data = {
                    "error": "1",
                    "message": "User does not exist"
                }
            else:
                user_data = otp.objects.get(user__user_phone=phone_number)
                user = User.objects.get(user_phone=phone_number)
                #generate new otp
                code = string_generator.numeric(6)
                user_data.otp_code = code
                user_data.save()
                message = f"Welcome to WasteCoin, your verification code is {code}"
                # [1:] drops the leading character — presumably a '+' prefix;
                # confirm against the SMS gateway's expected number format
                sms.sendsms(phone_number[1:],message)
                timeLimit= datetime.datetime.utcnow() + datetime.timedelta(minutes=1440) #set limit for user
                payload = {"user_id": f'{user.user_id}',
                            "role": user.role,
                            "validated": user_data.validated,
                            "exp":timeLimit}
                token = jwt.encode(payload,settings.SECRET_KEY)
                return_data = {
                    "error": "0",
                    "message": "OTP sent to phone number",
                    "token": token.decode('UTF-8')
                }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameters"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
#User login
@api_view(["POST"])
def user_login(request):
    """Authenticate a user by e-mail or phone number plus password.

    ``email_phone`` may be an e-mail address or a phone number;
    validator.checkmail / validator.checkphone decide which lookup branch
    runs.  A 24h JWT is issued on lookup success and is returned even when
    the account is not yet OTP-verified, so the client can re-verify.
    Error codes: 0 success, 1 unknown user / wrong password / unverified,
    2 invalid input, 3 unexpected server error.
    """
    try:
        email_phone = request.data.get("email_phone",None)
        password = request.data.get("password",None)
        field = [email_phone,password]
        if not None in field and not '' in field:
            validate_mail = validator.checkmail(email_phone)
            validate_phone = validator.checkphone(email_phone)
            if validate_mail == True:
                # --- e-mail branch ---
                if User.objects.filter(email =email_phone).exists() == False:
                    return_data = {
                        "error": "1",
                        "message": "User does not exist"
                    }
                else:
                    user_data = User.objects.get(email=email_phone)
                    is_valid_password = password_functions.check_password_match(password,user_data.user_password)
                    is_verified = otp.objects.get(user__user_phone=user_data.user_phone).validated
                    #Generate token
                    timeLimit= datetime.datetime.utcnow() + datetime.timedelta(minutes=1440) #set limit for user
                    payload = {"user_id": f'{user_data.user_id}',
                                "role": user_data.role,
                                "validated": is_verified,
                                "exp":timeLimit}
                    token = jwt.encode(payload,settings.SECRET_KEY)
                    if is_valid_password and is_verified:
                        return_data = {
                            "error": "0",
                            "message": "Successfull",
                            "token": token.decode('UTF-8'),
                            "token-expiration": f"{timeLimit}",
                            "user_details": [
                                {
                                    "firstname": f"{user_data.firstname}",
                                    "lastname": f"{user_data.lastname}",
                                    "email": f"{user_data.email}",
                                    "phone_number": f"{user_data.user_phone}",
                                    "gender": f"{user_data.user_gender}",
                                    "address": f"{user_data.user_address}",
                                    "state": f"{user_data.user_state}",
                                    "LGA": f"{user_data.user_LGA}",
                                    "country": f"{user_data.user_country}"
                                }
                            ]
                        }
                    elif is_verified == False:
                        # unverified users still get a token so they can finish OTP verification
                        return_data = {
                            "error" : "1",
                            "message": "User is not verified",
                            "token": token.decode('UTF-8')
                        }
                    else:
                        return_data = {
                            "error" : "1",
                            "message" : "Wrong Password"
                        }
            elif validate_phone == True:
                # --- phone branch (mirrors the e-mail branch above) ---
                if User.objects.filter(user_phone =email_phone).exists() == False:
                    return_data = {
                        "error": "1",
                        "message": "User does not exist"
                    }
                else:
                    user_data = User.objects.get(user_phone=email_phone)
                    is_verified = otp.objects.get(user__user_phone=user_data.user_phone).validated
                    is_valid_password = password_functions.check_password_match(password,user_data.user_password)
                    #Generate token
                    timeLimit= datetime.datetime.utcnow() + datetime.timedelta(minutes=1440) #set limit for user
                    payload = {"user_id": f'{user_data.user_id}',
                                "validated": is_verified,
                                "role": user_data.role,
                                "exp":timeLimit}
                    token = jwt.encode(payload,settings.SECRET_KEY)
                    if is_valid_password and is_verified:
                        return_data = {
                            "error": "0",
                            "message": "Successfull",
                            "token": token.decode('UTF-8'),
                            "token-expiration": f"{timeLimit}",
                            "user_details": [
                                {
                                    "firstname": f"{user_data.firstname}",
                                    "lastname": f"{user_data.lastname}",
                                    "email": f"{user_data.email}",
                                    "phone_number": f"{user_data.user_phone}",
                                    "gender": f"{user_data.user_gender}",
                                    "address": f"{user_data.user_address}",
                                    "state": f"{user_data.user_state}",
                                    "LGA": f"{user_data.user_LGA}",
                                    "country": f"{user_data.user_country}"
                                }
                            ]
                        }
                    elif is_verified == False:
                        return_data = {
                            "error" : "1",
                            "message": "User is not verified",
                            "token": token.decode('UTF-8')
                        }
                    else:
                        return_data = {
                            "error" : "1",
                            "message" : "Wrong Password"
                        }
            else:
                return_data = {
                    "error": "2",
                    "message": "Email or Phone Number is Invalid"
                }
        else:
            return_data = {
                "error" : "2",
                "message" : "Invalid Parameters"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
@api_view(["POST"])
def password_reset(request):
    """Start a password reset: SMS a 15-char reset code to the user.

    Expects ``phone_number`` in the POST body.  The code is stored on the
    user's otp row (``password_reset_code``) for password_change to check,
    and a fresh 24h JWT is returned for the follow-up request.
    Error codes: 0 sent, 1 unknown user, 2 missing parameter, 3 error.
    """
    try:
        phone_number = request.data.get('phone_number',None)
        if phone_number != None and phone_number != "":
            if User.objects.filter(user_phone =phone_number).exists() == False:
                return_data = {
                    "error": "1",
                    "message": "User does not exist"
                }
            else:
                user_data = otp.objects.get(user__user_phone=phone_number)
                user = User.objects.get(user_phone=phone_number)
                generate_pin = string_generator.alphanumeric(15)
                user_data.password_reset_code = generate_pin
                user_data.save()
                message = f"Welcome to WasteCoin, your password reset code is {generate_pin}"
                # [1:] drops the leading character — presumably a '+' prefix
                sms.sendsms(phone_number[1:],message)
                timeLimit= datetime.datetime.utcnow() + datetime.timedelta(minutes=1440) #set limit for user
                payload = {"user_id": f'{user.user_id}',
                            "role": user.role,
                            "validated": user_data.validated,
                            "exp":timeLimit}
                token = jwt.encode(payload,settings.SECRET_KEY)
                return_data = {
                    "error": "0",
                    "message": "Successful, reset code sent to Phone Number",
                    "token": token.decode('UTF-8')
                }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameter"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
#Change password
@api_view(["POST"])
@autentication.token_required
def password_change(request,decrypedToken):
    """Complete a password reset using the SMS reset code.

    Expects ``reset_code`` and ``new_password`` in the POST body; the user
    comes from the decoded JWT.  On a code match the new password hash is
    stored.  Error codes: 0 changed, 1 code mismatch, 2 missing
    parameters, 3 unexpected server error.
    """
    try:
        reset_code = request.data.get("reset_code",None)
        new_password = request.data.get("new_password",None)
        fields = [reset_code,new_password]
        if not None in fields and not "" in fields:
            #get user info
            user_data = User.objects.get(user_id=decrypedToken["user_id"])
            # debug print of the reset code removed: it leaked the secret to logs
            otp_reset_code = otp.objects.get(user__user_id=decrypedToken["user_id"]).password_reset_code
            if reset_code == otp_reset_code:
                # hash the new password and store the hash (bug fix: the hash
                # was never assigned to the user before saving)
                encryptpassword = password_functions.generate_password_hash(new_password)
                user_data.user_password = encryptpassword
                user_data.save()
                return_data = {
                    "error": "0",
                    "message": "Successfull, Password Changed"
                }
            elif reset_code != otp_reset_code:
                return_data = {
                    "error": "1",
                    "message": "Code does not Match"
                }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameters"
            }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
@api_view(["GET"])
@autentication.token_required
def Dashboard(request,decrypedToken):
    """Return mining statistics for the dashboard screen.

    Reports total allocation, exchange rates, mined/unmined totals and
    percentages, plus a leaderboard built from distinct Credit amounts.
    Error codes: 0 success, 2 missing user id, 3 unexpected error (the
    exception text is echoed back in ``message``).
    """
    try:
        user_id = decrypedToken['user_id']
        if user_id != None and user_id != '':
            total_wastecoin = fixed_var.backallocation
            rate_exchange = fixed_var.exchange_rate
            rate_changed = fixed_var.changed_rate
            month = datetime.datetime.now().strftime('%B')
            # aggregate() yields None when there are no Credit rows yet; default
            # to 0 so the arithmetic below cannot raise a TypeError
            total_minedCoins = UserTrasactionHistory.objects.filter(transaction="Credit").aggregate(Sum('amount'))['amount__sum'] or 0
            total_unminedCoins = total_wastecoin - total_minedCoins
            #Get Percentage
            percent_of_Usermined_coins = round((total_minedCoins/(total_wastecoin))*100)
            percent_of_Userunmined_coins = round((total_unminedCoins/(total_wastecoin))*100)
            WasteCoinBoard = UserTrasactionHistory.objects.filter(transaction='Credit').distinct('amount').order_by('-amount')
            topCoinsMined = []
            for entry in WasteCoinBoard:
                topCoinsMined.append({
                    "miner_id": UserCoins.objects.get(user__user_id=entry.user.user_id).minerID,
                    "CoinMined": entry.amount
                })
            return_data = {
                "error": "0",
                "message": "Sucessfull",
                "data":
                    {
                        "allocatedWasteCoin": total_wastecoin,
                        "month": month,
                        "exchangeRate": rate_exchange,
                        "changedRate": rate_changed,
                        "totalWasteCoinMined": total_minedCoins,
                        "totalWasteCoinUnmined": total_unminedCoins,
                        "summary": {
                            "totalWasteCoinMinedPercentage": percent_of_Usermined_coins,
                            "totalWasteCoinUnMinedPercentage": percent_of_Userunmined_coins
                        },
                        "leaderBoard": topCoinsMined
                    }
            }
        else:
            return_data = {
                "error": "2",
                "message": "Invalid Parameter"
            }
    except Exception as e:
        return_data = {
            "error": "3",
            "message": str(e)
        }
    return Response(return_data)
#Check leaderboard
@api_view(["GET"])
def LeadBoard(request):
    """Public, unauthenticated leaderboard of miners by mined coins.

    Fixes: ``numberOfUsers = 2`` was declared but never applied (the loop
    returned every miner), and each row triggered a redundant second
    ``UserCoins`` lookup for a value already present on the row.
    """
    try:
        WasteCoinBoard = UserCoins.objects.all().order_by('-minedCoins')
        numberOfUsers = 2  # leaderboard size; now actually enforced below
        topCoinsMined = [
            {
                "miner_id": entry.minerID,
                # entry IS the UserCoins row -- no need to re-query it.
                "CoinMined": entry.minedCoins,
            }
            for entry in WasteCoinBoard[:numberOfUsers]
        ]
        return_data = {
            "error": "0",
            "message": "Successfull",
            "LeaderBoard": topCoinsMined
        }
    except Exception:
        return_data = {
            "error": "3",
            "message": "An error occured"
        }
    return Response(return_data)
@api_view(["GET"])
@autentication.token_required
def user_profile(request,decrypedToken):
try:
userID = decrypedToken['user_id']
UserInfo = User.objects.get(user_id=userID)
UserCoin = UserCoins.objects.get(user__user_id=userID)
#verify if user have account
account_info = AccountDetails.objects.filter(user__user_id=decrypedToken['user_id']).exists()
if account_info == True:
account = AccountDetails.objects.get(user__user_id=decrypedToken['user_id'])
account_details = {
"account_name": account.account_name,
"account_number": account.account_number,
"bank_name": account.bank_name
}
else:
account_details = {
"account_name": None,
"account_number": None,
"bank_name": None
}
if decrypedToken['role'] == 'user':
UserInfo = User.objects.get(user_id=userID)
UserCoin = UserCoins.objects.get(user__user_id=userID)
return_data = {
"error": "0",
"message": "Successfull",
"data": {
"user_details": {
"first_name": f"{UserInfo.firstname}",
"last_name": f"{UserInfo.lastname}",
"email": f"{UserInfo.email}",
"phone_number": f"{UserInfo.user_phone}",
"gender": f"{UserInfo.user_gender}",
"address": f"{UserInfo.user_address}",
"state": f"{UserInfo.user_state}",
"LGA": f"{UserInfo.user_LGA}",
"country": f"{UserInfo.user_country}",
"role": f"{UserInfo.role}"
}
,
"user_coins": {
"miner_id": f"{UserCoin.minerID}",
"mined_coins": f"{UserCoin.minedCoins}",
"redeemed_coins": f"{UserCoin.redeemedWasteCoin}",
},
"account_information": account_details
}
}
else:
UserInfo = User.objects.get(user_id=userID)
AgentCoin = AgentCoins.objects.get(agent__user_id=userID)
return_data = {
"error": "0",
"message": "Successfull",
"data": {
"user_details": {
"first_name": f"{UserInfo.firstname}",
"last_name": f"{UserInfo.lastname}",
"email": f"{UserInfo.email}",
"phone_number": f"{UserInfo.user_phone}",
"gender": f"{UserInfo.user_gender}",
"address": f"{UserInfo.user_address}",
"state": f"{UserInfo.user_state}",
"LGA": f"{UserInfo.user_LGA}",
"country": f"{UserInfo.user_country}",
"role": f"{UserInfo.role}"
},
"agent_coins": f"{AgentCoin.agentCoins}",
"account_information": account_details
}
}
except Exception:
return_data = {
"error": "3",
"message": "An error occured"
}
return Response(return_data)
@api_view(["GET"])
@autentication.token_required
def wallet_details(request,decrypedToken):
try:
userID = decrypedToken['user_id']
trasactions = []
if decrypedToken["role"] == "user":
i = 0
transaction_history = UserTrasactionHistory.objects.filter(user__user_id=userID)
numOfTransactions = len(transaction_history)
user_coins = UserCoins.objects.get(user__user_id=userID)
while i < numOfTransactions:
perTransaction = {
"date": transaction_history[i].date_added.strftime("%Y-%m-%d"),
"amount": transaction_history[i].amount,
"transaction": transaction_history[i].transaction
}
trasactions.append(perTransaction)
i += 1
return_data = {
"error": "0",
"message": "Successfull",
"data": {
"current_balance": f"{user_coins.minedCoins}",
"transaction_history": trasactions[1:][::-1]
}
}
else:
i = 0
transaction_history = AgentTransactionHistory.objects.filter(agent__user_id=userID)
numOfTransactions = len(transaction_history)
agent_coins = AgentCoins.objects.get(agent__user_id=userID)
while i < numOfTransactions:
perTransaction = {
"date": transaction_history[i].date_added.strftime("%Y-%m-%d"),
"amount": transaction_history[i].amount,
"transaction" : transaction_history[i].transaction,
"miner_id": transaction_history[i].coin_allocated_to
}
trasactions.append(perTransaction)
i +=1
return_data = {
"error": "0",
"message": "Successfull",
"data": {
"current_balance": f"{agent_coins.agentCoins}",
"transaction_history": trasactions[::-1]
}
}
except Exception:
return_data = {
"error": "3",
"message": "An error occured"
}
return Response(return_data)
@api_view(["POST"])
@autentication.token_required
def redeemcoins(request,decrypedToken):
try:
coins_amount = request.data.get("amount",None)
if coins_amount != None and coins_amount != "":
coins_amount = float(coins_amount)
if coins_amount == float(0) or coins_amount < float(0):
return_data = {
"error": 2,
"message": "Number is negative or zero"
}
else:
user_coins = UserCoins.objects.get(user__user_id=decrypedToken["user_id"])
exchange_rate = fixed_var.exchange_rate
numofCoins = user_coins.minedCoins
user_data = User.objects.get(user_id=decrypedToken["user_id"])
if coins_amount > numofCoins:
return_data = {
"error": "1",
"message": "Not enough coins"
}
else:
transactionid = string_generator.alphanumeric(15)
toNaira = exchange_rate * coins_amount
user_coins.minedCoins = numofCoins - coins_amount
user_coins.redeemedWasteCoin = coins_amount
user_coins.save()
#Save Transaction
transaction = UserTrasactionHistory(user=user_data,transaction_id=transactionid,
amount=coins_amount,coin_redeemed_amount=toNaira,transaction="Debit")
transaction.save()
#Add coin to the coin repository
return_data = {
"error": "0",
"message": "Successful, Coin Mined",
"transaction_id": f"{transactionid}",
"amount": f"{toNaira}"
}
else:
return_data = {
"error": 2,
"message": "Invalid Parameter"
}
except Exception:
return_data = {
"error": "3",
"message": "An error occured"
}
return Response(return_data)
@api_view(["POST"])
@autentication.token_required
def allocate_coins(request,decrypedToken):
try:
coins_allocated = float(request.data.get("coins_allocated",None))
user_MinerID = request.data.get("miner_id",None)
field = [coins_allocated,user_MinerID]
if not None in field and not "" in field:
if UserCoins.objects.filter(minerID=user_MinerID).exists() == False:
return_data = {
"error": "1",
"message": "User does not exist"
}
elif User.objects.get(user_id= decrypedToken['user_id']).role != "agent":
return_data = {
"error": "2",
"message": "Unauthorized User"
}
else:
agent_coins = AgentCoins.objects.get(agent__user_id=decrypedToken["user_id"]).agentCoins
if coins_allocated > agent_coins:
return_data = {
"error": "2",
"message": "Not enough coins"
}
else:
wastecoin_user = UserCoins.objects.get(minerID=user_MinerID)
user = wastecoin_user.user
agent_user = User.objects.get(user_id= decrypedToken['user_id'])
agent_coins = AgentCoins.objects.get(agent__user_id=decrypedToken["user_id"])
user_coins = UserCoins.objects.get(user__user_id=user.user_id)
string_generator.alphanumeric(15)
#allocate Coin to user
remaining_coins =agent_coins.agentCoins - coins_allocated
agent_coins.agentCoins = remaining_coins
#Debit_agent
withdrawl= AgentTransactionHistory(agent=agent_user,transaction_id=string_generator.alphanumeric(15),amount=coins_allocated,
coin_allocated_to=user_MinerID,transaction="Debit")
agent_coins.save()
withdrawl.save()
#credit User
add_coins = user_coins.minedCoins + coins_allocated
user_coins.minedCoins = add_coins
allocate = UserTrasactionHistory(user=user,transaction_id=string_generator.alphanumeric(15),
amount=coins_allocated,transaction="Credit")
user_coins.save()
allocate.save()
return_data = {
"error": "0",
"message": f"Successful,coins allocated to {user.firstname} {user.lastname}",
"current_balance": f"{remaining_coins}"
}
else:
return_data = {
"error": "2",
"message": "Invalid Parameters"
}
except Exception:
return_data = {
"error": "3",
"message": "An error occured"
}
return Response(return_data)
@api_view(["POST"])
@autentication.token_required
def changepassword(request,decryptedToken):
try:
old_password = request.data.get("old_password",None)
new_password = request.data.get("new_password",None)
field = [old_password,new_password]
if not None in field and not "" in field:
user_data = User.objects.get(user_id=decryptedToken["user_id"])
is_valid_password = password_functions.check_password_match(old_password,user_data.user_password)
if is_valid_password == False:
return_data = {
"error": "2",
"message": "Password is Incorrect"
}
else:
#decrypt password
encryptpassword = password_functions.generate_password_hash(new_password)
user_data.user_password = <PASSWORD>
user_data.save()
return_data = {
"error": "0",
"message": "Successfull, Password Changed"
}
except Exception:
return_data = {
"error": "3",
"message": "An error occured"
}
return Response(return_data)
@api_view(["PUT"])
@autentication.token_required
def update_info(request,decryptedToken):
try:
address = request.data.get("address",None)
state = request.data.get("state",None)
user_lga = request.data.get("lga",None)
field = [address,state,user_lga]
if not None in field and not "" in field:
print(decryptedToken["user_id"])
user_data = User.objects.get(user_id=decryptedToken["user_id"])
user_data.user_address = address
user_data.user_state = state
user_data.user_LGA = user_lga
user_data.save()
return_data = {
"error": "0",
"message": "Successfully Updated",
"data": {
"address": address,
"state": state,
"lga": user_lga
}
}
else:
return_data = {
"error": "2",
"message": "Invalid Parameter"
}
except Exception:
return_data = {
"error": "3",
"message": "An error occured"
}
return Response(return_data)
@api_view(["POST","PUT"])
@autentication.token_required
def account_details(request,decryptedToken):
try:
accountName = request.data.get("account_name",None)
accountNumber = request.data.get("account_number",None)
bankName = request.data.get("bank_name",None)
field = [accountName,accountNumber,bankName]
if not None in field and not "" in field:
user_data = User.objects.get(user_id=decryptedToken['user_id'])
if AccountDetails.objects.filter(user__user_id=decryptedToken['user_id']).exists():
user_account = AccountDetails.objects.get(user__user_id=decryptedToken['user_id'])
user_account.account_number = accountNumber
user_account.account_name = accountName
user_account.bank_name = bankName
user_account.save()
return_data = {
"error": "0",
"message": "Account saved successfully",
"data": {
"account_name": accountName,
"account_number": accountNumber,
"bank_name": bankName
}
}
else:
user_account = AccountDetails(user=user_data,account_name=accountName,
account_number=accountNumber,bank_name=bankName)
user_account.save()
return_data = {
"error": "0",
"message": "Account saved successfully",
"data": {
"account_name": accountName,
"account_number": accountNumber,
"bank_name": bankName
}
}
else:
return_data = {
"error": "2",
"message": "Invalid Parameter"
}
except Exception:
return_data = {
"error": "3",
"message": "An error occured"
}
return Response(return_data)
@api_view(["POST"])
def contact_us(request):
try:
fullName = request.data.get('full_name',None)
Email = request.data.get('email',None)
phoneNumber = request.data.get('phone_number',None)
Message = request.data.get('message',None)
field = [fullName,Email,Message]
if not None in field and not "" in field:
if phoneNumber == None or phoneNumber == "":
contact_response = ContactUs(full_name=fullName,email=Email,message=Message)
contact_response.save()
return_data = {
"error": "0",
"message": "Your response have been saved successfully"
}
else:
contact_response = ContactUs(full_name=fullName,email=Email,phone_number=phoneNumber,message=Message)
contact_response.save()
return_data = {
"error": "0",
"message": "Your response have been saved successfully"
}
else:
return_data = {
"error": "2",
"message": "Invalid Parameters"
}
except Exception:
return_data = {
"error": "3",
"message": "An error occured"
}
return Response(return_data)
# | 0.283583 | 0.118921 |
from data_science_layer.reporting.abstract_report import AbstractReport
from data_science_layer.pipeline.abstract_pipline import AbstractPipeline
import pkg_resources
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
class RegressorCurves(AbstractReport):
    """Report that saves histograms of the train/test frames plus, per
    model, predicted-vs-actual distribution and scatter plots (one figure
    as stored, one exp-transformed).
    """
    sub_folder = 'reports'  # subdirectory of the crcdal cache the PNGs are written to
    log_y = False  # NOTE(review): declared but never read in this class
    exp_y = False  # NOTE(review): declared but never read in this class
    def report(self, pipeline: AbstractPipeline):
        """Render and save all figures for *pipeline* under the cache dir."""
        # Set Directory path
        folder = ''
        path = pkg_resources.resource_filename('crcdal', 'cache/' + folder + '/' + self.sub_folder + '/')
        pkg_resources.ensure_directory(path)
        # Hist Train
        fig, ax = plt.subplots(figsize=(40, 40))
        pipeline.train.hist(bins=100, ax=ax)
        fig.savefig(path + 'Hist_Train.png')
        # Hist Test
        fig, ax = plt.subplots(figsize=(40, 40))
        pipeline.test.hist(bins=100, ax=ax)
        fig.savefig(path + 'Hist_Test.png')
        # Feature Results: one row of subplots per model, two columns per
        # target (distribution + scatter).
        nrows = len(pipeline._ml_models)
        nrows = 2 if nrows == 1 else nrows  # subplots() needs a 2-D axes grid
        ncols = 2
        ncols = 2 ** pipeline.test_y.shape[1] if pipeline.test_y.shape[1] > 1 else ncols
        fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=False, sharey=False, figsize=(40, 10 * nrows))
        fig2, axes2 = plt.subplots(nrows=nrows, ncols=ncols, sharex=False, sharey=False, figsize=(40, 10 * nrows))
        for i, model in enumerate(pipeline.get_models()):
            name = model.short_name
            preds_y_train, _ = model.predict(pipeline.train)  # NOTE(review): train predictions are computed but never plotted
            preds_y_test, _ = model.predict(pipeline.test)
            preds_y_train = pd.DataFrame(preds_y_train)
            preds_y_test = pd.DataFrame(preds_y_test)
            train_y = pd.DataFrame(pipeline.train_y)
            test_y = pd.DataFrame(pipeline.test_y)
            k = 0  # column cursor: each target consumes two columns
            for j in range(pipeline.test_y.shape[1]):
                try:
                    # Figure 1: values as stored (presumably log-scale -- see filename).
                    sns.distplot(preds_y_test.iloc[:, j], label='predict', ax=axes[i, k])
                    sns.distplot(test_y.iloc[:, j], label='test', ax=axes[i, k])
                    axes[i, k].set_title('Distribution ' + str(name))
                    axes[i, k].legend()
                    sns.regplot(test_y.iloc[:, j], preds_y_test.iloc[:, j], ax=axes[i, k + 1])
                    axes[i, k + 1].set_title('Scatter ' + str(name))
                    axes[i, k + 1].set_xlabel('Test')
                    axes[i, k + 1].set_ylabel('Predict')
                    # Figure 2: same plots after exponentiating both series.
                    sns.distplot(np.exp(preds_y_test.iloc[:, j]), label='predict', ax=axes2[i, k])
                    sns.distplot(np.exp(test_y.iloc[:, j]), label='test', ax=axes2[i, k])
                    axes2[i, k].set_title('Distribution ' + str(name))
                    axes2[i, k].legend()
                    sns.regplot(np.exp(test_y.iloc[:, j]), np.exp(preds_y_test.iloc[:, j]), ax=axes2[i, k + 1])
                    axes2[i, k + 1].set_title('Scatter ' + str(name))
                    axes2[i, k + 1].set_xlabel('Test')
                    axes2[i, k + 1].set_ylabel('Predict')
                except:  # NOTE(review): bare except deliberately left (best-effort plotting)
                    # ExceptionTracking().log_exception('Result distributions plot failed', 'DCE_pipeline', 'NA')
                    print('report error')
                k += 2
        fig.savefig(path + 'result_distributions_log.png')
fig2.savefig(path + 'result_distributions.png') | data_science_layer/reporting/regressor_curves.py | from data_science_layer.reporting.abstract_report import AbstractReport
from data_science_layer.pipeline.abstract_pipline import AbstractPipeline
import pkg_resources
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
class RegressorCurves(AbstractReport):
sub_folder = 'reports'
log_y = False
exp_y = False
def report(self, pipeline: AbstractPipeline):
# Set Directory path
folder = ''
path = pkg_resources.resource_filename('crcdal', 'cache/' + folder + '/' + self.sub_folder + '/')
pkg_resources.ensure_directory(path)
# Hist Train
fig, ax = plt.subplots(figsize=(40, 40))
pipeline.train.hist(bins=100, ax=ax)
fig.savefig(path + 'Hist_Train.png')
# Hist Test
fig, ax = plt.subplots(figsize=(40, 40))
pipeline.test.hist(bins=100, ax=ax)
fig.savefig(path + 'Hist_Test.png')
# Feature Results
nrows = len(pipeline._ml_models)
nrows = 2 if nrows == 1 else nrows
ncols = 2
ncols = 2 ** pipeline.test_y.shape[1] if pipeline.test_y.shape[1] > 1 else ncols
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=False, sharey=False, figsize=(40, 10 * nrows))
fig2, axes2 = plt.subplots(nrows=nrows, ncols=ncols, sharex=False, sharey=False, figsize=(40, 10 * nrows))
for i, model in enumerate(pipeline.get_models()):
name = model.short_name
preds_y_train, _ = model.predict(pipeline.train)
preds_y_test, _ = model.predict(pipeline.test)
preds_y_train = pd.DataFrame(preds_y_train)
preds_y_test = pd.DataFrame(preds_y_test)
train_y = pd.DataFrame(pipeline.train_y)
test_y = pd.DataFrame(pipeline.test_y)
k = 0
for j in range(pipeline.test_y.shape[1]):
try:
sns.distplot(preds_y_test.iloc[:, j], label='predict', ax=axes[i, k])
sns.distplot(test_y.iloc[:, j], label='test', ax=axes[i, k])
axes[i, k].set_title('Distribution ' + str(name))
axes[i, k].legend()
sns.regplot(test_y.iloc[:, j], preds_y_test.iloc[:, j], ax=axes[i, k + 1])
axes[i, k + 1].set_title('Scatter ' + str(name))
axes[i, k + 1].set_xlabel('Test')
axes[i, k + 1].set_ylabel('Predict')
sns.distplot(np.exp(preds_y_test.iloc[:, j]), label='predict', ax=axes2[i, k])
sns.distplot(np.exp(test_y.iloc[:, j]), label='test', ax=axes2[i, k])
axes2[i, k].set_title('Distribution ' + str(name))
axes2[i, k].legend()
sns.regplot(np.exp(test_y.iloc[:, j]), np.exp(preds_y_test.iloc[:, j]), ax=axes2[i, k + 1])
axes2[i, k + 1].set_title('Scatter ' + str(name))
axes2[i, k + 1].set_xlabel('Test')
axes2[i, k + 1].set_ylabel('Predict')
except:
# ExceptionTracking().log_exception('Result distributions plot failed', 'DCE_pipeline', 'NA')
print('report error')
k += 2
fig.savefig(path + 'result_distributions_log.png')
fig2.savefig(path + 'result_distributions.png') | 0.42919 | 0.468243 |
import unittest
import enum
from test.asserting.policy import PolicyAssertion, get_fixture_path
from vint.linting.level import Level
from vint.linting.policy.prohibit_unused_variable import ProhibitUnusedVariable
class Fixtures(enum.Enum):
    """Paths to the Vim-script fixtures exercised by the tests below."""
    VALID_VIM_SCRIPT = get_fixture_path('prohibit_unused_variable_valid.vim')
    INVALID_VIM_SCRIPT = get_fixture_path('prohibit_unused_variable_invalid.vim')
    ISSUE_274 = get_fixture_path('prohibit_unused_variable_issue_274.vim')
    IGNORED_PATTERNS = get_fixture_path('prohibit_unused_variable_ignored_patterns.vim')
    README = get_fixture_path('prohibit_unused_variable_readme.vim')
class TestProhibitUnusedVariable(PolicyAssertion, unittest.TestCase):
    """Integration tests for the ProhibitUnusedVariable lint policy."""

    def test_get_violation_if_found_when_file_is_valid(self):
        """A clean script produces no violations."""
        self.assertFoundNoViolations(Fixtures.VALID_VIM_SCRIPT.value,
                                     ProhibitUnusedVariable)

    def create_violation(self, line, column, path):
        """Build the expected-violation dict for a position inside *path*."""
        position = {'line': line, 'column': column, 'path': path}
        return {'name': 'ProhibitUnusedVariable',
                'level': Level.WARNING,
                'position': position}

    def test_get_violation_if_found_when_file_is_invalid(self):
        """Each unused variable in the invalid fixture is reported once."""
        path = Fixtures.INVALID_VIM_SCRIPT.value
        positions = [(2, 5), (4, 11), (7, 25), (7, 36), (11, 9), (12, 9), (15, 8)]
        expected_violations = [self.create_violation(line, column, path)
                               for line, column in positions]
        self.assertFoundViolationsEqual(path,
                                        ProhibitUnusedVariable,
                                        expected_violations)

    def test_issue_274(self):
        """Regression fixture for issue #274 lints clean."""
        self.assertFoundNoViolations(Fixtures.ISSUE_274.value, ProhibitUnusedVariable)

    def test_ignored_patterns(self):
        """With ignored_patterns set, only the non-matching name is flagged."""
        expected_violations = [self.create_violation(1, 5, Fixtures.IGNORED_PATTERNS.value)]
        self.assertFoundViolationsEqual(Fixtures.IGNORED_PATTERNS.value,
                                        ProhibitUnusedVariable,
                                        expected_violations,
                                        policy_options={'ignored_patterns': ['_ignored$']})

    def test_readme(self):
        """The README example lints clean."""
        self.assertFoundNoViolations(Fixtures.README.value,
                                     ProhibitUnusedVariable)
if __name__ == '__main__':
unittest.main() | test/integration/vint/linting/policy/test_prohibit_unused_variable.py | import unittest
import enum
from test.asserting.policy import PolicyAssertion, get_fixture_path
from vint.linting.level import Level
from vint.linting.policy.prohibit_unused_variable import ProhibitUnusedVariable
class Fixtures(enum.Enum):
VALID_VIM_SCRIPT = get_fixture_path('prohibit_unused_variable_valid.vim')
INVALID_VIM_SCRIPT = get_fixture_path('prohibit_unused_variable_invalid.vim')
ISSUE_274 = get_fixture_path('prohibit_unused_variable_issue_274.vim')
IGNORED_PATTERNS = get_fixture_path('prohibit_unused_variable_ignored_patterns.vim')
README = get_fixture_path('prohibit_unused_variable_readme.vim')
class TestProhibitUnusedVariable(PolicyAssertion, unittest.TestCase):
def test_get_violation_if_found_when_file_is_valid(self):
self.assertFoundNoViolations(Fixtures.VALID_VIM_SCRIPT.value,
ProhibitUnusedVariable)
def create_violation(self, line, column, path):
return {
'name': 'ProhibitUnusedVariable',
'level': Level.WARNING,
'position': {
'line': line,
'column': column,
'path': path
}
}
def test_get_violation_if_found_when_file_is_invalid(self):
expected_violations = [
self.create_violation(2, 5, Fixtures.INVALID_VIM_SCRIPT.value),
self.create_violation(4, 11, Fixtures.INVALID_VIM_SCRIPT.value),
self.create_violation(7, 25, Fixtures.INVALID_VIM_SCRIPT.value),
self.create_violation(7, 36, Fixtures.INVALID_VIM_SCRIPT.value),
self.create_violation(11, 9, Fixtures.INVALID_VIM_SCRIPT.value),
self.create_violation(12, 9, Fixtures.INVALID_VIM_SCRIPT.value),
self.create_violation(15, 8, Fixtures.INVALID_VIM_SCRIPT.value),
]
self.assertFoundViolationsEqual(Fixtures.INVALID_VIM_SCRIPT.value,
ProhibitUnusedVariable,
expected_violations)
def test_issue_274(self):
self.assertFoundNoViolations(Fixtures.ISSUE_274.value, ProhibitUnusedVariable)
def test_ignored_patterns(self):
expected_violations = [
self.create_violation(1, 5, Fixtures.IGNORED_PATTERNS.value),
]
self.assertFoundViolationsEqual(Fixtures.IGNORED_PATTERNS.value,
ProhibitUnusedVariable,
expected_violations,
policy_options={'ignored_patterns': ['_ignored$']})
def test_readme(self):
self.assertFoundNoViolations(Fixtures.README.value,
ProhibitUnusedVariable)
if __name__ == '__main__':
unittest.main() | 0.364438 | 0.331174 |
import designes as dsgn
import characters as chr
import random as rdm

# Simple text-based arena game: the hero buys upgrades in town and fights a
# monster that levels up after each defeat. User-facing strings are Turkish.
market_1 = False  # one-shot purchase flags for the three market items
market_2 = False
market_3 = False
lvl = 0  # monster level; its stats scale with this after each kill
print(dsgn.main_screen())
cvp = int(input())
if cvp != 1:
    print("ÇIKIŞ YAPILIYOR")
while 1:
    print("""************************************************************************************************************
""")
    print(f"HP: {chr.hero.hp} AP: {chr.hero.ap} GOLD: {chr.hero.gold}")
    dsgn.choice_screen() # selection screen: 1-tavern 2-market 3-wizard 4-arena
    cvp = int(input())
    if cvp == 1:
        dsgn.tavern_screen() # enter the tavern page (pay 80 gold to heal)
        print("""************************************************************************************************************
""")
        print(f"HP: {chr.hero.hp} AP: {chr.hero.ap} GOLD: {chr.hero.gold}")
        cvp = int(input())
        if cvp == 1:
            if chr.hero.gold > 80:
                chr.hero.hp = chr.hero.hp + rdm.randint(10, 40)
                chr.hero.gold = chr.hero.gold-80
            else:
                print("yeterli altının yok!")
        else:
            continue
    elif cvp == 2:
        dsgn.market_screen() # enter the market page (three one-time items)
        print("""************************************************************************************************************
""")
        print(f"HP: {chr.hero.hp} AP: {chr.hero.ap} GOLD: {chr.hero.gold}")
        cvp = int(input())
        if cvp == 1:
            if chr.hero.gold > 50 and market_1 == False:
                chr.hero.ap = chr.hero.ap + 15
                chr.hero.gold = chr.hero.gold-50
                market_1 =True
            else:
                print("yeterli altının yok!")
        elif cvp == 2:
            if chr.hero.gold > 150 and market_2 == False:
                chr.hero.ap = chr.hero.ap + 35
                chr.hero.gold = chr.hero.gold - 150
                market_2 = True
            else:
                print("yeterli altının yok!")
        elif cvp == 3:
            if chr.hero.gold > 300 and market_3 == False:
                chr.hero.hp = chr.hero.hp + 200
                chr.hero.gold = chr.hero.gold - 300
                market_3 = True
            else:
                print("yeterli altının yok!")
        else:
            pass
        continue
    elif cvp == 3:
        dsgn.magic_screen() # enter the wizard page (pay 150 gold for a big heal)
        dsgn.durum()
        cvp = int(input())
        if cvp == 1:
            if chr.hero.gold > 150:
                chr.hero.hp = chr.hero.hp + rdm.randint(50,100)
                chr.hero.gold = chr.hero.gold - 150
            else:
                print("yeterli altının yok!")
        else:
            pass
        continue
    elif cvp == 4:
        dsgn.durum()
        dsgn.attack_wait_screen() # attack/wait selection screen
        # Battle loop: 1-3 attack (misses if the monster guessed the same
        # number), anything else rests for +20 HP; monster strikes back each turn.
        while chr.monster.hp > 0 and chr.hero.hp > 0:
            cvp = int(input())
            monster_cvp = rdm.randint(1, 4)
            if cvp == 1:
                if monster_cvp == 1:
                    dsgn.durum()
                    dsgn.attack_hero_miss_screen()
                    print("ıskaladın")
                else:
                    chr.monster.hp = chr.monster.hp - rdm.randint(0, chr.hero.ap)
                    dsgn.durum()
                    dsgn.attack_hero_action_screen()
                    if chr.monster.hp < 0:
                        chr.hero.gold = chr.hero.gold + chr.monster.gold
            elif cvp == 2:
                if monster_cvp == 2:
                    dsgn.durum()
                    dsgn.attack_hero_miss_screen()
                    print("ıskaladın")
                else:
                    chr.monster.hp = chr.monster.hp - rdm.randint(0, chr.hero.ap)
                    dsgn.durum()
                    dsgn.attack_hero_action_screen()
                    if chr.monster.hp < 0:
                        chr.hero.gold = chr.hero.gold + chr.monster.gold
            elif cvp == 3:
                if monster_cvp == 3:
                    dsgn.durum()
                    dsgn.attack_hero_miss_screen()
                    print("ıskaladın")
                else:
                    chr.monster.hp = chr.monster.hp - rdm.randint(0, chr.hero.ap)
                    dsgn.durum()
                    dsgn.attack_hero_action_screen()
                    if chr.monster.hp < 0:
                        chr.hero.gold = chr.hero.gold + chr.monster.gold
            else:
                chr.hero.hp += 20
            # Monster's counter-attack: 1-in-4 chance the hero dodges.
            monster_atack_chance = rdm.randint(1,4)
            if monster_atack_chance == 1:
                dsgn.durum()
                dsgn.attack_hero_miss_screen()
                print("Saldırıdan kurtuldun")
            else:
                dsgn.durum()
                dsgn.attack_hero_action_screen()
                chr.hero.hp -= rdm.randint(0,chr.monster.ap)
                print("Hasar aldın")
            # End-of-turn resolution: hero death ends the run; monster death
            # levels it up and restarts the battle loop with scaled stats.
            if chr.hero.hp < 0:
                print("ÖLDÜN")
                break
            elif chr.monster.hp < 0:
                print("CANAVARI ÖLDÜRDÜN")
                lvl += 1
                chr.monster.hp += 100 * (lvl * 0.5)
                chr.monster.gold += 40 * (lvl)
                chr.monster.ap += 40 * (lvl * 0.1)
                continue
            else:
                pass
    else:
print("lütfen geçerli bir değer giriniz") | Python-Game_Spark-man/main.py | import designes as dsgn
import characters as chr
import random as rdm
market_1 = False
market_2 = False
market_3 = False
lvl = 0
print(dsgn.main_screen())
cvp = int(input())
if cvp != 1:
print("ÇIKIŞ YAPILIYOR")
while 1:
print("""************************************************************************************************************
""")
print(f"HP: {chr.hero.hp} AP: {chr.hero.ap} GOLD: {chr.hero.gold}")
dsgn.choice_screen() #seçim ekranı: 1-taverna 2-market 3-büyücü 4-arena
cvp = int(input())
if cvp == 1:
dsgn.tavern_screen() #taverna sayfasına giriş
print("""************************************************************************************************************
""")
print(f"HP: {chr.hero.hp} AP: {chr.hero.ap} GOLD: {chr.hero.gold}")
cvp = int(input())
if cvp == 1:
if chr.hero.gold > 80:
chr.hero.hp = chr.hero.hp + rdm.randint(10, 40)
chr.hero.gold = chr.hero.gold-80
else:
print("yeterli altının yok!")
else:
continue
elif cvp == 2:
dsgn.market_screen() #market sayfasına giriş
print("""************************************************************************************************************
""")
print(f"HP: {chr.hero.hp} AP: {chr.hero.ap} GOLD: {chr.hero.gold}")
cvp = int(input())
if cvp == 1:
if chr.hero.gold > 50 and market_1 == False:
chr.hero.ap = chr.hero.ap + 15
chr.hero.gold = chr.hero.gold-50
market_1 =True
else:
print("yeterli altının yok!")
elif cvp == 2:
if chr.hero.gold > 150 and market_2 == False:
chr.hero.ap = chr.hero.ap + 35
chr.hero.gold = chr.hero.gold - 150
market_2 = True
else:
print("yeterli altının yok!")
elif cvp == 3:
if chr.hero.gold > 300 and market_3 == False:
chr.hero.hp = chr.hero.hp + 200
chr.hero.gold = chr.hero.gold - 300
market_3 = True
else:
print("yeterli altının yok!")
else:
pass
continue
elif cvp == 3:
dsgn.magic_screen() #büyücü sayfasına giriş
dsgn.durum()
cvp = int(input())
if cvp == 1:
if chr.hero.gold > 150:
chr.hero.hp = chr.hero.hp + rdm.randint(50,100)
chr.hero.gold = chr.hero.gold - 150
else:
print("yeterli altının yok!")
else:
pass
continue
elif cvp == 4:
dsgn.durum()
dsgn.attack_wait_screen() #saldırı bekleme seçim ekranı
while chr.monster.hp > 0 and chr.hero.hp > 0:
cvp = int(input())
monster_cvp = rdm.randint(1, 4)
if cvp == 1:
if monster_cvp == 1:
dsgn.durum()
dsgn.attack_hero_miss_screen()
print("ıskaladın")
else:
chr.monster.hp = chr.monster.hp - rdm.randint(0, chr.hero.ap)
dsgn.durum()
dsgn.attack_hero_action_screen()
if chr.monster.hp < 0:
chr.hero.gold = chr.hero.gold + chr.monster.gold
elif cvp == 2:
if monster_cvp == 2:
dsgn.durum()
dsgn.attack_hero_miss_screen()
print("ıskaladın")
else:
chr.monster.hp = chr.monster.hp - rdm.randint(0, chr.hero.ap)
dsgn.durum()
dsgn.attack_hero_action_screen()
if chr.monster.hp < 0:
chr.hero.gold = chr.hero.gold + chr.monster.gold
elif cvp == 3:
if monster_cvp == 3:
dsgn.durum()
dsgn.attack_hero_miss_screen()
print("ıskaladın")
else:
chr.monster.hp = chr.monster.hp - rdm.randint(0, chr.hero.ap)
dsgn.durum()
dsgn.attack_hero_action_screen()
if chr.monster.hp < 0:
chr.hero.gold = chr.hero.gold + chr.monster.gold
else:
chr.hero.hp += 20
monster_atack_chance = rdm.randint(1,4)
if monster_atack_chance == 1:
dsgn.durum()
dsgn.attack_hero_miss_screen()
print("Saldırıdan kurtuldun")
else:
dsgn.durum()
dsgn.attack_hero_action_screen()
chr.hero.hp -= rdm.randint(0,chr.monster.ap)
print("Hasar aldın")
if chr.hero.hp < 0:
print("ÖLDÜN")
break
elif chr.monster.hp < 0:
print("CANAVARI ÖLDÜRDÜN")
lvl += 1
chr.monster.hp += 100 * (lvl * 0.5)
chr.monster.gold += 40 * (lvl)
chr.monster.ap += 40 * (lvl * 0.1)
continue
else:
pass
else:
print("lütfen geçerli bir değer giriniz") | 0.046206 | 0.121477 |
from django.core.management.base import BaseCommand, CommandError
import os
import re
import json
def fp_dict(path):
    """Load a flare-plot JSON file and return the parsed object.

    Fixes: the original opened the file and never closed it; a context
    manager guarantees the handle is released, and ``json.load`` replaces
    the manual read + ``json.loads``.
    """
    with open(path) as json_file:
        return json.load(json_file)
def get_cont_type(self, jsonfile, n1, n2):
    """Classify a contact between GPCR positions *n1* and *n2* ("<helix>x<pos>").

    Returns "Intra" when both positions sit on the same helix, "Inter"
    otherwise, or False (after logging via *self*) when *n1* is malformed.

    Fixes: removed the unused ``group`` assignments and the ``info``
    indirection; behavior is unchanged.
    """
    if len(n1.split("x")) != 2:
        self.stdout.write(self.style.ERROR("GPCR num %s not understood in %s." % (n1, jsonfile)))
        return False
    chain1 = n1.split("x")[0]
    chain2 = n2.split("x")[0]
    if chain1 == chain2:
        return "Intra"
    return "Inter"
def addContTypetoEdges(self, jsonfile, myfp):
    """Annotate every edge of the flare-plot dict *myfp* with a
    ``helixpos`` field ("Intra"/"Inter"); stops at the first edge whose
    name cannot be parsed (get_cont_type returns a falsy value).
    """
    for edge in myfp["edges"]:
        helixpos = get_cont_type(self, jsonfile, edge["name1"], edge["name2"])
        if not helixpos:
            break
        edge["helixpos"] = helixpos
class Command(BaseCommand):
    # Management command: post-process precomputed flare-plot JSON files,
    # tagging every hydrogen-bond edge as intra- or inter-helix.
    help = "Add information at the JSON files of the Flare Plots"
    def add_arguments(self, parser):
        """Register -type (kind of info to add) and -dyn (dynamics-id filter)."""
        parser.add_argument(
            "-type",
            dest="info_type",
            nargs="*",
            choices=["helixpos"], # Add more choices here to support other kinds of flare-plot info
            action="store",
            default="helixpos",
            help="Type of information to be added to the flare plot."
        )
        parser.add_argument(
            '-dyn',
            dest='dyn_id',
            nargs='*',
            action='store',
            default=False,
            help='Specify id(s) of dynamics for which a json file will be modified'
        )
    def handle(self, *args, **options):
        """Walk the hbonds JSON directory and annotate matching files in place."""
        hb_json_path="/protwis/sites/files/Precomputed/flare_plot/hbonds"
        if not os.path.isdir(hb_json_path):
            self.stdout.write(self.style.ERROR("No json files found."))
            return
        for myfile in os.listdir(hb_json_path):
            # Expected name: <x>_trj_<dyn_id>_<suffix>.json
            isjsonfile=re.match("^\d*_trj_(\d*)_\w*.json$",myfile);
            if isjsonfile:
                jsonfile=isjsonfile.group(0)
                dynrestrict=options['dyn_id']
                if dynrestrict:
                    # -dyn was given: only touch files for the listed dynamics ids.
                    dyn_id=isjsonfile.group(1)
                    if dyn_id not in dynrestrict:
                        continue
                fp_path=os.path.join(hb_json_path, jsonfile)
                myfp=fp_dict(fp_path)
                addContTypetoEdges(self,jsonfile,myfp)
                # Rewrite the annotated JSON in place.
                with open(fp_path,"w") as of:
                    json.dump(myfp, of)
self.stdout.write(self.style.NOTICE("%s modified") % (jsonfile)) | dynadb/management/commands/addinfo_fplot.py | from django.core.management.base import BaseCommand, CommandError
import os
import re
import json
def fp_dict(path):
    """Load a flare-plot JSON file and return the parsed object.

    Fixes: the original opened the file and never closed it; a context
    manager guarantees the handle is released, and ``json.load`` replaces
    the manual read + ``json.loads``.
    """
    with open(path) as json_file:
        return json.load(json_file)
def get_cont_type(self, jsonfile, n1, n2):
    """Classify a contact between GPCR positions *n1* and *n2* ("<helix>x<pos>").

    Returns "Intra" when both positions sit on the same helix, "Inter"
    otherwise, or False (after logging via *self*) when *n1* is malformed.

    Fixes: removed the unused ``group`` assignments and the ``info``
    indirection; behavior is unchanged.
    """
    if len(n1.split("x")) != 2:
        self.stdout.write(self.style.ERROR("GPCR num %s not understood in %s." % (n1, jsonfile)))
        return False
    chain1 = n1.split("x")[0]
    chain2 = n2.split("x")[0]
    if chain1 == chain2:
        return "Intra"
    return "Inter"
def addContTypetoEdges(self, jsonfile, myfp):
    """Annotate every edge of the flare-plot dict *myfp* with a
    ``helixpos`` field ("Intra"/"Inter"); stops at the first edge whose
    name cannot be parsed (get_cont_type returns a falsy value).
    """
    for edge in myfp["edges"]:
        helixpos = get_cont_type(self, jsonfile, edge["name1"], edge["name2"])
        if not helixpos:
            break
        edge["helixpos"] = helixpos
class Command(BaseCommand):
    """Management command: add extra information to flare-plot JSON files."""

    help = "Add information at the JSON files of the Flare Plots"

    def add_arguments(self, parser):
        parser.add_argument(
            "-type",
            dest="info_type",
            nargs="*",
            # Extend `choices` when new kinds of flare-plot info are supported.
            choices=["helixpos"],
            action="store",
            # NOTE(review): with nargs="*" a supplied value parses to a list,
            # while the default is a bare string — callers must handle both.
            default="helixpos",
            help="Type of information to be added to the flare plot."
        )
        parser.add_argument(
            '-dyn',
            dest='dyn_id',
            nargs='*',
            action='store',
            # False means "no restriction": process every matching json file.
            default=False,
            help='Specify id(s) of dynamics for which a json file will be modified'
        )

    def handle(self, *args, **options):
        """Annotate each precomputed hbond flare-plot json file in place."""
        hb_json_path="/protwis/sites/files/Precomputed/flare_plot/hbonds"
        if not os.path.isdir(hb_json_path):
            self.stdout.write(self.style.ERROR("No json files found."))
            return
        for myfile in os.listdir(hb_json_path):
            # NOTE(review): non-raw regex relies on "\d"/"\w" escapes; prefer
            # r"..." and escape the "." before "json" (matches any character).
            isjsonfile=re.match("^\d*_trj_(\d*)_\w*.json$",myfile);
            if isjsonfile:
                jsonfile=isjsonfile.group(0)
                dynrestrict=options['dyn_id']
                if dynrestrict:
                    # group(1) captures the dynamics id from the file name.
                    dyn_id=isjsonfile.group(1)
                    if dyn_id not in dynrestrict:
                        continue
                fp_path=os.path.join(hb_json_path, jsonfile)
                myfp=fp_dict(fp_path)
                addContTypetoEdges(self,jsonfile,myfp)
                # Persist the json with the new "helixpos" edge field.
                with open(fp_path,"w") as of:
                    json.dump(myfp, of)
                self.stdout.write(self.style.NOTICE("%s modified") % (jsonfile)) | 0.289071 | 0.057971
import os
import mock
import utils
from common import cli_helpers
# Need this for plugin imports
utils.add_sys_plugin_path("kubernetes")
from plugins.kubernetes.parts import ( # noqa E402
general,
network,
)
class TestKubernetesPluginPartGeneral(utils.BaseTestCase):
    """Tests for the kubernetes plugin 'general' part (services and snaps)."""

    def setUp(self):
        # Snapshot of `snap list --all` output taken from the test DATA_ROOT.
        self.snaps_list = cli_helpers.get_snap_list_all()
        super().setUp()

    def tearDown(self):
        super().tearDown()

    @mock.patch.object(general, "KUBERNETES_INFO", {})
    def test_get_service_info(self):
        # Expected "<service> (<process count>)" entries from the fixture sosreport.
        expected = ['calico-node (3)',
                    'containerd (17)',
                    'containerd-shim (16)',
                    'flanneld (1)',
                    'kube-proxy (1)',
                    'kubelet (2)']
        # The checker factory returns a callable that fills KUBERNETES_INFO.
        general.get_kubernetes_service_checker()()
        self.assertEqual(general.KUBERNETES_INFO['services'], expected)

    @mock.patch.object(general, "KUBERNETES_INFO", {})
    def test_get_snap_info_from_line(self):
        # Snap name -> version pairs expected from the fixture snap list.
        result = {'conjure-up': '2.6.14-20200716.2107',
                  'core': '16-2.48.2',
                  'core18': '20201210',
                  'docker': '19.03.11',
                  'go': '1.15.6',
                  'helm': '3.5.0',
                  'kubectl': '1.20.2',
                  'vault': '1.5.4'}
        general.get_kubernetes_package_checker()()
        self.assertEqual(general.KUBERNETES_INFO["snaps"], result)

    @mock.patch.object(general.cli_helpers, "get_snap_list_all")
    @mock.patch.object(general, "KUBERNETES_INFO", {})
    def test_get_snap_info_from_line_no_k8s(self, mock_get_snap_list_all):
        # Build a snap list with every k8s-related snap filtered out, so the
        # checker should record nothing under the "snaps" key.
        filterered_snaps = []
        for line in self.snaps_list:
            found = False
            for snap in general.SNAPS_K8S:
                cls = general.KubernetesPackageChecks
                if cls.get_snap_info_from_line(line, snap):
                    found = True
                    break
            if not found:
                filterered_snaps.append(line)
        mock_get_snap_list_all.return_value = filterered_snaps
        general.get_kubernetes_package_checker()()
        self.assertIsNone(general.KUBERNETES_INFO.get("snaps"))
class TestKubernetesPluginPartNetwork(utils.BaseTestCase):
    """Tests for the kubernetes plugin 'network' part (flannel interfaces)."""

    def setUp(self):
        super().setUp()

    def tearDown(self):
        super().tearDown()

    @mock.patch.object(network.cli_helpers, "get_ip_addr")
    @mock.patch.object(network, "NETWORK_INFO", {})
    def test_get_network_info(self, mock_get_ip_addr):
        def fake_get_ip_addr():
            # Serve the k8s-specific `ip -d address` capture from DATA_ROOT.
            path = os.path.join(os.environ["DATA_ROOT"],
                                "sos_commands/networking/ip_-d_address.k8s")
            with open(path) as fd:
                return fd.readlines()

        mock_get_ip_addr.side_effect = fake_get_ip_addr
        # Flannel vxlan interface details expected from the fixture.
        expected = {'flannel':
                    {'flannel.1': {'addr': '172.16.31.10/32',
                                   'vxlan': '10.78.2.176@enp6s0f0'}}}
        network.get_kubernetes_network_checks()()
        self.assertEqual(network.NETWORK_INFO, expected) | tests/unit/test_kubernetes.py | import os
import mock
import utils
from common import cli_helpers
# Need this for plugin imports
utils.add_sys_plugin_path("kubernetes")
from plugins.kubernetes.parts import ( # noqa E402
general,
network,
)
class TestKubernetesPluginPartGeneral(utils.BaseTestCase):
    """Tests for the kubernetes plugin 'general' part (duplicate copy)."""

    def setUp(self):
        # Snapshot of `snap list --all` output taken from the test DATA_ROOT.
        self.snaps_list = cli_helpers.get_snap_list_all()
        super().setUp()

    def tearDown(self):
        super().tearDown()

    @mock.patch.object(general, "KUBERNETES_INFO", {})
    def test_get_service_info(self):
        # Expected "<service> (<process count>)" entries from the fixture.
        expected = ['calico-node (3)',
                    'containerd (17)',
                    'containerd-shim (16)',
                    'flanneld (1)',
                    'kube-proxy (1)',
                    'kubelet (2)']
        general.get_kubernetes_service_checker()()
        self.assertEqual(general.KUBERNETES_INFO['services'], expected)

    @mock.patch.object(general, "KUBERNETES_INFO", {})
    def test_get_snap_info_from_line(self):
        # Snap name -> version pairs expected from the fixture snap list.
        result = {'conjure-up': '2.6.14-20200716.2107',
                  'core': '16-2.48.2',
                  'core18': '20201210',
                  'docker': '19.03.11',
                  'go': '1.15.6',
                  'helm': '3.5.0',
                  'kubectl': '1.20.2',
                  'vault': '1.5.4'}
        general.get_kubernetes_package_checker()()
        self.assertEqual(general.KUBERNETES_INFO["snaps"], result)

    @mock.patch.object(general.cli_helpers, "get_snap_list_all")
    @mock.patch.object(general, "KUBERNETES_INFO", {})
    def test_get_snap_info_from_line_no_k8s(self, mock_get_snap_list_all):
        # Feed a snap list stripped of every k8s snap: nothing gets recorded.
        filterered_snaps = []
        for line in self.snaps_list:
            found = False
            for snap in general.SNAPS_K8S:
                cls = general.KubernetesPackageChecks
                if cls.get_snap_info_from_line(line, snap):
                    found = True
                    break
            if not found:
                filterered_snaps.append(line)
        mock_get_snap_list_all.return_value = filterered_snaps
        general.get_kubernetes_package_checker()()
        self.assertIsNone(general.KUBERNETES_INFO.get("snaps"))
class TestKubernetesPluginPartNetwork(utils.BaseTestCase):
    """Tests for the kubernetes plugin 'network' part (duplicate copy)."""

    def setUp(self):
        super().setUp()

    def tearDown(self):
        super().tearDown()

    @mock.patch.object(network.cli_helpers, "get_ip_addr")
    @mock.patch.object(network, "NETWORK_INFO", {})
    def test_get_network_info(self, mock_get_ip_addr):
        def fake_get_ip_addr():
            # Serve the k8s-specific `ip -d address` capture from DATA_ROOT.
            path = os.path.join(os.environ["DATA_ROOT"],
                                "sos_commands/networking/ip_-d_address.k8s")
            with open(path) as fd:
                return fd.readlines()

        mock_get_ip_addr.side_effect = fake_get_ip_addr
        # Flannel vxlan interface details expected from the fixture.
        expected = {'flannel':
                    {'flannel.1': {'addr': '172.16.31.10/32',
                                   'vxlan': '10.78.2.176@enp6s0f0'}}}
        network.get_kubernetes_network_checks()()
        self.assertEqual(network.NETWORK_INFO, expected) | 0.42656 | 0.128416
import math
import glob
import threading
from PIL import Image
import tqdm
from TiledImage import others
def resizeImage(image: Image.Image, w, h, keepRatio=True):
    """Resize `image` to (at most) w x h pixels.

    With keepRatio=True the aspect ratio is preserved: the landscape target
    (w > h) fixes the height at h, otherwise the width is fixed at w.
    Returns a new PIL image; the input is not mutated.
    """
    if keepRatio:
        ratio = image.width / image.height
        if w > h:
            return image.resize((int(ratio * h), h))
        # BUG FIX: the new height is width / ratio.  The original computed
        # int(ratio / w), which collapses the image to (w, 0) for virtually
        # any real input size.
        return image.resize((w, int(w / ratio)))
    return image.resize((w, h))
def loadImagesFromFolder(path: str) -> list[Image.Image]:
    """Open every file matched by the glob pattern.

    :param path: path of folder. make sure to add "/*"
    :return: list of pillow images
    """
    others.printLoadingTiles()
    images = []
    # glob.iglob streams matches lazily while tqdm shows progress.
    for filename in tqdm.tqdm(glob.iglob(path)):
        images.append(Image.open(filename))
    return images
class ImageTiles:
    """Indexes tile images by their average RGB colour for nearest lookup."""

    def __init__(self, imageTiles: list[Image.Image]):
        self.imTiles = imageTiles
        # (r, g, b) average colour -> tile image; populated by prepTiles().
        self.tiles: dict[(int, int, int), Image] = {}

    def averageColor(self, tile: Image.Image):
        """Return the tile's mean colour by downscaling it to a single pixel."""
        return tile.convert("RGB").resize((1, 1)).getpixel((0, 0))[:3]

    def prepTiles(self):
        """Build the colour -> tile lookup table."""
        self.tiles = {self.averageColor(im): im
                      for im in tqdm.tqdm(self.imTiles,
                                          desc="Averaging image colors...")}

    def getNearest(self, r, g, b):
        """Return the tile whose average colour is closest to (r, g, b).

        BUG FIX: the original built a distance-keyed dict and called
        min(*keys), which raises TypeError when only one tile exists and
        silently drops tiles whose distances collide.  min() with a key
        function handles both; squared distance preserves the ordering
        without the sqrt.
        """
        nearest = min(
            self.tiles,
            key=lambda c: (r - c[0]) ** 2 + (g - c[1]) ** 2 + (b - c[2]) ** 2)
        return self.tiles[nearest]
class CanvasQuad:
    """A rectangular region of the shared output canvas owned by one worker."""

    def __init__(self, x, y, canvasQuadSize, canvas: Image.Image):
        self.x = x
        self.y = y
        self.canvasQuadSize = canvasQuadSize
        # Pixel offset of this quadrant's top-left corner on the full canvas.
        self.worldX = x * self.canvasQuadSize[0]
        self.worldY = y * self.canvasQuadSize[1]
        self.canvas = canvas

    def fill(self, color="#000000"):
        """Flood the whole quadrant with a solid colour."""
        block = Image.new("RGB", self.canvasQuadSize, color)
        self.canvas.paste(block, (self.worldX, self.worldY))

    def setTile(self, im: Image.Image, x, y):
        """Paste `im` at (x, y) relative to this quadrant's origin."""
        self.canvas.paste(im, (self.worldX + x, self.worldY + y))
class ImageQuadrant:
    """Renders one quadrant of the reference image onto its CanvasQuad."""

    def __init__(self, x: int, y: int, imageTiles: ImageTiles, refQuad: Image.Image, quadCanvas: CanvasQuad, tileSize):
        self.x = x
        self.y = y
        # Cropped piece of the (downsampled) reference image to reproduce.
        self.refQuad = refQuad
        self.imageTiles = imageTiles
        self.quadCanvas = quadCanvas
        # (tile_w, tile_h): one reference pixel maps to one tile of this size.
        self.tileSize = tileSize

    def run(self, pbar: tqdm.tqdm):
        """Replace every reference pixel with its nearest-colour tile.

        Runs on a worker thread; `pbar` is shared between workers.
        NOTE(review): pbar.update is called from several threads — confirm
        tqdm's locking before relying on exact progress counts.
        """
        for x in range(self.refQuad.width):
            for y in range(self.refQuad.height):
                pix = self.refQuad.getpixel((x, y))
                tile = self.imageTiles.getNearest(pix[0], pix[1], pix[2])
                self.quadCanvas.setTile(tile, self.tileSize[0] * x, self.tileSize[1] * y)
                pbar.update(1)
class TiledImageMaker:
    """Builds a mosaic: each pixel of a reference image becomes one tile."""

    def __init__(self, imageTiles: list[Image.Image], referenceImage: Image.Image):
        """
        :param imageTiles: Tiles to use for the tiled image. They have to be of the same width and height
        :param referenceImage: The image reference.
        """
        self.quads: list[ImageQuadrant] = []
        self.tiles = ImageTiles(imageTiles)
        # Work on a copy so the caller's image is never mutated.
        self.refImage = referenceImage.copy()
        # All tiles are assumed to share the first tile's dimensions.
        self.tile_w = imageTiles[0].width
        self.tile_h = imageTiles[0].height
        # Resize the referenceImage to be smaller, resulting in smaller image output. Improves performance
        self.downsample = True
        self.keepRatio = True

    def getCanvas(self):
        # Output canvas: one tile per reference pixel.
        print((self.refImage.width * self.tile_w, self.refImage.height * self.tile_h))
        return Image.new("RGB", (self.refImage.width * self.tile_w, self.refImage.height * self.tile_h))

    def _prep_reference_image(self):
        # Shrink the reference so each source pixel turns into one tile.
        if self.downsample:
            self.refImage = resizeImage(
                self.refImage,
                int(self.refImage.width / self.tile_w), int(self.refImage.height / self.tile_h),
                self.keepRatio)

    def generate(self, quadNo=2, save_dir="./out.png", save=True):
        """Render the mosaic with quadNo x quadNo worker threads.

        NOTE(review): the `save` flag is currently ignored — the canvas is
        always written to `save_dir`.  Confirm intent before relying on it.
        """
        self._prep_reference_image()
        canvas = self.getCanvas()
        self.tiles.prepTiles()
        # Quadrant size in reference pixels and in canvas pixels.
        quadRefSize = (math.ceil(self.refImage.width / quadNo), math.ceil(self.refImage.height / quadNo))
        quadCanvasSize = (quadRefSize[0] * self.tile_w, quadRefSize[1] * self.tile_h)
        others.printImageOutputDetails(save_dir,*canvas.size)
        for y in range(quadNo):
            for x in range(quadNo):
                xPos = x * quadRefSize[0]
                yPos = y * quadRefSize[1]
                quadIm = self.refImage.crop((xPos, yPos, quadRefSize[0] + xPos, quadRefSize[1] + yPos))
                quad = ImageQuadrant(x, y, self.tiles,
                                     quadIm,
                                     CanvasQuad(x, y, quadCanvasSize, canvas), (self.tile_w, self.tile_h))
                self.quads.append(quad)
        # NOTE(review): edge quadrants can be smaller than quadRefSize, so
        # the progress-bar total may overshoot the actual work — confirm.
        total_iterations = quadRefSize[0] * quadRefSize[1] * quadNo * quadNo
        with tqdm.tqdm(total=total_iterations, desc=f"Progress [size:{self.refImage.size}]") as pbar:
            # One thread per quadrant; all paste into the shared canvas.
            threads = [threading.Thread(target=i.run, args=(pbar,)) for i in self.quads]
            [i.start() for i in threads]
            [i.join() for i in threads]
        print("Saving...")
        canvas.save(save_dir) | TiledImage/__init__.py | import math
import glob
import threading
from PIL import Image
import tqdm
from TiledImage import others
def resizeImage(image: Image.Image, w, h, keepRatio=True):
    """Resize `image` to (at most) w x h pixels.

    With keepRatio=True the aspect ratio is preserved: the landscape target
    (w > h) fixes the height at h, otherwise the width is fixed at w.
    Returns a new PIL image; the input is not mutated.
    """
    if keepRatio:
        ratio = image.width / image.height
        if w > h:
            return image.resize((int(ratio * h), h))
        # BUG FIX: the new height is width / ratio.  The original computed
        # int(ratio / w), which collapses the image to (w, 0) for virtually
        # any real input size.
        return image.resize((w, int(w / ratio)))
    return image.resize((w, h))
def loadImagesFromFolder(path: str) -> list[Image.Image]:
    """
    Open every file matched by the glob pattern.

    :param path: path of folder. make sure to add "/*"
    :return: list of pillow images
    """
    others.printLoadingTiles()
    # glob.iglob streams matches lazily while tqdm shows progress.
    return [Image.open(f) for f in tqdm.tqdm(glob.iglob(path))]
class ImageTiles:
    """Indexes tile images by their average RGB colour for nearest lookup."""

    def __init__(self, imageTiles: list[Image.Image]):
        self.imTiles = imageTiles
        # (r, g, b) average colour -> tile image; populated by prepTiles().
        self.tiles: dict[(int, int, int), Image] = {}

    def averageColor(self, tile: Image.Image):
        """Return the tile's mean colour by downscaling it to a single pixel."""
        return tile.convert("RGB").resize((1, 1)).getpixel((0, 0))[:3]

    def prepTiles(self):
        """Build the colour -> tile lookup table."""
        self.tiles = {self.averageColor(im): im
                      for im in tqdm.tqdm(self.imTiles,
                                          desc="Averaging image colors...")}

    def getNearest(self, r, g, b):
        """Return the tile whose average colour is closest to (r, g, b).

        BUG FIX: the original built a distance-keyed dict and called
        min(*keys), which raises TypeError when only one tile exists and
        silently drops tiles whose distances collide.  min() with a key
        function handles both; squared distance preserves the ordering
        without the sqrt.
        """
        nearest = min(
            self.tiles,
            key=lambda c: (r - c[0]) ** 2 + (g - c[1]) ** 2 + (b - c[2]) ** 2)
        return self.tiles[nearest]
class CanvasQuad:
    """A rectangular region of the shared output canvas owned by one worker."""

    def __init__(self, x, y, canvasQuadSize, canvas: Image.Image):
        self.x = x
        self.y = y
        self.canvasQuadSize = canvasQuadSize
        # Pixel offset of this quadrant's top-left corner on the full canvas.
        self.worldX = self.canvasQuadSize[0] * x
        self.worldY = self.canvasQuadSize[1] * y
        self.canvas = canvas

    def fill(self, color="#000000"):
        # Flood the whole quadrant with a solid colour.
        self.canvas.paste(Image.new("RGB", self.canvasQuadSize, color), (self.worldX, self.worldY))

    def setTile(self, im: Image.Image, x, y):
        # Paste `im` at (x, y) relative to this quadrant's origin.
        self.canvas.paste(im, (self.worldX + x, self.worldY + y))
class ImageQuadrant:
    """Renders one quadrant of the reference image onto its CanvasQuad."""

    def __init__(self, x: int, y: int, imageTiles: ImageTiles, refQuad: Image.Image, quadCanvas: CanvasQuad, tileSize):
        self.x = x
        self.y = y
        # Cropped piece of the (downsampled) reference image to reproduce.
        self.refQuad = refQuad
        self.imageTiles = imageTiles
        self.quadCanvas = quadCanvas
        # (tile_w, tile_h): one reference pixel maps to one tile of this size.
        self.tileSize = tileSize

    def run(self, pbar: tqdm.tqdm):
        """Replace every reference pixel with its nearest-colour tile.

        Runs on a worker thread; `pbar` is shared between workers.
        NOTE(review): pbar.update is called from several threads — confirm
        tqdm's locking before relying on exact progress counts.
        """
        for x in range(self.refQuad.width):
            for y in range(self.refQuad.height):
                pix = self.refQuad.getpixel((x, y))
                tile = self.imageTiles.getNearest(pix[0], pix[1], pix[2])
                self.quadCanvas.setTile(tile, self.tileSize[0] * x, self.tileSize[1] * y)
                pbar.update(1)
class TiledImageMaker:
    """Builds a mosaic: each pixel of a reference image becomes one tile."""

    def __init__(self, imageTiles: list[Image.Image], referenceImage: Image.Image):
        """
        :param imageTiles: Tiles to use for the tiled image. They have to be of the same width and height
        :param referenceImage: The image reference.
        """
        self.quads: list[ImageQuadrant] = []
        self.tiles = ImageTiles(imageTiles)
        # Work on a copy so the caller's image is never mutated.
        self.refImage = referenceImage.copy()
        # All tiles are assumed to share the first tile's dimensions.
        self.tile_w = imageTiles[0].width
        self.tile_h = imageTiles[0].height
        # Resize the referenceImage to be smaller, resulting in smaller image output. Improves performance
        self.downsample = True
        self.keepRatio = True

    def getCanvas(self):
        # Output canvas: one tile per reference pixel.
        print((self.refImage.width * self.tile_w, self.refImage.height * self.tile_h))
        return Image.new("RGB", (self.refImage.width * self.tile_w, self.refImage.height * self.tile_h))

    def _prep_reference_image(self):
        # Shrink the reference so each source pixel turns into one tile.
        if self.downsample:
            self.refImage = resizeImage(
                self.refImage,
                int(self.refImage.width / self.tile_w), int(self.refImage.height / self.tile_h),
                self.keepRatio)

    def generate(self, quadNo=2, save_dir="./out.png", save=True):
        """Render the mosaic with quadNo x quadNo worker threads.

        NOTE(review): the `save` flag is currently ignored — the canvas is
        always written to `save_dir`.  Confirm intent before relying on it.
        """
        self._prep_reference_image()
        canvas = self.getCanvas()
        self.tiles.prepTiles()
        # Quadrant size in reference pixels and in canvas pixels.
        quadRefSize = (math.ceil(self.refImage.width / quadNo), math.ceil(self.refImage.height / quadNo))
        quadCanvasSize = (quadRefSize[0] * self.tile_w, quadRefSize[1] * self.tile_h)
        others.printImageOutputDetails(save_dir,*canvas.size)
        for y in range(quadNo):
            for x in range(quadNo):
                xPos = x * quadRefSize[0]
                yPos = y * quadRefSize[1]
                quadIm = self.refImage.crop((xPos, yPos, quadRefSize[0] + xPos, quadRefSize[1] + yPos))
                quad = ImageQuadrant(x, y, self.tiles,
                                     quadIm,
                                     CanvasQuad(x, y, quadCanvasSize, canvas), (self.tile_w, self.tile_h))
                self.quads.append(quad)
        # NOTE(review): edge quadrants can be smaller than quadRefSize, so
        # the progress-bar total may overshoot the actual work — confirm.
        total_iterations = quadRefSize[0] * quadRefSize[1] * quadNo * quadNo
        with tqdm.tqdm(total=total_iterations, desc=f"Progress [size:{self.refImage.size}]") as pbar:
            # One thread per quadrant; all paste into the shared canvas.
            threads = [threading.Thread(target=i.run, args=(pbar,)) for i in self.quads]
            [i.start() for i in threads]
            [i.join() for i in threads]
        print("Saving...")
        canvas.save(save_dir) | 0.660282 | 0.39257
from django.urls import path
from django.contrib.auth import views as auth_views
from . import views

# Product-management CRUD routes: categories, sub-categories, brands, items.
app_name='products'

urlpatterns=[
    # --- categories ---
    path('category/',views.get_all_categories,name='categories'),
    path('category_view_ajax/',views.category_view_ajax,name="category_view_ajax"),
    path('add_category/',views.add_category,name="add_category"),
    # NOTE(review): no trailing slash, unlike sibling routes — confirm the
    # front-end AJAX calls before normalising.
    path('add_multiple_categories',views.add_multiple_categories,name="add_multiple_categories"),
    path('delete_category_ajax/',views.delete_category_ajax,name="delete_category_ajax"),
    path('edit_category/',views.edit_category,name="edit_category"),
    path('edit_category_ajax/',views.edit_category_ajax,name="edit_category_ajax"),
    path('delete_multiple_categories/',views.delete_multiple_categories,name="delete_multiple_categories"),
    # NOTE(review): exact duplicate of the 'category_view_ajax/' route above
    # (same route, view and name) — candidate for removal.
    path('category_view_ajax/',views.category_view_ajax,name = 'category_view_ajax'),
    # --- sub categories ---
    path('sub_categories/', views.get_all_sub_categories, name = 'sub_categories'),
    path('get_categories/',views.get_categories,name = 'get_categories'),
    path('add_sub_category/',views.add_sub_category, name = 'add_sub_category'),
    path('delete_sub_category/',views.delete_sub_category_ajax, name = 'delete_sub_category_ajax'),
    path('edit_sub_category/', views.edit_sub_category, name = 'edit_sub_category'),
    path('edit_sub_category_ajax/', views.edit_sub_category_ajax, name = 'edit_sub_category_ajax'),
    path('delete_multiple_sub_categories/',views.delete_multiple_sub_categories, name ='delete_multiple_sub_categories'),
    path('add_multiple_sub_categories/',views.add_multiple_sub_categories, name = 'add_multiple_sub_categories'),
    # --- brands ---
    path('brands/', views.get_all_brands, name = 'brands'),
    path('add_brand/', views.add_brand, name = 'add_brand'),
    path('delete_brand/', views.delete_brand, name = 'delete_brand_ajax'),
    path('edit_brand/',views.update_brand, name='edit_brand'),
    path('edit_brand_ajax/',views.edit_brand_ajax,name = 'edit_brand_ajax'),
    path('delete_multiple_brands/',views.delete_multiple_brands, name = 'delete_multiple_brands'),
    path('brand_view_ajax/',views.brand_view_ajax, name ='brand_view_ajax'),
    path('add_multiple_brands/',views.add_multiple_brands,name = 'add_multiple_brands'),
    # --- items ---
    path('items/',views.get_all_items,name = 'items'),
    path('add_item/',views.add_item, name = 'add_item'),
    path('delete_item/',views.delete_item, name = 'delete_item_ajax'),
    path('edit_item/',views.edit_item,name = 'edit_item'),
    path('edit_item_ajax/', views.edit_item_ajax,name = 'edit_item_ajax'),
    path('delete_multiple_items/',views.delete_multiple_items,name = 'delete_multiple_items'),
    path('item_view_ajax/',views.item_view_ajax,name = 'item_view_ajax'),
    path('add_multiple_items', views.add_multiple_items, name = 'add_multiple_items'),
    path('get_brands_subcategories', views.get_brands_subcategories ,name = 'get_brands_subcategories')
] | products/urls.py | from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
#crud urls
# Product-management CRUD routes: categories, sub-categories, brands, items.
# NOTE(review): a few routes lack a trailing slash (e.g. 'add_multiple_items')
# — confirm against the front-end AJAX calls before normalising them.
app_name='products'

urlpatterns=[
    # --- categories ---
    path('category/',views.get_all_categories,name='categories'),
    path('category_view_ajax/',views.category_view_ajax,name="category_view_ajax"),
    path('add_category/',views.add_category,name="add_category"),
    path('add_multiple_categories',views.add_multiple_categories,name="add_multiple_categories"),
    path('delete_category_ajax/',views.delete_category_ajax,name="delete_category_ajax"),
    path('edit_category/',views.edit_category,name="edit_category"),
    path('edit_category_ajax/',views.edit_category_ajax,name="edit_category_ajax"),
    path('delete_multiple_categories/',views.delete_multiple_categories,name="delete_multiple_categories"),
    # BUG FIX: removed an exact-duplicate registration of
    # 'category_view_ajax/' (identical route, view and name as above).
    # --- sub categories ---
    path('sub_categories/', views.get_all_sub_categories, name = 'sub_categories'),
    path('get_categories/',views.get_categories,name = 'get_categories'),
    path('add_sub_category/',views.add_sub_category, name = 'add_sub_category'),
    path('delete_sub_category/',views.delete_sub_category_ajax, name = 'delete_sub_category_ajax'),
    path('edit_sub_category/', views.edit_sub_category, name = 'edit_sub_category'),
    path('edit_sub_category_ajax/', views.edit_sub_category_ajax, name = 'edit_sub_category_ajax'),
    path('delete_multiple_sub_categories/',views.delete_multiple_sub_categories, name ='delete_multiple_sub_categories'),
    path('add_multiple_sub_categories/',views.add_multiple_sub_categories, name = 'add_multiple_sub_categories'),
    # --- brands ---
    path('brands/', views.get_all_brands, name = 'brands'),
    path('add_brand/', views.add_brand, name = 'add_brand'),
    path('delete_brand/', views.delete_brand, name = 'delete_brand_ajax'),
    path('edit_brand/',views.update_brand, name='edit_brand'),
    path('edit_brand_ajax/',views.edit_brand_ajax,name = 'edit_brand_ajax'),
    path('delete_multiple_brands/',views.delete_multiple_brands, name = 'delete_multiple_brands'),
    path('brand_view_ajax/',views.brand_view_ajax, name ='brand_view_ajax'),
    path('add_multiple_brands/',views.add_multiple_brands,name = 'add_multiple_brands'),
    # --- items ---
    path('items/',views.get_all_items,name = 'items'),
    path('add_item/',views.add_item, name = 'add_item'),
    path('delete_item/',views.delete_item, name = 'delete_item_ajax'),
    path('edit_item/',views.edit_item,name = 'edit_item'),
    path('edit_item_ajax/', views.edit_item_ajax,name = 'edit_item_ajax'),
    path('delete_multiple_items/',views.delete_multiple_items,name = 'delete_multiple_items'),
    path('item_view_ajax/',views.item_view_ajax,name = 'item_view_ajax'),
    path('add_multiple_items', views.add_multiple_items, name = 'add_multiple_items'),
    path('get_brands_subcategories', views.get_brands_subcategories ,name = 'get_brands_subcategories')
]
from __future__ import unicode_literals
import csv
import json
import os
from . import CONFIG, common, osm, plfunctions
from .cities import montreal as mrl
from .cities import quebec as qbc
from .cities import newyork as nyc
from .cities import seattle as sea
from .cities import boston as bos
from .database import PostgresWrapper
from .filters import group_rules
from .logger import Logger
from .utils import pretty_time, tstr_to_float
# distance from road to slot
LINE_OFFSET = 6
CITIES = ["montreal", "quebec", "newyork", "seattle", "boston"]
# Shared Postgres connection built from the app-wide CONFIG mapping.
# BUG FIX: the password placeholder was "{<PASSWORD>}" (credential-scrubbing
# residue), which is not a valid str.format field and raises at import time;
# the key follows the PG_* naming of the other settings — confirm the exact
# CONFIG key before deploying.
db = PostgresWrapper(
    "host='{PG_HOST}' port={PG_PORT} dbname={PG_DATABASE} "
    "user={PG_USERNAME} password={PG_PASSWORD} ".format(**CONFIG))
def process_quebec(debug=False):
    """
    Process Quebec data.

    :param debug: when True, also build the `quebec_slots_debug` table.
    """
    # BUG FIX: the local logging helper was named `debug`, shadowing the
    # `debug` parameter, so the trailing `if debug:` was always truthy and
    # debug slots were generated unconditionally.
    def info(msg):
        return Logger.info("Québec: {}".format(msg))

    def debug_log(msg):
        return Logger.debug("Québec: {}".format(msg))

    def warning(msg):
        return Logger.warning("Québec: {}".format(msg))

    info('Loading and translating rules')
    insert_rules('quebec_rules_translation')
    db.vacuum_analyze('public', 'rules')

    info("Creating sign table")
    db.query(qbc.create_sign)
    info("Loading signs")
    db.query(qbc.insert_sign)
    db.create_index('quebec_sign', 'direction')
    db.create_index('quebec_sign', 'code')
    db.create_index('quebec_sign', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'quebec_sign')

    info("Creating signposts")
    db.query(qbc.create_signpost)
    db.create_index('quebec_signpost', 'id')
    db.create_index('quebec_signpost', 'rid')
    db.create_index('quebec_signpost', 'signs', index_type='gin')
    db.create_index('quebec_signpost', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'quebec_signpost')

    info("Add signpost id to signs")
    db.query(qbc.add_signposts_to_sign)
    db.vacuum_analyze('public', 'quebec_sign')

    info("Projection signposts on road")
    duplicates = db.query(qbc.project_signposts)
    if duplicates:
        warning("Duplicates found for projected signposts : {}"
                .format(str(duplicates)))
    percent, total = db.query(qbc.count_signpost_projected)[0]
    if percent < 100:
        warning("Only {:.0f}% of signposts have been bound to a road. Total is {}"
                .format(percent, total))
    db.query(qbc.generate_signposts_orphans)
    info("Table 'signpost_orphans' has been generated to check for orphans")

    info("Creating slots between signposts")
    db.query(qbc.create_slots_likely)
    # Build slots on both sides of the road.
    db.query(qbc.insert_slots_likely.format(isleft=1))
    db.query(qbc.insert_slots_likely.format(isleft=-1))
    db.create_index('quebec_slots_likely', 'id')
    db.create_index('quebec_slots_likely', 'signposts', index_type='gin')
    db.create_index('quebec_slots_likely', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'quebec_slots_likely')

    db.query(qbc.create_nextpoints_for_signposts)
    db.create_index('quebec_nextpoints', 'id')
    db.create_index('quebec_nextpoints', 'slot_id')
    db.create_index('quebec_nextpoints', 'direction')
    db.vacuum_analyze('public', 'quebec_nextpoints')

    db.query(qbc.insert_slots_temp.format(offset=LINE_OFFSET))
    db.create_index('quebec_slots_temp', 'id')
    db.create_index('quebec_slots_temp', 'geom', index_type='gist')
    db.create_index('quebec_slots_temp', 'rules', index_type='gin')
    db.vacuum_analyze('public', 'quebec_slots_temp')

    info("Creating and overlaying paid slots")
    db.query(qbc.create_bornes_raw)
    db.query(qbc.create_paid_signpost)
    db.query(qbc.aggregate_paid_signposts.format(offset=LINE_OFFSET))
    db.query(qbc.overlay_paid_rules)
    db.query(qbc.create_paid_slots_standalone)

    if debug:
        info("Creating debug slots")
        db.query(qbc.create_slots_for_debug.format(offset=LINE_OFFSET))
        db.create_index('quebec_slots_debug', 'pkid')
        db.create_index('quebec_slots_debug', 'geom', index_type='gist')
        db.vacuum_analyze('public', 'quebec_slots_debug')
def process_montreal(debug=False):
    """
    Process Montreal data and generate parking slots.

    :param debug: when True, also build the `montreal_slots_debug` table.
    """
    # BUG FIX: the local logging helper was named `debug`, shadowing the
    # `debug` parameter, so the trailing `if debug:` was always truthy and
    # debug slots were generated unconditionally.
    def info(msg):
        return Logger.info("Montréal: {}".format(msg))

    def debug_log(msg):
        return Logger.debug("Montréal: {}".format(msg))

    def warning(msg):
        return Logger.warning("Montréal: {}".format(msg))

    debug_log('Loading and translating rules')
    insert_rules('montreal_rules_translation')
    db.vacuum_analyze('public', 'rules')

    info("Matching osm roads with geobase")
    db.query(mrl.match_roads_geobase)
    db.create_index('montreal_roads_geobase', 'id')
    db.create_index('montreal_roads_geobase', 'id_trc')
    db.create_index('montreal_roads_geobase', 'osm_id')
    db.create_index('montreal_roads_geobase', 'name')
    db.create_index('montreal_roads_geobase', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'montreal_roads_geobase')

    info("Creating sign table")
    db.query(mrl.create_sign)
    info("Loading signs")
    db.query(mrl.insert_sign)
    db.query(mrl.insert_signpost_verdun)
    db.query(mrl.insert_sign_verdun)
    db.create_index('montreal_sign', 'geom', index_type='gist')
    db.create_index('montreal_sign', 'direction')
    db.create_index('montreal_sign', 'elevation')
    db.create_index('montreal_sign', 'signpost')
    db.vacuum_analyze('public', 'montreal_sign')

    info("Creating sign posts")
    db.query(mrl.create_signpost)
    db.query(mrl.insert_signpost)
    db.create_index('montreal_signpost', 'geom', index_type='gist')
    db.create_index('montreal_signpost', 'geobase_id')
    db.vacuum_analyze('public', 'montreal_signpost')

    info("Projecting signposts on road")
    duplicates = db.query(mrl.project_signposts)
    if duplicates:
        warning("Duplicates found for projected signposts : {}"
                .format(str(duplicates)))
    db.create_index('montreal_signpost_onroad', 'id')
    db.create_index('montreal_signpost_onroad', 'road_id')
    db.create_index('montreal_signpost_onroad', 'isleft')
    db.create_index('montreal_signpost_onroad', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'montreal_signpost_onroad')

    percent, total = db.query(mrl.count_signpost_projected)[0]
    if percent < 100:
        warning("Only {:.0f}% of signposts have been bound to a road. Total is {}"
                .format(percent, total))
    db.query(mrl.generate_signposts_orphans)
    info("Table 'montreal_signpost_orphans' has been generated to check for orphans")

    info("Creating slots between signposts")
    db.query(mrl.create_slots_likely)
    # Build slots on both sides of the road.
    db.query(mrl.insert_slots_likely.format(isleft=1))
    db.query(mrl.insert_slots_likely.format(isleft=-1))
    db.create_index('montreal_slots_likely', 'id')
    db.create_index('montreal_slots_likely', 'signposts', index_type='gin')
    db.create_index('montreal_slots_likely', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'montreal_slots_likely')

    db.query(mrl.create_nextpoints_for_signposts)
    db.create_index('montreal_nextpoints', 'id')
    db.create_index('montreal_nextpoints', 'slot_id')
    db.create_index('montreal_nextpoints', 'direction')
    db.vacuum_analyze('public', 'montreal_nextpoints')

    # CONSISTENCY: fill montreal_slots_temp before indexing it, matching the
    # insert-then-index order used by the other cities (indexing first is
    # slower at best and fails if the insert step creates the table).
    db.query(mrl.insert_slots_temp.format(offset=LINE_OFFSET))
    db.create_index('montreal_slots_temp', 'id')
    db.create_index('montreal_slots_temp', 'geom', index_type='gist')
    db.create_index('montreal_slots_temp', 'rules', index_type='gin')

    info("Creating and overlaying paid slots")
    db.query(mrl.overlay_paid_rules)
    db.vacuum_analyze('public', 'montreal_slots_temp')

    if debug:
        info("Creating debug slots")
        db.query(mrl.create_slots_for_debug.format(offset=LINE_OFFSET))
        db.create_index('montreal_slots_debug', 'pkid')
        db.create_index('montreal_slots_debug', 'geom', index_type='gist')
        db.vacuum_analyze('public', 'montreal_slots_debug')
def process_newyork(debug=False):
    """
    Process New York data.

    :param debug: when True, also build the `newyork_slots_debug` table.
    """
    # BUG FIX: the local logging helper was named `debug`, shadowing the
    # `debug` parameter, so the trailing `if debug:` was always truthy and
    # debug slots were generated unconditionally.
    def info(msg):
        return Logger.info("New York: {}".format(msg))

    def debug_log(msg):
        return Logger.debug("New York: {}".format(msg))

    def warning(msg):
        return Logger.warning("New York: {}".format(msg))

    info('Loading and translating rules')
    insert_rules('newyork_rules_translation')
    db.vacuum_analyze('public', 'rules')

    info("Loading signs")
    db.query(nyc.create_sign)
    db.query(nyc.insert_sign)
    db.create_index('newyork_sign', 'direction')
    db.create_index('newyork_sign', 'code')
    db.create_index('newyork_sign', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'newyork_sign')

    info("Creating signposts")
    db.query(nyc.create_signpost)
    db.query(nyc.insert_signpost)
    db.create_index('newyork_signpost', 'id')
    db.create_index('newyork_signpost', 'geobase_id')
    db.create_index('newyork_signpost', 'signs', index_type='gin')
    db.create_index('newyork_signpost', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'newyork_signpost')

    info("Matching osm roads with geobase")
    db.query(nyc.match_roads_geobase)
    db.create_index('newyork_roads_geobase', 'id')
    db.create_index('newyork_roads_geobase', 'osm_id')
    db.create_index('newyork_roads_geobase', 'name')
    db.create_index('newyork_roads_geobase', 'boro')
    db.create_index('newyork_roads_geobase', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'newyork_roads_geobase')

    info("Match signposts to geobase")
    db.query(nyc.match_signposts)
    db.vacuum_analyze('public', 'newyork_signpost')

    info("Add signpost id to signs")
    db.query(nyc.add_signposts_to_sign)
    db.vacuum_analyze('public', 'newyork_sign')

    info("Projecting signposts on road")
    duplicates = db.query(nyc.project_signposts)
    if duplicates:
        warning("Duplicates found for projected signposts : {}"
                .format(str(duplicates)))
    percent, total = db.query(nyc.count_signpost_projected)[0]
    if percent < 100:
        warning("Only {:.0f}% of signposts have been bound to a road. Total is {}"
                .format(percent, total))
    db.query(nyc.generate_signposts_orphans)
    info("Table 'newyork_signpost_orphans' has been generated to check for orphans")

    info("Creating likely slots")
    db.query(nyc.create_slots_likely)
    db.query(nyc.insert_slots_likely.format(isleft=1))
    db.query(nyc.insert_slots_likely.format(isleft=-1))
    # Get rid of problem segments FIXME
    db.query("""
    with tmp as (
        select *
        from (
            select g.id, count(distinct s.order_no)
            from newyork_roads_geobase g
            join newyork_signpost s on s.geobase_id = g.id
            group by g.id
        ) foo where count > 2
    )
    delete from newyork_slots_likely s using tmp t where t.id = s.rid;
    """)
    db.create_index('newyork_slots_likely', 'id')
    db.create_index('newyork_slots_likely', 'signposts', index_type='gin')
    db.create_index('newyork_slots_likely', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'newyork_slots_likely')

    info("Creating nextpoints")
    db.query(nyc.create_nextpoints_for_signposts)
    db.create_index('newyork_nextpoints', 'id')
    db.create_index('newyork_nextpoints', 'slot_id')
    db.create_index('newyork_nextpoints', 'direction')
    db.vacuum_analyze('public', 'newyork_nextpoints')

    # Process one borough at a time to keep the insert queries manageable.
    for x in ['K', 'M', 'Q', 'B', 'S']:
        info("Creating slots between signposts (borough {})".format(x))
        db.query(nyc.insert_slots_temp.format(boro=x, offset=LINE_OFFSET))
    db.create_index('newyork_slots_temp', 'id')
    db.create_index('newyork_slots_temp', 'geom', index_type='gist')
    db.create_index('newyork_slots_temp', 'rules', index_type='gin')
    db.vacuum_analyze('public', 'newyork_slots_temp')

    if debug:
        info("Creating debug slots")
        for x in ['K', 'M', 'Q', 'B', 'S']:
            db.query(nyc.create_slots_for_debug.format(boro=x, offset=LINE_OFFSET))
        db.create_index('newyork_slots_debug', 'pkid')
        db.create_index('newyork_slots_debug', 'geom', index_type='gist')
        db.vacuum_analyze('public', 'newyork_slots_debug')
def process_seattle(debug=False):
    """
    Process Seattle data and generate parking slots.

    :param debug: when True, also generate the ``seattle_slots_debug`` table
    """
    def info(msg):
        return Logger.info("Seattle: {}".format(msg))

    # FIX: named `dbg` so it does not shadow the `debug` parameter -- the
    # previous local `debug()` helper made `if debug:` below always truthy,
    # so debug slots were generated unconditionally.
    def dbg(msg):
        return Logger.debug("Seattle: {}".format(msg))

    def warning(msg):
        return Logger.warning("Seattle: {}".format(msg))

    info('Loading and translating rules')
    insert_rules('seattle_rules_translation')
    insert_dynamic_rules_seattle()
    db.vacuum_analyze('public', 'rules')

    info("Matching OSM roads with geobase")
    db.query(sea.match_roads_geobase)
    db.create_index('seattle_roads_geobase', 'id')
    db.create_index('seattle_roads_geobase', 'osm_id')
    db.create_index('seattle_roads_geobase', 'name')
    db.create_index('seattle_roads_geobase', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'seattle_roads_geobase')

    info("Loading signs")
    db.query(sea.create_sign)
    db.query(sea.insert_sign)
    db.query(sea.insert_sign_paid)
    db.query(sea.insert_sign_directional)
    db.query(sea.insert_sign_parklines)
    db.create_index('seattle_sign', 'direction')
    db.create_index('seattle_sign', 'code')
    db.create_index('seattle_sign', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'seattle_sign')

    info("Creating signposts")
    db.query(sea.create_signpost)
    db.query(sea.insert_signpost)
    db.create_index('seattle_signpost', 'id')
    db.create_index('seattle_signpost', 'geobase_id')
    db.create_index('seattle_signpost', 'signs', index_type='gin')
    db.create_index('seattle_signpost', 'geom', index_type='gist')
    db.query(sea.add_signposts_to_sign)
    db.vacuum_analyze('public', 'seattle_signpost')

    info("Projecting signposts on road")
    duplicates = db.query(sea.project_signposts)
    if duplicates:
        warning("Duplicates found for projected signposts : {}"
                .format(str(duplicates)))
    percent, total = db.query(sea.count_signpost_projected)[0]
    if percent < 100:
        warning("Only {:.0f}% of signposts have been bound to a road. Total is {}"
                .format(percent, total))
        db.query(sea.generate_signposts_orphans)
        info("Table 'seattle_signpost_orphans' has been generated to check for orphans")
    db.query(sea.assign_directions)
    db.vacuum_analyze('public', 'seattle_sign')

    info("Creating likely slots")
    db.query(sea.create_slots_likely)
    db.query(sea.insert_slots_likely.format(isleft=1))
    db.query(sea.insert_slots_likely.format(isleft=-1))
    db.create_index('seattle_slots_likely', 'id')
    db.create_index('seattle_slots_likely', 'signposts', index_type='gin')
    db.create_index('seattle_slots_likely', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'seattle_slots_likely')

    info("Creating nextpoints")
    db.query(sea.create_nextpoints_for_signposts)
    db.create_index('seattle_nextpoints', 'id')
    db.create_index('seattle_nextpoints', 'slot_id')
    db.create_index('seattle_nextpoints', 'direction')
    db.vacuum_analyze('public', 'seattle_nextpoints')

    info("Creating slots between signposts")
    db.query(sea.insert_slots_temp.format(offset=LINE_OFFSET))
    db.create_index('seattle_slots_temp', 'id')
    db.create_index('seattle_slots_temp', 'geom', index_type='gist')
    db.create_index('seattle_slots_temp', 'rules', index_type='gin')
    db.vacuum_analyze('public', 'seattle_slots_temp')

    if debug:
        info("Creating debug slots")
        db.query(sea.create_slots_for_debug.format(offset=LINE_OFFSET))
        db.create_index('seattle_slots_debug', 'pkid')
        db.create_index('seattle_slots_debug', 'geom', index_type='gist')
        db.vacuum_analyze('public', 'seattle_slots_debug')
def process_boston(debug=False):
    """
    Process Boston data and generate parking slots.

    :param debug: when True, also generate the ``boston_slots_debug`` table
    """
    def info(msg):
        return Logger.info("Boston: {}".format(msg))

    # FIX: named `dbg` so it does not shadow the `debug` parameter -- the
    # previous local `debug()` helper made `if debug:` below always truthy,
    # so debug slots were generated unconditionally.
    def dbg(msg):
        return Logger.debug("Boston: {}".format(msg))

    def warning(msg):
        return Logger.warning("Boston: {}".format(msg))

    dbg('Loading and translating rules')
    insert_rules('boston_rules_translation')
    db.vacuum_analyze('public', 'rules')

    info("Matching OSM roads with geobase")
    db.query(bos.create_roads_geobase)
    db.query(bos.match_roads_geobase.format(tbl="boston_geobase"))
    db.query(bos.match_roads_geobase.format(tbl="boston_metro_geobase"))
    db.create_index('boston_roads_geobase', 'id')
    db.create_index('boston_roads_geobase', 'roadsegment')
    db.create_index('boston_roads_geobase', 'osm_id')
    db.create_index('boston_roads_geobase', 'name')
    db.create_index('boston_roads_geobase', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'boston_roads_geobase')

    info("Creating sign table")
    db.query(bos.create_sign)
    info("Loading signs")
    db.query(bos.insert_sign)
    db.query(bos.insert_sign_cambridge)
    db.create_index('boston_sign', 'geom', index_type='gist')
    db.create_index('boston_sign', 'direction')
    db.create_index('boston_sign', 'signpost')
    db.vacuum_analyze('public', 'boston_sign')

    info("Creating sign posts")
    db.query(bos.create_signpost)
    db.query(bos.insert_signpost)
    db.create_index('boston_signpost', 'geom', index_type='gist')
    db.create_index('boston_signpost', 'geobase_id')
    db.query(bos.add_signposts_to_sign)
    db.vacuum_analyze('public', 'boston_signpost')

    info("Projecting signposts on road")
    duplicates = db.query(bos.project_signposts)
    if duplicates:
        warning("Duplicates found for projected signposts : {}"
                .format(str(duplicates)))
    db.create_index('boston_signpost_onroad', 'id')
    db.create_index('boston_signpost_onroad', 'road_id')
    db.create_index('boston_signpost_onroad', 'isleft')
    db.create_index('boston_signpost_onroad', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'boston_signpost_onroad')
    percent, total = db.query(bos.count_signpost_projected)[0]
    if percent < 100:
        warning("Only {:.0f}% of signposts have been bound to a road. Total is {}"
                .format(percent, total))
        db.query(bos.generate_signposts_orphans)
        info("Table 'boston_signpost_orphans' has been generated to check for orphans")

    info("Creating slots between signposts")
    db.query(bos.create_slots_likely)
    db.query(bos.insert_slots_likely.format(isleft=1))
    db.query(bos.insert_slots_likely.format(isleft=-1))
    db.create_index('boston_slots_likely', 'id')
    db.create_index('boston_slots_likely', 'signposts', index_type='gin')
    db.create_index('boston_slots_likely', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'boston_slots_likely')
    db.query(bos.create_nextpoints_for_signposts)
    db.create_index('boston_nextpoints', 'id')
    db.create_index('boston_nextpoints', 'slot_id')
    db.create_index('boston_nextpoints', 'direction')
    db.vacuum_analyze('public', 'boston_nextpoints')
    # indexes are created before the insert; the table itself already exists
    # (created by run() via common.create_slots_temp)
    db.create_index('boston_slots_temp', 'id')
    db.create_index('boston_slots_temp', 'geom', index_type='gist')
    db.create_index('boston_slots_temp', 'rules', index_type='gin')
    db.query(bos.insert_slots_temp.format(offset=LINE_OFFSET))

    info("Creating and overlaying paid slots")
    db.query(bos.overlay_paid_rules)
    db.vacuum_analyze('public', 'boston_slots_temp')

    if debug:
        info("Creating debug slots")
        db.query(bos.create_slots_for_debug.format(offset=LINE_OFFSET))
        db.create_index('boston_slots_debug', 'pkid')
        db.create_index('boston_slots_debug', 'geom', index_type='gist')
        db.vacuum_analyze('public', 'boston_slots_debug')
def cleanup_table():
    """
    Drop the temporary tables left behind by the processing steps.
    """
    Logger.info("Cleanup schema")
    # tables shared by the whole pipeline
    universal = ["bad_intersection", "way_intersection", "roads",
                 "signpost_onroad", "parking_lots_raw"]
    # per-city scratch tables, instantiated once per city in CITIES
    per_city = ["slots_likely", "slots_temp", "nextpoints", "paid_temp",
                "signpost_temp", "paid_slots_raw", "bornes_raw",
                "bornes_clustered"]
    doomed = universal + ["{}_{}".format(city, suffix)
                          for suffix in per_city for city in CITIES]
    for table in doomed:
        db.query("DROP TABLE IF EXISTS {}".format(table))
def process_osm():
    """
    Prepare OpenStreetMap road data: filter ways, build way intersections,
    and split ways on those intersections into road segments.
    """
    def info(msg):
        return Logger.info("OpenStreetMap: {}".format(msg))

    def debug(msg):
        return Logger.debug("OpenStreetMap: {}".format(msg))

    def warning(msg):
        return Logger.warning("OpenStreetMap: {}".format(msg))

    info("Filtering ways")
    db.query(osm.create_osm_ways)
    db.create_index('osm_ways', 'geom', index_type='gist')
    for column in ('osm_id', 'name'):
        db.create_index('osm_ways', column)

    info("Creating way intersections from planet lines")
    db.query(osm.create_way_intersection)
    db.create_index('way_intersection', 'way_id')
    db.create_index('way_intersection', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'way_intersection')
    removed = db.query(osm.remove_bad_intersection)
    if removed:
        debug("Removed {} bad intersections".format(len(removed)))

    info("Splitting ways on intersections")
    db.query(osm.split_osm_roads)
    for column in ('id', 'osm_id', 'name'):
        db.create_index('roads', column)
    db.create_index('roads', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'roads')
def run(cities=CITIES, osm=False, debug=False):
    """
    Run the entire pipeline.

    :param cities: cities to process (defaults to all of CITIES)
    :param osm: when True, (re)process the OpenStreetMap base data first
    :param debug: when True, generate debug slot tables and skip cleanup
    """
    Logger.debug("Loading extensions and custom functions")
    db.query("create extension if not exists fuzzystrmatch")
    db.query("create extension if not exists intarray")
    for func in (plfunctions.st_isleft_func, plfunctions.array_sort,
                 plfunctions.get_max_range):
        db.query(func)

    if osm:
        process_osm()

    # common tables shared by every city
    db.query(common.create_rules)
    db.create_index('rules', 'code')
    db.query(common.create_slots)
    for city in cities:
        db.query(common.create_slots_temp.format(city=city))
        db.query(common.create_slots_partition.format(city=city))

    Logger.info("Processing parking lot / garage data")
    db.query(common.create_parking_lots)
    # each of these cities ships a raw CSV of off-street lots
    for city in ("montreal", "quebec", "seattle", "boston"):
        db.query(common.create_parking_lots_raw.format(city=city))
        insert_raw_lots(city, "lots_{}.csv".format(city))
        insert_parking_lots(city)
    db.create_index('parking_lots', 'id')
    db.create_index('parking_lots', 'city')
    db.create_index('parking_lots', 'geom', index_type='gist')
    db.create_index('parking_lots', 'agenda', index_type='gin')
    db.query("DROP TABLE IF EXISTS parking_lots_streetview;")
    insert_lots_streetview("lots_newyork_streetview.csv")

    # per-city on-street processing, in fixed order
    for name, processor in (("montreal", process_montreal),
                            ("quebec", process_quebec),
                            ("newyork", process_newyork),
                            ("seattle", process_seattle),
                            ("boston", process_boston)):
        if name in cities:
            processor(debug)

    Logger.info("Shorten slots that intersect with roads or other slots")
    for city in cities:
        db.query(common.cut_slots_crossing_roads.format(city=city, offset=LINE_OFFSET))
        db.query(common.cut_slots_crossing_slots.format(city=city))

    Logger.info("Aggregating like slots")
    for city in cities:
        slots_table = city + '_slots'
        db.create_index(slots_table, 'id')
        db.create_index(slots_table, 'geom', index_type='gist')
        db.create_index(slots_table, 'rules', index_type='gin')
        db.query(common.aggregate_like_slots.format(
            city=city, within=3 if city == "seattle" else 0.1))
        db.query(common.create_client_data.format(city=city))
        db.vacuum_analyze('public', slots_table)

    Logger.info("Creating permit lists")
    db.query(common.create_permit_lists)
    for city in cities:
        db.query(common.insert_permit_lists.format(city=city))

    if not debug:
        cleanup_table()
def insert_rules(from_table):
    """
    Fetch parking rules from a city-specific translation table, group and
    simplify them, then bulk-load the result into the shared `rules` table.
    """
    Logger.debug("Get rules from {} and simplify them".format(from_table))
    raw_rules = db.query(
        common.get_rules_from_source.format(source=from_table),
        namedtuple=True
    )
    simplified = group_rules(raw_rules)

    def _pg_value(val):
        # dict values become JSON; backslashes doubled for COPY escaping
        if isinstance(val, dict):
            return json.dumps(val).replace('\\', '\\\\')
        return val

    Logger.debug("Load rules into rules table")
    rows = [[_pg_value(val) for val in rule._asdict().values()]
            for rule in simplified]
    db.copy_from('public', 'rules', common.rules_columns, rows)
def insert_raw_lots(city, filename):
    """
    Bulk-load a raw parking-lot CSV (shipped in the package's data/
    directory) into the `<city>_parking_lots` table via COPY.
    """
    csv_path = os.path.join(os.path.dirname(__file__), 'data', filename)
    copy_stmt = """
        COPY {}_parking_lots (name, operator, address, description, lun_normal, mar_normal, mer_normal,
            jeu_normal, ven_normal, sam_normal, dim_normal, hourly_normal, daily_normal, max_normal,
            lun_special, mar_special, mer_special, jeu_special, ven_special, sam_special, dim_special,
            hourly_special, daily_special, max_special, lun_free, mar_free, mer_free, jeu_free,
            ven_free, sam_free, dim_free, daily_free, indoor, handicap, card, valet, lat, long,
            capacity, street_view_lat, street_view_long, street_view_head, street_view_id, active,
            partner_name, partner_id)
        FROM '{}'
        WITH CSV HEADER
    """
    db.query(copy_stmt.format(city, csv_path))
def insert_lots_streetview(filename):
    """
    Load street-view metadata for parking lots by running the
    `load_lots_streetview.sql` template with the given CSV's path
    substituted in via str.format.
    """
    # NOTE(review): the template is opened in binary mode and `.format()` is
    # called on its contents -- this relies on Python 2 bytestrings (py3
    # bytes have no .format) and on the SQL file containing no stray
    # '{'/'}' characters; confirm before porting.
    with open(os.path.join(os.path.dirname(__file__), 'data', 'load_lots_streetview.sql'), 'rb') as infile:
        db.query(infile.read().format(os.path.join(os.path.dirname(__file__), 'data', filename)))
    db.vacuum_analyze("public", "parking_lots_streetview")
def insert_parking_lots(city):
    """
    Transform raw rows from `<city>_parking_lots` into the shared
    `parking_lots` table, building a per-weekday JSON pricing agenda
    (keys "1".."7" = Monday..Sunday) from the normal/special/free
    opening-hour columns.
    """
    # target columns of the INSERT statements built below, in order
    columns = ["city", "name", "operator", "address", "description", "agenda", "capacity", "attrs",
               "geom", "active", "street_view", "partner_name", "partner_id", "geojson"]
    # French weekday abbreviations used as column-name prefixes (Mon..Sun)
    days = ["lun", "mar", "mer", "jeu", "ven", "sam", "dim"]
    lots, queries = [], []
    for row in db.query("""
        SELECT *, ST_Transform(ST_SetSRID(ST_MakePoint(long, lat), 4326), 3857) AS geom
        FROM {}_parking_lots
        """.format(city), namedtuple=True):
        # textual fields: decode and double single quotes for SQL literals
        # NOTE(review): .decode('utf-8') implies Python 2 bytestring rows -- confirm
        lot = [(x.decode('utf-8').replace("'", "''") if x else '') for x in [row.name, row.operator, row.address, row.description]]
        # Create pricing rules per time period the lot is open
        agenda = {str(y): [] for y in range(1,8)}
        for x in range(1,8):
            if getattr(row, days[x - 1] + "_normal"):
                y = getattr(row, days[x - 1] + "_normal")
                # "start,end" in (possibly fractional) hours
                hours = [float(z) for z in y.split(",")]
                # period wraps past midnight: split across this day and the next
                if hours != [0.0, 24.0] and hours[0] > hours[1]:
                    nextday = str(x+1) if (x < 7) else "1"
                    agenda[nextday].append({"hours": [0.0, hours[1]], "max": row.max_normal or None,
                                            "hourly": row.hourly_normal or None, "daily": row.daily_normal or None})
                    hours = [hours[0], 24.0]
                agenda[str(x)].append({"hours": hours, "hourly": row.hourly_normal or None,
                                       "max": row.max_normal or None, "daily": row.daily_normal or None})
            if getattr(row, days[x - 1] + "_special"):
                y = getattr(row, days[x - 1] + "_special")
                hours = [float(z) for z in y.split(",")]
                # same midnight-wrap handling as the "normal" rates above
                if hours != [0.0, 24.0] and hours[0] > hours[1]:
                    nextday = str(x+1) if (x < 7) else "1"
                    agenda[nextday].append({"hours": [0.0, hours[1]], "max": row.max_special or None,
                                            "hourly": row.hourly_special or None, "daily": row.daily_special or None})
                    hours = [hours[0], 24.0]
                agenda[str(x)].append({"hours": hours, "hourly": row.hourly_special or None,
                                       "max": row.max_special or None, "daily": row.daily_special or None})
            if getattr(row, days[x - 1] + "_free"):
                y = getattr(row, days[x - 1] + "_free")
                hours = [float(z) for z in y.split(",")]
                # free periods carry an hourly rate of 0 and no max
                if hours != [0.0, 24.0] and hours[0] > hours[1]:
                    nextday = str(x+1) if (x < 7) else "1"
                    agenda[nextday].append({"hours": [0.0, hours[1]], "max": None,
                                            "hourly": 0, "daily": row.daily_free or None})
                    hours = [hours[0], 24.0]
                agenda[str(x)].append({"hours": hours, "hourly": 0, "max": None,
                                       "daily": row.daily_free or None})
        # Create "closed" rules for periods not covered by an open rule
        for x in agenda:
            hours = sorted([y["hours"] for y in agenda[x]], key=lambda z: z[0])
            for i, y in enumerate(hours):
                starts = [z[0] for z in hours]
                if y[0] == 0.0:
                    continue
                # boundaries of the gap around this open period
                last_end = hours[i-1][1] if not i == 0 else 0.0
                next_start = hours[i+1][0] if not i == (len(hours) - 1) else 24.0
                if not last_end in starts:
                    agenda[x].append({"hours": [last_end, y[0]], "hourly": None, "max": None,
                                      "daily": None})
                if not next_start in starts and y[1] != 24.0:
                    agenda[x].append({"hours": [y[1], next_start], "hourly": None, "max": None,
                                      "daily": None})
            if agenda[x] == []:
                # no open periods at all for this day: closed all day
                agenda[x].append({"hours": [0.0,24.0], "hourly": None, "max": None, "daily": None})
        # remaining values in `columns` order; street_view is two fields
        # (head, id) folded into one json_build_object in the query below
        lot += [json.dumps(agenda), row.capacity or 0, json.dumps({"indoor": row.indoor,
                "handicap": row.handicap, "card": row.card, "valet": row.valet}), row.geom, row.active,
                row.street_view_head, row.street_view_id,
                "'{}'".format(row.partner_name) if row.partner_name else "NULL",
                "'{}'".format(row.partner_id) if row.partner_id else "NULL"]
        lots.append(lot)
    for x in lots:
        # x[-6] is the projected geometry; it is reused for the geojson column
        queries.append("""
            INSERT INTO parking_lots ({}) VALUES ('{city}', '{}', '{}', '{}', '{}', '{}'::jsonb, {},
            '{}'::jsonb, '{}'::geometry, '{}', json_build_object('head', {}, 'id', '{}')::jsonb,
            {}, {}, ST_AsGeoJSON(ST_Transform('{geom}'::geometry, 4326))::jsonb)
        """.format(",".join(columns), *[y for y in x], city=city, geom=x[-6]))
    db.queries(queries)
def insert_dynamic_rules_seattle():
    """
    Load dynamic paid-parking rules for Seattle.

    Reads grouped rows from `seattle_parklines` and creates one rule per
    contiguous paid period for weekdays, Saturdays and Sundays (adjacent
    periods with identical rates are merged), plus peak-hour no-parking
    rules. Results are inserted into `rules` and `seattle_sign_codes`.
    """
    def day_rules(x, per, idx, rates, base):
        """Build the (up to 3) paid-period rules for one day group.

        :param idx: 6 column indexes: (start1, end1, start2, end2, start3, end3)
        :param rates: 3 column indexes of the corresponding rates
        :param base: rule counter for period 1 of this day group; the
                     counter also selects the rate column inside _dynrule
        """
        rules = []
        merged2 = merged3 = False
        s1, e1, s2, e2, s3, e3 = (x[i] for i in idx)
        if s1 and e1:
            start, end = s1, e1
            # merge period 2 (and possibly 3) when contiguous and same rate
            if s2 and e2 and s2 == (end + 1) and x[rates[0]] == x[rates[1]]:
                end = e2
                merged2 = True
            if s3 and e3 and s3 == (end + 1) and x[rates[1]] == x[rates[2]]:
                end = e3
                merged3 = True
            rules.append(_dynrule(x, per, start, end, base))
        if s2 and e2 and not merged2:
            start, end = s2, e2
            if s3 and e3 and s3 == (end + 1) and x[rates[1]] == x[rates[2]]:
                end = e3
                merged3 = True
            rules.append(_dynrule(x, per, start, end, base + 1))
        if s3 and e3 and not merged3:
            # FIX: the original reused stale `start`/`end` from an earlier
            # period here for SAT and SUN (and could even hit them unbound);
            # period 3 must use its own start/end, as the weekday branch did.
            rules.append(_dynrule(x, per, s3, e3, base + 2))
        return rules

    paid_rules = []
    data = db.query("""
        SELECT ROW_NUMBER() OVER (ORDER BY wkd_start1), array_agg(elmntkey), wkd_start1,
            wkd_end1, wkd_start2, wkd_end2, wkd_start3, wkd_end3, sat_start1, sat_end1,
            sat_start2, sat_end2, sat_start3, sat_end3, sun_start1, sun_end1, sun_start2,
            sun_end2, sun_start3, sun_end3, wkd_rate1, wkd_rate2, wkd_rate3, sat_rate1,
            sat_rate2, sat_rate3, sun_rate1, sun_rate2, sun_rate3, parking_time_limit,
            rpz_spaces != 0, rpz_zone, peak_hour
        FROM seattle_parklines
        WHERE parking_category = 'Paid Parking'
        GROUP BY wkd_start1, wkd_end1, wkd_start2, wkd_end2, wkd_start3,
            wkd_end3, sat_start1, sat_end1, sat_start2, sat_end2, sat_start3, sat_end3,
            sun_start1, sun_end1, sun_start2, sun_end2, sun_start3, sun_end3, wkd_rate1,
            wkd_rate2, wkd_rate3, sat_rate1, sat_rate2, sat_rate3, sun_rate1, sun_rate2,
            sun_rate3, parking_time_limit, rpz_spaces != 0, rpz_zone, peak_hour
    """)
    for x in data:
        # weekday periods: columns 2-7, rates 20-22, rule counters 1-3
        paid_rules += day_rules(x, "MON-FRI", (2, 3, 4, 5, 6, 7), (20, 21, 22), 1)
        # saturday periods: columns 8-13, rates 23-25, rule counters 4-6
        paid_rules += day_rules(x, "SAT", (8, 9, 10, 11, 12, 13), (23, 24, 25), 4)
        # sunday periods: columns 14-19, rates 26-28, rule counters 7-9
        paid_rules += day_rules(x, "SUN", (14, 15, 16, 17, 18, 19), (26, 27, 28), 7)
        if x[32]:
            # peak hour restriction: no parking on weekdays during rush periods
            insert_qry = "('{}', '{}', '{}'::jsonb, {}, ARRAY[{}]::varchar[], '{}', ARRAY{}::varchar[])"
            code, agenda = "SEA-PAID-{}-10".format(x[0]), {str(y): [] for y in range(1, 8)}
            for z in x[32].split(" "):
                for y in range(1, 6):
                    agenda[str(y)].append([tstr_to_float(z.split("-")[0] + z[-2:]),
                                           tstr_to_float(z.split("-")[1])])
            desc = "PEAK HOUR NO PARKING WEEKDAYS {}".format(x[32])
            paid_rules.append(insert_qry.format(code, desc, json.dumps(agenda), "NULL",
                                                "'peak_hour'", "", x[1]))
    if not paid_rules:
        # nothing to insert -- an empty VALUES list would be a SQL syntax error
        return
    db.query("""
        INSERT INTO rules (code, description, agenda, time_max_parking, restrict_types, permit_no)
        SELECT code, description, agenda, time_max_parking, restrict_types, permit_no
        FROM (VALUES {}) AS d(code, description, agenda, time_max_parking, restrict_types, permit_no, ids)
    """.format(",".join(paid_rules)))
    db.query("""
        INSERT INTO seattle_sign_codes (code, signs)
        SELECT code, ids
        FROM (VALUES {}) AS d(code, description, agenda, time_max_parking, restrict_types,
            permit_no, ids)
    """.format(",".join(paid_rules)))
def _dynrule(x, per, start, end, count):
    """
    Format one dynamic paid-parking rule as a SQL VALUES tuple string.

    :param x: grouped row from seattle_parklines (see insert_dynamic_rules_seattle)
    :param per: period label -- "MON-FRI", "SAT" or "SUN"
    :param start: period start, in minutes (converted to hours for the agenda)
    :param end: period end, in minutes
    :param count: rule counter; also selects the rate column via x[19 + count]
    :returns: a "(code, description, agenda, ...)" fragment for a VALUES list
    """
    # FIX: removed corrupted trailing text that had been fused onto the
    # return line (" | prkng_process/pipeline.py | from __future__ ..."),
    # which was a mid-file SyntaxError.
    insert_qry = "('{}', '{}', '{}'::jsonb, {}, ARRAY[{}]::varchar[], '{}', ARRAY{}::varchar[])"
    code, agenda = "SEA-PAID-{}-{}".format(x[0], count), {str(y): [] for y in range(1, 8)}
    if per == "MON-FRI":
        for y in range(1, 6):
            agenda[str(y)].append([float(start) / 60.0, round(float(end) / 60.0)])
    else:
        agenda["6" if per == "SAT" else "7"].append([float(start) / 60.0, round(float(end) / 60.0)])
    desc = "PAID PARKING {}-{} {} ${}/hr".format(pretty_time(start), pretty_time(end), per,
                                                 "{0:.2f}".format(float(x[19 + count])))
    # x[29] = time limit, x[30] = RPZ flag, x[31] = RPZ zone, x[1] = sign ids
    return insert_qry.format(code, desc, json.dumps(agenda), int(x[29]) if x[29] else "NULL",
                             "'paid'" + (",'permit'" if x[30] else ""), x[31] if x[31] else "", x[1])
import csv
import json
import os
from . import CONFIG, common, osm, plfunctions
from .cities import montreal as mrl
from .cities import quebec as qbc
from .cities import newyork as nyc
from .cities import seattle as sea
from .cities import boston as bos
from .database import PostgresWrapper
from .filters import group_rules
from .logger import Logger
from .utils import pretty_time, tstr_to_float
# distance from road to slot
LINE_OFFSET = 6
# all cities handled by the pipeline, in processing order
CITIES = ["montreal", "quebec", "newyork", "seattle", "boston"]
# FIX: restored the password format field -- "{<PASSWORD>}" was anonymization
# residue and raises KeyError at import time. NOTE(review): key name inferred
# from the PG_* naming scheme; confirm PG_PASSWORD exists in CONFIG.
db = PostgresWrapper(
    "host='{PG_HOST}' port={PG_PORT} dbname={PG_DATABASE} "
    "user={PG_USERNAME} password={PG_PASSWORD} ".format(**CONFIG))
def process_quebec(debug=False):
    """
    Process Quebec data and generate parking slots.

    :param debug: when True, also generate the ``quebec_slots_debug`` table
    """
    def info(msg):
        return Logger.info("Québec: {}".format(msg))

    # FIX: named `dbg` so it does not shadow the `debug` parameter -- the
    # previous local `debug()` helper made `if debug:` below always truthy,
    # so debug slots were generated unconditionally.
    def dbg(msg):
        return Logger.debug("Québec: {}".format(msg))

    def warning(msg):
        return Logger.warning("Québec: {}".format(msg))

    info('Loading and translating rules')
    insert_rules('quebec_rules_translation')
    db.vacuum_analyze('public', 'rules')

    info("Creating sign table")
    db.query(qbc.create_sign)
    info("Loading signs")
    db.query(qbc.insert_sign)
    db.create_index('quebec_sign', 'direction')
    db.create_index('quebec_sign', 'code')
    db.create_index('quebec_sign', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'quebec_sign')

    info("Creating signposts")
    db.query(qbc.create_signpost)
    db.create_index('quebec_signpost', 'id')
    db.create_index('quebec_signpost', 'rid')
    db.create_index('quebec_signpost', 'signs', index_type='gin')
    db.create_index('quebec_signpost', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'quebec_signpost')

    info("Add signpost id to signs")
    db.query(qbc.add_signposts_to_sign)
    db.vacuum_analyze('public', 'quebec_sign')

    info("Projection signposts on road")
    duplicates = db.query(qbc.project_signposts)
    if duplicates:
        warning("Duplicates found for projected signposts : {}"
                .format(str(duplicates)))
    percent, total = db.query(qbc.count_signpost_projected)[0]
    if percent < 100:
        warning("Only {:.0f}% of signposts have been bound to a road. Total is {}"
                .format(percent, total))
        db.query(qbc.generate_signposts_orphans)
        info("Table 'signpost_orphans' has been generated to check for orphans")

    info("Creating slots between signposts")
    db.query(qbc.create_slots_likely)
    db.query(qbc.insert_slots_likely.format(isleft=1))
    db.query(qbc.insert_slots_likely.format(isleft=-1))
    db.create_index('quebec_slots_likely', 'id')
    db.create_index('quebec_slots_likely', 'signposts', index_type='gin')
    db.create_index('quebec_slots_likely', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'quebec_slots_likely')
    db.query(qbc.create_nextpoints_for_signposts)
    db.create_index('quebec_nextpoints', 'id')
    db.create_index('quebec_nextpoints', 'slot_id')
    db.create_index('quebec_nextpoints', 'direction')
    db.vacuum_analyze('public', 'quebec_nextpoints')
    db.query(qbc.insert_slots_temp.format(offset=LINE_OFFSET))
    db.create_index('quebec_slots_temp', 'id')
    db.create_index('quebec_slots_temp', 'geom', index_type='gist')
    db.create_index('quebec_slots_temp', 'rules', index_type='gin')
    db.vacuum_analyze('public', 'quebec_slots_temp')

    info("Creating and overlaying paid slots")
    db.query(qbc.create_bornes_raw)
    db.query(qbc.create_paid_signpost)
    db.query(qbc.aggregate_paid_signposts.format(offset=LINE_OFFSET))
    db.query(qbc.overlay_paid_rules)
    db.query(qbc.create_paid_slots_standalone)

    if debug:
        info("Creating debug slots")
        db.query(qbc.create_slots_for_debug.format(offset=LINE_OFFSET))
        db.create_index('quebec_slots_debug', 'pkid')
        db.create_index('quebec_slots_debug', 'geom', index_type='gist')
        db.vacuum_analyze('public', 'quebec_slots_debug')
def process_montreal(debug=False):
    """
    Process Montréal data and generate parking slots.

    :param debug: when True, also generate the ``montreal_slots_debug`` table
    """
    def info(msg):
        return Logger.info("Montréal: {}".format(msg))

    # FIX: named `dbg` so it does not shadow the `debug` parameter -- the
    # previous local `debug()` helper made `if debug:` below always truthy,
    # so debug slots were generated unconditionally.
    def dbg(msg):
        return Logger.debug("Montréal: {}".format(msg))

    def warning(msg):
        return Logger.warning("Montréal: {}".format(msg))

    dbg('Loading and translating rules')
    insert_rules('montreal_rules_translation')
    db.vacuum_analyze('public', 'rules')

    info("Matching osm roads with geobase")
    db.query(mrl.match_roads_geobase)
    db.create_index('montreal_roads_geobase', 'id')
    db.create_index('montreal_roads_geobase', 'id_trc')
    db.create_index('montreal_roads_geobase', 'osm_id')
    db.create_index('montreal_roads_geobase', 'name')
    db.create_index('montreal_roads_geobase', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'montreal_roads_geobase')

    info("Creating sign table")
    db.query(mrl.create_sign)
    info("Loading signs")
    db.query(mrl.insert_sign)
    db.query(mrl.insert_signpost_verdun)
    db.query(mrl.insert_sign_verdun)
    db.create_index('montreal_sign', 'geom', index_type='gist')
    db.create_index('montreal_sign', 'direction')
    db.create_index('montreal_sign', 'elevation')
    db.create_index('montreal_sign', 'signpost')
    db.vacuum_analyze('public', 'montreal_sign')

    info("Creating sign posts")
    db.query(mrl.create_signpost)
    db.query(mrl.insert_signpost)
    db.create_index('montreal_signpost', 'geom', index_type='gist')
    db.create_index('montreal_signpost', 'geobase_id')
    db.vacuum_analyze('public', 'montreal_signpost')

    info("Projecting signposts on road")
    duplicates = db.query(mrl.project_signposts)
    if duplicates:
        warning("Duplicates found for projected signposts : {}"
                .format(str(duplicates)))
    db.create_index('montreal_signpost_onroad', 'id')
    db.create_index('montreal_signpost_onroad', 'road_id')
    db.create_index('montreal_signpost_onroad', 'isleft')
    db.create_index('montreal_signpost_onroad', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'montreal_signpost_onroad')
    percent, total = db.query(mrl.count_signpost_projected)[0]
    if percent < 100:
        warning("Only {:.0f}% of signposts have been bound to a road. Total is {}"
                .format(percent, total))
        db.query(mrl.generate_signposts_orphans)
        info("Table 'montreal_signpost_orphans' has been generated to check for orphans")

    info("Creating slots between signposts")
    db.query(mrl.create_slots_likely)
    db.query(mrl.insert_slots_likely.format(isleft=1))
    db.query(mrl.insert_slots_likely.format(isleft=-1))
    db.create_index('montreal_slots_likely', 'id')
    db.create_index('montreal_slots_likely', 'signposts', index_type='gin')
    db.create_index('montreal_slots_likely', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'montreal_slots_likely')
    db.query(mrl.create_nextpoints_for_signposts)
    db.create_index('montreal_nextpoints', 'id')
    db.create_index('montreal_nextpoints', 'slot_id')
    db.create_index('montreal_nextpoints', 'direction')
    db.vacuum_analyze('public', 'montreal_nextpoints')
    # indexes are created before the insert; the table itself already exists
    # (created by run() via common.create_slots_temp)
    db.create_index('montreal_slots_temp', 'id')
    db.create_index('montreal_slots_temp', 'geom', index_type='gist')
    db.create_index('montreal_slots_temp', 'rules', index_type='gin')
    db.query(mrl.insert_slots_temp.format(offset=LINE_OFFSET))

    info("Creating and overlaying paid slots")
    db.query(mrl.overlay_paid_rules)
    db.vacuum_analyze('public', 'montreal_slots_temp')

    if debug:
        info("Creating debug slots")
        db.query(mrl.create_slots_for_debug.format(offset=LINE_OFFSET))
        db.create_index('montreal_slots_debug', 'pkid')
        db.create_index('montreal_slots_debug', 'geom', index_type='gist')
        db.vacuum_analyze('public', 'montreal_slots_debug')
def process_newyork(debug=False):
    """
    Process New York data and generate parking slots.

    :param debug: when True, also generate the ``newyork_slots_debug`` table
    """
    def info(msg):
        return Logger.info("New York: {}".format(msg))

    # FIX: named `dbg` so it does not shadow the `debug` parameter -- the
    # previous local `debug()` helper made `if debug:` below always truthy,
    # so debug slots were generated unconditionally.
    def dbg(msg):
        return Logger.debug("New York: {}".format(msg))

    def warning(msg):
        return Logger.warning("New York: {}".format(msg))

    info('Loading and translating rules')
    insert_rules('newyork_rules_translation')
    db.vacuum_analyze('public', 'rules')

    info("Loading signs")
    db.query(nyc.create_sign)
    db.query(nyc.insert_sign)
    db.create_index('newyork_sign', 'direction')
    db.create_index('newyork_sign', 'code')
    db.create_index('newyork_sign', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'newyork_sign')

    info("Creating signposts")
    db.query(nyc.create_signpost)
    db.query(nyc.insert_signpost)
    db.create_index('newyork_signpost', 'id')
    db.create_index('newyork_signpost', 'geobase_id')
    db.create_index('newyork_signpost', 'signs', index_type='gin')
    db.create_index('newyork_signpost', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'newyork_signpost')

    info("Matching osm roads with geobase")
    db.query(nyc.match_roads_geobase)
    db.create_index('newyork_roads_geobase', 'id')
    db.create_index('newyork_roads_geobase', 'osm_id')
    db.create_index('newyork_roads_geobase', 'name')
    db.create_index('newyork_roads_geobase', 'boro')
    db.create_index('newyork_roads_geobase', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'newyork_roads_geobase')

    info("Match signposts to geobase")
    db.query(nyc.match_signposts)
    db.vacuum_analyze('public', 'newyork_signpost')
    info("Add signpost id to signs")
    db.query(nyc.add_signposts_to_sign)
    db.vacuum_analyze('public', 'newyork_sign')

    info("Projecting signposts on road")
    duplicates = db.query(nyc.project_signposts)
    if duplicates:
        warning("Duplicates found for projected signposts : {}"
                .format(str(duplicates)))
    percent, total = db.query(nyc.count_signpost_projected)[0]
    if percent < 100:
        warning("Only {:.0f}% of signposts have been bound to a road. Total is {}"
                .format(percent, total))
        db.query(nyc.generate_signposts_orphans)
        info("Table 'newyork_signpost_orphans' has been generated to check for orphans")

    info("Creating likely slots")
    db.query(nyc.create_slots_likely)
    db.query(nyc.insert_slots_likely.format(isleft=1))
    db.query(nyc.insert_slots_likely.format(isleft=-1))
    # Get rid of problem segments FIXME
    db.query("""
        with tmp as (
            select *
            from (
                select g.id, count(distinct s.order_no)
                from newyork_roads_geobase g
                join newyork_signpost s on s.geobase_id = g.id
                group by g.id
            ) foo where count > 2
        )
        delete from newyork_slots_likely s using tmp t where t.id = s.rid;
    """)
    db.create_index('newyork_slots_likely', 'id')
    db.create_index('newyork_slots_likely', 'signposts', index_type='gin')
    db.create_index('newyork_slots_likely', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'newyork_slots_likely')

    info("Creating nextpoints")
    db.query(nyc.create_nextpoints_for_signposts)
    db.create_index('newyork_nextpoints', 'id')
    db.create_index('newyork_nextpoints', 'slot_id')
    db.create_index('newyork_nextpoints', 'direction')
    db.vacuum_analyze('public', 'newyork_nextpoints')

    # borough codes -- presumably Brooklyn, Manhattan, Queens, Bronx,
    # Staten Island; slots are built per borough
    for boro in ['K', 'M', 'Q', 'B', 'S']:
        info("Creating slots between signposts (borough {})".format(boro))
        db.query(nyc.insert_slots_temp.format(boro=boro, offset=LINE_OFFSET))
    db.create_index('newyork_slots_temp', 'id')
    db.create_index('newyork_slots_temp', 'geom', index_type='gist')
    db.create_index('newyork_slots_temp', 'rules', index_type='gin')
    db.vacuum_analyze('public', 'newyork_slots_temp')

    if debug:
        info("Creating debug slots")
        for boro in ['K', 'M', 'Q', 'B', 'S']:
            db.query(nyc.create_slots_for_debug.format(boro=boro, offset=LINE_OFFSET))
        db.create_index('newyork_slots_debug', 'pkid')
        db.create_index('newyork_slots_debug', 'geom', index_type='gist')
        db.vacuum_analyze('public', 'newyork_slots_debug')
def process_seattle(debug=False):
    """
    Process Seattle data.

    Loads parking rules, matches OSM roads against the city geobase,
    builds sign/signpost tables and generates the parking slots.

    debug -- when True, also create the seattle_slots_debug table.
    """
    def info(msg):
        return Logger.info("Seattle: {}".format(msg))

    # BUG FIX: this helper was named `debug`, shadowing the `debug`
    # parameter; `if debug:` below was then always truthy and debug slots
    # were created unconditionally.
    def debug_log(msg):
        return Logger.debug("Seattle: {}".format(msg))

    def warning(msg):
        return Logger.warning("Seattle: {}".format(msg))

    info('Loading and translating rules')
    insert_rules('seattle_rules_translation')
    insert_dynamic_rules_seattle()
    db.vacuum_analyze('public', 'rules')

    info("Matching OSM roads with geobase")
    db.query(sea.match_roads_geobase)
    db.create_index('seattle_roads_geobase', 'id')
    db.create_index('seattle_roads_geobase', 'osm_id')
    db.create_index('seattle_roads_geobase', 'name')
    db.create_index('seattle_roads_geobase', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'seattle_roads_geobase')

    info("Loading signs")
    db.query(sea.create_sign)
    db.query(sea.insert_sign)
    db.query(sea.insert_sign_paid)
    db.query(sea.insert_sign_directional)
    db.query(sea.insert_sign_parklines)
    db.create_index('seattle_sign', 'direction')
    db.create_index('seattle_sign', 'code')
    db.create_index('seattle_sign', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'seattle_sign')

    info("Creating signposts")
    db.query(sea.create_signpost)
    db.query(sea.insert_signpost)
    db.create_index('seattle_signpost', 'id')
    db.create_index('seattle_signpost', 'geobase_id')
    db.create_index('seattle_signpost', 'signs', index_type='gin')
    db.create_index('seattle_signpost', 'geom', index_type='gist')
    db.query(sea.add_signposts_to_sign)
    db.vacuum_analyze('public', 'seattle_signpost')

    info("Projecting signposts on road")
    duplicates = db.query(sea.project_signposts)
    if duplicates:
        warning("Duplicates found for projected signposts : {}"
                .format(str(duplicates)))
    percent, total = db.query(sea.count_signpost_projected)[0]
    if percent < 100:
        warning("Only {:.0f}% of signposts have been bound to a road. Total is {}"
                .format(percent, total))
    db.query(sea.generate_signposts_orphans)
    info("Table 'seattle_signpost_orphans' has been generated to check for orphans")
    db.query(sea.assign_directions)
    db.vacuum_analyze('public', 'seattle_sign')

    info("Creating likely slots")
    db.query(sea.create_slots_likely)
    db.query(sea.insert_slots_likely.format(isleft=1))
    db.query(sea.insert_slots_likely.format(isleft=-1))
    db.create_index('seattle_slots_likely', 'id')
    db.create_index('seattle_slots_likely', 'signposts', index_type='gin')
    db.create_index('seattle_slots_likely', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'seattle_slots_likely')

    info("Creating nextpoints")
    db.query(sea.create_nextpoints_for_signposts)
    db.create_index('seattle_nextpoints', 'id')
    db.create_index('seattle_nextpoints', 'slot_id')
    db.create_index('seattle_nextpoints', 'direction')
    db.vacuum_analyze('public', 'seattle_nextpoints')

    info("Creating slots between signposts")
    db.query(sea.insert_slots_temp.format(offset=LINE_OFFSET))
    db.create_index('seattle_slots_temp', 'id')
    db.create_index('seattle_slots_temp', 'geom', index_type='gist')
    db.create_index('seattle_slots_temp', 'rules', index_type='gin')
    db.vacuum_analyze('public', 'seattle_slots_temp')

    if debug:  # the parameter, no longer shadowed by the logging helper
        info("Creating debug slots")
        db.query(sea.create_slots_for_debug.format(offset=LINE_OFFSET))
        db.create_index('seattle_slots_debug', 'pkid')
        db.create_index('seattle_slots_debug', 'geom', index_type='gist')
        db.vacuum_analyze('public', 'seattle_slots_debug')
def process_boston(debug=False):
    """
    Process Boston data and generate parking slots.

    debug -- when True, also create the boston_slots_debug table.
    """
    def info(msg):
        return Logger.info("Boston: {}".format(msg))

    # BUG FIX: this helper was named `debug`, shadowing the `debug`
    # parameter; `if debug:` below was then always truthy and debug slots
    # were created unconditionally.
    def debug_log(msg):
        return Logger.debug("Boston: {}".format(msg))

    def warning(msg):
        return Logger.warning("Boston: {}".format(msg))

    debug_log('Loading and translating rules')
    insert_rules('boston_rules_translation')
    db.vacuum_analyze('public', 'rules')

    info("Matching OSM roads with geobase")
    db.query(bos.create_roads_geobase)
    db.query(bos.match_roads_geobase.format(tbl="boston_geobase"))
    db.query(bos.match_roads_geobase.format(tbl="boston_metro_geobase"))
    db.create_index('boston_roads_geobase', 'id')
    db.create_index('boston_roads_geobase', 'roadsegment')
    db.create_index('boston_roads_geobase', 'osm_id')
    db.create_index('boston_roads_geobase', 'name')
    db.create_index('boston_roads_geobase', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'boston_roads_geobase')

    info("Creating sign table")
    db.query(bos.create_sign)
    info("Loading signs")
    db.query(bos.insert_sign)
    db.query(bos.insert_sign_cambridge)
    db.create_index('boston_sign', 'geom', index_type='gist')
    db.create_index('boston_sign', 'direction')
    db.create_index('boston_sign', 'signpost')
    db.vacuum_analyze('public', 'boston_sign')

    info("Creating sign posts")
    db.query(bos.create_signpost)
    db.query(bos.insert_signpost)
    db.create_index('boston_signpost', 'geom', index_type='gist')
    db.create_index('boston_signpost', 'geobase_id')
    db.query(bos.add_signposts_to_sign)
    db.vacuum_analyze('public', 'boston_signpost')

    info("Projecting signposts on road")
    duplicates = db.query(bos.project_signposts)
    if duplicates:
        warning("Duplicates found for projected signposts : {}"
                .format(str(duplicates)))
    db.create_index('boston_signpost_onroad', 'id')
    db.create_index('boston_signpost_onroad', 'road_id')
    db.create_index('boston_signpost_onroad', 'isleft')
    db.create_index('boston_signpost_onroad', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'boston_signpost_onroad')
    percent, total = db.query(bos.count_signpost_projected)[0]
    if percent < 100:
        warning("Only {:.0f}% of signposts have been bound to a road. Total is {}"
                .format(percent, total))
    db.query(bos.generate_signposts_orphans)
    info("Table 'boston_signpost_orphans' has been generated to check for orphans")

    info("Creating slots between signposts")
    db.query(bos.create_slots_likely)
    db.query(bos.insert_slots_likely.format(isleft=1))
    db.query(bos.insert_slots_likely.format(isleft=-1))
    db.create_index('boston_slots_likely', 'id')
    db.create_index('boston_slots_likely', 'signposts', index_type='gin')
    db.create_index('boston_slots_likely', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'boston_slots_likely')
    db.query(bos.create_nextpoints_for_signposts)
    db.create_index('boston_nextpoints', 'id')
    db.create_index('boston_nextpoints', 'slot_id')
    db.create_index('boston_nextpoints', 'direction')
    db.vacuum_analyze('public', 'boston_nextpoints')
    db.create_index('boston_slots_temp', 'id')
    db.create_index('boston_slots_temp', 'geom', index_type='gist')
    db.create_index('boston_slots_temp', 'rules', index_type='gin')
    db.query(bos.insert_slots_temp.format(offset=LINE_OFFSET))

    info("Creating and overlaying paid slots")
    db.query(bos.overlay_paid_rules)
    db.vacuum_analyze('public', 'boston_slots_temp')

    if debug:  # the parameter, no longer shadowed by the logging helper
        info("Creating debug slots")
        db.query(bos.create_slots_for_debug.format(offset=LINE_OFFSET))
        db.create_index('boston_slots_debug', 'pkid')
        db.create_index('boston_slots_debug', 'geom', index_type='gist')
        db.vacuum_analyze('public', 'boston_slots_debug')
def cleanup_table():
    """
    Drop the temporary tables left behind by the processing pipeline.
    """
    Logger.info("Cleanup schema")
    # temp tables shared by the whole pipeline
    shared_tables = ["bad_intersection", "way_intersection", "roads",
                     "signpost_onroad", "parking_lots_raw"]
    for table in shared_tables:
        db.query("DROP TABLE IF EXISTS {}".format(table))
    # per-city temp tables, named <city>_<suffix>
    city_suffixes = ["slots_likely", "slots_temp", "nextpoints", "paid_temp",
                     "signpost_temp", "paid_slots_raw", "bornes_raw",
                     "bornes_clustered"]
    for suffix in city_suffixes:
        for city in CITIES:
            db.query("DROP TABLE IF EXISTS {}_{}".format(city, suffix))
def process_osm():
    """
    Process OpenStreetMap data: filter ways, derive intersections and
    split the road network on them.
    """
    def info(msg):
        return Logger.info("OpenStreetMap: {}".format(msg))

    def debug(msg):
        return Logger.debug("OpenStreetMap: {}".format(msg))

    def warning(msg):
        return Logger.warning("OpenStreetMap: {}".format(msg))

    info("Filtering ways")
    db.query(osm.create_osm_ways)
    db.create_index('osm_ways', 'geom', index_type='gist')
    db.create_index('osm_ways', 'osm_id')
    db.create_index('osm_ways', 'name')

    info("Creating way intersections from planet lines")
    db.query(osm.create_way_intersection)
    db.create_index('way_intersection', 'way_id')
    db.create_index('way_intersection', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'way_intersection')
    removed = db.query(osm.remove_bad_intersection)
    if removed:
        debug("Removed {} bad intersections".format(len(removed)))

    info("Splitting ways on intersections")
    db.query(osm.split_osm_roads)
    for column in ('id', 'osm_id', 'name'):
        db.create_index('roads', column)
    db.create_index('roads', 'geom', index_type='gist')
    db.vacuum_analyze('public', 'roads')
def run(cities=CITIES, osm=False, debug=False):
    """
    Run the entire pipeline.

    cities -- iterable of city identifiers to process.
    osm    -- when True, (re)process the OpenStreetMap base data first.
    debug  -- when True, keep temp tables and build per-city debug slots.
    """
    Logger.debug("Loading extensions and custom functions")
    db.query("create extension if not exists fuzzystrmatch")
    db.query("create extension if not exists intarray")
    db.query(plfunctions.st_isleft_func)
    db.query(plfunctions.array_sort)
    db.query(plfunctions.get_max_range)

    if osm:
        process_osm()

    # create common tables
    db.query(common.create_rules)
    db.create_index('rules', 'code')
    db.query(common.create_slots)
    for city in cities:
        db.query(common.create_slots_temp.format(city=city))
        db.query(common.create_slots_partition.format(city=city))

    Logger.info("Processing parking lot / garage data")
    db.query(common.create_parking_lots)
    # raw lot CSVs exist only for these four cities
    for lot_city, lot_file in [("montreal", "lots_montreal.csv"),
                               ("quebec", "lots_quebec.csv"),
                               ("seattle", "lots_seattle.csv"),
                               ("boston", "lots_boston.csv")]:
        db.query(common.create_parking_lots_raw.format(city=lot_city))
        insert_raw_lots(lot_city, lot_file)
        insert_parking_lots(lot_city)
    db.create_index('parking_lots', 'id')
    db.create_index('parking_lots', 'city')
    db.create_index('parking_lots', 'geom', index_type='gist')
    db.create_index('parking_lots', 'agenda', index_type='gin')
    db.query("DROP TABLE IF EXISTS parking_lots_streetview;")
    insert_lots_streetview("lots_newyork_streetview.csv")

    # per-city processing, in the pipeline's canonical order
    for city_name, processor in [('montreal', process_montreal),
                                 ('quebec', process_quebec),
                                 ('newyork', process_newyork),
                                 ('seattle', process_seattle),
                                 ('boston', process_boston)]:
        if city_name in cities:
            processor(debug)

    Logger.info("Shorten slots that intersect with roads or other slots")
    for city in cities:
        db.query(common.cut_slots_crossing_roads.format(city=city, offset=LINE_OFFSET))
        db.query(common.cut_slots_crossing_slots.format(city=city))

    Logger.info("Aggregating like slots")
    for city in cities:
        db.create_index(city + '_slots', 'id')
        db.create_index(city + '_slots', 'geom', index_type='gist')
        db.create_index(city + '_slots', 'rules', index_type='gin')
        # Seattle geometry needs a looser merge tolerance
        db.query(common.aggregate_like_slots.format(
            city=city, within=3 if city == "seattle" else 0.1))
        db.query(common.create_client_data.format(city=city))
        db.vacuum_analyze('public', city + '_slots')

    Logger.info("Creating permit lists")
    db.query(common.create_permit_lists)
    for city in cities:
        db.query(common.insert_permit_lists.format(city=city))

    if not debug:
        cleanup_table()
def insert_rules(from_table):
    """
    Get rules from a city-specific source table (montreal, quebec, ...),
    group them into a simpler model and bulk-load them into the common
    rules table.
    """
    Logger.debug("Get rules from {} and simplify them".format(from_table))
    raw_rules = db.query(
        common.get_rules_from_source.format(source=from_table),
        namedtuple=True
    )
    grouped = group_rules(raw_rules)
    Logger.debug("Load rules into rules table")

    def _encode(val):
        # dict values are serialized to JSON; backslashes doubled for COPY
        if isinstance(val, dict):
            return json.dumps(val).replace('\\', '\\\\')
        return val

    rows = [[_encode(val) for val in rule._asdict().values()]
            for rule in grouped]
    db.copy_from('public', 'rules', common.rules_columns, rows)
def insert_raw_lots(city, filename):
    """Bulk-load the raw parking-lot CSV for *city* via server-side COPY."""
    csv_path = os.path.join(os.path.dirname(__file__), 'data', filename)
    db.query("""
        COPY {}_parking_lots (name, operator, address, description, lun_normal, mar_normal, mer_normal,
            jeu_normal, ven_normal, sam_normal, dim_normal, hourly_normal, daily_normal, max_normal,
            lun_special, mar_special, mer_special, jeu_special, ven_special, sam_special, dim_special,
            hourly_special, daily_special, max_special, lun_free, mar_free, mer_free, jeu_free,
            ven_free, sam_free, dim_free, daily_free, indoor, handicap, card, valet, lat, long,
            capacity, street_view_lat, street_view_long, street_view_head, street_view_id, active,
            partner_name, partner_id)
        FROM '{}'
        WITH CSV HEADER
    """.format(city, csv_path))
def insert_lots_streetview(filename):
    """Load street-view metadata for parking lots from a CSV via SQL script."""
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    script_path = os.path.join(data_dir, 'load_lots_streetview.sql')
    with open(script_path, 'rb') as infile:
        db.query(infile.read().format(os.path.join(data_dir, filename)))
    db.vacuum_analyze("public", "parking_lots_streetview")
def insert_parking_lots(city):
    # Transform raw <city>_parking_lots rows into the common parking_lots
    # table: build a JSON "agenda" of open/paid/free periods per weekday
    # (keys "1"=Monday .. "7"=Sunday), backfill "closed" periods for the
    # uncovered gaps, then emit one INSERT per lot.
    columns = ["city", "name", "operator", "address", "description", "agenda", "capacity", "attrs",
               "geom", "active", "street_view", "partner_name", "partner_id", "geojson"]
    # French day-name prefixes of the raw columns (Mon..Sun order)
    days = ["lun", "mar", "mer", "jeu", "ven", "sam", "dim"]
    lots, queries = [], []
    for row in db.query("""
        SELECT *, ST_Transform(ST_SetSRID(ST_MakePoint(long, lat), 4326), 3857) AS geom
        FROM {}_parking_lots
    """.format(city), namedtuple=True):
        # text fields: decode and double single quotes for SQL interpolation
        lot = [(x.decode('utf-8').replace("'", "''") if x else '') for x in [row.name, row.operator, row.address, row.description]]
        # Create pricing rules per time period the lot is open
        agenda = {str(y): [] for y in range(1, 8)}
        for x in range(1, 8):
            if getattr(row, days[x - 1] + "_normal"):
                y = getattr(row, days[x - 1] + "_normal")
                # raw value is "start,end" in decimal hours
                hours = [float(z) for z in y.split(",")]
                # overnight span (start > end): split onto the next day
                if hours != [0.0, 24.0] and hours[0] > hours[1]:
                    nextday = str(x + 1) if (x < 7) else "1"
                    agenda[nextday].append({"hours": [0.0, hours[1]], "max": row.max_normal or None,
                                            "hourly": row.hourly_normal or None, "daily": row.daily_normal or None})
                    hours = [hours[0], 24.0]
                agenda[str(x)].append({"hours": hours, "hourly": row.hourly_normal or None,
                                       "max": row.max_normal or None, "daily": row.daily_normal or None})
            if getattr(row, days[x - 1] + "_special"):
                y = getattr(row, days[x - 1] + "_special")
                hours = [float(z) for z in y.split(",")]
                if hours != [0.0, 24.0] and hours[0] > hours[1]:
                    nextday = str(x + 1) if (x < 7) else "1"
                    agenda[nextday].append({"hours": [0.0, hours[1]], "max": row.max_special or None,
                                            "hourly": row.hourly_special or None, "daily": row.daily_special or None})
                    hours = [hours[0], 24.0]
                agenda[str(x)].append({"hours": hours, "hourly": row.hourly_special or None,
                                       "max": row.max_special or None, "daily": row.daily_special or None})
            if getattr(row, days[x - 1] + "_free"):
                y = getattr(row, days[x - 1] + "_free")
                hours = [float(z) for z in y.split(",")]
                if hours != [0.0, 24.0] and hours[0] > hours[1]:
                    nextday = str(x + 1) if (x < 7) else "1"
                    agenda[nextday].append({"hours": [0.0, hours[1]], "max": None,
                                            "hourly": 0, "daily": row.daily_free or None})
                    hours = [hours[0], 24.0]
                agenda[str(x)].append({"hours": hours, "hourly": 0, "max": None,
                                       "daily": row.daily_free or None})
        # Create "closed" rules for periods not covered by an open rule
        for x in agenda:
            hours = sorted([y["hours"] for y in agenda[x]], key=lambda z: z[0])
            for i, y in enumerate(hours):
                starts = [z[0] for z in hours]
                if y[0] == 0.0:
                    continue
                # neighbours of this span in chronological order
                last_end = hours[i - 1][1] if not i == 0 else 0.0
                next_start = hours[i + 1][0] if not i == (len(hours) - 1) else 24.0
                if not last_end in starts:
                    agenda[x].append({"hours": [last_end, y[0]], "hourly": None, "max": None,
                                      "daily": None})
                if not next_start in starts and y[1] != 24.0:
                    agenda[x].append({"hours": [y[1], next_start], "hourly": None, "max": None,
                                      "daily": None})
            # no periods at all: the lot is closed that whole day
            if agenda[x] == []:
                agenda[x].append({"hours": [0.0, 24.0], "hourly": None, "max": None, "daily": None})
        # remaining positional values for the INSERT template below
        lot += [json.dumps(agenda), row.capacity or 0, json.dumps({"indoor": row.indoor,
                "handicap": row.handicap, "card": row.card, "valet": row.valet}), row.geom, row.active,
                row.street_view_head, row.street_view_id,
                "'{}'".format(row.partner_name) if row.partner_name else "NULL",
                "'{}'".format(row.partner_id) if row.partner_id else "NULL"]
        lots.append(lot)
    for x in lots:
        # x[-6] is the geometry value, reused for the geojson column
        queries.append("""
            INSERT INTO parking_lots ({}) VALUES ('{city}', '{}', '{}', '{}', '{}', '{}'::jsonb, {},
                '{}'::jsonb, '{}'::geometry, '{}', json_build_object('head', {}, 'id', '{}')::jsonb,
                {}, {}, ST_AsGeoJSON(ST_Transform('{geom}'::geometry, 4326))::jsonb)
        """.format(",".join(columns), *[y for y in x], city=city, geom=x[-6]))
    db.queries(queries)
def insert_dynamic_rules_seattle():
    """
    Load dynamic paid-parking rules for Seattle.

    Groups seattle_parklines rows sharing the same schedule and rates,
    builds one rule per distinct time span (up to three spans each for
    weekdays, Saturday and Sunday; contiguous spans with equal rates are
    merged), and bulk-inserts into `rules` and `seattle_sign_codes`.
    """
    paid_rules = []
    # Index map of each result row x:
    #   0 row_number, 1 elmntkey array,
    #   2-7  weekday start/end no1-3,  8-13 saturday start/end no1-3,
    #   14-19 sunday start/end no1-3,  20-28 rates (wkd1-3, sat1-3, sun1-3),
    #   29 time limit, 30 has RPZ spaces, 31 RPZ zone, 32 peak-hour text
    data = db.query("""
        SELECT ROW_NUMBER() OVER (ORDER BY wkd_start1), array_agg(elmntkey), wkd_start1,
            wkd_end1, wkd_start2, wkd_end2, wkd_start3, wkd_end3, sat_start1, sat_end1,
            sat_start2, sat_end2, sat_start3, sat_end3, sun_start1, sun_end1, sun_start2,
            sun_end2, sun_start3, sun_end3, wkd_rate1, wkd_rate2, wkd_rate3, sat_rate1,
            sat_rate2, sat_rate3, sun_rate1, sun_rate2, sun_rate3, parking_time_limit,
            rpz_spaces != 0, rpz_zone, peak_hour
        FROM seattle_parklines
        WHERE parking_category = 'Paid Parking'
        GROUP BY wkd_start1, wkd_end1, wkd_start2, wkd_end2, wkd_start3,
            wkd_end3, sat_start1, sat_end1, sat_start2, sat_end2, sat_start3, sat_end3,
            sun_start1, sun_end1, sun_start2, sun_end2, sun_start3, sun_end3, wkd_rate1,
            wkd_rate2, wkd_rate3, sat_rate1, sat_rate2, sat_rate3, sun_rate1, sun_rate2,
            sun_rate3, parking_time_limit, rpz_spaces != 0, rpz_zone, peak_hour
    """)
    for x in data:
        # True when span no2/no3 has been merged into the preceding span
        wkd2 = wkd3 = sat2 = sat3 = sun2 = sun3 = False
        if x[2] and x[3]:
            # weekday start/end times no1; merge contiguous equal-rate spans
            start, end = x[2], x[3]
            if x[4] and x[5] and x[4] == (end + 1) and x[20] == x[21]:
                end = x[5]
                wkd2 = True
            if x[6] and x[7] and x[6] == (end + 1) and x[21] == x[22]:
                end = x[7]
                wkd3 = True
            paid_rules.append(_dynrule(x, "MON-FRI", start, end, 1))
        if x[4] and x[5] and not wkd2:
            # weekday start/end times no2
            start, end = x[4], x[5]
            if x[6] and x[7] and x[6] == (end + 1) and x[21] == x[22]:
                end = x[7]
                wkd3 = True
            paid_rules.append(_dynrule(x, "MON-FRI", start, end, 2))
        if x[6] and x[7] and not wkd3:
            # weekday start/end times no3
            paid_rules.append(_dynrule(x, "MON-FRI", x[6], x[7], 3))
        if x[8] and x[9]:
            # saturday start/end times no1
            start, end = x[8], x[9]
            if x[10] and x[11] and x[10] == (end + 1) and x[23] == x[24]:
                end = x[11]
                sat2 = True
            if x[12] and x[13] and x[12] == (end + 1) and x[24] == x[25]:
                end = x[13]
                sat3 = True
            paid_rules.append(_dynrule(x, "SAT", start, end, 4))
        if x[10] and x[11] and not sat2:
            # saturday start/end times no2
            start, end = x[10], x[11]
            if x[12] and x[13] and x[12] == (end + 1) and x[24] == x[25]:
                end = x[13]
                sat3 = True
            paid_rules.append(_dynrule(x, "SAT", start, end, 5))
        if x[12] and x[13] and not sat3:
            # saturday start/end times no3
            # BUG FIX: previously passed stale `start`/`end` from an earlier
            # branch (possible NameError / wrong span); use this span's own
            # bounds, mirroring the weekday no3 branch.
            paid_rules.append(_dynrule(x, "SAT", x[12], x[13], 6))
        if x[14] and x[15]:
            # sunday start/end times no1
            start, end = x[14], x[15]
            if x[16] and x[17] and x[16] == (end + 1) and x[26] == x[27]:
                end = x[17]
                sun2 = True
            if x[18] and x[19] and x[18] == (end + 1) and x[27] == x[28]:
                end = x[19]
                sun3 = True
            paid_rules.append(_dynrule(x, "SUN", start, end, 7))
        if x[16] and x[17] and not sun2:
            # sunday start/end times no2
            start, end = x[16], x[17]
            if x[18] and x[19] and x[18] == (end + 1) and x[27] == x[28]:
                end = x[19]
                sun3 = True
            paid_rules.append(_dynrule(x, "SUN", start, end, 8))
        if x[18] and x[19] and not sun3:
            # sunday start/end times no3
            # BUG FIX: same stale `start`/`end` defect as the SAT no3 branch.
            paid_rules.append(_dynrule(x, "SUN", x[18], x[19], 9))
        if x[32]:
            # peak hour restriction, e.g. "4-6PM": no parking on weekdays
            insert_qry = "('{}', '{}', '{}'::jsonb, {}, ARRAY[{}]::varchar[], '{}', ARRAY{}::varchar[])"
            code, agenda = "SEA-PAID-{}-10".format(x[0]), {str(y): [] for y in range(1, 8)}
            for z in x[32].split(" "):
                for y in range(1, 6):
                    agenda[str(y)].append([tstr_to_float(z.split("-")[0] + z[-2:]),
                                           tstr_to_float(z.split("-")[1])])
            desc = "PEAK HOUR NO PARKING WEEKDAYS {}".format(x[32])
            paid_rules.append(insert_qry.format(code, desc, json.dumps(agenda), "NULL",
                                                "'peak_hour'", "", x[1]))
    db.query("""
        INSERT INTO rules (code, description, agenda, time_max_parking, restrict_types, permit_no)
        SELECT code, description, agenda, time_max_parking, restrict_types, permit_no
        FROM (VALUES {}) AS d(code, description, agenda, time_max_parking, restrict_types, permit_no, ids)
    """.format(",".join([x for x in paid_rules])))
    db.query("""
        INSERT INTO seattle_sign_codes (code, signs)
        SELECT code, ids
        FROM (VALUES {}) AS d(code, description, agenda, time_max_parking, restrict_types,
            permit_no, ids)
    """.format(",".join([x for x in paid_rules])))
def _dynrule(x, per, start, end, count):
    # Build one SQL VALUES-tuple string for a Seattle dynamic paid rule.
    #   x     -- grouped seattle_parklines row (see insert_dynamic_rules_seattle)
    #   per   -- period label: "MON-FRI", "SAT" or "SUN"
    #   start, end -- span bounds; divided by 60 below, so presumably minutes
    #                 after midnight -- TODO confirm against source data
    #   count -- rule sequence number; also selects the rate at x[19 + count]
    insert_qry = "('{}', '{}', '{}'::jsonb, {}, ARRAY[{}]::varchar[], '{}', ARRAY{}::varchar[])"
    code, agenda = "SEA-PAID-{}-{}".format(x[0], count), {str(y): [] for y in range(1,8)}
    if per == "MON-FRI":
        # ISO weekdays 1-5 all get the same span
        for y in range(1,6):
            agenda[str(y)].append([float(start) / 60.0, round(float(end) / 60.0)])
    else:
        agenda["6" if per == "SAT" else "7"].append([float(start) / 60.0, round(float(end) / 60.0)])
    desc = "PAID PARKING {}-{} {} ${}/hr".format(pretty_time(start), pretty_time(end), per,
                                                 "{0:.2f}".format(float(x[19 + count])))
    return insert_qry.format(code, desc, json.dumps(agenda), int(x[29]) if x[29] else "NULL",
"'paid'" + (",'permit'" if x[30] else ""), x[31] if x[31] else "", x[1]) | 0.493409 | 0.113481 |
from flask import Flask, request, jsonify, render_template, abort, Response
from . import shallow_backend
from . import suggest
app = Flask(__name__)
app.debug = True
# todo: 1. suggestions do not work
# todo: 2. adding corrections is still not implemeneted
# todo: 3. joining tokens across the lines has to be implemented
# todo: 4. commitable decorator
@app.route('/scriptorium')
def scriptorium():
    """Scriptorium page: list the titles of works being transcribed."""
    return render_template("scriptorium.html",
                           titles=shallow_backend.get_just_titles())
# FIXME - for nice frienldy urls: /tiro/<author>/<title>/page
@app.route('/tiro/<title>/<int:pagenumber>')
def tiro(title, pagenumber):
    """Show a single page of a transcription; 404 when it doesn't exist."""
    page = shallow_backend.get_page(title, pagenumber)
    if page:
        return render_template("tiro.html", page=page)
    abort(404)
# AJAX SECTION
@app.route("/suggest")
def suggest():
""" Ajax - receive incorrect form, suggest correction"""
incorrect = request.args.get('word')
suggestions = suggest.smart_suggest(incorrect)
return jsonify(suggestions)
@app.route("/update", methods=['POST'])
def update():
'''
Corrected version of the token provided manually by user
Rename the function.
'''
correct_form = request.form.get('correct_form')
word_id = request.form.get('word_id')
return shallow_backend.save_corrected(word_id, correct_form)
@app.route("/divide", methods=['POST'])
def divide():
"""
Fixme: use PUT method
:return:
"""
word_id = request.form.get('word_id'),
word = request.form.get('word')
return shallow_backend.divide_word(word_id, word)
@app.route("/join", methods=['POST'])
def join():
"""
Fixme: use PUT method
:return:
"""
word_id = request.form.get('word_id')
word = request.form.get('word')
return shallow_backend.join_word_with_next(word_id, word)
@app.route("/setcorrect", methods=['POST'])
def set_correct():
'''
User clicks: 'this word is correct' - we set corr to 1
'''
word_id = request.form.get('word_id')
return shallow_backend.set_correct(word_id)
@app.route("/setincorrect", methods=['POST'])
def setincorrect():
'''
User clicks: 'this word is not correct' - we set corr to 0
'''
word_id = request.form.get('word_id')
return shallow_backend.set_incorrect(word_id)
@app.route("/setpagination", methods=['POST'])
def setpagination():
'''
'''
word_id = request.form.get('word_id')
return shallow_backend.set_pagination(word_id)
@app.route("/remove", methods=['POST'])
def remove():
'''
Remove token
'''
word_id = request.form.get('word_id')
return shallow_backend.remove(word_id)
# ADMIN SECTION
@app.route("/corrections", methods=['GET'])
def get_corrections():
'''
Show all corrections - only to logged in user
'''
time_from = request.args.get('time_from')
time_to = request.args.get('time_to')
source = request.args.get('source')
corrections = shallow_backend.get_corrections(time_from=time_from,
time_to=time_to,
source=source)
return render_template("corrections.html", corrections=corrections)
@app.route("/rollback", methods=['POST'])
def rollback():
'''
Rollback correction
'''
word_id = request.form.get('word_id')
return shallow_backend.roll_back(word_id)
@app.errorhandler(404)
def page_not_found(error):
''' 404 '''
return render_template("404.html"), 404 | app.py | from flask import Flask, request, jsonify, render_template, abort, Response
from . import shallow_backend
from . import suggest
app = Flask(__name__)
app.debug = True
# todo: 1. suggestions do not work
# todo: 2. adding corrections is still not implemeneted
# todo: 3. joining tokens across the lines has to be implemented
# todo: 4. commitable decorator
@app.route('/scriptorium')
def scriptorium():
'''scriptorium page: returns titles of
works being transcribed'''
titles = shallow_backend.get_just_titles()
return render_template("scriptorium.html", titles=titles)
# FIXME - for nice frienldy urls: /tiro/<author>/<title>/page
@app.route('/tiro/<title>/<int:pagenumber>')
def tiro(title, pagenumber):
"""shows single page"""
page = shallow_backend.get_page(title, pagenumber)
if not page:
abort(404)
return render_template("tiro.html", page=page)
# AJAX SECTION
@app.route("/suggest")
def suggest():
""" Ajax - receive incorrect form, suggest correction"""
incorrect = request.args.get('word')
suggestions = suggest.smart_suggest(incorrect)
return jsonify(suggestions)
@app.route("/update", methods=['POST'])
def update():
'''
Corrected version of the token provided manually by user
Rename the function.
'''
correct_form = request.form.get('correct_form')
word_id = request.form.get('word_id')
return shallow_backend.save_corrected(word_id, correct_form)
@app.route("/divide", methods=['POST'])
def divide():
"""
Fixme: use PUT method
:return:
"""
word_id = request.form.get('word_id'),
word = request.form.get('word')
return shallow_backend.divide_word(word_id, word)
@app.route("/join", methods=['POST'])
def join():
"""
Fixme: use PUT method
:return:
"""
word_id = request.form.get('word_id')
word = request.form.get('word')
return shallow_backend.join_word_with_next(word_id, word)
@app.route("/setcorrect", methods=['POST'])
def set_correct():
'''
User clicks: 'this word is correct' - we set corr to 1
'''
word_id = request.form.get('word_id')
return shallow_backend.set_correct(word_id)
@app.route("/setincorrect", methods=['POST'])
def setincorrect():
'''
User clicks: 'this word is not correct' - we set corr to 0
'''
word_id = request.form.get('word_id')
return shallow_backend.set_incorrect(word_id)
@app.route("/setpagination", methods=['POST'])
def setpagination():
'''
'''
word_id = request.form.get('word_id')
return shallow_backend.set_pagination(word_id)
@app.route("/remove", methods=['POST'])
def remove():
'''
Remove token
'''
word_id = request.form.get('word_id')
return shallow_backend.remove(word_id)
# ADMIN SECTION
@app.route("/corrections", methods=['GET'])
def get_corrections():
'''
Show all corrections - only to logged in user
'''
time_from = request.args.get('time_from')
time_to = request.args.get('time_to')
source = request.args.get('source')
corrections = shallow_backend.get_corrections(time_from=time_from,
time_to=time_to,
source=source)
return render_template("corrections.html", corrections=corrections)
@app.route("/rollback", methods=['POST'])
def rollback():
'''
Rollback correction
'''
word_id = request.form.get('word_id')
return shallow_backend.roll_back(word_id)
@app.errorhandler(404)
def page_not_found(error):
''' 404 '''
return render_template("404.html"), 404 | 0.297164 | 0.084568 |
import csv
import datetime
import os
import roslib
import rospy
import rostopic
_node_name = 'logger_node'
"""Name of this node in the ROS system."""
class DataIn(object):
    """Receiver and buffer of a topic.

    Subscribes to a ROS topic (optionally addressing a single field) and
    keeps only the most recently received value.
    """
    def __init__(self, topic, name=None):
        """Constructor.

        topic -- topic including field to receive.
        name -- abbreviation for the topic used as column header in the CSV.
        """
        self.__topic = topic
        """Topic including field (full path, i.e., all namespaces included)."""
        # fall back to the full topic path when no abbreviation is given
        if name is None:
            self.__name = topic
        else:
            self.__name = name
        """Abbreviation for the topic used as column header."""
        self.__value = None  # no message received yet
        """Value of the input data."""
        # get the topic type and function to evaluate the field
        # wait until topic gets available (blocking=True blocks here)
        rospy.loginfo("DataIn '%s': await availability...", topic)
        topic_type, real_topic, field_eval = rostopic.get_topic_type(topic,
                                                                     blocking=True)
        if topic_type is None:
            raise Exception("Can not resolve topic type of {}.".format(topic))
        if field_eval is None:
            raise Exception("Can not resolve field of {}.".format(topic))
        # subscribe and save function for message evaluation
        data_class = roslib.message.get_message_class(topic_type)
        self.__subscriber = rospy.Subscriber(real_topic, data_class,
                                             self.__msg_callback)
        """Subscriber to a topic - to receive messages."""
        self.__field_eval = field_eval
        """Function returning the value of the specified field in a
        message."""
        rospy.loginfo("DataIn '%s': created.", topic)
    # NOTE(review): not a context manager - there is no __enter__ and the
    # signature lacks the (exc_type, exc_value, traceback) arguments, so
    # this must be called explicitly to unsubscribe.
    def __exit__(self):
        self.__subscriber.unregister()
    def __msg_callback(self, msg):
        """Called when a message of the topic is received."""
        # buffer only the latest field value; older values are overwritten
        self.__value = self.__field_eval(msg)
        rospy.logdebug("DataIn '%s' received: %f", self.__topic, self.__value)
    def __get_name(self):
        """Returns the name of the input."""
        return self.__name
    name = property(__get_name)
    def __get_value(self):
        """Returns the latest value received."""
        return self.__value
    value = property(__get_value)
class Logger(object):
    """Logs inputs to a CSV file.

    Writing starts only once every input has delivered at least one value.
    """

    def __init__(self, path, data, log_time=True):
        """Constructor.

        path -- path to CSV.
        data -- list of DataIn objects.
        log_time -- enables logging of timestamps.
        """
        self.__data = data          # DataIn objects to sample each cycle
        self.__path = path          # path to csv to write logs to
        self.__enabled = False      # enable write to csv once all inputs seen
        self.__log_time = log_time  # prepend a timestamp column
        # open csv file
        # BUG FIX: the file object was a local variable, so __exit__
        # crashed with NameError and the handle could never be closed;
        # keep it on the instance instead.
        self.__csvfile = open(path, 'w')
        self.__writer = csv.writer(self.__csvfile)
        # write header
        fieldnames = [d.name for d in self.__data]
        if self.__log_time:
            fieldnames.insert(0, "time")
        rospy.logdebug("CSV fields: %s.", fieldnames)
        fieldnames[0] = '%' + fieldnames[0]  # header is a comment
        self.__writer.writerow(fieldnames)
        rospy.loginfo("Logger will write to %s.",
                      os.path.abspath(self.__csvfile.name))

    def __exit__(self):
        # NOTE(review): without __enter__ this is not a context manager;
        # it must be called explicitly to close the CSV file.
        rospy.loginfo("Closing CSV file.")
        self.__csvfile.close()

    def log(self, event):
        """Timer callback: write one row with the latest input values."""
        values = [d.value for d in self.__data]
        if self.__enabled:
            if self.__log_time:
                curtime = rospy.Time.now()
                values.insert(0, "{:d}.{:09d}".format(curtime.secs,
                                                      curtime.nsecs))
            self.__writer.writerow(values)
            rospy.logdebug("New row: %s.", values)
        else:
            # enable logger as soon as all inputs received at least once
            if all(v is not None for v in values):
                rospy.loginfo("Start logging (next cycle).")
                self.__enabled = True
# main entry point of this node
if __name__ == '__main__':
    try:
        # setup ROS node
        rospy.init_node(_node_name)
        # params
        topics = rospy.get_param('~topics', None)  # mapping: column name -> topic/field
        rate = rospy.get_param('~rate', 10)        # logging frequency [Hz]
        # path ... create default string from current time
        now = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
        path = rospy.get_param('~path', now + ".csv")
        # create a port for each topic
        inputs = []
        try:
            for name, topic in topics.items():
                inputs.append(DataIn(topic, name=name))
        except Exception as e:
            raise RuntimeError("""Failed to subscribe to topics
            {}. {}""".format(topics, e))
        # initialize logger
        if len(inputs) == 0:
            raise RuntimeError("""No topics/inputs.""")
        logger = Logger(path, inputs)
        # create and start timer for logging
        pub_timer = rospy.Timer(rospy.Duration(1.0/rate), logger.log)
        """Timer for logging the received data periodically."""
        rospy.loginfo("Logger node initialized.")
        # loop over receive-log-sleep
        rospy.spin()
        # cleanup
        pub_timer.shutdown()
        rospy.sleep(0.5)
    except Exception as e:
        rospy.logerr('Logger node failed. %s', e)
        raise
    # NOTE(review): this clause appears unreachable - the Exception handler
    # above already matches ROSInterruptException and re-raises it.
    except rospy.ROSInterruptException:
pass | src/logger_node.py | import csv
import datetime
import os
import roslib
import rospy
import rostopic
_node_name = 'logger_node'
"""Name of this node in the ROS system."""
class DataIn(object):
"""Receiver and buffer of a topic."""
def __init__(self, topic, name=None):
"""Constructor.
topic -- topic including field to receive.
name -- abbreviation for the topic used as column header in the CSV.
"""
self.__topic = topic
"""Topic including field (full path, i.e., all namespaces included)."""
if name is None:
self.__name = topic
else:
self.__name = name
"""Abbreviation for the topic used as column header."""
self.__value = None # no message received yet
"""Value of the input data."""
# get the topic type and function to evaluate the field
# wait until topic gets available
rospy.loginfo("DataIn '%s': await availability...", topic)
topic_type, real_topic, field_eval = rostopic.get_topic_type(topic,
blocking=True)
if topic_type is None:
raise Exception("Can not resolve topic type of {}.".format(topic))
if field_eval is None:
raise Exception("Can not resolve field of {}.".format(topic))
# subscribe and save function for message evaluation
data_class = roslib.message.get_message_class(topic_type)
self.__subscriber = rospy.Subscriber(real_topic, data_class,
self.__msg_callback)
"""Subscriber to a topic - to receive messages."""
self.__field_eval = field_eval
"""Function returning the value of the specificed field in a
message."""
rospy.loginfo("DataIn '%s': created.", topic)
def __exit__(self):
self.__subscriber.unregister()
def __msg_callback(self, msg):
"""Called when a message of the topic is received."""
self.__value = self.__field_eval(msg)
rospy.logdebug("DataIn '%s' received: %f", self.__topic, self.__value)
def __get_name(self):
"""Returns the name of the input."""
return self.__name
name = property(__get_name)
def __get_value(self):
"""Returns the latest value received."""
return self.__value
value = property(__get_value)
class Logger(object):
"""Logs inputs to a CSV."""
def __init__(self, path, data, log_time=True):
"""Constructor.
path -- path to CSV.
data -- list of DataIn objects.
log_time -- enables logging of timestamps.
"""
self.__data = data # DataIn objects
self.__path = path # path to csv to write logs to
self.__writer = None # csv writer
self.__enabled = False # enable write to csv
self.__log_time = log_time
# open csv file
csvfile = open(path, 'w')
self.__writer = csv.writer(csvfile)
# write header
fieldnames = [d.name for d in self.__data]
if self.__log_time:
fieldnames.insert(0, "time")
rospy.logdebug("CSV fields: %s.", fieldnames)
fieldnames[0] = '%' + fieldnames[0] # header is a comment
self.__writer.writerow(fieldnames)
rospy.loginfo("Logger will write to %s.",
os.path.abspath(csvfile.name))
def __exit__(self):
print "close csv file"
csvfile.close()
def log(self, event):
values = [d.value for d in self.__data]
if self.__enabled:
if self.__log_time:
curtime = rospy.Time.now()
values.insert(0, "{:d}.{:09d}".format(curtime.secs,
curtime.nsecs))
self.__writer.writerow(values)
rospy.logdebug("New row: %s.", values)
else:
# enable logger as soon as all inputs received at least once
received = [1 for v in values if v is not None]
if sum(received) == len(values):
rospy.loginfo("Start logging (next cycle).")
self.__enabled = True
# main entry point of this node
if __name__ == '__main__':
try:
# setup ROS node
rospy.init_node(_node_name)
# params
topics = rospy.get_param('~topics', None)
rate = rospy.get_param('~rate', 10)
# path ... create default string from-ROS-time
now = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
path = rospy.get_param('~path', now + ".csv")
# create a port for each topic
inputs = []
try:
for name, topic in topics.items():
inputs.append(DataIn(topic, name=name))
except Exception as e:
raise RuntimeError("""Failed to subscribe to topics
{}. {}""".format(topics, e))
# initialize logger
if len(inputs) == 0:
raise RuntimeError("""No topics/inputs.""")
logger = Logger(path, inputs)
# create and start timer for logging
pub_timer = rospy.Timer(rospy.Duration(1.0/rate), logger.log)
"""Timer for logging the received data periodically."""
rospy.loginfo("Logger node initialized.")
# loop over receive-log-sleep
rospy.spin()
# cleanup
pub_timer.shutdown()
rospy.sleep(0.5)
except Exception as e:
rospy.logerr('Logger node failed. %s', e)
raise
except rospy.ROSInterruptException:
pass | 0.596551 | 0.20201 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from myutils.nn import LSTM, Linear
class TextRCNN(nn.Module):
"""Text RCNN model."""
def __init__(self, config, pretrained_emb):
super(TextRCNN, self).__init__()
self.config = config["arch"]["args"]
# word embedding layer
self.word_emb = nn.Embedding.from_pretrained(pretrained_emb, freeze=True)
# rnn layer
self.bi_rnn = LSTM(
input_size=self.config["word_dim"],
hidden_size=self.config["hidden_size"],
batch_first=True,
num_layers=1,
bidirectional=True,
dropout=self.config["dropout"]
)
# conv layer
self.conv_layer = nn.Sequential(
nn.Conv1d(
in_channels=self.config["hidden_size"] * 2 + self.config["word_dim"],
out_channels=self.config["hidden_size"],
kernel_size=self.config["filter_size"]
),
# nn.BatchNorm1d(self.config["hidden_size"]),
nn.ReLU(inplace=True)
)
# full-connected layer
self.fc = Linear(self.config["hidden_size"] * self.config["kmax_pooling"], self.config["n_classes"])
def kmax_pooling(self, x, dim=2, k=2):
"""k-max pooling"""
index = x.topk(k, dim=dim)[1].sort(dim=dim)[0]
return x.gather(dim, index)
def forward(self, data):
text, length = data # (b, seq_len), (b)
x0 = self.word_emb(text) # (b, seq_len, d)
x, h = self.bi_rnn((x0, length)) # (b, seq_len, 2*d), (b, 2*d)
x = torch.cat((x0, x), dim=-1).permute(0, 2, 1) # (b, seq_len, 3*d) --> (b, 3*d, seq_len)
# x = torch.tanh(self.conv(x)) # (b, d, seq_len - filter_size + 1)
x = self.conv_layer(x) # (b, d, seq_len - filter_size + 1)
x = self.kmax_pooling(x, dim=2, k=self.config["kmax_pooling"])
x = x.reshape(x.size()[0], -1) # (b, k*d)
x = self.fc(x)
_, pred = torch.max(x, dim=-1)
return x, pred | src/model/text_rcnn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from myutils.nn import LSTM, Linear
class TextRCNN(nn.Module):
"""Text RCNN model."""
def __init__(self, config, pretrained_emb):
super(TextRCNN, self).__init__()
self.config = config["arch"]["args"]
# word embedding layer
self.word_emb = nn.Embedding.from_pretrained(pretrained_emb, freeze=True)
# rnn layer
self.bi_rnn = LSTM(
input_size=self.config["word_dim"],
hidden_size=self.config["hidden_size"],
batch_first=True,
num_layers=1,
bidirectional=True,
dropout=self.config["dropout"]
)
# conv layer
self.conv_layer = nn.Sequential(
nn.Conv1d(
in_channels=self.config["hidden_size"] * 2 + self.config["word_dim"],
out_channels=self.config["hidden_size"],
kernel_size=self.config["filter_size"]
),
# nn.BatchNorm1d(self.config["hidden_size"]),
nn.ReLU(inplace=True)
)
# full-connected layer
self.fc = Linear(self.config["hidden_size"] * self.config["kmax_pooling"], self.config["n_classes"])
def kmax_pooling(self, x, dim=2, k=2):
"""k-max pooling"""
index = x.topk(k, dim=dim)[1].sort(dim=dim)[0]
return x.gather(dim, index)
def forward(self, data):
text, length = data # (b, seq_len), (b)
x0 = self.word_emb(text) # (b, seq_len, d)
x, h = self.bi_rnn((x0, length)) # (b, seq_len, 2*d), (b, 2*d)
x = torch.cat((x0, x), dim=-1).permute(0, 2, 1) # (b, seq_len, 3*d) --> (b, 3*d, seq_len)
# x = torch.tanh(self.conv(x)) # (b, d, seq_len - filter_size + 1)
x = self.conv_layer(x) # (b, d, seq_len - filter_size + 1)
x = self.kmax_pooling(x, dim=2, k=self.config["kmax_pooling"])
x = x.reshape(x.size()[0], -1) # (b, k*d)
x = self.fc(x)
_, pred = torch.max(x, dim=-1)
return x, pred | 0.944158 | 0.307397 |
from textx import metamodel_from_str, get_children_of_type
import numpy as np
import random
grammar = """
Model: commands*=GameCommand;
GameCommand: MoveCommand | ActionCommand;
MoveCommand: Left|Right|Up|Down;
ActionCommand: Reset | Exit;
Left: 'left' count=INT?;
Right: 'right' count=INT?;
Up: 'up' count=INT?;
Down: 'down' count=INT?;
Reset: 'reset';
Exit: 'exit';
""" # Only final data structures, a purely functional data structure is immutable
# COMO TAL EN PYTHON NO SE PUEDEN CREAR CONSTANTES DE FORMA NATIVA(SIN LIBRERIA) PERO ESTA VARIABLE ES UNA CONSTANTE EN FORMA LOGICA, ES INMUTABLE
def cname(o):
return o.__class__.__name__
class Coordinate(object):
"""Prueba de Federico"""
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return "{},{}".format(self.x, self.y)
def command_validator(model, finish, player, stop, game, size):
"""This gets the foobar
This really should have a full function definition, but I am too lazy.
>>> print get_foobar(10, 20)
30
>>> print get_foobar('a', 'b')
ab
Isn't that what you want?
"""
for command in model.commands:
_cname = cname(command)
delta = 1 if command.count == 0 else command.count # ESTO ES OTRO CONCEPTO YA QUE DELEGA EL FLUJO O CONDICIONES A UNA FUNCION... ESTA LINEA ES UNA FUNCION
if _cname == 'Left' and player.x - delta >= 0:
player.x = player.x - delta
elif _cname == 'Right' and player.x + delta <= size - 1:
player.x = player.x + delta
elif _cname == 'Up' and player.y - delta >= 0:
player.y = player.y - delta
elif _cname == 'Down' and player.y + delta <= size - 1:
player.y = player.y + delta
else:
if command == 'reset':
game, finish, player = create_game(board_size)
elif command == 'exit':
print("Exiting...")
stop = True
game = np.zeros((board_size, board_size))
game[player.y][player.x] = '1'
game[finish.y][finish.x] = '2'
if player.x == finish.x and player.y == finish.y:
stop = True
print("Victory!")
return finish, player, stop, game
def create_game(board_size):
"""This gets the foobar
This really should have a full function definition, but I am too lazy.
>>> print get_foobar(10, 20)
30
>>> print get_foobar('a', 'b')
ab
Isn't that what you want?
"""
game = np.zeros((board_size, board_size))
finish = Coordinate(random.randint(0, board_size - 1), random.randint(0, board_size - 1))
player = Coordinate(random.randint(0, board_size - 1), random.randint(0, board_size - 1))
game[player.y][player.x] = '1'
game[finish.y][finish.x] = '2'
return game, finish, player
mm = metamodel_from_str(grammar)
board_size = 10
# Side effects free functions: this is a pure function, the variables games, finis, player are mutable but not
# outside their context???
game, finish, player = create_game(board_size)
stop = False
while not stop:
print(game)
print("\nMove single cell: '<left, right, up, down>'")
print("Move multiple cells: '<left, right, up, down> XCELLS'")
print("Reset game: 'reset'")
print("Exit game: 'exit'")
command = input('Enter command: ')
# Functions as parameters and return values (F(G(x))) ???
finish, player, stop, game = command_validator(mm.model_from_str(command), finish, player, stop, game, board_size) | src/main/python/dsl.py |
from textx import metamodel_from_str, get_children_of_type
import numpy as np
import random
grammar = """
Model: commands*=GameCommand;
GameCommand: MoveCommand | ActionCommand;
MoveCommand: Left|Right|Up|Down;
ActionCommand: Reset | Exit;
Left: 'left' count=INT?;
Right: 'right' count=INT?;
Up: 'up' count=INT?;
Down: 'down' count=INT?;
Reset: 'reset';
Exit: 'exit';
""" # Only final data structures, a purely functional data structure is immutable
# COMO TAL EN PYTHON NO SE PUEDEN CREAR CONSTANTES DE FORMA NATIVA(SIN LIBRERIA) PERO ESTA VARIABLE ES UNA CONSTANTE EN FORMA LOGICA, ES INMUTABLE
def cname(o):
return o.__class__.__name__
class Coordinate(object):
"""Prueba de Federico"""
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return "{},{}".format(self.x, self.y)
def command_validator(model, finish, player, stop, game, size):
"""This gets the foobar
This really should have a full function definition, but I am too lazy.
>>> print get_foobar(10, 20)
30
>>> print get_foobar('a', 'b')
ab
Isn't that what you want?
"""
for command in model.commands:
_cname = cname(command)
delta = 1 if command.count == 0 else command.count # ESTO ES OTRO CONCEPTO YA QUE DELEGA EL FLUJO O CONDICIONES A UNA FUNCION... ESTA LINEA ES UNA FUNCION
if _cname == 'Left' and player.x - delta >= 0:
player.x = player.x - delta
elif _cname == 'Right' and player.x + delta <= size - 1:
player.x = player.x + delta
elif _cname == 'Up' and player.y - delta >= 0:
player.y = player.y - delta
elif _cname == 'Down' and player.y + delta <= size - 1:
player.y = player.y + delta
else:
if command == 'reset':
game, finish, player = create_game(board_size)
elif command == 'exit':
print("Exiting...")
stop = True
game = np.zeros((board_size, board_size))
game[player.y][player.x] = '1'
game[finish.y][finish.x] = '2'
if player.x == finish.x and player.y == finish.y:
stop = True
print("Victory!")
return finish, player, stop, game
def create_game(board_size):
"""This gets the foobar
This really should have a full function definition, but I am too lazy.
>>> print get_foobar(10, 20)
30
>>> print get_foobar('a', 'b')
ab
Isn't that what you want?
"""
game = np.zeros((board_size, board_size))
finish = Coordinate(random.randint(0, board_size - 1), random.randint(0, board_size - 1))
player = Coordinate(random.randint(0, board_size - 1), random.randint(0, board_size - 1))
game[player.y][player.x] = '1'
game[finish.y][finish.x] = '2'
return game, finish, player
mm = metamodel_from_str(grammar)
board_size = 10
# Side effects free functions: this is a pure function, the variables games, finis, player are mutable but not
# outside their context???
game, finish, player = create_game(board_size)
stop = False
while not stop:
print(game)
print("\nMove single cell: '<left, right, up, down>'")
print("Move multiple cells: '<left, right, up, down> XCELLS'")
print("Reset game: 'reset'")
print("Exit game: 'exit'")
command = input('Enter command: ')
# Functions as parameters and return values (F(G(x))) ???
finish, player, stop, game = command_validator(mm.model_from_str(command), finish, player, stop, game, board_size) | 0.52683 | 0.227148 |
from __future__ import annotations
from typing import Dict, Set, List, Tuple, Optional, Any
import enum
import gc
import math
import sys
import pytest
import msgspec
class FruitInt(enum.IntEnum):
APPLE = 1
BANANA = 2
class FruitStr(enum.Enum):
APPLE = "apple"
BANANA = "banana"
class Person(msgspec.Struct):
first: str
last: str
age: int
prefect: bool = False
class Node(msgspec.Struct):
left: Optional[Node] = None
right: Optional[Node] = None
INTS = [
-(2 ** 63),
-(2 ** 31 + 1),
-(2 ** 31),
-(2 ** 15 + 1),
-(2 ** 15),
-(2 ** 7 + 1),
-(2 ** 7),
-(2 ** 5 + 1),
-(2 ** 5),
-1,
0,
1,
2 ** 7 - 1,
2 ** 7,
2 ** 8 - 1,
2 ** 8,
2 ** 16 - 1,
2 ** 16,
2 ** 32 - 1,
2 ** 32,
2 ** 64 - 1,
]
FLOATS = [
-1.5,
0.0,
1.5,
-float("inf"),
float("inf"),
float("nan"),
sys.float_info.max,
sys.float_info.min,
-sys.float_info.max,
-sys.float_info.min,
]
SIZES = [0, 1, 31, 32, 2 ** 8 - 1, 2 ** 8, 2 ** 16 - 1, 2 ** 16]
def assert_eq(x, y):
if isinstance(x, float) and math.isnan(x):
assert math.isnan(y)
else:
assert x == y
class TestEncodeFunction:
def test_encode(self):
dec = msgspec.Decoder()
assert dec.decode(msgspec.encode(1)) == 1
def test_encode_error(self):
with pytest.raises(TypeError):
msgspec.encode(object())
def test_encode_large_object(self):
"""Check that buffer resize works"""
data = b"x" * 4097
dec = msgspec.Decoder()
assert dec.decode(msgspec.encode(data)) == data
def test_encode_no_default(self):
class Foo:
pass
with pytest.raises(
TypeError, match="Encoding objects of type Foo is unsupported"
):
msgspec.encode(Foo())
def test_encode_default(self):
unsupported = object()
def default(x):
assert x is unsupported
return "hello"
orig_refcount = sys.getrefcount(default)
res = msgspec.encode(unsupported, default=default)
assert msgspec.encode("hello") == res
assert sys.getrefcount(default) == orig_refcount
def test_encode_default_errors(self):
def default(x):
raise TypeError("bad")
orig_refcount = sys.getrefcount(default)
with pytest.raises(TypeError, match="bad"):
msgspec.encode(object(), default=default)
assert sys.getrefcount(default) == orig_refcount
def test_encode_parse_arguments_errors(self):
with pytest.raises(TypeError, match="Missing 1 required argument"):
msgspec.encode()
with pytest.raises(TypeError, match="Extra positional arguments"):
msgspec.encode(1, lambda x: None)
with pytest.raises(TypeError, match="Extra positional arguments"):
msgspec.encode(1, 2, 3)
with pytest.raises(TypeError, match="Invalid keyword argument 'bad'"):
msgspec.encode(1, bad=1)
with pytest.raises(TypeError, match="Extra keyword arguments"):
msgspec.encode(1, default=lambda x: None, extra="extra")
class TestDecodeFunction:
def setup(self):
self.buf = msgspec.encode([1, 2, 3])
def test_decode(self):
assert msgspec.decode(self.buf) == [1, 2, 3]
def test_decode_type_keyword(self):
assert msgspec.decode(self.buf, type=List[int]) == [1, 2, 3]
with pytest.raises(msgspec.DecodingError):
assert msgspec.decode(self.buf, type=List[str])
def test_decode_type_any(self):
assert msgspec.decode(self.buf, type=Any) == [1, 2, 3]
def test_decode_invalid_type(self):
with pytest.raises(TypeError, match="Type '1' is not supported"):
msgspec.decode(self.buf, type=1)
def test_decode_invalid_buf(self):
with pytest.raises(TypeError):
msgspec.decode(1)
def test_decode_parse_arguments_errors(self):
with pytest.raises(TypeError, match="Missing 1 required argument"):
msgspec.decode()
with pytest.raises(TypeError, match="Extra positional arguments"):
msgspec.decode(self.buf, List[int])
with pytest.raises(TypeError, match="Extra positional arguments"):
msgspec.decode(self.buf, 2, 3)
with pytest.raises(TypeError, match="Invalid keyword argument 'bad'"):
msgspec.decode(self.buf, bad=1)
with pytest.raises(TypeError, match="Extra keyword arguments"):
msgspec.decode(self.buf, type=List[int], extra=1)
class TestEncoderMisc:
@pytest.mark.parametrize("x", [-(2 ** 63) - 1, 2 ** 64])
def test_encode_integer_limits(self, x):
enc = msgspec.Encoder()
with pytest.raises(OverflowError):
enc.encode(x)
def rec_obj1(self):
o = []
o.append(o)
return o
def rec_obj2(self):
o = ([],)
o[0].append(o)
return o
def rec_obj3(self):
o = {}
o["a"] = o
return o
def rec_obj4(self):
class Box(msgspec.Struct):
a: "Box"
o = Box(None)
o.a = o
return o
@pytest.mark.parametrize("case", [1, 2, 3, 4])
def test_encode_infinite_recursive_object_errors(self, case):
enc = msgspec.Encoder()
o = getattr(self, "rec_obj%d" % case)()
with pytest.raises(RecursionError):
enc.encode(o)
def test_getsizeof(self):
a = sys.getsizeof(msgspec.Encoder(write_buffer_size=64))
b = sys.getsizeof(msgspec.Encoder(write_buffer_size=128))
assert b > a
def test_encode_no_default(self):
class Foo:
pass
enc = msgspec.Encoder()
assert enc.default is None
with pytest.raises(
TypeError, match="Encoding objects of type Foo is unsupported"
):
enc.encode(Foo())
def test_encode_default(self):
unsupported = object()
def default(x):
assert x is unsupported
return "hello"
orig_refcount = sys.getrefcount(default)
enc = msgspec.Encoder(default=default)
assert enc.default is default
assert sys.getrefcount(enc.default) == orig_refcount + 2
assert sys.getrefcount(default) == orig_refcount + 1
res = enc.encode(unsupported)
assert enc.encode("hello") == res
del enc
assert sys.getrefcount(default) == orig_refcount
def test_encode_default_errors(self):
def default(x):
raise TypeError("bad")
enc = msgspec.Encoder(default=default)
with pytest.raises(TypeError, match="bad"):
enc.encode(object())
def test_encode_default_recurses(self):
class Node:
def __init__(self, a):
self.a = a
def default(x):
return {"type": "Node", "a": x.a}
enc = msgspec.Encoder(default=default)
msg = enc.encode(Node(Node(1)))
res = msgspec.decode(msg)
assert res == {"type": "Node", "a": {"type": "Node", "a": 1}}
def test_encode_default_recursion_error(self):
enc = msgspec.Encoder(default=lambda x: x)
with pytest.raises(RecursionError):
enc.encode(object())
class TestDecoderMisc:
def test_decoder_type_attribute(self):
dec = msgspec.Decoder()
assert dec.type is Any
dec = msgspec.Decoder(int)
assert dec.type is int
@pytest.mark.parametrize("typ, typstr", [(None, "None"), (Any, "Any")])
def test_decoder_none_any_repr(self, typ, typstr):
dec = msgspec.Decoder(typ)
assert repr(dec) == f"Decoder({typstr})"
# Optionality of None/Any doesn't change things
dec = msgspec.Decoder(Optional[typ])
assert repr(dec) == f"Decoder({typstr})"
@pytest.mark.parametrize(
"typ, typstr",
[
(bool, "bool"),
(int, "int"),
(float, "float"),
(str, "str"),
(bytes, "bytes"),
(bytearray, "bytearray"),
(Dict, "Dict[Any, Any]"),
(Dict[int, str], "Dict[int, str]"),
(List, "List[Any]"),
(List[Optional[int]], "List[Optional[int]]"),
(Set, "Set[Any]"),
(Set[Optional[int]], "Set[Optional[int]]"),
(Tuple, "Tuple[Any, ...]"),
(Tuple[Optional[int], ...], "Tuple[Optional[int], ...]"),
(Tuple[int, str], "Tuple[int, str]"),
(Person, "Person"),
(FruitInt, "FruitInt"),
(FruitStr, "FruitStr"),
(List[Optional[Dict[str, Person]]], "List[Optional[Dict[str, Person]]]"),
],
)
def test_decoder_repr(self, typ, typstr):
dec = msgspec.Decoder(typ)
assert repr(dec) == f"Decoder({typstr})"
dec = msgspec.Decoder(Optional[typ])
assert repr(dec) == f"Decoder(Optional[{typstr}])"
def test_decoder_unsupported_type(self):
with pytest.raises(TypeError):
msgspec.Decoder(1)
with pytest.raises(TypeError):
msgspec.Decoder(slice)
def test_decoder_validates_struct_definition_unsupported_types(self):
"""Struct definitions aren't validated until first use"""
class Test(msgspec.Struct):
a: slice
with pytest.raises(TypeError):
msgspec.Decoder(Test)
class TestTypedDecoder:
def check_unexpected_type(self, dec_type, val, msg):
dec = msgspec.Decoder(dec_type)
s = msgspec.Encoder().encode(val)
with pytest.raises(msgspec.DecodingError, match=msg):
dec.decode(s)
def test_none(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(None)
assert dec.decode(enc.encode(None)) is None
with pytest.raises(msgspec.DecodingError, match="expected `None`"):
assert dec.decode(enc.encode(1))
@pytest.mark.parametrize("x", [False, True])
def test_bool(self, x):
enc = msgspec.Encoder()
dec = msgspec.Decoder(bool)
assert dec.decode(enc.encode(x)) is x
def test_bool_unexpected_type(self):
self.check_unexpected_type(bool, "a", "expected `bool`")
@pytest.mark.parametrize("x", INTS)
def test_int(self, x):
enc = msgspec.Encoder()
dec = msgspec.Decoder(int)
assert dec.decode(enc.encode(x)) == x
def test_int_unexpected_type(self):
self.check_unexpected_type(int, "a", "expected `int`")
@pytest.mark.parametrize("x", FLOATS + INTS)
def test_float(self, x):
enc = msgspec.Encoder()
dec = msgspec.Decoder(float)
res = dec.decode(enc.encode(x))
sol = float(x)
if math.isnan(sol):
assert math.isnan(res)
else:
assert res == sol
def test_float_unexpected_type(self):
self.check_unexpected_type(float, "a", "expected `float`")
@pytest.mark.parametrize("size", SIZES)
def test_str(self, size):
enc = msgspec.Encoder()
dec = msgspec.Decoder(str)
x = "a" * size
res = dec.decode(enc.encode(x))
assert res == x
def test_str_unexpected_type(self):
self.check_unexpected_type(str, 1, "expected `str`")
@pytest.mark.parametrize("size", SIZES)
def test_bytes(self, size):
enc = msgspec.Encoder()
dec = msgspec.Decoder(bytes)
x = b"a" * size
res = dec.decode(enc.encode(x))
assert isinstance(res, bytes)
assert res == x
def test_bytes_unexpected_type(self):
self.check_unexpected_type(bytes, 1, "expected `bytes`")
@pytest.mark.parametrize("size", SIZES)
def test_bytearray(self, size):
enc = msgspec.Encoder()
dec = msgspec.Decoder(bytearray)
x = bytearray(size)
res = dec.decode(enc.encode(x))
assert isinstance(res, bytearray)
assert res == x
def test_bytearray_unexpected_type(self):
self.check_unexpected_type(bytearray, 1, "expected `bytearray`")
@pytest.mark.parametrize("size", SIZES)
def test_list_lengths(self, size):
enc = msgspec.Encoder()
dec = msgspec.Decoder(list)
x = list(range(size))
res = dec.decode(enc.encode(x))
assert res == x
@pytest.mark.parametrize("typ", [list, List, List[Any]])
def test_list_any(self, typ):
enc = msgspec.Encoder()
dec = msgspec.Decoder(typ)
x = [1, "two", b"three"]
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `list`"):
dec.decode(enc.encode(1))
def test_list_typed(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(List[int])
x = [1, 2, 3]
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `int`"):
dec.decode(enc.encode([1, 2, "three"]))
@pytest.mark.parametrize("size", SIZES)
def test_set_lengths(self, size):
enc = msgspec.Encoder()
dec = msgspec.Decoder(set)
x = set(range(size))
res = dec.decode(enc.encode(x))
assert res == x
@pytest.mark.parametrize("typ", [set, Set, Set[Any]])
def test_set_any(self, typ):
enc = msgspec.Encoder()
dec = msgspec.Decoder(typ)
x = {1, "two", b"three"}
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `set`"):
dec.decode(enc.encode(1))
def test_set_typed(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Set[int])
x = {1, 2, 3}
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `int`"):
dec.decode(enc.encode({1, 2, "three"}))
@pytest.mark.parametrize("size", SIZES)
def test_vartuple_lengths(self, size):
enc = msgspec.Encoder()
dec = msgspec.Decoder(tuple)
x = tuple(range(size))
res = dec.decode(enc.encode(x))
assert res == x
@pytest.mark.parametrize("typ", [tuple, Tuple, Tuple[Any, ...]])
def test_vartuple_any(self, typ):
enc = msgspec.Encoder()
dec = msgspec.Decoder(typ)
x = (1, "two", b"three")
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `tuple`"):
dec.decode(enc.encode(1))
def test_vartuple_typed(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Tuple[int, ...])
x = (1, 2, 3)
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `int`"):
dec.decode(enc.encode((1, 2, "three")))
def test_fixtuple_any(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Tuple[Any, Any, Any])
x = (1, "two", b"three")
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `tuple`"):
dec.decode(enc.encode(1))
with pytest.raises(
msgspec.DecodingError,
match=r"Error decoding `Tuple\[Any, Any, Any\]`: expected tuple of length 3, got 2",
):
dec.decode(enc.encode((1, 2)))
def test_fixtuple_typed(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Tuple[int, str, bytes])
x = (1, "two", b"three")
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `bytes`"):
dec.decode(enc.encode((1, "two", "three")))
with pytest.raises(
msgspec.DecodingError,
match=r"Error decoding `Tuple\[int, str, bytes\]`: expected tuple of length 3, got 2",
):
dec.decode(enc.encode((1, 2)))
@pytest.mark.parametrize("size", SIZES)
def test_dict_lengths(self, size):
enc = msgspec.Encoder()
dec = msgspec.Decoder(dict)
x = {i: i for i in range(size)}
res = dec.decode(enc.encode(x))
assert res == x
@pytest.mark.parametrize("typ", [dict, Dict, Dict[Any, Any]])
def test_dict_any_any(self, typ):
enc = msgspec.Encoder()
dec = msgspec.Decoder(typ)
x = {1: "one", "two": 2, b"three": 3.0}
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `dict`"):
dec.decode(enc.encode(1))
def test_dict_any_val(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Dict[str, Any])
x = {"a": 1, "b": "two", "c": b"three"}
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `str`"):
dec.decode(enc.encode({1: 2}))
def test_dict_any_key(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Dict[Any, str])
x = {1: "a", "two": "b", b"three": "c"}
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `str`"):
dec.decode(enc.encode({1: 2}))
def test_dict_typed(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Dict[str, int])
x = {"a": 1, "b": 2}
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `str`"):
dec.decode(enc.encode({1: 2}))
with pytest.raises(msgspec.DecodingError, match="expected `int`"):
dec.decode(enc.encode({"a": "two"}))
def test_enum(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(FruitStr)
a = enc.encode(FruitStr.APPLE)
assert enc.encode("APPLE") == a
assert dec.decode(a) == FruitStr.APPLE
with pytest.raises(msgspec.DecodingError, match="truncated"):
dec.decode(a[:-2])
with pytest.raises(
msgspec.DecodingError, match="Error decoding enum `FruitStr`"
):
dec.decode(enc.encode("MISSING"))
with pytest.raises(msgspec.DecodingError):
dec.decode(enc.encode(1))
def test_int_enum(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(FruitInt)
a = enc.encode(FruitInt.APPLE)
assert enc.encode(1) == a
assert dec.decode(a) == FruitInt.APPLE
with pytest.raises(msgspec.DecodingError, match="truncated"):
dec.decode(a[:-2])
with pytest.raises(
msgspec.DecodingError, match="Error decoding enum `FruitInt`"
):
dec.decode(enc.encode(1000))
with pytest.raises(msgspec.DecodingError):
dec.decode(enc.encode("INVALID"))
def test_struct(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Person)
x = Person(first="harry", last="potter", age=13)
a = enc.encode(x)
assert (
enc.encode(
{"first": "harry", "last": "potter", "age": 13, "prefect": False}
)
== a
)
assert dec.decode(a) == x
with pytest.raises(msgspec.DecodingError, match="truncated"):
dec.decode(a[:-2])
with pytest.raises(msgspec.DecodingError, match="expected `struct`"):
dec.decode(enc.encode(1))
with pytest.raises(
msgspec.DecodingError,
match=r"Error decoding `Person` field `first` \(`str`\): expected `str`, got `int`",
):
dec.decode(enc.encode({1: "harry"}))
def test_struct_field_wrong_type(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Person)
bad = enc.encode({"first": "harry", "last": "potter", "age": "thirteen"})
with pytest.raises(msgspec.DecodingError, match="expected `int`"):
dec.decode(bad)
def test_struct_missing_fields(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Person)
bad = enc.encode({"first": "harry", "last": "potter"})
with pytest.raises(msgspec.DecodingError, match="missing required field `age`"):
dec.decode(bad)
bad = enc.encode({})
with pytest.raises(
msgspec.DecodingError, match="missing required field `first`"
):
dec.decode(bad)
@pytest.mark.parametrize(
"extra", [None, False, True, 1, 2.0, "three", b"four", [1, 2], {3: 4}]
)
def test_struct_ignore_extra_fields(self, extra):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Person)
a = enc.encode(
{
"extra1": extra,
"first": "harry",
"extra2": extra,
"last": "potter",
"age": 13,
"extra3": extra,
}
)
res = dec.decode(a)
assert res == Person("harry", "potter", 13)
def test_struct_defaults_missing_fields(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Person)
a = enc.encode({"first": "harry", "last": "potter", "age": 13})
res = dec.decode(a)
assert res == Person("harry", "potter", 13)
assert res.prefect is False
def test_struct_gc_maybe_untracked_on_decode(self):
class Test(msgspec.Struct):
x: Any
y: Any
z: Tuple = ()
enc = msgspec.Encoder()
dec = msgspec.Decoder(List[Test])
ts = [
Test(1, 2),
Test(3, "hello"),
Test([], []),
Test({}, {}),
Test(None, None, ()),
]
a, b, c, d, e = dec.decode(enc.encode(ts))
assert not gc.is_tracked(a)
assert not gc.is_tracked(b)
assert gc.is_tracked(c)
assert gc.is_tracked(d)
assert not gc.is_tracked(e)
def test_struct_recursive_definition(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Node)
x = Node(Node(Node(), Node(Node())))
s = enc.encode(x)
res = dec.decode(s)
assert res == x
@pytest.mark.parametrize(
"typ, value",
[
(bool, False),
(bool, True),
(int, 1),
(float, 2.5),
(str, "a"),
(bytes, b"a"),
(bytearray, bytearray(b"a")),
(FruitInt, FruitInt.APPLE),
(FruitStr, FruitStr.APPLE),
(Person, Person("harry", "potter", 13)),
(list, [1]),
(set, {1}),
(tuple, (1, 2)),
(Tuple[int, int], (1, 2)),
(dict, {1: 2}),
],
)
def test_optional(self, typ, value):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Optional[typ])
s = enc.encode(value)
s2 = enc.encode(None)
assert dec.decode(s) == value
assert dec.decode(s2) is None
dec = msgspec.Decoder(typ)
with pytest.raises(msgspec.DecodingError):
dec.decode(s2)
@pytest.mark.parametrize(
"typ, value",
[
(List[Optional[int]], [1, None]),
(Tuple[Optional[int], int], (None, 1)),
(Set[Optional[int]], {1, None}),
(Dict[str, Optional[int]], {"a": 1, "b": None}),
(Dict[Optional[str], int], {"a": 1, None: 2}),
],
)
def test_optional_nested(self, typ, value):
enc = msgspec.Encoder()
dec = msgspec.Decoder(typ)
s = enc.encode(value)
assert dec.decode(s) == value
def test_decoding_error_no_struct_toplevel(self):
b = msgspec.Encoder().encode([{"a": 1}])
dec = msgspec.Decoder(List[Dict[str, str]])
with pytest.raises(
msgspec.DecodingError,
match=r"Error decoding `List\[Dict\[str, str\]\]`: expected `str`, got `int`",
):
dec.decode(b)
class CommonTypeTestBase:
"""Test msgspec untyped encode/decode"""
def test_none(self):
self.check(None)
@pytest.mark.parametrize("x", [False, True])
def test_bool(self, x):
self.check(x)
@pytest.mark.parametrize("x", INTS)
def test_int(self, x):
self.check(x)
@pytest.mark.parametrize("x", FLOATS)
def test_float(self, x):
self.check(x)
@pytest.mark.parametrize("size", SIZES)
def test_str(self, size):
self.check(" " * size)
@pytest.mark.parametrize("size", SIZES)
def test_bytes(self, size):
self.check(b" " * size)
@pytest.mark.parametrize("size", SIZES)
def test_dict(self, size):
self.check({str(i): i for i in range(size)})
@pytest.mark.parametrize("size", SIZES)
def test_list(self, size):
self.check(list(range(size)))
class TestDecodeArrayTypeUsesTupleIfHashableRequired:
def test_decode_tuple_dict_keys_as_tuples(self):
orig = {(1, 2): [1, 2, [3, 4]], (1, (2, 3)): [4, 5, 6]}
data = msgspec.encode(orig)
out = msgspec.decode(data)
assert orig == out
@pytest.mark.parametrize(
"typ",
[
Dict[Tuple[int, Tuple[int, int]], List[int]],
Dict[Tuple[int, Tuple[int, ...]], Any],
Dict[Tuple, List[int]],
Dict[Tuple[Any, ...], Any],
Dict[Tuple[Any, Any], Any],
],
)
def test_decode_dict_key_status_forwarded_through_typed_tuples(self, typ):
orig = {(1, (2, 3)): [1, 2, 3]}
data = msgspec.encode(orig)
out = msgspec.Decoder(typ).decode(data)
assert orig == out
def test_decode_tuple_set_keys_as_tuples(self):
orig = {(1, 2), (3, (4, 5)), 6}
data = msgspec.encode(orig)
out = msgspec.decode(data, type=set)
assert orig == out
def test_decode_hashable_struct_in_key(self):
class Test(msgspec.Struct):
data: List[int]
def __hash__(self):
return hash(tuple(self.data))
orig = {(1, Test([1, 2])): [1, 2]}
data = msgspec.encode(orig)
out = msgspec.Decoder(Dict[Tuple[int, Test], List[int]]).decode(data)
assert orig == out
class TestUntypedDecoder(CommonTypeTestBase):
"""Check the untyped deserializer works for common types"""
def check(self, x):
enc = msgspec.Encoder()
dec = msgspec.Decoder()
assert_eq(dec.decode(enc.encode(x)), x)
class TestCompatibility(CommonTypeTestBase):
"""Test compatibility with the existing python msgpack library"""
def check(self, x):
msgpack = pytest.importorskip("msgpack")
enc = msgspec.Encoder()
dec = msgspec.Decoder()
assert_eq(dec.decode(msgpack.dumps(x)), x)
assert_eq(msgpack.loads(enc.encode(x)), x) | tests/test_msgspec.py | from __future__ import annotations
from typing import Dict, Set, List, Tuple, Optional, Any
import enum
import gc
import math
import sys
import pytest
import msgspec
class FruitInt(enum.IntEnum):
APPLE = 1
BANANA = 2
class FruitStr(enum.Enum):
APPLE = "apple"
BANANA = "banana"
class Person(msgspec.Struct):
first: str
last: str
age: int
prefect: bool = False
class Node(msgspec.Struct):
left: Optional[Node] = None
right: Optional[Node] = None
INTS = [
-(2 ** 63),
-(2 ** 31 + 1),
-(2 ** 31),
-(2 ** 15 + 1),
-(2 ** 15),
-(2 ** 7 + 1),
-(2 ** 7),
-(2 ** 5 + 1),
-(2 ** 5),
-1,
0,
1,
2 ** 7 - 1,
2 ** 7,
2 ** 8 - 1,
2 ** 8,
2 ** 16 - 1,
2 ** 16,
2 ** 32 - 1,
2 ** 32,
2 ** 64 - 1,
]
FLOATS = [
-1.5,
0.0,
1.5,
-float("inf"),
float("inf"),
float("nan"),
sys.float_info.max,
sys.float_info.min,
-sys.float_info.max,
-sys.float_info.min,
]
SIZES = [0, 1, 31, 32, 2 ** 8 - 1, 2 ** 8, 2 ** 16 - 1, 2 ** 16]
def assert_eq(x, y):
if isinstance(x, float) and math.isnan(x):
assert math.isnan(y)
else:
assert x == y
class TestEncodeFunction:
def test_encode(self):
dec = msgspec.Decoder()
assert dec.decode(msgspec.encode(1)) == 1
def test_encode_error(self):
with pytest.raises(TypeError):
msgspec.encode(object())
def test_encode_large_object(self):
"""Check that buffer resize works"""
data = b"x" * 4097
dec = msgspec.Decoder()
assert dec.decode(msgspec.encode(data)) == data
def test_encode_no_default(self):
class Foo:
pass
with pytest.raises(
TypeError, match="Encoding objects of type Foo is unsupported"
):
msgspec.encode(Foo())
def test_encode_default(self):
unsupported = object()
def default(x):
assert x is unsupported
return "hello"
orig_refcount = sys.getrefcount(default)
res = msgspec.encode(unsupported, default=default)
assert msgspec.encode("hello") == res
assert sys.getrefcount(default) == orig_refcount
def test_encode_default_errors(self):
def default(x):
raise TypeError("bad")
orig_refcount = sys.getrefcount(default)
with pytest.raises(TypeError, match="bad"):
msgspec.encode(object(), default=default)
assert sys.getrefcount(default) == orig_refcount
def test_encode_parse_arguments_errors(self):
with pytest.raises(TypeError, match="Missing 1 required argument"):
msgspec.encode()
with pytest.raises(TypeError, match="Extra positional arguments"):
msgspec.encode(1, lambda x: None)
with pytest.raises(TypeError, match="Extra positional arguments"):
msgspec.encode(1, 2, 3)
with pytest.raises(TypeError, match="Invalid keyword argument 'bad'"):
msgspec.encode(1, bad=1)
with pytest.raises(TypeError, match="Extra keyword arguments"):
msgspec.encode(1, default=lambda x: None, extra="extra")
class TestDecodeFunction:
def setup(self):
self.buf = msgspec.encode([1, 2, 3])
def test_decode(self):
assert msgspec.decode(self.buf) == [1, 2, 3]
def test_decode_type_keyword(self):
assert msgspec.decode(self.buf, type=List[int]) == [1, 2, 3]
with pytest.raises(msgspec.DecodingError):
assert msgspec.decode(self.buf, type=List[str])
def test_decode_type_any(self):
assert msgspec.decode(self.buf, type=Any) == [1, 2, 3]
def test_decode_invalid_type(self):
with pytest.raises(TypeError, match="Type '1' is not supported"):
msgspec.decode(self.buf, type=1)
def test_decode_invalid_buf(self):
with pytest.raises(TypeError):
msgspec.decode(1)
def test_decode_parse_arguments_errors(self):
with pytest.raises(TypeError, match="Missing 1 required argument"):
msgspec.decode()
with pytest.raises(TypeError, match="Extra positional arguments"):
msgspec.decode(self.buf, List[int])
with pytest.raises(TypeError, match="Extra positional arguments"):
msgspec.decode(self.buf, 2, 3)
with pytest.raises(TypeError, match="Invalid keyword argument 'bad'"):
msgspec.decode(self.buf, bad=1)
with pytest.raises(TypeError, match="Extra keyword arguments"):
msgspec.decode(self.buf, type=List[int], extra=1)
class TestEncoderMisc:
@pytest.mark.parametrize("x", [-(2 ** 63) - 1, 2 ** 64])
def test_encode_integer_limits(self, x):
enc = msgspec.Encoder()
with pytest.raises(OverflowError):
enc.encode(x)
def rec_obj1(self):
o = []
o.append(o)
return o
def rec_obj2(self):
o = ([],)
o[0].append(o)
return o
def rec_obj3(self):
o = {}
o["a"] = o
return o
def rec_obj4(self):
class Box(msgspec.Struct):
a: "Box"
o = Box(None)
o.a = o
return o
@pytest.mark.parametrize("case", [1, 2, 3, 4])
def test_encode_infinite_recursive_object_errors(self, case):
enc = msgspec.Encoder()
o = getattr(self, "rec_obj%d" % case)()
with pytest.raises(RecursionError):
enc.encode(o)
def test_getsizeof(self):
a = sys.getsizeof(msgspec.Encoder(write_buffer_size=64))
b = sys.getsizeof(msgspec.Encoder(write_buffer_size=128))
assert b > a
def test_encode_no_default(self):
class Foo:
pass
enc = msgspec.Encoder()
assert enc.default is None
with pytest.raises(
TypeError, match="Encoding objects of type Foo is unsupported"
):
enc.encode(Foo())
def test_encode_default(self):
unsupported = object()
def default(x):
assert x is unsupported
return "hello"
orig_refcount = sys.getrefcount(default)
enc = msgspec.Encoder(default=default)
assert enc.default is default
assert sys.getrefcount(enc.default) == orig_refcount + 2
assert sys.getrefcount(default) == orig_refcount + 1
res = enc.encode(unsupported)
assert enc.encode("hello") == res
del enc
assert sys.getrefcount(default) == orig_refcount
def test_encode_default_errors(self):
def default(x):
raise TypeError("bad")
enc = msgspec.Encoder(default=default)
with pytest.raises(TypeError, match="bad"):
enc.encode(object())
def test_encode_default_recurses(self):
class Node:
def __init__(self, a):
self.a = a
def default(x):
return {"type": "Node", "a": x.a}
enc = msgspec.Encoder(default=default)
msg = enc.encode(Node(Node(1)))
res = msgspec.decode(msg)
assert res == {"type": "Node", "a": {"type": "Node", "a": 1}}
def test_encode_default_recursion_error(self):
enc = msgspec.Encoder(default=lambda x: x)
with pytest.raises(RecursionError):
enc.encode(object())
class TestDecoderMisc:
def test_decoder_type_attribute(self):
dec = msgspec.Decoder()
assert dec.type is Any
dec = msgspec.Decoder(int)
assert dec.type is int
@pytest.mark.parametrize("typ, typstr", [(None, "None"), (Any, "Any")])
def test_decoder_none_any_repr(self, typ, typstr):
dec = msgspec.Decoder(typ)
assert repr(dec) == f"Decoder({typstr})"
# Optionality of None/Any doesn't change things
dec = msgspec.Decoder(Optional[typ])
assert repr(dec) == f"Decoder({typstr})"
@pytest.mark.parametrize(
"typ, typstr",
[
(bool, "bool"),
(int, "int"),
(float, "float"),
(str, "str"),
(bytes, "bytes"),
(bytearray, "bytearray"),
(Dict, "Dict[Any, Any]"),
(Dict[int, str], "Dict[int, str]"),
(List, "List[Any]"),
(List[Optional[int]], "List[Optional[int]]"),
(Set, "Set[Any]"),
(Set[Optional[int]], "Set[Optional[int]]"),
(Tuple, "Tuple[Any, ...]"),
(Tuple[Optional[int], ...], "Tuple[Optional[int], ...]"),
(Tuple[int, str], "Tuple[int, str]"),
(Person, "Person"),
(FruitInt, "FruitInt"),
(FruitStr, "FruitStr"),
(List[Optional[Dict[str, Person]]], "List[Optional[Dict[str, Person]]]"),
],
)
def test_decoder_repr(self, typ, typstr):
dec = msgspec.Decoder(typ)
assert repr(dec) == f"Decoder({typstr})"
dec = msgspec.Decoder(Optional[typ])
assert repr(dec) == f"Decoder(Optional[{typstr}])"
def test_decoder_unsupported_type(self):
with pytest.raises(TypeError):
msgspec.Decoder(1)
with pytest.raises(TypeError):
msgspec.Decoder(slice)
def test_decoder_validates_struct_definition_unsupported_types(self):
"""Struct definitions aren't validated until first use"""
class Test(msgspec.Struct):
a: slice
with pytest.raises(TypeError):
msgspec.Decoder(Test)
class TestTypedDecoder:
def check_unexpected_type(self, dec_type, val, msg):
dec = msgspec.Decoder(dec_type)
s = msgspec.Encoder().encode(val)
with pytest.raises(msgspec.DecodingError, match=msg):
dec.decode(s)
def test_none(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(None)
assert dec.decode(enc.encode(None)) is None
with pytest.raises(msgspec.DecodingError, match="expected `None`"):
assert dec.decode(enc.encode(1))
@pytest.mark.parametrize("x", [False, True])
def test_bool(self, x):
enc = msgspec.Encoder()
dec = msgspec.Decoder(bool)
assert dec.decode(enc.encode(x)) is x
def test_bool_unexpected_type(self):
self.check_unexpected_type(bool, "a", "expected `bool`")
@pytest.mark.parametrize("x", INTS)
def test_int(self, x):
enc = msgspec.Encoder()
dec = msgspec.Decoder(int)
assert dec.decode(enc.encode(x)) == x
def test_int_unexpected_type(self):
self.check_unexpected_type(int, "a", "expected `int`")
@pytest.mark.parametrize("x", FLOATS + INTS)
def test_float(self, x):
enc = msgspec.Encoder()
dec = msgspec.Decoder(float)
res = dec.decode(enc.encode(x))
sol = float(x)
if math.isnan(sol):
assert math.isnan(res)
else:
assert res == sol
def test_float_unexpected_type(self):
self.check_unexpected_type(float, "a", "expected `float`")
@pytest.mark.parametrize("size", SIZES)
def test_str(self, size):
enc = msgspec.Encoder()
dec = msgspec.Decoder(str)
x = "a" * size
res = dec.decode(enc.encode(x))
assert res == x
def test_str_unexpected_type(self):
self.check_unexpected_type(str, 1, "expected `str`")
@pytest.mark.parametrize("size", SIZES)
def test_bytes(self, size):
enc = msgspec.Encoder()
dec = msgspec.Decoder(bytes)
x = b"a" * size
res = dec.decode(enc.encode(x))
assert isinstance(res, bytes)
assert res == x
def test_bytes_unexpected_type(self):
self.check_unexpected_type(bytes, 1, "expected `bytes`")
@pytest.mark.parametrize("size", SIZES)
def test_bytearray(self, size):
enc = msgspec.Encoder()
dec = msgspec.Decoder(bytearray)
x = bytearray(size)
res = dec.decode(enc.encode(x))
assert isinstance(res, bytearray)
assert res == x
def test_bytearray_unexpected_type(self):
self.check_unexpected_type(bytearray, 1, "expected `bytearray`")
@pytest.mark.parametrize("size", SIZES)
def test_list_lengths(self, size):
enc = msgspec.Encoder()
dec = msgspec.Decoder(list)
x = list(range(size))
res = dec.decode(enc.encode(x))
assert res == x
@pytest.mark.parametrize("typ", [list, List, List[Any]])
def test_list_any(self, typ):
enc = msgspec.Encoder()
dec = msgspec.Decoder(typ)
x = [1, "two", b"three"]
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `list`"):
dec.decode(enc.encode(1))
def test_list_typed(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(List[int])
x = [1, 2, 3]
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `int`"):
dec.decode(enc.encode([1, 2, "three"]))
@pytest.mark.parametrize("size", SIZES)
def test_set_lengths(self, size):
enc = msgspec.Encoder()
dec = msgspec.Decoder(set)
x = set(range(size))
res = dec.decode(enc.encode(x))
assert res == x
@pytest.mark.parametrize("typ", [set, Set, Set[Any]])
def test_set_any(self, typ):
enc = msgspec.Encoder()
dec = msgspec.Decoder(typ)
x = {1, "two", b"three"}
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `set`"):
dec.decode(enc.encode(1))
def test_set_typed(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Set[int])
x = {1, 2, 3}
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `int`"):
dec.decode(enc.encode({1, 2, "three"}))
@pytest.mark.parametrize("size", SIZES)
def test_vartuple_lengths(self, size):
enc = msgspec.Encoder()
dec = msgspec.Decoder(tuple)
x = tuple(range(size))
res = dec.decode(enc.encode(x))
assert res == x
@pytest.mark.parametrize("typ", [tuple, Tuple, Tuple[Any, ...]])
def test_vartuple_any(self, typ):
enc = msgspec.Encoder()
dec = msgspec.Decoder(typ)
x = (1, "two", b"three")
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `tuple`"):
dec.decode(enc.encode(1))
def test_vartuple_typed(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Tuple[int, ...])
x = (1, 2, 3)
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `int`"):
dec.decode(enc.encode((1, 2, "three")))
def test_fixtuple_any(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Tuple[Any, Any, Any])
x = (1, "two", b"three")
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `tuple`"):
dec.decode(enc.encode(1))
with pytest.raises(
msgspec.DecodingError,
match=r"Error decoding `Tuple\[Any, Any, Any\]`: expected tuple of length 3, got 2",
):
dec.decode(enc.encode((1, 2)))
def test_fixtuple_typed(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Tuple[int, str, bytes])
x = (1, "two", b"three")
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `bytes`"):
dec.decode(enc.encode((1, "two", "three")))
with pytest.raises(
msgspec.DecodingError,
match=r"Error decoding `Tuple\[int, str, bytes\]`: expected tuple of length 3, got 2",
):
dec.decode(enc.encode((1, 2)))
@pytest.mark.parametrize("size", SIZES)
def test_dict_lengths(self, size):
enc = msgspec.Encoder()
dec = msgspec.Decoder(dict)
x = {i: i for i in range(size)}
res = dec.decode(enc.encode(x))
assert res == x
@pytest.mark.parametrize("typ", [dict, Dict, Dict[Any, Any]])
def test_dict_any_any(self, typ):
enc = msgspec.Encoder()
dec = msgspec.Decoder(typ)
x = {1: "one", "two": 2, b"three": 3.0}
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `dict`"):
dec.decode(enc.encode(1))
def test_dict_any_val(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Dict[str, Any])
x = {"a": 1, "b": "two", "c": b"three"}
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `str`"):
dec.decode(enc.encode({1: 2}))
def test_dict_any_key(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Dict[Any, str])
x = {1: "a", "two": "b", b"three": "c"}
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `str`"):
dec.decode(enc.encode({1: 2}))
def test_dict_typed(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Dict[str, int])
x = {"a": 1, "b": 2}
res = dec.decode(enc.encode(x))
assert res == x
with pytest.raises(msgspec.DecodingError, match="expected `str`"):
dec.decode(enc.encode({1: 2}))
with pytest.raises(msgspec.DecodingError, match="expected `int`"):
dec.decode(enc.encode({"a": "two"}))
def test_enum(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(FruitStr)
a = enc.encode(FruitStr.APPLE)
assert enc.encode("APPLE") == a
assert dec.decode(a) == FruitStr.APPLE
with pytest.raises(msgspec.DecodingError, match="truncated"):
dec.decode(a[:-2])
with pytest.raises(
msgspec.DecodingError, match="Error decoding enum `FruitStr`"
):
dec.decode(enc.encode("MISSING"))
with pytest.raises(msgspec.DecodingError):
dec.decode(enc.encode(1))
def test_int_enum(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(FruitInt)
a = enc.encode(FruitInt.APPLE)
assert enc.encode(1) == a
assert dec.decode(a) == FruitInt.APPLE
with pytest.raises(msgspec.DecodingError, match="truncated"):
dec.decode(a[:-2])
with pytest.raises(
msgspec.DecodingError, match="Error decoding enum `FruitInt`"
):
dec.decode(enc.encode(1000))
with pytest.raises(msgspec.DecodingError):
dec.decode(enc.encode("INVALID"))
def test_struct(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Person)
x = Person(first="harry", last="potter", age=13)
a = enc.encode(x)
assert (
enc.encode(
{"first": "harry", "last": "potter", "age": 13, "prefect": False}
)
== a
)
assert dec.decode(a) == x
with pytest.raises(msgspec.DecodingError, match="truncated"):
dec.decode(a[:-2])
with pytest.raises(msgspec.DecodingError, match="expected `struct`"):
dec.decode(enc.encode(1))
with pytest.raises(
msgspec.DecodingError,
match=r"Error decoding `Person` field `first` \(`str`\): expected `str`, got `int`",
):
dec.decode(enc.encode({1: "harry"}))
def test_struct_field_wrong_type(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Person)
bad = enc.encode({"first": "harry", "last": "potter", "age": "thirteen"})
with pytest.raises(msgspec.DecodingError, match="expected `int`"):
dec.decode(bad)
def test_struct_missing_fields(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Person)
bad = enc.encode({"first": "harry", "last": "potter"})
with pytest.raises(msgspec.DecodingError, match="missing required field `age`"):
dec.decode(bad)
bad = enc.encode({})
with pytest.raises(
msgspec.DecodingError, match="missing required field `first`"
):
dec.decode(bad)
@pytest.mark.parametrize(
"extra", [None, False, True, 1, 2.0, "three", b"four", [1, 2], {3: 4}]
)
def test_struct_ignore_extra_fields(self, extra):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Person)
a = enc.encode(
{
"extra1": extra,
"first": "harry",
"extra2": extra,
"last": "potter",
"age": 13,
"extra3": extra,
}
)
res = dec.decode(a)
assert res == Person("harry", "potter", 13)
def test_struct_defaults_missing_fields(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Person)
a = enc.encode({"first": "harry", "last": "potter", "age": 13})
res = dec.decode(a)
assert res == Person("harry", "potter", 13)
assert res.prefect is False
def test_struct_gc_maybe_untracked_on_decode(self):
class Test(msgspec.Struct):
x: Any
y: Any
z: Tuple = ()
enc = msgspec.Encoder()
dec = msgspec.Decoder(List[Test])
ts = [
Test(1, 2),
Test(3, "hello"),
Test([], []),
Test({}, {}),
Test(None, None, ()),
]
a, b, c, d, e = dec.decode(enc.encode(ts))
assert not gc.is_tracked(a)
assert not gc.is_tracked(b)
assert gc.is_tracked(c)
assert gc.is_tracked(d)
assert not gc.is_tracked(e)
def test_struct_recursive_definition(self):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Node)
x = Node(Node(Node(), Node(Node())))
s = enc.encode(x)
res = dec.decode(s)
assert res == x
@pytest.mark.parametrize(
"typ, value",
[
(bool, False),
(bool, True),
(int, 1),
(float, 2.5),
(str, "a"),
(bytes, b"a"),
(bytearray, bytearray(b"a")),
(FruitInt, FruitInt.APPLE),
(FruitStr, FruitStr.APPLE),
(Person, Person("harry", "potter", 13)),
(list, [1]),
(set, {1}),
(tuple, (1, 2)),
(Tuple[int, int], (1, 2)),
(dict, {1: 2}),
],
)
def test_optional(self, typ, value):
enc = msgspec.Encoder()
dec = msgspec.Decoder(Optional[typ])
s = enc.encode(value)
s2 = enc.encode(None)
assert dec.decode(s) == value
assert dec.decode(s2) is None
dec = msgspec.Decoder(typ)
with pytest.raises(msgspec.DecodingError):
dec.decode(s2)
@pytest.mark.parametrize(
"typ, value",
[
(List[Optional[int]], [1, None]),
(Tuple[Optional[int], int], (None, 1)),
(Set[Optional[int]], {1, None}),
(Dict[str, Optional[int]], {"a": 1, "b": None}),
(Dict[Optional[str], int], {"a": 1, None: 2}),
],
)
def test_optional_nested(self, typ, value):
enc = msgspec.Encoder()
dec = msgspec.Decoder(typ)
s = enc.encode(value)
assert dec.decode(s) == value
def test_decoding_error_no_struct_toplevel(self):
b = msgspec.Encoder().encode([{"a": 1}])
dec = msgspec.Decoder(List[Dict[str, str]])
with pytest.raises(
msgspec.DecodingError,
match=r"Error decoding `List\[Dict\[str, str\]\]`: expected `str`, got `int`",
):
dec.decode(b)
class CommonTypeTestBase:
"""Test msgspec untyped encode/decode"""
def test_none(self):
self.check(None)
@pytest.mark.parametrize("x", [False, True])
def test_bool(self, x):
self.check(x)
@pytest.mark.parametrize("x", INTS)
def test_int(self, x):
self.check(x)
@pytest.mark.parametrize("x", FLOATS)
def test_float(self, x):
self.check(x)
@pytest.mark.parametrize("size", SIZES)
def test_str(self, size):
self.check(" " * size)
@pytest.mark.parametrize("size", SIZES)
def test_bytes(self, size):
self.check(b" " * size)
@pytest.mark.parametrize("size", SIZES)
def test_dict(self, size):
self.check({str(i): i for i in range(size)})
@pytest.mark.parametrize("size", SIZES)
def test_list(self, size):
self.check(list(range(size)))
class TestDecodeArrayTypeUsesTupleIfHashableRequired:
def test_decode_tuple_dict_keys_as_tuples(self):
orig = {(1, 2): [1, 2, [3, 4]], (1, (2, 3)): [4, 5, 6]}
data = msgspec.encode(orig)
out = msgspec.decode(data)
assert orig == out
@pytest.mark.parametrize(
"typ",
[
Dict[Tuple[int, Tuple[int, int]], List[int]],
Dict[Tuple[int, Tuple[int, ...]], Any],
Dict[Tuple, List[int]],
Dict[Tuple[Any, ...], Any],
Dict[Tuple[Any, Any], Any],
],
)
def test_decode_dict_key_status_forwarded_through_typed_tuples(self, typ):
orig = {(1, (2, 3)): [1, 2, 3]}
data = msgspec.encode(orig)
out = msgspec.Decoder(typ).decode(data)
assert orig == out
def test_decode_tuple_set_keys_as_tuples(self):
orig = {(1, 2), (3, (4, 5)), 6}
data = msgspec.encode(orig)
out = msgspec.decode(data, type=set)
assert orig == out
def test_decode_hashable_struct_in_key(self):
class Test(msgspec.Struct):
data: List[int]
def __hash__(self):
return hash(tuple(self.data))
orig = {(1, Test([1, 2])): [1, 2]}
data = msgspec.encode(orig)
out = msgspec.Decoder(Dict[Tuple[int, Test], List[int]]).decode(data)
assert orig == out
class TestUntypedDecoder(CommonTypeTestBase):
"""Check the untyped deserializer works for common types"""
def check(self, x):
enc = msgspec.Encoder()
dec = msgspec.Decoder()
assert_eq(dec.decode(enc.encode(x)), x)
class TestCompatibility(CommonTypeTestBase):
"""Test compatibility with the existing python msgpack library"""
def check(self, x):
msgpack = pytest.importorskip("msgpack")
enc = msgspec.Encoder()
dec = msgspec.Decoder()
assert_eq(dec.decode(msgpack.dumps(x)), x)
assert_eq(msgpack.loads(enc.encode(x)), x) | 0.650689 | 0.596081 |
import os
os.chdir('../../..')
from pipelineFunctions import parseConfigFindList, parseConfigFindPath
root = os.getcwd()+'/'
print root
import sys
version,buildSample,buildRef,constructSample,CDSspecies,CDSOld,CDSgeneNaming, BB, nuc, weights_file, references_dir, query_dir, output_dir, bin = tuple(sys.argv[1:])
"""with open('scaffoldConfig.txt','r') as f:
(version,buildSample,buildRef,constructSample,CDSspecies,CDSOld,CDSgeneNaming, BB, nuc) = tuple([parseConfigFindPath(x,f) for x in
['version','buildSample','buildRef','constructSample','CDS','CDSFasta','geneNameOld','com_bb','com_Nuc']])
weights = parseConfigFindList('weights',f)"""
root, references_dir, query_dir, output_dir = os.path.abspath(bin)+'/', os.path.abspath(references_dir)+'/', os.path.abspath(query_dir)+'/', os.path.abspath(output_dir)+'/'
with open(weights_file) as f:
weights = map(lambda x: x.split(), f.read().splitlines())
try:
BB = int(BB)
except:
BB = 0
try:
nuc = int(nuc)
except:
nuc = 0
weightsText = '\n'.join([weights[0]]+[item for item in [CDSspecies+'nuc ' + str(int(weights[1].split()[-1]))] if nuc]+[item for item in [CDSspecies+'BB ' + str(int(weights[1].split()[-1]))] if BB]+weights[1:])
weights = {line.split()[0]:int(line.split()[-1]) for line in weights}
with open('references.txt','w') as f:
f.write('['+','.join("'%s'"%ref for ref in weights.keys())+']')
listSamples = [folder for folder in os.listdir(query_dir+version) if folder.endswith(query_dir+version)]
#listSamples = ['Bdist_100_v0','Bdist_001_v0','Bdist_011_v0']
headSh = """#!/bin/bash
module load cufflinks/2.2.1
module load samtools/1.3.1
module load gmap
module load parallel/20150222
module load bedtools/2.25.0
module unload gcc
module load gcc/6.3.0
module load bbtools
"""
print weightsText
for reference in weights.keys():
print reference
os.chdir(references_dir + reference)
fastaOld = [fasta for fasta in os.listdir('.') if
'cds' not in fasta.lower() and (fasta.endswith('.fa') or fasta.endswith('.fasta'))][0]
with open('buildRef.sh','w') as f:
f.write('\n'.join([headSh,'samtools faidx %s' % fastaOld,
'python -m jcvi.formats.gff load %s %s --parents=mRNA --children=CDS --id_attribute=Name -o %s' % (
[file for file in os.listdir('.') if 'cufflinks' not in file and reference in file and (file.endswith('.gff3') or file.endswith('.gff'))][
0], fastaOld, reference + '.cds'),
'python -m jcvi.formats.gff bed --type=mRNA --key=Name %s -o %s' % (
[file for file in os.listdir('.') if 'cufflinks' not in file and reference in file and (file.endswith('.gff3') or file.endswith('.gff'))][
0], reference + '.bed'),
'python %sreplacepath.py %s' % (root, reference + '.bed'), 'mv %s %s ..' % (reference + '.bed', reference + '.cds')]+
['cd '+root,'python %sformatBed.py r %s %s' % (root, reference,version),'cd '+root, 'python %sformatCDS.py r %s %s' % (root, reference,version)]))
linkReferences = ['ln -s %s/%s.cds %s.cds\nln -s %s/%s.bed %s.bed'%(references_dir,ref,ref,references_dir,ref,ref) for ref in weights.keys()]
fastaNucOld = [fasta for fasta in os.listdir(references_dir+'%s'%CDSspecies) if 'cds' not in fasta.lower() and (fasta.endswith('.fa') or fasta.endswith('.fasta'))][0]
nextVersion = 'v' + str(int(version.strip('v').strip('\n'))+1)
for sample in listSamples:
print sample.replace(version, nextVersion)
os.chdir(query_dir+version + '/' + sample)
with open('weights.txt','w') as f:
f.write(weightsText)
fastaNew = sample + '.fa'
geneNaming = sample.replace('_', '')
writeBuild = [headSh,'rm -r %s %s.gff3.db %s.chromosome *.iit %s.coords' % (geneNaming, geneNaming, geneNaming, geneNaming),
'dedupe.sh in=%s out=deduped.fasta ac=f requirematchingnames && mv deduped.fasta %s && samtools faidx %s' %(fastaNew, fastaNew, fastaNew),
'gmap_build --dir=. -d %s %s' % (geneNaming, fastaNew),
'gmap --dir=. -d %s -B 5 -A --format=gff3_gene -n 1 -t 6 %s > %s 2> %s' % (
geneNaming, '%s/%s/' % (references_dir,CDSspecies + CDSOld), geneNaming + '.gff3', geneNaming + '.log'),
'python %srenameGenes.py %s %s %s' % (root, geneNaming + '.gff3', CDSgeneNaming, geneNaming),
'python %sfixGFFCoordinates.py %s'%(root,geneNaming),
'python -m jcvi.formats.gff bed --type=mRNA --key=gene_name %s -o %s' % (geneNaming + '.gff3', sample + '.bed'),
'gffread -x %s.cds -g %s.fa %s.gff3 -E'%(sample,sample,geneNaming)]+linkReferences + ['cd '+root,'python %sformatBed.py s %s %s'%(root,sample,version),'cd '+root,'python %sformatCDS.py s %s %s'%(root,sample,version)]
"""'python -m jcvi.formats.gff load %s %s --parents=mRNA --children=CDS -o %s' % (
geneNaming+'.gff3', fastaNew,sample + '.cds')""" # key=Name was original, now gene_name with
nucCommands = [headSh]+ ['nucmer -t 6 -p %s %s %s'%(CDSspecies+'nuc',references_dir+'/%s/'%CDSspecies+fastaNucOld,sample+'.fa'),
'delta-filter -m -q -i 85 -u 50 %snuc.delta > %snuc2.delta'%(CDSspecies,CDSspecies),'show-tiling -a %snuc2.delta > %snuc.tiling'%(CDSspecies,CDSspecies)]
bbCommands = [headSh.replace('module load samtools/1.3.1\n','module unload samtools\nmodule load samtools/1.4\n')] + ['rm -r ref\nrm BBmapped.bed'] + ['bbmap.sh fastareadlen=600 in=%s.fa ref=%s minid=0.97 ef=0.01 outm=BBmapped.bam ambiguous=toss'%(sample,root+'referenceGenomes/%s/'%CDSspecies+fastaNucOld),
'python -m jcvi.formats.sam bed BBmapped.bed BBmapped.bam']#threads=6
commands1 = [headSh]+['rm *.anchors *.last *.filtered *.prj']+\
['nohup python -m jcvi.compara.catalog ortholog --full %s %s\nmv %s %s'%(ref,sample,'%s.%s.1x1.lifted.anchors'%(ref,sample),'%s.%s.lifted.anchors'%(sample,ref)) for ref in weights.keys()]
commands2=[headSh]+['rm multipleMapping.bed','\n'.join('python -m jcvi.assembly.syntenypath bed %s --switch --scale=10000 --qbed=%s --sbed=%s -o %s'%('%s.%s.lifted.anchors'%(sample,ref),ref+'_syn'+'.bed',sample+'_%ssyn'%ref+'.bed','%s.synteny.bed'%(ref)) for ref in weights.keys())] \
+ [item for item in ['python -m jcvi.assembly.syntenypath bed %s --switch --scale=10000 --qbed=%s --sbed=%s -o %snuc.synteny.bed'%('nucMap.bed',CDSspecies+'_nucSyn.bed',sample+'_nucSyn.bed',CDSspecies)] if nuc] \
+ [item for item in ['python -m jcvi.assembly.syntenypath bed %s --switch --scale=10000 --qbed=%s --sbed=%s -o %sBB.synteny.bed' % (
'BBMap.bed', CDSspecies + '_BBSyn.bed', sample + '_BBSyn.bed', CDSspecies)] if BB] \
+ ['nohup python -m jcvi.assembly.allmaps mergebed %s -o %s'%(' '.join(['%s.synteny.bed'%(ref) for ref in (weights.keys() + [item for item in [CDSspecies+'nuc'] if nuc] + [item for item in [CDSspecies+'BB'] if BB])]),'multipleMapping.bed')]
qsub=[headSh]+['module swap python/2.7-anaconda_4.3.0 && source activate scaffolder\npython -m jcvi.assembly.allmaps path --skipconcorde --cpus=16 --ngen=400 --npop=60 multipleMapping.bed %s.fa' % (sample),
'mv multipleMapping.fasta %s%s/%s/%s.fa' % (output_dir,nextVersion,sample.replace(version, nextVersion), sample.replace(version, nextVersion))]
with open('build.sh','w') as f:
f.write('\n'.join(writeBuild))
with open('nucCommand.sh','w') as f:
f.write('\n'.join(nucCommands))
with open('constructv1_1.sh','w') as f:
f.write('\n'.join(commands1))
with open('constructv1_2.sh','w') as f:
f.write('\n'.join(commands2))
with open('BB_build.sh','w') as f:
f.write('\n'.join(bbCommands))
with open('qsub_build.sh','w') as f:
f.write('\n'.join(qsub))
os.chdir(root) | scaffolding_tool_bin/writeShFiles.py | import os
os.chdir('../../..')
from pipelineFunctions import parseConfigFindList, parseConfigFindPath
root = os.getcwd()+'/'
print root
import sys
version,buildSample,buildRef,constructSample,CDSspecies,CDSOld,CDSgeneNaming, BB, nuc, weights_file, references_dir, query_dir, output_dir, bin = tuple(sys.argv[1:])
"""with open('scaffoldConfig.txt','r') as f:
(version,buildSample,buildRef,constructSample,CDSspecies,CDSOld,CDSgeneNaming, BB, nuc) = tuple([parseConfigFindPath(x,f) for x in
['version','buildSample','buildRef','constructSample','CDS','CDSFasta','geneNameOld','com_bb','com_Nuc']])
weights = parseConfigFindList('weights',f)"""
root, references_dir, query_dir, output_dir = os.path.abspath(bin)+'/', os.path.abspath(references_dir)+'/', os.path.abspath(query_dir)+'/', os.path.abspath(output_dir)+'/'
with open(weights_file) as f:
weights = map(lambda x: x.split(), f.read().splitlines())
try:
BB = int(BB)
except:
BB = 0
try:
nuc = int(nuc)
except:
nuc = 0
weightsText = '\n'.join([weights[0]]+[item for item in [CDSspecies+'nuc ' + str(int(weights[1].split()[-1]))] if nuc]+[item for item in [CDSspecies+'BB ' + str(int(weights[1].split()[-1]))] if BB]+weights[1:])
weights = {line.split()[0]:int(line.split()[-1]) for line in weights}
with open('references.txt','w') as f:
f.write('['+','.join("'%s'"%ref for ref in weights.keys())+']')
listSamples = [folder for folder in os.listdir(query_dir+version) if folder.endswith(query_dir+version)]
#listSamples = ['Bdist_100_v0','Bdist_001_v0','Bdist_011_v0']
headSh = """#!/bin/bash
module load cufflinks/2.2.1
module load samtools/1.3.1
module load gmap
module load parallel/20150222
module load bedtools/2.25.0
module unload gcc
module load gcc/6.3.0
module load bbtools
"""
print weightsText
for reference in weights.keys():
print reference
os.chdir(references_dir + reference)
fastaOld = [fasta for fasta in os.listdir('.') if
'cds' not in fasta.lower() and (fasta.endswith('.fa') or fasta.endswith('.fasta'))][0]
with open('buildRef.sh','w') as f:
f.write('\n'.join([headSh,'samtools faidx %s' % fastaOld,
'python -m jcvi.formats.gff load %s %s --parents=mRNA --children=CDS --id_attribute=Name -o %s' % (
[file for file in os.listdir('.') if 'cufflinks' not in file and reference in file and (file.endswith('.gff3') or file.endswith('.gff'))][
0], fastaOld, reference + '.cds'),
'python -m jcvi.formats.gff bed --type=mRNA --key=Name %s -o %s' % (
[file for file in os.listdir('.') if 'cufflinks' not in file and reference in file and (file.endswith('.gff3') or file.endswith('.gff'))][
0], reference + '.bed'),
'python %sreplacepath.py %s' % (root, reference + '.bed'), 'mv %s %s ..' % (reference + '.bed', reference + '.cds')]+
['cd '+root,'python %sformatBed.py r %s %s' % (root, reference,version),'cd '+root, 'python %sformatCDS.py r %s %s' % (root, reference,version)]))
linkReferences = ['ln -s %s/%s.cds %s.cds\nln -s %s/%s.bed %s.bed'%(references_dir,ref,ref,references_dir,ref,ref) for ref in weights.keys()]
fastaNucOld = [fasta for fasta in os.listdir(references_dir+'%s'%CDSspecies) if 'cds' not in fasta.lower() and (fasta.endswith('.fa') or fasta.endswith('.fasta'))][0]
nextVersion = 'v' + str(int(version.strip('v').strip('\n'))+1)
for sample in listSamples:
print sample.replace(version, nextVersion)
os.chdir(query_dir+version + '/' + sample)
with open('weights.txt','w') as f:
f.write(weightsText)
fastaNew = sample + '.fa'
geneNaming = sample.replace('_', '')
writeBuild = [headSh,'rm -r %s %s.gff3.db %s.chromosome *.iit %s.coords' % (geneNaming, geneNaming, geneNaming, geneNaming),
'dedupe.sh in=%s out=deduped.fasta ac=f requirematchingnames && mv deduped.fasta %s && samtools faidx %s' %(fastaNew, fastaNew, fastaNew),
'gmap_build --dir=. -d %s %s' % (geneNaming, fastaNew),
'gmap --dir=. -d %s -B 5 -A --format=gff3_gene -n 1 -t 6 %s > %s 2> %s' % (
geneNaming, '%s/%s/' % (references_dir,CDSspecies + CDSOld), geneNaming + '.gff3', geneNaming + '.log'),
'python %srenameGenes.py %s %s %s' % (root, geneNaming + '.gff3', CDSgeneNaming, geneNaming),
'python %sfixGFFCoordinates.py %s'%(root,geneNaming),
'python -m jcvi.formats.gff bed --type=mRNA --key=gene_name %s -o %s' % (geneNaming + '.gff3', sample + '.bed'),
'gffread -x %s.cds -g %s.fa %s.gff3 -E'%(sample,sample,geneNaming)]+linkReferences + ['cd '+root,'python %sformatBed.py s %s %s'%(root,sample,version),'cd '+root,'python %sformatCDS.py s %s %s'%(root,sample,version)]
"""'python -m jcvi.formats.gff load %s %s --parents=mRNA --children=CDS -o %s' % (
geneNaming+'.gff3', fastaNew,sample + '.cds')""" # key=Name was original, now gene_name with
nucCommands = [headSh]+ ['nucmer -t 6 -p %s %s %s'%(CDSspecies+'nuc',references_dir+'/%s/'%CDSspecies+fastaNucOld,sample+'.fa'),
'delta-filter -m -q -i 85 -u 50 %snuc.delta > %snuc2.delta'%(CDSspecies,CDSspecies),'show-tiling -a %snuc2.delta > %snuc.tiling'%(CDSspecies,CDSspecies)]
bbCommands = [headSh.replace('module load samtools/1.3.1\n','module unload samtools\nmodule load samtools/1.4\n')] + ['rm -r ref\nrm BBmapped.bed'] + ['bbmap.sh fastareadlen=600 in=%s.fa ref=%s minid=0.97 ef=0.01 outm=BBmapped.bam ambiguous=toss'%(sample,root+'referenceGenomes/%s/'%CDSspecies+fastaNucOld),
'python -m jcvi.formats.sam bed BBmapped.bed BBmapped.bam']#threads=6
commands1 = [headSh]+['rm *.anchors *.last *.filtered *.prj']+\
['nohup python -m jcvi.compara.catalog ortholog --full %s %s\nmv %s %s'%(ref,sample,'%s.%s.1x1.lifted.anchors'%(ref,sample),'%s.%s.lifted.anchors'%(sample,ref)) for ref in weights.keys()]
commands2=[headSh]+['rm multipleMapping.bed','\n'.join('python -m jcvi.assembly.syntenypath bed %s --switch --scale=10000 --qbed=%s --sbed=%s -o %s'%('%s.%s.lifted.anchors'%(sample,ref),ref+'_syn'+'.bed',sample+'_%ssyn'%ref+'.bed','%s.synteny.bed'%(ref)) for ref in weights.keys())] \
+ [item for item in ['python -m jcvi.assembly.syntenypath bed %s --switch --scale=10000 --qbed=%s --sbed=%s -o %snuc.synteny.bed'%('nucMap.bed',CDSspecies+'_nucSyn.bed',sample+'_nucSyn.bed',CDSspecies)] if nuc] \
+ [item for item in ['python -m jcvi.assembly.syntenypath bed %s --switch --scale=10000 --qbed=%s --sbed=%s -o %sBB.synteny.bed' % (
'BBMap.bed', CDSspecies + '_BBSyn.bed', sample + '_BBSyn.bed', CDSspecies)] if BB] \
+ ['nohup python -m jcvi.assembly.allmaps mergebed %s -o %s'%(' '.join(['%s.synteny.bed'%(ref) for ref in (weights.keys() + [item for item in [CDSspecies+'nuc'] if nuc] + [item for item in [CDSspecies+'BB'] if BB])]),'multipleMapping.bed')]
qsub=[headSh]+['module swap python/2.7-anaconda_4.3.0 && source activate scaffolder\npython -m jcvi.assembly.allmaps path --skipconcorde --cpus=16 --ngen=400 --npop=60 multipleMapping.bed %s.fa' % (sample),
'mv multipleMapping.fasta %s%s/%s/%s.fa' % (output_dir,nextVersion,sample.replace(version, nextVersion), sample.replace(version, nextVersion))]
with open('build.sh','w') as f:
f.write('\n'.join(writeBuild))
with open('nucCommand.sh','w') as f:
f.write('\n'.join(nucCommands))
with open('constructv1_1.sh','w') as f:
f.write('\n'.join(commands1))
with open('constructv1_2.sh','w') as f:
f.write('\n'.join(commands2))
with open('BB_build.sh','w') as f:
f.write('\n'.join(bbCommands))
with open('qsub_build.sh','w') as f:
f.write('\n'.join(qsub))
os.chdir(root) | 0.210523 | 0.090093 |
from collections import OrderedDict
import itertools
import sys
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import TerminateOnNaN
from flowket.callbacks.monte_carlo import TensorBoardWithGeneratorValidationData, \
default_wave_function_stats_callbacks_factory, BadEigenStateStopping
from flowket.evaluation import evaluate
from flowket.operators.j1j2 import J1J2
from flowket.machines import ConvNetAutoregressive2D
from flowket.machines.ensemble import make_2d_obc_invariants
from flowket.optimization import VariationalMonteCarlo, loss_for_energy_minimization
from flowket.samplers import FastAutoregressiveSampler
params_grid_config = {
'width': [32],
'depth': [5],
'lr': [5e-3, 1e-3, 5e-4],
'weights_normalization': [True, False]
}
run_index = int(sys.argv[-1].strip())
ks, vs = zip(*params_grid_config.items())
params_options = list(itertools.product(*vs))
chosen_v = params_options[run_index % len(params_options)]
params = dict(zip(ks, chosen_v))
print('Chosen params: %s' % str(params))
hilbert_state_shape = (4, 4)
inputs = Input(shape=hilbert_state_shape, dtype='int8')
convnet = ConvNetAutoregressive2D(inputs, depth=params['depth'], num_of_channels=params['width'],
weights_normalization=params['weights_normalization'])
predictions, conditional_log_probs = convnet.predictions, convnet.conditional_log_probs
model = Model(inputs=inputs, outputs=predictions)
conditional_log_probs_model = Model(inputs=inputs, outputs=conditional_log_probs)
batch_size = 2 ** 10
steps_per_epoch = 2 ** 8
true_ground_state_energy = -30.022227800323677
optimizer = Adam(lr=params['lr'], beta_1=0.9, beta_2=0.999)
model.compile(optimizer=optimizer, loss=loss_for_energy_minimization)
model.summary()
operator = J1J2(hilbert_state_shape=hilbert_state_shape, j2=0.5, pbc=False)
sampler = FastAutoregressiveSampler(conditional_log_probs_model, batch_size)
monte_carlo_generator = VariationalMonteCarlo(model, operator, sampler)
run_name = 'j1j2_4_monte_carlo_weights_normalization_%s_depth_%s_width_%s_adam_lr_%s_run_%s' % \
(params['weights_normalization'], params['depth'], params['width'], params['lr'], run_index)
tensorboard = TensorBoardWithGeneratorValidationData(log_dir='tensorboard_logs/%s' % run_name,
generator=monte_carlo_generator, update_freq='epoch',
histogram_freq=1, batch_size=batch_size, write_output=False)
warly_stopping = BadEigenStateStopping(true_ground_state_energy)
callbacks = default_wave_function_stats_callbacks_factory(monte_carlo_generator, log_in_batch_or_epoch=False,
true_ground_state_energy=true_ground_state_energy) + [
tensorboard, TerminateOnNaN(), warly_stopping]
model.fit_generator(monte_carlo_generator.to_generator(), steps_per_epoch=steps_per_epoch, epochs=60,
callbacks=callbacks,
max_queue_size=0, workers=0)
model.save_weights('before_increasing_batch__%s.h5' % run_name)
if warly_stopping.stopped_epoch is not None:
print('stopat epoch %s because of bad eigenstate' % warly_stopping.stopped_epoch)
sys.exit()
print('incresing batchsize to 8192')
sampler = FastAutoregressiveSampler(conditional_log_probs_model, batch_size * 8)
monte_carlo_generator.set_sampler(sampler)
model.fit_generator(monte_carlo_generator.to_generator(), steps_per_epoch=steps_per_epoch, epochs=80,
callbacks=callbacks,
max_queue_size=0, workers=0)
model.save_weights('final_%s.h5' % run_name)
evaluation_inputs = Input(shape=hilbert_state_shape, dtype='int8')
invariant_model = make_2d_obc_invariants(evaluation_inputs, model)
generator = VariationalMonteCarlo(invariant_model, operator, sampler)
evaluate(generator, steps=200, callbacks=callbacks[:4],
keys_to_progress_bar_mapping={'energy/energy': 'energy', 'energy/relative_error': 'relative_error'}) | examples/j1j2_2d_monte_carlo_4.py | from collections import OrderedDict
import itertools
import sys
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import TerminateOnNaN
from flowket.callbacks.monte_carlo import TensorBoardWithGeneratorValidationData, \
default_wave_function_stats_callbacks_factory, BadEigenStateStopping
from flowket.evaluation import evaluate
from flowket.operators.j1j2 import J1J2
from flowket.machines import ConvNetAutoregressive2D
from flowket.machines.ensemble import make_2d_obc_invariants
from flowket.optimization import VariationalMonteCarlo, loss_for_energy_minimization
from flowket.samplers import FastAutoregressiveSampler
params_grid_config = {
'width': [32],
'depth': [5],
'lr': [5e-3, 1e-3, 5e-4],
'weights_normalization': [True, False]
}
run_index = int(sys.argv[-1].strip())
ks, vs = zip(*params_grid_config.items())
params_options = list(itertools.product(*vs))
chosen_v = params_options[run_index % len(params_options)]
params = dict(zip(ks, chosen_v))
print('Chosen params: %s' % str(params))
hilbert_state_shape = (4, 4)
inputs = Input(shape=hilbert_state_shape, dtype='int8')
convnet = ConvNetAutoregressive2D(inputs, depth=params['depth'], num_of_channels=params['width'],
weights_normalization=params['weights_normalization'])
predictions, conditional_log_probs = convnet.predictions, convnet.conditional_log_probs
model = Model(inputs=inputs, outputs=predictions)
conditional_log_probs_model = Model(inputs=inputs, outputs=conditional_log_probs)
batch_size = 2 ** 10
steps_per_epoch = 2 ** 8
true_ground_state_energy = -30.022227800323677
optimizer = Adam(lr=params['lr'], beta_1=0.9, beta_2=0.999)
model.compile(optimizer=optimizer, loss=loss_for_energy_minimization)
model.summary()
operator = J1J2(hilbert_state_shape=hilbert_state_shape, j2=0.5, pbc=False)
sampler = FastAutoregressiveSampler(conditional_log_probs_model, batch_size)
monte_carlo_generator = VariationalMonteCarlo(model, operator, sampler)
run_name = 'j1j2_4_monte_carlo_weights_normalization_%s_depth_%s_width_%s_adam_lr_%s_run_%s' % \
(params['weights_normalization'], params['depth'], params['width'], params['lr'], run_index)
tensorboard = TensorBoardWithGeneratorValidationData(log_dir='tensorboard_logs/%s' % run_name,
generator=monte_carlo_generator, update_freq='epoch',
histogram_freq=1, batch_size=batch_size, write_output=False)
warly_stopping = BadEigenStateStopping(true_ground_state_energy)
callbacks = default_wave_function_stats_callbacks_factory(monte_carlo_generator, log_in_batch_or_epoch=False,
true_ground_state_energy=true_ground_state_energy) + [
tensorboard, TerminateOnNaN(), warly_stopping]
model.fit_generator(monte_carlo_generator.to_generator(), steps_per_epoch=steps_per_epoch, epochs=60,
callbacks=callbacks,
max_queue_size=0, workers=0)
model.save_weights('before_increasing_batch__%s.h5' % run_name)
if warly_stopping.stopped_epoch is not None:
print('stopat epoch %s because of bad eigenstate' % warly_stopping.stopped_epoch)
sys.exit()
print('incresing batchsize to 8192')
sampler = FastAutoregressiveSampler(conditional_log_probs_model, batch_size * 8)
monte_carlo_generator.set_sampler(sampler)
model.fit_generator(monte_carlo_generator.to_generator(), steps_per_epoch=steps_per_epoch, epochs=80,
callbacks=callbacks,
max_queue_size=0, workers=0)
model.save_weights('final_%s.h5' % run_name)
evaluation_inputs = Input(shape=hilbert_state_shape, dtype='int8')
invariant_model = make_2d_obc_invariants(evaluation_inputs, model)
generator = VariationalMonteCarlo(invariant_model, operator, sampler)
evaluate(generator, steps=200, callbacks=callbacks[:4],
keys_to_progress_bar_mapping={'energy/energy': 'energy', 'energy/relative_error': 'relative_error'}) | 0.586286 | 0.322753 |
from src.constants import *
from src.token import Token
from src.tokenizer import Tokenizer
from src.symbol_table import SymbolTable
from src.variable import Variable
from copy import copy
CLASSES = []
SUBROUTINES = []
class Parser(object):
def __init__(self, tokenizer):
""" Constructs parser object. """
self.xml_data = [] # For xml export.
self.symbol_table = SymbolTable() # Create symbol table for class scope.
self.tokenizer = tokenizer # Tokenizer.
self.token = None # Current token.
self.compile_class()
def check_for_value(self, value):
""" Check if current token has expected value. """
self.token = self.tokenizer.advance()
if self.token.value != value:
raise Exception("Error: Excpected value => '{0}' but got => '{1}'".format(value, self.token.value))
if self.token.value in XML_REPLACE.keys():
self.xml_data.append("<{0}> {1} </{0}>".format(self.token.type, XML_REPLACE[self.token.value]))
else:
self.xml_data.append(self.token.__str__())
def check_for_identifier(self):
""" Check if current token is valid identifier. """
self.token = self.tokenizer.advance()
if self.token.type != "identifier" or (not re.match(R_IDENTIFIER, self.token.value)):
raise Exception("Error: Identifier name not valid => '{0}'".format(self.token.value))
self.xml_data.append(self.token.__str__())
def check_for_type(self):
""" Check if current token has valid type. """
self.token = self.tokenizer.advance()
if self.token.value not in list(TYPES) + CLASSES:
raise Exception("Error: Not valid type => '{0}'".format(self.token.value))
self.xml_data.append(self.token.__str__())
def check_for_operator(self):
""" Check if current token is operator. """
self.token = self.tokenizer.advance()
if self.token.value not in OP:
raise Exception("Error: Invalid operator => '{0}'".format(self.token.value))
if self.token.value in XML_REPLACE.keys():
self.xml_data.append("<{0}> {1} </{0}>".format(self.token.type, XML_REPLACE[self.token.value]))
else:
self.xml_data.append(self.token.__str__())
def compile_class(self):
"""
Compile class.
-------------------------------------------------------------
Rule => 'class' className '{' classVarDec* subroutineDec* '}'
-------------------------------------------------------------
"""
self.xml_data.append("<class>") # Xml rep: <class>
self.check_for_value('class') # Xml rep: <keyword> class </keyword>
self.check_for_identifier() # Xml rep: <identifier> className </identifier>
CLASSES.append(self.token.value) # Add class name to list of classes.
self.check_for_value('{') # Xml rep: <symbol> { </symbol>
while self.tokenizer.next().value != "}":
self.token = self.tokenizer.advance()
if self.token.value in ['static', 'field']:
self.compile_class_var_dec() # Compile class variable declarations.
elif self.token.value in ['constructor', 'function', 'method']:
self.compile_subroutine_dec() # Compile class subroutine declarations.
self.check_for_value("}") # Xml rep: <symbol> } </symbol>
self.xml_data.append("</class>") # Xml rep: </class>
def compile_class_var_dec(self):
"""
Compile class variable declarations.
-------------------------------------------------------------
Rule => ('static' | 'field') type varName (',', varName)* ';'
-------------------------------------------------------------
"""
self.xml_data.append("<classVarDec>") # Xml rep: <classVarDec>
variable = Variable()
self.xml_data.append(self.token.__str__()) # Xml rep: <keyword> ('static' | 'field') </keyword>
variable.kind = self.token.value
self.check_for_type() # Xml rep: <keyword> type </keyword>
variable.type = self.token.value
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
variable.name = self.token.value
self.symbol_table.add(variable) # Add variable to class scope symbol table.
while self.tokenizer.next().value != ";":
self.check_for_value(",") # Xml rep: <symbol> , </symbol>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
v = copy(variable)
v.name = self.token.value
self.symbol_table.add(v) # Add variable to class scope symbol table.
self.check_for_value(";") # Xml rep: <symbol> ; </symbol>
self.xml_data.append("</classVarDec>") # Xml rep: </classVarDec>
def compile_subroutine_dec(self):
"""
Compile class subroutine declarations.
-------------------------------------------------------------------------------------------------------------------
Rule => ('constructor' | 'function' | 'method') ('void' | type) subroutineName '(' parameterList ')' subroutineBody
-------------------------------------------------------------------------------------------------------------------
"""
self.xml_data.append("<subroutineDec>") # Xml rep: <subroutineDec>
self.xml_data.append(self.token.__str__()) # Xml rep: <keyword> ('constructor' | 'function' | 'method')) </keyword>
self.check_for_type() # Xml rep: <keyword> type </keyword>
self.check_for_identifier() # Xml rep: <identifier> subroutineName </identifier>
SUBROUTINES.append(self.token.value) # Add subroutine name to subroutine list.
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_parameter_list() # Compile parameter list.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
self.compile_subroutine_body() # Compile subroutine body.
self.xml_data.append("</subroutineDec>") # Xml rep: </subroutineDec>
def compile_parameter_list(self):
"""
Compile parameter list.
---------------------------------------------
Rule => ((type varName) (',' type varName)*)?
---------------------------------------------
"""
self.xml_data.append("<parameterList>") # Xml rep: <parameterList>
if self.tokenizer.next().value != ")":
self.check_for_type() # Xml rep: <keyword> type </keyword>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
while self.tokenizer.next().value == ",":
self.check_for_value(",") # Xml rep: <symbol> , </symbol>
self.check_for_type() # Xml rep: <keyword> type </keyword>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
self.xml_data.append("</parameterList>") # Xml rep: </parameterList>
def compile_subroutine_body(self):
"""
Compile subroutine body.
----------------------------------
Rule => '{' varDec* statements '}'
----------------------------------
"""
self.xml_data.append("<subroutineBody>") # Xml rep: <subroutineBody>
self.check_for_value("{") # Xml rep: <symbol> { </symbol>
while self.tokenizer.next().value == "var":
self.compile_var_dec() # Compile variable declarations.
self.compile_statements() # Compile statements.
self.check_for_value("}") # Xml rep: <symbol> } </symbol>
self.xml_data.append("</subroutineBody>") # Xml rep: </subroutineBody>
def compile_var_dec(self):
"""
Compile variable declarations.
----------------------------------------------
Rule => 'var' type varName (',', varName)* ';'
----------------------------------------------
"""
self.xml_data.append("<varDec>") # Xml rep: <varDec>
self.check_for_value("var") # Xml rep: <keyword> var </keyword>
self.check_for_type() # Xml rep: <keyword> type </keyword>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
while self.tokenizer.next().value != ";":
self.check_for_value(",") # Xml rep: <symbol> ; </symbol>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
self.check_for_value(";") # Xml rep: <symbol> ; </symbol>
self.xml_data.append("</varDec>") # Xml rep: </varDec>
def compile_statements(self):
"""
Compile statements.
-----------------------------------------------------------------------------------
Rule => letStatement | ifStatement | whileStatement | doStatement | returnStatement
-----------------------------------------------------------------------------------
"""
self.xml_data.append("<statements>") # Xml rep: <statements>
while self.tokenizer.next().value != "}":
token = self.tokenizer.next().value
if token == 'let':
self.compile_let_statement() # Compile let statement.
elif token == 'while':
self.compile_while_statement() # Compile while statement.
elif token == 'if':
self.compile_if_statement() # Compile if statement.
elif token == 'do':
self.compile_do_statement() # Compile do statement.
elif token == 'return':
self.compile_return_statement() # Compile return statement.
self.xml_data.append("</statements>") # Xml rep: </statements>
def compile_let_statement(self):
"""
Compile let statement.
--------------------------------------------------------------
Rule => 'let' varName ('[' expression ']')? '=' expression ';'
--------------------------------------------------------------
"""
self.xml_data.append("<letStatement>") # Xml rep: <letStatement>
self.check_for_value("let") # Xml rep: <keyword> let </keyword>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
var = self.symbol_table.find(self.token.value)
if self.tokenizer.next().value == '[':
self.check_for_value("[") # Xml rep: <symbol> [ </symbol>
self.compile_expression("]") # Compile expression.
self.check_for_value("]") # Xml rep: <symbol> ] </symbol>
self.check_for_value("=") # Xml rep: <symbol> = </symbol>
self.compile_expression(";") # Compile expression.
self.check_for_value(";") # Xml rep: <symbol> ; </symbol>
self.xml_data.append("</letStatement>") # Xml rep: </letStatement>
def compile_while_statement(self):
"""
Compile while statement.
-----------------------------------------------------
Rule => 'while' '(' expression ')' '{' statements '}'
-----------------------------------------------------
"""
self.xml_data.append("<whileStatement>") # Xml rep: <whileStatement>
self.check_for_value("while") # Xml rep: <keyword> while </keyword>
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression(")") # Compile expression.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
self.check_for_value("{") # Xml rep: <symbol> { </symbol>
self.compile_statements() # Compile statements.
self.check_for_value("}") # Xml rep: <symbol> } </symbol>
self.xml_data.append("</whileStatement>") # Xml rep: </whileStatement>
def compile_if_statement(self):
"""
Compile if statement.
-------------------------------------------------------------------------------
Rule => 'if' '(' expression ')' '{' statements '}' ('else' '{' statements '}')?
-------------------------------------------------------------------------------
"""
self.xml_data.append("<ifStatement>") # Xml rep: <ifStatement>
self.check_for_value("if") # Xml rep: <keyword> if </keyword>
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression(")") # Compile expression.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
self.check_for_value("{") # Xml rep: <symbol> { </symbol>
self.compile_statements() # Compile statements.
self.check_for_value("}") # Xml rep: <symbol> } </symbol>
if self.tokenizer.next().value == 'else':
self.check_for_value('else') # Xml rep: <keyword> else </keyword>
self.check_for_value('{') # Xml rep: <symbol> { </symbol>
self.compile_statements() # Compile statements.
self.check_for_value('}') # Xml rep: <symbol> } </symbol>
self.xml_data.append("</ifStatement>") # Xml rep: </ifStatement>
def compile_do_statement(self):
"""
Compile do statement.
-------------------------------
Rule => 'do' subroutineCall ';'
-------------------------------
"""
self.xml_data.append("<doStatement>") # Xml rep: <doStatement>
self.check_for_value("do") # Xml rep: <keword> do </keyword>
self.compile_subroutine_call() # Compile subroutine call.
self.check_for_value(";") # Xml rep: <symbol> ; </symbol>
self.xml_data.append("</doStatement>") # Xml rep: </doStatement>
def compile_return_statement(self):
"""
Compile return statement.
--------------------------------
Rule => 'return' expression? ';'
--------------------------------
"""
self.xml_data.append("<returnStatement>") # Xml rep: <returnStatement>
self.check_for_value("return") # Xml rep: <keword> return </keyword>
if self.tokenizer.next().value != ";":
self.compile_expression(';')
self.check_for_value(";") # Xml rep: <symbol> ; </symbol>
self.xml_data.append("</returnStatement>") # Xml rep: </returnStatement>
def compile_subroutine_call(self):
"""
Compile subroutine call.
---------------------------------------------------------------------------------------------------------------
Rule => subroutineName '(' expressionList ')' | (className | varName) '.' subroutineName '(' expressionList ')'
---------------------------------------------------------------------------------------------------------------
"""
self.xml_data.append("<subroutineCall>") # Xml rep: <subroutineCall>
self.check_for_identifier() # Xml rep: <identifier> subroutineName | (className | varName) </identifier>
if self.tokenizer.next().value == ".":
self.check_for_value(".") # Xml rep: <symbol> . </symbol>
self.check_for_identifier() # Xml rep: <identifier> subroutineName </identifier>
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression_list() # Compile expression list.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
self.xml_data.append("</subroutineCall>") # Xml rep: </subroutineCall>
def compile_expression(self, *end):
"""
Compile expression.
-----------------------
Rule => term (op term)*
-----------------------
"""
self.xml_data.append("<expression>") # Xml rep:<expression>
self.compile_term() # Compile term.
while self.tokenizer.next().value not in end:
self.check_for_operator() # Xml rep: <symbol> operator </symbol>
self.compile_term() # Compile term.
self.xml_data.append("</expression>") # Xml rep: </expression>
def compile_term(self):
"""
Compile term.
----------------------------------------------------------------------------------
Rule => integerConstant | stringConstant | keywordConstant | unaryOp term |
varName | varName'[' expression ']' | subroutineCall | '(' expression ')'
----------------------------------------------------------------------------------
"""
self.xml_data.append("<term>") # Xml rep: <term>
if self.tokenizer.next().type in ["integerConstant", "stringConstant"] or self.tokenizer.next().value in KEYWORD_CONSANTS:
self.token = self.tokenizer.advance()
self.xml_data.append(self.token.__str__()) # Xml rep: <integerConstant | stringConstant | keyword> value </integerConstant | stringConstant | keyword>
elif self.tokenizer.next().value in UNARY_OP:
self.token = self.tokenizer.advance()
self.xml_data.append(self.token.__str__()) # Xml rep: <symbol> unaryOp </symbol>
self.compile_term() # Compile term.
elif self.tokenizer.next().value == "(":
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression(")") # Compile expression.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
else:
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
var = self.symbol_table.find(self.token.value)
if self.tokenizer.next().value == "[":
self.check_for_value("[") # Xml rep: <symbol> [ </symbol>
self.compile_expression("]") # Compile expression.
self.check_for_value("]") # Xml rep: <symbol> ] </symbol>
elif self.tokenizer.next().value == ".":
self.check_for_value(".") # Xml rep: <symbol> . </symbol>
self.check_for_identifier() # Xml rep: <identifier> subroutineName </identifier>
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression_list() # Compile expression list.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
elif self.tokenizer.next().value == "(":
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression_list() # Compile expression list.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
self.xml_data.append("</term>") # Xml rep: </term>
def compile_expression_list(self):
"""
Compile expression list.
---------------------------------------
Rule => (expression (',' expression)*)?
---------------------------------------
"""
self.xml_data.append("<expressionList>") # Xml rep: <expressionList>
if self.tokenizer.next().value != ")":
self.compile_expression(",", ")") # Compile expression.
while self.tokenizer.next().value == ",":
self.check_for_value(",") # Xml rep: <symbol> , </symbol>
self.compile_expression(",", ")")
self.xml_data.append("</expressionList>") # Xml rep: </expressionList>
def export_xml(self, file_name):
""" Export code structure to file in xml format. """
with open("xml-export/{0}.structure.xml".format(file_name), "w") as xml_file:
for line in self.xml_data:
xml_file.write(line + "\n") | src/parser.py | from src.constants import *
from src.token import Token
from src.tokenizer import Tokenizer
from src.symbol_table import SymbolTable
from src.variable import Variable
from copy import copy
CLASSES = []
SUBROUTINES = []
class Parser(object):
def __init__(self, tokenizer):
""" Constructs parser object. """
self.xml_data = [] # For xml export.
self.symbol_table = SymbolTable() # Create symbol table for class scope.
self.tokenizer = tokenizer # Tokenizer.
self.token = None # Current token.
self.compile_class()
def check_for_value(self, value):
""" Check if current token has expected value. """
self.token = self.tokenizer.advance()
if self.token.value != value:
raise Exception("Error: Excpected value => '{0}' but got => '{1}'".format(value, self.token.value))
if self.token.value in XML_REPLACE.keys():
self.xml_data.append("<{0}> {1} </{0}>".format(self.token.type, XML_REPLACE[self.token.value]))
else:
self.xml_data.append(self.token.__str__())
    def check_for_identifier(self):
        """Advance the tokenizer and require the next token to be a valid identifier.

        The token must have type ``identifier`` and match the R_IDENTIFIER
        pattern; on success its xml representation is appended to
        ``self.xml_data``.

        Raises:
            Exception: if the token is not a syntactically valid identifier.
        """
        self.token = self.tokenizer.advance()
        # NOTE(review): ``re`` is not among this module's visible imports;
        # presumably it is re-exported by the ``from src.constants import *``
        # star import — TODO confirm, otherwise this line raises NameError.
        if self.token.type != "identifier" or (not re.match(R_IDENTIFIER, self.token.value)):
            raise Exception("Error: Identifier name not valid => '{0}'".format(self.token.value))
        self.xml_data.append(self.token.__str__())
def check_for_type(self):
""" Check if current token has valid type. """
self.token = self.tokenizer.advance()
if self.token.value not in list(TYPES) + CLASSES:
raise Exception("Error: Not valid type => '{0}'".format(self.token.value))
self.xml_data.append(self.token.__str__())
def check_for_operator(self):
""" Check if current token is operator. """
self.token = self.tokenizer.advance()
if self.token.value not in OP:
raise Exception("Error: Invalid operator => '{0}'".format(self.token.value))
if self.token.value in XML_REPLACE.keys():
self.xml_data.append("<{0}> {1} </{0}>".format(self.token.type, XML_REPLACE[self.token.value]))
else:
self.xml_data.append(self.token.__str__())
def compile_class(self):
"""
Compile class.
-------------------------------------------------------------
Rule => 'class' className '{' classVarDec* subroutineDec* '}'
-------------------------------------------------------------
"""
self.xml_data.append("<class>") # Xml rep: <class>
self.check_for_value('class') # Xml rep: <keyword> class </keyword>
self.check_for_identifier() # Xml rep: <identifier> className </identifier>
CLASSES.append(self.token.value) # Add class name to list of classes.
self.check_for_value('{') # Xml rep: <symbol> { </symbol>
while self.tokenizer.next().value != "}":
self.token = self.tokenizer.advance()
if self.token.value in ['static', 'field']:
self.compile_class_var_dec() # Compile class variable declarations.
elif self.token.value in ['constructor', 'function', 'method']:
self.compile_subroutine_dec() # Compile class subroutine declarations.
self.check_for_value("}") # Xml rep: <symbol> } </symbol>
self.xml_data.append("</class>") # Xml rep: </class>
def compile_class_var_dec(self):
"""
Compile class variable declarations.
-------------------------------------------------------------
Rule => ('static' | 'field') type varName (',', varName)* ';'
-------------------------------------------------------------
"""
self.xml_data.append("<classVarDec>") # Xml rep: <classVarDec>
variable = Variable()
self.xml_data.append(self.token.__str__()) # Xml rep: <keyword> ('static' | 'field') </keyword>
variable.kind = self.token.value
self.check_for_type() # Xml rep: <keyword> type </keyword>
variable.type = self.token.value
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
variable.name = self.token.value
self.symbol_table.add(variable) # Add variable to class scope symbol table.
while self.tokenizer.next().value != ";":
self.check_for_value(",") # Xml rep: <symbol> , </symbol>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
v = copy(variable)
v.name = self.token.value
self.symbol_table.add(v) # Add variable to class scope symbol table.
self.check_for_value(";") # Xml rep: <symbol> ; </symbol>
self.xml_data.append("</classVarDec>") # Xml rep: </classVarDec>
def compile_subroutine_dec(self):
"""
Compile class subroutine declarations.
-------------------------------------------------------------------------------------------------------------------
Rule => ('constructor' | 'function' | 'method') ('void' | type) subroutineName '(' parameterList ')' subroutineBody
-------------------------------------------------------------------------------------------------------------------
"""
self.xml_data.append("<subroutineDec>") # Xml rep: <subroutineDec>
self.xml_data.append(self.token.__str__()) # Xml rep: <keyword> ('constructor' | 'function' | 'method')) </keyword>
self.check_for_type() # Xml rep: <keyword> type </keyword>
self.check_for_identifier() # Xml rep: <identifier> subroutineName </identifier>
SUBROUTINES.append(self.token.value) # Add subroutine name to subroutine list.
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_parameter_list() # Compile parameter list.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
self.compile_subroutine_body() # Compile subroutine body.
self.xml_data.append("</subroutineDec>") # Xml rep: </subroutineDec>
def compile_parameter_list(self):
"""
Compile parameter list.
---------------------------------------------
Rule => ((type varName) (',' type varName)*)?
---------------------------------------------
"""
self.xml_data.append("<parameterList>") # Xml rep: <parameterList>
if self.tokenizer.next().value != ")":
self.check_for_type() # Xml rep: <keyword> type </keyword>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
while self.tokenizer.next().value == ",":
self.check_for_value(",") # Xml rep: <symbol> , </symbol>
self.check_for_type() # Xml rep: <keyword> type </keyword>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
self.xml_data.append("</parameterList>") # Xml rep: </parameterList>
def compile_subroutine_body(self):
"""
Compile subroutine body.
----------------------------------
Rule => '{' varDec* statements '}'
----------------------------------
"""
self.xml_data.append("<subroutineBody>") # Xml rep: <subroutineBody>
self.check_for_value("{") # Xml rep: <symbol> { </symbol>
while self.tokenizer.next().value == "var":
self.compile_var_dec() # Compile variable declarations.
self.compile_statements() # Compile statements.
self.check_for_value("}") # Xml rep: <symbol> } </symbol>
self.xml_data.append("</subroutineBody>") # Xml rep: </subroutineBody>
    def compile_var_dec(self):
        """
        Compile variable declarations.
        ----------------------------------------------
        Rule => 'var' type varName (',', varName)* ';'
        ----------------------------------------------
        """
        self.xml_data.append("<varDec>") # Xml rep: <varDec>
        self.check_for_value("var") # Xml rep: <keyword> var </keyword>
        self.check_for_type() # Xml rep: <keyword> type </keyword>
        self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
        # NOTE(review): unlike compile_class_var_dec, local variables are not
        # registered in the symbol table here — confirm whether that is intended.
        while self.tokenizer.next().value != ";":
            self.check_for_value(",") # Xml rep: <symbol> , </symbol>  (comment fixed: previously claimed ';')
            self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
        self.check_for_value(";") # Xml rep: <symbol> ; </symbol>
        self.xml_data.append("</varDec>") # Xml rep: </varDec>
def compile_statements(self):
"""
Compile statements.
-----------------------------------------------------------------------------------
Rule => letStatement | ifStatement | whileStatement | doStatement | returnStatement
-----------------------------------------------------------------------------------
"""
self.xml_data.append("<statements>") # Xml rep: <statements>
while self.tokenizer.next().value != "}":
token = self.tokenizer.next().value
if token == 'let':
self.compile_let_statement() # Compile let statement.
elif token == 'while':
self.compile_while_statement() # Compile while statement.
elif token == 'if':
self.compile_if_statement() # Compile if statement.
elif token == 'do':
self.compile_do_statement() # Compile do statement.
elif token == 'return':
self.compile_return_statement() # Compile return statement.
self.xml_data.append("</statements>") # Xml rep: </statements>
def compile_let_statement(self):
"""
Compile let statement.
--------------------------------------------------------------
Rule => 'let' varName ('[' expression ']')? '=' expression ';'
--------------------------------------------------------------
"""
self.xml_data.append("<letStatement>") # Xml rep: <letStatement>
self.check_for_value("let") # Xml rep: <keyword> let </keyword>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
var = self.symbol_table.find(self.token.value)
if self.tokenizer.next().value == '[':
self.check_for_value("[") # Xml rep: <symbol> [ </symbol>
self.compile_expression("]") # Compile expression.
self.check_for_value("]") # Xml rep: <symbol> ] </symbol>
self.check_for_value("=") # Xml rep: <symbol> = </symbol>
self.compile_expression(";") # Compile expression.
self.check_for_value(";") # Xml rep: <symbol> ; </symbol>
self.xml_data.append("</letStatement>") # Xml rep: </letStatement>
def compile_while_statement(self):
"""
Compile while statement.
-----------------------------------------------------
Rule => 'while' '(' expression ')' '{' statements '}'
-----------------------------------------------------
"""
self.xml_data.append("<whileStatement>") # Xml rep: <whileStatement>
self.check_for_value("while") # Xml rep: <keyword> while </keyword>
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression(")") # Compile expression.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
self.check_for_value("{") # Xml rep: <symbol> { </symbol>
self.compile_statements() # Compile statements.
self.check_for_value("}") # Xml rep: <symbol> } </symbol>
self.xml_data.append("</whileStatement>") # Xml rep: </whileStatement>
def compile_if_statement(self):
"""
Compile if statement.
-------------------------------------------------------------------------------
Rule => 'if' '(' expression ')' '{' statements '}' ('else' '{' statements '}')?
-------------------------------------------------------------------------------
"""
self.xml_data.append("<ifStatement>") # Xml rep: <ifStatement>
self.check_for_value("if") # Xml rep: <keyword> if </keyword>
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression(")") # Compile expression.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
self.check_for_value("{") # Xml rep: <symbol> { </symbol>
self.compile_statements() # Compile statements.
self.check_for_value("}") # Xml rep: <symbol> } </symbol>
if self.tokenizer.next().value == 'else':
self.check_for_value('else') # Xml rep: <keyword> else </keyword>
self.check_for_value('{') # Xml rep: <symbol> { </symbol>
self.compile_statements() # Compile statements.
self.check_for_value('}') # Xml rep: <symbol> } </symbol>
self.xml_data.append("</ifStatement>") # Xml rep: </ifStatement>
def compile_do_statement(self):
"""
Compile do statement.
-------------------------------
Rule => 'do' subroutineCall ';'
-------------------------------
"""
self.xml_data.append("<doStatement>") # Xml rep: <doStatement>
self.check_for_value("do") # Xml rep: <keword> do </keyword>
self.compile_subroutine_call() # Compile subroutine call.
self.check_for_value(";") # Xml rep: <symbol> ; </symbol>
self.xml_data.append("</doStatement>") # Xml rep: </doStatement>
def compile_return_statement(self):
"""
Compile return statement.
--------------------------------
Rule => 'return' expression? ';'
--------------------------------
"""
self.xml_data.append("<returnStatement>") # Xml rep: <returnStatement>
self.check_for_value("return") # Xml rep: <keword> return </keyword>
if self.tokenizer.next().value != ";":
self.compile_expression(';')
self.check_for_value(";") # Xml rep: <symbol> ; </symbol>
self.xml_data.append("</returnStatement>") # Xml rep: </returnStatement>
    def compile_subroutine_call(self):
        """
        Compile subroutine call.
        ---------------------------------------------------------------------------------------------------------------
        Rule => subroutineName '(' expressionList ')' | (className | varName) '.' subroutineName '(' expressionList ')'
        ---------------------------------------------------------------------------------------------------------------
        """
        self.xml_data.append("<subroutineCall>") # Xml rep: <subroutineCall>
        self.check_for_identifier() # Xml rep: <identifier> subroutineName | (className | varName) </identifier>
        # One token of lookahead decides between the plain and the dotted call form.
        if self.tokenizer.next().value == ".":
            self.check_for_value(".") # Xml rep: <symbol> . </symbol>
            self.check_for_identifier() # Xml rep: <identifier> subroutineName </identifier>
        self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
        self.compile_expression_list() # Compile expression list.
        self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
        self.xml_data.append("</subroutineCall>") # Xml rep: </subroutineCall>
    def compile_expression(self, *end):
        """
        Compile expression.
        -----------------------
        Rule => term (op term)*
        -----------------------

        ``end`` lists the terminator token values (e.g. ';', ')', ',') that
        stop the operator/term loop; the terminator itself is left in the
        stream for the caller to consume.
        """
        self.xml_data.append("<expression>") # Xml rep:<expression>
        self.compile_term() # Compile term.
        while self.tokenizer.next().value not in end:
            self.check_for_operator() # Xml rep: <symbol> operator </symbol>
            self.compile_term() # Compile term.
        self.xml_data.append("</expression>") # Xml rep: </expression>
def compile_term(self):
"""
Compile term.
----------------------------------------------------------------------------------
Rule => integerConstant | stringConstant | keywordConstant | unaryOp term |
varName | varName'[' expression ']' | subroutineCall | '(' expression ')'
----------------------------------------------------------------------------------
"""
self.xml_data.append("<term>") # Xml rep: <term>
if self.tokenizer.next().type in ["integerConstant", "stringConstant"] or self.tokenizer.next().value in KEYWORD_CONSANTS:
self.token = self.tokenizer.advance()
self.xml_data.append(self.token.__str__()) # Xml rep: <integerConstant | stringConstant | keyword> value </integerConstant | stringConstant | keyword>
elif self.tokenizer.next().value in UNARY_OP:
self.token = self.tokenizer.advance()
self.xml_data.append(self.token.__str__()) # Xml rep: <symbol> unaryOp </symbol>
self.compile_term() # Compile term.
elif self.tokenizer.next().value == "(":
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression(")") # Compile expression.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
else:
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
var = self.symbol_table.find(self.token.value)
if self.tokenizer.next().value == "[":
self.check_for_value("[") # Xml rep: <symbol> [ </symbol>
self.compile_expression("]") # Compile expression.
self.check_for_value("]") # Xml rep: <symbol> ] </symbol>
elif self.tokenizer.next().value == ".":
self.check_for_value(".") # Xml rep: <symbol> . </symbol>
self.check_for_identifier() # Xml rep: <identifier> subroutineName </identifier>
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression_list() # Compile expression list.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
elif self.tokenizer.next().value == "(":
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression_list() # Compile expression list.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
self.xml_data.append("</term>") # Xml rep: </term>
    def compile_expression_list(self):
        """
        Compile expression list.
        ---------------------------------------
        Rule => (expression (',' expression)*)?
        ---------------------------------------
        """
        self.xml_data.append("<expressionList>") # Xml rep: <expressionList>
        # Empty list: a ')' immediately follows the opening parenthesis.
        if self.tokenizer.next().value != ")":
            self.compile_expression(",", ")") # Compile expression.
            while self.tokenizer.next().value == ",":
                self.check_for_value(",") # Xml rep: <symbol> , </symbol>
                self.compile_expression(",", ")")
        self.xml_data.append("</expressionList>") # Xml rep: </expressionList>
def export_xml(self, file_name):
""" Export code structure to file in xml format. """
with open("xml-export/{0}.structure.xml".format(file_name), "w") as xml_file:
for line in self.xml_data:
xml_file.write(line + "\n") | 0.496094 | 0.123762 |
from zorro.di import di, has_dependencies, dependency
from cairo import SolidPattern
import cairo
from .base import Widget
from tilenol.commands import CommandDispatcher
from tilenol.theme import Theme
from tilenol.ewmh import get_title
@has_dependencies
class Title(Widget):
dispatcher = dependency(CommandDispatcher, 'commander')
theme = dependency(Theme, 'theme')
stretched = True
def __zorro_di_done__(self):
bar = self.theme.bar
self.color = bar.text_color_pat
self.font = bar.font
self.padding = bar.text_padding
self.dispatcher.events['window'].listen(self.window_changed)
self.oldwin = None
def window_changed(self):
if self.oldwin is not None:
self.oldwin.property_changed.unlisten(self.bar.redraw.emit)
win = self.dispatcher.get('window', None)
if win is not None:
win.property_changed.listen(self.bar.redraw.emit)
self.oldwin = win
self.bar.redraw.emit()
def draw(self, canvas, l, r):
win = self.dispatcher.get('window', None)
if not win:
return r, r
canvas.set_source(self.color)
self.font.apply(canvas)
canvas.move_to(l + self.padding.left,
self.height - self.padding.bottom)
canvas.show_text(get_title(win) or '')
return r, r
@has_dependencies
class Icon(Widget):
dispatcher = dependency(CommandDispatcher, 'commander')
theme = dependency(Theme, 'theme')
def __zorro_di_done__(self):
self.padding = self.theme.bar.box_padding
self.dispatcher.events['window'].listen(self.window_changed)
self.oldwin = None
def window_changed(self):
if self.oldwin is not None:
self.oldwin.property_changed.unlisten(self.bar.redraw.emit)
win = self.dispatcher.get('window', None)
if win is not None:
win.property_changed.listen(self.bar.redraw.emit)
self.oldwin = win
self.bar.redraw.emit()
def draw(self, canvas, l, r):
win = self.dispatcher.get('window', None)
if not win or not getattr(win, 'icons', None):
return l, r
h = self.height - self.padding.bottom - self.padding.top
if self.right:
x = r - self.padding.right - h
else:
x = l + self.padding.left
win.draw_icon(canvas, x, self.padding.top, h)
if self.right:
return l, r - h - self.padding.left - self.padding.right
else:
return l + h + self.padding.left + self.padding.right, r | tilenol/widgets/title.py | from zorro.di import di, has_dependencies, dependency
from cairo import SolidPattern
import cairo
from .base import Widget
from tilenol.commands import CommandDispatcher
from tilenol.theme import Theme
from tilenol.ewmh import get_title
@has_dependencies
class Title(Widget):
    """Bar widget that draws the focused window's title text.

    Redraws the bar when the focused window changes and when the current
    window's properties (e.g. its title) change.
    """

    dispatcher = dependency(CommandDispatcher, 'commander')
    theme = dependency(Theme, 'theme')

    stretched = True  # the title claims all remaining horizontal bar space

    def __zorro_di_done__(self):
        """Dependency-injection hook: cache theme values and subscribe to focus changes."""
        bar = self.theme.bar
        self.color = bar.text_color_pat
        self.font = bar.font
        self.padding = bar.text_padding
        self.dispatcher.events['window'].listen(self.window_changed)
        self.oldwin = None  # last focused window whose property events we subscribed to

    def window_changed(self):
        """Move the property-change listener to the newly focused window and redraw."""
        # NOTE(review): self.bar is presumably attached by the owning bar /
        # Widget base (not visible in this file) — confirm.
        if self.oldwin is not None:
            self.oldwin.property_changed.unlisten(self.bar.redraw.emit)
        win = self.dispatcher.get('window', None)
        if win is not None:
            win.property_changed.listen(self.bar.redraw.emit)
        self.oldwin = win
        self.bar.redraw.emit()

    def draw(self, canvas, l, r):
        """Render the focused window's title between x-offsets l and r.

        Returns the (left, right) space remaining; ``r, r`` means the
        title consumes all remaining width (it is a stretched widget).
        """
        win = self.dispatcher.get('window', None)
        if not win:
            return r, r
        canvas.set_source(self.color)
        self.font.apply(canvas)
        canvas.move_to(l + self.padding.left,
                self.height - self.padding.bottom)
        canvas.show_text(get_title(win) or '')
        return r, r
@has_dependencies
class Icon(Widget):
    """Bar widget that draws the focused window's icon."""

    dispatcher = dependency(CommandDispatcher, 'commander')
    theme = dependency(Theme, 'theme')

    def __zorro_di_done__(self):
        """Dependency-injection hook: cache padding and subscribe to focus changes."""
        self.padding = self.theme.bar.box_padding
        self.dispatcher.events['window'].listen(self.window_changed)
        self.oldwin = None  # last focused window whose property events we subscribed to

    def window_changed(self):
        """Move the property-change listener to the newly focused window and redraw."""
        # NOTE(review): byte-for-byte identical to Title.window_changed —
        # candidate for a shared mixin.
        if self.oldwin is not None:
            self.oldwin.property_changed.unlisten(self.bar.redraw.emit)
        win = self.dispatcher.get('window', None)
        if win is not None:
            win.property_changed.listen(self.bar.redraw.emit)
        self.oldwin = win
        self.bar.redraw.emit()

    def draw(self, canvas, l, r):
        """Draw the window icon as a square of the bar's inner height.

        Returns the (left, right) x-range still available after the icon.
        ``self.right`` — presumably set by the Widget base (not visible
        here), confirm — selects right- vs left-edge layout.
        """
        win = self.dispatcher.get('window', None)
        if not win or not getattr(win, 'icons', None):
            return l, r
        h = self.height - self.padding.bottom - self.padding.top  # square icon: bar height minus vertical padding
        if self.right:
            x = r - self.padding.right - h
        else:
            x = l + self.padding.left
        win.draw_icon(canvas, x, self.padding.top, h)
        if self.right:
            return l, r - h - self.padding.left - self.padding.right
        else:
            return l + h + self.padding.left + self.padding.right, r
import math
from pathlib import Path
from typing import Tuple
import moderngl
import moderngl_window as mglw
from moderngl_window import geometry
from moderngl_window.scene.camera import KeyboardCamera
import numpy as np
from pyrr import Matrix44
class Object:
    """Base scene object holding translation / rotation / scale state.

    Each mutator keeps its component matrix (_mt / _mr / _ms) in sync; the
    ``matrix`` property combines them into the model matrix on demand.
    """

    def __init__(self) -> None:
        self._scale = np.array((0.0, 0.0, 0.0))
        self._rotation = np.array((0.0, 0.0, 0.0))
        self._translation = np.array((0.0, 0.0, 0.0))
        # Use Matrix44 identities rather than plain np.eye(4) so that ``*``
        # in the ``matrix`` property consistently means matrix multiplication;
        # the original mixed plain ndarrays (elementwise ``*``) with Matrix44.
        self._mt = Matrix44.identity()
        self._mr = Matrix44.identity()
        self._ms = Matrix44.identity()
        self.matrix = None  # harmless: ``matrix`` has a no-op setter

    # translation
    def set_translate(self, *xyz: float) -> None:
        """Set the current translation by overwriting the old one."""
        # Bug fix: store an ndarray. The original stored the raw tuple, so a
        # later translate() call concatenated tuples instead of adding vectors.
        self._translation = np.asarray(xyz, dtype=float)
        self._mt = Matrix44.from_translation(self._translation)

    def translate(self, *xyz: float) -> None:
        """Translate by xyz (relative)."""
        self._translation = self._translation + np.asarray(xyz, dtype=float)
        self._mt = Matrix44.from_translation(self._translation)

    # rotation
    def set_rotation(self, *xyz: float) -> None:
        """Set the current euler rotation by overwriting the old one."""
        self._rotation = np.asarray(xyz, dtype=float)  # same tuple-vs-array fix as set_translate
        self._mr = Matrix44.from_eulers(self._rotation)

    def rotate(self, *xyz: float) -> None:
        """Rotate by xyz euler angles (relative)."""
        self._rotation = self._rotation + np.asarray(xyz, dtype=float)
        self._mr = Matrix44.from_eulers(self._rotation)

    # scale
    def set_scale(self, *xyz: float) -> None:
        """Set the current scale by overwriting the old one."""
        self._scale = np.asarray(xyz, dtype=float)  # same tuple-vs-array fix as set_translate
        self._ms = Matrix44.from_scale(self._scale)

    def scale(self, *xyz: float) -> None:
        """Add xyz to the current scale factors (relative)."""
        self._scale = self._scale + np.asarray(xyz, dtype=float)
        self._ms = Matrix44.from_scale(self._scale)

    def render(self, *args) -> None:
        """Subclasses draw themselves here."""
        raise NotImplementedError()

    @property
    def matrix(self) -> Matrix44:
        """Combined model matrix (translation * rotation * scale) as float32."""
        return (self._mt * self._mr * self._ms).astype("f4")

    @matrix.setter
    def matrix(self, value: Matrix44) -> None:
        # Intentionally a no-op: lets __init__ assign without AttributeError.
        pass
class Cube(Object):
    """An axis-aligned cube mesh positioned and sized at construction time."""

    def __init__(
        self,
        pos: Tuple[float, float, float] = (0, 0, 0),
        size: Tuple[float, float, float] = (1, 1, 1),
    ) -> None:
        """Build the cube geometry centered at *pos* with edge lengths *size*."""
        super().__init__()
        self._mesh = geometry.cube(size=size, center=pos)

    def render(self, program) -> None:
        """Issue the draw call for this cube using shader *program*."""
        self._mesh.render(program)
class CubeViz(mglw.WindowConfig):
    """Base class with built in 3D camera support."""
    title = "Cube"
    resource_dir = (Path(__file__) / "../resources").absolute()
    aspect_ratio = None  # None -> viewport follows the window's aspect ratio
    window_size = 1280, 720
    resizable = True
    samples = 16  # MSAA sample count

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # self.wnd.mouse_exclusivity = True
        self.camera = KeyboardCamera(self.wnd.keys, aspect_ratio=self.wnd.aspect_ratio)
        self.camera_enabled = False
        self.render_program = self.load_program("cube_shader.glsl")
        # Upload the initial projection and camera matrices to the shader.
        self.render_program["projection"].write(self.camera.projection.tobytes())
        self.render_program["m_camera"].write(self.camera.matrix.astype("f4").tobytes())
        self.cube = Cube(size=(0.5, 0.5, 0.5))

    def render(self, time: float, frame_time: float) -> None:
        """Per-frame callback: animate the cube and draw it."""
        self.ctx.clear(51 / 255, 51 / 255, 51 / 255)  # dark gray background
        self.ctx.enable_only(moderngl.DEPTH_TEST | moderngl.CULL_FACE)
        s = math.sin(time * 2) / 2 + 1.5  # oscillates in [1.0, 2.0]
        self.cube.set_rotation(time, time / 2, time / 3)
        self.cube.set_translate(s * 4 - 6, 0, -3.0)
        self.render_program["model"].write(self.cube.matrix)
        self.render_program["m_camera"].write(self.camera.matrix.astype("f4"))
        self.cube.render(self.render_program)

    def resize(self, width: int, height: int) -> None:
        """Window resize callback: keep the camera projection's aspect ratio current."""
        self.camera.projection.update(aspect_ratio=self.wnd.aspect_ratio)
if __name__ == "__main__":
CubeViz.run() | visualization/CubeViz/cube_viz.py | import math
from pathlib import Path
from typing import Tuple
import moderngl
import moderngl_window as mglw
from moderngl_window import geometry
from moderngl_window.scene.camera import KeyboardCamera
import numpy as np
from pyrr import Matrix44
class Object:
def __init__(self) -> None:
self._scale = np.array((0.0, 0.0, 0.0))
self._rotation = np.array((0.0, 0.0, 0.0))
self._translation = np.array((0.0, 0.0, 0.0))
self._mt = np.eye(4)
self._mr = np.eye(4)
self._ms = np.eye(4)
self.matrix = None
# translation
def set_translate(self, *xyz: float) -> None:
"""Set the current translation by overwriting the old one."""
self._translation = xyz
self._mt = Matrix44.from_translation(self._translation)
def translate(self, *xyz: float) -> None:
"""Translate by xyz."""
self._translation += xyz
self._mt = Matrix44.from_translation(self._translation)
# rotation
def set_rotation(self, *xyz: float) -> None:
"""Set the current rotation by overwriting the old one."""
self._rotation = xyz
self._mr = Matrix44.from_eulers(self._rotation)
def rotate(self, *xyz: float) -> None:
"""Rotate by xyz."""
self._rotation += xyz
self._mr = Matrix44.from_eulers(self._rotation)
# scale
def set_scale(self, *xyz: float) -> None:
"""Set the current scale by overwriting the old one."""
self._scale = xyz
self._ms = Matrix44.from_scale(self._scale)
def scale(self, *xyz: float) -> None:
"""Scale by xyz."""
self._scale += xyz
self._ms = Matrix44.from_scale(self._scale)
def render(self, *args) -> None:
raise NotImplementedError()
@property
def matrix(self) -> Matrix44:
return (self._mt * self._mr * self._ms).astype("f4")
@matrix.setter
def matrix(self, value: Matrix44) -> None:
pass
class Cube(Object):
def __init__(
self,
pos: Tuple[float, float, float] = (0, 0, 0),
size: Tuple[float, float, float] = (1, 1, 1),
) -> None:
super().__init__()
self._cube = geometry.cube(size=size, center=pos)
def render(self, program) -> None:
self._cube.render(program)
class CubeViz(mglw.WindowConfig):
"""Base class with built in 3D camera support."""
title = "Cube"
resource_dir = (Path(__file__) / "../resources").absolute()
aspect_ratio = None
window_size = 1280, 720
resizable = True
samples = 16
def __init__(self, **kwargs):
super().__init__(**kwargs)
# self.wnd.mouse_exclusivity = True
self.camera = KeyboardCamera(self.wnd.keys, aspect_ratio=self.wnd.aspect_ratio)
self.camera_enabled = False
self.render_program = self.load_program("cube_shader.glsl")
self.render_program["projection"].write(self.camera.projection.tobytes())
self.render_program["m_camera"].write(self.camera.matrix.astype("f4").tobytes())
self.cube = Cube(size=(0.5, 0.5, 0.5))
def render(self, time: float, frame_time: float) -> None:
self.ctx.clear(51 / 255, 51 / 255, 51 / 255)
self.ctx.enable_only(moderngl.DEPTH_TEST | moderngl.CULL_FACE)
s = math.sin(time * 2) / 2 + 1.5
self.cube.set_rotation(time, time / 2, time / 3)
self.cube.set_translate(s * 4 - 6, 0, -3.0)
self.render_program["model"].write(self.cube.matrix)
self.render_program["m_camera"].write(self.camera.matrix.astype("f4"))
self.cube.render(self.render_program)
def resize(self, width: int, height: int) -> None:
self.camera.projection.update(aspect_ratio=self.wnd.aspect_ratio)
if __name__ == "__main__":
CubeViz.run() | 0.900732 | 0.401043 |
from os import urandom
from typing import Callable
from typing import Collection
from typing import Dict
from typing import List
from typing import Optional
from typing import Text
from delorean import Delorean
from django.contrib.auth import get_user_model
from django.test import Client
from rest_framework import status
from applications.onboarding.models import AuthProfile
from applications.profile.models import Profile
User = get_user_model()
class UserTestMixin:
    """Test helpers for creating users and their related profile objects.

    Convention: the generated user's username, password and verification
    code are all equal to *placeholder*, which keeps the token helper below
    free of extra bookkeeping.
    """

    def create_user(
        self,
        placeholder: Optional[str] = None,
        user_kw: Optional[Dict] = None,
        verified=False,
    ) -> User:
        """Create and return a user; with ``verified=True`` also attach profiles."""
        placeholder = placeholder or urandom(4).hex()
        form_data = {
            "username": placeholder,
            "email": f"{placeholder}@example.com",
            # Password equals the username: create_auth_token() relies on this.
            "password": placeholder,
        }
        user_kw = (user_kw or {}).copy()
        user_kw.update(form_data)
        # create_user() persists the user itself; the extra save() was redundant.
        user = User.objects.create_user(**user_kw)
        if verified:
            self.create_auth_profile(user)
            self.create_profile(user)
        return user

    @staticmethod
    def create_auth_profile(user: User) -> AuthProfile:
        """Attach a verified AuthProfile to *user* (verification code = username)."""
        auth = AuthProfile(
            user=user, verification_code=user.username, verified_at=Delorean().datetime,
        )
        auth.save()
        return auth

    @staticmethod
    def create_profile(user) -> Profile:
        """Attach a display Profile to *user*."""
        profile = Profile(user=user, name=f"name_{user.username}")
        profile.save()
        return profile

    def create_auth_token(self, user, client: Optional[Client] = None) -> str:
        """Obtain a DRF auth token for *user* via the API and return it.

        Assumes the user's password equals its username (see create_user).
        """
        cli = client or self.client
        credentials = {"username": user.username, "password": user.username}
        resp = cli.post("/api/obtain_auth_token/", credentials)
        self.assertEqual(status.HTTP_200_OK, resp.status_code)
        payload = resp.json()
        self.assertEqual(1, len(payload))
        self.assertIsInstance(payload, dict)
        self.assertIn("token", payload)
        token = payload["token"]
        self.assertTrue(token)
        return token
class TemplateResponseTestMixin:
    """Test helper for asserting on template-rendered (HTML) responses."""

    def validate_response(
        self,
        *,
        url: str,
        client: Optional = None,
        method: Optional[str] = "get",
        form_data: Optional[Dict] = None,
        expected_status_code: Optional[int] = 200,
        expected_view: Optional[type] = None,
        expected_view_name: Optional[str] = None,
        expected_template: Optional[str] = None,
        content_filters: Optional[Collection[Callable[[bytes], bool]]] = None,
        expected_redirect_chain: Optional[List] = None,
    ):
        """Request *url* (following redirects) and assert on status, resolver,
        template, redirect chain and arbitrary content predicates.

        View/template assertions only run for status 200, where the resolver
        match is meaningful.
        """
        cli = client or self.client
        meth = getattr(cli, method)
        meth_args = []
        if form_data:
            meth_args.append(form_data)
        resp = meth(url, *meth_args, follow=True)
        self.assertEqual(expected_status_code, resp.status_code)
        if expected_redirect_chain is not None:
            self.assertEqual(expected_redirect_chain, resp.redirect_chain)
        good_resolver_codes = {
            200,
        }
        if expected_status_code in good_resolver_codes:
            self.assertEqual(expected_view_name, resp.resolver_match.view_name)
            # Compare view callables by name: as_view() returns a fresh function each call.
            self.assertEqual(
                expected_view.as_view().__name__, resp.resolver_match.func.__name__,
            )
            self.assertIn(expected_template, resp.template_name)
        for content_filter in content_filters or []:
            self.assertTrue(content_filter(resp.content))
class ApiTestMixin:
def validate_response(
self,
url: str,
*,
client: Optional = None,
method: Optional[str] = "get",
headers: Optional[Dict[Text, Text]] = None,
data: Optional = None,
expected_status_code: Optional[int] = 200,
expected_response_payload: Optional = None,
):
cli = client or self.client
meth = getattr(cli, method)
kwargs = (headers or {}).copy()
if data is not None:
kwargs["data"] = data
resp = meth(url, content_type="application/json", **kwargs)
self.assertEqual(expected_status_code, resp.status_code)
if expected_response_payload is not None:
payload = resp.json()
self.assertEqual(expected_response_payload, payload) | src/project/utils/xtests.py | from os import urandom
from typing import Callable
from typing import Collection
from typing import Dict
from typing import List
from typing import Optional
from typing import Text
from delorean import Delorean
from django.contrib.auth import get_user_model
from django.test import Client
from rest_framework import status
from applications.onboarding.models import AuthProfile
from applications.profile.models import Profile
User = get_user_model()
class UserTestMixin:
def create_user(
self,
placeholder: Optional[str] = None,
user_kw: Optional[Dict] = None,
verified=False,
) -> User:
placeholder = placeholder or urandom(4).hex()
form_data = {
"username": f"{placeholder}",
"email": f"<EMAIL>",
"password": <PASSWORD>,
}
user_kw = (user_kw or {}).copy()
user_kw.update(form_data)
user = User.objects.create_user(**user_kw)
user.save()
if verified:
self.create_auth_profile(user)
self.create_profile(user)
return user
@staticmethod
def create_auth_profile(user: User) -> AuthProfile:
auth = AuthProfile(
user=user, verification_code=user.username, verified_at=Delorean().datetime,
)
auth.save()
return auth
@staticmethod
def create_profile(user) -> Profile:
profile = Profile(user=user, name=f"name_{user.username}")
profile.save()
return profile
def create_auth_token(self, user, client: Optional[Client] = None) -> str:
cli = client or self.client
credentials = {"username": user.username, "password": user.username}
resp = cli.post("/api/obtain_auth_token/", credentials)
self.assertEqual(status.HTTP_200_OK, resp.status_code)
payload = resp.json()
self.assertEqual(1, len(payload))
self.assertIsInstance(payload, dict)
self.assertIn("token", payload)
token = payload["token"]
self.assertTrue(token)
return token
class TemplateResponseTestMixin:
    """Test mixin for validating Django template-rendered responses.

    Mix into a ``django.test.TestCase``; relies on ``self.client`` and
    the ``assert*`` helpers being present.
    """

    def validate_response(
        self,
        *,
        url: str,
        client: Optional[Client] = None,
        method: Optional[str] = "get",
        form_data: Optional[Dict] = None,
        expected_status_code: Optional[int] = 200,
        expected_view: Optional[type] = None,
        expected_view_name: Optional[str] = None,
        expected_template: Optional[str] = None,
        content_filters: Optional[Collection[Callable[[bytes], bool]]] = None,
        expected_redirect_chain: Optional[List] = None,
    ):
        """Issue a request and assert on status, resolver, template and body.

        Redirects are followed (``follow=True``), so ``expected_status_code``
        is the status of the final response.  ``content_filters`` are
        predicates applied to the raw response body; each must return truthy.
        """
        cli = client or self.client
        # ``method`` names a client method ("get", "post", ...).
        meth = getattr(cli, method)
        meth_args = []
        if form_data:
            meth_args.append(form_data)
        resp = meth(url, *meth_args, follow=True)
        self.assertEqual(expected_status_code, resp.status_code)
        if expected_redirect_chain is not None:
            self.assertEqual(expected_redirect_chain, resp.redirect_chain)
        # Resolver/template checks only make sense for successful renders.
        good_resolver_codes = {
            200,
        }
        if expected_status_code in good_resolver_codes:
            self.assertEqual(expected_view_name, resp.resolver_match.view_name)
            # NOTE(review): raises AttributeError when expected_view is None
            # and expected_status_code is 200 -- callers must pass the view.
            self.assertEqual(
                expected_view.as_view().__name__, resp.resolver_match.func.__name__,
            )
            self.assertIn(expected_template, resp.template_name)
            # NOTE(review): indentation was lost in extraction; the filters
            # are assumed to run only for 200 responses -- confirm upstream.
            for content_filter in content_filters or []:
                self.assertTrue(content_filter(resp.content))
class ApiTestMixin:
    """Test mixin for validating JSON API responses.

    Mix into a ``django.test.TestCase``; relies on ``self.client`` and
    ``self.assertEqual`` being present.
    """

    def validate_response(
        self,
        url: str,
        *,
        client: Optional = None,
        method: Optional[str] = "get",
        headers: Optional[Dict[Text, Text]] = None,
        data: Optional = None,
        expected_status_code: Optional[int] = 200,
        expected_response_payload: Optional = None,
    ):
        """Issue a JSON request and assert on status code and payload."""
        http_client = client or self.client
        # ``method`` names a client method ("get", "post", ...).
        request = getattr(http_client, method)
        request_kwargs = dict(headers or {})
        if data is not None:
            request_kwargs["data"] = data
        response = request(url, content_type="application/json", **request_kwargs)
        self.assertEqual(expected_status_code, response.status_code)
        if expected_response_payload is not None:
            self.assertEqual(expected_response_payload, response.json())
import sys
import re
import itertools
class Moon:
    """A moon with an integer 3-D position and velocity."""

    def __init__(self, position, velocity):
        self._position = position
        self._velocity = velocity

    def get_position(self):
        """Return the current (x, y, z) position tuple."""
        return self._position

    def pull_towards(self, position):
        """Adjust velocity by +/-1 on each axis towards ``position``."""
        self._velocity = tuple(
            self._new_axis(axis, position) for axis in range(3)
        )

    def _new_axis(self, i, position):
        # Gravity nudges each velocity component one step towards the other
        # body; an equal coordinate leaves that component unchanged.
        delta = position[i] - self._position[i]
        if delta > 0:
            return self._velocity[i] + 1
        if delta < 0:
            return self._velocity[i] - 1
        return self._velocity[i]

    def __repr__(self):
        return f'pos={self._position}, vel={self._velocity}'

    def apply_velocity(self):
        """Advance the position by one time step."""
        self._position = tuple(
            p + v for p, v in zip(self._position, self._velocity)
        )

    def total_energy(self):
        """Total energy is potential times kinetic energy."""
        return self.potential_energy() * self.kinetic_energy()

    def potential_energy(self):
        """Sum of absolute position components."""
        return sum(abs(c) for c in self._position)

    def kinetic_energy(self):
        """Sum of absolute velocity components."""
        return sum(abs(c) for c in self._velocity)
class Simulation:
    """Drives the gravity/velocity update loop over a list of moons."""

    def __init__(self, moons):
        self._moons = moons

    def run(self, steps):
        """Advance the simulation by ``steps`` time steps."""
        for _ in range(steps):
            self._step()

    def _step(self):
        # One time step: apply gravity first, then move every moon.
        self._apply_gravity()
        self._apply_velocity()

    def _apply_gravity(self):
        # Every unordered pair pulls both partners towards each other,
        # using the positions captured before either one is updated.
        for left, right in itertools.combinations(self._moons, 2):
            left_pos = left.get_position()
            right_pos = right.get_position()
            left.pull_towards(right_pos)
            right.pull_towards(left_pos)

    def _apply_velocity(self):
        for moon in self._moons:
            moon.apply_velocity()
def run(input):
    """Parse moons from ``input``, simulate 1000 steps, return total energy."""
    moons = parse_moons(input)
    create_simulation(moons).run(1000)
    return calc_total_energy(moons)


def parse_moons(input):
    """Parse one Moon per line of ``input``."""
    return [parse_moon(line) for line in input.splitlines()]


def parse_moon(line):
    """Build a Moon at the parsed position with zero initial velocity."""
    return Moon(parse_vector3(line), (0, 0, 0))


def parse_vector3(string):
    """Parse a '<x=..., y=..., z=...>' line into an int 3-tuple."""
    match = re.match(r'<x=(.*), ?y=(.*), ?z=(.*)>', string)
    return tuple(int(match.group(i)) for i in (1, 2, 3))


def create_simulation(moons):
    """Factory wrapper around Simulation."""
    return Simulation(moons)


def calc_total_energy(moons):
    """Sum of total_energy() over all moons."""
    return sum(moon.total_energy() for moon in moons)


if __name__ == '__main__':
    print(run(sys.stdin.read()))
import re
import itertools
class Moon:
def __init__(self, position, velocity):
self._position = position
self._velocity = velocity
def get_position(self):
return self._position
def pull_towards(self, position):
self._velocity = tuple(map(lambda x: self._new_axis(x, position), range(3)))
def _new_axis(self, i, position):
if self._position[i] < position[i]:
return self._velocity[i] + 1
elif self._position[i] > position[i]:
return self._velocity[i] - 1
return self._velocity[i]
def __repr__(self):
return f'pos={self._position}, vel={self._velocity}'
def apply_velocity(self):
self._position = tuple(map(sum, zip(self._position, self._velocity)))
def total_energy(self):
return self.potential_energy() * self.kinetic_energy()
def potential_energy(self):
return sum(map(abs, self._position))
def kinetic_energy(self):
return sum(map(abs, self._velocity))
class Simulation:
def __init__(self, moons):
self._moons = moons
def run(self, steps):
for i in range(steps):
self._step()
def _step(self):
self._apply_gravity()
self._apply_velocity()
def _apply_gravity(self):
for pair in itertools.combinations(self._moons, 2):
a_pos = pair[0].get_position()
b_pos = pair[1].get_position()
pair[0].pull_towards(b_pos)
pair[1].pull_towards(a_pos)
def _apply_velocity(self):
for moon in self._moons:
moon.apply_velocity()
def run(input):
moons = parse_moons(input)
simulation = create_simulation(moons)
simulation.run(1000)
return calc_total_energy(moons)
def parse_moons(input):
return list(map(parse_moon, input.splitlines()))
def parse_moon(line):
position = parse_vector3(line)
return Moon(position, (0, 0, 0))
def parse_vector3(string):
pattern = re.compile(r'<x=(.*), ?y=(.*), ?z=(.*)>')
match = pattern.match(string)
return (int(match.group(1)), int(match.group(2)), int(match.group(3)))
def create_simulation(moons):
return Simulation(moons)
def calc_total_energy(moons):
return sum(map(lambda x: x.total_energy(), moons))
if __name__ == '__main__':
print(run(sys.stdin.read())) | 0.389779 | 0.295211 |
import argparse
import plotly.graph_objects as go
import pandas as pd
import os
# Import processed data
from vaccine_dataprep_Swedentots import (
first_two_timeseries,
third_timseries,
fourth_timseries,
Swedish_population,
)
# CLI: the only option is the directory where plot JSON files are written.
aparser = argparse.ArgumentParser(description="Generate text insert json")
aparser.add_argument("--output-dir", nargs="?", default="vaccine_plots",
                     help="Output directory where the files will be saved")
args = aparser.parse_args()
# calculate percentages based on population size
# NOTE(review): "Antal vaccinerade" is assumed to be a cumulative count of
# vaccinated people per date -- confirm against vaccine_dataprep_Swedentots.
# first and second doses
first_two_timeseries["Vacc_perc_population"] = (
    first_two_timeseries["Antal vaccinerade"] / Swedish_population
) * 100
# Third dose
third_timseries["Vacc_perc_population"] = (
    third_timseries["Antal vaccinerade"] / Swedish_population
) * 100
# Fourth dose
fourth_timseries["Vacc_perc_population"] = (
    fourth_timseries["Antal vaccinerade"] / Swedish_population
) * 100
# separate the first and second doses
least_one_dose = first_two_timeseries[(first_two_timeseries["Vaccinationsstatus"] == "Minst 1 dos")]
least_two_doses = first_two_timeseries[(first_two_timeseries["Vaccinationsstatus"] == "Minst 2 doser")]
## Figure based on percentages calculated using population size
# One bar trace per dose level; each hover label shows date and percentage.
trace1 = go.Bar(
    x=least_one_dose["date"],
    y=least_one_dose["Vacc_perc_population"],
    name="At Least One Dose",
    marker_color="rgb(5,48,97)",
    marker_line_color="black",
    hovertemplate="Number of Doses: One Dose"
    + "<br>Date: %{x}"
    + "<br>Percent Vaccinated: %{y:.2f}%<extra></extra>",
)
trace2 = go.Bar(
    x=least_two_doses["date"],
    y=least_two_doses["Vacc_perc_population"],
    name="At Least Two Doses",
    marker_color="rgb(178,24,43)",
    marker_line_color="black",
    hovertemplate="Number of Doses: Two Doses"
    + "<br>Date: %{x}"
    + "<br>Percent Vaccinated: %{y:.2f}%<extra></extra>",
)
trace3 = go.Bar(
    x=third_timseries["date"],
    y=third_timseries["Vacc_perc_population"],
    name="At Least Three Doses",
    marker_color="rgb(255, 234, 0)",
    marker_line_color="black",
    hovertemplate="Number of Doses: Three Doses"
    + "<br>Date: %{x}"
    + "<br>Percent Vaccinated: %{y:.2f}%<extra></extra>",
)
trace4 = go.Bar(
    x=fourth_timseries["date"],
    y=fourth_timseries["Vacc_perc_population"],
    name="At Least Four Doses",
    marker_color="rgb(146,197,222)",
    marker_line_color="black",
    hovertemplate="Number of Doses: Four Doses"
    + "<br>Date: %{x}"
    + "<br>Percent Vaccinated: %{y:.2f}%<extra></extra>",
)
# figure layout
fig_pop = go.Figure(data=[trace1, trace2, trace3, trace4])
fig_pop.update_layout(
    plot_bgcolor="white",
    font=dict(size=14),
    margin=dict(l=0, r=50, t=0, b=0),
    showlegend=True,
    legend=dict(
        title=" ",
        # orientation="h",
        # yanchor="bottom",
        y=1.15,
        # xanchor="right",
        x=0.05,
        font=dict(size=14),
    ),
)
# modify x-axis
fig_pop.update_xaxes(
    title="<b>Date</b>",
    showgrid=True,
    linecolor="black",
    # set start point of x-axis
    tick0=least_one_dose["date"].iloc[0],
)
# modify y-axis (percentages, so fixed to 0-100)
fig_pop.update_yaxes(
    title="<b>Percentage Vaccinated</b>",
    showgrid=True,
    gridcolor="lightgrey",
    linecolor="black",
    range=[0, 100],
)
# fig_pop.show()
# Create the output directory (including missing parents) if needed.
# Fix: the original isdir()/mkdir() pair was check-then-create racy and
# failed when intermediate path components did not exist; makedirs with
# exist_ok=True handles both cases atomically enough for this script.
os.makedirs(args.output_dir, exist_ok=True)
# make figure for web
fig_pop.write_json(os.path.join(args.output_dir, "vaccine_timeseries_pop_barchart.json"))
# fig_pop.write_image("Plots/vaccine_timeseries_pop_barchart.png") | Vaccine_page/vaccine_timeseries_barchart.py | import argparse
import plotly.graph_objects as go
import pandas as pd
import os
# Import processed data
from vaccine_dataprep_Swedentots import (
first_two_timeseries,
third_timseries,
fourth_timseries,
Swedish_population,
)
aparser = argparse.ArgumentParser(description="Generate text insert json")
aparser.add_argument("--output-dir", nargs="?", default="vaccine_plots",
help="Output directory where the files will be saved")
args = aparser.parse_args()
# calculate percentages based on population size
# first and second doses
first_two_timeseries["Vacc_perc_population"] = (
first_two_timeseries["Antal vaccinerade"] / Swedish_population
) * 100
# Third dose
third_timseries["Vacc_perc_population"] = (
third_timseries["Antal vaccinerade"] / Swedish_population
) * 100
# Fourth dose
fourth_timseries["Vacc_perc_population"] = (
fourth_timseries["Antal vaccinerade"] / Swedish_population
) * 100
# separate the first and second doses
least_one_dose = first_two_timeseries[(first_two_timeseries["Vaccinationsstatus"] == "Minst 1 dos")]
least_two_doses = first_two_timeseries[(first_two_timeseries["Vaccinationsstatus"] == "Minst 2 doser")]
## Figure based on percentages calculated using population size
trace1 = go.Bar(
x=least_one_dose["date"],
y=least_one_dose["Vacc_perc_population"],
name="At Least One Dose",
marker_color="rgb(5,48,97)",
marker_line_color="black",
hovertemplate="Number of Doses: One Dose"
+ "<br>Date: %{x}"
+ "<br>Percent Vaccinated: %{y:.2f}%<extra></extra>",
)
trace2 = go.Bar(
x=least_two_doses["date"],
y=least_two_doses["Vacc_perc_population"],
name="At Least Two Doses",
marker_color="rgb(178,24,43)",
marker_line_color="black",
hovertemplate="Number of Doses: Two Doses"
+ "<br>Date: %{x}"
+ "<br>Percent Vaccinated: %{y:.2f}%<extra></extra>",
)
trace3 = go.Bar(
x=third_timseries["date"],
y=third_timseries["Vacc_perc_population"],
name="At Least Three Doses",
marker_color="rgb(255, 234, 0)",
marker_line_color="black",
hovertemplate="Number of Doses: Three Doses"
+ "<br>Date: %{x}"
+ "<br>Percent Vaccinated: %{y:.2f}%<extra></extra>",
)
trace4 = go.Bar(
x=fourth_timseries["date"],
y=fourth_timseries["Vacc_perc_population"],
name="At Least Four Doses",
marker_color="rgb(146,197,222)",
marker_line_color="black",
hovertemplate="Number of Doses: Four Doses"
+ "<br>Date: %{x}"
+ "<br>Percent Vaccinated: %{y:.2f}%<extra></extra>",
)
# figure layout
fig_pop = go.Figure(data=[trace1, trace2, trace3, trace4])
fig_pop.update_layout(
plot_bgcolor="white",
font=dict(size=14),
margin=dict(l=0, r=50, t=0, b=0),
showlegend=True,
legend=dict(
title=" ",
# orientation="h",
# yanchor="bottom",
y=1.15,
# xanchor="right",
x=0.05,
font=dict(size=14),
),
)
# modify x-axis
fig_pop.update_xaxes(
title="<b>Date</b>",
showgrid=True,
linecolor="black",
# set start point of x-axis
tick0=least_one_dose["date"].iloc[0],
)
# modify y-axis
fig_pop.update_yaxes(
title="<b>Percentage Vaccinated</b>",
showgrid=True,
gridcolor="lightgrey",
linecolor="black",
range=[0, 100],
)
# fig_pop.show()
if not os.path.isdir(args.output_dir):
os.mkdir(args.output_dir)
# make figure for web
fig_pop.write_json(os.path.join(args.output_dir, "vaccine_timeseries_pop_barchart.json"))
# fig_pop.write_image("Plots/vaccine_timeseries_pop_barchart.png") | 0.617282 | 0.419172 |
import sys
from drone.actions.emr_launcher import launch_emr_task
from drone.actions.ssh_launcher import launch_ssh_task
from drone.job_runner.dependency_manager import dependencies_are_met
from drone.job_runner.job_progress_checker import check_running_job_progress
from drone.metadata.metadata import get_job_info, job_status, set_ready, set_running, set_failed
task_launcher = {'ssh': launch_ssh_task,
'emr': launch_emr_task}
def process(job_config, settings):
    """Advance every recorded run of one job according to its status.

    Iterates the metadata rows for ``job_config['id']`` and, per row:
    retries failed runs (while the retry budget allows and dependencies
    are met), polls running ones, launches ready ones, promotes
    not-ready ones whose dependencies are now met, skips succeeded
    ones, and exits the process on an unknown status.
    """
    for job_id, schedule_time, execution_time, status, runs, uid in get_job_info(job_config.get('id'),
                                                                                 db_name=settings.metadata):
        if status == job_status.get('failed'):
            # Retry only while the configured retry budget (default 0)
            # exceeds the number of runs already attempted.
            if (int(job_config.get('retry')) if job_config.get('retry') else 0) > int(runs):
                settings.logger.debug(
                    '%s runs %s. set retries %s.' % (job_config.get('id'), runs, job_config.get('retry')))
                if dependencies_are_met(job_config, schedule_time, settings):
                    set_ready(job_config.get('id'), schedule_time, db_name=settings.metadata)
                    settings.logger.info('Job "%s" "%s" set as ready' % (job_config.get('id'), schedule_time))
                    run(job_config, schedule_time, settings)
                    continue
                else:
                    continue
            else:
                continue
        elif status == job_status.get('running'):
            # Poll the launcher-side progress of an in-flight run.
            check_running_job_progress(job_config, schedule_time, uid, settings)
            continue
        elif status == job_status.get('ready'):
            run(job_config, schedule_time, settings)
        elif status == job_status.get('succeeded'):
            continue
        elif status == job_status.get('not_ready'):
            # Promote to ready (and start) once upstream dependencies exist.
            if dependencies_are_met(job_config, schedule_time, settings):
                set_ready(job_config.get('id'), schedule_time, db_name=settings.metadata)
                settings.logger.info('Job "%s" "%s" set as ready' % (job_config.get('id'), schedule_time))
                run(job_config, schedule_time, settings)
            else:
                continue
        else:
            # Defensive: metadata contains a status this runner cannot handle.
            settings.logger.error('Unknown job status "%s"' % status)
            sys.exit(1)
def run(job_config, schedule_time, settings):
    """Launch a single job run and record the outcome in the metadata store.

    Dispatches to the launcher registered for the job ``type`` in
    ``task_launcher``, then marks the job 'running' (with the
    launcher-assigned uid) on success or 'failed' otherwise.
    """
    settings.logger.info('Starting job "%s" "%s"' % (job_config.get('id'), schedule_time))
    job_type = job_config.get('type')
    # Fix: the original validated with ``assert`` (stripped under -O)
    # inside a bare try/except that only logged, then fell through and
    # crashed calling ``task_launcher.get(job_type)`` (None) anyway.
    # Validate explicitly and bail out without launching.
    if job_type not in settings.supported_job_types:
        settings.logger.warning(
            'Unsupported job type %s. Valid types are %s' % (job_type, str(settings.supported_job_types)))
        return
    task_lauched_successfully, uid = task_launcher.get(job_type)(job_config, schedule_time, settings)
    if task_lauched_successfully:
        set_running(job_config.get('id'), schedule_time, uid, db_name=settings.metadata)
        settings.logger.info('Started job "%s" "%s"' % (job_config.get('id'), schedule_time))
    else:
        set_failed(job_config.get('id'), schedule_time, db_name=settings.metadata)
        settings.logger.warning('Failed to start job "%s" "%s"' % (job_config.get('id'), schedule_time))
from drone.actions.emr_launcher import launch_emr_task
from drone.actions.ssh_launcher import launch_ssh_task
from drone.job_runner.dependency_manager import dependencies_are_met
from drone.job_runner.job_progress_checker import check_running_job_progress
from drone.metadata.metadata import get_job_info, job_status, set_ready, set_running, set_failed
task_launcher = {'ssh': launch_ssh_task,
'emr': launch_emr_task}
def process(job_config, settings):
for job_id, schedule_time, execution_time, status, runs, uid in get_job_info(job_config.get('id'),
db_name=settings.metadata):
if status == job_status.get('failed'):
if (int(job_config.get('retry')) if job_config.get('retry') else 0) > int(runs):
settings.logger.debug(
'%s runs %s. set retries %s.' % (job_config.get('id'), runs, job_config.get('retry')))
if dependencies_are_met(job_config, schedule_time, settings):
set_ready(job_config.get('id'), schedule_time, db_name=settings.metadata)
settings.logger.info('Job "%s" "%s" set as ready' % (job_config.get('id'), schedule_time))
run(job_config, schedule_time, settings)
continue
else:
continue
else:
continue
elif status == job_status.get('running'):
check_running_job_progress(job_config, schedule_time, uid, settings)
continue
elif status == job_status.get('ready'):
run(job_config, schedule_time, settings)
elif status == job_status.get('succeeded'):
continue
elif status == job_status.get('not_ready'):
if dependencies_are_met(job_config, schedule_time, settings):
set_ready(job_config.get('id'), schedule_time, db_name=settings.metadata)
settings.logger.info('Job "%s" "%s" set as ready' % (job_config.get('id'), schedule_time))
run(job_config, schedule_time, settings)
else:
continue
else:
settings.logger.error('Unknown job status "%s"' % status)
sys.exit(1)
def run(job_config, schedule_time, settings):
settings.logger.info('Starting job "%s" "%s"' % (job_config.get('id'), schedule_time))
job_type = job_config.get('type')
try:
assert job_type in settings.supported_job_types
except:
settings.logger.warning(
'Unsupported job type %s. Valid types are %s' % (job_type, str(settings.supported_job_types)))
task_lauched_successfully, uid = task_launcher.get(job_type)(job_config, schedule_time, settings)
if task_lauched_successfully:
set_running(job_config.get('id'), schedule_time, uid, db_name=settings.metadata)
settings.logger.info('Started job "%s" "%s"' % (job_config.get('id'), schedule_time))
else:
set_failed(job_config.get('id'), schedule_time, db_name=settings.metadata)
settings.logger.warning('Failed to start job "%s" "%s"' % (job_config.get('id'), schedule_time)) | 0.101673 | 0.057599 |
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
    """Smoke test: feeds a fixed 42x42 grid via input() and checks stdout.

    The grid contains a single checkerboard region of '#' cells, so the
    solution under test is expected to print '1'.
    """

    # Each element is one line of stdin: first "rows cols", then the grid.
    @patch('builtins.input', side_effect=[
        '42 42',
        '__________________________________________',
        '__________________________________________',
        '__________________________________________',
        '__________________________________________',
        '__________________________________________',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
        '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
        '__________________________________________',
        '__________________________________________',
        '__________________________________________',
        '__________________________________________',
        '__________________________________________',
    ])
    def test_case_0(self, input_mock=None):
        # Importing the solution module runs it top-to-bottom; capture
        # everything it prints while the patched input() feeds the grid.
        text_trap = io.StringIO()
        with redirect_stdout(text_trap):
            import solution
        self.assertEqual(text_trap.getvalue(), '1\n')
if __name__ == '__main__':
unittest.main() | hackerearth/Algorithms/Where is Checkerboard/test.py | import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'42 42',
'__________________________________________',
'__________________________________________',
'__________________________________________',
'__________________________________________',
'__________________________________________',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'__________________________________________',
'__________________________________________',
'__________________________________________',
'__________________________________________',
'__________________________________________',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(), '1\n')
if __name__ == '__main__':
unittest.main() | 0.228845 | 0.078501 |
import pykazoo.restrequest
import pykazoo.phonenumbers
from unittest import TestCase
from unittest.mock import create_autospec
mock_rest_request = create_autospec(pykazoo.restrequest.RestRequest)
class TestPhoneNumbers(TestCase):
    """Unit tests for ``pykazoo.phonenumbers.PhoneNumbers``.

    Each CRUD method is checked twice: once for the REST request it
    issues (URL + payload) and once for passing the response through
    unchanged.
    """

    def setUp(self):
        self.mock_rest_request = mock_rest_request
        self.phone_numbers = pykazoo.phonenumbers.PhoneNumbers(
            self.mock_rest_request)
        self.account_id = '<KEY>'
        self.phone_number = '+15555555555'
        self.data = {'test': 'data'}
        self.params = {'test': 'params'}

    def _collection_url(self):
        # accounts/<account>/phone_numbers
        return f'accounts/{self.account_id}/phone_numbers'

    def _number_url(self):
        # accounts/<account>/phone_numbers/<number>
        return f'accounts/{self.account_id}/phone_numbers/{self.phone_number}'

    def test_get_phone_numbers_request_call(self):
        self.phone_numbers.get_phone_numbers(self.account_id, self.params)
        self.mock_rest_request.get.assert_called_with(
            self._collection_url(), self.params)

    def test_get_phone_numbers_returns_dict(self):
        self.mock_rest_request.get.return_value = self.data
        result = self.phone_numbers.get_phone_numbers(
            self.account_id, self.params)
        assert result is self.data

    def test_get_phone_number_request_call(self):
        self.phone_numbers.get_phone_number(
            self.account_id, self.phone_number, self.params)
        self.mock_rest_request.get.assert_called_with(
            self._number_url(), self.params)

    def test_get_phone_number_returns_dict(self):
        self.mock_rest_request.get.return_value = self.data
        result = self.phone_numbers.get_phone_number(
            self.account_id, self.phone_number, self.params)
        assert result is self.data

    def test_create_phone_numbers_request_call(self):
        self.phone_numbers.create_phone_number(
            self.account_id, self.phone_number, self.data)
        self.mock_rest_request.put.assert_called_with(
            self._number_url(), self.data)

    def test_create_phone_numbers_returns_dict(self):
        self.mock_rest_request.put.return_value = self.data
        result = self.phone_numbers.create_phone_number(
            self.account_id, self.phone_number, self.data)
        assert result is self.data

    def test_update_phone_numbers_request_call(self):
        self.phone_numbers.update_phone_number(
            self.account_id, self.phone_number, self.data)
        self.mock_rest_request.post.assert_called_with(
            self._number_url(), self.data)

    def test_update_phone_numbers_returns_dict(self):
        self.mock_rest_request.post.return_value = self.data
        result = self.phone_numbers.update_phone_number(
            self.account_id, self.phone_number, self.data)
        assert result is self.data

    def test_delete_phone_numbers_request_call(self):
        self.phone_numbers.delete_phone_number(
            self.account_id, self.phone_number)
        self.mock_rest_request.delete.assert_called_with(self._number_url())

    def test_delete_phone_numbers_returns_dict(self):
        self.mock_rest_request.delete.return_value = self.data
        result = self.phone_numbers.delete_phone_number(
            self.account_id, self.phone_number)
        assert result is self.data
import pykazoo.phonenumbers
from unittest import TestCase
from unittest.mock import create_autospec
mock_rest_request = create_autospec(pykazoo.restrequest.RestRequest)
class TestPhoneNumbers(TestCase):
def setUp(self):
self.mock_rest_request = mock_rest_request
self.phone_numbers = pykazoo.phonenumbers.PhoneNumbers(
self.mock_rest_request)
self.account_id = '<KEY>'
self.phone_number = '+15555555555'
self.data = {'test': 'data'}
self.params = {'test': 'params'}
def test_get_phone_numbers_request_call(self):
self.phone_numbers.get_phone_numbers(self.account_id, self.params)
self.mock_rest_request.get.assert_called_with('accounts/' +
self.account_id +
'/phone_numbers',
self.params)
def test_get_phone_numbers_returns_dict(self):
self.mock_rest_request.get.return_value = self.data
return_data = self.phone_numbers.get_phone_numbers(self.account_id,
self.params)
assert return_data is self.data
def test_get_phone_number_request_call(self):
self.phone_numbers.get_phone_number(self.account_id, self.phone_number,
self.params)
self.mock_rest_request.get.assert_called_with('accounts/' +
self.account_id +
'/phone_numbers/' +
self.phone_number,
self.params)
def test_get_phone_number_returns_dict(self):
self.mock_rest_request.get.return_value = self.data
return_data = self.phone_numbers.get_phone_number(self.account_id,
self.phone_number,
self.params)
assert return_data is self.data
def test_create_phone_numbers_request_call(self):
self.phone_numbers.create_phone_number(self.account_id,
self.phone_number,
self.data)
self.mock_rest_request.put.assert_called_with('accounts/' +
self.account_id +
'/phone_numbers/' +
str(self.phone_number),
self.data)
def test_create_phone_numbers_returns_dict(self):
self.mock_rest_request.put.return_value = self.data
return_data = self.phone_numbers.create_phone_number(self.account_id,
self.phone_number,
self.data)
assert return_data is self.data
def test_update_phone_numbers_request_call(self):
self.phone_numbers.update_phone_number(self.account_id,
self.phone_number,
self.data)
self.mock_rest_request.post.assert_called_with('accounts/' +
self.account_id +
'/phone_numbers/' +
str(self.phone_number),
self.data)
def test_update_phone_numbers_returns_dict(self):
self.mock_rest_request.post.return_value = self.data
return_data = self.phone_numbers.update_phone_number(self.account_id,
self.phone_number,
self.data)
assert return_data is self.data
def test_delete_phone_numbers_request_call(self):
self.phone_numbers.delete_phone_number(self.account_id,
self.phone_number)
self.mock_rest_request.delete.assert_called_with(
'accounts/' + self.account_id + '/phone_numbers/' +
str(self.phone_number))
def test_delete_phone_numbers_returns_dict(self):
self.mock_rest_request.delete.return_value = self.data
return_data = self.phone_numbers.delete_phone_number(self.account_id,
self.phone_number)
assert return_data is self.data | 0.57069 | 0.28049 |
from typing import TYPE_CHECKING, Optional
from nats.js import api
from nats.js.errors import KeyDeletedError
from dataclasses import dataclass
import base64
if TYPE_CHECKING:
from nats.js import JetStreamContext
KV_OP = "KV-Operation"
KV_DEL = "DEL"
KV_PURGE = "PURGE"
MSG_ROLLUP_SUBJECT = "sub"
class KeyValue:
    """
    KeyValue uses the JetStream KeyValue functionality.

    .. note::
        This functionality is EXPERIMENTAL and may be changed in later releases.

    ::

        import asyncio
        import nats

        async def main():
            nc = await nats.connect()
            js = nc.jetstream()

            # Create a KV
            kv = await js.create_key_value(bucket='MY_KV')

            # Set and retrieve a value
            await kv.put('hello', b'world')
            entry = await kv.get('hello')
            print(f'KeyValue.Entry: key={entry.key}, value={entry.value}')
            # KeyValue.Entry: key=hello, value=world

            await nc.close()

        if __name__ == '__main__':
            asyncio.run(main())
    """

    @dataclass
    class Entry:
        """
        An entry from a KeyValue store in JetStream.
        """
        bucket: str
        key: str
        value: Optional[bytes]
        revision: Optional[int]

    @dataclass(frozen=True)
    class BucketStatus:
        """
        BucketStatus is the status of a KeyValue bucket.
        """
        stream_info: api.StreamInfo
        bucket: str

        @property
        def values(self) -> int:
            """
            values returns the number of stored messages in the stream.
            """
            return self.stream_info.state.messages

        @property
        def history(self) -> int:
            """
            history returns the max msgs per subject.
            """
            return self.stream_info.config.max_msgs_per_subject

        @property
        def ttl(self) -> Optional[float]:
            """
            ttl returns the max age in seconds.
            """
            max_age = self.stream_info.config.max_age
            return None if max_age is None else max_age

    def __init__(
        self,
        name: str,
        stream: str,
        pre: str,
        js: "JetStreamContext",
    ) -> None:
        self._name = name
        self._stream = stream
        self._pre = pre
        self._js = js

    def _subject(self, key: str) -> str:
        # Keys are subjects under the bucket's prefix.
        return f"{self._pre}{key}"

    async def get(self, key: str) -> Entry:
        """
        get returns the latest value for the key.
        """
        msg = await self._js.get_last_msg(self._stream, self._subject(key))
        value = base64.b64decode(msg.data) if msg.data else None
        entry = KeyValue.Entry(
            bucket=self._name,
            key=key,
            value=value,
            revision=msg.seq,
        )
        # A delete/purge marker in the headers means the key is logically
        # absent even though a message still exists in the stream.
        op = (msg.headers or {}).get(KV_OP, None)
        if op in (KV_DEL, KV_PURGE):
            raise KeyDeletedError(entry, op)
        return entry

    async def put(self, key: str, value: bytes) -> int:
        """
        put will place the new value for the key into the store
        and return the revision number.
        """
        ack = await self._js.publish(self._subject(key), value)
        return ack.seq

    async def update(self, key: str, value: bytes, last: int) -> int:
        """
        update will update the value iff the latest revision matches.
        """
        headers = {api.Header.EXPECTED_LAST_SUBJECT_SEQUENCE: str(last)}
        ack = await self._js.publish(self._subject(key), value, headers=headers)
        return ack.seq

    async def delete(self, key: str) -> bool:
        """
        delete will place a delete marker and remove all previous revisions.
        """
        await self._js.publish(self._subject(key), headers={KV_OP: KV_DEL})
        return True

    async def purge(self, key: str) -> bool:
        """
        purge will remove the key and all revisions.
        """
        headers = {
            KV_OP: KV_PURGE,
            api.Header.ROLLUP: MSG_ROLLUP_SUBJECT,
        }
        await self._js.publish(self._subject(key), headers=headers)
        return True

    async def status(self) -> BucketStatus:
        """
        status retrieves the status and configuration of a bucket.
        """
        info = await self._js.stream_info(self._stream)
        return KeyValue.BucketStatus(stream_info=info, bucket=self._name)
from typing import TYPE_CHECKING, Optional
from nats.js import api
from nats.js.errors import KeyDeletedError
from dataclasses import dataclass
import base64
if TYPE_CHECKING:
from nats.js import JetStreamContext
KV_OP = "KV-Operation"
KV_DEL = "DEL"
KV_PURGE = "PURGE"
MSG_ROLLUP_SUBJECT = "sub"
class KeyValue:
    """
    KeyValue uses the JetStream KeyValue functionality.

    .. note::
       This functionality is EXPERIMENTAL and may be changed in later releases.

    ::

        import asyncio
        import nats

        async def main():
            nc = await nats.connect()
            js = nc.jetstream()

            # Create a KV
            kv = await js.create_key_value(bucket='MY_KV')

            # Set and retrieve a value
            await kv.put('hello', b'world')
            entry = await kv.get('hello')
            print(f'KeyValue.Entry: key={entry.key}, value={entry.value}')
            # KeyValue.Entry: key=hello, value=world

            await nc.close()

        if __name__ == '__main__':
            asyncio.run(main())
    """

    @dataclass
    class Entry:
        """
        An entry from a KeyValue store in JetStream.
        """
        # Name of the bucket the entry belongs to.
        bucket: str
        # Key under which the value is stored.
        key: str
        # Raw value bytes; None when the message carried no payload.
        value: Optional[bytes]
        # Stream sequence number acting as the entry's revision.
        revision: Optional[int]

    @dataclass(frozen=True)
    class BucketStatus:
        """
        BucketStatus is the status of a KeyValue bucket.
        """
        # Snapshot of the backing stream's state and configuration.
        stream_info: api.StreamInfo
        # Bucket name.
        bucket: str

        @property
        def values(self) -> int:
            """
            values returns the number of stored messages in the stream.
            """
            return self.stream_info.state.messages

        @property
        def history(self) -> int:
            """
            history returns the max msgs per subject.
            """
            return self.stream_info.config.max_msgs_per_subject

        @property
        def ttl(self) -> Optional[float]:
            """
            ttl returns the max age in seconds.
            """
            if self.stream_info.config.max_age is None:
                return None
            return self.stream_info.config.max_age

    def __init__(
        self,
        name: str,
        stream: str,
        pre: str,
        js: "JetStreamContext",
    ) -> None:
        # pre is the subject prefix: key "k" maps to subject f"{pre}k".
        self._name = name
        self._stream = stream
        self._pre = pre
        self._js = js

    async def get(self, key: str) -> Entry:
        """
        get returns the latest value for the key.

        Raises KeyDeletedError when the latest message is a delete or
        purge marker.
        """
        msg = await self._js.get_last_msg(self._stream, f"{self._pre}{key}")
        data = None
        if msg.data:
            # get_last_msg delivers the payload base64-encoded; decode it.
            data = base64.b64decode(msg.data)
        entry = KeyValue.Entry(
            bucket=self._name,
            key=key,
            value=data,
            revision=msg.seq,
        )
        # Check headers to see if deleted or purged.
        if msg.headers:
            op = msg.headers.get(KV_OP, None)
            if op == KV_DEL or op == KV_PURGE:
                raise KeyDeletedError(entry, op)
        return entry

    async def put(self, key: str, value: bytes) -> int:
        """
        put will place the new value for the key into the store
        and return the revision number.
        """
        pa = await self._js.publish(f"{self._pre}{key}", value)
        return pa.seq

    async def update(self, key: str, value: bytes, last: int) -> int:
        """
        update will update the value iff the latest revision matches.
        """
        hdrs = {}
        # Server rejects the publish unless the subject's last seq == last.
        hdrs[api.Header.EXPECTED_LAST_SUBJECT_SEQUENCE] = str(last)
        pa = await self._js.publish(f"{self._pre}{key}", value, headers=hdrs)
        return pa.seq

    async def delete(self, key: str) -> bool:
        """
        delete will place a delete marker and remove all previous revisions.
        """
        hdrs = {}
        hdrs[KV_OP] = KV_DEL
        await self._js.publish(f"{self._pre}{key}", headers=hdrs)
        return True

    async def purge(self, key: str) -> bool:
        """
        purge will remove the key and all revisions.
        """
        hdrs = {}
        hdrs[KV_OP] = KV_PURGE
        # Rollup header asks the server to collapse prior subject messages.
        hdrs[api.Header.ROLLUP] = MSG_ROLLUP_SUBJECT
        await self._js.publish(f"{self._pre}{key}", headers=hdrs)
        return True

    async def status(self) -> BucketStatus:
        """
        status retrieves the status and configuration of a bucket.
        """
        info = await self._js.stream_info(self._stream)
        return KeyValue.BucketStatus(stream_info=info, bucket=self._name)
import os
import argparse
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from train_test_api.train_api import train
from train_test_api.test_api import eval_training
from conf import settings
from utils import get_network, get_training_dataloader, get_valid_dataloader, get_test_dataloader, \
get_parameter_number, save_best_result
import sys
import csv
# Make the project root importable regardless of the working directory.
rootPath = os.path.abspath(os.path.dirname(__file__))
sys.path.append(rootPath)

if __name__ == '__main__':
    # Hyper-parameters are hard-coded onto an (otherwise empty) argparse
    # namespace so downstream APIs that expect `args` keep working.
    parser = argparse.ArgumentParser()
    args = parser.parse_args()
    args.with_cuda = True
    cuda_condition = torch.cuda.is_available() and args.with_cuda
    args.net = "msdensenet"
    args.device = torch.device("cuda:0" if cuda_condition else "cpu")
    args.b = 128  # batch size
    args.warm = 0  # warm-up epochs before LR scheduling starts
    args.lr = 0.002  # initial learning rate
    dataset_path = os.path.join("Dataset", "cell_dataset")
    dataset_list = sorted(os.listdir(dataset_path))
    result_csv_dir = os.path.join(rootPath, "cell_datasets_result", "result_csv")
    if not os.path.exists(result_csv_dir):
        os.makedirs(result_csv_dir)
    data_index = 1
    # Train one model per cell dataset, tracking the best validation AUC
    # with early stopping.
    for dataset_ in dataset_list:
        print("This is the ", data_index, " dataset",dataset_)
        data_index += 1
        net = get_network(args)
        # print("NET:")
        # print(net)
        patience = settings.PATIENCE
        # network parameters
        print(get_parameter_number(net))
        # data preprocessing:
        dataset = os.path.join(dataset_path, dataset_)
        dna_training_loader = get_training_dataloader(path=dataset, num_workers=0, batch_size=args.b, shuffle=True)
        dna_valid_loader = get_valid_dataloader(path=dataset, num_workers=0, batch_size=args.b, shuffle=False)
        dna_test_loader = get_test_dataloader(path=dataset, num_workers=0, batch_size=args.b, shuffle=False)
        loss_function = nn.CrossEntropyLoss()
        softmax_output = nn.Softmax(dim=1)
        optimizer = optim.SGD(params=net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
        train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=settings.MILESTONES, gamma=0.8)
        recent_folder = ""  # NOTE(review): never used — candidate for removal
        checkpoint_path = os.path.join(settings.CHECKPOINT_PATH, args.net, settings.TIME_NOW, dataset_)
        # use tensorboard
        if not os.path.exists(settings.LOG_DIR):
            os.mkdir(settings.LOG_DIR)
        # record the epoch
        df_path = os.path.join(settings.LOG_DIR, args.net, settings.TIME_NOW, dataset_)
        if not os.path.exists(df_path):
            os.makedirs(df_path)
        df_file = os.path.join(df_path, "df_log.pickle")
        if not os.path.isfile(df_file):
            # Seed an empty metrics log that eval_training() appends to.
            df_ = pd.DataFrame(columns=["epoch", "lr", "train_loss", "train_acc",
                                        "valid_loss", "valid_acc", "valid_auc",
                                        "test_loss", "test_acc", "test_auc"])
            df_.to_pickle(df_file)
            print("log DataFrame created!")
        # create model_weights folder to save model
        if not os.path.exists(checkpoint_path):
            os.makedirs(checkpoint_path)
        checkpoint_path = os.path.join(checkpoint_path, '{net}-{epoch}-{type}.pth')
        best_auc = 0.0
        best_testAUC = 0.0
        best_testAcc = 0.0
        best_testPrec = 0.0
        best_testRecall = 0.0
        best_testF1 = 0.0
        best_epoch = 0
        for epoch in range(1, settings.EPOCH + 1):
            output_interval = settings.OUTPUT_INTERVAL
            log_dic = train(net, dna_training_loader, optimizer, loss_function, epoch, args,
                            output_interval)
            if epoch > args.warm:
                train_scheduler.step()
            epoch_, auc_valid, cur_result, pred_result_test, acc, prec, rec, f1 = eval_training(net, dna_valid_loader, dna_test_loader,
                                                                                                loss_function, softmax_output, args,
                                                                                                epoch=epoch, df_file=df_file,
                                                                                                log_dic=log_dic, train_after=True)
            # start to save best performance model after learning rate decay to 0.01
            if best_auc < auc_valid:
                weights_path = checkpoint_path.format(net=args.net, epoch=epoch, type='best')
                print('saving weights file to {}'.format(weights_path))
                torch.save(net.state_dict(), weights_path)
                best_auc = auc_valid
                # A new best resets the early-stopping budget and skips the
                # patience decrement below via `continue`.
                patience = settings.PATIENCE
                # save best result
                save_best_result(df_path, pred_result_test)
                best_testAUC = cur_result
                best_testAcc = acc
                best_testPrec = prec
                best_testRecall = rec
                best_testF1 = f1
                best_epoch = epoch_
                continue
            if not epoch % settings.SAVE_EPOCH:
                weights_path = checkpoint_path.format(net=args.net, epoch=epoch, type='regular')
                print('saving weights file to {}'.format(weights_path))
                torch.save(net.state_dict(), weights_path)
            # No improvement this epoch: spend one unit of patience.
            patience -= 1
            if patience == 0:
                print("DatasetName:", dataset_,",The best epoch:",best_epoch , ", The best AUC:", best_testAUC)
                print("The end!")
                break
        """Record the best AUC."""
        bestAUC_csv = os.path.join(result_csv_dir, "CellDataset_BestResult.csv")
        # data_index == 2 means this is the first dataset: write the CSV header once.
        if data_index == 2:
            with open(bestAUC_csv, 'w+', newline="") as f:
                csv_write = csv.writer(f)
                csv_head = ["dataset","epoch", "AUC", "ACC", "Precision", "Recall", "F1score"]
                csv_write.writerow(csv_head)
        with open(bestAUC_csv, 'a+', newline="") as f:
            csv_write = csv.writer(f)
            data_row = [dataset_, best_epoch, best_testAUC, best_testAcc, best_testPrec, best_testRecall, best_testF1]
            csv_write.writerow(data_row)
import argparse
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from train_test_api.train_api import train
from train_test_api.test_api import eval_training
from conf import settings
from utils import get_network, get_training_dataloader, get_valid_dataloader, get_test_dataloader, \
get_parameter_number, save_best_result
import sys
import csv
rootPath = os.path.abspath(os.path.dirname(__file__))
sys.path.append(rootPath)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.with_cuda = True
cuda_condition = torch.cuda.is_available() and args.with_cuda
args.net = "msdensenet"
args.device = torch.device("cuda:0" if cuda_condition else "cpu")
args.b = 128
args.warm = 0
args.lr = 0.002
dataset_path = os.path.join("Dataset", "cell_dataset")
dataset_list = sorted(os.listdir(dataset_path))
result_csv_dir = os.path.join(rootPath, "cell_datasets_result", "result_csv")
if not os.path.exists(result_csv_dir):
os.makedirs(result_csv_dir)
data_index = 1
for dataset_ in dataset_list:
print("This is the ", data_index, " dataset",dataset_)
data_index += 1
net = get_network(args)
# print("NET:")
# print(net)
patience = settings.PATIENCE
# network parameters
print(get_parameter_number(net))
# data preprocessing:
dataset = os.path.join(dataset_path, dataset_)
dna_training_loader = get_training_dataloader(path=dataset, num_workers=0, batch_size=args.b, shuffle=True)
dna_valid_loader = get_valid_dataloader(path=dataset, num_workers=0, batch_size=args.b, shuffle=False)
dna_test_loader = get_test_dataloader(path=dataset, num_workers=0, batch_size=args.b, shuffle=False)
loss_function = nn.CrossEntropyLoss()
softmax_output = nn.Softmax(dim=1)
optimizer = optim.SGD(params=net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=settings.MILESTONES, gamma=0.8)
recent_folder = ""
checkpoint_path = os.path.join(settings.CHECKPOINT_PATH, args.net, settings.TIME_NOW, dataset_)
# use tensorboard
if not os.path.exists(settings.LOG_DIR):
os.mkdir(settings.LOG_DIR)
# record the epoch
df_path = os.path.join(settings.LOG_DIR, args.net, settings.TIME_NOW, dataset_)
if not os.path.exists(df_path):
os.makedirs(df_path)
df_file = os.path.join(df_path, "df_log.pickle")
if not os.path.isfile(df_file):
df_ = pd.DataFrame(columns=["epoch", "lr", "train_loss", "train_acc",
"valid_loss", "valid_acc", "valid_auc",
"test_loss", "test_acc", "test_auc"])
df_.to_pickle(df_file)
print("log DataFrame created!")
# create model_weights folder to save model
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
checkpoint_path = os.path.join(checkpoint_path, '{net}-{epoch}-{type}.pth')
best_auc = 0.0
best_testAUC = 0.0
best_testAcc = 0.0
best_testPrec = 0.0
best_testRecall = 0.0
best_testF1 = 0.0
best_epoch = 0
for epoch in range(1, settings.EPOCH + 1):
output_interval = settings.OUTPUT_INTERVAL
log_dic = train(net, dna_training_loader, optimizer, loss_function, epoch, args,
output_interval)
if epoch > args.warm:
train_scheduler.step()
epoch_, auc_valid, cur_result, pred_result_test, acc, prec, rec, f1 = eval_training(net, dna_valid_loader, dna_test_loader,
loss_function, softmax_output, args,
epoch=epoch, df_file=df_file,
log_dic=log_dic, train_after=True)
# start to save best performance model after learning rate decay to 0.01
if best_auc < auc_valid:
weights_path = checkpoint_path.format(net=args.net, epoch=epoch, type='best')
print('saving weights file to {}'.format(weights_path))
torch.save(net.state_dict(), weights_path)
best_auc = auc_valid
patience = settings.PATIENCE
# save best result
save_best_result(df_path, pred_result_test)
best_testAUC = cur_result
best_testAcc = acc
best_testPrec = prec
best_testRecall = rec
best_testF1 = f1
best_epoch = epoch_
continue
if not epoch % settings.SAVE_EPOCH:
weights_path = checkpoint_path.format(net=args.net, epoch=epoch, type='regular')
print('saving weights file to {}'.format(weights_path))
torch.save(net.state_dict(), weights_path)
patience -= 1
if patience == 0:
print("DatasetName:", dataset_,",The best epoch:",best_epoch , ", The best AUC:", best_testAUC)
print("The end!")
break
"""记录bestAUC"""
bestAUC_csv = os.path.join(result_csv_dir, "CellDataset_BestResult.csv")
if data_index == 2:
with open(bestAUC_csv, 'w+', newline="") as f:
csv_write = csv.writer(f)
csv_head = ["dataset","epoch", "AUC", "ACC", "Precision", "Recall", "F1score"]
csv_write.writerow(csv_head)
with open(bestAUC_csv, 'a+', newline="") as f:
csv_write = csv.writer(f)
data_row = [dataset_, best_epoch, best_testAUC, best_testAcc, best_testPrec, best_testRecall, best_testF1]
csv_write.writerow(data_row) | 0.388618 | 0.202384 |
import attr
from bokeh.models import VArea
from bokeh.models.sources import DataSource
from typing import List, Tuple, Type, cast
from jira_analysis.cycle_time.cycle_time import CycleTime
from jira_analysis.cycle_time.stats import (
rolling_average_cycle_time,
standard_deviations,
)
from jira_analysis.chart.base import IChart, Plot
from .base import BaseCycleTimeLinePlot
from .utils import sort_cycle_times, unsplit
@attr.s(frozen=True)
class CycleTimeDeviationPlot(Plot):
    """Shaded standard-deviation band (VArea) around the rolling average
    cycle time, bounded by upper and lower deviation lines."""

    cycle_times: List[CycleTime] = attr.ib()
    data_source: Type[DataSource] = attr.ib()

    def draw(self, chart: IChart) -> None:
        """Render the band glyph, then the upper/lower bound line plots."""
        sorted_cycle_times = sort_cycle_times(self.cycle_times)
        # NOTE(review): completions/cycle_times are unused in this method.
        _, completions, cycle_times = unsplit(sorted_cycle_times)
        upper_deviation, lower_deviation = _get_standard_deviations(sorted_cycle_times)
        data = self.to_data_source()
        upper_plot = _DeviationLinePlot(
            cycle_times=sorted_cycle_times,
            data_source=self.data_source,
            deviation_bound="Upper",
            deviations=upper_deviation,
        )
        lower_plot = _DeviationLinePlot(
            cycle_times=sorted_cycle_times,
            data_source=self.data_source,
            deviation_bound="Lower",
            deviations=lower_deviation,
        )
        deviation_glyph = VArea(
            x="x", y1="y1", y2="y2", fill_color="green", fill_alpha=0.3
        )
        chart.glyph(data, deviation_glyph)
        upper_plot.draw(chart)
        lower_plot.draw(chart)

    def to_data_source(self) -> DataSource:
        """Band coordinates: x = completion dates, y1/y2 = upper/lower bounds."""
        sorted_cycle_times = sort_cycle_times(self.cycle_times)
        _, completions, cycle_times = unsplit(sorted_cycle_times)
        upper_deviation, lower_deviation = _get_standard_deviations(sorted_cycle_times)
        return self.data_source(
            {"x": completions, "y1": upper_deviation, "y2": lower_deviation}
        )
def _get_standard_deviations(
    cycle_times: List[CycleTime],
) -> Tuple[Tuple[float, ...], Tuple[float, ...]]:
    """Return (upper, lower) deviation bands around the rolling average.

    Each band pairs the rolling-average cycle time with its standard
    deviation: upper = avg + sd, lower = avg - sd. An empty input yields
    an empty tuple (unchanged behaviour of ``zip(*...)``).
    """
    values = [entry.cycle_time for entry in cycle_times]
    bands = [
        (average + deviation, average - deviation)
        for average, deviation in zip(
            rolling_average_cycle_time(values),
            standard_deviations(values),
        )
    ]
    return cast(
        Tuple[Tuple[float, ...], Tuple[float, ...]],
        tuple(zip(*bands)),
    )
@attr.s(frozen=True)
class _DeviationLinePlot(BaseCycleTimeLinePlot):
    """One boundary line ("Upper" or "Lower") of the deviation band."""

    cycle_times: List[CycleTime] = attr.ib()
    data_source: Type[DataSource] = attr.ib()
    # "Upper" or "Lower"; only used to build the legend label.
    deviation_bound: str = attr.ib()
    # Pre-computed y values for this bound, aligned with completions.
    deviations: Tuple[float, ...] = attr.ib()

    @property
    def alpha(self) -> float:
        # Matches the band's fill_alpha so line and area read as one element.
        return 0.3

    @property
    def color(self) -> str:
        return "green"

    @property
    def label(self) -> str:
        return f"{self.deviation_bound} bound"

    @property
    def width(self) -> int:
        return 1

    def to_data_source(self) -> DataSource:
        """Line coordinates: x = completion dates, y = deviation values."""
        sorted_cycle_times = sort_cycle_times(self.cycle_times)
        _, completions, cycle_times = unsplit(sorted_cycle_times)
        return self.data_source(
            {
                "x": completions,
                "y": self.deviations,
                "label": [self.label for _ in completions],
            }
        )
from bokeh.models import VArea
from bokeh.models.sources import DataSource
from typing import List, Tuple, Type, cast
from jira_analysis.cycle_time.cycle_time import CycleTime
from jira_analysis.cycle_time.stats import (
rolling_average_cycle_time,
standard_deviations,
)
from jira_analysis.chart.base import IChart, Plot
from .base import BaseCycleTimeLinePlot
from .utils import sort_cycle_times, unsplit
@attr.s(frozen=True)
class CycleTimeDeviationPlot(Plot):
cycle_times: List[CycleTime] = attr.ib()
data_source: Type[DataSource] = attr.ib()
def draw(self, chart: IChart) -> None:
sorted_cycle_times = sort_cycle_times(self.cycle_times)
_, completions, cycle_times = unsplit(sorted_cycle_times)
upper_deviation, lower_deviation = _get_standard_deviations(sorted_cycle_times)
data = self.to_data_source()
upper_plot = _DeviationLinePlot(
cycle_times=sorted_cycle_times,
data_source=self.data_source,
deviation_bound="Upper",
deviations=upper_deviation,
)
lower_plot = _DeviationLinePlot(
cycle_times=sorted_cycle_times,
data_source=self.data_source,
deviation_bound="Lower",
deviations=lower_deviation,
)
deviation_glyph = VArea(
x="x", y1="y1", y2="y2", fill_color="green", fill_alpha=0.3
)
chart.glyph(data, deviation_glyph)
upper_plot.draw(chart)
lower_plot.draw(chart)
def to_data_source(self) -> DataSource:
sorted_cycle_times = sort_cycle_times(self.cycle_times)
_, completions, cycle_times = unsplit(sorted_cycle_times)
upper_deviation, lower_deviation = _get_standard_deviations(sorted_cycle_times)
return self.data_source(
{"x": completions, "y1": upper_deviation, "y2": lower_deviation}
)
def _get_standard_deviations(
cycle_times: List[CycleTime],
) -> Tuple[Tuple[float, ...], Tuple[float, ...]]:
cycle_time_values = [c.cycle_time for c in cycle_times]
rolling_cycle_times = rolling_average_cycle_time(cycle_time_values)
zipped_deviations = zip(
rolling_cycle_times, standard_deviations(cycle_time_values),
)
return cast(
Tuple[Tuple[float, ...], Tuple[float, ...]],
tuple(zip(*((ct + sd, ct - sd) for ct, sd in zipped_deviations))),
)
@attr.s(frozen=True)
class _DeviationLinePlot(BaseCycleTimeLinePlot):
cycle_times: List[CycleTime] = attr.ib()
data_source: Type[DataSource] = attr.ib()
deviation_bound: str = attr.ib()
deviations: Tuple[float, ...] = attr.ib()
@property
def alpha(self) -> float:
return 0.3
@property
def color(self) -> str:
return "green"
@property
def label(self) -> str:
return f"{self.deviation_bound} bound"
@property
def width(self) -> int:
return 1
def to_data_source(self) -> DataSource:
sorted_cycle_times = sort_cycle_times(self.cycle_times)
_, completions, cycle_times = unsplit(sorted_cycle_times)
return self.data_source(
{
"x": completions,
"y": self.deviations,
"label": [self.label for _ in completions],
}
) | 0.841142 | 0.394901 |
import pprint
import re
import os
# Shared pretty-printer used for dumping the seed index during debugging.
pp = pprint.PrettyPrinter(indent=4)
#------------------------------------------------------------------
#Defining the function
#------------------------------------------------------------------
def best_pos(sequence, primer):
    """Exhaustively align *primer* against every window of *sequence*.

    Returns (best_score, positions): the maximum number of matching
    characters over all alignments, and the list of start offsets (as
    strings) that achieve it. Also prints the comparison count.
    """
    nr_comp = 0
    # Fix: str.upper() returns a new string; the original discarded the
    # result, silently breaking lower-case input.
    primer = primer.upper()
    sequence = sequence.upper()
    best_score = 0
    position = []
    # Fix: +1 so the final alignment (primer flush with the end of the
    # sequence) is included; the original range dropped the last window.
    for i in range(0, len(sequence) - len(primer) + 1):
        local_score = 0
        for j in range(0, len(primer)):
            nr_comp += 1
            if sequence[i + j] == primer[j]:
                local_score += 1
        if local_score > best_score:
            # New best: reset the tie list.
            position = [str(i)]
            best_score = local_score
        elif local_score == best_score:
            # Tie with the current best: remember this start as well.
            position.append(str(i))
    # print(...) with a single argument is valid in both Python 2 and 3
    # (the original used Python-2-only print statements).
    print("Comparisons : " + str(nr_comp))
    print("score:" + str(best_score) + ",".join(position))
    return (best_score, position)
# ----------- MAIN LOOP --------------
def best_pos_bounds(sequence, primer):
    """Like best_pos(), but abandons a window once it can no longer beat
    the current best score (branch-and-bound), saving comparisons.

    Returns (best_score, positions) with positions as string offsets.
    """
    nr_comp = 0
    # Fix: bind the results of .upper() (originally discarded no-ops).
    primer = primer.upper()
    sequence = sequence.upper()
    best_score = 0
    position = []
    # Fix: +1 so the final alignment is considered.
    for i in range(0, len(sequence) - len(primer) + 1):
        local_score = 0
        for j in range(0, len(primer)):
            # local_score can grow by at most len(primer) - j more matches;
            # once even that cannot beat best_score, abandon this window.
            # Fix: the original used `continue`, which spun through the
            # remaining j values without comparing — same result, but
            # wasted iterations; `break` is what was intended.
            if best_score > len(primer) - j + local_score:
                break
            nr_comp += 1
            if sequence[i + j] == primer[j]:
                local_score += 1
        if local_score > best_score:
            position = [str(i)]
            best_score = local_score
        elif local_score == best_score:
            position.append(str(i))
    print("Comparisons : " + str(nr_comp))
    print("Score: " + str(best_score) + ", - ".join(position))
    return (best_score, position)
def best_pos_by_index_seq(sequence, primer, seed_length):
    """Seed-and-extend search: index every seed_length-mer of *sequence*,
    then score the full primer only at positions sharing its leading seed.

    Returns (best_score, positions). Fixes over the original: it recorded
    the stale index-building variable `i` instead of the candidate `pos`
    (wrong positions), raised KeyError when the primer's seed was absent,
    missed the last k-mer when indexing, and returned None.
    """
    nr_comp = 0
    # Fix: bind the results of .upper() (originally discarded no-ops).
    primer = primer.upper()
    sequence = sequence.upper()
    best_score = 0
    position = []
    seeds = dict()
    # Build the seed index (+1 so the final k-mer is indexed too).
    for i in range(0, len(sequence) - seed_length + 1):
        seed = sequence[i: i + seed_length]
        if seed not in seeds:
            seeds[seed] = []
        seeds[seed].append(i)
    primer_seed = primer[0:seed_length]
    # Fix: .get() guards against a primer seed absent from the sequence
    # (the original raised KeyError).
    candidates = seeds.get(primer_seed, [])
    # Same indent=4 formatting as the module-level `pp`, but self-contained.
    pprint.PrettyPrinter(indent=4).pprint(candidates)
    for pos in candidates:
        # Skip candidates too close to the end for a full-length alignment.
        if pos + len(primer) > len(sequence):
            continue
        local_score = 0
        for j in range(0, len(primer)):
            nr_comp += 1
            if sequence[pos + j] == primer[j]:
                local_score += 1
        if local_score > best_score:
            # Fix: record the candidate position, not the stale `i`.
            position = [str(pos)]
            best_score = local_score
        elif local_score == best_score:
            position.append(str(pos))
    print("Comparisons : " + str(nr_comp))
    return (best_score, position)
# ----------- MAIN LOOP --------------
# Demo runs: exhaustive search, bounded (branch-and-bound) search, then
# the seed-index search with two different seed lengths.
best_pos("AGACCAGATCTGAGCTTGGGAGCTCTTGGCATAACTAGGGAACCACAGTTTGAAACGT", "CTTGGCATAA")
best_pos_bounds("AGACCAGATCTGAGCTTGGGAGCTCTTGGCATAACTAGGGAACCACAGTTTGAAACGT", "CTTGGCATAA")
best_pos_bounds("AGACCAGACTTGGCATAATCTGAGCTTGGGAGCTCTAGGGAACCACAGTTTGAAACGT", "CTTGGCATAA")
best_pos_by_index_seq("AGACCAGACTTGGCATAATCTGAGCTTGGGAGCTCTAGGGAACCACAGTTTGAAACGT", "CTTGGCATAA", 3)
best_pos_by_index_seq("AGACCAGACTTGGCATAATCTGAGCTTGGGAGCTCTAGGGAACCACAGTTTGAAACGT", "CTTGGCATAA", 5)
import re
import os
pp = pprint.PrettyPrinter(indent=4)
#------------------------------------------------------------------
#Defining the function
#------------------------------------------------------------------
def best_pos( sequence, primer):
nr_comp = 0
primer.upper()
sequence.upper()
best_score = 0
position = []
for i in range(0, len(sequence) - len(primer)): # -1 here to avoid going over length of i
local_score = 0
for j in range(0, len(primer)):
nr_comp += 1
if sequence[i + j] == primer[j]: #Anchors I and then loops J over I
local_score += 1 # Append local score
#print best_score
if (local_score > best_score):
position = []
position.append( str(i) )
best_score = local_score
elif ( local_score == best_score): #Appends best local score to global best score.
pass
position.append(str(i))
print "Comparisons : " + str(nr_comp)
print "score:" + str(best_score) + ",".join(position)
return (best_score, position)
# ----------- MAIN LOOP --------------
def best_pos_bounds( sequence, primer):
nr_comp = 0
primer.upper()
sequence.upper()
best_score = 0
position = []
for i in range(0, len(sequence) - len(primer)): # -1 here to avoid going over length of i
local_score = 0
for j in range(0, len(primer)):
if ( best_score > len(primer) - j + local_score):
continue
# print "%d > %d - %d + %d" % (best_score, len(primer), j, local_score)
nr_comp += 1
if sequence[i + j] == primer[j]: #Anchors I and then loops J over I
local_score += 1 # Append local score
#print best_score
if (local_score > best_score):
position = []
position.append( str(i) )
best_score = local_score
elif ( local_score == best_score): #Appends best local score to global best score.
pass
position.append(str(i))
print "Comparisons : " + str(nr_comp)
print "Score: " + str(best_score) + ", - ".join(position)
return (best_score, position)
def best_pos_by_index_seq(sequence, primer, seed_length):
nr_comp = 0
primer.upper()
sequence.upper()
best_score = 0
position = []
seeds = dict()
# build the index
for i in range(0, len(sequence) - seed_length):
seed = sequence[ i: i + seed_length]
if ( seed not in seeds):
seeds[ seed ] = []
seeds[seed].append( i )
primer_seed = primer[0:seed_length]
pp.pprint(seeds[ primer_seed])
for pos in (seeds[ primer_seed]):
local_score = 0
for j in range(0, len(primer)):
# if ( best_score > len(primer) - j + local_score):
# continue
nr_comp += 1
if sequence[pos + j] == primer[j]: #Anchors I and then loops J over I
local_score += 1 # Append local score
#print best_score
if (local_score > best_score):
position = []
position.append( str(i) )
best_score = local_score
elif ( local_score == best_score): #Appends best local score to global best score.
pass
position.append(str(i))
print "Comparisons : " + str(nr_comp)
return
# ----------- MAIN LOOP --------------
best_pos("AGACCAGATCTGAGCTTGGGAGCTCTTGGCATAACTAGGGAACCACAGTTTGAAACGT", "CTTGGCATAA")
best_pos_bounds("AGACCAGATCTGAGCTTGGGAGCTCTTGGCATAACTAGGGAACCACAGTTTGAAACGT", "CTTGGCATAA")
best_pos_bounds("AGACCAGACTTGGCATAATCTGAGCTTGGGAGCTCTAGGGAACCACAGTTTGAAACGT", "CTTGGCATAA")
best_pos_by_index_seq("AGACCAGACTTGGCATAATCTGAGCTTGGGAGCTCTAGGGAACCACAGTTTGAAACGT", "CTTGGCATAA", 3)
best_pos_by_index_seq("AGACCAGACTTGGCATAATCTGAGCTTGGGAGCTCTAGGGAACCACAGTTTGAAACGT", "CTTGGCATAA", 5) | 0.056809 | 0.257048 |
import socketio
import json
import aiohttp
from aiohttp import web
import traceback
from appdaemon.appdaemon import AppDaemon
# socketio handler
class DashStream(socketio.AsyncNamespace):
    """socket.io namespace that forwards stream events to the owning ADStream."""

    def __init__(self, ADStream, path, AD):
        super().__init__(path)
        self.AD = AD
        self.ADStream = ADStream

    async def on_connect(self, sid, data):
        # Delegate new-connection handling to the stream owner's callback.
        await self.ADStream.on_connect()

    async def on_up(self, sid, data):
        # 'up' events carry client -> server messages.
        await self.ADStream.on_msg(data)
class ADStream:
    """Transport abstraction for AppDaemon's dashboard event stream.

    Depending on ``transport`` it either serves a raw aiohttp websocket
    endpoint at /stream or attaches a socket.io namespace, and fans
    updates out to every connected dashboard client.
    """

    def __init__(self, ad: AppDaemon, app, transport, on_connect, on_msg):
        self.AD = ad
        self.logger = ad.logging.get_child("_stream")
        self.access = ad.logging.get_access()
        self.app = app
        self.transport = transport
        self.on_connect = on_connect  # called when a client connects (socket.io path)
        self.on_msg = on_msg  # called for every client message
        if self.transport == "ws":
            # Map of live WebSocketResponse -> per-connection state
            # (currently just the identified dashboard, set in wshandler).
            self.app['websockets'] = {}
            self.app.router.add_get('/stream', self.wshandler)
        else:
            self.dash_stream = DashStream(self, '/stream', self.AD)
            self.sio = socketio.AsyncServer(async_mode='aiohttp')
            self.sio.attach(self.app)
            self.sio.register_namespace(self.dash_stream)

    async def send_update(self, data):
        """Serialize *data* to JSON and push it to all connected clients.

        Errors are logged, never raised: a dead client must not break the
        broadcast loop.
        """
        try:
            jdata = json.dumps(data)
            if self.transport == "ws":
                if len(self.app['websockets']) > 0:
                    self.logger.debug("Sending data: %s", jdata)
                    for ws in self.app['websockets']:
                        # Only push to sockets that have identified a dashboard.
                        if "dashboard" in self.app['websockets'][ws]:
                            await ws.send_str(jdata)
            else:
                await self.dash_stream.emit('down', jdata)
        except TypeError as e:
            self.logger.debug('-' * 60)
            self.logger.warning("Unexpected error in JSON conversion")
            self.logger.debug("Data is: %s", data)
            self.logger.debug("Error is: %s", e)
            self.logger.debug('-' * 60)
        # Fix: narrowed from a bare `except:` so task cancellation and
        # interpreter shutdown signals are no longer swallowed.
        except Exception:
            self.logger.debug('-' * 60)
            self.logger.debug("Client disconnected unexpectedly")
            self.access.info("Client disconnected unexpectedly")
            self.logger.debug('-' * 60)
            self.logger.debug(traceback.format_exc())
            self.logger.debug('-' * 60)

    #@securedata
    async def wshandler(self, request):
        """aiohttp handler for /stream: register the socket, pump messages.

        Each TEXT message is forwarded to on_msg() and its payload recorded
        as the connection's dashboard id, which send_update() checks before
        broadcasting. The socket is always unregistered on exit.
        """
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        request.app['websockets'][ws] = {}
        # noinspection PyBroadException
        try:
            while True:
                msg = await ws.receive()
                if msg.type == aiohttp.WSMsgType.TEXT:
                    await self.on_msg(msg.data)
                    request.app['websockets'][ws]["dashboard"] = msg.data
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    # Fix: the logger uses %-style placeholders; the original
                    # passed a str.format-style "{}" which never interpolated.
                    self.access.info("WebSocket connection closed with exception %s", ws.exception())
        except Exception:
            self.logger.debug('-' * 60)
            self.logger.debug("Unexpected client disconnection")
            self.access.info("Unexpected client disconnection")
            self.logger.debug('-' * 60)
            self.logger.debug(traceback.format_exc())
            self.logger.debug('-' * 60)
        finally:
            request.app['websockets'].pop(ws, None)
        return ws

    # Websockets Handler
    async def on_shutdown(self, application):
        """Close every live websocket when the aiohttp app shuts down."""
        for ws in application['websockets']:
            try:
                # Fix: removed stray debug print() calls (ws.closed / "done").
                await ws.close()
            except Exception:
                self.logger.debug('-' * 60)
                self.logger.warning("Unexpected error in on_shutdown()")
                self.logger.debug('-' * 60)
                self.logger.debug(traceback.format_exc())
                self.logger.debug('-' * 60)
import json
import aiohttp
from aiohttp import web
import traceback
from appdaemon.appdaemon import AppDaemon
# socketio handler
class DashStream(socketio.AsyncNamespace):
def __init__(self, ADStream, path, AD):
super().__init__(path)
self.AD = AD
self.ADStream = ADStream
async def on_connect(self, sid, data):
await self.ADStream.on_connect()
async def on_up(self, sid, data):
await self.ADStream.on_msg(data)
class ADStream:
def __init__(self, ad: AppDaemon, app, transport, on_connect, on_msg):
self.AD = ad
self.logger = ad.logging.get_child("_stream")
self.access = ad.logging.get_access()
self.app = app
self.transport = transport
self.on_connect = on_connect
self.on_msg = on_msg
if self.transport == "ws":
self.app['websockets'] = {}
self.app.router.add_get('/stream', self.wshandler)
else:
self.dash_stream = DashStream(self, '/stream', self.AD)
self.sio = socketio.AsyncServer(async_mode='aiohttp')
self.sio.attach(self.app)
self.sio.register_namespace(self.dash_stream)
async def send_update(self, data):
try:
jdata = json.dumps(data)
if self.transport == "ws":
if len(self.app['websockets']) > 0:
self.logger.debug("Sending data: %s", jdata)
for ws in self.app['websockets']:
if "dashboard" in self.app['websockets'][ws]:
await ws.send_str(jdata)
else:
await self.dash_stream.emit('down', jdata)
except TypeError as e:
self.logger.debug('-' * 60)
self.logger.warning("Unexpected error in JSON conversion")
self.logger.debug("Data is: %s", data)
self.logger.debug("Error is: %s",e)
self.logger.debug('-' * 60)
except:
self.logger.debug('-' * 60)
self.logger.debug("Client disconnected unexpectedly")
self.access.info("Client disconnected unexpectedly")
self.logger.debug('-' * 60)
self.logger.debug(traceback.format_exc())
self.logger.debug('-' * 60)
#@securedata
async def wshandler(self, request):
ws = web.WebSocketResponse()
await ws.prepare(request)
request.app['websockets'][ws] = {}
# noinspection PyBroadException
try:
while True:
msg = await ws.receive()
if msg.type == aiohttp.WSMsgType.TEXT:
await self.on_msg(msg.data)
request.app['websockets'][ws]["dashboard"] = msg.data
elif msg.type == aiohttp.WSMsgType.ERROR:
self.access.info("WebSocket connection closed with exception {}", ws.exception())
except:
self.logger.debug('-' * 60)
self.logger.debug("Unexpected client disconnection")
self.access.info("Unexpected client disconnection")
self.logger.debug('-' * 60)
self.logger.debug(traceback.format_exc())
self.logger.debug('-' * 60)
finally:
request.app['websockets'].pop(ws, None)
return ws
# Websockets Handler
async def on_shutdown(self, application):
for ws in application['websockets']:
try:
print(ws.closed)
await ws.close()
print("done")
except:
self.logger.debug('-' * 60)
self.logger.warning("Unexpected error in on_shutdown()")
self.logger.debug('-' * 60)
self.logger.debug(traceback.format_exc())
self.logger.debug('-' * 60) | 0.344003 | 0.069164 |
# Pandas DataFrame basics: construction, label/positional indexing,
# slicing and concatenation.
import pandas as pd

# Column-oriented input: each dict key becomes a column.
data = {
    'apples': [3, 2, 5, 7],
    'oranges': [1, 8, 4, 0]
}
# initializing dataframe (default integer RangeIndex)
count = pd.DataFrame(data)
print(count)
'''
   apples  oranges
0       3        1
1       2        8
2       5        4
3       7        0
'''
# updating index: label the rows with weekdays
count = pd.DataFrame(data, index=['Mon', 'Tue', 'Wed', 'Thu'])
print(count)
'''
     apples  oranges
Mon       3        1
Tue       2        8
Wed       5        4
Thu       7        0
'''
# locate data by index name (day)
print(count.loc['Wed'])
'''
apples     5
oranges    4
Name: Wed, dtype: int64
'''
# get data of multiple rows by index name
print(count.loc[['Mon', 'Wed']])
'''
     apples  oranges
Mon       3        1
Wed       5        4
'''
# get 1st 2 rows (positional slice)
print(count[0:2])
'''
     apples  oranges
Mon       3        1
Tue       2        8
'''
# get 3rd and 4th rows
print(count[2:4])
'''
     apples  oranges
Wed       5        4
Thu       7        0
'''
# locate row data by its positional index
print(count.iloc[1])
'''
apples     2
oranges    8
Name: Tue, dtype: int64
'''
# print matrix shape: rows x cols
print(count.shape)
# (4, 2)
print(count.loc['Mon'].shape)
# (2,)
print(count.iloc[0].shape)
# (2,)
print(count.iloc[0:2].shape)
# (2, 2)
print(count.loc[['Mon', 'Thu']].shape)
# (2, 2)
# print column with heading 'apples'
print(count['apples'])
'''
Mon    3
Tue    2
Wed    5
Thu    7
Name: apples, dtype: int64
'''
# Grab data by slicing a column
print(count['oranges'].loc['Tue'])  # 8
print(count['oranges'].iloc[1])     # 8
# Append the rows of a second dataframe.
data_a = {
    'apples': [1, 2, 3],
    'oranges': [4, 5, 6]
}
count_a = pd.DataFrame(data_a, index=['Fri', 'Sat', 'Sun'])
print(count_a)
'''
     apples  oranges
Fri       1        4
Sat       2        5
Sun       3        6
'''
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported replacement.
new_count = pd.concat([count, count_a])
print(new_count)
'''
     apples  oranges
Mon       3        1
Tue       2        8
Wed       5        4
Thu       7        0
Fri       1        4
Sat       2        5
Sun       3        6
'''
# Concatenating frames with unequal columns fills the gaps with NaN.
data_a = {
    'apples': [1, 2, 3],
    'oranges': [4, 5, 6],
    'kiwi': [7, 8, 9]
}
count_a = pd.DataFrame(data_a, index=['Fri', 'Sat', 'Sun'])
print(count_a)
'''
     apples  oranges  kiwi
Fri       1        4     7
Sat       2        5     8
Sun       3        6     9
'''
new_count = pd.concat([count, count_a])
print(new_count)
'''
     apples  oranges  kiwi
Mon       3        1   NaN
Tue       2        8   NaN
Wed       5        4   NaN
Thu       7        0   NaN
Fri       1        4   7.0
Sat       2        5   8.0
Sun       3        6   9.0
'''
# Pandas DataFrame basics (duplicate sample): construction, label/positional
# indexing, slicing and concatenation.
import pandas as pd

# Column-oriented input: each dict key becomes a column.
data = {
    'apples': [3, 2, 5, 7],
    'oranges': [1, 8, 4, 0]
}
# initializing dataframe (default integer RangeIndex)
count = pd.DataFrame(data)
print(count)
'''
   apples  oranges
0       3        1
1       2        8
2       5        4
3       7        0
'''
# updating index: label the rows with weekdays
count = pd.DataFrame(data, index=['Mon', 'Tue', 'Wed', 'Thu'])
print(count)
'''
     apples  oranges
Mon       3        1
Tue       2        8
Wed       5        4
Thu       7        0
'''
# locate data by index name (day)
print(count.loc['Wed'])
'''
apples     5
oranges    4
Name: Wed, dtype: int64
'''
# get data of multiple rows by index name
print(count.loc[['Mon', 'Wed']])
'''
     apples  oranges
Mon       3        1
Wed       5        4
'''
# get 1st 2 rows (positional slice)
print(count[0:2])
'''
     apples  oranges
Mon       3        1
Tue       2        8
'''
# get 3rd and 4th rows
print(count[2:4])
'''
     apples  oranges
Wed       5        4
Thu       7        0
'''
# locate row data by its positional index
print(count.iloc[1])
'''
apples     2
oranges    8
Name: Tue, dtype: int64
'''
# print matrix shape: rows x cols
print(count.shape)
# (4, 2)
print(count.loc['Mon'].shape)
# (2,)
print(count.iloc[0].shape)
# (2,)
print(count.iloc[0:2].shape)
# (2, 2)
print(count.loc[['Mon', 'Thu']].shape)
# (2, 2)
# print column with heading 'apples'
print(count['apples'])
'''
Mon    3
Tue    2
Wed    5
Thu    7
Name: apples, dtype: int64
'''
# Grab data by slicing a column
print(count['oranges'].loc['Tue'])  # 8
print(count['oranges'].iloc[1])     # 8
# Append the rows of a second dataframe.
data_a = {
    'apples': [1, 2, 3],
    'oranges': [4, 5, 6]
}
count_a = pd.DataFrame(data_a, index=['Fri', 'Sat', 'Sun'])
print(count_a)
'''
     apples  oranges
Fri       1        4
Sat       2        5
Sun       3        6
'''
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported replacement.
new_count = pd.concat([count, count_a])
print(new_count)
'''
     apples  oranges
Mon       3        1
Tue       2        8
Wed       5        4
Thu       7        0
Fri       1        4
Sat       2        5
Sun       3        6
'''
# Concatenating frames with unequal columns fills the gaps with NaN.
data_a = {
    'apples': [1, 2, 3],
    'oranges': [4, 5, 6],
    'kiwi': [7, 8, 9]
}
count_a = pd.DataFrame(data_a, index=['Fri', 'Sat', 'Sun'])
print(count_a)
'''
     apples  oranges  kiwi
Fri       1        4     7
Sat       2        5     8
Sun       3        6     9
'''
new_count = pd.concat([count, count_a])
print(new_count)
'''
     apples  oranges  kiwi
Mon       3        1   NaN
Tue       2        8   NaN
Wed       5        4   NaN
Thu       7        0   NaN
Fri       1        4   7.0
Sat       2        5   8.0
Sun       3        6   9.0
'''
# Auto-generated Python module compiled by Mako from sections.html.
# Do not edit by hand -- regenerate from the template instead.
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10  # Mako code-generator version marker
_modified_time = 1516665949.6025667  # source template mtime at compile time
_enable_loop = True
_template_filename = 'C:/Users/mayaroney/PycharmProjects/fomo/homepage/templates/sections.html'
_template_uri = 'sections.html'
_source_encoding = 'utf-8'
import django_mako_plus
# Names of the <%block> sections this template defines/overrides.
_exports = ['header_maintenance', 'content_left', 'content_right', 'content_center', 'top_center', 'bottom']
def _mako_get_namespace(context, name):
    # Return the cached Mako namespace for (module, name); on the first
    # miss, generate the namespace table and retry (EAFP).
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
        return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
    # This template declares no <%namespace> tags, so nothing to register.
    pass
def _mako_inherit(template, context):
    # Wire up template inheritance: this template extends app_base.html.
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, 'app_base.html', _template_uri)
def render_body(context,**pageargs):
    # Render the template body: emit each <%block> in document order,
    # skipping any block already overridden by a parent template.
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        # Local closures give each block access to render_body's locals.
        def content_right():
            return render_content_right(context._locals(__M_locals))
        def header_maintenance():
            return render_header_maintenance(context._locals(__M_locals))
        def content_center():
            return render_content_center(context._locals(__M_locals))
        def bottom():
            return render_bottom(context._locals(__M_locals))
        def top_center():
            return render_top_center(context._locals(__M_locals))
        def content_left():
            return render_content_left(context._locals(__M_locals))
        __M_writer = context.writer()
        __M_writer('\r\n\r\n')
        # For each block: render it here only when no parent template
        # provides an override of the same name.
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'header_maintenance'):
            context['self'].header_maintenance(**pageargs)
        __M_writer('\r\n\r\n')
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'content_left'):
            context['self'].content_left(**pageargs)
        __M_writer('\r\n\r\n')
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'content_right'):
            context['self'].content_right(**pageargs)
        __M_writer('\r\n\r\n')
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'content_center'):
            context['self'].content_center(**pageargs)
        __M_writer('\r\n\r\n')
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'top_center'):
            context['self'].top_center(**pageargs)
        __M_writer('\r\n\r\n')
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'bottom'):
            context['self'].bottom(**pageargs)
        __M_writer('\r\n\r\n\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_header_maintenance(context,**pageargs):
    # <%block name="header_maintenance">: maintenance banner text.
    __M_caller = context.caller_stack._push_frame()
    try:
        def header_maintenance():
            return render_header_maintenance(context)
        __M_writer = context.writer()
        __M_writer('\r\n The site is currently down. Please try again later.\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_content_left(context,**pageargs):
    # <%block name="content_left">: placeholder left-column content.
    __M_caller = context.caller_stack._push_frame()
    try:
        def content_left():
            return render_content_left(context)
        __M_writer = context.writer()
        __M_writer('\r\n <h1>left side content</h1>\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_content_right(context,**pageargs):
    # <%block name="content_right">: placeholder right-column content.
    __M_caller = context.caller_stack._push_frame()
    try:
        def content_right():
            return render_content_right(context)
        __M_writer = context.writer()
        __M_writer('\r\n <h1>right side content</h1>\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_content_center(context,**pageargs):
    # <%block name="content_center">: placeholder center content.
    __M_caller = context.caller_stack._push_frame()
    try:
        def content_center():
            return render_content_center(context)
        __M_writer = context.writer()
        __M_writer('\r\n <h1>center content</h1>\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_top_center(context,**pageargs):
    # <%block name="top_center">: placeholder top-center content.
    __M_caller = context.caller_stack._push_frame()
    try:
        def top_center():
            return render_top_center(context)
        __M_writer = context.writer()
        __M_writer('\r\n <h1>Top Center Area</h1>\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_bottom(context,**pageargs):
    # <%block name="bottom">: placeholder bottom content.
    __M_caller = context.caller_stack._push_frame()
    try:
        def bottom():
            return render_bottom(context)
        __M_writer = context.writer()
        __M_writer('\r\n <h1>Bottom Area</h1>\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"filename": "C:/Users/mayaroney/PycharmProjects/fomo/homepage/templates/sections.html", "uri": "sections.html", "source_encoding": "utf-8", "line_map": {"28": 0, "45": 1, "50": 5, "55": 9, "60": 13, "65": 17, "70": 21, "75": 25, "81": 3, "87": 3, "93": 7, "99": 7, "105": 11, "111": 11, "117": 15, "123": 15, "129": 19, "135": 19, "141": 23, "147": 23, "153": 147}}
__M_END_METADATA
""" | homepage/templates/.cached_templates/sections.html.py | from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1516665949.6025667
_enable_loop = True
_template_filename = 'C:/Users/mayaroney/PycharmProjects/fomo/homepage/templates/sections.html'
_template_uri = 'sections.html'
_source_encoding = 'utf-8'
import django_mako_plus
_exports = ['header_maintenance', 'content_left', 'content_right', 'content_center', 'top_center', 'bottom']
def _mako_get_namespace(context, name):
    # Return the cached Mako namespace for (module, name); on the first
    # miss, generate the namespace table and retry (EAFP).
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
        return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
    # This template declares no <%namespace> tags, so nothing to register.
    pass
def _mako_inherit(template, context):
    # Wire up template inheritance: this template extends app_base.html.
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, 'app_base.html', _template_uri)
def render_body(context,**pageargs):
    # Render the template body: emit each <%block> in document order,
    # skipping any block already overridden by a parent template.
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        # Local closures give each block access to render_body's locals.
        def content_right():
            return render_content_right(context._locals(__M_locals))
        def header_maintenance():
            return render_header_maintenance(context._locals(__M_locals))
        def content_center():
            return render_content_center(context._locals(__M_locals))
        def bottom():
            return render_bottom(context._locals(__M_locals))
        def top_center():
            return render_top_center(context._locals(__M_locals))
        def content_left():
            return render_content_left(context._locals(__M_locals))
        __M_writer = context.writer()
        __M_writer('\r\n\r\n')
        # For each block: render it here only when no parent template
        # provides an override of the same name.
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'header_maintenance'):
            context['self'].header_maintenance(**pageargs)
        __M_writer('\r\n\r\n')
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'content_left'):
            context['self'].content_left(**pageargs)
        __M_writer('\r\n\r\n')
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'content_right'):
            context['self'].content_right(**pageargs)
        __M_writer('\r\n\r\n')
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'content_center'):
            context['self'].content_center(**pageargs)
        __M_writer('\r\n\r\n')
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'top_center'):
            context['self'].top_center(**pageargs)
        __M_writer('\r\n\r\n')
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'bottom'):
            context['self'].bottom(**pageargs)
        __M_writer('\r\n\r\n\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_header_maintenance(context,**pageargs):
    # <%block name="header_maintenance">: maintenance banner text.
    __M_caller = context.caller_stack._push_frame()
    try:
        def header_maintenance():
            return render_header_maintenance(context)
        __M_writer = context.writer()
        __M_writer('\r\n The site is currently down. Please try again later.\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_content_left(context,**pageargs):
    # <%block name="content_left">: placeholder left-column content.
    __M_caller = context.caller_stack._push_frame()
    try:
        def content_left():
            return render_content_left(context)
        __M_writer = context.writer()
        __M_writer('\r\n <h1>left side content</h1>\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_content_right(context,**pageargs):
    # <%block name="content_right">: placeholder right-column content.
    __M_caller = context.caller_stack._push_frame()
    try:
        def content_right():
            return render_content_right(context)
        __M_writer = context.writer()
        __M_writer('\r\n <h1>right side content</h1>\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_content_center(context,**pageargs):
    # <%block name="content_center">: placeholder center content.
    __M_caller = context.caller_stack._push_frame()
    try:
        def content_center():
            return render_content_center(context)
        __M_writer = context.writer()
        __M_writer('\r\n <h1>center content</h1>\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_top_center(context,**pageargs):
    # <%block name="top_center">: placeholder top-center content.
    __M_caller = context.caller_stack._push_frame()
    try:
        def top_center():
            return render_top_center(context)
        __M_writer = context.writer()
        __M_writer('\r\n <h1>Top Center Area</h1>\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_bottom(context,**pageargs):
    # <%block name="bottom">: placeholder bottom content.
    __M_caller = context.caller_stack._push_frame()
    try:
        def bottom():
            return render_bottom(context)
        __M_writer = context.writer()
        __M_writer('\r\n <h1>Bottom Area</h1>\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"filename": "C:/Users/mayaroney/PycharmProjects/fomo/homepage/templates/sections.html", "uri": "sections.html", "source_encoding": "utf-8", "line_map": {"28": 0, "45": 1, "50": 5, "55": 9, "60": 13, "65": 17, "70": 21, "75": 25, "81": 3, "87": 3, "93": 7, "99": 7, "105": 11, "111": 11, "117": 15, "123": 15, "129": 19, "135": 19, "141": 23, "147": 23, "153": 147}}
__M_END_METADATA
""" | 0.344113 | 0.092401 |
import os
import torch
import math
from pytorch_lightning.root_module.memory import ModelSummary
from pytorch_lightning.root_module.grads import GradInformation
from pytorch_lightning.root_module.model_saving import ModelIO, load_hparams_from_tags_csv
from pytorch_lightning.root_module.hooks import ModelHooks
class LightningModule(GradInformation, ModelIO, ModelHooks):
    """Base class for Lightning research modules.

    Subclasses implement the research logic (``forward``, ``training_step``,
    ``validation_step``/``validation_end``, ``configure_optimizers`` and the
    dataloader properties); the trainer drives them via the bookkeeping
    attributes initialised here.
    """

    def __init__(self, hparams):
        super(LightningModule, self).__init__()
        self.hparams = hparams
        self.dtype = torch.FloatTensor
        self.exp_save_path = None
        # Progress counters maintained by the trainer.
        self.current_epoch = 0
        self.global_step = 0
        self.loaded_optimizer_states_dict = {}
        # Back-references populated by the trainer / experiment framework.
        self.trainer = None
        self.experiment = None
        self.example_input_array = None
        # track if gpu was requested for checkpointing
        self.on_gpu = False
        # computed vars for the dataloaders (lazily filled by the trainer)
        self._tng_dataloader = None
        self._val_dataloader = None
        self._test_dataloader = None

    def forward(self, *args, **kwargs):
        """Run the model's forward pass. Subclasses must override."""
        raise NotImplementedError

    def validation_step(self, data_batch, batch_nb):
        """Process one validation batch and return whatever outputs should
        be aggregated in :meth:`validation_end`. Subclasses must override."""
        raise NotImplementedError

    def validation_end(self, outputs):
        """Aggregate the per-batch validation ``outputs`` into a metrics
        dict for the progress bar. Subclasses must override."""
        raise NotImplementedError

    def training_step(self, data_batch, batch_nb):
        """Process one training batch; return the loss and a metrics dict
        for the progress bar. Subclasses must override."""
        raise NotImplementedError

    def configure_optimizers(self):
        """Return an array of optimizers. Subclasses must override."""
        raise NotImplementedError

    def loss(self, *args, **kwargs):
        """Compute the loss from the model output. Subclasses must override."""
        raise NotImplementedError

    def summarize(self):
        """Print a layer-by-layer summary of the model."""
        model_summary = ModelSummary(self)
        print(model_summary)

    def freeze(self):
        """Disable gradient computation for every parameter."""
        for param in self.parameters():
            param.requires_grad = False

    def unfreeze(self):
        """Re-enable gradient computation for every parameter."""
        for param in self.parameters():
            param.requires_grad = True

    @property
    def tng_dataloader(self):
        """Training dataloader. Subclasses must override."""
        raise NotImplementedError

    @property
    def test_dataloader(self):
        """Test dataloader. Subclasses must override."""
        raise NotImplementedError

    @property
    def val_dataloader(self):
        """Validation dataloader. Subclasses must override."""
        raise NotImplementedError

    @classmethod
    def load_from_metrics(cls, weights_path, tags_csv, on_gpu, map_location=None):
        """Primary way of loading a model from checkpoint weights + tags CSV.

        :param weights_path: path to the saved checkpoint file.
        :param tags_csv: CSV of hyperparameter tags used to rebuild hparams.
        :param on_gpu: whether the model will run on GPU.
        :param map_location: dict for remapping storage, e.g. ``{'cuda:1': 'cuda:0'}``.
        :return: the restored model instance.
        """
        hparams = load_hparams_from_tags_csv(tags_csv)
        # Idiomatic setattr() instead of calling __setattr__ directly.
        setattr(hparams, 'on_gpu', on_gpu)
        if on_gpu:
            if map_location is not None:
                checkpoint = torch.load(weights_path, map_location=map_location)
            else:
                checkpoint = torch.load(weights_path)
        else:
            # Force all tensors onto CPU when no GPU was requested.
            checkpoint = torch.load(weights_path, map_location=lambda storage, loc: storage)
        model = cls(hparams)
        # allow model-specific state to load before the state_dict
        model.load_model_specific(checkpoint)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        return model
import torch
import math
from pytorch_lightning.root_module.memory import ModelSummary
from pytorch_lightning.root_module.grads import GradInformation
from pytorch_lightning.root_module.model_saving import ModelIO, load_hparams_from_tags_csv
from pytorch_lightning.root_module.hooks import ModelHooks
class LightningModule(GradInformation, ModelIO, ModelHooks):
    """Base class for Lightning research modules (duplicate sample).

    Subclasses implement the research logic (``forward``, ``training_step``,
    ``validation_step``/``validation_end``, ``configure_optimizers`` and the
    dataloader properties); the trainer drives them via the bookkeeping
    attributes initialised here.
    """

    def __init__(self, hparams):
        super(LightningModule, self).__init__()
        self.hparams = hparams
        self.dtype = torch.FloatTensor
        self.exp_save_path = None
        # Progress counters maintained by the trainer.
        self.current_epoch = 0
        self.global_step = 0
        self.loaded_optimizer_states_dict = {}
        # Back-references populated by the trainer / experiment framework.
        self.trainer = None
        self.experiment = None
        self.example_input_array = None
        # track if gpu was requested for checkpointing
        self.on_gpu = False
        # computed vars for the dataloaders (lazily filled by the trainer)
        self._tng_dataloader = None
        self._val_dataloader = None
        self._test_dataloader = None

    def forward(self, *args, **kwargs):
        """Run the model's forward pass. Subclasses must override."""
        raise NotImplementedError

    def validation_step(self, data_batch, batch_nb):
        """Process one validation batch and return whatever outputs should
        be aggregated in :meth:`validation_end`. Subclasses must override."""
        raise NotImplementedError

    def validation_end(self, outputs):
        """Aggregate the per-batch validation ``outputs`` into a metrics
        dict for the progress bar. Subclasses must override."""
        raise NotImplementedError

    def training_step(self, data_batch, batch_nb):
        """Process one training batch; return the loss and a metrics dict
        for the progress bar. Subclasses must override."""
        raise NotImplementedError

    def configure_optimizers(self):
        """Return an array of optimizers. Subclasses must override."""
        raise NotImplementedError

    def loss(self, *args, **kwargs):
        """Compute the loss from the model output. Subclasses must override."""
        raise NotImplementedError

    def summarize(self):
        """Print a layer-by-layer summary of the model."""
        model_summary = ModelSummary(self)
        print(model_summary)

    def freeze(self):
        """Disable gradient computation for every parameter."""
        for param in self.parameters():
            param.requires_grad = False

    def unfreeze(self):
        """Re-enable gradient computation for every parameter."""
        for param in self.parameters():
            param.requires_grad = True

    @property
    def tng_dataloader(self):
        """Training dataloader. Subclasses must override."""
        raise NotImplementedError

    @property
    def test_dataloader(self):
        """Test dataloader. Subclasses must override."""
        raise NotImplementedError

    @property
    def val_dataloader(self):
        """Validation dataloader. Subclasses must override."""
        raise NotImplementedError

    @classmethod
    def load_from_metrics(cls, weights_path, tags_csv, on_gpu, map_location=None):
        """Primary way of loading a model from checkpoint weights + tags CSV.

        :param weights_path: path to the saved checkpoint file.
        :param tags_csv: CSV of hyperparameter tags used to rebuild hparams.
        :param on_gpu: whether the model will run on GPU.
        :param map_location: dict for remapping storage, e.g. ``{'cuda:1': 'cuda:0'}``.
        :return: the restored model instance.
        """
        hparams = load_hparams_from_tags_csv(tags_csv)
        # Idiomatic setattr() instead of calling __setattr__ directly.
        setattr(hparams, 'on_gpu', on_gpu)
        if on_gpu:
            if map_location is not None:
                checkpoint = torch.load(weights_path, map_location=map_location)
            else:
                checkpoint = torch.load(weights_path)
        else:
            # Force all tensors onto CPU when no GPU was requested.
            checkpoint = torch.load(weights_path, map_location=lambda storage, loc: storage)
        model = cls(hparams)
        # allow model-specific state to load before the state_dict
        model.load_model_specific(checkpoint)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        return model
from . import array_create
def atleast_1d(*arys):
    """Convert inputs to arrays with at least one dimension.

    Scalar inputs become 1-dimensional arrays; higher-dimensional inputs
    are preserved as-is.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more input arrays.

    Returns
    -------
    ret : ndarray or list of ndarray
        A single array when one input is given, otherwise a list, each
        with ``a.ndim >= 1``. Copies are made only if necessary.

    See Also
    --------
    atleast_2d, atleast_3d
    """
    outputs = []
    for a in arys:
        arr = array_create.array(a)
        # Promote 0-d (scalar) arrays to shape (1,).
        outputs.append(arr.reshape(1) if len(arr.shape) == 0 else arr)
    return outputs[0] if len(outputs) == 1 else outputs
def atleast_2d(*arys):
    """View inputs as arrays with at least two dimensions.

    Scalars become shape ``(1, 1)``, 1-D arrays become views of shape
    ``(1, N)``; inputs with two or more dimensions pass through untouched.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences.

    Returns
    -------
    res : ndarray or list of ndarray
        A single array when one input is given, otherwise a list, each
        with ``a.ndim >= 2``. Copies are avoided where possible.

    See Also
    --------
    atleast_1d, atleast_3d
    """
    outputs = []
    for a in arys:
        arr = array_create.array(a)
        ndim = len(arr.shape)
        if ndim == 0:
            promoted = arr.reshape(1, 1)
        elif ndim == 1:
            promoted = arr[None, :]
        else:
            promoted = arr
        outputs.append(promoted)
    return outputs[0] if len(outputs) == 1 else outputs
def atleast_3d(*arys):
    """View inputs as arrays with at least three dimensions.

    Scalars become shape ``(1, 1, 1)``, 1-D arrays of shape ``(N,)``
    become views of shape ``(1, N, 1)``, and 2-D arrays of shape
    ``(M, N)`` become views of shape ``(M, N, 1)``; inputs with three or
    more dimensions pass through untouched.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences.

    Returns
    -------
    res : ndarray or list of ndarray
        A single array when one input is given, otherwise a list, each
        with ``a.ndim >= 3``. Copies are avoided where possible.

    See Also
    --------
    atleast_1d, atleast_2d
    """
    outputs = []
    for a in arys:
        arr = array_create.array(a)
        ndim = len(arr.shape)
        if ndim == 0:
            promoted = arr.reshape(1, 1, 1)
        elif ndim == 1:
            promoted = arr[None, :, None]
        elif ndim == 2:
            promoted = arr[:, :, None]
        else:
            promoted = arr
        outputs.append(promoted)
    return outputs[0] if len(outputs) == 1 else outputs
def concatenate(array_list, axis=0):
    """Join a sequence of arrays along an existing axis.

    Parameters
    ----------
    array_list : sequence of array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int, optional
        The axis along which the arrays will be joined. Default is 0.

    Returns
    -------
    res : ndarray
        The concatenated array, or ``None`` when `array_list` is empty.

    See Also
    --------
    stack : Stack a sequence of arrays along a new axis.
    hstack : Stack arrays in sequence horizontally (column wise).
    vstack : Stack arrays in sequence vertically (row wise).
    """
    if len(array_list) == 0:
        return None
    # The output has the inputs' shape, except along `axis`, where it is
    # the sum of the inputs' extents.
    ret_shape = list(array_list[0].shape)
    ret_shape[axis] = sum(ary.shape[axis] for ary in array_list)
    ret = array_create.empty(ret_shape, dtype=array_list[0].dtype)
    # Copy each input into its slot along `axis`. A tuple of slice objects
    # replaces the original exec() on a dynamically built string -- it is
    # equivalent, but safe and avoids compiling code per input.
    offset = 0
    for ary in array_list:
        index = [slice(None)] * ret.ndim
        index[axis] = slice(offset, offset + ary.shape[axis])
        ret[tuple(index)] = ary
        offset += ary.shape[axis]
    return ret
def vstack(tup):
    """Stack arrays in sequence vertically (row wise).

    Each input is promoted to at least two dimensions, then all inputs are
    joined along the first axis -- equivalent to
    ``concatenate(tup, axis=0)`` for inputs that are already 2-D.
    Rebuilds arrays divided by `vsplit`.

    Parameters
    ----------
    tup : sequence of ndarrays
        Arrays to stack; they must share a shape along all but the first
        axis.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    stack, hstack, concatenate
    """
    return concatenate([atleast_2d(item) for item in tup], 0)
def hstack(tup):
    """Stack arrays in sequence horizontally (column wise).

    Each input is promoted to at least one dimension, then all inputs are
    joined along the second axis -- except for 1-D inputs, whose only axis
    is the "horizontal" one, so they are joined along axis 0.
    Rebuilds arrays divided by `hsplit`.

    Parameters
    ----------
    tup : sequence of ndarrays
        Arrays to stack; they must share a shape along all but the second
        axis.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    stack, vstack, concatenate
    """
    promoted = [atleast_1d(item) for item in tup]
    # As a special case, dimension 0 of 1-dimensional arrays is "horizontal".
    join_axis = 0 if promoted[0].ndim == 1 else 1
    return concatenate(promoted, join_axis)
def stack(arrays, axis=0):
    """Join a sequence of arrays along a new axis.

    The `axis` parameter specifies the index of the new axis in the
    dimensions of the result: ``axis=0`` makes it the first dimension,
    ``axis=-1`` the last.

    Parameters
    ----------
    arrays : sequence of array_like
        Each array must have the same shape.
    axis : int, optional
        The axis in the result array along which the input arrays are
        stacked.

    Returns
    -------
    stacked : ndarray
        The stacked array has one more dimension than the input arrays.

    Raises
    ------
    ValueError
        If `arrays` is empty or the inputs' shapes differ.
    IndexError
        If `axis` is out of bounds for the result's dimensionality.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    """
    arrays = [array_create.array(arr) for arr in arrays]
    if not arrays:
        raise ValueError('need at least one array to stack')
    shapes = set(arr.shape for arr in arrays)
    if len(shapes) != 1:
        raise ValueError('all input arrays must have the same shape')
    result_ndim = arrays[0].ndim + 1
    if not -result_ndim <= axis < result_ndim:
        msg = 'axis {0} out of bounds [-{1}, {1})'.format(axis, result_ndim)
        raise IndexError(msg)
    if axis < 0:
        axis += result_ndim
    # Insert a new length-1 axis at `axis` on every input, then join the
    # expanded arrays along that axis.
    sl = (slice(None),) * axis + (None,)
    expanded_arrays = [arr[sl] for arr in arrays]
    return concatenate(expanded_arrays, axis=axis)
def atleast_1d(*arys):
    """Convert inputs to arrays with at least one dimension.

    Scalar inputs become 1-dimensional arrays; higher-dimensional inputs
    are preserved as-is.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more input arrays.

    Returns
    -------
    ret : ndarray or list of ndarray
        A single array when one input is given, otherwise a list, each
        with ``a.ndim >= 1``. Copies are made only if necessary.

    See Also
    --------
    atleast_2d, atleast_3d
    """
    outputs = []
    for a in arys:
        arr = array_create.array(a)
        # Promote 0-d (scalar) arrays to shape (1,).
        outputs.append(arr.reshape(1) if len(arr.shape) == 0 else arr)
    return outputs[0] if len(outputs) == 1 else outputs
def atleast_2d(*arys):
    """
    View inputs as arrays with at least two dimensions.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences. Non-array inputs are converted
        to arrays; arrays that already have two or more dimensions are
        preserved.

    Returns
    -------
    res, res2, ... : ndarray
        A single array when exactly one input is given, otherwise a list
        of arrays, each with ``a.ndim >= 2``. Copies are avoided where
        possible; views with two or more dimensions are returned.

    See Also
    --------
    atleast_1d, atleast_3d
    """
    promoted = []
    for item in arys:
        arr = array_create.array(item)
        rank = len(arr.shape)
        if rank == 0:
            # Scalar -> 1x1 matrix.
            arr = arr.reshape(1, 1)
        elif rank == 1:
            # Prepend a length-1 axis: shape (N,) -> (1, N).
            arr = arr[None, :]
        promoted.append(arr)
    return promoted[0] if len(promoted) == 1 else promoted
def atleast_3d(*arys):
    """
    View inputs as arrays with at least three dimensions.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences. Non-array inputs are converted
        to arrays; arrays that already have three or more dimensions are
        preserved.

    Returns
    -------
    res1, res2, ... : ndarray
        A single array when exactly one input is given, otherwise a list
        of arrays, each with ``a.ndim >= 3``. Copies are avoided where
        possible. A 1-D array of shape ``(N,)`` becomes a view of shape
        ``(1, N, 1)``; a 2-D array of shape ``(M, N)`` becomes a view of
        shape ``(M, N, 1)``.

    See Also
    --------
    atleast_1d, atleast_2d
    """
    promoted = []
    for item in arys:
        arr = array_create.array(item)
        rank = len(arr.shape)
        if rank == 0:
            arr = arr.reshape(1, 1, 1)
        elif rank == 1:
            # (N,) -> (1, N, 1)
            arr = arr[None, :, None]
        elif rank == 2:
            # (M, N) -> (M, N, 1)
            arr = arr[:, :, None]
        promoted.append(arr)
    return promoted[0] if len(promoted) == 1 else promoted
def concatenate(array_list, axis=0):
    """
    concatenate((a1, a2, ...), axis=0)

    Join a sequence of arrays along an existing axis.

    Parameters
    ----------
    array_list : sequence of array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int, optional
        The axis along which the arrays will be joined. Default is 0.
        Negative values count from the last dimension.

    Returns
    -------
    res : ndarray
        The concatenated array, or ``None`` when `array_list` is empty.

    See Also
    --------
    stack : Stack a sequence of arrays along a new axis.
    hstack : Stack arrays in sequence horizontally (column wise).
    vstack : Stack arrays in sequence vertically (row wise).
    """
    if len(array_list) == 0:
        return None
    ndim = len(array_list[0].shape)
    # Normalize a negative axis so the per-axis bookkeeping below works.
    if axis < 0:
        axis += ndim
    # The output shape equals the first input's shape, except along `axis`,
    # where the extents of all inputs are summed.
    ret_shape = list(array_list[0].shape)
    ret_shape[axis] = sum(ary.shape[axis] for ary in array_list)
    ret = array_create.empty(ret_shape, dtype=array_list[0].dtype)
    # Copy each input into its slot along `axis` using a tuple of slice
    # objects. (The previous implementation built Python indexing source
    # code as a string and exec'd it, which was slow, fragile, shadowed
    # the builtin `slice`, and broke for negative `axis`.)
    offset = 0
    for ary in array_list:
        index = [slice(None)] * ndim
        index[axis] = slice(offset, offset + ary.shape[axis])
        ret[tuple(index)] = ary
        offset += ary.shape[axis]
    return ret
def vstack(tup):
    """
    Stack arrays in sequence vertically (row wise).

    Each input is first promoted to at least two dimensions, then all
    inputs are joined along the first axis. Rebuilds arrays divided by
    `vsplit`. Equivalent to ``concatenate(tup, axis=0)`` when the inputs
    are already at least 2-dimensional.

    Parameters
    ----------
    tup : sequence of ndarrays
        Tuple containing arrays to be stacked. The arrays must have the
        same shape along all but the first axis.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    stack : Join a sequence of arrays along a new axis.
    hstack : Stack arrays in sequence horizontally (column wise).
    concatenate : Join a sequence of arrays along an existing axis.
    """
    promoted = [atleast_2d(arr) for arr in tup]
    return concatenate(promoted, 0)
def hstack(tup):
    """
    Stack arrays in sequence horizontally (column wise).

    Each input is first promoted to at least one dimension, then all
    inputs are joined column-wise. Rebuilds arrays divided by `hsplit`.
    Equivalent to ``concatenate(tup, axis=1)`` for inputs with two or
    more dimensions.

    Parameters
    ----------
    tup : sequence of ndarrays
        All arrays must have the same shape along all but the second axis.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    stack : Join a sequence of arrays along a new axis.
    vstack : Stack arrays in sequence vertically (row wise).
    concatenate : Join a sequence of arrays along an existing axis.
    """
    promoted = [atleast_1d(arr) for arr in tup]
    # Special case: for 1-D inputs, dimension 0 is the "horizontal" one.
    join_axis = 0 if promoted[0].ndim == 1 else 1
    return concatenate(promoted, join_axis)
def stack(arrays, axis=0):
    """
    Join a sequence of arrays along a new axis.

    The `axis` parameter specifies the index of the new axis in the
    dimensions of the result: ``axis=0`` makes it the first dimension,
    ``axis=-1`` the last.

    Parameters
    ----------
    arrays : sequence of array_like
        Each array must have the same shape.
    axis : int, optional
        The axis in the result array along which the input arrays are
        stacked.

    Returns
    -------
    stacked : ndarray
        The stacked array, with one more dimension than the inputs.

    Raises
    ------
    ValueError
        If `arrays` is empty or the inputs differ in shape.
    IndexError
        If `axis` falls outside the valid range for the result.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    """
    converted = [array_create.array(item) for item in arrays]
    if not converted:
        raise ValueError('need at least one array to stack')
    distinct_shapes = set(arr.shape for arr in converted)
    if len(distinct_shapes) != 1:
        raise ValueError('all input arrays must have the same shape')
    result_ndim = converted[0].ndim + 1
    if axis < -result_ndim or axis >= result_ndim:
        msg = 'axis {0} out of bounds [-{1}, {1})'.format(axis, result_ndim)
        raise IndexError(msg)
    if axis < 0:
        axis += result_ndim
    # Insert a new length-1 axis at position `axis` on every input, then
    # join the expanded arrays along that axis.
    expander = (slice(None),) * axis + (None,)
    return concatenate([arr[expander] for arr in converted], axis=axis)
from tkinter import *
from tkinter import ttk, Text
class Writing_Block_Remover:
    """Tkinter app that fights writer's block: during a 5-minute session the
    user must keep typing; pausing for more than 5 seconds wipes the text."""

    def __init__(self) -> None:
        self.root = Tk()
        self.root.geometry("800x400")
        self.root.title("Writing Block Remover")
        self.frm = ttk.Frame(self.root, padding=10)
        self.frm.grid()
        self.style = ttk.Style()
        # Instructions Label
        self.instruction_label = ttk.Label(text="Once you start, keep writing for 5 minutes. If you stop for more than 5 seconds, everything you have typed will disappear.")
        self.instruction_label.grid(row=0, column=0, columnspan=4, padx=15, pady=10)
        # Start Restart button
        self.start_restart_button = ttk.Button(text="Start/Restart", command=self.restart_test)
        self.start_restart_button.grid(row=1, column=0, padx=5, pady=5)
        # Pending `after` callback ids; None while not scheduled.
        self.timer = None           # session countdown callback id
        self.five_sec_timer = None  # inactivity-watchdog callback id
        # time label
        self.time_label = ttk.Label(text="Time: 00:00")
        self.time_label.grid(row=1, column=3, padx=15, pady=5)
        # Input Text
        self.user_text = Text(self.root, width=105, height=20, wrap="word")
        self.user_text.grid(row=2, column=0, columnspan=4, padx=20, pady=5)
        self.user_text.bind('<KeyPress>', self.typing)
        self.user_text.config(state=DISABLED)
        # TODO: export button

    def update_time_label(self, mins, secs):
        """Render the remaining session time as MM:SS."""
        self.time_label.config(text=f"Time: {mins:02}:{secs:02}")
        self.time_label.update()

    def _cancel_five_sec_timer(self):
        """Cancel the pending inactivity callback, if any.

        Guarding on None also fixes a latent AttributeError: previously
        `start_restart_timer` cancelled `self.five_sec_timer` without it
        ever having been initialized.
        """
        if self.five_sec_timer is not None:
            self.root.after_cancel(self.five_sec_timer)
            self.five_sec_timer = None

    def start_restart_timer(self, timer_value_in_seconds):
        """Count the session down once per second; end the session at zero."""
        mins, secs = divmod(timer_value_in_seconds, 60)
        self.update_time_label(mins, secs)
        if timer_value_in_seconds > 1:
            self.timer = self.root.after(1000, self.start_restart_timer, timer_value_in_seconds - 1)
        else:
            self.time_label.config(text="Time's up!")
            self.time_label.update()
            self._cancel_five_sec_timer()
            self.start_restart_button['state'] = "normal"
            self.user_text.config(state=DISABLED)
            self.timer = None

    def five_second_timer(self, timer_value_in_seconds):
        """Inactivity watchdog: wipe the text whenever it counts down to zero."""
        print(timer_value_in_seconds)  # debug trace of the watchdog countdown
        if timer_value_in_seconds > 0:
            self.five_sec_timer = self.root.after(1000, self.five_second_timer, timer_value_in_seconds - 1)
        else:
            self.user_text.delete('1.0', END)
            self.five_sec_timer = self.root.after(1000, self.five_second_timer, 5)

    def typing(self, key):
        """Any keystroke during an active session restarts the 5s watchdog."""
        if self.timer is not None:
            self._cancel_five_sec_timer()
            self.five_second_timer(5)

    def restart_test(self):
        """Start (or restart) a 5-minute writing session."""
        if self.timer is not None:
            self.root.after_cancel(self.timer)
        # BUG FIX: also cancel any pending inactivity callback. Previously a
        # mid-session restart left the old watchdog chain running alongside
        # the new one, wiping the fresh text on the stale schedule.
        self._cancel_five_sec_timer()
        self.start_restart_button['state'] = "disabled"
        self.user_text.config(state="normal")
        self.user_text.delete('1.0', END)
        self.timer = None
        self.start_restart_timer(300)
        self.five_second_timer(5)
# Build the app and hand control to the Tk event loop.
writing_block_remover = Writing_Block_Remover()
writing_block_remover.root.mainloop()
from tkinter import ttk, Text
class Writing_Block_Remover:
    """Tkinter app that fights writer's block: during a 5-minute session the
    user must keep typing; pausing for more than 5 seconds wipes the text."""

    def __init__(self) -> None:
        self.root = Tk()
        self.root.geometry("800x400")
        self.root.title("Writing Block Remover")
        self.frm = ttk.Frame(self.root, padding=10)
        self.frm.grid()
        self.style = ttk.Style()
        # Instructions Label
        self.instruction_label = ttk.Label(text="Once you start, keep writing for 5 minutes. If you stop for more than 5 seconds, everything you have typed will disappear.")
        self.instruction_label.grid(row=0, column=0, columnspan=4, padx=15, pady=10)
        # Start Restart button
        self.start_restart_button = ttk.Button(text="Start/Restart", command=self.restart_test)
        self.start_restart_button.grid(row=1, column=0, padx=5, pady=5)
        # Pending `after` callback ids; None while not scheduled.
        self.timer = None           # session countdown callback id
        self.five_sec_timer = None  # inactivity-watchdog callback id
        # time label
        self.time_label = ttk.Label(text="Time: 00:00")
        self.time_label.grid(row=1, column=3, padx=15, pady=5)
        # Input Text
        self.user_text = Text(self.root, width=105, height=20, wrap="word")
        self.user_text.grid(row=2, column=0, columnspan=4, padx=20, pady=5)
        self.user_text.bind('<KeyPress>', self.typing)
        self.user_text.config(state=DISABLED)
        # TODO: export button

    def update_time_label(self, mins, secs):
        """Render the remaining session time as MM:SS."""
        self.time_label.config(text=f"Time: {mins:02}:{secs:02}")
        self.time_label.update()

    def _cancel_five_sec_timer(self):
        """Cancel the pending inactivity callback, if any.

        Guarding on None also fixes a latent AttributeError: previously
        `start_restart_timer` cancelled `self.five_sec_timer` without it
        ever having been initialized.
        """
        if self.five_sec_timer is not None:
            self.root.after_cancel(self.five_sec_timer)
            self.five_sec_timer = None

    def start_restart_timer(self, timer_value_in_seconds):
        """Count the session down once per second; end the session at zero."""
        mins, secs = divmod(timer_value_in_seconds, 60)
        self.update_time_label(mins, secs)
        if timer_value_in_seconds > 1:
            self.timer = self.root.after(1000, self.start_restart_timer, timer_value_in_seconds - 1)
        else:
            self.time_label.config(text="Time's up!")
            self.time_label.update()
            self._cancel_five_sec_timer()
            self.start_restart_button['state'] = "normal"
            self.user_text.config(state=DISABLED)
            self.timer = None

    def five_second_timer(self, timer_value_in_seconds):
        """Inactivity watchdog: wipe the text whenever it counts down to zero."""
        print(timer_value_in_seconds)  # debug trace of the watchdog countdown
        if timer_value_in_seconds > 0:
            self.five_sec_timer = self.root.after(1000, self.five_second_timer, timer_value_in_seconds - 1)
        else:
            self.user_text.delete('1.0', END)
            self.five_sec_timer = self.root.after(1000, self.five_second_timer, 5)

    def typing(self, key):
        """Any keystroke during an active session restarts the 5s watchdog."""
        if self.timer is not None:
            self._cancel_five_sec_timer()
            self.five_second_timer(5)

    def restart_test(self):
        """Start (or restart) a 5-minute writing session."""
        if self.timer is not None:
            self.root.after_cancel(self.timer)
        # BUG FIX: also cancel any pending inactivity callback. Previously a
        # mid-session restart left the old watchdog chain running alongside
        # the new one, wiping the fresh text on the stale schedule.
        self._cancel_five_sec_timer()
        self.start_restart_button['state'] = "disabled"
        self.user_text.config(state="normal")
        self.user_text.delete('1.0', END)
        self.timer = None
        self.start_restart_timer(300)
        self.five_second_timer(5)
# Build the app and hand control to the Tk event loop.
writing_block_remover = Writing_Block_Remover()
writing_block_remover.root.mainloop()
"""Module that contains widgets for managing AiiDAlab applications."""
from subprocess import CalledProcessError
import ipywidgets as ipw
import traitlets
from aiidalab.app import AppRemoteUpdateStatus as AppStatus
from aiidalab.app import AppVersion
from jinja2 import Template
from packaging.version import parse
from home.utils import load_logo
from home.widgets import LogOutputWidget, Spinner, StatusHTML
# Status-message templates rendered into the install-info area ({} is the
# message text); the Font Awesome icon distinguishes success from failure.
HTML_MSG_PROGRESS = """{}"""
HTML_MSG_SUCCESS = """<i class="fa fa-check" style="color:#337ab7;font-size:1em;" ></i>
{}"""
HTML_MSG_FAILURE = """<i class="fa fa-times" style="color:red;font-size:1em;" ></i>
{}"""
class VersionSelectorWidget(ipw.VBox):
    """Widget that shows the installed app version and lets the user pick
    a version to install."""

    # Mirrored onto the install dropdown via the observer below.
    disabled = traitlets.Bool()
    prereleases = traitlets.Bool()

    def __init__(self, *args, **kwargs):
        widget_style = {"description_width": "100px"}
        self.installed_version = ipw.Text(
            description="Installed version",
            disabled=True,
            style=widget_style,
        )
        self.version_to_install = ipw.Dropdown(
            description="Install version",
            disabled=True,
            style=widget_style,
        )
        self.info = StatusHTML(
            value="",
            layout={"max_width": "600px"},
            style=widget_style,
        )
        super().__init__(
            children=[self.installed_version, self.version_to_install, self.info],
            layout={"min_width": "300px"},
            *args,
            **kwargs,
        )

    @traitlets.observe("disabled")
    def _observe_disabled(self, change):
        """Propagate the widget-level `disabled` flag to the dropdown."""
        self.version_to_install.disabled = change["new"]
class AppManagerWidget(ipw.VBox):
    """Widget for management of apps.

    Shows basic information about the app (description, authors, etc.) and provides
    an interface to install, uninstall, and update the application, as well as change
    versions if possible.
    """

    # NOTE(review): attribute name keeps its historical misspelling
    # ("COMPATIBILTIY") because it is a public class attribute.
    COMPATIBILTIY_WARNING = Template(
        """<div class="alert alert-danger">
        The installed version of this app is not compatible with this AiiDAlab environment.
        </div>"""
    )

    # Dismissible banner listing per-spec missing requirements.
    COMPATIBILITY_INFO = Template(
        """<div class="alert alert-warning alert-dismissible">
        <a href="#" class="close" data-dismiss="alert" aria-label="close">×</a>
        Reasons for incompatibility:
        <ul>
        {% for spec in app.compatibility_info %}
        <li>{{ spec }}:
            <ul>
            {% for missing_req in app.compatibility_info[spec] %}
                <li>missing: {{ missing_req }}</li>
            {% endfor %}
            </ul>
        </li>
        {% endfor %}
        </ul>
        </div>"""
    )

    # Header card: title, authors, description, and optional URL.
    TEMPLATE = Template(
        """<b> <div style="font-size: 30px; text-align:center;">{{ app.title }}</div></b>
        <br>
        <b>Authors:</b> {{ app.authors }}
        <br>
        <b>Description:</b> {{ app.description }}
        {% if app.url %}
        <br>
        <b>URL:</b> <a href="{{ app.url }}">{{ app.url }}</a>
        {% endif %}"""
    )

    def __init__(self, app, with_version_selector=False):
        """Build the management UI for *app*.

        :param app: app object whose traits (``busy``, ``detached``,
            ``installed_version``, ``available_versions``, ...) drive this
            widget's state.
        :param with_version_selector: whether the version-selection dropdown
            is visible.
        """
        self.app = app
        # Banner shown only when the installed version is incompatible.
        self.compatibility_warning = ipw.HTML(self.COMPATIBILTIY_WARNING.render())
        self.compatibility_warning.layout = {"width": "600px"}
        self.compatibility_warning.layout.visibility = "hidden"
        body = ipw.HTML(self.TEMPLATE.render(app=app))
        body.layout = {"width": "600px"}
        # Setup install_info
        self.install_info = StatusHTML(
            message="<p><br></p>"
        )  # show empty line by default
        self.dependencies_log = LogOutputWidget(
            layout=ipw.Layout(min_height="0px", max_height="100px")
        )  # max_height controls the maximum height of the log field.
        self.dependencies_log.template = (
            "Installing dependencies..." + self.dependencies_log.template
        )
        # Setup buttons
        self.install_button = ipw.Button(description="Install", disabled=True)
        self.install_button.on_click(self._install_version)
        self.uninstall_button = ipw.Button(description="Uninstall", disabled=True)
        self.uninstall_button.on_click(self._uninstall_app)
        self.update_button = ipw.Button(description="Update", disabled=True)
        self.update_button.on_click(self._update_app)
        self.issue_indicator = ipw.HTML()
        # "Ignore" checkbox lets the user override blocked operations
        # (e.g. when the app is detached).
        self.blocked_ignore = ipw.Checkbox(description="Ignore")
        self.blocked_ignore.observe(self._refresh_widget_state)
        self.compatibility_info = ipw.HTML()
        # Spinner is animated while the app reports itself busy.
        self.spinner = Spinner("color:#337ab7;font-size:1em;")
        ipw.dlink((self.app, "busy"), (self.spinner, "enabled"))
        children = [
            ipw.HBox([self.compatibility_warning]),
            ipw.HBox([load_logo(app), body]),
            ipw.HBox(
                [
                    self.uninstall_button,
                    self.install_button,
                    self.update_button,
                    self.spinner,
                ]
            ),
            ipw.HBox([self.install_info]),
            ipw.HBox([self.dependencies_log]),
            ipw.HBox([self.issue_indicator, self.blocked_ignore]),
            ipw.HBox([self.compatibility_info]),
        ]
        # Version selector: options and current value are one-way-linked
        # from the app's traits.
        self.version_selector = VersionSelectorWidget()
        ipw.dlink(
            (self.app, "available_versions"),
            (self.version_selector.version_to_install, "options"),
            transform=lambda versions: [
                (self._formatted_version(version), version) for version in versions
            ],
        )
        ipw.dlink(
            (self.app, "installed_version"),
            (self.version_selector.installed_version, "value"),
            transform=self._formatted_version,
        )
        self.version_selector.layout.visibility = (
            "visible" if with_version_selector else "hidden"
        )
        self.version_selector.disabled = True
        self.version_selector.version_to_install.observe(
            self._refresh_widget_state, "value"
        )
        children.insert(2, self.version_selector)
        # Prereleases opt-in
        self.include_prereleases = ipw.Checkbox(description="Include prereleases")
        ipw.dlink(
            (self.include_prereleases, "value"), (self.app, "include_prereleases")
        )
        self.app.observe(
            self._refresh_prereleases, names=["has_prereleases", "installed_version"]
        )
        self._refresh_prereleases(change=dict(owner=self.app))  # initialize
        children.insert(3, self.include_prereleases)
        super().__init__(children=children)
        self.app.observe(self._refresh_widget_state)
        self.app.refresh_async()  # init all widgets

    @staticmethod
    def _formatted_version(version):
        """Format the unambiguous version identifier into a human-friendly representation."""
        if version is AppVersion.NOT_INSTALLED:
            return "[not installed]"
        if version is AppVersion.UNKNOWN:
            return "[unknown version]"
        if not version:  # will be displayed during transition phases
            return "[n/a]"
        return version

    def _refresh_prereleases(self, change):
        """Sync the prereleases checkbox with the app's prerelease state."""
        app = change["owner"]
        installed_version = app.installed_version
        has_prereleases = app.has_prereleases
        # A non-string installed_version (e.g. the NOT_INSTALLED sentinel)
        # cannot be a prerelease.
        prerelease_installed = (
            parse(installed_version).is_prerelease
            if isinstance(installed_version, str)
            else False
        )
        with self.hold_trait_notifications():
            # The checkbox can only be enabled when the app has prereleases,
            # and cannot be disabled when a prerelease is currently installed.
            self.include_prereleases.disabled = (
                prerelease_installed or not has_prereleases
            )
            # The checkbox is checked if it was already checked or a prerelease is installed:
            self.include_prereleases.value = (
                self.include_prereleases.value or prerelease_installed
            )

    def _refresh_widget_state(self, _=None):
        """Refresh the widget to reflect the current state of the app."""
        with self.hold_trait_notifications():
            # Collect information about app state.
            installed = self.app.is_installed()
            installed_version = self.app.installed_version
            compatible = len(self.app.available_versions) > 0
            registered = self.app.remote_update_status is not AppStatus.NOT_REGISTERED
            cannot_reach_registry = (
                self.app.remote_update_status is AppStatus.CANNOT_REACH_REGISTRY
            )
            busy = self.app.busy
            detached = self.app.detached
            available_versions = self.app.available_versions
            # `override` means: the app is detached but the user ticked
            # "Ignore", so blocked operations are allowed (at their own risk).
            override = detached and self.blocked_ignore.value
            blocked_install = (
                detached or not compatible
            ) and not self.blocked_ignore.value
            blocked_uninstall = (
                detached or not registered or cannot_reach_registry
            ) and not self.blocked_ignore.value
            # Check app compatibility and show banner if not compatible.
            self.compatibility_warning.layout.visibility = (
                "visible"
                if (self.app.is_installed() and self.app.compatible is False)
                else "hidden"
            )
            # Prepare warning icons and messages depending on whether we override or not.
            # These messages and icons are only shown if needed.
            warn_or_ban_icon = "warning" if override else "ban"
            if override:
                tooltip_danger = "Operation will lead to potential loss of local data!"
            else:
                tooltip_danger = "Operation blocked due to potential data loss."
            tooltip_incompatible = "The app is not supported for this environment."
            # Determine whether we can install, update, and uninstall.
            can_switch = (
                installed_version != self.version_selector.version_to_install.value
                and available_versions
            )
            latest_selected = self.version_selector.version_to_install.index == 0
            can_install = (
                can_switch and (detached or not latest_selected)
            ) or not installed
            can_uninstall = installed
            # can_update is None when update availability cannot be determined.
            try:
                can_update = (
                    self.app.remote_update_status is AppStatus.UPDATE_AVAILABLE
                    and installed
                )
            except RuntimeError:
                can_update = None
            # Update the install button state.
            self.install_button.disabled = busy or blocked_install or not can_install
            self.install_button.button_style = "info" if can_install else ""
            self.install_button.icon = (
                ""
                if can_install and not detached
                else warn_or_ban_icon
                if can_install
                else ""
            )
            if self.app.compatible:
                self.install_button.tooltip = (
                    ""
                    if can_install and not detached
                    else tooltip_danger
                    if can_install
                    else ""
                )
            else:
                self.install_button.tooltip = (
                    ""
                    if installed and not detached
                    else tooltip_danger
                    if installed
                    else tooltip_incompatible
                )
            self.install_button.description = (
                "Install"
                if not (installed and can_install)
                else f"Install ({self._formatted_version(self.version_selector.version_to_install.value)})"
            )
            # Update the uninstall button state.
            self.uninstall_button.disabled = (
                busy or blocked_uninstall or not can_uninstall
            )
            self.uninstall_button.button_style = "danger" if can_uninstall else ""
            self.uninstall_button.icon = warn_or_ban_icon if detached else "trash-o"
            self.uninstall_button.tooltip = (
                ""
                if can_uninstall and not detached
                else tooltip_danger
                if can_uninstall
                else ""
            )
            # Update the update button state.
            self.update_button.disabled = busy or blocked_install or not can_update
            if self.app.is_installed() and can_update is None:
                self.update_button.icon = "warning"
                self.update_button.tooltip = (
                    "Unable to determine availability of updates."
                )
            else:
                self.update_button.icon = (
                    "arrow-circle-up"
                    if can_update and not detached
                    else warn_or_ban_icon
                    if can_update
                    else ""
                )
                self.update_button.button_style = "success" if can_update else ""
                self.update_button.tooltip = (
                    ""
                    if can_update and not detached
                    else tooltip_danger
                    if can_update
                    else ""
                )
            # Update the version_selector widget state.
            more_than_one_version = (
                len(self.version_selector.version_to_install.options) > 1
            )
            self.version_selector.disabled = (
                busy or blocked_install or not more_than_one_version
            )
            # Indicate whether there are local modifications and present option for user override.
            if cannot_reach_registry:
                self.issue_indicator.value = f'<i class="fa fa-{warn_or_ban_icon}"></i> Unable to reach the registry server.'
            elif not registered:
                self.issue_indicator.value = f'<i class="fa fa-{warn_or_ban_icon}"></i> The app is not registered.'
            elif detached:
                self.issue_indicator.value = (
                    f'<i class="fa fa-{warn_or_ban_icon}"></i> The app has local modifications or was checked out '
                    "to an unknown version."
                )
            elif not compatible:
                self.issue_indicator.value = f'<i class="fa fa-{warn_or_ban_icon}"></i> The app is not supported for this environment.'
            else:
                self.issue_indicator.value = ""
            self.blocked_ignore.layout.visibility = (
                "visible" if (detached or not compatible) else "hidden"
            )
            if (
                any(self.app.compatibility_info.values())
                and self.app.compatible is False
            ):
                self.compatibility_info.value = self.COMPATIBILITY_INFO.render(
                    app=self.app
                )
            else:
                self.compatibility_info.value = ""

    def _show_msg_success(self, msg):
        """Show a message indicating successful execution of a requested operation."""
        self.install_info.show_temporary_message(HTML_MSG_SUCCESS.format(msg))

    def _show_msg_failure(self, msg):
        """Show a message indicating failure to execute a requested operation."""
        self.install_info.show_temporary_message(HTML_MSG_FAILURE.format(msg))

    def _check_detached_state(self):
        """Check whether the app is in a detached state which would prevent any install or other operations.

        Raises RuntimeError when the app is detached and the user has not
        ticked the "Ignore" override checkbox.
        """
        self.app.refresh()
        self._refresh_widget_state()
        blocked = self.app.detached and not self.blocked_ignore.value
        if blocked:
            raise RuntimeError(
                "Unable to perform operation, the app is in a detached state."
            )

    def _install_version(self, _=None):
        """Attempt to install a specific version of the app."""
        version = self.version_selector.version_to_install.value  # can be None
        try:
            self._check_detached_state()
            version = self.app.install_app(
                version=version, stdout=self.dependencies_log
            )  # argument may be None
        except (AssertionError, RuntimeError, CalledProcessError) as error:
            self._show_msg_failure(str(error))
        else:
            self._show_msg_success(
                f"Installed app ({self._formatted_version(version)})."
            )

    def _update_app(self, _):
        """Attempt to update the app."""
        try:
            self._check_detached_state()
            self.app.update_app(stdout=self.dependencies_log)
        except (AssertionError, RuntimeError, CalledProcessError) as error:
            self._show_msg_failure(str(error))
        else:
            self._show_msg_success("Updated app.")

    def _uninstall_app(self, _):
        """Attempt to uninstall the app."""
        try:
            self._check_detached_state()
            self.app.uninstall_app()
        except RuntimeError as error:
            self._show_msg_failure(str(error))
        else:
            self._show_msg_success("Uninstalled app.")
from subprocess import CalledProcessError
import ipywidgets as ipw
import traitlets
from aiidalab.app import AppRemoteUpdateStatus as AppStatus
from aiidalab.app import AppVersion
from jinja2 import Template
from packaging.version import parse
from home.utils import load_logo
from home.widgets import LogOutputWidget, Spinner, StatusHTML
# Status-message templates rendered into the install-info area ({} is the
# message text); the Font Awesome icon distinguishes success from failure.
HTML_MSG_PROGRESS = """{}"""
HTML_MSG_SUCCESS = """<i class="fa fa-check" style="color:#337ab7;font-size:1em;" ></i>
{}"""
HTML_MSG_FAILURE = """<i class="fa fa-times" style="color:red;font-size:1em;" ></i>
{}"""
class VersionSelectorWidget(ipw.VBox):
    """Widget that shows the installed app version and lets the user pick
    a version to install."""

    # Mirrored onto the install dropdown via the observer below.
    disabled = traitlets.Bool()
    prereleases = traitlets.Bool()

    def __init__(self, *args, **kwargs):
        widget_style = {"description_width": "100px"}
        self.installed_version = ipw.Text(
            description="Installed version",
            disabled=True,
            style=widget_style,
        )
        self.version_to_install = ipw.Dropdown(
            description="Install version",
            disabled=True,
            style=widget_style,
        )
        self.info = StatusHTML(
            value="",
            layout={"max_width": "600px"},
            style=widget_style,
        )
        super().__init__(
            children=[self.installed_version, self.version_to_install, self.info],
            layout={"min_width": "300px"},
            *args,
            **kwargs,
        )

    @traitlets.observe("disabled")
    def _observe_disabled(self, change):
        """Propagate the widget-level `disabled` flag to the dropdown."""
        self.version_to_install.disabled = change["new"]
class AppManagerWidget(ipw.VBox):
    """Widget for management of apps.
    Shows basic information about the app (description, authors, etc.) and provides
    an interface to install, uninstall, and update the application, as well as change
    versions if possible.
    """
    # NOTE(review): "COMPATIBILTIY" is misspelled, but the name is used
    # consistently below; renaming would be an interface change for subclasses.
    COMPATIBILTIY_WARNING = Template(
        """<div class="alert alert-danger">
        The installed version of this app is not compatible with this AiiDAlab environment.
        </div>"""
    )
    # Dismissible banner listing the reasons an installed app is incompatible;
    # rendered with the app model (expects ``app.compatibility_info`` mapping).
    COMPATIBILITY_INFO = Template(
        """<div class="alert alert-warning alert-dismissible">
        <a href="#" class="close" data-dismiss="alert" aria-label="close">×</a>
        Reasons for incompatibility:
        <ul>
        {% for spec in app.compatibility_info %}
        <li>{{ spec }}:
        <ul>
        {% for missing_req in app.compatibility_info[spec] %}
        <li>missing: {{ missing_req }}</li>
        {% endfor %}
        </ul>
        </li>
        {% endfor %}
        </ul>
        </div>"""
    )
    # Static app header: title, authors, description, and optional URL.
    TEMPLATE = Template(
        """<b> <div style="font-size: 30px; text-align:center;">{{ app.title }}</div></b>
        <br>
        <b>Authors:</b> {{ app.authors }}
        <br>
        <b>Description:</b> {{ app.description }}
        {% if app.url %}
        <br>
        <b>URL:</b> <a href="{{ app.url }}">{{ app.url }}</a>
        {% endif %}"""
    )
    def __init__(self, app, with_version_selector=False):
        # ``app`` is the app model; the code below relies on it exposing
        # traits (``busy``, ``detached``, ``available_versions``,
        # ``installed_version``, ``has_prereleases``, ``include_prereleases``)
        # and an install/uninstall/update API.
        self.app = app
        # Banner shown only when an installed app is incompatible.
        self.compatibility_warning = ipw.HTML(self.COMPATIBILTIY_WARNING.render())
        self.compatibility_warning.layout = {"width": "600px"}
        self.compatibility_warning.layout.visibility = "hidden"
        body = ipw.HTML(self.TEMPLATE.render(app=app))
        body.layout = {"width": "600px"}
        # Setup install_info
        self.install_info = StatusHTML(
            message="<p><br></p>"
        ) # show empty line by default
        self.dependencies_log = LogOutputWidget(
            layout=ipw.Layout(min_height="0px", max_height="100px")
        ) # max_height controls the maximum height of the log field.
        self.dependencies_log.template = (
            "Installing dependencies..." + self.dependencies_log.template
        )
        # Setup buttons
        self.install_button = ipw.Button(description="Install", disabled=True)
        self.install_button.on_click(self._install_version)
        self.uninstall_button = ipw.Button(description="Uninstall", disabled=True)
        self.uninstall_button.on_click(self._uninstall_app)
        self.update_button = ipw.Button(description="Update", disabled=True)
        self.update_button.on_click(self._update_app)
        # Inline issue icon/message plus an "Ignore" override for blocked ops.
        self.issue_indicator = ipw.HTML()
        self.blocked_ignore = ipw.Checkbox(description="Ignore")
        self.blocked_ignore.observe(self._refresh_widget_state)
        self.compatibility_info = ipw.HTML()
        # Spinner follows the app model's ``busy`` trait via a one-way link.
        self.spinner = Spinner("color:#337ab7;font-size:1em;")
        ipw.dlink((self.app, "busy"), (self.spinner, "enabled"))
        children = [
            ipw.HBox([self.compatibility_warning]),
            ipw.HBox([load_logo(app), body]),
            ipw.HBox(
                [
                    self.uninstall_button,
                    self.install_button,
                    self.update_button,
                    self.spinner,
                ]
            ),
            ipw.HBox([self.install_info]),
            ipw.HBox([self.dependencies_log]),
            ipw.HBox([self.issue_indicator, self.blocked_ignore]),
            ipw.HBox([self.compatibility_info]),
        ]
        self.version_selector = VersionSelectorWidget()
        # Keep the selector's dropdown options and the installed-version text
        # in sync with the app model (one-directional links).
        ipw.dlink(
            (self.app, "available_versions"),
            (self.version_selector.version_to_install, "options"),
            transform=lambda versions: [
                (self._formatted_version(version), version) for version in versions
            ],
        )
        ipw.dlink(
            (self.app, "installed_version"),
            (self.version_selector.installed_version, "value"),
            transform=self._formatted_version,
        )
        self.version_selector.layout.visibility = (
            "visible" if with_version_selector else "hidden"
        )
        self.version_selector.disabled = True
        # Re-evaluate button states whenever a different version is selected.
        self.version_selector.version_to_install.observe(
            self._refresh_widget_state, "value"
        )
        children.insert(2, self.version_selector)
        # Prereleases opt-in
        self.include_prereleases = ipw.Checkbox(description="Include prereleases")
        ipw.dlink(
            (self.include_prereleases, "value"), (self.app, "include_prereleases")
        )
        self.app.observe(
            self._refresh_prereleases, names=["has_prereleases", "installed_version"]
        )
        self._refresh_prereleases(change=dict(owner=self.app)) # initialize
        children.insert(3, self.include_prereleases)
        super().__init__(children=children)
        self.app.observe(self._refresh_widget_state)
        self.app.refresh_async() # init all widgets
    @staticmethod
    def _formatted_version(version):
        """Format the unambiguous version identifier to a human-friendly representation."""
        if version is AppVersion.NOT_INSTALLED:
            return "[not installed]"
        if version is AppVersion.UNKNOWN:
            return "[unknown version]"
        if not version: # will be displayed during transition phases
            return "[n/a]"
        return version
    def _refresh_prereleases(self, change):
        """Sync the prereleases checkbox with the app's state."""
        app = change["owner"]
        installed_version = app.installed_version
        has_prereleases = app.has_prereleases
        # ``installed_version`` may be an AppVersion sentinel rather than a
        # version string, in which case it cannot be a prerelease.
        prerelease_installed = (
            parse(installed_version).is_prerelease
            if isinstance(installed_version, str)
            else False
        )
        with self.hold_trait_notifications():
            # The checkbox can only be enabled when the app has prereleases,
            # and cannot be disabled when a prerelease is currently installed.
            self.include_prereleases.disabled = (
                prerelease_installed or not has_prereleases
            )
            # The checkbox is checked if it was already checked or a prerelease is installed:
            self.include_prereleases.value = (
                self.include_prereleases.value or prerelease_installed
            )
    def _refresh_widget_state(self, _=None):
        """Refresh the widget to reflect the current state of the app."""
        with self.hold_trait_notifications():
            # Collect information about app state.
            installed = self.app.is_installed()
            installed_version = self.app.installed_version
            compatible = len(self.app.available_versions) > 0
            registered = self.app.remote_update_status is not AppStatus.NOT_REGISTERED
            cannot_reach_registry = (
                self.app.remote_update_status is AppStatus.CANNOT_REACH_REGISTRY
            )
            busy = self.app.busy
            detached = self.app.detached
            available_versions = self.app.available_versions
            # ``override``: user explicitly ticked "Ignore" on a detached app.
            override = detached and self.blocked_ignore.value
            blocked_install = (
                detached or not compatible
            ) and not self.blocked_ignore.value
            blocked_uninstall = (
                detached or not registered or cannot_reach_registry
            ) and not self.blocked_ignore.value
            # Check app compatibility and show banner if not compatible.
            self.compatibility_warning.layout.visibility = (
                "visible"
                if (self.app.is_installed() and self.app.compatible is False)
                else "hidden"
            )
            # Prepare warning icons and messages depending on whether we override or not.
            # These messages and icons are only shown if needed.
            warn_or_ban_icon = "warning" if override else "ban"
            if override:
                tooltip_danger = "Operation will lead to potential loss of local data!"
            else:
                tooltip_danger = "Operation blocked due to potential data loss."
            tooltip_incompatible = "The app is not supported for this environment."
            # Determine whether we can install, update, and uninstall.
            can_switch = (
                installed_version != self.version_selector.version_to_install.value
                and available_versions
            )
            # Index 0 of the dropdown holds the newest available version.
            latest_selected = self.version_selector.version_to_install.index == 0
            can_install = (
                can_switch and (detached or not latest_selected)
            ) or not installed
            can_uninstall = installed
            try:
                can_update = (
                    self.app.remote_update_status is AppStatus.UPDATE_AVAILABLE
                    and installed
                )
            except RuntimeError:
                # Remote status could not be determined.
                can_update = None
            # Update the install button state.
            self.install_button.disabled = busy or blocked_install or not can_install
            self.install_button.button_style = "info" if can_install else ""
            # Icon only shown when install is possible but the app is detached
            # (i.e. installing would override local changes).
            self.install_button.icon = (
                ""
                if can_install and not detached
                else warn_or_ban_icon
                if can_install
                else ""
            )
            if self.app.compatible:
                self.install_button.tooltip = (
                    ""
                    if can_install and not detached
                    else tooltip_danger
                    if can_install
                    else ""
                )
            else:
                self.install_button.tooltip = (
                    ""
                    if installed and not detached
                    else tooltip_danger
                    if installed
                    else tooltip_incompatible
                )
            # Show the target version on the button when switching versions.
            self.install_button.description = (
                "Install"
                if not (installed and can_install)
                else f"Install ({self._formatted_version(self.version_selector.version_to_install.value)})"
            )
            # Update the uninstall button state.
            self.uninstall_button.disabled = (
                busy or blocked_uninstall or not can_uninstall
            )
            self.uninstall_button.button_style = "danger" if can_uninstall else ""
            self.uninstall_button.icon = warn_or_ban_icon if detached else "trash-o"
            self.uninstall_button.tooltip = (
                ""
                if can_uninstall and not detached
                else tooltip_danger
                if can_uninstall
                else ""
            )
            # Update the update button state.
            self.update_button.disabled = busy or blocked_install or not can_update
            if self.app.is_installed() and can_update is None:
                # Update availability unknown (registry problem).
                self.update_button.icon = "warning"
                self.update_button.tooltip = (
                    "Unable to determine availability of updates."
                )
            else:
                self.update_button.icon = (
                    "arrow-circle-up"
                    if can_update and not detached
                    else warn_or_ban_icon
                    if can_update
                    else ""
                )
                self.update_button.button_style = "success" if can_update else ""
                self.update_button.tooltip = (
                    ""
                    if can_update and not detached
                    else tooltip_danger
                    if can_update
                    else ""
                )
            # Update the version_selector widget state.
            more_than_one_version = (
                len(self.version_selector.version_to_install.options) > 1
            )
            self.version_selector.disabled = (
                busy or blocked_install or not more_than_one_version
            )
            # Indicate whether there are local modifications and present option for user override.
            if cannot_reach_registry:
                self.issue_indicator.value = f'<i class="fa fa-{warn_or_ban_icon}"></i> Unable to reach the registry server.'
            elif not registered:
                self.issue_indicator.value = f'<i class="fa fa-{warn_or_ban_icon}"></i> The app is not registered.'
            elif detached:
                self.issue_indicator.value = (
                    f'<i class="fa fa-{warn_or_ban_icon}"></i> The app has local modifications or was checked out '
                    "to an unknown version."
                )
            elif not compatible:
                self.issue_indicator.value = f'<i class="fa fa-{warn_or_ban_icon}"></i> The app is not supported for this environment.'
            else:
                self.issue_indicator.value = ""
            self.blocked_ignore.layout.visibility = (
                "visible" if (detached or not compatible) else "hidden"
            )
            # Show detailed incompatibility reasons only when known & relevant.
            if (
                any(self.app.compatibility_info.values())
                and self.app.compatible is False
            ):
                self.compatibility_info.value = self.COMPATIBILITY_INFO.render(
                    app=self.app
                )
            else:
                self.compatibility_info.value = ""
    def _show_msg_success(self, msg):
        """Show a message indicating successful execution of a requested operation."""
        self.install_info.show_temporary_message(HTML_MSG_SUCCESS.format(msg))
    def _show_msg_failure(self, msg):
        """Show a message indicating failure to execute a requested operation."""
        self.install_info.show_temporary_message(HTML_MSG_FAILURE.format(msg))
    def _check_detached_state(self):
        """Check whether the app is in a detached state which would prevent any install or other operations.

        Raises RuntimeError when the app is detached and the user has not
        ticked the "Ignore" override.
        """
        self.app.refresh()
        self._refresh_widget_state()
        blocked = self.app.detached and not self.blocked_ignore.value
        if blocked:
            raise RuntimeError(
                "Unable to perform operation, the app is in a detached state."
            )
    def _install_version(self, _=None):
        """Attempt to install the a specific version of the app."""
        version = self.version_selector.version_to_install.value # can be None
        try:
            self._check_detached_state()
            # ``install_app`` returns the version actually installed.
            version = self.app.install_app(
                version=version, stdout=self.dependencies_log
            ) # argument may be None
        except (AssertionError, RuntimeError, CalledProcessError) as error:
            self._show_msg_failure(str(error))
        else:
            self._show_msg_success(
                f"Installed app ({self._formatted_version(version)})."
            )
    def _update_app(self, _):
        """Attempt to update the app."""
        try:
            self._check_detached_state()
            self.app.update_app(stdout=self.dependencies_log)
        except (AssertionError, RuntimeError, CalledProcessError) as error:
            self._show_msg_failure(str(error))
        else:
            self._show_msg_success("Updated app.")
    def _uninstall_app(self, _):
        """Attempt to uninstall the app."""
        try:
            self._check_detached_state()
            self.app.uninstall_app()
        except RuntimeError as error:
            self._show_msg_failure(str(error))
        else:
            self._show_msg_success("Uninstalled app.")
import os
# The DAG object; we'll need this to instantiate a DAG
from airflow import DAG
# Operators and utils required from airflow
from airflow.operators.bash import BashOperator
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import BranchPythonOperator
from airflow.models import Variable
from airflow.utils.task_group import TaskGroup
# Local checkout location for the metadata repository (under $AIRFLOW_HOME).
home_directory = os.getenv('AIRFLOW_HOME', '/opt/airflow')
metadata_directory = f"{home_directory}/metadata/"
# Repo and branch come from Airflow Variables so deployments can retarget
# them without a code change.
git_branch = Variable.get("git_branch", default_var='intake')
git_repo = Variable.get("git_repo_metadata")
# Task Configuration
task_group_prefix = 'validate_metadata'
# NOTE(review): placeholder values -- presumably substituted before
# deployment; confirm against the deploy pipeline.
git_user_email = '<EMAIL>'
git_user_name = '<NAME>'
def validate_metadata_folder(**kwargs):
    """Branch callable: return the task_id of the next task to run.

    Chooses ``clone_metadata`` when the local metadata checkout is missing
    or empty, and ``pull_metadata`` when a checkout already exists.
    """
    checkout_missing = not os.path.exists(metadata_directory)
    checkout_empty = not checkout_missing and not os.listdir(metadata_directory)
    if checkout_missing or checkout_empty:
        return f"{task_group_prefix}.clone_metadata"
    return f"{task_group_prefix}.pull_metadata"
def build_validate_metadata_taskgroup(dag: DAG) -> TaskGroup:
    """Build the task group that guarantees an up-to-date metadata checkout.

    Flow: configure git identity -> branch on local checkout state ->
    clone (missing/empty checkout) or pull (existing checkout) -> join.

    :param dag: DAG that owns the operators in this group.
    :return: the assembled TaskGroup.
    """
    validate_metadata_taskgroup = TaskGroup(group_id=task_group_prefix)
    # A git identity must be configured before any operation that records an author.
    bash_command_configure = f"git config --global user.email \"{git_user_email}\" && git config --global user.name \"{git_user_name}\""
    configure_git = BashOperator(
        task_id='configure_git',
        bash_command=bash_command_configure,
        task_group=validate_metadata_taskgroup,
        dag=dag)
    # Branch operator: decides between clone and pull from the folder state.
    validate_git_folder = BranchPythonOperator(
        task_id=f"{task_group_prefix}_folder",
        python_callable=validate_metadata_folder,
        task_group=validate_metadata_taskgroup,
        dag=dag)
    # Taken when the local checkout is missing or empty.
    bash_command_clone = f"git clone --single-branch --branch {git_branch} {git_repo} {metadata_directory}"
    git_clone = BashOperator(
        task_id='clone_metadata',
        bash_command=bash_command_clone,
        task_group=validate_metadata_taskgroup,
        dag=dag)
    # Taken when a checkout already exists.
    bash_command_pull = f"git -C {metadata_directory} pull origin {git_branch}"
    git_pull = BashOperator(
        task_id='pull_metadata',
        bash_command=bash_command_pull,
        task_group=validate_metadata_taskgroup,
        dag=dag)
    # Join point (DO NOT DELETE, IT WOULD BREAK THE FLOW): 'none_failed'
    # lets this task fire after whichever branch actually ran.
    finished_pulling = DummyOperator(
        task_id='finished_pulling',
        trigger_rule='none_failed',
        task_group=validate_metadata_taskgroup,
        dag=dag)
    configure_git >> validate_git_folder >> [git_clone, git_pull] >> finished_pulling
    return validate_metadata_taskgroup
# The DAG object; we'll need this to instantiate a DAG
from airflow import DAG
# Operators and utils required from airflow
from airflow.operators.bash import BashOperator
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import BranchPythonOperator
from airflow.models import Variable
from airflow.utils.task_group import TaskGroup
# Local checkout location for the metadata repository (under $AIRFLOW_HOME).
home_directory = os.getenv('AIRFLOW_HOME', '/opt/airflow')
metadata_directory = f"{home_directory}/metadata/"
# Repo/branch are Airflow Variables so deployments can retarget them.
git_branch = Variable.get("git_branch", default_var='intake')
git_repo = Variable.get("git_repo_metadata")
# Task Configuration
task_group_prefix = 'validate_metadata'
# NOTE(review): placeholder values -- presumably filled in before deployment.
git_user_email = '<EMAIL>'
git_user_name = '<NAME>'
def validate_metadata_folder(**kwargs):
    """Branch callable: pick the next task_id.

    Clone when the local metadata checkout is missing or empty; otherwise pull.
    """
    if not os.path.exists(metadata_directory):
        return f"{task_group_prefix}.clone_metadata"
    if len(os.listdir(metadata_directory)) == 0:
        return f"{task_group_prefix}.clone_metadata"
    return f"{task_group_prefix}.pull_metadata"
def build_validate_metadata_taskgroup(dag: DAG) -> TaskGroup:
    """Assemble the metadata-checkout task group.

    Flow: configure git identity -> branch on checkout state -> clone or
    pull -> join.
    """
    validate_metadata_taskgroup = TaskGroup(group_id=task_group_prefix)
    # A git identity must exist before any git operation recording an author.
    bash_command_configure = f"git config --global user.email \"{git_user_email}\" && git config --global user.name \"{git_user_name}\""
    configure_git = BashOperator(
        task_id='configure_git',
        bash_command=bash_command_configure,
        task_group=validate_metadata_taskgroup,
        dag=dag)
    """ Validates if the git folder is empty or not """
    validate_git_folder = BranchPythonOperator(
        task_id=f"{task_group_prefix}_folder",
        python_callable=validate_metadata_folder,
        task_group=validate_metadata_taskgroup,
        dag=dag)
    """ If the git folder is empty, clone the repo """
    bash_command_clone = f"git clone --single-branch --branch {git_branch} {git_repo} {metadata_directory}"
    git_clone = BashOperator(
        task_id='clone_metadata',
        bash_command=bash_command_clone,
        task_group=validate_metadata_taskgroup,
        dag=dag)
    """ If the git folder is not empty, pull the latest changes """
    bash_command_pull = f"git -C {metadata_directory} pull origin {git_branch}"
    git_pull = BashOperator(
        task_id='pull_metadata',
        bash_command=bash_command_pull,
        task_group=validate_metadata_taskgroup,
        dag=dag)
    """ Dummy operator (DO NOT DELETE, IT WOULD BREAK THE FLOW) """
    # 'none_failed' lets the join fire after whichever branch actually ran.
    finished_pulling = DummyOperator(
        task_id='finished_pulling',
        trigger_rule='none_failed',
        task_group=validate_metadata_taskgroup,
        dag=dag)
    configure_git >> validate_git_folder >> [git_clone, git_pull] >> finished_pulling
    return validate_metadata_taskgroup
from http import HTTPStatus
from django.contrib.auth import get_user_model
from django.test import Client, TestCase
from django.urls import reverse
from posts.models import Group, Post
# Resolve the active user model once (supports a custom AUTH_USER_MODEL).
User = get_user_model()
class PostsURLTests(TestCase):
    """URL-level tests for the posts app: availability, redirects, named
    routes, and 404 handling."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Shared fixtures: one group, one author, one post.
        cls.group = Group.objects.create(
            title='Тестовый заголовок группы',
            slug='test-slug',
            description='Тестовое описание группы',
        )
        cls.user = User.objects.create_user(username='Testuser')
        cls.post = Post.objects.create(
            text='Тестовый текст',
            author=cls.user,
            group=cls.group
        )
    def setUp(self):
        # Fresh client authenticated as the post's author for every test.
        self.authorized_client = Client()
        self.authorized_client.force_login(PostsURLTests.user)
    def test_post_url_exists_at_desired_location(self):
        """Each posts URL responds 200 for its least-privileged allowed client."""
        username = PostsURLTests.user.username
        group_slug = PostsURLTests.group.slug
        post_id = PostsURLTests.post.id
        guest = self.client
        authorized = self.authorized_client
        # (url, minimal client that must be able to open it)
        permitted_url_names = (
            ('/', guest),
            (f'/group/{group_slug}/', guest),
            ('/new/', authorized),
            ('/follow/', authorized),
            (f'/{username}/{post_id}/', guest),
            (f'/{username}/{post_id}/edit/', authorized),
            (f'/{username}/', guest)
        )
        for url, client in permitted_url_names:
            with self.subTest(url=url):
                response = client.get(url)
                self.assertEqual(response.status_code, HTTPStatus.OK)
    def test_post_url_uses_correct_redirects(self):
        """Guests are sent to login; non-authors are sent back to the post/profile."""
        # Second, non-author user to exercise the "not the author" redirects.
        user2 = User.objects.create_user(username='Testuser2')
        reader = Client()
        reader.force_login(user2)
        username = PostsURLTests.user.username
        post_id = PostsURLTests.post.id
        guest = self.client
        auth_login = reverse('login') + '?next='
        # (url, client, expected final redirect target)
        redirect_url_names = (
            ('/new/', guest,
             auth_login + reverse('new_post')),
            (f'/{username}/{post_id}/edit/', guest,
             auth_login + reverse('post_edit', args=(username, post_id))),
            (f'/{username}/{post_id}/edit/', reader,
             reverse('post', args=(username, post_id))),
            (f'/{username}/follow/', guest,
             auth_login + reverse('profile_follow', args=(username,))),
            (f'/{username}/follow/', reader,
             reverse('profile', args=(username,))),
            (f'/{username}/unfollow/', guest,
             auth_login + reverse('profile_unfollow', args=(username,))),
            (f'/{username}/{post_id}/comment/', guest,
             auth_login + reverse('add_comment', args=(username, post_id))),
        )
        for url, client, redirect in redirect_url_names:
            with self.subTest(url=url):
                response = client.get(url, follow=True)
                self.assertRedirects(response, redirect)
    def test_post_url_uses_correct_name_path(self):
        """Hard-coded URLs and the named routes in posts.urls resolve identically."""
        username = PostsURLTests.user.username
        group_slug = PostsURLTests.group.slug
        post_id = PostsURLTests.post.id
        # (hard-coded url, route name, reverse() args)
        url_names = (
            ('/', 'index', None),
            (f'/group/{group_slug}/', 'group_posts', (group_slug,)),
            ('/new/', 'new_post', None),
            ('/follow/', 'follow_index', None),
            (f'/{username}/{post_id}/', 'post', (username, post_id)),
            (f'/{username}/{post_id}/edit/', 'post_edit', (username, post_id)),
            (f'/{username}/{post_id}/comment/', 'add_comment',
             (username, post_id)),
            (f'/{username}/follow/', 'profile_follow', (username,)),
            (f'/{username}/unfollow/', 'profile_unfollow', (username,)),
            (f'/{username}/', 'profile', (username,))
        )
        for url, name, args in url_names:
            with self.subTest(url=url):
                self.assertEqual(url, reverse(name, args=args))
    def test_incorrect_url_return_404_error(self):
        """An unknown path (/abraabra/abraabra/) returns HTTP 404."""
        response = self.client.get('/abraabra/abraabra/')
        self.assertEqual(response.status_code, HTTPStatus.NOT_FOUND)
from django.contrib.auth import get_user_model
from django.test import Client, TestCase
from django.urls import reverse
from posts.models import Group, Post
# Resolve the active user model once (supports a custom AUTH_USER_MODEL).
User = get_user_model()
class PostsURLTests(TestCase):
    """URL-level tests for the posts app: availability, redirects, named
    routes, and 404 handling."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Shared fixtures: one group, one author, one post.
        cls.group = Group.objects.create(
            title='Тестовый заголовок группы',
            slug='test-slug',
            description='Тестовое описание группы',
        )
        cls.user = User.objects.create_user(username='Testuser')
        cls.post = Post.objects.create(
            text='Тестовый текст',
            author=cls.user,
            group=cls.group
        )
    def setUp(self):
        # Fresh client authenticated as the post's author for every test.
        self.authorized_client = Client()
        self.authorized_client.force_login(PostsURLTests.user)
    def test_post_url_exists_at_desired_location(self):
        """Each posts URL responds 200 for its least-privileged allowed client."""
        username = PostsURLTests.user.username
        group_slug = PostsURLTests.group.slug
        post_id = PostsURLTests.post.id
        guest = self.client
        authorized = self.authorized_client
        # (url, minimal client that must be able to open it)
        permitted_url_names = (
            ('/', guest),
            (f'/group/{group_slug}/', guest),
            ('/new/', authorized),
            ('/follow/', authorized),
            (f'/{username}/{post_id}/', guest),
            (f'/{username}/{post_id}/edit/', authorized),
            (f'/{username}/', guest)
        )
        for url, client in permitted_url_names:
            with self.subTest(url=url):
                response = client.get(url)
                self.assertEqual(response.status_code, HTTPStatus.OK)
    def test_post_url_uses_correct_redirects(self):
        """Guests are sent to login; non-authors back to the post/profile."""
        user2 = User.objects.create_user(username='Testuser2')
        reader = Client()
        reader.force_login(user2)
        username = PostsURLTests.user.username
        post_id = PostsURLTests.post.id
        guest = self.client
        auth_login = reverse('login') + '?next='
        # (url, client, expected final redirect target)
        redirect_url_names = (
            ('/new/', guest,
             auth_login + reverse('new_post')),
            (f'/{username}/{post_id}/edit/', guest,
             auth_login + reverse('post_edit', args=(username, post_id))),
            (f'/{username}/{post_id}/edit/', reader,
             reverse('post', args=(username, post_id))),
            (f'/{username}/follow/', guest,
             auth_login + reverse('profile_follow', args=(username,))),
            (f'/{username}/follow/', reader,
             reverse('profile', args=(username,))),
            (f'/{username}/unfollow/', guest,
             auth_login + reverse('profile_unfollow', args=(username,))),
            (f'/{username}/{post_id}/comment/', guest,
             auth_login + reverse('add_comment', args=(username, post_id))),
        )
        for url, client, redirect in redirect_url_names:
            with self.subTest(url=url):
                response = client.get(url, follow=True)
                self.assertRedirects(response, redirect)
    def test_post_url_uses_correct_name_path(self):
        """Hard-coded URLs and the named routes resolve identically."""
        username = PostsURLTests.user.username
        group_slug = PostsURLTests.group.slug
        post_id = PostsURLTests.post.id
        # (hard-coded url, route name, reverse() args)
        url_names = (
            ('/', 'index', None),
            (f'/group/{group_slug}/', 'group_posts', (group_slug,)),
            ('/new/', 'new_post', None),
            ('/follow/', 'follow_index', None),
            (f'/{username}/{post_id}/', 'post', (username, post_id)),
            (f'/{username}/{post_id}/edit/', 'post_edit', (username, post_id)),
            (f'/{username}/{post_id}/comment/', 'add_comment',
             (username, post_id)),
            (f'/{username}/follow/', 'profile_follow', (username,)),
            (f'/{username}/unfollow/', 'profile_unfollow', (username,)),
            (f'/{username}/', 'profile', (username,))
        )
        for url, name, args in url_names:
            with self.subTest(url=url):
                self.assertEqual(url, reverse(name, args=args))
    def test_incorrect_url_return_404_error(self):
        """An unknown path (/abraabra/abraabra/) returns HTTP 404."""
        response = self.client.get('/abraabra/abraabra/')
        self.assertEqual(response.status_code, HTTPStatus.NOT_FOUND)
import itertools
import re
from collections import Counter
import numpy as np
import pandas as pd
import pymystem3
# One shared Mystem instance: construction is expensive (it manages the
# external mystem binary), so all lemmatization reuses this object.
mystem = pymystem3.Mystem()
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py

    Keeps Latin/Cyrillic letters, digits and a few punctuation marks, pads
    punctuation with spaces so it survives whitespace tokenization, and
    lower-cases the result.

    FIX: the replacement strings for "(", ")" and "?" used "\\(" etc. in
    non-raw literals -- invalid string escapes (SyntaxWarning on modern
    Python) that also left a literal backslash in the output tokens.
    """
    # Drop every character outside the allowed alphabet.
    string = re.sub(r"[^A-Za-zА-Яа-я0-9(),!?'`]", " ", string)
    # Split English clitics off as separate tokens ("it's" -> "it 's").
    string = re.sub(r"'s", " 's", string)
    string = re.sub(r"'ve", " 've", string)
    string = re.sub(r"n't", " n't", string)
    string = re.sub(r"'re", " 're", string)
    string = re.sub(r"'d", " 'd", string)
    string = re.sub(r"'ll", " 'll", string)
    # Pad punctuation with spaces so each mark becomes its own token.
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", " ( ", string)
    string = re.sub(r"\)", " ) ", string)
    string = re.sub(r"\?", " ? ", string)
    # Collapse the runs of whitespace introduced above.
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
def load_data_and_labels_pos_neg():
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.

    FIX: file handles were opened without ever being closed; context
    managers now close them even on error.
    """
    # NOTE(review): the platform default encoding is kept on purpose -- the
    # MR data files are not guaranteed to be UTF-8; confirm before pinning.
    with open("./data/rt-polarity.pos") as pos_file:
        positive_examples = [s.strip() for s in pos_file]
    with open("./data/rt-polarity.neg") as neg_file:
        negative_examples = [s.strip() for s in neg_file]
    # Clean and whitespace-tokenize each sentence.
    x_text = positive_examples + negative_examples
    x_text = [clean_str(sent) for sent in x_text]
    x_text = [s.split(" ") for s in x_text]
    # One-hot labels: [0, 1] = positive, [1, 0] = negative.
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)
    return [x_text, y]
def pad_sentences(sentences, maxlen=56, padding_word="<PAD/>"):
    """Pad (or truncate) every sentence to exactly ``maxlen`` tokens.

    :param sentences: iterable of token lists
    :param maxlen: fixed target length for every output sentence
    :param padding_word: filler token appended to short sentences
    :return: list of token lists, each of length ``maxlen``
    """
    return [
        sentence[:maxlen] + [padding_word] * max(0, maxlen - len(sentence))
        for sentence in sentences
    ]
def build_vocab(sentences):
    """Build word<->index mappings, ordered by descending frequency.

    :param sentences: iterable of token lists
    :return: [word -> index dict, index -> word list]
    """
    # Count every token across all sentences.
    counts = Counter()
    for sentence in sentences:
        counts.update(sentence)
    # Most frequent word gets index 0; ties keep first-seen order.
    vocabulary_inv = [word for word, _ in counts.most_common()]
    vocabulary = {word: index for index, word in enumerate(vocabulary_inv)}
    return [vocabulary, vocabulary_inv]
def build_input_x(sentences, vocabulary):
    """Encode tokenized sentences as an array of vocabulary indices.

    Raises KeyError for tokens missing from ``vocabulary``.
    """
    encoded = [[vocabulary[token] for token in sentence] for sentence in sentences]
    return np.array(encoded)
def build_input_data(sentences, labels, vocabulary):
    """Vectorize tokenized sentences and their labels.

    :return: [index array for sentences, label array]
    """
    return [build_input_x(sentences, vocabulary), np.array(labels)]
def load_data_pos_neg():
    """
    Loads and preprocessed data for the MR dataset.
    Returns input vectors, labels, vocabulary, and inverse vocabulary.
    """
    # Load and preprocess data
    sentences, labels = load_data_and_labels_pos_neg()
    # Pad/truncate to the default fixed length (56 tokens).
    sentences_padded = pad_sentences(sentences)
    vocabulary, vocabulary_inv = build_vocab(sentences_padded)
    x, y = build_input_data(sentences_padded, labels, vocabulary)
    return [x, y, vocabulary, vocabulary_inv]
def build_word_level_data(train_data, test_data):
    """Clean, lemmatize, pad and index train/test sentence collections.

    :param train_data: (sentences, labels) pair for training
    :param test_data: (sentences, labels) pair for testing
    :return: x_train, y_train, x_test, y_test, vocabulary, vocabulary_inv
    """
    sentences_train, labels_train = train_data
    sentences_test, labels_test = test_data
    sentences_train = [clean_str(sent) for sent in sentences_train]
    # mystem.lemmatize presumably returns a token list per sentence --
    # TODO confirm (including whether whitespace tokens are kept).
    sentences_train = [mystem.lemmatize(s) for s in sentences_train]
    sentences_test = [clean_str(sent) for sent in sentences_test]
    sentences_test = [mystem.lemmatize(s) for s in sentences_test]
    sentences_train_padded = pad_sentences(list(sentences_train))
    sentences_test_padded = pad_sentences(list(sentences_test))
    # NOTE(review): debug print left in place -- consider removing or logging.
    print(" ".join(sentences_train_padded[0]))
    # Vocabulary is built over train AND test so test tokens are always known.
    vocabulary, vocabulary_inv = \
        build_vocab(sentences_train_padded + sentences_test_padded)
    x_train, y_train = build_input_data(sentences_train_padded, labels_train, vocabulary)
    x_test, y_test = build_input_data(sentences_test_padded, labels_test, vocabulary)
    return x_train, y_train, x_test, y_test, vocabulary, vocabulary_inv
def encode_word_level_data(prepared_x, vocabulary):
    """Encode the first column of ``prepared_x`` (token lists) as padded
    index vectors.

    :param prepared_x: DataFrame whose first column holds token lists
    :param vocabulary: word -> index mapping
    :return: 2-D index array, one fixed-length row per input row
    """
    # FIX: ``.ix`` was deprecated and removed from pandas; ``.iloc`` is the
    # positional equivalent.
    x = build_input_x(pad_sentences(list(prepared_x.iloc[:, 0])), vocabulary)
    return x
def batch_iter(data, batch_size, num_epochs):
    """
    Generates a batch iterator for a dataset.

    Yields shuffled batches; the data is reshuffled at every epoch.

    :param data: sequence of samples (converted to a numpy array)
    :param batch_size: maximum number of samples per batch
    :param num_epochs: number of full passes over the data
    """
    data = np.array(data)
    # BUG FIX: the original computed ``len(data.shape[0])`` -- ``len()`` of
    # an int -- which raised TypeError on first use.
    data_size = data.shape[0]
    # Ceiling division; the original's ``int(n / bs) + 1`` yielded a trailing
    # empty batch whenever data_size was an exact multiple of batch_size.
    num_batches_per_epoch = (data_size + batch_size - 1) // batch_size
    for epoch in range(num_epochs):
        # Shuffle the data at each epoch
        shuffle_indices = np.random.permutation(np.arange(data_size))
        shuffled_data = data[shuffle_indices]
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
def read_data_file(fname, target_index=0, normalize=True, binary=False):
    """Read a headerless CSV of (label columns..., text) rows.

    :param fname: path or file-like object accepted by ``pd.read_csv``
    :param target_index: column index of the target variable
    :param normalize: scale targets by the maximum absolute value
    :param binary: map the two distinct target values to 0/1
    :return: (x, y) -- text column as an ndarray, targets as a float ndarray
    :raises Exception: if ``binary`` is set but the target column has more
        than two distinct values
    """
    content = pd.read_csv(fname, header=None, index_col=False)
    content.dropna(inplace=True)
    content.reset_index(inplace=True, drop=True)
    # FIX: ``.ix`` was removed from pandas; ``.iloc`` is the positional
    # equivalent.  The text is assumed to live in the last column.
    x = np.array(content.iloc[:, content.shape[1] - 1])
    y = content.iloc[:, target_index].values + 0.0
    if normalize:
        max_y = np.max(np.abs(y))
        # Guard against an all-zero target column (original divided by zero).
        if max_y > 0:
            y /= max_y
    if binary:
        # FIX: sorting makes the 0/1 assignment deterministic; the original
        # used ``list(set(y))``, whose order is arbitrary.
        vals = sorted(set(y))
        if len(vals) > 2:
            raise Exception("Binary input data is not binary! Dataset %s, target_index=%d" % (fname, target_index))
        y = np.array([0 if a == vals[0] else 1 for a in y])
    return x, y
def load_ok_data_gender():
    """Load the OK per-message gender dataset (target column 2, binary).

    Presumably "OK" = Odnoklassniki -- confirm with the data provenance.
    """
    train_data = read_data_file('./data/ok/ok_train.csv', target_index=2, binary=True)
    test_data = read_data_file('./data/ok/ok_test.csv', target_index=2, binary=True)
    return train_data, test_data
def load_ok_user_data_gender():
    """Load the OK per-user gender dataset (target column 2, binary)."""
    train_data = read_data_file('./data/ok/ok_user_train.csv', target_index=2, binary=True)
    test_data = read_data_file('./data/ok/ok_user_test.csv', target_index=2, binary=True)
    return train_data, test_data
def load_sentirueval_data():
    """Load the SentiRuEval dataset (default target column 0, normalized)."""
    train_data = read_data_file('./data/sentirueval/train.csv')
    test_data = read_data_file('./data/sentirueval/test.csv')
    return train_data, test_data
def shuffle_matrix(x, y):
    """Shuffle ``x`` and ``y`` in unison with one shared permutation.

    FIX: the original stacked both arrays via deprecated ``np.matrix`` +
    ``hstack``, which coerced everything to a common dtype (stringifying
    the labels whenever ``x`` held text).  A shared index permutation keeps
    each array's dtype intact.

    :param x: 1-D sequence of samples
    :param y: per-sample targets aligned with ``x`` along the first axis
    :return: (shuffled x as a 1-D array, shuffled y as a 2-D array)
    """
    x_arr = np.asarray(x)
    y_arr = np.asarray(y)
    if y_arr.ndim == 1:
        # Match the original's output shape: y came back as a column.
        y_arr = y_arr.reshape(-1, 1)
    order = np.random.permutation(x_arr.shape[0])
    return x_arr[order], y_arr[order]
def clean_data_np(x):
    """Strip, clean and whitespace-tokenize each entry of an array-like of strings."""
    # Normalize to plain stripped strings first, then clean and tokenize.
    stripped = [entry.strip() for entry in list(x)]
    tokenized = [clean_str(entry).split(u" ") for entry in stripped]
    return tokenized
def clean_data_lists(x):
    """Strip, clean and whitespace-tokenize each entry of a list of strings."""
    tokens_per_entry = []
    for entry in x:
        cleaned = clean_str(entry.strip())
        tokens_per_entry.append(cleaned.split(u" "))
    return tokens_per_entry
if __name__ == '__main__':
    # read_w2v()
    # Tiny smoke check for manual runs (FIX: trailing dataset-dump garbage
    # fused onto the last line made the module unparsable).
    df = pd.DataFrame([{"x": u"привет"}, {"x": u"пока"}])
import re
from collections import Counter
import numpy as np
import pandas as pd
import pymystem3
# One shared Mystem instance: construction is expensive (it manages the
# external mystem binary), so all lemmatization reuses this object.
mystem = pymystem3.Mystem()
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    # Keep only Latin/Cyrillic letters, digits and a few punctuation marks.
    string = re.sub(r"[^A-Za-zА-Яа-я0-9(),!?\'\`]", " ", string)
    # Split English clitics off as separate tokens ("it's" -> "it 's").
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    # Pad punctuation with spaces so each mark becomes its own token.
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    # NOTE(review): the replacements below use "\(", "\)", "\?" in non-raw
    # literals -- invalid string escapes (SyntaxWarning on modern Python)
    # that leave a literal backslash in the output tokens.
    string = re.sub(r"\(", " \( ", string)
    string = re.sub(r"\)", " \) ", string)
    string = re.sub(r"\?", " \? ", string)
    # Collapse the runs of whitespace introduced above.
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
def load_data_and_labels_pos_neg():
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    # Load data from files
    # NOTE(review): the handles are never closed and no encoding is given.
    positive_examples = list(open("./data/rt-polarity.pos").readlines())
    positive_examples = [s.strip() for s in positive_examples]
    negative_examples = list(open("./data/rt-polarity.neg").readlines())
    negative_examples = [s.strip() for s in negative_examples]
    # Split by words
    x_text = positive_examples + negative_examples
    x_text = [clean_str(sent) for sent in x_text]
    x_text = [s.split(" ") for s in x_text]
    # Generate labels
    # One-hot: [0, 1] = positive, [1, 0] = negative.
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)
    return [x_text, y]
def pad_sentences(sentences, maxlen=56, padding_word="<PAD/>"):
"""
Pads all sentences to the same length.
Returns padded sentences.
"""
sequence_length = maxlen # max(len(x) for x in sentences)
padded_sentences = []
for i in range(len(sentences)):
sentence = sentences[i]
num_padding = max(0, sequence_length - len(sentence))
new_sentence = sentence[:sequence_length] + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences
def build_vocab(sentences):
"""
Builds a vocabulary mapping from word to index based on the sentences.
Returns vocabulary mapping and inverse vocabulary mapping.
"""
# Build vocabulary
word_counts = Counter(itertools.chain(*sentences))
# Mapping from index to word
vocabulary_inv = [x[0] for x in word_counts.most_common()]
# Mapping from word to index
vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
return [vocabulary, vocabulary_inv]
def build_input_x(sentences, vocabulary):
    """
    Map tokenized sentences to a 2-d index array using *vocabulary*.
    Raises KeyError on out-of-vocabulary words.
    """
    rows = []
    for sentence in sentences:
        rows.append([vocabulary[word] for word in sentence])
    return np.array(rows)
def build_input_data(sentences, labels, vocabulary):
    """
    Vectorize sentences via build_input_x() and wrap labels in an ndarray.
    Returns [x, y].
    """
    return [build_input_x(sentences, vocabulary), np.array(labels)]
def load_data_pos_neg():
    """
    Load and preprocess the MR polarity dataset end to end.

    Returns [x, y, vocabulary, vocabulary_inv].
    """
    sentences, labels = load_data_and_labels_pos_neg()
    padded = pad_sentences(sentences)
    vocabulary, vocabulary_inv = build_vocab(padded)
    x, y = build_input_data(padded, labels, vocabulary)
    return [x, y, vocabulary, vocabulary_inv]
def build_word_level_data(train_data, test_data):
    """
    Clean, lemmatize, pad and vectorize train/test sentence sets.

    A shared vocabulary is built over both splits so indices agree.
    Returns (x_train, y_train, x_test, y_test, vocabulary, vocabulary_inv).
    NOTE(review): relies on a module-level `mystem` lemmatizer that is not
    visible in this block — confirm it is initialized before calling.
    """
    raw_train, labels_train = train_data
    raw_test, labels_test = test_data
    lemmatized_train = [mystem.lemmatize(clean_str(s)) for s in raw_train]
    lemmatized_test = [mystem.lemmatize(clean_str(s)) for s in raw_test]
    padded_train = pad_sentences(list(lemmatized_train))
    padded_test = pad_sentences(list(lemmatized_test))
    print(" ".join(padded_train[0]))
    vocabulary, vocabulary_inv = \
        build_vocab(padded_train + padded_test)
    x_train, y_train = build_input_data(padded_train, labels_train, vocabulary)
    x_test, y_test = build_input_data(padded_test, labels_test, vocabulary)
    return x_train, y_train, x_test, y_test, vocabulary, vocabulary_inv
def encode_word_level_data(prepared_x, vocabulary):
    """
    Encode the first column of a prepared DataFrame as padded index vectors.

    Args:
        prepared_x: DataFrame whose first column holds tokenized sentences.
        vocabulary: word -> index mapping from build_vocab().

    Returns:
        2-d index array from build_input_x().
    """
    # FIX: DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
    # .iloc is the positional-indexing equivalent.
    x = build_input_x(pad_sentences(list(prepared_x.iloc[:, 0])), vocabulary)
    return x
def batch_iter(data, batch_size, num_epochs):
    """
    Yield shuffled mini-batches of *data* for *num_epochs* epochs.

    The data is reshuffled at the start of every epoch; the final batch of
    an epoch may be smaller than *batch_size*.
    """
    data = np.array(data)
    # BUG FIX: the original computed `len(data.shape[0])` — len() of an int —
    # which raises TypeError on the first iteration. shape[0] IS the size.
    data_size = data.shape[0]
    num_batches_per_epoch = int(data_size / batch_size) + 1
    for epoch in range(num_epochs):
        # Shuffle the data at each epoch
        shuffle_indices = np.random.permutation(np.arange(data_size))
        shuffled_data = data[shuffle_indices]
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            # FIX: skip the empty tail batch the original yielded whenever
            # data_size is an exact multiple of batch_size.
            if start_index < end_index:
                yield shuffled_data[start_index:end_index]
def read_data_file(fname, target_index=0, normalize=True, binary=False):
    """
    Read a headerless CSV whose last column is the text input and whose
    *target_index* column is the numeric target.

    Args:
        fname: path to the csv file.
        target_index: column holding the target value.
        normalize: divide targets by their maximum absolute value.
        binary: remap the (exactly two) distinct target values to 0/1.

    Returns:
        (x, y): ndarray of text inputs and float ndarray of targets.

    Raises:
        Exception: if binary=True but the target has more than two values.
    """
    content = pd.read_csv(fname, header=None, index_col=False)
    content.dropna(inplace=True)
    content.reset_index(inplace=True, drop=True)
    # FIX: DataFrame.ix was removed from pandas; .iloc is the positional
    # replacement for integer-location indexing.
    x = np.array(content.iloc[:, content.shape[1] - 1])
    y = content.iloc[:, target_index].values + 0.0  # force float dtype
    if normalize:
        max_y = np.max(np.abs(y))
        y /= max_y
    if binary:
        # FIX: sort the distinct values so the 0/1 assignment is
        # deterministic — iterating a raw set() is not ordered.
        vals = sorted(set(y))
        if len(vals) > 2:
            raise Exception("Binary input data is not binary! Dataset %s, target_index=%d" % (fname, target_index))
        y = np.array([0 if a == vals[0] else 1 for a in y])
    return x, y
def load_ok_data_gender():
    """Load the OK gender train/test splits (binary target in column 2)."""
    train = read_data_file('./data/ok/ok_train.csv', target_index=2, binary=True)
    test = read_data_file('./data/ok/ok_test.csv', target_index=2, binary=True)
    return train, test
def load_ok_user_data_gender():
    """Load the OK per-user gender train/test splits (binary target in column 2)."""
    train = read_data_file('./data/ok/ok_user_train.csv', target_index=2, binary=True)
    test = read_data_file('./data/ok/ok_user_test.csv', target_index=2, binary=True)
    return train, test
def load_sentirueval_data():
    """Load the SentiRuEval train/test splits with default read options."""
    splits = [read_data_file('./data/sentirueval/%s.csv' % name)
              for name in ('train', 'test')]
    return splits[0], splits[1]
def shuffle_matrix(x, y):
    """
    Shuffle x and y in unison.

    x is a 1-d sequence; y has one entry per element of x. Returns
    (xi, yi) where xi is a flat array and yi keeps y's columns, with
    row pairing preserved.
    """
    # Glue x and y side by side so one shuffle moves both together.
    paired = np.hstack((np.matrix(x).T, np.asmatrix(y).T))
    np.random.shuffle(paired)
    shuffled_x = np.array(paired[:, 0]).flatten()
    shuffled_y = np.array(paired[:, 1:])
    return shuffled_x, shuffled_y
def clean_data_np(x):
    """Strip, clean and whitespace-tokenize every entry of an array-like."""
    # (renamed local: the original shadowed the builtin `all`)
    stripped = [s.strip() for s in list(x)]
    return [clean_str(s).split(u" ") for s in stripped]
def clean_data_lists(x):
    """Strip, clean and whitespace-tokenize every entry of a list of strings."""
    # (renamed local: the original shadowed the builtin `all`)
    stripped = [s.strip() for s in x]
    return [clean_str(s).split(u" ") for s in stripped]
if __name__ == '__main__':
# read_w2v()
df = pd.DataFrame([{"x": u"привет"}, {"x": u"пока"}]) | 0.779783 | 0.555435 |
from unittest import mock
import pytest
@pytest.fixture
def apps():
    """ Mocks 'apps.get_model()' parameter in migration """
    from .models import DummyModel
    fake_apps = mock.MagicMock()
    fake_apps.get_model.return_value = DummyModel
    return fake_apps
@pytest.fixture
def schema_editor():
    """ Mocks 'schema_editor.execute()' in migration """
    editor = mock.MagicMock()
    # Assign execute explicitly to make the mocked surface obvious.
    editor.execute = mock.MagicMock()
    return editor
def test_AddAuditToModel_upgrade(apps, schema_editor):
    """ It should emit proper upgrade query for specified model """
    from audit_trail.migrating import AddAuditToModel as Operation
    op = Operation('DummyModel', 'tests')
    op.code(apps, schema_editor)
    expected = "SELECT audit.audit_table('tests_dummymodel', 't', 't', '{}')"
    schema_editor.execute.assert_called_with(expected)
def test_AddAuditToModel_upgrade_exclude_sql(apps, schema_editor):
    """ It should allow omission of SQL in audit log """
    from audit_trail.migrating import AddAuditToModel as Operation
    op = Operation('DummyModel', 'tests', include_query=False)
    op.code(apps, schema_editor)
    # third argument 'f' = do not record the triggering SQL
    expected = "SELECT audit.audit_table('tests_dummymodel', 't', 'f', '{}')"
    schema_editor.execute.assert_called_with(expected)
def test_AddAuditToModel_upgrade_exclude_cols(apps, schema_editor):
    """ It should allow exclusion of certain columns """
    from audit_trail.migrating import AddAuditToModel as Operation
    op = Operation('DummyModel', 'tests', exclude=['id'])
    op.code(apps, schema_editor)
    # excluded columns appear in the trailing array literal
    expected = "SELECT audit.audit_table('tests_dummymodel', 't', 't', '{id}')"
    schema_editor.execute.assert_called_with(expected)
def test_AddAuditToModel_downgrade(apps, schema_editor):
    """ It should downgrade """
    # reverse_code must drop both audit triggers for the model's table.
    from audit_trail.migrating import AddAuditToModel as Operation
    operation = Operation('DummyModel', 'tests', exclude=['id'])
    operation.reverse_code(apps, schema_editor)
    # Exact call order is asserted: row trigger first, then statement trigger.
    assert schema_editor.execute.mock_calls == [
    mock.call('DROP TRIGGER IF EXISTS audit_trigger_row ON tests_dummymodel'),
    mock.call('DROP TRIGGER IF EXISTS audit_trigger_stm ON tests_dummymodel')
    ] | tests/test_migrating.py | from unittest import mock
import pytest
@pytest.fixture
def apps():
""" Mocks 'apps.get_model()' parameter in migration """
from .models import DummyModel
mocked_apps = mock.MagicMock()
mocked_apps.get_model = mock.MagicMock(return_value=DummyModel)
return mocked_apps
@pytest.fixture
def schema_editor():
""" Mocks 'schema_editor.execute()' in migration """
mocked_schema_editor = mock.MagicMock()
mocked_schema_editor.execute = mock.MagicMock()
return mocked_schema_editor
def test_AddAuditToModel_upgrade(apps, schema_editor):
""" It should emit proper upgrade query for specified model """
from audit_trail.migrating import AddAuditToModel as Operation
operation = Operation('DummyModel', 'tests')
operation.code(apps, schema_editor)
schema_editor.execute.assert_called_with(
"SELECT audit.audit_table('tests_dummymodel', 't', 't', '{}')")
def test_AddAuditToModel_upgrade_exclude_sql(apps, schema_editor):
""" It should allow omission of SQL in audit log """
from audit_trail.migrating import AddAuditToModel as Operation
operation = Operation('DummyModel', 'tests', include_query=False)
operation.code(apps, schema_editor)
schema_editor.execute.assert_called_with(
"SELECT audit.audit_table('tests_dummymodel', 't', 'f', '{}')")
def test_AddAuditToModel_upgrade_exclude_cols(apps, schema_editor):
""" It should allow exclusion of certain columns """
from audit_trail.migrating import AddAuditToModel as Operation
operation = Operation('DummyModel', 'tests', exclude=['id'])
operation.code(apps, schema_editor)
schema_editor.execute.assert_called_with(
"SELECT audit.audit_table('tests_dummymodel', 't', 't', '{id}')")
def test_AddAuditToModel_downgrade(apps, schema_editor):
""" It should downgrade """
from audit_trail.migrating import AddAuditToModel as Operation
operation = Operation('DummyModel', 'tests', exclude=['id'])
operation.reverse_code(apps, schema_editor)
assert schema_editor.execute.mock_calls == [
mock.call('DROP TRIGGER IF EXISTS audit_trigger_row ON tests_dummymodel'),
mock.call('DROP TRIGGER IF EXISTS audit_trigger_stm ON tests_dummymodel')
] | 0.559049 | 0.430267 |
from copy import deepcopy
import numpy as np
from random import Random
class Matrix:
    """Simple row x col matrix backed by a list of lists of numbers."""

    # Initializes to zero matrix
    def __init__(self, row, col):
        self.row = row
        self.col = col
        # BUG FIX: the original built a col x row grid (dimensions swapped),
        # which broke the row/col indexing used by this()/setCell().
        self.matrix = [[0 for _ in range(self.col)] for _ in range(self.row)]

    # String representation of matrix
    def __str__(self):
        return str(self.__dict__)

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    # Allows index access to matrix, ex: matrix.this(2, 3)
    def this(self, row, col):
        return self.matrix[row][col]

    # Get the number of rows in the matrix
    def getNumRow(self):
        return self.row

    # Get the number of columns in the matrix
    def getNumCol(self):
        return self.col

    # Converts 2d list to 1d array of floats (not yet implemented)
    def toPackedArray(self):
        pass

    # To be added
    def fromPackedArray(self):
        pass

    # Replace the matrix with a single-column matrix built from `values`.
    def createColumnMatrix(self, values):
        # BUG FIX: the original assigned the result of list.append() (None)
        # back to self.matrix, destroying the matrix.
        self.matrix = [[v] for v in values]
        self.row = len(values)
        self.col = 1

    # Replace the matrix with a single-row matrix built from `row`.
    def createRowMatrix(self, row):
        # BUG FIX: the original appended the np.random.rand function object
        # itself instead of any numeric values.
        self.matrix = [list(row)]
        self.row = 1
        self.col = len(row)

    # Adds the value to the cell at (row, col).
    def add(self, row, col, value):
        # Implemented per the (row, col, value) signature; previously a stub.
        self.matrix[row][col] += value

    # Set every cell in a matrix to zero
    def clear(self):
        self.matrix = [[0 for _ in range(self.col)] for _ in range(self.row)]

    # Clones matrix object
    def clone(self):
        # BUG FIX: the original returned the live inner list, so mutating the
        # "clone" corrupted this matrix; deepcopy gives a real copy.
        return deepcopy(self.matrix)

    # Determines if matrices are equal with precision (not yet implemented)
    def equals(self, precision):
        pass

    # Gets one column from the matrix as a plain list
    def getCol(self, col):
        return [self.matrix[r][col] for r in range(self.row)]

    # Gets one row from the matrix as a plain list
    def getRow(self, row):
        return list(self.matrix[row])

    # isZero: Determines if every cell in a matrix object is zero.
    def isZero(self):
        return all(cell == 0 for line in self.matrix for cell in line)

    # Fill the matrix with random integers in [minimum, maximum).
    def randomize(self, minimum, maximum):
        # .tolist() keeps the storage a list of lists, consistent with
        # the rest of the class (the original left an ndarray behind).
        self.matrix = np.random.randint(
            minimum, maximum, size=(self.row, self.col)).tolist()

    # sumCell: Returns the sum of every cell in a matrix obj
    def sumCell(self):
        return sum(sum(line) for line in self.matrix)
# Sets the value of a cell
def setCell(self, row, col, value):
self.matrix[row][col] = value | ANN Python/matrix.py | from copy import deepcopy
import numpy as np
from random import Random
class Matrix:
# Initializes to zero matrix
def __init__(self, row, col):
self.row = row
self.col = col
self.matrix = [[0 for x in range(self.row)] for x in range(self.col) ]
# String representation of matrix
def __str__(self):
return str(self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
# Allows index access to matrix, ex: matrix[2][3]
def this(self, row, col):
return self.matrix[row][col]
# Get the number of rows in the matrix
def getNumRow(self):
return self.row
# Get the number of columns in the matrix
def getNumCol(self):
return self.col
# Converts 2d list to 1d array of floats
def toPackedArray(self):
pass
# TO be added
def fromPackedArray(self):
pass
# Create random matrix with single column
def createColumnMatrix(self, values):
self.matrix = self.matrix.append([values])
# Create random matrix with single row.
def createRowMatrix(self, row):
for i in row:
self.matrix.append(np.random.rand)
# Adds the value to every cell in the matrix
def add(self, row, col, value):
pass
# Set every cell in a matrix to zero
def clear(self):
self.matrix = [[0 for x in range(self.row)] for x in range(self.col) ]
# Clones matrix object
def clone(self):
return self.matrix
# Determines if matrices are equal with precision
def equals(self, precision):
pass
# Gets one column from matrix obj as new matrix obj
def getCol(self, col):
pass
# Gets one row from matrix obj as new matrix obj
def getRow(self, row):
pass
# isZero: Determines if every cell in a matrix object is zero.
def isZero(self):
pass
def randomize(self, minimum, maximum):
self.matrix = np.random.randint(minimum,maximum, size=(self.row, self.col))
# sumCell: Returns the sum of every cell in a matrix obj
def sumCell(self):
pass
# Sets the value of a cell
def setCell(self, row, col, value):
self.matrix[row][col] = value | 0.850903 | 0.594993 |
from enum import Enum
import datetime
import pprint
import xmltodict
from connectinfo import RawConnectInfo
class ManifestType(Enum):
    # Kind of manifest response. Presumably CreateSliver responses carry the
    # '@expires' field while renew/list responses carry 'pg_expires' (see
    # Manifest.expiration) — TODO confirm against the GENI AM API.
    CREATE_SLIVER = 0
    RENEW_OR_LIST_SLIVER = 1
class Manifest(object):
    '''Object to store parsed manifest'''

    def __init__(self, manifest, do_parse=True):
        '''
        Args:
            manifest: response object with a .text XML body (do_parse=True)
                or an already-parsed mapping (do_parse=False).
            do_parse: whether to run xmltodict over manifest.text.
        '''
        self.data = xmltodict.parse(manifest.text) if do_parse else manifest
        # Multi-node manifests have rspec->node as a list (index 0 exists);
        # single-node manifests have a plain mapping there.
        if self.__has_indices('rspec', 'node', 0):
            self.__num_nodes = len(self.data['rspec']['node'])
        else:
            self.__num_nodes = 1

    @property
    def num_nodes(self):
        return self.__num_nodes

    @property
    def expiration(self):
        '''Expiry timestamp of the manifest as a naive datetime.'''
        # BUG FIX: the original tested `if ManifestType.CREATE_SLIVER`, but an
        # enum member is always truthy, so the 'pg_expires' branch was
        # unreachable and renew/list manifests raised KeyError. Select the
        # field by which key is actually present instead.
        rspec = self.data['rspec']
        tmp = rspec['@expires'] if '@expires' in rspec else rspec['pg_expires']
        return datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%SZ')

    def __len__(self):
        return self.__num_nodes

    def __str__(self):
        return str(self.data)

    def __repr__(self):
        return self.__str__()

    def print_full(self):
        '''Pretty-print the whole parsed manifest.'''
        pprint.pprint(self.data)

    def __has_indices(self, *indices):
        '''Return True iff self.data can be indexed by each of *indices in turn.'''
        ptr = self.data
        try:
            for key in indices:
                ptr = ptr[key]
            return True
        except (KeyError, IndexError, TypeError):
            # FIX: also catch IndexError/TypeError — the original caught only
            # KeyError, so probing index 0 on a short list or a scalar raised.
            return False

    def get_connect_info(self):
        '''Returns iterable of `RawConnectInfo`:(name, user, ip_local, ip_public, port) for all found nodes'''
        if self.__num_nodes > 1:
            return [RawConnectInfo(
                str(self.data['rspec']['node'][idx]['@client_id']),
                str(self.data['rspec']['node'][idx]['services']['login']['@username']),
                str(self.data['rspec']['node'][idx]['interface']['ip']['@address']),
                str(self.data['rspec']['node'][idx]['host']['@ipv4']),
                str(self.data['rspec']['node'][idx]['services']['login']['@port'])) for idx in range(self.__num_nodes)]
        elif self.__num_nodes == 1:
            node = self.data['rspec']['node']
            name = str(node['@client_id'])
            user = str(node['services']['login']['@username'])
            ip_local = str(node['interface']['ip']['@address'])
            ip_public = str(node['host']['@ipv4'])
            port = str(node['services']['login']['@port'])
            return [RawConnectInfo(name, user, ip_local, ip_public, port)]
        else:
            raise RuntimeError('No nodes found!')
import datetime
import pprint
import xmltodict
from connectinfo import RawConnectInfo
class ManifestType(Enum):
CREATE_SLIVER = 0
RENEW_OR_LIST_SLIVER = 1
class Manifest(object):
'''Object to store parsed manifest'''
def __init__(self, manifest, do_parse=True):
self.data = xmltodict.parse(manifest.text) if do_parse else manifest
if self.__has_indices('rspec', 'node', 0):
self.__num_nodes = len(self.data['rspec']['node'])
else:
self.__num_nodes = 1
@property
def num_nodes(self):
return self.__num_nodes
@property
def expiration(self):
tmp = self.data['rspec']['@expires'] if ManifestType.CREATE_SLIVER else self.data['rspec']['pg_expires']
return datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%SZ')
def __len__(self):
return self.__num_nodes
def __str__(self):
return str(self.data)
def __repr__(self):
return self.__str__()
def print_full(self):
pprint.pprint(self.data)
def __has_indices(self, *indices):
ptr = self.data
try:
for x in indices:
ptr = ptr[x]
return True
except KeyError as e:
return False
def get_connect_info(self):
'''Returns iterable of `RawConnectInfo`:(name, user, ip_local, ip_public, port) for all found nodes'''
if self.__num_nodes > 1:
return [RawConnectInfo(
str(self.data['rspec']['node'][idx]['@client_id']),
str(self.data['rspec']['node'][idx]['services']['login']['@username']),
str(self.data['rspec']['node'][idx]['interface']['ip']['@address']),
str(self.data['rspec']['node'][idx]['host']['@ipv4']),
str(self.data['rspec']['node'][idx]['services']['login']['@port'])) for idx in range(self.__num_nodes)]
elif self.__num_nodes == 1:
name = str(self.data['rspec']['node']['@client_id'])
user = str(self.data['rspec']['node']['services']['login']['@username'])
ip_local = str(self.data['rspec']['node']['interface']['ip']['@address'])
ip_public = str(self.data['rspec']['node']['host']['@ipv4'])
port = str(self.data['rspec']['node']['services']['login']['@port'])
return [RawConnectInfo(name, user, ip_local, ip_public, port)]
else:
raise RuntimeError('No nodes found!') | 0.38341 | 0.100834 |
import csv
import re
# Regular expression to detect potential string values with missing quotes
# Regular expression to detect potential string values with missing quotes.
# Heuristic: match values containing a lowercase letter that do not start
# with a quote character or one of these SQL keyword/function names
# (matched case-insensitively via the negative lookahead).
sql_fun = ['true', 'false', 'avg', 'count', 'first', 'last', 'max', 'min',
           'sum', 'ucase', 'lcase', 'mid', 'len', 'round', 'now', 'format']
string_exp = re.compile('^(?!["\']|{}).*[a-z]'.format('|'.join(sql_fun)),
                        re.IGNORECASE)
class CsvImporter(object):
    """
    CsvImporter imports values from a csv file into records and creates sql
    insert statements to create the corresponding rows in the target db.

    :param path: Path to the csv file to import
    :param dialect: Dictionary with csv reader dialect specifications
        (see http://docs.python.org/2/library/csv.html#csv-fmt-params)
    :param import_specs: Dictionary with import specifications for each
        table. RecordSpecs are used to tell the script how to extract the
        csv columns into db records.
        Each entry can have multiple RecordSpecs, identified by a unique
        key which is used to resolve cross references in the attr_map of
        each RecordSpec.
    """
    def __init__(self, path, dialect, import_specs):
        self.path = path
        self.dialect = dialect
        # Flatten import_specs to {(table, instance): record_spec} "t,i,s" form
        flat_specs = {}
        for (t, table_spec) in import_specs.items():
            flat_specs.update({(t, i): s for (i, s) in table_spec.items()})
        # Create a XReference dependency map and sort it topologically so
        # referenced records are always created before their referrers.
        dependency_map = {}
        for (spec_path, s) in flat_specs.items():
            deps = set([(x.table_name, x.instance_name) for x
                        in s.attr_map.values() if isinstance(x, XReference)])
            dependency_map[spec_path] = deps
        sorted_keys = [val for sub in _toposort(dependency_map) for val in sub]
        # Store sorted results in a list [(t, i, s), ...]
        try:
            self.specs = [(t, i, flat_specs[(t, i)]) for (t, i) in sorted_keys]
        except KeyError as err:
            # BUG FIX: the original printed the loop variables `i` and `t`,
            # which are stale (or unbound when import_specs is empty) inside
            # this handler; report the key that actually failed instead.
            missing_table, missing_instance = err.args[0]
            print('ERROR: Could not find specification for "{}" in table '
                  '"{}". Check your XReferences.'.format(missing_instance,
                                                         missing_table))
            exit(-1)

    def import_data(self, id_col=None):
        """
        Imports the csv into DbRecords and returns them.

        The method uses the import specification (import_specs) that was
        passed to the importer on init to convert csv table columns to
        DbRecord objects.

        :param id_col: optional csv column whose value identifies each row;
            falls back to the 0-based row number.
        """
        records = []
        with open(self.path) as f:
            csv.register_dialect('csv2db', **self.dialect)
            reader = csv.DictReader(f, dialect='csv2db')
            row_num = 0
            for row in reader:
                row_id = row[id_col] if id_col else row_num
                records += self._records_for_row(row, row_id)
                row_num += 1
        return records

    def _records_for_row(self, row, row_id):
        """
        Import one single row and return the resulting DbRecord objects.
        """
        records = []
        xref_map = {}
        for (table, instance, spec) in self.specs:
            # Skip records whose spec condition rejects this row.
            if spec.condition(row) is False:
                continue
            # Create record and import attributes according to spec
            record = DbRecord(table, row_id)
            record.import_attributes(spec.attr_map, xref_map, row)
            records.append(record)
            # Keep a reference to each record instance that we create for
            # resolving XReferences in later instances
            xref_map[(table, instance)] = record
        return records
class RecordSpec(object):
    """
    Specification for extracting csv columns into one database record.

    :param attr_map: maps database columns to csv columns via the ...Value
        classes below.
    :param condition: optional callable(row) -> bool; when it returns False
        no record is created for that row. Defaults to always-True.
    """
    def __init__(self, attr_map, condition=None):
        self.attr_map = attr_map
        self.condition = condition or (lambda row: True)
class ColumnValue(object):
    """
    Reads an input value from a single csv column.

    :param col_name: column to read the value from.
    :param convert: optional one-argument conversion applied to the value.
    """
    def __init__(self, col_name, convert=None):
        self.col_name = col_name
        self.convert = convert

    def _read(self, row, **kw_args):
        raw = row[self.col_name]
        if self.convert:
            return self.convert(raw)
        return raw
class MultiColumnValue(object):
    """
    Reads several csv columns and contracts them into one value via *convert*.

    :param col_names: list of columns to read.
    :param convert: required callable receiving {col_name: value} for the
        current row and returning a single value.
    """
    def __init__(self, col_names, convert):
        if not convert:
            raise ValueError('ERROR: You must provide a convert function')
        self.col_names = col_names
        self.convert = convert

    def _read(self, row, **kw_args):
        subset = {name: row[name] for name in self.col_names}
        return self.convert(subset)
class ConstValue(object):
    """Returns the same constant *value* for every row."""
    def __init__(self, value):
        self.value = value

    def _read(self, row, **kw_args):
        # Row content is intentionally ignored.
        return self.value
class DynamicValue(object):
    """
    Produces a value per row by calling *generate*.

    :param generate: callable taking the current row dict and returning
        a single value.
    """
    def __init__(self, generate):
        self.generate = generate

    def _read(self, row, **kw_args):
        return self.generate(row)
class XReference(object):
    """
    Resolves to an attribute of a previously created record.

    :param table_name: table key in the importer's import_specs.
    :param instance_name: instance key under that table.
    :param attribute_name: attribute of the referenced record to return.
    """
    def __init__(self, table_name, instance_name, attribute_name):
        self.table_name = table_name
        self.instance_name = instance_name
        self.attribute_name = attribute_name

    def _read(self, row, **kw_args):
        # Look up the already-imported record by its (table, instance) path.
        record = kw_args['existing_records'][(self.table_name,
                                              self.instance_name)]
        return record.attributes[self.attribute_name]
class DbRecord(object):
    """
    One row destined for *table_name*; one or more DbRecords are created
    per imported csv row according to the RecordSpecs.
    """
    def __init__(self, table_name, row_id):
        self.row_id = row_id
        self.table_name = table_name
        self.attributes = {}

    def import_attributes(self, attr_map, existing_records, row):
        """
        Import attributes according to the attr_map and resolve cross
        references to existing_records. Prints an error and exits if a
        spec entry lacks a _read method.
        """
        try:
            # COMPAT FIX: dict.iteritems() is Python-2-only; items() works on
            # both (the rest of this module already uses print() calls).
            imported = {k: v._read(row, existing_records=existing_records)
                        for (k, v) in attr_map.items()}
        except AttributeError:
            k, v = next((k, v) for (k, v) in attr_map.items()
                        if '_read' not in dir(v))
            print('ERROR: The RecordSpec for {} in {} does not seem to be '
                  'valid'.format(k, self.table_name))
            exit(-1)
        self.attributes.update(imported)

    def insert_statement(self):
        """
        Returns the insert statement sequence for the current object.

        All attribute values must already be SQL-formatted strings.
        """
        col = ' (%s)' % ', '.join(self.attributes.keys())
        # sanity checks
        error = False
        for k, v in self.attributes.items():
            if not isinstance(v, str):
                print('ERROR: The value ({}) for "{}" in table "{}" is not a '
                      'string. Make sure your specs only produce string '
                      'values (i.e. \'5\', \'TRUE\', \'"Some text"\', '
                      '...)'.format(v, k, self.table_name))
                error = True
            elif string_exp.match(v):
                # BUG FIX: under Python 3 the original printed the template
                # first and called .format() on print's None return value.
                print('WARNING: {} looks like a string value but is not in '
                      'quotes. If "{}" in "{}" is a CHAR or VARCHAR type '
                      'column, you should put the value in quotes.'
                      .format(v, k, self.table_name))
        if error:
            # COMPAT FIX: was a Python-2 print statement (SyntaxError on py3).
            print('Aborting due to errors.')
            exit(-1)
        val = ' (%s)' % ', '.join(self.attributes.values())
        return 'INSERT INTO ' + self.table_name + col + ' VALUES' + val + ';\n'
# Private (internal) methods
def _toposort(data):
"""
Sort dependencies topologically
:param data: Dependency map of the form
data = {
'business': set(['fleet','address']),
'device': set(['business','model','status','pack']),
'txn': set(['device','business','operator'])
}
"""
# Ignore self dependencies.
for k, v in data.items():
v.discard(k)
# Find all items that don't depend on anything.
extra_items = reduce(set.union, data.itervalues()) - set(data.iterkeys())
# Add empty dependences where needed
data.update({item: set() for item in extra_items})
while True:
ordered = set(item for item, dep in data.iteritems() if not dep)
if not ordered:
break
yield ordered
data = {item: (dep - ordered)
for item, dep in data.iteritems() if item not in ordered}
assert not data, "Cyclic dependencies:\n%s" % \
'\n'.join(repr(x) for x in data.iteritems()) | csv2db.py | import csv
import re
# Regular expression to detect potential string values with missing quotes
sql_fun = ['true', 'false', 'avg', 'count', 'first', 'last', 'max', 'min',
'sum', 'ucase', 'lcase', 'mid', 'len', 'round', 'now', 'format']
string_exp = re.compile('^(?!["\']|{}).*[a-z]'.format('|'.join(sql_fun)),
re.IGNORECASE)
class CsvImporter(object):
"""
CsvImporter imports values from a csv file into records and creates sql
insert statements to create the corresponding rows in the target db.
:param path: Path to the csv file to import
:param dialect: Dictionary with csv reader dialect specifications
(see http://docs.python.org/2/library/csv.html#csv-fmt-params)
:param import_specs: Dictionary with import specifications for each
table. RecordSpecs are used to tell the script how to extract the
csv columns into db records.
Each entry can have multiple RecordSpecs, identified by a unique
key which is used to resolve cross references in the attr_map of
each RecordSpec.
"""
def __init__(self, path, dialect, import_specs):
self.path = path
self.dialect = dialect
# Flatten import_specs to {(table, instance): record_spec} "t,i,s" form
flat_specs = {}
for (t, table_spec) in import_specs.items():
specs = {(t, i): s for (i, s) in table_spec.items()}
flat_specs.update(specs)
# Create a XReference dependency map and sort it topologically
dependency_map = {}
for (path, s) in flat_specs.items():
deps = set([(x.table_name, x.instance_name) for x
in s.attr_map.values() if isinstance(x, XReference)])
dependency_map[path] = deps
sorted_keys = [val for sub in _toposort(dependency_map) for val in sub]
# Store sorted results in a list [(t, i, s), ...]
try:
self.specs = [(t, i, flat_specs[(t, i)]) for (t, i) in sorted_keys]
except KeyError:
print('ERROR: Could not find specification for "{}" in table '
'"{}". Check your XReferences.'.format(i, t))
exit(-1)
def import_data(self, id_col=None):
"""
Imports the csv into DbRecords and returns them.
The method uses the import specification (import_specs) that was passed
to the importer on init to convert csv table columns to DbRecord
objects.
"""
records = []
with open(self.path) as f:
csv.register_dialect('csv2db', **self.dialect)
reader = csv.DictReader(f, dialect='csv2db')
row_num = 0
for row in reader:
row_id = row[id_col] if id_col else row_num
records += self._records_for_row(row, row_id);
row_num += 1
return records
def _records_for_row(self, row, row_id):
"""
Import one single row and return the resulting DbRecord objects
"""
records = []
xref_map = {}
for (table, instance, spec) in self.specs:
if spec.condition(row) is False:
continue
# Create record and import attributes according to spec
record = DbRecord(table, row_id)
record.import_attributes(spec.attr_map, xref_map, row)
records.append(record)
# Keep a reference to each record instance that we create for
# resolving XReferences in later instances
instance_path = (table, instance)
xref_map[instance_path] = record
return records
class RecordSpec(object):
"""
Specifications for extracting csv columns into the corresponding
database record.
:param attr_map: A dictionary that maps database columns to csv
columns using any of the ...Value classes below.
:param condition: An optional callable that returns false if the
object should not be created for the row that is currently.
The callable must accept exactly one parameter (the current row).
"""
def __init__(self, attr_map, condition=None):
self.attr_map = attr_map
self.condition = condition if condition else lambda row: True
class ColumnValue(object):
"""
Read an input value from a csv column
:param col_name: Column name to read the value from
:param convert: Optional conversion function that takes exactly one
argument which is the row dict for the currently imported row
"""
def __init__(self, col_name, convert=None):
self.col_name = col_name
self.convert = convert
def _read(self, row, **kw_args):
value = row[self.col_name]
return self.convert(value) if self.convert else value
class MultiColumnValue(object):
"""
Reads input from multiple columns and contracts them into a single value
using the (non-optional) callable given in *convert*.
:param col_names: List of column names to read values from
:param convert: Conversion function that takes exactly one argument (the
row dict of the currently imported row) and contracts the values into
a single return value
"""
def __init__(self, col_names, convert):
if not convert:
raise ValueError('ERROR: You must provide a convert function')
self.col_names = col_names
self.convert = convert
def _read(self, row, **kw_args):
values = {key: row[key] for key in self.col_names}
return self.convert(values)
class ConstValue(object):
""" Always returns the same constant value
:param value: The value to return for each row
"""
def __init__(self, value):
self.value = value
def _read(self, row, **kw_args):
return self.value
class DynamicValue(object):
""" Creates a value dynamically using the callable *generate*
:param generate: A function or other callable that takes a single argument
(the current row dict) and returns a single value
"""
def __init__(self, generate):
self.generate = generate
def _read(self, row, **kw_args):
return self.generate(row)
class XReference(object):
""" Takes the value of a specific attribute of another record.
:param table_name: Table name in the import_specs table given to the
*CsvImporter*
:param instance_name: Identifies a specific instance under *table_name*
:param attribute_name: Name of the attribute to return
"""
def __init__(self, table_name, instance_name, attribute_name):
self.table_name = table_name
self.instance_name = instance_name
self.attribute_name = attribute_name
def _read(self, row, **kw_args):
existing_records = kw_args['existing_records']
path = (self.table_name, self.instance_name)
value = existing_records[path].attributes[self.attribute_name]
return value
class DbRecord(object):
"""
One or more DbRecords are created for each imported row accoding to the
RecordSpecs.
"""
def __init__(self, table_name, row_id):
self.row_id = row_id
self.table_name = table_name
self.attributes = {}
def import_attributes(self, attr_map, existing_records, row):
"""
Import attributes according to the attr_map and resolve cross
references to existing_records.
"""
try:
imported = {k: v._read(row, existing_records=existing_records)
for (k, v) in attr_map.iteritems()}
except AttributeError:
k, v = next((k, v) for (k, v) in attr_map.iteritems()
if '_read' not in dir(v))
print('ERROR: The RecordSpec for {} in {} does not seem to be '
'valid'.format(k, self.table_name))
exit(-1)
self.attributes.update(imported)
def insert_statement(self):
"""
Returns the insert statement sequence for the current object
"""
col = ' (%s)' % ', '.join(self.attributes.keys())
# sanity checks
error = False
for k, v in self.attributes.iteritems():
if not isinstance(v, str):
print('ERROR: The value ({}) for "{}" in table "{}" is not a '
'string. Make sure your specs only produce string '
'values (i.e. \'5\', \'TRUE\', \'"Some text"\', '
'...)'.format(v, k, self.table_name))
error = True
elif string_exp.match(v):
print ('WARNING: {} looks like a string value but is not in '
'quotes. If "{}" in "{}" is a CHAR or VARCHAR type '
'column, you should put the value in quotes.').\
format(v, k, self.table_name)
if error:
print 'Aborting due to errors.'
exit(-1)
val = ' (%s)' % ', '.join(self.attributes.values())
sql = 'INSERT INTO ' + self.table_name + col + ' VALUES' + val + ';\n'
return sql
# Private (internal) methods


def _toposort(data):
    """
    Sort dependencies topologically.

    :param data: Dependency map of the form

        data = {
            'business': set(['fleet', 'address']),
            'device': set(['business', 'model', 'status', 'pack']),
            'txn': set(['device', 'business', 'operator'])
        }

    Yields sets of items whose dependencies are all satisfied by items
    yielded before them.  Raises AssertionError on cyclic input.
    """
    # Work on a copy so the caller's mapping is left untouched
    # (BUG FIX: the original mutated its argument in place).
    data = {item: set(deps) for item, deps in data.items()}
    # Ignore self dependencies.
    for item, deps in data.items():
        deps.discard(item)
    # Find all items that don't depend on anything.
    extra_items = set()
    for deps in data.values():
        extra_items |= deps
    extra_items -= set(data.keys())
    # Add empty dependences where needed.
    data.update({item: set() for item in extra_items})
    while True:
        ordered = set(item for item, dep in data.items() if not dep)
        if not ordered:
            break
        yield ordered
        data = {item: (dep - ordered)
                for item, dep in data.items() if item not in ordered}
    assert not data, "Cyclic dependencies:\n%s" % \
        '\n'.join(repr(x) for x in data.items())
from django.db import models
class MinistryTime(models.Model):
    """A scheduled time window for a Ministry.

    Exactly one recurrence field is expected to be meaningful: an explicit
    start/end date range, a day of the week, a day of the month, or a day
    of the year.
    """
    Ministry = models.ForeignKey('Ministry', on_delete=models.CASCADE)
    start_date = models.DateField(
        auto_now=False, auto_now_add=False, default=None)
    end_date = models.DateField(
        auto_now=False, auto_now_add=False, default=None)
    DAY_OF_WEEK_CHOICES = [('Sun', 'Sunday'), ('Mon', 'Monday'),
                           ('Tues', 'Tuesday'), ('Wed', 'Wednesday'),
                           ('Thurs', 'Thursday'), ('Fri', 'Friday'),
                           ('Sat', 'Saturday'), ('N/A', 'N/A')]
    day_of_week = models.CharField(
        max_length=5, choices=DAY_OF_WEEK_CHOICES, default='N/A')
    day_of_month = models.CharField(max_length=100, default='N/A')
    day_of_year = models.SmallIntegerField(default=0)
    start_time = models.TimeField()
    end_time = models.TimeField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    REQUIRED_FIELDS = [
        'start_time',
        'end_time'
    ]

    @staticmethod
    def _ordinal_suffix(day):
        """Return 'st'/'nd'/'rd'/'th' for a day-of-month value."""
        try:
            n = int(day)
        except (TypeError, ValueError):
            return 'th'
        # 11th, 12th and 13th are irregular.
        if 11 <= n % 100 <= 13:
            return 'th'
        return {1: 'st', 2: 'nd', 3: 'rd'}.get(n % 10, 'th')

    def __str__(self):
        # BUG FIX: the original concatenated date/time objects directly with
        # str (TypeError) and used always-true conditions such as
        # "x == 1 or 21 or 31" when choosing the ordinal suffix.
        if self.start_date is not None:
            schedule = '{0} - {1}\n'.format(self.start_date, self.end_date)
        elif self.day_of_week != 'N/A':
            schedule = self.day_of_week + 's\n'
        elif self.day_of_month != 'N/A':
            schedule = '{0}{1} of every month\n'.format(
                self.day_of_month, self._ordinal_suffix(self.day_of_month))
        else:
            schedule = '{0} day of every year\n'.format(self.day_of_year)
        return schedule + '@ {0} till {1}'.format(self.start_time,
                                                  self.end_time)
class MinistryTime(models.Model):
    """A scheduled time window for a Ministry.

    Exactly one recurrence field is expected to be meaningful: an explicit
    start/end date range, a day of the week, a day of the month, or a day
    of the year.
    """
    Ministry = models.ForeignKey('Ministry', on_delete=models.CASCADE)
    start_date = models.DateField(
        auto_now=False, auto_now_add=False, default=None)
    end_date = models.DateField(
        auto_now=False, auto_now_add=False, default=None)
    DAY_OF_WEEK_CHOICES = [('Sun', 'Sunday'), ('Mon', 'Monday'),
                           ('Tues', 'Tuesday'), ('Wed', 'Wednesday'),
                           ('Thurs', 'Thursday'), ('Fri', 'Friday'),
                           ('Sat', 'Saturday'), ('N/A', 'N/A')]
    day_of_week = models.CharField(
        max_length=5, choices=DAY_OF_WEEK_CHOICES, default='N/A')
    day_of_month = models.CharField(max_length=100, default='N/A')
    day_of_year = models.SmallIntegerField(default=0)
    start_time = models.TimeField()
    end_time = models.TimeField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    REQUIRED_FIELDS = [
        'start_time',
        'end_time'
    ]

    @staticmethod
    def _ordinal_suffix(day):
        """Return 'st'/'nd'/'rd'/'th' for a day-of-month value."""
        try:
            n = int(day)
        except (TypeError, ValueError):
            return 'th'
        # 11th, 12th and 13th are irregular.
        if 11 <= n % 100 <= 13:
            return 'th'
        return {1: 'st', 2: 'nd', 3: 'rd'}.get(n % 10, 'th')

    def __str__(self):
        # BUG FIX: the original concatenated date/time objects directly with
        # str (TypeError) and used always-true conditions such as
        # "x == 1 or 21 or 31" when choosing the ordinal suffix.
        if self.start_date is not None:
            schedule = '{0} - {1}\n'.format(self.start_date, self.end_date)
        elif self.day_of_week != 'N/A':
            schedule = self.day_of_week + 's\n'
        elif self.day_of_month != 'N/A':
            schedule = '{0}{1} of every month\n'.format(
                self.day_of_month, self._ordinal_suffix(self.day_of_month))
        else:
            schedule = '{0} day of every year\n'.format(self.day_of_year)
        return schedule + '@ {0} till {1}'.format(self.start_time,
                                                  self.end_time)
from django.test import TestCase
from .models import *
# Create your tests here.
class ProfileTestClass(TestCase):
    """Tests for the Profile model."""

    def setUp(self):
        """Create a user with an attached profile for testing."""
        user = User.objects.create(
            username='ayubu',
            first_name='ayub',
            last_name='254')
        Profile.objects.create(
            bio='hey',
            profile_photo='static/image/travel.webp',
            user_id=user.id)

    def test_bio(self):
        """The profile's bio should round-trip through the database."""
        profile = Profile.objects.get(bio='hey')
        self.assertEqual(profile.bio, 'hey')
class ImageTestCase(TestCase):
    """Tests for the Image model."""

    def setUp(self):
        """Create a user and an image owned by that user."""
        user = User.objects.create(
            username='ayubu',
            first_name='ayub',
            last_name='254')
        Image.objects.create(
            name='init',
            caption='ooops',
            profile_id=user.id,
            user_id=user.id)

    def test_image_name(self):
        """The image's name should round-trip through the database."""
        image = Image.objects.get(name='init')
        self.assertEqual(image.name, 'init')
class LikeTestCase(TestCase):
    """Tests around liking images (user + profile + image fixtures)."""

    def setUp(self):
        user = User.objects.create(
            username='ayubu',
            first_name='ayub',
            last_name='254')
        Profile.objects.create(
            bio='hey',
            profile_photo='static/image/travel.webp',
            user_id=user.id)
        Image.objects.create(
            name='init',
            caption='ooops',
            profile_id=user.id,
            user_id=user.id)

    def test_image_id(self):
        """An image created for a new user is linked to that user's id."""
        user = User.objects.create(
            username='yub',
            first_name='yubus',
            last_name='_254')
        image = Image.objects.create(
            name='init',
            caption='ooops',
            profile_id=user.id,
            user_id=user.id)
        # BUG FIX: the original test created records but asserted nothing.
        self.assertEqual(image.user_id, user.id)
from .models import *
# Create your tests here.
class ProfileTestClass(TestCase):
    """Tests for the Profile model."""

    def setUp(self):
        """Create a user with an attached profile for testing."""
        user = User.objects.create(
            username='ayubu',
            first_name='ayub',
            last_name='254')
        Profile.objects.create(
            bio='hey',
            profile_photo='static/image/travel.webp',
            user_id=user.id)

    def test_bio(self):
        """The profile's bio should round-trip through the database."""
        profile = Profile.objects.get(bio='hey')
        self.assertEqual(profile.bio, 'hey')
class ImageTestCase(TestCase):
    """Tests for the Image model."""

    def setUp(self):
        """Create a user and an image owned by that user."""
        user = User.objects.create(
            username='ayubu',
            first_name='ayub',
            last_name='254')
        Image.objects.create(
            name='init',
            caption='ooops',
            profile_id=user.id,
            user_id=user.id)

    def test_image_name(self):
        """The image's name should round-trip through the database."""
        image = Image.objects.get(name='init')
        self.assertEqual(image.name, 'init')
class LikeTestCase(TestCase):
    """Tests around liking images (user + profile + image fixtures)."""

    def setUp(self):
        user = User.objects.create(
            username='ayubu',
            first_name='ayub',
            last_name='254')
        Profile.objects.create(
            bio='hey',
            profile_photo='static/image/travel.webp',
            user_id=user.id)
        Image.objects.create(
            name='init',
            caption='ooops',
            profile_id=user.id,
            user_id=user.id)

    def test_image_id(self):
        """An image created for a new user is linked to that user's id."""
        user = User.objects.create(
            username='yub',
            first_name='yubus',
            last_name='_254')
        image = Image.objects.create(
            name='init',
            caption='ooops',
            profile_id=user.id,
            user_id=user.id)
        # BUG FIX: the original test created records but asserted nothing.
        self.assertEqual(image.user_id, user.id)
import pytest
from django.test.client import Client
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from core import authentication
from sso.user.tests.factories import UserFactory
@pytest.fixture
def user():
    """A persisted test user."""
    return UserFactory()
@pytest.fixture
def valid_session(user):
    """A saved, non-expired session authenticated as `user`."""
    client = Client()
    session = client.session
    session['_auth_user_id'] = user.id
    session.save()
    return session
@pytest.fixture
def expired_session(user):
    """A session for `user` that is already expired when saved."""
    client = Client()
    session = client.session
    session['_auth_user_id'] = user.id
    # A negative expiry makes the session invalid immediately.
    session.set_expiry(-1)
    session.save()
    return session
class TestView(APIView):
    """Minimal authenticated endpoint used to exercise SessionAuthentication."""
    authentication_classes = [authentication.SessionAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request):
        return Response()
@pytest.mark.django_db
def test_sso_session_authentication_invalid_header(rf):
    """An Authorization header without a session id is rejected with 401."""
    request = rf.get('/', HTTP_AUTHORIZATION='SSO_SESSION_ID')
    response = TestView.as_view()(request)
    assert response.status_code == 401
    assert response.render().content == (b'{"detail":"Invalid SSO_SESSION_ID header."}')
@pytest.mark.django_db
def test_sso_session_authentication_valid_session_key(valid_session, rf):
    """A valid session key authenticates the request (200)."""
    request = rf.get('/', HTTP_AUTHORIZATION=f'SSO_SESSION_ID {valid_session._session_key}')
    response = TestView.as_view()(request)
    assert response.status_code == 200
@pytest.mark.django_db
def test_sso_session_authentication_expired_session(expired_session, rf):
    """An expired session key is rejected with 401."""
    request = rf.get('/', HTTP_AUTHORIZATION=f'SSO_SESSION_ID {expired_session._session_key}')
    response = TestView.as_view()(request)
    assert response.status_code == 401
    assert response.render().content == b'{"detail":"Invalid session id"}'
@pytest.mark.django_db
def test_sso_session_authentication_no_user(rf):
    """A session id that does not exist is rejected with 401."""
    request = rf.get('/', HTTP_AUTHORIZATION='SSO_SESSION_ID not-exist')
    response = TestView.as_view()(request)
    assert response.status_code == 401
    assert response.render().content == b'{"detail":"Invalid session id"}'
from django.test.client import Client
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from core import authentication
from sso.user.tests.factories import UserFactory
@pytest.fixture
def user():
    """A persisted test user."""
    return UserFactory()
@pytest.fixture
def valid_session(user):
    """A saved, non-expired session authenticated as `user`."""
    client = Client()
    session = client.session
    session['_auth_user_id'] = user.id
    session.save()
    return session
@pytest.fixture
def expired_session(user):
    """A session for `user` that is already expired when saved."""
    client = Client()
    session = client.session
    session['_auth_user_id'] = user.id
    # A negative expiry makes the session invalid immediately.
    session.set_expiry(-1)
    session.save()
    return session
class TestView(APIView):
    """Minimal authenticated endpoint used to exercise SessionAuthentication."""
    authentication_classes = [authentication.SessionAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request):
        return Response()
@pytest.mark.django_db
def test_sso_session_authentication_invalid_header(rf):
    """An Authorization header without a session id is rejected with 401."""
    request = rf.get('/', HTTP_AUTHORIZATION='SSO_SESSION_ID')
    response = TestView.as_view()(request)
    assert response.status_code == 401
    assert response.render().content == (b'{"detail":"Invalid SSO_SESSION_ID header."}')
@pytest.mark.django_db
def test_sso_session_authentication_valid_session_key(valid_session, rf):
    """A valid session key authenticates the request (200)."""
    request = rf.get('/', HTTP_AUTHORIZATION=f'SSO_SESSION_ID {valid_session._session_key}')
    response = TestView.as_view()(request)
    assert response.status_code == 200
@pytest.mark.django_db
def test_sso_session_authentication_expired_session(expired_session, rf):
    """An expired session key is rejected with 401."""
    request = rf.get('/', HTTP_AUTHORIZATION=f'SSO_SESSION_ID {expired_session._session_key}')
    response = TestView.as_view()(request)
    assert response.status_code == 401
    assert response.render().content == b'{"detail":"Invalid session id"}'
@pytest.mark.django_db
def test_sso_session_authentication_no_user(rf):
    """A session id that does not exist is rejected with 401."""
    request = rf.get('/', HTTP_AUTHORIZATION='SSO_SESSION_ID not-exist')
    response = TestView.as_view()(request)
    assert response.status_code == 401
    assert response.render().content == b'{"detail":"Invalid session id"}'
import os
from typing import List, Tuple
import cv2
import numpy as np
def get_data(muscima_pp_cropped_images_directory: str, visualise: bool = False) -> Tuple[List[dict], dict, dict]:
    """Parse ``Annotations.txt`` into per-image annotation records.

    Each annotation line has the form
    ``filename,left,top,right,bottom,class_name``.

    :param muscima_pp_cropped_images_directory: directory containing
        Annotations.txt and the referenced images.
    :param visualise: when True, draw each bounding box and show the image.
    :return: (all_data, classes_count, class_mapping) where all_data is a
        list of per-image dicts (filepath/width/height/bboxes/imageset),
        classes_count maps class name -> number of boxes, and class_mapping
        maps class name -> integer id ('bg' is forced to the last id).
    """
    found_bg = False
    all_imgs = {}
    classes_count = {}
    class_mapping = {}
    annotation_file = os.path.join(muscima_pp_cropped_images_directory, "Annotations.txt")
    with open(annotation_file, 'r') as f:
        print('Parsing annotation files')
        for line in f:
            line_split = line.strip().split(',')
            (filename, left, top, right, bottom, class_name) = line_split
            filename = os.path.join(muscima_pp_cropped_images_directory, filename)
            left, top, right, bottom = int(left), int(top), int(right), int(bottom)
            if class_name not in classes_count:
                classes_count[class_name] = 1
            else:
                classes_count[class_name] += 1
            if class_name not in class_mapping:
                if class_name == 'bg' and not found_bg:
                    print("Found class name with special name bg. Will be treated as a background region (this is "
                          "usually for hard negative mining).")
                    found_bg = True
                class_mapping[class_name] = len(class_mapping)
            if filename not in all_imgs:
                all_imgs[filename] = {}
                img = cv2.imread(filename)
                (rows, cols) = img.shape[:2]
                all_imgs[filename]['filepath'] = filename
                all_imgs[filename]['width'] = cols
                all_imgs[filename]['height'] = rows
                all_imgs[filename]['bboxes'] = []
                # Roughly 5/6 of the images go to training, the rest to val.
                if np.random.randint(0, 6) > 0:
                    all_imgs[filename]['imageset'] = 'train'
                else:
                    all_imgs[filename]['imageset'] = 'val'
            all_imgs[filename]['bboxes'].append(
                {'class': class_name, 'x1': left, 'x2': right, 'y1': top, 'y2': bottom})
            if visualise:
                # NOTE(review): `img` is only loaded when a filename is first
                # seen, so visualisation assumes all annotation lines of one
                # image are contiguous -- confirm before relying on it.
                cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255))
                cv2.imshow('img', img)
                cv2.waitKey(0)
    all_data = []
    for key in all_imgs:
        all_data.append(all_imgs[key])
    # make sure the bg class is last in the list
    if found_bg:
        if class_mapping['bg'] != len(class_mapping) - 1:
            key_to_switch = [key for key in class_mapping.keys() if class_mapping[key] == len(class_mapping) - 1][0]
            val_to_switch = class_mapping['bg']
            class_mapping['bg'] = len(class_mapping) - 1
            class_mapping[key_to_switch] = val_to_switch
    return all_data, classes_count, class_mapping
if __name__ == "__main__":
    # Quick smoke test over the local dataset directory.
    all_data, classes_count, class_mapping = get_data("../data/muscima_pp_cropped_images", False)
    number_of_bounding_boxes = sum(classes_count.values())
    print("Found {0} samples with {1} bounding-boxes belonging to {2} classes".format(len(all_data),
                                                                                      number_of_bounding_boxes,
                                                                                      len(classes_count)))
from typing import List, Tuple
import cv2
import numpy as np
def get_data(muscima_pp_cropped_images_directory: str, visualise: bool = False) -> Tuple[List[dict], dict, dict]:
    """Parse ``Annotations.txt`` into per-image annotation records.

    Each annotation line has the form
    ``filename,left,top,right,bottom,class_name``.

    :param muscima_pp_cropped_images_directory: directory containing
        Annotations.txt and the referenced images.
    :param visualise: when True, draw each bounding box and show the image.
    :return: (all_data, classes_count, class_mapping) where all_data is a
        list of per-image dicts (filepath/width/height/bboxes/imageset),
        classes_count maps class name -> number of boxes, and class_mapping
        maps class name -> integer id ('bg' is forced to the last id).
    """
    found_bg = False
    all_imgs = {}
    classes_count = {}
    class_mapping = {}
    annotation_file = os.path.join(muscima_pp_cropped_images_directory, "Annotations.txt")
    with open(annotation_file, 'r') as f:
        print('Parsing annotation files')
        for line in f:
            line_split = line.strip().split(',')
            (filename, left, top, right, bottom, class_name) = line_split
            filename = os.path.join(muscima_pp_cropped_images_directory, filename)
            left, top, right, bottom = int(left), int(top), int(right), int(bottom)
            if class_name not in classes_count:
                classes_count[class_name] = 1
            else:
                classes_count[class_name] += 1
            if class_name not in class_mapping:
                if class_name == 'bg' and not found_bg:
                    print("Found class name with special name bg. Will be treated as a background region (this is "
                          "usually for hard negative mining).")
                    found_bg = True
                class_mapping[class_name] = len(class_mapping)
            if filename not in all_imgs:
                all_imgs[filename] = {}
                img = cv2.imread(filename)
                (rows, cols) = img.shape[:2]
                all_imgs[filename]['filepath'] = filename
                all_imgs[filename]['width'] = cols
                all_imgs[filename]['height'] = rows
                all_imgs[filename]['bboxes'] = []
                # Roughly 5/6 of the images go to training, the rest to val.
                if np.random.randint(0, 6) > 0:
                    all_imgs[filename]['imageset'] = 'train'
                else:
                    all_imgs[filename]['imageset'] = 'val'
            all_imgs[filename]['bboxes'].append(
                {'class': class_name, 'x1': left, 'x2': right, 'y1': top, 'y2': bottom})
            if visualise:
                # NOTE(review): `img` is only loaded when a filename is first
                # seen, so visualisation assumes all annotation lines of one
                # image are contiguous -- confirm before relying on it.
                cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255))
                cv2.imshow('img', img)
                cv2.waitKey(0)
    all_data = []
    for key in all_imgs:
        all_data.append(all_imgs[key])
    # make sure the bg class is last in the list
    if found_bg:
        if class_mapping['bg'] != len(class_mapping) - 1:
            key_to_switch = [key for key in class_mapping.keys() if class_mapping[key] == len(class_mapping) - 1][0]
            val_to_switch = class_mapping['bg']
            class_mapping['bg'] = len(class_mapping) - 1
            class_mapping[key_to_switch] = val_to_switch
    return all_data, classes_count, class_mapping
if __name__ == "__main__":
    # Quick smoke test over the local dataset directory.
    all_data, classes_count, class_mapping = get_data("../data/muscima_pp_cropped_images", False)
    number_of_bounding_boxes = sum(classes_count.values())
    print("Found {0} samples with {1} bounding-boxes belonging to {2} classes".format(len(all_data),
                                                                                      number_of_bounding_boxes,
                                                                                      len(classes_count)))
class Operator:
    """
    A STRIPS operator.

    The preconditions represent the facts that have to be true
    before the operator can be applied.
    add_effects are the facts that the operator makes true.
    delete_effects are the facts that the operator makes false.
    """

    def __init__(self, name, preconditions, add_effects, del_effects):
        self.name = name
        self.preconditions = frozenset(preconditions)
        self.add_effects = frozenset(add_effects)
        self.del_effects = frozenset(del_effects)

    def applicable(self, state):
        """
        Operators are applicable when their set of preconditions is a subset
        of the facts that are true in "state".

        @return True if the operator's preconditions is a subset of the state,
                False otherwise
        """
        return self.preconditions <= state

    def apply(self, state):
        """
        Applying an operator means removing the facts that are made false
        by the operator from the set of true facts in state and adding
        the facts made true.

        Note that therefore it is possible to have operands that make a
        fact both false and true. This results in the fact being true
        at the end.

        @param state The state that the operator should be applied to
        @return A new state (set of facts) after the application of the
                operator
        """
        assert self.applicable(state)
        assert type(state) in (frozenset, set)
        # Deletes first, then adds, so an add wins over a delete.
        return (state - self.del_effects) | self.add_effects

    def __str__(self):
        s = '%s\n' % self.name
        for group, facts in [('PRE', self.preconditions),
                             ('ADD', self.add_effects),
                             ('DEL', self.del_effects)]:
            for fact in facts:
                s += ' %s: %s\n' % (group, fact)
        return s

    def __repr__(self):
        return '<Op %s>' % self.name
class Task:
    """
    A STRIPS planning task
    """

    def __init__(self, name, facts, initial_state, goals, operators):
        """
        @param name The task's name
        @param facts A set of all the fact names that are valid in the domain
        @param initial_state A set of fact names that are true at the beginning
        @param goals A set of fact names that must be true to solve the problem
        @param operators A set of operator instances for the domain
        """
        self.name = name
        self.facts = facts
        self.initial_state = initial_state
        self.goals = goals
        self.operators = operators

    def goal_reached(self, state):
        """
        The goal has been reached if all facts that are true in "goals"
        are true in "state".

        @return True if all the goals are reached, False otherwise
        """
        return self.goals <= state

    def get_successor_states(self, state):
        """
        @return A list with (op, new_state) pairs where "op" is the applicable
                operator and "new_state" the state that results when "op" is
                applied in state "state".
        """
        return [(op, op.apply(state)) for op in self.operators
                if op.applicable(state)]

    def __str__(self):
        s = 'Task {0}\n Vars: {1}\n Init: {2}\n Goals: {3}\n Ops: {4}'
        return s.format(self.name, ', '.join(self.facts),
                        self.initial_state, self.goals,
                        '\n'.join(map(repr, self.operators)))

    def __repr__(self):
        string = '<Task {0}, vars: {1}, operators: {2}>'
        return string.format(self.name, len(self.facts), len(self.operators))
"""
The preconditions represent the facts that have to be true
before the operator can be applied.
add_effects are the facts that the operator makes true.
delete_effects are the facts that the operator makes false.
"""
def __init__(self, name, preconditions, add_effects, del_effects):
self.name = name
self.preconditions = frozenset(preconditions)
self.add_effects = frozenset(add_effects)
self.del_effects = frozenset(del_effects)
def applicable(self, state):
"""
Operators are applicable when their set of preconditions is a subset
of the facts that are true in "state".
@return True if the operator's preconditions is a subset of the state,
False otherwise
"""
return self.preconditions <= state
def apply(self, state):
"""
Applying an operator means removing the facts that are made false
by the operator from the set of true facts in state and adding
the facts made true.
Note that therefore it is possible to have operands that make a
fact both false and true. This results in the fact being true
at the end.
@param state The state that the operator should be applied to
@return A new state (set of facts) after the application of the
operator
"""
assert self.applicable(state)
assert type(state) in (frozenset, set)
return (state - self.del_effects) | self.add_effects
def __str__(self):
s = '%s\n' % self.name
for group, facts in [('PRE', self.preconditions),
('ADD', self.add_effects),
('DEL', self.del_effects)]:
for fact in facts:
s += ' %s: %s\n' % (group, fact)
return s
def __repr__(self):
return '<Op %s>' % self.name
class Task:
    """
    A STRIPS planning task
    """

    def __init__(self, name, facts, initial_state, goals, operators):
        """
        @param name The task's name
        @param facts A set of all the fact names that are valid in the domain
        @param initial_state A set of fact names that are true at the beginning
        @param goals A set of fact names that must be true to solve the problem
        @param operators A set of operator instances for the domain
        """
        self.name = name
        self.facts = facts
        self.initial_state = initial_state
        self.goals = goals
        self.operators = operators

    def goal_reached(self, state):
        """
        The goal has been reached if all facts that are true in "goals"
        are true in "state".

        @return True if all the goals are reached, False otherwise
        """
        return self.goals <= state

    def get_successor_states(self, state):
        """
        @return A list with (op, new_state) pairs where "op" is the applicable
                operator and "new_state" the state that results when "op" is
                applied in state "state".
        """
        return [(op, op.apply(state)) for op in self.operators
                if op.applicable(state)]

    def __str__(self):
        s = 'Task {0}\n Vars: {1}\n Init: {2}\n Goals: {3}\n Ops: {4}'
        return s.format(self.name, ', '.join(self.facts),
                        self.initial_state, self.goals,
                        '\n'.join(map(repr, self.operators)))

    def __repr__(self):
        string = '<Task {0}, vars: {1}, operators: {2}>'
        return string.format(self.name, len(self.facts), len(self.operators))
__author__ = 'HPE'
import sushy
from sushy.resources import base
from sushy.resources.system import system
from sushy import utils as sushy_utils
from proliantutils import exception
from proliantutils import log
from proliantutils.redfish.resources.system import bios
from proliantutils.redfish.resources.system import constants
from proliantutils.redfish.resources.system import ethernet_interface
from proliantutils.redfish.resources.system import mappings
from proliantutils.redfish.resources.system import memory
from proliantutils.redfish.resources.system import pci_device
from proliantutils.redfish.resources.system import secure_boot
from proliantutils.redfish.resources.system import smart_storage_config
from proliantutils.redfish.resources.system.storage import simple_storage
from proliantutils.redfish.resources.system.storage import \
smart_storage as hpe_smart_storage
from proliantutils.redfish.resources.system.storage import storage
from proliantutils.redfish import utils
LOG = log.get_logger(__name__)

# Maps proliant persistent-boot device names to sushy boot source targets.
PERSISTENT_BOOT_DEVICE_MAP = {
    'CDROM': sushy.BOOT_SOURCE_TARGET_CD,
    'NETWORK': sushy.BOOT_SOURCE_TARGET_PXE,
    'ISCSI': sushy.BOOT_SOURCE_TARGET_UEFI_TARGET,
    'HDD': sushy.BOOT_SOURCE_TARGET_HDD
}
class PowerButtonActionField(base.CompositeField):
    """The HPE OEM PushPowerButton action descriptor.

    NOTE(review): the allowable-values annotation name was mangled
    ("Push<EMAIL>") in this copy of the source; restored to the standard
    Redfish payload annotation -- confirm against upstream.
    """
    allowed_values = base.Field('[email protected]',
                                adapter=list)
    target_uri = base.Field('target', required=True)
class HpeActionsField(base.CompositeField):
    """Container for the HPE OEM actions exposed on the system resource."""
    computer_system_ext_powerbutton = (
        PowerButtonActionField('#HpeComputerSystemExt.PowerButton'))
class HPESystem(system.System):
"""Class that extends the functionality of System resource class
This class extends the functionality of System resource class
from sushy
"""
model = base.Field(['Model'])
rom_version = base.Field(['Oem', 'Hpe', 'Bios', 'Current',
                          'VersionString'])
# NOTE(review): the annotation name below was mangled in this copy of the
# source; restored to the standard Redfish allowable-values annotation.
uefi_target_override_devices = (base.Field([
    'Boot',
    '[email protected]'],
    adapter=list))
smart_storage_config_identities = base.Field(
    ['Oem', 'Hpe', 'SmartStorageConfig'],
    adapter=sushy_utils.get_members_identities)
supported_boot_mode = base.MappedField(
    ['Oem', 'Hpe', 'Bios', 'UefiClass'], mappings.SUPPORTED_BOOT_MODE,
    default=constants.SUPPORTED_LEGACY_BIOS_ONLY)
"""System supported boot mode."""
post_state = base.MappedField(
    ['Oem', 'Hpe', 'PostState'], mappings.POST_STATE_MAP,
    default=constants.POST_STATE_NULL)
"""System POST state"""
_hpe_actions = HpeActionsField(['Oem', 'Hpe', 'Actions'], required=True)
"""Oem specific system extensibility actions"""
# Lazily-built sub-resource caches; invalidated by _do_refresh().
_bios_settings = None  # ref to BIOSSettings instance
_secure_boot = None  # ref to SecureBoot instance
_smart_storage = None  # SmartStorage instance
_simple_storages = None  # SimpleStorage instance
_storages = None  # Storage instance
_pci_devices = None  # PCIDevice instance
_ethernet_interfaces = None  # EthernetInterface instance
_memory = None  # Memory instance
def _get_hpe_push_power_button_action_element(self):
    """Return the OEM PowerButton action descriptor.

    :raises: MissingAttributeError when the OEM action is not exposed.
    """
    push_action = self._hpe_actions.computer_system_ext_powerbutton
    if not push_action:
        raise exception.MissingAttributeError(
            attribute='Oem/Hpe/Actions/#HpeComputerSystemExt.PowerButton',
            resource=self.path)
    return push_action
def push_power_button(self, target_value):
    """Reset the system in hpe exclusive manner.

    :param target_value: The target value to be set.
    :raises: InvalidInputError, if the target value is not
        allowed.
    :raises: SushyError, on an error from iLO.
    """
    if target_value not in mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV:
        msg = ('The parameter "%(parameter)s" value "%(target_value)s" is '
               'invalid. Valid values are: %(valid_power_values)s' %
               {'parameter': 'target_value', 'target_value': target_value,
                'valid_power_values': (
                    mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV.keys())})
        raise exception.InvalidInputError(msg)
    value = mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV[target_value]
    target_uri = (
        self._get_hpe_push_power_button_action_element().target_uri)
    self._conn.post(target_uri, data={'PushType': value})
@property
def bios_settings(self):
    """Property to provide reference to `BIOSSettings` instance

    It is calculated once when the first time it is queried. On refresh,
    this property gets reset.
    """
    if self._bios_settings is None:
        self._bios_settings = bios.BIOSSettings(
            self._conn, utils.get_subresource_path_by(self, 'Bios'),
            redfish_version=self.redfish_version)
    # refresh(force=False) only re-fetches when the cached resource was
    # invalidated by _do_refresh().
    self._bios_settings.refresh(force=False)
    return self._bios_settings
def update_persistent_boot(self, devices=None, persistent=False):
    """Changes the persistent boot device order in BIOS boot mode for host

    Note: It uses first boot device from the devices and ignores rest.

    :param devices: ordered list of boot devices
    :param persistent: Boolean flag to indicate if the device to be set as
        a persistent boot device
    :raises: IloError, on an error from iLO.
    :raises: IloInvalidInputError, if the given input is not valid.
    """
    # BUG FIX: mutable default argument ([]) replaced by None, and an
    # empty device list is now reported instead of raising IndexError.
    if not devices:
        raise exception.IloInvalidInputError(
            '"devices" must be a non-empty list of boot devices.')
    device = PERSISTENT_BOOT_DEVICE_MAP.get(devices[0].upper())
    if device == sushy.BOOT_SOURCE_TARGET_UEFI_TARGET:
        try:
            uefi_devices = self.uefi_target_override_devices
            iscsi_device = None
            for uefi_device in uefi_devices:
                if uefi_device is not None and 'iSCSI' in uefi_device:
                    iscsi_device = uefi_device
                    break
            if iscsi_device is None:
                msg = 'No UEFI iSCSI bootable device found on system.'
                raise exception.IloError(msg)
        except sushy.exceptions.SushyError as e:
            msg = ('Unable to get uefi target override devices. '
                   'Error %s') % (str(e))
            raise exception.IloError(msg)
        uefi_boot_settings = {
            'Boot': {'UefiTargetBootSourceOverride': iscsi_device}
        }
        self._conn.patch(self.path, data=uefi_boot_settings)
    elif device is None:
        device = sushy.BOOT_SOURCE_TARGET_NONE
    tenure = (sushy.BOOT_SOURCE_ENABLED_CONTINUOUS
              if persistent else sushy.BOOT_SOURCE_ENABLED_ONCE)
    self.set_system_boot_source(device, enabled=tenure)
@property
def pci_devices(self):
    """Provides the collection of PCI devices

    It is calculated once when the first time it is queried. On refresh,
    this property gets reset.
    """
    if self._pci_devices is None:
        self._pci_devices = pci_device.PCIDeviceCollection(
            self._conn, utils.get_subresource_path_by(
                self, ['Oem', 'Hpe', 'Links', 'PCIDevices']))
    # Re-fetch only when invalidated by _do_refresh().
    self._pci_devices.refresh(force=False)
    return self._pci_devices
@property
def secure_boot(self):
    """Property to provide reference to `SecureBoot` instance

    It is calculated once when the first time it is queried. On refresh,
    this property gets reset.
    """
    if self._secure_boot is None:
        self._secure_boot = secure_boot.SecureBoot(
            self._conn, utils.get_subresource_path_by(self, 'SecureBoot'),
            redfish_version=self.redfish_version)
    # Re-fetch only when invalidated by _do_refresh().
    self._secure_boot.refresh(force=False)
    return self._secure_boot
def _do_refresh(self, force):
    """Do custom resource specific refresh activities

    On refresh, all sub-resources are marked as stale, i.e.
    greedy-refresh not done for them unless forced by ``force``
    argument.
    """
    super(HPESystem, self)._do_refresh(force)
    # Invalidate every cached sub-resource that has been materialised
    # (same order as the original per-attribute checks).
    for sub_resource in (self._bios_settings, self._pci_devices,
                         self._secure_boot, self._ethernet_interfaces,
                         self._smart_storage, self._storages,
                         self._simple_storages, self._memory):
        if sub_resource is not None:
            sub_resource.invalidate(force)
def _get_hpe_sub_resource_collection_path(self, sub_res):
    """Return the path of `sub_res`, preferring the standard location.

    Falls back to the HPE OEM links section when the sub-resource is
    not present at the standard Redfish location.
    """
    try:
        return utils.get_subresource_path_by(self, sub_res)
    except exception.MissingAttributeError:
        return utils.get_subresource_path_by(
            self, ['Oem', 'Hpe', 'Links', sub_res])
@property
def ethernet_interfaces(self):
    """Provide reference to EthernetInterfacesCollection instance"""
    if self._ethernet_interfaces is None:
        sub_res = 'EthernetInterfaces'
        self._ethernet_interfaces = (
            ethernet_interface.EthernetInterfaceCollection(
                self._conn,
                self._get_hpe_sub_resource_collection_path(sub_res),
                redfish_version=self.redfish_version))
    # Re-fetch only when invalidated by _do_refresh().
    self._ethernet_interfaces.refresh(force=False)
    return self._ethernet_interfaces
@property
def smart_storage(self):
    """This property gets the object for smart storage.

    There is no collection for smart storages.

    :returns: an instance of smart storage
    """
    if self._smart_storage is None:
        self._smart_storage = hpe_smart_storage.HPESmartStorage(
            self._conn, utils.get_subresource_path_by(
                self, ['Oem', 'Hpe', 'Links', 'SmartStorage']),
            redfish_version=self.redfish_version)
    # Re-fetch only when invalidated by _do_refresh().
    self._smart_storage.refresh(force=False)
    return self._smart_storage
@property
def storages(self):
    """This property gets the list of instances for Storages

    :returns: a list of instances of Storages
    """
    if self._storages is None:
        self._storages = storage.StorageCollection(
            self._conn, utils.get_subresource_path_by(self, 'Storage'),
            redfish_version=self.redfish_version)
    # Re-fetch only when invalidated by _do_refresh().
    self._storages.refresh(force=False)
    return self._storages
@property
def simple_storages(self):
    """This property gets the list of instances for SimpleStorages

    :returns: a list of instances of SimpleStorages
    """
    if self._simple_storages is None:
        self._simple_storages = simple_storage.SimpleStorageCollection(
            self._conn, utils.get_subresource_path_by(
                self, 'SimpleStorage'),
            redfish_version=self.redfish_version)
    # Re-fetch only when invalidated by _do_refresh().
    self._simple_storages.refresh(force=False)
    return self._simple_storages
@property
def memory(self):
    """Property to provide reference to `MemoryCollection` instance

    It is calculated once when the first time it is queried. On refresh,
    this property gets reset.
    """
    if self._memory is None:
        self._memory = memory.MemoryCollection(
            self._conn, utils.get_subresource_path_by(
                self, 'Memory'),
            redfish_version=self.redfish_version)
    # Re-fetch only when invalidated by _do_refresh().
    self._memory.refresh(force=False)
    return self._memory
def get_smart_storage_config(self, smart_storage_config_url):
    """Returns a SmartStorageConfig Instance for each controller."""
    return (smart_storage_config.
            HPESmartStorageConfig(self._conn, smart_storage_config_url,
                                  redfish_version=self.redfish_version))
def _get_smart_storage_config_by_controller_model(self, controller_model):
    """Find the SmartStorageConfig resource matching a controller model.

    Resolves the array controller with the given model, then matches
    its location against each advertised SmartStorageConfig resource.

    :param controller_model: model string of the array controller.
    :returns: the matching ``HPESmartStorageConfig`` instance, or
        ``None`` when no resource shares the controller's location.
    """
    controller = (
        self.smart_storage.array_controllers.array_controller_by_model(
            controller_model))
    for config_id in self.smart_storage_config_identities:
        candidate = self.get_smart_storage_config(config_id)
        if candidate.location == controller.location:
            return candidate
    return None
def check_smart_storage_config_ids(self):
    """Ensure SmartStorageConfig resources are advertised by the hardware.

    :raises: IloError, when no SmartStorageConfig identities are present.
    """
    if self.smart_storage_config_identities is not None:
        return
    msg = ('The Redfish controller failed to get the '
           'SmartStorageConfig controller configurations.')
    LOG.debug(msg)
    raise exception.IloError(msg)
def delete_raid(self):
    """Delete the raid configuration on the hardware.

    Loops through each SmartStorageConfig controller and clears the
    raid configuration.

    :raises: IloError, on an error from iLO.
    :raises: IloLogicalDriveNotFoundError, if no controller had any
        logical drive to delete.
    """
    self.check_smart_storage_config_ids()
    any_exceptions = []
    ld_exc_count = 0
    for config_id in self.smart_storage_config_identities:
        try:
            ssc_obj = self.get_smart_storage_config(config_id)
            ssc_obj.delete_raid()
        except exception.IloLogicalDriveNotFoundError:
            # Tolerated per-controller; only an error if *every*
            # controller is empty (checked after the loop). The
            # exception object itself was never used, so it is no
            # longer bound.
            ld_exc_count += 1
        except sushy.exceptions.SushyError as e:
            any_exceptions.append((config_id, str(e)))
    if any_exceptions:
        msg = ('The Redfish controller failed to delete the '
               'raid configuration in one or more controllers with '
               'Error: %(error)s' % {'error': str(any_exceptions)})
        raise exception.IloError(msg)
    if ld_exc_count == len(self.smart_storage_config_identities):
        msg = ('No logical drives are found in any controllers. Nothing '
               'to delete.')
        raise exception.IloLogicalDriveNotFoundError(msg)
def _parse_raid_config_data(self, raid_config):
"""It will parse raid config data based on raid controllers
:param raid_config: A dictionary containing target raid configuration
data. This data stucture should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'controller':
'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:returns: A dictionary of controllers, each containing list of
their respected logical drives.
"""
default = (
self.smart_storage.array_controllers.get_default_controller.model)
controllers = {default: []}
for ld in raid_config['logical_disks']:
if 'controller' not in ld.keys():
controllers[default].append(ld)
else:
ctrl = ld['controller']
if ctrl not in controllers:
controllers[ctrl] = []
controllers[ctrl].append(ld)
return controllers
def create_raid(self, raid_config):
    """Create the raid configuration on the hardware.

    :param raid_config: A dictionary containing target raid configuration
        data. This data structure should be as follows:
        raid_config = {'logical_disks': [{'raid_level': 1,
        'size_gb': 100, 'physical_disks': ['6I:1:5'],
        'controller': 'HPE Smart Array P408i-a SR Gen10'},
        <info-for-logical-disk-2>]}
    :raises: IloError, on an error from iLO.
    """
    self.check_smart_storage_config_ids()
    any_exceptions = []
    controllers = self._parse_raid_config_data(raid_config)
    # Create the requested logical disks controller by controller,
    # collecting per-controller failures instead of aborting early.
    for controller, logical_disks in controllers.items():
        try:
            ssc_obj = (
                self._get_smart_storage_config_by_controller_model(
                    controller))
            if ssc_obj:
                ssc_obj.create_raid({'logical_disks': logical_disks})
            else:
                members = (
                    self.smart_storage.array_controllers.get_members())
                models = [member.model for member in members]
                msg = ('Controller not found. Available controllers are: '
                       '%(models)s' % {'models': models})
                any_exceptions.append((controller, msg))
        except sushy.exceptions.SushyError as e:
            any_exceptions.append((controller, str(e)))
    if any_exceptions:
        msg = ('The Redfish controller failed to create the '
               'raid configuration for one or more controllers with '
               'Error: %(error)s' % {'error': str(any_exceptions)})
        raise exception.IloError(msg)
__author__ = 'HPE'
import sushy
from sushy.resources import base
from sushy.resources.system import system
from sushy import utils as sushy_utils
from proliantutils import exception
from proliantutils import log
from proliantutils.redfish.resources.system import bios
from proliantutils.redfish.resources.system import constants
from proliantutils.redfish.resources.system import ethernet_interface
from proliantutils.redfish.resources.system import mappings
from proliantutils.redfish.resources.system import memory
from proliantutils.redfish.resources.system import pci_device
from proliantutils.redfish.resources.system import secure_boot
from proliantutils.redfish.resources.system import smart_storage_config
from proliantutils.redfish.resources.system.storage import simple_storage
from proliantutils.redfish.resources.system.storage import \
smart_storage as hpe_smart_storage
from proliantutils.redfish.resources.system.storage import storage
from proliantutils.redfish import utils
LOG = log.get_logger(__name__)
PERSISTENT_BOOT_DEVICE_MAP = {
'CDROM': sushy.BOOT_SOURCE_TARGET_CD,
'NETWORK': sushy.BOOT_SOURCE_TARGET_PXE,
'ISCSI': sushy.BOOT_SOURCE_TARGET_UEFI_TARGET,
'HDD': sushy.BOOT_SOURCE_TARGET_HDD
}
class PowerButtonActionField(base.CompositeField):
    """Composite field for the HPE OEM PowerButton action."""

    # Restored the Redfish annotation name for the allowed 'PushType'
    # values; the literal had been corrupted to 'Push<EMAIL>'.
    allowed_values = base.Field('PushType@Redfish.AllowableValues',
                                adapter=list)
    target_uri = base.Field('target', required=True)
class HpeActionsField(base.CompositeField):
    """OEM (Hpe) actions advertised under 'Oem/Hpe/Actions'."""

    computer_system_ext_powerbutton = PowerButtonActionField(
        '#HpeComputerSystemExt.PowerButton')
class HPESystem(system.System):
    """Class that extends the functionality of System resource class

    This class extends the functionality of System resource class
    from sushy
    """

    model = base.Field(['Model'])
    rom_version = base.Field(['Oem', 'Hpe', 'Bios', 'Current',
                              'VersionString'])
    # Restored the Redfish annotation name; the literal had been
    # corrupted to 'UefiTargetBootSourceOverride@<EMAIL>Values'.
    uefi_target_override_devices = (base.Field([
        'Boot',
        'UefiTargetBootSourceOverride@Redfish.AllowableValues'],
        adapter=list))
    smart_storage_config_identities = base.Field(
        ['Oem', 'Hpe', 'SmartStorageConfig'],
        adapter=sushy_utils.get_members_identities)
    supported_boot_mode = base.MappedField(
        ['Oem', 'Hpe', 'Bios', 'UefiClass'], mappings.SUPPORTED_BOOT_MODE,
        default=constants.SUPPORTED_LEGACY_BIOS_ONLY)
    """System supported boot mode."""
    post_state = base.MappedField(
        ['Oem', 'Hpe', 'PostState'], mappings.POST_STATE_MAP,
        default=constants.POST_STATE_NULL)
    """System POST state"""
    _hpe_actions = HpeActionsField(['Oem', 'Hpe', 'Actions'], required=True)
    """Oem specific system extensibility actions"""

    # Lazily-created sub-resource caches; invalidated by _do_refresh().
    _bios_settings = None  # ref to BIOSSettings instance
    _secure_boot = None  # ref to SecureBoot instance
    _smart_storage = None  # SmartStorage instance
    _simple_storages = None  # SimpleStorage instance
    _storages = None  # Storage instance
    _pci_devices = None  # PCIDevice instance
    _ethernet_interfaces = None  # EthernetInterface instance
    _memory = None  # Memory instance
def _get_hpe_push_power_button_action_element(self):
    """Return the OEM PowerButton action field of this system.

    :raises: MissingAttributeError, when the PowerButton action is not
        advertised by the resource.
    """
    action = self._hpe_actions.computer_system_ext_powerbutton
    if not action:
        raise exception.MissingAttributeError(
            attribute='Oem/Hpe/Actions/#HpeComputerSystemExt.PowerButton',
            resource=self.path)
    return action
def push_power_button(self, target_value):
    """Reset the system in hpe exclusive manner.

    :param target_value: The target value to be set.
    :raises: InvalidInputError, if the target value is not
        allowed.
    :raises: SushyError, on an error from iLO.
    """
    if target_value not in mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV:
        # list() so the message shows the values themselves rather than
        # an opaque dict_keys([...]) repr.
        msg = ('The parameter "%(parameter)s" value "%(target_value)s" is '
               'invalid. Valid values are: %(valid_power_values)s' %
               {'parameter': 'target_value', 'target_value': target_value,
                'valid_power_values': list(
                    mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV)})
        raise exception.InvalidInputError(msg)
    value = mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV[target_value]
    target_uri = (
        self._get_hpe_push_power_button_action_element().target_uri)
    self._conn.post(target_uri, data={'PushType': value})
@property
def bios_settings(self):
    """Reference to the `BIOSSettings` instance of this system.

    Built once on first access; the cached object is reset on refresh.
    """
    if self._bios_settings is None:
        bios_path = utils.get_subresource_path_by(self, 'Bios')
        self._bios_settings = bios.BIOSSettings(
            self._conn, bios_path, redfish_version=self.redfish_version)
    self._bios_settings.refresh(force=False)
    return self._bios_settings
def update_persistent_boot(self, devices=[], persistent=False):
    """Changes the persistent boot device order in BIOS boot mode for host

    Note: It uses first boot device from the devices and ignores rest.

    :param devices: ordered list of boot devices
    :param persistent: Boolean flag to indicate if the device to be set as
        a persistent boot device
    :raises: IloError, on an error from iLO.
    :raises: IloInvalidInputError, if the given input is not valid.
    """
    # Fail with the documented exception instead of an obscure
    # IndexError when no device is supplied.
    if not devices:
        raise exception.IloInvalidInputError(
            'Devices list is empty. At least one boot device must be '
            'specified.')
    device = PERSISTENT_BOOT_DEVICE_MAP.get(devices[0].upper())
    if device == sushy.BOOT_SOURCE_TARGET_UEFI_TARGET:
        # ISCSI boot requires picking a UEFI target override device.
        try:
            uefi_devices = self.uefi_target_override_devices
            iscsi_device = None
            for uefi_device in uefi_devices:
                if uefi_device is not None and 'iSCSI' in uefi_device:
                    iscsi_device = uefi_device
                    break
            if iscsi_device is None:
                msg = 'No UEFI iSCSI bootable device found on system.'
                raise exception.IloError(msg)
        except sushy.exceptions.SushyError as e:
            msg = ('Unable to get uefi target override devices. '
                   'Error %s') % (str(e))
            raise exception.IloError(msg)
        uefi_boot_settings = {
            'Boot': {'UefiTargetBootSourceOverride': iscsi_device}
        }
        self._conn.patch(self.path, data=uefi_boot_settings)
    elif device is None:
        device = sushy.BOOT_SOURCE_TARGET_NONE
    tenure = (sushy.BOOT_SOURCE_ENABLED_CONTINUOUS
              if persistent else sushy.BOOT_SOURCE_ENABLED_ONCE)
    self.set_system_boot_source(device, enabled=tenure)
@property
def pci_devices(self):
    """Collection of PCI devices attached to this system.

    Built once on first access; the cached object is reset on refresh.
    """
    if self._pci_devices is None:
        devices_path = utils.get_subresource_path_by(
            self, ['Oem', 'Hpe', 'Links', 'PCIDevices'])
        self._pci_devices = pci_device.PCIDeviceCollection(
            self._conn, devices_path)
    self._pci_devices.refresh(force=False)
    return self._pci_devices
@property
def secure_boot(self):
    """Reference to the `SecureBoot` instance of this system.

    Built once on first access; the cached object is reset on refresh.
    """
    if self._secure_boot is None:
        sb_path = utils.get_subresource_path_by(self, 'SecureBoot')
        self._secure_boot = secure_boot.SecureBoot(
            self._conn, sb_path, redfish_version=self.redfish_version)
    self._secure_boot.refresh(force=False)
    return self._secure_boot
def _do_refresh(self, force):
    """Do custom resource specific refresh activities

    On refresh, every cached sub-resource is marked stale, i.e. it is
    not greedily re-fetched unless ``force`` is set.
    """
    super(HPESystem, self)._do_refresh(force)
    cached_sub_resources = (
        self._bios_settings, self._pci_devices, self._secure_boot,
        self._ethernet_interfaces, self._smart_storage, self._storages,
        self._simple_storages, self._memory)
    for sub_resource in cached_sub_resources:
        if sub_resource is not None:
            sub_resource.invalidate(force)
def _get_hpe_sub_resource_collection_path(self, sub_res):
    """Resolve a sub-resource path, falling back to the OEM link.

    Tries the standard Redfish location first; when the attribute is
    missing, looks under ['Oem', 'Hpe', 'Links'] instead.
    """
    try:
        return utils.get_subresource_path_by(self, sub_res)
    except exception.MissingAttributeError:
        return utils.get_subresource_path_by(
            self, ['Oem', 'Hpe', 'Links', sub_res])
@property
def ethernet_interfaces(self):
    """Reference to the ``EthernetInterfaceCollection`` instance."""
    if self._ethernet_interfaces is None:
        collection_path = self._get_hpe_sub_resource_collection_path(
            'EthernetInterfaces')
        self._ethernet_interfaces = (
            ethernet_interface.EthernetInterfaceCollection(
                self._conn, collection_path,
                redfish_version=self.redfish_version))
    self._ethernet_interfaces.refresh(force=False)
    return self._ethernet_interfaces
@property
def smart_storage(self):
"""This property gets the object for smart storage.
This property gets the object for smart storage.
There is no collection for smart storages.
:returns: an instance of smart storage
"""
if self._smart_storage is None:
self._smart_storage = hpe_smart_storage.HPESmartStorage(
self._conn, utils.get_subresource_path_by(
self, ['Oem', 'Hpe', 'Links', 'SmartStorage']),
redfish_version=self.redfish_version)
self._smart_storage.refresh(force=False)
return self._smart_storage
@property
def storages(self):
"""This property gets the list of instances for Storages
This property gets the list of instances for Storages
:returns: a list of instances of Storages
"""
if self._storages is None:
self._storages = storage.StorageCollection(
self._conn, utils.get_subresource_path_by(self, 'Storage'),
redfish_version=self.redfish_version)
self._storages.refresh(force=False)
return self._storages
@property
def simple_storages(self):
"""This property gets the list of instances for SimpleStorages
:returns: a list of instances of SimpleStorages
"""
if self._simple_storages is None:
self._simple_storages = simple_storage.SimpleStorageCollection(
self._conn, utils.get_subresource_path_by(
self, 'SimpleStorage'),
redfish_version=self.redfish_version)
self._simple_storages.refresh(force=False)
return self._simple_storages
@property
def memory(self):
"""Property to provide reference to `MemoryCollection` instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
"""
if self._memory is None:
self._memory = memory.MemoryCollection(
self._conn, utils.get_subresource_path_by(
self, 'Memory'),
redfish_version=self.redfish_version)
self._memory.refresh(force=False)
return self._memory
def get_smart_storage_config(self, smart_storage_config_url):
"""Returns a SmartStorageConfig Instance for each controller."""
return (smart_storage_config.
HPESmartStorageConfig(self._conn, smart_storage_config_url,
redfish_version=self.redfish_version))
def _get_smart_storage_config_by_controller_model(self, controller_model):
"""Returns a SmartStorageConfig Instance for controller by model.
:returns: SmartStorageConfig Instance for controller
"""
ac = self.smart_storage.array_controllers.array_controller_by_model(
controller_model)
for ssc_id in self.smart_storage_config_identities:
ssc_obj = self.get_smart_storage_config(ssc_id)
if ac.location == ssc_obj.location:
return ssc_obj
def check_smart_storage_config_ids(self):
"""Check SmartStorageConfig controllers is there in hardware.
:raises: IloError, on an error from iLO.
"""
if self.smart_storage_config_identities is None:
msg = ('The Redfish controller failed to get the '
'SmartStorageConfig controller configurations.')
LOG.debug(msg)
raise exception.IloError(msg)
def delete_raid(self):
"""Delete the raid configuration on the hardware.
Loops through each SmartStorageConfig controller and clears the
raid configuration.
:raises: IloError, on an error from iLO.
"""
self.check_smart_storage_config_ids()
any_exceptions = []
ld_exc_count = 0
for config_id in self.smart_storage_config_identities:
try:
ssc_obj = self.get_smart_storage_config(config_id)
ssc_obj.delete_raid()
except exception.IloLogicalDriveNotFoundError as e:
ld_exc_count += 1
except sushy.exceptions.SushyError as e:
any_exceptions.append((config_id, str(e)))
if any_exceptions:
msg = ('The Redfish controller failed to delete the '
'raid configuration in one or more controllers with '
'Error: %(error)s' % {'error': str(any_exceptions)})
raise exception.IloError(msg)
if ld_exc_count == len(self.smart_storage_config_identities):
msg = ('No logical drives are found in any controllers. Nothing '
'to delete.')
raise exception.IloLogicalDriveNotFoundError(msg)
def _parse_raid_config_data(self, raid_config):
"""It will parse raid config data based on raid controllers
:param raid_config: A dictionary containing target raid configuration
data. This data stucture should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'controller':
'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:returns: A dictionary of controllers, each containing list of
their respected logical drives.
"""
default = (
self.smart_storage.array_controllers.get_default_controller.model)
controllers = {default: []}
for ld in raid_config['logical_disks']:
if 'controller' not in ld.keys():
controllers[default].append(ld)
else:
ctrl = ld['controller']
if ctrl not in controllers:
controllers[ctrl] = []
controllers[ctrl].append(ld)
return controllers
def create_raid(self, raid_config):
"""Create the raid configuration on the hardware.
:param raid_config: A dictionary containing target raid configuration
data. This data stucture should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'physical_disks': ['6I:1:5'],
'controller': 'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:raises: IloError, on an error from iLO.
"""
self.check_smart_storage_config_ids()
any_exceptions = []
controllers = self._parse_raid_config_data(raid_config)
# Creating raid on rest of the controllers
for controller in controllers:
try:
config = {'logical_disks': controllers[controller]}
ssc_obj = (
self._get_smart_storage_config_by_controller_model(
controller))
if ssc_obj:
ssc_obj.create_raid(config)
else:
members = (
self.smart_storage.array_controllers.get_members())
models = [member.model for member in members]
msg = ('Controller not found. Available controllers are: '
'%(models)s' % {'models': models})
any_exceptions.append((controller, msg))
except sushy.exceptions.SushyError as e:
any_exceptions.append((controller, str(e)))
if any_exceptions:
msg = ('The Redfish controller failed to create the '
'raid configuration for one or more controllers with '
'Error: %(error)s' % {'error': str(any_exceptions)})
raise exception.IloError(msg) | 0.621311 | 0.107531 |
__author__ = '<NAME>'
from LearningAlgorithm import *
class Backpropagation(LearningAlgorithm):
    """Classic backpropagation trainer for a layered feed-forward network."""

    def learn(self, learningRate, input, output, network):
        """Run one backpropagation update over the whole network.

        :param learningRate: double
        :param input: list, the raw input vector fed to layer 0
        :param output: list, the expected output values
        :param network: [[Neuron]], layers of neurons (layer 0 is the
            first hidden layer fed directly by *input*)
        :return: [[Neuron]], the network with updated weights

        Steps:
        1- Compute the error signal of every neuron, output layer first.
        2- Update each neuron's weights and bias with the delta rule.
        3- Return the network with the new weights.
        """
        # Backward pass. FIX: the original iterated range(len-1, 0, -1),
        # which skipped layer 0 even though layer 0's SignalError is
        # consumed by the weight-update pass below; iterate down to 0.
        for i in range(len(network) - 1, -1, -1):
            for j in range(len(network[i])):
                neuron = network[i][j]
                if i == len(network) - 1:
                    # Output layer: (target - actual) scaled by the
                    # derivative of the activation at the net input.
                    neuron.SignalError = (
                        (output[j] - neuron.Output) *
                        neuron.ActivationFunction.derivative(neuron.Net))
                else:
                    # Hidden layer: weighted sum of the next layer's
                    # error signals through the connecting weights.
                    downstream = 0.0
                    for k in range(len(network[i + 1])):
                        next_neuron = network[i + 1][k]
                        downstream += (next_neuron.Weights[j] *
                                       next_neuron.SignalError)
                    neuron.SignalError = (
                        downstream *
                        neuron.ActivationFunction.derivative(neuron.Net))
                network[i][j] = neuron
        # Delta-rule update of every weight and bias. (Dead local
        # 'x = len(network[i])' assignments from the original removed.)
        for i in range(len(network)):
            for j in range(len(network[i])):
                weights = network[i][j].Weights
                bias = network[i][j].Bias
                for k in range(len(weights)):
                    # Layer 0 is fed by the raw input vector; deeper
                    # layers by the previous layer's outputs.
                    source = (input[k] if i == 0
                              else network[i - 1][k].Output)
                    weights[k] += (learningRate *
                                   network[i][j].SignalError * source)
                bias += learningRate * network[i][j].SignalError
                network[i][j].update(weights, bias)
        return network
from LearningAlgorithm import *
class Backpropagation(LearningAlgorithm):
def learn(self, learningRate, input, output, network):
"""
:param learningRate: double
:param input: list
:param output: list
:param network: [[Neuron]]
:return: [[Neuron]]
Training the network with Backpropagation algorithm, it does the following
1- Calculate the error signal for each neuron on each layer
2- Update the weights of each neuron according to its update formula
3- Return the new weights of the whole network
"""
for i in range(len(network) - 1, 0, -1):
for j in range(0, len(network[i])):
currentNeuron = network[i][j]
if i == len(network) - 1:
currentNeuron.SignalError = (output[j] - currentNeuron.Output) * \
currentNeuron.ActivationFunction.derivative(currentNeuron.Net)
else:
summation = 0.0
for k in range(0, len(network[i + 1])):
nextNeuron = network[i + 1][k]
summation += (nextNeuron.Weights[j] * nextNeuron.SignalError)
currentNeuron.SignalError = summation * currentNeuron.ActivationFunction.derivative(
currentNeuron.Net)
network[i][j] = currentNeuron
for i in range(0, len(network)):
for j in range(0, len(network[i])):
x = len(network[i])
currentWeights = network[i][j].Weights
currentBias = network[i][j].Bias
for k in range(0, len(currentWeights)):
if i == 0:
currentWeights[k] += learningRate * network[i][j].SignalError * input[k]
else:
currentWeights[k] += learningRate * network[i][j].SignalError * network[i - 1][k].Output
currentBias += learningRate * network[i][j].SignalError
network[i][j].update(currentWeights, currentBias)
x = len(network[i])
return network | 0.717408 | 0.653922 |
from variational_clustering.clustering import furthest_init
from variational_clustering.clustering import make_faces
from variational_clustering.clustering import k_means
from directional_clustering.clustering.kmeans import KMeans
from directional_clustering.fields import VectorField
__all__ = ["VariationalKMeans"]
class VariationalKMeans(KMeans):
    """
    The variational shape approximation method for vector clustering.

    Parameters
    ----------
    mesh : `directional_clustering.mesh.MeshPlus`
        A reference mesh.
    vector_field : `directional_clustering.fields.VectorField`
        The vector field to cluster.
    n_clusters : `int`
        The number of clusters to generate.
    iters : `int`
        The iterations to run the algorithm for.
    tol : `float`
        The tolerance to declare convergence.

    Notes
    -----
    This method normalizes all vectors before doing clustering.

    References
    ----------
    [1] Cohen-Steiner D., Alliez P., Desbrun M. (2004). Variational Shape
    Approximation. RR-5371, INRIA. 2004, pp.29. inria-00070632
    """

    def __init__(self, mesh, vector_field, n_clusters, iters, tol):
        # parent class constructor
        args = mesh, vector_field, n_clusters, iters, tol
        super(VariationalKMeans, self).__init__(*args)

        # internal flag to control the cluster merge/split heuristic
        self.merge_split = True

        # populated by _create_seeds()
        self._initial_clusters = None
        self._faces = None

        # create seeds
        self._create_seeds()

    def cluster(self):
        """
        Cluster a vector field.

        Notes
        -----
        It sets `self._clustered_field`, `self._labels`, `self._centers`,
        and `self._loss`.  Returns `None`.
        """
        # do clustering
        cluster_log = k_means(self._initial_clusters,
                              self._faces,
                              self.iters,
                              self.merge_split)

        # last chunk in the cluster log holds the final assignment
        final_clusters = cluster_log.pop()

        # create a new vector field
        clustered_field = VectorField()
        clustered_labels = {}
        centers = {}

        # fill arrays with results
        # TODO: Refactor this block!
        loss = 0
        for i, cluster in final_clusters.items():
            centroid = cluster.proxy
            centers[i] = centroid
            loss += cluster.distortion
            for fkey in cluster.faces_keys:
                clustered_field.add_vector(fkey, centroid)
                clustered_labels[fkey] = cluster.id

        # assign arrays as attributes
        self._clustered_field = clustered_field
        self._labels = clustered_labels
        self._centers = centers
        self._loss = loss

    def _create_seeds(self):
        """
        Find the initial seeds for clustering using a farthest-point strategy.

        Notes
        -----
        This is a private method.
        It internally sets `self._faces` and `self._initial_clusters`.
        Returns `None`.
        """
        vectors = {key: vector for key, vector in self.vector_field.items()}
        self._faces = make_faces(self.mesh, vectors)
        self._initial_clusters = furthest_init(self.n_clusters,
                                               self._faces).pop()
from variational_clustering.clustering import make_faces
from variational_clustering.clustering import k_means
from directional_clustering.clustering.kmeans import KMeans
from directional_clustering.fields import VectorField
__all__ = ["VariationalKMeans"]
class VariationalKMeans(KMeans):
"""
The variational shape approximation method for vector clustering.
Parameters
----------
mesh : `directional_clustering.mesh.MeshPlus`
A reference mesh.
vector_field : `directional_clustering.fields.VectorField`
The vector field to cluster.
n_clusters : `int`
The number of clusters to generate.
iters : `int`
The iterations to run the algorithm for.
tol : `float`
The tolerance to declare convergence.
Notes
-----
This method normalizes all vectors before doing clustering.
References
----------
[1] <NAME>., <NAME>., <NAME>. (2004). Variational Shape Approximation.
RR-5371, INRIA. 2004, pp.29. inria-00070632
"""
def __init__(self, mesh, vector_field, n_clusters, iters, tol):
# parent class constructor
args = mesh, vector_field, n_clusters, iters, tol
super(VariationalKMeans, self).__init__(*args)
# internal flag to control cluster splitting heuristic
self.merge_split = True
# to be set after initialization
self._initial_clusters = None
self._faces = None
# create seeds
self._create_seeds()
def cluster(self):
"""
Cluster a vector field.
Notes
-----
It sets `self._clustered_field`, `self_labels`, `self.centers`, and `self.loss`.
Returns `None`.
"""
# do clustering
cluster_log = k_means(self._initial_clusters,
self._faces,
self.iters,
self.merge_split)
# last chunk in the cluster log
final_clusters = cluster_log.pop()
# create a new vector field
clustered_field = VectorField()
clustered_labels = {}
centers = {}
# fill arrays with results
# TODO: Refactor this block!
loss = 0
for i, cluster in final_clusters.items():
centroid = cluster.proxy
centers[i] = centroid
loss += cluster.distortion
for fkey in cluster.faces_keys:
clustered_field.add_vector(fkey, centroid)
clustered_labels[fkey] = cluster.id
# assign arrays as attributes
self._clustered_field = clustered_field
self._labels = clustered_labels
self._centers = centers
self._loss = loss
def _create_seeds(self):
"""
Find the initial seeds for clustering using a farthest-point strategy.
Notes
-----
This is a private method.
It internally sets `self._faces` and `self._initial_clusters`.
Returns `None`.
"""
vectors = {key: vector for key, vector in self.vector_field.items()}
self._faces = make_faces(self.mesh, vectors)
self._initial_clusters = furthest_init(self.n_clusters, self._faces).pop() | 0.916465 | 0.633524 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # NOTE: auto-generated Django migration (initial schema for the
    # 'seisnet' app). Do not hand-edit an applied migration; create a
    # new migration for schema changes instead.
    # (A trailing dataset artifact fused onto the closing bracket of
    # 'operations' was removed; the schema content is unchanged.)

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CarePayment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('unit_price', models.FloatField(default=150.0, verbose_name='看护费(元/月)')),
                ('start_pay', models.DateField(verbose_name='支付开始日期(年-月)')),
                ('end_pay', models.DateField(verbose_name='支付截止日期(年-月)')),
                ('pay_date', models.DateField(verbose_name='支付日期')),
            ],
            options={
                'verbose_name': '看护费支付信息',
                'verbose_name_plural': '看护费支付信息',
            },
        ),
        migrations.CreateModel(
            name='Caretaker',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='姓名')),
                ('gender', models.PositiveSmallIntegerField(choices=[(0, '男'), (1, '女')], default=0, verbose_name='性别')),
                ('id_card', models.CharField(max_length=64, verbose_name='身份证号')),
                ('address', models.CharField(max_length=256, verbose_name='地址')),
                ('status', models.PositiveSmallIntegerField(choices=[(1, '在看护'), (2, '曾看护'), (3, '中断'), (4, '其它')], verbose_name='状态')),
                ('start_time', models.DateField(blank=True, null=True, verbose_name='开始时间')),
                ('end_time', models.DateField(blank=True, null=True, verbose_name='结束时间')),
                ('remark', models.CharField(blank=True, max_length=256, null=True, verbose_name='备注')),
                ('is_main', models.BooleanField(default=True, verbose_name='主要看护人')),
            ],
            options={
                'verbose_name': '看护人信息',
                'verbose_name_plural': '看护人信息',
            },
        ),
        migrations.CreateModel(
            name='Network',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=10, unique=True, verbose_name='台网代码')),
                ('name', models.CharField(max_length=64, verbose_name='台网名称')),
                ('start_time', models.DateField(blank=True, null=True, verbose_name='开始时间')),
                ('end_time', models.DateField(blank=True, null=True, verbose_name='结束时间')),
                ('min_longitude', models.FloatField(blank=True, null=True, verbose_name='台网最小经度')),
                ('max_longitude', models.FloatField(blank=True, null=True, verbose_name='台网最大经度')),
                ('min_latitude', models.FloatField(blank=True, null=True, verbose_name='台网最小纬度')),
                ('max_latitude', models.FloatField(blank=True, null=True, verbose_name='台网最大纬度')),
                ('status', models.PositiveSmallIntegerField(choices=[(0, '运行'), (1, '测试'), (2, '下线')], default=0, verbose_name='台网状体')),
                ('describe', models.TextField(blank=True, null=True, verbose_name='台网描述')),
                ('c_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('m_time', models.DateTimeField(auto_now=True, verbose_name='更新日期')),
            ],
            options={
                'verbose_name': '台网信息',
                'verbose_name_plural': '台网信息',
            },
        ),
        migrations.CreateModel(
            name='Phone',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number', models.CharField(max_length=20, verbose_name='号码')),
                ('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='seisnet.Caretaker', verbose_name='所有者')),
            ],
        ),
        migrations.CreateModel(
            name='Station',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=10, unique=True, verbose_name='台站代码')),
                ('en_name', models.CharField(max_length=64, verbose_name='台站名称(英文)')),
                ('zh_name', models.CharField(max_length=64, verbose_name='台站名称(中文)')),
                ('longitude', models.FloatField(verbose_name='台站经度')),
                ('latitude', models.FloatField(verbose_name='台站纬度')),
                ('altitude', models.FloatField(verbose_name='台站高程')),
                ('status', models.PositiveSmallIntegerField(choices=[(0, '运行'), (1, '测试'), (2, '故障'), (3, '下线')], default=0, verbose_name='台站状态')),
                ('describe', models.TextField(blank=True, null=True, verbose_name='台站描述')),
                ('location', models.TextField(blank=True, null=True, verbose_name='位置描述')),
                ('c_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('m_time', models.DateTimeField(auto_now=True, verbose_name='更新日期')),
                ('network', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='seisnet.Network', verbose_name='所属台网')),
            ],
            options={
                'verbose_name': '台站信息',
                'verbose_name_plural': '台站信息',
            },
        ),
        migrations.AddField(
            model_name='caretaker',
            name='care_station',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='seisnet.Station', verbose_name='看护的台站'),
        ),
        migrations.AddField(
            model_name='carepayment',
            name='caretaker',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='seisnet.Caretaker', verbose_name='看护人'),
        ),
    ]
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``seisnet`` app.

    Creates Network, Station, Caretaker, CarePayment and Phone, then wires
    the cross-model foreign keys.  Auto-generated by ``makemigrations``;
    do not edit field definitions by hand — add a follow-up migration.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Payment records for station caretakers; the FK to Caretaker is
        # added at the bottom, once Caretaker exists.
        migrations.CreateModel(
            name='CarePayment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('unit_price', models.FloatField(default=150.0, verbose_name='看护费(元/月)')),
                ('start_pay', models.DateField(verbose_name='支付开始日期(年-月)')),
                ('end_pay', models.DateField(verbose_name='支付截止日期(年-月)')),
                ('pay_date', models.DateField(verbose_name='支付日期')),
            ],
            options={
                'verbose_name': '看护费支付信息',
                'verbose_name_plural': '看护费支付信息',
            },
        ),
        migrations.CreateModel(
            name='Caretaker',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='姓名')),
                ('gender', models.PositiveSmallIntegerField(choices=[(0, '男'), (1, '女')], default=0, verbose_name='性别')),
                ('id_card', models.CharField(max_length=64, verbose_name='身份证号')),
                ('address', models.CharField(max_length=256, verbose_name='地址')),
                ('status', models.PositiveSmallIntegerField(choices=[(1, '在看护'), (2, '曾看护'), (3, '中断'), (4, '其它')], verbose_name='状态')),
                ('start_time', models.DateField(blank=True, null=True, verbose_name='开始时间')),
                ('end_time', models.DateField(blank=True, null=True, verbose_name='结束时间')),
                ('remark', models.CharField(blank=True, max_length=256, null=True, verbose_name='备注')),
                ('is_main', models.BooleanField(default=True, verbose_name='主要看护人')),
            ],
            options={
                'verbose_name': '看护人信息',
                'verbose_name_plural': '看护人信息',
            },
        ),
        migrations.CreateModel(
            name='Network',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=10, unique=True, verbose_name='台网代码')),
                ('name', models.CharField(max_length=64, verbose_name='台网名称')),
                ('start_time', models.DateField(blank=True, null=True, verbose_name='开始时间')),
                ('end_time', models.DateField(blank=True, null=True, verbose_name='结束时间')),
                ('min_longitude', models.FloatField(blank=True, null=True, verbose_name='台网最小经度')),
                ('max_longitude', models.FloatField(blank=True, null=True, verbose_name='台网最大经度')),
                ('min_latitude', models.FloatField(blank=True, null=True, verbose_name='台网最小纬度')),
                ('max_latitude', models.FloatField(blank=True, null=True, verbose_name='台网最大纬度')),
                # NOTE(review): '台网状体' looks like a typo for '台网状态';
                # fixing it must happen in the model plus a new migration,
                # so it is left as-is here.
                ('status', models.PositiveSmallIntegerField(choices=[(0, '运行'), (1, '测试'), (2, '下线')], default=0, verbose_name='台网状体')),
                ('describe', models.TextField(blank=True, null=True, verbose_name='台网描述')),
                ('c_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('m_time', models.DateTimeField(auto_now=True, verbose_name='更新日期')),
            ],
            options={
                'verbose_name': '台网信息',
                'verbose_name_plural': '台网信息',
            },
        ),
        migrations.CreateModel(
            name='Phone',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number', models.CharField(max_length=20, verbose_name='号码')),
                ('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='seisnet.Caretaker', verbose_name='所有者')),
            ],
        ),
        migrations.CreateModel(
            name='Station',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=10, unique=True, verbose_name='台站代码')),
                ('en_name', models.CharField(max_length=64, verbose_name='台站名称(英文)')),
                ('zh_name', models.CharField(max_length=64, verbose_name='台站名称(中文)')),
                ('longitude', models.FloatField(verbose_name='台站经度')),
                ('latitude', models.FloatField(verbose_name='台站纬度')),
                ('altitude', models.FloatField(verbose_name='台站高程')),
                ('status', models.PositiveSmallIntegerField(choices=[(0, '运行'), (1, '测试'), (2, '故障'), (3, '下线')], default=0, verbose_name='台站状态')),
                ('describe', models.TextField(blank=True, null=True, verbose_name='台站描述')),
                ('location', models.TextField(blank=True, null=True, verbose_name='位置描述')),
                ('c_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('m_time', models.DateTimeField(auto_now=True, verbose_name='更新日期')),
                ('network', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='seisnet.Network', verbose_name='所属台网')),
            ],
            options={
                'verbose_name': '台站信息',
                'verbose_name_plural': '台站信息',
            },
        ),
        # Cross-model FKs, added after both endpoint models exist.
        migrations.AddField(
            model_name='caretaker',
            name='care_station',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='seisnet.Station', verbose_name='看护的台站'),
        ),
        migrations.AddField(
            model_name='carepayment',
            name='caretaker',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='seisnet.Caretaker', verbose_name='看护人'),
        ),
    ]  # dataset metadata: 0.404037 | 0.16378
"""Tests for TPU Embeddings mid level API on TPU."""
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework.tensor_shape import TensorShape
from tensorflow.python.platform import test
from tensorflow.python.tpu.tests import tpu_embedding_base_test
class TPUEmbeddingTest(tpu_embedding_base_test.TPUEmbeddingBaseTest):
  """Checks TPUEmbedding activations for sequence (max_sequence_length) features."""

  @parameterized.parameters([True, False])
  def test_sequence_feature(self, is_sparse):
    """Sequence features dequeue with an extra sequence dimension."""
    seq_length = 3
    # Set the max_seq_length in feature config
    for feature in self.feature_config:
      feature.max_sequence_length = seq_length
    strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    # Same expectations for sparse and ragged inputs.
    if is_sparse:
      dataset = self._create_sparse_dataset(strategy)
    else:
      dataset = self._create_ragged_dataset(strategy)
    # experimental_fetch_to_device=False keeps data on the host so the
    # mid level API can enqueue it.
    feature_iter = iter(
        strategy.experimental_distribute_dataset(
            dataset,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False)))

    @def_function.function
    def test_fn():

      def step():
        return mid_level_api.dequeue()

      # Enqueue (inference mode) before running the step that dequeues.
      mid_level_api.enqueue(next(feature_iter), training=False)
      return strategy.run(step)

    output = test_fn()
    # Per-replica activation shapes; middle axis is seq_length (3).
    self.assertEqual(
        self._get_replica_numpy(output[0], strategy, 0).shape, (2, 3, 4))
    self.assertEqual(
        self._get_replica_numpy(output[1], strategy, 0).shape, (2, 3, 4))
    self.assertEqual(
        self._get_replica_numpy(output[2], strategy, 0).shape, (2, 3, 2))

  @parameterized.parameters([True, False])
  def test_sequence_feature_with_build(self, is_updated_shape):
    """build() with either (batch, seq, n) or (batch, n) shapes yields the same activations."""
    seq_length = 3
    # Set the max_seq_length in feature config
    for feature in self.feature_config:
      feature.max_sequence_length = seq_length
    strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    dataset = self._create_sparse_dataset(strategy)
    feature_iter = iter(
        strategy.experimental_distribute_dataset(
            dataset,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False)))
    # Explicitly build before enqueue, with either shape convention.
    if is_updated_shape:
      mid_level_api.build([
          TensorShape([self.batch_size, seq_length, 2]),
          TensorShape([self.batch_size, seq_length, 2]),
          TensorShape([self.batch_size, seq_length, 3])
      ])
    else:
      mid_level_api.build([
          TensorShape([self.batch_size, 2]),
          TensorShape([self.batch_size, 2]),
          TensorShape([self.batch_size, 3])
      ])

    @def_function.function
    def test_fn():

      def step():
        return mid_level_api.dequeue()

      mid_level_api.enqueue(next(feature_iter), training=False)
      return strategy.run(step)

    output = test_fn()
    self.assertEqual(
        self._get_replica_numpy(output[0], strategy, 0).shape, (2, 3, 4))
    self.assertEqual(
        self._get_replica_numpy(output[1], strategy, 0).shape, (2, 3, 4))
    self.assertEqual(
        self._get_replica_numpy(output[2], strategy, 0).shape, (2, 3, 2))
# Script entry point: enable TF2 behavior before running the test suite.
if __name__ == '__main__':
  v2_compat.enable_v2_behavior()
  test.main()
# source: tensorflow/python/tpu/tests/tpu_embedding_v2_sequence_feature_test.py
"""Tests for TPU Embeddings mid level API on TPU."""
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework.tensor_shape import TensorShape
from tensorflow.python.platform import test
from tensorflow.python.tpu.tests import tpu_embedding_base_test
class TPUEmbeddingTest(tpu_embedding_base_test.TPUEmbeddingBaseTest):
  """Checks TPUEmbedding activations for sequence (max_sequence_length) features.

  Refactor: the two tests shared the set-up, the enqueue/dequeue function
  and the shape assertions verbatim; that duplication now lives in private
  helpers.  Test names and signatures are unchanged.
  """

  def _set_max_sequence_length(self, seq_length):
    """Mark every configured feature as a sequence feature of this length."""
    for feature in self.feature_config:
      feature.max_sequence_length = seq_length

  def _make_feature_iterator(self, strategy, dataset):
    """Distribute `dataset`, keeping data on the host for host-side enqueue."""
    return iter(
        strategy.experimental_distribute_dataset(
            dataset,
            options=distribute_lib.InputOptions(
                experimental_fetch_to_device=False)))

  def _dequeue_once(self, strategy, mid_level_api, feature_iter):
    """Enqueue one batch (inference mode) and return the dequeued output."""

    @def_function.function
    def test_fn():

      def step():
        return mid_level_api.dequeue()

      # Enqueue before running the step that dequeues.
      mid_level_api.enqueue(next(feature_iter), training=False)
      return strategy.run(step)

    return test_fn()

  def _assert_sequence_shapes(self, output, strategy):
    """Per-replica activation shapes; middle axis is the sequence length."""
    self.assertEqual(
        self._get_replica_numpy(output[0], strategy, 0).shape, (2, 3, 4))
    self.assertEqual(
        self._get_replica_numpy(output[1], strategy, 0).shape, (2, 3, 4))
    self.assertEqual(
        self._get_replica_numpy(output[2], strategy, 0).shape, (2, 3, 2))

  @parameterized.parameters([True, False])
  def test_sequence_feature(self, is_sparse):
    """Sequence features dequeue with an extra sequence dimension."""
    seq_length = 3
    self._set_max_sequence_length(seq_length)
    strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    if is_sparse:
      dataset = self._create_sparse_dataset(strategy)
    else:
      dataset = self._create_ragged_dataset(strategy)
    feature_iter = self._make_feature_iterator(strategy, dataset)
    output = self._dequeue_once(strategy, mid_level_api, feature_iter)
    self._assert_sequence_shapes(output, strategy)

  @parameterized.parameters([True, False])
  def test_sequence_feature_with_build(self, is_updated_shape):
    """build() with either (batch, seq, n) or (batch, n) shapes yields the same activations."""
    seq_length = 3
    self._set_max_sequence_length(seq_length)
    strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    dataset = self._create_sparse_dataset(strategy)
    feature_iter = self._make_feature_iterator(strategy, dataset)
    if is_updated_shape:
      mid_level_api.build([
          TensorShape([self.batch_size, seq_length, 2]),
          TensorShape([self.batch_size, seq_length, 2]),
          TensorShape([self.batch_size, seq_length, 3])
      ])
    else:
      mid_level_api.build([
          TensorShape([self.batch_size, 2]),
          TensorShape([self.batch_size, 2]),
          TensorShape([self.batch_size, 3])
      ])
    output = self._dequeue_once(strategy, mid_level_api, feature_iter)
    self._assert_sequence_shapes(output, strategy)
# Script entry point: enable TF2 behavior before running the test suite.
if __name__ == '__main__':
  v2_compat.enable_v2_behavior()
  test.main()  # dataset metadata: 0.874077 | 0.515559
import re # Regular expression operations
import wikipedia # Python library that makes it easy to access and parse data from Wikipedia
import wikipedia.exceptions # Exceptions of wikipedia library
import requests.exceptions # HTTP for Humans
from ava.utilities import nostderr # Submodule of Dragonfire to provide various utilities
class FindInWikiCommand():
"""Class to contains searching in wikipedia process with simply if-else struct.
"""
    def first_compare(self, doc, h, user_answering, userin, user_prefix):
        """Handle the initial "search/find ... wikipedia" command.

        Args:
            doc: parsed doc of the user's command (from __init__.py).
            h: doc helper (lemma matcher) from __init__.py.
            user_answering: cross-turn state dict for pending questions.
            userin: :class:`ava.utilities.TextToAction` instance.

        Keyword Args:
            user_prefix: user's preferred titles.
        """
        if (h.check_lemma("search") or h.check_lemma("find")) and h.check_lemma("wikipedia"):
            with nostderr():
                # Query = every token that is not part of the trigger
                # phrase and not a stop word.
                search_query = ""
                for token in doc:
                    if not (
                            token.lemma_ == "search" or token.lemma_ == "find" or token.lemma_ == "wikipedia" or token.is_stop):
                        search_query += ' ' + token.text
                search_query = search_query.strip()
                if search_query:
                    try:
                        wikiresult = wikipedia.search(search_query)
                        if len(wikiresult) == 0:
                            userin.say(
                                "Sorry, " + user_prefix + ". But I couldn't find anything about " + search_query + " in Wikipedia.")
                            return True
                        wikipage = wikipedia.page(wikiresult[0])
                        # Drop non-ASCII chars and parenthesised asides before speaking.
                        wikicontent = "".join([i if ord(i) < 128 else ' ' for i in wikipage.content])
                        wikicontent = re.sub(r'\([^)]*\)', '', wikicontent)
                        cmds = [{'distro': 'All', 'name': ["sensible-browser", wikipage.url]}]
                        userin.execute(cmds, search_query)
                        return userin.say(wikicontent, cmd=["sensible-browser", wikipage.url])
                    except requests.exceptions.ConnectionError:
                        cmds = [{'distro': 'All', 'name': [" "]}]
                        userin.execute(cmds, "Wikipedia connection error.")
                        return userin.say("Sorry, " + user_prefix + ". But I'm unable to connect to Wikipedia servers.")
                    except wikipedia.exceptions.DisambiguationError as disambiguation:
                        # Remember up to three options so second_compare can
                        # resolve a follow-up "THE FIRST/SECOND/THIRD" answer.
                        user_answering['status'] = True
                        user_answering['for'] = 'wikipedia'
                        user_answering['reason'] = 'disambiguation'
                        user_answering['options'] = disambiguation.options[:3]
                        notify = "Wikipedia disambiguation. Which one of these you meant?:\n - " + disambiguation.options[0]
                        msg = user_prefix + ", there is a disambiguation. Which one of these you meant? " + disambiguation.options[0]
                        for option in disambiguation.options[1:3]:
                            msg += ", or " + option
                            notify += "\n - " + option
                        notify += '\nSay, for example: "THE FIRST ONE" to choose.'
                        cmds = [{'distro': 'All', 'name': [" "]}]
                        userin.execute(cmds, notify)
                        return userin.say(msg)
                    except BaseException:
                        # NOTE(review): BaseException also swallows
                        # KeyboardInterrupt/SystemExit — consider Exception.
                        pass
        return None
def second_compare(self, com, user_answering, userin, user_prefix):
"""Method to ava's first command struct of searching in wikipedia ability.
Args:
com (str): User's command.
user_answering: User answering string array.
userin: :class:`ava.utilities.TextToAction` instance.
user_prefix: user's preferred titles.
"""
if user_answering['status'] and user_answering['for'] == 'wikipedia':
if com.startswith("FIRST") or com.startswith("THE FIRST") or com.startswith("SECOND") or com.startswith(
"THE SECOND") or com.startswith("THIRD") or com.startswith("THE THIRD"):
user_answering['status'] = False
selection = None
if com.startswith("FIRST") or com.startswith("THE FIRST"):
selection = 0
elif com.startswith("SECOND") or com.startswith("THE SECOND"):
selection = 1
elif com.startswith("THIRD") or com.startswith("THE THIRD"):
selection = 2
with nostderr():
search_query = user_answering['options'][selection]
try:
wikiresult = wikipedia.search(search_query)
if len(wikiresult) == 0:
userin.say(
"Sorry, " + user_prefix + ". But I couldn't find anything about " + search_query + " in Wikipedia.")
return True
wikipage = wikipedia.page(wikiresult[0])
wikicontent = "".join([i if ord(i) < 128 else ' ' for i in wikipage.content])
wikicontent = re.sub(r'\([^)]*\)', '', wikicontent)
cmds = [{'distro': 'All', 'name': ["sensible-browser", wikipage.url]}]
userin.execute(cmds, search_query)
return userin.say(wikicontent, cmd=["sensible-browser", wikipage.url])
except requests.exceptions.ConnectionError:
cmds = [{'distro': 'All', 'name': [" "]}]
userin.execute(cmds, "Wikipedia connection error.")
return userin.say(
"Sorry, " + user_prefix + ". But I'm unable to connect to Wikipedia servers.")
except Exception:
return False
return None | ava/commands/find_in_wikipedia.py | import re # Regular expression operations
import wikipedia # Python library that makes it easy to access and parse data from Wikipedia
import wikipedia.exceptions # Exceptions of wikipedia library
import requests.exceptions # HTTP for Humans
from ava.utilities import nostderr # Submodule of Dragonfire to provide various utilities
class FindInWikiCommand():
    """Wikipedia search ability, implemented as a simple if-else command chain."""

    def first_compare(self, doc, h, user_answering, userin, user_prefix):
        """Handle the initial "search/find ... wikipedia" command.

        Args:
            doc: parsed doc of the user's command (from __init__.py).
            h: doc helper (lemma matcher) from __init__.py.
            user_answering: cross-turn state dict for pending questions.
            userin: :class:`ava.utilities.TextToAction` instance.

        Keyword Args:
            user_prefix: user's preferred titles.
        """
        if (h.check_lemma("search") or h.check_lemma("find")) and h.check_lemma("wikipedia"):
            with nostderr():
                # Query = every token that is not part of the trigger
                # phrase and not a stop word.
                search_query = ""
                for token in doc:
                    if not (
                            token.lemma_ == "search" or token.lemma_ == "find" or token.lemma_ == "wikipedia" or token.is_stop):
                        search_query += ' ' + token.text
                search_query = search_query.strip()
                if search_query:
                    try:
                        wikiresult = wikipedia.search(search_query)
                        if len(wikiresult) == 0:
                            userin.say(
                                "Sorry, " + user_prefix + ". But I couldn't find anything about " + search_query + " in Wikipedia.")
                            return True
                        wikipage = wikipedia.page(wikiresult[0])
                        # Drop non-ASCII chars and parenthesised asides before speaking.
                        wikicontent = "".join([i if ord(i) < 128 else ' ' for i in wikipage.content])
                        wikicontent = re.sub(r'\([^)]*\)', '', wikicontent)
                        cmds = [{'distro': 'All', 'name': ["sensible-browser", wikipage.url]}]
                        userin.execute(cmds, search_query)
                        return userin.say(wikicontent, cmd=["sensible-browser", wikipage.url])
                    except requests.exceptions.ConnectionError:
                        cmds = [{'distro': 'All', 'name': [" "]}]
                        userin.execute(cmds, "Wikipedia connection error.")
                        return userin.say("Sorry, " + user_prefix + ". But I'm unable to connect to Wikipedia servers.")
                    except wikipedia.exceptions.DisambiguationError as disambiguation:
                        # Remember up to three options so second_compare can
                        # resolve a follow-up "THE FIRST/SECOND/THIRD" answer.
                        user_answering['status'] = True
                        user_answering['for'] = 'wikipedia'
                        user_answering['reason'] = 'disambiguation'
                        user_answering['options'] = disambiguation.options[:3]
                        notify = "Wikipedia disambiguation. Which one of these you meant?:\n - " + disambiguation.options[0]
                        msg = user_prefix + ", there is a disambiguation. Which one of these you meant? " + disambiguation.options[0]
                        for option in disambiguation.options[1:3]:
                            msg += ", or " + option
                            notify += "\n - " + option
                        notify += '\nSay, for example: "THE FIRST ONE" to choose.'
                        cmds = [{'distro': 'All', 'name': [" "]}]
                        userin.execute(cmds, notify)
                        return userin.say(msg)
                    except Exception:
                        # Was `except BaseException`, which also swallowed
                        # KeyboardInterrupt/SystemExit; best-effort intent kept.
                        pass
        return None

    def second_compare(self, com, user_answering, userin, user_prefix):
        """Resolve a pending Wikipedia disambiguation answer.

        Args:
            com (str): User's command.
            user_answering: cross-turn state dict set by first_compare.
            userin: :class:`ava.utilities.TextToAction` instance.
            user_prefix: user's preferred titles.
        """
        if user_answering['status'] and user_answering['for'] == 'wikipedia':
            if com.startswith("FIRST") or com.startswith("THE FIRST") or com.startswith("SECOND") or com.startswith(
                    "THE SECOND") or com.startswith("THIRD") or com.startswith("THE THIRD"):
                user_answering['status'] = False
                # Map the spoken ordinal to an index into the stored options.
                selection = None
                if com.startswith("FIRST") or com.startswith("THE FIRST"):
                    selection = 0
                elif com.startswith("SECOND") or com.startswith("THE SECOND"):
                    selection = 1
                elif com.startswith("THIRD") or com.startswith("THE THIRD"):
                    selection = 2
                with nostderr():
                    search_query = user_answering['options'][selection]
                    try:
                        wikiresult = wikipedia.search(search_query)
                        if len(wikiresult) == 0:
                            userin.say(
                                "Sorry, " + user_prefix + ". But I couldn't find anything about " + search_query + " in Wikipedia.")
                            return True
                        wikipage = wikipedia.page(wikiresult[0])
                        wikicontent = "".join([i if ord(i) < 128 else ' ' for i in wikipage.content])
                        wikicontent = re.sub(r'\([^)]*\)', '', wikicontent)
                        cmds = [{'distro': 'All', 'name': ["sensible-browser", wikipage.url]}]
                        userin.execute(cmds, search_query)
                        return userin.say(wikicontent, cmd=["sensible-browser", wikipage.url])
                    except requests.exceptions.ConnectionError:
                        cmds = [{'distro': 'All', 'name': [" "]}]
                        userin.execute(cmds, "Wikipedia connection error.")
                        return userin.say(
                            "Sorry, " + user_prefix + ". But I'm unable to connect to Wikipedia servers.")
                    except Exception:
                        return False
        return None  # dataset metadata: 0.516595 | 0.292608
import logging
import confluent_kafka
from oslo_utils import encodeutils
log = logging.getLogger(__name__)
class KafkaProducer(object):
"""Wrapper around asynchronous Kafka Producer"""
    def __init__(self, bootstrap_servers, **config):
        """
        Create new Producer wrapper instance.

        :param str bootstrap_servers: Initial list of brokers as a CSV
            list of broker host or host:port.
        :param config: Additional confluent-kafka configuration properties.
        """
        config['bootstrap.servers'] = bootstrap_servers
        self._producer = confluent_kafka.Producer(config)
    @staticmethod
    def delivery_report(err, msg):
        """
        Callback called once for each produced message to indicate the final
        delivery result. Triggered by poll() or flush().

        :param confluent_kafka.KafkaError err: Information about any error
            that occurred whilst producing the message.
        :param confluent_kafka.Message msg: Information about the message
            produced.
        :returns: None
        :raises: confluent_kafka.KafkaException
        """
        if err is not None:
            log.exception('Message delivery failed: {}'.format(err))
            raise confluent_kafka.KafkaException(err)
        else:
            log.debug('Message delivered to {} [{}]: {}'.format(
                msg.topic(), msg.partition(), msg.value()))
def publish(self, topic, messages, key=None, timeout=2):
"""
Publish messages to the topic.
:param str topic: Topic to produce messages to.
:param list(str) messages: List of message payloads.
:param str key: Message key.
:param float timeout: Maximum time to block in seconds.
:returns: Number of messages still in queue.
:rtype int
"""
if not isinstance(messages, list):
messages = [messages]
try:
for m in messages:
m = encodeutils.safe_encode(m, incoming='utf-8')
self._producer.produce(topic, m, key,
callback=KafkaProducer.delivery_report)
self._producer.poll(0)
return self._producer.flush(timeout)
except (BufferError, confluent_kafka.KafkaException,
NotImplementedError):
log.exception(u'Error publishing to {} topic.'.format(topic))
raise | monasca_common/confluent_kafka/producer.py |
import logging
import confluent_kafka
from oslo_utils import encodeutils
log = logging.getLogger(__name__)
class KafkaProducer(object):
    """Wrapper around asynchronous Kafka Producer"""

    def __init__(self, bootstrap_servers, **config):
        """
        Create new Producer wrapper instance.

        :param str bootstrap_servers: Initial list of brokers as a CSV
            list of broker host or host:port.
        :param config: Additional confluent-kafka configuration properties.
        """
        config['bootstrap.servers'] = bootstrap_servers
        self._producer = confluent_kafka.Producer(config)

    @staticmethod
    def delivery_report(err, msg):
        """
        Callback called once for each produced message to indicate the final
        delivery result. Triggered by poll() or flush().

        :param confluent_kafka.KafkaError err: Information about any error
            that occurred whilst producing the message.
        :param confluent_kafka.Message msg: Information about the message
            produced.
        :returns: None
        :raises: confluent_kafka.KafkaException
        """
        if err is not None:
            log.exception('Message delivery failed: {}'.format(err))
            raise confluent_kafka.KafkaException(err)
        else:
            log.debug('Message delivered to {} [{}]: {}'.format(
                msg.topic(), msg.partition(), msg.value()))

    def publish(self, topic, messages, key=None, timeout=2):
        """
        Publish messages to the topic.

        :param str topic: Topic to produce messages to.
        :param list(str) messages: List of message payloads (a single
            payload is also accepted and wrapped in a list).
        :param str key: Message key.
        :param float timeout: Maximum time to block in seconds.
        :returns: Number of messages still in queue.
        :rtype: int
        """
        if not isinstance(messages, list):
            messages = [messages]
        try:
            for m in messages:
                m = encodeutils.safe_encode(m, incoming='utf-8')
                self._producer.produce(topic, m, key,
                                       callback=KafkaProducer.delivery_report)
                # Serve queued delivery callbacks without blocking.
                self._producer.poll(0)
            return self._producer.flush(timeout)
        except (BufferError, confluent_kafka.KafkaException,
                NotImplementedError):
            log.exception(u'Error publishing to {} topic.'.format(topic))
            raise  # dataset metadata: 0.756088 | 0.104249
import os
import pytest
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from nagare.renderers import xml
from nagare.renderers import html_base as html
def test_parse1():
    """HeadRenderer: default Tag serializes as HTML, xml.Tag stays XML (self-closing)."""
    h = html.HeadRenderer()
    root = h.fromfile(StringIO('<html><body/></html>'))
    assert isinstance(root, html.Tag)
    assert root.tostring() == b'<html><body></body></html>'
    root = h.fromfile(StringIO('<html><body/></html>'), xml.Tag)
    assert isinstance(root, xml.Tag)
    assert root.tostring() == b'<html><body/></html>'
    root = h.fromstring('<html><body/></html>')
    assert isinstance(root, html.Tag)
    assert root.tostring() == b'<html><body></body></html>'
    root = h.fromstring('<html><body/></html>', xml.Tag)
    assert isinstance(root, xml.Tag)
    assert root.tostring() == b'<html><body/></html>'
def test_parse2():
    """Renderer: same fromfile/fromstring behavior as HeadRenderer (test_parse1)."""
    h = html.Renderer()
    root = h.fromfile(StringIO('<html><body/></html>'))
    assert isinstance(root, html.Tag)
    assert root.tostring() == b'<html><body></body></html>'
    root = h.fromfile(StringIO('<html><body/></html>'), xml.Tag)
    assert isinstance(root, xml.Tag)
    assert root.tostring() == b'<html><body/></html>'
    root = h.fromstring('<html><body/></html>')
    assert isinstance(root, html.Tag)
    assert root.tostring() == b'<html><body></body></html>'
    root = h.fromstring('<html><body/></html>', xml.Tag)
    assert isinstance(root, xml.Tag)
    assert root.tostring() == b'<html><body/></html>'
def test_parse3():
    """ XHTML namespace unit test - HTMLRender - parse_html - bad encoding """
    h = html.Renderer()
    filename = os.path.join(os.path.dirname(__file__), 'iso-8859.xml')
    # Wrong declared encoding must raise; the correct one must parse.
    with pytest.raises(UnicodeDecodeError):
        h.fromfile(filename, encoding='utf-8')
    h.fromfile(filename, encoding='iso8859-1')
def test_parse4():
    """Misplaced <body> inside <head> is re-parented per HTML parsing rules."""
    h = html.Renderer()
    root = h.fromstring('<html><head><body></body></head><html>')
    assert root.tostring() == b'<html><head></head><body></body></html>'
def test_parse5():
    """Bare text is wrapped in a full html/body/p document."""
    h = html.Renderer()
    root = h.fromstring('test')
    assert root.tostring() == b'<html><body><p>test</p></body></html>'
def test_parse6():
    """fromstring returns exactly the renderer-specific Tag class."""
    h = html.Renderer()
    root = h.fromstring('<a>text</a>')
    assert type(root) == html.Tag
    x = xml.Renderer()
    root = x.fromstring('<a>text</a>')
    assert type(root) == xml.Tag
def test_parse8():
    """fragment=True returns a tuple, even for a single element."""
    h = html.Renderer()
    root = h.fromstring('<a>text</a>', fragment=True)
    assert isinstance(root, tuple)
    assert len(root) == 1
    assert root[0].tostring() == b'<a>text</a>'
def test_parse9():
    """fragment=True preserves sibling elements in order."""
    h = html.Renderer()
    root = h.fromstring('<a>text</a><b>text</b>', fragment=True)
    assert isinstance(root, tuple)
    assert len(root) == 2
    assert root[0].tostring() == b'<a>text</a>'
    assert root[1].tostring() == b'<b>text</b>'
def test_parse10():
    """Fragment parse keeps leading text unless no_leading_text=True."""
    h = html.Renderer()
    root = h.fromstring('hello<a>text</a><b>text</b>', fragment=True)
    assert isinstance(root, tuple)
    assert len(root) == 3
    assert root[0] == b'hello'
    assert root[1].tostring() == b'<a>text</a>'
    assert root[2].tostring() == b'<b>text</b>'
    root = h.fromstring('hello<a>text</a><b>text</b>', fragment=True, no_leading_text=True)
    assert isinstance(root, tuple)
    assert len(root) == 2
    assert root[0].tostring() == b'<a>text</a>'
    assert root[1].tostring() == b'<b>text</b>'
# source: tests/test_parse.py
import os
import pytest
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from nagare.renderers import xml
from nagare.renderers import html_base as html
def test_parse1():
    """HeadRenderer: default Tag serializes as HTML, xml.Tag stays XML (self-closing)."""
    h = html.HeadRenderer()
    root = h.fromfile(StringIO('<html><body/></html>'))
    assert isinstance(root, html.Tag)
    assert root.tostring() == b'<html><body></body></html>'
    root = h.fromfile(StringIO('<html><body/></html>'), xml.Tag)
    assert isinstance(root, xml.Tag)
    assert root.tostring() == b'<html><body/></html>'
    root = h.fromstring('<html><body/></html>')
    assert isinstance(root, html.Tag)
    assert root.tostring() == b'<html><body></body></html>'
    root = h.fromstring('<html><body/></html>', xml.Tag)
    assert isinstance(root, xml.Tag)
    assert root.tostring() == b'<html><body/></html>'
def test_parse2():
    """Renderer: same fromfile/fromstring behavior as HeadRenderer (test_parse1)."""
    h = html.Renderer()
    root = h.fromfile(StringIO('<html><body/></html>'))
    assert isinstance(root, html.Tag)
    assert root.tostring() == b'<html><body></body></html>'
    root = h.fromfile(StringIO('<html><body/></html>'), xml.Tag)
    assert isinstance(root, xml.Tag)
    assert root.tostring() == b'<html><body/></html>'
    root = h.fromstring('<html><body/></html>')
    assert isinstance(root, html.Tag)
    assert root.tostring() == b'<html><body></body></html>'
    root = h.fromstring('<html><body/></html>', xml.Tag)
    assert isinstance(root, xml.Tag)
    assert root.tostring() == b'<html><body/></html>'
def test_parse3():
    """ XHTML namespace unit test - HTMLRender - parse_html - bad encoding """
    h = html.Renderer()
    filename = os.path.join(os.path.dirname(__file__), 'iso-8859.xml')
    # Wrong declared encoding must raise; the correct one must parse.
    with pytest.raises(UnicodeDecodeError):
        h.fromfile(filename, encoding='utf-8')
    h.fromfile(filename, encoding='iso8859-1')
def test_parse4():
    """Misplaced <body> inside <head> is re-parented per HTML parsing rules."""
    h = html.Renderer()
    root = h.fromstring('<html><head><body></body></head><html>')
    assert root.tostring() == b'<html><head></head><body></body></html>'
def test_parse5():
    """Bare text is wrapped in a full html/body/p document."""
    h = html.Renderer()
    root = h.fromstring('test')
    assert root.tostring() == b'<html><body><p>test</p></body></html>'
def test_parse6():
    """fromstring returns exactly the renderer-specific Tag class."""
    h = html.Renderer()
    root = h.fromstring('<a>text</a>')
    assert type(root) == html.Tag
    x = xml.Renderer()
    root = x.fromstring('<a>text</a>')
    assert type(root) == xml.Tag
def test_parse8():
    """fragment=True returns a tuple, even for a single element."""
    h = html.Renderer()
    root = h.fromstring('<a>text</a>', fragment=True)
    assert isinstance(root, tuple)
    assert len(root) == 1
    assert root[0].tostring() == b'<a>text</a>'
def test_parse9():
    """fragment=True preserves sibling elements in order."""
    h = html.Renderer()
    root = h.fromstring('<a>text</a><b>text</b>', fragment=True)
    assert isinstance(root, tuple)
    assert len(root) == 2
    assert root[0].tostring() == b'<a>text</a>'
    assert root[1].tostring() == b'<b>text</b>'
def test_parse10():
    """Fragment parse keeps leading text unless no_leading_text=True."""
    h = html.Renderer()
    root = h.fromstring('hello<a>text</a><b>text</b>', fragment=True)
    assert isinstance(root, tuple)
    assert len(root) == 3
    assert root[0] == b'hello'
    assert root[1].tostring() == b'<a>text</a>'
    assert root[2].tostring() == b'<b>text</b>'
    root = h.fromstring('hello<a>text</a><b>text</b>', fragment=True, no_leading_text=True)
    assert isinstance(root, tuple)
    assert len(root) == 2
    assert root[0].tostring() == b'<a>text</a>'
    assert root[1].tostring() == b'<b>text</b>'
# dataset metadata: 0.591841 | 0.320369
import datetime
from sqlalchemy import create_engine
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import sessionmaker
from configs import DatabaseConfig
from database.models import Base, MoneyChanger, MoneyChangerBranch, PaymentRequest
from balebot.utils.logger import Logger
logger = Logger.get_logger()
engine = create_engine(DatabaseConfig.database_url)
Session = sessionmaker(engine)
session = Session()
def create_all_table():
    """Create every table declared on ``Base``; always returns True."""
    Base.metadata.create_all(engine)
    return True
def drop_all_table():
    """Drop every table declared on ``Base``; always returns True."""
    Base.metadata.drop_all(engine)
    return True
def db_persist(func):
    """Decorator: run ``func`` and commit the module-level session.

    Returns True on successful commit; on SQLAlchemyError (raised by
    ``func`` or by the commit) rolls back and returns False.  NOTE: the
    wrapped function's own return value is discarded — callers only ever
    see True/False (e.g. ``insert_or_update``'s merged instance is lost).
    """
    import functools  # local import keeps this fix self-contained

    @functools.wraps(func)  # preserve the wrapped function's metadata
    def persist(*args, **kwargs):
        try:
            # Run the mutation inside the try so a failure here is also
            # rolled back (previously it escaped without rollback).
            func(*args, **kwargs)
            session.commit()
            logger.info("success calling db func: " + func.__name__)
            return True
        except SQLAlchemyError as e:
            logger.error(e.args)
            session.rollback()
            return False
    return persist
@db_persist
def insert_to_table(table_object):
    """Add one mapped object, or a list of them, to the session (committed by @db_persist)."""
    if isinstance(table_object, list):
        session.add_all(table_object)
    else:
        session.add(table_object)
@db_persist
def delete_from_table(table_object):
    """Delete one mapped object, or a list of them (committed by @db_persist)."""
    if isinstance(table_object, list):
        for obj in table_object:
            session.delete(obj)
    else:
        session.delete(table_object)
@db_persist
def insert_or_update(table_object):
    """Merge the object into the session (upsert-like).

    NOTE(review): the merged instance returned by ``session.merge`` is
    discarded by the ``db_persist`` wrapper, which returns True/False.
    """
    return session.merge(table_object)
@db_persist
def update_money_changer_remittance_fee_percent(money_changer, percent):
    """Set the remittance fee percent; no-op (but still commits) for non-MoneyChanger input."""
    if isinstance(money_changer, MoneyChanger):
        money_changer.remittance_fee_percent = percent
@db_persist
def update_money_changer_dollar_rial(money_changer, dollar_rial):
    """Set the USD→rial rate; no-op (but still commits) for non-MoneyChanger input."""
    if isinstance(money_changer, MoneyChanger):
        money_changer.dollar_rial = dollar_rial
@db_persist
def update_money_changer_dollar_afghani(money_changer, dollar_afghani):
    """Set the USD→afghani rate; no-op (but still commits) for non-MoneyChanger input."""
    if isinstance(money_changer, MoneyChanger):
        money_changer.dollar_afghani = dollar_afghani
@db_persist
def update_money_changer_card_number(money_changer, card_number):
    """Set the card number; no-op (but still commits) for non-MoneyChanger input."""
    if isinstance(money_changer, MoneyChanger):
        money_changer.card_number = card_number
@db_persist
def update_money_changer_access_hash(money_changer, access_hash):
    """Set the access hash; no-op (but still commits) for non-MoneyChanger input."""
    if isinstance(money_changer, MoneyChanger):
        money_changer.access_hash = access_hash
@db_persist
def update_payment_is_done(payment_request):
    """Mark a PaymentRequest as paid, stamping the current time.

    NOTE(review): datetime.now() is naive local time — confirm whether
    UTC is expected here.
    """
    if isinstance(payment_request, PaymentRequest):
        payment_request.is_done = True
        payment_request.pay_date_time = datetime.datetime.now()
def select_money_changer_by_peer_id(peer_id):
    """Return the MoneyChanger with this peer_id, or None."""
    return session.query(MoneyChanger).filter(MoneyChanger.peer_id == peer_id).one_or_none()
def select_money_changer_by_id(money_changer_id):
    """Return the MoneyChanger with this primary key, or None."""
    return session.query(MoneyChanger).filter(MoneyChanger.id == money_changer_id).one_or_none()
def select_ready_money_changers():
    """Return all money changers that have an access_hash set."""
    return session.query(MoneyChanger).filter(MoneyChanger.access_hash.isnot(None)).all()
def select_all_province_names():
    """Return the distinct province names across all money-changer branches.

    Bug fix: rows produced by ``query(MoneyChangerBranch.province)`` expose
    the selected column as ``.province``; the previous code read
    ``.province_name``, which raises AttributeError at runtime.
    """
    return [r.province for r in session.query(MoneyChangerBranch.province).distinct().all()]
def select_branches_by_money_changer_id(money_changer_id):
    """Return all branches belonging to the given money changer."""
    return session.query(MoneyChangerBranch).filter(MoneyChangerBranch.money_changer_id == money_changer_id).all()
def select_last_payment_request():
    """Return the most recently created PaymentRequest, or None if empty."""
    return session.query(PaymentRequest).order_by(PaymentRequest.id.desc()).first()
def select_payment_with_code(code):
    """Return the PaymentRequest with this code, or None if absent."""
    return session.query(PaymentRequest).filter(PaymentRequest.code == code).one_or_none()
def select_all_payments():
    """Return every PaymentRequest in the table."""
    return session.query(PaymentRequest).all() | database/operations.py | import datetime
from sqlalchemy import create_engine
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import sessionmaker
from configs import DatabaseConfig
from database.models import Base, MoneyChanger, MoneyChangerBranch, PaymentRequest
from balebot.utils.logger import Logger
# Module-level singletons shared by every function below: one logger, one
# engine, and a single long-lived session (no per-request scoping).
logger = Logger.get_logger()
engine = create_engine(DatabaseConfig.database_url)
Session = sessionmaker(engine)
session = Session()
def create_all_table():
    """Create every table registered on Base.metadata; always returns True."""
    Base.metadata.create_all(engine)
    return True
def drop_all_table():
    """Drop every table registered on Base.metadata; always returns True."""
    Base.metadata.drop_all(engine)
    return True
def db_persist(func):
    """Decorator committing the shared session after the wrapped mutation.

    The wrapped function is executed and the session is committed; on any
    SQLAlchemyError the session is rolled back. Returns True on a successful
    commit and False on failure. Note: the wrapped function's own return
    value is discarded.
    """
    from functools import wraps

    @wraps(func)
    def persist(*args, **kwargs):
        try:
            # Run the mutation inside the try so that errors raised while
            # staging changes (not only at commit time) also trigger rollback.
            func(*args, **kwargs)
            session.commit()
            logger.info("success calling db func: " + func.__name__)
            return True
        except SQLAlchemyError as e:
            logger.error(e.args)
            session.rollback()
            return False
    return persist
@db_persist
def insert_to_table(table_object):
    """Stage a single mapped object, or a list of them, for insertion."""
    stage = session.add_all if isinstance(table_object, list) else session.add
    stage(table_object)
@db_persist
def delete_from_table(table_object):
    """Stage a single mapped object, or each member of a list, for deletion."""
    targets = table_object if isinstance(table_object, list) else [table_object]
    for target in targets:
        session.delete(target)
@db_persist
def insert_or_update(table_object):
    """Merge the object into the session (insert or update by primary key).

    NOTE(review): the merged instance returned here is discarded by the
    @db_persist wrapper, which returns True/False instead — confirm that no
    caller relies on receiving the merged object.
    """
    return session.merge(table_object)
# Each setter below mutates one attribute of an already-loaded ORM instance;
# the @db_persist decorator commits the change (or rolls back on error).
# A non-matching argument type is silently ignored.
@db_persist
def update_money_changer_remittance_fee_percent(money_changer, percent):
    if isinstance(money_changer, MoneyChanger):
        money_changer.remittance_fee_percent = percent
@db_persist
def update_money_changer_dollar_rial(money_changer, dollar_rial):
    if isinstance(money_changer, MoneyChanger):
        money_changer.dollar_rial = dollar_rial
@db_persist
def update_money_changer_dollar_afghani(money_changer, dollar_afghani):
    if isinstance(money_changer, MoneyChanger):
        money_changer.dollar_afghani = dollar_afghani
@db_persist
def update_money_changer_card_number(money_changer, card_number):
    if isinstance(money_changer, MoneyChanger):
        money_changer.card_number = card_number
@db_persist
def update_money_changer_access_hash(money_changer, access_hash):
    if isinstance(money_changer, MoneyChanger):
        money_changer.access_hash = access_hash
@db_persist
def update_payment_is_done(payment_request):
    # Marks the request as paid and timestamps it.
    # NOTE(review): uses naive local time — confirm whether UTC was intended.
    if isinstance(payment_request, PaymentRequest):
        payment_request.is_done = True
        payment_request.pay_date_time = datetime.datetime.now()
def select_money_changer_by_peer_id(peer_id):
    """Return the MoneyChanger with this peer_id, or None if absent."""
    return session.query(MoneyChanger).filter(MoneyChanger.peer_id == peer_id).one_or_none()
def select_money_changer_by_id(money_changer_id):
    """Return the MoneyChanger with this primary key, or None if absent."""
    return session.query(MoneyChanger).filter(MoneyChanger.id == money_changer_id).one_or_none()
def select_ready_money_changers():
    """Return every MoneyChanger whose access_hash has been set."""
    return session.query(MoneyChanger).filter(MoneyChanger.access_hash.isnot(None)).all()
def select_all_province_names():
    """Return the distinct province names across all money-changer branches.

    Bug fix: rows produced by ``query(MoneyChangerBranch.province)`` expose
    the selected column as ``.province``; the previous code read
    ``.province_name``, which raises AttributeError at runtime.
    """
    return [r.province for r in session.query(MoneyChangerBranch.province).distinct().all()]
def select_branches_by_money_changer_id(money_changer_id):
    """Return all branches belonging to the given money changer."""
    return session.query(MoneyChangerBranch).filter(MoneyChangerBranch.money_changer_id == money_changer_id).all()
def select_last_payment_request():
    """Return the most recently created PaymentRequest, or None if empty."""
    return session.query(PaymentRequest).order_by(PaymentRequest.id.desc()).first()
def select_payment_with_code(code):
    """Return the PaymentRequest with this code, or None if absent."""
    return session.query(PaymentRequest).filter(PaymentRequest.code == code).one_or_none()
def select_all_payments():
    """Return every PaymentRequest in the table."""
    return session.query(PaymentRequest).all() | 0.416322 | 0.061848
class Ropa:
    """Decides whether to hang the laundry out, based on the weather."""

    def __init__(self):
        pass

    def tender(self, clima):  # Example
        """Return "no tiendo" when it rains, "tiendo" otherwise."""
        return "no tiendo" if clima == "lluvia" else "tiendo"

    def tender_con_negacion(self, clima):  # Example
        """Equivalent to `tender`, written with the inequality operator.

        For any input this gives the same result as `tender`; it simply
        tests `!=` ("is different from") instead of `==` ("is equal to").
        """
        if clima != "lluvia":
            # Early return: no `else` branch is needed.
            return "tiendo"
        return "no tiendo"
    # The early return above short-circuits the method: the final line only
    # executes when `clima` equals "lluvia".
class Semaforo:
    """Models a traffic light's go/stop decision."""

    def __init__(self):
        pass

    def avanza(self, color):
        """Return "avanza" for green, "no avanza" for any other color."""
        return "avanza" if color == "verde" else "no avanza"
class Pastel:
    """Answers simple questions about a cake and its guests."""

    def __init__(self):
        pass

    def suficiente(self, cantidad_rebanadas, cantidad_comensales):
        """Return "suficiente" when there are at least as many slices as
        guests, otherwise "insuficiente"."""
        hay_bastante = cantidad_rebanadas >= cantidad_comensales
        return "suficiente" if hay_bastante else "insuficiente"

    def diferencia(self, cantidad_rebanadas, cantidad_comensales):
        """Return the absolute difference between slices and guests."""
        return abs(cantidad_rebanadas - cantidad_comensales)

    def es_de_chocolate(self, sabor):
        """Return "si" for chocolate flavour, "no" for anything else."""
        return "si" if sabor == "chocolate" else "no"
class Futbol:
    """Validates a football team's size."""
    def __init__(self):
        pass
    def equipo_valido(self, cantidad_jugadores):
        """Return "valido" for exactly 11 players, otherwise "no valido"."""
        if cantidad_jugadores == 11:
            return "valido"
        return "no valido" | if_statement/if_statement.py |
class Ropa:
    """Decides whether to hang the laundry out, based on the weather."""

    def __init__(self):
        pass

    def tender(self, clima):  # Example
        """Return "no tiendo" when it rains, "tiendo" otherwise."""
        return "no tiendo" if clima == "lluvia" else "tiendo"

    def tender_con_negacion(self, clima):  # Example
        """Equivalent to `tender`, written with the inequality operator.

        For any input this gives the same result as `tender`; it simply
        tests `!=` ("is different from") instead of `==` ("is equal to").
        """
        if clima != "lluvia":
            # Early return: no `else` branch is needed.
            return "tiendo"
        return "no tiendo"
    # The early return above short-circuits the method: the final line only
    # executes when `clima` equals "lluvia".
class Semaforo:
    """Models a traffic light's go/stop decision."""

    def __init__(self):
        pass

    def avanza(self, color):
        """Return "avanza" for green, "no avanza" for any other color."""
        return "avanza" if color == "verde" else "no avanza"
class Pastel:
    """Answers simple questions about a cake and its guests."""

    def __init__(self):
        pass

    def suficiente(self, cantidad_rebanadas, cantidad_comensales):
        """Return "suficiente" when there are at least as many slices as
        guests, otherwise "insuficiente"."""
        hay_bastante = cantidad_rebanadas >= cantidad_comensales
        return "suficiente" if hay_bastante else "insuficiente"

    def diferencia(self, cantidad_rebanadas, cantidad_comensales):
        """Return the absolute difference between slices and guests."""
        return abs(cantidad_rebanadas - cantidad_comensales)

    def es_de_chocolate(self, sabor):
        """Return "si" for chocolate flavour, "no" for anything else."""
        return "si" if sabor == "chocolate" else "no"
class Futbol:
    """Validates a football team's size."""
    def __init__(self):
        pass
    def equipo_valido(self, cantidad_jugadores):
        """Return "valido" for exactly 11 players, otherwise "no valido"."""
        if cantidad_jugadores == 11:
            return "valido"
        return "no valido" | 0.574514 | 0.469216
import torch
import numpy as np
# Map category names to class ids. 'department' is intentionally folded
# into the 'course' class (id 3), mirroring the GCN setup.
label2id = {
    'student': 0,
    'faculty': 1,
    'project': 2,
    'course': 3,
    'staff': 4,
    'department': 3,
}

# Inverse map for reporting; 'department' has no entry because it shares
# id 3 with 'course'.
id2label = {
    0: 'student',
    1: 'faculty',
    2: 'project',
    3: 'course',
    4: 'staff',
}


def load_data(dataset_path,
              uni_lt=('cornell', 'texas', 'wisconsin', 'washington', 'misc'),
              cat_lt=('student', 'faculty', 'project', 'course', 'staff', 'department')):
    """Load the pre-processed dataset.

    Each line of the TSV file must hold four tab-separated fields:
    university name, category name, clean text, URL (the URL is unused).
    Make sure every category name in ``cat_lt`` has an id in ``label2id``.

    Args:
        dataset_path: Path to the pre-processed dataset,
            which is like './xxx/dataset.tsv'.
        uni_lt: Iterable of university names to keep.
        cat_lt: Iterable of category names to keep.

    Returns:
        A tuple ``(texts, labels)``: ``texts`` is a list of strings for
        tokenization and ``labels`` is a flat list of integer class ids
        (one per text) — not a nested (x, 1) list.
    """
    texts = []
    labels = []
    # Explicit encoding: the platform default could silently differ.
    with open(dataset_path, 'r', encoding='utf-8') as data_file:
        for data_line in data_file:
            uni_name, cat_name, text, _url = data_line.strip('\n').split('\t')
            if uni_name in uni_lt and cat_name in cat_lt:
                texts.append(text)
                labels.append(label2id[cat_name])
    return texts, labels
def statistics(train_labels=None, val_labels=None, test_labels=None):
    """Build a human-readable per-class size summary of the dataset splits.

    Args:
        train_labels: Optional list of training label ids.
        val_labels: Optional list of validation label ids.
        test_labels: Optional list of testing label ids.

    Returns:
        A multi-line string of per-split, per-class counts (for logging).
    """
    chunks = []
    grand_total = 0
    for split_name, ids in (('train set', train_labels),
                            ('val set', val_labels),
                            ('test set', test_labels)):
        if ids is None:
            continue
        grand_total += len(ids)
        chunks.append(f'[{split_name:>9}] ')
        for class_id, class_name in id2label.items():
            class_count = (np.array(ids) == class_id).sum()
            chunks.append(f'{class_name}: {class_count:4} | ')
        chunks.append(f'total: {len(ids)}\n')
    chunks.append(f'total size = {grand_total}\n')
    return ''.join(chunks)
class WebKBDataset(torch.utils.data.Dataset):
    """Torch dataset pairing tokenizer encodings with integer labels."""

    def __init__(self, encodings, labels):
        # encodings: dict of equal-length per-sample sequences
        # labels: sequence of integer class ids aligned with the encodings
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        sample = {
            name: torch.tensor(values[idx])
            for name, values in self.encodings.items()
        }
        sample['labels'] = torch.tensor(self.labels[idx])
        return sample

    def __len__(self):
        return len(self.labels)
if __name__ == '__main__':
    # Demo: load the per-split data for one university and print statistics.
    # NOTE(review): load_split_data is not defined in this module (only
    # load_data is) and is not imported here — this block raises NameError
    # unless the function exists elsewhere; confirm the intended import.
    uni_lt = ['wisconsin']
    cat_lt = ['student', 'faculty', 'project', 'course', 'staff', 'department']
    split_id = 0
    print('uni_lt: ', uni_lt)
    train_texts, train_labels = load_split_data(
        './dataset.tsv', './dataset_split', split_id, 'train',
        uni_lt=uni_lt, cat_lt=cat_lt)
    val_texts, val_labels = load_split_data(
        './dataset.tsv', './dataset_split', split_id, 'val',
        uni_lt=uni_lt, cat_lt=cat_lt)
    test_texts, test_labels = load_split_data(
        './dataset.tsv', './dataset_split', split_id, 'test',
        uni_lt=uni_lt, cat_lt=cat_lt)
    # Print statistics of the splitting.
    s_stats = statistics(train_labels, val_labels, test_labels)
    print(s_stats) | utils/dataloader.py | import torch
import numpy as np
# Map category names to class ids. 'department' is intentionally folded
# into the 'course' class (id 3), mirroring the GCN setup.
label2id = {
    'student': 0,
    'faculty': 1,
    'project': 2,
    'course': 3,
    'staff': 4,
    'department': 3,
}

# Inverse map for reporting; 'department' has no entry because it shares
# id 3 with 'course'.
id2label = {
    0: 'student',
    1: 'faculty',
    2: 'project',
    3: 'course',
    4: 'staff',
}


def load_data(dataset_path,
              uni_lt=('cornell', 'texas', 'wisconsin', 'washington', 'misc'),
              cat_lt=('student', 'faculty', 'project', 'course', 'staff', 'department')):
    """Load the pre-processed dataset.

    Each line of the TSV file must hold four tab-separated fields:
    university name, category name, clean text, URL (the URL is unused).
    Make sure every category name in ``cat_lt`` has an id in ``label2id``.

    Args:
        dataset_path: Path to the pre-processed dataset,
            which is like './xxx/dataset.tsv'.
        uni_lt: Iterable of university names to keep.
        cat_lt: Iterable of category names to keep.

    Returns:
        A tuple ``(texts, labels)``: ``texts`` is a list of strings for
        tokenization and ``labels`` is a flat list of integer class ids
        (one per text) — not a nested (x, 1) list.
    """
    texts = []
    labels = []
    # Explicit encoding: the platform default could silently differ.
    with open(dataset_path, 'r', encoding='utf-8') as data_file:
        for data_line in data_file:
            uni_name, cat_name, text, _url = data_line.strip('\n').split('\t')
            if uni_name in uni_lt and cat_name in cat_lt:
                texts.append(text)
                labels.append(label2id[cat_name])
    return texts, labels
def statistics(train_labels=None, val_labels=None, test_labels=None):
    """Build a human-readable per-class size summary of the dataset splits.

    Args:
        train_labels: Optional list of training label ids.
        val_labels: Optional list of validation label ids.
        test_labels: Optional list of testing label ids.

    Returns:
        A multi-line string of per-split, per-class counts (for logging).
    """
    chunks = []
    grand_total = 0
    for split_name, ids in (('train set', train_labels),
                            ('val set', val_labels),
                            ('test set', test_labels)):
        if ids is None:
            continue
        grand_total += len(ids)
        chunks.append(f'[{split_name:>9}] ')
        for class_id, class_name in id2label.items():
            class_count = (np.array(ids) == class_id).sum()
            chunks.append(f'{class_name}: {class_count:4} | ')
        chunks.append(f'total: {len(ids)}\n')
    chunks.append(f'total size = {grand_total}\n')
    return ''.join(chunks)
class WebKBDataset(torch.utils.data.Dataset):
    """Torch dataset pairing tokenizer encodings with integer labels."""

    def __init__(self, encodings, labels):
        # encodings: dict of equal-length per-sample sequences
        # labels: sequence of integer class ids aligned with the encodings
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        sample = {
            name: torch.tensor(values[idx])
            for name, values in self.encodings.items()
        }
        sample['labels'] = torch.tensor(self.labels[idx])
        return sample

    def __len__(self):
        return len(self.labels)
if __name__ == '__main__':
    # Demo: load the per-split data for one university and print statistics.
    # NOTE(review): load_split_data is not defined in this module (only
    # load_data is) and is not imported here — this block raises NameError
    # unless the function exists elsewhere; confirm the intended import.
    uni_lt = ['wisconsin']
    cat_lt = ['student', 'faculty', 'project', 'course', 'staff', 'department']
    split_id = 0
    print('uni_lt: ', uni_lt)
    train_texts, train_labels = load_split_data(
        './dataset.tsv', './dataset_split', split_id, 'train',
        uni_lt=uni_lt, cat_lt=cat_lt)
    val_texts, val_labels = load_split_data(
        './dataset.tsv', './dataset_split', split_id, 'val',
        uni_lt=uni_lt, cat_lt=cat_lt)
    test_texts, test_labels = load_split_data(
        './dataset.tsv', './dataset_split', split_id, 'test',
        uni_lt=uni_lt, cat_lt=cat_lt)
    # Print statistics of the splitting.
    s_stats = statistics(train_labels, val_labels, test_labels)
    print(s_stats) | 0.545044 | 0.468365
import math
import argparse
import keras as K
import numpy as np
from KnowledgeGraph import KnowledgeGraph
from common import *
class TransE:
    """Keras implementation of TransE knowledge-graph embeddings.

    Entities and relations share one embedding space; a triple (h, r, t)
    is scored by the distance between h + r and t, and training minimises
    a margin-based ranking loss over positive/negative triple pairs.
    """
    @property
    def embedding_entity(self):
        # Read-only handle to the shared entity embedding layer.
        return self.__embedding_entity
    @property
    def embedding_relation(self):
        # Read-only handle to the shared relation embedding layer.
        return self.__embedding_relation
    def __init__(self, num_entity, num_relation, learning_rate, batch_size, num_epochs, margin, dimension, score_func):
        """Create the embedding layers plus the score and loss lambdas.

        Args:
            num_entity: Number of distinct entities in the graph.
            num_relation: Number of distinct relation types.
            learning_rate: Adam learning rate used by every compiled model.
            batch_size: Mini-batch size for training.
            num_epochs: Number of passes over the training triples.
            margin: Margin of the ranking loss.
            dimension: Embedding dimensionality.
            score_func: 'l1' or 'l2' distance; any other value raises.
        """
        self.__num_entity = num_entity
        self.__num_relation = num_relation
        self.__learning_rate = learning_rate
        self.__batch_size = batch_size
        self.__num_epochs = num_epochs
        self.__margin = margin
        self.__dimension = dimension
        # Uniform init bound 6/sqrt(k); embedding rows are capped at L2 norm 1.
        bound = 6 / math.sqrt(self.__dimension)
        self.__embedding_entity = K.layers.Embedding(self.__num_entity, self.__dimension, name='embedding_entity',
                                    embeddings_initializer=K.initializers.random_uniform(minval=-bound, maxval=bound),
                                    embeddings_constraint=K.constraints.max_norm(max_value=1, axis=1))
        self.__embedding_relation = K.layers.Embedding(self.__num_relation, self.__dimension, name='embedding_relation',
                                    embeddings_initializer=K.initializers.random_uniform(minval=-bound, maxval=bound),
                                    embeddings_constraint=K.constraints.max_norm(max_value=1, axis=1))
        self.__train_model = None
        self.__predict_model = None
        self.__test_model = None
        if score_func == 'l1':
            self.__score = K.layers.Lambda(lambda x: K.backend.sum(K.backend.abs(x[0] + x[1] - x[2]), axis=-1))
        elif score_func == 'l2':
            self.__score = K.layers.Lambda(lambda x: K.backend.sum(K.backend.square(x[0] + x[1] - x[2]), axis=-1))
        else:
            raise Exception('Invalid score_func value.')
        # Margin ranking loss: max(margin + score(pos) - score(neg), 0).
        self.__loss = K.layers.Lambda(lambda x: K.backend.maximum(x[0] + self.__margin - x[1], 0.0))
    def __compile_train_model(self):
        """Build and compile the margin-ranking model over triple pairs."""
        positive_head = K.Input((1,), dtype='int32', name='positive_heads')
        positive_relation = K.Input((1,), dtype='int32', name='positive_relations')
        positive_tail = K.Input((1,), dtype='int32', name='positive_tails')
        negative_head = K.Input((1,), dtype='int32', name='negative_heads')
        negative_tail = K.Input((1,), dtype='int32', name='negative_tails')
        embedding_positive_head = self.__embedding_entity(positive_head)
        embedding_positive_tail = self.__embedding_entity(positive_tail)
        embedding_positive_relation = self.__embedding_relation(positive_relation)
        embedding_negative_head = self.__embedding_entity(negative_head)
        embedding_negative_tail = self.__embedding_entity(negative_tail)
        embedding_positive_triple = [embedding_positive_head, embedding_positive_relation, embedding_positive_tail]
        # Corrupted triples reuse the positive relation embedding by design.
        embedding_negative_triple = [embedding_negative_head, embedding_positive_relation, embedding_negative_tail]
        score_positive = self.__score(embedding_positive_triple)
        score_negative = self.__score(embedding_negative_triple)
        loss = self.__loss([score_positive, score_negative])
        self.__train_model = K.Model(inputs=[positive_head, positive_relation, positive_tail, negative_head, negative_tail], outputs=loss)
        opt = K.optimizers.Adam(lr=self.__learning_rate)
        # The model's output IS the loss, so the compiled loss ignores y_true.
        self.__train_model.compile(opt, loss=lambda y_true, y_pred: y_pred)
    def __compile_eval_model(self):
        """Build and compile a model that scores single (h, r, t) triples."""
        head = K.Input((1,), dtype='int32', name='heads')
        relation = K.Input((1,), dtype='int32', name='relations')
        tail = K.Input((1,), dtype='int32', name='tails')
        embedding_head = self.__embedding_entity(head)
        embedding_tail = self.__embedding_entity(tail)
        embedding_relation = self.__embedding_relation(relation)
        embedding_triple = [embedding_head, embedding_relation, embedding_tail]
        loss = self.__score(embedding_triple)
        self.__predict_model = K.Model(inputs=[head, relation, tail], outputs=loss)
        opt = K.optimizers.Adam(lr=self.__learning_rate)
        self.__predict_model.compile(opt, loss='binary_crossentropy')
    def __compile_test_model(self):
        """Build and compile a model scoring num_entity candidates per row."""
        head = K.Input((self.__num_entity,), dtype='int32', name='heads')
        relation = K.Input((self.__num_entity,), dtype='int32', name='relations')
        tail = K.Input((self.__num_entity,), dtype='int32', name='tails')
        embedding_head = self.__embedding_entity(head)
        embedding_tail = self.__embedding_entity(tail)
        embedding_relation = self.__embedding_relation(relation)
        embedding_triple = [embedding_head, embedding_relation, embedding_tail]
        loss = self.__score(embedding_triple)
        self.__test_model = K.Model(inputs=[head, relation, tail], outputs=loss)
        opt = K.optimizers.Adam(lr=self.__learning_rate)
        self.__test_model.compile(opt, loss='binary_crossentropy')
    def compile(self):
        """Compile the training, evaluation and test models."""
        self.__compile_train_model()
        self.__compile_eval_model()
        self.__compile_test_model()
    def train(self, ph, pr, pt, nh, nt):
        """Fit on positive (ph, pr, pt) and corrupted (nh, pr, nt) triples.

        The zero labels are dummies: the compiled loss returns the model
        output directly and ignores y_true.
        """
        label = np.zeros((len(ph), 1))
        self.__train_model.fit(x=[ph, pr, pt, nh, nt], y=label, epochs=self.__num_epochs, batch_size=self.__batch_size)
    def evaluate(self, h, r, t):
        """Score the given triples; also prints the scores as a side effect."""
        score = self.__predict_model.predict(x=[h, r, t])
        print(score)
        return score
    def predict_head(self, r, t):
        """Score every entity as a candidate head for each (r, t) pair.

        Each input is tiled to shape (len(r), num_entity) so all entities
        are scored in one pass.
        """
        test_num = len(r)
        heads = np.tile(np.arange(0, self.__num_entity), [test_num, 1])
        relations = np.tile(np.reshape(r, [test_num, 1]), [1, self.__num_entity])
        tails = np.tile(np.reshape(t, [test_num, 1]), [1, self.__num_entity])
        score = self.__test_model.predict(x=[heads, relations, tails])
        return score
    def predict_tail(self, h, r):
        """Score every entity as a candidate tail for each (h, r) pair."""
        test_num = len(r)
        heads = np.tile(np.reshape(h, [test_num, 1]), [1, self.__num_entity])
        relations = np.tile(np.reshape(r, [test_num, 1]), [1, self.__num_entity])
        tails = np.tile(np.arange(0, self.__num_entity), [test_num, 1])
        score = self.__test_model.predict(x=[heads, relations, tails])
        return score
    def save_embeddings(self):
        """Write the entity and relation embedding matrices to TSV files."""
        w_entity = self.__embedding_entity.get_weights()
        np.savetxt('./entity.tsv', w_entity[0], delimiter='\t')
        w_relation = self.__embedding_relation.get_weights()
        np.savetxt('./relation.tsv', w_relation[0], delimiter='\t')
if __name__ == '__main__':
    # CLI entry point: parse hyper-parameters, build the knowledge graph and
    # model, train, persist embeddings, then run link-prediction evaluation.
    parser = argparse.ArgumentParser(description="TransE")
    parser.add_argument('--data_dir', dest='data_dir', type=str, default='../data/FB15k/')
    parser.add_argument('--learning_rate', dest='learning_rate', type=float, default=0.01)
    parser.add_argument('--batch_size', dest='batch_size', type=int, default=4096)
    parser.add_argument('--num_epochs', dest='num_epochs', type=int, default=100)
    parser.add_argument('--dimension', dest='dimension', type=int, default=50)
    parser.add_argument('--margin', dest='margin', type=float, help='margin', default=1.0)
    parser.add_argument('--negative_sampling', dest='negative_sampling', type=str,
                        help='choose unit or bern to generate negative examples', default='bern')
    parser.add_argument('--score_func', dest='score_func', type=str, default='l1',
                        help='choose l1 or l2 to calculate distance of vectors')
    args = parser.parse_args()
    print(args)
    KG = KnowledgeGraph(data_dir=args.data_dir, negative_sampling=args.negative_sampling)
    model = TransE(num_entity=KG.num_entity, num_relation=KG.num_relation, learning_rate=args.learning_rate,
                   batch_size=args.batch_size, num_epochs=args.num_epochs, margin=args.margin, dimension=args.dimension,
                   score_func=args.score_func)
    model.compile()
    tp, tn = KG.get_training_data()
    # NOTE(review): train_model and test_model come from `common` via the
    # star import — confirm their signatures match these calls.
    train_model(model, tp, tn)
    model.save_embeddings()
    test_model(model, KG.get_test_data()) | src/TransE.py | import math
import argparse
import keras as K
import numpy as np
from KnowledgeGraph import KnowledgeGraph
from common import *
class TransE:
    """Keras implementation of TransE knowledge-graph embeddings.

    Entities and relations share one embedding space; a triple (h, r, t)
    is scored by the distance between h + r and t, and training minimises
    a margin-based ranking loss over positive/negative triple pairs.
    """
    @property
    def embedding_entity(self):
        # Read-only handle to the shared entity embedding layer.
        return self.__embedding_entity
    @property
    def embedding_relation(self):
        # Read-only handle to the shared relation embedding layer.
        return self.__embedding_relation
    def __init__(self, num_entity, num_relation, learning_rate, batch_size, num_epochs, margin, dimension, score_func):
        """Create the embedding layers plus the score and loss lambdas.

        Args:
            num_entity: Number of distinct entities in the graph.
            num_relation: Number of distinct relation types.
            learning_rate: Adam learning rate used by every compiled model.
            batch_size: Mini-batch size for training.
            num_epochs: Number of passes over the training triples.
            margin: Margin of the ranking loss.
            dimension: Embedding dimensionality.
            score_func: 'l1' or 'l2' distance; any other value raises.
        """
        self.__num_entity = num_entity
        self.__num_relation = num_relation
        self.__learning_rate = learning_rate
        self.__batch_size = batch_size
        self.__num_epochs = num_epochs
        self.__margin = margin
        self.__dimension = dimension
        # Uniform init bound 6/sqrt(k); embedding rows are capped at L2 norm 1.
        bound = 6 / math.sqrt(self.__dimension)
        self.__embedding_entity = K.layers.Embedding(self.__num_entity, self.__dimension, name='embedding_entity',
                                    embeddings_initializer=K.initializers.random_uniform(minval=-bound, maxval=bound),
                                    embeddings_constraint=K.constraints.max_norm(max_value=1, axis=1))
        self.__embedding_relation = K.layers.Embedding(self.__num_relation, self.__dimension, name='embedding_relation',
                                    embeddings_initializer=K.initializers.random_uniform(minval=-bound, maxval=bound),
                                    embeddings_constraint=K.constraints.max_norm(max_value=1, axis=1))
        self.__train_model = None
        self.__predict_model = None
        self.__test_model = None
        if score_func == 'l1':
            self.__score = K.layers.Lambda(lambda x: K.backend.sum(K.backend.abs(x[0] + x[1] - x[2]), axis=-1))
        elif score_func == 'l2':
            self.__score = K.layers.Lambda(lambda x: K.backend.sum(K.backend.square(x[0] + x[1] - x[2]), axis=-1))
        else:
            raise Exception('Invalid score_func value.')
        # Margin ranking loss: max(margin + score(pos) - score(neg), 0).
        self.__loss = K.layers.Lambda(lambda x: K.backend.maximum(x[0] + self.__margin - x[1], 0.0))
    def __compile_train_model(self):
        """Build and compile the margin-ranking model over triple pairs."""
        positive_head = K.Input((1,), dtype='int32', name='positive_heads')
        positive_relation = K.Input((1,), dtype='int32', name='positive_relations')
        positive_tail = K.Input((1,), dtype='int32', name='positive_tails')
        negative_head = K.Input((1,), dtype='int32', name='negative_heads')
        negative_tail = K.Input((1,), dtype='int32', name='negative_tails')
        embedding_positive_head = self.__embedding_entity(positive_head)
        embedding_positive_tail = self.__embedding_entity(positive_tail)
        embedding_positive_relation = self.__embedding_relation(positive_relation)
        embedding_negative_head = self.__embedding_entity(negative_head)
        embedding_negative_tail = self.__embedding_entity(negative_tail)
        embedding_positive_triple = [embedding_positive_head, embedding_positive_relation, embedding_positive_tail]
        # Corrupted triples reuse the positive relation embedding by design.
        embedding_negative_triple = [embedding_negative_head, embedding_positive_relation, embedding_negative_tail]
        score_positive = self.__score(embedding_positive_triple)
        score_negative = self.__score(embedding_negative_triple)
        loss = self.__loss([score_positive, score_negative])
        self.__train_model = K.Model(inputs=[positive_head, positive_relation, positive_tail, negative_head, negative_tail], outputs=loss)
        opt = K.optimizers.Adam(lr=self.__learning_rate)
        # The model's output IS the loss, so the compiled loss ignores y_true.
        self.__train_model.compile(opt, loss=lambda y_true, y_pred: y_pred)
    def __compile_eval_model(self):
        """Build and compile a model that scores single (h, r, t) triples."""
        head = K.Input((1,), dtype='int32', name='heads')
        relation = K.Input((1,), dtype='int32', name='relations')
        tail = K.Input((1,), dtype='int32', name='tails')
        embedding_head = self.__embedding_entity(head)
        embedding_tail = self.__embedding_entity(tail)
        embedding_relation = self.__embedding_relation(relation)
        embedding_triple = [embedding_head, embedding_relation, embedding_tail]
        loss = self.__score(embedding_triple)
        self.__predict_model = K.Model(inputs=[head, relation, tail], outputs=loss)
        opt = K.optimizers.Adam(lr=self.__learning_rate)
        self.__predict_model.compile(opt, loss='binary_crossentropy')
    def __compile_test_model(self):
        """Build and compile a model scoring num_entity candidates per row."""
        head = K.Input((self.__num_entity,), dtype='int32', name='heads')
        relation = K.Input((self.__num_entity,), dtype='int32', name='relations')
        tail = K.Input((self.__num_entity,), dtype='int32', name='tails')
        embedding_head = self.__embedding_entity(head)
        embedding_tail = self.__embedding_entity(tail)
        embedding_relation = self.__embedding_relation(relation)
        embedding_triple = [embedding_head, embedding_relation, embedding_tail]
        loss = self.__score(embedding_triple)
        self.__test_model = K.Model(inputs=[head, relation, tail], outputs=loss)
        opt = K.optimizers.Adam(lr=self.__learning_rate)
        self.__test_model.compile(opt, loss='binary_crossentropy')
    def compile(self):
        """Compile the training, evaluation and test models."""
        self.__compile_train_model()
        self.__compile_eval_model()
        self.__compile_test_model()
    def train(self, ph, pr, pt, nh, nt):
        """Fit on positive (ph, pr, pt) and corrupted (nh, pr, nt) triples.

        The zero labels are dummies: the compiled loss returns the model
        output directly and ignores y_true.
        """
        label = np.zeros((len(ph), 1))
        self.__train_model.fit(x=[ph, pr, pt, nh, nt], y=label, epochs=self.__num_epochs, batch_size=self.__batch_size)
    def evaluate(self, h, r, t):
        """Score the given triples; also prints the scores as a side effect."""
        score = self.__predict_model.predict(x=[h, r, t])
        print(score)
        return score
    def predict_head(self, r, t):
        """Score every entity as a candidate head for each (r, t) pair.

        Each input is tiled to shape (len(r), num_entity) so all entities
        are scored in one pass.
        """
        test_num = len(r)
        heads = np.tile(np.arange(0, self.__num_entity), [test_num, 1])
        relations = np.tile(np.reshape(r, [test_num, 1]), [1, self.__num_entity])
        tails = np.tile(np.reshape(t, [test_num, 1]), [1, self.__num_entity])
        score = self.__test_model.predict(x=[heads, relations, tails])
        return score
    def predict_tail(self, h, r):
        """Score every entity as a candidate tail for each (h, r) pair."""
        test_num = len(r)
        heads = np.tile(np.reshape(h, [test_num, 1]), [1, self.__num_entity])
        relations = np.tile(np.reshape(r, [test_num, 1]), [1, self.__num_entity])
        tails = np.tile(np.arange(0, self.__num_entity), [test_num, 1])
        score = self.__test_model.predict(x=[heads, relations, tails])
        return score
    def save_embeddings(self):
        """Write the entity and relation embedding matrices to TSV files."""
        w_entity = self.__embedding_entity.get_weights()
        np.savetxt('./entity.tsv', w_entity[0], delimiter='\t')
        w_relation = self.__embedding_relation.get_weights()
        np.savetxt('./relation.tsv', w_relation[0], delimiter='\t')
if __name__ == '__main__':
    # CLI entry point: parse hyper-parameters, build the knowledge graph and
    # model, train, persist embeddings, then run link-prediction evaluation.
    parser = argparse.ArgumentParser(description="TransE")
    parser.add_argument('--data_dir', dest='data_dir', type=str, default='../data/FB15k/')
    parser.add_argument('--learning_rate', dest='learning_rate', type=float, default=0.01)
    parser.add_argument('--batch_size', dest='batch_size', type=int, default=4096)
    parser.add_argument('--num_epochs', dest='num_epochs', type=int, default=100)
    parser.add_argument('--dimension', dest='dimension', type=int, default=50)
    parser.add_argument('--margin', dest='margin', type=float, help='margin', default=1.0)
    parser.add_argument('--negative_sampling', dest='negative_sampling', type=str,
                        help='choose unit or bern to generate negative examples', default='bern')
    parser.add_argument('--score_func', dest='score_func', type=str, default='l1',
                        help='choose l1 or l2 to calculate distance of vectors')
    args = parser.parse_args()
    print(args)
    KG = KnowledgeGraph(data_dir=args.data_dir, negative_sampling=args.negative_sampling)
    model = TransE(num_entity=KG.num_entity, num_relation=KG.num_relation, learning_rate=args.learning_rate,
                   batch_size=args.batch_size, num_epochs=args.num_epochs, margin=args.margin, dimension=args.dimension,
                   score_func=args.score_func)
    model.compile()
    tp, tn = KG.get_training_data()
    # NOTE(review): train_model and test_model come from `common` via the
    # star import — confirm their signatures match these calls.
    train_model(model, tp, tn)
    model.save_embeddings()
    test_model(model, KG.get_test_data()) | 0.830903 | 0.209227
import tflearn
# Directory names of the image folders (one sub-folder per class label).
# NOTE(review): 'test' is never used in this file — confirm it is needed.
test = 'test'
train = 'training'
def getXY():
    """Load the training images (80x80, one folder per class) and labels.

    Returns:
        (X, Y): image preloader and one-hot label preloader for the
        `training` directory.
    """
    # Bug fix: categorical_labels expects a bool; the string 'True' only
    # worked because any non-empty string is truthy.
    X, Y = tflearn.data_utils.image_preloader(train, image_shape=(
        80, 80), mode='folder', categorical_labels=True, normalize=True)
    return X, Y
def main():
    '''
    Establishes architecture for CNN-fully-connected-RNN neural net
    and trains it on the data returned by getXY.

    NOTE(review): getXY's image_preloader yields single (80, 80, 3) images,
    while the input layer below expects 120-frame clips — confirm how
    frames are assembled into the 5-D input before training.
    '''
    # Create the input layer: None: batch size, 120: frames, 80: height, 80: width, 3: RGB
    network = tflearn.input_data(shape=[None, 120, 80, 80, 3], name='input')
    # Convolutional network
    network = tflearn.conv_3d(
        network, 32, (3, 3, 3), activation='relu')  # 32 conv layers of 3x3x3 (3x3x3 convolves for each 3 frames, 3px height, and 3px width)
    network = tflearn.max_pool_3d(
        network, (1, 2, 2), strides=(1, 2, 2))  # Pools results of the conv_3d layer
    network = tflearn.conv_3d(
        network, 64, (3, 3, 3), activation='relu')  # 64 layers of 3x3x3
    network = tflearn.max_pool_3d(network, (1, 2, 2), strides=(1, 2, 2))
    network = tflearn.conv_3d(
        network, 128, (3, 3, 3), activation='relu')  # 128 layers of 3x3x3
    network = tflearn.conv_3d(
        network, 128, (3, 3, 3), activation='relu')  # another one?
    network = tflearn.max_pool_3d(network, (1, 2, 2), strides=(1, 2, 2))
    network = tflearn.conv_3d(
        network, 256, (2, 2, 2), activation='relu')  # 256 layers of 2x2x2
    network = tflearn.conv_3d(
        network, 256, (2, 2, 2), activation='relu')
    network = tflearn.max_pool_3d(
        network, (1, 2, 2), strides=(1, 2, 2))
    network = tflearn.conv_2d(
        network, 64, 4, activation='relu', regularizer="L2")  # 64 layers of 4x4
    network = tflearn.max_pool_2d(network, 2)  # and then max pool
    # Normalize activations of the previous layer at each batch.
    network = tflearn.local_response_normalization(network)
    # And now the fully-connected neural net (128 & 256 neurons + dropout)
    network = tflearn.fully_connected(network, 128, activation='tanh')
    network = tflearn.dropout(network, 0.8)
    network = tflearn.fully_connected(network, 256, activation='tanh')
    network = tflearn.dropout(network, 0.8)
    network = tflearn.reshape(network, [-1, 1, 256])  # Why 256?
    # LSTM layers
    network = tflearn.lstm(network, 128, return_seq=True)  # LSTM of 128 units
    network = tflearn.lstm(network, 128)
    network = tflearn.fully_connected(
        network, 4, activation='softmax')  # Just four neurons... okay?
    network = tflearn.regression(
        network, optimizer='adam', loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=0)
    X, Y = getXY()
    model.fit(X, Y, n_epoch=1, validation_set=0.1,
              show_metric=True, snapshot_step=100)
# Script entry point.
if __name__ == "__main__":
    main() | architecture.py | import tflearn
# Directory names of the image folders (one sub-folder per class label).
# NOTE(review): 'test' is never used in this file — confirm it is needed.
test = 'test'
train = 'training'
def getXY():
    """Load the training images (80x80, one folder per class) and labels.

    Returns:
        (X, Y): image preloader and one-hot label preloader for the
        `training` directory.
    """
    # Bug fix: categorical_labels expects a bool; the string 'True' only
    # worked because any non-empty string is truthy.
    X, Y = tflearn.data_utils.image_preloader(train, image_shape=(
        80, 80), mode='folder', categorical_labels=True, normalize=True)
    return X, Y
def main():
    '''
    Build and train the CNN -> fully-connected -> LSTM architecture.
    '''
    # Input layer: batch x 120 frames x 80px height x 80px width x 3 (RGB).
    # NOTE(review): getXY() preloads single 80x80 images, which does not
    # obviously match this 5-D video-clip input shape — confirm the data
    # pipeline actually yields (120, 80, 80, 3) clips.
    net = tflearn.input_data(shape=[None, 120, 80, 80, 3], name='input')

    # --- 3-D convolutional stack ---
    # 32 filters of 3x3x3 (3 frames x 3px x 3px), then spatial-only pooling
    # (the temporal axis is left untouched by the (1, 2, 2) windows).
    net = tflearn.conv_3d(net, 32, (3, 3, 3), activation='relu')
    net = tflearn.max_pool_3d(net, (1, 2, 2), strides=(1, 2, 2))
    # 64 filters of 3x3x3.
    net = tflearn.conv_3d(net, 64, (3, 3, 3), activation='relu')
    net = tflearn.max_pool_3d(net, (1, 2, 2), strides=(1, 2, 2))
    # Two consecutive 128-filter 3x3x3 layers.
    net = tflearn.conv_3d(net, 128, (3, 3, 3), activation='relu')
    net = tflearn.conv_3d(net, 128, (3, 3, 3), activation='relu')
    net = tflearn.max_pool_3d(net, (1, 2, 2), strides=(1, 2, 2))
    # Two consecutive 256-filter 2x2x2 layers.
    net = tflearn.conv_3d(net, 256, (2, 2, 2), activation='relu')
    net = tflearn.conv_3d(net, 256, (2, 2, 2), activation='relu')
    net = tflearn.max_pool_3d(net, (1, 2, 2), strides=(1, 2, 2))

    # --- 2-D convolution on the pooled features ---
    # NOTE(review): conv_2d applied directly after the 3-D stack — verify
    # tflearn accepts the incoming 5-D tensor here without a reshape.
    net = tflearn.conv_2d(net, 64, 4, activation='relu', regularizer="L2")
    net = tflearn.max_pool_2d(net, 2)
    # Normalize activations of the previous layer within each batch.
    net = tflearn.local_response_normalization(net)

    # --- Fully-connected head with dropout (keep-prob 0.8) ---
    net = tflearn.fully_connected(net, 128, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 256, activation='tanh')
    net = tflearn.dropout(net, 0.8)

    # Reshape to a length-1 sequence of 256-d vectors for the LSTM layers.
    net = tflearn.reshape(net, [-1, 1, 256])

    # --- Recurrent layers: two stacked 128-unit LSTMs ---
    net = tflearn.lstm(net, 128, return_seq=True)
    net = tflearn.lstm(net, 128)

    # Four-class softmax output and Adam/cross-entropy training target.
    net = tflearn.fully_connected(net, 4, activation='softmax')
    net = tflearn.regression(
        net, optimizer='adam', loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(net, tensorboard_verbose=0)
    X, Y = getXY()
    # One epoch, 10% of the data held out for validation.
    model.fit(X, Y, n_epoch=1, validation_set=0.1,
              show_metric=True, snapshot_step=100)
if __name__ == "__main__":
main() | 0.83762 | 0.67858 |