| content | origin | type |
|---|---|---|
| stringlengths 0–1.05M | stringclasses (2 values) | stringclasses (2 values) |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 7 14:04:20 2018
@author: pgallego
"""
import numpy as np
from keras.callbacks import TensorBoard,ModelCheckpoint
from vgg16 import CreateModel
from PrepareDate import PrepareData
import os
input_shape=226
channels=3
X_train,y_train,X_val,y_val,X_test,y_test = PrepareData(0.8,0.1)
logdir= "Graph"
Rundir="Test"
logPath = os.path.join(logdir,Rundir)
checkpointcallback = ModelCheckpoint(os.path.join(logPath,'BestModel'), monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1)
tbCallback = TensorBoard(log_dir=logPath, histogram_freq=0,
                         write_graph=True, write_images=True)
model = CreateModel(input_shape,input_shape,channels,logdir,Rundir,20)
model.fit(X_train,y_train,batch_size=1,validation_data=(X_val,y_val), callbacks=[tbCallback,checkpointcallback],epochs=10000,shuffle=True)
model.load_weights(os.path.join(logPath, 'BestModel'))
#model.evaluate(XS[0:2672,:,:,:],yS[0:2672,:])
#model.evaluate(XS[2672:2682,:,:,:],yS[2672:2682,:])
#model.evaluate(XTest,yTest[:,0:20])
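# A minimal follow-up sketch: evaluate the restored best weights on the held-out
# test split returned by PrepareData (assumes shapes match the model input).
model.evaluate(X_test, y_test, batch_size=1)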
|
nilq/baby-python
|
python
|
# device provisioning - automate device configuration files
def greeting(name):
print("Hello", name)
greeting(input("What is your name: \n"))
if_name = input("Please provide the interface name: \n")
if_name = if_name.lower()
print(if_name)
# ip_addr = '10.1.10.254'
# vrf = 'lab'
# ping = 'ping {} vrf {} '.format(ip_addr, vrf)
# print(ping)
ip_addr = input("Please provide the IP Address: \n")
vrf = input("Please provide the VRF name: \n")
ping = "ping {} vrf {}"
tracer = "traceroute {} vrf {}"
ping_command = ping.format(ip_addr, vrf)
tracer_command = tracer.format(ip_addr, vrf)
print(ping_command)
print(tracer_command)
# ping = 'ping' + ' ' + ip_addr + ' ' + 'vrf' + vrf
hostnames = ["X1", "X2", "X3", "X4", "X66"]
for devices in hostnames:
print(devices)
print(ip_addr.startswith('10'))
print(ip_addr.startswith('192'))
print(ip_addr.endswith('254'))
print(ip_addr.split('.'))
a = int(input("How old are you ? \n"))
print(a)
print(ip_addr.isdigit())
first_octet = '01101101'
print(first_octet.count('1'))
print(first_octet.count('0'))
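# A small follow-up sketch: the same octet interpreted as a decimal value (base-2 int).
print(int(first_octet, 2))  # '01101101' -> 109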
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""XML to dict parse."""
import json
import textwrap
from configparser import ConfigParser
from pathlib import Path
from typing import Iterable
import untangle
from jira_freeplane.common import LOG
from jira_freeplane.mm_settings import MMConfig
class Node:
"""Node class."""
COLLECTION = {}
def __init__(
self,
config: MMConfig,
node: untangle.Element,
depth: int,
parent: untangle.Element = None, # type: ignore
) -> None:
self.glb = config
self.depth = depth
self.id = node["ID"]
self.node = node
Node.COLLECTION[self.id] = self
self.parent_id = parent["ID"] if parent else None
self.text = node["TEXT"] or ""
self.link = node["LINK"] or ""
try:
rich = node.richcontent.html.body # type: ignore
lines = []
for p in rich.get_elements("p"):
lines.append(p.cdata.rstrip())
flat = textwrap.dedent("\n".join(line if line else "\n" for line in lines))
dat = flat.replace("\n\n", "\n").rstrip()
self.note = dat
except AttributeError:
self.note = ""
self.cfile = self.glb.data_dir.joinpath(f"{self.id}.ini")
self.parent_cfile = self.glb.data_dir.joinpath(f"{self.parent_id}.ini")
self._config = ConfigParser()
self._parent_config = ConfigParser()
def children(self) -> Iterable["Node"]:
"""Get subtask children."""
yield from node_tree_with_depth(self.glb, self.node)
@property
def child_text(self) -> str:
"""Get subtask children."""
if self.link:
txt = f"[{self.text}|{self.link}]"
else:
txt = self.text
newlinecnt = txt.count("\n")
if newlinecnt > 1:
txt = "{code}" + txt + "{code}"
return self.depth * "*" + " " + txt
def _load_config(self, config_val: ConfigParser, config_path: Path) -> ConfigParser:
"""Load config."""
if config_path.exists():
config_val.read(str(config_path))
return config_val
if config_val.sections():
return config_val
else:
config_val.add_section("jira")
with config_path.open("w") as f:
config_val.write(f)
return config_val
def _save(self, config_val: ConfigParser, config_path: Path) -> None:
"""Load config."""
with config_path.open("w") as f:
config_val.write(f)
@property
def config(self) -> ConfigParser:
"""Config property."""
return self._load_config(self._config, self.cfile)
@property
def parent_config(self) -> ConfigParser:
"""Parent config property."""
return self._load_config(self._parent_config, self.parent_cfile)
def parent_save(self) -> None:
"""Save parent config."""
self._save(self._parent_config, self.parent_cfile)
def save(self) -> None:
"""Save config."""
self._save(self._config, self.cfile)
def is_task(self) -> bool:
"""Check if node is task."""
return self.depth <= 3
@property
def depth_type(self) -> str:
"""Return depth type."""
if self.depth == 0:
return self.glb.TYPE_ROOT
if self.depth == 1:
return self.glb.TYPE_EPIC
if self.depth == 2:
return self.glb.TYPE_TASK
if self.depth == 3:
return self.glb.TYPE_SUBTASK
else:
return str(self.depth - 3)
def node_tree_with_depth(config: MMConfig, root: untangle.Element) -> Iterable[Node]:
"""Return a list of nodes with depth."""
def _vals(node: untangle.Element, depth=0, parent: untangle.Element = None): # type: ignore
yield node, depth, parent
children = node.get_elements("node")
if not children:
return
for child in children:
yield from _vals(child, depth + 1, node)
for node, depth, parent in _vals(root): # type: ignore
yield Node(config, node, depth, parent)
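# A brief usage sketch (assumptions: the mind map is a Freeplane .mm file whose XML
# root is <map> containing one top-level <node>, and `config` is an MMConfig instance):
# doc = untangle.parse("example.mm")
# for n in node_tree_with_depth(config, doc.map.node):
#     LOG.info("%s %s", n.depth, n.text)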
def create_subtasks(config: MMConfig, nodes: Iterable[Node]) -> None:
"""Create epic."""
for node in nodes:
if node.depth_type != config.TYPE_SUBTASK:
continue
if node.config.has_option("jira", "key"):
key = node.config.get("jira", "key")
LOG.info(f"{node.cfile} / {key} exists, skipping")
continue
parent_key = node.parent_config.get("jira", "key")
LOG.info(f'running "{node.text}" / linking to "{parent_key}"')
body = ""
if node.link:
body += f"\n\n{node.link}"
for i in node.children():
if i.depth == 0:
continue
body += f"\n{i.child_text}"
LOG.info(
f"Creating parent {node.id}, {node.depth_type}, {node.depth}, {node.text}"
)
if node.note:
body += f"-----------------------------\n\n\n{node.note}"
try:
parent_key = node.parent_config.get("jira", "key")
except Exception:
LOG.info("Err %s", node.parent_cfile.read_text())
LOG.info(node.parent_config.get("jira", "key"))
raise
working = dict(config.data_dct[config.TYPE_SUBTASK])
working["Summary"] = node.text
working["Parent"] = { # type: ignore
"key": parent_key,
}
working["Description"] = body or "---"
conv = config.jira.to_jira_dct(working)
key = config.jira.submit(conv)
LOG.info(f"Created Issue -> {config.jira_url}/browse/{key}")
node.config.set("jira", "json_body", json.dumps(working))
node.config.set("jira", "key", key)
node.config.set("jira", "is_linked", "false")
LOG.info(f"Writing config file {key}")
with node.cfile.open("w") as f:
LOG.info(f"writing {node.text} -> {node.cfile}")
node.config.write(f)
def create_tasks(config: MMConfig, nodes: Iterable[Node]) -> None:
"""Create epic."""
for node in nodes:
if node.depth_type != config.TYPE_TASK:
continue
if node.cfile.exists():
LOG.info(f"{node.cfile} exists, skipping")
continue
parent_key = node.parent_config["jira"]["key"]
working = dict(config.data_dct[config.TYPE_TASK])
working["Summary"] = node.text
working["Epic Link"] = parent_key
working["Description"] = node.note or "---"
conv = config.jira.to_jira_dct(working)
key = config.jira.submit(conv)
LOG.info(f"Created Issue -> {config.jira_url}/browse/{key}")
node.config.set("jira", "json_body", json.dumps(working))
node.config.set("jira", "key", key)
node.config.set("jira", "is_linked", "false")
LOG.info(f"Writing config file {key}")
with node.cfile.open("w") as f:
LOG.info(f"writing {node.text} -> {node.cfile}")
node.config.write(f)
def create_epics(config: MMConfig, nodes: Iterable[Node]) -> None:
"""Create epic."""
runlist = []
for node in nodes:
if node.depth_type != config.TYPE_EPIC:
continue
runlist.append(node)
if node.cfile.exists():
LOG.info(f"{node.cfile} exists, skipping")
continue
working = dict(config.data_dct[config.TYPE_EPIC])
working["Summary"] = node.text
working["Epic Name"] = node.text
working["Description"] = node.note or "---"
conv = config.jira.to_jira_dct(working)
key = config.jira.submit(conv)
LOG.info(f"Created Issue -> {config.jira_url}/browse/{key}")
node.config.set("jira", "json_body", json.dumps(working))
node.config.set("jira", "key", key)
node.config.set("jira", "is_linked", "false")
LOG.info(f"Writing config file {key}")
with node.cfile.open("w") as f:
LOG.info(f"writing {node.text} -> {node.cfile}")
node.config.write(f)
for node in runlist:
with node.cfile.open() as f:
node.config.read_file(f)
if node.config.get("jira", "key") == "None":
raise ValueError(f"{node.cfile} has no key")
if node.config.get("jira", "is_linked") == "true":
LOG.info(f"{node.cfile} is linked, skipping")
continue
config.jira.link_parent_issue(
node.config.get("jira", "key"), config.project_parent_issue_key
)
node.config.set("jira", "is_linked", "true")
with node.cfile.open("w") as f:
LOG.info(f"updating with linked {node.text} -> {node.cfile}")
node.config.write(f)
def show_summary(config: MMConfig, nodes: Iterable[Node]) -> None:
"""Create epic."""
for node in nodes:
if node.depth_type not in [
config.TYPE_EPIC,
config.TYPE_TASK,
config.TYPE_SUBTASK,
]:
continue
key = node.config.get("jira", "key")
LOG.info(f"{config.jira_url}/browse/{key} -> {node.text}")
|
nilq/baby-python
|
python
|
from spidermon.contrib.actions.telegram.notifiers import (
SendTelegramMessageSpiderFinished,
)
from spidermon.contrib.scrapy.monitors import ErrorCountMonitor, FinishReasonMonitor
from spidermon.core.suites import MonitorSuite
class SpiderCloseMonitorSuite(MonitorSuite):
monitors = [
ErrorCountMonitor,
FinishReasonMonitor,
]
monitors_finished_actions = [
SendTelegramMessageSpiderFinished,
]
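# A hedged usage note: to activate this suite in a Scrapy project, settings along
# these lines are typically used (the dotted path below is an assumption):
# SPIDERMON_ENABLED = True
# SPIDERMON_SPIDER_CLOSE_MONITORS = ("myproject.monitors.SpiderCloseMonitorSuite",)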
|
nilq/baby-python
|
python
|
arr = [1, 4, 7, 9, 14, 17, 39, 56]
targets = (8, 39)
def linear_search(arr, target):
"""
    >>> all(linear_search(arr, x) == (arr.index(x) if x in arr else -1) for x in targets)
True
"""
for i, item in enumerate(arr):
if item == target:
return i
return -1
for target in targets:
print(f"linear_search({arr}, {target}) = {linear_search(arr, target)}")
|
nilq/baby-python
|
python
|
"""Test subscriptions interact with ISAs:
- Create an ISA.
- Create a subscription, response should include the pre-existing ISA.
- Modify the ISA, response should include the subscription.
- Delete the ISA, response should include the subscription.
- Delete the subscription.
"""
import datetime
from monitoring.monitorlib.infrastructure import default_scope
from monitoring.monitorlib import rid
from monitoring.monitorlib.rid import SCOPE_READ, SCOPE_WRITE
from . import common
ISA_ID = '000000d5-aa3d-46b8-b2ec-dd22e7000000'
SUB_ID = '000000ee-85c7-4bc6-8995-aa5f81000000'
def test_ensure_clean_workspace(session):
resp = session.get('/identification_service_areas/{}'.format(ISA_ID), scope=SCOPE_READ)
if resp.status_code == 200:
version = resp.json()['service_area']['version']
resp = session.delete('/identification_service_areas/{}/{}'.format(ISA_ID, version), scope=SCOPE_WRITE)
assert resp.status_code == 200, resp.content
elif resp.status_code == 404:
# As expected.
pass
else:
assert False, resp.content
resp = session.get('/subscriptions/{}'.format(SUB_ID), scope=SCOPE_READ)
if resp.status_code == 200:
version = resp.json()['subscription']['version']
resp = session.delete('/subscriptions/{}/{}'.format(SUB_ID, version), scope=SCOPE_READ)
assert resp.status_code == 200, resp.content
elif resp.status_code == 404:
# As expected
pass
else:
assert False, resp.content
@default_scope(SCOPE_WRITE)
def test_create_isa(session):
time_start = datetime.datetime.utcnow()
time_end = time_start + datetime.timedelta(minutes=60)
resp = session.put(
'/identification_service_areas/{}'.format(ISA_ID),
json={
'extents': {
'spatial_volume': {
'footprint': {
'vertices': common.VERTICES,
},
'altitude_lo': 20,
'altitude_hi': 400,
},
'time_start': time_start.strftime(rid.DATE_FORMAT),
'time_end': time_end.strftime(rid.DATE_FORMAT),
},
'flights_url': 'https://example.com/dss',
})
assert resp.status_code == 200, resp.content
@default_scope(SCOPE_READ)
def test_create_subscription(session):
time_start = datetime.datetime.utcnow()
time_end = time_start + datetime.timedelta(minutes=60)
resp = session.put(
'/subscriptions/{}'.format(SUB_ID),
json={
'extents': {
'spatial_volume': {
'footprint': {
'vertices': common.VERTICES,
},
'altitude_lo': 20,
'altitude_hi': 400,
},
'time_start': time_start.strftime(rid.DATE_FORMAT),
'time_end': time_end.strftime(rid.DATE_FORMAT),
},
'callbacks': {
'identification_service_area_url': 'https://example.com/foo'
},
})
assert resp.status_code == 200, resp.content
# The response should include our ISA.
data = resp.json()
assert data['subscription']['notification_index'] == 0
assert ISA_ID in [x['id'] for x in data['service_areas']]
def test_modify_isa(session):
# GET the ISA first to find its version.
resp = session.get('/identification_service_areas/{}'.format(ISA_ID), scope=SCOPE_READ)
assert resp.status_code == 200, resp.content
version = resp.json()['service_area']['version']
# Then modify it.
time_end = datetime.datetime.utcnow() + datetime.timedelta(minutes=60)
resp = session.put(
'/identification_service_areas/{}/{}'.format(ISA_ID, version),
json={
'extents': {
'spatial_volume': {
'footprint': {
'vertices': common.VERTICES,
},
'altitude_lo': 12345,
'altitude_hi': 67890,
},
'time_end': time_end.strftime(rid.DATE_FORMAT),
},
'flights_url': 'https://example.com/dss',
}, scope=SCOPE_WRITE)
assert resp.status_code == 200, resp.content
# The response should include our subscription.
data = resp.json()
assert {
'url':
'https://example.com/foo',
'subscriptions': [{
'notification_index': 1,
'subscription_id': SUB_ID,
},],
} in data['subscribers']
def test_delete_isa(session):
# GET the ISA first to find its version.
resp = session.get('/identification_service_areas/{}'.format(ISA_ID), scope=SCOPE_READ)
assert resp.status_code == 200, resp.content
version = resp.json()['service_area']['version']
# Then delete it.
resp = session.delete('/identification_service_areas/{}/{}'.format(
ISA_ID, version), scope=SCOPE_WRITE)
assert resp.status_code == 200, resp.content
# The response should include our subscription.
data = resp.json()
assert {
'url':
'https://example.com/foo',
'subscriptions': [{
'notification_index': 2,
'subscription_id': SUB_ID,
},],
} in data['subscribers']
@default_scope(SCOPE_READ)
def test_delete_subscription(session):
# GET the sub first to find its version.
resp = session.get('/subscriptions/{}'.format(SUB_ID))
assert resp.status_code == 200, resp.content
data = resp.json()
version = data['subscription']['version']
assert data['subscription']['notification_index'] == 2
# Then delete it.
resp = session.delete('/subscriptions/{}/{}'.format(SUB_ID, version))
assert resp.status_code == 200, resp.content
|
nilq/baby-python
|
python
|
import argparse
import glob
import os
import pandas as pd
import numpy as np
import cv2
from math import sqrt
import random
import wget
import zipfile
'''
Labels used in
[1] T. Kawashima et al., "Action recognition from extremely low-resolution
thermal image sequence," 2017 14th IEEE International Conference on Advanced Video
and Signal Based Surveillance (AVSS), Lecce, 2017, pp. 1-6.
'''
PAPER_LABELS_REGEX = dict([
(r'walk.*', 0),
(r'sitdown', 1),
(r'standup', 2),
(r'falling.*', 3),
(r'^(sit|lie|stand)$', 4),
])
LABELS_REGEX = dict([
(r'walk.*', 0),
(r'sitdown', 1),
(r'standup', 2),
(r'falling.*', 3),
(r'sit', 4),
(r'lie', 5),
(r'stand', 6),
])
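# A minimal sketch showing how an action name can be mapped to a label with these
# patterns (`label_from_name` is a hypothetical helper, not part of the original module).
import re

def label_from_name(name: str, labels_regex: dict = LABELS_REGEX) -> int:
    """Return the numeric label whose pattern matches `name`, or -1 if none match."""
    for pattern, label in labels_regex.items():
        if re.fullmatch(pattern, name):
            return label
    return -1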
SKIP_FRAMES = 20
DATASET_URL = "https://github.com/muralab/Low-Resolution-FIR-Action-Dataset/archive/master.zip"
DATASET_FN_ZIP = "Low-Resolution-FIR-Action-Dataset-master.zip"
def download(dataset_dir: str, dataset_name: str = "dataset"):
print("Downloading FIR Action Dataset...")
    wget.download(DATASET_URL, bar=wget.bar_thermometer)
path, filename = dataset_dir, dataset_name
with zipfile.ZipFile(DATASET_FN_ZIP, "r") as zip_ref:
zip_ref.extractall(path)
os.remove(DATASET_FN_ZIP)
dataset_fn = DATASET_FN_ZIP.split(".")[-2]
os.rename(os.path.join(path, dataset_fn), os.path.join(path, filename))
print("")
print("Dataset downloaded to %s" % os.path.join(path, filename))
return
def load_annotation(dataset_dir: str) -> pd.core.frame.DataFrame:
pattern = os.path.join(dataset_dir, 'annotation', '*_human.csv')
generator = glob.iglob(pattern)
return pd.concat([pd.read_csv(fn, header=None)
for fn in generator], ignore_index=True)
def read_sequence_annotation(sequence_name: str, annotation: pd.core.frame.DataFrame = None) -> list:
if annotation is None:
return []
sequence_annotation_pd = annotation[annotation[0] == sequence_name]
return sequence_annotation_pd.iloc[:, 1:].values.tolist()
def list_sequences(dataset_dir: str) -> list:
pattern = os.path.join(dataset_dir, '*', 'raw', '*.csv')
generator = glob.iglob(pattern)
return [sequence for sequence in generator]
def sequence_heatmap(sequence: np.ndarray, min: int = 20, max: int = 40, cv_colormap: int = cv2.COLORMAP_JET) -> np.ndarray:
sequence_clipped = np.clip(sequence, min, max)
sequence_normalized = (255 * ((sequence_clipped-min) /
(max-min))).astype(np.uint8)
shape = sequence.shape
heatmap_flat = cv2.applyColorMap(
sequence_normalized.flatten(), cv_colormap)
return heatmap_flat.reshape([shape[0], shape[1], shape[2], 3])
class Dataset():
def __init__(self, dataset_dir: str, sample: bool = False, samples_k: int = 10, labels=None):
self.annotation = load_annotation(dataset_dir)
self.sequences = list_sequences(dataset_dir)
if sample:
self.sequences = random.sample(self.sequences, samples_k)
if labels:
self.labels = labels
self.directory = dataset_dir
def __len__(self):
return len(self.sequences)
def __getitem__(self, idx):
return Sequence(self.sequences[idx], dataset_annotation=self.annotation)
class Action(Dataset):
def __init__(self, dataset, label, samples_k=3):
annotation = dataset.annotation
self.annotation = annotation[annotation[3].str.contains(
label)].sample(samples_k)
#[sequence for sequence in dataset.sequences if b[0].str.contains(sequence.split(os.path.sep)[-1]).any()]
self.sequences = list(self.annotation[0].unique())
self.directory = dataset.directory
def __len__(self):
return len(self.annotation)
def __getitem__(self, idx):
sequence_name = self.annotation[0].iloc[idx]
fn = os.path.join(self.directory, sequence_name.split("_")[
0], "raw", sequence_name)
return Sequence(fn, frame_start=self.annotation[1].iloc[idx], frame_stop=self.annotation[2].iloc[idx])
class Sequence(np.ndarray):
def __new__(cls, fn: str, dataset_annotation=None, frame_start=None, frame_stop=None):
# read dataframe
dataframe = pd.read_csv(fn, skiprows=[0, 1], header=None)
# skip time and PTAT columns
pixels = dataframe.iloc[:, 2:].values
PTAT = dataframe.iloc[:, 1:2].values
min = pixels[SKIP_FRAMES:].min()
max = pixels[SKIP_FRAMES:].max()
PTAT = PTAT[frame_start:frame_stop]
pixels = pixels[frame_start:frame_stop][:]
# reshape to [frames, h, w] array
frames, h, w = pixels.shape[0], (int)(
sqrt(pixels.shape[1])), (int)(sqrt(pixels.shape[1]))
obj = np.asarray(pixels.reshape([frames, h, w])).view(cls)
# add custom sequence attributes
obj.filename = fn
path, sequence_name = os.path.split(fn)
obj.sequence_name = sequence_name
obj.dataset_annotation = dataset_annotation
obj.start = frame_start
obj.stop = frame_stop
obj.temp_min = min
obj.temp_max = max
obj.PTAT = PTAT
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self.filename = getattr(obj, 'filename', None)
self.sequence_name = getattr(obj, 'sequence_name', None)
self.dataset_annotation = getattr(obj, 'dataset_annotation', None)
self.start = getattr(obj, 'start', None)
self.stop = getattr(obj, 'stop', None)
        self.temp_min = getattr(obj, 'temp_min', None)
        self.temp_max = getattr(obj, 'temp_max', None)
        self.PTAT = getattr(obj, 'PTAT', None)
def annotation(self):
return read_sequence_annotation(self.sequence_name, self.dataset_annotation)
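# A minimal usage sketch (assumes the dataset has already been downloaded and
# extracted to "dataset/" in the layout expected by list_sequences/load_annotation):
if __name__ == "__main__":
    fir = Dataset("dataset", sample=True, samples_k=1)
    seq = fir[0]
    heatmap = sequence_heatmap(np.asarray(seq))
    print(seq.sequence_name, seq.shape, heatmap.shape)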
|
nilq/baby-python
|
python
|
class Account:
"""
Class that generates new instances of accounts
"""
account_list = []
def __init__(self,account_name,password):
"""
__init__ method that helps us define the properties for our objects
Args:
account_name: New account name
password: New account password
"""
self.acc_name = account_name
self.password = password
def save_account(self):
"""
save_account method saves the account objects into the account_list
"""
Account.account_list.append(self)
def delete_account(self):
"""
delete_account method deletes a saved account from the account list
"""
Account.account_list.remove(self)
@classmethod
def display_account(cls):
"""
method that returns a list of all saved accounts
"""
return cls.account_list
@classmethod
def find_by_account_name(cls,acc_name):
'''
Method that takes in the account name and returns a account that matches that name.
Args:
acc_name: Account name to search for
Returns :
Account that matches the name.
'''
for account in cls.account_list:
if account.acc_name == acc_name:
return account
@classmethod
def account_exist(cls,acc_name):
'''
Method that checks if an account exists from the account list.
Args:
acc_name: Account name to search if it exists
Returns :
Boolean: True or false depending if the account exists
'''
for account in cls.account_list:
if account.acc_name == acc_name:
return True
return False
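# A brief usage sketch (sample data below is hypothetical):
if __name__ == "__main__":
    twitter = Account("twitter", "p@ssw0rd")
    twitter.save_account()
    print(Account.account_exist("twitter"))                   # True
    print(Account.find_by_account_name("twitter").password)   # p@ssw0rd
    twitter.delete_account()
    print(Account.account_exist("twitter"))                   # False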
|
nilq/baby-python
|
python
|
"""
Problem 16: https://projecteuler.net/problem=16
2^15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
What is the sum of the digits of the number 2^1000?
"""
def solution(power: int = 1000) -> int:
"""
Returns the sum of the digits of the number 2^power.
>>> solution(1000)
1366
>>> solution(50)
76
>>> solution(20)
31
>>> solution(15)
26
"""
num = 2 ** power
string_num = str(num)
list_num = list(string_num)
sum_of_num = 0
for i in list_num:
sum_of_num += int(i)
return sum_of_num
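# A compact equivalent of solution(): sum(int(digit) for digit in str(2 ** power))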
if __name__ == "__main__":
power = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2 ** power)
result = solution(power)
print("Sum of the digits is: ", result)
|
nilq/baby-python
|
python
|
from flask import Flask, jsonify, make_response, request
app = Flask(__name__)
@app.route('/parse/json', methods=['GET', 'POST', 'DELETE', 'PUT'])
def add():
if request.headers.get("Content-Type") == 'application/json':
        # The HTTP request's MIME type is application/json
data = request.get_json()
return jsonify(data)
else:
json_message = {
'error':'Not supported: {}'.format(request.headers.get("Content-Type"))
}
return make_response(jsonify(json_message), 400)
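# A minimal client sketch (assumes the app runs locally on Flask's default port 5000):
# import requests
# resp = requests.post("http://localhost:5000/parse/json", json={"message": "hello"})
# print(resp.status_code, resp.json())
if __name__ == '__main__':
    app.run(debug=True)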
|
nilq/baby-python
|
python
|
import logging
from subprocess import (
PIPE,
Popen
)
SUDO_PATH = '/usr/bin/sudo'
SUDO_PRESERVE_ENVIRONMENT_ARG = '-E'
SUDO_USER_ARG = '-u'
log = logging.getLogger(__name__)
def sudo_popen(*args, **kwargs):
"""
    Helper for building and executing a sudo Popen command. This is potentially
    sensitive code, so it should probably be centralized.
"""
user = kwargs.get("user", None)
full_command = [SUDO_PATH, SUDO_PRESERVE_ENVIRONMENT_ARG]
if user:
full_command.extend([SUDO_USER_ARG, user])
full_command.extend(args)
log.info(f"About to execute the following sudo command - [{' '.join(full_command)}]")
p = Popen(full_command, shell=False, stdout=PIPE, stderr=PIPE)
return p
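# A brief usage sketch (command and user below are placeholders):
# p = sudo_popen('whoami', user='someuser')
# stdout, stderr = p.communicate()
# log.info("sudo exited %s: %s", p.returncode, stdout)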
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_visualise_graph.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog_visualiseGraph(object):
def setupUi(self, Dialog_visualiseGraph):
Dialog_visualiseGraph.setObjectName("Dialog_visualiseGraph")
Dialog_visualiseGraph.resize(1098, 753)
self.gridLayout = QtWidgets.QGridLayout(Dialog_visualiseGraph)
self.gridLayout.setObjectName("gridLayout")
self.splitter = QtWidgets.QSplitter(Dialog_visualiseGraph)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName("splitter")
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
self.groupBox_2 = QtWidgets.QGroupBox(Dialog_visualiseGraph)
self.groupBox_2.setMinimumSize(QtCore.QSize(0, 40))
self.groupBox_2.setTitle("")
self.groupBox_2.setObjectName("groupBox_2")
self.pushButton_view = QtWidgets.QPushButton(self.groupBox_2)
self.pushButton_view.setGeometry(QtCore.QRect(0, 0, 161, 27))
self.pushButton_view.setObjectName("pushButton_view")
self.checkBox_blackandwhite = QtWidgets.QCheckBox(self.groupBox_2)
self.checkBox_blackandwhite.setGeometry(QtCore.QRect(170, 0, 191, 22))
self.checkBox_blackandwhite.setObjectName("checkBox_blackandwhite")
self.checkBox_fontsize = QtWidgets.QCheckBox(self.groupBox_2)
self.checkBox_fontsize.setGeometry(QtCore.QRect(370, 0, 281, 22))
self.checkBox_fontsize.setObjectName("checkBox_fontsize")
self.comboBox = QtWidgets.QComboBox(self.groupBox_2)
self.comboBox.setGeometry(QtCore.QRect(660, 0, 421, 30))
self.comboBox.setObjectName("comboBox")
self.gridLayout.addWidget(self.groupBox_2, 2, 0, 1, 1)
self.retranslateUi(Dialog_visualiseGraph)
QtCore.QMetaObject.connectSlotsByName(Dialog_visualiseGraph)
def retranslateUi(self, Dialog_visualiseGraph):
_translate = QtCore.QCoreApplication.translate
Dialog_visualiseGraph.setWindowTitle(_translate("Dialog_visualiseGraph", "Graph Visualisation"))
self.pushButton_view.setText(_translate("Dialog_visualiseGraph", "View graph"))
self.checkBox_blackandwhite.setText(_translate("Dialog_visualiseGraph", "Black and white"))
self.checkBox_fontsize.setText(_translate("Dialog_visualiseGraph", "Categories larger font"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog_visualiseGraph = QtWidgets.QDialog()
ui = Ui_Dialog_visualiseGraph()
ui.setupUi(Dialog_visualiseGraph)
Dialog_visualiseGraph.show()
sys.exit(app.exec_())
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright (c) 2005 Freescale Semiconductor, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# o Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
#
# o Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# o Neither the name of Freescale Semiconductor, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import unittest
__all__ = ["align_down", "align_up", "mymkarg", "findPathListCommonPrefix", "splitPath", "rebuildPathSimple", "onlyHyphensPlease", "suite"]
def get_dict_default(d, k, default):
if not (k in d):
return default
else:
return d[k]
def align_down(x, a):
return x & ~(a - 1)
def align_up(x, a):
    return (x + a - 1) // a * a
# This is a modified version of mkarg from the commands module. It will never use single
# quoting, because the DOS shell does not like that.
def mymkarg(x):
# XXX return empty string as quoted???
if len(x) == 0:
return ' ""'
# return with whitespace prefix if all one word with no quotes
if '"' not in x and "'" not in x and " " not in x and "\t" not in x:
return " " + x
# return double quoted if no double quotes
if '"' not in x:
return ' "' + x + '"'
escapeChars = '\\$"`'
if sys.platform == 'win32':
escapeChars = r'"'
# otherwise, return double quoted, but escape double quotes
s = ' "'
for c in x:
if c in escapeChars:
s += "\\"
s += c
return s + '"'
def mkcmdline(x):
    return ''.join([mymkarg(i) for i in x])
# Takes a list containing lists of the directories in paths. Returns a list
# containing the common directories between all members of the paths argument.
def findPathListCommonPrefix(paths):
result = []
if len(paths) == 0:
return result
for i in range(0, min(map(lambda p: len(p), paths))):
pathComponent = paths[0][i]
for thisPath in paths:
if thisPath[i] != pathComponent:
return result
result.append(pathComponent)
return result
# Returns a list containing the elements of the path argument.
def splitPath(path):
if len(path) == 0:
return ['']
else:
return os.path.normpath(path).split(os.path.sep)
#//////////////////////////////////////////////////////////////////////////////
# Finds the SOCFirmware root path, does not yet support UNC
#//////////////////////////////////////////////////////////////////////////////
def findRoot(curPath, basedir="SOCFirmware", caseSensitive=True, loop=False):
"returns path to SOCFirmware"
Parent, Directory = os.path.split(curPath)
root = Parent
if caseSensitive == True:
if Directory != basedir:
root = findRoot(Parent, basedir, caseSensitive, True)
else:
        if Directory.upper() != basedir.upper():
root = findRoot(Parent, basedir, caseSensitive, True)
if loop == True: # is this a recursive call?
return root
return os.path.join(root,basedir)
# This is a not-so-smart path rebuilder. Takes "path" which is relative to
# "originalDir", and returns the same path modified so that it is relative to
# "newDir".
def rebuildPathSimple(originalDir, newDir, path):
# just return absolute paths unchanged
if os.path.isabs(path):
return path
absOriginal = os.path.abspath(originalDir)
absNew = os.path.abspath(newDir)
print ("absOrig=",absOriginal)
print ('absNew=',absNew)
originalDirSplit = absOriginal.split(os.path.sep)
newDirSplit = absNew.split(os.path.sep)
commonSplit = findPathListCommonPrefix([originalDirSplit, newDirSplit])
pathComponents = []
newSplit = absNew.split(os.path.sep)
stepsBack = len(newSplit) - len(commonSplit)
if stepsBack > 0:
backList = ['..'] * stepsBack
pathComponents.append(os.path.join(*backList))
# determine path from common to original
originalSplit = absOriginal.split(os.path.sep)
stepsForward = len(originalSplit) - len(commonSplit)
if stepsForward > 0:
forwardList = originalSplit[-stepsForward:]
pathComponents.append(os.path.join(*forwardList))
pathComponents.append(path)
return os.path.normpath(os.path.join(*pathComponents))
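# Worked example (POSIX-style paths, ignoring the debug prints above):
# rebuildPathSimple('/a/b/c', '/a/b/d', 'x/y.txt') steps back once from '/a/b/d'
# to the common prefix '/a/b', forward into 'c', and returns '../c/x/y.txt'.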
class mymkargUnitTest(unittest.TestCase):
def test_mymkarg(self):
self.assertEqual(mymkarg("foo"), ' foo')
self.assertEqual(mymkarg(""), ' ""')
self.assertEqual(mymkarg('he "said"'), r' "he \"said\""')
if sys.platform == 'win32':
self.assertEqual(mymkarg('$10.00'), r' $10.00')
else:
self.assertEqual(mymkarg('$10.00'), r' $10.00')
# Unit test for findPathListCommonPrefix() function.
class FindPathListCommonPrefixUnitTest(unittest.TestCase):
def split(self, path):
return splitPath(path)
def test_empty(self):
prefix = findPathListCommonPrefix([])
self.assertEqual(len(prefix), 0)
def test_single(self):
path = self.split("/Library/Widgets")
prefix = findPathListCommonPrefix([path])
self.assertEqual(prefix, path)
def test_multiple_equal(self):
path = self.split("/usr/local/apache/include/httpd.h")
prefix = findPathListCommonPrefix([path, path])
self.assertEqual(prefix, path)
prefix = findPathListCommonPrefix([path, path, path])
self.assertEqual(prefix, path)
prefix = findPathListCommonPrefix([path, path, path, path, path, path, path])
self.assertEqual(prefix, path)
def test_not_equal(self):
path1 = self.split("foo/bar")
path2 = self.split("baz/buz")
prefix = findPathListCommonPrefix([path1, path2])
self.assertEqual(prefix, [])
def test_complex(self):
path1 = self.split("/usr/local/apache/include/httpd.h")
path2 = self.split("/usr/local/apache/conf/httpd.conf")
path3 = self.split("/usr/local/bin/python")
path4 = self.split("/System/Library/Frameworks")
prefix = findPathListCommonPrefix([path1, path2])
self.assertEqual(prefix, self.split("/usr/local/apache"))
prefix = findPathListCommonPrefix([path1, path2, path3])
self.assertEqual(prefix, self.split("/usr/local"))
prefix = findPathListCommonPrefix([path1, path2, path3, path4])
self.assertEqual(prefix, self.split(""))
class rebuildPathSimpleUnitTest(unittest.TestCase):
def test_rebuild(self):
pass
def onlyHyphensPlease( argList ):
"""
argList is a list of strings, such as the argv of a
Python script. This function searches in argList for the
presence of em- or en-dashes. If any are found, they are
converted to hyphens.
Return:
True if found
False if none found
"""
bReplaced = False
for i in range(len(argList)):
if (argList[i].find('\x97') >= 0) or (argList[i].find('\x96') >= 0) :
bReplaced = True
arg = argList[i].replace('\x97','-') # Replace em-dashes with hyphens.
argList[i] = arg.replace('\x96','-') # Replace en-dashes with hyphens.
return bReplaced
class onlyHyphensPleaseUnitTest( unittest.TestCase ):
def test_hyphens(self):
em_dash = "kung\x97fu"
en_dash = "kung\x96fu"
argList = [ em_dash, en_dash ]
hyphen = "kung-fu"
onlyHyphensPlease( argList )
self.assertEqual( argList[0] , hyphen )
self.assertEqual( argList[1] , hyphen )
def suite():
argSuite = unittest.makeSuite(mymkargUnitTest)
pathListSuite = unittest.makeSuite(FindPathListCommonPrefixUnitTest)
rebuildSuite = unittest.makeSuite(rebuildPathSimpleUnitTest)
hyphenSuite = unittest.makeSuite(onlyHyphensPleaseUnitTest)
suite = unittest.TestSuite()
suite.addTests((argSuite, pathListSuite, rebuildSuite, hyphenSuite))
return suite
# Run unit tests when this source file is executed directly from the command
# line.
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=2).run(suite())
|
nilq/baby-python
|
python
|
from voluptuous import *
from ..defaults import settings, filtertypes
from ..exceptions import ConfigurationError
from . import SchemaCheck
import logging
logger = logging.getLogger(__name__)
def filtertype():
return {
Required('filtertype'): Any(
In(settings.all_filtertypes()),
msg='filtertype must be one of {0}'.format(
settings.all_filtertypes()
)
)
}
def structure():
# This is to first ensure that only the possible keys/filter elements are
# there, and get a dictionary back to work with.
retval = settings.structural_filter_elements()
retval.update(filtertype())
return Schema(retval)
def single(action, data):
try:
ft = data['filtertype']
except KeyError:
raise ConfigurationError('Missing key "filtertype"')
f = filtertype()
for each in getattr(filtertypes, ft)(action, data):
f.update(each)
return Schema(f)
def Filters(action, location=None):
def f(v):
def prune_nones(mydict):
return dict([(k,v) for k, v in mydict.items() if v != None and v != 'None'])
# This validator method simply validates all filters in the list.
for idx in range(0, len(v)):
pruned = prune_nones(v[idx])
filter_dict = SchemaCheck(
pruned,
single(action, pruned),
'filter',
'{0}, filter #{1}: {2}'.format(location, idx, pruned)
).result()
logger.debug('Filter #{0}: {1}'.format(idx, filter_dict))
v[idx] = filter_dict
# If we've made it here without raising an Exception, it's valid
return v
return f
|
nilq/baby-python
|
python
|
sessions = int(input())
teams = [int(x) for x in input().split()]
possible = True
# Carry each session's odd leftover into the next session; a negative count or a
# final odd leftover means the pairing is impossible.
for i in range(sessions - 1):
    if teams[i] < 0: possible = False
    teams[i+1] -= teams[i] % 2
if teams[sessions - 1] % 2 == 1: possible = False
print("YES" if possible else "NO")
|
nilq/baby-python
|
python
|
# Copyright (c) 2014, Yuta Okamoto <okapies@gmail.com>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
from .validators import boolean, integer, mutually_exclusive
class Source(AWSProperty):
props = {
'Password': (str, False),
'Revision': (str, False),
'SshKey': (str, False),
'Type': (str, False),
'Url': (str, False),
'Username': (str, False),
}
class SslConfiguration(AWSProperty):
props = {
'Certificate': (str, True),
'Chain': (str, False),
'PrivateKey': (str, True),
}
class ChefConfiguration(AWSProperty):
props = {
'BerkshelfVersion': (str, False),
'ManageBerkshelf': (boolean, False),
}
class Recipes(AWSProperty):
props = {
'Configure': ([str], False),
'Deploy': ([str], False),
'Setup': ([str], False),
'Shutdown': ([str], False),
'Undeploy': ([str], False),
}
def validate_volume_type(volume_type):
volume_types = ('standard', 'io1', 'gp2')
if volume_type not in volume_types:
raise ValueError("VolumeType (given: %s) must be one of: %s" % (
volume_type, ', '.join(volume_types)))
return volume_type
class VolumeConfiguration(AWSProperty):
props = {
'Encrypted': (boolean, False),
'Iops': (integer, False),
'MountPoint': (str, True),
'NumberOfDisks': (integer, True),
'RaidLevel': (integer, False),
'Size': (integer, True),
'VolumeType': (validate_volume_type, False)
}
def validate(self):
volume_type = self.properties.get('VolumeType')
iops = self.properties.get('Iops')
if volume_type == 'io1' and not iops:
raise ValueError("Must specify Iops if VolumeType is 'io1'.")
if volume_type != 'io1' and iops:
raise ValueError("Cannot specify Iops if VolumeType is not 'io1'.")
class StackConfigurationManager(AWSProperty):
props = {
'Name': (str, False),
'Version': (str, False),
}
class TimeBasedAutoScaling(AWSProperty):
props = {
'Monday': (dict, False),
'Tuesday': (dict, False),
'Wednesday': (dict, False),
'Thursday': (dict, False),
'Friday': (dict, False),
'Saturday': (dict, False),
'Sunday': (dict, False),
}
class AutoScalingThresholds(AWSProperty):
props = {
'CpuThreshold': (float, False),
'IgnoreMetricsTime': (integer, False),
'InstanceCount': (integer, False),
'LoadThreshold': (float, False),
'MemoryThreshold': (float, False),
'ThresholdsWaitTime': (integer, False),
}
class Environment(AWSProperty):
props = {
'Key': (str, True),
'Secure': (bool, False),
'Value': (str, True),
}
class LoadBasedAutoScaling(AWSProperty):
props = {
'DownScaling': (AutoScalingThresholds, False),
'Enable': (bool, False),
'UpScaling': (AutoScalingThresholds, False),
}
def validate_data_source_type(data_source_type):
data_source_types = (
'AutoSelectOpsworksMysqlInstance',
'OpsworksMysqlInstance',
'RdsDbInstance'
)
if data_source_type not in data_source_types:
raise ValueError("Type (given: %s) must be one of: %s" % (
data_source_type, ', '.join(data_source_types)))
return data_source_type
class DataSource(AWSProperty):
props = {
'Arn': (str, False),
'DatabaseName': (str, False),
'Type': (validate_data_source_type, False)
}
class App(AWSObject):
resource_type = "AWS::OpsWorks::App"
props = {
'AppSource': (Source, False),
'Attributes': (dict, False),
'DataSources': ([DataSource], False),
'Description': (str, False),
'Domains': ([str], False),
'EnableSsl': (boolean, False),
'Environment': ([Environment], False),
'Name': (str, True),
'Shortname': (str, False),
'SslConfiguration': (SslConfiguration, False),
'StackId': (str, True),
'Type': (str, True),
}
class ElasticLoadBalancerAttachment(AWSObject):
resource_type = "AWS::OpsWorks::ElasticLoadBalancerAttachment"
props = {
'ElasticLoadBalancerName': (str, True),
'LayerId': (str, True),
'Tags': ((Tags, list), False),
}
class EbsBlockDevice(AWSProperty):
props = {
'DeleteOnTermination': (boolean, False),
'Iops': (integer, False),
'SnapshotId': (str, False),
'VolumeSize': (integer, False),
'VolumeType': (str, False),
}
class BlockDeviceMapping(AWSProperty):
props = {
'DeviceName': (str, False),
'Ebs': (EbsBlockDevice, False),
'NoDevice': (str, False),
'VirtualName': (str, False),
}
def validate(self):
conds = [
'Ebs',
'VirtualName',
]
mutually_exclusive(self.__class__.__name__, self.properties, conds)
class Instance(AWSObject):
resource_type = "AWS::OpsWorks::Instance"
props = {
'AgentVersion': (str, False),
'AmiId': (str, False),
'Architecture': (str, False),
'AutoScalingType': (str, False),
'AvailabilityZone': (str, False),
'BlockDeviceMappings': ([BlockDeviceMapping], False),
'EbsOptimized': (boolean, False),
'ElasticIps': ([str], False),
'Hostname': (str, False),
'InstallUpdatesOnBoot': (boolean, False),
'InstanceType': (str, True),
'LayerIds': ([str], True),
'Os': (str, False),
'RootDeviceType': (str, False),
'SshKeyName': (str, False),
'StackId': (str, True),
'SubnetId': (str, False),
'Tenancy': (str, False),
'TimeBasedAutoScaling': (TimeBasedAutoScaling, False),
'VirtualizationType': (str, False),
'Volumes': ([str], False),
}
class ShutdownEventConfiguration(AWSProperty):
props = {
'DelayUntilElbConnectionsDrained': (boolean, False),
'ExecutionTimeout': (integer, False),
}
class LifeCycleConfiguration(AWSProperty):
props = {
'ShutdownEventConfiguration': (ShutdownEventConfiguration, False),
}
class Layer(AWSObject):
resource_type = "AWS::OpsWorks::Layer"
props = {
'Attributes': (dict, False),
'AutoAssignElasticIps': (boolean, True),
'AutoAssignPublicIps': (boolean, True),
'CustomInstanceProfileArn': (str, False),
'CustomJson': ((str, dict), False),
'CustomRecipes': (Recipes, False),
'CustomSecurityGroupIds': ([str], False),
'EnableAutoHealing': (boolean, True),
'InstallUpdatesOnBoot': (boolean, False),
'LifecycleEventConfiguration': (LifeCycleConfiguration, False),
'LoadBasedAutoScaling': (LoadBasedAutoScaling, False),
'Name': (str, True),
'Packages': ([str], False),
'Shortname': (str, True),
'StackId': (str, True),
'Type': (str, True),
'VolumeConfigurations': ([VolumeConfiguration], False),
}
class RdsDbInstance(AWSProperty):
props = {
'DbPassword': (str, True),
'DbUser': (str, True),
'RdsDbInstanceArn': (str, True)
}
class ElasticIp(AWSProperty):
props = {
'Ip': (str, True),
'Name': (str, False),
}
class Stack(AWSObject):
resource_type = "AWS::OpsWorks::Stack"
props = {
'AgentVersion': (str, False),
'Attributes': (dict, False),
'ChefConfiguration': (ChefConfiguration, False),
'CloneAppIds': ([str], False),
'ClonePermissions': (boolean, False),
'ConfigurationManager': (StackConfigurationManager, False),
'CustomCookbooksSource': (Source, False),
'CustomJson': ((str, dict), False),
'DefaultAvailabilityZone': (str, False),
'DefaultInstanceProfileArn': (str, True),
'DefaultOs': (str, False),
'DefaultRootDeviceType': (str, False),
'DefaultSshKeyName': (str, False),
'DefaultSubnetId': (str, False),
'EcsClusterArn': (str, False),
'ElasticIps': ([ElasticIp], False),
'HostnameTheme': (str, False),
'Name': (str, True),
'RdsDbInstances': ([RdsDbInstance], False),
'ServiceRoleArn': (str, True),
'SourceStackId': (str, False),
'Tags': ((Tags, list), False),
'UseCustomCookbooks': (boolean, False),
'UseOpsworksSecurityGroups': (boolean, False),
'VpcId': (str, False),
}
def validate(self):
if 'VpcId' in self.properties and \
'DefaultSubnetId' not in self.properties:
raise ValueError('Using VpcId requires DefaultSubnetId to be'
'specified')
return True
class UserProfile(AWSObject):
resource_type = "AWS::OpsWorks::UserProfile"
props = {
'AllowSelfManagement': (boolean, False),
'IamUserArn': (str, True),
'SshPublicKey': (str, False),
'SshUsername': (str, False),
}
class Volume(AWSObject):
resource_type = "AWS::OpsWorks::Volume"
props = {
'Ec2VolumeId': (str, True),
'MountPoint': (str, False),
'Name': (str, False),
'StackId': (str, True),
}
class EngineAttribute(AWSProperty):
props = {
'Name': (str, False),
'Value': (str, False),
}
class Server(AWSObject):
resource_type = "AWS::OpsWorksCM::Server"
props = {
'AssociatePublicIpAddress': (boolean, False),
'BackupId': (str, False),
'BackupRetentionCount': (integer, False),
'CustomCertificate': (str, False),
'CustomDomain': (str, False),
'CustomPrivateKey': (str, False),
'DisableAutomatedBackup': (boolean, False),
'Engine': (str, False),
'EngineAttributes': ([EngineAttribute], False),
'EngineModel': (str, False),
'EngineVersion': (str, False),
'InstanceProfileArn': (str, True),
'InstanceType': (str, True),
'KeyPair': (str, False),
'PreferredBackupWindow': (str, False),
'PreferredMaintenanceWindow': (str, False),
'SecurityGroupIds': ([str], False),
'ServerName': (str, False),
'ServiceRoleArn': (str, True),
'SubnetIds': ([str], False),
'Tags': ((Tags, list), False),
}
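# A minimal usage sketch (assumes this module is imported as troposphere's opsworks
# module; the ARNs and names below are placeholders):
# from troposphere import Template
# template = Template()
# template.add_resource(Stack(
#     "MyOpsWorksStack",
#     Name="my-stack",
#     DefaultInstanceProfileArn="arn:aws:iam::123456789012:instance-profile/example",
#     ServiceRoleArn="arn:aws:iam::123456789012:role/aws-opsworks-service-role",
# ))
# print(template.to_json())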
|
nilq/baby-python
|
python
|
# posting to: http://localhost:3000/api/articles/update/:articleid with title, content
# changes title, content
#
# id1: (darwinbot1 P@ssw0rd!! 57d748bc67d0eaf026dff431) <-- this will change with differing mongo instances
import time # for testing, this is not good
import requests # if not installed already, run python -m pip install requests OR pip install requests, whatever you normally do
r = requests.post('http://localhost:80/api/games/search', data={'devkey': "581cef76756322705301183e", 'username': 'darwinbot1'}) # search for new game
json = r.json() # when request comes back, that means you've found a match! (validation if server goes down?)
print(json)
gameID = json['gameID']
playerID = json['playerID']
print(gameID)
print(playerID)
move = ' '
while move != '':
    move = input('input move: ')
    r = requests.post('http://localhost:80/api/games/submit/' + gameID, data={'playerID': playerID, 'move': move, 'devkey': "581cef76756322705301183e"})  # submit sample move
    json = r.json()
    print(json)
|
nilq/baby-python
|
python
|
import re
from src.util import poe_consts
from src.util.logging import log
from src.util.pob import pob_conf
class Gem:
__slots__ = 'name', 'level', 'quality', 'id', 'skill_part', 'enabled', 'second_name', 'active_part', 'is_active'
def __init__(self, id, name, level, quality, skill_part, enabled=''):
self.name = self.translate_name(id) if name == "" else name
self.level = int(level)
self.quality = int(quality)
self.id = id
self.skill_part = int(skill_part) if skill_part else None
self.enabled = True if enabled == 'true' else False
self.second_name = name.split("Vaal ", 1)
if len(self.second_name) > 1:
self.second_name = self.second_name[1]
else:
self.second_name = None
self.active_part = 0
self.is_active = self.determine_active(self.id)
def __repr__(self) -> str:
return "Gem [name={}]".format(self.get_name())
def determine_active(self, id):
return False if not id else "Support".lower() not in id.lower()
def get_name(self):
return self.name if self.active_part == 0 else self.second_name
def set_active_part(self, part_id):
self.active_part = part_id
def translate_name(self, id):
if id == 'UniqueAnimateWeapon':
id = 'Manifest Dancing Dervish'
if id == 'ChaosDegenAuraUnique':
id = "Death Aura"
if id == 'IcestormUniqueStaff12':
id = "Ice Storm"
if id == 'TriggeredMoltenStrike':
id = "Molten Burst"
return id
class Skill:
def __init__(self, gems, main_active_skill, slot=None, enabled=False):
self.slot = slot
self.gems = gems
self.enabled = True if enabled == 'true' else False
try:
self.main_active_skill = int(main_active_skill)
except:
self.main_active_skill = None
self.links = len(gems)
def __repr__(self) -> str:
return "Skill [slot={}; gems={}; links={}; selected={}; enabled={}]".format(self.slot, self.gems, self.links,
self.main_active_skill,
self.enabled)
def get_active_gems(self):
return [gem for gem in self.gems if gem.is_active]
def get_selected(self):
"""
        Gets the selected main skill gem: first filter this skill's gems down to the
        active (non-support) gems, then pick the right one via main_active_skill.
        With new Vaal skills, players can select the non-Vaal version at index+1, which is not saved in the XML.
:return:
"""
gem = None
if self.main_active_skill:
active_gems = [gem for gem in self.gems if gem.id and "support" not in gem.id.lower()]
full_list = []
# easier abstraction than calculating the stuff
for gem in active_gems:
if 'vaal' in gem.name.lower():
full_list.append(gem)
full_list.append(gem)
if len(full_list) > 1:
gem = full_list[self.main_active_skill - 1]
                # if the previous entry is the same gem, toggle it to the non-Vaal version.
gem.set_active_part(1 if gem == full_list[self.main_active_skill - 2] else 0)
return gem
def get_links(self, item=None, join_str=" + "):
        # Join the gem names if they are in the selected skill group and enabled. Show quality and level
        # if level is > 20 or quality is set.
ret = join_str.join(
[gem.name + " ({}/{})".format(gem.level, gem.quality)
if (gem.level > 20 or gem.quality > 0)
else gem.name for gem in self.gems if gem.name and
gem.enabled == True and gem.name != '' and 'jewel' not in gem.name.lower()]
)
if item:
supports = item.added_supports
if supports and isinstance(supports, list):
ret += "\n(+ " + join_str.join([gem['name'] + " (" + gem['level'] + ")" for gem in supports])
ret += " from: *{}*)".format(item.name)
return ret
class ItemSlot:
def __init__(self, name, item_id, item, active=False):
self.name = name
self.item_id = item_id
self.item = item
self.active = bool(active)
def __repr__(self) -> str:
return "ItemSlot [name={}; item_id={}; item={}; active={}]".format(self.name, self.item_id, self.item,
self.active)
class Item:
def __init__(self, id, raw_content, variant=None):
self.id = id
self.raw_content = raw_content.strip()
self.variant = variant
self.name = self.parse_item_name()
self.added_supports = self.parse_item_for_support()
def __repr__(self) -> str:
return "Item [id={}; name={}; Supports={}]".format(self.id, self.name, self.added_supports)
def parse_item_name(self):
# see here for regex: https://regex101.com/r/MivGPM/1
regex = r"\s*Rarity:.*\n\s*(.*)\n"
matches = re.findall(regex, self.raw_content, re.IGNORECASE)
name = "UNDEFINED"
try:
name = matches[0]
except IndexError as err:
log.warning("Name could not be retrieved. Trying string split method Err={}".format(err))
name = self.raw_content.split('\n')[0]
return name
def parse_item_for_support(self):
# Socketed Gems are Supported by level 20 Elemental Proliferation
add_supports = []
# see here for regex: https://regex101.com/r/CcxRuz/1
pattern = r"({variant:([0-9,]*)}|)Socketed Gems are Supported by level ([0-9]*) ([a-zA-Z ]*)"
try:
supports = re.findall(pattern, self.raw_content, re.IGNORECASE)
for support in supports:
# if either no variant exists, or our variant matches the current supports variant
if 'variant' not in support[0] or self.variant in support[0]:
add_supports.append({"name": support[3], "level": support[2]})
except AttributeError as err:
return
return add_supports
class Build:
def __init__(self, level, version, bandit, class_name, ascendency_name, tree, skills, activeSkill, item_slots):
self.level = int(level)
self.version = version
self.bandit = bandit
self.class_name = class_name
self.ascendency_name = ascendency_name
self.stats = {}
self.config = {}
self.tree = tree
self.skills = skills
self.active_skill_id = int(activeSkill) if activeSkill else None
self.item_slots = item_slots
self.aura_count, self.curse_count = self.count_curses_auras()
def count_curses_auras(self):
"""
Iterates through all skills and gems and counts socketed auras and curses
:return: auracount, curse count as named tuple
"""
aura_count = 0
curse_count = 0
for skill in self.skills:
if skill.enabled:
for gem in skill.gems:
if gem.enabled:
if gem.get_name() in poe_consts.curse_list:
curse_count += 1
if gem.get_name() in poe_consts.aura_list:
aura_count += 1
return aura_count, curse_count
def append_stat(self, key, val, stat_owner):
# remove "Stat" from the string
stat_owner = stat_owner[:-4]
if not stat_owner in self.stats:
self.stats[stat_owner] = {}
self.stats[stat_owner][key] = float(val)
# print("owner_key={}; key={}, val={}".format(stat_owner, key, val))
def append_conf(self, key, val):
conf_entry = pob_conf.fetch_entry(key)
# ignore unknown settings.
if conf_entry:
self.config[key] = {'value': val}
self.config[key].update(conf_entry)
def __repr__(self) -> str:
return "{}".format(self.__dict__)
def get_item(self, slot):
item_slot = self.item_slots.get(slot)
if item_slot:
return item_slot.item
def get_stat(self, owner, key, threshold=0):
if owner in self.stats and key in self.stats[owner]:
val = self.stats[owner][key]
return val if val >= threshold else None
else:
return None
def to_string(self):
ret = ""
for item in self.__dict__:
val = self.__dict__[item]
if isinstance(val, list):
pass
else:
                ret += item + ": " + str(val) + "\n"
return ret
def get_active_skill(self):
        if len(self.skills) < 1 or self.active_skill_id is None or self.active_skill_id < 1:
return None
return self.skills[self.active_skill_id - 1]
|
nilq/baby-python
|
python
|
#! /usr/bin/python
#Copyright 2008, Meka Robotics
#All rights reserved.
#http://mekabot.com
#Redistribution and use in source and binary forms, with or without
#modification, are permitted.
#THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
import time
import m3.gui as m3g
import m3.rt_proxy as m3p
import m3.toolbox as m3t
import m3.actuator_ec_pb2 as mec
import m3.component_factory as m3f
import math
class M3Proc:
def __init__(self):
self.proxy = m3p.M3RtProxy()
self.gui = m3g.M3Gui(stride_ms=125)
self.cnt=0
self.bias=[]
def stop(self):
self.proxy.stop()
def start(self):
self.proxy.start()
cnames=self.proxy.get_available_components('m3actuator_ec')
self.names=m3t.user_select_components_interactive(cnames)
if len(self.names)==0:
return
self.actuator_ec=[]
for name in self.names:
self.actuator_ec.append(m3f.create_component(name))
self.proxy.subscribe_status(self.actuator_ec[-1])
self.proxy.publish_command(self.actuator_ec[-1])
self.proxy.publish_param(self.actuator_ec[-1])
self.proxy.make_operational(name)
#pwr_ec=self.proxy.get_available_components('m3pwr_ec')
#pwr_rt=self.proxy.get_available_components('m3pwr')
#print 'A',pwr_rt[0],pwr_ec[0]
#if len(pwr_rt):
#pr=m3f.create_component(pwr_rt[0])
#self.proxy.publish_command(pr)
#self.proxy.make_operational(pwr_rt[0])
#self.proxy.make_operational(pwr_ec[0])
#pr.set_motor_power_on()
pwr_rt=m3t.get_actuator_ec_pwr_component_name(self.names[0])
pwr_ec=pwr_rt.replace('m3pwr','m3pwr_ec')
pr=m3f.create_component(pwr_rt)
self.proxy.publish_command(pr)
self.proxy.make_operational(pwr_rt)
self.proxy.make_operational(pwr_ec)
pr.set_motor_power_on()
tmax=[x.param.t_max for x in self.actuator_ec]
tmin=[x.param.t_min for x in self.actuator_ec]
self.proxy.step()
for c in self.actuator_ec:
self.bias.append(c.status.adc_torque)
tl=min(tmin)-self.bias[0]
tu=max(tmax)-self.bias[0]
self.cycle_pwm=False
self.cycle_last_pwm=False
self.cycle_tq=False
self.cycle_last_tq=False
self.step_period=[2000.0]*len(self.actuator_ec)
self.brake=[0]
#Create gui
self.mode=[0]*len(self.actuator_ec)
self.t_desire_a=[0]*len(self.actuator_ec)
self.t_desire_b=[0]*len(self.actuator_ec)
self.pwm_desire_a=[0]*len(self.actuator_ec)
self.pwm_desire_b=[0]*len(self.actuator_ec)
self.current_desire_a=[0]*len(self.actuator_ec)
self.current_desire_b=[0]*len(self.actuator_ec)
self.save=False
self.save_last=False
self.do_scope_torque=False
self.scope_torque=None
self.status_dict=self.proxy.get_status_dict()
self.param_dict=self.proxy.get_param_dict()
self.gui.add('M3GuiTree', 'Status', (self,'status_dict'),[],[],m3g.M3GuiRead,column=2)
self.gui.add('M3GuiTree', 'Param', (self,'param_dict'),[],[],m3g.M3GuiWrite,column=3)
self.gui.add('M3GuiModes', 'Mode', (self,'mode'),range(len(self.actuator_ec)),[['Off','Pwm','PID','CURRENT'],1],m3g.M3GuiWrite)
self.gui.add('M3GuiModes', 'Brake', (self,'brake'),range(1),[['Enabled','Disabled'],1],m3g.M3GuiWrite)
self.gui.add('M3GuiSliders','tqDesire', (self,'t_desire_a'),range(len(self.actuator_ec)),[tl,tu],m3g.M3GuiWrite)
self.gui.add('M3GuiSliders','tqDesire', (self,'t_desire_b'),range(len(self.actuator_ec)),[tl,tu],m3g.M3GuiWrite)
self.gui.add('M3GuiSliders','pwmDesireA', (self,'pwm_desire_a'),range(len(self.actuator_ec)),[-3200,3200],m3g.M3GuiWrite)
self.gui.add('M3GuiSliders','pwmDesireB', (self,'pwm_desire_b'),range(len(self.actuator_ec)),[-3200,3200],m3g.M3GuiWrite)
self.gui.add('M3GuiSliders','currentDesireA', (self,'current_desire_a'),range(len(self.actuator_ec)),[-100,100],m3g.M3GuiWrite)
self.gui.add('M3GuiSliders','currentDesireB', (self,'current_desire_b'),range(len(self.actuator_ec)),[-3200,3200],m3g.M3GuiWrite)
self.gui.add('M3GuiSliders','StepPeriod (ms) ', (self,'step_period'),range(len(self.actuator_ec)),[0,4000],m3g.M3GuiWrite)
self.gui.add('M3GuiToggle', 'CyclePwm', (self,'cycle_pwm'),[],[['On','Off']],m3g.M3GuiWrite)
self.gui.add('M3GuiToggle', 'CycleTq', (self,'cycle_tq'),[],[['On','Off']],m3g.M3GuiWrite)
self.gui.add('M3GuiToggle', 'Save', (self,'save'),[],[['On','Off']],m3g.M3GuiWrite)
self.gui.add('M3GuiToggle', 'Scope', (self,'do_scope_torque'),[],[['On','Off']],m3g.M3GuiWrite)
self.gui.start(self.step)
def get_theta_raw_deg(self,c): #12bit MA3
try:
e= int((c.status.qei_on*4097)/c.status.qei_period)-1
except ZeroDivisionError:
e= 0
scale=0.087890625
e=e*scale
return e
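    # Note on the conversion above (assumed hardware detail): the MA3 is a 12-bit
    # absolute encoder, so the duty-cycle ratio maps onto roughly 0..4095 counts and
    # the scale 0.087890625 is 360/4096 degrees per count.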
def step(self):
self.proxy.step()
if self.do_scope_torque and self.scope_torque is None and len(self.actuator_ec)==1:
self.scope_torque=m3t.M3Scope2(xwidth=100,yrange=None)
if False and self.cnt%5==0:
for n in self.names:
self.proxy.pretty_print_component(n)
if False and self.cnt%5==0:
print '---------------'
for c in self.actuator_ec:
print 'Timestamp',c.name,m3t.timestamp_string(c.status.timestamp)
self.cnt=self.cnt+1
self.status_dict=self.proxy.get_status_dict()
self.proxy.set_param_from_dict(self.param_dict)
idx=0
for c in self.actuator_ec:
if not self.cycle_last_pwm and self.cycle_pwm:
self.step_start=time.time()
if not self.cycle_last_tq and self.cycle_tq:
self.step_start=time.time()
self.cycle_last_pwm=self.cycle_pwm
self.cycle_last_tq=self.cycle_tq
pwm=self.pwm_desire_a[idx]
tq=self.t_desire_a[idx]
current=self.current_desire_a[idx]
if self.cycle_pwm:
dt=time.time()-self.step_start
if math.fmod(dt,self.step_period[idx]/1000.0)>self.step_period[idx]/2000.0:
pwm=self.pwm_desire_b[idx]
if self.cycle_tq:
dt=time.time()-self.step_start
if math.fmod(dt,self.step_period[idx]/1000.0)>self.step_period[idx]/2000.0:
tq=self.t_desire_b[idx]
c.command.mode=int(self.mode[idx])
if self.mode[idx]==mec.ACTUATOR_EC_MODE_PWM:
c.command.t_desire=int(pwm)
if self.mode[idx]==mec.ACTUATOR_EC_MODE_TORQUE:
c.command.t_desire=int(tq+self.bias[idx]) #Bias slider around 'zero'
print 'Desired',c.name,c.command.t_desire
if self.mode[idx]==mec.ACTUATOR_EC_MODE_CURRENT:
c.command.t_desire=int(current)
print 'Desired',c.name,c.command.t_desire
if self.do_scope_torque and self.scope_torque is not None:
if self.mode[idx]==mec.ACTUATOR_EC_MODE_TORQUE:
self.scope_torque.plot(c.status.adc_torque,c.command.t_desire)
else:
self.scope_torque.plot(c.status.adc_torque,c.status.adc_torque)
idx=idx+1
if (self.save and not self.save_last):
c.write_config()
c.command.brake_off=int(self.brake[0])
print 't_desire:', c.command.t_desire
self.save_last=self.save
if __name__ == '__main__':
t=M3Proc()
try:
t.start()
except (KeyboardInterrupt,EOFError):
pass
t.stop()
|
nilq/baby-python
|
python
|
import unittest
from unittest.mock import Mock
from data_repo_client import RepositoryApi
from dagster_utils.contrib.data_repo.jobs import poll_job, JobFailureException, JobTimeoutException
from dagster_utils.contrib.data_repo.typing import JobId
class PollJobTestCase(unittest.TestCase):
def setUp(self):
self.data_repo_client = Mock(spec=RepositoryApi)
def test_returns_success_on_job_complete(self):
result = poll_job(
JobId("fake_job_id"),
2,
1,
self.data_repo_client
)
self.assertEqual(result, "fake_job_id")
def test_raises_on_poll_timeout(self):
job_status_result = Mock()
job_status_result.completed = False
self.data_repo_client.retrieve_job = Mock(return_value=job_status_result)
with self.assertRaises(JobTimeoutException):
result = poll_job(
JobId("fake_job_id"),
2,
1,
self.data_repo_client
)
self.assertEqual(result, "fake_job_id")
def test_raises_on_job_failure(self):
job_status_result = Mock()
job_status_result.completed = True
job_status_result.job_status = 'failed'
self.data_repo_client.retrieve_job = Mock(return_value=job_status_result)
with self.assertRaises(JobFailureException):
result = poll_job(
JobId("fake_job_id"),
2,
1,
self.data_repo_client
)
self.assertEqual(result, "fake_job_id")
|
nilq/baby-python
|
python
|
from gobbli.dataset.cmu_movie_summary import MovieSummaryDataset
from gobbli.dataset.imdb import IMDBDataset
from gobbli.dataset.newsgroups import NewsgroupsDataset
from gobbli.dataset.trivial import TrivialDataset
__all__ = ["TrivialDataset", "NewsgroupsDataset", "IMDBDataset", "MovieSummaryDataset"]
|
nilq/baby-python
|
python
|
from argparse import ArgumentParser, ArgumentTypeError
from datetime import datetime
time_now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
        raise ArgumentTypeError('Boolean value expected.')
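# Hypothetical usage note: str2bool is wired in below via `type=str2bool` for the
# --verbose flag, so `-v yes` / `-v 1` parse to True, `-v no` / `-v 0` to False,
# and any other value raises ArgumentTypeError.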
ap = ArgumentParser()
ap.add_argument('-ns' , '--n_resamp' , required=False, type=int , default=0 , help="Number of resamples to perform (GBR=1; No Resamp=0).")
ap.add_argument('-nt' , '--n_trees' , required=False, type=int , default=100 , help="Number of trees in the forest.")
ap.add_argument('-c' , '--core' , required=False, type=int , default=0 , help="Which Core to Use GBR only Uses 1 Core at a time.")
ap.add_argument('-pp' , '--pre_process' , required=False, type=bool, default=True , help="Flag whether to use StandardScaler to pre-process the data.")
ap.add_argument('-std', '--do_std' , required=False, type=bool, default=False, help="Use Standard Random Forest Regression.")
ap.add_argument('-pca', '--do_pca' , required=False, type=bool, default=False, help="Use Standard Random Forest Regression with PCA preprocessing.")# nargs='?', const=True,
ap.add_argument('-ica', '--do_ica' , required=False, type=bool, default=False, help="Use Standard Random Forest Regression with ICA preprocessing.")
ap.add_argument('-rfi', '--do_rfi' , required=False, type=bool, default=False, help="Use Standard Random Forest Regression with PCA preprocessing.")
ap.add_argument('-gbr', '--do_gbr' , required=False, type=bool, default=False, help="Use Gradient Boosting Regression with PCA preprocessing.")
ap.add_argument('-rs' , '--random_state', required=False, type=int , default=42 , help="Seed for random state with which to reinitialize a specific instance.")
ap.add_argument('-pdb', '--pdb_stop' , required=False, type=bool, default=False, help="Stop the trace at the end with pdb.set_trace().")
ap.add_argument('-nj' , '--n_jobs' , required=False, type=int , default=-1 , help="Number of cores to use Default:-1.")
ap.add_argument('-df' , '--data_file' , required=False, type=str , default='' , help="The csv file with the Spitzer Calibration Information.")
ap.add_argument('-v' , '--verbose' , required=False, type=str2bool, nargs='?', default=False, help="Whether to print out lots of things or just a few things")
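# Example invocation (hypothetical script name and values, shown only to illustrate
# the flags defined above):
#   python spitzer_calibration_rf.py -nt 200 -gbr True -ns 0 -df pmap_ch2_0p1s_x4_rmulti_s3_7.csv -v yes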
try:
args = vars(ap.parse_args())
n_resamp= args['n_resamp']
n_trees = args['n_trees']
do_std = args['do_std']
do_pca = args['do_pca']
do_ica = args['do_ica']
do_rfi = args['do_rfi']
do_gbr = args['do_gbr']
do_pp = args['pre_process']
pdb_stop= args['pdb_stop']
n_jobs = args['n_jobs']
sp_fname= args['data_file']
verbose = args['verbose']
except Exception as e:
    # This section is for if/when I copy/paste the code into an IPython session
print('Error: {}'.format(e))
n_resamp = 0
n_trees = 100
core = 'A' # unknown
do_std = False
do_pca = False
do_ica = False
do_rfi = False
do_gbr = False
do_pp = False
rand_state = 42
pdb_stop = False
n_jobs = -1
sp_fname = ''
verbose = True
import pandas as pd
import numpy as np
import pdb
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler, minmax_scale
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor#, AdaBoostRegressor, GradientBoostingRegressor
from sklearn.decomposition import PCA, FastICA
from sklearn.externals import joblib
from sklearn.metrics import r2_score
import xgboost as xgb
from tqdm import tqdm
from glob import glob
from time import time
start0 = time()
def setup_features_full(dataRaw, label='flux', notFeatures=[], pipeline=None, verbose=False, resample=False, returnAll=None):
"""Example function with types documented in the docstring.
For production level usage: All scaling and transformations must be done
with respect to the calibration data distributions
Args:
        dataRaw (DataFrame, dict, or str): Raw calibration data (or a path to a CSV file).
        label (str): Column name to use as the regression label (default 'flux').
        notFeatures (list): Columns to exclude from the feature set.
        pipeline (sklearn Pipeline or None): Optional transformer fitted on the features.
        verbose (bool): Print shape and timing information.
        resample (bool): Draw one Gaussian resample of each column using its error column.
        returnAll (bool or str): Whether to also return the fitted pipeline or raw data.
    Returns:
        features_transformed, labels (plus the fitted pipeline when returnAll is True)
.. _PEP 484:
https://github.com/ExoWanderer/
"""
if isinstance(dataRaw,str):
        dataRaw = pd.read_csv(dataRaw)
elif isinstance(dataRaw, dict):
dataRaw = pd.DataFrame(dataRaw)
elif not isinstance(dataRaw, pd.DataFrame):
raise TypeError('The input must be a `pandas.DataFrame` or a `dict` with Equal Size Entries (to convert to df here)')
# WHY IS THIS ALLOWED TO NOT HAVE PARENTHESES?
# assert isinstance(dataRaw, pd.DataFrame), 'The input must be a Pandas DataFrame or Dictionary with Equal Size Entries'
inputData = dataRaw.copy()
# PLDpixels = pd.DataFrame({key:dataRaw[key] for key in dataRaw.columns if 'pix' in key})
pixCols = [colname for colname in inputData.columns if 'pix' in colname.lower() or 'pld' in colname.lower()]
PLDnorm = np.sum(np.array(inputData[pixCols]),axis=1)
inputData[pixCols] = (np.array(inputData[pixCols]).T / PLDnorm).T
# Assign the labels
    n_PLD = len([key for key in dataRaw.keys() if 'err' not in key.lower() and ('pix' in key.lower() or 'pld' in key.lower())])
input_labels = [colname for colname in dataRaw.columns if colname not in notFeatures and 'err' not in colname.lower()]
errors_labels = [colname for colname in dataRaw.columns if colname not in notFeatures and 'err' in colname.lower()]
# resampling_inputs = ['flux', 'xpos', 'ypos', 'xfwhm', 'yfwhm', 'bg_flux', 'bmjd', 'np'] + ['pix{}'.format(k) for k in range(1,10)]
# resampling_errors = ['fluxerr', 'xerr', 'yerr', 'xerr', 'yerr', 'sigma_bg_flux', 'bmjd_err', 'np_err'] + ['fluxerr']*n_PLD
start = time()
if resample:
print("Resampling ", end=" ")
inputData = pd.DataFrame({colname:np.random.normal(dataRaw[colname], dataRaw[colerr]) \
for colname, colerr in tqdm(zip(input_labels, errors_labels), total=len(input_labels))
})
print("took {} seconds".format(time() - start))
else:
inputData = pd.DataFrame({colname:dataRaw[colname] for colname in input_labels})
if label in inputData.keys():
labels = inputData[label]
# explicitly remove the label
inputData.drop(label, axis=1, inplace=True)
else:
labels = np.ones(len(inputData))
    feature_columns = inputData.drop(notFeatures, axis=1, errors='ignore').columns
features = inputData[feature_columns]# inputData.drop(notFeatures,axis=1)
if verbose: print('Shape of Features Array is', features.shape)
if verbose: start = time()
# labels_scaled = labels# label_scaler.fit_transform(labels[:,None]).ravel() if label_scaler is not None else labels
features_trnsfrmd = pipeline.fit_transform(features) if pipeline is not None else features
if verbose: print('took {} seconds'.format(time() - start))
collection = features_trnsfrmd, labels
if returnAll == True:
collection = features_trnsfrmd, labels, pipeline
if returnAll == 'features':
collection = features_trnsfrmd
if returnAll == 'with raw data':
        collection = collection + (dataRaw,)
return collection
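# Minimal usage sketch for setup_features_full (hypothetical call, not executed by
# this script; `demo_pipe` is an assumed preprocessing pipeline and
# `spitzerCalRawData` is loaded further below):
#   demo_pipe = Pipeline([('std_sclr', StandardScaler()), ('pca', PCA(whiten=True))])
#   demo_features, demo_labels, demo_pipe_fitted = setup_features_full(
#       spitzerCalRawData, label='flux', pipeline=demo_pipe, returnAll=True)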
def setup_features_basic(dataRaw, label='flux', notFeatures=[], pipeline=None, verbose=False, resample=False, returnAll=None):
inputData = dataRaw.copy()
pixCols = [colname for colname in inputData.columns if 'pix' in colname.lower() or 'pld' in colname.lower()]
input_labels = [colname for colname in dataRaw.columns if colname not in notFeatures and 'err' not in colname.lower()]
input_labels = sorted(input_labels)
inputData = pd.DataFrame({colname:dataRaw[colname] for colname in input_labels})
PLDnorm = np.sum(np.array(inputData[pixCols]),axis=1)
inputData[pixCols] = (np.array(inputData[pixCols]).T / PLDnorm).T
if label in inputData.keys():
labels = pd.DataFrame(inputData[label], columns=[label])
# explicitly remove the label
inputData.drop(label, axis=1, inplace=True)
else:
labels = np.ones(len(inputData))
features = inputData.drop(notFeatures,axis=1)
return features, labels
def random_forest_wrapper(features, labels, n_trees, n_jobs, grad_boost=False, header='PCA',
core_num=0, samp_num=0, loss='quantile', learning_rate=0.1,
max_depth=3, subsample=1.0, full_output=False, verbose=False):
print('Performing {} Random Forest'.format(header))
features_ = features.copy()
labels_ = labels.copy()
if grad_boost:
rgr = xgb.XGBRegressor( max_depth = max_depth,
learning_rate = learning_rate,
n_estimators = n_trees,
silent = not verbose,
n_jobs = n_jobs)
# objective='reg:linear', booster='gbtree',
# gamma=0, min_child_weight=1, max_delta_step=0, subsample=1,
# colsample_bytree=1, colsample_bylevel=1, reg_alpha=0, reg_lambda=1,
# scale_pos_weight=1, base_score=0.5, random_state=0, seed=None,
# missing=None
features, testX, labels, testY = train_test_split(features_, labels_, test_size=0.25)
else:
rgr = RandomForestRegressor( n_estimators = n_trees ,
n_jobs = n_jobs ,
oob_score = True ,
warm_start = True ,
verbose = verbose )
if verbose: print('Feature Shape: {}\nLabel Shape: {}'.format(features.shape, labels.shape))
if verbose: start=time()
rgr.fit(features, labels)
rgr_oob = r2_score(testY, rgr.predict(testX)) if grad_boost else rgr.oob_score_
rgr_Rsq = r2_score(labels_, rgr.predict(features_))
test_label = {True:'Test R^2', False:'OOB'}
if verbose: print('{} Pretrained Random Forest:\n\t{} Score: \
{:.3f}%\n\tTrain R^2 score: {:.3f}%\
\n\tRuntime: {:.3f} seconds'.format(header, test_label[grad_boost],
rgr_oob*100, rgr_Rsq*100, time()-start))
output_savename = 'randForest_{}_approach_{}trees_{}resamp_{}core.save'.format(header, n_trees, samp_num, core_num)
print('Storing New File to {}'.format(output_savename))
joblib.dump(rgr, output_savename)
if full_output: return rgr
if n_jobs == 1: print('WARNING: You are only using 1 core!')
# Check if requested to complete more than one operation
# if so delete old instances
files_in_directory = glob('./*')
# ## Load CSVs data
flux_normalized = ['fluxerr', 'bg_flux', 'sigma_bg_flux', 'flux']
spitzerCalNotFeatures = ['flux', 'fluxerr', 'bmjd', 'dn_peak', 'xycov', 't_cernox', 'xerr', 'yerr', 'sigma_bg_flux']
spitzerCalFilename = 'pmap_ch2_0p1s_x4_rmulti_s3_7.csv' if sp_fname == '' else sp_fname
spitzerCalKeepFeatures = ['xpos', 'ypos', 'np', 'xfwhm', 'yfwhm', 'bg_flux', #'bmjd',
'pix1', 'pix2', 'pix3', 'pix4', 'pix5', 'pix6', 'pix7', 'pix8', 'pix9']
spitzerCalRawData = pd.read_csv(spitzerCalFilename)
for key in flux_normalized:
spitzerCalRawData[key] = spitzerCalRawData[key] / np.median(spitzerCalRawData['flux'].values)
spitzerCalRawData['fluxerr'] = spitzerCalRawData['fluxerr'] / np.median(spitzerCalRawData['flux'].values)
spitzerCalRawData['bg_flux'] = spitzerCalRawData['bg_flux'] / np.median(spitzerCalRawData['flux'].values)
spitzerCalRawData['sigma_bg_flux'] = spitzerCalRawData['sigma_bg_flux'] / np.median(spitzerCalRawData['flux'].values)
spitzerCalRawData['flux'] = spitzerCalRawData['flux'] / np.median(spitzerCalRawData['flux'].values)
spitzerCalRawData['bmjd_err'] = np.median(0.5*np.diff(spitzerCalRawData['bmjd']))
spitzerCalRawData['np_err'] = np.sqrt(spitzerCalRawData['yerr'])
for colname in spitzerCalRawData.columns:
if 'err' not in colname.lower() and ('pix' in colname.lower() or 'pld' in colname.lower()):
spitzerCalRawData[colname+'_err'] = spitzerCalRawData[colname] * spitzerCalRawData['fluxerr']
spitzer_cal_features, spitzer_cal_labels = setup_features_basic(spitzerCalRawData[['flux']+spitzerCalKeepFeatures])
idx_train, idx_test = train_test_split(np.arange(spitzer_cal_labels.size), test_size=0.75, random_state=42)
do_xgb = True
if do_xgb:
import xgboost as xgb
xgb_rgr = xgb.XGBRegressor(max_depth=5, learning_rate=0.05, n_estimators=10000, silent=True,
objective='reg:linear', booster='gbtree', n_jobs=-1,random_state=42)
start = time()
xgb_rgr.fit(spitzer_cal_features.iloc[idx_train], spitzer_cal_labels.iloc[idx_train])
print('XGB took {:.3f} minutes'.format((time()-start)/60))
do_lgb = False
if do_lgb:
import lightgbm as lgb
lgb_rgr = lgb.LGBMRegressor(boosting_type='gbdt', num_leaves=31, max_depth=5, learning_rate=0.1, n_estimators=10000)
start = time()
lgb_rgr.fit(spitzer_cal_features.iloc[idx_train], spitzer_cal_labels.iloc[idx_train], eval_set=(spitzer_cal_features.iloc[idx_test], spitzer_cal_labels.iloc[idx_test]))
print('LGB took {:.3f} minutes'.format((time()-start)/60))
print("Transforming Data ", end=" ")
operations = []
header = 'GBR' if do_gbr else 'RFI' if do_rfi else 'STD'
if do_pp:
print('Adding Standard Scaler Preprocessing to Pipeline')
operations.append(('std_sclr', StandardScaler()))
header += '_SS'
if do_pca:
print('Adding PCA to Pipeline')
operations.append(('pca', PCA(whiten=True)))
header += '_PCA'
if do_ica:
print('Adding ICA to Pipeline')
operations.append(('ica', FastICA(whiten=True)))
header += '_ICA'
pipe = Pipeline(operations) if len(operations) else None
if do_rfi:
importance_filename = 'randForest_STD_feature_importances.txt'
if len(glob(importance_filename)) == 0:
raise Exception("MUST Run 'STD' operation before 'RFI', to generate file: {}".format(importance_filename))
print('Computing Importances for RFI Random Forest')
importances = np.loadtxt(importance_filename)
indices = np.argsort(importances)[::-1]
imp_sum = np.cumsum(importances[indices])
nImportantSamples = np.argmax(imp_sum >= 0.95) + 1
print('took {} seconds'.format(time() - start))
if 'core' in args.keys():
core = args['core']
elif do_gbr:
from glob import glob
    # Determine the next free core index from any existing saves for this configuration
    existing_saves = glob('randForest_{}_approach_{}trees_{}resamp_*core.save'.format(header, n_trees, n_resamp))
    core_nums = []
    for fname in existing_saves:
        core_nums.append(int(fname.split('randForest_{}_approach_{}trees_{}resamp_'.format(header, n_trees, n_resamp))[-1].split('core.save')[0]))
    core = max(core_nums) + 1 if core_nums else 0
else:
core = 'A'
if n_resamp == 0:
print('No Resampling')
    features, labels, pipe_fitted = setup_features_full( dataRaw = spitzerCalRawData,
pipeline = pipe,
verbose = verbose,
notFeatures = spitzerCalNotFeatures,
resample = False,
returnAll = True)
features = features.T[indices][:nImportantSamples].T if do_rfi else features
random_forest_wrapper(features, labels, n_trees, n_jobs, grad_boost=do_gbr, header=header, core_num=core, samp_num='no_', verbose=verbose)
pipeline_save_name = 'spitzerCalFeature_pipeline_trnsfrmr_no_resamp_{}core.save'.format(core)
print('Saving NO RESAMP Pipeline as {}'.format(pipeline_save_name))
# Save the stack if the stack does not exist and the pipeline is not None
save_calibration_stacks = pipeline_save_name not in files_in_directory and pipe_fitted is not None
# Need to Transform the Scaled Features based off of the calibration distribution
if save_calibration_stacks: joblib.dump(pipe_fitted, pipeline_save_name)
for k_samp in tqdm(range(n_resamp),total=n_resamp):
if k_samp == 0: print('Starting Resampling')
    features, labels, pipe_fitted = setup_features_full( dataRaw = spitzerCalRawData,
pipeline = pipe ,
verbose = verbose,
resample = True ,
returnAll = True )
features = features.T[indices][:nImportantSamples].T if do_rfi else features
random_forest_wrapper(features, labels, n_trees, n_jobs, grad_boost=do_gbr, header=header, core_num=core, samp_num=k_samp, verbose=verbose)
pipeline_save_name = 'spitzerCalFeature_pipeline_trnsfrmr_{}resamp_{}core.save'.format(k_samp, core)
print('Saving SAMP {} Pipeline as {} on Core {}'.format(k_samp, pipeline_save_name, core))
# Save the stack if the stack does not exist and the pipeline is not None
save_calibration_stacks = pipeline_save_name not in files_in_directory and pipe_fitted is not None
# Need to Transform the Scaled Features based off of the calibration distribution
if save_calibration_stacks: joblib.dump(pipe_fitted, pipeline_save_name)
print('\n\nFull Operation took {:.2f} minutes'.format((time() - start0)/60))
if pdb_stop: pdb.set_trace()
'''
def predict_with_scaled_transformer(dataRaw, notFeatures=None, transformer=None, feature_scaler=None, label_scaler=None, verbose=False):
"""Example function with types documented in the docstring.
For production level usage: All scaling and transformations must be done
with respect to the calibration data distributions
Args:
features (nD-array): Array of input raw features.
labels (1D-array): The second parameter.
transformer (int): The first parameter.
label_scaler (str): The second parameter.
feature_scaler (str): The second parameter.
Returns:
features_transformed, labels_scaled
.. _PEP 484:
https://github.com/ExoWanderer/
"""
dataRaw = pd.read_csv(filename) if isinstance(dataRaw,str) else dataRaw
PLDpixels = pd.DataFrame({key:dataRaw[key] for key in dataRaw.columns if 'pix' in key})
# PLDpixels = {}
# for key in dataRaw.columns.values:
# if 'pix' in key:
# PLDpixels[key] = dataRaw[key]
# PLDpixels = pd.DataFrame(PLDpixels)
PLDnorm = np.sum(np.array(PLDpixels),axis=1)
PLDpixels = (PLDpixels.T / PLDnorm).T
inputData = dataRaw.copy()
for key in dataRaw.columns:
if key in PLDpixels.columns:
inputData[key] = PLDpixels[key]
if verbose:
testPLD = np.array(pd.DataFrame({key:inputData[key] for key in inputData.columns.values if 'pix' in key}))
assert(not sum(abs(testPLD - np.array(PLDpixels))).all())
print('Confirmed that PLD Pixels have been Normalized to Spec')
feature_columns = inputData.drop(notFeatures,axis=1).columns.values
features = inputData.drop(notFeatures,axis=1).values
labels = inputData['flux'].values
# **PCA Preconditioned Random Forest Approach**
if verbose: print('Performincg PCA')
labels_scaled = label_scaler.transform(labels[:,None]).ravel() if label_scaler is not None else labels
features_scaled = feature_scaler.transform(features) if feature_scaler is not None else features
features_trnsfrmd = transformer.transform(features_scaled) if transformer is not None else features_scaled
return features_trnsfrmd, labels_scaled
'''
pmap_xo3b = False  # optional post-processing of the XO-3b photometry; disabled by default
if pmap_xo3b:
from glob import glob
xo3b_files = glob('XO3_Data/XO3_r464*.csv')
    # Drop any files flagged 'NALU'; filtering into a new list avoids skipping
    # entries when removing from a list while iterating over it
    xo3b_files = [fname for fname in xo3b_files if 'NALU' not in fname]
    xo3b_data = {}
    for fname in tqdm(xo3b_files, total=len(xo3b_files)):
        key = fname.split('_')[-1].split('.')[0]
        xo3b_data[key] = {'raw': pd.read_csv(fname)}
        med_flux = np.median(xo3b_data[key]['raw']['flux'].values)
        xo3b_data[key]['raw']['fluxerr'] = xo3b_data[key]['raw']['fluxerr'] / med_flux
        xo3b_data[key]['raw']['bg_flux'] = xo3b_data[key]['raw']['bg_flux'] / med_flux
        xo3b_data[key]['raw']['flux'] = xo3b_data[key]['raw']['flux'] / med_flux
        features, labels = setup_features_basic(dataRaw=xo3b_data[key]['raw'][['flux'] + spitzerCalKeepFeatures])
        xo3b_data[key]['features'] = features
        xo3b_data[key]['labels'] = labels
        xo3b_data[key]['pmap'] = xgb_rgr.predict(features)
"""
6-11-2016 - 6-11-2017
right eye: "right OD"; "left OS"
sphere: -2.50
cylinder: -1.75
axis = 5
left eye (OS)
sphere: -1.75
cylinder: -0.75
axis = 180
dpd: left eye = 31.5
right eye = 31.5
together = 63
"""
|
nilq/baby-python
|
python
|
"""changed covid to remove source
Revision ID: faaf679b71ce
Revises: d57323c5f17d
Create Date: 2020-03-23 14:18:04.931393
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'faaf679b71ce'
down_revision = 'd57323c5f17d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('covid', 'locala')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('covid', sa.Column('locala', sa.VARCHAR(length=120), autoincrement=False, nullable=True))
# ### end Alembic commands ###
|
nilq/baby-python
|
python
|
from abc import ABC
import scrapy
import re
import json
from copyheaders import headers_raw_to_dict
from ..items import HistoricNetValueItem
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError
'''
HistoricNetSpider arguments:
mode: 0/1, 0 means crawl all, 1 means crawl specific
fetchmagic:36500
fundcode: fund code
command example: scrapy crawl netvalue -a mode=1 -a fetchmagic=36500 -a fundcode=000001
'''
class HistoricNetSpider(scrapy.Spider, ABC):
name = 'netvalue'
custom_settings = {
'ITEM_PIPELINES': {
'fund.pipelines.HistoricNetWriterPipeline': 400
}
}
header = b'''
Accept: */*
Accept-Encoding: gzip, deflate
Accept-Language: zh-CN,zh;q=0.9
Connection: keep-alive
Host: fund.eastmoney.com
Referer: http://fund.eastmoney.com/data/fundranking.html
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36
'''
def __init__(self, mode=None, fetchmagic=None, fundcode=None, *args, **kwargs):
"""inhere command line arguments, mode & fundCode"""
super(HistoricNetSpider, self).__init__(*args, **kwargs)
self.mode = mode
self.fetchmagic = fetchmagic
self.fundcode = fundcode
def start_requests(self):
print("mode:"+self.mode)
print("fetchmagic"+self.fetchmagic)
"""crawl all"""
if int(self.mode) == 0:
"""request for fund code"""
yield scrapy.Request(
"http://fund.eastmoney.com/allfund.html",
callback=self.parse_fund_code)
elif int(self.mode) == 1:
total_count = self.fetchmagic
fund_code = self.fundcode
yield scrapy.Request(
"http://api.fund.eastmoney.com/f10/lsjz?callback=jQuery183036648984792081185_1575425405289&"
"fundCode={fc}"
"&pageIndex=1&pageSize={tc}".format(fc=fund_code, tc=total_count),
headers=headers_raw_to_dict(self.header),
callback=self.parse_fund_earning_perday,
cb_kwargs=dict(fund_code=fund_code),
errback=self.errback_logger)
else:
print("error mode")
def parse_fund_code(self, response):
print("begin all")
cols = response.xpath('//div[@class=\'data-list m_b\']//div[@id=\'code_content\']//div[@class=\'num_box\']')
for col in cols:
funds_link = col.xpath('.//ul[@class=\'num_right\']/li/div/a[1]/@href').getall()
for fund_link in funds_link:
'''request for total records number'''
fund_code = re.findall('[0-9]+', fund_link)[0]
total_count = self.fetchmagic
yield scrapy.Request(
"http://api.fund.eastmoney.com/f10/lsjz?callback=jQuery183036648984792081185_1575425405289&"
"fundCode={fc}"
"&pageIndex=1&pageSize={tc}".format(fc=fund_code, tc=total_count),
headers=headers_raw_to_dict(self.header),
callback=self.parse_fund_earning_perday,
cb_kwargs=dict(fund_code=fund_code),
errback=self.errback_logger)
# def test(self, fund_code):
# yield scrapy.Request(
# "http://api.fund.eastmoney.com/f10/lsjz?callback=jQuery183036648984792081185_1575425405289&"
# "fundCode={fc}"
# "&pageIndex=1&pageSize={tc}".format(fc=fund_code, tc=100),
# headers=headers_raw_to_dict(self.header),
# callback=self.parse_fund_earning_perday,
# cb_kwargs=dict(fund_code=fund_code),
# errback=self.errback_logger)
# def get_records_count(self, fund_code, total_count):
# yield scrapy.Request(
# "http://api.fund.eastmoney.com/f10/lsjz?callback=jQuery183036648984792081185_1575425405289&"
# "fundCode={fc}"
# "&pageIndex=1&pageSize=20".format(fc=fund_code),
# headers=headers_raw_to_dict(self.header),
# callback=self.parse_records_count,
# cb_kwargs=dict(total_count=total_count))
#
# def parse_records_count(self, response, total_count):
# print("count")
# response = response.text
# data = re.findall(r'\((.*?)\)$', response)[0]
# data = json.loads(data)
# total_count = data.get("TotalCount")
def parse_fund_earning_perday(self, response, fund_code):
response = response.text
data = re.findall(r'\((.*?)\)$', response)[0]
data = json.loads(data)
for i in data.get("Data").get("LSJZList"):
net_value = HistoricNetValueItem()
net_value['FundCode'] = fund_code
net_value['date'] = i.get("FSRQ")
net_value['NAV'] = i.get("DWJZ")
net_value['accumulative_value'] = i.get("LJJZ")
# net_value['rate_day'] = i.get("JZZZL")
# net_value['buy_status'] = i.get("SGZT")
# net_value['sell_status'] = i.get("SHZT")
# net_value['profit'] = i.get("FHSP")
yield net_value
def errback_logger(self, failure):
self.logger.error(repr(failure))
if failure.check(HttpError):
response = failure.value.response
self.logger.error('HttpError on %s', response.url)
elif failure.check(DNSLookupError):
request = failure.request
self.logger.error('DNSLookupError on %s', request.url)
elif failure.check(TimeoutError, TCPTimedOutError):
request = failure.request
self.logger.error('TimeoutError on %s', request.url)
|
nilq/baby-python
|
python
|
from Optimizador.C3D import *
t = -1
e = -1
def getEncabezado():
content = "from goto import with_goto\n"
content += "from Instrucciones.TablaSimbolos.Tabla import Tabla\n"
content += "from Instrucciones.Sql_insert import insertTable\n"
content += "from Instrucciones.Sql_drop import DropTable,DropDatabase\n"
content += "from Instrucciones.Sql_alter import AlterDatabase,AlterDBOwner,AlterTableAddColumn,AlterTableAddConstraintFK,AlterTableAddFK\n"
content += "from Instrucciones.TablaSimbolos.Arbol import Arbol\n"
content += "from storageManager.jsonMode import *\n"
content += "import sintactico\n\n"
content += "tablaGlobal = Tabla(None)\n"
content += "arbol = Arbol()\n\n"
content += "def call_funcion_intermedia():\n"
content += " dropAll()\n"
content += " input = \"\"\n"
content += " for i in stack:\n"
content += " input += stack[i] + \"\\n\"\n"
content += " print(input)\n"
content += " inst = sintactico.ejecutar_analisis(input)\n"
content += " arbol = Arbol(inst)\n"
content += " for i in arbol.instrucciones:\n"
content += " resultado = i.ejecutar(tablaGlobal,arbol)\n\n"
    # Function that inserts rows into a table from the generated 3-address code
content += "def call_insert_table():\n"
content += " arbolAux = arbol\n"
content += " arbolAux.bdUsar = heap[p-3]\n"
content += " tabla = insertTable.insertTable(heap[p-2], None, heap[p-1], heap[p], '', 0, 0)\n"
content += " tabla.ejecutar(tablaGlobal, arbolAux)\n\n"
    # DROP TABLE helper for the 3-address code
content += "def call_drop_table():\n"
content += " arbolAux = arbol\n"
content += " arbolAux.setBaseDatos(heap[p - 1])\n"
content += " drop = DropTable.DropTable(heap[p],None, '', 0, 0)\n"
content += " drop.ejecutar(tablaGlobal, arbolAux)\n\n"
    # DROP DATABASE helper for the 3-address code
content += "def call_drop_database():\n"
content += " arbolAux = arbol\n"
content += " drop = DropDatabase.DropDatabase(heap[p - 2],None,heap[p - 1],heap[p],'',0,0)\n"
content += " drop.ejecutar(tablaGlobal, arbolAux)\n\n"
    # ALTER DATABASE helper for the 3-address code
content += "def call_alter_database():\n"
content += " arbolAux = arbol\n"
content += " alter = AlterDatabase.AlterDatabase(heap[p - 2], None, heap[p - 1], heap[p], '' ,0,0)\n"
content += " alter.ejecutar(tablaGlobal, arbolAux)\n\n"
    # ALTER DATABASE OWNER helper for the 3-address code
content += "def call_alterowner_database():\n"
content += " arbolAux = arbol\n"
content += " alter = AlterDBOwner.AlterDBOwner(heap[p - 1], heap[p], '' ,0,0)\n"
content += " alter.ejecutar(tablaGlobal, arbolAux)\n\n"
    # ALTER TABLE ADD CHECK helper for the 3-address code
content += "def call_alterTable_addCheck():\n"
content += " arbolAux = arbol\n"
content += " arbolAux.setBaseDatos(heap[p - 2])\n"
content += " alter = AlterTableAddCheck.AlterTableAddCheck(heap[p - 1], heap[p], '' ,0,0)\n"
content += " alter.ejecutar(tablaGlobal, arbolAux)\n\n"
    # ALTER TABLE ADD COLUMN helper for the 3-address code
content += "def call_alterTable_addColumn():\n"
content += " arbolAux = arbol\n"
content += " arbolAux.setBaseDatos(heap[p - 2])\n"
content += " alter = AlterTableAddColumn.AlterTableAddColumn(heap[p - 1], heap[p], '' ,0,0)\n"
content += " alter.ejecutar(tablaGlobal, arbolAux)\n\n"
    # ALTER TABLE ADD CONSTRAINT helper for the 3-address code
content += "def call_alterTable_addConstraint():\n"
content += " arbolAux = arbol\n"
content += " arbolAux.setBaseDatos(heap[p - 3])\n"
content += " alter = AlterTableAddConstraint.AlterTableAddConstraint(heap[p - 2], heap[p - 1], heap[p], '' ,0,0)\n"
content += " alter.ejecutar(tablaGlobal, arbolAux)\n\n"
    # ALTER TABLE ADD CONSTRAINT ... FOREIGN KEY helper for the 3-address code
content += "def call_alterTable_addConstraintFK():\n"
content += " arbolAux = arbol\n"
content += " arbolAux.setBaseDatos(heap[p - 5])\n"
content += " alter = AlterTableAddConstraintFK.AlterTableAddConstraintFK(heap[p - 4], heap[p - 3], heap[p - 2], heap[p - 1], heap[p], '' ,0,0)\n"
content += " alter.ejecutar(tablaGlobal, arbolAux)\n\n"
    # ALTER TABLE ADD FOREIGN KEY helper for the 3-address code
content += "def call_alterTable_addFK():\n"
content += " arbolAux = arbol\n"
content += " arbolAux.setBaseDatos(heap[p - 4])\n"
content += " alter = AlterTableAddFK.AlterTableAddFK(heap[p - 3], heap[p - 2], heap[p - 1], heap[p], '' ,0,0)\n"
content += " alter.ejecutar(tablaGlobal, arbolAux)\n\n"
    # ALTER TABLE ALTER COLUMN helper for the 3-address code
content += "def call_alterTable_alterColumn():\n"
content += " arbolAux = arbol\n"
content += " arbolAux.setBaseDatos(heap[p - 2])\n"
content += " alter = AlterTableAlterColumn.AlterTableAlterColumn(heap[p - 1], heap[p], '' ,0,0)\n"
content += " alter.ejecutar(tablaGlobal, arbolAux)\n\n"
    # ALTER TABLE ALTER COLUMN TYPE helper for the 3-address code
content += "def call_alterTable_columnType():\n"
content += " arbolAux = arbol\n"
content += " arbolAux.setBaseDatos(heap[p - 2])\n"
content += " alter = AlterTableAlterColumnType.AlterTableAlterColumnType(heap[p - 1], heap[p], '' ,0,0)\n"
content += " alter.ejecutar(tablaGlobal, arbolAux)\n\n"
    # ALTER TABLE DROP COLUMN helper for the 3-address code
content += "def call_alterTable_dropColumn():\n"
content += " arbolAux = arbol\n"
content += " arbolAux.setBaseDatos(heap[p - 2])\n"
content += " alter = AlterTableDropColumn.AlterTableDropColumn(heap[p - 1], heap[p], '' ,0,0)\n"
content += " alter.ejecutar(tablaGlobal, arbolAux)\n\n"
    # ALTER TABLE DROP CONSTRAINT helper for the 3-address code
content += "def call_alterTable_dropConstraint():\n"
content += " arbolAux = arbol\n"
content += " arbolAux.setBaseDatos(heap[p - 2])\n"
content += " alter = AlterTableDropColumn.AlterTableDropColumn(heap[p - 1], heap[p], '' ,0,0)\n"
content += " alter.ejecutar(tablaGlobal, arbolAux)\n\n"
content += "stack = {}\nheap = {}\n"
content += "p = 0\nh = 0\n\n"
content += "@with_goto\n"
content += "def exec():\n"
content += " global p"
return content
def getPie():
content = "\n\nexec()\n"
content += "call_funcion_intermedia()"
return content
def getTemporal():
global t
t += 1
return 't' + str(t)
def getLastTemporal():
global t
return 't' + str(t)
def getEtiqueta():
global e
e += 1
return 'L' + str(e)
def asignacionString(temporal, valor):
return Asignacion(Identificador(temporal), Valor('"' + valor + '"', "STRING"))
def asignacionH():
return Asignacion(Identificador("h"), Identificador("p"))
def aumentarP():
return Asignacion(Identificador("p"), Operacion(Identificador("p"), Valor(1, "INTEGER"), OP_ARITMETICO.SUMA))
def operacion(temporal, op1, op2, operador):
return Asignacion(Identificador(temporal), Operacion(op1, op2, operador))
def asignacionStack(valor, tipo):
if tipo == "STRING" and valor != None:
valor = "\"" + str(valor) + "\""
return Asignacion(Arreglo(Identificador("stack"), Identificador("p")), Valor(valor, tipo))
def asignacionTemporalStack(id):
return Asignacion(Arreglo(Identificador("stack"), Identificador("p")), Identificador(id))
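# Minimal usage sketch (hypothetical, not part of the translator): emit the
# 3-address code that pushes a string literal onto the stack and advances `p`.
#   t0 = getTemporal()                                   # -> 't0'
#   quads = [asignacionString(t0, 'CREATE DATABASE prueba;'),
#            asignacionTemporalStack(t0),
#            aumentarP()]
#   script = getEncabezado() + '\n' + getPie()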
|
nilq/baby-python
|
python
|
from django.contrib import admin
from . import models
admin.site.register(models.Training)
admin.site.register(models.Education)
admin.site.register(models.Experience)
admin.site.register(models.Skills)
admin.site.register(models.cv)
|
nilq/baby-python
|
python
|
from os.path import basename
from typing import List
from pydantic import BaseModel
from iiif_binder import Config, Metadata, Image
def generate_manifest(
identifier: str, config: Config, metadata: Metadata, images: List[Image]
) -> dict:
manifest = {
"@context": "http://iiif.io/api/presentation/2/context.json",
"@type": "sc:Manifest",
"@id": f"{config.base_url}/{identifier}/manifest",
}
if metadata.title is not None:
manifest["label"] = metadata.title
else:
manifest["label"] = identifier
if metadata.navdate is not None:
manifest["navdate"] = metadata.navdate
if metadata.license is not None:
manifest["license"] = metadata.license
if metadata.attribution is not None:
manifest["attribution"] = metadata.attribution
manifest["sequences"] = [
{
"@id": "https://api.digitale-sammlungen.de/iiif/presentation/v2/bsb00109488/sequences/normal",
"@type": "sc:Sequence",
"canvases": [
canvas(index, config, image) for (index, image) in enumerate(images)
],
"viewingHint": config.viewing_hint,
}
]
if metadata.viewing_hint is not None:
manifest["viewingHint"] = metadata.viewing_hint
elif config.viewing_hint is not None:
manifest["viewingHint"] = config.viewing_hint
else:
manifest["viewingHint"] = "individuals"
if len(images) > 0:
thumbnail = images[0]
manifest["thumbnail"] = {
"@id": f"https://api.digitale-sammlungen.de/iiif/image/v2/{thumbnail.url_path}/full/!{config.thumbnail_size},{config.thumbnail_size}/0/default.jpg",
"service": {
"@context": "http://iiif.io/api/image/2/context.json",
"@id": f"https://api.digitale-sammlungen.de/iiif/image/v2/{thumbnail.url_path}",
"profile": "http://iiif.io/api/image/2/level2.json",
"protocol": "http://iiif.io/api/image",
},
"format": thumbnail.media_type,
}
return manifest
def canvas(index: int, config: Config, image: Image):
canvas_id = f"{config.base_url}/{image.url_path}/canvas/{index}"
canvas = {
"@id": canvas_id,
"@type": "sc:Canvas",
"label": image.label,
"images": [
{
"@type": "oa:Annotation",
"motivation": "sc:painting",
"resource": {
"@id": f"{config.image_base_url}/{image.url_id}/full/full/0/default.jpg",
"@type": "dctypes:Image",
"service": {
"@context": "http://iiif.io/api/image/2/context.json",
"@id": f"https://api.digitale-sammlungen.de/iiif/image/v2/{image.url_id}",
"profile": "http://iiif.io/api/image/2/level2.json",
"protocol": "http://iiif.io/api/image",
},
"format": image.media_type,
"width": image.width,
"height": image.height,
},
"on": canvas_id,
}
],
"width": image.width,
"height": image.height,
}
return canvas
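# Hypothetical usage sketch: Config, Metadata and Image are assumed to expose the
# attributes read above (base_url, thumbnail_size, title, url_path, width, ...);
# their real constructors live in iiif_binder and may differ.
#   manifest = generate_manifest("bsb00109488", config, metadata, images)
#   print(manifest["@id"], len(manifest["sequences"][0]["canvases"]))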
|
nilq/baby-python
|
python
|
from PyQt5 import QtCore
from pyqtgraph import PlotCurveItem, PlotDataItem, ImageItem
from .DataItem import ExtendedDataItem
from ...logging import get_logger
logger = get_logger("PlotMenu")
class PlotMenuMixin:
def raiseContextMenu(self, ev):
"""
Raise the context menu, removing extra separators as they are added pretty recklessly
"""
menu = self.getContextMenus(ev)
# Let the scene add on to the end of our context menu
# (this is optional)
menu = self.scene().addParentContextMenus(self, menu, ev)
# Collapse sequential separators
i = 1
actions = menu.actions()
while i < len(actions):
if actions[i].isSeparator() and actions[i-1].isSeparator():
menu.removeAction(actions[i])
actions.remove(actions[i])
continue
i += 1
        # Pop up the context menu at the event's screen position
pos = ev.screenPos()
logger.debug("Screen pos: %r, %r", pos.x(), pos.y())
menu.popup(QtCore.QPoint(int(pos.x()), int(pos.y())))
ev.accept()
return True
def addPlotContextMenus(self, items, itemNumbers, menu, rect=None):
"""
Add plot items to the menu
Args:
items: List of plot items to add to the menu
itemNumbers: Dictionary mapping items to the index in the plot
            menu: The menu to add items to
            rect: Optional rectangle describing the selected region, forwarded to
                ExtendedDataItem context menus
"""
# If there are added items, remove them all
menuItems = getattr(self, "addedMenuItems", None)
if menuItems is not None:
for item in menuItems:
menu.removeAction(item)
menuItems.clear()
else:
menuItems = []
self.addedMenuItems = menuItems
# And create a sorted list of items under the rectangle
itemsToAdd = []
for item in items:
if not isinstance(item, (PlotCurveItem, PlotDataItem, ImageItem)):
continue
if isinstance(item, PlotCurveItem):
dataitem = item.parentObject()
else:
dataitem = item
if not hasattr(dataitem, "getContextMenus"):
continue
# Figure out the name and references of this item
if hasattr(dataitem, "name"):
name = dataitem.name()
else:
name = None
ind = itemNumbers[dataitem]
if name is None:
name = f"(Trace: {ind+1})"
else:
name = f"{name} (Trace: {ind+1})"
# Create menus for each of the items
if isinstance(dataitem, ExtendedDataItem):
menu = dataitem.getContextMenus(rect=rect, event=None)
else:
menu = dataitem.getContextMenus(event=None)
menu.setTitle(name)
itemsToAdd.append((ind, menu))
# Sort the items by the index
itemsToAdd.sort(key=lambda x: x[0])
# Add each of the items in to the menu
if itemsToAdd:
menuItems.append(self.menu.addSeparator())
if len(itemsToAdd) == 1:
for item in itemsToAdd[0][1].actions():
menuItems.append(item)
self.menu.addAction(item)
else:
for item in itemsToAdd:
menuItems.append(self.menu.addMenu(item[1]))
return itemsToAdd
class ImageMenuMixin:
pass
|
nilq/baby-python
|
python
|
# Copyright (c) 2019, Piet Hein Schouten. All rights reserved.
# Licensed under the terms of the MIT license.
from .card import Card
from .file_attachment import FileAttachment
from .retrieval_attempt import RetrievalAttempt
from .tag import Tag
|
nilq/baby-python
|
python
|
"""Module for the custom Django sampledata command."""
import csv
import random
from django.core import management
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.gis.geos import Point
from allauth.account.models import EmailAddress
from tests.users.factories import EntityFactory
from resources.models import (
Language,
TechnologicalArea,
ProgressOutcome,
YearLevel,
CurriculumLearningArea,
)
from tests.resources.factories import (
ResourceFactory,
NZQAStandardFactory,
)
# Events
from events.models import (
Location,
Series,
)
from tests.events.factories import (
EventFactory,
)
# DTTA
from tests.dtta.factories import (
NewsArticleFactory,
PageFactory,
ProjectFactory,
RelatedLinkFactory,
)
# POET
from tests.poet.factories import (
POETFormResourceFactory,
POETFormSubmissionFactory,
POETFormProgressOutcomeGroupFactory,
)
class Command(management.base.BaseCommand):
"""Required command class for the custom Django sampledata command."""
help = "Add sample data to database."
def handle(self, *args, **options):
"""Automatically called when the sampledata command is given."""
if settings.DEPLOYMENT_TYPE == 'prod' and not settings.DEBUG:
raise management.base.CommandError(
'This command can only be executed in DEBUG mode on non-production website.'
)
# Clear all data
management.call_command('flush', interactive=False)
print('Database wiped.')
User = get_user_model()
# Create admin account
admin = User.objects.create_superuser(
'admin',
'admin@dthm4kaiako.ac.nz',
password=settings.SAMPLE_DATA_ADMIN_PASSWORD,
first_name='Admin',
last_name='Account'
)
EmailAddress.objects.create(
user=admin,
email=admin.email,
primary=True,
verified=True
)
print('Admin created.')
# Create user account
user = User.objects.create_user(
'user',
'user@dthm4kaiako.ac.nz',
password=settings.SAMPLE_DATA_USER_PASSWORD,
first_name='Alex',
last_name='Doe'
)
EmailAddress.objects.create(
user=user,
email=user.email,
primary=True,
verified=True
)
print('User created.')
# Create entities
EntityFactory.create_batch(size=10)
print('Entities created.')
# Resources
Language.objects.create(name='English', css_class='language-en')
Language.objects.create(name='Māori', css_class='language-mi')
print('Languages created.')
curriculum_learning_areas = {
'English': 'english',
'Arts': 'arts',
'Health and physical education': 'health-pe',
'Learning languages': 'languages',
'Mathematics and statistics': 'mathematics',
'Science': 'science',
'Social sciences': 'social-sciences',
'Technology': 'technology',
}
for area_name, area_css_class in curriculum_learning_areas.items():
CurriculumLearningArea.objects.create(
name=area_name,
css_class=area_css_class,
)
print('Curriculum learning areas created.')
ta_ct = TechnologicalArea.objects.create(
name='Computational thinking',
abbreviation='CT',
css_class='ta-ct',
)
for i in range(1, 9):
ProgressOutcome.objects.create(
name='Computational thinking - Progress outcome {}'.format(i),
abbreviation='CT PO{}'.format(i),
technological_area=ta_ct,
css_class='po-ct',
)
ta_dddo = TechnologicalArea.objects.create(
name='Designing and developing digital outcomes',
abbreviation='DDDO',
css_class='ta-dddo',
)
for i in range(1, 7):
ProgressOutcome.objects.create(
name='Designing and developing digital outcomes - Progress outcome {}'.format(i),
abbreviation='DDDO PO{}'.format(i),
technological_area=ta_dddo,
css_class='po-dddo',
)
print('Technological areas created.')
print('Progress outcomes created.')
NZQAStandardFactory.create_batch(size=20)
for i in range(0, 14):
YearLevel.objects.create(
level=i
)
print('NZQA standards created.')
ResourceFactory.create_batch(size=20)
print('Resources created.')
# Events
event_series = {
(
'Computer Science for High Schools',
'CS4HS',
),
(
'Computer Science for Primary Schools',
'CS4PS',
),
(
'Computer Science for Professional Development',
'CS4PD',
),
(
'Code Club for Teachers',
'CC4T',
),
}
for (name, abbreviation) in event_series:
Series.objects.create(
name=name,
abbreviation=abbreviation,
)
print('Event series created.')
region_codes = dict()
region_suffix = ' region'
for (code, name) in Location.REGION_CHOICES:
if name.endswith(region_suffix):
name = name[:-len(region_suffix)]
region_codes[name] = code
with open('general/management/commands/sample-data/nz-schools.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in random.sample(list(reader), 100):
if row['Longitude'] and row['Latitude'] and row['Region']:
Location.objects.create(
room='Room A',
name=row['Name'],
street_address=row['Street'],
suburb=row['Suburb'],
city=row['City'],
region=region_codes[row['Region']],
coords=Point(
float(row['Longitude']),
float(row['Latitude'])
),
)
print('Event locations created.')
EventFactory.create_batch(size=50)
print('Events created.')
# DTTA
NewsArticleFactory.create_batch(size=20)
print('DTTA news articles created.')
PageFactory.create_batch(size=5)
print('DTTA pages created.')
ProjectFactory.create_batch(size=5)
print('DTTA projects created.')
RelatedLinkFactory.create_batch(size=10)
print('DTTA related links created.')
# POET
management.call_command('load_poet_data')
POETFormResourceFactory.create_batch(size=20)
print('POET resources created.')
POETFormProgressOutcomeGroupFactory.create_batch(size=6)
print('POET progress outcome groups created.')
POETFormSubmissionFactory.create_batch(size=800)
print('POET submissions created.')
|
nilq/baby-python
|
python
|
# https://leetcode.com/problems/search-a-2d-matrix/
#
# Write an efficient algorithm that searches for a value in an m x n matrix.
# This matrix has the following properties:
#
# Integers in each row are sorted from left to right.
# The first integer of each row is greater than the last integer of the previous row.
#
# For example,
# Consider the following matrix:
# [
# [1, 3, 5, 7],
# [10, 11, 16, 20],
# [23, 30, 34, 50]
# ]
# Given target = 3, return true.
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
m = len(matrix)
left, right = 0, m - 1
if target < matrix[0][0] or target > matrix[-1][-1]:
return False
while left != right:
            mid = (left + right) // 2
if target < matrix[mid][-1]:
right = mid
else:
left = mid + 1
if target in matrix[left]:
return True
else:
return False
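# Quick self-check (not part of the original submission): runs the row binary
# search on the example matrix from the problem statement above.
if __name__ == '__main__':
    grid = [
        [1, 3, 5, 7],
        [10, 11, 16, 20],
        [23, 30, 34, 50],
    ]
    print(Solution().searchMatrix(grid, 3))   # expected: True
    print(Solution().searchMatrix(grid, 13))  # expected: False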
|
nilq/baby-python
|
python
|
from django.shortcuts import render, redirect
from comments.forms import CommentForm
from django.http import HttpResponseBadRequest
# Create your views here.
def create_comment(request):
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.author = request.user
comment.save()
return redirect(comment.product.get_absolute_url())
return HttpResponseBadRequest()
|
nilq/baby-python
|
python
|
#!/usr/bin/python3 -OO
# Copyright 2007-2020 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.decoder - article decoder
"""
import logging
import hashlib
import queue
from threading import Thread
import sabnzbd
from sabnzbd.constants import SABYENC_VERSION_REQUIRED
from sabnzbd.articlecache import ArticleCache
from sabnzbd.downloader import Downloader
from sabnzbd.nzbqueue import NzbQueue
import sabnzbd.cfg as cfg
from sabnzbd.misc import match_str
# Check for correct SABYenc version
SABYENC_VERSION = None
try:
import sabyenc3
SABYENC_ENABLED = True
SABYENC_VERSION = sabyenc3.__version__
# Verify version to at least match minor version
if SABYENC_VERSION[:3] != SABYENC_VERSION_REQUIRED[:3]:
raise ImportError
except ImportError:
SABYENC_ENABLED = False
class CrcError(Exception):
def __init__(self, needcrc, gotcrc, data):
Exception.__init__(self)
self.needcrc = needcrc
self.gotcrc = gotcrc
self.data = data
class BadYenc(Exception):
def __init__(self):
Exception.__init__(self)
class Decoder:
""" Implement thread-like coordinator for the decoders """
do = None
def __init__(self):
logging.debug("Initializing decoders")
# Initialize queue and servers
self.decoder_queue = queue.Queue()
# Initialize decoders
self.decoder_workers = []
for i in range(cfg.num_decoders()):
self.decoder_workers.append(DecoderWorker(self.decoder_queue))
Decoder.do = self
def start(self):
for decoder_worker in self.decoder_workers:
decoder_worker.start()
def is_alive(self):
# Check all workers
for decoder_worker in self.decoder_workers:
if not decoder_worker.is_alive():
return False
return True
def stop(self):
# Put multiple to stop all decoders
for _ in self.decoder_workers:
self.decoder_queue.put(None)
def join(self):
# Wait for all decoders to finish
for decoder_worker in self.decoder_workers:
try:
decoder_worker.join()
except:
pass
def process(self, article, raw_data):
# We use reported article-size, just like sabyenc does
ArticleCache.do.reserve_space(article.bytes)
self.decoder_queue.put((article, raw_data))
def queue_full(self):
# Check if the queue size exceeds the limits
return self.decoder_queue.qsize() >= ArticleCache.do.decoder_cache_article_limit
class DecoderWorker(Thread):
""" The actuall workhorse that handles decoding! """
def __init__(self, decoder_queue):
Thread.__init__(self)
logging.debug("Initializing decoder %s", self.name)
self.decoder_queue = decoder_queue
def stop(self):
# Put multiple to stop all decoders
self.decoder_queue.put(None)
self.decoder_queue.put(None)
def run(self):
while 1:
# Let's get to work!
art_tup = self.decoder_queue.get()
if not art_tup:
logging.info("Shutting down decoder %s", self.name)
break
article, raw_data = art_tup
nzo = article.nzf.nzo
art_id = article.article
# Free space in the decoder-queue
ArticleCache.do.free_reserved_space(article.bytes)
# Keeping track
decoded_data = None
article_success = False
try:
if nzo.precheck:
raise BadYenc
if sabnzbd.LOG_ALL:
logging.debug("Decoding %s", art_id)
decoded_data = decode(article, raw_data)
article_success = True
except MemoryError:
logging.warning(T("Decoder failure: Out of memory"))
logging.info("Decoder-Queue: %d", self.decoder_queue.qsize())
logging.info("Cache: %d, %d, %d", *ArticleCache.do.cache_info())
logging.info("Traceback: ", exc_info=True)
Downloader.do.pause()
# This article should be fetched again
NzbQueue.do.reset_try_lists(article)
continue
except CrcError:
logging.info("CRC Error in %s" % art_id)
# Continue to the next one if we found new server
if search_new_server(article):
continue
except (BadYenc, ValueError):
# Handles precheck and badly formed articles
if nzo.precheck and raw_data and raw_data[0].startswith(b"223 "):
# STAT was used, so we only get a status code
article_success = True
else:
# Examine headers (for precheck) or body (for download)
# Look for DMCA clues (while skipping "X-" headers)
# Detect potential UUencode
for line in raw_data:
lline = line.lower()
if b"message-id:" in lline:
article_success = True
if not lline.startswith(b"X-") and match_str(
lline, (b"dmca", b"removed", b"cancel", b"blocked")
):
article_success = False
logging.info("Article removed from server (%s)", art_id)
break
if lline.find(b"\nbegin ") >= 0:
logme = T("UUencode detected, only yEnc encoding is supported [%s]") % nzo.final_name
logging.error(logme)
nzo.fail_msg = logme
NzbQueue.do.end_job(nzo)
break
# Pre-check, proper article found so just register
if nzo.precheck and article_success and sabnzbd.LOG_ALL:
logging.debug("Server %s has article %s", article.fetcher, art_id)
elif not article_success:
# If not pre-check, this must be a bad article
if not nzo.precheck:
logging.info("Badly formed yEnc article in %s", art_id, exc_info=True)
# Continue to the next one if we found new server
if search_new_server(article):
continue
except:
logging.warning(T("Unknown Error while decoding %s"), art_id)
logging.info("Traceback: ", exc_info=True)
# Continue to the next one if we found new server
if search_new_server(article):
continue
if decoded_data:
# If the data needs to be written to disk due to full cache, this will be slow
# Causing the decoder-queue to fill up and delay the downloader
ArticleCache.do.save_article(article, decoded_data)
NzbQueue.do.register_article(article, article_success)
def decode(article, raw_data):
# Let SABYenc do all the heavy lifting
decoded_data, yenc_filename, crc, crc_expected, crc_correct = sabyenc3.decode_usenet_chunks(raw_data, article.bytes)
# Mark as decoded
article.decoded = True
# Assume it is yenc
article.nzf.type = "yenc"
# Only set the name if it was found and not obfuscated
if not article.nzf.filename_checked and yenc_filename:
# Set the md5-of-16k if this is the first article
if article.lowest_partnum:
article.nzf.md5of16k = hashlib.md5(decoded_data[:16384]).digest()
# Try the rename, even if it's not the first article
# For example when the first article was missing
article.nzf.nzo.verify_nzf_filename(article.nzf, yenc_filename)
# CRC check
if not crc_correct:
raise CrcError(crc_expected, crc, decoded_data)
return decoded_data
def search_new_server(article):
""" Shorthand for searching new server or else increasing bad_articles """
# Continue to the next one if we found new server
if not article.search_new_server():
# Increase bad articles if no new server was found
article.nzf.nzo.increase_bad_articles_counter("bad_articles")
return False
return True
|
nilq/baby-python
|
python
|
from gatekeeper import Endpoint
class Hello(Endpoint):
path = '/hello'
def get(self, request, response):
response.body = 'hello world'
|
nilq/baby-python
|
python
|
from app import manager
if __name__ == "__main__":
manager.run()
|
nilq/baby-python
|
python
|
from __future__ import annotations
from src.models.verse_reference import VerseReference
class ChapterReference:
book_name: str
chapter_number: int
version: str
def __init__(self, book_name: str, chapter_number: int, version: str) -> None:
self.book_name = book_name
self.chapter_number = chapter_number
self.version = version
@staticmethod
def from_verse_reference(verse_reference: VerseReference) -> ChapterReference:
return ChapterReference(
book_name=verse_reference.book_name,
chapter_number=verse_reference.chapter_number,
version=verse_reference.version
)
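# Hypothetical usage sketch, assuming VerseReference exposes the same three
# attributes read above (book_name, chapter_number, version):
#   verse = VerseReference(...)  # construction details depend on that model
#   chapter = ChapterReference.from_verse_reference(verse)
#   print(chapter.book_name, chapter.chapter_number, chapter.version)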
|
nilq/baby-python
|
python
|
# Copyright 2017 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""variant_labeler for DeepVariant."""
from deepvariant.labeler import positional_labeler
from deepvariant.labeler import variant_labeler
from third_party.nucleus.util import struct_utils
# ---------------------------------------------------------------------------
# CustomizedClassesVariantLabel
#
class CustomizedClassesVariantLabel(variant_labeler.VariantLabel):
"""Dataclass containing information about a label assigned to a variant.
Attributes:
is_confident: bool. True if we could confidently assign a label to this
variant, False otherwise.
variant: nucleus.protos.Variant proto that we assigned a label for.
class_status: string. One of the keys in classes_dict
"""
classes_dict = None
info_field_name = None
def __init__(self, is_confident, variant, truth_variant, classes_list,
info_field_name):
self.info_field_name = info_field_name
self.classes_dict = {k: v for v, k in enumerate(classes_list.split(','))}
self.is_confident = is_confident
self.variant = variant
self.truth_variant = truth_variant
def label_for_alt_alleles(self, alt_alleles_indices):
"""Computes the label value for an example.
This function computes the TensorFlow label value (0, 1, 2, .. N-1) we train
DeepVariant to predict.
The `alt_alleles_indices` being passed in is from the candidates (not
truth), so they could still have multiple alts. If any of the alt alleles
matches the truth, we'll return the label of the truth.
redacted
Note that this function currently doesn't handle multi-allelic cases
correctly. For example it assumes `truth_alt` is the first one.
Args:
alt_alleles_indices: list[int]. A list of the alt_allele_indices.
Returns:
int >= 0. Label for the classes in `classes_dict`.
"""
if not self.truth_variant:
return 0
if self.truth_variant.calls[0].genotype == [0, 0]:
return 0
# If the ref of the candidate and the truth doesn't match, return 0 (ref).
if self.truth_variant.reference_bases != self.variant.reference_bases:
return 0
true_class_status = self.get_class_status(self.truth_variant.info)
truth_alt = self.truth_variant.alternate_bases[0]
# Default is label 0. Usually reference.
label = 0
# Note that this logic below might not be the best when
# `alt_alleles_indices` is a composite one, like [0, 1]. For now we'll
# return the corresponding label if any of them matches truth_alt.
for ind in alt_alleles_indices:
if self.variant.alternate_bases[ind] == truth_alt:
# allele in called variant is the same as truth_alt
label = self.classes_dict[true_class_status]
return label
def get_class_status(self, info_field):
"""Extract class status from nucleus.protos.Variant.info.
Args:
info_field: INFO field of nucleus.protos.Variant proto to extract the
classes status from. Must contain `info_field_name` field which is set
to one of self.classes_dict.keys().
Returns:
string. Class status. Has to be one of the keys of `classes_dict`.
Raises:
ValueError: if type is missing in info_field
ValueError: if type is not in self.classes_dict.keys()
"""
if self.info_field_name not in info_field.keys():
raise ValueError('Cannot create class labels: ' +
'VCF file does not contain INFO/{} field'.format(
self.info_field_name))
class_status = struct_utils.get_string_field(info_field,
self.info_field_name, True)
if class_status not in self.classes_dict.keys():
raise ValueError('class_status status unknown: {}. '
'Known status: {}'.format(class_status,
self.classes_dict.keys()))
return class_status
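# A hedged, self-contained illustration of the label mapping above (a sketch, not part
# of the DeepVariant API; the class names are made up):
#
#     classes_list = 'ref,snp,indel'
#     classes_dict = {k: v for v, k in enumerate(classes_list.split(','))}
#     # classes_dict == {'ref': 0, 'snp': 1, 'indel': 2}
#     # label_for_alt_alleles() returns classes_dict[true_class_status] when one of the
#     # candidate alt alleles equals the truth alt, and 0 otherwise.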
# ---------------------------------------------------------------------------
# CustomizedClassesVariantLabeler
#
class CustomizedClassesVariantLabeler(
positional_labeler.PositionalVariantLabeler):
"""Extracts the class of the variant (possible values are keys in
`classes_dict`) from INFO/`info_field_name` field in VCF file.
"""
def __init__(self, truth_vcf_reader, confident_regions, classes_list,
info_field_name):
"""Creates a new CustomizedClassesVariantLabeler.
Args:
truth_vcf_reader: a VcfReader object that points to our truth variant set.
confident_regions: A RangeSet containing all of the confidently called
        regions. A variant that falls outside of one of these regions will
        receive a special not-confident marker.
      classes_list: A comma-separated string of classes.
info_field_name: the name in INFO field where we should get the customized
field from.
Raises:
ValueError: if vcf_reader is None.
"""
super(CustomizedClassesVariantLabeler, self).__init__(
truth_vcf_reader=truth_vcf_reader, confident_regions=confident_regions)
self.classes_list = classes_list
self.info_field_name = info_field_name
def label_variants(self, variants, region=None):
"""Gets label information for each variant in variants.
This is the primary API for assigning labels to variants. This function
    takes an iterable of variants and yields a VariantLabel object for each
    variant. The VariantLabel can be used to determine the variant type label
    for each variant suitable for training a DeepVariant model. The API accepts
    an iterable of Variants because, in the general case, the labeling of
    variants isn't independent: the label assigned to one variant may
    impact the label we assign to a nearby variant.
Args:
variants: iterable[nucleus.protos.Variant]: An iterable of variants to
label. The variants should be in coordinate-sorted order and all on the
same chromosome.
region: A nucleus.genomics.v1.Range object specifying the region over
which we are labeling variants. This should span at least the span of
variants, but may be larger. Statistics about the labeling will be
computed over region.
Yields:
A VariantLabel object for each variant in variants, in order.
"""
for variant in variants:
is_confident, truth_variant = self._match(variant)
yield CustomizedClassesVariantLabel(
is_confident=is_confident,
variant=variant,
truth_variant=truth_variant,
classes_list=self.classes_list,
info_field_name=self.info_field_name)
|
nilq/baby-python
|
python
|
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
annotations.Align.aligners.aligner.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import logging
from .basicalign import BasicAligner
from .juliusalign import JuliusAligner
from .hvitealign import HviteAligner
# ---------------------------------------------------------------------------
# List of supported aligners.
aligners = (BasicAligner, JuliusAligner, HviteAligner)
# ---------------------------------------------------------------------------
class sppasAligners(object):
"""Manager of the aligners implemented in the package.
:author: Brigitte Bigi
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: develop@sppas.org
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 Brigitte Bigi
"""
def __init__(self):
"""Create a sppasAligners to manage the aligners supported by SPPAS."""
self._aligners = dict()
for a in aligners:
self._aligners[a().name()] = a
# ---------------------------------------------------------------------------
def get(self):
"""Return a dictionary of aligners (key=name, value=instance)."""
return self._aligners
# ---------------------------------------------------------------------------
@staticmethod
def default_aligner_name():
"""Return the name of the default aligner."""
return BasicAligner().name()
# ---------------------------------------------------------------------------
def names(self):
"""Return the list of aligner names."""
return tuple(self._aligners.keys())
# ---------------------------------------------------------------------------
def classes(self, aligner_name=None):
"""Return the list of aligner classes.
:param aligner_name: (str) A specific aligner
        :returns: the aligner class, or a tuple of all classes if no aligner name is given
"""
if aligner_name is not None:
self.check(aligner_name)
return self._aligners[aligner_name]
return tuple(self._aligners.values())
# ---------------------------------------------------------------------------
def extensions(self, aligner_name=None):
"""Return the list of supported extensions of each aligner.
:param aligner_name: (str) A specific aligner
:returns: list of str, or a dict of list if no aligner name is given
"""
if aligner_name is not None:
            self.check(aligner_name)
            return self._aligners[aligner_name]().extensions()
ext = dict()
for a in self._aligners:
ext[a] = self._aligners[a]().extensions()
return ext
# ---------------------------------------------------------------------------
def default_extension(self, aligner_name=None):
"""Return the default extension of each aligner.
:param aligner_name: (str) A specific aligner
:returns: str, or a dict of str if no aligner name is given
"""
if aligner_name is not None:
            self.check(aligner_name)
            return self._aligners[aligner_name]().outext()
ext = dict()
for a in self._aligners:
ext[a] = self._aligners[a]().outext()
return ext
# ---------------------------------------------------------------------------
def check(self, aligner_name):
"""Check whether the aligner name is known or not.
:param aligner_name: (str) Name of the aligner.
:returns: formatted alignername
"""
a = aligner_name.lower().strip()
if a not in self._aligners.keys():
raise KeyError('Unknown aligner name {:s}.'.format(a))
return a
# ---------------------------------------------------------------------------
def instantiate(self, model_dir=None, aligner_name="basic"):
"""Instantiate an aligner to the appropriate system from its name.
        If the given aligner name is unknown, a KeyError is raised.
:param model_dir: (str) Directory of the acoustic model
:param aligner_name: (str) Name of the aligner
:returns: an Aligner instance.
"""
a = self.check(aligner_name)
return self._aligners[a](model_dir)
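
# A hedged usage sketch (illustration only; passing model_dir=None assumes the chosen
# aligner accepts a missing acoustic model, as the basic aligner does):
#
#     manager = sppasAligners()
#     print(manager.names())              # names of all registered aligners
#     aligner = manager.instantiate(model_dir=None, aligner_name="basic")
#     print(aligner.name())               # "basic"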
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Get one postal code from the command line
# Example ./get_latlong_cmd A1B2C3
import sys
import os
import googlemaps
import json
gmaps = googlemaps.Client(key=os.environ['DIRECTIONS_API_KEY'])
code = sys.argv[1]
print(code)
place_result = gmaps.find_place(input = code,
input_type="textquery",
fields=set(["geometry","formatted_address"])
)
if place_result['status'] != 'OK':
print ("whoops")
else:
print(json.dumps(place_result, indent=4, sort_keys=True))
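
# Hedged extension: pull the coordinates out of the response. This assumes the
# standard find_place payload shape ("candidates" -> "geometry" -> "location");
# adjust the keys if your response differs.
for candidate in place_result.get('candidates', []):
    location = candidate['geometry']['location']
    print("{}: lat={}, lng={}".format(
        candidate.get('formatted_address', code), location['lat'], location['lng']))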
|
nilq/baby-python
|
python
|
import coreapi
import coreschema
from django_filters.rest_framework import DjangoFilterBackend
from drf_haystack.filters import HaystackFilter
from drf_haystack.generics import HaystackGenericAPIView
from rest_framework import viewsets
from rest_framework.mixins import ListModelMixin
from rest_framework.permissions import AllowAny
from rest_framework.viewsets import ViewSetMixin
from oldp.api import SmallResultsSetPagination
from oldp.apps.laws.models import Law, LawBook
from oldp.apps.laws.search_indexes import LawIndex
from oldp.apps.laws.serializers import LawSerializer, LawBookSerializer, LawSearchSerializer
from oldp.apps.search.filters import SearchSchemaFilter
class LawViewSet(viewsets.ModelViewSet):
queryset = Law.objects.all().order_by('order')
serializer_class = LawSerializer
filter_backends = (DjangoFilterBackend,)
filter_fields = ('book_id', 'book__latest', 'book__revision_date')
class LawBookViewSet(viewsets.ModelViewSet):
queryset = LawBook.objects.all().order_by('code')
serializer_class = LawBookSerializer
filter_backends = (DjangoFilterBackend,)
filter_fields = ('slug', 'code', 'latest', 'revision_date')
class LawSearchSchemaFilter(SearchSchemaFilter):
search_index_class = LawIndex
def get_default_schema_fields(self):
return [
# Search query field is required
coreapi.Field(
name='text',
location='query',
required=True,
                schema=coreschema.String(description='Search query on text content (Lucene syntax support).'),
)
]
class LawSearchViewSet(ListModelMixin, ViewSetMixin, HaystackGenericAPIView):
"""
Search view
"""
permission_classes = (AllowAny,)
pagination_class = SmallResultsSetPagination # limit page (other content field blows up response size)
index_models = [
Law
]
serializer_class = LawSearchSerializer
filter_backends = (HaystackFilter, LawSearchSchemaFilter,)
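
# A hedged wiring sketch (illustration only; the URL prefixes and basename are
# assumptions, not taken from the project's actual router configuration):
#
#     from rest_framework import routers
#     router = routers.DefaultRouter()
#     router.register(r'laws', LawViewSet)
#     router.register(r'law_books', LawBookViewSet)
#     router.register(r'law_search', LawSearchViewSet, basename='law-search')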
|
nilq/baby-python
|
python
|
from django.urls import path, include
from rest_framework import routers
from . import viewsets
router = routers.SimpleRouter()
router.register('namespaces', viewsets.NamespaceViewSet)
app_name = 'api'
urlpatterns = [
path('', include(router.urls)),
]
|
nilq/baby-python
|
python
|
import logging
import numpy as np
from Bio import SeqIO
logger = logging.getLogger(__name__)
import os
import re
import sys
import click
import pandas as pd
import typing as t
sys.path.append("..")
from utils.rna_struct_utils import RNAStructUtils
df = pd.DataFrame({"id": [1, 2, 3, 4]})
def get_secondary_struct(
sequence_data_path: str, workdir: str, significance_score_cutoff: float = 0.9
) -> t.Tuple[
t.List[str],
t.List[str],
t.List[str],
t.List[int],
t.List[int],
t.List[float],
t.List[bool],
t.List[float],
t.List[float],
t.List[float],
t.List[float],
t.List[str]
]:
"""
this pipeline follows the one of RNASIV, which can be found in: https://www.mdpi.com/1999-4915/11/5/401/htm#B30-viruses-11-00401
:param sequence_data_path: alignment data path to provide as input to the rna secondary structures prediction
:param workdir: directory to write the pipeline output files in
:param significance_score_cutoff: threshold between 0 and 1 determining the cutoff of secondary structure RNAz
probability based on which the structure will be determined as significant or not
    :return: lists (one per secondary-structure attribute) describing the structures inferred for the respective species
"""
(
struct_representation,
struct_sequence,
struct_src_aln_path,
struct_start_position,
struct_end_position,
struct_prob,
struct_significance,
struct_mfe,
struct_zscore,
struct_entropy,
struct_conservation_index,
struct_pred_src
) = ([], [], [], [], [], [], [], [], [], [], [], [])
if not os.path.exists(sequence_data_path):
logger.error(
f"no MSA is available at {sequence_data_path} and thus no secondary structures will be computed"
)
return (
struct_representation,
struct_sequence,
struct_src_aln_path,
struct_start_position,
struct_end_position,
struct_prob,
struct_significance,
struct_mfe,
struct_zscore,
struct_entropy,
struct_conservation_index,
struct_pred_src,
)
num_sequences = len(list(SeqIO.parse(sequence_data_path, format="fasta")))
secondary_structures = []
os.makedirs(workdir, exist_ok=True)
if num_sequences > 1:
logger.info(f"computing rnaz reliable windows for prediction")
rnaz_window_output_path = f"{workdir}/rnaz_window.out"
RNAStructUtils.exec_rnaz_window(input_path=sequence_data_path, output_path=rnaz_window_output_path)
if os.stat(rnaz_window_output_path).st_size > 0:
logger.info(f"executing RNAz predictor on initial windows")
rnaz_output_path = f"{workdir}/rnaz_initial.out"
res = RNAStructUtils.exec_rnaz(input_path=rnaz_window_output_path, output_path=rnaz_output_path)
logger.info(f"clustering RNAz hits of overlapping windows")
rnaz_cluster_output_path = f"{workdir}/rnaz_cluster.dat"
res = RNAStructUtils.exec_rnaz_cluster(input_path=rnaz_output_path, output_path=rnaz_cluster_output_path)
if res == 0:
logger.info(f"extracting sequence data per selected window for mlocarna refinement")
rnaz_candidates_output_dir = f"{workdir}/rnaz_candidates_sequence_data/"
RNAStructUtils.parse_candidates(candidates_info_path=rnaz_cluster_output_path, sequence_data_path=rnaz_window_output_path, output_dir=rnaz_candidates_output_dir)
logger.info(f"creating refined alignments of candidates with mlocarna")
mlocarna_output_dir = f"{workdir}/rnaz_candidates_mlocarna_aligned/"
os.makedirs(mlocarna_output_dir, exist_ok=True)
for path in os.listdir(rnaz_candidates_output_dir):
input_path = f"{rnaz_candidates_output_dir}{path}"
output_path = f"{mlocarna_output_dir}{path.replace('.fasta', '.clustal')}"
res = RNAStructUtils.exec_mlocarna(input_path=input_path, output_path=output_path)
logger.info(f"executing prediction on aligned windows with rnaz to be able to classify the selected structures")
rnaz_refined_output_dir = f"{workdir}/rnaz_final_output/"
os.makedirs(rnaz_refined_output_dir, exist_ok=True)
for path in os.listdir(mlocarna_output_dir):
if ".clustal" in path:
input_path=f"{mlocarna_output_dir}{path}"
output_path = f"{rnaz_refined_output_dir}{path.replace('.clustal', '_rnaz.out')}"
res = RNAStructUtils.exec_rnaz(input_path=input_path, output_path=output_path)
logger.info(f"parsing the obtained rna structures")
for path in os.listdir(rnaz_refined_output_dir):
if ".out" in path:
struct = RNAStructUtils.parse_rnaz_output(rnaz_output_path=f"{rnaz_refined_output_dir}{path}", significance_score_cutoff=significance_score_cutoff)
secondary_structures.append(struct)
else:
logger.info(f"executing RNALfold on the single sequence obtained for the species")
rnalfold_output_path = f"{workdir}/rnalfold.out"
res = RNAStructUtils.exec_rnalfold(input_path=sequence_data_path, output_path=rnalfold_output_path)
if res == 0:
secondary_structures = RNAStructUtils.parse_rnalfold_result(rnalfold_path=rnalfold_output_path, sequence_data_path=sequence_data_path)
functional_structures = [struct for struct in secondary_structures if bool(struct.is_significant) and bool(struct.is_functional_structure)]
logger.info(f"out of {len(secondary_structures)}, {len(functional_structures)} are significant and functional")
if len(functional_structures) > 1:
logger.info(f"the mean z-score for the predicted structures is {np.mean([struct.mean_zscore for struct in functional_structures])} and standard deviation of {np.std([struct.mean_zscore for struct in functional_structures])}")
for struct in secondary_structures: # here, I will save all the structures and filter out weight them by svm_rna_probability (= prb > 0.5 means it is a functional RNA, prob larger than 0.9 is more stringent and what was used in RNASIV)
struct_representation.append(struct.consensus_representation)
struct_sequence.append(struct.consensus_sequence)
struct_start_position.append(struct.start_position)
struct_end_position.append(struct.end_position)
struct_src_aln_path.append(struct.alignment_path)
struct_prob.append(struct.svm_rna_probability)
struct_significance.append(struct.is_significant)
struct_mfe.append(struct.mean_single_sequence_mfe)
struct_zscore.append(struct.mean_zscore)
struct_entropy.append(struct.shannon_entropy)
struct_conservation_index.append(struct.structure_conservation_index)
struct_pred_src.append(struct.structure_prediction_tool)
return (
struct_representation,
struct_sequence,
struct_src_aln_path,
struct_start_position,
struct_end_position,
struct_prob,
struct_significance,
struct_mfe,
struct_zscore,
struct_entropy,
struct_conservation_index,
struct_pred_src
)
def compute_rna_secondary_structures(
input_df: pd.DataFrame,
sequence_data_dir: str,
workdir: str,
output_path: str,
significance_score_cutoff: float = 0.9,
):
"""
:param input_df: dataframe with viral species of interest
:param sequence_data_dir: directory holding sequence data of the viral species of interest
    :param workdir: directory to hold the pipeline output files in
:param output_path: path of output dataframe
    :param significance_score_cutoff: threshold between 0 and 1 determining the cutoff of secondary structure RNAz
probability based on which the structure will be determined as significant or not
:return:
"""
secondary_structures_df = pd.DataFrame(
{"virus_species_name": input_df["virus_species_name"].unique()}
)
secondary_struct_fields = [
"struct_representation",
"struct_sequence",
"struct_src_aln_path",
"struct_start_pos",
"struct_end_pos",
"struct_prob",
"struct_significance",
"struct_mfe",
"struct_zscore",
"struct_entropy",
"struct_conservation_index",
"struct_prediction_tool"
]
secondary_structures_df[secondary_struct_fields] = secondary_structures_df[["virus_species_name"]].apply(
func=lambda sp_name: get_secondary_struct(
sequence_data_path=f"{sequence_data_dir}{re.sub('[^0-9a-zA-Z]+', '_', sp_name.values[0])}_aligned.fasta",
workdir=f"{workdir}/{re.sub('[^0-9a-zA-Z]+', '_', sp_name.values[0])}/",
significance_score_cutoff=significance_score_cutoff),
axis=1,
result_type="expand")
secondary_structures_df = secondary_structures_df.set_index(['virus_species_name']).apply(pd.Series.explode, axis=0).reset_index()
secondary_structures_df.to_csv(output_path, index=False)
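# A hedged illustration of the apply(..., result_type="expand") + explode pattern used
# above, on a toy frame (column names here are made up and unrelated to the pipeline):
#
#     toy = pd.DataFrame({"name": ["a", "b"]})
#     toy[["x", "y"]] = toy[["name"]].apply(
#         lambda row: ([1, 2], [3, 4]), axis=1, result_type="expand")
#     toy = toy.set_index(["name"]).apply(pd.Series.explode, axis=0).reset_index()
#     # each list element becomes its own row: ("a", 1, 3), ("a", 2, 4), ("b", 1, 3), ...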
@click.command()
@click.option(
"--associations_data_path",
type=click.Path(exists=True, file_okay=True, readable=True),
help="input path of associations grouped viral species and host species",
)
@click.option(
"--sequence_data_dir",
type=click.Path(exists=False, file_okay=True, readable=True),
help="directory holding sequence data files per species with their collected sequences",
)
@click.option(
"--workdir",
type=click.Path(exists=False, file_okay=True, readable=True),
help="directory to hold the RNA prediction pipeline files in",
required=False,
default=None
)
@click.option(
"--log_path",
type=click.Path(exists=False, file_okay=True, readable=True),
help="path holding the logging of the script",
)
@click.option(
"--df_output_path",
type=click.Path(exists=False, file_okay=True, readable=True),
help="path holding the output dataframe to write",
)
@click.option(
"--significance_score_cutoff",
type=click.FloatRange(min=0, max=1),
help="significance_score_cutoff: threshold between 0 and 1 determining the cutoff of secondary structure RNAz probability based on which the structure will be determined as significant or not",
required=False,
default = 0.9,
)
@click.option(
"--limit_to_species_with_multiple_sequences",
type=bool,
help="significance_score_cutoff: threshold between 0 and 1 determining the cutoff of secondary structure RNAz probability based on which the structure will be determined as significant or not",
required=False,
default = True,
)
def predict_secondary_structures(
associations_data_path: click.Path,
sequence_data_dir: click.Path,
workdir: t.Optional[click.Path],
log_path: click.Path,
df_output_path: click.Path,
significance_score_cutoff: float,
limit_to_species_with_multiple_sequences: bool,
):
# initialize the logger
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s module: %(module)s function: %(funcName)s line: %(lineno)d %(message)s",
handlers=[
logging.StreamHandler(sys.stdout),
logging.FileHandler(str(log_path)),
],
        force=True,  # override root logger settings to enable simultaneous writing to both stdout and file handler
)
if not workdir:
workdir = f"{os.path.dirname(str(associations_data_path))}/rna_pred_aux/"
logger.info(f"creating working directory {workdir}")
os.makedirs(workdir, exist_ok=True)
associations_data = pd.read_csv(associations_data_path)
if limit_to_species_with_multiple_sequences:
associations_data = associations_data.loc[associations_data['#sequences'] > 1]
compute_rna_secondary_structures(
input_df=associations_data,
sequence_data_dir=str(sequence_data_dir),
workdir=str(workdir),
output_path=str(df_output_path),
significance_score_cutoff=significance_score_cutoff
)
if __name__ == '__main__':
predict_secondary_structures()
|
nilq/baby-python
|
python
|
def includeme(config):
config.add_static_view('static', 'static', cache_max_age=0)
config.add_route('home', '/')
config.add_route('auth', '/auth')
config.add_route('pantry', '/pantry')
config.add_route('detail', '/detail/{upc}')
config.add_route('manage_item', '/manage_item')
config.add_route('logout', '/logout')
config.add_route('about', '/about_us')
|
nilq/baby-python
|
python
|
from method import *
from friends import *
import numpy as np
import os
import sys
# Run FOF algorithm on all blocks of data in 'split_dir'
'''
Required parameters in config file:
directory, split_dir, m1, m2, t_gap, v_gap, tstart, tsamp, vsamp, fof_testing_mode
And from FITS header:
TBIN, CHAN_BW, OBSFREQ, NCHAN
'''
def main(hotpotato):
print("Running Friend-Of-Friends.\n")
params_list= ['split_dir', 'filetype', 'm1', 'm2', 't_gap', 'v_gap', 'tstart', 'fof_testing_mode',
'tsamp', 'vsamp', 'dec_block_list']
fits_params_list= ['TBIN', 'CHAN_BW', 'OBSFREQ', 'NCHAN']
fil_params_list= ['tsamp', 'foff', 'vlow', 'vhigh']
print_params(params_list)
print_fits_params(fits_params_list)
print_fil_params(fil_params_list)
# Get data file location
split_dir= get_value(hotpotato, 'split_dir')
filetype= get_value(hotpotato, 'filetype')
dec_name= get_value(hotpotato, 'dec_name')
# Get parameters from hotpotato
m1 = get_value(hotpotato, 'm1')
m2 = get_value(hotpotato, 'm2')
t_gap = int(get_value(hotpotato, 't_gap'))
v_gap = int(get_value(hotpotato, 'v_gap'))
tstart = get_value(hotpotato, 'tstart')
testing_mode= get_value(hotpotato, 'fof_testing_mode')
# Set up dictionary of global parameters
gd = {}
if filetype == 'psrfits':
dt= get_value(hotpotato, 'TBIN')
dv= abs(get_value(hotpotato, 'CHAN_BW'))
tsamp= int(get_value(hotpotato, 'tsamp'))
vsamp= int(get_value(hotpotato, 'vsamp'))
gd['tsamp']= tsamp
gd['vsamp']= vsamp
gd['vlow'] = get_value(hotpotato, 'OBSFREQ') - dv * get_value(hotpotato, 'NCHAN') / 2.0
gd['vhigh'] = get_value(hotpotato, 'OBSFREQ') + dv * get_value(hotpotato, 'NCHAN') / 2.0
elif filetype == 'filterbank':
dt= get_value(hotpotato, 'tsamp')
dv= abs(get_value(hotpotato, 'foff'))
# Note the naming convention change:
tcombine= int(get_value(hotpotato, 'tcombine'))
vcombine= int(get_value(hotpotato, 'vcombine'))
gd['tsamp']= tcombine
gd['vsamp']= vcombine
gd['vlow']= get_value(hotpotato, 'vlow')
gd['vhigh']= get_value(hotpotato, 'vhigh')
else:
print('Filetype not recognized. Quitting... ')
sys.exit()
gd['dt']= dt
gd['dv']= dv
# Get Files
dec_block_list= get_value(hotpotato, 'dec_block_list')
if dec_block_list == '':
# Get list of data files
dec_block_list= os.listdir(split_dir)
print("Files in split_dir: " + str(dec_block_list))
# Get Relevant Files
files_len= len(dec_block_list)
n= 0
while n < files_len:
block= dec_block_list[n]
splitted= block.split('_')
if splitted[0] != dec_name or len(splitted) < 3:
dec_block_list.remove(block)
files_len-= 1
else:
n+= 1
else:
pass
print(dec_block_list)
# Run FOF on each block
fof_block_list= []
for dec_block_name in dec_block_list:
n= int(dec_block_name.split('_')[1][5:])
print('Block: %d' %(n))
try:
print('%s/%s' %(split_dir, dec_block_name))
data= np.load('%s/%s' %(split_dir, dec_block_name))
except:
print('The file -- %s -- does not exist in %s' %(dec_block_name, split_dir))
continue
print('Data Shape: ' + str(data.shape))
if data.shape[0] > 0 and data.shape[1] > 0:
fof(gd, data, m1, m2, t_gap, v_gap, tstart, testing_mode, True, n)
if get_value(hotpotato, 'bandpass_name') != '':
                # Use the decimation factors stored in gd so this works for both psrfits and filterbank inputs
                clust_name= 'block%d_clust_%.1f_%d_%d_%d_%d_%d.txt' %(n, m1, m2, gd['tsamp'], gd['vsamp'], t_gap, v_gap)
                superclust_name= 'block%d_superclust_%.1f_%d_%d_%d_%d_%d.txt' %(n, m1, m2, gd['tsamp'], gd['vsamp'], t_gap, v_gap)
                png_name= 'block%d_clust_%.1f_%d_%d_%d_%d_%d.png' %(n, m1, m2, gd['tsamp'], gd['vsamp'], t_gap, v_gap)
                bandpass_str= dec_block_name[dec_block_name.find('chans'):dec_block_name.find('.npy')]
                bandpass_clust_name= 'block%d_%s_clust_%.1f_%d_%d_%d_%d_%d.txt' %(n, bandpass_str, m1, m2, gd['tsamp'], gd['vsamp'], t_gap, v_gap)
                bandpass_superclust_name= 'block%d_%s_superclust_%.1f_%d_%d_%d_%d_%d.txt' %(n, bandpass_str, m1, m2, gd['tsamp'], gd['vsamp'], t_gap, v_gap)
                bandpass_png_name= 'block%d_%s_clust_%.1f_%d_%d_%d_%d_%d.png' %(n, bandpass_str, m1, m2, gd['tsamp'], gd['vsamp'], t_gap, v_gap)
cmd1= 'mv %s %s' %(clust_name, bandpass_clust_name)
cmd2= 'mv %s %s' %(superclust_name, bandpass_superclust_name)
cmd3= 'mv %s %s' %(png_name, bandpass_png_name)
try_cmd(cmd1)
try_cmd(cmd2)
try_cmd(cmd3)
fof_block_list.append(bandpass_clust_name)
else:
                clust_name= 'block%d_clust_%.1f_%d_%d_%d_%d_%d.txt' %(n, m1, m2, gd['tsamp'], gd['vsamp'], t_gap, v_gap)
fof_block_list.append(clust_name)
cmd= "mv *clust_* %s" %(split_dir)
try_cmd(cmd)
hotpotato['fof_block_list']= fof_block_list
return hotpotato
|
nilq/baby-python
|
python
|
class ClientTrader():
def __init__(self) -> None:
pass
def login(self) -> None:
pass
if __name__ == '__main__':
pass
|
nilq/baby-python
|
python
|
#
# Copyright 2021 Budapest Quantum Computing Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Type, Optional
import abc
import copy
import numpy as np
from piquasso.api.config import Config
class State(abc.ABC):
"""The base class from which all `State` classes are derived.
Properties:
d (int): Instance attribute specifying the number of modes.
"""
_config_class: Type[Config] = Config
def __init__(self, config: Optional[Config] = None) -> None:
self._config = config.copy() if config is not None else self._config_class()
def _get_auxiliary_modes(self, modes: Tuple[int, ...]) -> Tuple[int, ...]:
return tuple(np.delete(np.arange(self.d), modes))
def copy(self) -> "State":
"""Returns an exact copy of this state.
Returns:
State: An exact copy of this state.
"""
return copy.deepcopy(self)
@property
@abc.abstractmethod
def d(self) -> int:
pass
@property
@abc.abstractmethod
def fock_probabilities(self) -> np.ndarray:
"""Returns the particle detection probabilities.
Note:
The ordering of the Fock basis is increasing with particle numbers, and in
each particle number conserving subspace, lexicographic ordering is used.
Returns:
numpy.ndarray: The particle detection probabilities.
"""
pass
@abc.abstractmethod
def validate(self) -> None:
"""Validates the state."""
pass
@abc.abstractmethod
def get_particle_detection_probability(
self, occupation_number: Tuple[int, ...]
) -> float:
"""
Returns the particle number detection probability using the occupation number
specified as a parameter.
Args:
occupation_number (tuple):
Tuple of natural numbers representing the number of particles in each
mode.
Returns:
float: The probability of detection.
"""
pass
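
# A minimal, hedged sketch of a concrete subclass (illustration only; this class is
# not part of the piquasso API and the returned numbers are placeholders):
class _ExampleVacuumState(State):
    """A toy single-configuration state used purely to illustrate the interface."""

    def __init__(self, d: int, config: Optional[Config] = None) -> None:
        super().__init__(config=config)
        self._d = d

    @property
    def d(self) -> int:
        return self._d

    @property
    def fock_probabilities(self) -> np.ndarray:
        # All probability mass on the vacuum component in this toy example.
        return np.array([1.0])

    def validate(self) -> None:
        pass

    def get_particle_detection_probability(
        self, occupation_number: Tuple[int, ...]
    ) -> float:
        return 1.0 if sum(occupation_number) == 0 else 0.0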
|
nilq/baby-python
|
python
|
selenium_wrapper_web_driver_not_found_error = "Web Driver not found"
selenium_wrapper_opera_path_error = "Opera needs an executable path"
selenium_wrapper_set_options_error = "only accepts dict type"
selenium_wrapper_set_argument_error = "only accepts str type"
|
nilq/baby-python
|
python
|
"""
This module defines the bulk modulus workflow.
"""
from uuid import uuid4
from atomate.utils.utils import get_logger
from atomate.vasp.firetasks.parse_outputs import FitEOSToDb
from atomate.vasp.workflows.base.deformations import get_wf_deformations
from fireworks import Firework, Workflow
from pymatgen.analysis.elasticity.strain import Deformation
from pymatgen.io.vasp.sets import MPStaticSet
__author__ = "Kiran Mathew"
__email__ = "kmathew@lbl.gov"
logger = get_logger(__name__)
def get_wf_bulk_modulus(
structure,
deformations,
vasp_input_set=None,
vasp_cmd="vasp",
db_file=None,
user_kpoints_settings=None,
eos="vinet",
tag=None,
copy_vasp_outputs=False,
user_incar_settings=None,
):
"""
Returns the workflow that computes the bulk modulus by fitting to the given equation
of state.
Args:
structure (Structure): input structure.
deformations (list): list of deformation matrices (list of lists).
vasp_input_set (VaspInputSet): for the static deformation calculations
vasp_cmd (str): vasp command to run.
db_file (str): path to the db file.
user_kpoints_settings (dict): example: {"grid_density": 7000}
eos (str): equation of state used for fitting the energies and the volumes.
supported equation of states: "quadratic", "murnaghan", "birch",
"birch_murnaghan", "pourier_tarantola", "vinet", "deltafactor".
See pymatgen.analysis.eos.py
tag (str): something unique to identify the tasks in this workflow. If None a
random uuid will be assigned.
        copy_vasp_outputs (bool): whether or not to copy the outputs from the previous calc
(usually structure optimization) before the deformations are performed.
user_incar_settings (dict):
Returns:
Workflow
"""
tag = tag or "bulk_modulus group: >>{}<<".format(str(uuid4()))
deformations = [Deformation(defo_mat) for defo_mat in deformations]
vis_static = vasp_input_set or MPStaticSet(
structure=structure,
force_gamma=True,
user_kpoints_settings=user_kpoints_settings,
user_incar_settings=user_incar_settings,
)
wf_bulk_modulus = get_wf_deformations(
structure,
deformations,
name="bulk_modulus deformation",
vasp_input_set=vis_static,
vasp_cmd=vasp_cmd,
copy_vasp_outputs=copy_vasp_outputs,
db_file=db_file,
tag=tag,
)
fit_eos = FitEOSToDb(tag=tag, db_file=db_file, eos=eos)
fw_analysis = Firework(fit_eos, name="fit equation of state")
wf_analysis = Workflow.from_Firework(fw_analysis)
wf_bulk_modulus.append_wf(wf_analysis, wf_bulk_modulus.leaf_fw_ids)
formula = structure.composition.reduced_formula
wf_bulk_modulus.name = "{}:{}".format(formula, "Bulk modulus")
return wf_bulk_modulus
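
# A hedged usage sketch (illustration only; the strain grid and the env_chk-style
# ">>vasp_cmd<<"/">>db_file<<" placeholders are assumptions, not documented defaults):
#
#     import numpy as np
#     strains = np.linspace(-0.05, 0.05, 7)
#     deformations = [(np.eye(3) * (1 + s)).tolist() for s in strains]
#     wf = get_wf_bulk_modulus(structure, deformations, vasp_cmd=">>vasp_cmd<<",
#                              db_file=">>db_file<<", eos="vinet")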
|
nilq/baby-python
|
python
|
import abc
from torch.nn import Module
class CuriosityModule(abc.ABC, Module):
def __init__(self):
super().__init__()
# self.get_single_intrinsic_reward = single_batch(self.get_intrinsic_reward)
# self.get_single_training_loss = single_batch(self.get_training_loss)
@abc.abstractmethod
def get_intrinsic_reward(self, state, action, next_state):
        raise NotImplementedError
@abc.abstractmethod
def get_training_loss(self, state, action, next_state):
        raise NotImplementedError
# def get_single_intrinsic_reward(self, state, action, next_state):
# raise NotImplemented
# def get_single_training_loss(self, state, action, next_state):
# raise NotImplemented
|
nilq/baby-python
|
python
|
from collections import namedtuple
GamePlayerScores = namedtuple(
'GamePlayerScores',
['assists', 'creep_score', 'deaths', 'kills', 'ward_score']
)
GamePlayerItem = namedtuple(
'GamePlayerItem',
['id', 'name', 'slot', 'can_use', 'consumable', 'count', 'price']
)
GamePlayerRunes = namedtuple(
'GamePlayerRunes',
['keystone', 'primary_tree', 'secondary_tree']
)
GameActivePlayerAbilities = namedtuple(
'GameActivePlayerAbilities',
['q', 'w', 'e', 'r', 'passive']
)
GameActivePlayer = namedtuple(
'GameActivePlayer',
['name', 'gold', 'champion_stats', 'abilities']
)
class GamePlayer:
"""
Represents a player in a League of Legends game, as defined by the League
of Legends client API.
"""
def __init__(self, data, active_player: GameActivePlayer = None):
"""
Initializes a GamePlayer (class to represent a player in League of Legends).
:param data: The JSON data from the client API for the player.
:param active_player: If specified, this is used to add additional properties
if this class represents the active player.
"""
if data.get('summonerName'):
self.summoner = data['summonerName']
self.team = data['team']
self.is_bot = data['isBot']
self.is_dead = data['isDead']
self.champion = data['championName']
self.level = data['level']
self.skin_id = data['skinID']
self.respawn_timer = data['respawnTimer']
self.scores = GamePlayerScores(
assists=data['scores']['assists'],
creep_score=data['scores']['creepScore'],
deaths=data['scores']['deaths'],
kills=data['scores']['kills'],
ward_score=data['scores']['wardScore']
)
if data.get('summonerSpells'):
self.spells = []
if data['summonerSpells'].get('summonerSpellOne'):
self.spells.append(data['summonerSpells']['summonerSpellOne']['displayName'])
if data['summonerSpells'].get('summonerSpellTwo'):
self.spells.append(data['summonerSpells']['summonerSpellTwo']['displayName'])
if data.get('runes'):
self.runes = GamePlayerRunes(
keystone=data['runes'].get('keystone'),
primary_tree=data['runes'].get('primaryRuneTree'),
secondary_tree=data['runes'].get('secondaryRuneTree')
)
if data.get('items'):
self.items = []
for item in data['items']:
self.items.append(GamePlayerItem(
id=item['itemID'],
name=item['displayName'],
slot=item['slot'],
can_use=item['canUse'],
consumable=item['consumable'],
count=item['count'],
price=item['price']
))
self.is_active_player = None
if active_player is not None:
self.is_active_player = False
if active_player.name == self.summoner:
self.is_active_player = True
self.gold = active_player.gold
self.champion_stats = active_player.champion_stats
self.abilities = active_player.abilities
|
nilq/baby-python
|
python
|
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import os, os.path as path
from .web_server import web_server
from bes.fs.file_util import file_util
from bes.fs.file_path import file_path
from bes.fs.testing.temp_content import temp_content
from bes.archive.temp_archive import temp_archive
class file_web_server(web_server):
'A simple web server that serves whatever files are found in its root dir'
def __init__(self, root_dir, *args, **kargs):
super(file_web_server, self).__init__(log_tag = 'file_web_server', *args, **kargs)
self._root_dir = root_dir
def handle_request(self, environ, start_response):
path_info = self.path_info(environ)
if not path.isfile(path_info.rooted_filename):
return self.response_error(start_response, 404)
mime_type = self.mime_type(path_info.rooted_filename)
content = file_util.read(path_info.rooted_filename)
headers = [
( 'Content-Type', str(mime_type) ),
( 'Content-Length', str(len(content)) ),
]
return self.response_success(start_response, 200, [ content ], headers)
def write_temp_content(self, items):
temp_content.write_items(items, self._root_dir)
def write_file(self, filename, content, codec = 'utf-8', mode = None):
p = self.file_path(filename)
if path.exists(p):
      raise IOError('already exists: {}'.format(filename))
file_util.save(p, content = content, codec = codec, mode = mode)
def read_file(self, filename, codec = 'utf-8'):
return file_util.read(self.file_path(filename), codec = codec)
def has_file(self, filename):
return path.exists(self.file_path(filename))
def file_path(self, filename):
return path.join(self._root_dir, filename)
def write_archive(self, filename, items):
p = self.file_path(filename)
if path.exists(p):
      raise IOError('already exists: {}'.format(filename))
extension = file_util.extension(filename)
tmp_archive = temp_archive.make_temp_archive(items, extension)
file_util.rename(tmp_archive, p)
|
nilq/baby-python
|
python
|
from aiohttp import web
from Bubot.Helpers.Helper import Helper
class ReportHandler(web.View):
def __init__(self, request):
web.View.__init__(self, request)
self.obj_type = self.request.match_info.get('objType')
self.obj_name = self.request.match_info.get('objName')
self.report_name = self.request.match_info.get('reportName')
self.report_section = self.request.match_info.get('reportSection')
try:
self.handler = Helper.get_obj_class(f'jay.{self.obj_type}.{self.obj_name}', 'reports', self.report_name)()
except Exception as err:
raise err
async def get(self):
try:
handler = getattr(self.handler, self.report_section)
return await handler(self)
except Exception as err:
return web.HTTPInternalServerError(text=str(err))
|
nilq/baby-python
|
python
|
# coding: utf-8
"""
Looker API 3.0 Reference
### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning. Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning)
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class RenderTaskApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_dashboard_render_task(self, dashboard_id, result_format, body, width, height, **kwargs):
"""
Create Dashboard Render Task
### Create a new task to render a dashboard to a document or image. Returns a render task object. To check the status of a render task, pass the render_task.id to [Get Render Task](#!/RenderTask/get_render_task). Once the render task is complete, you can download the resulting document or image using [Get Render Task Results](#!/RenderTask/get_render_task_results).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_dashboard_render_task(dashboard_id, result_format, body, width, height, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int dashboard_id: Id of dashboard to render (required)
:param str result_format: Output type: pdf, png, or jpg (required)
:param CreateDashboardRenderTask body: Dashboard render task parameters (required)
:param int width: Output width in pixels (required)
:param int height: Output height in pixels (required)
:param str fields: Requested fields.
:return: RenderTask
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_dashboard_render_task_with_http_info(dashboard_id, result_format, body, width, height, **kwargs)
else:
(data) = self.create_dashboard_render_task_with_http_info(dashboard_id, result_format, body, width, height, **kwargs)
return data
def create_dashboard_render_task_with_http_info(self, dashboard_id, result_format, body, width, height, **kwargs):
"""
Create Dashboard Render Task
### Create a new task to render a dashboard to a document or image. Returns a render task object. To check the status of a render task, pass the render_task.id to [Get Render Task](#!/RenderTask/get_render_task). Once the render task is complete, you can download the resulting document or image using [Get Render Task Results](#!/RenderTask/get_render_task_results).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_dashboard_render_task_with_http_info(dashboard_id, result_format, body, width, height, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int dashboard_id: Id of dashboard to render (required)
:param str result_format: Output type: pdf, png, or jpg (required)
:param CreateDashboardRenderTask body: Dashboard render task parameters (required)
:param int width: Output width in pixels (required)
:param int height: Output height in pixels (required)
:param str fields: Requested fields.
:return: RenderTask
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['dashboard_id', 'result_format', 'body', 'width', 'height', 'fields']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_dashboard_render_task" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'dashboard_id' is set
if ('dashboard_id' not in params) or (params['dashboard_id'] is None):
raise ValueError("Missing the required parameter `dashboard_id` when calling `create_dashboard_render_task`")
# verify the required parameter 'result_format' is set
if ('result_format' not in params) or (params['result_format'] is None):
raise ValueError("Missing the required parameter `result_format` when calling `create_dashboard_render_task`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_dashboard_render_task`")
# verify the required parameter 'width' is set
if ('width' not in params) or (params['width'] is None):
raise ValueError("Missing the required parameter `width` when calling `create_dashboard_render_task`")
# verify the required parameter 'height' is set
if ('height' not in params) or (params['height'] is None):
raise ValueError("Missing the required parameter `height` when calling `create_dashboard_render_task`")
collection_formats = {}
resource_path = '/render_tasks/dashboards/{dashboard_id}/{result_format}'.replace('{format}', 'json')
path_params = {}
if 'dashboard_id' in params:
path_params['dashboard_id'] = params['dashboard_id']
if 'result_format' in params:
path_params['result_format'] = params['result_format']
query_params = {}
if 'width' in params:
query_params['width'] = params['width']
if 'height' in params:
query_params['height'] = params['height']
if 'fields' in params:
query_params['fields'] = params['fields']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RenderTask',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_look_render_task(self, look_id, result_format, width, height, **kwargs):
"""
Create Look Render Task
### Create a new task to render a look to an image. Returns a render task object. To check the status of a render task, pass the render_task.id to [Get Render Task](#!/RenderTask/get_render_task). Once the render task is complete, you can download the resulting document or image using [Get Render Task Results](#!/RenderTask/get_render_task_results).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_look_render_task(look_id, result_format, width, height, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int look_id: Id of look to render (required)
:param str result_format: Output type: png, or jpg (required)
:param int width: Output width in pixels (required)
:param int height: Output height in pixels (required)
:param str fields: Requested fields.
:return: RenderTask
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_look_render_task_with_http_info(look_id, result_format, width, height, **kwargs)
else:
(data) = self.create_look_render_task_with_http_info(look_id, result_format, width, height, **kwargs)
return data
def create_look_render_task_with_http_info(self, look_id, result_format, width, height, **kwargs):
"""
Create Look Render Task
### Create a new task to render a look to an image. Returns a render task object. To check the status of a render task, pass the render_task.id to [Get Render Task](#!/RenderTask/get_render_task). Once the render task is complete, you can download the resulting document or image using [Get Render Task Results](#!/RenderTask/get_render_task_results).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_look_render_task_with_http_info(look_id, result_format, width, height, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int look_id: Id of look to render (required)
:param str result_format: Output type: png, or jpg (required)
:param int width: Output width in pixels (required)
:param int height: Output height in pixels (required)
:param str fields: Requested fields.
:return: RenderTask
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['look_id', 'result_format', 'width', 'height', 'fields']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_look_render_task" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'look_id' is set
if ('look_id' not in params) or (params['look_id'] is None):
raise ValueError("Missing the required parameter `look_id` when calling `create_look_render_task`")
# verify the required parameter 'result_format' is set
if ('result_format' not in params) or (params['result_format'] is None):
raise ValueError("Missing the required parameter `result_format` when calling `create_look_render_task`")
# verify the required parameter 'width' is set
if ('width' not in params) or (params['width'] is None):
raise ValueError("Missing the required parameter `width` when calling `create_look_render_task`")
# verify the required parameter 'height' is set
if ('height' not in params) or (params['height'] is None):
raise ValueError("Missing the required parameter `height` when calling `create_look_render_task`")
collection_formats = {}
resource_path = '/render_tasks/looks/{look_id}/{result_format}'.replace('{format}', 'json')
path_params = {}
if 'look_id' in params:
path_params['look_id'] = params['look_id']
if 'result_format' in params:
path_params['result_format'] = params['result_format']
query_params = {}
if 'width' in params:
query_params['width'] = params['width']
if 'height' in params:
query_params['height'] = params['height']
if 'fields' in params:
query_params['fields'] = params['fields']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RenderTask',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_lookml_dashboard_render_task(self, dashboard_id, result_format, body, width, height, **kwargs):
"""
Create Lookml Dashboard Render Task
### Create a new task to render a lookml dashboard to a document or image. Returns a render task object. To check the status of a render task, pass the render_task.id to [Get Render Task](#!/RenderTask/get_render_task). Once the render task is complete, you can download the resulting document or image using [Get Render Task Results](#!/RenderTask/get_render_task_results).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_lookml_dashboard_render_task(dashboard_id, result_format, body, width, height, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str dashboard_id: Id of lookml dashboard to render (required)
:param str result_format: Output type: pdf, png, or jpg (required)
:param CreateDashboardRenderTask body: Dashboard render task parameters (required)
:param int width: Output width in pixels (required)
:param int height: Output height in pixels (required)
:param str fields: Requested fields.
:return: RenderTask
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_lookml_dashboard_render_task_with_http_info(dashboard_id, result_format, body, width, height, **kwargs)
else:
(data) = self.create_lookml_dashboard_render_task_with_http_info(dashboard_id, result_format, body, width, height, **kwargs)
return data
def create_lookml_dashboard_render_task_with_http_info(self, dashboard_id, result_format, body, width, height, **kwargs):
"""
Create Lookml Dashboard Render Task
### Create a new task to render a lookml dashboard to a document or image. Returns a render task object. To check the status of a render task, pass the render_task.id to [Get Render Task](#!/RenderTask/get_render_task). Once the render task is complete, you can download the resulting document or image using [Get Render Task Results](#!/RenderTask/get_render_task_results).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_lookml_dashboard_render_task_with_http_info(dashboard_id, result_format, body, width, height, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str dashboard_id: Id of lookml dashboard to render (required)
:param str result_format: Output type: pdf, png, or jpg (required)
:param CreateDashboardRenderTask body: Dashboard render task parameters (required)
:param int width: Output width in pixels (required)
:param int height: Output height in pixels (required)
:param str fields: Requested fields.
:return: RenderTask
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['dashboard_id', 'result_format', 'body', 'width', 'height', 'fields']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_lookml_dashboard_render_task" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'dashboard_id' is set
if ('dashboard_id' not in params) or (params['dashboard_id'] is None):
raise ValueError("Missing the required parameter `dashboard_id` when calling `create_lookml_dashboard_render_task`")
# verify the required parameter 'result_format' is set
if ('result_format' not in params) or (params['result_format'] is None):
raise ValueError("Missing the required parameter `result_format` when calling `create_lookml_dashboard_render_task`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_lookml_dashboard_render_task`")
# verify the required parameter 'width' is set
if ('width' not in params) or (params['width'] is None):
raise ValueError("Missing the required parameter `width` when calling `create_lookml_dashboard_render_task`")
# verify the required parameter 'height' is set
if ('height' not in params) or (params['height'] is None):
raise ValueError("Missing the required parameter `height` when calling `create_lookml_dashboard_render_task`")
collection_formats = {}
resource_path = '/render_tasks/lookml_dashboards/{dashboard_id}/{result_format}'.replace('{format}', 'json')
path_params = {}
if 'dashboard_id' in params:
path_params['dashboard_id'] = params['dashboard_id']
if 'result_format' in params:
path_params['result_format'] = params['result_format']
query_params = {}
if 'width' in params:
query_params['width'] = params['width']
if 'height' in params:
query_params['height'] = params['height']
if 'fields' in params:
query_params['fields'] = params['fields']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RenderTask',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_query_render_task(self, query_id, result_format, width, height, **kwargs):
"""
Create Query Render Task
### Create a new task to render an existing query to an image. Returns a render task object. To check the status of a render task, pass the render_task.id to [Get Render Task](#!/RenderTask/get_render_task). Once the render task is complete, you can download the resulting document or image using [Get Render Task Results](#!/RenderTask/get_render_task_results).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_query_render_task(query_id, result_format, width, height, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int query_id: Id of the query to render (required)
:param str result_format: Output type: png or jpg (required)
:param int width: Output width in pixels (required)
:param int height: Output height in pixels (required)
:param str fields: Requested fields.
:return: RenderTask
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_query_render_task_with_http_info(query_id, result_format, width, height, **kwargs)
else:
(data) = self.create_query_render_task_with_http_info(query_id, result_format, width, height, **kwargs)
return data
def create_query_render_task_with_http_info(self, query_id, result_format, width, height, **kwargs):
"""
Create Query Render Task
### Create a new task to render an existing query to an image. Returns a render task object. To check the status of a render task, pass the render_task.id to [Get Render Task](#!/RenderTask/get_render_task). Once the render task is complete, you can download the resulting document or image using [Get Render Task Results](#!/RenderTask/get_render_task_results).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_query_render_task_with_http_info(query_id, result_format, width, height, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int query_id: Id of the query to render (required)
:param str result_format: Output type: png or jpg (required)
:param int width: Output width in pixels (required)
:param int height: Output height in pixels (required)
:param str fields: Requested fields.
:return: RenderTask
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['query_id', 'result_format', 'width', 'height', 'fields']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_query_render_task" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'query_id' is set
if ('query_id' not in params) or (params['query_id'] is None):
raise ValueError("Missing the required parameter `query_id` when calling `create_query_render_task`")
# verify the required parameter 'result_format' is set
if ('result_format' not in params) or (params['result_format'] is None):
raise ValueError("Missing the required parameter `result_format` when calling `create_query_render_task`")
# verify the required parameter 'width' is set
if ('width' not in params) or (params['width'] is None):
raise ValueError("Missing the required parameter `width` when calling `create_query_render_task`")
# verify the required parameter 'height' is set
if ('height' not in params) or (params['height'] is None):
raise ValueError("Missing the required parameter `height` when calling `create_query_render_task`")
collection_formats = {}
resource_path = '/render_tasks/queries/{query_id}/{result_format}'.replace('{format}', 'json')
path_params = {}
if 'query_id' in params:
path_params['query_id'] = params['query_id']
if 'result_format' in params:
path_params['result_format'] = params['result_format']
query_params = {}
if 'width' in params:
query_params['width'] = params['width']
if 'height' in params:
query_params['height'] = params['height']
if 'fields' in params:
query_params['fields'] = params['fields']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RenderTask',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def render_task(self, render_task_id, **kwargs):
"""
Get Render Task
### Get information about a render task. Returns a render task object. To check the status of a render task, pass the render_task.id to [Get Render Task](#!/RenderTask/get_render_task). Once the render task is complete, you can download the resulting document or image using [Get Render Task Results](#!/RenderTask/get_render_task_results).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.render_task(render_task_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str render_task_id: Id of render task (required)
:param str fields: Requested fields.
:return: RenderTask
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.render_task_with_http_info(render_task_id, **kwargs)
else:
(data) = self.render_task_with_http_info(render_task_id, **kwargs)
return data
def render_task_with_http_info(self, render_task_id, **kwargs):
"""
Get Render Task
### Get information about a render task. Returns a render task object. To check the status of a render task, pass the render_task.id to [Get Render Task](#!/RenderTask/get_render_task). Once the render task is complete, you can download the resulting document or image using [Get Render Task Results](#!/RenderTask/get_render_task_results).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.render_task_with_http_info(render_task_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str render_task_id: Id of render task (required)
:param str fields: Requested fields.
:return: RenderTask
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['render_task_id', 'fields']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method render_task" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'render_task_id' is set
if ('render_task_id' not in params) or (params['render_task_id'] is None):
raise ValueError("Missing the required parameter `render_task_id` when calling `render_task`")
collection_formats = {}
resource_path = '/render_tasks/{render_task_id}'.replace('{format}', 'json')
path_params = {}
if 'render_task_id' in params:
path_params['render_task_id'] = params['render_task_id']
query_params = {}
if 'fields' in params:
query_params['fields'] = params['fields']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RenderTask',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def render_task_results(self, render_task_id, **kwargs):
"""
Render Task Results
### Get the document or image produced by a completed render task. Returns `102 Processing` if the render task has not completed.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.render_task_results(render_task_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str render_task_id: Id of render task (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.render_task_results_with_http_info(render_task_id, **kwargs)
else:
(data) = self.render_task_results_with_http_info(render_task_id, **kwargs)
return data
def render_task_results_with_http_info(self, render_task_id, **kwargs):
"""
Render Task Results
### Get the document or image produced by a completed render task. Returns `102 Processing` if the render task has not completed.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.render_task_results_with_http_info(render_task_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str render_task_id: Id of render task (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['render_task_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method render_task_results" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'render_task_id' is set
if ('render_task_id' not in params) or (params['render_task_id'] is None):
raise ValueError("Missing the required parameter `render_task_id` when calling `render_task_results`")
collection_formats = {}
resource_path = '/render_tasks/{render_task_id}/results'.replace('{format}', 'json')
path_params = {}
if 'render_task_id' in params:
path_params['render_task_id'] = params['render_task_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['image/jpeg', 'image/png', 'application/pdf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
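# --- Illustrative usage sketch (editor addition, not generated code) ---------
# Ties together the endpoints above: create a render task for a query, poll
# `render_task` until it reports a terminal status, then download the result.
# How the API object is constructed and the exact `status`/`id` attribute
# values are assumptions about the Looker API; only the three method names
# are taken from the class above.
def _example_render_query_to_image(api, query_id, width=800, height=600, poll_seconds=2.0):
    """Create a query render task and block until its result is available."""
    import time
    task = api.create_query_render_task(query_id, 'png', width, height)
    while True:
        task = api.render_task(task.id)
        # Treating 'success'/'failure' as terminal states is an assumption.
        if getattr(task, 'status', None) in ('success', 'failure'):
            break
        time.sleep(poll_seconds)
    return api.render_task_results(task.id)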
|
nilq/baby-python
|
python
|
"""
Author: shikechen
Function: Calculate AQI (Air Quality Index)
Version: 1.0
Date: 2019/3/9
"""
def cal_linear(iaqi_lo, iaqi_hi, bp_lo, bp_hi, cp):
iaqi = (iaqi_hi - iaqi_lo) * (cp - bp_lo) / (bp_hi - bp_lo) + iaqi_lo
return iaqi
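# Worked example of the interpolation above: a PM2.5 reading of 60 ug/m3 falls
# in the 35-75 breakpoint band, which maps to IAQI 50-100, so
# cal_linear(50, 100, 35, 75, 60) = (100 - 50) * (60 - 35) / (75 - 35) + 50 = 81.25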
def cal_pm_iaqi(pm_val):
    """Calculate the IAQI for a PM2.5 concentration (ug/m3)."""
    if 0 <= pm_val < 36:
        iaqi = cal_linear(0, 50, 0, 35, pm_val)
    elif 36 <= pm_val < 76:
        iaqi = cal_linear(50, 100, 35, 75, pm_val)
    elif 76 <= pm_val < 116:
        iaqi = cal_linear(100, 150, 75, 115, pm_val)
    elif 116 <= pm_val < 151:
        iaqi = cal_linear(150, 200, 115, 150, pm_val)
    elif 151 <= pm_val < 251:
        iaqi = cal_linear(200, 300, 150, 250, pm_val)
    elif 251 <= pm_val < 351:
        iaqi = cal_linear(300, 400, 250, 350, pm_val)
    elif 351 <= pm_val < 501:
        iaqi = cal_linear(400, 500, 350, 500, pm_val)
    else:
        raise ValueError('PM2.5 value out of supported range: {}'.format(pm_val))
    return iaqi
def cal_co_iaqi(co_val):
    """Calculate the IAQI for a CO concentration (mg/m3)."""
    if 0 <= co_val < 3:
        iaqi = cal_linear(0, 50, 0, 2, co_val)
    elif 3 <= co_val < 5:
        iaqi = cal_linear(50, 100, 2, 4, co_val)
    elif 5 <= co_val < 15:
        iaqi = cal_linear(100, 150, 4, 14, co_val)
    elif 15 <= co_val < 25:
        iaqi = cal_linear(150, 200, 14, 24, co_val)
    elif 25 <= co_val < 37:
        iaqi = cal_linear(200, 300, 24, 36, co_val)
    elif 37 <= co_val < 49:
        iaqi = cal_linear(300, 400, 36, 48, co_val)
    elif 49 <= co_val < 61:
        iaqi = cal_linear(400, 500, 48, 60, co_val)
    else:
        raise ValueError('CO value out of supported range: {}'.format(co_val))
    return iaqi
def cal_aqi(param_list):
pm_value = param_list[0]
co_value = param_list[1]
pm_iaqi = cal_pm_iaqi(pm_value)
co_iaqi = cal_co_iaqi(co_value)
iaqi_list = []
iaqi_list.append(pm_iaqi)
iaqi_list.append(co_iaqi)
aqi = max(iaqi_list)
return aqi
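# Worked example for cal_aqi: param_list = [60, 4] yields a PM2.5 IAQI of 81.25
# and a CO IAQI of 100 (4 mg/m3 is the upper edge of the 2-4 breakpoint band),
# so the reported AQI is max(81.25, 100) = 100.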
def main():
print('Please input data')
input_str = input('(1)PM2.5 (2)CO:')
str_list = input_str.split(' ')
pm_value = float(str_list[0])
co_value = float(str_list[1])
param_list = []
param_list.append(pm_value)
param_list.append(co_value)
aqi_value = cal_aqi(param_list)
print('AQI: {}'.format(aqi_value))
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import pytest
from .data import TEST_DATA_ROOT
TEXTGRID_PATHS = sorted(
TEST_DATA_ROOT.glob('wav-textgrid/*.TextGrid')
)
@pytest.fixture
def textgrid_paths():
return TEXTGRID_PATHS
@pytest.fixture(params=TEXTGRID_PATHS)
def a_textgrid_path(request):
return request.param
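# Minimal illustration (editor-added sketch, not part of the original suite) of
# how the parametrised fixture above can be consumed: pytest runs the test once
# per TextGrid path found under the test data root.
def test_a_textgrid_path_exists(a_textgrid_path):
    assert a_textgrid_path.exists()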
|
nilq/baby-python
|
python
|
__all__ = ['testing']
from .testing import pic1, pic2
|
nilq/baby-python
|
python
|
# Copyright 2021 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for modeling_strategy_descriptor."""
from absl.testing import absltest
from os.path import join
from tempfile import TemporaryDirectory
from typing import Dict
from typing import Iterable
from typing import List
from typing import Type
import math
import numpy as np
import pandas as pd
from wfa_planning_evaluation_framework.data_generators.publisher_data import (
PublisherData,
)
from wfa_planning_evaluation_framework.data_generators.data_design import DataDesign
from wfa_planning_evaluation_framework.data_generators.data_set import DataSet
from wfa_planning_evaluation_framework.data_generators.heterogeneous_impression_generator import (
HeterogeneousImpressionGenerator,
)
from wfa_planning_evaluation_framework.data_generators.fixed_price_generator import (
FixedPriceGenerator,
)
from wfa_planning_evaluation_framework.models.goerg_model import (
GoergModel,
)
from wfa_planning_evaluation_framework.models.reach_curve import (
ReachCurve,
)
from wfa_planning_evaluation_framework.models.reach_point import (
ReachPoint,
)
from wfa_planning_evaluation_framework.models.reach_surface import (
ReachSurface,
)
from wfa_planning_evaluation_framework.models.pairwise_union_reach_surface import (
PairwiseUnionReachSurface,
)
from wfa_planning_evaluation_framework.simulator.halo_simulator import (
HaloSimulator,
)
from wfa_planning_evaluation_framework.simulator.modeling_strategy import (
ModelingStrategy,
)
from wfa_planning_evaluation_framework.simulator.privacy_tracker import (
DP_NOISE_MECHANISM_GAUSSIAN,
DP_NOISE_MECHANISM_LAPLACE,
NoisingEvent,
PrivacyBudget,
PrivacyTracker,
)
from wfa_planning_evaluation_framework.simulator.system_parameters import (
LiquidLegionsParameters,
SystemParameters,
)
from wfa_planning_evaluation_framework.driver.experiment_parameters import (
TEST_POINT_STRATEGIES,
ExperimentParameters,
)
from wfa_planning_evaluation_framework.driver.experimental_trial import (
ExperimentalTrial,
)
from wfa_planning_evaluation_framework.driver.modeling_strategy_descriptor import (
MODELING_STRATEGIES,
ModelingStrategyDescriptor,
)
from wfa_planning_evaluation_framework.driver.test_point_generator import (
TestPointGenerator,
)
from wfa_planning_evaluation_framework.driver.trial_descriptor import (
TrialDescriptor,
)
class FakeReachSurface(ReachSurface):
def __init__(self):
self._max_reach = 1
def by_impressions(
self, impressions: Iterable[int], max_frequency: int = 1
) -> ReachPoint:
return ReachPoint(impressions, [1], impressions)
def by_spend(self, spend: Iterable[float], max_frequency: int = 1) -> ReachPoint:
return ReachPoint([1] * len(spend), [1], spend)
class FakeModelingStrategy(ModelingStrategy):
def __init__(
self,
single_pub_model: Type[ReachCurve],
single_pub_model_kwargs: Dict,
multi_pub_model: Type[ReachSurface],
multi_pub_model_kwargs: Dict,
x: int,
):
self.name = "fake"
self.x = 1
super().__init__(
single_pub_model,
single_pub_model_kwargs,
multi_pub_model,
multi_pub_model_kwargs,
)
def fit(
self, halo: HaloSimulator, params: SystemParameters, budget: PrivacyBudget
) -> ReachSurface:
return FakeReachSurface()
class FakeTestPointGenerator(TestPointGenerator):
def __init__(self, dataset, rng):
pass
def test_points(self) -> Iterable[List[float]]:
return [[1.0, 2.0]]
class GoergModelingStrategy(ModelingStrategy):
"""Models a single publisher using Goerg's model."""
def fit(
self, halo: HaloSimulator, params: SystemParameters, budget: PrivacyBudget
) -> ReachSurface:
total_reach = ReachPoint(
[
2,
],
[
2,
],
[2.0],
)
curve = GoergModel([total_reach])
curve._fit()
return curve
class GoergTestPointGenerator(TestPointGenerator):
def __init__(self, dataset, rng):
pass
def test_points(self) -> Iterable[List[float]]:
return [[1.0]]
class ExperimentalTrialTest(absltest.TestCase):
def test_privacy_tracking_vars_dataframe(self):
tracker = PrivacyTracker()
eparams = ExperimentParameters(
PrivacyBudget(1.0, 0.01), 1, 3, "test_point_strategy"
)
trial_descriptor = TrialDescriptor(None, None, eparams)
trial = ExperimentalTrial("", None, "", trial_descriptor)
actual0 = trial._make_privacy_tracking_vars_dataframe(tracker)
expected0 = pd.DataFrame(
{
"privacy_budget_epsilon": [1.0],
"privacy_budget_delta": [0.01],
"privacy_used_epsilon": [0.0],
"privacy_used_delta": [0.0],
"privacy_mechanisms": [""],
}
)
pd.testing.assert_frame_equal(actual0, expected0)
tracker.append(
NoisingEvent(PrivacyBudget(0.5, 0.005), DP_NOISE_MECHANISM_LAPLACE, {})
)
actual1 = trial._make_privacy_tracking_vars_dataframe(tracker)
expected1 = pd.DataFrame(
{
"privacy_budget_epsilon": [1.0],
"privacy_budget_delta": [0.01],
"privacy_used_epsilon": [0.5],
"privacy_used_delta": [0.005],
"privacy_mechanisms": ["Laplace"],
}
)
pd.testing.assert_frame_equal(actual1, expected1)
tracker.append(
NoisingEvent(PrivacyBudget(0.2, 0.002), DP_NOISE_MECHANISM_GAUSSIAN, {})
)
actual2 = trial._make_privacy_tracking_vars_dataframe(tracker)
expected2 = pd.DataFrame(
{
"privacy_budget_epsilon": [1.0],
"privacy_budget_delta": [0.01],
"privacy_used_epsilon": [0.7],
"privacy_used_delta": [0.007],
"privacy_mechanisms": ["Gaussian/Laplace"],
}
)
pd.testing.assert_frame_equal(actual2, expected2)
def test_make_independent_vars_dataframe(self):
with TemporaryDirectory() as d:
pdf1 = PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1")
pdf2 = PublisherData([(2, 0.03), (4, 0.06)], "pdf2")
data_set = DataSet([pdf1, pdf2], "dataset")
data_design = DataDesign(join(d, "data_design"))
data_design.add(data_set)
msd = ModelingStrategyDescriptor(
"strategy", {}, "single_pub_model", {}, "multi_pub_model", {}
)
sparams = SystemParameters(
[0.03, 0.05],
LiquidLegionsParameters(13, 1e6, 1),
np.random.default_rng(),
)
eparams = ExperimentParameters(
PrivacyBudget(1.0, 0.01), 3, 5, "test_point_strategy"
)
trial_descriptor = TrialDescriptor(msd, sparams, eparams)
trial = ExperimentalTrial("edir", data_design, "dataset", trial_descriptor)
actual = trial._make_independent_vars_dataframe()
expected_trial_name = "strategy,single_pub_model,multi_pub_model,spends=[0.03,0.05],decay_rate=13,sketch_size=1000000.0,epsilon=1.0,delta=0.01,replica_id=3,max_frequency=5,test_point_strategy=test_point_strategy"
expected = pd.DataFrame(
{
"dataset": ["dataset"],
"trial": [expected_trial_name],
"replica_id": [3],
"single_pub_model": ["single_pub_model"],
"multi_pub_model": ["multi_pub_model"],
"strategy": ["strategy"],
"liquid_legions_sketch_size": [1e6],
"liquid_legions_decay_rate": [13],
"maximum_reach": [4],
"ncampaigns": [2],
"largest_pub_reach": [3],
"max_frequency": [5],
"average_spend_fraction": [0.04],
}
)
pd.testing.assert_frame_equal(actual, expected)
def test_compute_trial_results_path(self):
with TemporaryDirectory() as d:
pdf1 = PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1")
pdf2 = PublisherData([(2, 0.03), (4, 0.06)], "pdf2")
data_set = DataSet([pdf1, pdf2], "dataset")
data_design = DataDesign(join(d, "data_design"))
data_design.add(data_set)
msd = ModelingStrategyDescriptor(
"strategy", {}, "single_pub_model", {}, "multi_pub_model", {}
)
sparams = SystemParameters(
[0.03, 0.05],
LiquidLegionsParameters(13, 1e6, 1),
np.random.default_rng(),
)
eparams = ExperimentParameters(PrivacyBudget(1.0, 0.01), 3, 5, "tps")
trial_descriptor = TrialDescriptor(msd, sparams, eparams)
trial = ExperimentalTrial("edir", data_design, "dataset", trial_descriptor)
actual = trial._compute_trial_results_path()
expected = "{}/{}/{},{},{},{}".format(
"edir",
"dataset",
"strategy,single_pub_model,multi_pub_model",
"spends=[0.03,0.05],decay_rate=13,sketch_size=1000000.0",
"epsilon=1.0,delta=0.01,replica_id=3,max_frequency=5",
"test_point_strategy=tps",
)
self.assertEqual(actual, expected)
def test_evaluate(self):
with TemporaryDirectory() as d:
pdf1 = PublisherData([(1, 0.01), (2, 0.02), (1, 0.04), (3, 0.05)], "pdf1")
pdf2 = PublisherData([(2, 0.02), (2, 0.03), (4, 0.06)], "pdf2")
data_set = DataSet([pdf1, pdf2], "dataset")
data_design_dir = join(d, "data_design")
experiment_dir = join(d, "experiments")
data_design = DataDesign(data_design_dir)
data_design.add(data_set)
MODELING_STRATEGIES["fake"] = FakeModelingStrategy
TEST_POINT_STRATEGIES["fake_tps"] = FakeTestPointGenerator
msd = ModelingStrategyDescriptor(
"fake", {"x": 1}, "goerg", {}, "pairwise_union", {}
)
sparams = SystemParameters(
[0.9, 0.9],
LiquidLegionsParameters(13, 1e6, 1),
np.random.default_rng(),
)
eparams = ExperimentParameters(PrivacyBudget(1.0, 0.01), 3, 5, "fake_tps")
trial_descriptor = TrialDescriptor(msd, sparams, eparams)
trial = ExperimentalTrial(
experiment_dir, data_design, "dataset", trial_descriptor
)
result = trial.evaluate(seed=1)
# We don't check each column in the resulting dataframe, because these have
# been checked by the preceding unit tests. However, we make a few strategic
# probes.
self.assertEqual(result.shape[0], 1)
self.assertEqual(result["dataset"][0], "dataset")
self.assertEqual(result["replica_id"][0], 3)
self.assertEqual(result["privacy_budget_epsilon"][0], 1.0)
self.assertEqual(result["npoints"][0], 1)
self.assertEqual(result["model_succeeded"][0], 1)
self.assertEqual(result["model_exception"][0], "")
def test_evaluate_when_there_is_a_modeling_exception(self):
with TemporaryDirectory() as d:
pdf1 = PublisherData([(1, 0.01), (2, 0.02), (3, 0.04), (4, 0.05)], "pdf1")
data_set = DataSet([pdf1], "dataset")
data_design_dir = join(d, "data_design")
experiment_dir = join(d, "experiments")
data_design = DataDesign(data_design_dir)
data_design.add(data_set)
MODELING_STRATEGIES["fake"] = GoergModelingStrategy
TEST_POINT_STRATEGIES["fake_tps"] = GoergTestPointGenerator
msd = ModelingStrategyDescriptor(
"fake", {}, "goerg", {}, "pairwise_union", {}
)
sparams = SystemParameters(
[0.5],
LiquidLegionsParameters(13, 1e6, 1),
np.random.default_rng(),
)
eparams = ExperimentParameters(PrivacyBudget(1.0, 0.01), 3, 5, "fake_tps")
trial_descriptor = TrialDescriptor(msd, sparams, eparams)
trial = ExperimentalTrial(
experiment_dir, data_design, "dataset", trial_descriptor
)
result = trial.evaluate(seed=1)
# We don't check each column in the resulting dataframe, because these have
# been checked by the preceding unit tests. However, we make a few strategic
# probes.
self.assertEqual(result.shape[0], 1)
self.assertEqual(result["dataset"][0], "dataset")
self.assertEqual(result["replica_id"][0], 3)
self.assertEqual(result["privacy_budget_epsilon"][0], 1.0)
self.assertEqual(result["model_succeeded"][0], 0)
self.assertEqual(
result["model_exception"][0],
"Cannot fit Goerg model when impressions <= reach.",
)
    def test_evaluate_single_publisher_model_with_exception(self):
with TemporaryDirectory() as d:
pdf1 = PublisherData([(1, 0.01), (2, 0.02), (3, 0.04), (4, 0.05)], "pdf1")
data_set = DataSet([pdf1], "dataset")
data_design_dir = join(d, "data_design")
experiment_dir = join(d, "experiments")
data_design = DataDesign(data_design_dir)
data_design.add(data_set)
MODELING_STRATEGIES["fake"] = GoergModelingStrategy
TEST_POINT_STRATEGIES["fake_tps"] = GoergTestPointGenerator
msd = ModelingStrategyDescriptor(
"fake", {}, "goerg", {}, "pairwise_union", {}
)
sparams = SystemParameters(
[0.5],
LiquidLegionsParameters(13, 1e6, 1),
np.random.default_rng(),
)
eparams = ExperimentParameters(PrivacyBudget(1.0, 0.01), 3, 5, "fake_tps")
trial_descriptor = TrialDescriptor(msd, sparams, eparams)
trial = ExperimentalTrial(
experiment_dir,
data_design,
"dataset",
trial_descriptor,
analysis_type="single_pub",
)
result = trial.evaluate(seed=1)
# We don't check each column in the resulting dataframe, because these have
# been checked by the preceding unit tests. However, we make a few strategic
# probes.
self.assertEqual(result.shape[0], 1)
self.assertTrue(math.isnan(result["relative_error_at_100"][0]))
def test_evaluate_single_publisher_model(self):
with TemporaryDirectory() as d:
data1 = HeterogeneousImpressionGenerator(
1000, gamma_shape=1.0, gamma_scale=3.0
)()
pdf1 = PublisherData(FixedPriceGenerator(0.1)(data1))
data_set = DataSet([pdf1], "dataset")
data_design_dir = join(d, "data_design")
experiment_dir = join(d, "experiments")
data_design = DataDesign(data_design_dir)
data_design.add(data_set)
msd = ModelingStrategyDescriptor(
"single_publisher", {}, "goerg", {}, "pairwise_union", {}
)
sparams = SystemParameters(
[0.5],
LiquidLegionsParameters(13, 1e6, 1),
np.random.default_rng(),
)
eparams = ExperimentParameters(
PrivacyBudget(1.0, 0.01), 3, 5, "grid", {"grid_size": 5}
)
trial_descriptor = TrialDescriptor(msd, sparams, eparams)
trial = ExperimentalTrial(
experiment_dir,
data_design,
"dataset",
trial_descriptor,
analysis_type="single_pub",
)
result = trial.evaluate(seed=1)
# We don't check each column in the resulting dataframe, because these have
# been checked by the preceding unit tests. However, we make a few strategic
# probes.
self.assertEqual(result.shape[0], 1)
self.assertAlmostEqual(result["relative_error_at_100"][0], 0.0, delta=0.01)
self.assertGreater(result["max_nonzero_frequency_from_halo"][0], 0)
self.assertEqual(result["max_nonzero_frequency_from_data"][0], 1)
if __name__ == "__main__":
absltest.main()
|
nilq/baby-python
|
python
|
from flask import jsonify
from meli.morse.app.exceptions import ValidationError
from . import api
def bad_request(message):
response = jsonify({'error': 'bad request', 'message': message})
response.status_code = 400
return response
@api.errorhandler(ValidationError)
def validation_error(err):
return bad_request(err.args[0])
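# Illustration (editor note): any view on this blueprint can simply raise
# ValidationError("rate is required"), and the handler above converts it into
# an HTTP 400 response with the JSON body
#   {"error": "bad request", "message": "rate is required"}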
|
nilq/baby-python
|
python
|
# Demonstration local server.
# In one window:
# python server.py -D localhost
# In another window:
# python coapget.py -h localhost -v
# python coapget.py -h localhost -u uptime
# python coapget.py -h localhost -u counter
# python coapget.py -h localhost -u unknown
import sys
import coapy.connection
import coapy.options
import coapy.link
import time
import socket
import getopt
# --verbose (-v): Print all message metadata
verbose = False
port = coapy.COAP_PORT
address_family = socket.AF_INET
# --discovery-addresses csv (-D): Provide a comma-separated list of
# host names for local interfaces on which CoAP service discovery
# should be supported.
discovery_addresses = None
try:
    opts, args = getopt.getopt(sys.argv[1:], 'vp:46D:', ['verbose', 'port=', 'ipv4', 'ipv6', 'discovery-addresses='])
for (o, a) in opts:
if o in ('-v', '--verbose'):
verbose = True
elif o in ('-p', '--port'):
port = int(a)
elif o in ('-4', '--ipv4'):
address_family = socket.AF_INET
elif o in ('-6', '--ipv6'):
address_family = socket.AF_INET6
        elif o in ('-D', '--discovery-addresses'):
discovery_addresses = a
except getopt.GetoptError, e:
print 'Option error: %s' % (e,)
sys.exit(1)
if socket.AF_INET == address_family:
bind_addr = ('', port)
elif socket.AF_INET6 == address_family:
bind_addr = ('::', port, 0, 0)
ep = coapy.connection.EndPoint(address_family=address_family)
ep.bind(bind_addr)
if discovery_addresses is not None:
for da_fqdn in discovery_addresses.split(','):
ep.bindDiscovery(da_fqdn)
class CounterService (coapy.link.LinkValue):
__counter = 0
def process (self, rx_record):
ctr = self.__counter
self.__counter += 1
msg = coapy.connection.Message(coapy.connection.Message.ACK, code=coapy.OK, payload='%d' % (ctr,))
rx_record.ack(msg)
class AsyncCounterService (coapy.link.LinkValue):
__counter = 0
def process (self, rx_record):
rx_record.ack()
ctr = self.__counter
self.__counter += 1
msg = coapy.connection.Message(coapy.connection.Message.CON, code=coapy.OK, payload='%d delayed' % (ctr,))
for opt in rx_record.message.options:
msg.addOption(opt)
rx_record.end_point.send(msg, rx_record.remote)
class UptimeService (coapy.link.LinkValue):
__started = time.time()
def process (self, rx_record):
uptime = time.time() - self.__started
msg = coapy.connection.Message(coapy.connection.Message.ACK, code=coapy.OK, payload='%g' % (uptime,))
rx_record.ack(msg)
class ResourceService (coapy.link.LinkValue):
__services = None
def __init__ (self, *args, **kw):
super(ResourceService, self).__init__('.well-known/r', ct=[coapy.media_types_rev.get('application/link-format')])
self.__services = { self.uri : self }
def add_service (self, service):
self.__services[service.uri] = service
def lookup (self, uri):
return self.__services.get(uri)
def process (self, rx_record):
msg = coapy.connection.Message(coapy.connection.Message.ACK, code=coapy.OK, content_type='application/link-format')
msg.payload = ",".join([ _s.encode() for _s in self.__services.itervalues() ])
rx_record.ack(msg)
services = ResourceService()
services.add_service(CounterService('counter'))
services.add_service(UptimeService('uptime'))
services.add_service(AsyncCounterService('async'))
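# Editor note: ResourceService registers itself under '.well-known/r', so a
# client GET on that URI returns a link-format listing of the discovery
# resource itself plus the 'counter', 'uptime' and 'async' services added above.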
while True:
rxr = ep.process(10000)
if rxr is None:
print 'No activity'
continue
print '%s: %s' % (rxr.remote, rxr.message)
msg = rxr.message
if coapy.GET != msg.code:
rxr.reset()
continue
uri = msg.findOption(coapy.options.UriPath)
if uri is None:
continue
service = services.lookup(uri.value)
print 'Lookup %s got %s' % (uri, service)
if service is None:
rxr.reset()
continue
service.process(rxr)
|
nilq/baby-python
|
python
|
import requests
import sqlite3
import random
from html.parser import HTMLParser
parser = HTMLParser()
connection=sqlite3.connect('previous')
cursor=connection.cursor()
import asyncio
import sys
import json
import discord
from discord.ext import commands
import time
TOKEN = 'NDQwOTMzMjE1NTc5MTQ0MjAz.DmWipQ.p110Y5lhaNCZMYiDYI8mNtghNpk'
description = '''ninjaBot in Python'''
bot = commands.Bot(command_prefix='?', description=description)
client=discord.Client()
prev=''
@bot.event
async def on_ready():
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------')
    prev = ''
    while True:
        cursor.execute('''SELECT * FROM news''')
        a = cursor.fetchall()[0]
        name = a[0]
        title = a[1]
        content = a[2]
        link = a[3]
        picture = a[4]
        if link != prev:
            prev = link
            for server in bot.servers:
                for channel in server.channels:
                    if 'text' in str(channel.type) and str(channel.name).lower() == 'news':
                        embed = discord.Embed(title=title, description=parser.unescape(content.replace('<br>', '')), url=link)
                        embed.set_author(name=name, icon_url=picture)
                        await bot.send_message(channel, embed=embed)
        # Yield to the event loop between polls so the gateway heartbeat keeps
        # running; the 10-second interval is an arbitrary editor choice.
        await asyncio.sleep(10)
bot.run(TOKEN)
|
nilq/baby-python
|
python
|
"""Decodes and logs angular data from AMS AS5048A."""
# pylint: disable=import-error, import-outside-toplevel, fixme, missing-function-docstring
import argparse
import logging
import os
import time
from typing import Any, List
from meter import Meter
from sensor import Sensor
from volume import Volume
from writer import DataWriter
import spi
# pylint: disable=too-few-public-methods
class Reader:
"""Reads data from the sensor and sends it to the listeners."""
def __init__(self, spi: Any, writers: List[DataWriter]) -> None:
self.spi = spi
self.writers = writers
_SAMPLE_PERIOD_NS: int = 4000000 # 0.004s = 250hz = 4x oversampling
@staticmethod
def _wait_for_next_sample() -> None:
"""Sleeps until time to take the next sample."""
now_ns: int = time.time_ns()
waiting_ns: int = int(Reader._SAMPLE_PERIOD_NS - (now_ns % Reader._SAMPLE_PERIOD_NS))
time.sleep(waiting_ns / 1e9)
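    # Worked example of the alignment above: with the 4 ms sample period, a call
    # arriving 1.5 ms into a period (now_ns % _SAMPLE_PERIOD_NS == 1_500_000)
    # sleeps the remaining 2.5 ms so reads stay on the 250 Hz grid.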
def run(self) -> None:
"""Handles input in a continuous loop."""
sensor: Sensor = Sensor(self.spi)
meter: Meter = Meter()
volume: Volume = Volume()
# used for error recovery and startup
make_extra_request: bool = True
while True:
try:
if make_extra_request:
make_extra_request = False
sensor.read_angle()
Reader._wait_for_next_sample()
now_ns = time.time_ns()
# TODO: hide this spi-specific stuff
#angle = sensor.transfer(Sensor.ANGLE_READ_REQUEST) & Sensor.RESPONSE_MASK
angle = sensor.read_angle()
logging.debug("angle %s", angle)
meter.update(angle)
volume.update(now_ns, meter.read())
for writer in self.writers:
writer.write(now_ns, meter.read(), volume.read())
except Sensor.ResponseLengthException as err:
make_extra_request = True
logging.debug("Response Length Exception %s", err)
except Sensor.ResponseParityException as err:
make_extra_request = True
logging.debug("Response Parity Exception %s", err)
except Sensor.ResponseErrorRegisterException as err:
make_extra_request = True
logging.debug("Response Error Register %s", err)
def parse() -> argparse.Namespace:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument("--fake", action="store_true", help="use fake spidev, for testing")
parser.add_argument("--verbose", action="store_true", help="read everything, not just angle")
args: argparse.Namespace = parser.parse_args()
return args
def main() -> None:
logging.basicConfig(
format='%(asctime)s.%(msecs)03d %(levelname)s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%dT%H:%M:%S', level=logging.INFO)
os.makedirs('data', exist_ok=True)
writer_min = DataWriter("data/data_min", 60, 0) # archival, keep forever
writer_sec = DataWriter("data/data_sec", 1, 604800) # temporary, keep 7 days
Reader(spi.make_and_setup_spi(parse()), [writer_sec, writer_min]).run()
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
import shutil
import os.path
def build(source_path, build_path, install_path, targets):
pass
|
nilq/baby-python
|
python
|
from watchmen.common.watchmen_model import WatchmenModel
from watchmen.raw_data.rule_schema import RuleType, DSLType
class RuleContext(WatchmenModel):
type: RuleType = None
dsl: DSLType = None
orgId: int = None
orgName: str = None
productId: int = None
productName: str = None
ruleId: int = None
ruleName: str = None
|
nilq/baby-python
|
python
|
import pytest
from typing import List
from io import BytesIO
from dafni_cli.datasets.dataset_metadata import DataFile, DatasetMetadata
@pytest.fixture
def get_dataset_list_fixture() -> List[dict]:
"""Test fixture for simulating the dataset data return
from calling the get datasets API
Returns:
List[dict]: example get Dataset response
"""
datasets = {
"metadata": [
{
"id": {
"dataset_uuid": "0a0a0a0a-0a00-0a00-a000-0a0a0000000a",
"version_uuid": "0a0a0a0a-0a00-0a00-a000-0a0a0000000b",
"metadata_uuid": "0a0a0a0a-0a00-0a00-a000-0a0a0000000c",
"asset_id": "0a0a0a0a-0a00-0a00-a000-0a0a0000000a:0a0a0a0a-0a00-0a00-a000-0a0a0000000b:0a0a0a0a-0a00-0a00-a000-0a0a0000000c",
},
"title": "Title 1",
"description": "Description 1",
"subject": "Planning / Cadastre",
"source": "DAFNI",
"date_range": {"begin": None, "end": None},
"modified_date": "2021-03-04T15:59:26+00:00",
"formats": [None],
"auth": {
"name": "Executor",
"view": True,
"read": True,
"update": False,
"destroy": False,
"reason": "Accessed as part of the Public group",
},
},
{
"id": {
"dataset_uuid": "1a0a0a0a-0a00-0a00-a000-0a0a0000000a",
"version_uuid": "1a0a0a0a-0a00-0a00-a000-0a0a0000000b",
"metadata_uuid": "1a0a0a0a-0a00-0a00-a000-0a0a0000000c",
"asset_id": "1a0a0a0a-0a00-0a00-a000-0a0a0000000a:1a0a0a0a-0a00-0a00-a000-0a0a0000000b:1a0a0a0a-0a00-0a00-a000-0a0a0000000c",
},
"title": "Title 2",
"description": "Description 2",
"subject": "Environment",
"source": "DAFNI Workflows",
"date_range": {
"begin": "2019-01-01T12:00:00.000Z",
"end": "2021-01-01T12:00:00.000Z",
},
"modified_date": "2020-08-26T13:21:18.522Z",
"formats": ["application/zip", None, "text/csv", "text/plain"],
"auth": {
"name": "Executor",
"view": True,
"read": True,
"update": False,
"destroy": False,
"reason": "Accessed as part of the Public group",
},
},
],
"filters": {
"sources": {
"Companies House": 1,
"DAFNI": 1,
"DAFNI Workflows": 1,
"Newcastle University": 28,
"Office for National Statistics": 455,
"Office of Rail and Road": 2,
},
"subjects": {
"Climatology / Meteorology / Atmosphere": 16,
"Economy": 1,
"Environment": 1,
"Oceans": 2,
"Planning / Cadastre": 1,
"Society": 455,
"Transportation": 10,
"Utilities / Communication": 2,
},
"formats": {
"text/plain": 1,
"text/csv": 483,
"application/zip": 2,
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": 3,
"application/vnd.ms-excel": 1,
"application/pdf": 1,
"application/octet-stream": 3,
},
},
}
return datasets
@pytest.fixture
def dataset_metadata_fixture() -> dict:
"""Test fixture returning an example dataset metadata response dict
Returns:
dict: Example Dataset metadata response dict
"""
data = {
"@context": ["metadata-v1"],
"@type": "dcat:Dataset",
"dct:title": "An example workflow definition",
"dct:description": "Dataset description",
"dct:identifier": [
"0a0a0a0a-0a00-0a00-a000-0a0a0000000a:0a0a0a0a-0a00-0a00-a000-0a0a0000000b:0a0a0a0a-0a00-0a00-a000-0a0a0000000c"
],
"dct:subject": "Biota",
"dcat:theme": ["Utility and governmental services"],
"dct:language": "en",
"dcat:keyword": ["test"],
"dct:conformsTo": {
"@id": "https://www.iso.org/standard/39229.html",
"@type": "dct:Standard",
"label": "ISO 19115-2:2009",
},
"dct:spatial": {"@id": None, "@type": "dct:Location", "rdfs:label": "England"},
"geojson": {},
"dct:PeriodOfTime": {
"type": "dct:PeriodOfTime",
"time:hasBeginning": "2019-03-27T00:00:00Z",
"time:hasEnd": "2021-03-27T00:00:00Z",
},
"dct:accrualPeriodicity": "Semiannual",
"dct:creator": [
{
"@type": "foaf:Organization",
"@id": "http://www.stfc.ac.uk",
"foaf:name": "STFC",
"internalID": None,
}
],
"dct:created": "2021-03-16",
"dct:publisher": {
"@id": None,
"@type": "foaf:Organization",
"foaf:name": "Publisher",
"internalID": None,
},
"dcat:contactPoint": {
"@type": "vcard:Organization",
"vcard:fn": "Joe",
"vcard:hasEmail": "joe.bloggsd@stfc.ac.uk",
},
"dct:license": {
"@type": "LicenseDocument",
"@id": "https://creativecommons.org/licences/by/4.0/",
"rdfs:label": None,
},
"dct:rights": "Open Government Licence.",
"dafni_version_note": "Initial Dataset version",
"@id": {
"asset_id": "0a0a0a0a-0a00-0a00-a000-0a0a0000000a:0a0a0a0a-0a00-0a00-a000-0a0a0000000b:0a0a0a0a-0a00-0a00-a000-0a0a0000000c",
"dataset_uuid": "0a0a0a0a-0a00-0a00-a000-0a0a0000000a",
"version_uuid": "0a0a0a0a-0a00-0a00-a000-0a0a0000000b",
"metadata_uuid": "0a0a0a0a-0a00-0a00-a000-0a0a0000000c",
},
"dct:modified": "2021-03-16T09:27:21+00:00",
"dct:issued": "2021-03-16T09:27:21+00:00",
"dcat:distribution": [
{
"spdx:fileName": "workflow_def.csv",
"dcat:mediaType": "text/csv",
"dcat:byteSize": 6720,
"dcat:downloadURL": "url/to/file",
}
],
"mediatypes": [None],
"version_history": {
"dataset_uuid": "0a0a0a0a-0a00-0a00-a000-0a0a0000000a",
"versions": [
{
"version_uuid": "0a0a0a0a-0a00-0a00-a000-0a0a0000000b",
"metadata_versions": [
{
"metadata_uuid": "0a0a0a0a-0a00-0a00-a000-0a0a0000000c",
"dafni_version_note": "Initial Dataset version",
"modified_date": "2021-03-16T09:27:21+00:00",
}
],
}
],
},
"auth": {
"asset_id": "0a0a0a0a-0a00-0a00-a000-0a0a0000000a",
"reason": "Accessed as part of the Tessella CLI group",
"view": True,
"read": True,
"update": False,
"destroy": False,
},
}
return data
def datafile_mock(
name: str = "File 1",
size: str = "120 B",
file_format: str = "CSV",
download: str = "download/url",
    contents: bytes = b"Test Data",
) -> DataFile:
"""Test fixture to generate a DataFile object with given attributes
Args:
name (str, optional): File name. Defaults to "File 1".
size (str, optional): Formatted file size string. Defaults to "120 B".
file_format (str, optional): File Format. Defaults to "CSV".
        download (str, optional): Download URL for file. Defaults to "download/url".
        contents (bytes, optional): File contents as bytes. Defaults to b"Test Data".
Returns:
DataFile: Generated DataFile for testing
"""
datafile = DataFile()
datafile.name = name
datafile.size = size
datafile.format = file_format
datafile.download = download
datafile.contents = contents
return datafile
def dataset_meta_mock(
created: str = "March 20 2021",
creator: str = "DAFNI",
contact: str = "contact@email.com",
description: str = "description here",
identifier: List[str] = ["id 1", "id 2"],
location: str = "UK",
start_date: str = "May 1 2000",
end_date: str = "June 1 2020",
files: List[DataFile] = [datafile_mock()],
keywords: List[str] = ["Key word 1"],
themes: List[str] = ["Theme 1", "Theme 2"],
    publisher: str = "Publisher",
issued: str = "June 12 2021",
rights: str = "Some Rights",
language: str = "en",
standard: str = "ISO 9001",
update: str = "Annual",
title: str = "Title",
dataset_id: str = "Dataset ID",
version_id: str = "Version ID",
) -> DatasetMetadata:
"""Function to generate a DatasetMetadata object with mock data for testing
Args:
created (str, optional): Created date. Defaults to "March 20 2021".
creator (str, optional): Created by. Defaults to "DAFNI".
contact (str, optional): Point of contact. Defaults to "contact@email.com".
description (str, optional): Description. Defaults to "description here".
identifier (List[str], optional): List of identifiers. Defaults to ["id 1", "id 2"].
location (str, optional): Location relating to data. Defaults to "UK".
start_date (str, optional): Start of date range. Defaults to "May 1 2000".
end_date (str, optional): End of date range. Defaults to "June 1 2020".
        files (List[DataFile], optional): Associated DataFile objects. Defaults to [datafile_mock()].
keywords (List[str], optional): Keywords. Defaults to ["Key word 1"].
themes (List[str], optional): Themes. Defaults to ["Theme 1", "Theme 2"].
        publisher (str, optional): Published by. Defaults to "Publisher".
issued (str, optional): Issued date. Defaults to "June 12 2021".
rights (str, optional): Associated rights. Defaults to "Some Rights".
language (str, optional): Associated Language. Defaults to "en".
standard (str, optional): Associated standards. Defaults to "ISO 9001".
update (str, optional): Frequency updated. Defaults to "Annual".
title (str, optional): Associated Title. Defaults to "Title".
dataset_id (str, optional): Dataset ID. Defaults to "Dataset ID".
version_id (str, optional): Dataset Version ID. Defaults to "Version ID".
Returns:
DatasetMetadata: DatasetMetadata object with mock data
"""
instance = DatasetMetadata()
instance.created = created
instance.creator = creator
instance.contact = contact
instance.description = description
instance.identifier = identifier
instance.location = location
instance.start_date = start_date
instance.end_date = end_date
instance.files = files
instance.keywords = keywords
instance.themes = themes
instance.publisher = publisher
instance.issued = issued
instance.rights = rights
instance.language = language
instance.standard = standard
instance.update = update
instance.title = title
instance.dataset_id = dataset_id
instance.version_id = version_id
return instance
@pytest.fixture
def upload_metadata_fixture() -> dict:
"""Fixture to return an example metadata dict for
creating a dataset
Returns:
dict: JSON formatted metadata dict for a Dataset
"""
metadata = {
"@context": ["metadata-v1"],
"@type": "dcat:Dataset",
"dafni_version_note": "Initial Dataset version",
"dcat:contactPoint": {
"@type": "vcard:Organization",
"vcard:fn": "Tester 1",
"vcard:hasEmail": "test@email.com",
},
"dcat:keyword": ["Test"],
"dcat:theme": ["Utility and governmental services"],
"dct:PeriodOfTime": {
"type": "dct:PeriodOfTime",
"time:hasBeginning": None,
"time:hasEnd": None,
},
"dct:accrualPeriodicity": None,
"dct:conformsTo": {"@id": None, "@type": "dct:Standard", "label": None},
"dct:created": "2021-03-29",
"dct:creator": [
{
"@type": "foaf:Organization",
"@id": "https://testing.com",
"foaf:name": "Testing",
"internalID": None,
}
],
"dct:description": "Some data for testing",
"dct:identifier": [],
"dct:language": "en",
"dct:license": {
"@type": "LicenseDocument",
"@id": "https://creativecommons.org/licences/by/4.0/",
"rdfs:label": None,
},
"dct:publisher": {
"@id": None,
"@type": "foaf:Organization",
"foaf:name": None,
"internalID": None,
},
"dct:rights": None,
"dct:spatial": {"@id": None, "@type": "dct:Location", "rdfs:label": None},
"dct:subject": "Utilities / Communication",
"dct:title": "Jamie test data",
"geojson": {},
}
return metadata
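# Illustrative usage (editor-added sketch, not part of the original fixtures):
# the mock builders above can be combined to fabricate metadata for a test, e.g.
#   meta = dataset_meta_mock(files=[datafile_mock(name="data.csv", size="1 KB")])
#   assert meta.files[0].name == "data.csv"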
|
nilq/baby-python
|
python
|
import argparse
import os
import signal
from typing import Dict, Optional
import numpy as np
import torch
import torchaudio
from loguru import logger
from torch import Tensor, nn
from torch.optim import Adam, AdamW, Optimizer, RMSprop
from torch.types import Number
from df.checkpoint import load_model, read_cp, write_cp
from df.config import config
from df.logger import init_logger, log_metrics, log_model_summary
from df.loss import Istft, Loss, MaskLoss
from df.model import ModelParams
from df.modules import get_device
from df.utils import (
as_complex,
as_real,
check_finite_module,
check_manual_seed,
clip_grad_norm_,
detach_hidden,
get_norm_alpha,
make_np,
)
from libdf import DF
from libdfdata import PytorchDataLoader as DataLoader
should_stop = False
debug = False
state: Optional[DF] = None
istft: Optional[nn.Module]
@logger.catch
def main():
global should_stop, debug, state
parser = argparse.ArgumentParser()
parser.add_argument("data_config_file", type=str, help="Path to a dataset config file.")
parser.add_argument(
"data_dir", type=str, help="Path to the dataset directory containing .hdf5 files."
)
parser.add_argument(
"base_dir", type=str, help="Directory to store logs, summaries, checkpoints, etc."
)
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
if not os.path.isfile(args.data_config_file):
raise FileNotFoundError("Dataset config not found at {}".format(args.data_config_file))
if not os.path.isdir(args.data_dir):
        raise NotADirectoryError("Data directory not found at {}".format(args.data_dir))
os.makedirs(args.base_dir, exist_ok=True)
summary_dir = os.path.join(args.base_dir, "summaries")
os.makedirs(summary_dir, exist_ok=True)
debug = args.debug
log_level = "DEBUG" if debug else "INFO"
init_logger(file=os.path.join(args.base_dir, "train.log"), level=log_level)
config.load(os.path.join(args.base_dir, "config.ini"))
seed = config("SEED", 42, int, section="train")
check_manual_seed(seed)
logger.info("Running on device {}".format(get_device()))
signal.signal(signal.SIGUSR1, get_sigusr1_handler(args.base_dir))
p = ModelParams()
state = DF(
sr=p.sr,
fft_size=p.fft_size,
hop_size=p.hop_size,
nb_bands=p.nb_erb,
min_nb_erb_freqs=p.min_nb_freqs,
)
checkpoint_dir = os.path.join(args.base_dir, "checkpoints")
os.makedirs(checkpoint_dir, exist_ok=True)
mask_only: bool = config("MASK_ONLY", False, bool, section="train")
train_df_only: bool = config("DF_ONLY", False, bool, section="train")
jit = config("JIT", False, cast=bool, section="train")
model, epoch = load_model(
checkpoint_dir,
state,
jit=False,
mask_only=mask_only,
train_df_only=train_df_only,
)
opt = load_opt(checkpoint_dir, model, mask_only, train_df_only)
lrs = torch.optim.lr_scheduler.StepLR(opt, 3, 0.9, last_epoch=epoch - 1)
try:
log_model_summary(model, verbose=args.debug)
except Exception as e:
logger.warning(f"Failed to print model summary: {e}")
if jit:
# Load as jit after log_model_summary
model = torch.jit.script(model)
bs: int = config("BATCH_SIZE", 1, int, section="train")
bs_eval: int = config("BATCH_SIZE_EVAL", 0, int, section="train")
bs_eval = bs_eval if bs_eval > 0 else bs
dataloader = DataLoader(
ds_dir=args.data_dir,
ds_config=args.data_config_file,
sr=p.sr,
batch_size=bs,
batch_size_eval=bs_eval,
num_workers=config("NUM_WORKERS", 4, int, section="train"),
max_len_s=config("MAX_SAMPLE_LEN_S", 5.0, float, section="train"),
fft_dataloader=True,
fft_size=p.fft_size,
hop_size=p.hop_size,
nb_erb=p.nb_erb,
nb_spec=p.nb_df,
norm_alpha=get_norm_alpha(),
p_atten_lim=config("p_atten_lim", 0.2, float, section="train"),
p_reverb=config("p_reverb", 0.2, float, section="train"),
prefetch=10,
overfit=config("OVERFIT", False, bool, section="train"),
seed=seed,
min_nb_erb_freqs=p.min_nb_freqs,
)
losses = setup_losses()
if config("START_EVAL", False, cast=bool, section="train"):
val_loss = run_epoch(
model=model,
epoch=epoch - 1,
loader=dataloader,
split="valid",
opt=opt,
losses=losses,
summary_dir=summary_dir,
)
metrics = {"loss": val_loss}
metrics.update(
{n: torch.mean(torch.stack(vals)).item() for n, vals in losses.get_summaries()}
)
log_metrics(f"[{epoch - 1}] [valid]", metrics)
losses.reset_summaries()
max_epochs = config("MAX_EPOCHS", 10, int, section="train")
# Save default values to disk
config.save(os.path.join(args.base_dir, "config.ini"))
for epoch in range(epoch, max_epochs):
train_loss = run_epoch(
model=model,
epoch=epoch,
loader=dataloader,
split="train",
opt=opt,
losses=losses,
summary_dir=summary_dir,
)
metrics = {"loss": train_loss, "lr": lrs.get_last_lr()[0]}
if debug:
metrics.update(
{n: torch.mean(torch.stack(vals)).item() for n, vals in losses.get_summaries()}
)
log_metrics(f"[{epoch}] [train]", metrics)
write_cp(model, "model", checkpoint_dir, epoch + 1)
write_cp(opt, "opt", checkpoint_dir, epoch + 1)
losses.reset_summaries()
val_loss = run_epoch(
model=model,
epoch=epoch,
loader=dataloader,
split="valid",
opt=opt,
losses=losses,
summary_dir=summary_dir,
)
metrics = {"loss": val_loss}
metrics.update(
{n: torch.mean(torch.stack(vals)).item() for n, vals in losses.get_summaries()}
)
log_metrics(f"[{epoch}] [valid]", metrics)
losses.reset_summaries()
if should_stop:
logger.info("Stopping training")
exit(0)
lrs.step()
test_loss = run_epoch(
model=model,
epoch=epoch,
loader=dataloader,
split="test",
opt=opt,
losses=losses,
summary_dir=summary_dir,
)
metrics: Dict[str, Number] = {"loss": test_loss}
metrics.update({n: torch.mean(torch.stack(vals)).item() for n, vals in losses.get_summaries()})
log_metrics(f"[{epoch}] [test]", metrics)
logger.info("Finished training")
def run_epoch(
model: nn.Module,
epoch: int,
loader: DataLoader,
split: str,
opt: Optimizer,
losses: Loss,
summary_dir: str,
) -> float:
global debug
logger.info("Start {} epoch {}".format(split, epoch))
log_freq = config("LOG_FREQ", cast=int, default=100, section="train")
if split != "train" and loader.batch_size_eval is not None:
bs = loader.batch_size_eval
else:
bs = loader.batch_size
detect_anomaly: bool = config("DETECT_ANOMALY", False, bool, section="train")
if detect_anomaly:
logger.info("Running with autograd profiling")
dev = get_device()
l_mem = []
is_train = split == "train"
summary_fn = summary_write # or summary_noop
model.train(mode=is_train)
losses.store_losses = debug or not is_train
max_steps = loader.len(split)
seed = epoch if is_train else 42
n_nans = 0
logger.info("Dataloader len: {}".format(loader.len(split)))
for i, batch in enumerate(loader.iter_epoch(split, seed)):
opt.zero_grad()
assert batch.feat_spec is not None
assert batch.feat_erb is not None
feat_erb = batch.feat_erb.to(dev, non_blocking=True)
feat_spec = as_real(batch.feat_spec.to(dev, non_blocking=True))
noisy = batch.noisy.to(dev, non_blocking=True)
clean = batch.speech.to(dev, non_blocking=True)
atten = batch.atten.to(dev, non_blocking=True)
snrs = batch.snr.to(dev, non_blocking=True)
with torch.autograd.set_detect_anomaly(detect_anomaly):
with torch.set_grad_enabled(is_train):
enh, m, lsnr, df_alpha = model.forward(
spec=as_real(noisy),
feat_erb=feat_erb,
feat_spec=feat_spec,
atten_lim=atten,
)
try:
err = losses.forward(
clean,
noisy,
enh,
m,
lsnr,
df_alpha=df_alpha,
max_freq=batch.max_freq,
snrs=snrs,
)
except Exception as e:
if "nan" in str(e).lower() or "finite" in str(e).lower():
logger.warning("NaN in loss computation: {}. Skipping backward.".format(str(e)))
check_finite_module(model)
n_nans += 1
if n_nans > 10:
raise e
continue
raise e
if is_train:
try:
err.backward()
clip_grad_norm_(model.parameters(), 1.0, error_if_nonfinite=True)
except RuntimeError as e:
e_str = str(e)
if "nan" in e_str.lower() or "non-finite" in e_str:
check_finite_module(model)
cleanup(err, noisy, clean, enh, m, feat_erb, feat_spec, batch)
logger.error(e_str)
n_nans += 1
if n_nans > 10:
raise e
continue
else:
raise e
opt.step()
detach_hidden(model)
l_mem.append(err.detach())
if i % log_freq == 0:
l_mean = torch.stack(l_mem[-100:]).mean().cpu()
if torch.isnan(l_mean):
check_finite_module(model)
l_dict = {"loss": l_mean.item()}
if debug:
l_dict.update(
{
n: torch.mean(torch.stack(vals[-bs:])).item()
for n, vals in losses.get_summaries()
}
)
log_metrics(f"[{epoch}] [{i}/{max_steps}]", l_dict)
summary_fn(
clean,
noisy,
enh,
batch.snr,
lsnr,
df_alpha,
summary_dir,
mask_loss=losses.ml,
split=split,
)
cleanup(err, noisy, clean, enh, m, feat_erb, feat_spec, batch)
return torch.stack(l_mem).mean().cpu().item()
def setup_losses() -> Loss:
global state, istft
assert state is not None
p = ModelParams()
istft = Istft(p.fft_size, p.hop_size, torch.as_tensor(state.fft_window().copy())).to(
get_device()
)
loss = Loss(state, istft).to(get_device())
# loss = torch.jit.script(loss)
return loss
def load_opt(
cp_dir: str, model: nn.Module, mask_only: bool = False, df_only: bool = False
) -> torch.optim.Optimizer:
lr = config("LR", 1e-4, float, section="train")
decay = config("WEIGHT_DECAY", 1e-3, float, section="train")
optimizer = config("OPTIMIZER", "adamw", str, section="train").lower()
if mask_only:
params = []
for n, p in model.named_parameters():
if not ("dfrnn" in n or "df_dec" in n):
params.append(p)
elif df_only:
params = (p for n, p in model.named_parameters() if "df" in n.lower())
else:
params = model.parameters()
if optimizer == "adamw":
opt = AdamW(params, lr=lr, weight_decay=decay)
elif optimizer == "adam":
opt = Adam(params, lr=lr, weight_decay=decay)
elif optimizer == "rmsprop":
opt = RMSprop(params, lr=lr, weight_decay=decay)
else:
raise ValueError(f"Unsupported optimizer: {optimizer}")
try:
read_cp(opt, "opt", cp_dir)
except ValueError as e:
logger.error(f"Could not load optimizer state: {e}")
for group in opt.param_groups:
group.setdefault("initial_lr", lr)
return opt
@torch.no_grad()
def summary_write(
clean: Tensor,
noisy: Tensor,
enh: Tensor,
snrs: Tensor,
lsnr: Tensor,
df_alpha: Tensor,
summary_dir: str,
mask_loss: Optional[MaskLoss] = None,
split="train",
):
global state
assert state is not None
p = ModelParams()
snr = snrs[0].detach().cpu().item()
def synthesis(x: Tensor) -> Tensor:
return torch.as_tensor(state.synthesis(make_np(as_complex(x.detach()))))
if mask_loss is not None:
ideal = mask_loss.erb_mask_compr(clean[0], noisy[0], compressed=False)
ideal = noisy[0] * mask_loss.erb_inv(ideal)
torchaudio.save(
os.path.join(summary_dir, f"{split}_idealmask_snr{snr}.wav"), synthesis(ideal), p.sr
)
torchaudio.save(
os.path.join(summary_dir, f"{split}_clean_snr{snr}.wav"), synthesis(clean[0]), p.sr
)
torchaudio.save(
os.path.join(summary_dir, f"{split}_noisy_snr{snr}.wav"), synthesis(noisy[0]), p.sr
)
torchaudio.save(os.path.join(summary_dir, f"{split}_enh_snr{snr}.wav"), synthesis(enh[0]), p.sr)
np.savetxt(
os.path.join(summary_dir, f"{split}_lsnr_snr{snr}.txt"),
lsnr[0].detach().cpu().numpy(),
fmt="%.3f",
)
np.savetxt(
os.path.join(summary_dir, f"{split}_df_alpha_snr{snr}.txt"),
df_alpha[0].detach().cpu().numpy(),
)
def summary_noop(*__args, **__kwargs): # type: ignore
pass
def get_sigusr1_handler(base_dir):
def h(*__args): # type: ignore
global should_stop
logger.warning("Received timeout signal. Stopping after current epoch")
should_stop = True
continue_file = os.path.join(base_dir, "continue")
logger.warning(f"Writing {continue_file}")
open(continue_file, "w").close()
return h
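# Hypothetical registration sketch (not part of this script): the handler above is meant to be
# installed for SIGUSR1, e.g. by a cluster scheduler wrapper, so that training stops gracefully
# after the current epoch and a "continue" marker file is left behind:
#
#   import signal
#   signal.signal(signal.SIGUSR1, get_sigusr1_handler(args.base_dir))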
def cleanup(*args):
import gc
for arg in args:
del arg
gc.collect()
torch.cuda.empty_cache()
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains Houdini utility functions and classes
"""
from __future__ import print_function, division, absolute_import
import hou
import hdefereval
def get_houdini_version(as_string=True):
"""
Returns version of the executed Houdini
    :param as_string: bool, Whether to return the string version or not
:return: variant, int or str
"""
if as_string:
return hou.applicationVersionString()
else:
return hou.applicationVersion()
def get_houdini_pass_main_thread_function():
"""
    Returns the Houdini function used to execute a function in Houdini's main thread
:return: fn
"""
return hdefereval.executeInMainThreadWithResult
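# Minimal usage sketch (illustrative; '/obj' is just an example argument): call a Houdini API
# function from a worker thread by routing it through the main-thread executor returned above.
#
#   run_in_main_thread = get_houdini_pass_main_thread_function()
#   obj_node = run_in_main_thread(hou.node, '/obj')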
|
nilq/baby-python
|
python
|
# coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.bitmovin_response import BitmovinResponse
from bitmovin_api_sdk.models.dolby_vision_metadata import DolbyVisionMetadata
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.encoding.encodings.streams.hdr.dolby_vision.dolby_vision_metadata_list_query_params import DolbyVisionMetadataListQueryParams
class DolbyVisionApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(DolbyVisionApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def create(self, encoding_id, stream_id, dolby_vision_metadata, **kwargs):
# type: (string_types, string_types, DolbyVisionMetadata, dict) -> DolbyVisionMetadata
"""Add Dolby Vision Metadata
:param encoding_id: Id of the encoding.
:type encoding_id: string_types, required
:param stream_id: Id of the stream.
:type stream_id: string_types, required
:param dolby_vision_metadata: The Dolby Vision Metadata to be added
:type dolby_vision_metadata: DolbyVisionMetadata, required
:return: Dolby Vision Metadata details
:rtype: DolbyVisionMetadata
"""
return self.api_client.post(
'/encoding/encodings/{encoding_id}/streams/{stream_id}/hdr/dolby-vision',
dolby_vision_metadata,
path_params={'encoding_id': encoding_id, 'stream_id': stream_id},
type=DolbyVisionMetadata,
**kwargs
)
def delete(self, encoding_id, stream_id, hdr_id, **kwargs):
# type: (string_types, string_types, string_types, dict) -> BitmovinResponse
"""Delete Dolby Vision Metadata
:param encoding_id: Id of the encoding.
:type encoding_id: string_types, required
:param stream_id: Id of the stream.
:type stream_id: string_types, required
:param hdr_id: Id of the Dolby Vision Metadata.
:type hdr_id: string_types, required
:return: Id of the Dolby Vision Metadata
:rtype: BitmovinResponse
"""
return self.api_client.delete(
'/encoding/encodings/{encoding_id}/streams/{stream_id}/hdr/dolby-vision/{hdr_id}',
path_params={'encoding_id': encoding_id, 'stream_id': stream_id, 'hdr_id': hdr_id},
type=BitmovinResponse,
**kwargs
)
def get(self, encoding_id, stream_id, hdr_id, **kwargs):
# type: (string_types, string_types, string_types, dict) -> DolbyVisionMetadata
"""Dolby Vision Metadata Details
:param encoding_id: Id of the encoding.
:type encoding_id: string_types, required
:param stream_id: Id of the stream.
:type stream_id: string_types, required
:param hdr_id: Id of the Dolby Vision Metadata.
:type hdr_id: string_types, required
:return: Dolby Vision Metadata details
:rtype: DolbyVisionMetadata
"""
return self.api_client.get(
'/encoding/encodings/{encoding_id}/streams/{stream_id}/hdr/dolby-vision/{hdr_id}',
path_params={'encoding_id': encoding_id, 'stream_id': stream_id, 'hdr_id': hdr_id},
type=DolbyVisionMetadata,
**kwargs
)
def list(self, encoding_id, stream_id, query_params=None, **kwargs):
# type: (string_types, string_types, DolbyVisionMetadataListQueryParams, dict) -> DolbyVisionMetadata
"""List Dolby Vision Metadata
:param encoding_id: Id of the encoding.
:type encoding_id: string_types, required
:param stream_id: Id of the stream.
:type stream_id: string_types, required
:param query_params: Query parameters
:type query_params: DolbyVisionMetadataListQueryParams
:return: List of Dolby Vision Metadata
:rtype: DolbyVisionMetadata
"""
return self.api_client.get(
'/encoding/encodings/{encoding_id}/streams/{stream_id}/hdr/dolby-vision',
path_params={'encoding_id': encoding_id, 'stream_id': stream_id},
query_params=query_params,
pagination_response=True,
type=DolbyVisionMetadata,
**kwargs
)
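# Usage sketch (illustrative only; the API key and ids below are placeholders, not real values):
#
#   dolby_vision_api = DolbyVisionApi(api_key='<API_KEY>')
#   page = dolby_vision_api.list(encoding_id='<ENCODING_ID>', stream_id='<STREAM_ID>')
#   details = dolby_vision_api.get(encoding_id='<ENCODING_ID>', stream_id='<STREAM_ID>',
#                                  hdr_id='<HDR_ID>')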
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.13 on 2021-07-30 14:42
import django.core.serializers.json
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
("extras", "0005_configcontext_device_types"),
]
operations = [
migrations.CreateModel(
name="Dashboard",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True
),
),
("created", models.DateField(auto_now_add=True, null=True)),
("last_updated", models.DateTimeField(auto_now=True, null=True)),
(
"_custom_field_data",
models.JSONField(blank=True, default=dict, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
("dashboard_slug", models.CharField(max_length=255, unique=True)),
("friendly_name", models.CharField(blank=True, default="", max_length=255)),
("dashboard_uid", models.CharField(max_length=64, unique=True)),
("tags", taggit.managers.TaggableManager(through="extras.TaggedItem", to="extras.Tag")),
],
options={
"ordering": ["dashboard_slug"],
},
),
migrations.CreateModel(
name="Panel",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True
),
),
("created", models.DateField(auto_now_add=True, null=True)),
("last_updated", models.DateTimeField(auto_now=True, null=True)),
(
"_custom_field_data",
models.JSONField(blank=True, default=dict, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
("command_name", models.CharField(max_length=64)),
("friendly_name", models.CharField(default="", max_length=64)),
("panel_id", models.IntegerField()),
("active", models.BooleanField(default=False)),
(
"dashboard",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="nautobot_plugin_chatops_grafana.dashboard"
),
),
],
options={
"ordering": ["command_name", "dashboard"],
},
),
migrations.CreateModel(
name="PanelVariable",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True
),
),
("created", models.DateField(auto_now_add=True, null=True)),
("last_updated", models.DateTimeField(auto_now=True, null=True)),
(
"_custom_field_data",
models.JSONField(blank=True, default=dict, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
("name", models.CharField(max_length=32)),
("friendly_name", models.CharField(max_length=64)),
("query", models.CharField(max_length=64)),
("includeincmd", models.BooleanField(default=False)),
("includeinurl", models.BooleanField(default=True)),
("modelattr", models.CharField(max_length=64)),
("value", models.TextField(max_length=64)),
("response", models.CharField(max_length=255)),
(
"filter",
models.JSONField(blank=True, default=dict, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
("positional_order", models.IntegerField(default=100)),
(
"panel",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="nautobot_plugin_chatops_grafana.panel"
),
),
],
options={
"ordering": ["name"],
},
),
]
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2021 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
from aiohttp import web
import logging
from .subscribers import Subscribers
_logger = logging.getLogger('s3replicationmanager')
# Route table declaration
routes = web.RouteTableDef()
@routes.post('/subscribers') # noqa: E302
async def add_subscriber(request):
"""Handler to add subscriber."""
# Get subscriber details from payload
subscriber = await request.json()
_logger.debug('API: POST /subscribers\nContent : {}'.format(subscriber))
subscribers_list = request.app['subscribers']
    # Add the subscriber to the registry
subscriber_obj = subscribers_list.add_subscriber(subscriber)
_logger.debug('Subscriber added : {}'.format(
subscriber_obj.get_dictionary()))
return web.json_response(subscriber_obj.get_dictionary(),
status=201)
@routes.get('/subscribers') # noqa: E302
async def list_subscribers(request):
"""Handler to list subscribers."""
_logger.debug('API: GET /subscribers')
subscribers = request.app['subscribers']
_logger.debug('Number of subscribers {}'.format(subscribers.count()))
return web.json_response(subscribers, dumps=Subscribers.dumps, status=200)
@routes.get('/subscribers/{subscriber_id}') # noqa: E302
async def get_subscriber(request):
"""Handler to get subscriber attributes."""
subscriber_id = request.match_info['subscriber_id']
_logger.debug('API: GET /subscribers/{}'.format(subscriber_id))
subscribers = request.app['subscribers']
subscriber = subscribers.get_subscriber(subscriber_id)
if subscriber is not None:
_logger.debug('Subscriber found with subscriber_id : {} '.
format(subscriber_id))
_logger.debug('Subscriber details : {} '.format(
subscriber.get_dictionary()))
return web.json_response(subscriber.get_dictionary(), status=200)
else:
_logger.debug('Subscriber missing with subscriber_id : {} '.
format(subscriber_id))
return web.json_response(
{'ErrorResponse': 'Subscriber Not Found!'}, status=404)
@routes.delete('/subscribers/{subscriber_id}') # noqa: E302
async def remove_subscriber(request):
"""Handler to remove subscriber."""
subscribers = request.app['subscribers']
subscriber_id = (request.match_info['subscriber_id'])
_logger.debug('API: DELETE /subscribers/{}'.format(subscriber_id))
# Check if subscriber is already present
if subscribers.is_subscriber_present(subscriber_id):
subscriber = subscribers.remove_subscriber(subscriber_id)
_logger.debug('Subscriber removed : {}'.format(
subscriber.get_dictionary()))
return web.json_response({'subscriber_id': subscriber_id}, status=204)
else:
return web.json_response(
{'ErrorResponse': 'Subscriber Not Found!'}, status=404)
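# Application wiring sketch (assumption: `Subscribers()` takes no constructor arguments). The
# handlers above expect the registry under app['subscribers'] and the routes from the module-level
# route table:
#
#   def setup_app():
#       app = web.Application()
#       app['subscribers'] = Subscribers()
#       app.add_routes(routes)
#       return app
#
#   # web.run_app(setup_app(), port=8080)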
|
nilq/baby-python
|
python
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""args"""
import argparse
import ast
parser = argparse.ArgumentParser(description='AECRNet')
# Hardware specifications
parser.add_argument('--seed', type=int, default=1,
help='random seed')
# Data specifications
parser.add_argument('--dir_data', type=str, default='/cache/data/',
help='dataset directory')
parser.add_argument('--data_train', type=str, default='RESIDE',
help='train dataset name')
parser.add_argument('--data_test', type=str, default='Dense',
help='test dataset name')
parser.add_argument('--ext', type=str, default='sep',
help='dataset file extension')
parser.add_argument('--patch_size', type=int, default=240,
help='output patch size')
parser.add_argument('--rgb_range', type=int, default=255,
help='maximum value of RGB')
parser.add_argument('--n_colors', type=int, default=3,
help='number of color channels to use')
parser.add_argument('--no_augment', action='store_true',
help='do not use data augmentation')
# Training specifications
parser.add_argument('--test_every', type=int, default=4000,
                    help='run a test every N batches')
parser.add_argument('--epochs', type=int, default=1000,
help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=16,
help='input batch size for training')
# Optimization specifications
parser.add_argument('--lr', type=float, default=1e-5,
help='learning rate')
parser.add_argument('--loss_scale', type=float, default=1024.0,
help='scaling factor for optim')
parser.add_argument('--init_loss_scale', type=float, default=65536.,
help='scaling factor')
parser.add_argument('--decay', type=str, default='200',
help='learning rate decay type')
parser.add_argument('--betas', type=tuple, default=(0.9, 0.999),
help='ADAM beta')
parser.add_argument('--epsilon', type=float, default=1e-8,
help='ADAM epsilon for numerical stability')
parser.add_argument('--weight_decay', type=float, default=0,
help='weight decay')
# ckpt specifications
parser.add_argument('--ckpt_save_path', type=str, default='./ckpt/',
help='path to save ckpt')
parser.add_argument('--ckpt_save_interval', type=int, default=10,
help='save ckpt frequency, unit is epoch')
parser.add_argument('--ckpt_save_max', type=int, default=100,
help='max number of saved ckpt')
parser.add_argument('--ckpt_path', type=str, default='',
help='path of saved ckpt')
parser.add_argument('--filename', type=str, default='')
parser.add_argument('--device_target', type=str, default='GPU')
# ModelArts
parser.add_argument('--modelArts_mode', type=ast.literal_eval, default=False,
help='train on modelarts or not, default is False')
parser.add_argument('--data_url', type=str, default='', help='the directory path of saved file')
parser.add_argument('--train_url', type=str, default='', help='')
# CR Loss
parser.add_argument('--neg_num', type=int, default=10)
parser.add_argument('--contra_lambda', type=float, default=0.1, help='weight of contra_loss')
parser.add_argument('--vgg_ckpt_path', type=str, default='./')
parser.add_argument('--vgg_ckpt', type=str, default='vgg19_ImageNet.ckpt', help='filename of vgg checkpoint')
args, unparsed = parser.parse_known_args()
args.data_train = args.data_train.split('+')
args.data_test = args.data_test.split('+')
if args.epochs == 0:
args.epochs = 1e8
for arg in vars(args):
if vars(args)[arg] == 'True':
vars(args)[arg] = True
elif vars(args)[arg] == 'False':
vars(args)[arg] = False
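# Example invocation (illustrative; the flag values below are arbitrary):
#   python train.py --dir_data /cache/data/ --batch_size 16 --lr 1e-5 --device_target GPU
# Other modules then typically read the parsed namespace, e.g. `from args import args`
# (module name assumed from this file's docstring).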
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib.pyplot as plt
class comparison:
def __init__(self,X_test,y_test):
self.X_test = X_test
self.y_test = y_test
self.predictions_dict = {"True Labels":{"predictions": self.y_test,"threshold": 0.5}}
self.labels_dict = {"True Labels":{"labels": self.y_test,"x_all":0, "y_all":0,"true_x":0,"true_y":0}}
self.figSize_x = 20
self.figSize_y = 10
self.figureName = 'Comparison of Predictions'
self.bottomSpace = None
self.topSpace = None
self.hspace = 0.2
self.wspace = None
self.set_x_y_all(self.y_test,"True Labels")
def set_figSize(self,x,y):
"""This function gets width and height to set figure size"""
self.figSize_x = x
self.figSize_y = y
def set_figName(self,name):
"""This function get a name to set Main Plot Name"""
self.figureName = name
def set_spaces(self,bottomSpace = None,topSpace = None,hspace = 0.2,wspace = None):
"""This function get variables to set subplot spaces
bottomSpace # the bottom of the subplots of the figure
topSpace # the top of the subplots of the figure
wspace # the amount of width reserved for space between subplots,expressed as a fraction of the average axis width
hspace # the amount of height reserved for space between subplots,expressed as a fraction of the average axis height
"""
self.bottomSpace = bottomSpace
self.topSpace = topSpace
self.hspace = hspace
self.wspace = wspace
def update(self):
self.labels_dict["True Labels"]["labels"] = self.y_test
        self.predictions_dict["True Labels"]["predictions"] = self.y_test  # key matches __init__
self.find_true_index_predictedLabels("True Labels")
def oneHot_to_integer(self):
"""If your labels are one hot encoded use that function
Basicly from [[0,1],[1,0]] -> [1,0]"""
self.y_test = [np.where(r==1)[0][0] for r in self.y_test]
def order_test_samples(self):
"""This function for ordering indexes of positive and negative test examples.
It helps us to get more clear illustration for predictions
Use output of that function for your prediction"""
#unique_elements, counts_elements = np.unique(y, return_counts=True)
negative_indexes = list(np.where(self.y_test==0)[0])
positive_indexes = list(np.where(self.y_test==1)[0])
positive_samples = self.X_test[positive_indexes]
negative_samples = self.X_test[negative_indexes]
negative_labels = np.zeros((len(negative_indexes)))
positive_labels = np.ones((len(positive_indexes)))
self.y_test = np.concatenate([positive_labels,negative_labels])
self.X_test = np.concatenate([positive_samples,negative_samples],axis=0)
self.update()
return self.X_test, self.y_test
def set_x_y_all(self,y,modelName):
"""This function set x and y arrays for creating a black background space in our plot"""
y_position = list(range(len(y)))
x_position = np.ones((len(y)))
self.labels_dict[modelName]["y_all"] = y_position
self.labels_dict[modelName]["x_all"] = x_position
def predicted_labels(self, y_probs,threshold):
"""This function takes probabilities and threshold as inputs
Determine labels by using threshold"""
labels = np.zeros((len(y_probs))) #Create a zero array, thus we can look at probabilities for 1's
for index in range(len(y_probs)):
if y_probs[index][1] >= threshold: #Look at probs for 1's. If prob is larger than threshold predict it as 1.
labels[index] = 1
return labels
def arrenge_x_axes(self,true_index):
"""This function determines hight of the true predictions -> 1s"""
return np.ones((len(true_index)))
def find_true_index_predictedLabels(self,modelName):
"""This function determines indexes of 1 in our predictions"""
y_pred = self.labels_dict[modelName]["labels"]
true_index = []
for i in range(len(y_pred)):
if y_pred[i] == 1:
true_index.append(i)
true_x = self.arrenge_x_axes(true_index)
self.labels_dict[modelName]["true_y"] = true_index
self.labels_dict[modelName]["true_x"] = true_x
def set_prob_predictions(self,modelName,preds,threshold=0.5):
"""Each prediction will be saved in dictionary, thus we can use them later.
This function also set all necessary indexes for plotting step"""
self.predictions_dict[modelName] = {"predictions": preds,"threshold": threshold}
pred_labels = self.predicted_labels(preds,threshold)
self.set_label_predictions(modelName,pred_labels)
self.set_x_y_all(pred_labels,modelName)
self.find_true_index_predictedLabels(modelName)
def set_label_predictions(self,modelName,labels):
"""Label version of set_prob_predictions function. """
self.labels_dict[modelName] = {"labels": labels,"x_all":0, "y_all":0,"true_x":0,"true_y":0}
self.set_x_y_all(labels,modelName)
self.find_true_index_predictedLabels(modelName)
"""def set_model_threshold(self,modelName,threshold):
self.predictions_dict[modelName]["threshold"] = threshold
pred_labels = self.predicted_labels(self.predictions_dict[modelName]["predictions"],threshold)
self.set_label_predictions(modelName,pred_labels)
self.set_x_y_all(pred_labels,modelName)
self.find_true_index_predictedLabels(modelName)"""
def clear_all(self):
"""This function can be called to erase all instances for dictionaries"""
print("Saved Predictions will be cleaned!")
self.labels_dict.clear()
self.predictions_dict.clear()
print("Cleaning was done!")
def delete_element(self,modelName):
"""This function deletes dictionary elements with respect to model name input"""
if modelName not in self.labels_dict and modelName not in self.predictions_dict:
raise Exception(f"{modelName} is not an element of any dictionary!")
else:
print(f"Saved Predictions for model {modelName} will be cleaned!")
if modelName in self.labels_dict:
self.labels_dict.pop(modelName)
if modelName in self.predictions_dict:
self.predictions_dict.pop(modelName)
print("Cleaning was done!")
def compare_3_prediction(self,modelName1,modelName2,modelName3):
"""If you want to take detailed report for comparison of 3 models this helper function will be called in compare_predictions
It takes names of the 3 models and examine common predictions
Common predictions and mistakes are important
Individual mistakes can be exported from that report for further investigation to use Voting
"""
predicted_labels1 = self.labels_dict[modelName1]["labels"]
predicted_labels2 = self.labels_dict[modelName2]["labels"]
predicted_labels3 = self.labels_dict[modelName3]["labels"]
correct_labels1 = np.where(self.y_test == predicted_labels1)[0]
incorrect_labels1 = np.where(self.y_test != predicted_labels1)[0]
correct_labels2 = np.where(self.y_test == predicted_labels2)[0]
incorrect_labels2 = np.where(self.y_test != predicted_labels2)[0]
correct_labels3 = np.where(self.y_test == predicted_labels3)[0]
incorrect_labels3 = np.where(self.y_test != predicted_labels3)[0]
same_correct1_2 = np.intersect1d(correct_labels1, correct_labels2)
same_correct_123 = np.intersect1d(same_correct1_2 , correct_labels3)
same_incorrect1_2 = np.intersect1d(incorrect_labels1 , incorrect_labels2)
same_incorrect_123 = np.intersect1d(same_incorrect1_2 , incorrect_labels3)
common_predictions_len = len(same_incorrect_123) + len(same_correct_123)
print(f"{len(same_correct_123)} common samples were correctly predicted by Three predictor")
print(f"{len(same_incorrect_123)} common samples were wrongly predicted by Three predictor")
print(f"{len(predicted_labels1)-common_predictions_len} samples were predicted differently")
def compare_2_prediction(self,modelName1,modelName2):
"""If you want to take detailed report for comparison of 2 models this helper function will be called in compare_predictions
It takes names of the 2 models and examine common predictions
Common predictions and mistakes are important
Individual mistakes can be exported from that report for further investigation to use Voting
"""
predicted_labels1 = self.labels_dict[modelName1]["labels"]
predicted_labels2 = self.labels_dict[modelName2]["labels"]
correct_labels1 = np.where(self.y_test == predicted_labels1)[0]
incorrect_labels1 = np.where(self.y_test != predicted_labels1)[0]
correct_labels2 = np.where(self.y_test == predicted_labels2)[0]
incorrect_labels2 = np.where(self.y_test != predicted_labels2)[0]
same_correct = np.isin(correct_labels1, correct_labels2)
common_correct = correct_labels1[same_correct]
same_incorrect = np.isin(incorrect_labels1, incorrect_labels2)
common_incorrect = incorrect_labels1[same_incorrect]
common_predictions_len = len(common_incorrect) + len(common_correct)
print(f"{len(common_correct)} common samples were correctly predicted by both predictor")
print(f"{len(common_incorrect)} common samples were wrongly predicted by both predictor")
print(f"{len(predicted_labels1)-common_predictions_len} samples were predicted differently")
def compare_with_golds(self,modelName):
"""This function compares individual models with gold standarts"""
predicted_labels = self.labels_dict[modelName]["labels"]
true_predictions = np.where(self.y_test == predicted_labels)[0]
false_predictions = np.where(self.y_test != predicted_labels)[0]
print(f"{len(true_predictions)} samples were correctly predicted and {len(false_predictions)} samples were falsely predicted out of {len(self.y_test)} samples by Model: {modelName}")
def compare_predictions(self,modelName1=None,modelName2=None,modelName3=None):
"""If you want to take detailed explanation of comparison you can use that function.
This function take 1 to 3 models
Each model will be compared with Gold Standarts
Each model compared with each other to find individual mistakes
After that Voting can be applied
"""
if modelName1 != None:
if modelName1 not in self.labels_dict:
raise Exception(f"{modelName1} is not an element of any dictionary!")
else:
counts_elements = np.unique(self.y_test, return_counts=True)[1]
print(f"There are {counts_elements[0]} negative and {counts_elements[1]} positive samples in labels")
self.compare_with_golds(modelName1)
if modelName2 != None and modelName3 == None:
if modelName2 not in self.labels_dict:
raise Exception(f"{modelName2} is not an element of any dictionary!")
else:
self.compare_with_golds(modelName2)
self.compare_2_prediction(modelName1,modelName2)
if modelName3 != None:
if modelName3 not in self.labels_dict:
raise Exception(f"{modelName3} is not an element of any dictionary!")
else:
self.compare_with_golds(modelName2)
self.compare_with_golds(modelName3)
self.compare_3_prediction(modelName1,modelName2,modelName3)
def plot_predictions(self):
"""To plot predictions of each model and Gold Standarts use that function"""
model_numbers = len(self.labels_dict)
fig, ax = plt.subplots(model_numbers,figsize=(self.figSize_x, self.figSize_y))
fig.suptitle(self.figureName)
model_index = 0
for model_name,values in self.labels_dict.items(): #Create subplots
X_all = values["x_all"]
y_all = values["y_all"]
X_true = values["true_x"]
y_true = values["true_y"]
ax[model_index].bar(y_all,X_all,width=1,color='black')
ax[model_index].bar(y_true,X_true,width=1,color='#DAA520')
ax[model_index].set_title(model_name)
model_index += 1
        hspace_ = self.hspace + model_index*0.1 #Arrange space between subplots
plt.subplots_adjust(bottom=self.bottomSpace, top=self.topSpace, hspace=hspace_ , wspace=self.wspace)
plt.show()
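# Usage sketch (illustrative; X_test, y_test and the model outputs are supplied by the caller):
#
#   comp = comparison(X_test, y_test)
#   comp.order_test_samples()                                  # optional: group positives before negatives
#   comp.set_prob_predictions("CNN", cnn_probs, threshold=0.5) # probabilities with shape (n_samples, 2)
#   comp.set_label_predictions("SVM", svm_labels)              # already thresholded 0/1 labels
#   comp.compare_predictions("CNN", "SVM")                     # printed report against the gold labels
#   comp.plot_predictions()                                    # one bar subplot per model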
|
nilq/baby-python
|
python
|
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
namespace Seymour
{
public partial class AddFeedDialog : Form
{
public AddFeedDialog()
{
InitializeComponent();
}
public string Url
{
get
{
return dFeedUrl.Text;
}
}
private void AddFeedDialog_Load(object sender, EventArgs e)
{
dFeedUrl.SelectAll();
dFeedUrl.Focus();
this.ActiveControl = dFeedUrl;
}
private void dOk_Click(object sender, EventArgs e)
{
DialogResult = DialogResult.OK;
Close();
}
private void dCancel_Click(object sender, EventArgs e)
{
DialogResult = DialogResult.Cancel;
Close();
}
}
}
|
nilq/baby-python
|
python
|
# The Twitter API v2 recent search endpoint provides developers with API access to public Tweets posted over the last week. The endpoint receives a single search query and responds with matching Tweets.
import requests, configparser, json
# Read the keys from auth.ini and define them
config = configparser.ConfigParser()
config.read('auth.ini')
BEARER_TOKEN = config.get('credentials', 'bearer_token')
headers = {"Authorization": f"Bearer {BEARER_TOKEN}"}
params = {
'query': 'from:editvideobot -is:retweet',
'tweet.fields': 'author_id'
}
url = "https://api.twitter.com/2/tweets/search/recent"
response = requests.get(url, headers=headers, params=params)
print(response.json())
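# Illustrative follow-up (assumes the standard v2 response envelope, where matching Tweets are
# returned under a top-level "data" list):
for tweet in response.json().get("data", []):
    print(tweet["id"], tweet["author_id"], tweet["text"])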
|
nilq/baby-python
|
python
|
import unittest
from query import QueryBuilder
class TestBingoQL(unittest.TestCase):
def setUp(self):
self.builder = QueryBuilder()
def testQueryByPropName(self):
query = self.builder.build_query('"monoisotopic_weight"')
self.assertEquals(u"(elems->>'y' = %(property_term_0)s)", query)
self.assertEquals({'property_term_0': u'monoisotopic_weight'}, self.builder.bind_params)
query = self.builder.build_query('"BBB log([brain]:[blood])"')
self.assertEquals(u"(elems->>'y' = %(property_term_0)s)", query)
self.assertEquals({'property_term_0': u'bbb log([brain]:[blood])'}, self.builder.bind_params)
query = self.builder.build_query('~"count"')
self.assertEquals(u"(elems->>'y' LIKE %(property_term_0)s)", query)
self.assertEquals({'property_term_0': u'%count%'}, self.builder.bind_params)
query = self.builder.build_query('count')
self.assertEquals(u"(elems->>'y' LIKE %(property_term_0)s)", query)
self.assertEquals({'property_term_0': u'%count%'}, self.builder.bind_params)
def testQueryPropWithValue(self):
query = self.builder.build_query('"atom_count" != 30')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND jsonb_typeof(elems->'y') = 'number' AND (elems->>'y')::float != %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'atom_count', 'property_value_0': '30'}, self.builder.bind_params)
query = self.builder.build_query('"weight" > 0.537')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND jsonb_typeof(elems->'y') = 'number' AND (elems->>'y')::float > %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'weight', 'property_value_0': '0.537'}, self.builder.bind_params)
query = self.builder.build_query('count > 25')
self.assertEquals(u"(elems->>'x' LIKE %(property_term_0)s AND jsonb_typeof(elems->'y') = 'number' AND (elems->>'y')::float > %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'%count%', 'property_value_0': '25'}, self.builder.bind_params)
query = self.builder.build_query('"formula" = "C14H21N3O2"')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND elems->>'y' = %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'formula', 'property_value_0': 'c14h21n3o2'}, self.builder.bind_params)
query = self.builder.build_query("'formula' != " + '"C14H21N3O2"')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND elems->>'y' != %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'formula', 'property_value_0': 'c14h21n3o2'}, self.builder.bind_params)
query = self.builder.build_query('~"molecular formula" = "C14H21N3O2"')
self.assertEquals(u"(elems->>'x' LIKE %(property_term_0)s AND elems->>'y' = %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'%molecular formula%', 'property_value_0': 'c14h21n3o2'}, self.builder.bind_params)
query = self.builder.build_query('formula = "C14H21N3O2"')
self.assertEquals(u"(elems->>'x' LIKE %(property_term_0)s AND elems->>'y' = %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'%formula%', 'property_value_0': 'c14h21n3o2'}, self.builder.bind_params)
query = self.builder.build_query("'formula' ~ 'C14H21N3O2'")
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND elems->>'y' LIKE %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'formula', 'property_value_0': '%c14h21n3o2%'}, self.builder.bind_params)
query = self.builder.build_query("formula !~ C14H21N3O2")
self.assertEquals(u"(elems->>'x' LIKE %(property_term_0)s AND elems->>'y' NOT LIKE %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'%formula%', 'property_value_0': '%c14h21n3o2%'}, self.builder.bind_params)
query = self.builder.build_query('"P-gp category_Probability" ~ "no"')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND elems->>'y' LIKE %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'p-gp category_probability', 'property_value_0': '%no%'}, self.builder.bind_params)
query = self.builder.build_query('"PPB90 category_Probability" ~ "high = 0.18;"')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND elems->>'y' LIKE %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'ppb90 category_probability', 'property_value_0': u'%high = 0.18;%'}, self.builder.bind_params)
query = self.builder.build_query('"molecular_formula" !~ "C14H21N3O2"')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND elems->>'y' NOT LIKE %(property_value_0)s)", query)
self.assertEquals({'property_term_0': u'molecular_formula', 'property_value_0': u'%c14h21n3o2%'}, self.builder.bind_params)
def testQueryCompound(self):
query = self.builder.build_query('"mass" > 30 OR ~"probability" !~ "LOW"')
self.assertEquals(u"(elems->>'x' = %(property_term_0)s AND jsonb_typeof(elems->'y') = 'number' AND (elems->>'y')::float > %(property_value_0)s) OR (elems->>'x' LIKE %(property_term_1)s AND elems->>'y' NOT LIKE %(property_value_1)s)", query)
self.assertEquals({
'property_term_0': u'mass',
'property_value_0': u'30',
'property_term_1': u'%probability%',
'property_value_1': u'%low%',
}, self.builder.bind_params)
query = self.builder.build_query('"STATUS" or ~"NAME" or "CODE"')
self.assertEquals(u"(elems->>'y' = %(property_term_0)s) OR (elems->>'y' LIKE %(property_term_1)s) OR (elems->>'y' = %(property_term_2)s)", query)
self.assertEquals({
'property_term_0': u'status',
'property_term_1': u'%name%',
'property_term_2': u'code',
}, self.builder.bind_params)
query = self.builder.build_query('logP > 2 and StdDev < 0.5')
self.assertEquals(u"(elems->>'x' LIKE %(property_term_0)s AND jsonb_typeof(elems->'y') = 'number' AND (elems->>'y')::float > %(property_value_0)s))\n inner join {1} t1 on str.s = t1.s\n inner join jsonb_array_elements(t1.p) elems_t1 on ((elems_t1->>'x' LIKE %(property_term_1)s AND jsonb_typeof(elems_t1->'y') = 'number' AND (elems_t1->>'y')::float < %(property_value_1)s)", query)
self.assertEquals({
'property_term_0': u'%logp%',
'property_term_1': u'%stddev%',
'property_value_0': u'2',
'property_value_1': u'0.5',
}, self.builder.bind_params)
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
import click
from ..cli import with_context
@click.command('test', short_help='Run a suite of tests to validate the correctness of a book')
@with_context
def test_command(ctx=None):
pass
|
nilq/baby-python
|
python
|
import random
class StatesPool:
def __init__(self, capacity = 10000000):
self.capacity = capacity
self.pool = []
self.position = 0
def push(self, state):
if len(self.pool) < self.capacity:
self.pool.append(None)
self.pool[self.position] = state
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.pool, batch_size)
def __len__(self):
return len(self.pool)
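# Usage sketch (illustrative): the pool acts as a fixed-capacity ring buffer of arbitrary state
# objects, and sample() draws a uniform random mini-batch without replacement.
#
#   pool = StatesPool(capacity=1000)
#   for step in range(5000):
#       pool.push(("state", step))  # any object can be stored; old entries get overwritten
#   batch = pool.sample(32)         # list of 32 randomly chosen states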
|
nilq/baby-python
|
python
|
#!/usr/bin/python3.6
import os
import re
import sys
import yaml
from glob import glob
from collections import OrderedDict
from typing import Any, List
import numpy as np
import pandas as pd
import lightgbm as lgb
from scipy.stats import describe
from tqdm import tqdm
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.metrics import fbeta_score
from debug import dprint
IN_KERNEL = os.environ.get('KAGGLE_WORKING_DIR') is not None
INPUT_PATH = '../input/imet-2019-fgvc6/' if IN_KERNEL else '../input/'
NUM_ATTEMPTS = 100
NUM_FOLDS = 5
NUM_CLASSES = 1103
def parse_labels(s: str) -> np.array:
res = np.zeros(NUM_CLASSES)
res[list(map(int, s.split()))] = 1
return res
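# Illustrative example (not in the original script): parse_labels("3 17") returns a
# NUM_CLASSES-length vector of zeros with ones at indices 3 and 17.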
if __name__ == '__main__':
if len(sys.argv) < 3:
print(f'usage: {sys.argv[0]} predict1.npy ...')
sys.exit()
# load data
fold_num = np.load('folds.npy')
train_df = pd.read_csv(INPUT_PATH + 'train.csv')
all_labels = np.vstack(list(map(parse_labels, train_df.attribute_ids)))
dprint(fold_num.shape)
dprint(all_labels.shape)
# build dataset
all_predicts_list = []
predicts = sys.argv[1:]
for filename in predicts:
assert 'level1_train_' in filename
m = re.match(r'(.*)_f(\d)_e\d+.*\.npy', filename)
assert m
model_path = m.group(1)
predict = np.zeros((train_df.shape[0], NUM_CLASSES))
for fold in range(NUM_FOLDS):
filenames = glob(f'{model_path}_f{fold}_*.npy')
if len(filenames) != 1:
dprint(filenames)
assert False # the model must be unique in this fold
filename = filenames[0]
print('reading', filename)
data = np.load(filename)
predict[fold_num == fold] = data
all_predicts_list.append(predict)
all_predicts = np.dstack(all_predicts_list)
# FIXME: use real thresholds here
all_predicts -= np.min(all_predicts, axis=1, keepdims=True)
dprint(all_predicts.shape)
dprint(all_labels.shape)
for class_ in tqdm(range(NUM_CLASSES)):
# print('-' * 80)
# dprint(class_)
x_train = all_predicts[fold_num != 0][:, class_]
y_train = all_labels[fold_num != 0][:, class_]
x_val = all_predicts[fold_num == 0][:, class_]
y_val = all_labels[fold_num == 0][:, class_]
# dprint(x_train.shape)
# dprint(y_train.shape)
# dprint(x_val.shape)
# dprint(y_val.shape)
#
# dprint(describe(x_train))
# dprint(describe(x_val))
# dprint(describe(y_train))
# dprint(describe(y_val))
#
# dprint(np.unique(y_val))
classif = SVC(kernel='linear')
classif.fit(x_train, y_train)
y_pred = classif.predict(x_val)
# FIXME: do I have to find the best threshold?
y_pred = y_pred > 0.1
if np.sum(y_pred) > 0:
score = fbeta_score(y_val, y_pred, beta=2)
else:
score = 0
print('class', class_, 'F2 score:', score)
|
nilq/baby-python
|
python
|
import abc
import asyncio
import time
from typing import Awaitable, Callable, List, Optional
import multidict
import yarl
from .base import ClosableResponse, EmptyResponse, Header, Request
from .circuit_breaker import CircuitBreaker
from .deadline import Deadline
from .metrics import MetricsProvider
from .priority import Priority
from .response_classifier import ResponseClassifier, ResponseVerdict
from .tracing import SpanKind, Tracer
from .transport import Transport
NextModuleFunc = Callable[[yarl.URL, Request, Deadline, Priority], Awaitable[ClosableResponse]]
class RequestModule(abc.ABC):
__slots__ = ()
@abc.abstractmethod
async def execute(
self,
next: NextModuleFunc,
*,
endpoint: yarl.URL,
request: Request,
deadline: Deadline,
priority: Priority,
) -> ClosableResponse:
...
class BypassModule(RequestModule):
__slots__ = ()
async def execute(
self, next: NextModuleFunc, *, endpoint: yarl.URL, request: Request, deadline: Deadline, priority: Priority
) -> ClosableResponse:
return await next(endpoint, request, deadline, priority)
class LowTimeoutModule(RequestModule):
__slots__ = ("_low_timeout_threshold",)
def __init__(self, low_timeout_threshold: float):
self._low_timeout_threshold = low_timeout_threshold
async def execute(
self,
next: NextModuleFunc,
*,
endpoint: yarl.URL,
request: Request,
deadline: Deadline,
priority: Priority,
) -> ClosableResponse:
if deadline.expired or deadline.timeout < self._low_timeout_threshold:
return EmptyResponse(status=408)
return await next(endpoint, request, deadline, priority)
class TransportModule(RequestModule):
__slots__ = ("_transport", "_emit_system_headers", "_request_enricher")
def __init__(
self,
transport: Transport,
*,
emit_system_headers: bool,
request_enricher: Optional[Callable[[Request, bool], Awaitable[Request]]],
):
self._transport = transport
self._emit_system_headers = emit_system_headers
self._request_enricher = request_enricher
async def execute(
self,
_: NextModuleFunc,
*,
endpoint: yarl.URL,
request: Request,
deadline: Deadline,
priority: Priority,
) -> ClosableResponse:
if self._emit_system_headers:
request = request.update_headers(
{
Header.X_REQUEST_DEADLINE_AT: str(deadline), # for backward compatibility
Header.X_REQUEST_PRIORITY: str(priority),
Header.X_REQUEST_TIMEOUT: str(deadline.timeout),
}
)
request = (
await self._request_enricher(request, self._emit_system_headers)
if self._request_enricher is not None
else request
)
return await self._transport.send(endpoint, request, deadline.timeout)
class MetricsModule(RequestModule):
__slots__ = ("_metrics_provider",)
def __init__(self, metrics_provider: MetricsProvider):
self._metrics_provider = metrics_provider
async def execute(
self,
next: NextModuleFunc,
*,
endpoint: yarl.URL,
request: Request,
deadline: Deadline,
priority: Priority,
) -> ClosableResponse:
started_at = time.perf_counter()
try:
response = await next(endpoint, request, deadline, priority)
self._capture_metrics(
endpoint=endpoint,
request=request,
status=response.status,
circuit_breaker=Header.X_CIRCUIT_BREAKER in response.headers,
started_at=started_at,
)
return response
except asyncio.CancelledError:
self._capture_metrics(
endpoint=endpoint, request=request, status=499, circuit_breaker=False, started_at=started_at
)
raise
def _capture_metrics(
self, *, endpoint: yarl.URL, request: Request, status: int, circuit_breaker: bool, started_at: float
) -> None:
tags = {
"request_endpoint": endpoint.human_repr(),
"request_method": request.method,
"request_path": request.url.path,
"response_status": str(status),
"circuit_breaker": int(circuit_breaker),
}
elapsed = max(0.0, time.perf_counter() - started_at)
self._metrics_provider.increment_counter("aio_request_status", tags)
self._metrics_provider.observe_value("aio_request_latency", tags, elapsed)
class TracingModule(RequestModule):
__slots__ = ("_tracer", "_emit_system_headers")
def __init__(self, tracer: Tracer, *, emit_system_headers: bool):
self._tracer = tracer
self._emit_system_headers = emit_system_headers
async def execute(
self,
next: NextModuleFunc,
*,
endpoint: yarl.URL,
request: Request,
deadline: Deadline,
priority: Priority,
) -> ClosableResponse:
span_name = str(request.url)
with self._tracer.start_span(span_name, SpanKind.CLIENT) as span:
span.set_request_method(request.method)
span.set_request_endpoint(endpoint)
span.set_request_path(request.url)
response = await next(
endpoint,
(request.update_headers(self._tracer.get_context_headers()) if self._emit_system_headers else request),
deadline,
priority,
)
span.set_response_status(response.status)
return response
class CircuitBreakerModule(RequestModule):
__slots__ = ("_circuit_breaker", "_fallback", "_response_classifier")
def __init__(
self,
circuit_breaker: CircuitBreaker[yarl.URL, ClosableResponse],
*,
status_code: int = 502,
response_classifier: ResponseClassifier,
):
self._circuit_breaker = circuit_breaker
self._response_classifier = response_classifier
headers = multidict.CIMultiDict[str]()
headers[Header.X_DO_NOT_RETRY] = "1"
headers[Header.X_CIRCUIT_BREAKER] = "1"
self._fallback = EmptyResponse(
status=status_code,
headers=multidict.CIMultiDictProxy[str](headers),
)
async def execute(
self,
next: NextModuleFunc,
*,
endpoint: yarl.URL,
request: Request,
deadline: Deadline,
priority: Priority,
) -> ClosableResponse:
return await self._circuit_breaker.execute(
scope=endpoint,
operation=lambda: next(endpoint, request, deadline, priority),
fallback=self._fallback,
is_successful=lambda x: _response_verdict_to_bool(self._response_classifier.classify(x)),
)
def build_pipeline(modules: List[RequestModule]) -> NextModuleFunc:
async def _unsupported(
_: yarl.URL,
__: Request,
___: Deadline,
____: Priority,
) -> ClosableResponse:
raise NotImplementedError()
def _execute_module(m: RequestModule, n: NextModuleFunc) -> NextModuleFunc:
return lambda e, r, d, p: m.execute(n, endpoint=e, request=r, deadline=d, priority=p)
pipeline: NextModuleFunc = _unsupported
for module in reversed(modules):
if isinstance(module, BypassModule):
continue
pipeline = _execute_module(module, pipeline)
return pipeline
def _response_verdict_to_bool(response_verdict: ResponseVerdict) -> bool:
if response_verdict == ResponseVerdict.ACCEPT:
return True
if response_verdict == ResponseVerdict.REJECT:
return False
raise RuntimeError(f"Unexpected {response_verdict}")
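# Composition sketch (illustrative; `transport` and `metrics_provider` are assumed to be concrete
# implementations supplied by the caller). Modules run in list order, each delegating to the next:
#
#   modules: List[RequestModule] = [
#       MetricsModule(metrics_provider),
#       LowTimeoutModule(low_timeout_threshold=0.005),
#       TransportModule(transport, emit_system_headers=True, request_enricher=None),
#   ]
#   pipeline = build_pipeline(modules)
#   response = await pipeline(endpoint, request, deadline, priority)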
|
nilq/baby-python
|
python
|
description = 'Camini Camera Synchronisation Detector'
group = 'lowlevel'
pvprefix = 'SQ:ICON:CAMINI:'
pvprefix_sumi = 'SQ:ICON:sumi:'
pvprefix_ai = 'SQ:ICON:B5ADC:'
includes = ['shutters']
display_order = 90
devices = dict(
cam_shut = device('nicos.devices.epics.EpicsReadable',
epicstimeout = 3.0,
description = 'Camera shutter open',
readpv = pvprefix + 'SHUTTER',
lowlevel = True,
),
cam_arm = device('nicos.devices.epics.EpicsReadable',
epicstimeout = 3.0,
description = 'Camera ready for acquisition',
readpv = pvprefix + 'ARM',
lowlevel = True,
),
cam_trig = device('nicos.devices.epics.EpicsDigitalMoveable',
epicstimeout = 3.0,
description = 'Camera trigger signal',
readpv = pvprefix + 'TRIG',
writepv = pvprefix + 'TRIG',
lowlevel = True,
),
cam_aux = device('nicos.devices.epics.EpicsDigitalMoveable',
epicstimeout = 3.0,
description = 'Exposure valid signal',
readpv = pvprefix + 'AUX',
writepv = pvprefix + 'AUX',
lowlevel = True,
),
cam_valid = device('nicos.devices.epics.EpicsDigitalMoveable',
epicstimeout = 3.0,
description = 'Metadata valid signal',
readpv = pvprefix + 'VALID',
writepv = pvprefix + 'VALID',
lowlevel = True,
),
camini = device('nicos_sinq.icon.devices.camini.CaminiDetector',
epicstimeout = 3.0,
description = 'Synchronization with the CAMINI camera '
'software',
trigpv = pvprefix + 'TRIG',
validpv = pvprefix + 'VALID',
metapv = pvprefix + 'META',
shutpv = pvprefix + 'SHUTTER',
armpv = pvprefix + 'ARM',
filepv = pvprefix + 'FILE',
shutter = 'exp_shutter',
auto = 'exp_auto',
beam_current = 'beam_current',
rate_threshold = 'exp_threshold',
arm_timeout = 5.0,
shutter_timeout = 5.0,
exposure_timeout = 300.0,
lowlevel = False
),
exp_threshold = device('nicos.devices.epics.EpicsAnalogMoveable',
description = 'Exposure threshold',
readpv = pvprefix_sumi + 'THRES',
writepv = pvprefix_sumi + 'THRES',
abslimits = (-100, 2000),
epicstimeout = 3.0
),
exp_ok = device('nicos.devices.epics.EpicsReadable',
description = 'Exposure sufficient',
readpv = pvprefix + 'AUX',
epicstimeout = 3.0
),
exp_avg = device('nicos.devices.epics.EpicsReadable',
description = 'Average exposure',
readpv = pvprefix_sumi + 'BEAMAVG',
epicstimeout = 3.0
),
beam_current = device('nicos.devices.epics.EpicsReadable',
description = 'Beam current',
readpv = pvprefix_ai + 'V4',
epicstimeout = 3.0
),
exp_time = device('nicos.devices.epics.EpicsReadable',
description = 'Exposure time',
readpv = pvprefix_sumi + 'EXPTIME',
epicstimeout = 3.0
),
oracle = device('nicos_sinq.icon.devices.beamoracle.BeamOracle',
description = 'Device to sum proton count',
pvprefix = pvprefix_sumi,
lowlevel = True,
epicstimeout = 3.0
),
camera = device('nicos_sinq.icon.devices.ccdcontrol.NIAGControl',
description = 'Count control for NIAG CCD detectors',
trigger = 'camini',
followers = ['oracle'],
rate_monitor = 'oracle',
rate_threshold = 'exp_threshold',
exp_ok = 'exp_ok',
)
)
startupcode = '''
SetDetectors(camera)
'''
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Using an Elman network (a simple locally recurrent network)
@author: simon
"""
import sys,time
import getopt
import numpy
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
from collections import OrderedDict
import copy
import utilities.datagenerator as DG
reload(DG)
compile_mode = 'FAST_COMPILE'
theano.config.exception_verbosity = 'low'
dtype=theano.config.floatX
class RNN(object):
def __init__(self,
build_method=0, # 0: RNN
init_method=0, # 0: normal 1: uniform
n_input=7,n_hidden=5,n_output=1,
batch_size=1,
continue_train=False):
        # Set the network parameters
self.n_input = n_input
self.n_hidden = n_hidden
self.n_output = n_output
self.n_predict = 150
self.continue_train = continue_train
if continue_train:
build_method = 1
else:
batch_size = 1
self.build_method = build_method
self.init_method = init_method
self.batch_size = batch_size
self.patience = 100
self.valid_fre = 20
        self.h_init = theano.shared(numpy.zeros((1,n_hidden), dtype=dtype), name='h_init') # initial value of the hidden layer
mu,sigma = 0.0, 0.1
if init_method == 0:
self.W_in = [theano.shared(numpy.random.normal(size=(1, n_hidden),
loc=mu, scale=sigma).astype(dtype),
name='W_in' + str(u)) for u in range(n_input)]
self.b_in = theano.shared(numpy.zeros((n_hidden,), dtype=dtype), name="b_in")
self.W_hid = theano.shared(numpy.random.normal(size=(n_hidden, n_hidden),
loc=mu, scale=sigma).astype(dtype), name='W_hid')
self.W_out = theano.shared(numpy.random.normal(size=(n_hidden,n_output),
loc=mu,scale=sigma).astype(dtype),name="W_out")
self.b_out = theano.shared(numpy.zeros((n_output,), dtype=dtype),name="b_out")
else:
self.W_in = [theano.shared(numpy.random.uniform(size=(1, n_hidden),
low=-0.01, high=0.01).astype(dtype),
name='W_in' + str(u)) for u in range(n_input)]
self.b_in = theano.shared(numpy.zeros((n_hidden,), dtype=dtype), name="b_in")
self.W_hid = theano.shared(numpy.random.uniform(size=(n_hidden, n_hidden),
low=-0.01, high=0.01).astype(dtype), name='W_hid')
self.W_out = theano.shared(numpy.random.uniform(size=(n_hidden,n_output),
low=-0.01,high=0.01).astype(dtype),name="W_out")
self.b_out = theano.shared(numpy.zeros((n_output,), dtype=dtype),name="b_out")
def set_init_parameters(self, SEED, P0, Qw0):
numpy.random.seed(SEED)
mu,sigma = 0.0, 0.1
for i in self.W_in:
i.set_value(numpy.random.normal(size=(1, self.n_hidden), loc=mu, scale=sigma))
self.b_in.set_value( numpy.zeros((self.n_hidden,), dtype=dtype))
self.W_hid.set_value(numpy.random.normal(size=(self.n_hidden, self.n_hidden), loc=mu, scale=sigma))
# self.W_hid.set_value(numpy.eye(self.n_hidden))
self.W_out.set_value(numpy.random.normal(size=(self.n_hidden, self.n_output), loc=mu, scale=sigma))
self.b_out.set_value(numpy.zeros((self.n_output,), dtype=dtype))
self.h_init.set_value(numpy.zeros((1,self.n_hidden), dtype=dtype))
self.P.set_value(numpy.eye(self.P.get_value().shape[0]) * numpy.asarray(P0, dtype=dtype))
self.Qw.set_value(numpy.eye(self.Qw.get_value().shape[0])* numpy.asarray(Qw0, dtype=dtype))
self.Qv.set_value(numpy.eye(self.Qv.get_value().shape[0])* numpy.asarray(0.01, dtype=dtype))
def step(self, *args):
x = [args[u] for u in xrange(self.n_input)]
hid_taps = args[self.n_input]
h = T.dot(x[0], self.W_in[0])
        for j in xrange(1, self.n_input): # feed-forward part
            h += T.dot(x[j], self.W_in[j])
        h += T.dot(hid_taps, self.W_hid) # recurrent part
        h += self.b_in # bias part
        h = T.tanh(h)
        y = T.dot(h,self.W_out) + self.b_out # linear output
return h, y
def gen_drive_sin(self,sampleNum,N):
'''
生成一个长度为sampleNum, 周期为N的正弦信号
'''
data = 1.0 * numpy.sin(2 * numpy.pi / N * numpy.arange(sampleNum))
return data
def prepare_data(self, data_x, data_mask, data_y):
'''
将数据分为训练集,验证集和测试集
注意,因为要进行hstack, 行向量会变为列向量
'''
data_len = len(data_y)
train_end = numpy.floor(data_len * 0.5)
test_end = numpy.floor(data_len * 0.8)
if data_x.ndim == 1:
data_x.resize((data_x.shape[0],1))
if data_mask != [] and data_mask.ndim == 1:
data_mask.resize((data_mask.shape[0],1))
if data_y.ndim == 1:
data_y.resize((data_y.shape[0],1))
if data_mask == []:
allData = numpy.concatenate((data_x,data_y), axis=1)
else:
allData = numpy.concatenate((data_x,data_mask,data_y), axis=1)
train_data = allData[:train_end,...]
test_data = allData[train_end:test_end,...]
valid_data = allData[test_end:,...]
return train_data, valid_data, test_data
def build_model(self):
        # Build the network
        x_in = T.vector() # input vector; the first dimension is time
        y_out = T.vector() # output vector
        lr = T.scalar() # learning rate, a scalar
        H = T.matrix() # initial values of the hidden units
start_time = time.clock()
input_taps = range(1-self.n_input, 1)
output_taps = [-1]
        [h_tmp,y], _ = theano.scan(self.step, # function that computes the BPTT recursion
                                   sequences=dict(input=x_in, taps=input_taps), # taken from the output values with a delay of -1
                                   outputs_info=[dict(initial = H, taps=output_taps), None])
y = T.flatten(y)
params = []
params.extend(self.W_in)
params.extend([self.b_in])
params.extend([self.W_hid])
params.extend([self.W_out])
params.extend([self.b_out])
update_W, self.P, self.Qw, self.Qv, cost = DG.PublicFunction.extend_kalman_train(params, y, self.batch_size, y_out)
self.f_train = theano.function([x_in, y_out], [cost, h_tmp[-self.batch_size]], updates=update_W,
name='EKF_f_train',
mode=compile_mode,
givens=[(H, self.h_init)])
self.sim_fn = theano.function([x_in], outputs=y, givens=[(H, self.h_init)])
self.pred_cost = theano.function([x_in, y_out], outputs=cost, givens=[(H, self.h_init)])
print 'build time (%.5fs)' % ((time.clock() - start_time) / 1.)
def train(self, SEED, n_epochs, noise, P0, Qw0):
        # Load the data to be processed
g = DG.Generator()
data_x,data_y = g.get_data('mackey_glass')
# data_x,data_y = g.get_data('sea_clutter_lo')
print data_x.shape
noise_begin = int(data_x.shape[0] * 0.65)
noise_end = int(data_x.shape[0] * 0.7)
data_x[noise_begin:noise_end] += 0.1*self.gen_drive_sin(noise_end-noise_begin,10)
normal_noise = numpy.random.normal(size=data_x.shape, loc=0, scale=0.02)
# data_x += normal_noise
plt.figure(123)
plt.plot(normal_noise,'r')
plt.plot(data_x,'b')
data_y = data_x
train_data, valid_data, test_data = self.prepare_data(data_x, [], data_y) # data_x 会成为列向量
print 'train info:', train_data.shape
print 'valid info:', valid_data.shape
print 'test info:', test_data.shape
self.history_errs = numpy.zeros((n_epochs*train_data.shape[0],3), dtype=dtype)
history_errs_cur_index= 0
bad_counter = 0
start_time = time.clock()
mu_noise, sigma_noise = 0, noise
self.saveto = 'MaskRNN_b{}_i{}_h{}_nh{}_S{}._p{}.npz'.format(
self.build_method, self.init_method, self.n_hidden, sigma_noise, SEED,n_epochs)
print 'Result will be saved to: ',self.saveto
print "noise level:", mu_noise, sigma_noise
# initialize the parameters
self.set_init_parameters(SEED, P0, Qw0)
for epochs_index in xrange(n_epochs) :
kf = DG.DataPrepare.get_seq_minibatches_idx(train_data.shape[0], self.batch_size, self.n_input, shuffle=False)
for batch_index, train_index in kf:
sub_seq = train_data[train_index,1]
_x, _y = DG.PublicFunction.data_get_data_x_y(sub_seq, self.n_input)
train_err, h_init_continue = self.f_train(_x, _y)
if self.continue_train:
# sigma_noise = numpy.sqrt(numpy.max(self.Qw.get_value()))
noise_add = numpy.random.normal(size=(1,self.n_hidden), loc=mu_noise, scale=sigma_noise)
self.h_init.set_value(h_init_continue + noise_add)
# self.h_init.set_value(numpy.random.normal(size=(1,self.n_hidden), loc=0, scale=0.5))
# else:
# self.h_init.set_value(h_init_continue)
# print '{}.{}: online train error={:.6f}'.format(epochs_index, batch_index, float(train_err))
if numpy.mod(batch_index+1, self.valid_fre) == 0:
train_err = self.pred_cost(train_data[:-1,0], train_data[self.n_input:,1]) / train_data.shape[0]
test_err = self.pred_cost(test_data[:-1,0], test_data[self.n_input:,1]) / test_data.shape[0]
valid_err = self.pred_cost(valid_data[:-1,0], valid_data[self.n_input:,1]) / valid_data.shape[0]
print '{}: train error={:.6f}, valid error={:.6f}, test error={:.6f}'.format(
epochs_index, float(train_err), float(valid_err), float(test_err))
self.history_errs[history_errs_cur_index,:] = [train_err, valid_err, test_err]
history_errs_cur_index += 1
if valid_err <= self.history_errs[:history_errs_cur_index,1].min():
bad_counter = 0
if history_errs_cur_index > self.patience and valid_err >= self.history_errs[:history_errs_cur_index-self.patience,1].min():
bad_counter += 1
if bad_counter > self.patience * train_data.shape[0]:
print 'Early Stop!'
break
self.history_errs = self.history_errs[:history_errs_cur_index,:]
# compute the multi-step (free-run) prediction error
x_train_end = train_data[-self.n_input:,0]
if self.continue_train:
self.h_init.set_value(h_init_continue)
y_predict = numpy.zeros((self.n_predict,))
cumulative_error = 0
cumulative_error_list = numpy.zeros((self.n_predict,))
for i in numpy.arange(self.n_predict):
y_predict[i] = self.sim_fn(x_train_end)
x_train_end[:-1] = x_train_end[1:]
x_train_end[-1] = y_predict[i]
cumulative_error += numpy.abs(y_predict[i] - test_data[i,1])
cumulative_error_list[i] = cumulative_error
# compute the overall one-step prediction error
y_sim = self.sim_fn(data_x[:-1,0])
print 'y_sim.shape: ', y_sim.shape
# save the results
numpy.savez(self.saveto, cumulative_error=cumulative_error_list,
history_errs = self.history_errs)
print 'Result have been saved to: ',self.saveto
# keep data for plotting
self.data_x = data_x
self.data_y = data_y
self.train_data = train_data
self.test_data = test_data
self.valid_data = valid_data
self.y_sim = y_sim
self.y_predict = y_predict
self.cumulative_error_list = cumulative_error_list
print 'train time (%.5fs)' % ((time.clock() - start_time) / 1.)
def plot_data(self):
plt.figure(1)
plt.plot(numpy.arange(self.n_predict), self.cumulative_error_list)
plt.title('cumulative error')
plt.grid(True)
plt.figure(2)
plt.plot(numpy.arange(self.y_predict.shape[0]), self.y_predict,'r')
plt.plot(numpy.arange(self.y_predict.shape[0]), self.test_data[:self.y_predict.shape[0],-1],'g')
plt.figure(3)
index_start = self.data_x.shape[0]-self.y_sim.shape[0]
index_train_end = self.train_data.shape[0]
index_test_end = index_train_end + self.test_data.shape[0]
index_valid_end = index_test_end + self.valid_data.shape[0]
train_index = numpy.arange(index_train_end-index_start)
test_index = numpy.arange(index_train_end-index_start,index_test_end-index_start)
valid_index = numpy.arange(index_test_end-index_start,index_valid_end-index_start)
plt.plot(train_index, self.y_sim[train_index],'r')
plt.plot(test_index, self.y_sim[test_index],'y')
plt.plot(valid_index, self.y_sim[valid_index],'b')
plt.plot(self.data_y[self.n_input:],'k') # original signal
plt.plot(self.y_sim-self.data_y[self.n_input:,0], 'g')
plt.figure(4)
plt.plot( self.history_errs[:,0], 'r')
plt.plot( self.history_errs[:,1], 'g')
plt.plot( self.history_errs[:,2], 'b')
plt.show()
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], "pcs:i:h:o:n:",
["plot","continue","seed=", "input=","hidden=","output=","epochs="])
except getopt.GetoptError:
print 'Parameter error!'
sys.exit()
SEED = 8
n_input=10
n_hidden=7
n_output=1
n_epochs=10
noise = 0.5
P0 = 10
Qw0 = 10
b_plot = False
continue_train = False
for o, a in opts:
if o in ("-p","--plot"):
b_plot = True
if o in ("-c","--continue"):
continue_train = True
if o in ("-s", "--seed"):
SEED = int(a)
if o in ("-i", "--input"):
n_input = int(a)
if o in ("-h", "--hidden"):
n_hidden = int(a)
if o in ("-o", "--output"):
n_output = int(a)
if o in ("-n", "--epochs"):
n_epochs = int(a)
rnn = RNN( n_input=n_input, n_hidden=n_hidden, n_output=n_output, continue_train = continue_train)
rnn.build_model()
rnn.train(SEED, n_epochs,noise,P0,Qw0)
if b_plot:
rnn.plot_data()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
setup(name='sprinter',
version='1.4.2',
description='a utility library to help environment bootstrapping scripts',
long_description=open('README.rst').read(),
author='Yusuke Tsutsumi',
author_email='yusuke@yusuketsutsumi.com',
url='http://toumorokoshi.github.io/sprinter',
packages=find_packages(),
install_requires=[
'clint>=0.3.3',
'docopt>=0.6.1',
# TODO: test this
'pip>=19.2',
'requests>=2.3.0',
'six>=1.4.1',
'virtualenv>=15.1.0,<16',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Operating System :: MacOS',
'Operating System :: POSIX :: Linux',
'Topic :: System :: Software Distribution',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
entry_points={
'console_scripts': [
'sprinter = sprinter.install:main'
]
},
tests_require=['mock>=1.0.1', 'nose>=1.3.0', 'httpretty==0.6.5'],
test_suite='nose.collector'
)
|
nilq/baby-python
|
python
|
"""
Script for calculating GMM predictive
"""
import numpy as np
from scipy.stats import norm
import copy
import scipy as sp
from sklearn.mixture import GaussianMixture
from sklearn.mixture.gaussian_mixture import _compute_precision_cholesky
import npl.sk_gaussian_mixture as skgm
def lppd(y,pi,mu,sigma,K): #calculate posterior predictive of test
model = skgm.GaussianMixture(K, covariance_type = 'diag')
B = np.shape(mu)[0]
N_test = np.shape(y)[0]
ll_test = np.zeros((B,N_test))
model.fit(y,np.ones(N_test))
for i in range(B):
model.means_ = mu[i,:]
model.covariances_ = sigma[i,:]**2
model.precisions_ = 1/(sigma[i,:]**2)
model.weights_ = pi[i,:]
model.precisions_cholesky_ = _compute_precision_cholesky(model.covariances_, model.covariance_type)
ll_test[i] = model.score_lppd(y)
lppd_test = np.sum(sp.special.logsumexp(ll_test,axis = 0)- np.log(B))
return lppd_test
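# Usage sketch (the shapes below are assumptions, not taken from the original code):
# given B stacked posterior draws with mixture weights pi of shape (B, K), component
# means mu of shape (B, K, D) and standard deviations sigma of shape (B, K, D), the
# held-out log pointwise predictive density of y_test with shape (N_test, D) would be
#   lppd_test = lppd(y_test, pi, mu, sigma, K)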
|
nilq/baby-python
|
python
|
from decimal import Decimal
from django.core import exceptions
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
from oscar.core.loading import get_model, get_class
Benefit = get_model("offer", "Benefit")
HiddenPostOrderAction = get_class("offer.results", "HiddenPostOrderAction")
class FinancingPlan(models.Model):
"""
An individual WFRS plan number and related metadata about it
"""
plan_number = models.PositiveIntegerField(
_("Plan Number"),
unique=True,
validators=[
MinValueValidator(1001),
MaxValueValidator(9999),
],
)
description = models.TextField(_("Description"), blank=True, default="")
fine_print_superscript = models.CharField(
_("Fine Print Superscript"), blank=True, default="", max_length=10
)
apr = models.DecimalField(
_("Annual percentage rate (0.0 – 100.0)"),
max_digits=5,
decimal_places=2,
default="0.00",
validators=[
MinValueValidator(Decimal("0.00")),
MaxValueValidator(Decimal("100.00")),
],
)
term_months = models.PositiveSmallIntegerField(
_("Term Length (months)"), default=12
)
product_price_threshold = models.DecimalField(
_("Minimum Product Price for Plan Availability Advertising"),
decimal_places=2,
max_digits=12,
default="0.00",
validators=[MinValueValidator(Decimal("0.00"))],
)
advertising_enabled = models.BooleanField(
_("Is Advertising Enabled for Plan?"), default=False
)
is_default_plan = models.BooleanField(_("Is Default Plan?"), default=False)
allow_credit_application = models.BooleanField(
_("Allow new credit applications when user is eligible for this plan?"),
default=True,
)
class Meta:
ordering = ("plan_number",)
verbose_name = _("Financing Plan")
verbose_name_plural = _("Financing Plans")
@classmethod
def get_advertisable_plan_by_price(cls, price):
plan = (
cls.objects.exclude(term_months=0)
.filter(advertising_enabled=True)
.filter(product_price_threshold__gte="0.00")
.filter(product_price_threshold__lte=price)
.order_by("-product_price_threshold", "-apr")
.first()
)
return plan
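# Usage sketch (illustrative value only): pick the advertisable plan with the highest
# price threshold not exceeding the product price, ties broken by highest APR.
# plan = FinancingPlan.get_advertisable_plan_by_price(Decimal("499.00"))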
def __str__(self):
return _("%(description)s (plan number %(number)s)") % dict(
description=self.description, number=self.plan_number
)
def save(self, *args, **kwargs):
if self.is_default_plan:
self.__class__._default_manager.filter(is_default_plan=True).update(
is_default_plan=False
)
super().save(*args, **kwargs)
class FinancingPlanBenefit(Benefit):
"""
A group of WFRS plan numbers made available to a customer as the applied benefit of an offer or voucher. This
makes it possible to offer different plan numbers to different customers based on any of the normal offer conditions.
"""
group_name = models.CharField(_("Name"), max_length=200)
plans = models.ManyToManyField(FinancingPlan)
class Meta(Benefit.Meta):
app_label = "wellsfargo"
verbose_name = _("Financing Plan Benefit")
verbose_name_plural = _("Financing Plan Benefits")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.proxy_class = "wellsfargo.models.%s" % self.__class__.__name__
def __str__(self):
return self.group_name
def apply(self, basket, condition, offer):
condition.consume_items(offer, basket, [])
return HiddenPostOrderAction(_("Financing is available for your order"))
def apply_deferred(self, basket, order, application):
return _("Financing was available for your order: %s") % self.group_name
@property
def name(self):
return self.group_name
@property
def description(self):
nums = ", ".join([str(p.plan_number) for p in self.plans.all()])
return (
_("Causes the following Wells Fargo financing plans to be available: %s")
% nums
)
def _clean(self):
group_name = getattr(self, "group_name", None)
if not group_name:
raise exceptions.ValidationError(
_(
(
"Wells Fargo Financing Plan Benefit must have a group name. "
"Use the Financing > Wells Fargo Plan Group dashboard to create this type of benefit."
)
)
)
|
nilq/baby-python
|
python
|
"""
Reordering generator for C source code.
This is an ANTLR generated parse tree listener, adapted to
walk a Python parse tree, randomly introduce multi scale reorderings
and regenerate the source code with these reorderings.
"""
import random
from antlr4 import ParseTreeWalker
from antlr4.tree.Tree import TerminalNodeImpl
from parsers.C.CListener import CListener
from parsers.C.CParser import CParser
class CGenerator(CListener):
"""
Parse Tree Listener for the Python language.
Enter- and exit functions generated by ANTLR.
"""
MODES = {
"SUB_STATEMENT": 0,
"STATEMENTS": 1,
"FUNCTIONS": 2,
"CONDITIONALS": 3
}
MODE = MODES["STATEMENTS"]
SMALL_REORDERED_TYPES = [
CParser.ParameterListContext, # Function parameters
CParser.ArgumentExpressionListContext, # Arguments in function call
CParser.MultiplicativeExpressionContext, # +, -
CParser.AdditiveExpressionContext, # *, /, %
]
SMALL_STATEMENTS = [
CParser.ExpressionStatementContext,
CParser.DeclarationContext,
CParser.JumpStatementContext
]
TOP_LEVEL_REORDERED_TYPES = [
CParser.TranslationUnitContext
]
def __init__(self, tree, file_name):
super().__init__()
self.tree = tree
self.hashed_tree = None
self.current = None
self.sorted_trees = {}
self.sub_tree_sizes = []
self.out_file = '/home/philo/Documents/uva/Jaar_3/thesis/CRDS/synthetic_data/reordered_statements/C/Graphics/' + file_name.split('/')[-1]
self.reorderings_executed = 0
def start(self):
walker = ParseTreeWalker()
walker.walk(self, self.tree)
def is_function(self, ctx):
is_function = False
filtered = [c for c in ctx.children if type(c) != TerminalNodeImpl]
while len(filtered) > 0:
c_ctx = filtered[0]
if type(c_ctx) == CParser.FunctionDefinitionContext:
is_function = True
break
filtered = [c for c in c_ctx.children if type(c) != TerminalNodeImpl]
return is_function
def is_small_stmt(self, ctx):
is_small_stmt = False
filtered = [c for c in ctx.children if type(c) != TerminalNodeImpl]
while len(filtered) == 1:
c_ctx = filtered[0]
if type(c_ctx) in self.SMALL_STATEMENTS:
is_small_stmt = True
break
filtered = [c for c in c_ctx.children if type(c) != TerminalNodeImpl]
return is_small_stmt
def is_stmt_in_blockitem(self, ctx):
if type(ctx) != CParser.BlockItemContext:
return False
filtered = [c for c in ctx.children if type(c) != TerminalNodeImpl]
statement = filtered[0]
return type(statement) == CParser.StatementContext
def is_case_stmt(self, ctx):
if type(ctx) != CParser.BlockItemContext:
return False
filtered = [c for c in ctx.children if type(c) != TerminalNodeImpl]
statement = filtered[0]
if type(statement) != CParser.StatementContext:
return False
filtered = [c for c in statement.children if type(c) != TerminalNodeImpl]
return type(filtered[0]) == CParser.LabeledStatementContext
def shuffle_children(self, ctx):
"""
Shuffle the children of a Parser context node.
We need to leave TerminalNodeImpl types in the same place
(those are commas, brackets etc.)
"""
reorder = []
indices = []
cases = {}
curr_case = None
is_switch_case = False
for i, child in enumerate(ctx.children):
if type(child) != TerminalNodeImpl:
if ((self.MODE == self.MODES["FUNCTIONS"] and not self.is_function(child)) or
(self.MODE == self.MODES["STATEMENTS"] and not self.is_small_stmt(child))):
continue
elif self.MODE == self.MODES["CONDITIONALS"]:
if type(ctx) == CParser.BlockItemListContext:
if not self.is_stmt_in_blockitem(child):
continue
if self.is_case_stmt(child):
is_switch_case = True
cases[i] = []
curr_case = i
indices.append(i)
elif is_switch_case:
cases[curr_case].append(i)
continue
reorder.append(child)
indices.append(i)
if is_switch_case:
old_indices = list(indices)
if len(indices) < 2:
return
while True:
if indices != old_indices:
break
random.shuffle(indices)
new_children = []
for i in indices:
new_children.append(ctx.children[i])
stmts = [ctx.children[j] for j in cases[i]]
new_children.extend(stmts)
ctx.children = list(new_children)
self.reorderings_executed += 1
else:
old_order = list(reorder)
reordered = False
if len(reorder) < 2:
return
while True:
for i, c in enumerate(reorder):
if id(c) != id(old_order[i]):
reordered = True
break
if reordered:
break
random.shuffle(reorder)
self.reorderings_executed += 1
for j, child in enumerate(reorder):
index = indices[j]
ctx.children[index] = child
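# Illustrative example (hypothetical tree): for an ArgumentExpressionListContext whose
# children are [expr_a, ',', expr_b, ',', expr_c], only expr_a/expr_b/expr_c are
# candidates for shuffling; the TerminalNodeImpl commas keep their original positions.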
def switch_if_else(self, ctx):
if type(ctx) != CParser.SelectionStatementContext:
return
children = [child for child in ctx.children if type(child) != TerminalNodeImpl]
if len(children) != 3:
return
if type(children[0]) != CParser.ExpressionContext:
print("IF WITHOUT CONDITIONAL??")
return
tmp = list(ctx.children)
ctx.children[4] = tmp[6]
ctx.children[6] = tmp[4]
self.reorderings_executed += 1
def enter_rule(self, ctx):
pass
def exit_rule(self, ctx):
"""
If the node is of a type that needs
reordering, reorder its children.
"""
if self.MODE == self.MODES['STATEMENTS']:
self.shuffle_children(ctx)
elif self.MODE == self.MODES["CONDITIONALS"]:
if type(ctx) == CParser.BlockItemListContext:
self.shuffle_children(ctx)
elif type(ctx) == CParser.SelectionStatementContext:
self.switch_if_else(ctx)
elif self.MODE == self.MODES['SUB_STATEMENT']:
if type(ctx) in self.SMALL_REORDERED_TYPES:
self.shuffle_children(ctx)
elif type(ctx) in self.TOP_LEVEL_REORDERED_TYPES:
self.shuffle_children(ctx)
def enterCompilationUnit(self, ctx:CParser.CompilationUnitContext):
"""Compilation Unit subtree, this is the root node."""
self.enter_rule(ctx)
def exitCompilationUnit(self, ctx:CParser.CompilationUnitContext):
self.exit_rule(ctx)
with open(self.out_file, 'w+') as f:
f.write(f'// REORDERINGS EXECUTED: {self.reorderings_executed}\n\n')
f.write(ctx.getText())
# --------------------------------------------------------------------
# Below are all the enter- and exit methods for every ctx type
# --------------------------------------------------------------------
# Enter a parse tree produced by CParser#primaryExpression.
def enterPrimaryExpression(self, ctx:CParser.PrimaryExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#primaryExpression.
def exitPrimaryExpression(self, ctx:CParser.PrimaryExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#genericSelection.
def enterGenericSelection(self, ctx:CParser.GenericSelectionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#genericSelection.
def exitGenericSelection(self, ctx:CParser.GenericSelectionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#genericAssocList.
def enterGenericAssocList(self, ctx:CParser.GenericAssocListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#genericAssocList.
def exitGenericAssocList(self, ctx:CParser.GenericAssocListContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#genericAssociation.
def enterGenericAssociation(self, ctx:CParser.GenericAssociationContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#genericAssociation.
def exitGenericAssociation(self, ctx:CParser.GenericAssociationContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#postfixExpression.
def enterPostfixExpression(self, ctx:CParser.PostfixExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#postfixExpression.
def exitPostfixExpression(self, ctx:CParser.PostfixExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#argumentExpressionList.
def enterArgumentExpressionList(self, ctx:CParser.ArgumentExpressionListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#argumentExpressionList.
def exitArgumentExpressionList(self, ctx:CParser.ArgumentExpressionListContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#unaryExpression.
def enterUnaryExpression(self, ctx:CParser.UnaryExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#unaryExpression.
def exitUnaryExpression(self, ctx:CParser.UnaryExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#unaryOperator.
def enterUnaryOperator(self, ctx:CParser.UnaryOperatorContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#unaryOperator.
def exitUnaryOperator(self, ctx:CParser.UnaryOperatorContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#castExpression.
def enterCastExpression(self, ctx:CParser.CastExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#castExpression.
def exitCastExpression(self, ctx:CParser.CastExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#multiplicativeExpression.
def enterMultiplicativeExpression(self, ctx:CParser.MultiplicativeExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#multiplicativeExpression.
def exitMultiplicativeExpression(self, ctx:CParser.MultiplicativeExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#additiveExpression.
def enterAdditiveExpression(self, ctx:CParser.AdditiveExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#additiveExpression.
def exitAdditiveExpression(self, ctx:CParser.AdditiveExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#shiftExpression.
def enterShiftExpression(self, ctx:CParser.ShiftExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#shiftExpression.
def exitShiftExpression(self, ctx:CParser.ShiftExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#relationalExpression.
def enterRelationalExpression(self, ctx:CParser.RelationalExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#relationalExpression.
def exitRelationalExpression(self, ctx:CParser.RelationalExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#equalityExpression.
def enterEqualityExpression(self, ctx:CParser.EqualityExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#equalityExpression.
def exitEqualityExpression(self, ctx:CParser.EqualityExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#andExpression.
def enterAndExpression(self, ctx:CParser.AndExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#andExpression.
def exitAndExpression(self, ctx:CParser.AndExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#exclusiveOrExpression.
def enterExclusiveOrExpression(self, ctx:CParser.ExclusiveOrExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#exclusiveOrExpression.
def exitExclusiveOrExpression(self, ctx:CParser.ExclusiveOrExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#inclusiveOrExpression.
def enterInclusiveOrExpression(self, ctx:CParser.InclusiveOrExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#inclusiveOrExpression.
def exitInclusiveOrExpression(self, ctx:CParser.InclusiveOrExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#logicalAndExpression.
def enterLogicalAndExpression(self, ctx:CParser.LogicalAndExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#logicalAndExpression.
def exitLogicalAndExpression(self, ctx:CParser.LogicalAndExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#logicalOrExpression.
def enterLogicalOrExpression(self, ctx:CParser.LogicalOrExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#logicalOrExpression.
def exitLogicalOrExpression(self, ctx:CParser.LogicalOrExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#conditionalExpression.
def enterConditionalExpression(self, ctx:CParser.ConditionalExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#conditionalExpression.
def exitConditionalExpression(self, ctx:CParser.ConditionalExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#assignmentExpression.
def enterAssignmentExpression(self, ctx:CParser.AssignmentExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#assignmentExpression.
def exitAssignmentExpression(self, ctx:CParser.AssignmentExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#assignmentOperator.
def enterAssignmentOperator(self, ctx:CParser.AssignmentOperatorContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#assignmentOperator.
def exitAssignmentOperator(self, ctx:CParser.AssignmentOperatorContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#expression.
def enterExpression(self, ctx:CParser.ExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#expression.
def exitExpression(self, ctx:CParser.ExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#constantExpression.
def enterConstantExpression(self, ctx:CParser.ConstantExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#constantExpression.
def exitConstantExpression(self, ctx:CParser.ConstantExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#declaration.
def enterDeclaration(self, ctx:CParser.DeclarationContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#declaration.
def exitDeclaration(self, ctx:CParser.DeclarationContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#declarationSpecifiers.
def enterDeclarationSpecifiers(self, ctx:CParser.DeclarationSpecifiersContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#declarationSpecifiers.
def exitDeclarationSpecifiers(self, ctx:CParser.DeclarationSpecifiersContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#declarationSpecifiers2.
def enterDeclarationSpecifiers2(self, ctx:CParser.DeclarationSpecifiers2Context):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#declarationSpecifiers2.
def exitDeclarationSpecifiers2(self, ctx:CParser.DeclarationSpecifiers2Context):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#declarationSpecifier.
def enterDeclarationSpecifier(self, ctx:CParser.DeclarationSpecifierContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#declarationSpecifier.
def exitDeclarationSpecifier(self, ctx:CParser.DeclarationSpecifierContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#initDeclaratorList.
def enterInitDeclaratorList(self, ctx:CParser.InitDeclaratorListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#initDeclaratorList.
def exitInitDeclaratorList(self, ctx:CParser.InitDeclaratorListContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#initDeclarator.
def enterInitDeclarator(self, ctx:CParser.InitDeclaratorContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#initDeclarator.
def exitInitDeclarator(self, ctx:CParser.InitDeclaratorContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#storageClassSpecifier.
def enterStorageClassSpecifier(self, ctx:CParser.StorageClassSpecifierContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#storageClassSpecifier.
def exitStorageClassSpecifier(self, ctx:CParser.StorageClassSpecifierContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#typeSpecifier.
def enterTypeSpecifier(self, ctx:CParser.TypeSpecifierContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#typeSpecifier.
def exitTypeSpecifier(self, ctx:CParser.TypeSpecifierContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#structOrUnionSpecifier.
def enterStructOrUnionSpecifier(self, ctx:CParser.StructOrUnionSpecifierContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#structOrUnionSpecifier.
def exitStructOrUnionSpecifier(self, ctx:CParser.StructOrUnionSpecifierContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#structOrUnion.
def enterStructOrUnion(self, ctx:CParser.StructOrUnionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#structOrUnion.
def exitStructOrUnion(self, ctx:CParser.StructOrUnionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#structDeclarationList.
def enterStructDeclarationList(self, ctx:CParser.StructDeclarationListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#structDeclarationList.
def exitStructDeclarationList(self, ctx:CParser.StructDeclarationListContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#structDeclaration.
def enterStructDeclaration(self, ctx:CParser.StructDeclarationContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#structDeclaration.
def exitStructDeclaration(self, ctx:CParser.StructDeclarationContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#specifierQualifierList.
def enterSpecifierQualifierList(self, ctx:CParser.SpecifierQualifierListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#specifierQualifierList.
def exitSpecifierQualifierList(self, ctx:CParser.SpecifierQualifierListContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#structDeclaratorList.
def enterStructDeclaratorList(self, ctx:CParser.StructDeclaratorListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#structDeclaratorList.
def exitStructDeclaratorList(self, ctx:CParser.StructDeclaratorListContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#structDeclarator.
def enterStructDeclarator(self, ctx:CParser.StructDeclaratorContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#structDeclarator.
def exitStructDeclarator(self, ctx:CParser.StructDeclaratorContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#enumSpecifier.
def enterEnumSpecifier(self, ctx:CParser.EnumSpecifierContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#enumSpecifier.
def exitEnumSpecifier(self, ctx:CParser.EnumSpecifierContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#enumeratorList.
def enterEnumeratorList(self, ctx:CParser.EnumeratorListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#enumeratorList.
def exitEnumeratorList(self, ctx:CParser.EnumeratorListContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#enumerator.
def enterEnumerator(self, ctx:CParser.EnumeratorContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#enumerator.
def exitEnumerator(self, ctx:CParser.EnumeratorContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#enumerationConstant.
def enterEnumerationConstant(self, ctx:CParser.EnumerationConstantContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#enumerationConstant.
def exitEnumerationConstant(self, ctx:CParser.EnumerationConstantContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#atomicTypeSpecifier.
def enterAtomicTypeSpecifier(self, ctx:CParser.AtomicTypeSpecifierContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#atomicTypeSpecifier.
def exitAtomicTypeSpecifier(self, ctx:CParser.AtomicTypeSpecifierContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#typeQualifier.
def enterTypeQualifier(self, ctx:CParser.TypeQualifierContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#typeQualifier.
def exitTypeQualifier(self, ctx:CParser.TypeQualifierContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#functionSpecifier.
def enterFunctionSpecifier(self, ctx:CParser.FunctionSpecifierContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#functionSpecifier.
def exitFunctionSpecifier(self, ctx:CParser.FunctionSpecifierContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#alignmentSpecifier.
def enterAlignmentSpecifier(self, ctx:CParser.AlignmentSpecifierContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#alignmentSpecifier.
def exitAlignmentSpecifier(self, ctx:CParser.AlignmentSpecifierContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#declarator.
def enterDeclarator(self, ctx:CParser.DeclaratorContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#declarator.
def exitDeclarator(self, ctx:CParser.DeclaratorContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#directDeclarator.
def enterDirectDeclarator(self, ctx:CParser.DirectDeclaratorContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#directDeclarator.
def exitDirectDeclarator(self, ctx:CParser.DirectDeclaratorContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#gccDeclaratorExtension.
def enterGccDeclaratorExtension(self, ctx:CParser.GccDeclaratorExtensionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#gccDeclaratorExtension.
def exitGccDeclaratorExtension(self, ctx:CParser.GccDeclaratorExtensionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#gccAttributeSpecifier.
def enterGccAttributeSpecifier(self, ctx:CParser.GccAttributeSpecifierContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#gccAttributeSpecifier.
def exitGccAttributeSpecifier(self, ctx:CParser.GccAttributeSpecifierContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#gccAttributeList.
def enterGccAttributeList(self, ctx:CParser.GccAttributeListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#gccAttributeList.
def exitGccAttributeList(self, ctx:CParser.GccAttributeListContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#gccAttribute.
def enterGccAttribute(self, ctx:CParser.GccAttributeContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#gccAttribute.
def exitGccAttribute(self, ctx:CParser.GccAttributeContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#nestedParenthesesBlock.
def enterNestedParenthesesBlock(self, ctx:CParser.NestedParenthesesBlockContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#nestedParenthesesBlock.
def exitNestedParenthesesBlock(self, ctx:CParser.NestedParenthesesBlockContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#pointer.
def enterPointer(self, ctx:CParser.PointerContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#pointer.
def exitPointer(self, ctx:CParser.PointerContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#typeQualifierList.
def enterTypeQualifierList(self, ctx:CParser.TypeQualifierListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#typeQualifierList.
def exitTypeQualifierList(self, ctx:CParser.TypeQualifierListContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#parameterTypeList.
def enterParameterTypeList(self, ctx:CParser.ParameterTypeListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#parameterTypeList.
def exitParameterTypeList(self, ctx:CParser.ParameterTypeListContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#parameterList.
def enterParameterList(self, ctx:CParser.ParameterListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#parameterList.
def exitParameterList(self, ctx:CParser.ParameterListContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#parameterDeclaration.
def enterParameterDeclaration(self, ctx:CParser.ParameterDeclarationContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#parameterDeclaration.
def exitParameterDeclaration(self, ctx:CParser.ParameterDeclarationContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#identifierList.
def enterIdentifierList(self, ctx:CParser.IdentifierListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#identifierList.
def exitIdentifierList(self, ctx:CParser.IdentifierListContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#typeName.
def enterTypeName(self, ctx:CParser.TypeNameContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#typeName.
def exitTypeName(self, ctx:CParser.TypeNameContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#abstractDeclarator.
def enterAbstractDeclarator(self, ctx:CParser.AbstractDeclaratorContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#abstractDeclarator.
def exitAbstractDeclarator(self, ctx:CParser.AbstractDeclaratorContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#directAbstractDeclarator.
def enterDirectAbstractDeclarator(self, ctx:CParser.DirectAbstractDeclaratorContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#directAbstractDeclarator.
def exitDirectAbstractDeclarator(self, ctx:CParser.DirectAbstractDeclaratorContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#typedefName.
def enterTypedefName(self, ctx:CParser.TypedefNameContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#typedefName.
def exitTypedefName(self, ctx:CParser.TypedefNameContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#initializer.
def enterInitializer(self, ctx:CParser.InitializerContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#initializer.
def exitInitializer(self, ctx:CParser.InitializerContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#initializerList.
def enterInitializerList(self, ctx:CParser.InitializerListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#initializerList.
def exitInitializerList(self, ctx:CParser.InitializerListContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#designation.
def enterDesignation(self, ctx:CParser.DesignationContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#designation.
def exitDesignation(self, ctx:CParser.DesignationContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#designatorList.
def enterDesignatorList(self, ctx:CParser.DesignatorListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#designatorList.
def exitDesignatorList(self, ctx:CParser.DesignatorListContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#designator.
def enterDesignator(self, ctx:CParser.DesignatorContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#designator.
def exitDesignator(self, ctx:CParser.DesignatorContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#staticAssertDeclaration.
def enterStaticAssertDeclaration(self, ctx:CParser.StaticAssertDeclarationContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#staticAssertDeclaration.
def exitStaticAssertDeclaration(self, ctx:CParser.StaticAssertDeclarationContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#statement.
def enterStatement(self, ctx:CParser.StatementContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#statement.
def exitStatement(self, ctx:CParser.StatementContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#labeledStatement.
def enterLabeledStatement(self, ctx:CParser.LabeledStatementContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#labeledStatement.
def exitLabeledStatement(self, ctx:CParser.LabeledStatementContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#compoundStatement.
def enterCompoundStatement(self, ctx:CParser.CompoundStatementContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#compoundStatement.
def exitCompoundStatement(self, ctx:CParser.CompoundStatementContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#blockItemList.
def enterBlockItemList(self, ctx:CParser.BlockItemListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#blockItemList.
def exitBlockItemList(self, ctx:CParser.BlockItemListContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#blockItem.
def enterBlockItem(self, ctx:CParser.BlockItemContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#blockItem.
def exitBlockItem(self, ctx:CParser.BlockItemContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#expressionStatement.
def enterExpressionStatement(self, ctx:CParser.ExpressionStatementContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#expressionStatement.
def exitExpressionStatement(self, ctx:CParser.ExpressionStatementContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#selectionStatement.
def enterSelectionStatement(self, ctx:CParser.SelectionStatementContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#selectionStatement.
def exitSelectionStatement(self, ctx:CParser.SelectionStatementContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#iterationStatement.
def enterIterationStatement(self, ctx:CParser.IterationStatementContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#iterationStatement.
def exitIterationStatement(self, ctx:CParser.IterationStatementContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#forCondition.
def enterForCondition(self, ctx:CParser.ForConditionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#forCondition.
def exitForCondition(self, ctx:CParser.ForConditionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#forDeclaration.
def enterForDeclaration(self, ctx:CParser.ForDeclarationContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#forDeclaration.
def exitForDeclaration(self, ctx:CParser.ForDeclarationContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#forExpression.
def enterForExpression(self, ctx:CParser.ForExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#forExpression.
def exitForExpression(self, ctx:CParser.ForExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#jumpStatement.
def enterJumpStatement(self, ctx:CParser.JumpStatementContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#jumpStatement.
def exitJumpStatement(self, ctx:CParser.JumpStatementContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#translationUnit.
def enterTranslationUnit(self, ctx:CParser.TranslationUnitContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#translationUnit.
def exitTranslationUnit(self, ctx:CParser.TranslationUnitContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#externalDeclaration.
def enterExternalDeclaration(self, ctx:CParser.ExternalDeclarationContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#externalDeclaration.
def exitExternalDeclaration(self, ctx:CParser.ExternalDeclarationContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#functionDefinition.
def enterFunctionDefinition(self, ctx:CParser.FunctionDefinitionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#functionDefinition.
def exitFunctionDefinition(self, ctx:CParser.FunctionDefinitionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#declarationList.
def enterDeclarationList(self, ctx:CParser.DeclarationListContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#declarationList.
def exitDeclarationList(self, ctx:CParser.DeclarationListContext):
self.exit_rule(ctx)
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
from mpi4py import MPI
from .adaptive_calibration import calibration_scale_factor_adaptive
from .dip import dip_scale_factor
from .bandwidth import h_crit_scale_factor
def compute_calibration(calibration_file, test, null, alpha, adaptive=True,
lower_lambda=0, upper_lambda=2.0, comm=MPI.COMM_WORLD):
'''
Compute calibration constant lambda_alpha and save to file
'calibration_file'.
Input:
test - 'dip' or 'bw'.
null - 'shoulder' or 'normal'. Reference
distribution.
alpha - significance level.
adaptive - should adaptive probabilistic bisection
search be used?
lower_lambda - lower bound for lambda_alpha in
bisection search.
upper_lambda - upper bound for lambda_alpha in
bisection search.
comm - MPI communicator.
'''
if comm.Get_rank() == 0:
try:
with open(calibration_file, 'a') as f:
pass # check that it is possible to write to file
except Exception as e:
exc = e
else:
exc = None
else:
exc = None
exc = comm.bcast(exc)
if exc is not None:
raise exc
if adaptive:
return calibration_scale_factor_adaptive(alpha, test, null, lower_lambda, upper_lambda,
comm, calibration_file)
if test == 'dip':
return dip_scale_factor(alpha, null, lower_lambda, upper_lambda,
comm, calibration_file)
if test == 'bw':
return h_crit_scale_factor(alpha, null, lower_lambda, upper_lambda,
comm, calibration_file)
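# Minimal usage sketch (the file name and settings are hypothetical):
# lambda_alpha = compute_calibration('calibration_dip_shoulder.txt', test='dip',
#                                    null='shoulder', alpha=0.05, adaptive=False)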
|
nilq/baby-python
|
python
|
import json
from django import template
from django.contrib.gis.db.models import Extent
from django.contrib.gis.db.models.functions import Envelope, Transform
from django.conf import settings
from django.db.models.functions import Coalesce
from django.urls import reverse
from geotrek.zoning.models import District, City, RestrictedArea, RestrictedAreaType
register = template.Library()
def get_bbox_cities():
return City.objects.annotate(label=Coalesce("name", "code"), extent=Extent(Transform(Envelope('geom'), settings.API_SRID))).\
values_list('label', 'extent').order_by('label')
def get_bbox_districts():
return District.objects.annotate(extent=Extent(Transform(Envelope('geom'), settings.API_SRID))).\
values_list('name', 'extent').order_by('name')
def get_bbox_areas():
return RestrictedArea.objects.annotate(extent=Extent(Transform(Envelope('geom'), settings.API_SRID))).\
values_list('name', 'extent').order_by('name')
@register.inclusion_tag('zoning/_bbox_fragment.html')
def combobox_bbox_land():
cities = get_bbox_cities() if settings.LAND_BBOX_CITIES_ENABLED else []
districts = get_bbox_districts() if settings.LAND_BBOX_DISTRICTS_ENABLED else []
areas = get_bbox_areas() if settings.LAND_BBOX_AREAS_ENABLED else []
return {
'bbox_cities': cities,
'bbox_districts': districts,
'bbox_areas': areas
}
@register.simple_tag
def restricted_area_types():
all_used_types = RestrictedArea.objects.values_list('area_type', flat=True)
used_types = RestrictedAreaType.objects.filter(pk__in=all_used_types)
serialized = []
for area_type in used_types:
area_type_url = reverse('zoning:restrictedarea_type_layer',
kwargs={'type_pk': area_type.pk})
serialized.append({
'id': 'restrictedarea',
'name': area_type.name,
'url': area_type_url
})
return json.dumps(serialized)
@register.simple_tag
def restricted_areas_by_type():
restricted_areas_by_type = {
str(type.pk): {
'areas': [{
str(area.pk): area.area_type.name + " - " + area.name
} for area in type.restrictedarea_set.order_by('name')
] # We use an array instead of dict because JS parsing would re-order JSON dict
}
for type in RestrictedAreaType.objects.all()
}
return json.dumps(restricted_areas_by_type)
@register.simple_tag
def all_restricted_areas():
all_restricted_areas = [{
str(area.pk): area.area_type.name + " - " + area.name
} for area in RestrictedArea.objects.order_by('area_type__name', 'name')
] # We use an array instead of dict because JS parsing would re-order JSON dict
return json.dumps(all_restricted_areas)
|
nilq/baby-python
|
python
|
import copy, unittest
from bibliopixel.project import project
from bibliopixel.animation.sequence import Sequence
from bibliopixel.animation import matrix
from bibliopixel.layout.matrix import Matrix
from bibliopixel.project.data_maker import Maker
def classname(c):
return '%s.%s' % (c.__module__, c.__name__)
class Project2Test(unittest.TestCase):
def test_empty(self):
project.project()
def test_single(self):
source = {
'animation': 'bibliopixel.animation.matrix.Matrix',
'shape': [23, 32],
}
pr = project.project(source)
self.assertEqual(
[matrix.Matrix, 1, Matrix, Maker, 23, 32],
[
type(pr.animation),
len(pr.drivers),
type(pr.layout),
type(pr.maker),
pr.layout.width,
pr.layout.height,
])
|
nilq/baby-python
|
python
|
from pdf2image import convert_from_path
import os
import gc
import cv2
import easyocr
import pandas as pd
import Levenshtein as lev
from datetime import datetime
test_template_data = [{'id': 1, 'name': 'JK agency', 'height': 2338, 'width': 1653,
'product_region': ((167, 473), (503, 1650)),
'unit_region': ((511, 473), (623, 1650)),
'batch_region': ((610, 473), (770, 1650)),
'exp_region': ((770, 473), (850, 1650)),
'qty_region': ((1026, 473), (1100, 1650))
},
{'id': 2, 'name': 'CD Associates', 'height': 2339, 'width': 1653,
'product_region': ((117, 436), (630, 1929)),
'unit_region': ((630, 436), (723, 1929)),
'batch_region': ((723, 436), (870, 1929)),
'exp_region': ((870, 436), (950, 1929)),
'qty_region': ((1026, 473), (1100, 1650))
}]
text_reader = easyocr.Reader(['en'], gpu=False)
def convert2images(filepath):
filename = filepath.split('/')[-1].split('.')[0]
images = convert_from_path(filepath)
res_dir = f'results/{filename}'
if not os.path.exists(res_dir):
os.makedirs(res_dir)
for i in range(len(images)):
# Save pages as images from pdf
images[i].save(f'results/{filename}/page_{i + 1}' + '.jpg', 'JPEG')
gc.collect()
return res_dir
def get_template(image, templates=[], temp_id=0):
# get ID of template
for template in test_template_data:
if template['id'] == temp_id:
gc.collect()
return template
def getlines_from_extract(data):
lines = []
last_y = 0
line = ''
ctr = 1
for d in data:
if d[0][-1][1] - last_y > 20:
lines.append(line)
line = d[1]
last_y = d[0][-1][1]
else:
line += ' ' + d[1]
if ctr == len(data):
lines.append(line)
ctr += 1
return lines
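# Illustrative example (hypothetical easyocr-style boxes): a gap of more than 20 px
# between the y-coordinates of consecutive boxes starts a new line, otherwise the text
# is appended to the current line. For
#   [([(0, 5), (90, 5), (90, 30), (0, 30)], 'PARACETAMOL', 0.9),
#    ([(120, 6), (200, 6), (200, 31), (120, 31)], '500MG', 0.8)]
# the result is ['', 'PARACETAMOL 500MG'] (the leading '' comes from the initial empty
# line buffer).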
def change_date(dt_str):
if len(dt_str) > 0:
dt = datetime.strptime(dt_str, '%m-%y')
return dt.strftime('%-m/%y')
return dt_str
def extract_text(image, temp_id):
template = get_template(image, temp_id=temp_id)
# scale height & width
scale_x = 1.0 # original_image_width / current_image_width
scale_y = 1.0 # original_image_height / current_image_height
# Get products
product_x1, product_y1 = template['product_region'][0][0] * scale_x, template['product_region'][0][1] * scale_y
product_x2, product_y2 = template['product_region'][1][0] * scale_x, template['product_region'][1][1] * scale_y
product_region = image[int(product_y1): int(product_y2), int(product_x1): int(product_x2)]
result = text_reader.readtext(product_region)
products = getlines_from_extract(result)
# Get units
unit_x1, unit_y1 = template['unit_region'][0][0] * scale_x, template['unit_region'][0][1] * scale_y
unit_x2, unit_y2 = template['unit_region'][1][0] * scale_x, template['unit_region'][1][1] * scale_y
unit_region = image[int(unit_y1): int(unit_y2), int(unit_x1): int(unit_x2)]
result = text_reader.readtext(unit_region)
units = getlines_from_extract(result)
# Get Batches
batch_x1, batch_y1 = template['batch_region'][0][0] * scale_x, template['batch_region'][0][1] * scale_y
batch_x2, batch_y2 = template['batch_region'][1][0] * scale_x, template['batch_region'][1][1] * scale_y
batch_region = image[int(batch_y1): int(batch_y2), int(batch_x1): int(batch_x2)]
result = text_reader.readtext(batch_region)
batches = getlines_from_extract(result)
# Get Expiry
exp_x1, exp_y1 = template['exp_region'][0][0] * scale_x, template['exp_region'][0][1] * scale_y
exp_x2, exp_y2 = template['exp_region'][1][0] * scale_x, template['exp_region'][1][1] * scale_y
exp_region = image[int(exp_y1): int(exp_y2), int(exp_x1): int(exp_x2)]
result = text_reader.readtext(exp_region)
expiry_dates = getlines_from_extract(result)
if temp_id == 2:
expiry_dates = list(map(change_date, expiry_dates))
# Get Quantity
qty_x1, qty_y1 = template['qty_region'][0][0] * scale_x, template['qty_region'][0][1] * scale_y
qty_x2, qty_y2 = template['qty_region'][1][0] * scale_x, template['qty_region'][1][1] * scale_y
qty_region = image[int(qty_y1): int(qty_y2), int(qty_x1): int(qty_x2)]
result = text_reader.readtext(qty_region)
quantities = getlines_from_extract(result)
return products, units, batches, expiry_dates, quantities
def get_final_csv(result_dir, temp_id):
pages = []
for r, d, files in os.walk(result_dir):
for file in files:
if file.split('.')[-1] in ['jpg', 'JPG', 'JPEG']:
pages.append(file)
pages = pages[::-1]
break
final_products = []
final_units = []
final_batches = []
final_expiry = []
final_quantities = []
for img_name in pages:
img = cv2.imread(result_dir + f'/{img_name}')
print(result_dir + f'/{img_name}', type(img))
products, units, batches, expiry_dates, quantities = extract_text(img, temp_id)
final_products.extend(products[1:])
final_units.extend(units[1:])
final_batches.extend(batches[1:])
final_expiry.extend(expiry_dates[1:])
final_quantities.extend(quantities[1:])
df = pd.DataFrame({'PRODUCTS': final_products, 'UNITS': final_units,
'BATCHES': final_batches, 'EXPIRY': final_expiry})
sorted_csv = df.sort_values(by=['PRODUCTS'])
sorted_csv.to_csv(f'{result_dir}/final.csv', index=False)
return f'{result_dir}/final.csv'
def max5_similarities(list_of_tup):
lst = len(list_of_tup)
for i in range(0, lst):
for j in range(0, lst - i - 1):
if list_of_tup[j][1] > list_of_tup[j + 1][1]:
list_of_tup[j], list_of_tup[j + 1] = list_of_tup[j + 1], list_of_tup[j]
# print(list_of_tup)
return list_of_tup[lst-5:][::-1]
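# Illustrative example (hypothetical values): the list is bubble-sorted by ratio in
# ascending order and the last five entries are returned best-first, e.g.
#   max5_similarities([('a', 0.1), ('b', 0.9), ('c', 0.5), ('d', 0.7), ('e', 0.3), ('f', 0.8)])
#   -> [('b', 0.9), ('f', 0.8), ('d', 0.7), ('c', 0.5), ('e', 0.3)]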
def color_cells(x):
global rows_to_color
color = 'background-color: red'
df1 = pd.DataFrame('', index=x.index, columns=x.columns)
for i in rows_to_color:
df1.iloc[i, :] = 'background-color: red'
return df1
if __name__ == "__main__":
jk_pdf = input('Give pdf of JK Agency ({specific year} template): ')
cd_pdf = input('Give pdf of CD Associates: ')
result_dir = convert2images(jk_pdf)
jk_csv = get_final_csv(result_dir, 1)
result_dir = convert2images(cd_pdf)
cd_csv = get_final_csv(result_dir, 2)
# ---------------------------------------------------------
jk_df = pd.read_csv(jk_csv, usecols=['PRODUCTS', 'UNITS', 'BATCHES', 'EXPIRY'])
# aggregation_functions
jk_list = []
for i, row in jk_df.iterrows():
jk_list.append(' - '.join((row['PRODUCTS'], row['UNITS'], row['BATCHES'], row['EXPIRY'])))
# ---------------------------------------------------------
cd_df = pd.read_csv(cd_csv, usecols=['PRODUCTS', 'UNITS', 'BATCHES', 'EXPIRY'])
# aggregation_functions
cd_list = []
for i, row in cd_df.iterrows():
cd_list.append(' - '.join((row['PRODUCTS'], row['UNITS'], row['BATCHES'], row['EXPIRY'])))
rows_to_color = []
for i in range(len(cd_list)):
ratios = [(x, round(lev.ratio(cd_list[i], x), 3)) for x in jk_list]
ratios = max5_similarities(ratios)
print(cd_list[i], ratios)
# print(return_list[i], '--', ratios)
if ratios[0][1] < 0.7:
rows_to_color.append(i)
excel_filename = f'verified_result/comparison of {jk_pdf.split("/")[-1].split(".")[0]} & '
excel_filename += f'{cd_pdf.split("/")[-1].split(".")[0]}.xlsx'
cd_df.style.apply(color_cells, axis=None).to_excel(excel_filename,
engine='openpyxl', index=False)
print(f'result stored at "{excel_filename}"')
os.system(f'libreoffice --calc "{excel_filename}"')
cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
from .visualize import *
from .detection import *
|
nilq/baby-python
|
python
|
"""
CSC110 Final Project - Analysis of Public Sentiment over New Cases
"""
if __name__ == '__main__':
import app
app.run_app()
|
nilq/baby-python
|
python
|
r"""
Special extensions of function fields
This module currently implements only constant field extension.
Constant field extensions
-------------------------
EXAMPLES:
Constant field extension of the rational function field over rational numbers::
sage: K.<x> = FunctionField(QQ)
sage: N.<a> = QuadraticField(2)
sage: L = K.extension_constant_field(N)
sage: L
Rational function field in x over Number Field in a with defining
polynomial x^2 - 2 with a = 1.4142... over its base
sage: d = (x^2 - 2).divisor()
sage: d
-2*Place (1/x)
+ Place (x^2 - 2)
sage: L.conorm_divisor(d)
-2*Place (1/x)
+ Place (x - a)
+ Place (x + a)
Constant field extension of a function field over a finite field::
sage: K.<x> = FunctionField(GF(2)); R.<Y> = K[]
sage: F.<y> = K.extension(Y^3 - x^2*(x^2 + x + 1)^2)
sage: E = F.extension_constant_field(GF(2^3))
sage: E
Function field in y defined by y^3 + x^6 + x^4 + x^2 over its base
sage: p = F.get_place(3)
sage: E.conorm_place(p) # random
Place (x + z3, y + z3^2 + z3)
+ Place (x + z3^2, y + z3)
+ Place (x + z3^2 + z3, y + z3^2)
sage: q = F.get_place(2)
sage: E.conorm_place(q) # random
Place (x + 1, y^2 + y + 1)
sage: E.conorm_divisor(p + q) # random
Place (x + 1, y^2 + y + 1)
+ Place (x + z3, y + z3^2 + z3)
+ Place (x + z3^2, y + z3)
+ Place (x + z3^2 + z3, y + z3^2)
AUTHORS:
- Kwankyu Lee (2021-12-24): added constant field extension
"""
from sage.rings.ring_extension import RingExtension_generic
from .constructor import FunctionField
class FunctionFieldExtension(RingExtension_generic):
"""
Abstract base class of function field extensions.
"""
pass
class ConstantFieldExtension(FunctionFieldExtension):
"""
Constant field extension.
INPUT:
- ``F`` -- a function field whose constant field is `k`
- ``k_ext`` -- an extension of `k`
"""
def __init__(self, F, k_ext):
"""
Initialize.
TESTS::
sage: K.<x> = FunctionField(GF(2)); R.<Y> = K[]
sage: F.<y> = K.extension(Y^3 - x^2*(x^2 + x + 1)^2)
sage: E = F.extension_constant_field(GF(2^3))
sage: TestSuite(E).run(skip=['_test_elements', '_test_pickling'])
"""
k = F.constant_base_field()
F_base = F.base_field()
F_ext_base = FunctionField(k_ext, F_base.variable_name())
if F.degree() > 1:
# construct constant field extension F_ext of F
def_poly = F.polynomial().base_extend(F_ext_base)
F_ext = F_ext_base.extension(def_poly, names=def_poly.variable_name())
else: # rational function field
F_ext = F_ext_base
# embedding of F into F_ext
embedk = k_ext.coerce_map_from(k)
embedF_base = F_base.hom(F_ext_base.gen(), embedk)
if F.degree() > 1:
embedF = F.hom(F_ext.gen(), embedF_base)
else:
embedF = embedF_base
self._embedk = embedk
self._embedF = embedF
self._F_ext = F_ext
self._k = k
super().__init__(embedF, is_backend_exposed=True)
def top(self):
"""
Return the top function field of this extension.
EXAMPLES::
sage: K.<x> = FunctionField(GF(2)); R.<Y> = K[]
sage: F.<y> = K.extension(Y^3 - x^2*(x^2 + x + 1)^2)
sage: E = F.extension_constant_field(GF(2^3))
sage: E.top()
Function field in y defined by y^3 + x^6 + x^4 + x^2
"""
return self._F_ext
def defining_morphism(self):
"""
Return the defining morphism of this extension.
This is the morphism from the base to the top.
EXAMPLES::
sage: K.<x> = FunctionField(GF(2)); R.<Y> = K[]
sage: F.<y> = K.extension(Y^3 - x^2*(x^2 + x + 1)^2)
sage: E = F.extension_constant_field(GF(2^3))
sage: E.defining_morphism()
Function Field morphism:
From: Function field in y defined by y^3 + x^6 + x^4 + x^2
To: Function field in y defined by y^3 + x^6 + x^4 + x^2
Defn: y |--> y
x |--> x
1 |--> 1
"""
return self._embedF
def conorm_place(self, p):
"""
Return the conorm of the place `p` in this extension.
INPUT:
- ``p`` -- place of the base function field
OUTPUT: divisor of the top function field
EXAMPLES::
sage: K.<x> = FunctionField(GF(2)); R.<Y> = K[]
sage: F.<y> = K.extension(Y^3 - x^2*(x^2 + x + 1)^2)
sage: E = F.extension_constant_field(GF(2^3))
sage: p = F.get_place(3)
sage: d = E.conorm_place(p)
sage: [pl.degree() for pl in d.support()]
[1, 1, 1]
sage: p = F.get_place(2)
sage: d = E.conorm_place(p)
sage: [pl.degree() for pl in d.support()]
[2]
"""
embedF = self.defining_morphism()
O_ext = self.maximal_order()
Oinf_ext = self.maximal_order_infinite()
if p.is_infinite_place():
ideal = Oinf_ext.ideal([embedF(g) for g in p.prime_ideal().gens()])
else:
ideal = O_ext.ideal([embedF(g) for g in p.prime_ideal().gens()])
return ideal.divisor()
def conorm_divisor(self, d):
"""
Return the conorm of the divisor ``d`` in this extension.
INPUT:
- ``d`` -- divisor of the base function field
OUTPUT: a divisor of the top function field
EXAMPLES::
sage: K.<x> = FunctionField(GF(2)); R.<Y> = K[]
sage: F.<y> = K.extension(Y^3 - x^2*(x^2 + x + 1)^2)
sage: E = F.extension_constant_field(GF(2^3))
sage: p1 = F.get_place(3)
sage: p2 = F.get_place(2)
sage: c = E.conorm_divisor(2*p1+ 3*p2)
sage: c1 = E.conorm_place(p1)
sage: c2 = E.conorm_place(p2)
sage: c == 2*c1 + 3*c2
True
"""
div_top = self.divisor_group()
c = div_top.zero()
for pl, mul in d.list():
c += mul * self.conorm_place(pl)
return c
|
nilq/baby-python
|
python
|
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated
from notifications.models import Notification
from notifications.serializers import NotificationSerializer
class Notification_ListOwn_ApiView(ListAPIView):
permission_classes = [IsAuthenticated]
serializer_class = NotificationSerializer
def get_queryset(self):
qs = Notification.objects.filter(user=self.request.user)
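# Side effect: listing the notifications also marks every notification of this user as viewed.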
qs.select_for_update().update(is_viewed=True)
return qs
|
nilq/baby-python
|
python
|
import os, re, json
import torch
import argparse
import pyhocon
import pickle
from nltk import tokenize
EOS_token = '<EOS>'
BOS_token = '<BOS>'
parallel_pattern = re.compile(r'^(.+?)(\t)(.+?)$')
# swbd_align = {
# '<Uninterpretable>': ['%', 'x'],
# '<Statement>': ['sd', 'sv', '^2', 'no', 't3', 't1', 'oo', 'cc', 'co', 'oo_co_cc'],
# '<Question>': ['q', 'qy', 'qw', 'qy^d', 'bh', 'qo', 'qh', 'br', 'qrr', '^g', 'qw^d'],
# '<Directive>': ['ad'],
# '<Propose>': ['p'],
# '<Greeting>': ['fp', 'fc'],
# '<Apology>': ['fa', 'nn', 'ar', 'ng', 'nn^e', 'arp', 'nd', 'arp_nd'],
# '<Agreement>': ['aa', 'aap', 'am', 'aap_am', 'ft'],
# '<Understanding>': ['b', 'bf', 'ba', 'bk', 'na', 'ny', 'ny^e'],
# '<Other>': ['o', 'fo', 'bc', 'by', 'fw', 'h', '^q', 'b^m', '^h', 'bd', 'fo_o_fw_"_by_bc'],
# '<turn>': ['<turn>']
# }
damsl_align = {
'<Uninterpretable>': ['abandoned_or_turn-exit/uninterpretable', 'non-verbal'],
'<Statement>': ['statement-non-opinion', 'statement-opinion', 'collaborative_completion',
'other_answers', '3rd-party-talk', 'self-talk'],
'<Question>': ['yes-no-question', 'wh-question', 'declarative_yes-no-question', 'backchannel_in_question_form',
'open-question', 'rhetorical-questions', 'signal-non-understanding', 'or-clause', 'tag-question', 'declarative_wh-question'],
'<Directive>': ['action-directive'],
'<Propose>': ['offers,_options_commits'],
'<Greeting>': ['conventional-opening', 'conventional-closing'],
'<Apology>': ['apology', 'no_answers', 'reject', 'negative_non-no_answers', 'dispreferred_answers'],
'<Agreement>': ['agree/accept', 'maybe/accept-part', 'thanking'],
'<Understanding>': ['acknowledge_(backchannel)', 'summarize/reformulate', 'appreciation',
'response_acknowledgement', 'affirmative_non-yes_answers', 'yes_answers'],
'<Other>': ['other', 'quotation', 'repeat-phrase', 'hedge', 'hold_before_answer/agreement', 'downplayer'],
'<turn>': ['<turn>']
}
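# damsl_align maps each coarse dialogue-act class to the fine-grained DAMSL tags it covers;
# easy_damsl() below inverts the mapping, e.g. easy_damsl('statement-opinion') -> '<Statement>'.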
def parse():
parser = argparse.ArgumentParser()
parser.add_argument('--expr', '-e', default='DAestimate', help='input experiment config')
parser.add_argument('--gpu', '-g', type=int, default=0, help='input gpu num')
args = parser.parse_args()
if torch.cuda.is_available():
torch.cuda.set_device(args.gpu)
return args
def initialize_env(name):
corpus_path = {
'swda': {'path': './data/corpus/swda', 'pattern': r'^sw\_{}\_([0-9]*?)\.jsonlines$', 'lang': 'en'},
'talkback': {'path': '/projects/anga5835/data/tb-jsonlines-filt', 'pattern': r'^{}\_(.*?)\.jsonlines$', 'lang': 'en'},
'dailydialog': {'path': './data/corpus/dailydialog', 'pattern': r'^DailyDialog\_{}\_([0-9]*?)\.jsonlines$', 'lang': 'en'}
}
config = pyhocon.ConfigFactory.parse_file('experiments.conf')[name]
config['log_dir'] = os.path.join(config['log_root'], name)
config['train_path'] = corpus_path[config['corpus']]['path']
config['corpus_pattern'] = corpus_path[config['corpus']]['pattern']
config['lang'] = corpus_path[config['corpus']]['lang']
if not os.path.exists(config['log_dir']):
os.makedirs(config['log_dir'])
print('loading setting "{}"'.format(name))
print('log_root: {}'.format(config['log_root']))
print('corpus: {}'.format(config['corpus']))
return config
class da_Vocab:
def __init__(self, config, das=[], create_vocab=True):
self.word2id = None
self.id2word = None
self.config = config
self.das = das
if create_vocab:
self.construct()
else:
self.load()
def construct(self):
vocab = {'<PAD>': 0, }
vocab_count = {}
for token in self.das:
if token in vocab_count:
vocab_count[token] += 1
else:
vocab_count[token] = 1
for k, _ in sorted(vocab_count.items(), key=lambda x: -x[1]):
vocab[k] = len(vocab)
self.word2id = vocab
self.id2word = {v : k for k, v in vocab.items()}
return vocab
def tokenize(self, X_tensor):
X_tensor = [[self.word2id[token] for token in sentence] for sentence in X_tensor]
return X_tensor
def save(self):
pickle.dump(self.word2id, open(os.path.join(self.config['log_root'], 'da_vocab.dict'), 'wb'))
def load(self):
self.word2id = pickle.load(open(os.path.join(self.config['log_root'], 'da_vocab.dict'), 'rb'))
self.id2word = {v: k for k, v in self.word2id.items()}
class utt_Vocab:
def __init__(self, config, sentences=[], create_vocab=True):
self.word2id = None
self.id2word = None
self.config = config
self.sentences = sentences
if create_vocab:
self.construct()
else:
self.load()
def construct(self):
vocab = {'<UNK>': 0, '<EOS>': 1, '<BOS>': 2, '<PAD>': 3, '<SEP>': 4}
vocab_count = {}
for sentence in self.sentences:
for word in sentence:
if word in vocab: continue
if word in vocab_count:
vocab_count[word] += 1
else:
vocab_count[word] = 1
for k, _ in sorted(vocab_count.items(), key=lambda x: -x[1]):
vocab[k] = len(vocab)
if len(vocab) >= self.config['UTT_MAX_VOCAB']: break
self.word2id = vocab
self.id2word = {v : k for k, v in vocab.items()}
return vocab
def tokenize(self, X_tensor):
X_tensor = [[[self.word2id[token] if token in self.word2id else self.word2id['<UNK>'] for token in seq] for seq in dialogue] for dialogue in X_tensor]
return X_tensor
def save(self):
pickle.dump(self.word2id, open(os.path.join(self.config['log_root'], 'utterance_vocab.dict'), 'wb'))
def load(self):
self.word2id = pickle.load(open(os.path.join(self.config['log_root'], 'utterance_vocab.dict'), 'rb'))
self.id2word = {v: k for k, v in self.word2id.items()}
def create_traindata(config, prefix='train'):
file_pattern = re.compile(config['corpus_pattern'].format(prefix))
files = [f for f in os.listdir(config['train_path']) if file_pattern.match(f)]
da_posts = []
da_cmnts = []
utt_posts = []
utt_cmnts = []
turn = []
# 1file 1conversation
for filename in files:
with open(os.path.join(config['train_path'], filename), 'r') as f:
data = f.read().split('\n')
data.remove('')
da_seq = []
utt_seq = []
turn_seq = []
# 1line 1turn
for idx, line in enumerate(data, 1):
jsondata = json.loads(line)
for da, utt in zip(jsondata['DA'], jsondata['sentence']):
if config['lang'] == 'en':
_utt = [BOS_token] + en_preprocess(utt) + [EOS_token]
else:
_utt = [BOS_token] + utt.split(' ') + [EOS_token]
if config['corpus'] == 'swda':
da_seq.append(easy_damsl(da))
else:
da_seq.append(da)
utt_seq.append(_utt)
turn_seq.append(0)
turn_seq[-1] = 1
da_seq = [da for da in da_seq]
if len(da_seq) <= config['window_size']: continue
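# Slide a window of length window_size over the conversation: the *_posts lists hold the
# current window and the *_cmnts lists hold the same window shifted one step ahead
# (the prediction targets).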
for i in range(max(1, len(da_seq) - 1 - config['window_size'])):
assert len(da_seq[i:min(len(da_seq)-1, i + config['window_size'])]) >= config['window_size'], filename
da_posts.append(da_seq[i:min(len(da_seq)-1, i + config['window_size'])])
da_cmnts.append(da_seq[1 + i:min(len(da_seq), 1 + i + config['window_size'])])
utt_posts.append(utt_seq[i:min(len(da_seq)-1, i + config['window_size'])])
utt_cmnts.append(utt_seq[1 + i:min(len(da_seq), 1 + i + config['window_size'])])
turn.append(turn_seq[i:min(len(da_seq), i + config['window_size'])])
assert len(da_posts) == len(da_cmnts), 'Unexpect length da_posts and da_cmnts'
assert len(utt_posts) == len(utt_cmnts), 'Unexpect length utt_posts and utt_cmnts'
assert all(len(ele) == config['window_size'] for ele in da_posts), {len(ele) for ele in da_posts}
return da_posts, da_cmnts, utt_posts, utt_cmnts, turn
def create_todbert_traindata(config, tokenizer, prefix='train'):
file_pattern = re.compile(config['corpus_pattern'].format(prefix))
files = [f for f in os.listdir(config['train_path']) if file_pattern.match(f)]
da_posts = []
da_cmnts = []
utt_posts = []
plain_utt_posts = []
speaker_posts = []
utt_cmnts = []
turn = []
# 1file 1conversation
for filename in files:
with open(os.path.join(config['train_path'], filename), 'r') as f:
data = f.read().split('\n')
data.remove('')
da_seq = []
utt_seq = []
turn_seq = []
plain_utt_seq = []
speaker_seq = []
# 1line 1turn
for idx, line in enumerate(data, 1):
jsondata = json.loads(line)
speaker = jsondata['caller']
if speaker == 'Teacher':
speaker_tok = '[SYS]'
else:
speaker_tok = '[USR]'
for da, utt in zip(jsondata['DA'], jsondata['sentence']):
plain_utt_seq.append(utt)
if config['lang'] == 'en':
_utt = [BOS_token] + en_preprocess(utt) + [EOS_token]
else:
_utt = [BOS_token] + utt.split(' ') + [EOS_token]
if config['corpus'] == 'swda':
da_seq.append(easy_damsl(da))
else:
da_seq.append(da)
utt_seq.append(_utt)
turn_seq.append(0)
speaker_seq.append(speaker_tok)
turn_seq[-1] = 1
da_seq = [da for da in da_seq]
if len(da_seq) <= config['window_size']: continue
for i in range(max(1, len(da_seq) - 1 - config['window_size'])):
assert len(da_seq[i:min(len(da_seq)-1, i + config['window_size'])]) >= config['window_size'], filename
da_posts.append(da_seq[i:min(len(da_seq)-1, i + config['window_size'])])
da_cmnts.append(da_seq[1 + i:min(len(da_seq), 1 + i + config['window_size'])])
utt_posts.append(utt_seq[i:min(len(da_seq)-1, i + config['window_size'])])
plain_utt_posts.append(plain_utt_seq[i:min(len(da_seq)-1, i + config['window_size'])])
speaker_posts.append(speaker_seq[i:min(len(da_seq)-1, i + config['window_size'])])
utt_cmnts.append(utt_seq[1 + i:min(len(da_seq), 1 + i + config['window_size'])])
turn.append(turn_seq[i:min(len(da_seq), i + config['window_size'])])
assert len(da_posts) == len(da_cmnts), 'Unexpect length da_posts and da_cmnts'
assert len(utt_posts) == len(utt_cmnts), 'Unexpect length utt_posts and utt_cmnts'
assert all(len(ele) == config['window_size'] for ele in da_posts), {len(ele) for ele in da_posts}
assert len(utt_posts) == len(plain_utt_posts), "Wrong tokenization"
tod_context = []
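# Build TOD-BERT style contexts: '[CLS]' followed by the raw utterances, inserting a
# '[SYS]'/'[USR]' speaker token only when the speaker changes, then convert to token ids.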
for i in range(len(plain_utt_posts)):
context_str = "[CLS]"
prev_speaker = None
#assert len(tod_posts[i]) == len(plain_utt_posts[i]) == len(speaker_posts[i])
assert len(plain_utt_posts[i]) == len(speaker_posts[i])
for j in range(len(speaker_posts[i])):
if speaker_posts[i][j] == prev_speaker:
context_str = context_str + ' ' + plain_utt_posts[i][j]
else:
context_str = context_str + ' ' + speaker_posts[i][j] + ' ' + plain_utt_posts[i][j]
prev_speaker = speaker_posts[i][j]
#print(context_str)
context_tokens = tokenizer.tokenize(context_str)
context_tokenized = tokenizer.convert_tokens_to_ids(context_tokens)
tod_context.append(context_tokenized)
assert len(tod_context) == len(utt_posts)
return da_posts, da_cmnts, utt_posts, tod_context, utt_cmnts, turn
def easy_damsl(tag):
easy_tag = [k for k, v in damsl_align.items() if tag in v]
return easy_tag[0] if easy_tag else tag
def separate_data(posts, cmnts, turn):
split_size = round(len(posts) / 10)
if split_size == 0: split_size = 1
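# Roughly 80/10/10 split: the first tenth becomes the test set, the second tenth the
# validation set, and the remainder the training set.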
X_train, Y_train, Tturn = posts[split_size * 2:], cmnts[split_size * 2:], turn[split_size * 2:]
X_valid, Y_valid, Vturn = posts[split_size: split_size * 2], cmnts[split_size: split_size * 2], turn[split_size: split_size * 2]
X_test, Y_test, Testturn = posts[:split_size], cmnts[:split_size], turn[:split_size]
assert len(X_train) == len(Y_train), 'Unexpect to separate train data'
return X_train, Y_train, X_valid, Y_valid, X_test, Y_test, Tturn, Vturn, Testturn
def en_preprocess(utterance):
if utterance == '': return ['<Silence>']
return tokenize.word_tokenize(utterance.lower())
|
nilq/baby-python
|
python
|
class Solution:
"""
@param arr: an integer array
@return: the minimum possible sum after making every id unique
"""
def UniqueIDSum(self, arr):
# write your code here
table = set()
for a in arr:
while a in table:
a += 1
table.add(a)
return sum(table)
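# Quick sanity check (hypothetical input): duplicates are bumped up until unique, so
# Solution().UniqueIDSum([3, 2, 1, 2, 7]) collects {1, 2, 3, 4, 7} and returns 17.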
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import os
import re
import sys
import argparse
parser = argparse.ArgumentParser(description='JS Fixups')
parser.add_argument('file', help="file to process")
args = parser.parse_args()
file = open(args.file)
text = file.read()
pat = r'HEAP32\[(?P<base>.*?)\+(?P<offset>.*?)>>(?P<shift>.*?)\]'
pat = r'HEAP32\[(\w*?)\+(\d*?)>>2\]'
# print pat
rex = re.compile(pat, re.MULTILINE)
def replace(match):
# print match.group(0)
base = match.group(1)
offset = match.group(2)
return "HEAP32[(" + base + ">>2)+(" + offset + ">>2)]"
text = rex.sub(replace, text)
print text
|
nilq/baby-python
|
python
|
# Copyright (c) 2019,20-22 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import pytest
import os
import torch
from kaolin.ops.batch import get_first_idx, tile_to_packed, list_to_packed
from kaolin.utils.testing import FLOAT_TYPES, with_seed, check_tensor
from kaolin.ops import mesh
from kaolin.ops.mesh.trianglemesh import _unbatched_subdivide_vertices, subdivide_trianglemesh
from kaolin.io import obj
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.pardir, os.pardir, os.pardir, os.pardir, 'samples/')
@pytest.mark.parametrize("device,dtype", FLOAT_TYPES)
class TestFaceAreas:
def test_face_areas(self, device, dtype):
vertices = torch.tensor([[[0., 0., 0.],
[0., 0., 1.],
[0., 1., 0.],
[2., 0., 0.2]],
[[-1., -1., -1.],
[-1., -1., 1.],
[-1, 1., -1.],
[3, -1., -0.6]]],
device=device, dtype=dtype)
faces = torch.tensor([[0, 1, 2],
[1, 0, 3]],
device=device, dtype=torch.long)
output = mesh.face_areas(vertices, faces)
expected_output = torch.tensor([[0.5, 1.], [2., 4.]], device=device, dtype=dtype)
assert torch.equal(output, expected_output)
def test_packed_face_areas(self, device, dtype):
vertices = torch.tensor([[0., 0., 0.],
[0., 0., 1.],
[0., 1., 0.],
[2., 0., 0.2],
[0., 0., 0.],
[0., 1., 1.],
[2., 0., 0.]],
device=device, dtype=dtype)
faces = torch.tensor([[0, 1, 2],
[1, 0, 3],
[0, 1, 2]], device=device, dtype=torch.long)
first_idx_vertices = torch.LongTensor([0, 4, 7], device='cpu')
num_faces_per_mesh = torch.LongTensor([2, 1], device='cpu')
output = mesh.packed_face_areas(vertices, first_idx_vertices,
faces, num_faces_per_mesh)
expected_output = torch.tensor([0.5, 1., math.sqrt(2.)], device=device, dtype=dtype)
assert torch.allclose(output, expected_output)
@pytest.mark.parametrize("device,dtype", FLOAT_TYPES)
class TestSamplePoints:
@pytest.fixture(autouse=True)
def vertices(self, device, dtype):
# TODO(cfujitsang): extend the test with Z variation
return torch.tensor([[[0., 0., 0.],
[0., 1., 0.],
[1., 0., 0.],
[-1, 0., 0.]],
[[1., 1., 3.],
[1., 1.5, 3.],
[1.5, 1., 3.],
[0.5, 1., 3.]]],
device=device, dtype=dtype)
@pytest.fixture(autouse=True)
def faces(self, device, dtype):
return torch.tensor([[0, 1, 2],
[1, 0, 3]],
device=device, dtype=torch.long)
@pytest.fixture(autouse=True)
def face_features(self, device, dtype):
return torch.tensor(
[[[[0., 0.], [0., 1.], [0., 2.]],
[[1., 3.], [1., 4.], [1., 5.]]],
[[[2., 6.], [2., 7.], [2., 8.]],
[[3., 9.], [3., 10.], [3., 11.]]]],
device=device, dtype=dtype)
######## FIXED ########
@pytest.mark.parametrize('use_features', [False, True])
def test_sample_points(self, vertices, faces, face_features,
use_features, device, dtype):
batch_size, num_vertices = vertices.shape[:2]
num_faces = faces.shape[0]
num_samples = 1000
if use_features:
points, face_choices, interpolated_features = mesh.sample_points(
vertices, faces, num_samples, face_features=face_features)
else:
points, face_choices = mesh.sample_points(
vertices, faces, num_samples)
check_tensor(points, shape=(batch_size, num_samples, 3),
dtype=dtype, device=device)
check_tensor(face_choices, shape=(batch_size, num_samples),
dtype=torch.long, device=device)
# check that all faces are sampled
num_0 = torch.sum(face_choices == 0, dim=1)
assert torch.all(num_0 + torch.sum(face_choices == 1, dim=1) == num_samples)
sampling_prob = num_samples / 2
tolerance = sampling_prob * 0.2
assert torch.all(num_0 < sampling_prob + tolerance) and \
torch.all(num_0 > sampling_prob - tolerance)
face_vertices = mesh.index_vertices_by_faces(vertices, faces)
face_vertices_choices = torch.gather(
face_vertices, 1, face_choices[:, :, None, None].repeat(1, 1, 3, 3))
# compute distance from the point to the plan of the face picked
face_normals = mesh.face_normals(face_vertices_choices, unit=True)
v0_p = points - face_vertices_choices[:, :, 0] # batch_size x num_points x 3
len_v0_p = torch.sqrt(torch.sum(v0_p ** 2, dim=-1))
cos_a = torch.matmul(v0_p.reshape(-1, 1, 3),
face_normals.reshape(-1, 3, 1)).reshape(
batch_size, num_samples) / len_v0_p
point_to_face_dist = len_v0_p * cos_a
if dtype == torch.half:
atol = 1e-2
rtol = 1e-3
else:
atol = 1e-4
rtol = 1e-5
# check that the point is close to the plan
assert torch.allclose(point_to_face_dist,
torch.zeros((batch_size, num_samples),
device=device, dtype=dtype),
atol=atol, rtol=rtol)
# check that the point lie in the triangle
edges0 = face_vertices_choices[:, :, 1] - face_vertices_choices[:, :, 0]
edges1 = face_vertices_choices[:, :, 2] - face_vertices_choices[:, :, 1]
edges2 = face_vertices_choices[:, :, 0] - face_vertices_choices[:, :, 2]
v0_p = points - face_vertices_choices[:, :, 0]
v1_p = points - face_vertices_choices[:, :, 1]
v2_p = points - face_vertices_choices[:, :, 2]
# Normals of the triangle formed by an edge and the point
normals1 = torch.cross(edges0, v0_p)
normals2 = torch.cross(edges1, v1_p)
normals3 = torch.cross(edges2, v2_p)
# cross-product of those normals with the face normals must be positive
margin = -5e-3 if dtype == torch.half else 0.
assert torch.all(torch.matmul(normals1.reshape(-1, 1, 3),
face_normals.reshape(-1, 3, 1)) >= margin)
assert torch.all(torch.matmul(normals2.reshape(-1, 1, 3),
face_normals.reshape(-1, 3, 1)) >= margin)
assert torch.all(torch.matmul(normals3.reshape(-1, 1, 3),
face_normals.reshape(-1, 3, 1)) >= margin)
if use_features:
feat_dim = face_features.shape[-1]
check_tensor(interpolated_features, shape=(batch_size, num_samples, feat_dim),
dtype=dtype, device=device)
# face_vertices_choices (batch_size, num_samples, 3, 3)
# points (batch_size, num_samples, 3)
ax = face_vertices_choices[:, :, 0, 0]
ay = face_vertices_choices[:, :, 0, 1]
bx = face_vertices_choices[:, :, 1, 0]
by = face_vertices_choices[:, :, 1, 1]
cx = face_vertices_choices[:, :, 2, 0]
cy = face_vertices_choices[:, :, 2, 1]
m = bx - ax
p = by - ay
n = cx - ax
q = cy - ay
s = points[:, :, 0] - ax
t = points[:, :, 1] - ay
# sum_weights = torch.sum(weights, dim=-1)
# zeros_idxs = torch.where(sum_weights == 0)
#weights = weights / torch.sum(weights, keepdims=True, dim=-1)
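# Solve the 2x2 system [[m, n], [p, q]] @ [w1, w2] = [s, t] with Cramer's rule to recover
# the barycentric weights of each sampled point; the z coordinate is ignored because the
# test triangles vary only in x and y (see the TODO on the vertices fixture).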
k1 = s * q - n * t
k2 = m * t - s * p
k3 = m * q - n * p
w1 = k1 / (k3 + 1e-7)
w2 = k2 / (k3 + 1e-7)
w0 = (1. - w1) - w2
weights = torch.stack([w0, w1, w2], dim=-1)
gt_points = torch.sum(
face_vertices_choices * weights.unsqueeze(-1), dim=-2)
assert torch.allclose(points, gt_points, atol=atol, rtol=rtol)
_face_choices = face_choices[..., None, None].repeat(1, 1, 3, feat_dim)
face_features_choices = torch.gather(face_features, 1, _face_choices)
gt_interpolated_features = torch.sum(
face_features_choices * weights.unsqueeze(-1), dim=-2)
assert torch.allclose(interpolated_features, gt_interpolated_features,
atol=atol, rtol=rtol)
def test_sample_points_with_areas(self, vertices, faces, dtype, device):
num_samples = 1000
face_areas = mesh.face_areas(vertices, faces)
points1, face_choices1 = with_seed(1234)(
mesh.sample_points)(vertices, faces, num_samples, face_areas)
points2, face_choices2 = with_seed(1234)(
mesh.sample_points)(vertices, faces, num_samples)
assert torch.allclose(points1, points2)
assert torch.equal(face_choices1, face_choices2)
def test_sample_points_with_areas_with_features(self, vertices, faces,
face_features, dtype, device):
num_samples = 1000
face_areas = mesh.face_areas(vertices, faces)
points1, face_choices1, interpolated_features1 = with_seed(1234)(
mesh.sample_points)(vertices, faces, num_samples, face_areas,
face_features=face_features)
points2, face_choices2, interpolated_features2 = with_seed(1234)(
mesh.sample_points)(vertices, faces, num_samples,
face_features=face_features)
assert torch.allclose(points1, points2)
assert torch.equal(face_choices1, face_choices2)
assert torch.allclose(interpolated_features1, interpolated_features2)
def test_diff_sample_points(self, vertices, faces, device, dtype):
num_samples = 1000
points1, face_choices1 = with_seed(1234)(
mesh.sample_points)(vertices, faces, num_samples)
points2, face_choices2 = with_seed(1235)(
mesh.sample_points)(vertices, faces, num_samples)
assert not torch.equal(points1, points2)
assert not torch.equal(face_choices1, face_choices2)
######## PACKED ########
@pytest.fixture(autouse=True)
def packed_vertices_info(self, device, dtype):
vertices = torch.tensor([[0., 0., 0.],
[0., 0., 1.],
[0., 1., 0.],
[2., 0., 0.2],
[0., 0., 0.],
[0., 1., 1.],
[2., 0., 0.]],
device=device, dtype=dtype)
first_idx_vertices = torch.LongTensor([0, 4, 7], device='cpu')
return vertices, first_idx_vertices
@pytest.fixture(autouse=True)
def packed_faces_info(self, device, dtype):
faces = torch.tensor([[0, 1, 2],
[1, 0, 3],
[0, 1, 2]], device=device, dtype=torch.long)
num_faces_per_mesh = torch.LongTensor([2, 1], device='cpu')
return faces, num_faces_per_mesh
def test_packed_sample_points(self, packed_vertices_info, packed_faces_info,
device, dtype):
vertices, first_idx_vertices = packed_vertices_info
faces, num_faces_per_mesh = packed_faces_info
total_num_vertices = vertices.shape[0]
total_num_faces = faces.shape[0]
batch_size = num_faces_per_mesh.shape[0]
num_samples = 1000
points, face_choices = mesh.packed_sample_points(
vertices, first_idx_vertices, faces, num_faces_per_mesh, num_samples)
check_tensor(points, shape=(batch_size, num_samples, 3),
dtype=dtype, device=device)
check_tensor(face_choices, shape=(batch_size, num_samples),
dtype=torch.long, device=device)
# check that all faces are sampled
assert torch.all(face_choices[1] == 2)
num_0 = torch.sum(face_choices[0] == 0)
assert num_0 + torch.sum(face_choices[0] == 1) == num_samples
sampling_prob = num_samples / 3.
tolerance = sampling_prob * 0.2
assert (num_0 < sampling_prob + tolerance) and \
(num_0 > sampling_prob - tolerance)
merged_faces = faces + tile_to_packed(first_idx_vertices[:-1].to(vertices.device),
num_faces_per_mesh)
face_vertices = torch.index_select(
vertices, 0, merged_faces.reshape(-1)).reshape(total_num_faces, 3, 3)
face_vertices_choices = torch.gather(
face_vertices, 0, face_choices.reshape(-1, 1, 1).repeat(1, 3, 3)
).reshape(batch_size, num_samples, 3, 3)
# compute distance from the point to the plan of the face picked
face_normals = mesh.face_normals(face_vertices_choices, unit=True)
v0_p = points - face_vertices_choices[:, :, 0] # batch_size x num_points x 3
len_v0_p = torch.sqrt(torch.sum(v0_p ** 2, dim=-1))
cos_a = torch.matmul(v0_p.reshape(-1, 1, 3),
face_normals.reshape(-1, 3, 1)).reshape(
batch_size, num_samples) / len_v0_p
point_to_face_dist = len_v0_p * cos_a
if dtype == torch.half:
atol = 1e-2
rtol = 1e-3
else:
atol = 1e-4
rtol = 1e-5
# check that the point is close to the plan
assert torch.allclose(point_to_face_dist,
torch.zeros((batch_size, num_samples),
device=device, dtype=dtype),
atol=atol, rtol=rtol)
# check that the point lie in the triangle
edges0 = face_vertices_choices[:, :, 1] - face_vertices_choices[:, :, 0]
edges1 = face_vertices_choices[:, :, 2] - face_vertices_choices[:, :, 1]
edges2 = face_vertices_choices[:, :, 0] - face_vertices_choices[:, :, 2]
v0_p = points - face_vertices_choices[:, :, 0]
v1_p = points - face_vertices_choices[:, :, 1]
v2_p = points - face_vertices_choices[:, :, 2]
# Normals of the triangle formed by an edge and the point
normals1 = torch.cross(edges0, v0_p)
normals2 = torch.cross(edges1, v1_p)
normals3 = torch.cross(edges2, v2_p)
# cross-product of those normals with the face normals must be positive
margin = -2e-3 if dtype == torch.half else 0.
assert torch.all(torch.matmul(normals1.reshape(-1, 1, 3),
face_normals.reshape(-1, 3, 1)) >= margin)
assert torch.all(torch.matmul(normals2.reshape(-1, 1, 3),
face_normals.reshape(-1, 3, 1)) >= margin)
assert torch.all(torch.matmul(normals3.reshape(-1, 1, 3),
face_normals.reshape(-1, 3, 1)) >= margin)
def test_packed_sample_points_with_areas(self, packed_vertices_info, packed_faces_info,
dtype, device):
num_samples = 1000
vertices, first_idx_vertices = packed_vertices_info
faces, num_faces_per_mesh = packed_faces_info
face_areas = mesh.packed_face_areas(vertices, first_idx_vertices,
faces, num_faces_per_mesh)
points1, face_choices1 = with_seed(1234)(mesh.packed_sample_points)(
vertices, first_idx_vertices, faces, num_faces_per_mesh, num_samples, face_areas)
points2, face_choices2 = with_seed(1234)(mesh.packed_sample_points)(
vertices, first_idx_vertices, faces, num_faces_per_mesh, num_samples)
assert torch.allclose(points1, points2)
assert torch.equal(face_choices1, face_choices2)
def test_diff_packed_sample_points(self, packed_vertices_info, packed_faces_info,
dtype, device):
num_samples = 1000
vertices, first_idx_vertices = packed_vertices_info
faces, num_faces_per_mesh = packed_faces_info
points1, face_choices1 = with_seed(1234)(mesh.packed_sample_points)(
vertices, first_idx_vertices, faces, num_faces_per_mesh, num_samples)
points2, face_choices2 = with_seed(1235)(mesh.packed_sample_points)(
vertices, first_idx_vertices, faces, num_faces_per_mesh, num_samples)
assert not torch.equal(points1, points2)
assert not torch.equal(face_choices1, face_choices2)
@pytest.mark.parametrize('device, dtype', FLOAT_TYPES)
def test_adjacency_matrix_sparse(device, dtype):
num_vertices = 5
faces = torch.tensor([[1, 3, 2],
[1, 4, 0]], dtype=torch.long, device=device)
output = mesh.adjacency_matrix(num_vertices, faces).to_dense()
expected = torch.tensor([[0, 1, 0, 0, 1],
[1, 0, 1, 1, 1],
[0, 1, 0, 1, 0],
[0, 1, 1, 0, 0],
[1, 1, 0, 0, 0]], dtype=torch.float, device=device)
assert torch.equal(output, expected)
@pytest.mark.parametrize('device, dtype', FLOAT_TYPES)
def test_adjacency_matrix_dense(device, dtype):
num_vertices = 5
faces = torch.tensor([[1, 3, 2],
[1, 4, 0]], dtype=torch.long, device=device)
output = mesh.adjacency_matrix(num_vertices, faces, sparse=False)
expected = torch.tensor([[0, 1, 0, 0, 1],
[1, 0, 1, 1, 1],
[0, 1, 0, 1, 0],
[0, 1, 1, 0, 0],
[1, 1, 0, 0, 0]], dtype=torch.float, device=device)
assert torch.equal(output, expected)
@pytest.mark.parametrize('device, dtype', FLOAT_TYPES)
def test_adjacency_consistent(device, dtype):
test_mesh = obj.import_mesh(os.path.join(ROOT_DIR, 'model.obj'))
vertices = test_mesh.vertices
faces = test_mesh.faces
num_vertices = vertices.shape[0]
sparse = mesh.adjacency_matrix(num_vertices, faces)
sparse_to_dense = sparse.to_dense()
dense = mesh.adjacency_matrix(num_vertices, faces, sparse=False)
assert torch.equal(sparse_to_dense, dense)
@pytest.mark.parametrize('device, dtype', FLOAT_TYPES)
class TestUniformLaplacian:
def test_uniform_laplacian(self, device, dtype):
num_vertices = 5
faces = torch.tensor([[1, 3, 2],
[1, 4, 0]], dtype=torch.long, device=device)
output = mesh.uniform_laplacian(num_vertices, faces)
expected = torch.tensor([[-1, 0.5, 0, 0, 0.5],
[0.25, -1, 0.25, 0.25, 0.25],
[0, 0.5, -1, 0.5, 0],
[0, 0.5, 0.5, -1, 0],
[0.5, 0.5, 0, 0, -1]], dtype=torch.float, device=device)
assert torch.equal(output, expected)
def test_not_connected_mesh(self, device, dtype):
num_vertices = 4
faces = torch.tensor([[0, 1, 2]], dtype=torch.long, device=device)
result = mesh.uniform_laplacian(num_vertices, faces)
# Any row and column related to V3 is zeros.
assert torch.equal(result[3, :3], torch.zeros((3), device=device, dtype=torch.float))
assert torch.equal(result[:3, 3], torch.zeros((3), device=device, dtype=torch.float))
@pytest.mark.parametrize('device, dtype', FLOAT_TYPES)
class TestSubdivide:
def test_subdivide(self, device, dtype):
vertices = torch.tensor([[0, 0, 0],
[1, 0, 0],
[0, 0, 1]], dtype=dtype, device=device)
faces = torch.tensor([[0, 1, 2]], dtype=torch.long, device=device)
new_vertices = _unbatched_subdivide_vertices(vertices, faces, 3)
expected_vertices = torch.tensor([[0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.1250],
[0.0000, 0.0000, 0.2500],
[0.0000, 0.0000, 0.3750],
[0.0000, 0.0000, 0.5000],
[0.0000, 0.0000, 0.6250],
[0.0000, 0.0000, 0.7500],
[0.0000, 0.0000, 0.8750],
[0.0000, 0.0000, 1.0000],
[0.1250, 0.0000, 0.0000],
[0.1250, 0.0000, 0.1250],
[0.1250, 0.0000, 0.2500],
[0.1250, 0.0000, 0.3750],
[0.1250, 0.0000, 0.5000],
[0.1250, 0.0000, 0.6250],
[0.1250, 0.0000, 0.7500],
[0.1250, 0.0000, 0.8750],
[0.2500, 0.0000, 0.0000],
[0.2500, 0.0000, 0.1250],
[0.2500, 0.0000, 0.2500],
[0.2500, 0.0000, 0.3750],
[0.2500, 0.0000, 0.5000],
[0.2500, 0.0000, 0.6250],
[0.2500, 0.0000, 0.7500],
[0.3750, 0.0000, 0.0000],
[0.3750, 0.0000, 0.1250],
[0.3750, 0.0000, 0.2500],
[0.3750, 0.0000, 0.3750],
[0.3750, 0.0000, 0.5000],
[0.3750, 0.0000, 0.6250],
[0.5000, 0.0000, 0.0000],
[0.5000, 0.0000, 0.1250],
[0.5000, 0.0000, 0.2500],
[0.5000, 0.0000, 0.3750],
[0.5000, 0.0000, 0.5000],
[0.6250, 0.0000, 0.0000],
[0.6250, 0.0000, 0.1250],
[0.6250, 0.0000, 0.2500],
[0.6250, 0.0000, 0.3750],
[0.7500, 0.0000, 0.0000],
[0.7500, 0.0000, 0.1250],
[0.7500, 0.0000, 0.2500],
[0.8750, 0.0000, 0.0000],
[0.8750, 0.0000, 0.1250],
[1.0000, 0.0000, 0.0000]], dtype=dtype, device=device)
assert torch.equal(new_vertices, expected_vertices)
def test_subdivide_2(self, device, dtype):
vertices = torch.tensor([[0, 0, 0],
[1, 0, 0],
[0, 0, 1]], dtype=dtype, device=device)
faces = torch.tensor([[0, 1, 2]], dtype=torch.long, device=device)
new_vertices = _unbatched_subdivide_vertices(vertices, faces, 2)
expected_vertices = torch.tensor([[0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.1250],
[0.0000, 0.0000, 0.2500],
[0.0000, 0.0000, 0.3750],
[0.0000, 0.0000, 0.5000],
[0.0000, 0.0000, 0.6250],
[0.0000, 0.0000, 0.7500],
[0.0000, 0.0000, 0.8750],
[0.0000, 0.0000, 1.0000],
[0.1250, 0.0000, 0.0000],
[0.1250, 0.0000, 0.1250],
[0.1250, 0.0000, 0.2500],
[0.1250, 0.0000, 0.3750],
[0.1250, 0.0000, 0.5000],
[0.1250, 0.0000, 0.6250],
[0.1250, 0.0000, 0.7500],
[0.1250, 0.0000, 0.8750],
[0.2500, 0.0000, 0.0000],
[0.2500, 0.0000, 0.1250],
[0.2500, 0.0000, 0.2500],
[0.2500, 0.0000, 0.3750],
[0.2500, 0.0000, 0.5000],
[0.2500, 0.0000, 0.6250],
[0.2500, 0.0000, 0.7500],
[0.3750, 0.0000, 0.0000],
[0.3750, 0.0000, 0.1250],
[0.3750, 0.0000, 0.2500],
[0.3750, 0.0000, 0.3750],
[0.3750, 0.0000, 0.5000],
[0.3750, 0.0000, 0.6250],
[0.5000, 0.0000, 0.0000],
[0.5000, 0.0000, 0.1250],
[0.5000, 0.0000, 0.2500],
[0.5000, 0.0000, 0.3750],
[0.5000, 0.0000, 0.5000],
[0.6250, 0.0000, 0.0000],
[0.6250, 0.0000, 0.1250],
[0.6250, 0.0000, 0.2500],
[0.6250, 0.0000, 0.3750],
[0.7500, 0.0000, 0.0000],
[0.7500, 0.0000, 0.1250],
[0.7500, 0.0000, 0.2500],
[0.8750, 0.0000, 0.0000],
[0.8750, 0.0000, 0.1250],
[1.0000, 0.0000, 0.0000]], device=device, dtype=dtype)
assert torch.equal(new_vertices, expected_vertices)
def test_subdivide_3(self, device, dtype):
vertices = torch.tensor([[0, 0, 0],
[0, 0.5, 0],
[0, 0, 1]], dtype=dtype, device=device)
faces = torch.tensor([[0, 1, 2]], dtype=torch.long, device=device)
new_vertices = _unbatched_subdivide_vertices(vertices, faces, 2)
expected_vertices = torch.tensor([[0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.1250],
[0.0000, 0.0000, 0.2500],
[0.0000, 0.0000, 0.3750],
[0.0000, 0.0000, 0.5000],
[0.0000, 0.0000, 0.6250],
[0.0000, 0.0000, 0.7500],
[0.0000, 0.0000, 0.8750],
[0.0000, 0.0000, 1.0000],
[0.0000, 0.0625, 0.0000],
[0.0000, 0.0625, 0.1250],
[0.0000, 0.0625, 0.2500],
[0.0000, 0.0625, 0.3750],
[0.0000, 0.0625, 0.5000],
[0.0000, 0.0625, 0.6250],
[0.0000, 0.0625, 0.7500],
[0.0000, 0.0625, 0.8750],
[0.0000, 0.1250, 0.0000],
[0.0000, 0.1250, 0.1250],
[0.0000, 0.1250, 0.2500],
[0.0000, 0.1250, 0.3750],
[0.0000, 0.1250, 0.5000],
[0.0000, 0.1250, 0.6250],
[0.0000, 0.1250, 0.7500],
[0.0000, 0.1875, 0.0000],
[0.0000, 0.1875, 0.1250],
[0.0000, 0.1875, 0.2500],
[0.0000, 0.1875, 0.3750],
[0.0000, 0.1875, 0.5000],
[0.0000, 0.1875, 0.6250],
[0.0000, 0.2500, 0.0000],
[0.0000, 0.2500, 0.1250],
[0.0000, 0.2500, 0.2500],
[0.0000, 0.2500, 0.3750],
[0.0000, 0.2500, 0.5000],
[0.0000, 0.3125, 0.0000],
[0.0000, 0.3125, 0.1250],
[0.0000, 0.3125, 0.2500],
[0.0000, 0.3125, 0.3750],
[0.0000, 0.3750, 0.0000],
[0.0000, 0.3750, 0.1250],
[0.0000, 0.3750, 0.2500],
[0.0000, 0.4375, 0.0000],
[0.0000, 0.4375, 0.1250],
[0.0000, 0.5000, 0.0000]], dtype=dtype, device=device)
assert torch.equal(new_vertices, expected_vertices)
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
class TestCheckSign:
@pytest.fixture(autouse=True)
def verts(self, device):
verts = []
verts.append(torch.tensor([[0., 0., 0.],
[1., 0.5, 1.],
[0.5, 1., 1.],
[1., 1., 0.5]], device=device))
verts.append(torch.tensor([[0., 0., 0.],
[1., 0, 0],
[0, 0, 1.],
[0, 1., 0]], device=device))
return torch.stack(verts)
@pytest.fixture(autouse=True)
def faces(self, device):
faces = torch.tensor([[0, 3, 1],
[0, 1, 2],
[0, 2, 3],
[3, 2, 1]], device=device)
return faces
@pytest.fixture(autouse=True)
def points(self, device):
axis = torch.linspace(0.1, 0.9, 3, device=device)
p_x, p_y, p_z = torch.meshgrid(axis + 0.01, axis + 0.02, axis + 0.03)
points = torch.cat((p_x.unsqueeze(-1), p_y.unsqueeze(-1), p_z.unsqueeze(-1)), dim=3)
points = points.view(1, -1, 3).expand(2, -1, -1)
return points
@pytest.fixture(autouse=True)
def expected(self, device):
expected = []
expected.append(torch.tensor([True, False, False, False, False, False, False, False,
False, False, False, False, False, True, False, False,
False, True, False, False, False, False, False, True,
False, True, False], device=device))
expected.append(torch.tensor([True, True, False, True, False, False, False, False, False, True,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False], device=device))
return torch.stack(expected)
def test_verts_type(self, verts, faces, points):
with pytest.raises(TypeError,
match=r"Expected verts entries to be torch.float32 "
r"but got torch.float64."):
verts = verts.double()
mesh.check_sign(verts, faces, points)
def test_faces_type(self, verts, faces, points):
with pytest.raises(TypeError,
match=r"Expected faces entries to be torch.int64 "
r"but got torch.int32."):
faces = faces.int()
mesh.check_sign(verts, faces, points)
def test_points_type(self, verts, faces, points):
with pytest.raises(TypeError,
match=r"Expected points entries to be torch.float32 "
r"but got torch.float16."):
points = points.half()
mesh.check_sign(verts, faces, points)
def test_hash_resolution_type(self, verts, faces, points):
with pytest.raises(TypeError,
match=r"Expected hash_resolution to be int "
r"but got <class 'float'>."):
mesh.check_sign(verts, faces, points, 512.0)
def test_verts_ndim(self, verts, faces, points):
with pytest.raises(ValueError,
match=r"Expected verts to have 3 dimensions "
r"but got 4 dimensions."):
verts = verts.unsqueeze(-1)
mesh.check_sign(verts, faces, points)
def test_faces_ndim(self, verts, faces, points):
with pytest.raises(ValueError,
match=r"Expected faces to have 2 dimensions "
r"but got 3 dimensions."):
faces = faces.unsqueeze(-1)
mesh.check_sign(verts, faces, points)
def test_points_ndim(self, verts, faces, points):
with pytest.raises(ValueError,
match=r"Expected points to have 3 dimensions "
r"but got 4 dimensions."):
points = points.unsqueeze(-1)
mesh.check_sign(verts, faces, points)
def test_verts_shape(self, verts, faces, points):
with pytest.raises(ValueError,
match=r"Expected verts to have 3 coordinates "
r"but got 2 coordinates."):
verts = verts[..., :2]
mesh.check_sign(verts, faces, points)
def test_faces_shape(self, verts, faces, points):
with pytest.raises(ValueError,
match=r"Expected faces to have 3 vertices "
r"but got 2 vertices."):
faces = faces[:, :2]
mesh.check_sign(verts, faces, points)
def test_points_shape(self, verts, faces, points):
with pytest.raises(ValueError,
match=r"Expected points to have 3 coordinates "
r"but got 2 coordinates."):
points = points[..., :2]
mesh.check_sign(verts, faces, points)
def test_single_batch(self, verts, faces, points, expected):
output = mesh.check_sign(verts[0:1], faces, points[0:1])
assert(torch.equal(output, expected[0:1]))
def test_meshes(self, verts, faces, points, expected):
output = mesh.check_sign(verts, faces, points)
assert(torch.equal(output, expected))
def test_faces_with_zero_area(self, verts, faces, points, expected):
faces = torch.cat([faces, torch.tensor([[1, 1, 1],
[0, 0, 0],
[2, 2, 2],
[3, 3, 3]]).to(faces.device)])
output = mesh.check_sign(verts, faces, points)
assert(torch.equal(output, expected))
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
class TestSubdivideTrianglemesh:
@pytest.fixture(autouse=True)
def vertices_icosahedron(self, device):
return torch.tensor([[[-0.5257, 0.8507, 0.0000],
[0.5257, 0.8507, 0.0000],
[-0.5257, -0.8507, 0.0000],
[0.5257, -0.8507, 0.0000],
[0.0000, -0.5257, 0.8507],
[0.0000, 0.5257, 0.8507],
[0.0000, -0.5257, -0.8507],
[0.0000, 0.5257, -0.8507],
[0.8507, 0.0000, -0.5257],
[0.8507, 0.0000, 0.5257],
[-0.8507, 0.0000, -0.5257],
[-0.8507, 0.0000, 0.5257]]], dtype=torch.float, device=device)
@pytest.fixture(autouse=True)
def faces_icosahedron(self, device):
return torch.tensor([[0, 11, 5],
[0, 5, 1],
[0, 1, 7],
[0, 7, 10],
[0, 10, 11],
[1, 5, 9],
[5, 11, 4],
[11, 10, 2],
[10, 7, 6],
[7, 1, 8],
[3, 9, 4],
[3, 4, 2],
[3, 2, 6],
[3, 6, 8],
[3, 8, 9],
[4, 9, 5],
[2, 4, 11],
[6, 2, 10],
[8, 6, 7],
[9, 8, 1]], dtype=torch.long, device=device)
@pytest.fixture(autouse=True)
def expected_vertices_default_alpha(self, device):
return torch.tensor([[[-0.4035, 0.6529, 0.0000],
[0.4035, 0.6529, 0.0000],
[-0.4035, -0.6529, 0.0000],
[0.4035, -0.6529, 0.0000],
[0.0000, -0.4035, 0.6529],
[0.0000, 0.4035, 0.6529],
[0.0000, -0.4035, -0.6529],
[0.0000, 0.4035, -0.6529],
[0.6529, 0.0000, -0.4035],
[0.6529, 0.0000, 0.4035],
[-0.6529, 0.0000, -0.4035],
[-0.6529, 0.0000, 0.4035],
[0.0000, 0.7694, 0.0000],
[-0.2378, 0.6225, 0.3847],
[-0.2378, 0.6225, -0.3847],
[-0.6225, 0.3847, -0.2378],
[-0.6225, 0.3847, 0.2378],
[0.2378, 0.6225, 0.3847],
[0.2378, 0.6225, -0.3847],
[0.6225, 0.3847, -0.2378],
[0.6225, 0.3847, 0.2378],
[0.0000, -0.7694, 0.0000],
[-0.2378, -0.6225, 0.3847],
[-0.2378, -0.6225, -0.3847],
[-0.6225, -0.3847, -0.2378],
[-0.6225, -0.3847, 0.2378],
[0.2378, -0.6225, 0.3847],
[0.2378, -0.6225, -0.3847],
[0.6225, -0.3847, -0.2378],
[0.6225, -0.3847, 0.2378],
[0.0000, 0.0000, 0.7694],
[0.3847, -0.2378, 0.6225],
[-0.3847, -0.2378, 0.6225],
[0.3847, 0.2378, 0.6225],
[-0.3847, 0.2378, 0.6225],
[0.0000, 0.0000, -0.7694],
[0.3847, -0.2378, -0.6225],
[-0.3847, -0.2378, -0.6225],
[0.3847, 0.2378, -0.6225],
[-0.3847, 0.2378, -0.6225],
[0.7694, 0.0000, 0.0000],
[-0.7694, 0.0000, 0.0000]]], dtype=torch.float, device=device)
@pytest.fixture(autouse=True)
def expected_vertices_zero_alpha(self, device):
return torch.tensor([[[-0.5257, 0.8507, 0.0000],
[0.5257, 0.8507, 0.0000],
[-0.5257, -0.8507, 0.0000],
[0.5257, -0.8507, 0.0000],
[0.0000, -0.5257, 0.8507],
[0.0000, 0.5257, 0.8507],
[0.0000, -0.5257, -0.8507],
[0.0000, 0.5257, -0.8507],
[0.8507, 0.0000, -0.5257],
[0.8507, 0.0000, 0.5257],
[-0.8507, 0.0000, -0.5257],
[-0.8507, 0.0000, 0.5257],
[0.0000, 0.7694, 0.0000],
[-0.2378, 0.6225, 0.3847],
[-0.2378, 0.6225, -0.3847],
[-0.6225, 0.3847, -0.2378],
[-0.6225, 0.3847, 0.2378],
[0.2378, 0.6225, 0.3847],
[0.2378, 0.6225, -0.3847],
[0.6225, 0.3847, -0.2378],
[0.6225, 0.3847, 0.2378],
[0.0000, -0.7694, 0.0000],
[-0.2378, -0.6225, 0.3847],
[-0.2378, -0.6225, -0.3847],
[-0.6225, -0.3847, -0.2378],
[-0.6225, -0.3847, 0.2378],
[0.2378, -0.6225, 0.3847],
[0.2378, -0.6225, -0.3847],
[0.6225, -0.3847, -0.2378],
[0.6225, -0.3847, 0.2378],
[0.0000, 0.0000, 0.7694],
[0.3847, -0.2378, 0.6225],
[-0.3847, -0.2378, 0.6225],
[0.3847, 0.2378, 0.6225],
[-0.3847, 0.2378, 0.6225],
[0.0000, 0.0000, -0.7694],
[0.3847, -0.2378, -0.6225],
[-0.3847, -0.2378, -0.6225],
[0.3847, 0.2378, -0.6225],
[-0.3847, 0.2378, -0.6225],
[0.7694, 0.0000, 0.0000],
[-0.7694, 0.0000, 0.0000]]], dtype=torch.float, device=device)
@pytest.fixture(autouse=True)
def expected_faces_icosahedron_1_iter(self, device):
return torch.tensor([[11, 34, 16],
[0, 16, 13],
[5, 13, 34],
[13, 16, 34],
[5, 17, 13],
[0, 13, 12],
[1, 12, 17],
[12, 13, 17],
[1, 18, 12],
[0, 12, 14],
[7, 14, 18],
[14, 12, 18],
[7, 39, 14],
[0, 14, 15],
[10, 15, 39],
[15, 14, 39],
[10, 41, 15],
[0, 15, 16],
[11, 16, 41],
[16, 15, 41],
[5, 33, 17],
[1, 17, 20],
[9, 20, 33],
[20, 17, 33],
[11, 32, 34],
[5, 34, 30],
[4, 30, 32],
[30, 34, 32],
[10, 24, 41],
[11, 41, 25],
[2, 25, 24],
[25, 41, 24],
[7, 35, 39],
[10, 39, 37],
[6, 37, 35],
[37, 39, 35],
[1, 19, 18],
[7, 18, 38],
[8, 38, 19],
[38, 18, 19],
[9, 31, 29],
[3, 29, 26],
[4, 26, 31],
[26, 29, 31],
[4, 22, 26],
[3, 26, 21],
[2, 21, 22],
[21, 26, 22],
[2, 23, 21],
[3, 21, 27],
[6, 27, 23],
[27, 21, 23],
[6, 36, 27],
[3, 27, 28],
[8, 28, 36],
[28, 27, 36],
[8, 40, 28],
[3, 28, 29],
[9, 29, 40],
[29, 28, 40],
[9, 33, 31],
[4, 31, 30],
[5, 30, 33],
[30, 31, 33],
[4, 32, 22],
[2, 22, 25],
[11, 25, 32],
[25, 22, 32],
[2, 24, 23],
[6, 23, 37],
[10, 37, 24],
[37, 23, 24],
[6, 35, 36],
[8, 36, 38],
[7, 38, 35],
[38, 36, 35],
[8, 19, 40],
[9, 40, 20],
[1, 20, 19],
[20, 40, 19]], dtype=torch.long, device=device)
def test_subdivide_trianglemesh_1_iter_default_alpha(self, vertices_icosahedron, faces_icosahedron, expected_vertices_default_alpha, expected_faces_icosahedron_1_iter):
new_vertices, new_faces = subdivide_trianglemesh(vertices_icosahedron, faces_icosahedron, 1)
assert torch.allclose(new_vertices, expected_vertices_default_alpha, atol=1e-04)
assert torch.equal(new_faces, expected_faces_icosahedron_1_iter)
def test_subdivide_trianglemesh_1_iter_zero_alpha(self, vertices_icosahedron, faces_icosahedron, expected_vertices_zero_alpha, expected_faces_icosahedron_1_iter):
alpha = torch.zeros_like(vertices_icosahedron[..., 0])
new_vertices, new_faces = subdivide_trianglemesh(vertices_icosahedron, faces_icosahedron, 1, alpha)
assert torch.allclose(new_vertices, expected_vertices_zero_alpha, atol=1e-04)
assert torch.equal(new_faces, expected_faces_icosahedron_1_iter)
def test_subdivide_trianglemesh_5_iter(self, vertices_icosahedron, faces_icosahedron):
new_vertices, new_faces = subdivide_trianglemesh(vertices_icosahedron, faces_icosahedron, 5)
# check total area of all faces
assert torch.allclose(mesh.face_areas(new_vertices, new_faces).sum(),
torch.tensor([6.2005], dtype=new_vertices.dtype, device=new_faces.device), atol=1e-04)
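# Each subdivision iteration splits every triangle into 4, so 5 iterations multiply the face count by 4**5.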
assert new_faces.shape[0] == faces_icosahedron.shape[0] * 4 ** 5
|
nilq/baby-python
|
python
|
# Andrew Boslett
# Rochester Data Science Consortium
# Email: andrew_boslett@urmc.rochester.edu
# Set options
import arcpy
import os
import csv
import sys
# Set up environments
arcpy.env.overwriteOutput = True
box_dir = 'C:/Users/aboslett/Box'
pers_dir = 'C:/Users/aboslett/Documents'
if not arcpy.Exists(os.path.join(box_dir + '/shale-varying/Scratch/' + 'Spatial_Data.gdb')):
arcpy.CreateFileGDB_management(out_folder_path = os.path.join(box_dir + '/shale-varying/Scratch/'),
out_name= 'Spatial_Data',
out_version="CURRENT")
print "New geodatabase created"
else:
print "Geodatabase already exists"
# Project databases
for fff in ['ShalePlays_US_EIA_Sep2019', 'tl_2020_us_county']:
arcpy.Project_management(
in_dataset = os.path.join(box_dir + '/shale-varying/Data/GIS/' + fff + '.shp'),
out_dataset = os.path.join(box_dir + '/shale-varying/Data/GIS/' + fff + '_prj.shp'),
out_coor_system="PROJCS['USA_Contiguous_Albers_Equal_Area_Conic',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Albers'],PARAMETER['False_Easting',0.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-96.0],PARAMETER['Standard_Parallel_1',29.5],PARAMETER['Standard_Parallel_2',45.5],PARAMETER['Latitude_Of_Origin',37.5],UNIT['Meter',1.0]]",
preserve_shape="NO_PRESERVE_SHAPE",
max_deviation="",
vertical="NO_VERTICAL")
# Create and export near table
# (1) Shale play connection
arcpy.GenerateNearTable_analysis(
near_features = os.path.join(box_dir + "/shale-varying/Data/GIS/" + 'ShalePlays_US_EIA_Sep2019_prj.shp'),
in_features = os.path.join(box_dir + "/shale-varying/Data/GIS/" + "tl_2020_us_county_prj.shp"),
out_table = os.path.join(box_dir + '/shale-varying/Scratch/' + 'Spatial_Data.gdb' + '/USCB_County_to_USEIA_Shale_Play'),
search_radius = "50 MILES",
location = "NO_LOCATION",
angle = "NO_ANGLE",
closest = "ALL",
closest_count = "",
method = "PLANAR")
arcpy.TableToTable_conversion(
in_rows = os.path.join(box_dir + '/shale-varying/Scratch/' + 'Spatial_Data.gdb' + '/USCB_County_to_USEIA_Shale_Play'),
out_path= box_dir + '/shale-varying/Scratch/',
out_name= 'USCB_County_to_USEIA_Shale_Play_50Miles' + '.csv')
# (2) Wells
arcpy.GenerateNearTable_analysis(
near_features = os.path.join(box_dir + "/shale-varying/Data/GIS/" + 'National_HD_Wells.shp'),
in_features = os.path.join(box_dir + "/shale-varying/Data/GIS/" + "tl_2020_us_county_prj.shp"),
out_table = os.path.join(box_dir + '/shale-varying/Scratch/' + 'Spatial_Data.gdb' + '/USCB_County_to_DI_HD_Wells'),
search_radius = "5 MILES",
location = "NO_LOCATION",
angle = "NO_ANGLE",
closest = "ALL",
closest_count = "",
method = "PLANAR")
arcpy.TableToTable_conversion(
in_rows = os.path.join(box_dir + '/shale-varying/Scratch/' + 'Spatial_Data.gdb' + '/USCB_County_to_DI_HD_Wells'),
out_path= box_dir + '/shale-varying/Scratch/',
out_name= 'USCB_County_to_DI_HD_Wells_5Miles' + '.csv')
# Add square miles calculation to county shapefile
arcpy.management.AddField(os.path.join(box_dir + "/shale-varying/Data/GIS/" + "tl_2020_us_county_prj.shp"),
'sq_miles',
'DOUBLE')
arcpy.management.CalculateField(os.path.join(box_dir + "/shale-varying/Data/GIS/" + "tl_2020_us_county_prj.shp"),
'sq_miles',
'!shape.area@SQUAREMILES!',
'PYTHON_9.3')
|
nilq/baby-python
|
python
|
import unittest
import xmlrunner
import secrets
import pagemodels.videopage
import tests.pickledlogin
import browserconfig
# TEST CATEGORIES
# 1.) Pause Tests
# 2.) Mute Tests
# 3.) Volume Tests
# 4.) Full screen Tests
# 5.) Audio&Subtitles Tests
# 6.) Skip_forward/backward Tests
# 7.) Time/Duration Tests
# 8.) Exit Player Tests
# 9.) Keyboard shortcuts TODO- 'f' for fullscreen, 'm' for mute, etc.
# HELP
# If it is unclear what these tests are doing, see this video example of execution:
# https://gyazo.com/7c703e6bba5af706849052df65772089
# STATUS
# 2020-04-22: All tests passing.
# Some tests fail intermittently; every failure is on an assertAlmostEqual delta.
# BUG
# Flaky failures appeared after the wait was added in change_current_time
# (waiting for the UI to disappear after mousing to the center of the player). Needs a fix.
class VideoPageTests(unittest.TestCase):
"""The following tests test basic use cases for Netflix's video player(dubbed 'Akira player'
by Netflix). The individual actions are defined at ./pagemodels/videopage.py"""
@classmethod
def setUpClass(cls):
"""Launch the webdriver of choice with selected options(see browserconfig.py).
Then login using pickled cookies(see tests/pickledlogin.py)."""
if browserconfig.current_browser in ['chrome', 'firefox']:
cls.driver = browserconfig.driver_runner(
executable_path=browserconfig.driver_path,
desired_capabilities=browserconfig.capabilities
)
elif browserconfig.current_browser == 'edge':
cls.driver = browserconfig.driver_runner(
executable_path=browserconfig.driver_path,
capabilities=browserconfig.capabilities
)
tests.pickledlogin.pickled_login(cls.driver)
@classmethod
def tearDownClass(cls):
"""Closes the browser and shuts down the driver executable."""
cls.driver.quit()
def setUp(self):
"""Load some random movie, Avengers: Infinity War in this instance, stored in secrets.py"""
self.driver.get(secrets.URL_OF_VIDEO_TO_TEST)
video_page = pagemodels.videopage.VideoPage(self.driver)
video_page.initial_spinner_wait() # Wait for the player to load
# PAUSE TESTS
def test_pause_from_unpaused_state(self):
"""From an unpaused state, pause the player."""
video_page = pagemodels.videopage.VideoPage(self.driver)
self.assertFalse(
video_page.player_is_paused(),
msg="pause_from_unpaused_state wasnt an unpaused state"
)
video_page.pause_player()
self.assertTrue(
video_page.player_is_paused(),
msg="pause_from_unpaused_state major test failed"
)
# TEST CLEANUP- Netflix's Akira player remembers the paused state in the next test.
video_page.unpause_player()
def test_unpause_from_paused_state(self):
"""From a paused state, unpause the player."""
video_page = pagemodels.videopage.VideoPage(self.driver)
video_page.pause_player()
self.assertTrue(
video_page.player_is_paused(),
msg="unpause_from_paused_state wasnt paused state"
)
video_page.unpause_player()
self.assertFalse(
video_page.player_is_paused(),
msg="unpause_from_paused_state major test failed"
)
# MUTE TESTS
def test_unmute_from_muted_state(self):
"""From a muted state, unmute the player."""
video_page = pagemodels.videopage.VideoPage(self.driver)
video_page.mute_player()
self.assertTrue(
video_page.player_is_muted(),
msg="test_unmute_from_muted_state isnt a muted_state"
)
video_page.unmute_player()
self.assertFalse(
video_page.player_is_muted(),
msg="test_unmute_from_muted_state failed to unmute the player"
)
def test_mute_from_unmuted_state(self):
"""From an unmuted state, mute the player."""
video_page = pagemodels.videopage.VideoPage(self.driver)
self.assertFalse(
video_page.player_is_muted(),
msg="test_mute_from_unmuted_state isnt an unumuted state"
)
video_page.mute_player()
self.assertTrue(
video_page.player_is_muted(),
msg="test_mute_from_unmuted_state failed to mute the player"
)
# TEST CLEANUP
video_page.unmute_player()
# VOLUME TESTS
def test_cut_volume_in_half(self):
"""Whatever the current volume is, cut it in half using the volume slider."""
# There is a lot going on under the hood with .get_current_volume() and
# .change_volume_using_percentage() . Check out /pagemodels/videopage.py
video_page = pagemodels.videopage.VideoPage(self.driver)
current_volume = video_page.get_current_volume() # returns a float 0 <= x <= 1
target_volume = current_volume/2
video_page.change_volume_using_percentage(target_volume)
new_volume = video_page.get_current_volume()
self.assertAlmostEqual(new_volume, target_volume, delta=0.02)
# TEST CLEANUP- default state is 50% volume (not strictly enforced, but the recommended state).
video_page.change_volume_using_percentage(.5)
def test_double_volume(self):
"""Double the current volume (upper limit 100%) using the volume slider."""
video_page = pagemodels.videopage.VideoPage(self.driver)
current_volume = video_page.get_current_volume() # returns a float 0 <= x <= 1
target_volume = current_volume*2
if target_volume > 1:
# If double the volume is greater than 100%, set target to 100%.
target_volume = 1
video_page.change_volume_using_percentage(target_volume)
new_volume = video_page.get_current_volume()
self.assertAlmostEqual(new_volume, target_volume, delta=0.02)
def test_set_volume_to_33_percent(self):
"""Set the current volume to 33 percent using the volume slider."""
video_page = pagemodels.videopage.VideoPage(self.driver)
video_page.change_volume_using_percentage(.33)
new_volume = video_page.get_current_volume()
self.assertAlmostEqual(new_volume, .33, delta=.02)
# FULLSCREEN TESTS
def test_full_screen_from_normal_screen_state(self):
"""From a normal_screen state, go full screen."""
video_page = pagemodels.videopage.VideoPage(self.driver)
self.assertFalse(
video_page.player_is_full_screen(),
msg="full screen_from_normal_screen was not a normal screen state"
)
video_page.make_full_screen()
self.assertTrue(
video_page.player_is_full_screen(),
msg="full screen_from_normal_screen failed to make the player go full screen"
)
def test_normal_screen_from_full_screen_state(self):
"""From a full screen state, go normal screen."""
video_page = pagemodels.videopage.VideoPage(self.driver)
video_page.make_full_screen()
self.assertTrue(
video_page.player_is_full_screen(),
msg="normal_screen_from_full_screen_state was not a full screen state"
)
video_page.make_normal_screen()
self.assertFalse(
video_page.player_is_full_screen(),
msg="normal_screen_from_full_screen_state failed to make the screen normal screen"
)
# AUDIO AND SUBTITLES TESTS
def test_add_subtitles_from_no_subtitles_state(self):
"""From a state of no subtitles, add english subtitles."""
video_page = pagemodels.videopage.VideoPage(self.driver)
# Adding an extra step here to clean up subtitles if altered during personal use.
if video_page.has_subtitles():
video_page.remove_subtitles()
self.assertFalse(
video_page.has_subtitles(),
msg="add_subitles_from_no_subtitles_state was not a no subtitles state from start,\
THIS COULD HAVE BEEN CAUSED BY PERSONAL VIEWING BY YOU"
)
video_page.add_english_subtitles()
self.assertTrue(
video_page.has_subtitles(),
msg="add_subitles_from_no_subtitles_state failed to add (english) subtitles "
)
# TEST CLEANUP- Netflix's Akira player remembers subtitles from previous viewings.
video_page.remove_subtitles()
def test_remove_subtitles_from_subtitle_state(self):
"""From a state with subtitles, remove subtitles."""
video_page = pagemodels.videopage.VideoPage(self.driver)
if not video_page.has_subtitles():
video_page.add_english_subtitles()
self.assertTrue(
video_page.has_subtitles(),
msg="remove_subtitles_from_subtitle_state was not a subtitle state from start"
)
video_page.remove_subtitles()
self.assertFalse(
video_page.has_subtitles(),
msg="remove_subtitles_from_subtitle_state failed to remove subtitles"
)
def test_change_audio_to_spanish_from_english_state(self):
"""From english audio state, change to spanish audio."""
video_page = pagemodels.videopage.VideoPage(self.driver)
# Default state is always english.
current_audio = video_page.get_current_audio()
self.assertIn(
'nglish',
current_audio,
msg="test_change_audio_to_spanish_from_english_state wasnt an english state"
)
video_page.change_audio_to_spanish()
new_audio = video_page.get_current_audio()
self.assertIn(
'anish',
new_audio,
msg="test_change_audio_to_spanish_from_english_state failed to change audio spanish"
)
# TEST CLEANUP
video_page.change_audio_to_english_original()
def test_change_audio_to_english_from_spanish_state(self):
"""From spanish audio state, change to english audio."""
        # NOTE - this uses English (Original), not plain English; it doesn't work on shows whose original audio isn't English.
video_page = pagemodels.videopage.VideoPage(self.driver)
# DEFAULT STATE IS ALWAYS ENGLISH
video_page.change_audio_to_spanish()
current_audio = video_page.get_current_audio()
self.assertIn(
'anish',
current_audio,
msg="test_change_audio_to_english_from_spanish_state wasnt a Spanish state"
)
video_page.change_audio_to_english_original()
new_audio = video_page.get_current_audio()
self.assertIn(
'nglish',
new_audio,
msg="test_change_audio_to_english_from_spanish_state failed to change audio English"
)
# SKIP FORWARD/BACKWARD TESTS
def test_skip_forward_30_seconds(self):
"""Using the skip forwad button, skip forwad 30 seconds."""
video_page = pagemodels.videopage.VideoPage(self.driver)
current_time = video_page.get_remaining_time_in_seconds()
video_page.skip_forward()
video_page.skip_forward()
video_page.skip_forward()
new_time = video_page.get_remaining_time_in_seconds()
        # Skipping forward reduces the remaining time.
        # When paused, delta < 0.01. When not paused and good connection, delta < 5.
        self.assertAlmostEqual(current_time - 30, new_time, delta=5)
def test_skip_back_30_seconds(self):
"""Using the skip back button, skip back 30 seconds."""
        # Skipping back at 0:00 will cause the test to fail even though the act of skipping
        # back three times will not fail (it's possible to press skip back at 0:00).
video_page = pagemodels.videopage.VideoPage(self.driver)
current_remaining_time = video_page.get_remaining_time_in_seconds()
show_duration = video_page.get_show_duration_in_seconds()
self.assertGreater(
show_duration-current_remaining_time,
35,
msg="test_skip_back_30_seconds can't skip back when the video isnt even 30 seconds in"
)
video_page.skip_backward()
video_page.skip_backward()
video_page.skip_backward()
new_remaining_time = video_page.get_remaining_time_in_seconds()
        # Skipping backward increases the remaining time.
        # When paused, delta < 0.01. When not paused and good connection, delta < 5.
        self.assertAlmostEqual(current_remaining_time + 30, new_remaining_time, delta=5)
# # # TIME/DURATION TESTS
def test_go_to_halfway_point(self):
"""Go to the halfway point in the show/movie using the duration slider."""
video_page = pagemodels.videopage.VideoPage(self.driver)
video_page.change_to_percentage_time(.5)
show_duration = video_page.get_show_duration_in_seconds()
current_remaining_time = video_page.get_remaining_time_in_seconds()
# print(f" show duration is {show_duration}")
# print(f"current time is {current_time}")
self.assertAlmostEqual(show_duration/2, current_remaining_time, delta=10)
# Largest observed delta is 6.5 seconds. Not sure what is causing this delta,
# seems to be intermittent. Could be the off by a pixel again. BUG- low priority
# Maybe it would be eliminated by making a .get_current_time_in_seconds function
# instead of relying on .get_remaining_time_in_seconds()
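        # A minimal sketch of that hypothetical helper (it does not exist in the page
        # model yet), built only from the two getters that are already used above:
        #
        #     def get_current_time_in_seconds(self):
        #         """Elapsed playback time: total duration minus the remaining time."""
        #         return self.get_show_duration_in_seconds() - self.get_remaining_time_in_seconds()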
def test_restart_show(self):
"""Restart a show by setting the percentage_time to 0."""
video_page = pagemodels.videopage.VideoPage(self.driver)
video_page.change_to_percentage_time(0)
current_remaining_time = video_page.get_remaining_time_in_seconds()
show_duration = video_page.get_show_duration_in_seconds()
self.assertAlmostEqual(current_remaining_time, show_duration, delta=5)
# TEST CLEANUP- halfway point is the default state
video_page.change_to_percentage_time(.5)
# EXIT PLAYER TESTS
def test_exit_player(self):
"""Exit the player by clicking the built-in back arrow button."""
video_page = pagemodels.videopage.VideoPage(self.driver)
video_page.go_back_to_shows()
self.assertNotIn('watch', self.driver.current_url)
        # When watching a show, the URL structure is "https://www.netflix.com/watch/600230..."
    # # # # # # # TESTS THAT DIDN'T MAKE THE FIRST CUT
    # # # # GO TO CREDITS COMPLICATES THINGS
    # # # # TODO - I NEED A videopage FUNCTION TO "watch credits" to redisplay the scrubber
# def test_go_to_credits(self):
# """ UNTESTED, DO NOT USE"""
# """ go to the .98 point in the show/movie USING THE SLIDER"""
# video_page = pagemodels.videopage.VideoPage(self.driver)
# video_page.change_to_percentage_time(.98)
# show_duration = video_page.get_show_duration_in_seconds()
# current_remaining_time = video_page.get_remaining_time_in_seconds()
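# Entry point: the suite runs through xmlrunner so the results land in a
# JUnit-style XML report that CI servers such as Jenkins can parse.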
if __name__ == '__main__':
with open(r'xmltestresults\pretestresults.xml', 'wb') as output:
unittest.main(
testRunner=xmlrunner.XMLTestRunner(output=output),
failfast=False, buffer=False, catchbreak=False)
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import Project
class ProjectAdmin(admin.ModelAdmin):
fields = ("name", "public_key", "created_at", "updated_at")
readonly_fields = ("public_key", "created_at", "updated_at")
list_display = ("name",)
def has_add_permission(self, request):
return False
admin.site.register(Project, ProjectAdmin)
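# Note: the same registration could be written with Django's decorator form, which
# keeps the binding next to the class definition (an equivalent alternative, not a
# behaviour change):
#
#     @admin.register(Project)
#     class ProjectAdmin(admin.ModelAdmin):
#         ...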
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.6 on 2021-08-21 09:14
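# Auto-generated migration: updates the MaterialBlocks verbose names and re-declares
# its `block` FileField (upload path and verbose name).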
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('education', '0018_alter_materialblocks_color'),
]
operations = [
migrations.AlterModelOptions(
name='materialblocks',
options={'verbose_name': 'Блок материала', 'verbose_name_plural': 'Блоки материала'},
),
migrations.AlterField(
model_name='materialblocks',
name='block',
field=models.FileField(upload_to='templates/education/material_sections', verbose_name='Информация блока'),
),
]
|
nilq/baby-python
|
python
|