id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
159954 | <reponame>cyph3r-exe/python-practice-files
"""
Using nested loops to iterate over
all the items in a matrix list
"""
#Defining a matrix list
matrix = [
[1,2,3],
[4,5,6],
[7,8,9]
]
#This loop will iterate over all the items inside the matrix list
for row in matrix:
for index in row:
print(index)
| StarcoderdataPython |
3243967 | import numpy as np
from homog.util import jit, guvec, float32, float64
def is_valid_quat_rot(quat):
    """Return a boolean (array) that is True where the quaternion(s) along
    the trailing axis have unit norm."""
    assert quat.shape[-1] == 4
    norms = np.linalg.norm(quat, axis=-1)
    return np.isclose(1, norms)
def quat_to_upper_half(quat):
    """Canonicalize quaternion signs so the first nonzero component is
    non-negative (q and -q encode the same rotation).

    Args:
        quat: array of shape (..., 4).

    Returns:
        A copy of ``quat`` with the offending entries negated.
    """
    w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]
    # Lexicographic sign test: flip when the first nonzero of (w, x, y, z)
    # is negative.  The original spelled logical and/or with `*`/`+` on bool
    # arrays and carried commented-out debug prints; `&`/`|` are explicit.
    flip = (w < 0) | (
        (w == 0) & ((x < 0) | (
            (x == 0) & ((y < 0) | (
                (y == 0) & (z < 0))))))
    quat = quat.copy()
    quat[flip] = -quat[flip]
    return quat
@jit
def kernel_quat_to_upper_half(quat, ret):
    """Write into ``ret`` the sign-canonicalized copy of the length-4
    quaternion ``quat`` (first nonzero component made non-negative).
    ``ret`` may alias ``quat``."""
    flip = quat[0] < 0
    if not flip and quat[0] == 0:
        flip = quat[1] < 0
        if not flip and quat[1] == 0:
            flip = quat[2] < 0
            if not flip and quat[2] == 0:
                flip = quat[3] < 0
    sign = -1.0 if flip else 1.0
    for i in range(4):
        ret[i] = sign * quat[i]
@jit
def numba_quat_to_upper_half(quat):
    """Allocating wrapper over kernel_quat_to_upper_half for numba callers."""
    canonical = np.empty(4, dtype=quat.dtype)
    kernel_quat_to_upper_half(quat, canonical)
    return canonical
def rand_quat(shape=()):
    """Sample uniformly random unit quaternions of the given batch shape,
    canonicalized to the upper hemisphere.  An int shape means (shape,)."""
    if isinstance(shape, int):
        shape = (shape,)
    raw = np.random.randn(*(tuple(shape) + (4,)))
    unit = raw / np.linalg.norm(raw, axis=-1, keepdims=True)
    return quat_to_upper_half(unit)
def rot_to_quat(xform):
    """Convert rotation matrices to quaternions in (w, x, y, z) order.

    Standard four-case matrix-to-quaternion extraction: the branch is chosen
    by whichever of the trace / diagonal entries is largest, keeping the
    divisor S well away from zero.

    Args:
        xform: array of shape (..., 3, 3) or (..., 4, 4); only the upper-left
            3x3 rotation block is read.

    Returns:
        array of shape (..., 4), canonicalized via quat_to_upper_half.
    """
    x = np.asarray(xform)
    t0, t1, t2 = x[..., 0, 0], x[..., 1, 1], x[..., 2, 2]
    tr = t0 + t1 + t2
    quat = np.empty(x.shape[:-2] + (4, ))
    # Case 0: positive trace -- w is the dominant component.
    case0 = tr > 0
    S0 = np.sqrt(tr[case0] + 1) * 2
    quat[case0, 0] = 0.25 * S0
    quat[case0, 1] = (x[case0, 2, 1] - x[case0, 1, 2]) / S0
    quat[case0, 2] = (x[case0, 0, 2] - x[case0, 2, 0]) / S0
    quat[case0, 3] = (x[case0, 1, 0] - x[case0, 0, 1]) / S0
    # Case 1: R[0,0] dominates the diagonal -- qx is dominant.
    # (`~`, `*` on bool arrays act as logical not/and.)
    case1 = ~case0 * (t0 >= t1) * (t0 >= t2)
    S1 = np.sqrt(1.0 + x[case1, 0, 0] - x[case1, 1, 1] - x[case1, 2, 2]) * 2
    quat[case1, 0] = (x[case1, 2, 1] - x[case1, 1, 2]) / S1
    quat[case1, 1] = 0.25 * S1
    quat[case1, 2] = (x[case1, 0, 1] + x[case1, 1, 0]) / S1
    quat[case1, 3] = (x[case1, 0, 2] + x[case1, 2, 0]) / S1
    # Case 2: R[1,1] dominates -- qy is dominant.
    case2 = ~case0 * (t1 > t0) * (t1 >= t2)
    S2 = np.sqrt(1.0 + x[case2, 1, 1] - x[case2, 0, 0] - x[case2, 2, 2]) * 2
    quat[case2, 0] = (x[case2, 0, 2] - x[case2, 2, 0]) / S2
    quat[case2, 1] = (x[case2, 0, 1] + x[case2, 1, 0]) / S2
    quat[case2, 2] = 0.25 * S2
    quat[case2, 3] = (x[case2, 1, 2] + x[case2, 2, 1]) / S2
    # Case 3: R[2,2] dominates -- qz is dominant.
    case3 = ~case0 * (t2 > t0) * (t2 > t1)
    S3 = np.sqrt(1.0 + x[case3, 2, 2] - x[case3, 0, 0] - x[case3, 1, 1]) * 2
    quat[case3, 0] = (x[case3, 1, 0] - x[case3, 0, 1]) / S3
    quat[case3, 1] = (x[case3, 0, 2] + x[case3, 2, 0]) / S3
    quat[case3, 2] = (x[case3, 1, 2] + x[case3, 2, 1]) / S3
    quat[case3, 3] = 0.25 * S3
    # Sanity check: every input matrix fell in exactly one case.
    assert (np.sum(case0) + np.sum(case1) + np.sum(case2) +
            np.sum(case3) == np.prod(xform.shape[:-2]))
    return quat_to_upper_half(quat)
# Alias: 4x4 homogeneous transforms go through the same entry point (only the
# rotation block is read).
xform_to_quat = rot_to_quat
@jit
def kernel_rot_to_quat(xform, quat):
    """Single-matrix version of rot_to_quat, written for numba.

    Fills ``quat`` in place with the (w, x, y, z) quaternion of the 3x3
    rotation block of ``xform``, then canonicalizes the sign in place.
    """
    t0, t1, t2 = xform[0, 0], xform[1, 1], xform[2, 2]
    tr = t0 + t1 + t2
    # Same four-branch extraction as rot_to_quat: divide by the largest of
    # w/x/y/z to keep the divisor S away from zero.
    if tr > 0:
        S0 = np.sqrt(tr + 1) * 2
        quat[0] = 0.25 * S0
        quat[1] = (xform[2, 1] - xform[1, 2]) / S0
        quat[2] = (xform[0, 2] - xform[2, 0]) / S0
        quat[3] = (xform[1, 0] - xform[0, 1]) / S0
    elif t0 >= t1 and t0 >= t2:
        S1 = np.sqrt(1.0 + xform[0, 0] - xform[1, 1] - xform[2, 2]) * 2
        quat[0] = (xform[2, 1] - xform[1, 2]) / S1
        quat[1] = 0.25 * S1
        quat[2] = (xform[0, 1] + xform[1, 0]) / S1
        quat[3] = (xform[0, 2] + xform[2, 0]) / S1
    elif t1 > t0 and t1 >= t2:
        S2 = np.sqrt(1.0 + xform[1, 1] - xform[0, 0] - xform[2, 2]) * 2
        quat[0] = (xform[0, 2] - xform[2, 0]) / S2
        quat[1] = (xform[0, 1] + xform[1, 0]) / S2
        quat[2] = 0.25 * S2
        quat[3] = (xform[1, 2] + xform[2, 1]) / S2
    elif t2 > t0 and t2 > t1:
        S3 = np.sqrt(1.0 + xform[2, 2] - xform[0, 0] - xform[1, 1]) * 2
        quat[0] = (xform[1, 0] - xform[0, 1]) / S3
        quat[1] = (xform[0, 2] + xform[2, 0]) / S3
        quat[2] = (xform[1, 2] + xform[2, 1]) / S3
        quat[3] = 0.25 * S3
    # Flip the sign in place so the first nonzero component is non-negative.
    kernel_quat_to_upper_half(quat, quat)
@jit
def numba_rot_to_quat(xform):
    """Allocating wrapper over kernel_rot_to_quat for numba-compiled callers."""
    out = np.empty(4, dtype=xform.dtype)
    kernel_rot_to_quat(xform, out)
    return out
# Vectorized (gufunc) form of kernel_rot_to_quat: maps batches of (n, n)
# rotation matrices to (n,) quaternions, for float64 and float32 inputs.
gu_rot_to_quat = guvec([
    (float64[:, :], float64[:]),
    (float32[:, :], float32[:]),
], '(n,n)->(n)', kernel_rot_to_quat)
def quat_to_rot(quat, dtype='f8', shape=(3, 3)):
    """Convert quaternion(s) in (w, x, y, z) order to rotation matrices.

    Only the leading 3x3 entries are written; pass ``shape=(4, 4)`` to leave
    room for a homogeneous transform (remaining entries stay zero).
    """
    quat = np.asarray(quat)
    assert quat.shape[-1] == 4
    w, x, y, z = (quat[..., axis] for axis in range(4))
    rot = np.zeros(quat.shape[:-1] + shape, dtype=dtype)
    rot[..., 0, 0] = 1 - 2 * (y * y + z * z)
    rot[..., 0, 1] = 2 * (x * y - z * w)
    rot[..., 0, 2] = 2 * (x * z + y * w)
    rot[..., 1, 0] = 2 * (x * y + z * w)
    rot[..., 1, 1] = 1 - 2 * (x * x + z * z)
    rot[..., 1, 2] = 2 * (y * z - x * w)
    rot[..., 2, 0] = 2 * (x * z - y * w)
    rot[..., 2, 1] = 2 * (y * z + x * w)
    rot[..., 2, 2] = 1 - 2 * (x * x + y * y)
    return rot
def quat_to_xform(quat, dtype='f8'):
    """Quaternion(s) -> 4x4 homogeneous transform(s) with zero translation."""
    xform = quat_to_rot(quat, dtype, shape=(4, 4))
    xform[..., 3, 3] = 1
    return xform
def quat_multiply(q, r):
    """Return the quaternion product q * r (Hamilton convention, components
    in (w, x, y, z) order).

    Both inputs are (..., 4) array-likes and are broadcast against each
    other; the result has the broadcast shape.
    """
    q, r = np.broadcast_arrays(q, r)
    q0, q1, q2, q3 = np.moveaxis(q, -1, 0)
    r0, r1, r2, r3 = np.moveaxis(r, -1, 0)
    # The original asserted np.all(q1 == q[..., 1]) here -- a tautology of
    # moveaxis that cost a full array comparison per call; it is removed.
    t = np.empty_like(q)
    t[..., 0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3
    t[..., 1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2
    t[..., 2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1
    t[..., 3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0
    return t
@jit
def kernel_quat_multiply(q, r, out):
    """Quaternion product of two length-4 arrays, stored into ``out``
    ((w, x, y, z) order); written for numba compilation."""
    out[0] = r[0] * q[0] - r[1] * q[1] - r[2] * q[2] - r[3] * q[3]
    out[1] = r[0] * q[1] + r[1] * q[0] - r[2] * q[3] + r[3] * q[2]
    out[2] = r[0] * q[2] + r[1] * q[3] + r[2] * q[0] - r[3] * q[1]
    out[3] = r[0] * q[3] - r[1] * q[2] + r[2] * q[1] + r[3] * q[0]
# Vectorized (gufunc) form of kernel_quat_multiply for float64 batches.
gu_quat_multiply = guvec([(float64[:], float64[:], float64[:])],
                         '(n),(n)->(n)', kernel_quat_multiply)
@jit
def numba_quat_multiply(q, r):
    """Allocating wrapper over kernel_quat_multiply for numba callers."""
    product = np.empty(4, dtype=q.dtype)
    kernel_quat_multiply(q, r, product)
    return product
| StarcoderdataPython |
3223441 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-09 08:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: alters GeoRegion.part_of to be
    # optional (blank/null allowed) with SET_NULL on parent deletion.

    dependencies = [
        ("georegion", "0001_initial_squashed_0004_auto_20180307_2026"),
    ]

    operations = [
        migrations.AlterField(
            model_name="georegion",
            name="part_of",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="georegion.GeoRegion",
                verbose_name="Part of",
            ),
        ),
    ]
| StarcoderdataPython |
1764984 | <filename>python/read_data.py<gh_stars>0
import os, sys
sys.path.append('../application')
import math
import matplotlib.pyplot as plt
import random
import time
import serial
from Point import *
import re
HOST = 'nb-arnault4'
PORT = 5000
# speed = 9600
speed = 115200
def func(t=0.0):
    """Return sin(t).

    The original body read a module-global ``t`` that is never defined at
    module scope (``t`` only exists as a local inside using_points), so any
    call raised NameError.  ``t`` is now an explicit parameter with a default
    of 0.0, which keeps the zero-argument call signature working.
    """
    # return random.random()
    return math.sin(t)
def using_points():
    """Replay recorded point data from a CSV file through a live matplotlib
    plot, optionally forwarding each sample to an Arduino over serial.

    NOTE(review): depends on ``Plotter`` coming from ``from Point import *``,
    on a hard-coded COM5 serial port, and on a relative CSV path -- confirm
    all three before reuse.
    """
    origin = time.time()
    # Interactive mode so the figure refreshes as points arrive.
    plt.ion()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plotter = Plotter(fig, ax)
    # Regex fragments: pv_in matches one comma-terminated field, pv_end the
    # final field on the line.
    pv_end = '(.+)'
    pv_in = '([^,]+)[,]'
    # Header line: 4 fields (min_x, max_x, min_y, max_y).
    pv_ranges = pv_in*3 + pv_end + '$'
    # Data line: 5 fields (t, x1, y1, x2, y2).
    pv_data = pv_in*4 + pv_end + '$'
    t0 = time.time()
    try:
        arduino = serial.Serial('COM5', speed, timeout=.1)
    except:
        # Best-effort: keep plotting even when the serial port is absent.
        arduino = None
    number = 0
    ##with open("../application/data.csv", "r") as f:
    with open("../application/Carnutes.csv", "r") as f:
        lines = f.readlines()
        first = True
        for line in lines:
            if first:
                # The first matching line carries the axis ranges; use it to
                # configure both traces, then start plotting.
                first = False
                m = re.match(pv_ranges, line)
                if m is None:
                    continue
                min_x = float(m[1])
                max_x = float(m[2])
                min_y = float(m[3])
                max_y = float(m[4])
                left = plotter.add_point("left", 1, colorx="r", colory="g")
                left.set_x_range(min_x, max_x)
                left.set_y_range(min_y, max_y)
                right = plotter.add_point("right", 0, colorx="b", colory="y")
                right.set_x_range(min_x, max_x)
                right.set_y_range(min_y, max_y)
                plotter.start_plotting()
                continue
            m = re.match(pv_data, line)
            if m is None:
                continue
            t = float(m[1])
            x1 = float(m[2])
            y1 = float(m[3])
            x2 = float(m[4])
            y2 = float(m[5])
            # y axis is flipped (1000 - y) before plotting.
            plotter.plot(t - t0, [x1, x2], [1000 - y1, 1000 - y2])
            if not arduino is None:
                # Forward the sample, downscaled by 8, as "n|x1|y1|x2|y2#".
                arduino.write("{}|{}|{}|{}|{}#".format(number, int(x1/8), int(y1/8), int(x2/8), int(y2/8)).encode("utf-8"))
                data = arduino.readline()
                if data:
                    print("received >>>", data.strip())
                number += 1
            # print("t=", t)
            # time.sleep(random.random()*0.1)
using_points()
| StarcoderdataPython |
3368258 | <filename>tools/os.bzl
"""A collection of OS-related utilities intended for use in repository rules,
i.e., rules used by WORKSPACE files, not BUILD files.
"""
load("@slime//tools:execute.bzl", "which")
def exec_using_which(repository_ctx, command):
    """Run the given command (a list), using the which() function in
    execute.bzl to locate the executable named by the zeroth index of
    `command`.

    Return struct with attributes:
    - error (None when success, or else str message)
    - stdout (str command output, possibly empty)
    """

    # Find the executable.
    fullpath = which(repository_ctx, command[0])
    if fullpath == None:
        return struct(
            stdout = "",
            error = "could not find which '%s'" % command[0],
        )

    # Run the executable.  A nonzero exit code is reported via the `error`
    # attribute (with stdout still attached) rather than by failing.
    result = repository_ctx.execute([fullpath] + command[1:])
    if result.return_code != 0:
        error = "error %d running %r (command %r, stdout %r, stderr %r)" % (
            result.return_code,
            command[0],
            command,
            result.stdout,
            result.stderr,
        )
        return struct(stdout = result.stdout, error = error)

    # Success.
    return struct(stdout = result.stdout, error = None)
def _make_result(
        error = None,
        ubuntu_release = None,
        macos_release = None):
    """Return a fully-populated struct result for determine_os, below."""
    if ubuntu_release != None:
        distribution = "ubuntu"
    elif macos_release != None:
        distribution = "macos"
    else:
        distribution = None
    return struct(
        error = error,
        distribution = distribution,
        is_macos = macos_release != None,
        is_ubuntu = ubuntu_release != None,
        ubuntu_release = ubuntu_release,
        macos_release = macos_release,
    )
def _determine_linux(repository_ctx):
    """Handle determine_os on Linux."""

    # Shared error message text across different failure cases.
    error_prologue = "could not determine Linux distribution: "

    # Run sed to determine Linux NAME and VERSION_ID from /etc/os-release,
    # stripping the KEY= prefix and any surrounding double quotes.
    sed = exec_using_which(repository_ctx, [
        "sed",
        "-n",
        "/^\(NAME\|VERSION_ID\)=/{s/[^=]*=//;s/\"//g;p}",
        "/etc/os-release",
    ])
    if sed.error != None:
        return _make_result(error = error_prologue + sed.error)

    # Compute an identifying string, in the form of "$NAME $VERSION_ID".
    lines = [line.strip() for line in sed.stdout.strip().split("\n")]
    distro = " ".join([x for x in lines if len(x) > 0])

    # Match supported Ubuntu release(s).
    for ubuntu_release in ["16.04"]:
        if distro == "Ubuntu " + ubuntu_release:
            return _make_result(ubuntu_release = ubuntu_release)

    # Nothing matched.
    return _make_result(
        error = error_prologue + "unsupported distribution '%s'" % distro,
    )
def _determine_macos(repository_ctx):
    """Handle determine_os on macOS."""

    # Shared error message text across different failure cases.
    error_prologue = "could not determine macOS version: "

    # Run sw_vers to determine macOS version.
    sw_vers = exec_using_which(repository_ctx, [
        "sw_vers",
        "-productVersion",
    ])
    if sw_vers.error != None:
        return _make_result(error = error_prologue + sw_vers.error)

    # Keep only "major.minor" (e.g. "10.13.6" -> "10.13").
    major_minor_versions = sw_vers.stdout.strip().split(".")[:2]
    macos_release = ".".join(major_minor_versions)

    # Match supported macOS release(s).
    if macos_release in ["10.11", "10.12", "10.13"]:
        return _make_result(macos_release = macos_release)

    # Nothing matched.
    return _make_result(
        error = error_prologue + "unsupported macOS '%s'" % macos_release,
    )
def determine_os(repository_ctx):
    """
    A repository_rule helper function that determines which of the supported OS
    versions we are targeting.

    Argument:
        repository_ctx: The context passed to the repository_rule calling this.

    Result:
        a struct, with attributes:
        - error: str iff any error occurred, else None
        - distribution: str either "ubuntu" or "macos" if no error
        - is_macos: True iff on a supported macOS release, else False
        - macos_release: str like "10.13" iff on a supported macOS, else None
        - is_ubuntu: True iff on a supported Ubuntu version, else False
        - ubuntu_release: str like "16.04" iff on a supported ubuntu, else None
    """
    # Bazel reports "mac os x" on macOS and "linux" on Linux.
    os_name = repository_ctx.os.name
    if os_name == "mac os x":
        return _determine_macos(repository_ctx)
    elif os_name == "linux":
        return _determine_linux(repository_ctx)
    else:
        return _make_result(error = "unknown or unsupported OS '%s'" % os_name)
def os_specific_alias(repository_ctx, mapping):
    """
    A repository_rule helper function that creates a BUILD file with alias()
    declarations based on which supported OS version we are targeting.

    Argument:
        repository_ctx: The context passed to the repository_rule calling this.
        mapping: dict(str, list(str)) where the keys match the OS, and the list
            of values are of the form name=actual as in alias(name, actual).
            The keys of mapping are searched in the following preferential order:
            - Exact release, via e.g., "Ubuntu 16.04" or "macOS 10.12"
            - Any release, via "Ubuntu default" or "macOS default"
            - Anything else, via "default"
    """
    os_result = determine_os(repository_ctx)
    if os_result.error != None:
        fail(os_result.error)

    # Find the best match in the mapping dict for our OS.
    keys = []
    if os_result.ubuntu_release:
        keys = [
            "Ubuntu " + os_result.ubuntu_release,
            "Ubuntu default",
            "default",
        ]
    elif os_result.macos_release:
        keys = [
            "macOS " + os_result.macos_release,
            "macOS default",
            "default",
        ]
    # The first key present in the mapping wins (most specific first).
    found_items = None
    for key in keys:
        if key in mapping:
            found_items = mapping[key]
            break
    if not found_items:
        fail("Unsupported os_result " + repr(os_result))

    # Emit the list of aliases.
    file_content = """# -*- python -*-
# DO NOT EDIT: generated by os_specific_alias_repository()
package(default_visibility = ["//visibility:public"])
"""
    for item in found_items:
        # Each item is a "name=actual" pair.
        name, actual = item.split("=")
        file_content += 'alias(name = "{}", actual = "{}")\n'.format(
            name,
            actual,
        )
    repository_ctx.file(
        "BUILD.bazel",
        content = file_content,
        executable = False,
    )
def _os_specific_alias_impl(repository_ctx):
    # Repository-rule entry point: forward the rule's `mapping` attribute.
    os_specific_alias(repository_ctx, repository_ctx.attr.mapping)
# Repository rule wrapper: usable from WORKSPACE files as
# os_specific_alias_repository(name = ..., mapping = {"Ubuntu 16.04": [...], ...}).
os_specific_alias_repository = repository_rule(
    attrs = {
        "mapping": attr.string_list_dict(mandatory = True),
    },
    implementation = _os_specific_alias_impl,
)
| StarcoderdataPython |
188245 | <gh_stars>0
"""
Find Largest Value in Each Tree Row
You need to find the largest value in each row of a binary tree.
Example:
Input:
1
/ \
3 2
/ \ \
5 3 9
Output: [1, 3, 9]
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
from collections import deque
from queue import PriorityQueue
class Solution(object):
    def largestValues(self, root):
        """Return the largest value on each level of a binary tree.

        Level-order traversal; each level's values are pushed (negated) into
        a min-oriented PriorityQueue so the level maximum pops out first.

        :type root: TreeNode
        :rtype: List[int]
        """
        if not root:
            return []
        answer = []
        frontier = deque([root])
        while frontier:
            level_heap = PriorityQueue()
            for _ in range(len(frontier)):
                current = frontier.popleft()
                if current.left:
                    frontier.append(current.left)
                if current.right:
                    frontier.append(current.right)
                level_heap.put(-current.val)
            answer.append(-level_heap.get())
        return answer
from collections import deque
class Solution2(object):
    def largestValues(self, root):
        """Return the largest value found on each level of a binary tree.

        Breadth-first traversal, tracking the running maximum per level, so
        the tree is visited in O(n) time with O(width) extra space.

        :type root: TreeNode
        :rtype: List[int]
        """
        if not root:
            return []
        queue = deque()
        queue.append(root)
        results = []
        while len(queue):
            size = len(queue)
            currentMax = None
            while size > 0:
                node = queue.popleft()
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
                # Idiom fix: compare to None with identity (`is not None`),
                # not equality, which value types may overload.
                currentMax = max(currentMax, node.val) if currentMax is not None else node.val
                size -= 1
            if currentMax is not None:
                results.append(currentMax)
        return results
1613598 | """Adapted from:
@longcw faster_rcnn_pytorch: https://github.com/longcw/faster_rcnn_pytorch
@rbgirshick py-faster-rcnn https://github.com/rbgirshick/py-faster-rcnn
Licensed under The MIT License [see LICENSE for details]
"""
from __future__ import print_function
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from data import VOC_ROOT, VOCAnnotationTransform, VOCDetection, BaseTransform
from data import VOC_CLASSES as labelmap
from data.coco_test import *
import torch.utils.data as data
from ssd import build_ssd
import sys
import os
import time
import argparse
import numpy as np
import pickle
import cv2
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
def str2bool(v):
    """Interpret common truthy strings ("yes", "true", "t", "1", any case)
    as True; everything else is False.  Used as an argparse `type`."""
    return v.lower() in {"yes", "true", "t", "1"}
# Module-level dataset/CLI configuration.  Side effects at import time:
# parses sys.argv, creates the save folder, and sets the torch default
# tensor type.
COCOroot = os.path.join("/media/trans/mnt", "data/coco/")

parser = argparse.ArgumentParser(
    description='Single Shot MultiBox Detector Evaluation')
parser.add_argument('--trained_model',
                    default='weights/ssd300_mAP_77.43_v2.pth', type=str,
                    help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
                    help='File path to save results')
parser.add_argument('--confidence_threshold', default=0.01, type=float,
                    help='Detection confidence threshold')
parser.add_argument('--top_k', default=5, type=int,
                    help='Further restrict the number of predictions to parse')
parser.add_argument('--cuda', default=True, type=str2bool,
                    help='Use cuda to train model')
parser.add_argument('--voc_root', default=VOC_ROOT,
                    help='Location of VOC root directory')
parser.add_argument('--cleanup', default=True, type=str2bool,
                    help='Cleanup and remove results files following eval')
args = parser.parse_args()

if not os.path.exists(args.save_folder):
    os.mkdir(args.save_folder)

# Pick CUDA vs CPU default tensors based on availability and the --cuda flag.
if torch.cuda.is_available():
    if args.cuda:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    if not args.cuda:
        print("WARNING: It looks like you have a CUDA device, but aren't using \
CUDA.  Run with --cuda for optimal eval speed.")
        torch.set_default_tensor_type('torch.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')

# VOC2007 layout templates; the %s / {:s} slots are filled with image ids.
annopath = os.path.join(args.voc_root, 'VOC2007', 'Annotations', '%s.xml')
imgpath = os.path.join(args.voc_root, 'VOC2007', 'JPEGImages', '%s.jpg')
imgsetpath = os.path.join(args.voc_root, 'VOC2007', 'ImageSets',
                          'Main', '{:s}.txt')
YEAR = '2007'
devkit_path = args.voc_root + 'VOC' + YEAR
# BGR channel means subtracted by BaseTransform.
dataset_mean = (104, 117, 123)
set_type = 'test'
class Timer(object):
    """A simple accumulating wall-clock timer: tic() starts a measurement,
    toc() stops it and folds it into the running total/average."""

    def __init__(self):
        self.total_time = 0.0
        self.calls = 0
        self.start_time = 0.0
        self.diff = 0.0
        self.average_time = 0.0

    def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop the current measurement; return the running average (default)
        or just this measurement's duration."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff
def parse_rec(filename):
    """Parse a PASCAL VOC annotation XML file.

    Returns a list of dicts, one per <object>, with the object's name, pose,
    truncated/difficult flags, and bbox as [xmin, ymin, xmax, ymax] shifted
    from 1-based to 0-based coordinates.
    """
    objects = []
    for obj in ET.parse(filename).findall('object'):
        bndbox = obj.find('bndbox')
        objects.append({
            'name': obj.find('name').text,
            'pose': obj.find('pose').text,
            'truncated': int(obj.find('truncated').text),
            'difficult': int(obj.find('difficult').text),
            'bbox': [int(bndbox.find('xmin').text) - 1,
                     int(bndbox.find('ymin').text) - 1,
                     int(bndbox.find('xmax').text) - 1,
                     int(bndbox.find('ymax').text) - 1],
        })
    return objects
def get_output_dir(name, phase):
    """Return the directory where experimental artifacts are placed.
    If the directory does not exist, it is created.

    A canonical path is built using the name from an imdb and a network
    (if not None).
    """
    filedir = os.path.join(name, phase)
    # exist_ok avoids the race between the existence check and creation that
    # the original exists()/makedirs() pair had.
    os.makedirs(filedir, exist_ok=True)
    return filedir
def test_net(save_folder, net, cuda, dataset, transform, top_k,
             im_size=300, thresh=0.05):
    """Run the detector over ``dataset`` and hand the collected detections
    to the dataset's own evaluation routine.

    NOTE(review): the entire detection loop is wrapped in ``if False:`` --
    as committed, only evaluate_detections runs, on empty ``all_boxes``.
    Presumably a leftover debugging toggle; confirm before trusting results.
    """
    num_images = len(dataset)
    print("[DEBUG] length: ", num_images)
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(num_images)]
                 for _ in range(len(labelmap) + 1)]

    # timers
    _t = {'im_detect': Timer(), 'misc': Timer(), 'total': Timer()}
    output_dir = get_output_dir('ssd300_coco_120000', set_type)
    det_file = os.path.join(output_dir, 'detections.pkl')

    if False:
        _t['total'].tic()
        for i in range(num_images):
            # print("[DEBUG] print i = ", i)
            im, gt, h, w = dataset.__getitem__(i)
            x = Variable(im.unsqueeze(0))
            if args.cuda:
                x = x.cuda()
            # print("______________________\n", x.size())
            _t['im_detect'].tic()
            detections = net(x).data
            detect_time = _t['im_detect'].toc(average=False)
            # skip j = 0, because it's the background class
            for j in range(1, detections.size(1)):
                # Keep rows with positive scores; each row is
                # (score, x1, y1, x2, y2) in normalized coordinates.
                dets = detections[0, j, :]
                mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
                dets = torch.masked_select(dets, mask).view(-1, 5)
                if dets.size(0) == 0:
                    continue
                # Scale normalized boxes back to pixel coordinates.
                boxes = dets[:, 1:]
                boxes[:, 0] *= w
                boxes[:, 2] *= w
                boxes[:, 1] *= h
                boxes[:, 3] *= h
                scores = dets[:, 0].cpu().numpy()
                cls_dets = np.hstack((boxes.cpu().numpy(),
                                      scores[:, np.newaxis])).astype(np.float32,
                                                                     copy=False)
                all_boxes[j][i] = cls_dets
            print('im_detect: {:d}/{:d} {:.3f}s'.format(i + 1,
                  num_images, detect_time), end='\r')
        total_time = _t['total'].toc()
        print("Total time: ", total_time, "\t ms: ", total_time / float(num_images))
        # Persist raw detections for later re-evaluation.
        with open(det_file, 'wb') as f:
            pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    dataset.evaluate_detections(all_boxes, output_dir)
#
# def evaluate_detections(box_list, output_dir, dataset):
# write_voc_results_file(box_list, dataset)
# do_python_eval(output_dir)
def main(trained_model):
    """Build an SSD300 detector (80 classes, COCO), load weights from
    ``trained_model``, and evaluate on the COCO 2014 minival split.

    NOTE(review): the network is moved to CUDA unconditionally before the
    `if args.cuda` branch -- confirm CPU-only runs were ever intended.
    """
    # load net
    net = build_ssd('test', 300, 80)
    # print(net)
    net = net.cuda()  # initialize SSD
    net.load_state_dict(torch.load(trained_model))
    # resume_ckpt(trained_model,net)
    net.eval()
    print('Finished loading model!')
    # load data
    # dataset = VOCDetection(args.voc_root, [('2007', set_type)],
    #                        BaseTransform(300, dataset_mean),
    #                        VOCAnnotationTransform())
    dataset = COCODetection(root=COCOroot,
                            image_sets=[('2014', 'minival')],
                            preproc=BaseTransform(300, dataset_mean))
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    # evaluation
    test_net(args.save_folder, net, args.cuda, dataset,
             BaseTransform(net.size, dataset_mean), args.top_k, 300,
             thresh=args.confidence_threshold)
def resume_ckpt(trained_model, net):
    """Load a checkpoint and strip any DataParallel 'module.' prefix from
    its keys, printing the 'vgg.0' entries for inspection.

    NOTE(review): despite its name and the ``net`` parameter, this never
    calls net.load_state_dict -- the cleaned checkpoint is only printed and
    then discarded.  Looks like a debugging helper; confirm before relying
    on it to actually resume training.
    """
    checkpoint = torch.load(trained_model)
    # print(list(checkpoint.items())[0][0])
    if 'module.' in list(checkpoint.items())[0][0]:
        # Drop the leading 'module.' component added by nn.DataParallel.
        pretrained_dict = {'.'.join(k.split('.')[1:]): v for k, v in list(checkpoint.items())}
        checkpoint = pretrained_dict
    for k, v in checkpoint.items():
        if 'vgg.0' in k:
            print(k, v)
if __name__ == "__main__":
for i in range(10):
pth = "results/DataParallel/mixupCOCO/1002/ssd300_COCO_" + str(i + 150) + ".pth"
print(pth)
# modelname = 'weights/lm/ssd300_VOC_0.pth'
# modelname = 'weights/ssd300_mAP_77.43_v2.pth'
# modelname = 'weights/mixup/ssd300_VOC_' + str(i+23) + '0.pth'
iii = i + 150
modelname = "results/DataParallel/mixup005/1002/ssd300_VOC_" + str(iii) + ".pth"
print("----------------------------------\n"
" EVAL modelname: {}\n"
"----------------------------------\n".format(modelname))
main(modelname)
# AP for aeroplane = 0.8207
# AP for bicycle = 0.8568
# AP for bird = 0.7546
# AP for boat = 0.6952
# AP for bottle = 0.5019
# AP for bus = 0.8479
# AP for car = 0.8584
# AP for cat = 0.8734
# AP for chair = 0.6136
# AP for cow = 0.8243
# AP for diningtable = 0.7906
# AP for dog = 0.8566
# AP for horse = 0.8714
# AP for motorbike = 0.8403
# AP for person = 0.7895
# AP for pottedplant = 0.5069
# AP for sheep = 0.7767
# AP for sofa = 0.7894
# AP for train = 0.8623
# AP for tvmonitor = 0.7670
# Mean AP = 0.7749
| StarcoderdataPython |
1730745 | <gh_stars>0
import os
import json
import yaml
def load_config_file(file_name):
    """Load a JSON or YAML configuration file and return the parsed content.

    Args:
        file_name: path to the config file; the format is selected from the
            extension (.json, .yaml, .yml).

    Returns:
        The deserialized configuration (typically a dict).

    Raises:
        ValueError: if the extension is not a supported config format.
    """
    _, file_extension = os.path.splitext(file_name)
    file_extension = file_extension.lower()[1:]
    if file_extension == 'json':
        # Bug fix: json.load() takes a file object; the original passed the
        # path string and raised AttributeError on every call.
        with open(file_name) as f:
            return json.load(f)
    elif file_extension in ('yaml', 'yml'):
        # Bug fix: bare yaml.load(path) would parse the *path string* as a
        # YAML document, not the file.  safe_load also avoids arbitrary
        # object construction from untrusted config files.
        with open(file_name) as f:
            return yaml.safe_load(f)
    raise ValueError('Extension {} is not supported as a valid config file'.format(file_extension))
| StarcoderdataPython |
3240416 | <reponame>benoitc/pypy<filename>pypy/jit/tl/tla/add_10.tla.py
from pypy.jit.tl.tla import tla

# Bytecode program for the tla toy interpreter.  Opcode names suggest:
# push the constant 10, ADD it to the value on the stack, RETURN the result
# (hence the file name add_10) -- confirm against the tla opcode table.
code = [
    tla.CONST_INT, 10,
    tla.ADD,
    tla.RETURN
]
| StarcoderdataPython |
3397413 | <reponame>eRuaro/Dog-Breed-Classifier
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
import pandas as pd
class Model:
    """Wrapper around a saved Keras dog-breed classifier: model loading,
    turning image files into batched tensors, and decoding predictions."""

    def load_model(self, model_path):
        """Load a saved Keras model from ``model_path``.

        ``KerasLayer`` is registered as a custom object because the saved
        model embeds a TensorFlow Hub layer.
        """
        print(f"Loading saved model from: {model_path}...")
        model = tf.keras.models.load_model(
            model_path,
            custom_objects={"KerasLayer": hub.KerasLayer}
        )
        return model

    def process_image(self, image_path, img_size=24):
        """Read an image file and return a float32 tensor of shape
        (img_size, img_size, 3) with channel values scaled into [0, 1].

        NOTE(review): the default ``img_size=24`` conflicts with the inline
        comment about resizing to (224, 224) -- confirm the intended size.
        """
        # read an image file into a variable
        image = tf.io.read_file(image_path)
        # turn image into tensors (3 channels = Red Green Blue)
        image = tf.image.decode_jpeg(image, channels=3)
        # convert color channel values from 0-255 to 0-1 (decimals)
        image = tf.image.convert_image_dtype(image, tf.float32)
        # resize image to (img_size, img_size)
        image = tf.image.resize(image, size=[img_size, img_size])
        return image

    def get_image_label(self, image_path, label):
        """Process ``image_path`` and return an ``(image_tensor, label)``
        pair.

        Bug fix: the original called the bare name ``process_image``, which
        raised NameError at runtime -- it is a method of this class.
        """
        image = self.process_image(image_path)
        return image, label

    def create_data_batches(self, X, y=None, batch_size=32, valid_data=False, test_data=False):
        """Create batches of data out of image (X) and label (y) pairs.

        Shuffles training data only; validation data keeps its order; test
        data has no labels.

        Bug fix: ``process_image`` / ``get_image_label`` were referenced as
        bare names (NameError at runtime) -- they are methods of this class.
        """
        if test_data:
            print("Creating test data batches...")
            data = tf.data.Dataset.from_tensor_slices((tf.constant(X)))
            data_batch = data.map(self.process_image).batch(batch_size)
            return data_batch
        elif valid_data:
            # if data is a valid data set, no need to shuffle
            print("Creating validation data batches...")
            data = tf.data.Dataset.from_tensor_slices((tf.constant(X), tf.constant(y)))
            data_batch = data.map(self.get_image_label).batch(batch_size)
            return data_batch
        else:
            print("Creating training data batches...")
            # turn filepaths and labels into tensors
            data = tf.data.Dataset.from_tensor_slices((tf.constant(X), tf.constant(y)))
            # shuffle pathnames and labels
            data = data.shuffle(buffer_size=len(X))
            # create (image, label) tuples; also preprocesses each image
            data = data.map(self.get_image_label)
            data_batch = data.batch(batch_size)
            return data_batch

    @staticmethod
    def get_pred_labels(prediction_probabilities):
        """Map an array of prediction probabilities to a breed label.

        Declared @staticmethod: the original def had no ``self`` parameter,
        so it only worked when called on the class object; this keeps that
        call style working and additionally allows instance calls.

        NOTE(review): the labels CSV path is a hard-coded absolute Windows
        path -- parameterize before deployment.
        """
        path = 'C:\\Users\\<NAME>\\Documents\\GitHub\\Dog-Breed-Classifier\\labels\\labels.csv'
        labels_csv = pd.read_csv(path)
        labels = labels_csv['breed'].to_numpy()
        unique_breeds = np.unique(labels)
        return unique_breeds[np.argmax(prediction_probabilities)]
3293558 | from conans import ConanFile, CMake, tools
class CinderConan(ConanFile):
    # Conan recipe for libcinder 0.9.2: clones the upstream v0.9.2 tag,
    # patches its CMakeLists.txt to load the Conan build info, builds with
    # CMake, and packages the headers plus the produced library artifacts.
    name = "cinder"
    version = "0.9.2"
    license = """
    Copyright (c) 2010, The Cinder Project
    This code is intended to be used with the Cinder C++ library, http://libcinder.org
    Redistribution and use in source and binary forms, with or without modification, are permitted provided that
    the following conditions are met:
    * Redistributions of source code must retain the above copyright notice, this list of conditions and
    the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
    the following disclaimer in the documentation and/or other materials provided with the distribution.
    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
    WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
    PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
    ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
    TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
    HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    POSSIBILITY OF SUCH DAMAGE.
    """
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False]}
    default_options = {"shared": False}
    generators = "cmake"
    requires = [
        'boost/1.71.0',
        'glm/0.9.9.8',
    ]

    def source(self):
        # Fetch the v0.9.2 tag with submodules, then inject the Conan setup
        # lines immediately after the project() declaration.
        self.run("git clone -b v0.9.2 --recurse-submodules https://github.com/cinder/Cinder")
        tools.replace_in_file("Cinder/CMakeLists.txt", "project( cinder )",
                              '''project( cinder )
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()''')

    def build(self):
        cmake = CMake(self)
        cmake.configure(source_folder="./Cinder")
        cmake.build()

    def package(self):
        # Headers plus whichever library artifact the platform produced
        # (.lib/.dll on Windows, .so on Linux, .dylib on macOS, .a static).
        self.copy("*.h", dst="include", src="./Cinder/include")
        self.copy("*cinder.lib", dst="lib", keep_path=False)
        self.copy("*.dll", dst="bin", keep_path=False)
        self.copy("*.so", dst="lib", keep_path=False)
        self.copy("*.dylib", dst="lib", keep_path=False)
        self.copy("*.a", dst="lib", keep_path=False)

    def package_info(self):
        self.cpp_info.libs = ["cinder"]
| StarcoderdataPython |
1676962 | <gh_stars>10-100
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Auxillary expectation functions
===============================
**Module name:** :mod:`pennylane_sf.expectations`
.. currentmodule:: pennylane_sf.expectations
Contains auxillary functions which convert from PennyLane-style expectations,
to the corresponding state methods in Strawberry Fields.
.. autosummary::
identity
mean_photon
number_expectation
fock_state
homodyne
poly_xp
Code details
~~~~~~~~~~~~
"""
import numpy as np
import strawberryfields as sf
from strawberryfields.backends.states import BaseFockState, BaseGaussianState
import pennylane.ops
def identity(state, device_wires, params):
    """Computes the expectation value of ``qml.Identity``
    observable in Strawberry Fields, corresponding to the trace.

    Args:
        state (strawberryfields.backends.states.BaseState): the quantum state
        device_wires (Wires): the measured modes
        params (Sequence): sequence of parameters (not used)

    Returns:
        float, float: trace and its variance
    """
    # pylint: disable=unused-argument
    if isinstance(state, BaseGaussianState):
        # Gaussian state representation will always have trace of 1
        return 1, 0

    N = state.num_modes
    D = state.cutoff_dim

    # get the reduced density matrix
    # NOTE(review): this immediately overwrites N = state.num_modes above,
    # so the first assignment is dead code.
    N = len(device_wires)
    dm = state.reduced_dm(modes=device_wires.tolist())

    # construct the standard 2D density matrix, and take the trace:
    # permute the interleaved (ket, bra) axes so all kets come first, then
    # flatten to a (D**N, D**N) matrix.
    new_ax = np.arange(2 * N).reshape([N, 2]).T.flatten()
    tr = np.trace(dm.transpose(new_ax).reshape([D**N, D**N])).real

    # variance of the projector-style expectation: tr - tr**2
    return tr, tr - tr**2
def mean_photon(state, device_wires, params):
    """Computes the expectation value of the ``qml.NumberOperator``
    observable in Strawberry Fields.

    Args:
        state (strawberryfields.backends.states.BaseState): the quantum state
        device_wires (Sequence[int]): the measured mode
        params (Sequence): sequence of parameters (not used)

    Returns:
        float, float: mean photon number and its variance
    """
    # pylint: disable=unused-argument
    mode = device_wires.labels[0]
    return state.mean_photon(mode)
def number_expectation(state, device_wires, params):
    """Return the expectation of a tensor product of ``qml.NumberOperator``
    observables over the given modes.

    Delegates to the Strawberry Fields state's ``number_expectation`` method.

    Args:
        state (strawberryfields.backends.states.BaseState): the quantum state
        device_wires (Wires): the sequence of modes to measure
        params (Sequence): sequence of parameters (not used)
    Returns:
        float, float: the expectation value of the number operator and its
        variance
    """
    # pylint: disable=unused-argument
    modes = device_wires.labels
    return state.number_expectation(modes)
def fock_state(state, device_wires, params):
    """Computes the expectation value of the ``qml.FockStateProjector``
    observable in Strawberry Fields.
    Args:
        state (strawberryfields.backends.states.BaseState): the quantum state
        device_wires (Wires): the measured mode
        params (Sequence): sequence of parameters; ``params[0]`` is the tuple
            of Fock occupation numbers defining the projector
    Returns:
        float, float: Fock state probability and its variance
    """
    # pylint: disable=unused-argument
    n = params[0]
    N = state.num_modes
    if N == len(device_wires):
        # expectation value of the entire system: the probability of the
        # projector is Bernoulli-distributed, so var = p - p**2
        ex = state.fock_prob(n)
        return ex, ex - ex**2
    # otherwise, we must trace out remaining systems.
    if isinstance(state, BaseFockState):
        # fock state: the diagonal element dm[n0, n0, n1, n1, ...] of the
        # reduced density matrix is the probability of |n>
        dm = state.reduced_dm(modes=device_wires.tolist())
        ex = dm[tuple(n[i // 2] for i in range(len(n) * 2))].real
    elif isinstance(state, BaseGaussianState):
        # Reduced Gaussian state
        mu, cov = state.reduced_gaussian(modes=device_wires.tolist())
        # scale so that hbar = 2
        mu /= np.sqrt(sf.hbar / 2)
        cov /= sf.hbar / 2
        # create reduced Gaussian state
        new_state = BaseGaussianState((mu, cov), len(device_wires))
        ex = new_state.fock_prob(n)
    # NOTE(review): if ``state`` is neither a Fock nor a Gaussian state,
    # ``ex`` is never bound and the next line raises NameError — confirm
    # all supported backends fall into one of the two branches above.
    var = ex - ex**2
    return ex, var
def homodyne(phi=None):
    """Function factory that returns the ``qml.QuadOperator`` expectation
    function for Strawberry Fields.
    ``homodyne(phi)`` returns a function
    .. code-block:: python
        homodyne_expectation(state, wires, phi)
    that is used to determine the homodyne expectation value of a wire within a
    Strawberry Fields state object, measured along a particular phase-space
    angle ``phi``.
    Note that:
    * If ``phi`` is not None, the returned function will be hardcoded to return the
      homodyne expectation value at angle ``phi`` in the phase space.
    * If ``phi`` is None, the value of ``phi`` must be passed via ``params``
      when calling the returned function.
    Args:
        phi (float): the default phase-space axis to perform the homodyne measurement on
    Returns:
        function: a function that accepts a SF state, the wire to measure,
        and phase space angle phi, and returns the quadrature expectation
        value and variance
    """
    if phi is not None:
        # bind the fixed angle; ``params`` is ignored in this variant
        return lambda state, device_wires, params: state.quad_expectation(
            device_wires.labels[0], phi
        )
    # angle comes from the runtime parameters instead
    return lambda state, device_wires, params: state.quad_expectation(
        device_wires.labels[0], *params
    )
def poly_xp(state, all_wires, wires, params):
    r"""Computes the expectation value of an observable that is a second-order
    polynomial in :math:`\{\hat{x}_i, \hat{p}_i\}_i`.
    Args:
        state (strawberryfields.backends.states.BaseState): the quantum state
        all_wires (Wires): all modes on the device
        wires (Wires): measured modes for this observable
        params (Sequence[array]): Q is a matrix or vector of coefficients
            using the :math:`(\I, \x_1,\p_1, \x_2,\p_2, \dots)` ordering
    Returns:
        float, float: expectation value, variance
    """
    Q = params[0]
    # HACK, we need access to the Poly instance in order to expand the matrix!
    op = pennylane.ops.PolyXP(Q, wires=wires, do_queue=False)
    Q = op.heisenberg_obs(all_wires)
    if Q.ndim == 1:
        # purely linear case: split the interleaved (x1,p1,x2,p2,...) vector
        # into (x..., p...) as expected by poly_quad_expectation
        d = np.r_[Q[1::2], Q[2::2]]
        return state.poly_quad_expectation(None, d, Q[0])
    # convert to the (I, x1,x2,..., p1,p2...) ordering:
    # first reorder rows, then columns, keeping the identity row/column first
    M = np.vstack((Q[0:1, :], Q[1::2, :], Q[2::2, :]))
    M = np.hstack((M[:, 0:1], M[:, 1::2], M[:, 2::2]))
    # linear terms appear in both the first row and first column; sum them
    d1 = M[1:, 0]
    d2 = M[0, 1:]
    return state.poly_quad_expectation(M[1:, 1:], d1 + d2, M[0, 0])
| StarcoderdataPython |
65940 | # For backwards compatibility, importing the PIL drawers here.
from .pil import CircleModuleDrawer # noqa: F401
from .pil import GappedSquareModuleDrawer # noqa: F401
from .pil import HorizontalBarsDrawer # noqa: F401
from .pil import RoundedModuleDrawer # noqa: F401
from .pil import SquareModuleDrawer # noqa: F401
from .pil import VerticalBarsDrawer # noqa: F401
| StarcoderdataPython |
1658733 | <reponame>fhowar/benchexec
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 <NAME> <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.tools.template
import benchexec.result as result
import tempfile
import re
import subprocess
import logging
from benchexec.tools.sv_benchmarks_util import get_data_model_from_task, ILP32, LP64
class Tool(benchexec.tools.template.BaseTool2):
    """
    Tool info for AProVE.
    URL: http://aprove.informatik.rwth-aachen.de/
    Only the binary (jar) distribution of AProVE is supported.
    """
    REQUIRED_PATHS = ["aprove.jar", "AProVE.sh", "bin", "newstrategy.strategy"]
    # command-line flag used to pass the data-model bit width to AProVE
    BIT_WIDTH_PARAMETER_NAME = "--bit-width"
    def executable(self, tool_locator):
        """Locate the AProVE launcher script."""
        return tool_locator.find_executable("AProVE.sh")
    def name(self):
        """Human-readable tool name."""
        return "AProVE"
    def version(self, executable):
        """Determine the AProVE version by running it on a trivial C program
        and extracting the commit id from its output.

        Returns an empty string if the tool cannot be run or the commit id
        cannot be found in the output.
        """
        # AProVE has no --version flag, so run it on a throwaway input file
        with tempfile.NamedTemporaryFile(suffix=".c") as trivial_example:
            trivial_example.write(b"int main() { return 0; }\n")
            trivial_example.flush()
            cmd = [executable, trivial_example.name]
            try:
                process = subprocess.run(
                    cmd,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    universal_newlines=True,
                )
            except OSError as e:
                logging.warning("Unable to determine AProVE version: %s", e.strerror)
                return ""
            version_aprove_match = re.search(
                r"^# AProVE Commit ID: (.*)",
                process.stdout,
                re.MULTILINE,
            )
            if not version_aprove_match:
                logging.warning(
                    "Unable to determine AProVE version: %s",
                    process.stdout,
                )
                return ""
            # truncate the commit hash to a short, stable identifier
            return version_aprove_match.group(1)[:10]
    def cmdline(self, executable, options, task, rlimits):
        """Build the command line, appending the bit width derived from the
        task's data model unless the user already supplied one."""
        data_model_param = get_data_model_from_task(task, {ILP32: "32", LP64: "64"})
        if data_model_param and self.BIT_WIDTH_PARAMETER_NAME not in options:
            options += [self.BIT_WIDTH_PARAMETER_NAME, data_model_param]
        return [executable, *options, task.single_input_file]
    def determine_result(self, run):
        """Map AProVE's verdict (first output line only) to a BenchExec result."""
        if not run.output:
            return result.RESULT_ERROR
        first_output_line = run.output[0]
        # YES/TRUE -> property holds; FALSE/NO -> non-termination witness
        if "YES" in first_output_line:
            return result.RESULT_TRUE_PROP
        elif "TRUE" in first_output_line:
            return result.RESULT_TRUE_PROP
        elif "FALSE" in first_output_line:
            return result.RESULT_FALSE_TERMINATION
        elif "NO" in first_output_line:
            return result.RESULT_FALSE_TERMINATION
        else:
            return result.RESULT_UNKNOWN
| StarcoderdataPython |
1727262 |
import random
from typing_extensions import Required
#from sqlalchemy.sql.sqltypes import Boolean
from graphene import ObjectType, String, Field, ID, List, DateTime, Mutation, Boolean, Int
from models.AcreditationRelated.StudyPlan import StudyPlanModel
from models.AcreditationRelated.StudyPlanItem import StudyPlanItemModel
from graphqltypes.Utils import extractSession
from graphqltypes.Utils import createRootResolverById, createRootResolverByName, createMutationClass
# Root-level GraphQL resolvers for StudyPlan, generated from the model.
StudyPlanRootResolverById = createRootResolverById(StudyPlanModel)
StudyPlanRootResolverByName = createRootResolverByName(StudyPlanModel)
# Mutation for creating a new top-level study plan (no parent item).
CreateStudyPlan = createMutationClass(
    StudyPlanModel, Field('graphqltypes.StudyPlan.StudyPlanType'), parentItemName=None,
    name = String()
)
class StudyPlanType(ObjectType):
    """GraphQL type for a study plan: identity fields plus its plan items."""
    id = ID()
    lastchange = DateTime()
    externalId = String()
    name = String()
    studyplanitems = List('graphqltypes.StudyPlan.StudyPlanItemType')
    # Mutation field that creates a child item attached to this plan.
    create_new_item = createMutationClass(
        StudyPlanItemModel, Field('graphqltypes.StudyPlan.StudyPlanItemType'), parentItemName='studyplan',
        name = String()
    ).Field()
    def resolve_studyplanitems(parent, info):
        """Return the plan's items, preferring an already-loaded relationship
        attribute over issuing a fresh query."""
        if hasattr(parent, 'studyplanitems'):
            result = parent.studyplanitems
        else:
            session = extractSession(info)
            result = session.query(StudyPlanItemModel).filter(StudyPlanItemModel.studyplan_id == parent.id).all()
        return result
class StudyPlanItemType(ObjectType):
    """GraphQL type for a single item inside a study plan."""
    id = ID()
    name = String()
    studyplan = Field('graphqltypes.StudyPlan.StudyPlanType')
    itemevents = List('graphqltypes.StudyPlan.StudyPlanItemEventType')
class StudyPlanItemEventType(ObjectType):
    """GraphQL type for an event attached to a study plan item."""
    id = ID()
    name = String()
    studyplanitem = Field('graphqltypes.StudyPlan.StudyPlanItemType')
| StarcoderdataPython |
3218395 | <gh_stars>0
#!/usr/bin/env python3
import os
import unittest
import sys
# Make the doxyqml package importable when running these tests straight from
# the source tree (package root is two directories above this file).
doxyqml_path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
sys.path.insert(0, doxyqml_path)
# Star-imports expose the TestCase subclasses at module level so that
# unittest discovery picks them up.
from qmlclasstestcase import *
from qmlparsertestcase import *
from lexertestcase import *
def main():
    """Run every test case imported into this module."""
    unittest.main()
if __name__ == "__main__":
    main()
# vi: ts=4 sw=4 et
| StarcoderdataPython |
1623097 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from sqlalchemy.orm import joinedload
from tahiti.models import *
from flask import current_app
def test_list_operations_simple_data_success(client):
    """GET /operations?simple=true returns only (id, name, slug) per
    operation, matching the enabled operations in the database."""
    headers = {'X-Auth-Token': str(client.secret)}
    params = {'simple': 'true'}
    rv = client.get('/operations', headers=headers, query_string=params)
    assert 200 == rv.status_code, 'Incorrect status code'
    resp = rv.json
    # the "simple" view must expose exactly these three keys
    assert set(resp['data'][0].keys()) == {'id', 'name', 'slug'}
    with current_app.app_context():
        # rebuild the expected result set directly from the database
        current_translation = db.aliased(Operation.current_translation)
        total = Operation.query.join(current_translation).filter(
            Operation.enabled,
            Operation.platforms.any(enabled=True),
            OperationTranslation.locale == 'en',).with_entities(
            Operation.id,
            OperationTranslation.name,
            Operation.slug)
        ops1 = list(total)
        ops2 = [tuple(x.values()) for x in resp['data']]
        # compare as sets: ordering is not part of the contract
        assert set(ops1) == set(ops2)
def test_list_operations_per_platform_success(client):
    """GET /operations?platform=<id> returns exactly the enabled operations
    registered for that platform, for several platform ids."""
    platforms = [4, 1, 5, ]
    headers = {'X-Auth-Token': str(client.secret)}
    for platform in platforms:
        params = {'platform': platform}
        rv = client.get('/operations', headers=headers, query_string=params)
        assert 200 == rv.status_code, 'Incorrect status code'
        resp = rv.json
        with current_app.app_context():
            # expected ids straight from the database for this platform
            current_translation = db.aliased(Operation.current_translation)
            total = Operation.query.join(Operation.platforms).join(
                current_translation).filter(
                Operation.enabled,
                Platform.id == platform,
                Operation.platforms.any(enabled=True),
                OperationTranslation.locale == 'en',)
            ops1 = [op.id for op in total]
            ops2 = [op['id'] for op in resp['data']]
            assert set(ops1) == set(ops2)
def test_list_operations_filtered_disabled_success(client):
    # TODO: implement — verify disabled operations are excluded from listings
    pass
def test_list_operations_filtered_subset_success(client):
    # TODO: implement — verify filtering by a subset of fields/ids
    pass
def test_list_operations_filtered_workflow_success(client):
    # TODO: implement — verify filtering operations by workflow
    pass
def test_list_operations_filtered_name_success(client):
    # TODO: implement — verify filtering operations by name
    pass
| StarcoderdataPython |
4826870 | from onegov.ballot import PartyResult
from onegov.election_day import _
from sqlalchemy.orm import object_session
def has_party_results(item):
    """ Returns True, if the item has party results. """
    # Only proporz elections carry party results; items without a ``type``
    # attribute are treated as proporz.
    is_proporz = getattr(item, 'type', 'proporz') == 'proporz'
    return bool(is_proporz and item.party_results.first())
def get_party_results(item):
    """ Returns the aggregated party results as list.

    Returns a pair ``(years, parties)`` where ``years`` is a sorted list of
    year strings and ``parties`` maps party name -> year -> result dict with
    ``color``, ``mandates`` and ``votes`` (total and permille share).
    """
    if not has_party_results(item):
        return [], {}
    session = object_session(item)
    # Get the totals votes per year
    query = session.query(PartyResult.year, PartyResult.total_votes)
    query = query.filter(PartyResult.owner == item.id).distinct()
    totals = dict(query)
    years = sorted((str(key) for key in totals.keys()))
    parties = {}
    for result in item.party_results:
        party = parties.setdefault(result.name, {})
        year = party.setdefault(str(result.year), {})
        year['color'] = result.color
        year['mandates'] = result.number_of_mandates
        year['votes'] = {
            'total': result.votes,
            # share in permille of the year's total; ``or 1`` guards
            # against a missing/zero total (avoids ZeroDivisionError)
            'permille': int(
                round(1000 * (result.votes / (totals.get(result.year) or 1)))
            )
        }
    return years, parties
def get_party_results_deltas(election, years, parties):
    """ Returns the aggregated party results with the differences to the
    last elections.

    Returns ``(show_deltas, tables)``: ``show_deltas`` is True when more
    than one year is present, and ``tables`` maps each year to rows of
    ``[name, mandates, votes, percentage(, delta)]``, sorted by party name.
    """
    show_deltas = len(years) > 1
    tables = {}
    for position, year in enumerate(years):
        rows = []
        for name in sorted(parties):
            entry = parties[name]
            current = entry.get(year)
            row = [name]
            if current:
                votes = current.get('votes', {})
                permille = votes.get('permille')
                row.append(current.get('mandates', ''))
                row.append(votes.get('total', ''))
                row.append('{}%'.format(permille / 10 if permille else ''))
            else:
                # party did not compete this year
                row.extend(['', '', ''])
            if show_deltas:
                delta = ''
                if position:
                    previous = entry.get(years[position - 1])
                    if current and previous:
                        change = (
                            (current.get('votes', {}).get('permille', 0) or 0)
                            - (previous.get('votes', {}).get('permille', 0) or 0)
                        ) / 10
                        delta = '{}%'.format(change)
                row.append(delta)
            rows.append(row)
        tables[year] = rows
    return show_deltas, tables
def get_party_results_data(item):
    """Return the data used for the grouped bar diagram showing the party
    results.
    """
    if not has_party_results(item):
        return {
            'results': [],
            'title': item.title
        }

    years, parties = get_party_results(item)
    party_names = sorted(parties.keys())
    active_year = str(item.date.year)

    entries = []
    for party_name in party_names:
        for year in parties[party_name]:
            values = parties[party_name].get(year, {})
            votes = values.get('votes', {})
            entries.append({
                'group': party_name,
                'item': year,
                'value': {
                    # front bar: mandates; back bar: vote share in percent
                    'front': values.get('mandates', 0),
                    'back': votes.get('permille', 0) / 10.0,
                },
                'active': year == active_year,
                'color': values.get('color', '#999999')
            })

    return {
        'groups': party_names,
        'labels': years,
        'maximum': {
            'front': item.number_of_mandates,
            'back': 100,
        },
        'axis_units': {
            'front': '',
            'back': '%'
        },
        'results': entries,
        'title': item.title
    }
def get_parties_panachage_data(item, request=None):
    """" Get the panachage data as JSON. Used to for the panachage sankey
    chart.

    Builds a two-column sankey: the left column is the source party of each
    transferred vote, the right column the receiving party. Votes a party
    keeps for itself are the total minus everything it passed on.
    """
    if getattr(item, 'type', 'proporz') == 'majorz':
        return {}
    results = item.panachage_results.all()
    party_results = item.party_results.filter_by(year=item.date.year).all()
    if not results:
        return {}
    # every party that appears anywhere: as a source, a target, or a result
    parties = sorted(
        set([result.source for result in results])
        | set([result.target for result in results])
        | set([result.name for result in party_results])
    )
    def left_node(party):
        # index of the party in the left (source) column
        return parties.index(party)
    def right_node(party):
        # right (target) column nodes follow all left nodes
        return parties.index(party) + len(parties)
    colors = dict(set((r.name, r.color) for r in party_results))
    # start from each party's total and subtract outgoing transfers below,
    # leaving the votes that stayed within the party
    intra_party_votes = dict(set((r.name, r.votes) for r in party_results))
    # Create the links
    links = []
    for result in results:
        if result.source == result.target:
            continue
        if result.target in intra_party_votes:
            intra_party_votes[result.target] -= result.votes
        links.append({
            'source': left_node(result.source),
            'target': right_node(result.target),
            'value': result.votes,
            'color': colors.get(result.source, '#999')
        })
    for party, votes in intra_party_votes.items():
        links.append({
            'source': left_node(party),
            'target': right_node(party),
            'value': votes,
            'color': colors.get(party, '#999')
        })
    # Create the nodes; an empty party name denotes the blank list
    blank = request.translate(_("Blank list")) if request else '-'
    nodes = [
        {
            'name': name or blank,
            'id': count + 1,
            'color': colors.get(name, '#999')
        }
        for count, name in enumerate(2 * parties)
    ]
    return {
        'nodes': nodes,
        'links': links,
        'title': item.title
    }
| StarcoderdataPython |
1745484 | <gh_stars>0
#!/usr/bin/env python
# ROS node: polls an IMU server over UDP and republishes the fusion pose
# (roll/pitch/yaw) as a geometry_msgs/Vector3Stamped topic.
import socket
import json
import sys
import rospy
from geometry_msgs.msg import Vector3Stamped
if len(sys.argv)<3:
    print("usage cmd ip_address topic_name")
    exit()
ip = sys.argv[1]
port = 7005
# Create a UDP socket at client side
UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
UDPClientSocket.settimeout(0.3)
# Send to server using created UDP socket
pub = rospy.Publisher(sys.argv[2], Vector3Stamped, queue_size=10)
rospy.init_node("orientation")
rate = rospy.Rate(1000) # 1000 Hz loop rate (previous comment said 100hz; Rate(1000) is authoritative)
while not rospy.is_shutdown():
    try:
        # one request triggers a burst of replies; read up to 100 of them
        UDPClientSocket.sendto(" ".encode(), (ip,port))
        i=0
        while i<100:
            msgFromServer = UDPClientSocket.recvfrom(1024)
            data = json.loads(msgFromServer[0].decode())
            timestamp = data["timestamp"]
            angles = (data["fusionPose"])
            # print(data)
            dataToPublish = Vector3Stamped()
            # server sends [roll, pitch, yaw]; published as (x=yaw, y=pitch, z=roll)
            # NOTE(review): confirm this axis mapping is intentional
            dataToPublish.vector.x = angles[2]
            dataToPublish.vector.y = angles[1]
            dataToPublish.vector.z = angles[0]
            pub.publish(dataToPublish)
            # rospy.spinonce()
            # print(angles)
            # print("\n")
            rate.sleep()
            i=i+1
    except socket.timeout as e:
        # server did not answer in time; retry with a fresh request
        pass
    except rospy.ROSInterruptException:
        pass
| StarcoderdataPython |
3287460 | <gh_stars>0
import unittest
from wizard_game import WizardPlayer
class TestWizardPlayer(unittest.TestCase):
    """Tests for WizardPlayer.get_valid_indices (follow-suit rules)."""
    def test1(self):
        """With a led suit, only matching-suit cards (plus jesters/wizards,
        value 0/14 here) are playable; with an empty table, everything is."""
        p1 = WizardPlayer()
        table = []
        hand = ['B3', 'Y9', 'R10', 'G2']
        p1.receive_hand(hand)
        # empty table: the whole hand is valid
        self.assertTrue(len(p1.get_valid_indices(table)) == len(hand))
        table = ['Y12']
        valid_indices = p1.get_valid_indices(table)
        self.assertEqual(len(valid_indices), 1)
        self.assertEqual(hand[valid_indices[0]], 'Y9')
        table = ['B8', 'Y12']
        valid_indices = p1.get_valid_indices(table)
        self.assertEqual(len(valid_indices), 1)
        self.assertEqual(hand[valid_indices[0]], 'B3')
        # a 0-card (jester) is always playable
        hand.append('B0')
        valid_indices = p1.get_valid_indices(table)
        valid_hand = [hand[i] for i in valid_indices]
        self.assertEqual(len(valid_indices), 2)
        self.assertTrue('B0' in valid_hand)
        self.assertTrue('B3' in valid_hand)
        # a 14-card (wizard) is always playable
        hand.append('Y14')
        valid_indices = p1.get_valid_indices(table)
        valid_hand = [hand[i] for i in valid_indices]
        self.assertEqual(len(valid_indices), 3)
        self.assertTrue('B0' in valid_hand)
        self.assertTrue('B3' in valid_hand)
        self.assertTrue('Y14' in valid_hand)
    def test2(self):
        """When the player cannot follow the led suit, the entire hand is
        valid."""
        p1 = WizardPlayer()
        table = ['R14']
        hand = ['B3', 'Y9', 'R10', 'G2']
        p1.receive_hand(hand)
        valid_indices = p1.get_valid_indices(table)
        valid_hand = [hand[i] for i in valid_indices]
        self.assertTrue(len(valid_hand) == len(hand))
        for card in valid_hand:
            self.assertTrue(card in hand)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
105400 | <reponame>h3kker/hinkskalle<gh_stars>1-10
"""remove tag image_id not null
Revision ID: 5961c96f6a2e
Revises: <PASSWORD>
Create Date: 2021-06-11 19:42:36.870527
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# NOTE(review): the tokens below ('<KEY>', '<PASSWORD>') look like redacted
# placeholders from dataset scrubbing — restore the real Alembic revision
# ids before running this migration.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Allow ``tag.image_id`` to be NULL (tags may exist without an image)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('tag', 'image_id',
               existing_type=sa.INTEGER(),
               nullable=True)
    # ### end Alembic commands ###
def downgrade():
    """Revert: make ``tag.image_id`` NOT NULL again."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('tag', 'image_id',
               existing_type=sa.INTEGER(),
               nullable=False)
    # ### end Alembic commands ###
| StarcoderdataPython |
3257169 | <reponame>bjorn/Paste-It<filename>api/urls.py<gh_stars>1-10
from django.conf.urls import patterns, url
# URL routes for the API app: an index view plus versioned paste endpoints.
urlpatterns = patterns('',
    url(r'^$', 'api.views.index'),
    url(r'^v01/add/?', 'api.v01.views.add'),
    url(r'^v02/add/?', 'api.v02.views.add'),
    url(r'^v02/list/?', 'api.v02.views.list'),
)
| StarcoderdataPython |
3360540 | <filename>ch03tests/solutions/diffusionmodel/diffusion_model.py
""" Simplistic 1-dimensional diffusion model """
def energy(density, coefficient=1):
    """ Energy associated with the diffusion model

    :Parameters:
      density: array of positive integers
         Number of particles at each position i in the array/geometry
      coefficient: number
         Overall scale factor applied to the energy
    """
    import numpy as np

    # Make sure input is an array
    values = np.array(density)
    # of the right kind (integer). Unless it is zero length, in which case type does not matter.
    if values.dtype.kind != 'i' and len(values) > 0:
        raise TypeError("Density should be an array of *integers*.")
    # and the right values (positive or null)
    if np.any(values < 0):
        raise ValueError("Density should be an array of *positive* integers.")
    if values.ndim != 1:
        raise ValueError("Density should be an a *1-dimensional* array of positive integers.")
    # E = c/2 * sum_i n_i (n_i - 1)
    return coefficient * 0.5 * np.sum(values * (values - 1))
def partial_derivative(function, x, index):
    """ Computes right derivative of function over integers

    :Parameters:
      function: callable object
        The function for which to compute the delta/derivative
      x: array of integers
        The point at which to compute the right-derivative
      index: integer
        Partial derivative direction.
    """
    from numpy import array
    # value before the step
    base_value = function(x)
    # work on a copy so the caller's x is untouched
    shifted = array(x)
    shifted[index] += 1
    # forward difference with unit step
    return function(shifted) - base_value
| StarcoderdataPython |
1672898 | import contextlib
import io
from dataclasses import dataclass, field
from io import StringIO
from typing import Dict, List, Optional, Union
import alembic
import alembic.config
from alembic.runtime.environment import EnvironmentContext
from alembic.script.base import ScriptDirectory
from sqlalchemy import MetaData, Table
from sqlalchemy.engine import Connection
from pytest_alembic.config import Config
@dataclass
class CommandExecutor:
    """Wraps an alembic Config/ScriptDirectory pair and runs alembic
    commands, capturing their stdout."""
    alembic_config: alembic.config.Config
    stdout: StringIO
    # offset into ``stdout`` marking where the current command's output starts
    stream_position: int
    script: ScriptDirectory
    @classmethod
    def from_config(cls, config: Config):
        """Build an executor (and its capture buffer) from a pytest-alembic Config."""
        stdout = StringIO()
        alembic_config = config.make_alembic_config(stdout)
        return cls(
            alembic_config=alembic_config,
            stdout=stdout,
            stream_position=0,
            script=ScriptDirectory.from_config(alembic_config),
        )
    def configure(self, **kwargs):
        """Stash key/value pairs on the alembic config's ``attributes`` dict
        (visible to the project's env.py)."""
        for key, value in kwargs.items():
            self.alembic_config.attributes[key] = value
    def run_command(self, command, *args, **kwargs):
        """Run the named alembic command and return only the lines it wrote
        to stdout (CommandError is re-raised as RuntimeError)."""
        self.stream_position = self.stdout.tell()
        executable_command = getattr(alembic.command, command)
        try:
            # Hide the (relatively) worthless logs of the upgrade revision path, it just clogs
            # up the logs when errors actually occur, but without providing any context.
            buffer = io.StringIO()
            with contextlib.redirect_stderr(buffer):
                executable_command(self.alembic_config, *args, **kwargs)
        except alembic.util.exc.CommandError as e:
            raise RuntimeError(e)
        # rewind to the start of this command's output and return it
        self.stdout.seek(self.stream_position)
        return self.stdout.readlines()
    def heads(self):
        """Return the revision ids of all current head revisions."""
        return [rev.revision for rev in self.script.get_revisions("heads")]
    def upgrade(self, revision):
        """Upgrade to the given `revision`."""
        def upgrade(rev, _):
            return self.script._upgrade_revs(revision, rev)
        self._run_env(upgrade, revision)
    def downgrade(self, revision):
        """Downgrade to the given `revision`."""
        def downgrade(rev, _):
            return self.script._downgrade_revs(revision, rev)
        self._run_env(downgrade, revision)
    def _run_env(self, fn, revision=None):
        """Execute the migrations' env.py, given some function to execute."""
        # no target revision -> run env.py without mutating revision state
        dont_mutate = revision is None
        with EnvironmentContext(
            self.alembic_config,
            self.script,
            fn=fn,
            destination_rev=revision,
            dont_mutate=dont_mutate,
        ):
            self.script.run_env()
@dataclass
class ConnectionExecutor:
    """Reflects tables per-revision and performs inserts against a live
    connection, caching one MetaData per revision."""
    metadatas: Dict[str, MetaData] = field(default_factory=dict)
    def metadata(self, revision: str) -> MetaData:
        """Return the (cached) MetaData for the given revision."""
        metadata = self.metadatas.get(revision)
        if metadata is None:
            metadata = MetaData()
            self.metadatas[revision] = metadata
        return metadata
    def table(self, connection, revision: str, name: str, schema: Optional[str] = None) -> Table:
        """Return the reflected Table, reusing one already loaded into the
        revision's MetaData when available."""
        meta = self.metadata(revision)
        if name in meta.tables:
            return meta.tables[name]
        return Table(name, meta, schema=schema, autoload_with=connection)
    def table_insert(
        self,
        connection: Connection,
        revision: str,
        data: Union[Dict, List],
        tablename: Optional[str] = None,
        schema: Optional[str] = None,
    ):
        """Insert one row (dict) or many rows (list of dicts); each row may
        carry its own target table in a ``__tablename__`` key."""
        if isinstance(data, dict):
            data = [data]
        for item in data:
            _tablename = item.get("__tablename__", None)
            table = _tablename or tablename
            if table is None:
                raise ValueError(
                    "No table name provided as either `table` argument, or '__tablename__' key in `data`."
                )
            try:
                # Attempt to parse the schema out of the tablename
                # NOTE(review): this rebinds ``schema`` for the rest of the
                # loop, so a dotted name makes later undotted items inherit
                # its schema — confirm that carry-over is intended.
                schema, table = table.split(".", 1)
            except ValueError:
                # However, if it doesn't work, both `table` and `schema` are in scope, so failure is fine.
                pass
            table = self.table(connection, revision, table, schema=schema)
            # strip the routing key before building the INSERT values
            values = {k: v for k, v in item.items() if k != "__tablename__"}
            connection.execute(table.insert().values(values))
| StarcoderdataPython |
3341441 | import threading
import time
from datetime import datetime, timezone
import anyio
import pytest
from anyio import fail_after
from apscheduler.enums import JobOutcome
from apscheduler.events import (
Event, JobAdded, ScheduleAdded, ScheduleRemoved, SchedulerStarted, SchedulerStopped, TaskAdded)
from apscheduler.exceptions import JobLookupError
from apscheduler.schedulers.async_ import AsyncScheduler
from apscheduler.schedulers.sync import Scheduler
from apscheduler.triggers.date import DateTrigger
pytestmark = pytest.mark.anyio
async def dummy_async_job(delay: float = 0, fail: bool = False) -> str:
    """Async test job: sleep for *delay* seconds, then either raise or
    return a marker string."""
    await anyio.sleep(delay)
    if fail:
        raise RuntimeError('failing as requested')
    return 'returnvalue'
def dummy_sync_job(delay: float = 0, fail: bool = False) -> str:
    """Synchronous test job: sleep for *delay* seconds, then either raise
    or return a marker string."""
    time.sleep(delay)
    if fail:
        raise RuntimeError('failing as requested')
    return 'returnvalue'
class TestAsyncScheduler:
    """Event-sequence and job-result tests for the async scheduler."""
    async def test_schedule_job(self) -> None:
        """A one-shot schedule produces exactly the expected event sequence."""
        def listener(received_event: Event) -> None:
            received_events.append(received_event)
            # five events are expected before the scheduler is stopped
            if len(received_events) == 5:
                event.set()
        received_events: list[Event] = []
        event = anyio.Event()
        scheduler = AsyncScheduler(start_worker=False)
        scheduler.events.subscribe(listener)
        trigger = DateTrigger(datetime.now(timezone.utc))
        async with scheduler:
            await scheduler.add_schedule(dummy_async_job, trigger, id='foo')
            with fail_after(3):
                await event.wait()
        # The scheduler was first started
        received_event = received_events.pop(0)
        assert isinstance(received_event, SchedulerStarted)
        # Then the task was added
        received_event = received_events.pop(0)
        assert isinstance(received_event, TaskAdded)
        assert received_event.task_id == 'test_schedulers:dummy_async_job'
        # Then a schedule was added
        received_event = received_events.pop(0)
        assert isinstance(received_event, ScheduleAdded)
        assert received_event.schedule_id == 'foo'
        # assert received_event.task_id == 'task_id'
        # Then that schedule was processed and a job was added for it
        received_event = received_events.pop(0)
        assert isinstance(received_event, JobAdded)
        assert received_event.schedule_id == 'foo'
        assert received_event.task_id == 'test_schedulers:dummy_async_job'
        # Then the schedule was removed since the trigger had been exhausted
        received_event = received_events.pop(0)
        assert isinstance(received_event, ScheduleRemoved)
        assert received_event.schedule_id == 'foo'
        # Finally, the scheduler was stopped
        received_event = received_events.pop(0)
        assert isinstance(received_event, SchedulerStopped)
        # There should be no more events on the list
        assert not received_events
    async def test_get_job_result_success(self) -> None:
        """A successful job reports its return value."""
        async with AsyncScheduler() as scheduler:
            job_id = await scheduler.add_job(dummy_async_job, kwargs={'delay': 0.2})
            result = await scheduler.get_job_result(job_id)
            assert result.job_id == job_id
            assert result.outcome is JobOutcome.success
            assert result.return_value == 'returnvalue'
    async def test_get_job_result_error(self) -> None:
        """A failing job reports the raised exception."""
        async with AsyncScheduler() as scheduler:
            job_id = await scheduler.add_job(dummy_async_job, kwargs={'delay': 0.2, 'fail': True})
            result = await scheduler.get_job_result(job_id)
            assert result.job_id == job_id
            assert result.outcome is JobOutcome.error
            assert isinstance(result.exception, RuntimeError)
            assert str(result.exception) == 'failing as requested'
    async def test_get_job_result_nowait_not_yet_ready(self) -> None:
        """Non-blocking result fetch raises when the job hasn't finished."""
        async with AsyncScheduler() as scheduler:
            job_id = await scheduler.add_job(dummy_async_job, kwargs={'delay': 0.2})
            with pytest.raises(JobLookupError):
                await scheduler.get_job_result(job_id, wait=False)
    async def test_run_job_success(self) -> None:
        """run_job returns the job's return value directly."""
        async with AsyncScheduler() as scheduler:
            return_value = await scheduler.run_job(dummy_async_job)
            assert return_value == 'returnvalue'
    async def test_run_job_failure(self) -> None:
        """run_job propagates the job's exception."""
        async with AsyncScheduler() as scheduler:
            with pytest.raises(RuntimeError, match='failing as requested'):
                await scheduler.run_job(dummy_async_job, kwargs={'fail': True})
class TestSyncScheduler:
    """Event-sequence and job-result tests for the synchronous scheduler
    (mirrors TestAsyncScheduler)."""
    def test_schedule_job(self):
        """A one-shot schedule produces exactly the expected event sequence."""
        def listener(received_event: Event) -> None:
            received_events.append(received_event)
            # five events are expected before the scheduler is stopped
            if len(received_events) == 5:
                event.set()
        received_events: list[Event] = []
        event = threading.Event()
        scheduler = Scheduler(start_worker=False)
        scheduler.events.subscribe(listener)
        trigger = DateTrigger(datetime.now(timezone.utc))
        with scheduler:
            scheduler.add_schedule(dummy_sync_job, trigger, id='foo')
            event.wait(3)
        # The scheduler was first started
        received_event = received_events.pop(0)
        assert isinstance(received_event, SchedulerStarted)
        # Then the task was added
        received_event = received_events.pop(0)
        assert isinstance(received_event, TaskAdded)
        assert received_event.task_id == 'test_schedulers:dummy_sync_job'
        # Then a schedule was added
        received_event = received_events.pop(0)
        assert isinstance(received_event, ScheduleAdded)
        assert received_event.schedule_id == 'foo'
        # Then that schedule was processed and a job was added for it
        received_event = received_events.pop(0)
        assert isinstance(received_event, JobAdded)
        assert received_event.schedule_id == 'foo'
        assert received_event.task_id == 'test_schedulers:dummy_sync_job'
        # Then the schedule was removed since the trigger had been exhausted
        received_event = received_events.pop(0)
        assert isinstance(received_event, ScheduleRemoved)
        assert received_event.schedule_id == 'foo'
        # Finally, the scheduler was stopped
        received_event = received_events.pop(0)
        assert isinstance(received_event, SchedulerStopped)
        # There should be no more events on the list
        assert not received_events
    def test_get_job_result(self) -> None:
        """A successful job reports its return value."""
        with Scheduler() as scheduler:
            job_id = scheduler.add_job(dummy_sync_job)
            result = scheduler.get_job_result(job_id)
            assert result.outcome is JobOutcome.success
            assert result.return_value == 'returnvalue'
    def test_get_job_result_error(self) -> None:
        """A failing job reports the raised exception."""
        with Scheduler() as scheduler:
            job_id = scheduler.add_job(dummy_sync_job, kwargs={'delay': 0.2, 'fail': True})
            result = scheduler.get_job_result(job_id)
            assert result.job_id == job_id
            assert result.outcome is JobOutcome.error
            assert isinstance(result.exception, RuntimeError)
            assert str(result.exception) == 'failing as requested'
    def test_get_job_result_nowait_not_yet_ready(self) -> None:
        """Non-blocking result fetch raises when the job hasn't finished."""
        with Scheduler() as scheduler:
            job_id = scheduler.add_job(dummy_sync_job, kwargs={'delay': 0.2})
            with pytest.raises(JobLookupError):
                scheduler.get_job_result(job_id, wait=False)
    def test_run_job_success(self) -> None:
        """run_job returns the job's return value directly."""
        with Scheduler() as scheduler:
            return_value = scheduler.run_job(dummy_sync_job)
            assert return_value == 'returnvalue'
    def test_run_job_failure(self) -> None:
        """run_job propagates the job's exception."""
        with Scheduler() as scheduler:
            with pytest.raises(RuntimeError, match='failing as requested'):
                scheduler.run_job(dummy_sync_job, kwargs={'fail': True})
| StarcoderdataPython |
1602889 | #! /usr/bin/env python3
import os
import sys
import math
from itertools import product
from mule_local.JobGeneration import *
from mule.JobParallelizationDimOptions import *
from mule.JobParallelization import *
# Job-generation configuration for a SWEET benchmark parameter sweep.
p = JobGeneration()
verbose = False
#verbose = True
##################################################
##################################################
p.compile.mode = 'release'
# Pick the compiler family from the active MULE platform id.
if '_gnu' in os.getenv('MULE_PLATFORM_ID'):
    p.compile.compiler = 'gnu'
else:
    p.compile.compiler = 'intel'
# NOTE(review): 'enable' is immediately overwritten by 'disable' — looks like
# a toggle left in place for quick switching; confirm MPI should be off.
p.compile.sweet_mpi = 'enable'
p.compile.sweet_mpi = 'disable'
p.runtime.space_res_spectral = 128
p.runtime.reuse_plans = -1 # enforce using plans (todo, enforcing not yet implemented)!
p.compile.rexi_timings_additional_barriers = 'disable'
p.compile.rexi_allreduce = 'disable'
p.parallelization.core_oversubscription = False
p.parallelization.core_affinity = 'compact'
p.compile.threading = 'omp'
p.compile.rexi_thread_parallel_sum = 'disable'
# Simulation times to sweep (seconds); commented lines are alternatives.
params_simtime = []
#params_simtime += [60*60*i for i in range(1, 2)]
#params_simtime += [60*60*i for i in range(1, 24)]
#params_simtime += [60*60*i for i in range(1, 24)]
#params_simtime += [60*60*24*i for i in range(1, 6)]
params_simtime += [60*60*24*i for i in range(1, 2)]
gen_reference_solution = True
timestep_size_reference = 10
# Time step sizes (seconds) per integrator family.
#params_timestep_sizes_explicit = [30]
params_timestep_sizes_explicit = [15/16, 15/8, 15/4, 15, 30, 60, 120, 240, 480, 960]
#params_timestep_sizes_implicit = [30]
params_timestep_sizes_implicit = [15/16, 15/8, 15/4, 15, 30, 60, 120, 240, 480, 960]
#params_timestep_sizes_rexi = [30]
params_timestep_sizes_rexi = params_timestep_sizes_implicit[:]
# Parallelization
params_pspace_num_cores_per_rank = [p.platform_resources.num_cores_per_socket]
#params_pspace_num_threads_per_rank = [i for i in range(1, p.platform_resources.num_cores_per_socket+1)]
params_pspace_num_threads_per_rank = [p.platform_resources.num_cores_per_socket]
params_ptime_num_cores_per_rank = [1]
# Components excluded from the generated unique job id.
unique_id_filter = []
#unique_id_filter.append('simparams')
unique_id_filter.append('compile')
unique_id_filter.append('disc_space')
unique_id_filter.append('timestep_order')
#unique_id_filter.append('timestep_size')
unique_id_filter.append('rexi_params')
unique_id_filter.append('benchmark')
p.unique_id_filter = unique_id_filter
# No output
#p.runtime.output_filename = "-"
# REXI stuff
params_ci_N = [128]
#params_ci_N = [1]
params_ci_max_imag = [30.0]
params_ci_max_real = [10.0]
#
# Scale the CI circle radius relative to this time step size
# We do this simply to get a consistent time stepping method
# Otherwise, CI would not behave consistently
#
params_ci_max_imag_scaling_relative_to_timestep_size = 480
#params_ci_max_imag_scaling_relative_to_timestep_size = None
##################################################
#
# Force deactivating Turbo mode
#
p.parallelization.force_turbo_off = True
def estimateWallclockTime(p):
    """
    Return the wallclock time (in seconds) to request for a job.

    The benchmark runs of interest all finish within about 10 minutes,
    so a flat 15-minute budget is requested for every job.  The
    parameter-dependent model that used to sit (unreachably) after the
    early return is preserved in _estimateWallclockTimeDetailed() below.
    """
    ###
    ### ALWAYS RETURN 15 MINUTES
    ### We're interested in runs which take up to 10 minutes
    ###
    return 60*15


def _estimateWallclockTimeDetailed(p):
    """
    Parameter-dependent wallclock estimate (currently unused).

    Scales a reference measurement by simulation time, time step size,
    spectral resolution and REXI settings, then applies a safety margin
    and clamps to the platform maximum.

    Reference measurement (on Cheyenne with GNU compiler,
    OMP_NUM_THREADS=18): 247.378 seconds for ln_erk2 with dt=30, m=128,
    t=432000.
    """
    # Reference wallclock time and corresponding time step size
    # e.g. for explicit RK2 integration scheme
    ref_wallclock_seconds = 60*4
    ref_simtime = 432000
    ref_timestep_size = 60
    ref_mode_res = 128

    # Use this scaling for additional wallclock time
    safety_scaling = 10
    # 5 Min additionally
    safety_add = 60*5

    wallclock_seconds = ref_wallclock_seconds

    # linear with simulation time
    wallclock_seconds *= p.runtime.max_simulation_time/ref_simtime
    # inversely proportional to the time step size
    wallclock_seconds *= ref_timestep_size/p.runtime.timestep_size
    # quadratic with resolution
    wallclock_seconds *= pow(ref_mode_res/p.runtime.space_res_spectral, 2.0)

    if p.runtime.rexi_method != '':
        if p.runtime.rexi_method != 'ci':
            raise Exception("TODO: Support other REXI methods")

        # Complex-valued
        wallclock_seconds *= 2.0
        # Number of REXI terms
        wallclock_seconds *= p.runtime.rexi_ci_n
        # Parallelization in time
        # NOTE(review): assumed to apply only to REXI runs (indentation
        # was reconstructed) — confirm against the original script.
        wallclock_seconds /= p.parallelization.pardims_dict['time'].num_ranks

    if wallclock_seconds <= 0:
        raise Exception("Estimated wallclock_seconds <= 0")

    wallclock_seconds *= safety_scaling
    wallclock_seconds += safety_add

    if wallclock_seconds > p.platform_resources.max_wallclock_seconds:
        wallclock_seconds = p.platform_resources.max_wallclock_seconds

    return wallclock_seconds
# --- Compile and runtime configuration ------------------------------------
p.compile.lapack = 'enable'
p.compile.mkl = 'disable'

# Request dedicated compile script
p.compilecommand_in_jobscript = False

#
# Run simulation on plane or sphere
#
p.compile.program = 'swe_sphere'

p.compile.plane_spectral_space = 'disable'
p.compile.plane_spectral_dealiasing = 'disable'
p.compile.sphere_spectral_space = 'enable'
p.compile.sphere_spectral_dealiasing = 'enable'

p.compile.benchmark_timings = 'enable'
p.compile.quadmath = 'enable'

#
# Activate Fortran source
#
p.compile.fortran_source = 'enable'

# Verbosity mode
p.runtime.verbosity = 0

#
# Mode and Physical resolution
#
p.runtime.space_res_spectral = 128
p.runtime.space_res_physical = -1

#
# Benchmark
#
p.runtime.benchmark_name = "galewsky"

#
# Compute error
#
p.runtime.compute_error = 0

#
# Preallocate the REXI matrices
#
p.runtime.rexi_sphere_preallocation = 1

# Leave instability checks activated
# Don't activate them since they are pretty costly!!!
p.runtime.instability_checks = 0

#
# REXI method
# N=64, SX,SY=50 and MU=0 with circle primitive provide good results
#
p.runtime.rexi_method = ''
p.runtime.rexi_ci_n = 128
p.runtime.rexi_ci_max_real = -999
p.runtime.rexi_ci_max_imag = -999
p.runtime.rexi_ci_sx = -1
p.runtime.rexi_ci_sy = -1
p.runtime.rexi_ci_mu = 0
p.runtime.rexi_ci_primitive = 'circle'

#p.runtime.rexi_beta_cutoff = 1e-16
#p.runtime.rexi_beta_cutoff = 0

p.runtime.viscosity = 0.0

p.runtime.sphere_extended_modes = 0

# Groups to execute, see below
# l: linear
# ln: linear and nonlinear
#groups = ['l1', 'l2', 'ln1', 'ln2', 'ln4']
groups = ['ln2']

#
# allow including this file
#
# NOTE(review): the nesting in this script was reconstructed from a
# whitespace-mangled source; confirm the loop/branch structure against
# the original benchmark script before relying on it.
if __name__ == "__main__":
    if len(sys.argv) > 1:
        groups = [sys.argv[1]]

print("Groups: "+str(groups))

for group in groups:
    # 1st order linear
    # 2nd order nonlinear
    if group == 'ln2':
        ts_methods = [
            ['ln_erk', 4, 4, 0], # reference solution
            ###########
            # Runge-Kutta
            ###########
            ['ln_erk', 2, 2, 0],
            ###########
            # CN
            ###########
            # ['lg_irk_lc_n_erk_ver0', 2, 2, 0],
            ['lg_irk_lc_n_erk_ver1', 2, 2, 0],
            # ['l_irk_n_erk_ver0', 2, 2, 0],
            # ['l_irk_n_erk_ver1', 2, 2, 0],
            ###########
            # REXI
            ###########
            # ['lg_rexi_lc_n_erk_ver0', 2, 2, 0],
            # ['lg_rexi_lc_n_erk_ver1', 2, 2, 0],
            # ['l_rexi_n_erk_ver0', 2, 2, 0],
            # ['l_rexi_n_erk_ver1', 2, 2, 0],
            ###########
            # ETDRK
            ###########
            # ['lg_rexi_lc_n_etdrk', 2, 2, 0],
            # ['l_rexi_n_etdrk', 2, 2, 0],
        ]

    # 4th order nonlinear
    if group == 'ln4':
        ts_methods = [
            ['ln_erk', 4, 4, 0], # reference solution
            ['l_rexi_n_etdrk', 4, 4, 0],
            ['ln_erk', 4, 4, 0],
        ]

    for p.runtime.max_simulation_time in params_simtime:
        p.runtime.output_timestep_size = p.runtime.max_simulation_time

        #
        # Reference solution
        #
        if gen_reference_solution:
            tsm = ts_methods[0]

            p.runtime.timestep_size = timestep_size_reference

            p.runtime.timestepping_method = tsm[0]
            p.runtime.timestepping_order = tsm[1]
            p.runtime.timestepping_order2 = tsm[2]

            # Update TIME parallelization
            ptime = JobParallelizationDimOptions('time')
            ptime.num_cores_per_rank = 1
            ptime.num_threads_per_rank = 1 #pspace.num_cores_per_rank
            ptime.num_ranks = 1

            pspace = JobParallelizationDimOptions('space')
            pspace.num_cores_per_rank = 1
            pspace.num_threads_per_rank = params_pspace_num_cores_per_rank[-1]
            pspace.num_ranks = 1

            # Setup parallelization
            p.setup_parallelization([pspace, ptime])

            if verbose:
                pspace.print()
                ptime.print()
                p.parallelization.print()

            if len(tsm) > 4:
                s = tsm[4]
                p.load_from_dict(tsm[4])

            p.parallelization.max_wallclock_seconds = estimateWallclockTime(p)

            p.gen_jobscript_directory('job_benchref_'+p.getUniqueID())

            # Remember the reference job id so benchmark jobs can link to it.
            p.reference_job_unique_id = p.job_unique_id

        #
        # Create job scripts
        #
        for tsm in ts_methods[1:]:
            p.runtime.timestepping_method = tsm[0]
            p.runtime.timestepping_order = tsm[1]
            p.runtime.timestepping_order2 = tsm[2]

            if len(tsm) > 4:
                s = tsm[4]
                p.runtime.load_from_dict(tsm[4])

            # Candidate time step sizes depend on the integrator family.
            tsm_name = tsm[0]
            if 'ln_erk' in tsm_name:
                params_timestep_sizes = params_timestep_sizes_explicit
            elif 'l_erk' in tsm_name or 'lg_erk' in tsm_name:
                params_timestep_sizes = params_timestep_sizes_explicit
            elif 'l_irk' in tsm_name or 'lg_irk' in tsm_name:
                params_timestep_sizes = params_timestep_sizes_implicit
            elif 'l_rexi' in tsm_name or 'lg_rexi' in tsm_name:
                params_timestep_sizes = params_timestep_sizes_rexi
            else:
                print("Unable to identify time stepping method "+tsm_name)
                sys.exit(1)

            for pspace_num_cores_per_rank, pspace_num_threads_per_rank, p.runtime.timestep_size in product(params_pspace_num_cores_per_rank, params_pspace_num_threads_per_rank, params_timestep_sizes):
                pspace = JobParallelizationDimOptions('space')
                pspace.num_cores_per_rank = pspace_num_cores_per_rank
                pspace.num_threads_per_rank = pspace_num_threads_per_rank
                pspace.num_ranks = 1
                pspace.setup()

                if not '_rexi' in p.runtime.timestepping_method:
                    p.runtime.rexi_method = ''

                    # Update TIME parallelization
                    ptime = JobParallelizationDimOptions('time')
                    ptime.num_cores_per_rank = 1
                    ptime.num_threads_per_rank = 1 #pspace.num_cores_per_rank
                    ptime.num_ranks = 1
                    ptime.setup()

                    p.setup_parallelization([pspace, ptime])

                    if verbose:
                        pspace.print()
                        ptime.print()
                        p.parallelization.print()

                    p.parallelization.max_wallclock_seconds = estimateWallclockTime(p)

                    p.gen_jobscript_directory('job_bench_'+p.getUniqueID())
                else:
                    for ci_N, ci_max_imag, ci_max_real in product(params_ci_N, params_ci_max_imag, params_ci_max_real):
                        if params_ci_max_imag_scaling_relative_to_timestep_size != None:
                            ci_max_imag *= (p.runtime.timestep_size/params_ci_max_imag_scaling_relative_to_timestep_size)

                        p.runtime.load_from_dict({
                            'rexi_method': 'ci',
                            'ci_n':ci_N,
                            'ci_max_real':ci_max_real,
                            'ci_max_imag':ci_max_imag,
                            'half_poles':0,
                            #'ci_gaussian_filter_scale':0.0,
                            #'ci_gaussian_filter_dt_norm':0.0, # unit scaling for T128 resolution
                            #'ci_gaussian_filter_exp_N':0.0,
                        })

                        # The second assignment wins: 8 ranks in time.
                        time_ranks = ci_N
                        time_ranks = 8

                        #for time_ranks in params_ptime_num_cores_per_rank:
                        if True:
                            # Update TIME parallelization
                            ptime = JobParallelizationDimOptions('time')
                            ptime.num_cores_per_rank = 1
                            ptime.num_threads_per_rank = 1
                            ptime.num_ranks = time_ranks
                            ptime.setup()

                            p.setup_parallelization([pspace, ptime])

                            if verbose:
                                pspace.print()
                                ptime.print()
                                p.parallelization.print()

                            # Generate only scripts with max number of cores
                            p.parallelization.max_wallclock_seconds = estimateWallclockTime(p)

                            # Require the time step to divide the simulation time exactly.
                            if int(p.runtime.max_simulation_time / p.runtime.timestep_size) * p.runtime.timestep_size != p.runtime.max_simulation_time:
                                raise Exception("Simtime "+str(p.runtime.max_simulation_time)+" not dividable without remainder by "+str(p.runtime.timestep_size))

                            p.gen_jobscript_directory('job_bench_'+p.getUniqueID())

if p.runtime.reuse_plans > 0:
    #
    # SHTNS plan generation scripts
    #
    p.runtime.reuse_plans = 1 # search for awesome plans and store them

    #
    # Create dummy scripts to be used for SHTNS script generation
    #

    # No parallelization in time
    ptime = JobParallelizationDimOptions('time')
    ptime.num_cores_per_rank = 1
    ptime.num_threads_per_rank = 1
    ptime.num_ranks = 1
    ptime.setup()

    for tsm in ts_methods[1:2]:
        p.runtime.timestepping_method = tsm[0]
        p.runtime.timestepping_order = tsm[1]
        p.runtime.timestepping_order2 = tsm[2]

        if not '_rexi' in p.runtime.timestepping_method:
            p.runtime.rexi_method = ''
        else:
            p.runtime.rexi_method = 'ci'

        if len(tsm) > 4:
            s = tsm[4]
            p.runtime.load_from_dict(tsm[4])

        for pspace_num_cores_per_rank, pspace_num_threads_per_rank, p.runtime.timestep_size in product(params_pspace_num_cores_per_rank, params_pspace_num_threads_per_rank, [params_timestep_sizes_explicit[0]]):
            pspace = JobParallelizationDimOptions('space')
            pspace.num_cores_per_rank = pspace_num_cores_per_rank
            pspace.num_threads_per_rank = pspace_num_threads_per_rank
            pspace.num_ranks = 1
            pspace.setup()

            p.setup_parallelization([pspace, ptime])

            # Use 10 minutes per default to generate plans
            p.parallelization.max_wallclock_seconds = 60*10

            # Set simtime to 0
            p.runtime.max_simulation_time = 0

            # No output
            p.runtime.output_timestep_size = -1
            p.runtime.output_filename = "-"

            jobdir = 'job_plan_'+p.getUniqueID()
            p.gen_jobscript_directory(jobdir)

# Write compile script
p.write_compilecommands("./compile_platform_"+p.platforms.platform_id+".sh")
| StarcoderdataPython |
1650907 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# ~/python/03_petle_01.py
# Print the alphabet in natural and reversed order with case mappings.
# Fix: the original used Python 2 print statements, a syntax error under
# Python 3 (Python 2 is end-of-life); the trailing-comma behaviour is
# reproduced with end=" ".
print("Alfabet w porządku naturalnym:")
for code in range(65, 91):          # ASCII 'A'..'Z'
    litera = chr(code)
    print(litera + " => " + litera.lower(), end=" ")
print("\nAlfabet w porządku odwróconym:")
for code in range(122, 96, -1):     # ASCII 'z'..'a'
    litera = chr(code)
    print(litera.upper(), "=>", litera, end=" ")
| StarcoderdataPython |
1746672 | """This module contains the general information for AdaptorRssProfile ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class AdaptorRssProfileConsts:
    """Allowed values for AdaptorRssProfile.receive_side_scaling (auto-generated)."""
    RECEIVE_SIDE_SCALING_DISABLED = "disabled"
    RECEIVE_SIDE_SCALING_ENABLED = "enabled"
class AdaptorRssProfile(ManagedObject):
    """This is AdaptorRssProfile class.

    Auto-generated UCS managed object; the metadata tables below must
    stay exactly in sync with the UCS model and should not be edited by
    hand.
    """

    consts = AdaptorRssProfileConsts()
    naming_props = set([])

    # Managed-object metadata: XML class id, Rn, version, access mask,
    # privileges, allowed parents and supported verbs.
    mo_meta = MoMeta("AdaptorRssProfile", "adaptorRssProfile", "rss", VersionMeta.Version101e, "InputOutput", 0x3f, [], ["admin", "ls-config-policy", "ls-network", "ls-server-policy"], ['adaptorHostEthIf', 'adaptorHostEthIfProfile', 'adaptorUsnicConnDef', 'adaptorVmmqConnDef'], [], ["Get", "Set"])

    # Per-property metadata (XML name, type, version, access, mask,
    # length limits, regex and allowed values).
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "receive_side_scaling": MoPropertyMeta("receive_side_scaling", "receiveSideScaling", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["disabled", "enabled"], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
    }

    # XML attribute name -> Python attribute name.
    prop_map = {
        "childAction": "child_action",
        "dn": "dn",
        "receiveSideScaling": "receive_side_scaling",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        self._dirty_mask = 0
        self.child_action = None
        self.receive_side_scaling = None
        self.sacl = None
        self.status = None

        ManagedObject.__init__(self, "AdaptorRssProfile", parent_mo_or_dn, **kwargs)
| StarcoderdataPython |
3254768 | # coding: utf-8
from nose.tools import eq_
import acmd
def test_default_values():
    """A Server constructed with only a name gets the stock defaults."""
    server = acmd.Server('foobar')
    assert server.name == 'foobar'
    assert server.host == 'http://localhost:4502'
    assert server.username == 'admin'
    assert server.password == '<PASSWORD>'
def test_constructor():
    """Explicit constructor arguments override every default."""
    server = acmd.Server(
        'foobar',
        host='http://sb3.com:4711',
        username='bjorn',
        password='<PASSWORD>',
    )
    assert server.name == 'foobar'
    assert server.host == 'http://sb3.com:4711'
    assert server.username == 'bjorn'
    assert server.password == '<PASSWORD>'
    eq_(('bjorn', 'foobar'), server.auth)
def test_url():
    """url() prefixes a local path with the server host."""
    server = acmd.Server('foobar')
    eq_('http://localhost:4502/local/path', server.url('/local/path'))
| StarcoderdataPython |
139126 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_asyncio_utils
----------------------------------
Tests for ``asyncio_utils`` module.
"""
import pytest
import collections
from asyncio_utils import *
pytestmark = pytest.mark.asyncio
async def test_aiter():
    """aiter accepts async-gen functions (uncalled), awaitables and plain iterables."""
    async def one():
        yield 1

    # Passing the generator *function* without calling it should still work.
    async for item in aiter(one):
        assert item == 1

    async for item in aiter(arange(1, 5)):
        assert item in range(1, 5)

    async for item in aiter(range(1, 5)):
        assert item in range(1, 5)
async def test_arange():
    """arange produces an AsyncIterator yielding the requested range."""
    iterator = await arange(1, 5)
    assert isinstance(iterator, collections.AsyncIterator)
    collected = [value async for value in iterator]
    assert collected == [1, 2, 3, 4]
async def test_transform_factory_with_async__type():
    """transform_factory accepts an async callable as its _type."""
    async def to_set(iterable):
        return set(iterable)

    result = await transform_factory(arange(1, 5), _type=to_set)
    assert result == {1, 2, 3, 4}
async def test_transform_factory_fails_if_type_not_callable():
    """A non-callable _type raises TypeError."""
    with pytest.raises(TypeError):
        await transform_factory(await arange(1, 5), _type=None)
async def test_alist():
    """alist materialises an async iterable into a list."""
    assert await alist(arange(1, 5)) == [1, 2, 3, 4]
async def test_atuple():
    """atuple materialises an async iterator into a tuple."""
    assert await atuple(await arange(1, 5)) == (1, 2, 3, 4)
async def test_amap():
    """amap applies sync and async mapping callables alike."""
    expected = ['$1', '$2', '$3', '$4']

    sync_result = await alist(amap('${}'.format, arange(1, 5)))
    assert sync_result == expected

    async def dollar(val):
        return f'${val}'

    async_result = await alist(amap(dollar, await arange(1, 5)))
    assert async_result == expected
async def test_anext():
    """anext steps an async iterator and raises StopAsyncIteration at the end."""
    iterator = await arange(1, 5)
    for expected in range(1, 5):
        assert await anext(iterator) == expected

    with pytest.raises(StopAsyncIteration):
        await anext(iterator)

    # Plain (synchronous) iterators are rejected.
    with pytest.raises(TypeError):
        await anext(iter(range(1, 5)))
async def test_anext_with_default_arg():
    """A positional default is returned once the iterator is exhausted."""
    iterator = await arange(1)
    assert await anext(iterator) == 0
    assert await anext(iterator, 3) == 3
async def test_anext_with_default_kwarg():
    """A keyword default is returned once the iterator is exhausted."""
    iterator = await arange(1)
    assert await anext(iterator) == 0
    assert await anext(iterator, default=3) == 3
async def test_aset():
    """aset materialises an async iterable into a set."""
    assert await aset(arange(1, 5)) == {1, 2, 3, 4}
async def test_adict():
    """adict builds a dict from an async stream of (key, value) pairs."""
    async def pairs():
        async for n in await arange(1, 5):
            yield (n, n * 2)

    assert await adict(pairs()) == {1: 2, 2: 4, 3: 6, 4: 8}
async def test_make_async():
    """make_async wraps sync callables — and classes — into awaitables."""
    def always_one():
        return 1

    wrapped = make_async(always_one)
    assert await wrapped() == always_one()

    @make_async
    def sync_a():
        return 'a'

    assert await sync_a() == 'a'

    @make_async
    class AlwaysOneClass(object):
        def __init__(self):
            self.value = 1

        def __repr__(self):
            return f'{self.__class__.__name__}(value={self.value})'

    assert repr(await AlwaysOneClass()) == 'AlwaysOneClass(value=1)'
async def test_afilter():
    """afilter works with both sync and async predicates."""
    sync_filtered = afilter(lambda x: x == 2, arange(1, 5))
    assert await anext(sync_filtered) == 2
    assert await anext(sync_filtered, None) is None

    async def is_two(val):
        return val == 2

    async_filtered = afilter(is_two, arange(1, 5))
    assert await anext(async_filtered) == 2
    with pytest.raises(StopAsyncIteration):
        await anext(async_filtered)
| StarcoderdataPython |
3222574 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
def test_extension():
    """The compiled extension exposes a working get_the_answer()."""
    from .test_ext import get_the_answer
    assert 42 == get_the_answer()
def test_eager_resources():
    """Eagerly-extracted resources are readable through the extension."""
    from .test_ext import read_the_answer
    assert 42 == read_the_answer()
| StarcoderdataPython |
class Stack(object):
    """A LIFO stack that also reports its current maximum in O(1).

    A parallel stack ``mx`` keeps, for every depth i, the maximum of the
    first i+1 pushed values, so ``max()`` is a single list access.
    """

    def __init__(self):
        self.stack = []   # the pushed values, top of stack at the end
        self.mx = []      # mx[i] == max(stack[:i+1]); parallel to `stack`
        self.n = 0        # element count (kept for API compatibility)

    def push(self, x):
        """Push *x* and update the running-maximum stack."""
        self.stack.append(x)
        current_max = self.mx[-1] if self.n > 0 else float('-inf')
        self.mx.append(max(current_max, x))
        self.n += 1

    def pop(self):
        """Pop and return the top value, or None when the stack is empty."""
        if self.n == 0:
            return None
        self.n -= 1
        # Fix: O(1) pop instead of the original O(n) slice copy
        # (`self.mx = self.mx[:-1]`), which rebuilt the list each call.
        self.mx.pop()
        return self.stack.pop()

    def max(self):
        """Return the maximum of the current contents, or None when empty."""
        if self.n == 0:
            return None
        return self.mx[-1]
def test():
    """Smoke test: max() on the empty stack, after pushes, and after pops."""
    s = Stack()
    assert s.max() is None
    s.push(1)
    assert 1 == s.max()
    s.push(4)
    assert 4 == s.max()
    s.push(2)
    assert 4 == s.max()
    s.pop()
    s.pop()
    s.push(2)
    assert 2 == s.max()
    # Fix: `print 'pass'` is Python 2 syntax; the parenthesised call is
    # valid under both Python 2 and 3.
    print('pass')


if __name__ == '__main__':
    test()
| StarcoderdataPython |
195864 | <reponame>openprocurement/openprocurement.auctions.dgf
# -*- coding: utf-8 -*-
from openprocurement.auctions.core.utils import opresource
from openprocurement.auctions.core.endpoints import ENDPOINTS
from openprocurement.auctions.dgf.views.other.item import AuctionItemResource
# Registers the resource with the framework; the class body is empty
# because all behaviour is inherited.
@opresource(
    name='dgfFinancialAssets:Auction Items',
    collection_path=ENDPOINTS['items'],
    path=ENDPOINTS['item'],
    auctionsprocurementMethodType="dgfFinancialAssets",
    description="Auction items")
class AuctionItemResource(AuctionItemResource):
    # NOTE(review): this class deliberately shadows the imported
    # AuctionItemResource (it subclasses the name it rebinds) — confirm
    # this is the intended registration pattern.
    pass
| StarcoderdataPython |
72021 | <filename>bin/scientificLaws.py
from functools import reduce
try:
import binutil # required to import from dreamcoder modules
except ModuleNotFoundError:
import bin.binutil # alt import if called as module
from bin.rational import RandomParameterization
from dreamcoder.domains.arithmetic.arithmeticPrimitives import (
f0,
f1,
fpi,
real_power,
real_subtraction,
real_addition,
real_division,
real_multiplication,
)
from dreamcoder.domains.list.listPrimitives import bootstrapTarget
from dreamcoder.dreamcoder import explorationCompression, commandlineArguments
from dreamcoder.grammar import Grammar
from dreamcoder.program import Program
from dreamcoder.recognition import RecurrentFeatureExtractor, DummyFeatureExtractor
from dreamcoder.task import DifferentiableTask, squaredErrorLoss
from dreamcoder.type import baseType, tlist, arrow
from dreamcoder.utilities import eprint, numberOfCPUs
# Base types used to build the task type signatures below.
tvector = baseType("vector")
treal = baseType("real")
tpositive = baseType("positive")
def makeTrainingData(
    request,
    law,
    # Number of examples
    N=10,
    # Vector dimensionality
    D=2,
    # Maximum absolute value of a random number
    S=20.0,
):
    """Sample N random (inputs, output) example pairs for *law*.

    Argument values are drawn according to the task type *request*:
    reals in [-S, S], positives in [0, S], vectors of D reals, and lists
    of 1-4 sampled elements.
    """
    from random import random, randint

    def draw(tp, list_length):
        """Sample one random value of task type *tp*."""
        if tp.name == "real":
            return random() * S * 2 - S
        if tp.name == "positive":
            return random() * S
        if tp.name == "vector":
            return [random() * S * 2 - S for _ in range(D)]
        if tp.name == "list":
            return [draw(tp.arguments[0], list_length) for _ in range(list_length)]
        assert False, "unknown argument tp %s" % tp

    argument_types = request.functionArguments()
    examples = []
    for _ in range(N):
        # Length of any requested lists
        list_length = randint(1, 4)
        inputs = tuple(draw(tp, list_length) for tp in argument_types)
        examples.append((inputs, law(*inputs)))
    return examples
def makeTask(
    name,
    request,
    law,
    # Number of examples
    N=20,
    # Vector dimensionality
    D=3,
    # Maximum absolute value of a random number
    S=20.0,
):
    """Build a DifferentiableTask named *name* for the physical law *law*.

    Training data is sampled via makeTrainingData and the task type is
    mapped onto generic real/list types.
    """
    print(name)
    examples = makeTrainingData(request, law, N=N, D=D, S=S)
    print(examples)
    print()

    def generic(t):
        """Map task-specific base types onto generic real/list types."""
        if t.name == "real" or t.name == "positive":
            return treal
        if t.name == "vector":
            return tlist(treal)
        if t.name == "list":
            return tlist(generic(t.arguments[0]))
        if t.isArrow():
            return arrow(generic(t.arguments[0]), generic(t.arguments[1]))
        assert False, "could not make type generic: %s" % t

    return DifferentiableTask(
        name,
        generic(request),
        examples,
        BIC=10.0,
        likelihoodThreshold=-0.001,
        restarts=2,
        steps=25,
        maxParameters=1,
        loss=squaredErrorLoss,
    )
def norm(v):
    """Return the Euclidean (L2) norm of the vector *v*."""
    squared = 0.0
    for component in v:
        squared += component * component
    return squared ** 0.5
def unit(v):
    """Return *v* rescaled to unit length (divides by its Euclidean norm)."""
    inverse_length = 1.0 / norm(v)
    return scaleVector(inverse_length, v)
def scaleVector(a, v):
    """Return a new vector equal to *v* multiplied elementwise by scalar *a*."""
    scaled = []
    for component in v:
        scaled.append(a * component)
    return scaled
def innerProduct(a, b):
    """Dot product of *a* and *b* (zip truncates to the shorter vector)."""
    total = 0
    for x, y in zip(a, b):
        total += x * y
    return total
def crossProduct(a, b):
    """3-D cross product a x b, returned as a new 3-element list."""
    a1, a2, a3 = tuple(a)
    b1, b2, b3 = tuple(b)
    return [
        a2 * b3 - a3 * b2,
        a3 * b1 - a1 * b3,
        a1 * b2 - a2 * b1,
    ]
def vectorAddition(u, v):
    """Elementwise sum of *u* and *v* (zip truncates to the shorter)."""
    result = []
    for x, y in zip(u, v):
        result.append(x + y)
    return result
def vectorSubtraction(u, v):
    """Elementwise difference u - v (zip truncates to the shorter)."""
    difference = []
    for x, y in zip(u, v):
        difference.append(x - y)
    return difference
class LearnedFeatureExtractor(RecurrentFeatureExtractor):
    """Recurrent feature extractor over tokenized example I/O sequences."""

    def tokenize(self, examples):
        """Convert each (inputs, output) example into token sequences.

        Lists are bracketed with STARTLIST/ENDLIST markers; every scalar
        (float or int) becomes the single token "REAL".
        """
        def t(z):
            if isinstance(z, list):
                return ["STARTLIST"] + [y for x in z for y in t(x)] + ["ENDLIST"]
            assert isinstance(z, (float, int))
            return ["REAL"]
        return [(tuple(map(t, xs)), t(y)) for xs, y in examples]

    def __init__(self, tasks, examples, testingTasks=None, cuda=False):
        # Fix: mutable default argument replaced by the None sentinel
        # (interface-compatible; the list is only read here).
        if testingTasks is None:
            testingTasks = []
        # Vocabulary: every token appearing in any task's examples.
        lexicon = {
            c
            for t in tasks + testingTasks
            for xs, y in self.tokenize(t.examples)
            for c in reduce(lambda u, v: u + v, list(xs) + [y])
        }
        super(LearnedFeatureExtractor, self).__init__(
            lexicon=list(lexicon), cuda=cuda, H=64, tasks=tasks, bidirectional=True
        )

    def featuresOfProgram(self, p, tp):
        # Fix: the original referenced the undefined name `program`
        # (guaranteed NameError) — the parameter is `p`.
        p = p.visit(RandomParameterization.single)
        return super(LearnedFeatureExtractor, self).featuresOfProgram(p, tp)
# NOTE(review): indentation of this __main__ block was reconstructed from
# a whitespace-mangled source; confirm the layout against the original.
if __name__ == "__main__":
    pi = 3.14  # I think this is close enough to pi

    # Data taken from:
    # https://secure-media.collegeboard.org/digitalServices/pdf/ap/ap-physics-1-equations-table.pdf
    # https://secure-media.collegeboard.org/digitalServices/pdf/ap/physics-c-tables-and-equations-list.pdf
    # http://mcat.prep101.com/wp-content/uploads/ES_MCATPhysics.pdf
    # some linear algebra taken from "parallel distributed processing"
    tasks = [
        # parallel distributed processing
        makeTask("vector addition (2)", arrow(tvector, tvector, tvector), vectorAddition),
        makeTask("vector addition (many)", arrow(tlist(tvector), tvector),
                 lambda vs: reduce(vectorAddition, vs)),
        makeTask("vector norm", arrow(tvector, treal), lambda v: innerProduct(v, v) ** 0.5),
        # mcat
        makeTask("freefall velocity = (2gh)**.5", arrow(tpositive, treal),
                 lambda h: (2 * 9.8 * h) ** 0.5),
        makeTask("v^2 = v_0^2 + 2a(x-x0)", arrow(treal, treal, treal, treal, treal),
                 lambda v0, a, x, x0: v0**2 + 2 * a * (x - x0)),
        makeTask("v = (vx**2 + vy**2)**0.5", arrow(treal, treal, treal),
                 lambda vx, vy: (vx**2 + vy**2) ** 0.5),
        makeTask("a_r = v**2/R", arrow(treal, tpositive, treal), lambda v, r: v * v / r),
        makeTask("e = mc^2", arrow(tpositive, tpositive, treal), lambda m, c: m * c * c),
        makeTask("COM (general scalar)", arrow(tvector, tvector, treal),
                 lambda ms, xs: sum(m * x for m, x in zip(ms, xs)) / sum(ms)),
        makeTask("COM (2 vectors)", arrow(tvector, tvector, tpositive, tpositive, tvector),
                 lambda x1, x2, m1, m2: scaleVector(
                     1.0 / (m1 + m2),
                     vectorAddition(scaleVector(m1, x1), scaleVector(m2, x2)),
                 )),
        makeTask("density = mass/volume", arrow(treal, treal, treal), lambda m, v: m / v),
        makeTask("pressure = force/area", arrow(treal, treal, treal), lambda m, v: m / v),
        makeTask("P = I^2R", arrow(treal, treal, treal), lambda i, r: i * i * r),
        makeTask("P = V^2/R", arrow(treal, treal, treal), lambda v, r: v * v / r),
        makeTask("V_{rms} = V/sqrt2", arrow(treal, treal), lambda v: v / (2.0**0.5)),
        makeTask("U = 1/2CV^2", arrow(treal, treal, treal), lambda c, v: 0.5 * c * v * v),
        makeTask("U = 1/2QV", arrow(treal, treal, treal), lambda c, v: 0.5 * c * v),
        makeTask("U = 1/2Q^2/C", arrow(treal, tpositive, treal), lambda q, c: 0.5 * q * q / c),
        makeTask("P = 1/f", arrow(tpositive, tpositive), lambda f: 1.0 / f),
        makeTask("c = 1/2*r", arrow(treal, treal), lambda r: r / 2.0),
        # AP physics
        makeTask("Fnet = sum(F)", arrow(tlist(tvector), tvector),
                 lambda vs: reduce(vectorAddition, vs)),
        makeTask("a = sum(F)/m", arrow(tpositive, tlist(tvector), tvector),
                 lambda m, vs: scaleVector(1.0 / m, reduce(vectorAddition, vs))),
        makeTask("work = F.d", arrow(tvector, tvector, treal),
                 lambda f, d: innerProduct(f, d), S=20.0),
        makeTask("P = F.v", arrow(tvector, tvector, treal),
                 lambda f, d: innerProduct(f, d), S=20.0),
        makeTask("F = qvxB (3d)", arrow(treal, tvector, tvector, tvector),
                 lambda q, v, b: scaleVector(q, crossProduct(v, b))),
        makeTask("F = qvxB (2d)", arrow(treal, treal, treal, treal, treal, treal),
                 lambda q, a1, a2, b1, b2: q * (a1 * b2 - a2 * b1)),
        makeTask("tau = rxF (3d)", arrow(tvector, tvector, tvector), crossProduct),
        makeTask("tau = rxF (2d)", arrow(treal, treal, treal, treal, treal),
                 lambda a1, a2, b1, b2: a1 * b2 - a2 * b1),
        makeTask("v(t)", arrow(treal, treal, treal, treal), lambda v0, a, t: v0 + a * t),
        makeTask("x(t)", arrow(treal, treal, treal, treal, treal),
                 lambda x0, v0, a, t: x0 + v0 * t + 0.5 * a * t * t),
        makeTask("p=mv", arrow(tpositive, tvector, tvector),
                 lambda m, v: [m * _v for _v in v]),
        makeTask("dp=Fdt", arrow(treal, tvector, tvector),
                 lambda m, v: [m * _v for _v in v]),
        makeTask("K=1/2mv^2", arrow(tpositive, tvector, tpositive),
                 lambda m, v: 0.5 * m * norm(v) ** 2),
        makeTask("K=1/2Iw^2", arrow(tpositive, tpositive, tpositive),
                 lambda m, v: 0.5 * m * v**2),
        makeTask("E=pJ", arrow(treal, tvector, tvector),
                 lambda p, j: [p * _j for _j in j]),
        makeTask("Fs=kx", arrow(treal, tvector, tvector),
                 lambda k, x: [k * _x for _x in x]),
        makeTask("P=dE/dt", arrow(treal, treal, treal), lambda de, dt: de / dt),
        makeTask("theta(t)", arrow(treal, treal, treal, treal, treal),
                 lambda x0, v0, a, t: x0 + v0 * t + 0.5 * a * t * t),
        makeTask("omega(t)", arrow(treal, treal, treal, treal),
                 lambda v0, a, t: v0 + a * t),
        makeTask("T=2pi/w", arrow(tpositive, tpositive), lambda w: 2 * pi / w),
        makeTask("Ts=2pi(m/k)^1/2", arrow(tpositive, tpositive, tpositive),
                 lambda m, k: 2 * pi * (m / k) ** 0.5),
        makeTask("Tp=2pi(l/g)^1/2", arrow(tpositive, tpositive, tpositive),
                 lambda m, k: 2 * pi * (m / k) ** 0.5),
        # makeTask("Newtonian gravitation (2 vectors)",
        #          arrow(tpositive, tpositive, tvector, tvector, tvector),
        #          lambda m1, m2, r1, r2: scaleVector(m1 * m2 / (norm(vectorSubtraction(r1, r2)) ** 2),
        #                                             unit(vectorSubtraction(r1, r2)))),
        makeTask("Coulomb's law (2 vectors)",
                 arrow(tpositive, tpositive, tvector, tvector, tvector),
                 lambda m1, m2, r1, r2: scaleVector(
                     m1 * m2 / (norm(vectorSubtraction(r1, r2)) ** 2),
                     unit(vectorSubtraction(r1, r2)),
                 )),
        makeTask("Newtonian gravitation (vector)",
                 arrow(tpositive, tpositive, tvector, tvector),
                 lambda m1, m2, r: scaleVector(m1 * m2 / (norm(r) ** 2), unit(r))),
        makeTask("Coulomb's law (vector)",
                 arrow(tpositive, tpositive, tvector, tvector),
                 lambda m1, m2, r: scaleVector(m1 * m2 / (norm(r) ** 2), unit(r))),
        makeTask("Newtonian gravitation (scalar)",
                 arrow(tpositive, tpositive, tvector, treal),
                 lambda m1, m2, r: m1 * m2 / (norm(r) ** 2)),
        makeTask("Coulomb's law (scalar)",
                 arrow(tpositive, tpositive, tvector, treal),
                 lambda m1, m2, r: m1 * m2 / (norm(r) ** 2)),
        makeTask("Hook's law", arrow(treal, tpositive, tpositive),
                 lambda k, x: -k * x * x, N=20, S=20),
        makeTask("Hook's law (2 vectors)", arrow(treal, tvector, tvector, tpositive),
                 lambda k, u, v: k * norm(vectorSubtraction(u, v)), N=20, S=20),
        makeTask("Ohm's law", arrow(tpositive, tpositive, tpositive),
                 lambda r, i: r * i, N=20, S=20),
        makeTask("power/current/voltage relation", arrow(tpositive, tpositive, tpositive),
                 lambda i, v: v * i, N=20, S=20),
        makeTask("gravitational potential energy", arrow(tpositive, treal, treal),
                 lambda m, h: 9.8 * m * h, N=20, S=20),
        makeTask("time/frequency relation", arrow(tpositive, tpositive),
                 lambda t: 1.0 / t, N=20, S=2.0),
        makeTask("Plank relation", arrow(tpositive, tpositive),
                 lambda p: 1.0 / p, N=20, S=2.0),
        makeTask("capacitance from charge and voltage", arrow(tpositive, tpositive, tpositive),
                 lambda v, q: v / q, N=20, S=20),
        makeTask("series resistors", arrow(tlist(tpositive), tpositive),
                 lambda cs: sum(cs), N=20, S=20),
        # makeTask("parallel resistors",
        #          arrow(tlist(tpositive), tpositive),
        #          lambda cs: sum(c**(-1) for c in cs)**(-1),
        #          N=20,
        #          S=20),
        makeTask("parallel capacitors", arrow(tlist(tpositive), tpositive),
                 lambda cs: sum(cs), N=20, S=20),
        makeTask("series capacitors", arrow(tlist(tpositive), tpositive),
                 lambda cs: sum(c ** (-1) for c in cs) ** (-1), N=20, S=20),
        makeTask("A = pir^2", arrow(tpositive, tpositive), lambda r: pi * r * r),
        makeTask("c^2 = a^2 + b^2", arrow(tpositive, tpositive, tpositive),
                 lambda a, b: a * a + b * b),
    ]

    bootstrapTarget()
    # DSL primitives: arithmetic over reals plus basic list combinators.
    equationPrimitives = [
        # real,
        f0,
        f1,
        fpi,
        real_power,
        real_subtraction,
        real_addition,
        real_division,
        real_multiplication,
    ] + [
        Program.parse(n) for n in ["map", "fold", "empty", "cons", "car", "cdr", "zip"]
    ]
    baseGrammar = Grammar.uniform(equationPrimitives)

    eprint("Got %d equation discovery tasks..." % len(tasks))

    explorationCompression(
        baseGrammar,
        tasks,
        outputPrefix="experimentOutputs/scientificLaws",
        evaluationTimeout=0.1,
        testingTasks=[],
        **commandlineArguments(
            compressor="ocaml",
            featureExtractor=DummyFeatureExtractor,
            iterations=10,
            CPUs=numberOfCPUs(),
            structurePenalty=0.5,
            helmholtzRatio=0.5,
            a=3,
            maximumFrontier=10000,
            topK=2,
            pseudoCounts=10.0,
        )
    )
| StarcoderdataPython |
4809538 | import unittest
from meraki_cli.__main__ import _translate_input
# Records fed to _translate_input: each dict's 'changeme' key should be
# renamed, other keys left untouched.
INPUT = [
    {
        'changeme': '100',
        'leavemealone': '101'
    },
    {
        'changeme': '200',
        'leavemealone': '201'
    },
]

# Expected result of applying the 'changed=changeme' translation to INPUT.
OUTPUT = [
    {
        'changed': '100',
        'leavemealone': '101'
    },
    {
        'changed': '200',
        'leavemealone': '201'
    },
]

# Malformed spec used to exercise the error path.
BAD_TRANSLATION = 'changed-changeme'  # Uses a minus 'accidently'
class TestTranslations(unittest.TestCase):
    """Tests for the _translate_input key-renaming helper."""

    def testTranslation(self):
        """A well-formed 'new=old' spec renames keys in every record."""
        translated = _translate_input(INPUT, ['changed=changeme'])
        assert translated == OUTPUT

    def testTranslationBadFormatLogThenExit(self):
        """A malformed spec logs at CRITICAL and exits the program."""
        with self.assertLogs(level='CRITICAL'):
            with self.assertRaises(SystemExit):
                _translate_input(INPUT, BAD_TRANSLATION)
| StarcoderdataPython |
3203147 | <reponame>tayyipcanbay/solidity-uzerine-denemeler
from brownie import FundMe, MockV3Aggregator, network, config
from scripts.helpful_scripts import (
deploy_mocks,
get_account,
deploy_mocks,
LOCAL_BLOCKCHAIN_ENVIROMENTS,
)
from web3 import Web3
def deploy_fund_me():
    """Deploy the FundMe contract with the correct price-feed address.

    On a persistent network (e.g. Rinkeby) the Chainlink ETH/USD feed
    address comes from the brownie config; on a local network a
    MockV3Aggregator is deployed and used instead.
    """
    account = get_account()
    # Pass the price feed address to our FundMe contract:
    # if we are on a persistent network like Rinkeby, use the associated
    # address from the config; otherwise, deploy mocks.
    if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIROMENTS:
        price_feed_address = config["networks"][network.show_active()][
            "eth_usd_price_feed"
        ]
    else:
        deploy_mocks()
        price_feed_address = MockV3Aggregator[-1].address
    fund_me = FundMe.deploy(
        # Fix: the computed price_feed_address was being ignored — a
        # Rinkeby feed address was hard-coded, breaking every other
        # network (including local mocks).
        price_feed_address,
        {"from": account},
        publish_source=config["networks"][network.show_active()].get("verify"),
    )
    print(f"Contract deployed to {fund_me.address}")
    return fund_me
def main():
    """Entry point used by ``brownie run``: deploy the FundMe contract."""
    return deploy_fund_me()
| StarcoderdataPython |
72870 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 02 16:32:18 2017
@author: <NAME> <EMAIL>
"""
import json
#import cPickle as pickle
import twit_token
import unicodedata as uniD
import os
import nltk
import re
from pymongo import MongoClient
#MongoDB credentials and collections
#DBname = 'test-tree'
#DBname = 'test_recurse'
DBname = 'Alta_Real_New'
DBhost = 'localhost'
DBport = 27017
# initiate Mongo Client
client = MongoClient()
client = MongoClient(DBhost, DBport)
db = client[DBname]
# this code needs to change for Python 3 to use the function
reply_db = db.replies_to_trump
root_db = db.trump_tweets
DB_train = client['semeval2017']
# this code needs to change for Python 3 to use the function
reply_train_db = DB_train.replies_to_trump
root_train_db = DB_train.trump_tweets
#top_path = ferg_1_path
# Two parallel tokenizations are kept for every tweet:
#   zub_*  : NLTK word-tokenized text with punctuation stripped
#   twit_* : output of the custom twit_token tokenizer
zub_id_text_dic = {}
twit_id_text_dic = {}
zub_text_list = []
twit_text_list = []
id_list = []
train_dir = "Data/semeval2017-task8-dataset"
train_dic_dir = "traindev"
train_data_dir ="rumoureval-data"
top_path = "/".join([train_dir,train_data_dir])
#top_path = ferg_1_path
source_id = ""
reply_id = ""
#get tweets from files
# NOTE(review): the edge_list collection is used as an "already ingested"
# marker here (skip file ingestion when it is non-empty) -- confirm intent.
if not DB_train.edge_list.find_one():
    walk = os.walk(top_path)
    for current_dir in walk:
        # NOTE(review): "\\" path splitting/joining is Windows-only; this
        # walk would not match directories on POSIX systems.
        last_dir = current_dir[0].split("\\")[-1]
        if last_dir == "source-tweet" or last_dir == "replies":
            for json_path in current_dir[-1]:
                with open(current_dir[0]+"\\"+json_path,"r")as jsonfile:
                    filedic = json.load(jsonfile)
                text = filedic["text"].lower()
                # Strip punctuation/underscores, then NLTK-tokenize.
                zub_text = " ".join(nltk.word_tokenize(re.sub(r'([^\s\w]|_)+', '',text)))
                zub_id_text_dic[filedic["id"]] = zub_text
                zub_text_list.append(zub_text)
                # Newlines are replaced with the sentinel token N3WL1N3 so
                # each tweet stays on one corpus line.
                text_in = text.replace("\n","N3WL1N3")#+'\r\n'
                twit_text = " ".join(twit_token.ize(text_in))
                twit_id_text_dic[filedic["id"]] = twit_text
                twit_text_list.append(twit_text)
                id_list.append(filedic["id"])
# Get tweets from DB
for tweet in list(root_db.find())+list(reply_db.find())+list(reply_train_db.find())+list(root_train_db.find()):
    # Prefer the extended-tweet text when present.
    if tweet.get("full_text", None):
        text = tweet["full_text"].lower()
    else:
        text = tweet["text"].lower()
    zub_text = " ".join(nltk.word_tokenize(re.sub(r'([^\s\w]|_)+', '',text)))
    zub_id_text_dic[tweet["id"]] = zub_text
    zub_text_list.append(zub_text)
    text_in = text.replace("\n","N3WL1N3")#+'\r\n'
    twit_text = " ".join(twit_token.ize(text_in))
    twit_id_text_dic[tweet["id"]] = twit_text
    twit_text_list.append(twit_text)
    id_list.append(tweet["id"])
# I save all the containers I use to create teh doc2vec training file
# I do this to make sure that debugging doc2vec will be easy and
# I'll have all the data I need to ask any question I want to
# Spot-check: print the first 10 tweet pairs (zip stops at the shorter list).
for z_tweet,t_tweet in zip(zub_text_list,twit_text_list[:10]):
    print(z_tweet)
    print(t_tweet,'\n')
# NOTE(review): doc2vec_dir has no trailing '/', so the files below land
# next to Data/doc2vec/ with a 'trump_plus' filename prefix -- confirm
# that this is intended.
doc2vec_dir ="Data/doc2vec/trump_plus"
with open(doc2vec_dir+"twit_id_text_dic.json","w") as picfile:
    json.dump(twit_id_text_dic,picfile)
with open(doc2vec_dir+"twit_text_list.json","w") as picfile:
    json.dump(twit_text_list,picfile)
with open(doc2vec_dir+"twit_doc2vec_train_corpus.txt","wb")as corpusfile:
    corpusfile.writelines([txt.encode("utf8")+"\r\n".encode("utf8") for txt in twit_text_list])
with open(doc2vec_dir+"zub_id_text_dic.json","w") as picfile:
    json.dump(zub_id_text_dic,picfile)
with open(doc2vec_dir+"zub_text_list.json","w") as picfile:
    json.dump(zub_text_list,picfile)
with open(doc2vec_dir+"zub_doc2vec_train_corpus.txt","wb")as corpusfile:
    corpusfile.writelines([txt.encode("utf8")+"\r\n".encode("utf8") for txt in zub_text_list])
with open(doc2vec_dir+"id_list.json","w") as picfile:
    json.dump(id_list,picfile)
| StarcoderdataPython |
126714 | <gh_stars>0
def minion_game(string):
    """Score the 'Minion Game' over `string` and print the winner.

    Stuart scores one point per substring starting with a consonant, Kevin
    one point per substring starting with a vowel; a substring starting at
    index i contributes len(string) - i points. Prints 'Stuart <score>',
    'Kevin <score>' or 'Draw'. Assumes `string` is uppercase (vowels are
    matched against 'AEIOU').
    """
    # Idiom fixes vs. the original: no needless list(string) copy, a set for
    # O(1) vowel membership, lower_snake_case locals, and the running
    # countdown replaced by the direct n - i contribution.
    vowels = set('AEIOU')
    n = len(string)
    kevin = 0
    stuart = 0
    for i, ch in enumerate(string):
        # Every substring starting at index i contributes n - i points.
        if ch in vowels:
            kevin += n - i
        else:
            stuart += n - i
    if stuart > kevin:
        print('Stuart', stuart)
    elif kevin > stuart:
        print('Kevin', kevin)
    else:
        print('Draw')
| StarcoderdataPython |
21596 | <filename>todoapi/apps.py<gh_stars>10-100
from django.apps import AppConfig
class TodoapiConfig(AppConfig):
    """Django application configuration for the 'todoapi' app."""
    # Dotted module path Django uses to register this app (INSTALLED_APPS).
    name = 'todoapi'
| StarcoderdataPython |
3272263 | import torch
# https://discuss.pytorch.org/t/covariance-and-gradient-support/16217
def cov(x, rowvar=False):
    """Estimate the covariance matrix of `x` (autograd-friendly).

    With the default rowvar=False each *column* of `x` is a variable and
    each row an observation; pass rowvar=True for the transposed
    convention. Returns the unbiased (N-1 normalised) covariance matrix,
    squeezed to a scalar when there is a single variable.
    """
    if x.dim() > 2:
        raise ValueError('x has more than 2 dimensions')
    if x.dim() < 2:
        # Promote a 1-D sample vector to a single-variable row matrix.
        x = x.view(1, -1)
    if not rowvar and x.size(0) != 1:
        # Flip so rows index variables and columns index observations.
        x = x.t()
    n_obs = x.size(1)
    centered = x - x.mean(dim=1, keepdim=True)
    return centered.matmul(centered.t()).squeeze() / (n_obs - 1)
| StarcoderdataPython |
1603701 | #!/usr/bin/env python3
#
# Copyright (c) 2019-2021 Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import argparse
import os
import subprocess
import sys
import logging
import tempfile
import yaml
def case_infra_error(case):
    """Return False when `case` reports an infrastructure failure, else True.

    A test case counts as an infra failure when its metadata marks the
    error type as 'Infrastructure' or its error message mentions a timeout.
    Cases missing any of the inspected keys are treated as non-infra.
    """
    try:
        meta = case["metadata"]
        if meta["error_type"] == "Infrastructure":
            logging.error("case %s: infra error is type Infrastructure", case["id"])
            return False
        if "timed out" in meta["error_msg"]:
            logging.error(
                "case %s: infra error: %s", case["id"], meta["error_msg"]
            )
            return False
        return True
    except KeyError:
        # Absent metadata / keys: nothing indicates an infra problem.
        return True
def not_infra_error(path):
    """Return True when the results file at `path` shows no infra error.

    A missing results file is treated (with a warning) as "no infra error".
    """
    try:
        with open(path) as handle:
            cases = yaml.safe_load(handle)
        return all(map(case_infra_error, cases))
    except FileNotFoundError:
        logging.warning("Could not open results file %s", path)
        return True
def run_one_job(cmd):
    """Run a job and return a boolean indicating if there was not an infra error.

    Raises a `subprocess.CalledProcessError` when the called script fails.
    """
    # check=True converts a non-zero exit status into CalledProcessError;
    # the job script is expected to leave its results in job_results.yaml
    # in the current working directory.
    subprocess.run(cmd, check=True)
    return not_infra_error("job_results.yaml")
def retry_job(cmd, retries):
    """Run a job until there was not an infra error or retries are exhausted.

    Returns True on the first infra-error-free run, False when all
    `retries` attempts hit infra errors (or retries == 0). Raises a
    `subprocess.CalledProcessError` when the called script fails.
    """
    logging.debug("trying job %s up to %d times", str(cmd), retries)
    for _attempt in range(retries):
        if run_one_job(cmd):
            return True
    return False
if __name__ == "__main__":
    # To deploy and boot the artefacts on a board in LAVA a platform specific
    # yaml file should be dispatched to LAVA. The below logic will identify
    # the name of the yaml file at run time for the platform defined in run_cfg.
    platform_list = ['n1sdp', 'juno']
    run_cfg = os.environ["RUN_CONFIG"]
    res = [i for i in platform_list if i in run_cfg]
    # NOTE(review): if RUN_CONFIG ever matched more than one platform name,
    # ''.join(res) would concatenate them into a bogus filename -- confirm
    # that run-configs always name exactly one platform.
    if res:
        platform_yaml=''.join(res)+'.yaml'
    else:
        logging.critical("Exiting: Platform not found for LAVA in run-config %s", os.environ["RUN_CONFIG"])
        sys.exit(-1)
    parser = argparse.ArgumentParser(
        description="Lava job runner with infrastructure error dectection and retry."
    )
    parser.add_argument(
        "script",
        nargs="?",
        default=os.path.join(os.path.dirname(__file__), "run_lava_job.sh"),
        help="bash job script to run a lava job",
    )
    parser.add_argument(
        "job",
        nargs="?",
        default=os.path.join("artefacts", os.environ["BIN_MODE"], platform_yaml),
        help="the Lava job description file",
    )
    parser.add_argument(
        "retries",
        type=int,
        nargs="?",
        default=3,
        help="Number of retries. defaluts to 3",
    )
    parser.add_argument(
        "--save",
        # A fresh temp dir is created at parse time even if --save is given.
        default=tempfile.mkdtemp(prefix="job-output"),
        help="directory to store the job_output.log",
    )
    parser.add_argument(
        "--username",
        required=True,
        help="the user name for lava server",
    )
    parser.add_argument(
        "--token",
        required=True,
        help="the token for lava server",
    )
    parser.add_argument(
        "-v", action="count", default=0, help="Increase printing of debug ouptut"
    )
    args = parser.parse_args()
    # -v => INFO, -vv (or more) => DEBUG.
    if args.v >= 2:
        logging.getLogger().setLevel(logging.DEBUG)
    elif args.v >= 1:
        logging.getLogger().setLevel(logging.INFO)
    logging.debug(args)
    try:
        # Exit 0 on success, -1 when every attempt hit an infra error, or
        # propagate the job script's own exit code on hard failure.
        if not retry_job([args.script, args.job, args.save, args.username, args.token],\
                args.retries):
            logging.critical("All jobs failed with infra errors; retries exhausted")
            sys.exit(-1)
        else:
            sys.exit(0)
    except subprocess.CalledProcessError as e:
        logging.critical("Job script returned error code %d", e.returncode)
        sys.exit(e.returncode)
| StarcoderdataPython |
1604124 | import torch
from . import factories
def test_base_agent_initialize():
    """initialize() must attach the swarm and create one centroid per cluster."""
    agent = factories.PSOAgentFactory.create()
    swarm = factories.SwarmFactory.create()
    points = torch.tensor([
        [3.0, 3.0, 3.0],
        [1.0, 1.0, 1.0],
        [2.0, 2.0, 2.0],
    ])
    agent.initialize(data=points, swarm=swarm)
    if agent.swarm != swarm:
        raise AssertionError()
    if len(agent.centroids) != agent.n_clusters:
        raise AssertionError()
def test_base_agent_memorize():
    """memorize() must store the given position tensor in the agent's memory."""
    agent = factories.PSOAgentFactory.create()
    tensor = torch.tensor([3.0, 3.0, 3.0])
    agent.memorize(score=1, position=tensor)
    # memory[0] is the (score, position) entry; index 1 is the position.
    # FIX: dropped the redundant '== True' comparison -- comparing the
    # 0-dim result of torch.all() to True was both noisy and fragile.
    if not torch.all(torch.eq(tensor, agent.memory.memory[0][1])):
        raise AssertionError()
def test_base_agent_topk():
    """topk(k=1) must return the single best memorized position."""
    agent = factories.PSOAgentFactory.create()
    tensor = torch.tensor([3.0, 3.0, 3.0])
    agent.memorize(score=1, position=tensor)
    topk = agent.topk(k=1)[0]
    # FIX: dropped the redundant '== True' comparison on the torch.all()
    # result (see test_base_agent_memorize).
    if not torch.all(torch.eq(tensor, topk)):
        raise AssertionError()
| StarcoderdataPython |
170845 | <filename>commerce/commerce/auctions/migrations/0010_remove_auction_seller.py
# Generated by Django 3.1.7 on 2021-03-30 13:30
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the 'Seller' field from Auction."""

    dependencies = [
        ('auctions', '0009_auto_20210330_1826'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='auction',
            name='Seller',
        ),
    ]
| StarcoderdataPython |
3259978 | <filename>oldathena_read.py
"""
Read Athena4 output data files.
"""
# Python modules
import numpy as np
#=======================================================================================
def vtk(filename):
    """Read .vtk files and return dict of arrays of data.

    Parses an Athena4 binary RECTILINEAR_GRID .vtk dump by walking the raw
    byte string with a cursor (current_index). Returns a tuple
    (time, x_faces, y_faces, z_faces, data) where data maps quantity names
    to numpy arrays shaped (nz, ny, nx) (scalars) or (nz, ny, nx, 3)
    (vectors).

    NOTE(review): this reader is Python-2 only as written:
      * the file is opened in text mode ('r') and slices of the resulting
        str are fed to struct.unpack, which requires bytes on Python 3;
      * face_dimensions below is a map object on Python 3 and the later
        face_dimensions[0] indexing would raise TypeError.
    """
    # Python module
    import struct

    # Read raw data
    with open(filename, 'r') as data_file:
        raw_data = data_file.read()

    # Skip header
    current_index = 0
    current_char = raw_data[current_index]
    # while current_char == '#':
    #     while current_char != '\n':
    #         current_index += 1
    #         current_char = raw_data[current_index]
    #     current_index += 1
    #     current_char = raw_data[current_index]

    # Skip the first line
    while current_char != '\n':
        current_index += 1
        current_char = raw_data[current_index]
    #print current_char
    # Extract time info from the second line after time=...
    while current_char != '=':
        current_index += 1
        current_char = raw_data[current_index]
    #print current_char
    stime = ""
    # Accumulate the characters after '=' up to (and including) the space.
    while current_char != ' ':
        current_index += 1
        current_char = raw_data[current_index]
        stime += current_char
    current_index += 1
    current_char = raw_data[current_index]
    # float() tolerates the trailing whitespace gathered above.
    time = float(stime)
    #print 'time = ',time
    # Skip the rest of the time line.
    while current_char != '\n':
        current_index += 1
        current_char = raw_data[current_index]
    current_index += 1

    # Function for skipping though the file
    def skip_string(expected_string):
        # Verify the literal at the cursor and return the index just past it.
        expected_string_len = len(expected_string)
        if raw_data[current_index:current_index+expected_string_len] != expected_string:
            raise AthenaError('File not formatted as expected')
        return current_index+expected_string_len

    # Read metadata
    current_index = skip_string('BINARY\nDATASET RECTILINEAR_GRID\nDIMENSIONS ')
    end_of_line_index = current_index + 1
    while raw_data[end_of_line_index] != '\n':
        end_of_line_index += 1
    # NOTE(review): wrap in list() when porting to Python 3 (map is lazy).
    face_dimensions = map(int, raw_data[current_index:end_of_line_index].split(' '))
    current_index = end_of_line_index + 1

    # Function for reading interface locations
    def read_faces(letter, num_faces):
        # Big-endian float32 coordinates follow the X/Y/Z_COORDINATES line.
        identifier_string = '{0}_COORDINATES {1} float\n'.format(letter,num_faces)
        begin_index = skip_string(identifier_string)
        format_string = '>' + 'f'*num_faces
        end_index = begin_index + 4*num_faces
        vals = np.array(struct.unpack(format_string, raw_data[begin_index:end_index]))
        return vals,end_index+1

    # Read interface locations
    x_faces,current_index = read_faces('X', face_dimensions[0])
    y_faces,current_index = read_faces('Y', face_dimensions[1])
    z_faces,current_index = read_faces('Z', face_dimensions[2])

    # Prepare to read quantities defined on grid
    cell_dimensions = np.array([max(dim-1,1)
                                for dim in face_dimensions])
    num_cells = cell_dimensions.prod()
    current_index = skip_string('CELL_DATA {0}\n'.format(num_cells))
    if raw_data[current_index:current_index+1] == '\n':
        current_index = skip_string('\n')  # extra newline inserted by join script
    data = {}

    # Function for reading scalar data
    def read_cell_scalars():
        # Quantity name sits between 'SCALARS ' and the following space.
        begin_index = skip_string('SCALARS ')
        end_of_word_index = begin_index + 1
        while raw_data[end_of_word_index] != ' ':
            end_of_word_index += 1
        array_name = raw_data[begin_index:end_of_word_index]
        string_to_skip = 'SCALARS {0} float\nLOOKUP_TABLE default\n'.format(array_name)
        begin_index = skip_string(string_to_skip)
        format_string = '>' + 'f'*num_cells
        end_index = begin_index + 4*num_cells
        data[array_name] = struct.unpack(format_string, raw_data[begin_index:end_index])
        # Reverse to (nz, ny, nx) ordering.
        dimensions = tuple(cell_dimensions[::-1])
        data[array_name] = np.array(data[array_name]).reshape(dimensions)
        return end_index+1

    # Function for reading vector data
    def read_cell_vectors():
        begin_index = skip_string('VECTORS ')
        end_of_word_index = begin_index + 1
        while raw_data[end_of_word_index] != '\n':
            end_of_word_index += 1
        array_name = raw_data[begin_index:end_of_word_index]
        string_to_skip = 'VECTORS {0}\n'.format(array_name)
        array_name = array_name[:-6]  # remove ' float'
        begin_index = skip_string(string_to_skip)
        # Three big-endian float32 components per cell.
        format_string = '>' + 'f'*num_cells*3
        end_index = begin_index + 4*num_cells*3
        data[array_name] = struct.unpack(format_string, raw_data[begin_index:end_index])
        dimensions = tuple(np.append(cell_dimensions[::-1],3))
        data[array_name] = np.array(data[array_name]).reshape(dimensions)
        return end_index+1

    # Read quantities defined on grid
    while current_index < len(raw_data):
        expected_string = 'SCALARS'
        expected_string_len = len(expected_string)
        if raw_data[current_index:current_index+expected_string_len] == expected_string:
            current_index = read_cell_scalars()
            continue
        expected_string = 'VECTORS'
        expected_string_len = len(expected_string)
        if raw_data[current_index:current_index+expected_string_len] == expected_string:
            current_index = read_cell_vectors()
            continue
        raise AthenaError('File not formatted as expected')
    return time,x_faces,y_faces,z_faces,data
#=======================================================================================
def athdf(filename, data=None, quantities=None):
    """Read .athdf (HDF5) files and populate a dict of arrays of data.

    Parameters
    ----------
    filename : str
        Path to the Athena++ HDF5 output file.
    data : dict, optional
        Pre-existing dict to fill in place; its keys name the quantities
        to read. When omitted a fresh dict is created.
    quantities : list of str, optional
        Dataset names to read; defaults to every cell-centred quantity in
        the first mesh block. Ignored when `data` is given.

    Returns the populated dict, which also contains the interface
    coordinate arrays 'x1f' (and 'x2f'/'x3f' for 2-D/3-D data).
    """
    # Python module for reading hdf5 files
    import h5py

    # Open file
    with h5py.File(filename, 'r') as f:
        # Create list of all quantities if none given
        if data is not None:
            # BUG FIX: this previously used data.values() -- the arrays
            # themselves -- as quantity names. The *keys* are what must
            # index both `data` and the HDF5 blocks below; the coordinate
            # keys are filtered out just like in the default branch.
            quantities = [q for q in data.keys()
                          if q != u'x1f' and q != u'x2f' and q != u'x3f']
        elif quantities is None:
            quantities = f[u'MeshBlock0'].keys()
            quantities = [q for q in quantities \
                    if q != u'x1f' and q != u'x2f' and q != u'x3f']

        # Get block count, dimensions, and sizes
        num_blocks = len(f.keys())
        dims = 0
        block_size = []
        coords = [u'x1f',u'x2f',u'x3f']
        for key in coords:
            if key in f[u'MeshBlock0'].keys():
                dims += 1
                # Interface counts exceed cell counts by one.
                block_size.append(len(f[u'MeshBlock0'][key][:]) - 1)
        coords = coords[:dims]

        # Order blocks by their lower edge coordinate in each dimension
        edges = np.empty((num_blocks,dims))
        for block_num,block_name in zip(range(num_blocks),f.keys()):
            for dim,coord in zip(range(dims),coords):
                edges[block_num,dim] = f[block_name][coord][0]
        edges_unique = []
        for dim in range(dims):
            edges_unique.append(set(edges[:,dim]))
        # indices[n,dim] = (start, stop) cell index of block n in the
        # assembled global grid; unused dims collapse to (0, 1).
        indices = np.empty((num_blocks,3,2), dtype=int)
        for block_num in range(num_blocks):
            for dim in range(dims):
                num_prior = sum(edge < edges[block_num,dim] for edge in edges_unique[dim])
                indices[block_num,dim,0] = num_prior * block_size[dim]
                indices[block_num,dim,1] = (num_prior+1) * block_size[dim]
            for dim in range(dims,3):
                indices[block_num,dim,0] = 0
                indices[block_num,dim,1] = 1

        # Prepare arrays if needed
        nx1 = block_size[0] * len(edges_unique[0])
        nx2 = block_size[1] * len(edges_unique[1]) if dims >= 2 else 1
        nx3 = block_size[2] * len(edges_unique[2]) if dims >= 3 else 1
        if data is None:
            data = {}
            for q in quantities:
                data[q] = np.empty((nx3,nx2,nx1))
        data[u'x1f'] = np.empty(nx1+1)
        if dims >= 2:
            data[u'x2f'] = np.empty(nx2+1)
        if dims >= 3:
            data[u'x3f'] = np.empty(nx3+1)

        # Read interface data (only from blocks on the domain edge in the
        # other dimensions, so each coordinate is written exactly once)
        for n,block_name in zip(range(num_blocks),f.keys()):
            for dim,coord in zip(range(dims),coords):
                need_interfaces = True
                for dim_other in range(dims):
                    if dim_other == dim:
                        continue
                    if indices[n,dim_other,0] != 0:
                        need_interfaces = False
                if not need_interfaces:
                    continue
                data[coord][indices[n,dim,0]:indices[n,dim,1]] = f[block_name][coord][:-1]
                # The outermost block also supplies the final interface.
                if indices[n,dim,1] == block_size[dim] * len(edges_unique[dim]):
                    data[coord][indices[n,dim,1]] = f[block_name][coord][-1]

        # Read value data
        for n,block_name in zip(range(num_blocks),f.keys()):
            kl = indices[n,2,0]
            ku = indices[n,2,1]
            jl = indices[n,1,0]
            ju = indices[n,1,1]
            il = indices[n,0,0]
            iu = indices[n,0,1]
            for q in quantities:
                data[q][kl:ku,jl:ju,il:iu] = f[block_name][q][:]
    return data
#=======================================================================================
class AthenaError(RuntimeError):
    """General exception class for Athena++ read functions."""
    # FIX: removed the redundant `pass` -- the docstring alone is a
    # complete class body.
| StarcoderdataPython |
1744416 | <filename>setup.py
from setuptools import setup, find_packages
# Packaging manifest for the frasco-bootstrap Frasco extension.
setup(
    name='frasco-bootstrap',
    version='0.1.3',
    url='http://github.com/frascoweb/frasco-bootstrap',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    description="Bootstrap (frontend framework) integration for Frasco",
    packages=find_packages(),
    # Ship the non-Python template/macro assets alongside the package code.
    package_data={
        'frasco_bootstrap': [
            'macros/*.html',
            'templates/*.html',
            'templates/users/*.html']
    },
    zip_safe=False,
    platforms='any',
    install_requires=[
        'frasco',
        'frasco-assets'
    ]
)
| StarcoderdataPython |
1799027 | <filename>tests/test_boxes.py
from unittest import TestCase
from omnicanvas.graphics import ShapeGraphic, BoxGraphic
class BoxGraphicCreationTests(TestCase):
    """Construction of BoxGraphic: default styling and argument validation."""

    def test_can_create_box_graphic(self):
        """A new box is a ShapeGraphic carrying the given geometry plus defaults."""
        graphic = BoxGraphic(10, 20, 100, 200)
        self.assertIsInstance(graphic, ShapeGraphic)
        self.assertEqual(graphic._x, 10)
        self.assertEqual(graphic._y, 20)
        self.assertEqual(graphic._width, 100)
        self.assertEqual(graphic._height, 200)
        self.assertEqual(graphic._fill_color, "#FFFFFF")
        self.assertEqual(graphic._opacity, 1)
        self.assertEqual(graphic._line_width, 1)
        self.assertEqual(graphic._line_style, "-")
        self.assertEqual(graphic._line_color, "#000000")
        self.assertEqual(graphic._rotation, (0, 0, 0))
        self.assertEqual(graphic._data, {})

    def test_box_location_must_be_numeric(self):
        """String coordinates are rejected; ints and floats are accepted."""
        for bad_args in (("10", 20, 100, 200), (10, "20", 100, 200)):
            with self.assertRaises(TypeError):
                BoxGraphic(*bad_args)
        for ok_args in ((10.5, 20, 100, 200), (10, 20.5, 100, 200)):
            BoxGraphic(*ok_args)

    def test_box_dimensions_must_be_numeric(self):
        """String dimensions are rejected; ints and floats are accepted."""
        for bad_args in ((10, 20, "100", 200), (10, 20, 100, "200")):
            with self.assertRaises(TypeError):
                BoxGraphic(*bad_args)
        for ok_args in ((10, 20, 100.5, 200), (10, 20, 100, 200.5)):
            BoxGraphic(*ok_args)
class BoxGraphicPropertyTests(TestCase):
    """Accessor/mutator behaviour of BoxGraphic properties."""

    def test_basic_properties(self):
        """Getter calls return the underlying private attributes."""
        graphic = BoxGraphic(10, 20, 100, 200)
        self.assertIs(graphic.x(), graphic._x)
        self.assertIs(graphic.y(), graphic._y)
        self.assertIs(graphic.width(), graphic._width)
        self.assertIs(graphic.height(), graphic._height)

    def test_can_set_location(self):
        """Calling x()/y() with a value updates the coordinate."""
        graphic = BoxGraphic(10, 20, 100, 200)
        graphic.x(200)
        self.assertEqual(graphic.x(), 200)
        graphic.y(-10)
        self.assertEqual(graphic.y(), -10)

    def test_set_box_location_must_be_numeric(self):
        """Setting a string coordinate raises; floats are fine."""
        graphic = BoxGraphic(10, 20, 100, 200)
        with self.assertRaises(TypeError):
            graphic.x("10")
        with self.assertRaises(TypeError):
            graphic.y("20")
        graphic.x(10.5)
        graphic.y(10.5)

    def test_can_set_box_size(self):
        """Calling width()/height() with a value updates the dimension."""
        graphic = BoxGraphic(10, 20, 100, 200)
        graphic.width(200)
        self.assertEqual(graphic.width(), 200)
        graphic.height(-10)
        self.assertEqual(graphic.height(), -10)

    def test_set_box_size_must_be_numeric(self):
        """Setting a string dimension raises; floats are fine."""
        graphic = BoxGraphic(10, 20, 100, 200)
        with self.assertRaises(TypeError):
            graphic.width("10")
        with self.assertRaises(TypeError):
            graphic.height("20")
        graphic.width(10.5)
        graphic.height(10.5)

    def test_box_center(self):
        """center() is the midpoint of the box."""
        graphic = BoxGraphic(10, 20, 100, 200)
        self.assertEqual(graphic.center(), (60, 120))
| StarcoderdataPython |
1735078 | <filename>schimpy/cencoos_download.py<gh_stars>1-10
from netCDF4 import *
import pyproj
import ogr, osr
import numpy as np
import time
import datetime as dtm
def time_block_report(message, told):
    """Print `message` with the seconds elapsed since `told`; return 'now'.

    Feed the returned timestamp back in as `told` at the next checkpoint to
    get incremental timings through the script.
    """
    now = time.time()
    print("%s : %s" % (message, now - told))
    return now
# For converting the Mercator projection of the data to lat/lon
# Some of the mercator parameters turned out to be a bit
# inconsistent on the CENCOOS side.
p=pyproj.Proj(r'+proj=merc +lat_ts=30.0 +lon_0=-128.735000610 +ellps=sphere')

# NOTE(review): this module-level URL appears unused --
# cencoos_schism_opendap builds its own wind/pressure URLs locally.
cencoos_url = "http://thredds.axiomalaska.com/thredds/dodsC/COAMPS_4KM_10M_WIND.nc"
# http://thredds.cencoos.org/thredds/dodsC/COAMPS_4KM_10M_WIND.nc"


def merc_to_latlon(x,y):
    """Convert COAMPS mercator x/y (metres) to (lon, lat) in degrees."""
    # pyproj's inverse transform returns (longitude, latitude).
    return p(x,y,inverse=True)
def cencoos_schism_opendap(lat_lo,lon_lo,lat_hi,lon_hi,
                           file_base,to_cache,from_cache):
    """Download CENCOOS COAMPS wind and pressure via OPeNDAP for all times
    inside a lat/lon bounding box and write one NetCDF file per day.

    Parameters
    ----------
    lat_lo, lon_lo, lat_hi, lon_hi : float
        Bounding box (degrees) used to subset the native mercator grid.
    file_base : str
        Prefix for the daily output files (<file_base>_YYYYMMDD.nc).
    to_cache : bool
        Stash the downloaded dimensions/arrays in local .txt/.npy files.
    from_cache : bool
        Load from those local files instead of querying the server.

    Side effects: writes latitude.csv, longitude.csv, meshpoints.csv,
    optional cache files and the daily NetCDF outputs into the cwd.

    The strategy for this download may change with time. When the script
    was originally written, blocking the query in time was very inefficient,
    so everything is fetched at once and split into daily files locally.
    """
    import time
    t = time.time()
    cencoos_wind_url = "http://thredds.cencoos.org/thredds/dodsC/COAMPS_4KM_10M_WIND.nc"
    cencoos_pmsl_url = "http://thredds.cencoos.org/thredds/dodsC/COAMPS_4KM_PRES_MSL.nc"
    if from_cache:
        # Fetch dimensions that have been stored previously.
        x_merc = np.loadtxt("x_merc.txt")
        y_merc = np.loadtxt("y_merc.txt")
        times = np.loadtxt("times.txt")
    else:
        data_wind = Dataset(cencoos_wind_url)
        t=time_block_report("Wind opened at",t)
        # Server coordinates are in km; convert to metres for pyproj.
        x_merc = data_wind.variables['x'][:]*1000.
        y_merc = data_wind.variables['y'][:]*1000.
        timevar=data_wind.variables['time']
        times = timevar[:]
        tunits = timevar.units
        alldatetimes = num2date(times,tunits)
        print("File time units: %s" % tunits)
        appdt = times[1] - times[0]
        print("Apparent dt in units: %s" % appdt)
        print("First datetime: %s" % alldatetimes[0])
        print("Last datetime: %s" % alldatetimes[-1])
        t=time_block_report("Full dimensions loaded at",t)
        data_pmsl= Dataset(cencoos_pmsl_url)
        t=time_block_report("Pressure opened at",t)
        # Report any output gaps (hourly data: any jump > 1 hour is a gap).
        last_time = 0.0
        for tt in times:
            if tt != last_time +1:
                dtt = dtm.datetime(1970,1,1) + dtm.timedelta(hours=tt)
                ldt = dtm.datetime(1970,1,1) + dtm.timedelta(hours=last_time)
                dttxt = dtt.strftime("%Y-%m-%d %H:%M")
                ldtxt = ldt.strftime("%Y-%m-%d %H:%M")
                print("Non-consecutive time %s %s" % (dttxt,ldtxt))
            last_time = tt
        dset_last = dtm.datetime(1970,1,1) + dtm.timedelta(hours=tt)
        print("Last time in cencoos dataset: %s" % dset_last)
    # Filter based on bounding box in lat/lon
    lon = np.array([merc_to_latlon(xx,y_merc[0])[0] for xx in x_merc])
    lat = np.array([merc_to_latlon(x_merc[0],yy)[1] for yy in y_merc])
    (latndx,) = np.logical_and(lat >= lat_lo, lat <= lat_hi).nonzero()
    (lonndx,) = np.logical_and(lon >= lon_lo, lon <= lon_hi).nonzero()
    lat_dest = lat[latndx]
    lon_dest = lon[lonndx]
    np.savetxt("latitude.csv",lat_dest,fmt="%.5f")
    np.savetxt("longitude.csv",lon_dest,fmt="%.5f")
    print("# lat: %s" % len(lat_dest))
    print("# lon: %s" % len(lon_dest))
    meshcoord = np.meshgrid(lon_dest,lat_dest)
    meshpoints = np.array(meshcoord).T.reshape(-1,2)
    np.savetxt("meshpoints.csv",meshpoints,delimiter = ",",fmt="%.5f")
    latstart = latndx.min()
    lonstart = lonndx.min()
    latstop = latndx.max() + 1
    lonstop = lonndx.max() + 1
    # Now load the major datasets either from cache or query
    if from_cache:
        uwind=np.load("uwind.npy")
        vwind=np.load("vwind.npy")
        pres =np.load("pmsl.npy")
    else:
        subset = "u_component_wind_true_direction_all_geometries"
        uwind = data_wind.variables[subset][:,latstart:latstop,lonstart:lonstop]
        t=time_block_report("Fetched uwind",t)
        subset = "v_component_wind_true_direction_all_geometries"
        vwind = data_wind.variables[subset][:,latstart:latstop,lonstart:lonstop]
        t=time_block_report("Fetched vwind",t)
        data_wind.close()
        subset = "pressure_reduce_to_MSL"
        pres = data_pmsl.variables[subset][:,latstart:latstop,lonstart:lonstop]
        t=time_block_report("Fetched pressure",t)
        data_pmsl.close()
    # Save to cache if requested (dimensions and the full data arrays).
    # (Previously two identical `if to_cache:` blocks; merged.)
    if to_cache:
        np.savetxt("times.txt",times)
        np.savetxt("x_merc.txt",x_merc)
        np.savetxt("y_merc.txt",y_merc)
        np.save("uwind",uwind)
        np.save("vwind",vwind)
        np.save("pmsl",pres)
    hrs = times.astype(int)
    dayint = hrs // 24
    minday = dayint.min()
    maxday = dayint.max()
    dayrange = np.arange(minday,maxday+1)
    # This remaining loop processes the big (in time) download and stores it in daily
    # blocks.
    for d,dy in enumerate(dayrange):
        print("Block: %s" % d)
        timendx, = np.where(dayint == dy)
        if len(timendx) == 24:
            lastgood = timendx
            assert np.all(times[timendx] == np.arange(dy*24,(dy+1)*24))
        elif len(timendx) == 0:
            # No data at all for this day: reuse the last complete day's index.
            print("empty index on block %s" % d)
            timendx = lastgood
            desired = np.arange(dy*24,(dy+1)*24)
        else:
            # Partial day: map each desired hour onto the nearest stored time.
            print("incomplete index on block %s" % d)
            desired = np.arange(dy*24,(dy+1)*24)
            timendx = np.searchsorted(times,desired,side="left")
        time_subset = times[timendx]
        # times are in hours since 1970-01-01
        base = dtm.datetime(1970,1,1) + dtm.timedelta(hours=dy*24)
        print("Base %s" % base.strftime("%Y-%m-%d %H:%M"))
        time_days = (time_subset - time_subset[0])/24.
        base_date_str = base.strftime("%Y-%m-%d")
        dest_time_unit = "days since %s" % base_date_str
        dest_time_base = base.year,base.month,base.day,base.hour
        t=time_block_report("Calcs done:",t)
        datestr_for_file = base.strftime("%Y%m%d")
        outname = "%s_%s.nc" % (file_base,datestr_for_file)
        out = Dataset(outname,"w",format='NETCDF4')
        print("Created file: %s " % outname)
        uwind2 = uwind[timendx,:,:]
        if np.any(np.isnan(uwind2)):
            # BUG FIX: this loop previously iterated range(uwinds.shape[0])
            # -- an undefined name -- and raised NameError whenever NaNs
            # were present. (NaN slabs are only reported, not patched.)
            for i in range(uwind2.shape[0]):
                if np.any(np.isnan(uwind2[i,:,:])):
                    print("uwind[%s] has nan values: %s *************************" % (i,datestr_for_file))
        vwind2 = vwind[timendx,:,:]
        if np.any(np.isnan(vwind2)):
            # BUG FIX: likewise 'vwinds' -> 'vwind2'.
            for i in range(vwind2.shape[0]):
                if np.any(np.isnan(vwind2[i,:,:])):
                    print("vwind[%s] has nan values: %s *************************" % (i,datestr_for_file))
        pres2 = pres[timendx,:,:]
        if np.any(np.isnan(pres2)):
            # Pressure NaNs are patched by repeating the previous hour's slab.
            for i in range(pres2.shape[0]):
                if np.any(np.isnan(pres2[i,:,:])):
                    pres2[i,:,:] = pres2[i-1,:,:]
                    print("pres[%s] has nan values: %s *************************" % (i,datestr_for_file))
        ny = pres2.shape[1]
        nx = pres2.shape[2]
        # (Dimension handles were previously bound to unused locals that
        # shadowed `time`/`lat`/`lon`; the bare calls are sufficient.)
        out.createDimension('time',None)
        out.createDimension('ny_grid',ny)
        out.createDimension('nx_grid',nx)
        times_dest = out.createVariable('time','f8',('time',))
        times_dest.long_name = "Time"
        times_dest.standard_name = "time"
        times_dest.units = dest_time_unit
        times_dest.base_date = dest_time_base
        times_dest[:] = time_days
        longitude = out.createVariable('lon','f4',('ny_grid','nx_grid',))
        longitude.long_name = "Longitude"
        longitude.standard_name = "longitude"
        longitude.units = "degrees_east"
        longitude[:,:] = meshcoord[0]
        latitude = out.createVariable('lat','f4',('ny_grid','nx_grid',))
        latitude.long_name = "Latitude"
        latitude.standard_name = "latitude"
        latitude.units = "degrees_north"
        latitude[:,:] = meshcoord[1]
        uwind_dest = out.createVariable('uwind','f4',('time','ny_grid','nx_grid',))
        uwind_dest.long_name = "Surface Eastward Air Velocity (10m AGL)"
        uwind_dest.standard_name = "eastward_wind"
        uwind_dest.units = "m/s"
        uwind_dest[:,:,:] = uwind2
        vwind_dest = out.createVariable('vwind','f4',('time','ny_grid','nx_grid',))
        vwind_dest.long_name = "Surface Northward Air Velocity (10m AGL)"
        vwind_dest.standard_name = "northward_wind"
        vwind_dest.units = "m/s"
        vwind_dest[:,:,:] = vwind2
        prmsl = out.createVariable('prmsl','f4',('time','ny_grid','nx_grid',))
        prmsl.long_name = "Pressure reduced to MSL"
        prmsl.standard_name = "air_pressure_at_sea_level"
        prmsl.units = "Pa"
        prmsl[:,:,:] = pres2
        out.sync()
        # SCHISM's sflux reader expects these variables to exist; they are
        # declared but left unwritten (fill values) here.
        print("Creating dummy surface temp and humidity")
        stmp = out.createVariable('stmp','f4',('time', 'ny_grid', 'nx_grid',))
        stmp.long_name = "Surface Air Temperature (2m AGL)"
        stmp.standard_name = "air_temperature"
        stmp.units = "K"
        spfh = out.createVariable('spfh','f4',('time', 'ny_grid', 'nx_grid',))
        spfh.long_name = "Surface Specific Humidity (2m AGL)"
        spfh.standard_name = "specific_humidity"
        spfh.units = "1"
        out.close()
        print("Closed file: %s" % outname)
if __name__ == '__main__':
    # Bounding box in degrees -- presumably the San Francisco Bay region
    # given the coordinates; confirm against the target SCHISM grid.
    lon_lo = -123.15
    lon_hi = -121.1
    lat_lo = 37.30
    #lat_hi = 38.90
    lat_hi = 39.0 # want nx != ny
    file_base = "cencoos_air"
    to_cache = True
    from_cache = False
    cencoos_schism_opendap(lat_lo,lon_lo,lat_hi,lon_hi,file_base,to_cache,from_cache)
| StarcoderdataPython |
62364 | # time_count.py
from webpie import WPApp, WPHandler
import time
class Handler(WPHandler):
    """WebPie handler exposing a single counting /time endpoint."""

    def time(self, request, relpath):
        """Return the bumped request counter and current wall time as text."""
        count = self.App.bump_counter()
        stamp = time.ctime()
        return "[%d]: %s\n" % (count, stamp), "text/plain"
class App(WPApp):
    """WPApp subclass carrying a process-wide request counter."""

    def __init__(self, handler_class):
        super().__init__(handler_class)
        # Number of times bump_counter() has been called so far.
        self.Counter = 0

    def bump_counter(self):
        """Increment the counter and return its new value."""
        self.Counter = self.Counter + 1
        return self.Counter
# Instantiate the app and serve it (blocking) on port 8080.
App(Handler).run_server(8080)
| StarcoderdataPython |
172394 | <reponame>kashifpk/PyCK
from pyck.forms import Form
import os
def test_pyck_lib_get_models_1():
    """Placeholder test stub -- no assertions implemented yet."""
    pass
| StarcoderdataPython |
134410 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2019-08-03 15:25:12
# Project: news_qq
from pyspider.libs.base_handler import *
import re
import pymysql
import pymongo
# Article URL regexes (original comment: "文章正则" = "article regex")
pattern_finance = re.compile('^(http|https)://finance.*')
pattern_artical = re.compile('^(http|https)://(.*?)-\d{8}.html(.*)')
# Matches publish timestamps such as "2019年08月03日15:25"
# (year/month/day with CJK unit characters, then HH:MM).
pattern_pub_time = re.compile('\d{4}年\d{2}月\d{2}日\d{2}:\d{2}')
class Handler(BaseHandler):
    """pyspider crawler for people.com.cn finance news.

    Crawls the finance front page daily, follows finance.* links, and for
    article pages (URLs ending in -YYYYMMDD.html) extracts the title,
    publish time and body, storing each record in MongoDB.
    """
    crawl_config = {
    }

    @every(minutes=24 * 60)  # re-seed the crawl once per day
    def on_start(self):
        """Seed the crawl with the finance front page."""
        self.crawl('http://finance.people.com.cn/', callback=self.index_page)

    @config(age=10 * 24 * 60 * 60)  # re-crawl a page after 10 days
    def index_page(self, response):
        """Follow finance links: articles go to detail_page, others recurse."""
        for each in response.doc('a[href^="http"]').items():
            if re.match(pattern_finance, each.attr.href):
                if re.match(pattern_artical, each.attr.href):
                    self.crawl(each.attr.href, callback=self.detail_page)
                else:
                    self.crawl(each.attr.href, callback=self.index_page)

    @config(priority=2)
    def detail_page(self, response):
        """Extract one article; skip pages without a parseable publish time."""
        pub_time = response.doc('.text_title > .box01 > .fl').text()
        pub_ret = re.search(pattern_pub_time, pub_time)
        if pub_ret is None:
            return
        url = response.url
        title = response.doc('.text_title > h1').text()
        content = response.doc('.text_con').html()
        text = response.doc('.text_con').text()
        publish_time = pub_ret.group()
        # Save to MySQL (disabled)
        # self.save_to_mysql((title, text, url, publish_time))
        result = {
            "url": url,
            "title": title,
            "publish_time": publish_time,
            "content": content,
            "text": text,
        }
        # Save to MongoDB
        self.save_to_mongo(result)
        return result

    # Save to MySQL
    def save_to_mysql(self, params):
        """Insert (title, text, url, pub_time) into spider.news_qq.

        NOTE(review): credentials are hard-coded, a fresh connection is
        opened per call, and the bare `except:` silently swallows every
        insert failure -- worth tightening.
        """
        db = pymysql.connect(host='localhost', user='root', password='password', port=3306, db='spider')
        cursor = db.cursor()
        sql = 'INSERT INTO news_qq(title, content, url, pub_time) values(%s, %s, %s, %s)'
        try:
            cursor.execute(sql, params)
            db.commit()
        except:
            db.rollback()
        cursor.close()
        db.close()

    # Save to MongoDB
    def save_to_mongo(self, params):
        """Insert one article dict into spider.news_sql.

        NOTE(review): Collection.insert() is deprecated in modern PyMongo
        (use insert_one), and the connection is re-created per call.
        """
        # Connect the client
        client = pymongo.MongoClient('mongodb://user:password@localhost:27017')
        # Select the database
        db = client.spider
        # Select the collection
        collection = db.news_sql
        # Insert the record
        collection.insert(params)
| StarcoderdataPython |
94623 | #!python3
from flask import Flask, render_template, request
from simpleeval import simple_eval
import logging
logging.basicConfig(level=logging.DEBUG)
# Declare the App
app = Flask(__name__)
@app.route('/')  # Root route
def main():
    """Render the unit-converter landing page."""
    print('-----------------started-----------------')
    return render_template('index.html')
# Form Submission Route
@app.route('/send', methods=['POST'])
def send():
    """Evaluate the submitted conversion form and re-render with results.

    Each branch handles one source unit (inch/mm, lbf/kgf, psi/Pa, F/C);
    form values are arithmetic expressions evaluated with simple_eval
    (sandboxed -- that is simpleeval's purpose). Only the first non-empty
    field is converted.

    NOTE(review): sig_figs is computed but never used -- every format
    string hard-codes 4 significant figures (and the default here is 5);
    either wire sig_figs into the formats or drop it. Also, any missing
    form key raises KeyError, and an all-empty POST falls through with no
    return (None), which Flask rejects -- confirm the template always
    submits every field.
    """
    if request.method == 'POST':
        # NOTE(review): the message says FORM but logs request.method.
        app.logger.info(f'REQUEST FORM: {request.method}')
        # print((f'REQUEST FORM: {request.form}'))
        sig_figs = 5
        if request.form['Significant_Figures']:
            sig_figs = float(request.form['Significant_Figures'])
        # LENGTH
        if request.form['inch']:
            inch = simple_eval(request.form['inch'])
            mm = inch * 25.4
            to_mm = f'{inch * 25.4:.4g} mm'
            return render_template('index.html', to_mm=to_mm)
        if request.form['mm']:
            mm = simple_eval(request.form['mm'])
            to_inch = f'{mm / 25.4:.4g} in'
            return render_template('index.html', to_inch=to_inch)
        # FORCE
        if request.form['lbf']:
            lbf = simple_eval(request.form['lbf'])
            to_kgf = f'{lbf / 2.20462:.4g} kgf'
            to_N = f'{lbf * 4.44822:.4g} N'
            return render_template('index.html', to_kgf=to_kgf, lbf_to_N=to_N)
        if request.form['kgf']:
            kgf = simple_eval(request.form['kgf'])
            to_lbf = f'{kgf * 2.20462:.4g} lbf'
            to_N = f'{kgf * 9.80665:.4g} N'
            return render_template('index.html', to_lbf=to_lbf, kgf_to_N=to_N)
        # PRESSURE
        if request.form['psi']:
            psi = simple_eval(request.form['psi'])
            to_Pa = f'{psi * 6894.76:.4g} Pa'
            to_MPa = f'{psi * 0.00689476:.4g} MPa'
            return render_template('index.html', to_Pa=to_Pa, to_MPa=to_MPa)
        if request.form['Pa']:
            Pa = simple_eval(request.form['Pa'])
            to_psi = f'{Pa / 6894.76:.4g} psi'
            to_ksi = f'{Pa / 6.89476:.4g} ksi'
            return render_template('index.html', to_psi=to_psi, to_ksi=to_ksi)
        # TEMPERATURE
        if request.form['F']:
            F = simple_eval(request.form['F'])
            to_C = f'{(F-32)*5/9:.4g}C'
            to_K = f'{(F-32)*5/9+273.15:.4g}K'
            return render_template('index.html', to_C=to_C, to_K=to_K)
        if request.form['C']:
            C = simple_eval(request.form['C'])
            to_F = f'{(C*9/5+32):.4g}F'
            C_to_K = f'{(C+273.15):.4g}K'
            return render_template('index.html', to_F=to_F, C_to_K=C_to_K)
    else:
        return render_template('index.html')
# Run the Flask development server (debug reloader on; not for production).
if __name__ == "__main__":
    app.run(debug=True)
| StarcoderdataPython |
3291721 | """Platform for sensor integration."""
from __future__ import annotations
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ID, POWER_WATT
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import DATA_COORDINATOR, DOMAIN, SENSOR_TYPE_RATE, SENSORS_INFO
async def async_setup_entry(
    hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities
):
    """Set up the sensor platform."""
    # Shared data-update coordinator created by the integration setup.
    coordinator = hass.data[DOMAIN][config_entry.entry_id][DATA_COORDINATOR]
    user_id = config_entry.data[CONF_ID]
    # One entity per description; each SENSORS_INFO entry supplies the
    # remaining HuisbaasjeSensor keyword arguments.
    async_add_entities(
        HuisbaasjeSensor(coordinator, user_id=user_id, **sensor_info)
        for sensor_info in SENSORS_INFO
    )
class HuisbaasjeSensor(CoordinatorEntity, SensorEntity):
    """Sensor entity backed by the Huisbaasje data-update coordinator."""

    def __init__(
        self,
        coordinator: DataUpdateCoordinator,
        user_id: str,
        name: str,
        source_type: str,
        device_class: str = None,
        sensor_type: str = SENSOR_TYPE_RATE,
        unit_of_measurement: str = POWER_WATT,
        icon: str = "mdi:lightning-bolt",
        precision: int = 0,
        state_class: str | None = None,
    ) -> None:
        """Store the static sensor description."""
        super().__init__(coordinator)
        self._user_id = user_id
        self._name = name
        self._source_type = source_type
        self._sensor_type = sensor_type
        self._device_class = device_class
        self._unit_of_measurement = unit_of_measurement
        self._icon = icon
        self._precision = precision
        self._attr_state_class = state_class

    @property
    def unique_id(self) -> str:
        """Return a stable id derived from user, source and sensor type."""
        parts = (DOMAIN, self._user_id, self._source_type, self._sensor_type)
        return "_".join(parts)

    @property
    def name(self) -> str:
        """Return the display name of the sensor."""
        return self._name

    @property
    def device_class(self) -> str | None:
        """Return the device class, if any."""
        return self._device_class

    @property
    def icon(self) -> str:
        """Return the frontend icon."""
        return self._icon

    @property
    def native_value(self):
        """Return the latest reading rounded to the configured precision."""
        reading = self.coordinator.data[self._source_type][self._sensor_type]
        if reading is None:
            return None
        return round(reading, self._precision)

    @property
    def native_unit_of_measurement(self) -> str:
        """Return the unit the reading is expressed in."""
        return self._unit_of_measurement

    @property
    def available(self) -> bool:
        """Available only while the coordinator has data for our source."""
        return (
            super().available
            and self.coordinator.data
            and self._source_type in self.coordinator.data
            and self.coordinator.data[self._source_type]
        )
| StarcoderdataPython |
3382269 | from __future__ import print_function
import sys
import argparse
import cv2
from event_camera_emulation.emulator import EventCameraEmulator
# Capture handle; populated in the __main__ block below.
camera_device_ = None
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--video_device', '-v', type=str,
                        default='0', help='The video device. For a camera,'
                        'provide its id, for e.g. 0. For a video file, provide'
                        'its path')
    args = parser.parse_args()
    # A numeric argument selects a camera index; anything else is treated
    # as a video file path.
    try:
        camera_device_ = cv2.VideoCapture(int(args.video_device))
    except ValueError:
        camera_device_ = cv2.VideoCapture(args.video_device)
    if camera_device_.isOpened():
        print('Successfully opened camera device')
        # Prime the frame differencer with an initial frame.
        _, previous_image = camera_device_.read()
    else:
        print('Could not open camera device!')
        sys.exit()
    e_camera_emulator = EventCameraEmulator()
    try:
        while True:
            _, current_image = camera_device_.read()
            # 20 is the per-pixel change threshold; the final flag's meaning
            # is defined by EventCameraEmulator — confirm before toggling.
            event_image = e_camera_emulator.get_events_image(current_image, previous_image, 20, False)
            # event_image = e_camera_emulator.get_events_image(current_image, previous_image, 20, True)
            previous_image = current_image
            cv2.imshow('Original Camera stream', current_image)
            cv2.imshow('Simulated Event Camera stream', event_image)
            cv2.waitKey(1)
            # NOTE(review): rewrites the sample image on every frame — likely
            # only needed once; confirm before moving it out of the loop.
            cv2.imwrite('/tmp/sample_events_image.jpg', event_image)
    except KeyboardInterrupt:
        # Ctrl-C is the intended way to stop streaming.
        print('\nFinished streaming, exiting program...')
        camera_device_.release()
        cv2.destroyAllWindows()
| StarcoderdataPython |
37031 | <reponame>JFF-Bohdan/reqlog<filename>reqlog/__main__.py
import os
import sys
import bottle
# Directory containing this module; used to repair sys.path below when the
# package is run from a source checkout rather than an installed location.
base_module_dir = os.path.dirname(sys.modules[__name__].__file__)
try:
    import reqlog  # noqa: F401 # need to check import possibility
except ImportError:
    # Running from source: make the package root importable first.
    path = base_module_dir
    path = os.path.join(path, "..")
    sys.path.insert(0, path)
    import reqlog  # noqa # testing that we able to import package
from reqlog.support.bottle_tools import log_all_routes  # noqa
config = reqlog.get_config()
reqlog.setup_app(config)
# Log the full routing table once at startup for diagnostics.
log_all_routes(reqlog.logger, reqlog.application)
# Serve the configured WSGI application with bottle's built-in server.
bottle.run(
    app=reqlog.application,
    host=config.get("main", "host"),
    port=config.getint("main", "port"),
    debug=config.getboolean("main", "debug"),
    reloader=config.getboolean("main", "reloader"),
    interval=config.getint("main", "reloader_interval")
)
| StarcoderdataPython |
3941 | import os
import df2img
import disnake
import pandas as pd
from PIL import Image
import discordbot.config_discordbot as cfg
from discordbot.config_discordbot import logger
from discordbot.helpers import autocrop_image
from gamestonk_terminal.economy import wsj_model
async def currencies_command(ctx):
    """Currencies overview [Wall St. Journal].

    Fetches the WSJ global currencies table, renders it to a PNG with
    df2img, and posts it as a Discord embed.  Any failure is reported back
    to the channel as a self-deleting error embed.
    """
    try:
        # Debug user input
        if cfg.DEBUG:
            logger.debug("econ-currencies")
        # Retrieve data
        df = wsj_model.global_currencies()
        df = pd.DataFrame.from_dict(df)
        # Check for argument
        if df.empty:
            raise Exception("No available data found")
        df["Last"] = pd.to_numeric(df["Last"].astype(float))
        df["Chng"] = pd.to_numeric(df["Chng"].astype(float))
        df["%Chng"] = pd.to_numeric(df["%Chng"].astype(float))
        formats = {"Last": "{:.2f}", "Chng": "{:.2f}", "%Chng": "{:.2f}%"}
        for col, value in formats.items():
            # Safe despite the late-binding lambda: map() executes eagerly
            # within this loop iteration.
            df[col] = df[col].map(lambda x: value.format(x))  # pylint: disable=W0640
        df = df.fillna("")
        df.set_index(" ", inplace=True)
        # Debug user output
        if cfg.DEBUG:
            logger.debug(df.to_string())
        df = df[
            [
                "Last",
                "Chng",
                "%Chng",
            ]
        ]
        # Scale the table image height with the number of rows.
        dindex = len(df.index)
        fig = df2img.plot_dataframe(
            df,
            fig_size=(800, (40 + (40 * dindex))),
            col_width=[8, 3, 3],
            tbl_cells=dict(
                align="left",
                height=35,
            ),
            template="plotly_dark",
            font=dict(
                family="Consolas",
                size=20,
            ),
            paper_bgcolor="rgba(0, 0, 0, 0)",
        )
        imagefile = "econ-currencies.png"
        df2img.save_dataframe(fig=fig, filename=imagefile)
        image = Image.open(imagefile)
        image = autocrop_image(image, 0)
        image.save(imagefile, "PNG", quality=100)
        image = disnake.File(imagefile)
        title = "Economy: [WSJ] Currencies"
        embed = disnake.Embed(title=title, colour=cfg.COLOR)
        embed.set_image(url=f"attachment://{imagefile}")
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )
        # Temp file is attached to the message; remove the local copy.
        os.remove(imagefile)
        await ctx.send(embed=embed, file=image)
    except Exception as e:
        # Broad on purpose: any failure is surfaced to the user as an
        # error embed rather than crashing the bot command handler.
        embed = disnake.Embed(
            title="ERROR Economy: [WSJ] Currencies",
            colour=cfg.COLOR,
            description=e,
        )
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )
        await ctx.send(embed=embed, delete_after=30.0)
| StarcoderdataPython |
3286815 | import traceback
import struct
def printsafe(data):
    """Render a bytes-like sequence as printable ASCII.

    Bytes in the printable range 0x20-0x7E become their character; every
    other byte becomes ``.`` (the classic hex-dump convention).

    :param data: iterable of ints (e.g. ``bytes`` or ``bytearray``).
    :return: the rendered string.
    """
    # join() builds the result in one pass instead of the previous
    # quadratic repeated string concatenation.
    return "".join(chr(b) if 0x20 <= b <= 0x7E else "." for b in data)
def hexdump(data):
    """Format *data* as a hex dump: offset, two 8-byte hex groups, ASCII.

    :param data: bytes-like sequence to dump.
    :return: multi-line string, one line per 16 bytes of input.
    """
    lines = []
    total = len(data)
    for offset in range(0, total, 0x10):
        # First group of up to 8 bytes, a separator, then the second group.
        hex_part = ""
        for x in range(offset, offset + 0x8 if offset + 0x8 <= total else total):
            hex_part = hex_part + "{0:02X} ".format(data[x])
        hex_part = hex_part + " "
        for x in range(offset + 0x8, offset + 0x10 if offset + 0x10 <= total else total):
            hex_part = hex_part + "{0:02X} ".format(data[x])
        lines.append("{0:04X} {1: <49s} {2:s}\n".format(
            offset, hex_part, printsafe(data[offset:offset + 0x10])))
    # join() avoids the previous quadratic string accumulation across lines.
    return "".join(lines)
def packetErrorTrace(data):
    """Format the active exception together with a decoded view of *data*.

    Returns a multi-line string: the traceback, a one-line summary of the
    packet header (flag names, sequence number, frequency band, message
    id), and a hex dump of the raw bytes.
    """
    a = traceback.format_exc()
    if not a:
        # NOTE(review): in Python 3 format_exc() returns "NoneType: None\n"
        # (truthy) when no exception is active, so this branch appears to
        # be dead — confirm intent.
        return "Error: No error"
    # Header layout: 1-byte flags, 4-byte big-endian sequence number,
    # 1-byte extra-data length.
    flags, seq, exlen = struct.unpack_from(">BIB", data, 0)
    # The message id follows the variable-length extra block.
    mid = struct.unpack_from(">I", data, 6+exlen)[0]
    # Classify the id into a frequency band.  The masks below are
    # protocol-specific — assumed from the wire format; confirm against
    # the protocol documentation.
    freq = "FX"
    if mid & 0xFFFFFFFA == 0xFFFFFFFA:
        mid = mid & 0x000000FA
        freq = "FX"
    elif mid & 0xFFFF0000 == 0xFFFF0000:
        mid = mid & 0x0000FFFF
        freq = "LO"
    elif mid & 0xFF000000 == 0xFF000000:
        mid = (mid >> 16) & 0xFF
        freq = "MD"
    else:
        mid = (mid >> 24) & 0xFF
        freq = "HI"
    # Decode the individual header flag bits into fixed-width labels.
    b = "{0: >3s} {1: >3s} {2: >3s} {3: >3s} {4:08X} {5: >2s} {6:08X}".format(
        "ZER" if flags&0x80 == 0x80 else "",
        "REL" if flags&0x40 == 0x40 else "",
        "RES" if flags&0x20 == 0x20 else "",
        "ACK" if flags&0x10 == 0x10 else "",
        seq,
        freq,
        mid
    )
    return "%s\n%s\n%s"%(a, b, ("-"*79)+"\n"+hexdump(data)+"\n"+("-"*79))
| StarcoderdataPython |
161418 | <reponame>BirkbeckCTP/jisc-doab
import logging
from operator import itemgetter
import os
import zipfile
from doab import const
from ebooklib import epub
logger = logging.getLogger(__name__)
class FileManager():
    """Helper for reading and writing files under a base directory.

    All relative ``path_parts`` arguments are joined beneath ``base_path``.
    """

    def __init__(self, base_path):
        # Anchor relative base paths to the current working directory.
        if not base_path.startswith("/"):
            base_path = os.path.join(os.getcwd(), base_path)
        self.base_path = base_path
        logger.debug("File manager using %s" % self.base_path)

    def write_bytes(self, *path_parts, filename, to_write):
        """Write ``to_write`` to ``<base>/<path_parts...>/<filename>``,
        creating intermediate directories as needed."""
        path_parts = (str(part) for part in path_parts)
        dir_path = os.path.join(self.base_path, *path_parts)
        self.makedirs(dir_path)
        path = os.path.join(dir_path, filename)
        with open(path, "wb") as f:
            f.write(to_write)

    def makedirs(self, path):
        """Create ``path`` (with parents) if it does not already exist."""
        if not os.path.exists(path):
            os.makedirs(path)

    def list(self, *path_parts, hidden=False):
        """List the entries of a directory under the base path.

        :param hidden: include dotfiles when True.
        """
        li = os.listdir(os.path.join(self.base_path, *path_parts))
        if not hidden:
            return [i for i in li if not i.startswith(".")]
        # BUG FIX: the original fell off the end and returned None
        # when hidden=True.
        return li

    def read(self, *path_parts, mode=""):
        """Return the whole file contents.

        :param mode: appended to "r" (e.g. "b" reads binary).
        """
        read_path = os.path.join(self.base_path, *path_parts)
        with open(read_path, f"r{mode}") as read_file:
            return read_file.read()

    def readlines(self, *path_parts, mode=""):
        """Return the file contents as a list of lines."""
        read_path = os.path.join(self.base_path, *path_parts)
        with open(read_path, f"r{mode}") as read_file:
            return read_file.readlines()

    def unzip(self, *path_parts, out=None):
        """Extract a zip archive to ``out``, or stream member contents.

        :param *path_parts: any number of parts joined to locate the archive.
        :param out: output directory.  When given, the archive is extracted
            immediately and None is returned.  When omitted, a generator of
            each member's bytes is returned.
        """
        read_path = os.path.join(self.base_path, *path_parts)
        zip_file = self._get_zipfile(read_path)
        if out:
            # BUG FIX: the original function contained a ``yield``, which
            # made the whole function a generator, so extractall() never ran
            # unless the caller iterated the result.  Extraction is eager now.
            zip_file.extractall(out)
            return None

        def _iter_members():
            # BUG FIX: the original called namelist()/read() on the
            # ``zipfile`` module instead of the opened archive object.
            for name in zip_file.namelist():
                yield zip_file.read(name)

        return _iter_members()

    def _get_zipfile(self, path, mode="r"):
        """Open ``path`` as a ZipFile."""
        return zipfile.ZipFile(path, mode)

    @property
    def types(self):
        """Return the recognized book formats present under base_path."""
        return_types = []
        for filetype, filename in const.RECOGNIZED_BOOK_TYPES.items():
            if os.path.exists(os.path.join(self.base_path, filename)):
                return_types.append(filetype)
        return return_types
class EPUBFileManager(FileManager):
    """FileManager specialised for reading EPUB archives."""

    def __init__(self, base_path):
        super().__init__(base_path)
        self.epub_filename = base_path
        self.epub_file = epub.read_epub(self.epub_filename)

    def read(self, filename=None, mime=None):
        """Yield (name, contents) pairs from the epub.

        :param filename: when given, yield only the item with this exact
            name (at most one pair) and stop.
        :param mime: when given (and filename is not), yield only items
            whose media type matches.
        :return: a generator of (filename, contents) tuples.
        """
        for item in self.epub_file.items:
            name = item.get_name()
            if filename:
                if filename == name:
                    # BUG FIX: this branch used ``return name, ...`` inside a
                    # generator, which stuffed the value into StopIteration
                    # instead of producing it.  Yield it like the other
                    # branches so callers consume results uniformly.
                    yield name, item.get_content()
                    return
            elif mime:
                if mime == item.media_type:
                    yield name, item.get_content()
            else:
                yield name, item.get_content()

    def list(self):
        """ Lists all the items available in the epub document and their MIMEs
        :return: A list of tuples of the format (MIME, filename)
        """
        return [
            (item.media_type, item.get_name())
            for item in self.epub_file.items
        ]

    def write_bytes(self, *args, **kwargs):
        """EPUB archives are read-only through this manager."""
        raise NotImplementedError
| StarcoderdataPython |
116644 | <reponame>RomuloSouza/corong
import pyxel
class Sprite:
    """A rectangular region of a pyxel image bank drawn at a screen position."""

    def __init__(self, x, y, i, u, v, w, h):
        # Screen position of the sprite's top-left corner.
        self.pos_x = x
        self.pos_y = y
        # Image bank index holding the sprite's pixels.
        self.img_idx = i
        # Top-left corner (u, v) of the sprite within that image bank.
        self.start_x = u
        self.start_y = v
        # Sprite dimensions in pixels.
        self.width = w
        self.height = h

    def draw(self):
        """Blit the sprite, treating black as the transparent color key."""
        pyxel.blt(self.pos_x, self.pos_y, self.img_idx,
                  self.start_x, self.start_y,
                  self.width, self.height,
                  colkey=pyxel.COLOR_BLACK)
| StarcoderdataPython |
16911 |
import __init__
import os
#os.environ['LD_LIBRARY_PATH'] += ':/usr/local/cuda-11.1/bin64:/usr/local/cuda-11.2/bin64'
import numpy as np
import torch
import torch.multiprocessing as mp
import torch_geometric.datasets as GeoData
from torch_geometric.loader import DenseDataLoader
import torch_geometric.transforms as T
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from config import OptInit
from architecture import DenseDeepGCN, CustomDenseDeepGCN
from utils.ckpt_util import load_pretrained_models, load_pretrained_optimizer, save_checkpoint
from utils.metrics import AverageMeter
import logging
from tqdm import tqdm
from parallel_wrapper import launch
import comm
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter(log_dir='log/mlp4')
def train(model, train_loader, optimizer, criterion, opt, cur_rank):
    """Run one training epoch, updating opt.losses and opt.iter in place."""
    opt.losses.reset()
    model.train()
    with tqdm(train_loader) as tqdm_loader:
        for i, data in enumerate(tqdm_loader):
            opt.iter += 1
            desc = 'Epoch:{} Iter:{} [{}/{}] Loss:{Losses.avg: .4f}'\
                .format(opt.epoch, opt.iter, i + 1, len(train_loader), Losses=opt.losses)
            tqdm_loader.set_description(desc)
            # Concatenate point positions and features along the channel
            # axis; assumes dense batches of shape (B, N, C) — confirm
            # against the DenseDataLoader output.
            inputs = torch.cat((data.pos.transpose(2, 1).unsqueeze(3), data.x.transpose(2, 1).unsqueeze(3)), 1)
            gt = data.y.to(opt.device)
            # ------------------ zero, output, loss
            optimizer.zero_grad()
            out = model(inputs)
            loss = criterion(out, gt)
            # ------------------ optimization
            loss.backward()
            optimizer.step()
            opt.losses.update(loss.item())
def test(model, loader, opt, cur_rank):
    """Evaluate mean IoU over *loader* and store it in opt.test_value."""
    # Per-batch intersection and union counts, one column per class.
    Is = np.empty((len(loader), opt.n_classes))
    Us = np.empty((len(loader), opt.n_classes))
    model.eval()
    with torch.no_grad():
        for i, data in enumerate(tqdm(loader)):
            inputs = torch.cat((data.pos.transpose(2, 1).unsqueeze(3), data.x.transpose(2, 1).unsqueeze(3)), 1)
            gt = data.y
            out = model(inputs)
            # Class prediction = argmax over the class logits dimension.
            pred = out.max(dim=1)[1]
            pred_np = pred.cpu().numpy()
            target_np = gt.cpu().numpy()
            for cl in range(opt.n_classes):
                cur_gt_mask = (target_np == cl)
                cur_pred_mask = (pred_np == cl)
                I = np.sum(np.logical_and(cur_pred_mask, cur_gt_mask), dtype=np.float32)
                U = np.sum(np.logical_or(cur_pred_mask, cur_gt_mask), dtype=np.float32)
                Is[i, cl] = I
                Us[i, cl] = U
    ious = np.divide(np.sum(Is, 0), np.sum(Us, 0))
    # Classes absent from both prediction and ground truth (0/0 -> NaN)
    # count as a perfect score by convention.
    ious[np.isnan(ious)] = 1
    iou = np.mean(ious)
    if opt.phase == 'test':
        for cl in range(opt.n_classes):
            logging.info("===> mIOU for class {}: {}".format(cl, ious[cl]))
    opt.test_value = iou
    logging.info('TEST Epoch: [{}]\t mIoU: {:.4f}\t'.format(opt.epoch, opt.test_value))
def epochs(opt):
    """Per-process training driver: build loaders, model, optimizer, then
    train/evaluate for opt.total_epochs, checkpointing on the main rank."""
    logging.info('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.data_dir, opt.area, True, pre_transform=T.NormalizeScale())
    train_sampler = DistributedSampler(train_dataset, shuffle=True, seed=opt.seed)
    train_loader = DenseDataLoader(train_dataset, batch_size=opt.batch_size, shuffle=False, sampler = train_sampler, num_workers=opt.n_gpus)
    test_dataset = GeoData.S3DIS(opt.data_dir, opt.area, train=False, pre_transform=T.NormalizeScale())
    test_sampler = DistributedSampler(test_dataset, shuffle=False, seed=opt.seed)
    test_loader = DenseDataLoader(test_dataset, batch_size=opt.batch_size, shuffle=False, sampler = test_sampler, num_workers=opt.n_gpus)
    opt.n_classes = train_loader.dataset.num_classes
    cur_rank = comm.get_local_rank()
    logging.info('===> Loading the network ...')
    model = DistributedDataParallel(CustomDenseDeepGCN(opt).to(cur_rank),device_ids=[cur_rank], output_device=cur_rank,broadcast_buffers=False).to(cur_rank)
    logging.info('===> loading pre-trained ...')
    model, opt.best_value, opt.epoch = load_pretrained_models(model, opt.pretrained_model, opt.phase)
    logging.info(model)
    logging.info('===> Init the optimizer ...')
    criterion = torch.nn.CrossEntropyLoss().to(cur_rank)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq, opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(opt.pretrained_model, optimizer, scheduler, opt.lr)
    logging.info('===> Init Metric ...')
    opt.losses = AverageMeter()
    opt.test_value = 0.
    logging.info('===> start training ...')
    for _ in range(opt.epoch, opt.total_epochs):
        opt.epoch += 1
        # Re-seed the samplers so each epoch shuffles differently per rank.
        train_sampler.set_epoch(opt.epoch)
        test_sampler.set_epoch(opt.epoch)
        logging.info('Epoch:{}'.format(opt.epoch))
        train(model, train_loader, optimizer, criterion, opt, cur_rank)
        if opt.epoch % opt.eval_freq == 0 and opt.eval_freq != -1:
            test(model, test_loader, opt, cur_rank)
        scheduler.step()
        if comm.is_main_process():
            # ------------------ save checkpoints
            # min or max. based on the metrics
            # NOTE(review): `<` marks a run as best when the new mIoU is
            # WORSE than the stored best, while best_value tracks the max —
            # this comparison looks inverted; confirm intent.
            is_best = (opt.test_value < opt.best_value)
            opt.best_value = max(opt.test_value, opt.best_value)
            model_cpu = {k: v.cpu() for k, v in model.state_dict().items()}
            save_checkpoint({
                'epoch': opt.epoch,
                'state_dict': model_cpu,
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
                'best_value': opt.best_value,
            }, is_best, opt.ckpt_dir, opt.exp_name)
            # ------------------ tensorboard log
            info = {
                'loss': opt.losses.avg,
                'test_value': opt.test_value,
                'lr': scheduler.get_lr()[0]
            }
            writer.add_scalar('Train Loss', info['loss'], opt.epoch)
            writer.add_scalar('Test IOU', info['test_value'], opt.epoch)
            writer.add_scalar('lr', info['lr'], opt.epoch)
    logging.info('Saving the final model.Finish!')
def hola():
    """Print a greeting (appears to be leftover debug code; unused here)."""
    print('Hola')
def main():
    """Parse options and launch one `epochs` process per GPU."""
    opt = OptInit().get_args()
    '''
    This wrapper taken from detectron2 (https://github.com/facebookresearch/detectron2/blob/main/detectron2/engine/launch.py),
    creates n_gpus processes and launches epochs function on each of them.
    '''
    launch(
        epochs,
        num_gpus_per_machine=opt.n_gpus,
        num_machines=1,
        machine_rank=0,
        dist_url='auto',
        args=(opt,)
    )
    #epochs(opt)
if __name__ == '__main__':
main() | StarcoderdataPython |
86794 | import argparse
import time
import numpy as np
import networkx as nx
import json
from sklearn.utils import check_random_state
import zmq
from . import agglo, agglo2, features, classify, evaluate as ev
# constants
# Class labels used when training the merge classifier: MERGE_LABEL marks
# edges whose endpoints should be merged, SEPAR_LABEL edges that must stay
# separate.
MERGE_LABEL = 0
SEPAR_LABEL = 1
class Solver:
    """ZMQ-based interface between proofreading clients and gala RAGs.
    This docstring is intentionally incomplete until the interface settles.
    Parameters
    ----------
    labels : array-like of int, shape (..., P, R, C)
        The fragment map.
    image : array-like of float, shape (..., P, R, C[, Ch]), optional
        The image, from which to compute intensity features.
    feature_manager : gala.features.Manager object
        Object exposing the feature manager interface, to compute the
        feature caches and features of the RAG.
    address : string, optional
        URL of client.
    relearn_threshold : int, optional
        Minimum batch size to trigger a new learning round.
    config_file : string, optional
        A JSON file specifying the URLs of the Solver, Client, and ID service.
        See `Solver._configure_from_file` for the file specification.
    Attributes
    ----------
    This section intentionally left blank.
    """
    def __init__(self, labels, image=np.array([]),
                 feature_manager=features.default.snemi3d(),
                 address=None, relearn_threshold=20,
                 config_file=None):
        self.labels = labels
        self.image = image
        self.feature_manager = feature_manager
        self._build_rag()
        config_address, id_address = self._configure_from_file(config_file)
        self.id_service = self._connect_to_id_service(id_address)
        # An explicit address argument takes precedence over the config file.
        self._connect_to_client(address or config_address)
        # Merge/separation actions and their feature vectors + labels,
        # accumulated from client messages for classifier training.
        self.history = []
        self.separate = []
        self.features = []
        self.targets = []
        self.relearn_threshold = relearn_threshold
        # NOTE(review): relearn_trigger is assigned here but never read.
        self.relearn_trigger = relearn_threshold
        self.recently_solved = True
    def _build_rag(self):
        """Build the region-adjacency graph from the label image."""
        self.rag = agglo.Rag(self.labels, self.image,
                             feature_manager=self.feature_manager,
                             normalize_probabilities=True)
        # Kept pristine so learning rounds can restart from scratch.
        self.original_rag = self.rag.copy()
    def _configure_from_file(self, filename):
        """Get all configuration parameters from a JSON file.
        The file specification is currently in flux, but looks like:
        ```
        {'id_service_url': 'tcp://localhost:5555',
         'client_url': 'tcp://*:9001',
         'solver_url': 'tcp://localhost:9001'}
        ```
        Parameters
        ----------
        filename : str
            The input filename.
        Returns
        -------
        address : str
            The URL to bind a ZMQ socket to.
        id_address : str
            The URL to bind an ID service to
        """
        if filename is None:
            return None, None
        with open(filename, 'r') as fin:
            config = json.load(fin)
        return (config.get('client_url', None),
                config.get('id_service_url', None))
    def _connect_to_client(self, address):
        # PAIR socket: exactly one proofreading client per solver.
        self.comm = zmq.Context().socket(zmq.PAIR)
        self.comm.bind(address)
    def _connect_to_id_service(self, url):
        """Return a ``get_ids(count) -> (begin, end)`` callable.

        Uses the remote REQ/REP id service when ``url`` is given; otherwise
        falls back to allocating ids above the current label maximum.
        """
        if url is not None:
            service_comm = zmq.Context().socket(zmq.REQ)
            service_comm.connect(url)
            def get_ids(count):
                print('requesting %i ids...' % count)
                service_comm.send_json({'count': count})
                print('receiving %i ids...' % count)
                received = service_comm.recv_json()
                id_range = received['begin'], received['end']
                return id_range
        else:
            # NOTE(review): the fallback recomputes the same start value on
            # every call, so repeated calls hand out overlapping ranges.
            def get_ids(count):
                start = np.max(self.labels) + 2
                return start, start + count
        return get_ids
    def send_segmentation(self):
        """Send a segmentation to ZMQ as a fragment-to-segment lookup table.
        The format of the lookup table (LUT) is specified in the BigCat
        wiki [1]_.
        References
        ----------
        .. [1] https://github.com/saalfeldlab/bigcat/wiki/Actors,-responsibilities,-and-inter-process-communication
        """
        if len(self.targets) < self.relearn_threshold:
            print('server has insufficient data to resolve')
            return
        self.relearn()  # correct way to do it is to implement RAG splits
        self.rag.agglomerate(0.5)
        self.recently_solved = True
        dst_tree = [int(i) for i in self.rag.tree.get_map(0.5)]
        # Renumber the segment ids through the id service so they are
        # globally unique across solvers.
        unique = set(dst_tree)
        start, end = self.id_service(len(unique))
        remap = dict(zip(unique, range(start, end)))
        dst = list(map(remap.__getitem__, dst_tree))
        src = list(range(len(dst)))
        message = {'type': 'fragment-segment-lut',
                   'data': {'fragments': src, 'segments': dst}}
        print('server sending:', message)
        try:
            self.comm.send_json(message, flags=zmq.NOBLOCK)
        except zmq.error.Again:
            # Client not ready to receive; drop this update silently.
            return
    def listen(self, send_every=None):
        """Listen to ZMQ port for instructions and data.
        The instructions conform to the proofreading protocol defined in the
        BigCat wiki [1]_.
        Parameters
        ----------
        send_every : int or float, optional
            Send a new segmentation every `send_every` seconds.
        References
        ----------
        .. [1] https://github.com/saalfeldlab/bigcat/wiki/Actors,-responsibilities,-and-inter-process-communication
        """
        start_time = time.time()
        recv_flags = zmq.NOBLOCK
        while True:
            if send_every is not None:
                elapsed_time = time.time() - start_time
                if elapsed_time > send_every:
                    print('server resolving')
                    self.send_segmentation()
                    start_time = time.time()
            try:
                if recv_flags == zmq.NOBLOCK:
                    print('server receiving no blocking...')
                else:
                    print('server receiving blocking...')
                message = self.comm.recv_json(flags=recv_flags)
                print('server received:', message)
                recv_flags = zmq.NOBLOCK
            except zmq.error.Again:  # no message received
                # Fall back to a blocking receive next iteration, after
                # optionally pushing an updated segmentation to the client.
                recv_flags = zmq.NULL
                print('server: no message received in time')
                if not self.recently_solved:
                    print('server resolving')
                    self.send_segmentation()
                continue
            # Dispatch on the protocol message type.
            command = message['type']
            data = message['data']
            if command == 'merge':
                segments = data['fragments']
                self.learn_merge(segments)
            elif command == 'separate':
                fragment = data['fragment']
                separate_from = data['from']
                self.learn_separation(fragment, separate_from)
            elif command == 'request':
                what = data['what']
                if what == 'fragment-segment-lut':
                    self.send_segmentation()
            elif command == 'stop':
                return
            else:
                print('command %s not recognized.' % command)
                continue
    def learn_merge(self, segments):
        """Learn that a group of segments should be merged.
        Parameters
        ----------
        segments : iterable of int
            Segment identifiers; all of them are merged into one.
        """
        segments = set(self.rag.tree.highest_ancestor(s) for s in segments)
        # ensure the segments are ordered such that every subsequent
        # pair shares an edge
        ordered = nx.dfs_preorder_nodes(nx.subgraph(self.rag, segments))
        s0 = next(ordered)
        for s1 in ordered:
            self.features.append(self.feature_manager(self.rag, s0, s1))
            self.history.append((s0, s1))
            s0 = self.rag.merge_nodes(s0, s1)
            self.targets.append(MERGE_LABEL)
        # Stay "solved" until both label classes have been observed —
        # the classifier cannot be trained on a single class.
        self.recently_solved = False or len(set(self.targets)) < 2
    def learn_separation(self, fragment, separate_from):
        """Learn that a fragment must not share a segment with others.
        Parameters
        ----------
        fragment : int
            The fragment identifier.
        separate_from : iterable of int
            Fragments to keep separate from `fragment`; when empty, all of
            its neighbors in the original RAG are used.
        """
        f0 = fragment
        if not separate_from:
            separate_from = self.original_rag.neighbors(f0)
        s0 = self.rag.tree.highest_ancestor(f0)
        for f1 in separate_from:
            if self.rag.boundary_body in (f0, f1):
                continue
            s1 = self.rag.tree.highest_ancestor(f1)
            # Record the separation both at the current-segment level and
            # at the original-fragment level, when an edge exists.
            if self.rag.has_edge(s0, s1):
                self.features.append(self.feature_manager(self.rag, s0, s1))
                self.targets.append(SEPAR_LABEL)
            if self.original_rag.has_edge(f0, f1):
                self.features.append(self.feature_manager(self.original_rag,
                                                          f0, f1))
                self.targets.append(SEPAR_LABEL)
            self.separate.append((f0, f1))
        self.recently_solved = False or len(set(self.targets)) < 2
    def relearn(self):
        """Learn a new merge policy using data gathered so far.
        This resets the state of the RAG to contain only the merges and
        separations received over the course of its history.
        """
        clf = classify.DefaultRandomForest().fit(self.features, self.targets)
        self.policy = agglo.classifier_probability(self.feature_manager, clf)
        self.rag = self.original_rag.copy()
        self.rag.merge_priority_function = self.policy
        self.rag.rebuild_merge_queue()
        # Re-apply every recorded separation as a mutual exclusion.
        for i, (s0, s1) in enumerate(self.separate):
            self.rag.node[s0]['exclusions'].add(i)
            self.rag.node[s1]['exclusions'].add(i)
def proofread(fragments, true_segmentation, host='tcp://localhost', port=5556,
              num_operations=10, mode='fast paint', stop_when_finished=False,
              request_seg=True, random_state=None):
    """Simulate a proofreader by sending and receiving messages to a Solver.
    Parameters
    ----------
    fragments : array of int
        The initial segmentation to be proofread.
    true_segmentation : array of int
        The target segmentation. Should be a superset of `fragments`.
    host : string
        The host to serve ZMQ commands to.
    port : int
        Port on which to connect ZMQ.
    num_operations : int, optional
        How many proofreading operations to perform before returning.
    mode : string, optional
        The mode with which to simulate proofreading.
        NOTE(review): currently unused in the function body.
    stop_when_finished : bool, optional
        Send the solver a "stop" action when done proofreading. Useful
        when running tests so we don't intend to continue proofreading.
    request_seg : bool, optional
        Explicitly request the final LUT; when False the server is assumed
        to push periodic updates instead.
    random_state : None or int or numpy.RandomState instance, optional
        Fix the random state for proofreading.
    Returns
    -------
    lut : tuple of array-like of int
        A look-up table from fragments (first array) to segments
        (second array), obtained by requesting it from the Solver after
        initial proofreading simulation.
    """
    true = agglo2.best_segmentation(fragments, true_segmentation)
    base_graph = agglo2.fast_rag(fragments)
    comm = zmq.Context().socket(zmq.PAIR)
    comm.connect(host + ':' + str(port))
    # Column `label` of the contingency table lists the fragments composing
    # that true segment.
    ctable = ev.contingency_table(fragments, true).tocsc()
    true_labels = np.unique(true)
    random = check_random_state(random_state)
    random.shuffle(true_labels)
    for _, label in zip(range(num_operations), true_labels):
        time.sleep(3)
        components = [int(i) for i in ctable.getcol(int(label)).indices]
        merge_msg = {'type': 'merge', 'data': {'fragments': components}}
        print('proofreader sends:', merge_msg)
        comm.send_json(merge_msg)
        # Also tell the solver which neighboring fragments must stay out.
        for fragment in components:
            others = [int(neighbor) for neighbor in base_graph[fragment]
                      if neighbor not in components]
            if not others:
                continue
            split_msg = {'type': 'separate',
                         'data': {'fragment': int(fragment), 'from': others}}
            print('proofreader sends:', split_msg)
            comm.send_json(split_msg)
    if request_seg:  # if no request, assume server sends periodic updates
        req_msg = {'type': 'request', 'data': {'what': 'fragment-segment-lut'}}
        print('proofreader sends:', req_msg)
        comm.send_json(req_msg)
    print('proofreader receiving...')
    response = comm.recv_json()
    print('proofreader received:', response)
    src = response['data']['fragments']
    dst = response['data']['segments']
    if stop_when_finished:
        stop_msg = {'type': 'stop', 'data': {}}
        print('proofreader sends: ', stop_msg)
        comm.send_json(stop_msg)
    return src, dst
def main():
    """Command-line entry point: serve a Solver for one CREMI volume."""
    parser = argparse.ArgumentParser('gala-serve')
    parser.add_argument('-f', '--config-file', help='JSON configuration file')
    parser.add_argument('input_file', help='Input image file')
    parser.add_argument('-F', '--fragment-group',
                        default='volumes/labels/fragments',
                        help='Group path in HDF file for fragments')
    parser.add_argument('-p', '--membrane-probabilities',
                        default='volumes/membrane',
                        help='Group path in HDF file for membrane prob map')
    args = parser.parse_args()
    # Imported lazily to keep module import light.
    from . import imio
    frags, probs = imio.read_cremi(args.input_file,
                                   [args.fragment_group,
                                    args.membrane_probabilities])
    solver = Solver(frags, probs, config_file=args.config_file)
    # Blocks serving the proofreading protocol until a 'stop' message.
    solver.listen()
| StarcoderdataPython |
4834562 | # Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
import os
import cv2
import sys
import json
import numpy as np
from argparse import ArgumentParser
def parse_args():
parser = ArgumentParser(description="Imagenet resize script")
parser.add_argument('--data-dir', '-d', type=str,
default='./val_dataset',
help='path to imagenet val images')
parser.add_argument('--output-dir', '-o', type=str,
default='./val_resize_256',
help='reisize output path')
parser.add_argument('--anno-file', '-a', type=str,
default='val.txt',
help='gen anno fileanme')
parser.add_argument('--short-size', '-s', type=int,
default=256,
help='resize short size')
parser.add_argument('--name2class_file', '-nc', type=str,
default="imagenet_class_index.json",
help='imagenet name to class file')
return parser.parse_args()
args = parse_args()
def resize_shortest_edge(image, size):
H, W = image.shape[:2]
if H >= W:
nW = size
nH = int(float(H)/W * size)
else:
nH = size
nW = int(float(W)/H * size)
return cv2.resize(image, (nW, nH))
def gen_dict(name2class_file):
    """Load the class-index JSON and invert it to map synset name -> class id.

    The JSON file maps class id -> [synset_name, human_label] (the standard
    imagenet_class_index.json layout); the returned dict maps
    synset_name -> class id.

    :param name2class_file: path to the class-index JSON file
    :return: dict of synset name to class-id string
    """
    # Use a context manager so the file handle is always closed (the original
    # opened the file and never closed it).
    with open(name2class_file, 'r') as fr:
        class2name_dict = json.load(fr)
    return {entry[0]: class_id for class_id, entry in class2name_dict.items()}
def gen_dataset(args):
    """Flatten the class-foldered validation set and write an annotation file.

    Copies every image from args.data_dir/<synset>/ into args.output_dir and
    writes "<image_name> <class_id>" lines to args.anno_file.

    :param args: parsed CLI namespace (data_dir, output_dir, anno_file,
        name2class_file)
    """
    # Local import keeps this fix self-contained.
    import shutil

    # os.makedirs replaces the original `os.system('mkdir ...')`, which broke
    # on paths with spaces and silently failed when the dir already existed.
    os.makedirs(args.output_dir, exist_ok=True)
    name2class_dict = gen_dict(args.name2class_file)
    # Context manager guarantees the annotation file is closed even on error.
    with open(args.anno_file, "w") as fwa:
        for classname in os.listdir(args.data_dir):
            class_dir = os.path.join(args.data_dir, classname)
            class_id = name2class_dict[classname]
            for imagename in os.listdir(class_dir):
                imagename_path = os.path.join(class_dir, imagename)
                # shutil.copy replaces `os.system('cp ...')`: portable and
                # safe for arbitrary file names.
                shutil.copy(imagename_path, args.output_dir)
                fwa.write(imagename + ' ' + str(class_id) + '\n')
# Script entry point: build the flattened validation set and annotation file.
if __name__ == "__main__":
    gen_dataset(args)
| StarcoderdataPython |
61114 | import numpy as np
import scipy
import cv2
def get_pixel_neighbors(height, width):
    """
    Estimate the 4 neighbors of every pixel in an image.

    Pixels are indexed in row-major order (index = row * width + col).
    Border pixels simply have fewer neighbors. For each pixel, neighbors are
    emitted vertically first (down, then up) and then horizontally (right,
    then left), matching the original ordering.

    :param height: image height
    :param width: image width
    :return: (pixel index list, neighbor index list) of equal length
    """
    pix_id = []
    neighbor_id = []
    for row in range(height):
        for col in range(width):
            center = row * width + col
            neighbors = []
            # Vertical neighbors, bounds-checked. The original assumed
            # height >= 2 (`if row == 0` unconditionally appended the pixel
            # below), which produced out-of-range indices for height == 1;
            # same for width == 1 horizontally.
            if row + 1 < height:
                neighbors.append((row + 1) * width + col)
            if row - 1 >= 0:
                neighbors.append((row - 1) * width + col)
            # Horizontal neighbors (right, then left).
            if col + 1 < width:
                neighbors.append(center + 1)
            if col - 1 >= 0:
                neighbors.append(center - 1)
            for nb in neighbors:
                pix_id.append(center)
                neighbor_id.append(nb)
    return pix_id, neighbor_id
# Skeleton definition: each row is a (start, end) keypoint-index pair naming
# one limb ("bone") to draw between two detected joints.
limps = np.array(
    [[0, 1], [1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 11], [11, 12], [12, 13], [1, 8],
     [8, 9], [9, 10], [14, 15], [16, 17], [0, 14], [0, 15], [14, 16], [15, 17]])
def get_instance_skeleton_buffer(h, w, poses):
    """Rasterize every pose skeleton into an (h, w) instance-label buffer.

    Each pose is drawn (joints as filled circles, limbs as lines) using its
    index in `poses` as the label value; untouched pixels stay -1.
    Keypoints with confidence <= 0 are skipped.

    :param h: buffer height
    :param w: buffer width
    :param poses: sequence of (num_keypoints, 3) arrays of (x, y, confidence)
    :return: (h, w) float32 array of instance labels
    """
    output = np.zeros((h, w, 3), dtype=np.float32) - 1
    for i in range(len(poses)):
        keypoints = poses[i]
        lbl = i
        for k in range(limps.shape[0]):
            kp1, kp2 = limps[k, :].astype(int)
            # Copy before clamping: the original sliced numpy views and the
            # in-place clamp mutated the caller's keypoint arrays.
            bone_start = keypoints[kp1, :].copy()
            bone_end = keypoints[kp2, :].copy()
            bone_start[0] = np.maximum(np.minimum(bone_start[0], w - 1), 0.)
            bone_start[1] = np.maximum(np.minimum(bone_start[1], h - 1), 0.)
            bone_end[0] = np.maximum(np.minimum(bone_end[0], w - 1), 0.)
            bone_end[1] = np.maximum(np.minimum(bone_end[1], h - 1), 0.)
            if bone_start[2] > 0.0:
                output[int(bone_start[1]), int(bone_start[0])] = 1
                cv2.circle(output, (int(bone_start[0]), int(bone_start[1])), 2, (lbl, 0, 0), -1)
            if bone_end[2] > 0.0:
                output[int(bone_end[1]), int(bone_end[0])] = 1
                cv2.circle(output, (int(bone_end[0]), int(bone_end[1])), 2, (lbl, 0, 0), -1)
            if bone_start[2] > 0.0 and bone_end[2] > 0.0:
                cv2.line(output, (int(bone_start[0]), int(bone_start[1])), (int(bone_end[0]), int(bone_end[1])), (lbl, 0, 0), 1)
    return output[:, :, 0]
def get_poseimg_for_opt(sel_pose, poseimg, init_mask, n_bg=50):
    """Build a seed-label image for pose segmentation optimization.

    Labels in the returned (h, w) map: -1 unknown, 0 any pose pixel,
    2 the selected pose's pixels, 1 (`bg_label`) for `n_bg` randomly
    sampled background points outside the dilated mask.

    NOTE(review): `init_mask` is modified in place (selected-pose pixels set
    to 1) before being rebound to the dilated copy — confirm callers do not
    rely on the original mask contents.
    """
    h, w = init_mask.shape[:2]
    bg_label = 1
    output = np.zeros((h, w, 3), dtype=np.float32) - 1
    # All pose pixels vs. pixels belonging to the selected pose only.
    II, JJ = (poseimg > 0).nonzero()
    Isel, J_sel = (poseimg == sel_pose).nonzero()
    output[II, JJ] = 0
    output[Isel, J_sel] = 2
    init_mask[Isel, J_sel] = 1
    # Sample also from points in the field
    init_mask = cv2.dilate(init_mask, np.ones((25, 25), np.uint8), iterations=1)
    I_bg, J_bg = (init_mask == 0).nonzero()
    rand_index = np.random.permutation(len(I_bg))[:n_bg]
    bg_points = np.array([J_bg[rand_index], I_bg[rand_index]]).T
    for k in range(bg_points.shape[0]):
        cv2.circle(output, (int(bg_points[k, 0]), int(bg_points[k, 1])), 2, (bg_label, 0, 0), -1)
    return output[:, :, 0]
def draw_poses_for_optimization(sel_pose, keypoints_list, init_mask, n_bg=50):
    """Draw all pose skeletons plus sampled background seeds into a label map.

    The pose at index `sel_pose` is drawn with label 2, all other poses with
    label 1, sampled background points with label 0 (`bg_label`); untouched
    pixels stay -1.

    NOTE(review): the in-place clamp below writes through numpy views and
    therefore mutates the caller's keypoint arrays — confirm this is intended.
    """
    h, w = init_mask.shape[:2]
    bg_label = 0
    output = np.zeros((h, w, 3), dtype=np.float32)-1
    for i in range(len(keypoints_list)):
        keypoints = keypoints_list[i]
        if i == sel_pose:
            lbl = 2
        else:
            lbl = 1
        for k in range(limps.shape[0]):
            kp1, kp2 = limps[k, :].astype(int)
            bone_start = keypoints[kp1, :]
            bone_end = keypoints[kp2, :]
            # Clamp joint coordinates into the image bounds.
            bone_start[0] = np.maximum(np.minimum(bone_start[0], w - 1), 0.)
            bone_start[1] = np.maximum(np.minimum(bone_start[1], h - 1), 0.)
            bone_end[0] = np.maximum(np.minimum(bone_end[0], w - 1), 0.)
            bone_end[1] = np.maximum(np.minimum(bone_end[1], h - 1), 0.)
            # Only draw joints/limbs whose confidence (3rd column) is positive.
            if bone_start[2] > 0.0:
                output[int(bone_start[1]), int(bone_start[0])] = 1
                cv2.circle(output, (int(bone_start[0]), int(bone_start[1])), 2, (lbl, 0, 0), -1)
            if bone_end[2] > 0.0:
                output[int(bone_end[1]), int(bone_end[0])] = 1
                cv2.circle(output, (int(bone_end[0]), int(bone_end[1])), 2, (lbl, 0, 0), -1)
            if bone_start[2] > 0.0 and bone_end[2] > 0.0:
                cv2.line(output, (int(bone_start[0]), int(bone_start[1])), (int(bone_end[0]), int(bone_end[1])), (lbl, 0, 0), 1)
    # Draw circles for the bg players keypoints
    # for k in range(bg_keypoints.shape[0]):
    #     cv2.circle(output, (int(bg_keypoints[k, 0]), int(bg_keypoints[k, 1])), 2, (bg_keypoint_lable, 0, 0), -1)
    # Sample also from points in the field
    init_mask = cv2.dilate(init_mask, np.ones((5, 5), np.uint8), iterations=1)
    I_bg, J_bg = (init_mask == 0).nonzero()
    rand_index = np.random.permutation(len(I_bg))[:n_bg]
    bg_points = np.array([J_bg[rand_index], I_bg[rand_index]]).T
    for k in range(bg_points.shape[0]):
        cv2.circle(output, (int(bg_points[k, 0]), int(bg_points[k, 1])), 2, (bg_label, 0, 0), -1)
    return output[:, :, 0]
def set_U(strokes, h, w, dim):
    """Build the user-constraint system for scribble/stroke propagation.

    For each stroke row (x, y, v_0..v_{dim-1}), the pixel at (y, x) gets its
    constraint values copied into `y` and its diagonal entry of `U` set to 1.

    :param strokes: (n, 2 + dim) array of stroke coordinates and values
    :param h: image height
    :param w: image width
    :param dim: number of value channels per stroke
    :return: (U, y) — sparse (N, N) indicator matrix and dense (N, dim) targets
    """
    n_pixels = h * w
    y = np.zeros((n_pixels, dim))
    U = scipy.sparse.lil_matrix((n_pixels, n_pixels))
    for stroke in strokes:
        col = stroke[0]
        row = stroke[1]
        index = int(row * w + col)
        y[index, :] = stroke[2:2 + dim]
        U[index, index] = 1
    return U, y
def set_DW(image, edges=None, sigma1=1000., sigma2=0.01):
    """Build the pairwise difference operator D and edge-weight matrix W.

    D is an (M, N) sparse matrix with one row per (pixel, neighbor) pair
    (+1 on the pixel, -1 on the neighbor); W is the (M, M) diagonal matrix
    of Gaussian edge weights exp(-edges[i, j]^2 / sigma2).

    :param image: (h, w[, c]) image; only its spatial shape is used here
    :param edges: (h, w) edge-strength map; if None, uniform weights are used
        (the original unconditionally indexed `edges` and crashed on the
        documented default of None)
    :param sigma1: kept for interface compatibility (the color-difference
        weight the original computed from it was never used)
    :param sigma2: bandwidth of the edge-weight Gaussian
    :return: (D, W) sparse matrices
    """
    image = image.astype(float)
    h, w = image.shape[0:2]
    N = h * w
    pixd, neighborid = get_pixel_neighbors(h, w)
    i, j = np.unravel_index(pixd, (h, w))
    M = len(pixd)
    if edges is None:
        # No edge map supplied: fall back to uniform (all-ones) weights
        # instead of raising TypeError on `None[i, j]`.
        weight1 = np.ones(M)
    else:
        weight1 = np.exp(-((edges[i, j]) ** 2) / sigma2)
    D = scipy.sparse.lil_matrix((M, N))
    W = scipy.sparse.lil_matrix((M, M))
    p = np.arange(0, M, 1)
    D[p, pixd] = 1
    D[p, neighborid] = -1
    W[p, p] = weight1
    return D, W
| StarcoderdataPython |
#!/usr/bin/python3
# pylint: disable=C0103
# pylint: disable=C0114
"""GitHub Action helper: when a changelog label is added to a PR that does not
yet include a release-notes file, post (once) a reminder comment explaining
how to add one. Requires API_CREDENTIALS in the environment and the standard
GITHUB_EVENT_PATH event payload."""
import json
import os
import sys
from github import Github
CHANGELOG_LABELS = ['changelog - added', 'changelog - changed', 'changelog - fixed']
# ANSI color codes for log output.
ENDC = '\033[0m'
ERROR = '\033[31m'
INFO = '\033[34m'
NOTICE = '\033[33m'
if 'API_CREDENTIALS' not in os.environ:
    print(ERROR + "API_CREDENTIALS needs to be set in env. Exiting." + ENDC)
    sys.exit(1)
# get information we need from the event
event_data = json.load(open(os.environ['GITHUB_EVENT_PATH'], 'r'))
event_label = event_data['label']['name']
repo_name = event_data['repository']['full_name']
pr_number = event_data['pull_request']['number']
pr_opened_by = event_data['pull_request']['user']['login']
# Only changelog labels trigger the reminder.
found_changelog_label = False
for cl in CHANGELOG_LABELS:
    if event_label == cl:
        found_changelog_label = True
        break
if not found_changelog_label:
    print(INFO + event_label + " isn't a changelog label. Exiting." + ENDC)
    sys.exit(0)
# get PR which is an "issue" for us because the GitHub API is weird
github = Github(os.environ['API_CREDENTIALS'])
repo = github.get_repo(repo_name)
issue = repo.get_issue(pr_number)
# Hidden HTML marker embedded in our comment so we can detect reposts.
sentinel = "<!-- release-note-reminder-bot -->"
# check the existing comments for the sentinel.
found_sentinel = False
comments = issue.get_comments()
for c in comments:
    if sentinel in c.body:
        found_sentinel = True
        break
if found_sentinel:
    print(INFO + "Found existing comment sentinel. Exiting." + ENDC)
    sys.exit(0)
# don't post if there is already release notes included with the PR
found_release_notes_files = False
for f in repo.get_pull(pr_number).get_files():
    if f.status != "added":
        continue
    print(INFO + "Found file " + f.filename + ENDC)
    # Any newly added file under .release-notes/ (other than the shared
    # next-release.md placeholder) counts as release notes.
    if f.filename.startswith('.release-notes/'):
        if not f.filename.endswith('next-release.md'):
            found_release_notes_files = True
            break
# if at least one release notes exists, exit
if found_release_notes_files:
    print(NOTICE + "Release notes file(s) found in commits. Exiting." + ENDC)
    sys.exit(0)
# ok, we should be posting. let's create a reminder and post it.
print(INFO + "Preparing release notes reminder comment." + ENDC)
comment_template = """{sentinel}
Hi @{user},
The {label} label was added to this pull request; all PRs with a changelog label need to have release notes included as part of the PR. If you haven't added release notes already, please do.
Release notes are added by creating a uniquely named file in the `.release-notes` directory. We suggest you call the file `{pr_number}.md` to match the number of this pull request.
The basic format of the release notes (using markdown) should be:
```
## Title
End user description of changes, why it's important,
problems it solves etc.
If a breaking change, make sure to include 1 or more
examples what code would look like prior to this change
and how to update it to work after this change.
```
Thanks.
"""
comment = comment_template.format(user=pr_opened_by,
                                  label=event_label,
                                  pr_number=str(pr_number),
                                  sentinel=sentinel)
print(INFO + "Posting comment." + ENDC)
issue.create_comment(comment)
| StarcoderdataPython |
# Protocol plotting zoom-ins set up
# y-axis range for the full trace.
set_ylim = [-200, 2300]
# Per-inset x and y ranges for the two zoom-in panels.
set_xlim_ins = [[1850, 2200], [14350, 14600]]
set_ylim_ins = [[-4000, 0], [-2500, 500]]
# Per-inset placement tuples — presumably (width scale, height scale,
# anchor location) for inset axes; confirm against the plotting code.
inset_setup = [(1, 1.25, 'upper center'), (0.7, 1.25, 'upper right')]
# Per-inset corner pairs, presumably passed to mark_inset to connect each
# inset to the parent axes; confirm against the plotting code.
mark_setup = [(2, 2), (2, 2)]
| StarcoderdataPython |
157271 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from numpy.testing import assert_allclose
try:
import matplotlib.pyplot as plt
HAS_PLT = True
except ImportError:
HAS_PLT = False
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
from ...tests.helper import pytest
import numpy as np
from .. import hist
from ...stats import histogram
@pytest.mark.skipif('not HAS_PLT')
def test_hist_basic(rseed=0):
    """astropy's hist() must match plt.hist() for a fixed bin count."""
    rng = np.random.RandomState(rseed)
    x = rng.randn(100)
    # Renamed from `range` — the original shadowed the builtin.
    for hist_range in [None, (-2, 2)]:
        n1, bins1, patches1 = plt.hist(x, 10, range=hist_range)
        n2, bins2, patches2 = hist(x, 10, range=hist_range)
        assert_allclose(n1, n2)
        assert_allclose(bins1, bins2)
@pytest.mark.skipif('not HAS_PLT')
def test_hist_specify_ax(rseed=0):
    """hist() must draw onto the axes passed via the `ax` keyword."""
    rng = np.random.RandomState(rseed)
    x = rng.randn(100)
    fig, axes = plt.subplots(2)
    for axis in axes:
        _, _, patches = hist(x, 10, ax=axis)
        assert patches[0].axes is axis
@pytest.mark.skipif('not HAS_PLT')
def test_hist_autobin(rseed=0):
    """hist() must agree with stats.histogram for every bin-selection rule."""
    rng = np.random.RandomState(rseed)
    x = rng.randn(100)
    bintypes = [10, np.arange(-3, 3, 10), 'scott', 'freedman', 'blocks']
    # 'knuth' bintype depends on scipy that is optional dependency
    if HAS_SCIPY:
        bintypes.append('knuth')
    for bintype in bintypes:
        # Renamed from `range` — the original shadowed the builtin.
        for hist_range in [None, (-3, 3)]:
            n1, bins1 = histogram(x, bintype, range=hist_range)
            n2, bins2, patches = hist(x, bintype, range=hist_range)
            assert_allclose(n1, n2)
            assert_allclose(bins1, bins2)
| StarcoderdataPython |
1640626 | <reponame>NCRAR/psiaudio
import pytest
from collections import Counter, deque
import numpy as np
from psiaudio.calibration import FlatCalibration
from psiaudio.pipeline import extract_epochs
from psiaudio.queue import FIFOSignalQueue, InterleavedFIFOSignalQueue
from psiaudio.stim import Cos2EnvelopeFactory, ToneFactory
# Nominal stimulus presentation rate (trials per second) and the derived
# inter-stimulus interval, rounded to 5 decimal places.
rate = 76.0
isi = np.round(1 / rate, 5)
def make_tone(fs, frequency=250, duration=5e-3):
    """Return a cos^2-gated tone-pip factory at the given sample rate."""
    carrier = ToneFactory(fs=fs, level=0, frequency=frequency,
                          calibration=FlatCalibration.as_attenuation())
    envelope = Cos2EnvelopeFactory(fs=fs, start_time=0, rise_time=0.5e-3,
                                   duration=duration, input_factory=carrier)
    return envelope
def make_queue(fs, ordering, frequencies, trials, duration=5e-3, isi=isi):
    """Build a signal queue loaded with one tone pip per frequency.

    Returns (queue, added_events, removed_events, keys, tone_factories).
    """
    queue_classes = {
        'FIFO': FIFOSignalQueue,
        'interleaved': InterleavedFIFOSignalQueue,
    }
    queue_cls = queue_classes.get(ordering)
    if queue_cls is None:
        raise ValueError(f'Unrecognized queue ordering {ordering}')
    queue = queue_cls(fs)
    conn = deque()
    queue.connect(conn.append, 'added')
    removed_conn = deque()
    queue.connect(removed_conn.append, 'removed')
    keys = []
    tones = []
    for frequency in frequencies:
        tone = make_tone(fs, frequency=frequency, duration=duration)
        # Queue delay is measured offset-to-onset, so subtract the pip
        # duration from the desired ISI (never negative).
        delay = max(isi - duration, 0)
        key = queue.append(tone, trials, delay,
                           metadata={'frequency': frequency})
        keys.append(key)
        tones.append(tone)
    return queue, conn, removed_conn, keys, tones
def test_long_tone_queue(fs):
    """Two interleaved 1 s tones, 5 trials each, popped in 0.25 s chunks:
    alternating 1 s spans must repeat each tone exactly and the two tones
    must differ from one another."""
    queue, conn, rem_conn, _, _ = \
        make_queue(fs, 'interleaved', [1e3, 5e3], 5, duration=1, isi=1)
    waveforms = []
    n_pop = round(fs * 0.25)
    for i in range(16):
        w = queue.pop_buffer(n_pop)
        waveforms.append(w)
    # 16 chunks of 0.25 s = 4 s total; reshape into four 1 s rows.
    waveforms = np.concatenate(waveforms, axis=-1)
    waveforms.shape = 4, -1
    assert waveforms.shape == (4, round(fs))
    assert np.all(waveforms[0] == waveforms[2])
    assert np.all(waveforms[1] == waveforms[3])
    assert np.any(waveforms[0] != waveforms[1])
def test_fifo_queue_pause_with_requeue(fs):
    """Pause a FIFO queue mid-stream, verify the trials queued past the pause
    point are reported as removed and re-queued on resume, and that the epoch
    extractor ultimately captures exactly 100 complete trials per key."""
    # Helper function to track number of remaining keys
    def _adjust_remaining(k1, k2, n):
        nk1 = min(k1, n)
        nk2 = min(n - nk1, k2)
        return k1 - nk1, k2 - nk2
    queue, conn, rem_conn, (k1, k2), (t1, t2) = \
        make_queue(fs, 'FIFO', [1e3, 5e3], 100)
    extractor_conn = deque()
    extractor_rem_conn = deque()
    queue.connect(extractor_conn.append, 'added')
    queue.connect(extractor_rem_conn.append, 'removed')
    # Generate the waveform template
    n_t1 = t1.n_samples_remaining()
    n_t2 = t2.n_samples_remaining()
    t1_waveform = t1.next(n_t1)
    t2_waveform = t2.next(n_t2)
    waveforms = []
    extractor = extract_epochs(fs=fs,
                               queue=extractor_conn,
                               removed_queue=extractor_rem_conn,
                               poststim_time=0,
                               buffer_size=0,
                               epoch_size=15e-3,
                               target=waveforms.extend)
    # Track number of trials remaining
    k1_left, k2_left = 100, 100
    samples = int(round(fs))
    # Since the queue uses the delay (between offset and onset of
    # consecutive segments), we need to calculate the actual ISI since it
    # may have been rounded to the nearest sample.
    delay_samples = round((isi - t1.duration) * fs)
    duration_samples = round(t1.duration * fs)
    total_samples = duration_samples + delay_samples
    actual_isi = total_samples / fs
    ###########################################################################
    # First, queue up 2 seconds worth of trials
    ###########################################################################
    waveform = queue.pop_buffer(samples * 2)
    n_queued = np.floor(2 / actual_isi) + 1
    # Sample offsets of the first t1 trial and the first t2 trial (t2 starts
    # after all 100 t1 trials).
    t1_lb = 0
    t2_lb = 100 * total_samples
    t2_lb = int(t2_lb)
    assert np.all(waveform[t1_lb:t1_lb + duration_samples] == t1_waveform)
    assert np.all(waveform[t2_lb:t2_lb + duration_samples] == t2_waveform)
    assert len(conn) == np.ceil(2 / actual_isi)
    assert len(rem_conn) == 0
    keys = [i['key'] for i in conn]
    assert set(keys) == {k1, k2}
    assert set(keys[:100]) == {k1}
    assert set(keys[100:]) == {k2}
    k1_left, k2_left = _adjust_remaining(k1_left, k2_left, n_queued)
    assert queue.remaining_trials(k1) == k1_left
    assert queue.remaining_trials(k2) == k2_left
    conn.clear()
    ###########################################################################
    # Now, pause
    ###########################################################################
    # Pausing should remove all epochs queued up after 0.5s. After sending
    # the first waveform to the extractor, we generate a new waveform to
    # verify that no additional trials are queued and send that to the
    # extractor.
    queue.pause(round(0.5 * fs) / fs)
    extractor.send(waveform[:round(0.5 * fs)])
    # We need to add 1 to account for the very first trial.
    n_queued = int(np.floor(2 / actual_isi)) + 1
    n_kept = int(np.floor(0.5 / actual_isi)) + 1
    # Now, fix the counters
    k1_left, k2_left = _adjust_remaining(100, 100, n_kept)
    # This is the total number that were removed when we paused.
    n_removed = n_queued - n_kept
    # Subtract 1 because we haven't fully captured the last trial that
    # remains in the queue because the epoch_size was chosen such that the
    # end of the epoch to be extracted is after 0.5s.
    n_captured = n_kept - 1
    assert len(waveforms) == n_captured
    # Doing this will capture the final epoch.
    waveform = queue.pop_buffer(samples)
    assert np.all(waveform == 0)
    extractor.send(waveform)
    assert len(waveforms) == (n_captured + 1)
    # Verify removal event is properly notifying the timestamp
    rem_t0 = np.array([i['t0'] for i in rem_conn])
    assert np.all(rem_t0 >= 0.5)
    assert (rem_t0[0] % actual_isi) == pytest.approx(0, 0.1 / fs)
    assert queue.remaining_trials(k1) == k1_left
    assert queue.remaining_trials(k2) == k2_left
    assert len(conn) == 0
    assert len(rem_conn) == n_removed
    rem_count = Counter(i['key'] for i in rem_conn)
    assert rem_count[k1] == 100 - n_kept
    assert rem_count[k2] == n_queued - 100
    conn.clear()
    rem_conn.clear()
    # Resume at 1.5 s and pop one more second's worth of trials.
    queue.resume(samples * 1.5 / fs)
    waveform = queue.pop_buffer(samples)
    n_queued = np.floor(1 / actual_isi) + 1
    k1_left, k2_left = _adjust_remaining(k1_left, k2_left, n_queued)
    extractor.send(waveform)
    assert len(conn) == np.floor(1 / actual_isi) + 1
    assert queue.remaining_trials(k1) == k1_left
    assert queue.remaining_trials(k2) == k2_left
    assert len(conn) == np.floor(1 / actual_isi) + 1
    keys += [i['key'] for i in conn]
    conn.clear()
    # Drain the rest of the queue.
    waveform = queue.pop_buffer(5 * samples)
    n_queued = np.floor(5 / actual_isi) + 1
    k1_left, k2_left = _adjust_remaining(k1_left, k2_left, n_queued)
    extractor.send(waveform)
    assert queue.remaining_trials(k1) == k1_left
    assert queue.remaining_trials(k2) == k2_left
    keys += [i['key'] for i in conn]
    # We requeued 1.5 second worth of trials so need to factor this because
    # keys (from conn) did not remove the removed keys.
    assert len(keys) == (200 + n_removed)
    # However, the extractor is smart enough to handle cancel appropriately
    # and should only have the 200 we originally intended.
    assert len(waveforms) == 200
    # This should capture the 1-sample bug that sometimes occurs when using
    # int() instead of round() with quirky sample rates (e.g., like with the
    # RZ6).
    n = len(t1_waveform)
    waveforms = np.vstack(waveforms)
    t1_waveforms = waveforms[:100]
    t2_waveforms = waveforms[100:]
    assert np.all(t1_waveforms[:, :n] == t1_waveform)
    assert np.all(t2_waveforms[:, :n] == t2_waveform)
def test_queue_isi_with_pause(fs):
    """
    Verifies that queue generates samples at the expected ISI and also verifies
    pause functionality works as expected.

    A single 250 Hz pip is queued 500 times; a one-second pause must appear as
    exactly one lengthened inter-trial gap, with all other ISIs unchanged.
    """
    queue, conn, _, _, (t1,) = make_queue(fs, 'FIFO', [250], 500)
    duration = 1
    samples = round(duration * fs)
    queue.pop_buffer(samples)
    expected_n = int(duration / isi) + 1
    assert len(conn) == expected_n
    # Pause is after `duration` seconds
    queue.pause()
    waveform = queue.pop_buffer(samples)
    # While paused, the queue must emit silence and enqueue no new trials.
    assert np.sum(waveform ** 2) == 0
    assert len(conn) == int(duration / isi) + 1
    # Resume after `duration` seconds. Note that tokens resume *immediately*.
    queue.resume()
    queue.pop_buffer(samples)
    assert len(conn) == np.ceil(2 * duration / isi)
    queue.pop_buffer(samples)
    assert len(conn) == np.ceil(3 * duration / isi)
    times = [u['t0'] for u in conn]
    assert times[0] == 0
    all_isi = np.diff(times)
    # Since the queue uses the delay (between offset and onset of
    # consecutive segments), we need to calculate the actual ISI since it
    # may have been rounded to the nearest sample.
    actual_isi = round((isi - t1.duration) * fs) / fs + t1.duration
    # We paused the playout, so this means that we have a very long delay in
    # the middle of the queue. Check for this delay, ensure that there's only
    # one ISI with this delay and then verify that all other ISIs are the
    # expected ISI given the tone pip duration.
    expected_max_isi = round((duration + actual_isi) * fs) / fs
    assert all_isi.max() == expected_max_isi
    m = all_isi == all_isi.max()
    assert sum(m) == 1
    # Now, check that all other ISIs are as expected.
    expected_isi = round(actual_isi * fs) / fs
    np.testing.assert_almost_equal(all_isi[~m], expected_isi)
def test_fifo_queue_pause_resume_timing(fs):
    """The timestamp passed to resume() must become the t0 (quantized to the
    nearest sample) of the first trial queued after the pause."""
    trials = 20
    samples = int(fs)
    queue, conn, _, _, _ = make_queue(fs, 'FIFO', (1e3, 5e3), trials)
    queue.pop_buffer(samples)
    conn.clear()
    queue.pause(0.1025)
    queue.pop_buffer(samples)
    queue.resume(0.6725)
    queue.pop_buffer(samples)
    t0 = [i['t0'] for i in conn]
    assert t0[0] == round(0.6725 * fs) / fs
def test_fifo_queue_ordering(fs):
    """A FIFO queue must play all trials of the first key before any trial of
    the second key; extracted epochs must match within each key and differ
    across keys."""
    trials = 20
    samples = round(fs)
    queue, conn, _, (k1, k2), (t1, _) = \
        make_queue(fs, 'FIFO', (1e3, 5e3), trials)
    epoch_samples = round(t1.duration * fs)
    waveforms = []
    queue_empty = False
    # Callback used to confirm the extractor signals queue exhaustion.
    def mark_empty():
        nonlocal queue_empty
        queue_empty = True
    extractor = extract_epochs(fs=fs,
                               queue=conn,
                               epoch_size=None,
                               poststim_time=0,
                               buffer_size=0,
                               target=waveforms.append,
                               empty_queue_cb=mark_empty)
    waveform = queue.pop_buffer(samples)
    extractor.send(waveform)
    assert queue_empty
    metadata = list(conn)
    for md in metadata[:trials]:
        assert k1 == md['key']
    for md in metadata[trials:]:
        assert k2 == md['key']
    waveforms = np.concatenate(waveforms, axis=0)
    assert waveforms.shape == (trials * 2, epoch_samples)
    for w in waveforms[:trials]:
        assert np.all(w == waveforms[0])
    for w in waveforms[trials:]:
        assert np.all(w == waveforms[trials])
    assert np.any(waveforms[0] != waveforms[trials])
def test_interleaved_fifo_queue_ordering(fs):
    """An interleaved queue must alternate keys trial-by-trial; extracted
    epochs must match within each key and differ across keys."""
    samples = round(fs)
    trials = 20
    queue, conn, _, (k1, k2), (t1, _) = \
        make_queue(fs, 'interleaved', (1e3, 5e3), trials)
    epoch_samples = round(t1.duration * fs)
    waveforms = []
    queue_empty = False
    # Callback used to confirm the extractor signals queue exhaustion.
    def mark_empty():
        nonlocal queue_empty
        queue_empty = True
    extractor = extract_epochs(fs=fs,
                               queue=conn,
                               epoch_size=None,
                               poststim_time=0,
                               buffer_size=0,
                               target=waveforms.append,
                               empty_queue_cb=mark_empty)
    waveform = queue.pop_buffer(samples)
    extractor.send(waveform)
    assert queue_empty
    # Verify that keys are ordered properly
    metadata = list(conn)
    for md in metadata[::2]:
        assert k1 == md['key']
    for md in metadata[1::2]:
        assert k2 == md['key']
    waveforms = np.concatenate(waveforms, axis=0)
    assert waveforms.shape == (trials * 2, epoch_samples)
    for w in waveforms[::2]:
        assert np.all(w == waveforms[0])
    for w in waveforms[1::2]:
        assert np.all(w == waveforms[1])
    assert np.any(waveforms[0] != waveforms[1])
def test_queue_continuous_tone(fs):
    """
    Test ability to work with continuous tones and move to the next one
    manually (e.g., as in the case of DPOAEs).
    """
    samples = round(1 * fs)
    queue, conn, _, _, (t1, t2) = make_queue(fs, 'FIFO', (1e3, 5e3), 1,
                                             duration=100)
    # Get samples from t1
    assert queue.get_max_duration() == 100
    assert np.all(queue.pop_buffer(samples) == t1.next(samples))
    assert np.all(queue.pop_buffer(samples) == t1.next(samples))
    # Switch to t2
    queue.next_trial()
    assert np.all(queue.pop_buffer(samples) == t2.next(samples))
    assert np.all(queue.pop_buffer(samples) == t2.next(samples))
    # Ensure timing information correct: t2 started after two 1 s pops of t1.
    assert len(conn) == 2
    assert conn.popleft()['t0'] == 0
    assert conn.popleft()['t0'] == (samples * 2) / fs
def test_future_pause(fs):
    """pause() may be scheduled up to, but not past, the last generated
    sample; scheduling beyond it must raise ValueError."""
    queue, conn, rem_conn, _, _ = make_queue(fs, 'FIFO', [1e3, 5e3], 100)
    queue.pop_buffer(1000)
    # This is OK
    queue.pause(1000 / fs)
    queue.resume(1000 / fs)
    # This is not
    with pytest.raises(ValueError):
        queue.pause(1001 / fs)
def test_queue_partial_capture(fs):
    """A trial cut off by a pause in the middle of its pip must not be
    delivered as a captured epoch by the extractor."""
    queue, conn, rem_conn, _, (t1, t2) = \
        make_queue(fs, 'FIFO', [1e3, 5e3], 100)
    extractor_conn = deque()
    extractor_rem_conn = deque()
    queue.connect(extractor_conn.append, 'added')
    queue.connect(extractor_rem_conn.append, 'removed')
    waveforms = []
    extractor = extract_epochs(fs=fs,
                               queue=extractor_conn,
                               removed_queue=extractor_rem_conn,
                               poststim_time=0,
                               buffer_size=0,
                               epoch_size=15e-3,
                               target=waveforms.extend)
    samples = int(fs)
    tone_samples = t1.n_samples_remaining()
    # Pop only half of the first pip, then pause mid-pip.
    w1 = queue.pop_buffer(int(tone_samples / 2))
    queue.pause(0.5 * tone_samples / fs)
    w2 = queue.pop_buffer(samples)
    extractor.send(w1)
    extractor.send(w2)
    assert len(waveforms) == 0
def test_remove_keys(fs):
    """remove_key() must drop all pending trials for that key: a key removed
    before playback never appears, one removed mid-playback keeps only the
    trials already generated."""
    frequencies = (500, 1e3, 2e3, 4e3, 8e3)
    queue, conn, _, keys, tones = make_queue(fs, 'FIFO', frequencies, 100)
    queue.remove_key(keys[1])
    queue.pop_buffer(int(fs))
    queue.remove_key(keys[0])
    queue.pop_buffer(int(fs))
    counts = Counter(c['key'] for c in conn)
    assert counts[keys[0]] == int(rate)
    assert counts[keys[2]] == int(rate)
    # Should generate all remaining queued trials. Make sure it properly
    # exits the queue.
    queue.pop_buffer((int(5 * 100 / rate * fs)))
    counts = Counter(c['key'] for c in conn)
    assert keys[1] not in counts
    assert counts[keys[0]] == int(rate)
    for k in keys[2:]:
        assert counts[k] == 100
def test_remove_keys_with_no_auto_decrement(fs):
    """With decrement=False the first key repeats indefinitely; removing every
    other key afterwards must leave only the trials already generated."""
    frequencies = (500, 1e3, 2e3, 4e3, 8e3)
    queue, conn, _, keys, tones = make_queue(fs, 'FIFO', frequencies, 100)
    queue.remove_key(keys[1])
    queue.pop_buffer(10 * int(fs), decrement=False)
    queue.remove_key(keys[0])
    for key in keys[2:]:
        queue.remove_key(key)
    # Should generate all remaining queued trials. Make sure it properly
    # exits the queue.
    queue.pop_buffer((int(5 * 100 / rate * fs)), decrement=False)
    counts = Counter(c['key'] for c in conn)
    assert keys[1] not in counts
    assert counts[keys[0]] == 10 * int(rate)
    for k in keys[2:]:
        assert k not in counts
def test_get_closest_key(fs):
    """get_closest_key(t) must return None before any playback, then the key
    of the trial nearest the given timestamp."""
    frequencies = (500, 1e3, 2e3, 4e3, 8e3)
    queue, conn, _, keys, tones = make_queue(fs, 'FIFO', frequencies, 100)
    assert queue.get_closest_key(1) is None
    queue.pop_buffer(int(fs))
    assert queue.get_closest_key(1) == keys[0]
    queue.pop_buffer(int(fs))
    assert queue.get_closest_key(1) == keys[0]
    assert queue.get_closest_key(2) == keys[1]
def test_rebuffering(fs):
    """Pause/resume with rebuffering: a trial cut off mid-pip must be flagged
    as removed and later re-queued so each frequency ends with `trials`
    complete, identical epochs."""
    frequencies = (500, 1e3, 2e3, 4e3, 8e3)
    trials = 200
    queue, conn, rem_conn, keys, tones = \
        make_queue(fs, 'FIFO', frequencies, trials)
    waveforms = []
    extractor_conn = deque()
    extractor_rem_conn = deque()
    queue.connect(extractor_conn.append, 'added')
    queue.connect(extractor_rem_conn.append, 'removed')
    extractor = extract_epochs(fs=fs,
                               queue=extractor_conn,
                               removed_queue=extractor_rem_conn,
                               poststim_time=0,
                               buffer_size=0,
                               epoch_size=8.5e-3,
                               target=waveforms.append)
    # Default tone duration is 5e-3
    tone_duration = tones[0].duration
    tone_samples = int(round(tone_duration * fs))
    # Remove 5e-3 sec of the waveform
    extractor.send(queue.pop_buffer(tone_samples))
    # Now, pause the queue at 5e-3 sec, remove 10e-3 worth of samples, and then
    # resume.
    queue.pause(tone_duration)
    extractor.send(queue.pop_buffer(tone_samples*2))
    queue.resume()
    # Pull off one additional second. Since we will be pausing the queue at
    # 1.005 sec, we need to make sure that we do not actually deliver the
    # samples after 1.005 sec to the extractor (this simulates a DAQ where we
    # have uploaded some samples to a "buffer" but have not actually played
    # them out).
    old_ts = queue.get_ts()
    keep = (tone_duration + 1.0) - old_ts
    keep_samples = int(round(keep * fs))
    w = queue.pop_buffer(int(fs))
    extractor.send(w[:keep_samples])
    # NOTE(review): pytest.approx(1.015, 4) passes 4 as the *relative*
    # tolerance (400%); this was probably meant to be abs=1e-4 — confirm.
    assert queue.get_ts() == pytest.approx(1.015, 4)
    # This will result in pausing in the middle of a tone burst. This ensures
    # that we properly notify the extractor that a stimulus was cut-off halfway
    # (i.e., rem_conn will have an item in the queue).
    assert len(rem_conn) == 0
    queue.pause(tone_duration + 1.0)
    queue.resume(tone_duration + 1.0)
    assert len(rem_conn) == 1
    # Clear all remaining trials
    extractor.send(queue.pop_buffer(15 * int(fs)))
    # Check that we have the expected number of epochs acquired
    #assert len(waveforms) == (len(frequencies) * trials)
    epochs = np.concatenate(waveforms, axis=0)
    epochs.shape = len(frequencies), trials, -1
    # Make sure epochs 1 ... end are equal to epoch 0
    assert np.all(np.equal(epochs[:, [0]], epochs))
| StarcoderdataPython |
4806129 | # /*
# Copyright 2020 Hitachi Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# */
#
#
# main() will be run when you invoke this action
#
# @param Cloud Functions actions accept a single parameter, which must be a JSON object.
#
# @return The output of this action, which must be a JSON object.
#
#
# prepareData
# Prepare/Clean cloudant data for calculations
# Goal: Read Data
# 1. Read data periodically form sensor database --> dictionary
# 2. Read (sensor) asset dB and arrange (above) periodic data in dictionary format --->
# 3. Read (staff) asset DB to get attributes of each staff ---> list()
from cloudant.client import Cloudant
from cloudant.error import CloudantException
from cloudant.result import Result, ResultByKey
from cloudant import cloudant
import json
import pandas as pd
from datetime import datetime
import datetime
import numpy as np
def main(dict):
    """Cloud Functions entry point: normalize raw sensor docs into periodic data.

    Reads the raw sensor documents from dict["docs"], groups the readings per
    sensor identifier, reduces each group to a single per-period value and
    returns it under the "dataPeriodic" key.

    NOTE(review): the parameter is (unfortunately) named `dict`, shadowing the
    builtin; the name is kept so positional/keyword invocations by the Cloud
    Functions runtime remain compatible.

    :param dict: action parameters; must contain a "docs" list
    :return: {"dataPeriodic": <per-sensor aggregated data>}
    """
    # Removed unused locals from the original (period, TypeCode, IdCode).
    rawData = dict["docs"]  # list of raw sensor documents
    list_SensorTypes = getListedData(rawData)
    allDataInterval = organiseInervalData(rawData, list_SensorTypes)
    dataPeriodic = normaliseIntervalData(allDataInterval)
    return {"dataPeriodic": dataPeriodic}
def getListedData(data0):
    """Collect the unique device ids and device types present in `data0`.

    Records without a non-empty "deviceId" are skipped entirely.
    Returns {"id": [...unique ids...], "subType": [...unique types...]}.
    """
    ids = []
    sub_types = []
    for record in data0:
        device_id = record.get("deviceId", "")
        if device_id != "":
            ids.append(device_id)
            sub_types.append(record["deviceType"])
    return {
        "id": list(set(ids)),
        "subType": list(set(sub_types)),
    }
def organiseInervalData(data00, idList):
    """Aggregate per-reading sensor values into lists keyed by identifier.

    For each record carrying a "data" field, the identifying field and value
    field are chosen from the payload (area/count, handwashStand/count or
    garbageBin/amount_rate) and the values for the same identifier are
    accumulated into a list. Returns the aggregated mapping as a JSON string:
    {identifier: {"payload": {..., <value_field>: [v1, v2, ...]}}}.

    NOTE(review): payload dicts are aliased from the input, so the input
    records' value fields are rewritten to lists as a side effect — confirm
    callers do not reuse `data00` afterwards.

    :param data00: list of raw sensor documents
    :param idList: kept for interface compatibility (the original only read
        idList["id"] into an unused variable)
    :return: JSON string of the aggregated per-identifier data
    """
    dataMin = {}
    for each in data00:
        each_data = each.get("data", "")
        if each_data == "":
            continue
        payload = each["data"]["payload"]
        # Pick which payload field identifies the sensor and which carries
        # the value (last matching field wins, as in the original).
        targetValue = ["area", "count"]
        if "area" in payload:
            targetValue = ["area", "count"]
        if "handwashStand" in payload:
            targetValue = ["handwashStand", "count"]
        if "garbageBin" in payload:
            targetValue = ["garbageBin", "amount_rate"]
        identifier = payload[targetValue[0]]
        if identifier in dataMin:
            value = dataMin[identifier]['payload'][targetValue[1]]
            newValue = payload[targetValue[1]]
            if isinstance(value, list):
                outL = value + [newValue]
            else:
                outL = [value] + [newValue]
        else:
            dataMin[identifier] = {'payload': payload}
            outL = [payload[targetValue[1]]]
        dataMin[identifier]['payload'][targetValue[1]] = outL
    return json.dumps(dataMin)
def normaliseIntervalData(dataMin_j):
    """Reduce each sensor's accumulated value list to one per-period value.

    Counts (area / handwashStand sensors) are summed over the period; garbage
    bin fill rates keep only the most recent reading. Values are converted to
    JSON-friendly types via typeProof.

    :param dataMin_j: JSON string as produced by organiseInervalData
    :return: dict of per-identifier payloads with scalar values
    """
    dataPerMinute = json.loads(dataMin_j)
    targetValue = "count"
    for identObject in dataPerMinute:
        if dataPerMinute[identObject] is not None:
            dfPayload = pd.DataFrame(dataPerMinute[identObject]['payload'])
            # NOTE(review): if a payload contains none of the known sensor
            # fields, df_out from a previous iteration (or a NameError) would
            # be used — confirm inputs always contain one of
            # area/handwashStand/garbageBin.
            if ("area" in dataPerMinute[identObject]["payload"]) or ("handwashStand" in dataPerMinute[identObject]["payload"]):
                targetValue = "count"
                df_out = (dfPayload[targetValue].sum())
            if "garbageBin" in dataPerMinute[identObject]["payload"]:
                targetValue = "amount_rate"
                df_out = (dfPayload[targetValue]).iloc[-1]
            dataPerMinute[identObject]['payload'][targetValue] = typeProof(
                df_out)
    return dataPerMinute
def typeProof(obj):
    """Coerce numpy/datetime values into JSON-friendly Python values.

    numpy integers/floats become native int/float, numpy arrays become
    lists, datetimes become their string form; anything else is returned
    as its JSON-encoded string.
    """
    converters = (
        (np.integer, int),
        (np.floating, float),
        (np.ndarray, lambda arr: arr.tolist()),
        (datetime.datetime, str),
    )
    for klass, convert in converters:
        if isinstance(obj, klass):
            return convert(obj)
    return json.dumps(obj)
| StarcoderdataPython |
1613286 | <reponame>bhaskernitt/chargebee-cli
from chargebeecli.client.actionsImpl import ActionsImpl
from chargebeecli.constants.constants import Formats
from chargebeecli.export.Exporter import Exporter
from chargebeecli.formater.response_formatter import ResponseFormatter
from chargebeecli.printer.printer import Printer
from chargebeecli.processors.processor import Processor
from chargebeecli.validator.validator import Validator
# Base path of the Chargebee v2 hosted-pages REST resource.
API_URI = '/api/v2/hosted_pages'
class HostedPage(Processor, Validator, ResponseFormatter, Exporter, Printer):
    """CLI handler for the Chargebee "hosted pages" resource.

    Combines fetching (get/list/delete), parameter validation, table
    formatting, export and printing via the mixin base classes.
    """

    __action_processor = ActionsImpl()

    def __init__(self, export_format, export_path, file_name, response_format, _operation, _input_columns):
        self.headers = self.get_api_header()
        self.tables = None
        self.export_format = export_format
        self.export_path = export_path
        self.file_name = file_name
        self.response_format = response_format
        self.operation = _operation
        self.input_columns = _input_columns

    def validate_param(self):
        # Narrow the displayed columns to the user-requested subset.
        self.headers = super().validate_param(self.input_columns, self.headers)
        return self

    def get_api_header(self):
        """Full set of hosted-page attributes shown as table columns."""
        return ["created_at", "embed", "expires_at", "id", "object",
                "resource_version", "state", "type", "updated_at", "url"]

    def process(self, ctx, operation, payload, resource_id):
        return super().process(ctx, operation, payload, resource_id)

    def to_be_formatted(self):
        # Only tabular output needs a formatting pass.
        return Formats.TABLE.value == self.response_format.lower()

    def format(self):
        if self.to_be_formatted():
            self.tables = super().format(self.response, self.response_format, self.operation,
                                         self.headers, 'hosted_page', 'list')
        return self

    def get(self, ctx, payload, resource_id):
        return self.__action_processor.get(f"{API_URI}/{resource_id}")

    def list(self, ctx):
        return self.__action_processor.get(API_URI)

    def delete(self, ctx, payload, resource_id):
        return self.__action_processor.delete(f"{API_URI}/{resource_id}/delete")

    def table_to_be_printed(self):
        return self.to_be_formatted()
| StarcoderdataPython |
3358583 | <reponame>5laps2go/xbrr<filename>xbrr/edinet/reader/aspects/finance.py
import warnings
import re
import collections
import importlib
if importlib.util.find_spec("pandas") is not None:
import pandas as pd
from xbrr.base.reader.base_parser import BaseParser
from xbrr.edinet.reader.element_value import ElementValue
class Finance(BaseParser):
    """Parser for the financial-statement sections of an EDINET XBRL filing.

    Exposes the balance sheet (``bs``), profit & loss (``pl``) and cash-flow
    (``cf``) statements, plus the note text blocks declared in ``tags``.
    """

    def __init__(self, reader):
        # Friendly-name -> taxonomy element mapping, resolved by BaseParser
        # into ElementValue attributes on this instance.
        tags = {
            "voluntary_accounting_policy_change": "jpcrp_cor:NotesVoluntaryChangesInAccountingPoliciesConsolidatedFinancialStatementsTextBlock",
            "segment_information": "jpcrp_cor:NotesSegmentInformationEtcConsolidatedFinancialStatementsTextBlock",
            "real_estate_for_lease": "jpcrp_cor:NotesRealEstateForLeaseEtcFinancialStatementsTextBlock",
            "accounting_standards": "jpdei_cor:AccountingStandardsDEI",  # accounting standard ("会計基準"), from metadata
        }
        super().__init__(reader, ElementValue, tags)

    @property
    def use_IFRS(self):
        # True when the filing declares IFRS as its accounting standard.
        return self.accounting_standards.value == 'IFRS'

    def bs(self, ifrs=False, use_cal_link=True):
        """Return the balance sheet as a DataFrame of element values."""
        role = self.__find_role_name('bs')
        role_uri = self.reader.get_role(role[0]).uri
        # role_uri = "http://disclosure.edinet-fsa.go.jp/role/jppfs/rol_BalanceSheet"
        # if ifrs and self.use_IFRS:
        #     role_uri = "http://disclosure.edinet-fsa.go.jp/role/jpigp/rol_ConsolidatedStatementOfFinancialPositionIFRS"

        bs = self.reader.read_value_by_role(role_uri, use_cal_link=use_cal_link)
        return self.__filter_duplicate(bs)

    def pl(self, ifrs=False, use_cal_link=True):
        """Return the income statement as a DataFrame of element values."""
        role = self.__find_role_name('pl')
        role_uri = self.reader.get_role(role[0]).uri
        # role_uri = "http://disclosure.edinet-fsa.go.jp/role/jppfs/rol_StatementOfIncome"
        # if ifrs and self.use_IFRS:
        #     role_base = "http://disclosure.edinet-fsa.go.jp/role/jpigp/"
        #     role_uri = f"{role_base}rol_ConsolidatedStatementOfComprehensiveIncomeIFRS"

        pl = self.reader.read_value_by_role(role_uri, use_cal_link=use_cal_link)
        return self.__filter_duplicate(pl)

    def cf(self, ifrs=False, use_cal_link=True):
        """Return the cash-flow statement.

        Falls back to parsing the attached HTML text block when the filing
        declares no dedicated cash-flow role.
        """
        role = self.__find_role_name('cf')
        if len(role) == 0:
            textblock = self.__read_value_by_textblock(["StatementOfCashFlows"])
            return self.__read_finance_statement(textblock.html) if textblock is not None else None
        role = role[0]
        role_uri = self.reader.get_role(role).uri

        cf = self.reader.read_value_by_role(role_uri, use_cal_link=use_cal_link)
        return self.__filter_duplicate(cf)

    def __filter_duplicate(self, data):
        """Drop duplicate (name, period) rows in place, keeping the first."""
        # Exclude dimension member
        if data is not None:
            data.drop_duplicates(subset=("name", "period"), keep="first",
                                 inplace=True)
        return data

    def __find_role_name(self, finance_statement):
        """List custom link roles matching *finance_statement* ('bs'/'pl'/'cf').

        Candidates are probed in priority order (IFRS first); roles whose
        name contains 'Notes' are excluded.
        """
        role_candiates = {
            'bs': ["StatementOfFinancialPositionIFRS", "ConsolidatedBalanceSheet", "BalanceSheet"],
            'pl': ["StatementOfProfitOrLossIFRS", "StatementOfIncome"],
            'cf': ["StatementOfCashFlowsIFRS", "StatementOfCashFlows"],
        }
        roles = []
        for name in role_candiates[finance_statement]:
            roles += [x for x in self.reader.custom_roles.keys() if name in x and 'Notes' not in x and x not in roles]
        return roles

    def __read_value_by_textblock(self, candidates):
        """Return the first *TextBlock element value matching *candidates*, or None."""
        values = self.reader.find_value_names(candidates)
        textblocks = [x for x in values if x.endswith('TextBlock')]
        if len(textblocks) == 0:
            return None
        element_value = self.reader.findv(textblocks[0])
        return element_value

    def __read_finance_statement(self, statement_xml):
        """Parse an HTML statement table into a DataFrame of labelled amounts.

        Each row yields {'label', 'value', 'indent'}; 'indent' encodes the
        nesting position derived from the label's CSS left margin.
        """
        def myen(value):
            # Normalise a Japanese-style amount string: '-' means zero,
            # thousands separators are dropped, '△' marks a negative.
            if value=='-':
                return '000'
            myen = value.replace(',','').replace('△', '-')
            return myen
        def isnum(myen):
            # True when the normalised string parses as a number.
            try:
                float(myen)
            except ValueError:
                return False
            else:
                return True
        indent_state = []
        def indent_label(margin_left):
            # Track nesting by CSS margin-left: deeper margins push onto the
            # stack, shallower ones pop back, producing labels like "1-2-1".
            delidx = [i for i,x in enumerate(indent_state) if int(x) > int(margin_left)]
            if len(delidx) > 0: del indent_state[delidx[0]:]
            indent_state.append(margin_left)
            c = collections.Counter(indent_state)
            ks = sorted(c.keys(), key=int)
            return "-".join([str(c[x]) for x in ks])
        unit = ''
        values = []
        for table in statement_xml.select('table'):
            for record in table.select('tr'):
                # First cell carries the label, last cell the amount.
                columns = list(record.select('td'))
                label = ''.join([x.text.strip() for x in columns[0].select('p')])
                value = myen(columns[-1].text.strip())
                style_str = columns[0].find('p')['style'] if label != "" else ""
                m = re.match(r'.*margin-left: *([0-9]*).?[0-9]*px.*', style_str)
                margin = m.groups()[0] if m is not None else "0"
                if isnum(value):
                    values.append({
                        'label': label,
                        'value': value + unit,
                        'indent': indent_label(margin)
                    })
                elif label != "" and value == "":
                    # Section heading row: label only, no amount.
                    values.append({
                        'label': label,
                        'indent': indent_label(margin)
                    })
                else:
                    # Otherwise this must be a unit/header row; '百万円' means
                    # amounts are in millions of yen, so pad with six zeros.
                    assert value=='' or '単位:' in value or '百万円' in value or '当連結会計年度' in value
                    if '百万円' in value: # unit header such as "単位:百万円" / "金額(百万円)"
                        unit = '000000'
                    elif '単位:円' in value:
                        unit = ''
        return pd.DataFrame(values)
| StarcoderdataPython |
3210062 | <gh_stars>1-10
__version__="0.0.1"
dcolor="1"
def indeterminate():
print("\x1b]9;4;3\x1b\\",end="",flush=True)
def show(value:int,color:str=None):
global dcolor
if(color!=None):
color={"green":1,"g":1,"red":2,"r":2,"yellow":4,"y":4}[color]
dcolor=color
else:
color=dcolor
value=int(value)
print("\x1b]9;4;{};{}\x1b\\".format(color,value),end="",flush=True)
def close():
print("\x1b]9;4;0\x1b\\",end="",flush=True)
| StarcoderdataPython |
1659934 | <reponame>DominikSauter/Skeletonization<gh_stars>1-10
import os
import shutil
from skimage import io
from skimage import img_as_ubyte
from skimage import filters
from skimage.util import invert
from matplotlib import pyplot as plt
from fuzzyTransform import fuzzyTransform
from skeleton2Graph import *
import skeletonization
import numpy as np
import scipy.ndimage.morphology as morph
from skimage.morphology import thin
import matplotlib.patches
def main():
    """Skeletonize every image in ``in_img_path`` with an average-outward-flux
    skeleton and write the binary skeletons to ``out_img_flux_skel_path``.

    The fuzzy BDB/MSDB pruning stages are currently disabled: the loop hits an
    unconditional ``continue`` after saving the flux skeleton, so everything
    after it is dead code kept for reference.
    """
    in_img_path = 'test_ml_comp_grey_upsampled2.0_framed'
    out_img_bdb_path = 'skeletonize_fuzzy_BDB'
    out_img_msdb_path = 'skeletonize_fuzzy_MSDB'
    out_img_flux_skel_path = 'skeletonize_flux'

    # delete out dirs and recreate
    shutil.rmtree(out_img_bdb_path, ignore_errors=True)
    os.makedirs(out_img_bdb_path)
    shutil.rmtree(out_img_msdb_path, ignore_errors=True)
    os.makedirs(out_img_msdb_path)
    shutil.rmtree(out_img_flux_skel_path, ignore_errors=True)
    os.makedirs(out_img_flux_skel_path)

    for i, img_path in enumerate(sorted(os.listdir(in_img_path))):
        print(img_path)
        img = io.imread(os.path.join(in_img_path, img_path),as_gray=True)
        img = invert(img)

        # Binarise with Otsu's threshold; the final flip makes the object 0
        # and the background 1 for the distance transform below.
        BW = mat2gray(img)
        thresh_img = filters.threshold_otsu(BW)
        BW = BW >= thresh_img
        BW = BW.astype(float)
        BW = 1 - BW
        M,N = BW.shape
        '''Calculating Euclidean Distance of the Binary Image'''
        D,IDX = morph.distance_transform_edt(BW,return_distances=True, return_indices=True)
        D = mat2gray(D)

        # Gradient of the distance transform, reconstructed from the
        # nearest-background-pixel indices rather than finite differences.
        X,Y = np.meshgrid(range(N),range(M))
        delD_x = -(IDX[1,:,:] - X)
        delD_y = -(IDX[0,:,:] - Y)

        # normalize the derivatives
        delD_norm = np.sqrt(pow(delD_x,2) + pow(delD_y,2))
        with np.errstate(divide='ignore',invalid='ignore'):
            delD_xn = delD_x / delD_norm
            delD_yn = delD_y / delD_norm

        mir_delD_xn = mirrorBW(delD_xn)
        mir_delD_yn = mirrorBW(delD_yn)
        # f, (ax1, ax2,ax3) = plt.subplots(1,3,figsize=(10,30))
        # ax1.imshow(D)
        # ax1.set_title('Euclidean Distance Transform')
        # ax2.imshow(delD_x)
        # ax2.set_title('X direction of the gradient of EDT')
        # ax3.imshow(delD_y)
        # ax3.set_title('Y direction of the gradient of EDT')
        # plt.tight_layout()
        # plt.show()

        # #Calculate flux map
        # fluxMap = flux(mir_delD_xn,mir_delD_yn)
        # plt.imshow(np.nan_to_num(fluxMap))
        # plt.title('Flux Map')
        # plt.show()

        #Calculate flux map
        fluxMap = flux(mir_delD_xn,mir_delD_yn)

        # Calculate flux threshold
        print((np.nanmax(fluxMap) - np.nanmedian(fluxMap)))
        print(np.nanmedian(fluxMap))
        print(np.nanmax(fluxMap))
        print(np.nanmin(fluxMap))
        # fluxBWThreshold = (np.nanmax(fluxMap) - np.nanmedian(fluxMap)) * 0.30 + np.nanmedian(fluxMap)
        # fluxBWThreshold = (np.nanmax(fluxMap) - np.nanmean(fluxMap)) * 0.15 + np.nanmean(fluxMap)
        fluxBWThreshold = (np.nanmax(fluxMap) - np.nanmean(fluxMap)) * 0.075 + np.nanmean(fluxMap)
        print(fluxBWThreshold)
        with np.errstate(divide='ignore',invalid='ignore'):
            fluxThin = thin(fluxMap>fluxBWThreshold)

        # plt.imshow(np.nan_to_num(fluxMap>fluxBWThreshold))
        # plt.title('fluxMap>fluxBWThreshold Map')
        # plt.show()
        # continue
        #
        # plt.imshow(np.nan_to_num(fluxThin))
        # plt.title('FluxThin Map')
        # plt.show()
        # # continue

        # Label 8-connected components of the thinned flux map.
        # NOTE(review): `ndimage` is not among this module's visible imports;
        # presumably it is re-exported by `from skeleton2Graph import *` — confirm.
        fluxLabeled,b = ndimage.label(fluxThin, np.array([[1,1,1], [1,1,1], [1,1,1]]))
        # print(fluxLabeled)
        # print(b)
        # fluxLabeled_1 = fluxLabeled == 1
        labels ,pixelSize = np.unique(fluxLabeled,return_counts=True)
        # print("labels: {}".format(labels))
        # print("pixelSize: {}".format(pixelSize))

        # Pick the second-largest component as the skeleton (the largest is
        # the background). NOTE(review): `pixelSize_second = pixelSize` is an
        # alias, not a copy, so `pixelSize` itself is zeroed at the argmax
        # before the background-exclusion step below — confirm this is intended.
        pixelSize_second = pixelSize
        pixelSize_second[np.argmax(pixelSize)] = 0
        skel_label = np.argmax(pixelSize_second)
        # print(skel_label)

        # Excluding the background
        pixelSize = pixelSize[labels != 0]
        labels = labels[labels != 0]

        # Calculating the size threshold and filter out small objects
        th = min(np.mean(pixelSize) + 3 * np.std(pixelSize), np.max(pixelSize))
        selectedObjects = labels[np.where(pixelSize >= th)]
        fluxTemp = np.zeros(fluxMap.shape)
        # fluxTemp[fluxLabeled == 1] = 1
        fluxTemp[fluxLabeled == skel_label] = 1

        # plt.imshow(fluxTemp,cmap='gray')
        # plt.title('Initial Skeleton with branches')
        # plt.tight_layout()
        # plt.show()

        # Save the unpruned flux skeleton and move to the next image.
        binary_bdb = fluxTemp
        #thresh_bdb = filters.threshold_otsu(BDB)
        #binary_bdb = BDB >= thresh_bdb
        io.imsave(os.path.join(out_img_flux_skel_path, img_path), img_as_ubyte(binary_bdb))
        continue

        # --- Dead code below: skipped by the unconditional `continue` above. ---
        skeletonNew = np.zeros(fluxMap.shape)
        fluxTemp_fluxMap = fluxTemp*fluxMap
        adjacencyMatrix, edgeList, edgeProperties,edgeProperties2, verticesProperties, verticesProperties2, endPoints, branchPoints = skeleton2Graph(fluxTemp,fluxTemp*fluxMap)
        vertices = np.concatenate((endPoints, branchPoints))
        _,_,_, skeletonGraphPointsImg = findBranchPoints(fluxTemp,return_image=True)
        f, (ax1, ax2,ax3) = plt.subplots(1,3,figsize=(10,30))
        ax1.imshow(skeletonGraphPointsImg)
        ax1.set_title('Vertices of the Skeleton\'s graph')
        ax2.imshow(skeletonGraphPointsImg[50:100,125:170])
        ax2.set_title('Vertices, close look')
        ax3.imshow(graphDrawing(fluxTemp,edgeList,0.08))
        ax3.set_title('Edges of the Skeleton\'s graph')
        plt.tight_layout()
        plt.show()
        continue
        # skeletonNew, MSDB,BDB = fuzzyTransform(fluxTemp, vertices, edgeList, edgeProperties, verticesProperties, verticesProperties2, adjacencyMatrix, returnDB=True)
        # f, (ax1, ax2,ax3) = plt.subplots(1,3,figsize=(10,30))
        # ax1.imshow(MSDB,cmap ='gist_gray')
        # ax1.set_title('Main Skeleton Degree of Belief Map')
        # ax2.imshow(BDB,cmap ='gist_gray')
        # ax2.set_title('Branch Degree of Belief Map')
        # ax3.imshow(skeletonNew, cmap='gray')
        # ax3.set_title('Pruned Skeleton')
        # plt.tight_layout()
        # plt.show()

        # binary_bdb = BDB
        # #thresh_bdb = filters.threshold_otsu(BDB)
        # #binary_bdb = BDB >= thresh_bdb
        # io.imsave(os.path.join(out_img_bdb_path, img_path), img_as_ubyte(binary_bdb))
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
176108 | <gh_stars>0
from xicam.core.execution.workflow import Workflow
from xicam.core.execution.daskexecutor import DaskExecutor
from xicam.plugins import Input, Output, ProcessingPlugin
from pyFAI.detectors import Pilatus2M
import numpy as np
from pyFAI import AzimuthalIntegrator, units
from scipy.ndimage import morphology
import fabio
class ThresholdMaskPlugin(ProcessingPlugin):
    """Build a pixel mask by intensity thresholding, then morphologically
    open it so that isolated failing pixels are dropped and only clusters of
    at least ``neighborhood`` extent remain masked."""

    data = Input(description="Frame image data", type=np.ndarray)
    minimum = Input(description="Threshold floor", type=int)
    maximum = Input(description="Threshold ceiling", type=int)
    neighborhood = Input(
        description="Neighborhood size in pixels for morphological opening. Only clusters of this size"
        " that fail the threshold are masked",
        type=int,
    )

    mask = Output(description="Thresholded mask (1 is masked)", type=np.ndarray)

    def evaluate(self):
        """Compute ``mask``: True where intensity is outside [minimum, maximum],
        opened with a disk-shaped structuring element of radius ``neighborhood``."""
        # Mask everything below the floor or above the ceiling.
        self.mask.value = np.logical_or(self.data.value < self.minimum.value, self.data.value > self.maximum.value)
        # Circular (disk) structuring element of radius `neighborhood`.
        y, x = np.ogrid[
            -self.neighborhood.value : self.neighborhood.value + 1, -self.neighborhood.value : self.neighborhood.value + 1
        ]
        kernel = x ** 2 + y ** 2 <= self.neighborhood.value ** 2

        morphology.binary_opening(self.mask.value, kernel, output=self.mask.value)  # write-back to mask
class QIntegratePlugin(ProcessingPlugin):
    """Azimuthal (1-D q) integration of a detector frame via pyFAI.

    Thin wrapper around ``AzimuthalIntegrator.integrate1d``; the inputs
    mirror its parameters.
    """

    integrator = Input(description="A PyFAI.AzimuthalIntegrator object", type=AzimuthalIntegrator)
    data = Input(description="2d array representing intensity for each pixel", type=np.ndarray)
    npt = Input(description="Number of bins along q")
    polz_factor = Input(description="Polarization factor for correction", type=float)
    unit = Input(description="Output units for q", type=[str, units.Unit], default="q_A^-1")
    radial_range = Input(
        description="The lower and upper range of the radial unit. If not provided, range is simply "
        "(data.min(), data.max()). Values outside the range are ignored.",
        type=tuple,
    )
    azimuth_range = Input(
        description="The lower and upper range of the azimuthal angle in degree. If not provided, "
        "range is simply (data.min(), data.max()). Values outside the range are ignored."
    )
    mask = Input(description="Array (same size as image) with 1 for masked pixels, and 0 for valid pixels", type=np.ndarray)
    dark = Input(description="Dark noise image", type=np.ndarray)
    flat = Input(description="Flat field image", type=np.ndarray)
    method = Input(
        description='Can be "numpy", "cython", "BBox" or "splitpixel", "lut", "csr", "nosplit_csr", '
        '"full_csr", "lut_ocl" and "csr_ocl" if you want to go on GPU. To Specify the device: '
        '"csr_ocl_1,2"',
        type=str,
    )
    normalization_factor = Input(description="Value of a normalization monitor", type=float)

    q = Output(description="Q bin center positions", type=np.array)
    I = Output(description="Binned/pixel-split integrated intensity", type=np.array)

    def evaluate(self):
        """Run the 1-D integration and publish (q, I).

        NOTE: ``self.integrator.value`` is *called* here, so it must hold a
        zero-argument factory returning an AzimuthalIntegrator, not an
        integrator instance itself (see how callers assign ``AI_func``).
        """
        self.q.value, self.I.value = self.integrator.value().integrate1d(
            data=self.data.value,
            npt=self.npt.value,
            radial_range=self.radial_range.value,
            azimuth_range=self.azimuth_range.value,
            mask=self.mask.value,
            polarization_factor=self.polz_factor.value,
            dark=self.dark.value,
            flat=self.flat.value,
            method=self.method.value,
            unit=self.unit.value,
            normalization_factor=self.normalization_factor.value,
        )
def test_SAXSWorkflow():
    """End-to-end check: threshold-mask an EDF frame, then azimuthally
    integrate it with pyFAI through a Dask-executed Workflow.

    NOTE: relies on a local data file and a Dask executor, so this is an
    integration test, not a unit test.
    """
    # create processes
    thresholdmask = ThresholdMaskPlugin()
    qintegrate = QIntegratePlugin()

    # Load the detector frame once and feed it to both plugins.
    # (Previously the same file was opened and parsed twice, and an unused
    # AzimuthalIntegrator instance was constructed and discarded.)
    frame = fabio.open("/Users/hari/Downloads/AGB_5S_USE_2_2m.edf").data
    thresholdmask.data.value = frame
    qintegrate.data.value = frame

    def AI_func():
        # Imports live inside the factory so it can be serialized and run on
        # a Dask worker without capturing module state.
        from pyFAI.detectors import Pilatus2M
        from pyFAI import AzimuthalIntegrator, units

        return AzimuthalIntegrator(
            0.283, 5.24e-3, 4.085e-3, 0, 0, 0, 1.72e-4, 1.72e-4, detector=Pilatus2M(), wavelength=1.23984e-10
        )

    qintegrate.integrator.value = AI_func
    qintegrate.npt.value = 1000
    thresholdmask.minimum.value = 30
    thresholdmask.maximum.value = 1e12
    thresholdmask.neighborhood.value = 1
    qintegrate.normalization_factor.value = 0.5
    qintegrate.method.value = "numpy"

    # connect processes: the thresholded mask feeds the integrator's mask input
    thresholdmask.mask.connect(qintegrate.mask)

    # add processes to workflow
    wf = Workflow("QIntegrate")
    wf.addProcess(thresholdmask)
    wf.addProcess(qintegrate)

    dsk = DaskExecutor()
    result = dsk.execute(wf)
    print(result)
def test_autoconnect():
    """Exercise implicit (auto) connection between plugins.

    NOTE(review): this test looks unfinished — the processes are configured
    but never added to a Workflow, connected, or executed; confirm against
    upstream history before relying on it.
    """
    # create processes
    thresholdmask = ThresholdMaskPlugin()
    qintegrate = QIntegratePlugin()

    # set values
    # Here the integrator input holds the instance directly (contrast with
    # test_SAXSWorkflow, which assigns a factory function).
    AI = AzimuthalIntegrator(
        0.283, 5.24e-3, 4.085e-3, 0, 0, 0, 1.72e-4, 1.72e-4, detector=Pilatus2M(), wavelength=1.23984e-10
    )
    thresholdmask.data.value = fabio.open("/Users/hari/Downloads/AGB_5S_USE_2_2m.edf").data
    qintegrate.integrator.value = AI
    qintegrate.npt.value = 1000
    thresholdmask.minimum.value = 30
    thresholdmask.maximum.value = 1e12
    # add process to workflow
| StarcoderdataPython |
3345656 | <reponame>Fenmaz/connect4
from keras.callbacks import Callback
import tensorflow as tf
class TensorBoardStepCallback(Callback):
    """Keras callback that logs batch metrics to TensorBoard, keyed by a
    global step counter and emitted every ``logging_per_steps`` batches."""

    def __init__(self, log_dir, logging_per_steps=100, step=0):
        super().__init__()
        self.step = step
        self.logging_per_steps = logging_per_steps
        self.writer = tf.summary.FileWriter(log_dir)

    def on_batch_end(self, batch, logs=None):
        self.step += 1
        # Only emit summaries every `logging_per_steps` steps.
        if self.step % self.logging_per_steps:
            return
        for name, value in logs.items():
            # Skip Keras bookkeeping entries that are not metrics.
            if name in ("batch", "size"):
                continue
            summary = tf.Summary()
            entry = summary.value.add()
            entry.simple_value = value.item()
            entry.tag = name
            self.writer.add_summary(summary, self.step)
        self.writer.flush()

    def close(self):
        """Release the underlying summary writer."""
        self.writer.close()
| StarcoderdataPython |
3375186 | <reponame>Starfunx/Robot_Simulation<filename>main.py
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
from Terrain import Terrain as Terrain
from DiffDriveRobot import Robot as Robot
from DiffDriveControl import Robot as RobotControl
from Lidar import Lidar
class PickConsign:
    """Matplotlib click handler: a click inside the axes becomes the robot
    controller's new target pose."""

    def __init__(self, robotControl):
        # Plot a dummy point purely to obtain a figure canvas to bind on.
        line, = plt.plot([0], [0])
        self.cid = line.figure.canvas.mpl_connect('button_press_event', self)
        self.robotControl = robotControl

    def __call__(self, event):
        print('click', event)
        # xdata/ydata are None when the click lands outside the axes.
        # Fixed: compare against None with `is not None` (PEP 8), not `!=`.
        if (event.xdata is not None) and (event.ydata is not None):
            self.robotControl.setXC(event.xdata)
            self.robotControl.setYC(event.ydata)
            # Target heading fixed at 2 rad — presumably a placeholder;
            # TODO confirm intended orientation handling.
            self.robotControl.setThetaC(2)
# Arena walls (mm): four segments forming a closed 3000 x 2000 rectangle.
terrain_lines = np.array([[[0, 0], [0, 2000]],
                          [[0, 2000], [3000, 2000]],
                          [[3000, 2000], [3000, 0]],
                          [[3000, 0], [0, 0]]])

# Waypoint count for the reference path. NOTE(review): this evaluates to a
# float (1/0.005*5 == 1000.0); np.linspace's `num` expects an int — accepted
# by old NumPy, raises on recent versions. Confirm before upgrading.
nbpts = 1/0.005*5
trajectory_t = np.linspace(0,20,nbpts*10)
# Circular target trajectory: radius 500 mm, centred at (1500, 1500).
trajectory = np.array([500*np.cos(trajectory_t)+1500,
                       500*np.sin(trajectory_t)+1500])
# trajectory = np.array([trajectory_t*200+700,
#                        np.ones_like(trajectory_t)*1500])

terrain = Terrain(terrain_lines)
robot = Robot(700, 900, 0)
robotControl = RobotControl(robot.getX(), robot.getY(), robot.getTheta(), robot)
lidar = Lidar([robot.getX(), robot.getY()], 0, 50, np.pi/30, terrain_lines)
pickConsign = PickConsign(robotControl)

dT = 0.005          # simulation time step [s]
i = 0               # index of the current trajectory waypoint
robotpos = [[],[]]  # trace of visited robot positions ([x list, y list])
Tmax = int(50/dT)   # number of steps: 50 s of simulated time
for t in range(Tmax):
    #computing
    robot.update(dT)
    robotControl.update(dT)

    # Advance to the next waypoint once simulated time passes its timestamp,
    # clamping at the final waypoint.
    if t*dT > trajectory_t[i]:
        i+=1
        if i>=len(trajectory_t):
            i-=1
        robotControl.setXC(trajectory[0,i])
        robotControl.setYC(trajectory[1,i])

    # Refresh the lidar pose and fire a scan at roughly 30 Hz of sim time.
    if (t*dT)%(1.0/30.0) < 0.001:
        lidar.setX(robot.getX())
        lidar.setY(robot.getY())
        lidar.setTheta(robot.getTheta())
        lidar.fire()

    # Redraw the scene at the same ~30 Hz cadence.
    if (t*dT)%(1.0/30.0) < 0.001:
        # Drawing
        plt.clf()
        # plt.axis('equal')
        plt.text(10, 1900, "t: {0:.3f} s".format((t+1)*dT), fontsize=12)
        plt.text(10, 1800, "X: {0:.0f} mm".format(robot.getX()), fontsize=12)
        plt.text(10, 1700, "Y: {0:.0f} mm".format(robot.getY()), fontsize=12)
        plt.text(10, 1600, "T: {0:.3f} rad".format(robot.getTheta()), fontsize=12)
        robotpos[0] = robotpos[0] + [robot.getX()]
        robotpos[1] = robotpos[1] + [robot.getY()]
        # print(robotpos)
        plt.plot(robotpos[0], robotpos[1], 'k+')
        # lidar.draw()
        terrain.draw()
        robot.draw()
        robotControl.draw()
        #draw trajectory
        # plt.plot(trajectory[0], trajectory[1], 'k')
        plt.pause(0.0001)
plt.show()
| StarcoderdataPython |
103983 | """
This module contains the definitions for Bike and its subclasses Bicycle and
Motorbike.
"""
class Bike:
    """A rideable bike with a fixed number of seats and selectable gears.

    Attributes:
        seats: number of seats the bike has
        gears: number of gears the bike has
    """

    def __init__(self, seats, gears):
        """Create a bike that starts stationary in first gear.

        Args:
            seats: number of seats the bike has
            gears: number of gears the bike has
        """
        self.seats = seats
        self.gears = gears
        self._curr_gear = 1   # currently selected gear (1-based), private
        self._riding = False  # whether a ride is currently in progress

    @property
    def curr_gear(self):
        """Read-only view of the currently selected gear; changes only go
        through change_gear()."""
        return self._curr_gear

    def start_ride(self):
        """Begin a ride.

        Returns:
            True if the bike was stationary and the ride started.
            False if a ride was already in progress.
        """
        already_riding = self._riding
        self._riding = True
        return not already_riding

    def end_ride(self):
        """Finish a ride.

        Returns:
            True if a ride was in progress and has been stopped.
            False if the bike was already stationary.
        """
        was_riding = self._riding
        self._riding = False
        return was_riding

    def change_gear(self, new_gear):
        """Switch to *new_gear*.

        Args:
            new_gear: gear to change to (1..gears, different from current)

        Returns:
            True once the gear has been changed.

        Raises:
            ValueError: if *new_gear* equals the current gear or lies
                outside the range 1..gears.
        """
        if new_gear == self._curr_gear or not 0 < new_gear <= self.gears:
            raise ValueError("Already in this gear or invalid gear number.")
        self._curr_gear = new_gear
        return True
class Bicycle(Bike):
    """A Bike with a bell; defaults to a single seat and seven gears.

    Attributes:
        seats: number of seats the bicycle has
        gears: number of gears the bicycle has
        bell_sound: sound printed when the bell is rung
    """

    def __init__(self, seats=1, gears=7, bell_sound="ring ring"):
        """Create a bicycle.

        Args:
            seats: number of seats, defaults to 1
            gears: number of gears, defaults to 7
            bell_sound: sound the bell makes when rung
        """
        super().__init__(seats, gears)
        self.bell_sound = bell_sound

    def ring_bell(self):
        """Ring the bell by printing its sound."""
        print(self.bell_sound)
class Motorbike(Bike):
    """A Bike with a fuel tank: riding requires fuel, and every completed
    ride empties the tank.

    Attributes:
        seats: number of seats the bike has
        gears: number of gears the bike has
    """

    def __init__(self, seats=2, gears=5):
        """Create a motorbike with a full tank.

        Args:
            seats: number of seats, defaults to 2
            gears: number of gears, defaults to 5
        """
        super().__init__(seats, gears)
        # True means a full tank; private so it only changes through the
        # ride/fill methods below.
        self._tank = True

    @property
    def tank(self):
        """Read-only view of the tank state (True = full)."""
        return self._tank

    def start_ride(self):
        """Begin a ride; requires fuel.

        Returns:
            True if successful.
            False if the tank is empty or a ride is already in progress.
        """
        if not self._tank:
            return False
        return super().start_ride()

    def end_ride(self):
        """Finish a ride; a completed ride always empties the tank.

        Returns:
            True if successful.
            False if the motorbike was not being ridden.
        """
        stopped = super().end_ride()
        if stopped:
            self._tank = False
        return stopped

    def fill_tank(self):
        """Refuel the motorbike so it can ride again.

        Returns:
            True if the tank was filled.
            False if it was already full.
        """
        was_empty = not self._tank
        self._tank = True
        return was_empty
| StarcoderdataPython |
94383 | """Example, how write generator agent for tesla car"""
from random import sample
from string import ascii_lowercase, digits
from magic_agent.core.base import BaseAgent, RuleItem, RuleDevice, RuleItemGenerator
# import constants Rules
from magic_agent.core.rules import MozillaDefault, AppleWebKit, LikeGecko, Safari, chromium_generator
# pseudo imei generator
def random_imei():
    """Pseudo IMEI: 12 distinct random characters drawn from [a-z0-9]."""
    alphabet = ascii_lowercase + digits
    return "".join(sample(alphabet, 12))
if __name__ == '__main__':
    # Device token: X11 on GNU/Linux, as reported by real Tesla browsers.
    device = RuleDevice(items=("X11", "GNU/Linux"))
    # Firmware token: Tesla/2021.<minor>.<patch>-<pseudo-IMEI>, with the
    # value pools each rule may draw from.
    tesla = RuleItem("Tesla/2021.{}.{}-{}",
                     items=(
                         tuple(range(10, 99)),
                         tuple(range(1, 9)),
                         tuple((random_imei() for _ in range(100)))
                     ))
    # Chromium and Chrome version should be equal version values in Tesla user agent
    #
    chromium = RuleItemGenerator("Chromium/{} Chrome/{}", chromium_generator, randomize=False)
    # Assemble the rules in the order they appear in a real UA string.
    b = BaseAgent(rules=(
        MozillaDefault,
        device,
        AppleWebKit,
        LikeGecko,
        chromium,
        Safari,
        tesla
    ))
    for _ in range(3):
        print(b.agent)
    # Mozilla/5.0 (X11; GNU/Linux) AppleWebKit/537.36 (KHTML, like Gecko) Chromium/93.0.4577.129 Chrome/93.0.4577.129 Safari/537.36 Tesla/2021.54.2-rzxq46wot9pg
    # Mozilla/5.0 (X11; GNU/Linux) AppleWebKit/537.36 (KHTML, like Gecko) Chromium/94.0.4606.112 Chrome/94.0.4606.112 Safari/537.36 Tesla/2021.58.7-rzxq46wot9pg
    # Mozilla/5.0 (X11; GNU/Linux) AppleWebKit/537.36 (KHTML, like Gecko) Chromium/95.0.4638.75 Chrome/95.0.4638.75 Safari/537.36 Tesla/2021.21.7-rzxq46wot9pg
| StarcoderdataPython |
1718522 | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class DbSystemSummary(object):
"""
A summary of a DB System.
"""
def __init__(self, **kwargs):
"""
Initializes a new DbSystemSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this DbSystemSummary.
:type id: str
:param display_name:
The value to assign to the display_name property of this DbSystemSummary.
:type display_name: str
:param description:
The value to assign to the description property of this DbSystemSummary.
:type description: str
:param compartment_id:
The value to assign to the compartment_id property of this DbSystemSummary.
:type compartment_id: str
:param availability_domain:
The value to assign to the availability_domain property of this DbSystemSummary.
:type availability_domain: str
:param fault_domain:
The value to assign to the fault_domain property of this DbSystemSummary.
:type fault_domain: str
:param endpoints:
The value to assign to the endpoints property of this DbSystemSummary.
:type endpoints: list[DbSystemEndpoint]
:param lifecycle_state:
The value to assign to the lifecycle_state property of this DbSystemSummary.
:type lifecycle_state: str
:param mysql_version:
The value to assign to the mysql_version property of this DbSystemSummary.
:type mysql_version: str
:param time_created:
The value to assign to the time_created property of this DbSystemSummary.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this DbSystemSummary.
:type time_updated: datetime
:param freeform_tags:
The value to assign to the freeform_tags property of this DbSystemSummary.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this DbSystemSummary.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'id': 'str',
'display_name': 'str',
'description': 'str',
'compartment_id': 'str',
'availability_domain': 'str',
'fault_domain': 'str',
'endpoints': 'list[DbSystemEndpoint]',
'lifecycle_state': 'str',
'mysql_version': 'str',
'time_created': 'datetime',
'time_updated': 'datetime',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'id': 'id',
'display_name': 'displayName',
'description': 'description',
'compartment_id': 'compartmentId',
'availability_domain': 'availabilityDomain',
'fault_domain': 'faultDomain',
'endpoints': 'endpoints',
'lifecycle_state': 'lifecycleState',
'mysql_version': 'mysqlVersion',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._id = None
self._display_name = None
self._description = None
self._compartment_id = None
self._availability_domain = None
self._fault_domain = None
self._endpoints = None
self._lifecycle_state = None
self._mysql_version = None
self._time_created = None
self._time_updated = None
self._freeform_tags = None
self._defined_tags = None
@property
def id(self):
"""
**[Required]** Gets the id of this DbSystemSummary.
The OCID of the DB System.
:return: The id of this DbSystemSummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this DbSystemSummary.
The OCID of the DB System.
:param id: The id of this DbSystemSummary.
:type: str
"""
self._id = id
@property
def display_name(self):
"""
**[Required]** Gets the display_name of this DbSystemSummary.
The user-friendly name for the DB System. It does not have to be unique.
:return: The display_name of this DbSystemSummary.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this DbSystemSummary.
The user-friendly name for the DB System. It does not have to be unique.
:param display_name: The display_name of this DbSystemSummary.
:type: str
"""
self._display_name = display_name
@property
def description(self):
"""
Gets the description of this DbSystemSummary.
User-provided data about the DB System.
:return: The description of this DbSystemSummary.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this DbSystemSummary.
User-provided data about the DB System.
:param description: The description of this DbSystemSummary.
:type: str
"""
self._description = description
@property
def compartment_id(self):
"""
Gets the compartment_id of this DbSystemSummary.
The OCID of the compartment the DB System belongs in.
:return: The compartment_id of this DbSystemSummary.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this DbSystemSummary.
The OCID of the compartment the DB System belongs in.
:param compartment_id: The compartment_id of this DbSystemSummary.
:type: str
"""
self._compartment_id = compartment_id
@property
def availability_domain(self):
"""
Gets the availability_domain of this DbSystemSummary.
The Availability Domain where the primary DB System should be located.
:return: The availability_domain of this DbSystemSummary.
:rtype: str
"""
return self._availability_domain
@availability_domain.setter
def availability_domain(self, availability_domain):
"""
Sets the availability_domain of this DbSystemSummary.
The Availability Domain where the primary DB System should be located.
:param availability_domain: The availability_domain of this DbSystemSummary.
:type: str
"""
self._availability_domain = availability_domain
@property
def fault_domain(self):
    """Name of the Fault Domain the DB System is located in.

    :return: The fault_domain of this DbSystemSummary.
    :rtype: str
    """
    return self._fault_domain


@fault_domain.setter
def fault_domain(self, fault_domain):
    """Set the Fault Domain name.

    :param fault_domain: The fault_domain of this DbSystemSummary.
    :type: str
    """
    self._fault_domain = fault_domain
@property
def endpoints(self):
    """Network endpoints available for this DB System.

    :return: The endpoints of this DbSystemSummary.
    :rtype: list[DbSystemEndpoint]
    """
    return self._endpoints


@endpoints.setter
def endpoints(self, endpoints):
    """Set the list of network endpoints.

    :param endpoints: The endpoints of this DbSystemSummary.
    :type: list[DbSystemEndpoint]
    """
    self._endpoints = endpoints
@property
def lifecycle_state(self):
    """**[Required]** Current state of the DB System.

    :return: The lifecycle_state of this DbSystemSummary.
    :rtype: str
    """
    return self._lifecycle_state


@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
    """Set the current state of the DB System.

    :param lifecycle_state: The lifecycle_state of this DbSystemSummary.
    :type: str
    """
    self._lifecycle_state = lifecycle_state
@property
def mysql_version(self):
    """**[Required]** Name of the MySQL Version in use for the DB System.

    :return: The mysql_version of this DbSystemSummary.
    :rtype: str
    """
    return self._mysql_version


@mysql_version.setter
def mysql_version(self, mysql_version):
    """Set the MySQL Version name.

    :param mysql_version: The mysql_version of this DbSystemSummary.
    :type: str
    """
    self._mysql_version = mysql_version
@property
def time_created(self):
    """**[Required]** Date and time the DB System was created.

    :return: The time_created of this DbSystemSummary.
    :rtype: datetime
    """
    return self._time_created


@time_created.setter
def time_created(self, time_created):
    """Set the creation timestamp.

    :param time_created: The time_created of this DbSystemSummary.
    :type: datetime
    """
    self._time_created = time_created
@property
def time_updated(self):
    """**[Required]** Time the DB System was last updated.

    :return: The time_updated of this DbSystemSummary.
    :rtype: datetime
    """
    return self._time_updated


@time_updated.setter
def time_updated(self, time_updated):
    """Set the last-updated timestamp.

    :param time_updated: The time_updated of this DbSystemSummary.
    :type: datetime
    """
    self._time_updated = time_updated
@property
def freeform_tags(self):
    """Simple key-value tags with no predefined name, type or scope.

    Exists for cross-compatibility only.
    Example: `{\"bar-key\": \"value\"}`

    :return: The freeform_tags of this DbSystemSummary.
    :rtype: dict(str, str)
    """
    return self._freeform_tags


@freeform_tags.setter
def freeform_tags(self, freeform_tags):
    """Set the free-form tags.

    Example: `{\"bar-key\": \"value\"}`

    :param freeform_tags: The freeform_tags of this DbSystemSummary.
    :type: dict(str, str)
    """
    self._freeform_tags = freeform_tags
@property
def defined_tags(self):
    """Predefined tag keys, scoped to namespaces.

    Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`

    :return: The defined_tags of this DbSystemSummary.
    :rtype: dict(str, dict(str, object))
    """
    return self._defined_tags


@defined_tags.setter
def defined_tags(self, defined_tags):
    """Set the defined (namespaced) tags.

    Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`

    :param defined_tags: The defined_tags of this DbSystemSummary.
    :type: dict(str, dict(str, object))
    """
    self._defined_tags = defined_tags
def __repr__(self):
    """Return a flat, formatted dump of all attributes (SDK helper)."""
    return formatted_flat_dict(self)

def __eq__(self, other):
    """Equal when every attribute matches; never equal to None."""
    if other is None:
        return False
    return self.__dict__ == other.__dict__

def __ne__(self, other):
    """Logical negation of __eq__."""
    return not self == other
| StarcoderdataPython |
4814958 | # -*- coding: utf-8 -*
import os
from os import listdir, makedirs
from os.path import join, basename, splitext, isfile, exists
import glob
import arcpy
import xlrd
# ArcPy workspace root; every path below is relative to this directory.
WORKSPACE = r'D:\Document\ArcMapDemo\data00_416after'
# arcpy.env.workspace = WORKSPACE
# Folder holding the administrative-district shapefiles (clip boundaries).
DISTRICT_FOLDER = 'China'
# Folders holding the CSV point data to be clipped.
CSV_FOLDER = ['RentPrice_Jan', 'ResoldPrice_Jan']
# Folder holding the POI spreadsheets (one sub-folder per city).
POI_FOLDER = 'POI'
# Folder for intermediate/output shapefiles.
TEMP = 'temp'
# Create one temp directory per CSV folder.
for temp in [join(WORKSPACE, TEMP, folder) for folder in CSV_FOLDER]:
    if not exists(temp):
        makedirs(temp)
# Create one temp directory per POI city folder.
for temp in [join(WORKSPACE, TEMP, POI_FOLDER, folder) for folder in listdir(join(WORKSPACE, POI_FOLDER))]:
    if not exists(temp):
        makedirs(temp)
# Column names of the X/Y (lon/lat, WGS84) coordinates in the CSV files...
X_FIELD = 'Lon84'
Y_FIELD = 'Lat84'
# ...and in the POI spreadsheets (column headers are in Chinese).
X_FIELD_POI = '经度_wgs84'
Y_FIELD_POI = '纬度_wgs84'
# Map shapefile base name -> path for every district shapefile,
# e.g. {'Beijing': '...\\China\\Beijing.shp'}.
feature_paths = {splitext(basename(filepath))[0].strip(): filepath
                 for filepath in glob.glob(join(WORKSPACE, DISTRICT_FOLDER, '*.shp'))}
# WGS84 (EPSG:4326) spatial reference used for all XY event layers.
spatial_ref = arcpy.SpatialReference(4326)
def clip_csv(restart=False):
    """Plot every CSV's points as an XY event layer and clip them to the
    district shapefile of the same name, writing <temp>/<name>.shp.

    (Python 2 script: uses print statements.)

    :param restart: when False, files whose output shapefile already
        exists are skipped; when True they are regenerated.
    """
    for folder in CSV_FOLDER:
        temp_path = join(WORKSPACE, TEMP, folder)
        for filepath in glob.glob(join(WORKSPACE, folder, '*.csv')):
            # CSV base name doubles as the district/shapefile key.
            filename = splitext(basename(filepath))[0]
            output_filepath = join(temp_path, filename + '.shp')
            print output_filepath
            if exists(output_filepath):
                if not restart:
                    print 'exist'
                    continue
            # Turn the CSV rows into an in-memory point event layer (WGS84).
            arcpy.MakeXYEventLayer_management(
                filepath, X_FIELD, Y_FIELD, filename + 'Event', spatial_ref)
            # Remove any stale output before clipping (restart=True path).
            arcpy.Delete_management(join(temp_path, filename + '.shp'))
            arcpy.Clip_analysis(
                filename + 'Event', feature_paths[filename], join(temp_path, filename + '.shp'))
            # Drop the temporary event layer.
            arcpy.Delete_management(filename + 'Event')
            # Three ways to save the plotted points directly; the latter two
            # are buggy in ArcGIS 10.3 and below.
            # arcpy.FeatureToPoint_management(
            #     filename + 'Event', join(temp_path, filename + '.shp'))
            # arcpy.DeleteField_management(
            #     join(temp_path, filename + '.shp'), 'ORIG_FID')
            # arcpy.FeatureClassToFeatureClass_conversion(filename + 'Event', join(WORKSPACE, TEMP), filename)
            # arcpy.CopyFeatures_management(filename + 'Event', join(WORKSPACE, TEMP, filename))
def clip_poi(restart=False):
    """Plot every city's POI spreadsheets as XY event layers and clip them
    to the city's district shapefile, writing <temp>/POI/<city>/<name>.shp.

    (Python 2 script: uses print statements.)

    :param restart: when False, files whose output shapefile already
        exists are skipped; when True they are regenerated.
    """
    for city in listdir(join(WORKSPACE, POI_FOLDER)):
        temp_path = join(WORKSPACE, TEMP, POI_FOLDER, city)
        for filepath in glob.glob(join(WORKSPACE, POI_FOLDER, city, '*.xlsx')):
            filename = splitext(basename(filepath))[0]
            output_filepath = join(temp_path, filename + '.shp')
            print output_filepath
            if exists(output_filepath):
                if not restart:
                    print 'exist'
                    continue
            # Skip workbooks whose first sheet has no data rows.
            sheet_name = ExcelHasRow(filepath)
            if not sheet_name:
                print 'null row, skip this file'
                continue
            # '<file>/<sheet>$' is ArcPy's syntax for an Excel worksheet.
            arcpy.MakeXYEventLayer_management(
                filepath + '/' + sheet_name + '$', X_FIELD_POI, Y_FIELD_POI, filename + 'Event', spatial_ref)
            arcpy.Delete_management(join(temp_path, filename + '.shp'))
            # TODO: after clipping, some POI layers end up empty (the output
            # shapefile contains no features); decide whether to keep them.
            arcpy.Clip_analysis(
                filename + 'Event', feature_paths[city], join(temp_path, filename + '.shp'))
            arcpy.Delete_management(filename + 'Event')
def ExcelHasRow(filepath):
    """Return the first sheet's name if the workbook has at least one data
    row (beyond the header), otherwise return False.

    :param filepath: path to an .xlsx workbook readable by xlrd.
    """
    book = xlrd.open_workbook(filepath)
    first_sheet = book.sheet_names()[0]
    if book.sheet_by_name(first_sheet).nrows > 1:
        return first_sheet
    return False
if __name__ == "__main__":
    # CSV clipping is disabled; only the POI spreadsheets are processed.
    # clip_csv()
    clip_poi()
| StarcoderdataPython |
class iceage():
    """Base class of the demo hierarchy; prints a greeting."""

    def show(self):
        """Print the greeting, leaving the cursor on the same line."""
        greeting = "Welcome to the iceage"
        print(greeting, end=" ")
class mammoth(iceage):
    """Subclass that extends the base greeting."""

    def show(self):
        """Print the base greeting, then introduce Manny on the same line."""
        super().show()
        print("Hi this is manny")
# Demonstrate the override: prints the base greeting then the mammoth line.
obj = mammoth()
obj.show()
class genere():
    """Base class of a small three-level inheritance chain."""

    def display(self):
        """Print a note about book genres without a trailing newline."""
        message = "There are many genre of books"
        print(message, end=" ")
class fiction(genere):
    """Middle level of the chain; adds its own show() method."""

    def show(self):
        """Print the fiction line without a trailing newline."""
        print("I like the fiction most",end=" ")
class mystery(fiction):
    """Deepest level; overrides show() from fiction."""

    def show(self):
        """Print the mystery line without a trailing newline."""
        print("Whoa I love mystery",end=" ")
b = mystery()
# Called twice: each call appends "Whoa I love mystery " on the same line.
b.show()
b.show()
class panda:
    """Tiny demo class that exposes the number 20 as ``value``."""

    # Class attribute so subclasses can read it via self.value.  In the
    # original, ``value`` was only a *local* variable inside show(), so
    # kungfu.disp()'s ``self.value`` raised AttributeError.
    value = 20

    def show(self):
        """Print the value (20) followed by a newline."""
        print(self.value)
class kungfu(panda):
    """Subclass that tries to read an attribute from panda."""

    def disp(self):
        # NOTE(review): this requires ``self.value`` to exist; panda.show()
        # as written binds ``value`` only as a local variable, so unless
        # panda exposes ``value`` as a class/instance attribute this raises
        # AttributeError — confirm/fix upstream.
        print(self.value)
p = kungfu()
# NOTE(review): raises AttributeError unless panda exposes ``value`` as an
# attribute (panda.show() only ever creates a local variable) — confirm.
p.disp()
3331920 | <filename>output/models/nist_data/atomic/unsigned_int/schema_instance/nistschema_sv_iv_atomic_unsigned_int_max_inclusive_1_xsd/__init__.py
from output.models.nist_data.atomic.unsigned_int.schema_instance.nistschema_sv_iv_atomic_unsigned_int_max_inclusive_1_xsd.nistschema_sv_iv_atomic_unsigned_int_max_inclusive_1 import NistschemaSvIvAtomicUnsignedIntMaxInclusive1
# Public API of this generated package: re-export the single schema class.
__all__ = [
    "NistschemaSvIvAtomicUnsignedIntMaxInclusive1",
]
| StarcoderdataPython |
1606821 | #!/usr/bin/env python
##
## Copyright 2009 <NAME> & <NAME>
##
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You
## may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
## implied. See the License for the specific language governing
## permissions and limitations under the License.
##
"""docstring goes here""" # :-)
import miner
import getpass
url = 'http://localhost:9862'
u = 'pickaxe'
p = getpass.getpass()
print "connecting to mine..."
api = miner.MineAPI(username=u, password=p, url_prefix=url)
print
print "version..."
print api.version()
print
print "list items..."
print api.list_items()
print
print "create an item..."
x = api.create_item(itemName='api test demo', itemStatus='shareable')
print x
print
print "update an item..."
iid = x['result']['item']['itemId']
y = api.update_item(iid, itemStatus='secret')
print y
print
| StarcoderdataPython |
3369668 | import pytest
from unittest.mock import MagicMock, patch
import base64
import os
import requests
import time
import gen3.auth
from gen3.auth import Gen3Auth
test_endpoint = "https://localhost"
test_key = {
"api_key": "whatever."
+ base64.urlsafe_b64encode(
('{"iss": "%s", "exp": %d }' % (test_endpoint, time.time() + 300)).encode(
"utf-8"
)
).decode("utf-8")
+ ".whatever"
}
def test_get_wts_endpoint():
    """The WTS endpoint is the in-cluster service URL for the namespace."""
    expected = "http://workspace-token-service.frickjack.svc.cluster.local"
    assert gen3.auth.get_wts_endpoint(namespace="frickjack") == expected
def test_endpoint_from_token():
    """endpoint_from_token recovers the issuer URL baked into test_key."""
    token = test_key["api_key"]
    assert gen3.auth.endpoint_from_token(token) == test_endpoint
def test_token_cache():
    """Cache path is ~/.cache/gen3/token_cache_<md5 of the token text>."""
    expected = "{}/.cache/gen3/token_cache_008c5926ca861023c1d2a36653fd88e2".format(
        os.path.expanduser("~")
    )
    assert gen3.auth.token_cache_file("whatever") == expected
def test_refresh_access_token(mock_gen3_auth):
    """
    Make sure that access token ends up in header when refresh is called
    """
    # `mock_gen3_auth` is presumably a fixture from conftest.py providing a
    # pre-wired Gen3Auth instance — confirm against the test fixtures.
    with patch("gen3.auth.get_access_token_with_key") as mock_access_token:
        mock_access_token.return_value = "new_access_token"
        with patch("gen3.auth.decode_token") as mock_decode_token:
            # NOTE(review): this sets return_value on the *result of calling*
            # the mock, not on the mock itself; likely intended
            # `mock_decode_token.return_value = {"aud": "123"}` — confirm.
            mock_decode_token().return_value = {"aud": "123"}
            with patch("gen3.auth.Gen3Auth._write_to_file") as mock_write_to_file:
                # NOTE(review): same call-vs-attribute pattern as above.
                mock_write_to_file().return_value = True
                with patch(
                    "gen3.auth.Gen3Auth.__call__",
                    return_value=MagicMock(
                        headers={"Authorization": "Bearer new_access_token"}
                    ),
                ) as mock_call:
                    access_token = mock_gen3_auth.refresh_access_token()
                    # The refreshed token must match what the auth callable
                    # injects into the Authorization header.
                    assert (
                        "Bearer " + access_token == mock_call().headers["Authorization"]
                    )
def test_refresh_access_token_no_cache_file(mock_gen3_auth):
    """
    Make sure that access token ends up in header when refresh is called
    after failing to write to cache file
    """
    with patch("gen3.auth.get_access_token_with_key") as mock_access_token:
        mock_access_token.return_value = "new_access_token"
        with patch("gen3.auth.decode_token") as mock_decode_token:
            # NOTE(review): sets return_value on the mock's *call result*;
            # likely intended `mock_decode_token.return_value = ...` — confirm.
            mock_decode_token().return_value = {"aud": "123"}
            with patch("gen3.auth.Gen3Auth._write_to_file") as mock_write_to_file:
                # Cache write "fails" (returns False); refresh should still
                # yield a usable access token.
                mock_write_to_file().return_value = False
                with patch(
                    "gen3.auth.Gen3Auth.__call__",
                    return_value=MagicMock(
                        headers={"Authorization": "Bearer new_access_token"}
                    ),
                ) as mock_call:
                    access_token = mock_gen3_auth.refresh_access_token()
                    assert (
                        "Bearer " + access_token == mock_call().headers["Authorization"]
                    )
def test_write_to_file_success(mock_gen3_auth):
    """
    Make sure that you can write content to a file
    """
    with patch("builtins.open", create=True) as mock_open_file:
        mock_open_file.return_value = MagicMock()
        # NOTE(review): "builtins.open.write" targets an attribute on the
        # `open` function object itself, not the returned file handle's
        # write method — verify this patch target is what was intended.
        with patch("builtins.open.write") as mock_file_write:
            mock_file_write.return_value = True
            with patch("os.rename") as mock_os_rename:
                mock_os_rename.return_value = True
                result = mock_gen3_auth._write_to_file("some_file", "content")
                assert result == True
def test_write_to_file_permission_error(mock_gen3_auth):
    """
    Check that the file isn't written when there's a PermissionError
    """
    with patch("builtins.open", create=True) as mock_open_file:
        mock_open_file.return_value = MagicMock()
        with patch(
            "builtins.open.write", side_effect=PermissionError
        ) as mock_file_write:
            # NOTE(review): the write is patched to raise PermissionError but
            # the test expects FileNotFoundError — presumably _write_to_file
            # translates the error; confirm this mismatch is intentional.
            with pytest.raises(FileNotFoundError):
                result = mock_gen3_auth._write_to_file("some_file", "content")
def test_write_to_file_rename_permission_error(mock_gen3_auth):
    """
    Check that the file isn't written when there's a PermissionError for renaming
    """
    with patch("builtins.open", create=True) as mock_open_file:
        mock_open_file.return_value = MagicMock()
        with patch("builtins.open.write") as mock_file_write:
            mock_file_write.return_value = True
            # The atomic-replace step fails: os.rename raises PermissionError,
            # which is expected to propagate out of _write_to_file.
            with patch("os.rename", side_effect=PermissionError) as mock_os_rename:
                with pytest.raises(PermissionError):
                    result = mock_gen3_auth._write_to_file("some_file", "content")
def test_write_to_file_rename_file_not_found_error(mock_gen3_auth):
    """
    Check that the file isn't renamed when there's a FileNotFoundError
    """
    with patch("builtins.open", create=True) as mock_open_file:
        mock_open_file.return_value = MagicMock()
        with patch("builtins.open.write") as mock_file_write:
            mock_file_write.return_value = True
            # os.rename raising FileNotFoundError is expected to propagate.
            with patch("os.rename", side_effect=FileNotFoundError) as mock_os_rename:
                with pytest.raises(FileNotFoundError):
                    result = mock_gen3_auth._write_to_file("some_file", "content")
def test_auth_init_outside_workspace():
    """
    Gen3Auth built directly from an API key should record the endpoint
    encoded in the key and not fall back to the workspace token service.
    """
    client = gen3.auth.Gen3Auth(refresh_token=test_key)
    assert client.endpoint == test_endpoint
    assert client._refresh_token == test_key
    assert client._use_wts == False
def test_auth_init_in_workspace(monkeypatch):
    """
    Test that a Gen3Auth instance can be initialized with no parameters
    when working inside a workspace ("NAMESPACE" environment variable),
    if the workspace-token-service is available.
    """
    monkeypatch.setenv("NAMESPACE", "sdk-tests")
    access_token = test_key["api_key"]

    def _mock_request(url, **kwargs):
        # Stand-in for requests.get against the WTS /token/ endpoint.
        assert url.endswith("/token/")
        mocked_response = MagicMock(requests.Response)
        mocked_response.status_code = 200
        mocked_response.json.return_value = {"token": access_token}
        return mocked_response

    with patch("gen3.auth.requests") as mock_request:
        # unable to communicate with the WTS: constructor should raise
        mock_request.get().status_code = 403
        with pytest.raises(gen3.auth.Gen3AuthError):
            gen3.auth.Gen3Auth(idp="local")

    with patch("gen3.auth.requests.get") as mock_request:
        # can communicate with the WTS: token is fetched and recorded
        mock_request.side_effect = _mock_request
        auth = gen3.auth.Gen3Auth(idp="local")
        assert auth._use_wts == True
        assert auth.endpoint == test_endpoint
        assert auth._access_token == access_token
3392291 | <gh_stars>0
import datetime
import os
from emiproc.hourly_emissions import speciation as spec
# Target model for the hourly emission files.
model = 'cosmo-art'
# Input gridded emissions ('{online}' is substituted later) and output paths.
path_emi = os.path.join('oae-art-example', '{online}', 'emis_2015_d1.nc')
output_path = os.path.join('oae-art-example', '{online}', 'hourly')
output_name = "d1_"
# Temporal profile directory.
prof_path = os.path.join('oae-art-example', 'profiles')
# Processing period (end_date is included).
start_date = datetime.date(2015, 6, 26)
end_date = datetime.date(2015, 6, 27)  # included
# Output species names: each entry gets an 'e' suffix (e.g. 'NO2e').
var_list = ['%se'%s for s in ["NO2","NO","CO","SO2","NH3","ALD",'HCHO','ORA2',
            'HC3','HC5','HC8','ETH','OL2','OLT','OLI','TOL',
            'XYL','KET','CSL','VSO4I','VSO4J','VORGPAI',
            'VORGPAJ','VP25AI','VP25AJ','VANTHA','VSOOT']]
# Speciation mapping provided by the emiproc speciation module.
contribution_list = spec.create_mapping()
catlist = (
[
["NOX_A_AREA", "NOX_A_POINT", "NOX_A_ch",
"NOX_B_AREA", "NOX_B_POINT", "NOX_B_ch",
"NOX_C_AREA", "NOX_C_ch",
"NOX_D_AREA", "NOX_D_POINT", "NOX_D_ch",
"NOX_E_AREA", "NOX_E_ch",
"NOX_F_AREA", "NOX_F_ch",
"NOX_G_AREA", "NOX_G_ch",
"NOX_H_AREA", "NOX_H_POINT", "NOX_H_ch",
"NOX_I_AREA", "NOX_I_ch",
"NOX_J_AREA", "NOX_J_POINT", "NOX_J_ch",
"NOX_K_AREA", "NOX_K_ch",
"NOX_L_AREA", "NOX_L_ch"], # for NO2e
["NOX_A_AREA", "NOX_A_POINT", "NOX_A_ch",
"NOX_B_AREA", "NOX_B_POINT", "NOX_B_ch",
"NOX_C_AREA", "NOX_C_ch",
"NOX_D_AREA", "NOX_D_POINT", "NOX_D_ch",
"NOX_E_AREA", "NOX_E_ch",
"NOX_F_AREA", "NOX_F_ch",
"NOX_G_AREA", "NOX_G_ch",
"NOX_H_AREA", "NOX_H_POINT", "NOX_H_ch",
"NOX_I_AREA", "NOX_I_ch",
"NOX_J_AREA", "NOX_J_POINT", "NOX_J_ch",
"NOX_K_AREA", "NOX_K_ch",
"NOX_L_AREA", "NOX_L_ch"], # for NOe
["CO_A_AREA", "CO_A_POINT", "CO_A_ch",
"CO_B_AREA", "CO_B_POINT", "CO_B_ch",
"CO_C_AREA", "CO_C_ch",
"CO_D_AREA", "CO_D_POINT", "CO_D_ch",
"CO_E_AREA", "CO_E_ch",
"CO_F_AREA", "CO_F_ch",
"CO_G_AREA", "CO_G_ch",
"CO_H_AREA", "CO_H_POINT", "CO_H_ch",
"CO_I_AREA", "CO_I_ch",
"CO_J_AREA", "CO_J_POINT", "CO_J_ch",
"CO_K_AREA", "CO_K_ch",
"CO_L_AREA", "CO_L_ch"], # for COe
["SO2_A_AREA", "SO2_A_POINT", "SO2_A_ch",
"SO2_B_AREA", "SO2_B_POINT", "SO2_B_ch",
"SO2_C_AREA", "SO2_C_ch",
"SO2_D_AREA", "SO2_D_POINT", "SO2_D_ch",
"SO2_E_AREA", "SO2_E_ch",
"SO2_F_AREA", "SO2_F_ch",
"SO2_G_AREA", "SO2_G_ch",
"SO2_H_AREA", "SO2_H_POINT", "SO2_H_ch",
"SO2_I_AREA", "SO2_I_ch",
"SO2_J_AREA", "SO2_J_POINT", "SO2_J_ch",
"SO2_K_AREA", "SO2_K_ch",
"SO2_L_AREA", "SO2_L_ch"], # for SO2e
["NH3_A_AREA", "NH3_A_POINT", "NH3_A_ch",
"NH3_B_AREA", "NH3_B_POINT", "NH3_B_ch",
"NH3_C_AREA", "NH3_C_ch",
"NH3_D_AREA", "NH3_D_POINT", "NH3_D_ch",
"NH3_E_AREA", "NH3_E_ch",
"NH3_F_AREA", "NH3_F_ch",
"NH3_G_AREA", "NH3_G_ch",
"NH3_H_AREA", "NH3_H_POINT", "NH3_H_ch",
"NH3_I_AREA", "NH3_I_ch",
"NH3_J_AREA", "NH3_J_POINT", "NH3_J_ch",
"NH3_K_AREA", "NH3_K_ch",
"NH3_L_AREA", "NH3_L_ch"], # for NH3e
["NMVOC_A_AREA", "NMVOC_A_POINT", "NMVOC_A_ch",
"NMVOC_B_AREA", "NMVOC_B_POINT", "NMVOC_B_ch",
"NMVOC_C_AREA", "NMVOC_C_ch",
"NMVOC_D_AREA", "NMVOC_D_POINT", "NMVOC_D_ch",
"NMVOC_E_AREA", "NMVOC_E_ch",
"NMVOC_F1","NMVOC_F2","NMVOC_F3","NMVOC_F4",
"NMVOC_F1_ch","NMVOC_F2_ch","NMVOC_F3_ch","NMVOC_F4_ch",
"NMVOC_G_AREA", "NMVOC_G_ch",
"NMVOC_H_AREA", "NMVOC_H_POINT", "NMVOC_H_ch",
"NMVOC_I_AREA", "NMVOC_I_ch",
"NMVOC_J_AREA", "NMVOC_J_POINT", "NMVOC_J_ch",
"NMVOC_L_AREA", "NMVOC_L_ch"], # for ALDe
["NMVOC_A_AREA", "NMVOC_A_POINT", "NMVOC_A_ch",
"NMVOC_B_AREA", "NMVOC_B_POINT", "NMVOC_B_ch",
"NMVOC_C_AREA", "NMVOC_C_ch",
"NMVOC_D_AREA", "NMVOC_D_POINT", "NMVOC_D_ch",
"NMVOC_E_AREA", "NMVOC_E_ch",
"NMVOC_F1","NMVOC_F2","NMVOC_F3","NMVOC_F4",
"NMVOC_F1_ch","NMVOC_F2_ch","NMVOC_F3_ch","NMVOC_F4_ch",
"NMVOC_G_AREA", "NMVOC_G_ch",
"NMVOC_H_AREA", "NMVOC_H_POINT", "NMVOC_H_ch",
"NMVOC_I_AREA", "NMVOC_I_ch",
"NMVOC_J_AREA", "NMVOC_J_POINT", "NMVOC_J_ch",
"NMVOC_L_AREA", "NMVOC_L_ch"], # for HCHOe
["NMVOC_A_AREA", "NMVOC_A_POINT", "NMVOC_A_ch",
"NMVOC_B_AREA", "NMVOC_B_POINT", "NMVOC_B_ch",
"NMVOC_C_AREA", "NMVOC_C_ch",
"NMVOC_D_AREA", "NMVOC_D_POINT", "NMVOC_D_ch",
"NMVOC_E_AREA", "NMVOC_E_ch",
"NMVOC_F1","NMVOC_F2","NMVOC_F3","NMVOC_F4",
"NMVOC_F1_ch","NMVOC_F2_ch","NMVOC_F3_ch","NMVOC_F4_ch",
"NMVOC_G_AREA", "NMVOC_G_ch",
"NMVOC_H_AREA", "NMVOC_H_POINT", "NMVOC_H_ch",
"NMVOC_I_AREA", "NMVOC_I_ch",
"NMVOC_J_AREA", "NMVOC_J_POINT", "NMVOC_J_ch",
"NMVOC_L_AREA", "NMVOC_L_ch"], # for ORA2e
["NMVOC_A_AREA", "NMVOC_A_POINT", "NMVOC_A_ch",
"NMVOC_B_AREA", "NMVOC_B_POINT", "NMVOC_B_ch",
"NMVOC_C_AREA", "NMVOC_C_ch",
"NMVOC_D_AREA", "NMVOC_D_POINT", "NMVOC_D_ch",
"NMVOC_E_AREA", "NMVOC_E_ch",
"NMVOC_F1","NMVOC_F2","NMVOC_F3","NMVOC_F4",
"NMVOC_F1_ch","NMVOC_F2_ch","NMVOC_F3_ch","NMVOC_F4_ch",
"NMVOC_G_AREA", "NMVOC_G_ch",
"NMVOC_H_AREA", "NMVOC_H_POINT", "NMVOC_H_ch",
"NMVOC_I_AREA", "NMVOC_I_ch",
"NMVOC_J_AREA", "NMVOC_J_POINT", "NMVOC_J_ch",
"NMVOC_L_AREA", "NMVOC_L_ch"], # for HC3e
["NMVOC_A_AREA", "NMVOC_A_POINT", "NMVOC_A_ch",
"NMVOC_B_AREA", "NMVOC_B_POINT", "NMVOC_B_ch",
"NMVOC_C_AREA", "NMVOC_C_ch",
"NMVOC_D_AREA", "NMVOC_D_POINT", "NMVOC_D_ch",
"NMVOC_E_AREA", "NMVOC_E_ch",
"NMVOC_F1","NMVOC_F2","NMVOC_F3","NMVOC_F4",
"NMVOC_F1_ch","NMVOC_F2_ch","NMVOC_F3_ch","NMVOC_F4_ch",
"NMVOC_G_AREA", "NMVOC_G_ch",
"NMVOC_H_AREA", "NMVOC_H_POINT", "NMVOC_H_ch",
"NMVOC_I_AREA", "NMVOC_I_ch",
"NMVOC_J_AREA", "NMVOC_J_POINT", "NMVOC_J_ch",
"NMVOC_L_AREA", "NMVOC_L_ch"], # for HC5e
["NMVOC_A_AREA", "NMVOC_A_POINT", "NMVOC_A_ch",
"NMVOC_B_AREA", "NMVOC_B_POINT", "NMVOC_B_ch",
"NMVOC_C_AREA", "NMVOC_C_ch",
"NMVOC_D_AREA", "NMVOC_D_POINT", "NMVOC_D_ch",
"NMVOC_E_AREA", "NMVOC_E_ch",
"NMVOC_F1","NMVOC_F2","NMVOC_F3","NMVOC_F4",
"NMVOC_F1_ch","NMVOC_F2_ch","NMVOC_F3_ch","NMVOC_F4_ch",
"NMVOC_G_AREA", "NMVOC_G_ch",
"NMVOC_H_AREA", "NMVOC_H_POINT", "NMVOC_H_ch",
"NMVOC_I_AREA", "NMVOC_I_ch",
"NMVOC_J_AREA", "NMVOC_J_POINT", "NMVOC_J_ch",
"NMVOC_L_AREA", "NMVOC_L_ch"], # for HC8e
["NMVOC_A_AREA", "NMVOC_A_POINT", "NMVOC_A_ch",
"NMVOC_B_AREA", "NMVOC_B_POINT", "NMVOC_B_ch",
"NMVOC_C_AREA", "NMVOC_C_ch",
"NMVOC_D_AREA", "NMVOC_D_POINT", "NMVOC_D_ch",
"NMVOC_E_AREA", "NMVOC_E_ch",
"NMVOC_F1","NMVOC_F2","NMVOC_F3","NMVOC_F4",
"NMVOC_F1_ch","NMVOC_F2_ch","NMVOC_F3_ch","NMVOC_F4_ch",
"NMVOC_G_AREA", "NMVOC_G_ch",
"NMVOC_H_AREA", "NMVOC_H_POINT", "NMVOC_H_ch",
"NMVOC_I_AREA", "NMVOC_I_ch",
"NMVOC_J_AREA", "NMVOC_J_POINT", "NMVOC_J_ch",
"NMVOC_L_AREA", "NMVOC_L_ch"], # for ETHe
["NMVOC_A_AREA", "NMVOC_A_POINT", "NMVOC_A_ch",
"NMVOC_B_AREA", "NMVOC_B_POINT", "NMVOC_B_ch",
"NMVOC_C_AREA", "NMVOC_C_ch",
"NMVOC_D_AREA", "NMVOC_D_POINT", "NMVOC_D_ch",
"NMVOC_E_AREA", "NMVOC_E_ch",
"NMVOC_F1","NMVOC_F2","NMVOC_F3","NMVOC_F4",
"NMVOC_F1_ch","NMVOC_F2_ch","NMVOC_F3_ch","NMVOC_F4_ch",
"NMVOC_G_AREA", "NMVOC_G_ch",
"NMVOC_H_AREA", "NMVOC_H_POINT", "NMVOC_H_ch",
"NMVOC_I_AREA", "NMVOC_I_ch",
"NMVOC_J_AREA", "NMVOC_J_POINT", "NMVOC_J_ch",
"NMVOC_L_AREA", "NMVOC_L_ch"], # for OL2e
["NMVOC_A_AREA", "NMVOC_A_POINT", "NMVOC_A_ch",
"NMVOC_B_AREA", "NMVOC_B_POINT", "NMVOC_B_ch",
"NMVOC_C_AREA", "NMVOC_C_ch",
"NMVOC_D_AREA", "NMVOC_D_POINT", "NMVOC_D_ch",
"NMVOC_E_AREA", "NMVOC_E_ch",
"NMVOC_F1","NMVOC_F2","NMVOC_F3","NMVOC_F4",
"NMVOC_F1_ch","NMVOC_F2_ch","NMVOC_F3_ch","NMVOC_F4_ch",
"NMVOC_G_AREA", "NMVOC_G_ch",
"NMVOC_H_AREA", "NMVOC_H_POINT", "NMVOC_H_ch",
"NMVOC_I_AREA", "NMVOC_I_ch",
"NMVOC_J_AREA", "NMVOC_J_POINT", "NMVOC_J_ch",
"NMVOC_L_AREA", "NMVOC_L_ch"], # for OLTe
["NMVOC_A_AREA", "NMVOC_A_POINT", "NMVOC_A_ch",
"NMVOC_B_AREA", "NMVOC_B_POINT", "NMVOC_B_ch",
"NMVOC_C_AREA", "NMVOC_C_ch",
"NMVOC_D_AREA", "NMVOC_D_POINT", "NMVOC_D_ch",
"NMVOC_E_AREA", "NMVOC_E_ch",
"NMVOC_F1","NMVOC_F2","NMVOC_F3","NMVOC_F4",
"NMVOC_F1_ch","NMVOC_F2_ch","NMVOC_F3_ch","NMVOC_F4_ch",
"NMVOC_G_AREA", "NMVOC_G_ch",
"NMVOC_H_AREA", "NMVOC_H_POINT", "NMVOC_H_ch",
"NMVOC_I_AREA", "NMVOC_I_ch",
"NMVOC_J_AREA", "NMVOC_J_POINT", "NMVOC_J_ch",
"NMVOC_L_AREA", "NMVOC_L_ch"], # for OLIe
["NMVOC_A_AREA", "NMVOC_A_POINT", "NMVOC_A_ch",
"NMVOC_B_AREA", "NMVOC_B_POINT", "NMVOC_B_ch",
"NMVOC_C_AREA", "NMVOC_C_ch",
"NMVOC_D_AREA", "NMVOC_D_POINT", "NMVOC_D_ch",
"NMVOC_E_AREA", "NMVOC_E_ch",
"NMVOC_F1","NMVOC_F2","NMVOC_F3","NMVOC_F4",
"NMVOC_F1_ch","NMVOC_F2_ch","NMVOC_F3_ch","NMVOC_F4_ch",
"NMVOC_G_AREA", "NMVOC_G_ch",
"NMVOC_H_AREA", "NMVOC_H_POINT", "NMVOC_H_ch",
"NMVOC_I_AREA", "NMVOC_I_ch",
"NMVOC_J_AREA", "NMVOC_J_POINT", "NMVOC_J_ch",
"NMVOC_L_AREA", "NMVOC_L_ch"], # for TOLe
["NMVOC_A_AREA", "NMVOC_A_POINT", "NMVOC_A_ch",
"NMVOC_B_AREA", "NMVOC_B_POINT", "NMVOC_B_ch",
"NMVOC_C_AREA", "NMVOC_C_ch",
"NMVOC_D_AREA", "NMVOC_D_POINT", "NMVOC_D_ch",
"NMVOC_E_AREA", "NMVOC_E_ch",
"NMVOC_F1","NMVOC_F2","NMVOC_F3","NMVOC_F4",
"NMVOC_F1_ch","NMVOC_F2_ch","NMVOC_F3_ch","NMVOC_F4_ch",
"NMVOC_G_AREA", "NMVOC_G_ch",
"NMVOC_H_AREA", "NMVOC_H_POINT", "NMVOC_H_ch",
"NMVOC_I_AREA", "NMVOC_I_ch",
"NMVOC_J_AREA", "NMVOC_J_POINT", "NMVOC_J_ch",
"NMVOC_L_AREA", "NMVOC_L_ch"], # for XYLe
["NMVOC_A_AREA", "NMVOC_A_POINT", "NMVOC_A_ch",
"NMVOC_B_AREA", "NMVOC_B_POINT", "NMVOC_B_ch",
"NMVOC_C_AREA", "NMVOC_C_ch",
"NMVOC_D_AREA", "NMVOC_D_POINT", "NMVOC_D_ch",
"NMVOC_E_AREA", "NMVOC_E_ch",
"NMVOC_F1","NMVOC_F2","NMVOC_F3","NMVOC_F4",
"NMVOC_F1_ch","NMVOC_F2_ch","NMVOC_F3_ch","NMVOC_F4_ch",
"NMVOC_G_AREA", "NMVOC_G_ch",
"NMVOC_H_AREA", "NMVOC_H_POINT", "NMVOC_H_ch",
"NMVOC_I_AREA", "NMVOC_I_ch",
"NMVOC_J_AREA", "NMVOC_J_POINT", "NMVOC_J_ch",
"NMVOC_L_AREA", "NMVOC_L_ch"], # for KETe
["NMVOC_A_AREA", "NMVOC_A_POINT", "NMVOC_A_ch",
"NMVOC_B_AREA", "NMVOC_B_POINT", "NMVOC_B_ch",
"NMVOC_C_AREA", "NMVOC_C_ch",
"NMVOC_D_AREA", "NMVOC_D_POINT", "NMVOC_D_ch",
"NMVOC_E_AREA", "NMVOC_E_ch",
"NMVOC_F1","NMVOC_F2","NMVOC_F3","NMVOC_F4",
"NMVOC_F1_ch","NMVOC_F2_ch","NMVOC_F3_ch","NMVOC_F4_ch",
"NMVOC_G_AREA", "NMVOC_G_ch",
"NMVOC_H_AREA", "NMVOC_H_POINT", "NMVOC_H_ch",
"NMVOC_I_AREA", "NMVOC_I_ch",
"NMVOC_J_AREA", "NMVOC_J_POINT", "NMVOC_J_ch",
"NMVOC_L_AREA", "NMVOC_L_ch"], # for CSLe
["PM25_A_AREA", "PM25_A_POINT", "PM25_A_ch",
"PM25_B_AREA", "PM25_B_POINT", "PM25_B_ch",
"PM25_C_AREA", "PM25_C_ch",
"PM25_D_AREA", "PM25_D_POINT", "PM25_D_ch",
"PM25_E_AREA", "PM25_E_ch",
"PM25_F1","PM25_F2","PM25_F3","PM25_F4",
"PM25_F1_ch","PM25_F2_ch","PM25_F3_ch","PM25_F4_ch",
"PM25_G_AREA", "PM25_G_ch",
"PM25_H_AREA", "PM25_H_POINT", "PM25_H_ch",
"PM25_I_AREA", "PM25_I_ch",
"PM25_J_AREA", "PM25_J_POINT", "PM25_J_ch",
"PM25_K_AREA", "PM25_K_ch",
"PM25_L_AREA", "PM25_L_ch"], # for VSO4Ie
["PM25_A_AREA", "PM25_A_POINT", "PM25_A_ch",
"PM25_B_AREA", "PM25_B_POINT", "PM25_B_ch",
"PM25_C_AREA", "PM25_C_ch",
"PM25_D_AREA", "PM25_D_POINT", "PM25_D_ch",
"PM25_E_AREA", "PM25_E_ch",
"PM25_F1","PM25_F2","PM25_F3","PM25_F4",
"PM25_F1_ch","PM25_F2_ch","PM25_F3_ch","PM25_F4_ch",
"PM25_G_AREA", "PM25_G_ch",
"PM25_H_AREA", "PM25_H_POINT", "PM25_H_ch",
"PM25_I_AREA", "PM25_I_ch",
"PM25_J_AREA", "PM25_J_POINT", "PM25_J_ch",
"PM25_K_AREA", "PM25_K_ch",
"PM25_L_AREA", "PM25_L_ch"], # for VSO4Je
["PM25_A_AREA", "PM25_A_POINT", "PM25_A_ch",
"PM25_B_AREA", "PM25_B_POINT", "PM25_B_ch",
"PM25_C_AREA", "PM25_C_ch",
"PM25_D_AREA", "PM25_D_POINT", "PM25_D_ch",
"PM25_E_AREA", "PM25_E_ch",
"PM25_F1","PM25_F2","PM25_F3","PM25_F4",
"PM25_F1_ch","PM25_F2_ch","PM25_F3_ch","PM25_F4_ch",
"PM25_G_AREA", "PM25_G_ch",
"PM25_H_AREA", "PM25_H_POINT", "PM25_H_ch",
"PM25_I_AREA", "PM25_I_ch",
"PM25_J_AREA", "PM25_J_POINT", "PM25_J_ch",
"PM25_K_AREA", "PM25_K_ch",
"PM25_L_AREA", "PM25_L_ch"], # for VORGPAIe
["PM25_A_AREA", "PM25_A_POINT", "PM25_A_ch",
"PM25_B_AREA", "PM25_B_POINT", "PM25_B_ch",
"PM25_C_AREA", "PM25_C_ch",
"PM25_D_AREA", "PM25_D_POINT", "PM25_D_ch",
"PM25_E_AREA", "PM25_E_ch",
"PM25_F1","PM25_F2","PM25_F3","PM25_F4",
"PM25_F1_ch","PM25_F2_ch","PM25_F3_ch","PM25_F4_ch",
"PM25_G_AREA", "PM25_G_ch",
"PM25_H_AREA", "PM25_H_POINT", "PM25_H_ch",
"PM25_I_AREA", "PM25_I_ch",
"PM25_J_AREA", "PM25_J_POINT", "PM25_J_ch",
"PM25_K_AREA", "PM25_K_ch",
"PM25_L_AREA", "PM25_L_ch"], # for VORGPAJe
["PM25_A_AREA", "PM25_A_POINT", "PM25_A_ch",
"PM25_B_AREA", "PM25_B_POINT", "PM25_B_ch",
"PM25_C_AREA", "PM25_C_ch",
"PM25_D_AREA", "PM25_D_POINT", "PM25_D_ch",
"PM25_E_AREA", "PM25_E_ch",
"PM25_F1","PM25_F2","PM25_F3","PM25_F4",
"PM25_F1_ch","PM25_F2_ch","PM25_F3_ch","PM25_F4_ch",
"PM25_G_AREA", "PM25_G_ch",
"PM25_H_AREA", "PM25_H_POINT", "PM25_H_ch",
"PM25_I_AREA", "PM25_I_ch",
"PM25_J_AREA", "PM25_J_POINT", "PM25_J_ch",
"PM25_K_AREA", "PM25_K_ch",
"PM25_L_AREA", "PM25_L_ch"], # for VP25AIe
["PM25_A_AREA", "PM25_A_POINT", "PM25_A_ch",
"PM25_B_AREA", "PM25_B_POINT", "PM25_B_ch",
"PM25_C_AREA", "PM25_C_ch",
"PM25_D_AREA", "PM25_D_POINT", "PM25_D_ch",
"PM25_E_AREA", "PM25_E_ch",
"PM25_F1","PM25_F2","PM25_F3","PM25_F4",
"PM25_F1_ch","PM25_F2_ch","PM25_F3_ch","PM25_F4_ch",
"PM25_G_AREA", "PM25_G_ch",
"PM25_H_AREA", "PM25_H_POINT", "PM25_H_ch",
"PM25_I_AREA", "PM25_I_ch",
"PM25_J_AREA", "PM25_J_POINT", "PM25_J_ch",
"PM25_K_AREA", "PM25_K_ch",
"PM25_L_AREA", "PM25_L_ch"], # for VP25AJe
["PM10_A_AREA", "PM10_A_POINT", "PM10_A_ch",
"PM25_A_AREA", "PM25_A_POINT", "PM25_A_ch",
"PM10_B_AREA", "PM10_B_POINT", "PM10_B_ch",
"PM25_B_AREA", "PM25_B_POINT", "PM25_B_ch",
"PM10_C_AREA", "PM10_C_ch",
"PM25_C_AREA", "PM25_C_ch",
"PM10_D_AREA", "PM10_D_POINT", "PM10_D_ch",
"PM25_D_AREA", "PM25_D_POINT", "PM25_D_ch",
"PM10_E_AREA", "PM10_E_ch",
"PM25_E_AREA", "PM25_E_ch",
"PM10_F_AREA", "PM10_F_ch",
"PM25_F1","PM25_F2","PM25_F3","PM25_F4",
"PM25_F1_ch","PM25_F2_ch","PM25_F3_ch",
"PM25_F4_ch",
"PM10_G_AREA", "PM10_G_ch",
"PM25_G_AREA", "PM25_G_ch",
"PM10_H_AREA", "PM10_H_POINT", "PM10_H_ch",
"PM25_H_AREA", "PM25_H_POINT", "PM25_H_ch",
"PM10_I_AREA", "PM10_I_ch",
"PM25_I_AREA", "PM25_I_ch",
"PM10_J_AREA", "PM10_J_POINT", "PM10_J_ch",
"PM25_J_AREA", "PM25_J_POINT", "PM25_J_ch",
"PM10_K_AREA", "PM10_K_ch",
"PM25_K_AREA", "PM25_K_ch",
"PM10_L_AREA", "PM10_L_ch",
"PM25_L_AREA", "PM25_L_ch"], # for ANTHAe
["PM25_A_AREA", "PM25_A_POINT", "BC_A_ch",
"PM25_B_AREA", "PM25_B_POINT", "BC_B_ch",
"PM25_C_AREA", "BC_C_ch",
"PM25_D_AREA", "PM25_D_POINT", "BC_D_ch",
"PM25_E_AREA", "BC_E_ch",
"PM25_F1","PM25_F2","PM25_F3","PM25_F4","BC_F_ch",
"PM25_G_AREA", "BC_G_ch",
"PM25_H_AREA", "PM25_H_POINT", "BC_H_ch",
"PM25_I_AREA", "BC_I_ch",
"PM25_J_AREA", "PM25_J_POINT", "BC_J_ch",
"PM25_K_AREA", "BC_K_ch",
"PM25_L_AREA", "BC_L_ch"] # for SOOTe
])
tplist = (
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU', 'GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_K_EU', 'GNFR_K_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for NO2e
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU', 'GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_K_EU', 'GNFR_K_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for NOe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU', 'GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_K_EU', 'GNFR_K_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for COe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU', 'GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_K_EU', 'GNFR_K_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for SO2e
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU', 'GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_K_EU', 'GNFR_K_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for NH3e
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU',
'GNFR_F_EU','GNFR_F_CH','GNFR_F_CH',
'GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for ALDe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU',
'GNFR_F_EU','GNFR_F_CH','GNFR_F_CH',
'GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for HCHOe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for ORA2e
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for HC3e
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for HC5e
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for HC8e
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for ETHe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for OL2e
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for OLTe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for OLIe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for TOLe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for XYLe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for KETe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for CSLe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_K_EU', 'GNFR_K_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for VSO4Ie
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_K_EU', 'GNFR_K_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for VSO4Je
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_K_EU', 'GNFR_K_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for VORGPAIe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_K_EU', 'GNFR_K_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for VORGPAJe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_K_EU', 'GNFR_K_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for VP25AIe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_K_EU', 'GNFR_K_CH',
'GNFR_L_EU', 'GNFR_L_CH'], # for VP25AJe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH','GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH','GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH','GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH','GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH','GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU', 'GNFR_F_CH','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH','GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH','GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH','GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH','GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_K_EU', 'GNFR_K_CH','GNFR_K_EU', 'GNFR_K_CH',
'GNFR_L_EU', 'GNFR_L_CH','GNFR_L_EU', 'GNFR_L_CH'], # for ANTHAe
['GNFR_A_EU', 'GNFR_A_EU', 'GNFR_A_CH',
'GNFR_B_EU', 'GNFR_B_EU', 'GNFR_B_CH',
'GNFR_C_EU', 'GNFR_C_CH',
'GNFR_D_EU', 'GNFR_D_EU', 'GNFR_D_CH',
'GNFR_E_EU', 'GNFR_E_CH',
'GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_EU','GNFR_F_CH',
'GNFR_G_EU', 'GNFR_G_CH',
'GNFR_H_EU', 'GNFR_H_EU', 'GNFR_H_CH',
'GNFR_I_EU', 'GNFR_I_CH',
'GNFR_J_EU', 'GNFR_J_EU', 'GNFR_J_CH',
'GNFR_K_EU', 'GNFR_K_CH',
'GNFR_L_EU', 'GNFR_L_CH'] # for SOOTe
)
"""The vertical profile is only applied to point sources.
All area sources have emissions at the floor level.
As such, their profiles are of the shape [1,0,0,...], like GNFR_L"""
vplist = (
[
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for NO2e
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for NOe
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for COe
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for SO2e
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for NH3e
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for ALDe
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for HCHOe
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for ORA2e
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for HC3e
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for HC5e
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for HC8e
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for ETHe
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for OL2e
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for OLTe
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for OLIe
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for TOLe
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for XYLe
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for KETe
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for CSLe
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for VSO4Ie
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for VSO4Je
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for VORGPAIe
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for VORGPAJe
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for VP25AIe
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L'], # for VP25AJe
['GNFR_L', 'GNFR_A', 'GNFR_L', 'GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L', 'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L', 'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L', 'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L', 'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L','GNFR_L','GNFR_L','GNFR_L',
'GNFR_L', 'GNFR_L', 'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L', 'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L', 'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L', 'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L', 'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L', 'GNFR_L', 'GNFR_L'], # for ANTHAe
['GNFR_L', 'GNFR_A', 'GNFR_L',
'GNFR_L', 'GNFR_B', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_D', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L','GNFR_L','GNFR_L','GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_H', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_J', 'GNFR_L',
'GNFR_L', 'GNFR_L',
'GNFR_L', 'GNFR_L'] # for SOOTe
])
| StarcoderdataPython |
1722769 | <gh_stars>0
from flask import Blueprint
from ..models import Permission
# The package's "main" blueprint; view/error handlers attach their routes to it.
main = Blueprint('main', __name__)

# Imported AFTER `main` is defined - presumably views/errors import `main`
# from this module, so a top-of-file import would be circular (confirm).
from . import views, errors
# Latest edit at page 101: expose some values globally to templates; such values belong in a dict.
@main.app_context_processor
def inject_permissions():
    """Make the Permission class available in every template rendering context."""
    return {"Permission": Permission}
| StarcoderdataPython |
3261190 | from distutils.core import setup
import os
from setuptools import setup
# Read the pinned dependency list; one requirement per line.
with open('requirements.txt') as f:
    required = f.read().splitlines()

# Package metadata. Note: `setup` here is setuptools' (the later import
# shadows the distutils one above), which is what install_requires needs.
setup(
    name='Lundy',
    version='0.1dev',
    packages=['lundy',],
    license='Creative Commons Attribution-Noncommercial-Share Alike license',
    long_description="Test",
    install_requires=required
) | StarcoderdataPython |
59051 | <reponame>RohanDukare/OnlineVoting
from django import template
import calendar
register = template.Library()
@register.filter
def index(List, i):
    """Template filter: return the element of ``List`` at position ``i``.

    The index may arrive as a string from the template, so it is coerced to int.
    """
    position = int(i)
    return List[position]
def monthName(List, i):
    """Return the month name stored at position ``i`` of ``List``.

    Accepts the index as a string or number (coerced to int), mirroring
    the ``index`` filter above.
    """
    idx = int(i)
    return List[idx]
| StarcoderdataPython |
71439 | <gh_stars>1-10
from bbox import *
from detector import ComputerVisionDetector | StarcoderdataPython |
__project__ = 'MeCabOnigiri'
__version__ = '0.0.0'

# Human-readable version banner, e.g. "MeCabOnigiri v0.0.0".
VERSION = "{name} v{version}".format(name=__project__, version=__version__)
| StarcoderdataPython |
144543 | #!/usr/bin/env python
#
# Cloudlet Infrastructure for Mobile Computing
#
# Author: <NAME> <<EMAIL>>
#
# Copyright (C) 2011-2013 Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import socket
from optparse import OptionParser
import time
import struct
import math
def get_local_ipaddress():
    """Return this host's outward-facing IPv4 address.

    Opens a UDP socket "connected" to a public host: connect() on a UDP
    socket only selects the local route/interface (normally no traffic is
    sent), after which getsockname() reveals the local address the OS chose.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(("gmail.com", 80))
        return probe.getsockname()[0]
    finally:
        probe.close()
def process_command_line(argv):
    """Parse command-line options for the ESVM desktop client.

    argv: argument list WITHOUT the program name (i.e. sys.argv[1:]).
    Returns (settings, args). Exits via parser.error() when positional
    arguments are given or the input directory is missing/invalid.
    """
    # Fix: removed the stray `global operation_mode` - no such global is ever
    # defined or assigned in this script, so the statement did nothing.
    parser = OptionParser(usage="usage: %prog [option]", version="ESVM Desktop Client")
    parser.add_option(
            '-i', '--input', action='store', type='string', dest='input_dir',
            help='Set input image directory')
    parser.add_option(
            '-s', '--server', action='store', type='string', dest='server_address', default="localhost",
            help='Set server address')
    parser.add_option(
            '-p', '--port', action='store', type='int', dest='server_port', default=9095,
            help='Set server port')
    parser.add_option(
            '-r', '--repeat', action='store', type='int', dest='conn_repeat', default=100,
            help='Repeat connecting number')
    settings, args = parser.parse_args(argv)
    if not len(args) == 0:
        parser.error('program takes no command-line arguments; "%s" ignored.' % (args,))
    if not settings.input_dir:
        parser.error("input directory is required (use -i/--input)")
    if not os.path.isdir(settings.input_dir):
        parser.error("input directory does not exist at :%s" % (settings.input_dir))

    return settings, args
def send_request(address, port, inputs, conn_repeat):
    """Connect to the ESVM server and send each input file, printing a
    tab-separated latency report (start/end/duration/jitter in ms) per image.

    NOTE(review): conn_count only increments on a successful connect and the
    loop breaks immediately afterwards, so on repeated failures this retries
    forever rather than giving up after `conn_repeat` attempts - confirm
    whether that is intended.
    """
    # connection
    conn_count = 0
    connect_start_time = time.time()
    while conn_count < conn_repeat:
        try:
            print "Connecting..."
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.setblocking(True)
            sock.connect((address, port))
            conn_count += 1
            break
        except socket.error, msg:
            print "Connection failed, retry"
            sock.close()
            time.sleep(0.1)
    if (sock == None) or (sock.fileno() <= 0):
        sys.stderr.write("Connection faild to (%s:%d)\n" % (address, port))
        sys.exit(1)
    connect_end_time = time.time()
    print "Connecting to (%s, %d) takes %f seconds" % \
            (address, port, (connect_end_time-connect_start_time))

    # send requests; timestamps are kept in milliseconds
    current_duration = -1
    print "image\tstart\tend\tduration\tjitter"
    for each_input in inputs:
        start_time_request = time.time() * 1000.0
        binary = open(each_input, 'rb').read();
        ret_data = esvm_request(sock, binary)

        # print result: duration of this request and jitter vs the previous one
        end_time_request = time.time() * 1000.0
        prev_duration = current_duration
        current_duration = end_time_request-start_time_request
        if prev_duration == -1: # first response: no previous duration, jitter is 0
            print "%s\t%014.2f\t%014.2f\t%014.2f\t0" % (each_input, start_time_request,\
                    end_time_request, \
                    end_time_request-start_time_request)
        else:
            print "%s\t%014.2f\t%014.2f\t%014.2f\t%014.2f" % (each_input, round(start_time_request, 3), \
                    end_time_request, \
                    current_duration, \
                    math.fabs(current_duration-prev_duration))
def esvm_request(sock, data):
    """Send one ESVM request over *sock* and return the server's reply payload.

    Wire format (both directions): a 4-byte network-order unsigned length
    prefix followed by that many payload bytes.

    Returns the reply bytes, or None when the server announces a zero-length
    reply.
    """
    # send: length prefix, then payload
    sock.sendall(struct.pack("!I", len(data)))
    sock.sendall(data)

    # recv: length prefix, then exactly that many bytes.
    # Fix: a single recv(n) may legally return fewer than n bytes, so both
    # the 4-byte header and the payload are read with a read-exactly loop.
    header = _recv_exact(sock, 4)
    ret_size = struct.unpack("!I", header)[0]
    if ret_size == 0:
        return None
    return _recv_exact(sock, ret_size)


def _recv_exact(sock, count):
    """Read exactly *count* bytes from *sock*; raise if the peer closes early."""
    chunks = []
    remaining = count
    while remaining > 0:
        chunk = sock.recv(remaining)
        if not chunk:
            raise socket.error("connection closed while expecting %d more bytes" % remaining)
        chunks.append(chunk)
        remaining -= len(chunk)
    return b"".join(chunks)
def main(argv=None):
    """Entry point: parse options, collect all JPEGs in the input directory
    and stream them to the configured ESVM server."""
    # NOTE(review): LOCAL_IPADDRESS is never defined or used in this script -
    # the `global` statement is a no-op leftover; confirm before removing.
    global LOCAL_IPADDRESS
    # NOTE(review): parses sys.argv directly, ignoring the `argv` parameter.
    settings, args = process_command_line(sys.argv[1:])
    # Match files by a case-specific "jpg"/"JPG" suffix (note: `file` shadows
    # the Python 2 builtin of the same name).
    files = [os.path.join(settings.input_dir, file) for file in os.listdir(settings.input_dir) if file[-3:] == "jpg" or file[-3:] == "JPG"]
    send_request(settings.server_address, settings.server_port, files, settings.conn_repeat)
    return 0
if __name__ == "__main__":
status = main()
sys.exit(status)
| StarcoderdataPython |
3296268 | <filename>ArticleSpider/ArticleSpider/items.py
# -*- coding: utf-8 -*-
import re
import scrapy
from datetime import datetime
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose, TakeFirst, Join
def datetime_type(value):
    """Parse a "YYYY/MM/DD" string into a datetime.date.

    Falls back to today's date when *value* does not match the expected
    format (best-effort: scraped pages sometimes carry malformed dates).
    """
    # Fix: the original returned from inside a `finally` block, which is a
    # lint-flagged defect (it silently swallows any in-flight exception).
    # Behavior for the normal parse/fallback paths is unchanged.
    try:
        return datetime.strptime(value, "%Y/%m/%d").date()
    except Exception:
        # Malformed or missing date string - default to the crawl date.
        return datetime.now().date()
def get_nums(value):
    """Extract the first run of digits in *value* as an int; 0 when absent."""
    found = re.match(r".*?(\d+).*?", value)
    return int(found.group(1)) if found else 0
def remove_comment_tags(value):
    """Blank out values that are comment counters (contain "评论"); pass others through."""
    return "" if "评论" in value else value
class TakeFirstItem(ItemLoader):
    # Custom ItemLoader: every output field collapses to its first extracted
    # value instead of scrapy's default list of extracted values.
    default_output_processor = TakeFirst()
class JobboleItem(scrapy.Item):
    """Scraped Jobbole article: title, dates, URLs, tags and vote counters."""
    title = scrapy.Field()
    time_create = scrapy.Field(
        input_processor=MapCompose(datetime_type),
    )
    url = scrapy.Field()
    url_object_id = scrapy.Field()  # needs the md5 hash of the url
    img_url = scrapy.Field()  # TODO: confirm whether this is a list or a single URL
    img_path = scrapy.Field()
    tags = scrapy.Field(
        input_processor=MapCompose(remove_comment_tags),
        output_processor=Join(","),
    )
    mark = scrapy.Field(
        input_processor=MapCompose(get_nums),
    )
    comment = scrapy.Field(
        input_processor=MapCompose(remove_comment_tags),
    )
    favor = scrapy.Field()
    content = scrapy.Field()
| StarcoderdataPython |
def f3():
    """Innermost link: yields the final value."""
    return 42


def f2():
    """Middle link: hands back f3 (uncalled)."""
    return f3


def f1():
    """Entry point of the chain: hands back f2 (uncalled)."""
    return f2


# Each call peels one link off the chain: f1 -> f2 -> f3 -> 42.
assert f1()()() == 42
| StarcoderdataPython |
1602413 | """
"""
import json
import requests
import os
def calc_precip(minutely):
    """Return the highest precipProbability among the minute-by-minute
    forecast points in *minutely*["data"]."""
    probabilities = [point["precipProbability"] for point in minutely["data"]]
    return max(probabilities)
def get_weather():
    """Fetch current conditions from the DarkSky API for the hard-coded
    coordinates (38.906350, -77.039100).

    Requires the DARKSKY_KEY environment variable to hold a valid API key.

    :return: dict with 'summary' (next-hour text), 'precipitation' (the max
             probability over the minutely data, formatted as 'NN.N%') and
             'temperature' (current reading, rounded to 1 decimal place).
    """
    req_str = f'https://api.darksky.net/forecast/{os.getenv("DARKSKY_KEY")}/38.906350,-77.039100'
    contents = json.loads(requests.get(req_str).content)
    current = contents['currently']
    minutely = contents['minutely']
    return {
        # use minutely summary for summary of weather over next hour, rather than this instance
        # that gives us results like "Possible light rain starting in 45 min".
        'summary': contents['minutely']['summary'],
        'precipitation': f'{calc_precip(minutely) * 100}%',
        'temperature': round(current['temperature'], 1)  # round to 1dp
    }
| StarcoderdataPython |
3328156 | <reponame>BoogalooLi/python_spiders<filename>Xpath/Xpath_basics.py
from lxml import etree
# Sample HTML document used to demonstrate basic XPath extraction.
text = '''
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
<title>
学习猿地 - IT培训|Java培训|Python培训|ui设计培训|web前端培训|GO培训|PHP培训|成就自己的只需一套精品
</title>
</head>
<body>
<ul>
<li><a href="/a/b/c/java/">java engineer</a></li>
<li><a href="/a/b/c/python/">python engineer</a></li>
<li><a href="/a/b/c/ai/">AI engineer</a></li>
</ul>
</body>
</html>
'''

# Parse the HTML string into an element tree with lxml's etree.
html = etree.HTML(text)
# print(html)

# Extract data with XPath expressions.
# Text of every list-item link: ['java engineer', 'python engineer', 'AI engineer']
r1 = html.xpath('/html/body/ul/li/a/text()')
print(r1)

# Text of the first list-item link only (XPath indices are 1-based): ['java engineer']
r2 = html.xpath('/html/body/ul/li[1]/a/text()')
print(r2)
| StarcoderdataPython |
3399091 | from flask import render_template
from flask import request
from app.settings import config
from .blueprint import blueprint
from .models import MainCategoryProduct, Product, CategoryProduct
from .logger import logger
# TODO: it may make sense to move these helpers into a separate module.
def get_categorys():
    """Load every main product category together with its sub-categories.

    Returns a list of {'main': <MainCategoryProduct>, 'sub': <its
    categorys_product>} dicts, ready for the navigation templates.
    """
    result = []
    for main_category in MainCategoryProduct.query.all():
        result.append({
            'main': main_category,
            'sub': main_category.categorys_product,
        })
    return result
@blueprint.route('/')
def index():
    """Home page: render the catalogue with the full category menu."""
    categorys = get_categorys()
    return render_template('index.html', categorys=categorys)
@blueprint.route('/main_category/<int:main_category_id>/')
def show_main_category(main_category_id):
    """Page for one main category: list its sub-categories.

    For an unknown id the same template is rendered with None placeholders
    (and the miss is logged) rather than returning a 404.
    """
    categorys = get_categorys()
    main_category = MainCategoryProduct.query.\
        filter(MainCategoryProduct.id == main_category_id).first()
    if not main_category:
        logger.debug('Запрос основной категории по несуществующему id')
        # NOTE(review): this branch passes sub_category (singular) while the
        # success branch passes sub_categorys - confirm which name the
        # template actually expects.
        return render_template(
            'subcategory.html',
            categorys=categorys,
            main_category=None,
            sub_category=None,
        )
    return render_template(
        'subcategory.html',
        categorys=categorys,
        main_category=main_category,
        sub_categorys=main_category.categorys_product,
    )
@blueprint.route('/category/<int:category_id>/')
def show_category(category_id):
    """Page for one sub-category: list its products with cost, rating and images.

    For an unknown id the template is rendered with products=None (logged),
    not a 404.
    """
    categorys = get_categorys()
    category = CategoryProduct.query.\
        filter(CategoryProduct.id == category_id).first()
    if not category:
        logger.debug('Запрос категории по несуществующему id')
        return render_template(
            'subcategory_products.html',
            categorys=categorys,
            products=None,
        )
    # Flatten the ORM objects into plain dicts for the template.
    products_info = []
    for product in category.products:
        products_info.append({
            'id': product.id,
            'product_name': product.name,
            'rating': product.rating,
            'cost': product.cost,
            'description': product.description,
            'images': product.get_sorted_path_images(
                config.get('PATH_IMAGES')
            ),
        })
    return render_template(
        'subcategory_products.html',
        categorys=categorys,
        products=products_info,
        category=category,
        main_category=category.main_category_product
    )
@blueprint.route('/product/<int:product_id>/')
def show_product(product_id):
    """Product detail page: parameters, images and customer feedback.

    For an unknown id the template is rendered with product=None (logged).
    """
    categorys = get_categorys()
    product = Product.query.filter(Product.id == product_id).first()
    if not product:
        logger.debug('Запрос продукта по несуществующему id')
        return render_template(
            'product.html', categorys=categorys, product=None
        )
    # TODO: consider whether these lookups could move into the template.
    main_parameters = product.get_flat_main_parameters()
    images_product = product.get_sorted_path_images(config.get('PATH_IMAGES'))
    feedbacks = product.feedbacks
    return render_template(
        'product.html',
        categorys=categorys,
        product=product,
        category_product=product.category_product,
        main_category_product=product.category_product.main_category_product,
        images_product=images_product,
        main_parameters=main_parameters,
        feedbacks=feedbacks
    )
@blueprint.route('/api/add_product', methods=['POST'])
def api_add_product():
    """Stub API endpoint for product creation: logs the payload, stores nothing."""
    logger.debug('Кто-то пытается пользоваться API. ХА-ХА-ХА...')
    # Fix: route the payload through the module logger instead of print(),
    # consistent with the rest of this module's diagnostics.
    logger.debug('add_product payload: %s', request.get_json(force=True))
    return 'OK'
| StarcoderdataPython |
1002 | <reponame>davidtahim/Glyphs-Scripts<filename>Components/Align All Components.py
#MenuTitle: Align All Components
# -*- coding: utf-8 -*-
__doc__="""
Fakes auto-alignment in glyphs that cannot be auto-aligned.
"""
import GlyphsApp
thisFont = Glyphs.font # frontmost font
thisFontMaster = thisFont.selectedFontMaster # active master
thisFontMasterID = thisFont.selectedFontMaster.id # id of the active master
listOfSelectedLayers = thisFont.selectedLayers # active layers of selected glyphs
def process( thisLayer ):
    """Place the layer's components end to end from x=0 (left to right) and
    set the layer width to the sum of the component widths, mimicking
    auto-alignment for glyphs that cannot be auto-aligned."""
    advance = 0.0
    for thisComponent in thisLayer.components:
        thisComponent.position = NSPoint( advance, 0.0 )
        # Advance by the referenced glyph's width in the active master.
        advance += thisComponent.component.layers[thisFontMasterID].width
    thisLayer.width = advance
thisFont.disableUpdateInterface() # suppresses UI updates in Font View

# Re-layout every selected layer; each glyph's change is one undo step.
for thisLayer in listOfSelectedLayers:
    thisGlyph = thisLayer.parent
    print "Aligning components in:", thisGlyph.name
    thisGlyph.beginUndo() # begin undo grouping
    process( thisLayer )
    thisGlyph.endUndo() # end undo grouping

thisFont.enableUpdateInterface() # re-enables UI updates in Font View
| StarcoderdataPython |
3232268 | <filename>gopigo_interface.py
import sys
import time
import gopigo
import robot_util
def handleCommand(command, keyPosition):
    """Drive the GoPiGo from a one-letter movement command.

    Only key-down events are acted on; matching movement commands run the
    motors briefly and stop, and every key-down is forwarded to the sound
    handler as well.
    """
    # ignore key-up (and any other) events
    if keyPosition != "down":
        return

    print("handle command", command, keyPosition)

    # command -> (motor routine, seconds to run before stopping)
    moves = {
        'L': (gopigo.left_rot, 0.15),
        'R': (gopigo.right_rot, 0.15),
        'F': (gopigo.forward, 0.4),
        'B': (gopigo.backward, 0.3),
    }
    if command in moves:
        motor, duration = moves[command]
        motor()
        time.sleep(duration)
        gopigo.stop()

    robot_util.handleSoundCommand(command, keyPosition)
| StarcoderdataPython |
3208308 | <gh_stars>0
def tabulate_course_details(driver):
    '''
    Parse the course/grade table rendered on the results page.

    driver: Selenium webdriver positioned on the results page.
    returns: (course_summary, sem_summary, gpas) where
             course_summary = (codes, names, categories, credits, grades),
             sem_summary    = (semester header strings,
                               [(earned credits, courses in that semester), ...]),
             gpas           = [GPA per semester, ...]
    '''
    courses = driver.find_element_by_xpath(
        "/html/body/center/center/table[1]/tbody").text
    course_list = [line.split() for line in courses.split('\n')]
    course_code = []
    course_name = []
    course_category = []
    course_credits = []
    course_grade = []
    credits = []
    sem = []
    gpas = []
    # Courses seen in the current semester.  Initialised here so a course row
    # appearing before any semester header no longer raises UnboundLocalError
    # (the original only assigned it inside the header branch).
    count = 0
    for row in course_list:
        if not row:  # skip blank lines in the scraped text
            continue
        if row[0].isnumeric():
            # Course row: "<serial> <code> <name...> <category> <credits> <grade> <points>"
            course_code.append(row[1])
            course_name.append(' '.join(row[2:-4]))
            course_category.append(row[-4])
            course_credits.append(row[-3])
            course_grade.append(row[-2])
            count += 1  # number of courses taken in this semester
        elif row[0] == "Earned":
            # Semester summary row, e.g. "Earned Credits:24 GPA:8.50"
            if row[1][-2:].isnumeric():
                credits.append((int(row[1][-2:]), count))
            else:  # single-digit credit total
                credits.append((int(row[1][-1]), count))
            if row[2][0:3] == 'GPA':
                gpas.append(float(row[2][4:]))
        else:
            # Semester header row: start counting courses afresh.
            count = 0
            sem.append(' '.join(row))
    course_summary = (course_code, course_name, course_category,
                      course_credits, course_grade)
    sem_summary = (sem, credits)
    return course_summary, sem_summary, gpas
1749438 | <gh_stars>0
# -*- coding: utf-8 -*-
class TemplateNotExistsException(Exception):
    """Raised when a requested template does not exist."""
class QiniuTokenInvalidException(Exception):
    """Raised when a Qiniu upload token is rejected as invalid."""
from .library import Library

# __all__ must contain export *names* as strings.  Listing the class object
# itself makes `from package import *` raise TypeError under Python 3.
__all__ = ["Library"]
| StarcoderdataPython |
3354237 | import unittest
from unittest import mock
from .txn import Transaction
from .metricstore import Metricstore
from pythonapm.agent import Agent
class Resp:
    """Minimal stand-in for an HTTP response carrying only a status code."""

    def __init__(self, status_code):
        self.status_code = status_code
class Err:
    """Bare dummy object used to exercise error-type recording."""
class TxnTest(unittest.TestCase):
    """Unit tests for pythonapm.metric.txn.Transaction."""

    def setUp(self):
        # Minimal WSGI-style environ covering the keys Transaction reads.
        self.txn_instance = Transaction(
            {'PATH_INFO': 'path', 'QUERY_STRING': 'query', 'REQUEST_METHOD': 'req_method'})

    # mock.patch decorators are applied bottom-up, so the injected mocks
    # arrive in reverse order: add_web_txn first, then Agent, then get_agent.
    # (The original parameter list had the last two names swapped; the wiring
    # happened to be symmetric, but the names were misleading.)
    @mock.patch('pythonapm.metric.txn.get_agent')
    @mock.patch('pythonapm.metric.test_txn.Agent')
    @mock.patch('pythonapm.metric.test_txn.Metricstore.add_web_txn')
    def test_end_txn(self, mock_add_txn, mock_Agent, mock_get_agent):
        """end_txn records the web txn and marks the transaction completed."""
        # Both the patched Agent class and get_agent() hand back the same
        # configured agent instance.
        mock_agent_instance = mock_Agent.return_value
        mock_agent_instance.is_data_collection_allowed.return_value = True
        mock_agent_instance.get_metric_store.return_value = Metricstore()
        mock_get_agent.return_value = mock_agent_instance

        self.assertFalse(self.txn_instance.completed)
        self.txn_instance.end_txn(res=Resp(200))
        self.assertTrue(mock_agent_instance.is_data_collection_allowed.called)
        self.assertTrue(mock_add_txn.called)
        self.assertTrue(self.txn_instance.completed)
        self.assertEqual(self.txn_instance.status_code, 200)

    def test_check_and_add_error(self):
        """Exception class names are tallied by count in exceptions_info."""
        self.txn_instance.exceptions_info = {}
        self.txn_instance.check_and_add_error(Exception())
        self.txn_instance.check_and_add_error(ArithmeticError())
        self.txn_instance.check_and_add_error(ArithmeticError())
        self.txn_instance.check_and_add_error(Err())
        self.assertEqual(self.txn_instance.exceptions_info['Exception'], 1)
        self.assertEqual(self.txn_instance.exceptions_info['ArithmeticError'], 2)
        self.assertEqual(self.txn_instance.exceptions_info['Err'], 1)

    def test_get_url(self):
        """get_url returns the PATH_INFO value."""
        self.assertIsInstance(self.txn_instance.get_url(), str)
        self.assertEqual(self.txn_instance.get_url(), 'path')

    def test_get_method(self):
        """get_method returns the REQUEST_METHOD value."""
        self.assertIsInstance(self.txn_instance.get_method(), str)
        self.assertEqual(self.txn_instance.get_method(), 'req_method')

    def test_get_query_param(self):
        """get_query_param returns the QUERY_STRING value."""
        self.assertIsInstance(self.txn_instance.get_query_param(), str)
        self.assertEqual(self.txn_instance.get_query_param(), 'query')

    def test_get_status_code(self):
        # NOTE(review): isinstance(..., object) is always true; this only
        # proves the accessor exists and does not raise.
        self.assertIsInstance(self.txn_instance.get_status_code(), object)

    def test_is_completed(self):
        self.assertIsInstance(self.txn_instance.is_completed(), bool)

    def test_is_error_txn(self):
        """Status 401 counts as an error transaction; 200 does not."""
        self.txn_instance.status_code = 401
        self.assertTrue(self.txn_instance.is_error_txn())
        self.txn_instance.status_code = 200
        self.assertFalse(self.txn_instance.is_error_txn())

    def tearDown(self):
        self.txn_instance = None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.