content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import gen_test
from types import FunctionType
from typing import List
import gen_swizzle
import gen_type
import gen_math
import gen_dependency
import config
import shutil
import sys
import codegen_util as util
from pathlib import Path
# paths
codgen_root_dir = Path(__file__).parent
project_root_dir = codgen_root_dir.parent
cpp_root_dir = project_root_dir / "FukoMath"
test_root_dir = project_root_dir / "FukoTest"
swizzle_dir = cpp_root_dir / "Swizzle"
types_dir = cpp_root_dir / "Types"
math_dir = cpp_root_dir / "Math"
# file paths
make_script_path = project_root_dir / "premake.lua"
forward_file_path = cpp_root_dir / "fuko_math_forward.h"
deferred_file_path = cpp_root_dir / "fuko_math_deferred.h"
facade_file_path = cpp_root_dir / "fuko_math.h"
dependencies_file_path = cpp_root_dir / "fuko_math_dependencies.h"
# lists
full_type_list = set(config.vector_type_list).union(config.matrix_type_list)
if __name__ == "__main__" :
# clean up dir
if cpp_root_dir.exists():
shutil.rmtree(cpp_root_dir)
if test_root_dir.exists():
shutil.rmtree(test_root_dir)
if make_script_path.exists():
make_script_path.unlink()
# clean up option
if "cleanup" in sys.argv:
workspace_dir = project_root_dir / "workspace"
if workspace_dir.exists():
shutil.rmtree(workspace_dir)
exit()
# gen dir
swizzle_dir.mkdir(parents=True)
types_dir.mkdir(parents=True)
math_dir.mkdir(parents=True)
test_root_dir.mkdir(parents=True)
# copy swizzle.h
swizzle_template_path = codgen_root_dir / config.swizzle_template_path
if swizzle_template_path.exists():
swizzle_template : str
with swizzle_template_path.open() as f:
swizzle_template = f.read()
with (swizzle_dir / "swizzle.h").open("w+") as f:
f.write(swizzle_template.format(
inline_marco = config.inline_marco
, math_namespace = config.math_namespace))
else:
print("lost swizzle template file\n")
exit()
# gen vector swizzle
for src_size in range(2, 5):
with (swizzle_dir / str.format("swizzle{size}", size = src_size)).open("w+") as f:
f.write('#include "swizzle.h"')
f.write(gen_swizzle.gen_swizzle_code_vector(src_size))
# gen matrix swizzle
if config.enable_matrix_swizzle:
for row_size in range(1, 5):
for col_size in range(1, 5):
if row_size != 1 or col_size != 1:
with (swizzle_dir / str.format("swizzle{row}x{col}", row = row_size, col = col_size)).open("w+") as f:
f.write('#include "swizzle.h"')
f.write(gen_swizzle.gen_swizzle_code_matrix(row_size, col_size))
# gen forward file
forward_template_file_path = codgen_root_dir / config.forward_file_template_path
if forward_template_file_path.exists():
forward_template : str
# read template
with forward_template_file_path.open() as f:
forward_template = f.read()
# write
with forward_file_path.open("w+") as f:
f.write(str.format(forward_template
, forward_declares = gen_type.gen_forward_declare_vector(config.vector_type_list)
+ gen_type.gen_forward_declare_matrix(config.matrix_type_list)
, math_namespace = config.math_namespace))
else:
print("lost forward template file\n")
exit()
# gen type codes
for type in full_type_list:
implicit_types = config.vector_type_list.copy()
implicit_types.remove(type)
# write file
with (types_dir / (type + ".h")).open("w+") as f:
f.write('''#pragma once\n#include "../fuko_math_forward.h"\n#include "../Swizzle/swizzle.h"\n\n''')
# begin namespace
if config.enable_namespace:
f.write(begin_namespace())
# gen vector codes
if type in config.vector_type_list:
f.write(gen_type.gen_type_code_vector(type, implicit_types))
# gen matrix types
if type in config.matrix_type_list:
f.write(gen_type.gen_type_code_matrix(type))
# end namespace
if config.enable_namespace:
f.write(end_namespace())
# gen dependencies
with dependencies_file_path.open("w+") as f:
# add pragma and forward
f.write('''#pragma once\n#include "fuko_math_forward.h"\n''')
# add type include
for type in config.vector_type_list:
f.write(str.format('''#include "Types/{type}.h"\n''', type = type))
f.write("\n")
# begin namespace
if config.enable_namespace:
f.write(begin_namespace())
# implicit convertions
f.write(gen_dependency.gen_implicit_conversion(config.vector_type_list))
# asxxx convertions
f.write(gen_dependency.gen_asxxx_conversion(config.asxxx_type_list))
# end namespace
if config.enable_namespace:
f.write(end_namespace())
# gen deferred file
deferred_template_file_path = codgen_root_dir / config.deferred_file_template_path
if deferred_template_file_path.exists():
deferred_template : str
# read template
with deferred_template_file_path.open() as f:
deferred_template = f.read()
# write
with deferred_file_path.open("w+") as f:
f.write(deferred_template.format(
inline_marco = config.inline_marco
, math_namespace = config.math_namespace
))
else:
print("lost deferred template file\n")
exit()
# gen util math
with (math_dir/ "util_math.h").open("w+") as f:
# add pragma and forward
f.write('''#pragma once\n#include "fuko_math_forward.h"\n''')
# add type include
for type in config.vector_type_list:
f.write(str.format('''#include "Types/{type}.h"\n''', type = type))
f.write("\n")
# begin namespace
if config.enable_namespace:
f.write(begin_namespace())
# increment & decrement
f.write(gen_math.gen_vector_increment_decrement(config.arithmetic_type_list))
# arithmetic
f.write(gen_math.gen_vector_arithmetic(config.arithmetic_type_list))
# arithmetic assign
f.write(gen_math.gen_vector_arithmetic_assign(config.arithmetic_type_list))
# swizzle arithmetic
f.write(gen_math.gen_swizzle_arithmetic())
# swizzle arithmetic assign
f.write(gen_math.gen_swizzle_arithmetic_assign())
# end namespace
if config.enable_namespace:
f.write(end_namespace())
# gen per type math
for type in full_type_list :
math_file_path = math_dir / str.format("{base_type}_math.h", base_type = type)
with math_file_path.open("w+") as f:
# add pragma and forward
f.write('''#pragma once\n#include <cmath>\n#include <algorithm>\n#include "util_math.h"\n#include "../fuko_math_forward.h"\n''')
# add type include
f.write(str.format('''#include "../Types/{type}.h"\n\n''', type = type))
# begin namespace
if config.enable_namespace:
f.write(begin_namespace())
# gen code
if type in config.vector_type_list:
f.write(gen_math.gen_vertor_math(type))
if type in config.matrix_type_list:
f.write(gen_math.gen_matrix_math(type))
# end namespace
if config.enable_namespace:
f.write(end_namespace())
# gen facade file
facade_file_template_path = codgen_root_dir / config.facade_file_template_path
if facade_file_template_path.exists():
facade_file_template : str
# read template
with facade_file_template_path.open() as f:
facade_file_template = f.read()
# gen includes
type_includes = ""
math_includes = '''#include "Math/util_math.h"\n'''
for type in full_type_list:
type_includes += '''#include "Types/{type}.h"\n'''.format(type = type)
math_includes += '''#include "Math/{type}_math.h"\n'''.format(type = type)
# write
with facade_file_path.open("w+") as f:
f.write(facade_file_template.format(
type_includes = type_includes
, math_includes = math_includes))
else:
print("lost facade file template file\n")
exit()
# gen testscript
test_vector_path = test_root_dir / "test_vector.h"
test_matrix_path = test_root_dir / "test_matrix.h"
test_math_path = test_root_dir / "test_math.h"
test_exec_path = test_root_dir / "main.cpp"
with test_vector_path.open("w+") as f:
f.write('''#pragma once
#include "fuko_math.h"\n\n''')
f.write(gen_test.gen_vector_test(config.vector_type_list))
with test_matrix_path.open("w+") as f:
f.write('''#pragma once
#include "fuko_math.h"\n\n''')
f.write(gen_test.gen_matrix_test(config.matrix_type_list))
with test_math_path.open("w+") as f:
f.write('''#pragma once
#include "fuko_math.h"\n\n''')
f.write(gen_test.gen_math_test(full_type_list))
with test_exec_path.open("w+") as f:
f.write(gen_test.gen_exec_test())
# gen makescript
make_script_template_path = codgen_root_dir / config.make_script_template_path
if make_script_template_path.exists():
make_script_template : str
# read template
with make_script_template_path.open() as f:
make_script_template = f.read()
# write
with make_script_path.open("w+") as f:
f.write(make_script_template)
else:
print("lost make script template file\n")
exit()
| [
11748,
2429,
62,
9288,
198,
6738,
3858,
1330,
15553,
6030,
198,
6738,
19720,
1330,
7343,
198,
11748,
2429,
62,
2032,
44461,
198,
11748,
2429,
62,
4906,
198,
11748,
2429,
62,
11018,
198,
11748,
2429,
62,
45841,
1387,
198,
11748,
4566,
19... | 2.118938 | 4,784 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
NEIGHBORS = [(1, 0), (-1, 0), (0, 1), (0, -1)]
BIG_RISK = 12345678
LOL_ITERATIONS = 10
# Parse the risk level map file
with open('day15_input.txt') as f:
lines = [line.rstrip('\n') for line in f]
# Generate the risk lookup table (dict).
risk = {}
for y in range(len(lines)):
for x in range(len(lines)):
risk[(x, y)] = int(lines[y][x])
print('Part One: Total risk of the safest path is {0}.'.format(navigate(risk)))
# The entire cave is actually five times larger in both dimensions than you thought;
# the area you originally scanned is just one tile in a 5x5 tile area that forms the full map.
# Your original map tile repeats to the right and downward; each time the tile repeats
# to the right or downward, all of its risk levels are 1 higher than the tile immediately
# up or left of it. However, risk levels above 9 wrap back around to 1.
big_grid = {}
grid_size = int(len(risk) ** 0.5)
for (x, y) in risk:
for i in range(5):
for j in range(5):
new_risk = risk[(x, y)] + i + j
if new_risk > 9:
new_risk = (new_risk + 1) % 10
big_grid[(x + (grid_size * i), y + (grid_size * j))] = new_risk
risk = big_grid
print('Part Two: Total risk of the safest path is {0}.'.format(navigate(risk)))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
12161,
18060,
33,
20673,
796,
47527,
16,
11,
657,
828,
13841,
16,
11,
657,
828,
357,
15,
11,
352,
828,
357,
15... | 2.739696 | 461 |
import unittest
import numpy as np
from lib import data, environ
| [
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
9195,
1330,
1366,
11,
551,
2268,
628,
198
] | 3.238095 | 21 |
num=int(input("enter the number:-"))
if(num%2==0):
print("Number is even")
else:
print("Number is odd") | [
22510,
28,
600,
7,
15414,
7203,
9255,
262,
1271,
25,
21215,
4008,
198,
361,
7,
22510,
4,
17,
855,
15,
2599,
198,
220,
220,
220,
3601,
7203,
15057,
318,
772,
4943,
198,
17772,
25,
198,
220,
220,
220,
3601,
7203,
15057,
318,
5629,
4... | 2.522727 | 44 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
import numpy as np
import matplotlib.pyplot as plt
import os,glob
from matplotlib.colors import ListedColormap
import matplotlib as mpl
import pdb
"""
print(mpl.rcParmas)
mpl.rcParmas['font.family']="Times New Roman"
mpl.rcParmas['lines.width']=2
mpl.rcParmas['figure.figsize']=8,6"""
colors=[(248/255,25/255,25/255),(40/255,172/255,82/255),(161/255,80/255,159/255),(0/255,127/255,182/255)]
#colors=["#fc8d59","#ffffbf","#91cf60"]
camp=ListedColormap(colors)
font = {'family' : 'Times New Roman',
'weight' : 'bold',
'size' : 60}
mpl.rc('font', **font)
# def PlotMonteCalorsTimesConvergencePth(coefficients,file_path,parts,start_plot):
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
28686,
11,
4743,
672,
198,
6738,
2603,
29487,
8019,
13,
4033,
669,
1330,
406,
6347,
5216,
579,
499,
198,
11748,
2603,
29487,
8019,
... | 2.264605 | 291 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 19:18:30 2021
@author: cubo
"""
import argparse
import pandas as pd
import pickle
import sys
parser = argparse.ArgumentParser(description='options: cl, tl, cr, tr')
parser.add_argument("vec_mod_switcher", help=
'cl: countVectorizer + LogReg\
tl: tfidtVectorizer + LogReg\
cr: countVectorizer + RandFor\
tr: tfidtVectorizer + RandFor')
parser.add_argument("snippet", help="small piece of song (str)", type=str)
args = parser.parse_args()
vec_mod = args.vec_mod_switcher
snippet =[args.snippet]
if vec_mod == 'cl':
with open("../data/processed_data/fitted_pipelines/cvec_lr_fin_model.pickle", "rb") as file:
fitted_model = pickle.load(file)
elif vec_mod == 'tl':
with open("../data/processed_data/fitted_pipelines/tfidt_lr_fin_model.pickle", "rb") as file:
fitted_model = pickle.load(file)
elif vec_mod == 'cr':
with open("../data/processed_data/fitted_pipelines/cvec_rf_fin_model.pickle", "rb") as file:
fitted_model = pickle.load(file)
elif vec_mod == 'tr':
with open("../data/processed_data/fitted_pipelines/tfidt_rf_fin_model.pickle", "rb") as file:
fitted_model = pickle.load(file)
else:
print('Error! vectorizer must be "cl", "tl", "cr", or "tr"')
print()
sys.exit()
pred = fitted_model.predict_proba(snippet)
predDF = pd.DataFrame(pred,
columns = fitted_model.classes_,
index = snippet)
print(predDF)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
3158,
220,
718,
678,
25,
1507,
25,
1270,
33448,
198,
198,
31,
9800,
25,
13617,
78,
... | 2.122265 | 777 |
# !/usr/bin/env python3
# -*- coding: UTF-8 -*-
#
# Shows all the vi marks that exist
#
import os
import re
import sublime
import sublime_plugin
SYNTAX_FILE_EXTENSIONS = [
'.tmLanguage',
'.sublime-syntax',
]
| [
2,
5145,
14,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
198,
2,
25156,
477,
262,
25357,
8849,
326,
2152,
198,
2,
198,
11748,
28686,
198,
11748,
302,
198,
198,
11748,
... | 2.494382 | 89 |
from .base_handler import BaseHandler
| [
6738,
764,
8692,
62,
30281,
1330,
7308,
25060,
628
] | 4.333333 | 9 |
# 문제: 1143. Longest Common Subsequence
# 링크: https://leetcode.com/problems/longest-common-subsequence/
# 시간/공간: 392ms / 21.9MB
| [
2,
31619,
105,
116,
168,
254,
250,
25,
1367,
3559,
13,
5882,
395,
8070,
3834,
43167,
198,
2,
31619,
100,
223,
169,
223,
105,
25,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
6511,
395,
12,
11321,
12,
7266,
43167,
14,
... | 1.869565 | 69 |
nota1 = float(input('digite as notas no aluno para tirar a media, nota 1:'))
nota2 = float(input('nota 2:'))
media = ((nota1+nota2)/2)
print('A media do aluno nesse bimestre foi {:.1f}'.format(media))
| [
1662,
64,
16,
796,
12178,
7,
15414,
10786,
12894,
578,
355,
407,
292,
645,
435,
36909,
31215,
48965,
283,
257,
2056,
11,
407,
64,
352,
32105,
4008,
198,
1662,
64,
17,
796,
12178,
7,
15414,
10786,
1662,
64,
362,
32105,
4008,
198,
114... | 2.45122 | 82 |
# Copyright The IETF Trust 2020', 'All Rights Reserved
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-02-12 07:11
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
15069,
383,
314,
22274,
9870,
12131,
3256,
705,
3237,
6923,
33876,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
1983,
319,
12131,
12,
2999,
12,
1065,
8753,
25,... | 2.906667 | 75 |
import numpy as np
import torch
import h5py
from pathlib import Path
import pickle
import sys, os, glob
import datetime as dt
from tqdm import tqdm
sys.path.append(os.getcwd())
def load_h5_file(file_path):
"""
Given a file path to an h5 file assumed to house a tensor,
load that tensor into memory and return a pointer.
"""
# load
fr = h5py.File(file_path, 'r')
a_group_key = list(fr.keys())[0]
data = list(fr[a_group_key])
# transform to appropriate numpy array
data=data[0:]
data = np.stack(data, axis=0)
return data
cities = ['Berlin','Moscow', 'Istanbul']
# please enter the source data root and submission root
source_root = r""
submission_root = r""
for city in cities:
# get the test files
file_paths = glob.glob(os.path.join(source_root, city, 'test', '*.h5'))
for path in tqdm(file_paths):
all_data = load_h5_file(path)
all_data = torch.from_numpy(np.moveaxis(all_data,-1,2)).float()
pred = torch.mean(all_data, dim=1).unsqueeze(1).repeat(1,6,1,1,1)
pred = torch.clamp(pred, 0, 255).permute(0, 1, 3, 4, 2).astype(np.uint8)
# create saving root
root = os.path.join(submission_root, city.upper())
if not os.path.exists(root):
os.makedirs(root)
# save predictions
target_file = os.path.join(root, path.split('/')[-1])
with h5py.File(target_file, 'w', libver='latest',) as f:
f.create_dataset('array', shape = (pred.shape), data=pred, compression="gzip", compression_opts=4) | [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
289,
20,
9078,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
2298,
293,
198,
11748,
25064,
11,
28686,
11,
15095,
198,
11748,
4818,
8079,
355,
288,
83,
198,
6738,
256,
80,
... | 2.265335 | 701 |
import itertools
from sys import stdin
N,A,B = [int(x) for x in stdin.readline().rstrip().split(" ")]
print(N-A+B) | [
11748,
340,
861,
10141,
198,
6738,
25064,
1330,
14367,
259,
198,
198,
45,
11,
32,
11,
33,
796,
685,
600,
7,
87,
8,
329,
2124,
287,
14367,
259,
13,
961,
1370,
22446,
81,
36311,
22446,
35312,
7203,
366,
15437,
198,
4798,
7,
45,
12,
... | 2.395833 | 48 |
import dataclasses
from collections.abc import Mapping
from functools import wraps
from typing import Any, Callable, Optional, TypeVar, overload
FGetType = Callable[[Any], Any]
FSetType = Callable[[Any, Any], None]
FDelType = Callable[[Any], None]
FGet = TypeVar("FGet", bound=FGetType)
FSet = TypeVar("FSet", bound=FSetType)
FDel = TypeVar("FDel", bound=FDelType)
FSET_ATTRIBUTE = "_field_properties_fset"
class BaseFieldProperty(property):
"""Field property base class, allowing to handle default value/factory of field."""
@staticmethod
NO_FIELD = object()
T = TypeVar("T")
@overload
@overload
@overload
@overload
def field_property(
__field=NO_FIELD, *, raw: bool = False, inherit: bool = False, **kwargs
):
"""With keywords argument, declare a field property; otherwise, get a property-like
object from a declared field_property to set its accessors.
Field property declaration use the same args than dataclass field.
inherit=True allows to inherit of overridden field parameters (default,
default_factory, init, repr, hash, compare, metadata)
raw=True will not add default implementation for field accessors
"""
if isinstance(__field, FieldPropertyDecorator):
return __field
elif __field is not NO_FIELD:
raise ValueError(f"Invalid field property {__field}")
else:
dataclasses.field(**kwargs) # check that parameters are valid
return FieldPropertyDecorator(raw, inherit, **kwargs)
| [
11748,
4818,
330,
28958,
198,
6738,
17268,
13,
39305,
1330,
337,
5912,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
32233,
11,
5994,
19852,
11,
31754,
628,
198,
37,
3855,
6030,
796,
4889,
... | 3.086777 | 484 |
__all__ = [ "graph" ]
| [
834,
439,
834,
796,
685,
366,
34960,
1,
2361,
198
] | 2.2 | 10 |
"""BleBox cover entity."""
import logging
from homeassistant.components.cover import (
ATTR_POSITION,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_SHUTTER,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_STOP,
CoverDevice,
)
from homeassistant.exceptions import PlatformNotReady
from . import CommonEntity, async_add_blebox
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add, discovery_info=None):
"""Set up BleBox cover."""
return await async_add_blebox(
BleBoxCoverEntity, "covers", hass, config, async_add, PlatformNotReady
)
async def async_setup_entry(hass, config_entry, async_add):
"""Set up a BleBox entry."""
return await async_add_blebox(
BleBoxCoverEntity,
"covers",
hass,
config_entry.data,
async_add,
PlatformNotReady,
)
class BleBoxCoverEntity(CommonEntity, CoverDevice):
"""Representation of a BleBox cover feature."""
@property
def state(self):
"""Return the equivalent HA cover state."""
states = {
None: None,
0: STATE_CLOSING, # moving down
1: STATE_OPENING, # moving up
2: STATE_OPEN, # manually stopped
3: STATE_CLOSED, # lower limit
4: STATE_OPEN, # upper limit / open
# gateController
5: STATE_OPEN, # overload
6: STATE_OPEN, # motor failure
# 7 is not used
8: STATE_OPEN, # safety stop
}
return states[self._feature.state]
@property
def device_class(self):
"""Return the device class."""
types = {
"shutter": DEVICE_CLASS_SHUTTER,
"gatebox": DEVICE_CLASS_DOOR,
"gate": DEVICE_CLASS_DOOR,
}
return types[self._feature.device_class]
@property
def supported_features(self):
"""Return the supported cover features."""
position = SUPPORT_SET_POSITION if self._feature.is_slider else 0
stop = SUPPORT_STOP if self._feature.has_stop else 0
return position | stop | SUPPORT_OPEN | SUPPORT_CLOSE
@property
def current_cover_position(self):
"""Return the current cover position."""
position = self._feature.current
if position == -1: # possible for shutterBox
name = self.name
_LOGGER.warning(
"Position for %s is unknown. Try calibrating the device.", name
)
return None
return None if position is None else 100 - position
@property
def is_opening(self):
"""Return whether cover is opening."""
return self._is_state(STATE_OPENING)
@property
def is_closing(self):
"""Return whether cover is closing."""
return self._is_state(STATE_CLOSING)
@property
def is_closed(self):
"""Return whether cover is closed."""
return self._is_state(STATE_CLOSED)
async def async_open_cover(self, **kwargs):
"""Open the cover position."""
await self._feature.async_open()
async def async_close_cover(self, **kwargs):
"""Close the cover position."""
await self._feature.async_close()
async def async_set_cover_position(self, **kwargs):
"""Set the cover position."""
position = kwargs[ATTR_POSITION]
if position is not None:
await self._feature.async_set_position(100 - position)
async def async_stop_cover(self, **kwargs):
"""Stop the cover."""
await self._feature.async_stop()
| [
37811,
43413,
14253,
3002,
9312,
526,
15931,
198,
198,
11748,
18931,
198,
198,
6738,
1363,
562,
10167,
13,
5589,
3906,
13,
9631,
1330,
357,
198,
220,
220,
220,
5161,
5446,
62,
37997,
17941,
11,
198,
220,
220,
220,
5550,
27389,
62,
316... | 2.353278 | 1,571 |
import os
import re
import requests
from subprocess import Popen, PIPE, STDOUT
from IPython.core.magic import register_cell_magic
from urllib.parse import urljoin
# download server
ver = '7.9.2'
url = f'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{ver}-linux-x86_64.tar.gz'
os.system(f"curl {url} | tar xz")
os.system(f"chown -R daemon:daemon elasticsearch-{ver}")
# start server
es_server = Popen([f'elasticsearch-{ver}/bin/elasticsearch'],
stdout=PIPE, stderr=STDOUT,
preexec_fn=lambda: os.setuid(1) # as daemon
)
for i in range(77):
es_server.stdout.readline() # wait till started
# client
os.system("pip install elasticsearch")
# client magic
@register_cell_magic
# client viz
requests.models.Response._repr_html_ = render
| [
11748,
28686,
198,
11748,
302,
198,
11748,
7007,
220,
198,
6738,
850,
14681,
1330,
8099,
268,
11,
350,
4061,
36,
11,
48571,
12425,
198,
6738,
6101,
7535,
13,
7295,
13,
32707,
1330,
7881,
62,
3846,
62,
32707,
198,
6738,
2956,
297,
571,... | 2.456456 | 333 |
# -*- coding: utf-8 - *-
"""
Abstracts your target from your table definitions target and you can use it.
Example how you inherit from it.
def saletaxdoo(target):
class SaleTaxdoo(target):
"A table definition."
def __init__(self):
super().__init__()
self.table = 'taxdoo_sale'
@property
def mapping(self):
return {'transaction_start_date': (self._datey, False)}
tableobject = saletaxdoo(AzureSQL)() # AzureSQL is defined in this module
"""
from sqlalchemy import types
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, MetaData
from mlrepricer import setup
class SQLite:
"""Default local SQLite database."""
def __init__(self):
"""Map the dtypes of your database to our definitions."""
# if you have unicode it's best to use NVARCHAR
self._asin = types.NVARCHAR(length=9)
self._sellerid = types.NVARCHAR(length=14)
self._textshort = types.NVARCHAR(length=40)
self._textmiddle = types.NVARCHAR(length=400)
self._textlong = types.NVARCHAR(length=4000) # NVARCH4000 is max
self._floaty = types.Float
self._numericy = types.DECIMAL(10, 2)
self._inty = types.INTEGER
self._datey = types.DATE
self._datetimey = types.DATETIME
self._timey = types.TIME
self._booly = types.BOOLEAN
# Connection data, see the central config.yaml file.
self._conn_data = setup.configs['SQLite']
@property
def conn(self):
"""Return a connection string you use like pandas.read_sql_table."""
self.database = self._conn_data['database']
return create_engine(
f"sqlite:////{setup.configs['datafolder']}/{self.database}")
@property
def dtypes(self):
"""Use for creating tables, you have to implement mappings."""
return dict(zip(self.mapping.keys(), [item[0] for item in list(
self.mapping.values())]))
@property
def nullable(self):
"""Use for creating tables, you have to implement mappings."""
return dict(zip(self.mapping.keys(), [item[1] for item in list(
self.mapping.values())]))
@property
def createtable(self):
"""Create empty table if needed, with all columns, autoid, notnull."""
metadata = MetaData(bind=self.conn)
tabletocreate = Table(
self.table, metadata,
Column('ID', Integer, primary_key=True),
*(Column(columnz, dtypez, nullable=self.nullable[columnz]
) for columnz, dtypez in self.dtypes.items()))
if not tabletocreate.exists():
tabletocreate.create()
class AzureSQL:
"""
For each data destination like MSSQL define your own Class.
This is an example for a client server database.
"""
def __init__(self):
"""Map the dtypes of your database to our definitions."""
# if you have unicode it's best to use NVARCHAR
self._textshort = types.NVARCHAR(length=40)
self._textmiddle = types.NVARCHAR(length=400)
self._textlong = types.NVARCHAR(length=4000) # NVARCH4000 is max
self._floaty = types.Float
self._inty = types.INTEGER
self._datey = types.DATE
self._datetimey = types.DATETIME
self._timey = types.TIME
self._booly = types.BOOLEAN
# Connection data, see the central config.yaml file.
self._conn_data = setup.configs['AzureSQL']
@property
def conn(self):
"""Return a connection string you use like pandas.read_sql_table."""
pymssqltext = 'mssql+pymssql://{}@{}:{}@{}:{}/{}'.format(
self._conn_data['username'],
self._conn_data['hostname'],
self._conn_data['password'],
self._conn_data['host'],
self._conn_data['port'],
self._conn_data['database'])
return create_engine(pymssqltext)
@property
def dtypes(self):
"""Use for creating tables, you have to implement mappings."""
return dict(zip(self.mapping.keys(), [item[0] for item in list(
self.mapping.values())]))
@property
def nullable(self):
"""Use for creating tables, you have to implement mappings."""
return dict(zip(self.mapping.keys(), [item[1] for item in list(
self.mapping.values())]))
@property
def createtable(self):
"""Create empty table if needed, with all columns, autoid, notnull."""
metadata = MetaData(bind=self.conn)
tabletocreate = Table(
self.table, metadata,
Column('ID', Integer, primary_key=True),
*(Column(columnz, dtypez, nullable=self.nullable[columnz]
) for columnz, dtypez in self.dtypes.items()))
if not tabletocreate.exists():
tabletocreate.create()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
1635,
12,
198,
37811,
198,
23839,
82,
534,
2496,
422,
534,
3084,
17336,
2496,
290,
345,
460,
779,
340,
13,
198,
198,
16281,
703,
345,
16955,
422,
340,
13,
198,
4299,
3664,
316,
89... | 2.353898 | 2,091 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
import asyncio
import logging
import random
import threading
import time
from azure.iot.device.exceptions import (
ConnectionFailedError,
ConnectionDroppedError,
OperationCancelled,
NoConnectionError,
)
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
# --------------------------------------
# Parameters for our backoff and jitter
# --------------------------------------
# Retry immediately after failure, or wait unti after first delay?
IMMEDIATE_FIRST_RETRY = True
# Seconds to sleep for first sleep period. The exponential backoff will use
# 2x this number for the second sleep period, then 4x this number for the third
# period, then 8x and so on.
INITIAL_DELAY = 5
# Largest number of seconds to sleep between retries (before applying jitter)
MAXIMUM_DELAY = 60
# Number of seconds before an operation is considered "failed". This period starts before
# the first attempt and includes the elapsed time waiting between any failed attempts.
FAILURE_TIMEOUT = 5 * 60
# Jitter-up factor. The time, after jitter is applied, can be up this percentage larger than the
# pre-jittered time.
JITTER_UP_FACTOR = 0.25
# Jitter-down factor. The time, after jitter is applied, can be up this percentage smaller than the
# pre-jittered time.
JITTER_DOWN_FACTOR = 0.5
# Counter to keep track of running calls. We use this to distinguish between calls in logs.
running_call_index = 0
running_call_index_lock = threading.Lock()
# Retry stats. This is a dictionary of arbitrary values that we print to stdout at the of a
# test run.
retry_stats = {}
retry_stats_lock = threading.Lock()
def apply_jitter(base):
"""
Apply a jitter that can be `JITTER_DOWN_FACTOR` percent smaller than the base up to
`JITTER_UP_FACTOR` larger than the base.
"""
min_value = base * (1 - JITTER_DOWN_FACTOR)
max_value = base * (1 + JITTER_UP_FACTOR)
return random.uniform(min_value, max_value)
def increment_retry_stat_count(key):
"""
Increment a counter in the retry_stats dictionary
"""
global retry_stats
with retry_stats_lock:
retry_stats[key] = retry_stats.get(key, 0) + 1
def reset_retry_stats():
"""
reset retry stats between tests
"""
global retry_stats
retry_stats = {}
def get_type_name(obj):
"""
Given an object, return the name of the type of that object. If `str(type(obj))` returns
`"<class 'threading.Thread'>"`, this function returns `"threading.Thread"`.
"""
try:
return str(type(obj)).split("'")[1]
except Exception:
return str(type(obj))
async def retry_exponential_backoff_with_jitter(client, func, *args, **kwargs):
    """
    wrapper function to call a function with retry using exponential backoff with jitter.

    :param client: connection object exposing a ``connected`` attribute and an
        async ``connect()`` method; reconnected before each attempt if needed.
    :param func: awaitable callable to invoke with ``*args``/``**kwargs``.
    :return: whatever ``func`` returns on the first successful attempt.
    :raises: re-raises the last retriable error once ``FAILURE_TIMEOUT`` has
        elapsed, and any non-retriable error immediately.
    """
    global running_call_index, running_call_index_lock
    increment_retry_stat_count("retry_operation_total_count")
    increment_retry_stat_count("retry_operation{}".format(func.__name__))
    # Unique id per call so interleaved log lines can be correlated.
    with running_call_index_lock:
        running_call_index += 1
        call_id = "retry_op_{}_".format(running_call_index)
    attempt = 1
    # Wall-clock deadline: the timeout spans all attempts and sleeps combined.
    fail_time = time.time() + FAILURE_TIMEOUT
    logger.info(
        "retry: call {} started, call = {}({}, {}). Connecting".format(
            call_id, str(func), str(args), str(kwargs)
        )
    )
    while True:
        try:
            # If we're not connected, we should try connecting.
            if not client.connected:
                logger.info("retry: call {} reconnecting".format(call_id))
                await client.connect()
            logger.info("retry: call {} invoking".format(call_id))
            result = await func(*args, **kwargs)
            logger.info("retry: call {} successful".format(call_id))
            if attempt > 1:
                increment_retry_stat_count("success_after_{}_retries".format(attempt - 1))
            return result
        except (
            # NOTE(review): these exception classes are imported elsewhere in
            # this module (not visible in this excerpt).
            ConnectionFailedError,
            ConnectionDroppedError,
            OperationCancelled,
            NoConnectionError,
        ) as e:
            # These are all "retriable errors". If we've hit our maximum time, fail. If not,
            # sleep and try again.
            increment_retry_stat_count("retriable_error_{}".format(get_type_name(e)))
            if time.time() > fail_time:
                # NOTE(review): log prefix "retry;" (semicolon) differs from the
                # "retry:" prefix used everywhere else -- likely a typo in the
                # message string; left unchanged here since it is runtime text.
                logger.info(
                    "retry; Call {} retry limit exceeded. Raising {}".format(
                        call_id, str(e) or type(e)
                    )
                )
                increment_retry_stat_count("final_error_{}".format(get_type_name(e)))
                raise
            # calculate how long to sleep based on our jitter parameters.
            if IMMEDIATE_FIRST_RETRY:
                # First failure retries immediately; later failures back off
                # exponentially from INITIAL_DELAY.
                if attempt == 1:
                    sleep_time = 0
                else:
                    sleep_time = INITIAL_DELAY * pow(2, attempt - 1)
            else:
                sleep_time = INITIAL_DELAY * pow(2, attempt)
            # Cap the pre-jitter delay, then randomize it.
            sleep_time = min(sleep_time, MAXIMUM_DELAY)
            sleep_time = apply_jitter(sleep_time)
            attempt += 1
            logger.info(
                "retry: Call {} attempt {} raised {}. Sleeping for {} and trying again".format(
                    call_id, attempt, str(e) or type(e), sleep_time
                )
            )
            await asyncio.sleep(sleep_time)
        except Exception as e:
            # This a "non-retriable" error. Don't retry. Just fail.
            # NOTE(review): this stat key uses type(e) while the retriable path
            # uses get_type_name(e), producing differently formatted keys
            # (e.g. "<class 'ValueError'>" vs "ValueError") -- confirm intended.
            increment_retry_stat_count("non_retriable_error_{}".format(type(e)))
            logger.info(
                "retry: Call {} raised non-retriable error {}".format(call_id, str(e) or type(e))
            )
            raise e
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321,
13,
198,
11748,
30351,
952,
198,
11748,
18931,
198,
1174... | 2.418002 | 2,433 |
# -- Project information -----------------------------------------------------
project = "Pegasus Tutorials"
copyright = "2020 - 2022 The Broad Institute, Inc. and Genentech, Inc. All rights reserved."
author = (
    "Yiming Yang, Joshua Gould and Bo Li"
)
# The short X.Y version
version = ""
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
#needs_sphinx = '1.7'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
#extensions = [
#    "sphinx.ext.autodoc",
#    "sphinx.ext.intersphinx",
#    "sphinx.ext.doctest",
#    "sphinx.ext.todo",
#    "sphinx.ext.mathjax",
#    "sphinx.ext.coverage",
#    "sphinx.ext.imgmath",
#    "sphinx.ext.ifconfig",
#    "sphinx.ext.viewcode",
#    "sphinx.ext.githubpages",
#    "sphinx.ext.autosummary",
#    "sphinx.ext.napoleon",
#    "sphinx_autodoc_typehints",
#]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): recent Sphinx releases warn on `language = None` and expect
# an explicit language code such as "en" -- confirm against the pinned
# Sphinx version before changing.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"navigation_depth": 4}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# "Edit on GitHub" configuration consumed by the sphinx_rtd_theme templates.
html_context = dict(
    display_github=True,  # Integrate GitHub
    github_user="lilab-bcb",  # Username
    github_repo="pegasus-tutorials",  # Repo name
    github_version="main",  # Version
    conf_py_path="/docs/",  # Path in the checkout to the docs root
)
| [
2,
1377,
4935,
1321,
20368,
19351,
12,
198,
198,
16302,
796,
366,
47,
1533,
40895,
36361,
82,
1,
198,
22163,
4766,
796,
366,
42334,
532,
33160,
383,
9765,
5136,
11,
3457,
13,
290,
5215,
298,
3055,
11,
3457,
13,
1439,
2489,
10395,
52... | 3.210068 | 1,033 |
from rmonitor.common.helpers import Duration
#############
# Helpers
#############
# Map of RMonitor wire-protocol record prefixes to the message classes that
# parse them. NOTE(review): the message classes referenced below are imported
# elsewhere in this module (not visible in this excerpt).
MESSAGE_TYPES = {
    # Documented...
    "$A": CompetitorInformationMessage,
    "$B": RunInformationMessage,
    "$C": ClassInformationMessage,
    "$COMP": CompInformationMessage,
    "$E": SettingInformationMessage,
    "$F": HeartbeatMessage,
    "$G": RaceInformationMessage,
    "$H": PracticeQualifyingInformationMessage,
    "$I": InitRecordMessage,
    "$J": PassingInformationMessage,
    # Undocumented...
    "$SP": LapInformationMessage,
    "$SR": LapInformationMessage,
    # TODO: Figure out what these are for?
    # $RMLT
    # $RMS
    # $RMDTL
    # $RMCA
}
| [
6738,
374,
41143,
13,
11321,
13,
16794,
364,
1330,
22920,
628,
628,
628,
628,
628,
628,
198,
198,
7804,
4242,
2,
198,
2,
220,
10478,
364,
198,
7804,
4242,
2,
198,
198,
44,
1546,
4090,
8264,
62,
9936,
47,
1546,
796,
1391,
198,
220,... | 2.563177 | 277 |
# Licensed under the terms of http://www.apache.org/licenses/LICENSE-2.0
# Author (©): Alvaro del Castillo
# TODO: at some point this must be a real Singleton
import pickle
from mcpi.vec3 import Vec3
from mcthings.blocks_memory import BlocksMemory
from mcthings.utils import build_schematic_nbt
from mcthings.world import World
class Scene:
    """
    A scene is a container for all the things built using McThings.
    A scene can be built, unbuilt and moved. There is only one scene
    in a program using McThings. Things built are added automatically to
    the Scene. A Scene can also be loaded from a file, and
    it can be saved to a file.
    Before adding Things to the Scene, it must be connected to a
    Minecraft server (fill the Scene.server attribute)
    """
    # NOTE(review): no __init__ is visible in this excerpt; `things`,
    # `_decorators`, `_position` and `_end_position` are presumably
    # initialized elsewhere (class attributes or framework glue) -- confirm.
    @property
    def end_position(self):
        """ end position of the thing """
        return self._end_position
    @property
    def position(self):
        """ initial position of the thing """
        return self._position
    def add(self, thing):
        """ Add a new thing to the scene """
        if not self.things:
            # The initial position of the scene is the position
            # of its first thing added
            self._position = thing.position
        self.things.append(thing)
    def add_decorator(self, decorator):
        """ Add a new decorator to the scene """
        self._decorators.append(decorator)
    def decorate(self):
        """
        Call all decorators for the current Scene

        :return:
        """
        # Each decorator is a callable taking the scene and returning an
        # object with a decorate() method.
        for decorator in self._decorators:
            decorator(self).decorate()
    def build(self):
        """ Build all the things inside the Scene """
        for thing in self.things:
            thing.build()
        # Record the far corner of the scene once everything is placed.
        (min_pos, max_pos) = self.find_bounding_box()
        self._end_position = max_pos
    def unbuild(self):
        """ Unbuild all the things inside the Scene """
        for thing in self.things:
            thing.unbuild()
    def create(self):
        """ Create all the things inside the Scene """
        for thing in self.things:
            thing.create()
    def reposition(self, position):
        """
        Move all the things in the scene to a new relative position

        :param position: new position for the Scene
        :return:
        """
        # All the things inside the scene must be moved
        diff_x = position.x - self._position.x
        diff_y = position.y - self._position.y
        diff_z = position.z - self._position.z
        for thing in self.things:
            repos_x = thing.position.x + diff_x
            repos_y = thing.position.y + diff_y
            repos_z = thing.position.z + diff_z
            thing._position = (Vec3(repos_x, repos_y, repos_z))
    def move(self, position):
        """
        Move the scene to a new position

        :param position: new position
        :return:
        """
        # Tear down at the old location, shift coordinates, rebuild.
        self.unbuild()
        self.reposition(position)
        self.build()
    def load(self, file_path):
        """ Load a scene from a file (but no build it yet) """
        self.things = pickle.load(open(file_path, "rb"))
        if self.things:
            self._position = self.things[0].position
    def save(self, file_path):
        """ Save a scene to a file """
        # Clean the blocks_memory: it is not needed to recreate the scene
        # NOTE(review): clean_memory() is a module-level helper defined
        # elsewhere in this file (not visible in this excerpt).
        for thing in self.things:
            clean_memory(thing)
        pickle.dump(self.things, open(file_path, "wb"))
        # Reload the memory
        self.create()
    def find_bounding_box(self):
        """ Compute the bounding box of the Scene """
        # Default init values
        min_pos = Vec3(self._position.x, self._position.y, self._position.z)
        max_pos = Vec3(self._position.x, self._position.y, self._position.z)
        # Find the bounding box for the scene
        # NOTE(review): update_box() is a module-level helper defined
        # elsewhere in this file (not visible in this excerpt).
        for thing in self.things:
            min_pos, max_pos = update_box(min_pos, max_pos, thing.position)
            if thing.end_position:
                min_pos, max_pos = update_box(min_pos, max_pos, thing.end_position)
        return min_pos, max_pos
    def to_schematic(self, file_path, block_data=False):
        """
        Save the Scene into a Schematic file

        :param file_path: file in which to export the Scene in Schematic format
        :param block_data: extract blocks ids and data (much slower)
        :return: the Schematic object
        """
        (min_pos, max_pos) = self.find_bounding_box()
        build_schematic_nbt(min_pos, max_pos, block_data).write_file(file_path)
| [
2,
49962,
739,
262,
2846,
286,
2638,
1378,
2503,
13,
43073,
13,
2398,
14,
677,
4541,
14,
43,
2149,
24290,
12,
17,
13,
15,
198,
2,
6434,
357,
16224,
2599,
978,
7785,
78,
1619,
5833,
16111,
198,
198,
2,
16926,
46,
25,
379,
617,
96... | 2.444385 | 1,879 |
'''
Note: below code is 100 percent trial and error. I have no idea how
ast is actually supposed to be used. I brute forced this one with the
dir() function and a lot of patience.
Proceed with caution.
The basic idea is to
a. find the index of the Argparse.ArgumentParser assignment in the code
b. find the index of the parse_args() call
c. Get the name of the instance variable to which ArgumentParser was assigned
d. Use that knowledge to extract all of the add_argument calls inbetween the
index of the contructor, and the index of the parse_args() call.
We'll see how robust this turns out to be.
'''
import os
import ast
import sys
import parser
import random
import codegen
import argparse
from itertools import chain
class ParserError(Exception):
    '''Raised when the parser can't find argparse setup code in the client source.'''
def parse_argparse_statements(file_name):
    '''
    Parses the AST of the passed in Python file and extracts
    all code that sets up the ArgumentParser class.

    Returns a dict with the extracted source lines ('code'), the name of the
    parser variable ('parser_variable_name') and the program description
    ('description').
    '''
    nodes = ast.parse(open(os.path.abspath(file_name)).read())
    mainfunc = find_main(nodes)
    nodes = [node for node in mainfunc.body
             if is_func(node, 'add_argument')
             ]
    # get the main ArgumentParser assignment.
    # BUGFIX: on Python 3 filter() returns a lazy iterator which cannot be
    # indexed, so materialize the matches first (still raises IndexError if
    # no ArgumentParser assignment exists, as before).
    matches = [x for x in mainfunc.body if is_func(x, 'ArgumentParser')]
    argparse_assign_obj = matches[0]
    parser_var_name = get_assignment_name(argparse_assign_obj)
    prog_description = get_help_desc(argparse_assign_obj)
    ast_source = list(chain([argparse_assign_obj], nodes))
    # convert ast to python code.
    # BUGFIX: on Python 3 map() yields an iterator with no append(); build a
    # real list so the extra line can be appended below.
    code = list(map(codegen.to_source, ast_source))
    # Add line of code which stores the argparse values
    code.append('INFO = {}._actions'.format(parser_var_name))
    return {
        'code': code,
        'parser_variable_name': parser_var_name,
        'description': prog_description
    }
def markup(title, description, arg_info, format, noob):
    '''
    Loads the appropriate template based on the format argument and
    renders it with the dict content stored in the arg_info list.
    '''
    module_name = os.path.split(title)[-1]
    output = render(module_title=module_name,
                    prog_decription=description,
                    table_data=build_table(arg_info))
    # The templates cannot contain literal curly braces (they would break
    # str.format), so braces are escaped as [% and %] and restored here for
    # HTML output.
    if format == 'html':
        output = output.replace('[%', '{').replace('%]', '}')
    return output
def extract_values_from_code(code):
    '''
    extracts the _actions attribute from the client
    source file by:
    a. Saving code to a temporary python file.
    b. Importing the file as a module
    c. Grabing its stored values
    d. Deleting the python file.
    returns the attributes of the module.
    '''
    # NOTE(review): make_random_filename, save_to_module, load_module and
    # delete_module are not defined in this excerpt; `tmp_file` is assigned
    # but never used and the `code` parameter is never passed to the helpers.
    # This looks like an unfinished refactor -- confirm before relying on it.
    try:
        tmp_file = make_random_filename(20)
        save_to_module()
        module = load_module()
        return module.INFO
    finally:
        delete_module()
def test_run():
    '''Smoke test: parse a known-good sample file to catch obvious breakage.'''
    sample = 'test_input.py'
    parse_pyfile(sample)
def generate_doc(f=None, format='html', noob=1, success_msg=1):
    '''
    Decorator for client code's main function.
    It gets the name of the calling script, loads it
    into parse_pyfile(), and generates the documentation,
    before finally calling the main() function to resume
    execution as normal.

    :param f: either the decorated callable, or an argparse.ArgumentParser
        instance when used as a plain function call.
    :param format: output format passed through to the doc builder.
    '''
    # Handles if the passed in object is instance
    # of ArgumentParser. If so, it's being called as
    # a function, rather than a decorator
    if isinstance(f, argparse.ArgumentParser):
        filename = sys.argv[0]
        # NOTE(review): build_doc_from_parser_obj is not defined in this
        # excerpt -- presumably elsewhere in the module.
        build_doc_from_parser_obj(
            file_name=filename,
            parser_obj=f,
            format=format,
            noob=noob,
            success_msg=success_msg
        )
        return
    # --------------------------------- #
    # Below code is all decorator stuff #
    # --------------------------------- #
    # NOTE(review): generate_docs (plural) is not defined in this excerpt;
    # verify it is not a typo for another wrapper defined elsewhere.
    if callable(f):
        return generate_docs(f)
    return generate_docs
# @generate_doc
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this excerpt; the commented-out
    # decorator above suggests it was meant to be wrapped with @generate_doc.
    main()
| [
7061,
6,
201,
198,
201,
198,
6425,
25,
2174,
2438,
318,
1802,
1411,
4473,
290,
4049,
13,
314,
423,
645,
2126,
703,
220,
201,
198,
459,
318,
1682,
4385,
284,
307,
973,
13,
314,
33908,
4137,
428,
530,
351,
262,
220,
201,
198,
15908,... | 2.737305 | 1,477 |
from sqlalchemy.orm import sessionmaker
# from winner import Winner
from sqlalchemy import Column, Integer, String, Enum
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
# Declarative base all mapped classes derive from.
Base = declarative_base()
# Sample dataset for the ORM walkthrough.
# NOTE(review): some values are deliberately wrong (Dirac's nationality,
# Curie's sex/nationality) -- they appear to be placeholders exercised by the
# update/delete examples below; confirm against the original tutorial.
nobel_winners = [
    {'category': 'Physics',
     'name': 'Albert Einstein',
     'nationality': 'Swiss',
     'sex': 'male',
     'year': 1921},
    {'category': 'Physics',
     'name': 'Paul Dirac',
     'nationality': 'America',
     'sex': 'male',
     'year': 1933},
    {'category': 'Chemistry',
     'name': 'Marie Curie',
     'nationality': 'America',
     'sex': 'male',
     'year': 1911},
]
# echo=True logs every emitted SQL statement to stdout.
engine = create_engine("sqlite:///nobel_prize.db", echo=True)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
# NOTE(review): the Winner mapped class is not defined in this excerpt -- its
# import is commented out at the top of the file; this script will not run
# as-is. inst_to_dict (used below) is likewise defined elsewhere.
albert = Winner(**nobel_winners[0])
print(albert)
# add() stages the object; expunge() removes it from the session again.
session.add(albert)
print(session.new)
session.expunge(albert)
print(session.new)
winner_rows = [Winner(**w) for w in nobel_winners]
print(winner_rows)
session.add_all(winner_rows)
print(session.commit())
print(session.query(Winner).count())
result = session.query(Winner).filter_by(nationality='Swiss')
print(list(result))
result = session.query(Winner).filter(Winner.category == 'Physics',
                                      Winner.nationality != 'Swiss')
print(list(result))
result = session.query(Winner).get(3)
print(result)
res = session.query(Winner).order_by('year')
print(res)
winner_rows = session.query(Winner)
nobel_winners = [inst_to_dict(w) for w in winner_rows]
print(nobel_winners)
# Mutating a mapped attribute marks the instance dirty until commit.
marie = session.query(Winner).get(3)
marie.nationality = 'French'
print(session.dirty)
print(session.commit())
print(session.query(Winner).filter_by(name='Albert Einstein').delete())
print(list(session.query(Winner)))
| [
6738,
44161,
282,
26599,
13,
579,
1330,
6246,
10297,
198,
2,
422,
8464,
1330,
25358,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
34142,
11,
10903,
11,
2039,
388,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
44161,
... | 2.633824 | 680 |
"""Wrapper for netCDF readers."""
from __future__ import division, print_function
import os.path
from pymatgen.core.units import ArrayWithUnit
from pymatgen.core.structure import Structure
from monty.dev import requires
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo at gmail.com"
__status__ = "Development"
__date__ = "$Feb 21, 2013M$"
# Public API of this module.
# NOTE(review): "as_etsfreader" is exported but not defined in this excerpt --
# presumably elsewhere in the file, alongside the _asreader helper.
__all__ = [
    "as_ncreader",
    "as_etsfreader",
    "NetcdfReader",
    "ETSF_Reader",
    "structure_from_etsf_file",
]
# netCDF4 is optional: keep a None sentinel so @requires can produce a clear
# error at class-instantiation time instead of failing at import time.
try:
    import netCDF4
except ImportError:
    netCDF4 = None
def as_ncreader(file):
    """
    Convert file into a NetcdfReader instance.
    Returns reader, closeit where closeit is set to True
    if we have to close the file before leaving the procedure.

    Note: `file` may be a path or an already-open NetcdfReader; the shared
    _asreader helper (defined elsewhere in this module) handles both cases.
    """
    return _asreader(file, NetcdfReader)
class NetcdfReaderError(Exception):
    """Raised for errors encountered by NetcdfReader (base error class)."""
class NetcdfReader(object):
    """
    Wraps and extends netCDF4.Dataset. Read only mode. Supports with statements.
    Additional documentation available at:
        http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4-module.html
    """
    Error = NetcdfReaderError
    @requires(netCDF4 is not None, "netCDF4 must be installed to use this class")
    def __init__(self, path):
        """Open the Netcdf file specified by path (read mode)."""
        self.path = os.path.abspath(path)
        try:
            self.rootgrp = netCDF4.Dataset(self.path, mode="r")
        except Exception as exc:
            raise self.Error("%s: %s" % (self.path, str(exc)))
        self.ngroups = len(list(self.walk_tree()))
        # NOTE(review): path2group is commented out below, but read_varnames
        # still references self.path2group for non-root paths -- that branch
        # would raise AttributeError as written; confirm intended.
        #self.path2group = collections.OrderedDict()
        #for children in self.walk_tree():
        #    for child in children:
        #        #print child.group, child.path
        #        self.path2group[child.path] = child.group
    def __enter__(self):
        """Activated when used in the with statement."""
        return self
    def __exit__(self, type, value, traceback):
        """
        Activated at the end of the with statement. It automatically closes the file.
        """
        self.rootgrp.close()
    #@staticmethod
    #def pathjoin(*args):
    #    return "/".join(args)
    def walk_tree(self, top=None):
        """
        Navigate all the groups in the file starting from top.
        If top is None, the root group is used.
        """
        if top is None:
            top = self.rootgrp
        values = top.groups.values()
        yield values
        for value in top.groups.values():
            for children in self.walk_tree(value):
                yield children
    def read_dimvalue(self, dimname, path="/"):
        """Returns the value (length) of the dimension named dimname."""
        # NOTE(review): _read_dimensions is not defined in this excerpt --
        # presumably elsewhere in the class/file.
        dim = self._read_dimensions(dimname, path=path)[0]
        return len(dim)
    def read_varnames(self, path="/"):
        """List of variable names stored in the group specified by path."""
        if path == "/":
            return self.rootgrp.variables.keys()
        else:
            # NOTE(review): depends on self.path2group, which is commented
            # out in __init__ (see above).
            group = self.path2group[path]
            return group.variables.keys()
    def read_value(self, varname, path="/", cmode=None):
        """
        Returns the values of variable with name varname in the group specified by path.

        Args:
            varname:
                Name of the variable
            path:
                path to the group.
            cmode:
                if cmode=="c", a complex ndarrays is constructed and returned
                (netcdf does not provide native support from complex datatype).
        """
        # NOTE(review): this try/except re-raise is a no-op kept from the
        # original; it adds nothing over calling read_variable directly.
        try:
            var = self.read_variable(varname, path=path)
        except:
            raise
        if cmode is None:
            # scalar or array
            return var[0] if not var.shape else var[:]
        else:
            # Last axis must hold the (real, imag) pair.
            assert var.shape[-1] == 2
            if cmode == "c":
                return var[...,0] + 1j*var[...,1]
            else:
                raise ValueError("Wrong value for cmode %s" % cmode)
    def read_variable(self, varname, path="/"):
        """
        Returns the variable with name varname in the group specified by path.
        """
        # NOTE(review): _read_variables is not defined in this excerpt --
        # presumably elsewhere in the class/file.
        return self._read_variables(varname, path=path)[0]
    def read_values_with_map(self, names, map_names=None, path="/"):
        """
        Read (dimensions, variables) with a mapping.

        Args:
            names:
                list of netCDF keywords to read.
            map_names:
                dictionary used to map names to the netCDF keywords used to access data on file.
            path:
                Used to access groups.

        returns: od, missing
            od is the dictionary. Values are stored in d[name] for name in names.
            missing is a list of 2-d tuple with the keywords that are not found.
        """
        if map_names is None:
            map_names = {}
        od, missing = {}, []
        for k in names:
            try:
                key = map_names[k]
            except KeyError:
                # Read k.
                key = k
            try:
                # Try to read a variable.
                od[k] = self.read_value(key, path=path)
            except self.Error:
                try:
                    # Try to read a dimension.
                    od[k] = self.read_dimvalue(key, path=path)
                except self.Error:
                    # key is missing!
                    missing.append((k, key))
        return od, missing
class ETSF_Reader(NetcdfReader):
    """
    This object reads data from a file written according to the
    ETSF-IO specifications.
    We assume that the netcdf file contains at least the crystallographic section.
    """
    @property
    def chemical_symbols(self):
        """Chemical symbols char [number of atom species][symbol length]."""
        # Cached on first access; the file stores symbols as char arrays that
        # are joined into plain strings here.
        if not hasattr(self, "_chemical_symbols"):
            symbols = self.read_value("chemical_symbols")
            self._chemical_symbols = []
            for s in symbols:
                self._chemical_symbols.append("".join(s))
        return self._chemical_symbols
    def typeidx_from_symbol(self, symbol):
        """Returns the type index from the chemical symbol. Note python convention.

        NOTE(review): reads self._chemical_symbols directly, so this raises
        AttributeError unless the chemical_symbols property has been accessed
        first -- consider going through the property instead.
        """
        return self._chemical_symbols.index(symbol)
    def read_structure(self):
        """
        Returns the crystalline structure read from the file.
        """
        if self.ngroups != 1:
            raise NotImplementedError("ngroups != 1")
        return structure_from_etsf_file(self)
def structure_from_etsf_file(ncdata, site_properties=None):
    """
    Reads and returns a pymatgen structure from a NetCDF file
    containing crystallographic data in the ETSF-IO format.

    Args:
        ncdata:
            filename or NetcdfReader instance.
        site_properties:
            Dictionary with site properties.
    """
    ncdata, closeit = as_ncreader(ncdata)
    # TODO check whether atomic units are used
    lattice = ArrayWithUnit(ncdata.read_value("primitive_vectors"),
                            "bohr").to("ang")
    red_coords = ncdata.read_value("reduced_atom_positions")
    natom = len(red_coords)
    znucl_type = ncdata.read_value("atomic_numbers")
    # type_atom[0:natom] --> index Between 1 and number of atom species
    type_atom = ncdata.read_value("atom_species")
    # Fortran to C index and float --> int conversion.
    species = natom * [None]
    for atom in range(natom):
        type_idx = type_atom[atom] - 1
        species[atom] = int(znucl_type[type_idx])
    d = {}
    if site_properties is not None:
        for prop in site_properties:
            # BUGFIX: the original wrote d[property] (the builtin), collapsing
            # every site property onto a single bogus key; store under the
            # loop variable instead.
            d[prop] = ncdata.read_value(prop)
    structure = Structure(lattice, species, red_coords, site_properties=d)
    # Quick and dirty hack.
    # I need an abipy structure since I need to_abivars and other methods.
    try:
        from abipy.core.structure import Structure as AbipyStructure
        structure.__class__ = AbipyStructure
    except ImportError:
        pass
    if closeit:
        ncdata.close()
    return structure
| [
37811,
36918,
2848,
329,
2010,
34,
8068,
7183,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
28686,
13,
6978,
198,
198,
6738,
279,
4948,
265,
5235,
13,
7295,
13,
41667,
1330,
15690,
3152,
2... | 2.272579 | 3,687 |
"""E2E 테스트 모듈입니다."""
from collections import defaultdict
from fastmsa.core import AbstractPubsubClient
def check_port_opened(port: int):
    """Return True if a TCP connection to 127.0.0.1:`port` succeeds, else False.

    Used by the e2e tests to check whether a service port is open.
    """
    import socket
    # connect_ex returns 0 on success and an errno value on failure; the
    # with-statement guarantees the socket is closed on every path,
    # replacing the original try/finally + if/else boilerplate.
    location = ("127.0.0.1", port)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as a_socket:
        return a_socket.connect_ex(location) == 0
| [
37811,
36,
17,
36,
220,
169,
227,
234,
168,
232,
97,
169,
232,
116,
31619,
103,
101,
167,
241,
230,
168,
252,
227,
46695,
230,
46695,
97,
526,
15931,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
6738,
3049,
907,
64,
13,
7295,
13... | 1.780919 | 283 |
#!/usr/bin/env python3
# Author: Redmar van den Berg
# Copyright: 2017-2018 NVWA
#
# NVWA: Nederlandse Voedsel- en Warenautoriteit
# Netherlands Food and Consumer Product Safety Authority
# https://english.nvwa.nl/
#
# Licence: GPLv3
import os
import sys
import subprocess
import shutil
import tempfile
import unittest
import itertools
from functools import partial
from contextlib import contextmanager
from pprint import pprint
import pickle
import Bio
from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
class pyBlast():
    """ A full python wrapper around makeblastdb and ncbi-blast+

    Uses temporary files and context manager to make sure that nothing
    remains on the file system after it has ran.

    Please note, this should not be used for very large datasets since
    both the database creation and the blast search itself are done on the fly
    for every invocation
    """
    # NOTE(review): __init__/__enter__/__exit__ are not visible in this
    # excerpt; self.verbose is presumably set by the constructor -- confirm.
    def run_blast(self, cmd):
        """ Run the ncbi blast command and return the location of the file """
        if self.verbose:
            print(cmd)
        cmd()

    def _guess_filetype(self, filename):
        """ Try to guess the filetype from the content

        Returns 'blastdb', 'fasta' or 'xml' (None if nothing matches).
        """
        # If the input is the name of the blastdb, the file itself should not
        # exist, but the derived index files (<filename>.nhr/.nin/.nsq) should.
        # BUGFIX: the original tested the literal string '(unknown).<ext>'
        # (extraction damage) instead of names derived from `filename`.
        if not os.path.exists(filename):
            derived = [f'{filename}.{ext}' for ext in ['nhr', 'nin', 'nsq']]
            if all(os.path.exists(fname) for fname in derived):
                return 'blastdb'
        # If the file exists, we open it and have a peek at the content
        with open(filename, 'r') as fin:
            line = next(fin)
            if line.startswith('>'):
                return 'fasta'
            elif line.startswith('<?xml version="1.0"?>'):
                return 'xml'
class pyBlastFlat(pyBlast):
    """ Yield flattened Blast Records

    Each record is guaranteed to have:
        - One Alignment per Record
        - One Hsp per Alignment

    Records without alignments are silently dropped
    Multiple Alignments or Hsps are returned in individual Records
    """
    def mismatch(self, record):
        """ Return the number of mismatches in a blast hit

        Stores the result on record.mismatch (mutates the record).
        """
        # This function only works with flat blast Records
        if hasattr(record, 'alignments'):
            raise RuntimeError('only flat Records are supported')
        alignment = record.alignment
        hsp = record.alignment.hsp
        # I don't know what the difference between these is
        assert hsp.identities == hsp.positives
        # The alignment and the query are the same length
        # We only count the mismatches
        if record.query_length == hsp.align_length:
            record.mismatch = record.query_length - hsp.identities
        # When the hit is longer then the query, we also count the gaps as
        # mismatches
        elif record.query_length < hsp.align_length:
            record.mismatch = hsp.align_length - hsp.identities
        elif record.query_length > hsp.align_length:
            record.mismatch = record.query_length - hsp.identities
        else:
            raise NotImplementedError('unknown composition of blast hit')
    # NOTE(review): records_overlap takes no self and has no @staticmethod
    # decorator -- calling it through an instance would mis-bind record1 to
    # self; confirm whether a decorator was lost in extraction.
    def records_overlap(record1, record2):
        """ Records overlap when their targets overlap """
        if hasattr(record1, 'alignments') or hasattr(record2, 'alignments'):
            raise RuntimeError('only flat Records are supported')
        # Records cannot overlap if they are to different sequences
        if record1.alignment.hit_def != record2.alignment.hit_def:
            return False
        hsp1 = record1.alignment.hsp
        hsp2 = record2.alignment.hsp
        return _hit_overlap(hsp1, hsp2)
    @staticmethod
    def best_hits_pos(records):
        """ Find the best hits for each position on the genome """
        # NOTE(review): the helpers below call a free function `overlap(...)`
        # that is not defined in this excerpt -- possibly a module-level alias
        # of records_overlap defined elsewhere; confirm before use.
        def list_overlap(record1, lijst):
            """ Do any of the hits in lijst overlap hit1 """
            for record in lijst:
                if overlap(record1, record):
                    return True
            else:
                return False
        def better_hit(record1, lijst):
            """ Is hit better then every hits in lijst it overlaps with """
            # If we find record1 is better then another record,
            # we can't return True yet, because it might be worse then another
            # record it *also* overlaps with. That is why we store the boolean
            # and only return it once we have exhausted lijst
            better=False
            for record in lijst:
                if overlap(record1, record):
                    if record1.mismatch >= record.mismatch:
                        # record1 is worse or equal to an existing
                        # overlapping record
                        return False
                    elif record1.mismatch < record.mismatch:
                        # record1 is better than an existing
                        # overlapping record
                        better=True
            else:
                return better
        def remove_worse(record1, lijst):
            """ Remove all hits that overlap hit1 but have more mismatches """
            # Be sure to iterate over a copy of lijst
            for record in lijst[:]:
                is_better = record1.mismatch < record.mismatch
                if overlap(record1, record) and is_better:
                    lijst.remove(record)
        def equal_hit(hit1, lijst):
            """ Is hit equal to one or more hits in lijst """
            for hit in lijst:
                if overlap(hit1, hit) and hit1.mismatch == hit.mismatch:
                    return True
            else:
                return False
        def test_invariant(lijst):
            """ Invariant: no hit in lijst is better then an overlapping hit """
            for hit1, hit2 in itertools.combinations(lijst, 2):
                # If hit1 and 2 overlap, and they have different mismatch scores
                msg='{} and {} overlap and have different mismatch scores ({} and {})'
                if overlap(hit1, hit2) and hit1.mismatch != hit2.mismatch:
                    raise AssertionError(
                            (msg.format(hit1.query_id,
                                        hit2.query_id,
                                        hit1.mismatch,
                                        hit2.mismatch)
                            )
                    )
        best = list()
        for record in records:
            test_invariant(best)
            # If hit is new
            if not list_overlap(record, best):
                best.append(record)
            # If hit is better then an existing hit
            elif better_hit(record, best):
                remove_worse(record, best)
                best.append(record)
        return sorted(best, key=lambda x: x.mismatch)
    @staticmethod
    def best_hits_gene(records, name=str):
        """ Return only the best hit for each gene

        The Optional function 'name' will be called on record.query to
        create the name for the gene that will be used to find the best hit
        for each gene. This comes in handy when your database contains many
        alleles of the same gene, and you are only interested in the best
        hit.

        For example, you might want to treat CMY-2_1 and CMY-2_23 the same
        if the underscore only denotes a different allele.
        In that case, specify name=lambda x: x.split('_')[0]
        """
        best = dict()
        for record in records:
            gene_name = name(record.query)
            if gene_name not in best:
                best[gene_name] = record
            elif record.mismatch < best[gene_name].mismatch:
                # NOTE(review): this stores under record.query while the dict
                # is otherwise keyed by gene_name -- a better hit would be
                # filed under a different key; looks like a bug, confirm.
                best[record.query] = record
        return best.values()
    @staticmethod
    def fasta(record):
        """ Return the Blast object as SeqRecord """
        # This function only works with flat blast Records
        if hasattr(record, 'alignments'):
            raise RuntimeError('only flat Records are supported')
        return SeqRecord(
                Seq(record.alignment.hsp.sbjct),
                id = record.query,
                description='{}:{}-{}'.format(
                    record.alignment.hit_def,
                    record.alignment.hsp.sbjct_start,
                    record.alignment.hsp.sbjct_end
                )
        )
def _minmax(*args):
""" Return the min and max of the input arguments """
min_ = min(*args)
max_ = max(*args)
return(min_, max_)
def _srange(begin, end):
""" Return a set based on range """
return set(range(begin, end))
def _hit_overlap(hsp1, hsp2):
    """ Determine whether the subject ranges of two hsps overlap """
    # Normalise each hit to (low, high) regardless of strand orientation.
    first_lo, first_hi = _minmax(hsp1.sbjct_start, hsp1.sbjct_end)
    second_lo, second_hi = _minmax(hsp2.sbjct_start, hsp2.sbjct_end)
    covered_first = _srange(first_lo, first_hi)
    covered_second = _srange(second_lo, second_hi)
    return bool(covered_first & covered_second)
if __name__ == '__main__':
    # Demo: run a single blastn query against a contig database and print the
    # flattened hits in FASTA form.
    from Bio.Blast.Applications import NcbiblastnCommandline
    from Bio import SeqIO
    cmd=NcbiblastnCommandline(
            query = 'test/sul2_1_AF542061.fasta',
            db = 'test/102637-001-018_k64-contigs.fa',
            evalue = 0.001
    )
    # NOTE(review): pyBlastFlat's __init__/__enter__/__iter__ (accepting
    # rm_tmp/min_cov/verbose) are not visible in this excerpt -- presumably
    # defined elsewhere in the original module.
    with pyBlastFlat(cmd, rm_tmp=False, min_cov=0.5, verbose=True) as pb:
        for record in pb:
            fasta = pyBlastFlat.fasta(record)
            print(fasta)
            print(SeqIO.write(fasta, sys.stdout, 'fasta'))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
6434,
25,
2297,
3876,
5719,
2853,
24626,
198,
2,
15069,
25,
2177,
12,
7908,
23973,
15543,
198,
2,
198,
2,
23973,
15543,
25,
399,
5702,
1044,
325,
20687,
276,
741,
12,
5... | 2.241766 | 4,281 |
import torch
import torch.nn as nn
import torch.nn.functional as F
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198
] | 3.35 | 20 |
import matplotlib.pyplot as plt
import pandas as pd
# Source CSV with the combined observation data (local Windows path).
path = 'D:/TAMU Work/TAMU 2022 SPRING/OCEN 460/combined_data.csv'
raw = pd.read_csv(path)
# Summary statistics before cleaning, for comparison with the cleaned frame.
print(raw.describe())
# Drop every row that contains at least one missing value.
raw = raw.dropna(how='any')
print(raw.describe())
# NOTE(review): writes next to the source file; the index column is kept by
# default -- confirm that is intended.
raw.to_csv('D:/TAMU Work/TAMU 2022 SPRING/OCEN 460/combined_data_truncated.csv')
# plt.scatter(raw['longitude'], raw['latitude'], s=0.2, c=raw['depth'])
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6978,
796,
705,
35,
14079,
51,
2390,
52,
5521,
14,
51,
2390,
52,
33160,
49068,
2751,
14,
4503,
1677,
34091,
14,
24011,
1389,
62... | 2.31875 | 160 |
import pytest
from dart_fss.fs.extract import find_all_columns
from dart_fss.utils import str_compare
| [
11748,
12972,
9288,
198,
6738,
35970,
62,
69,
824,
13,
9501,
13,
2302,
974,
1330,
1064,
62,
439,
62,
28665,
82,
198,
6738,
35970,
62,
69,
824,
13,
26791,
1330,
965,
62,
5589,
533,
628,
198
] | 2.888889 | 36 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Patrick Lumban Tobing (Nagoya University)
# based on a VC implementation by Kazuhiro Kobayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import os
from pathlib import Path
import logging
import matplotlib
import numpy as np
from utils import check_hdf5
from utils import read_hdf5
from utils import read_txt
from utils import write_hdf5
# Select the non-interactive Agg backend before importing pyplot so figures
# can be rendered without a display server.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this file as shown -- presumably
    # stripped or defined elsewhere; confirm against the original source.
    main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
13130,
9925,
406,
2178,
272,
13695,
278,
357,
45,
363,
23790,
2059,
8,
198,
2,
1912,
319,
257,... | 2.817259 | 197 |
# coding: utf-8
"""
뉴스
"""
from django.core.management.base import BaseCommand
from nyuseu.models import Articles
import pypandoc
from rich.console import Console
from rich.table import Table
from rich.markdown import Markdown
console = Console()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
37811,
198,
167,
231,
112,
168,
232,
97,
198,
37811,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
6738,
299,
88,
1904,
84,
13,
27530,
1330,
22698,
198,
11748,
... | 3.233766 | 77 |
# -*- coding: utf-8 -*-
u"""OPAL parser.
:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdlog, pkdp
from sirepo.template import lattice
from sirepo.template.code_variable import CodeVar
from sirepo.template.lattice import LatticeUtil
import math
import os.path
import re
import sirepo.sim_data
# Maps OPAL lowercase material codes to uppercase material names.
# NOTE(review): presumably consumed when translating parsed OPAL input --
# confirm against the callers in this module.
_MATERIAL_CODE_TO_NAME = PKDict(
    al='ALUMINUM',
    be='BERYLLIUM',
    cu='COPPER',
    au='GOLD',
    mo='MOLYBDENUM',
    ti='TITANIUM',
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
84,
37811,
3185,
1847,
30751,
13,
198,
198,
25,
22163,
4766,
25,
15069,
357,
66,
8,
12131,
5325,
544,
18380,
11419,
13,
220,
1439,
6923,
33876,
13,
198,
25,
43085,
25,... | 2.404 | 250 |
# Prints two short "awesome" messages about the same person at two ages.
name = 'hdert'
age = 13
finished = False
while not finished:
    print(name + ' is\nawesome and also')
    print(age)
    print('years old')
    print()
    # Second message: same name, one year older.
    name = 'hdert'
    age = 14
    print('now ' + name + '\nis')
    print(age)
    print('years old and\nstill awesome')
    finished = True
| [
3672,
796,
705,
31298,
861,
6,
201,
198,
496,
796,
1511,
201,
198,
26268,
796,
19203,
37,
23719,
11537,
201,
198,
201,
198,
4514,
9052,
6624,
705,
37,
23719,
10354,
201,
198,
220,
220,
220,
3601,
7,
3672,
1343,
705,
318,
59,
77,
7... | 2.157534 | 146 |
import unittest
import pymongo
import mtq
import logging
# Module-level logger for the mtq test helpers; INFO keeps output informative
# without DEBUG noise.
log = logging.getLogger('mtq.test')
log.setLevel(logging.INFO)
| [
11748,
555,
715,
395,
198,
11748,
279,
4948,
25162,
198,
11748,
45079,
80,
198,
11748,
18931,
198,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
10786,
16762,
80,
13,
9288,
11537,
198,
6404,
13,
2617,
4971,
7,
6404,
2667,
13,
10778,
8... | 2.818182 | 44 |
import pygame
##### INIT SECTION
# Define some colors (RGB tuples)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
# Window size in pixels (width, height)
size = (700, 500)
done = False
pygame.init()
screen = pygame.display.set_mode(size)
# Clock regulates the frame rate at the bottom of the loop
clock = pygame.time.Clock()
pygame.display.set_caption("My Game")
#### you can put custom INITs here.
###### WHILE LOOP SECTION
# -------- Main Program Loop -----------
while not done:
    #### EVENT CHECK SECTION
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
    # Clear the frame before drawing
    screen.fill(WHITE)
    #### ACTION SECTION
    #### FINISHING CODE
    pygame.display.flip()
    # --- Limit to 60 frames per second
    clock.tick(60)
# Close the window and quit.
pygame.quit()
| [
11748,
12972,
6057,
628,
198,
4242,
2,
3268,
2043,
44513,
198,
2,
2896,
500,
617,
7577,
198,
9148,
8120,
796,
357,
15,
11,
657,
11,
657,
8,
198,
12418,
12709,
796,
357,
13381,
11,
14280,
11,
14280,
8,
198,
43016,
796,
357,
15,
11,... | 2.47541 | 305 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for displaying TFDV outputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import sys
from typing import Dict, List, Optional, Text, Tuple, Union
import pandas as pd
from tensorflow_data_validation import types
from tensorflow_data_validation.utils import stats_util
from tensorflow_metadata.proto.v0 import anomalies_pb2
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
try:
# pylint: disable=g-import-not-at-top
from IPython.display import display
from IPython.display import HTML
except ImportError as e:
sys.stderr.write('Unable to import IPython: {}. \n'
'TFDV visualization APIs will not function. To use '
'visualization features, make sure IPython is installed, or '
'install TFDV using '
'"pip install tensorflow-data-validation[visualization]"\n'
.format(e))
_NL_CUSTOM_STATS_NAME = 'nl_statistics'
_TOKEN_NAME_KEY = 'token_name'
_FREQUENCY_KEY = 'frequency'
_FRACTION_OF_SEQ_KEY = 'fraction_of_sequences'
_PER_SEQ_MIN_FREQ_KEY = 'per_sequence_min_frequency'
_PER_SEQ_MAX_FREQ_KEY = 'per_sequence_max_frequency'
_PER_SEQ_AVG_FREQ_KEY = 'per_sequence_avg_frequency'
_POSITIONS_KEY = 'positions'
def get_schema_dataframe(
    schema: schema_pb2.Schema) -> Tuple[pd.DataFrame, pd.DataFrame]:
  """Returns a tuple of DataFrames containing the input schema information.
  Args:
    schema: A Schema protocol buffer.
  Returns:
    A tuple of DataFrames containing the features and domains of the schema.
  Raises:
    TypeError: If `schema` is not a Schema proto.
  """
  if not isinstance(schema, schema_pb2.Schema):
    raise TypeError('schema is of type %s, should be a Schema proto.' %
                    type(schema).__name__)
  # Extract all the string domains at the schema level.
  domain_rows = []
  for domain in schema.string_domain:
    domain_rows.append(
        [_add_quotes(domain.name),
         ', '.join(_add_quotes(v) for v in domain.value)])
  feature_rows = []
  # Iterate over the features in the schema and extract the properties of each
  # feature.
  for feature in schema.feature:
    # Extract the presence information of the feature.
    # min_fraction == 1.0 means the feature must appear in every example.
    if feature.HasField('presence'):
      if feature.presence.min_fraction == 1.0:
        feature_presence = 'required'
      else:
        feature_presence = 'optional'
    else:
      feature_presence = ''
    # Extract the valency information of the feature.
    # 'single' when exactly one value per example; otherwise an interval
    # string such as '[1,inf)'.
    valency = ''
    if feature.HasField('value_count'):
      if (feature.value_count.min == feature.value_count.max and
          feature.value_count.min == 1):
        valency = 'single'
      else:
        min_value_count = ('[%d' % feature.value_count.min
                           if feature.value_count.HasField('min') else '[0')
        max_value_count = ('%d]' % feature.value_count.max
                           if feature.value_count.HasField('max') else 'inf)')
        valency = min_value_count + ',' + max_value_count
    # Extract the feature type.
    feature_type = schema_pb2.FeatureType.Name(feature.type)
    # If the feature has a string domain, treat it as a string feature.
    if feature_type == 'BYTES' and (feature.HasField('domain') or
                                    feature.HasField('string_domain')):
      feature_type = 'STRING'
    # Extract the domain (if any) of the feature.
    domain = '-'
    if feature.HasField('domain'):
      domain = _add_quotes(feature.domain)
    elif feature.HasField('int_domain'):
      min_string = ('min: %d' % feature.int_domain.min
                    if feature.int_domain.HasField('min') else None)
      max_string = ('max: %d' % feature.int_domain.max
                    if feature.int_domain.HasField('max') else None)
      domain = combine_min_max_strings(min_string, max_string)
    elif feature.HasField('float_domain'):
      # Unlike int_domain, an absent float bound means +/-inf unless
      # disallow_inf is set.
      if feature.float_domain.HasField('min'):
        min_string = 'min: %f' % feature.float_domain.min
      elif feature.float_domain.disallow_inf:
        min_string = None
      else:
        min_string = 'min: -inf'
      if feature.float_domain.HasField('max'):
        max_string = 'max: %f' % feature.float_domain.max
      elif feature.float_domain.disallow_inf:
        max_string = None
      else:
        max_string = 'max: inf'
      domain = combine_min_max_strings(min_string, max_string)
    elif feature.HasField('string_domain'):
      # Inline string domains are also appended to the domains table.
      domain = _add_quotes(feature.string_domain.name if
                           feature.string_domain.name else
                           feature.name + '_domain')
      domain_rows.append([domain,
                          ', '.join(_add_quotes(v) for v in
                                    feature.string_domain.value)])
    feature_rows.append(
        [_add_quotes(feature.name), feature_type, feature_presence, valency,
         domain])
  features = pd.DataFrame(
      feature_rows,
      columns=['Feature name', 'Type', 'Presence', 'Valency',
               'Domain']).set_index('Feature name')
  domains = pd.DataFrame(
      domain_rows, columns=['Domain', 'Values']).set_index('Domain')
  return features, domains
def display_schema(schema: schema_pb2.Schema) -> None:
  """Displays the input schema (for use in a Jupyter notebook).
  Args:
    schema: A Schema protocol buffer.
  """
  feature_table, domain_table = get_schema_dataframe(schema)
  display(feature_table)
  if domain_table.empty:
    return
  # Widen the column limit so long domain value lists are not truncated.
  pd.set_option('max_colwidth', None)
  display(domain_table)
def get_anomalies_dataframe(anomalies: anomalies_pb2.Anomalies) -> pd.DataFrame:
  """Returns a DataFrame containing the input anomalies.
  Args:
    anomalies: An Anomalies protocol buffer.
  Returns:
    A DataFrame containing the input anomalies, or an empty DataFrame if there
    are no anomalies.
  Raises:
    TypeError: If `anomalies` is not an Anomalies proto.
  """
  if not isinstance(anomalies, anomalies_pb2.Anomalies):
    raise TypeError('anomalies is of type %s, should be an Anomalies proto.' %
                    type(anomalies).__name__)
  # One row per anomalous feature, in proto iteration order.
  rows = [[_add_quotes(name), info.short_description, info.description]
          for name, info in anomalies.anomaly_info.items()]
  if anomalies.HasField('dataset_anomaly_info'):
    dataset_info = anomalies.dataset_anomaly_info
    rows.append(['[dataset anomaly]', dataset_info.short_description,
                 dataset_info.description])
  frame = pd.DataFrame(
      rows,
      columns=[
          'Feature name', 'Anomaly short description',
          'Anomaly long description'
      ]).set_index('Feature name')
  # Do not truncate columns.
  pd.set_option('max_colwidth', None)
  return frame
def display_anomalies(anomalies: anomalies_pb2.Anomalies) -> None:
  """Displays the input anomalies (for use in a Jupyter notebook).
  Args:
    anomalies: An Anomalies protocol buffer.
  """
  frame = get_anomalies_dataframe(anomalies)
  if not frame.empty:
    display(frame)
  else:
    # Render an explicit all-clear instead of an empty table.
    display(HTML('<h4 style="color:green;">No anomalies found.</h4>'))
def _project_statistics(
    statistics: statistics_pb2.DatasetFeatureStatisticsList,
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None
) -> statistics_pb2.DatasetFeatureStatisticsList:
  """Project statistics proto based on allowlist and denylist features.

  Returns the input proto unchanged when neither list is given; otherwise
  returns a new proto keeping only allowlisted features (or dropping
  denylisted ones).  The allowlist takes precedence when both are set.
  """
  if allowlist_features is None and denylist_features is None:
    return statistics
  result = statistics_pb2.DatasetFeatureStatisticsList()
  for dataset_stats in statistics.datasets:
    # Copy all dataset-level fields, then rebuild the feature list filtered.
    result_dataset_stats = result.datasets.add()
    result_dataset_stats.MergeFrom(dataset_stats)
    del result_dataset_stats.features[:]
    if allowlist_features is not None:
      # Set for O(1) membership tests per feature.
      allowlist_features = set(allowlist_features)
      for feature in dataset_stats.features:
        if types.FeaturePath.from_proto(feature.path) in allowlist_features:
          result_dataset_stats.features.add().MergeFrom(feature)
    else:
      denylist_features = set(denylist_features)
      for feature in dataset_stats.features:
        if types.FeaturePath.from_proto(feature.path) in denylist_features:
          continue
        result_dataset_stats.features.add().MergeFrom(feature)
  return result
def _get_combined_statistics(
    lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList,
    rhs_statistics: Optional[
        statistics_pb2.DatasetFeatureStatisticsList] = None,
    lhs_name: Text = 'lhs_statistics',
    rhs_name: Text = 'rhs_statistics',
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None
) -> statistics_pb2.DatasetFeatureStatisticsList:
  """Get combined datatset statistics list proto.

  Merges the (projected) lhs and optional rhs single-dataset protos into one
  two-dataset proto, resolving dataset names: a name embedded in a proto wins
  over the corresponding argument, and if both sides end up with the same
  name both revert to the defaults.
  """
  if not isinstance(lhs_statistics,
                    statistics_pb2.DatasetFeatureStatisticsList):
    raise TypeError(
        'lhs_statistics is of type %s, should be '
        'a DatasetFeatureStatisticsList proto.' % type(lhs_statistics).__name__)
  if not lhs_statistics.datasets:
    raise ValueError('lhs_statistics proto contains no dataset.')
  if len(lhs_statistics.datasets) != 1:
    raise ValueError('lhs_statistics proto contains multiple datasets. Only '
                     'one dataset is currently supported.')
  if lhs_statistics.datasets[0].name:
    lhs_name = lhs_statistics.datasets[0].name
  # Add lhs stats.
  lhs_statistics = _project_statistics(
      lhs_statistics, allowlist_features, denylist_features)
  combined_statistics = statistics_pb2.DatasetFeatureStatisticsList()
  lhs_stats_copy = combined_statistics.datasets.add()
  lhs_stats_copy.MergeFrom(lhs_statistics.datasets[0])
  if rhs_statistics is not None:
    if not isinstance(rhs_statistics,
                      statistics_pb2.DatasetFeatureStatisticsList):
      raise TypeError('rhs_statistics is of type %s, should be a '
                      'DatasetFeatureStatisticsList proto.'
                      % type(rhs_statistics).__name__)
    if len(rhs_statistics.datasets) != 1:
      raise ValueError('rhs_statistics proto contains multiple datasets. Only '
                       'one dataset is currently supported.')
    if rhs_statistics.datasets[0].name:
      rhs_name = rhs_statistics.datasets[0].name
    # If we have same name, revert to default names.
    if lhs_name == rhs_name:
      lhs_name, rhs_name = 'lhs_statistics', 'rhs_statistics'
    # Add rhs stats.
    rhs_statistics = _project_statistics(
        rhs_statistics, allowlist_features, denylist_features)
    rhs_stats_copy = combined_statistics.datasets.add()
    rhs_stats_copy.MergeFrom(rhs_statistics.datasets[0])
    rhs_stats_copy.name = rhs_name
  # Update lhs name.
  lhs_stats_copy.name = lhs_name
  return combined_statistics
def get_statistics_html(
    lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList,
    rhs_statistics: Optional[
        statistics_pb2.DatasetFeatureStatisticsList] = None,
    lhs_name: Text = 'lhs_statistics',
    rhs_name: Text = 'rhs_statistics',
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None
) -> Text:
  """Build the HTML for visualizing the input statistics using Facets.
  Args:
    lhs_statistics: A DatasetFeatureStatisticsList protocol buffer.
    rhs_statistics: An optional DatasetFeatureStatisticsList protocol buffer to
      compare with lhs_statistics.
    lhs_name: Name of the lhs_statistics dataset.
    rhs_name: Name of the rhs_statistics dataset.
    allowlist_features: Set of features to be visualized.
    denylist_features: Set of features to ignore for visualization.
  Returns:
    HTML to be embedded for visualization.
  Raises:
    TypeError: If the input argument is not of the expected type.
    ValueError: If the input statistics protos does not have only one dataset.
  """
  combined_statistics = _get_combined_statistics(
      lhs_statistics, rhs_statistics, lhs_name, rhs_name, allowlist_features,
      denylist_features)
  # The proto is passed to the facets-overview element as a base64-encoded
  # serialized proto string.
  protostr = base64.b64encode(
      combined_statistics.SerializeToString()).decode('utf-8')
  # pylint: disable=line-too-long,anomalous-backslash-in-string
  # Note that in the html template we currently assign a temporary id to the
  # facets element and then remove it once we have appended the serialized proto
  # string to the element. We do this to avoid any collision of ids when
  # displaying multiple facets output in the notebook.
  #
  # Note that a string literal including '</script>' in a <script> tag needs to
  # escape it as <\/script> to avoid early closing the wrapping <script> tag.
  html_template = """<iframe id='facets-iframe' width="100%" height="500px"></iframe>
        <script>
        facets_iframe = document.getElementById('facets-iframe');
        facets_html = '<script src="https://cdnjs.cloudflare.com/ajax/libs/webcomponentsjs/1.3.3/webcomponents-lite.js"><\/script><link rel="import" href="https://raw.githubusercontent.com/PAIR-code/facets/master/facets-dist/facets-jupyter.html"><facets-overview proto-input="protostr"></facets-overview>';
        facets_iframe.srcdoc = facets_html;
         facets_iframe.id = "";
         setTimeout(() => {
           facets_iframe.setAttribute('height', facets_iframe.contentWindow.document.body.offsetHeight + 'px')
         }, 1500)
         </script>"""
  # pylint: enable=line-too-long
  html = html_template.replace('protostr', protostr)
  return html
def visualize_statistics(
    lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList,
    rhs_statistics: Optional[
        statistics_pb2.DatasetFeatureStatisticsList] = None,
    lhs_name: Text = 'lhs_statistics',
    rhs_name: Text = 'rhs_statistics',
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None) -> None:
  """Visualize the input statistics using Facets.
  Args:
    lhs_statistics: A DatasetFeatureStatisticsList protocol buffer.
    rhs_statistics: An optional DatasetFeatureStatisticsList protocol buffer to
      compare with lhs_statistics.
    lhs_name: Name of the lhs_statistics dataset.
    rhs_name: Name of the rhs_statistics dataset.
    allowlist_features: Set of features to be visualized.
    denylist_features: Set of features to ignore for visualization.
  Raises:
    TypeError: If the input argument is not of the expected type.
    ValueError: If the input statistics protos does not have only one dataset,
      or if both allowlist_features and denylist_features are specified.
  """
  # Was an `assert`, which is silently stripped under `python -O` and raises
  # AssertionError rather than the ValueError this function documents; raise
  # explicitly so the contract holds in all modes.
  if allowlist_features and denylist_features:
    raise ValueError(
        'Only specify one of allowlist_features and denylist_features.')
  html = get_statistics_html(lhs_statistics, rhs_statistics, lhs_name, rhs_name,
                             allowlist_features, denylist_features)
  display(HTML(html))
def compare_slices(statistics: statistics_pb2.DatasetFeatureStatisticsList,
                   lhs_slice_key: Text, rhs_slice_key: Text):
  """Compare statistics of two slices using Facets.
  Args:
    statistics: A DatasetFeatureStatisticsList protocol buffer.
    lhs_slice_key: Slice key of the first slice.
    rhs_slice_key: Slice key of the second slice.
  Raises:
    ValueError: If the input statistics proto does not have the specified slice
      statistics.
  """
  # Pull each slice's statistics out of the combined proto, then reuse the
  # two-dataset visualization path.
  left = stats_util.get_slice_stats(statistics, lhs_slice_key)
  right = stats_util.get_slice_stats(statistics, rhs_slice_key)
  visualize_statistics(
      left, right, lhs_name=lhs_slice_key, rhs_name=rhs_slice_key)
def get_natural_language_statistics_dataframes(
    lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList,
    rhs_statistics: Optional[
        statistics_pb2.DatasetFeatureStatisticsList] = None,
    lhs_name: Text = 'lhs_statistics',
    rhs_name: Text = 'rhs_statistics',
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None
) -> Optional[Dict[str, Dict[Union[int, str], Union[Dict[str, pd.DataFrame],
                                                    pd.DataFrame]]]]:
  """Gets the `NaturalLanguageStatistics` as a dict of pandas.DataFrame.
  Each pd.DataFrame can be fed into a plot with little to no manipulation.
  For example, to plot the `token_length_histogram` in plot.ly:
  ```
  import pandas a pd
  import plotly
  import tensorflow_data_validation as tfdv
  from tensorflow_data_validation.utils import display_util as tfdv_display_util
  data = pd.DataFrame.from_dict({"col": [1, 2, 3]})
  statistics = tfdv.generate_statistics_from_dataframe(data)
  df = tfdv_display_util.get_natural_language_statistics_dataframes(statistics)
  hist, bin_edges = np.histogram(df[ds_name][feature_name][
      'token_length_histogram']['high_values'])
  fig = plotly.graph_objs.Figure(data=[
      plotly.graph_objs.Bar(x=bin_edges, y=hist, name='Histogram'),
  ])
  ```
  The resulting dict contains `token_length_histogram` and each token name as
  its keys. For each token, the data frame represents a list of stats as well
  as the token's positions histogram.
  Args:
    lhs_statistics: A DatasetFeatureStatisticsList protocol buffer.
    rhs_statistics: An optional DatasetFeatureStatisticsList protocol buffer to
      compare with lhs_statistics.
    lhs_name: Name of the lhs_statistics dataset.
    rhs_name: Name of the rhs_statistics dataset.
    allowlist_features: Set of features to be visualized.
    denylist_features: Set of features to ignore for visualization.
  Returns:
    A dict of pandas data frames. Returns None if natural language statistics
    does not exist in the statistics proto.
  """
  # Reuse the same merge/projection logic as the Facets visualizations so the
  # same dataset naming rules apply here.
  combined_statistics = _get_combined_statistics(lhs_statistics, rhs_statistics,
                                                 lhs_name, rhs_name,
                                                 allowlist_features,
                                                 denylist_features)
  nlp_stats = _get_natural_language_statistics(combined_statistics)
  if not nlp_stats:
    return None
  # Nested mapping: dataset name -> feature name -> two DataFrames.
  result = {}
  for ds_name, features_dict in nlp_stats.items():
    result[ds_name] = {}
    for feature_name, nlp_stat in features_dict.items():
      result[ds_name][feature_name] = {
          'token_length_histogram':
              _get_histogram_dataframe(nlp_stat.token_length_histogram),
          'token_statistics':
              _get_token_statistics(list(nlp_stat.token_statistics))
      }
  return result
def _get_natural_language_statistics(
    statistics: statistics_pb2.DatasetFeatureStatisticsList
) -> Dict[str, Dict[str, statistics_pb2.NaturalLanguageStatistics]]:
  """Gets the Natural Language stat out of the custom statistic."""
  result = {}
  for dataset in statistics.datasets:
    # Unnamed datasets cannot be keyed in the result, so they are skipped.
    if not dataset.name:
      continue
    per_feature = {}
    for feature in dataset.features:
      for custom_stat in feature.custom_stats:
        if custom_stat.name != _NL_CUSTOM_STATS_NAME:
          continue
        nl_stat = statistics_pb2.NaturalLanguageStatistics()
        custom_stat.any.Unpack(nl_stat)
        # Prefer the plain feature name; fall back to its path string.
        key = feature.name or str(types.FeaturePath.from_proto(feature.path))
        per_feature[key] = nl_stat
    if per_feature:
      result[dataset.name] = per_feature
  return result
def _get_token_statistics(
    token_statistic: List[
        statistics_pb2.NaturalLanguageStatistics.TokenStatistics]
) -> pd.DataFrame:
  """Returns a dict of each token's stats, one row per token."""
  # Column order matches the original literal so the resulting DataFrame has
  # identical column ordering.
  columns = {
      key: [] for key in (
          _TOKEN_NAME_KEY, _FREQUENCY_KEY, _FRACTION_OF_SEQ_KEY,
          _PER_SEQ_MIN_FREQ_KEY, _PER_SEQ_MAX_FREQ_KEY,
          _PER_SEQ_AVG_FREQ_KEY, _POSITIONS_KEY)
  }
  for token in token_statistic:
    # A token is identified by either its string or its integer form.
    if token.WhichOneof('token') == 'string_token':
      token_name = token.string_token
    else:
      token_name = token.int_token
    columns[_TOKEN_NAME_KEY].append(token_name)
    columns[_FREQUENCY_KEY].append(token.frequency)
    columns[_FRACTION_OF_SEQ_KEY].append(token.fraction_of_sequences)
    columns[_PER_SEQ_MIN_FREQ_KEY].append(token.per_sequence_min_frequency)
    columns[_PER_SEQ_MAX_FREQ_KEY].append(token.per_sequence_max_frequency)
    columns[_PER_SEQ_AVG_FREQ_KEY].append(token.per_sequence_avg_frequency)
    columns[_POSITIONS_KEY].append(_get_histogram_dataframe(token.positions))
  return pd.DataFrame.from_dict(columns)
def _get_histogram_dataframe(
    histogram: statistics_pb2.Histogram) -> pd.DataFrame:
  """Gets the `Histogram` as a pandas.DataFrame."""
  # One column per bucket attribute, aligned by bucket order.
  buckets = list(histogram.buckets)
  return pd.DataFrame.from_dict({
      'high_values': [bucket.high_value for bucket in buckets],
      'low_values': [bucket.low_value for bucket in buckets],
      'sample_counts': [bucket.sample_count for bucket in buckets],
  })
| [
2,
15069,
2864,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330... | 2.603695 | 8,390 |
from webtest import TestApp
| [
6738,
3992,
9288,
1330,
6208,
4677,
198
] | 4 | 7 |
import glob
import os
from argparse import ArgumentParser
from pathlib import Path
import numpy as np
import yaml
from tqdm import tqdm
# Number of CAD models per PASCAL3D+ object class.
# NOTE(review): counts presumably mirror the models shipped with the dataset
# -- confirm against the PASCAL3D+ release.
cad_num_per_class = {
    "aeroplane": 8,
    "bicycle": 6,
    "boat": 6,
    "bottle": 8,
    "bus": 6,
    "car": 10,
    "chair": 10,
    "diningtable": 6,
    "motorbike": 5,
    "sofa": 6,
    "train": 4,
    "tvmonitor": 4
}
if __name__ == '__main__':
    parser = ArgumentParser()
    # Both defaults are placeholders; point them at real dataset roots.
    parser.add_argument('--orig_dataset_dir', type=Path, default='your-folder/datasets/PASCAL3D+',
                        help='dateset main directory')
    parser.add_argument('--new_dataset_dir', type=Path, default='your-folder/datasets/PASCAL_final',
                        help='dateset main directory')
    args = parser.parse_args()
    # NOTE(review): split_dataset is not defined in this file as shown --
    # presumably stripped or imported elsewhere; confirm against the source.
    split_dataset(args.orig_dataset_dir, args.new_dataset_dir)
| [
11748,
15095,
198,
11748,
28686,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
331,
43695,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
... | 2.226667 | 375 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 23 18:05:09 2021
@author: mike_ubuntu
"""
import numpy as np
import sys
from collections import OrderedDict
sys.path.append('/media/mike_ubuntu/DATA/ESYS/')
import src.globals as global_var
from src.token_family import Token_family, TF_Pool
# raise NotImplementedError
# raise NotImplementedError
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
1526,
2242,
1248,
25,
2713,
25,
2931,
33448,
198,
198,
31,
9800,
25,
285,
522,
62,... | 2.397661 | 171 |
from __future__ import absolute_import
import pytest
from sentry.models import EventError
@pytest.mark.parametrize(
"error,type,message,data",
(
({"type": "unknown_error"}, "unknown_error", "Unknown error", {}),
({"type": "unknown_error", "foo": "bar"}, "unknown_error", "Unknown error", {"foo": "bar"}),
(
{"type": "invalid_data", "name": "foo"},
"invalid_data",
"Discarded invalid value",
{"name": "foo"},
),
({"type": "invalid_data"}, "invalid_data", "Discarded invalid value", {}),
({"type": "INVALID_ERROR_TYPE"}, "INVALID_ERROR_TYPE", "Unknown error", {}),
),
)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
1908,
563,
13,
27530,
1330,
8558,
12331,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7,
198,
220,
220,
220,
366,
18224,
... | 2.300676 | 296 |
"""
NZBLeecherUtil - Misc. code for NZBLeecher and its related modules
(c) Copyright 2005 Philip Jenvey
[See end of file]
"""
import os, stat, sys, Hellanzb
from twisted.internet import reactor
from twisted.python import log
from twisted.protocols.policies import ThrottlingProtocol, WrappingFactory
__id__ = '$Id$'
class HellaThrottler:
    """ This is twisted.protocols.policies.ThrottlingFactory abstracted away from
    Factory. Multiple factories all share the same HellaThrottler singleton -- thus this
    provides global bandwidth throttling for all twisted HellaThrottlingFactories """
    # NOTE(review): no __init__ is visible in this chunk; the per-second
    # counters (readThisSecond, writtenThisSecond), readLimit and the
    # factories collection are assumed to be initialised elsewhere -- confirm.
    def registerWritten(self, length):
        """Called by protocol to tell us more bytes were written."""
        self.writtenThisSecond += length
    def registerRead(self, length):
        """Called by protocol to tell us more bytes were read."""
        self.readThisSecond += length
    def checkReadBandwidth(self):
        """Checks if we've passed bandwidth limits.

        Runs once per second (it reschedules itself via reactor.callLater);
        also refreshes the global download rate and each NZBFile's rate and
        percentage while a download is active.
        """
        if self.readLimit and self.readThisSecond > self.readLimit:
            self.throttleReads()
            # Stay throttled proportionally to how far past the limit this
            # second went (e.g. 2x over limit -> 1 extra second throttled).
            throttleTime = (float(self.readThisSecond) / self.readLimit) - 1.0
            self.unthrottleReadsID = reactor.callLater(throttleTime,
                                                       self.unthrottleReads)
        nzbFiles = []
        if Hellanzb.downloading:
            # Update the total download rate and each NZBFiles rate and d/l percentage
            self.rate = self.readThisSecond
            for nsf in Hellanzb.nsfs:
                for activeClient in nsf.activeClients:
                    if activeClient.currentSegment and \
                            activeClient.currentSegment.nzbFile not in nzbFiles:
                        nzbFile = activeClient.currentSegment.nzbFile
                        nzbFile.rate = nzbFile.readThisSecond
                        talliedBytes = float(nzbFile.totalReadBytes + nzbFile.totalSkippedBytes)
                        percentage = int(talliedBytes / max(1, nzbFile.totalBytes) * 100)
                        # Cap at 100 in case tallied bytes overshoot the total
                        nzbFile.downloadPercentage = min(100, percentage)
                        nzbFiles.append(nzbFile)
            Hellanzb.scroller.updateLog()
        # Reset the rate counters
        self.readThisSecond = 0
        for nzbFile in nzbFiles:
            nzbFile.readThisSecond = 0
        self.checkReadBandwidthID = reactor.callLater(1, self.checkReadBandwidth)
    def throttleReads(self):
        """Throttle reads on all protocols."""
        for f in self.factories:
            log.msg("Throttling reads on %s" % f)
            for p in f.protocols.keys():
                p.throttleReads()
    def unthrottleReads(self):
        """Stop throttling reads on all protocols."""
        # unthrottling reads just means the protocols startReading() again. Obviously we
        # don't want to ever begin reading when the download is currently paused
        if Hellanzb.downloadPaused:
            return
        self.unthrottleReadsID = None
        for f in self.factories:
            log.msg("Stopped throttling reads on %s" % f)
            for p in f.protocols.keys():
                p.unthrottleReads()
    def throttleWrites(self):
        """Throttle writes on all protocols."""
        for f in self.factories:
            log.msg("Throttling writes on %s" % f)
            for p in f.protocols.keys():
                p.throttleWrites()
    def unthrottleWrites(self):
        """Stop throttling writes on all protocols."""
        self.unthrottleWritesID = None
        for f in self.factories:
            log.msg("Stopped throttling writes on %s" % f)
            for p in f.protocols.keys():
                p.unthrottleWrites()
class HellaThrottlingFactory(WrappingFactory):
    """Throttles bandwidth and number of connections via the parent HellaThrottler
    Write bandwidth will only be throttled if there is a producer
    registered.
    """
    protocol = ThrottlingProtocol
    # NOTE(review): self.ht (the shared HellaThrottler) is not assigned in
    # this chunk; presumably set at construction elsewhere -- confirm.
    def registerWritten(self, length):
        """Called by protocol to tell us more bytes were written."""
        # Delegate to the shared throttler so accounting is global.
        self.ht.registerWritten(length)
    def registerRead(self, length):
        """Called by protocol to tell us more bytes were read."""
        self.ht.registerRead(length)
def validWorkingFile(file, overwriteZeroByteFiles = False):
    """ Determine if the specified file path is a valid, existing file in the WORKING_DIR """
    # A missing file is never valid.
    if not os.path.exists(file):
        return False
    # A zero byte segment file only counts as valid when we are NOT
    # allowed to overwrite zero byte files.
    return os.stat(file)[stat.ST_SIZE] != 0 or not overwriteZeroByteFiles
"""
Copyright (c) 2005 Philip Jenvey <pjenvey@groovie.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author or contributors may not be used to endorse or
promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
$Id$
"""
| [
37811,
198,
37371,
33,
3123,
721,
372,
18274,
346,
532,
29882,
13,
2438,
329,
26905,
33,
3123,
721,
372,
290,
663,
3519,
13103,
198,
198,
7,
66,
8,
15069,
5075,
14576,
13364,
3304,
198,
58,
6214,
886,
286,
2393,
60,
198,
37811,
198,... | 2.593235 | 2,365 |
import abc
from typing import List
from expert_voting.scored_expert import ScoredExpert
| [
11748,
450,
66,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
5887,
62,
85,
10720,
13,
1416,
1850,
62,
1069,
11766,
1330,
1446,
1850,
3109,
11766,
628
] | 3.333333 | 27 |
"""
* Project [darzyx]
* Sketch_004: Stars
* by Darzyx
*
* Rotating in deep space.
* Created using Python Mode for Processing.
*
""" | [
37811,
198,
1635,
4935,
685,
27455,
7357,
87,
60,
198,
1635,
17001,
62,
22914,
25,
10271,
198,
1635,
416,
7491,
7357,
87,
198,
1635,
198,
1635,
18481,
803,
287,
2769,
2272,
13,
198,
1635,
15622,
1262,
11361,
10363,
329,
28403,
13,
198... | 3.088889 | 45 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on May 22 2019
Last Update May 22 2019
@author: simonvanvliet
Department of Zoology
University of British Columbia
vanvliet@zoology.ubc.ca
This recreates the data and figure for figure 6
By default data is loaded unless parameters have changes, to rerun model set override_data to True
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import MLS_evolveCoop_fast as mlse
import mls_general_code as mlsg
import datetime
from pathlib import Path
"""
# SET model settings
"""
# set to True to force recalculation of data
override_data = False
# set folder and file settings
data_folder = Path("Data_Paper/")
fig_Folder = Path("Figures_Paper/")
figureName = 'figure6.pdf'
dataName = 'data_Figure6.npz'
# set model settings
model_par = {
# time step parameters
"maxT": 200000.,
"dT": 5E-2,
"sampleT": 100,
"rms_err_treshold": 1E-5,
"mav_window": 1000,
"rms_window": 5000,
# fixed model parameters
"sampling": "sample",
"mu": 0.01,
"B_H": 1.,
"D_H": 0.,
"K_H": 500.,
# variable model parameters
"cost": 0.01,
"TAU_H": 100.,
"n0": 1E-3,
"mig": 1E-5,
"r": 1.,
"K": 10E3,
# fixed intial condition
"NUMGROUP": 500,
"numTypeBins": 100,
"meanGamma0": 0,
"stdGamma0": 0.01,
"N0init": 1.,
}
"""
# SET figure settings
"""
wFig = 8.7
hFig = 2.5
font = {'family': 'Helvetica',
'weight': 'light',
'size': 6}
axes = {'linewidth': 0.5,
'titlesize': 7,
'labelsize': 6,
'labelpad': 2,
'spines.top': False,
'spines.right': False,
}
ticks = {'major.width': 0.5,
'direction': 'in',
'major.size': 2,
'labelsize': 6,
'major.pad': 2}
legend = {'fontsize': 6,
'handlelength': 1.5,
'handletextpad': 0.5,
'labelspacing': 0.2}
figure = {'dpi': 300}
savefigure = {'dpi': 300,
'transparent': True}
mpl.style.use('seaborn-ticks')
mpl.rc('font', **font)
mpl.rc('axes', **axes)
mpl.rc('xtick', **ticks)
mpl.rc('ytick', **ticks)
mpl.rc('legend', **legend)
mpl.rc('figure', **figure)
mpl.rc('savefig', **savefigure)
colors = ['777777', 'E24A33', '348ABD', '988ED5',
'FBC15E', '8EBA42', 'FFB5B8']
mpl.rcParams['axes.prop_cycle'] = mpl.cycler(color=colors)
"""
Main code
"""
# runs model
# checks of model parmaters have changed
# Load model is datafile found, run model if not found or if settings have changed
# plot line chart
# calcualte moving average over time
# plot histogram chart
# main function to create figure
if __name__ == "__main__":
create_fig()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
1766,
35,
278,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
220,
1737,
2534,
13130,
198,
5956,
10133,
1737,
2534,
13130,
198,
198,
31,
9800,
... | 2.265272 | 1,195 |
#coding=utf-8
import logging
from controlfile import ControlFile
from heapbuf import HeapBuffer
from pgstruct import memoffset
from upgexception import UPgException
import pgstruct
import upgcrc
logger = logging.getLogger("Catalog")
'''
不同的Pg版本有不同的文件布局,系统属性等等,这个类作为每个版本相应类的父类,记录相应信息。
'''
| [
2,
66,
7656,
28,
40477,
12,
23,
220,
198,
198,
11748,
18931,
198,
198,
6738,
1630,
7753,
1330,
6779,
8979,
198,
6738,
24575,
29325,
1330,
679,
499,
28632,
198,
6738,
23241,
7249,
1330,
1066,
28968,
198,
6738,
510,
25636,
4516,
1330,
1... | 1.702857 | 175 |
# !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:5/22/2021 9:44 PM
# @File:df
from qcloudsms_py import SmsSingleSender
from qcloudsms_py.httpclient import HTTPError
import random
import ssl
from python_developer_tools.web.services_utils import make_code
ssl._create_default_https_context = ssl._create_unverified_context
appid = '1400525419' # 准备工作中的SDK AppID,类型:int
appkey = '2c894050f230f6df92abac6da8048e57' # 准备工作中的App Key,类型:str
sign = 'CollectInto' # 准备工作中的应用签名,类型:str
if __name__ == '__main__':
phone_num = '18013634236'
send_msg(phone_num) | [
2,
5145,
14,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
1377,
19617,
25,
3384,
69,
12,
23,
1377,
198,
2,
2488,
13838,
1976,
1516,
36072,
1219,
9019,
198,
2,
16092,
265,
524,
25,
20,
14,
1828,
14,
1238,
2481,
860,
25,
2598,
3122,
... | 1.986577 | 298 |
"""
MOONS Fibre Positioning System Utility Module
Contains a collection of shared functions and utilities.
24 Jul 2014: Created using functions extracted from fps_classes.py
06 Sep 2014: Added generator function for grid coordinates.
Allow positioner grids to define the origin of their
coordinate system at the centre.
20 Mar 2015: Polar coordinate system definition changed so that theta
is measured clockwise from the focal plane Y axis
(the MOONS convention).
14 Jan 2016: Additional tests. Added elbow_parity_str, polygon_to_points,
line_intersects_circle and triangle_intersects_circle
functions.
07 Jun 2016: Temporarily restored old coordinate conversion functions
until the path analysis code can be brought up to date.
21 Jul 2016: Corrected a typo in a log message.
@author: Steven Beard (UKATC)
"""
from __future__ import division
import sys, math
import warnings
import numpy as np
# Define the limits for floating point values.
EPS = 10 * sys.float_info.epsilon # Smallest possible increment (x 10)
MAXFLOAT = sys.float_info.max # Largest possible value.
# Calculate important constants once.
ROOT3 = math.sqrt(3.0)                # sqrt(3)
ROOT3BY2 = ROOT3 / 2.0                # sqrt(3)/2: hexagonal-grid row spacing, in pitches
PI2 = 2.0 * math.pi                   # Full circle (2*pi radians)
PIBY2 = math.pi / 2.0                 # Quarter circle (pi/2 radians)
COS30 = math.cos(math.radians(30.0))  # cos(30 degrees)
PARITY_RIGHT = 1 # Constant meaning right-armed parity
PARITY_LEFT = -1 # Constant meaning left-armed parity
#
# A collection of global helper functions
#
def elbow_parity_str( parity ):
    """
    Convert the numerical code for elbow parity into a descriptive string.

    :Parameters:

    parity: int
        The elbow parity:

        * 1 (PARITY_RIGHT) means elbow right armed
        * -1 means elbow left armed

    :Returns:

    str: "RIGHT-armed" or "LEFT-armed".
    """
    # Any value other than PARITY_RIGHT is reported as left-armed.
    return "RIGHT-armed" if parity == PARITY_RIGHT else "LEFT-armed"
# FIXME: DELETE THIS FUNCTION.
def cartesian_to_polar_OLD(x, y, xorigin=0.0, yorigin=0.0):
    """
    Convert Cartesian coordinates to polar coordinates centred at
    (xorigin, yorigin). The angle is measured anti-clockwise from the
    X axis (the old, obsolete convention).

    OLD OBSOLETE VERSION USED BY PATH ANALYSIS - 13 JUN 2016
    DELETE WHEN COMPATIBILITY PROBLEMS SORTED OUT
    """
    # Offset of the point relative to the chosen origin.
    dx = float(x) - float(xorigin)
    dy = float(y) - float(yorigin)
    radial = math.sqrt((dx * dx) + (dy * dy))
    angle = math.atan2(dy, dx)
    return (radial, angle)
# FIXME: DELETE THIS FUNCTION.
def polar_to_cartesian_OLD(r, theta, r_ref=0.0, theta_ref=0.0):
    """
    Convert polar coordinates to Cartesian coordinates, relative to an
    optional reference point. The angles are measured anti-clockwise
    from the X axis (the old, obsolete convention).

    OLD OBSOLETE VERSION USED BY PATH ANALYSIS - 13 JUN 2016
    DELETE WHEN COMPATIBILITY PROBLEMS SORTED OUT
    """
    xpos = r * math.cos(theta)
    ypos = r * math.sin(theta)
    # Subtract the reference point only when one is given.
    if float(r_ref) > 0.0:
        xpos = xpos - r_ref * math.cos(theta_ref)
        ypos = ypos - r_ref * math.sin(theta_ref)
    return (xpos, ypos)
def cartesian_to_polar(x, y, xorigin=0.0, yorigin=0.0):
    """
    Convert Cartesian coordinates to polar coordinates centred at a
    defined origin. In the polar coordinates, theta is the bearing
    measured clockwise from the Y axis (the MOONS convention).

    :Parameters:

    x: float
        X coordinate of point
    y: float
        Y coordinate of point
    xorigin: float (optional)
        X coordinate of origin (if not zero)
    yorigin: float (optional)
        Y coordinate of origin (if not zero)

    :Returns:

    (r, theta): tuple of 2 floats
        Polar coordinates of point. NOTE: theta is in radians,
        normalised into the range 0 to 2*pi.
    """
    dx = float(x) - float(xorigin)
    dy = float(y) - float(yorigin)
    radial = math.sqrt((dx * dx) + (dy * dy))
    # Clockwise-from-Y bearing is pi/2 minus the anticlockwise-from-X angle.
    bearing = (math.pi / 2.0) - math.atan2(dy, dx)
    # Normalise the bearing into the range 0 - 2*pi.
    while bearing < 0.0:
        bearing += 2.0 * math.pi
    while bearing > 2.0 * math.pi:
        bearing -= 2.0 * math.pi
    return (radial, bearing)
def polar_to_cartesian(r, theta, r_ref=0.0, theta_ref=0.0):
    """
    Convert polar coordinates to Cartesian coordinates, relative to an
    optional reference point. The angles are bearings measured
    clockwise from the Y axis (the MOONS convention).

    :Parameters:

    r: float
        Radial distance of point from origin.
    theta: float
        Angular bearing of point, clockwise from Y axis (in radians)
    r_ref: float (optional)
        Radial distance of reference point from origin
        (if reference point is not the origin).
    theta_ref: float (optional)
        Angular bearing of reference point, clockwise from Y axis
        (in radians)(if reference point is not the origin).

    :Returns:

    (x, y): tuple of 2 floats
        Cartesian coordinates of point
    """
    # Clockwise-from-Y convention: X varies with sin, Y with cos.
    xpos = r * math.sin(theta)
    ypos = r * math.cos(theta)
    # Subtract the reference point only when one is given.
    if float(r_ref) > 0.0:
        xpos = xpos - r_ref * math.sin(theta_ref)
        ypos = ypos - r_ref * math.cos(theta_ref)
    return (xpos, ypos)
def hexagonal_to_cartesian(column, row, pitch, xzero=0.0, yzero=0.0):
    """
    Calculate the X, Y location of a given column and row
    within a hexagonal grid.

    :Parameters:

    column: int
        Column number in hexagonal grid.
    row: int
        Row number in hexagonal grid.
    pitch: float
        Distance between neighbouring grid points (in mm).
    xzero: float (optional)
        X coordinate of zero point. Default 0.0.
    yzero: float (optional)
        Y coordinate of zero point. Default 0.0.

    :Returns:

    (x, y): tuple of 2 floats
        Cartesian coordinates of centre of grid point.
    """
    # Odd rows are shifted half a pitch to the right of even rows.
    half_offset = 0.5 if (row % 2) else 0.0
    xpos = ((column + half_offset) * pitch) - xzero
    # Rows are spaced sqrt(3)/2 pitches apart.
    ypos = (row * (math.sqrt(3.0) / 2.0) * pitch) - yzero
    return (xpos, ypos)
def cartesian_to_hexagonal(x, y, pitch, xzero=0.0, yzero=0.0):
    """
    Calculate the column and row within a hexagonal grid of a
    given X, Y location.

    :Parameters:

    x: float
        X coordinate of the location.
    y: float
        Y coordinate of the location.
    pitch: float
        Distance between neighbouring grid points (in mm).
    xzero: float (optional)
        Zero point offset for X coordinate. Default 0.0.
    yzero: float (optional)
        Zero point offset for Y coordinate. Default 0.0.

    :Returns:

    (column, row): tuple of 2 ints
        Location within hexagonal grid.
    """
    # Nearest row, given that rows are sqrt(3)/2 pitches apart.
    row = int(round((yzero + y) / ((math.sqrt(3.0) / 2.0) * pitch)))
    # Odd rows are shifted half a pitch, so un-shift before rounding.
    if row % 2 == 0:
        column = int(round((xzero + x) / pitch))
    else:
        column = int(round(((xzero + x) / pitch) - 0.5))
    return (column, row)
def flat_to_curved_r(flat_r, curvrad):
    """
    Convert a radial distance from the centre of a circular flat plane
    into the radial distance projected onto a curved focal plane of
    given radius of curvature.

    :Parameters:

    flat_r: float
        The radial distance from the centre of the flat plane.
    curvrad: float
        The radius of curvature of the curved plane, or None for a
        flat (infinite radius of curvature) plane.

    :Returns:

    curved_r: float
        The radial distance projected onto the curved plane.
        Note: curved_r approaches flat_r when curvrad tends to infinity.
        None is returned when flat_r > curvrad
    """
    if curvrad is None:
        # No radius of curvature given: the plane is flat and the
        # radial distance is unchanged.
        return flat_r
    ratio = abs(flat_r / curvrad)
    if ratio > 1.0:
        # No valid projection when flat_r exceeds the radius of curvature.
        return None
    return curvrad * math.asin(ratio)
def rotate_coordinates(x, y, theta):
    """
    Rotate a pair of Cartesian coordinates (x,y) onto a new
    coordinate grid which is rotated by angle theta.

    :Parameters:

    x: float
        X coordinate of point in the original grid
    y: float
        Y coordinate of point in the original grid
    theta: float
        Rotation angle between first grid and second grid (in radians)

    :Returns:

    (xnew, ynew): tuple of 2 floats
        The new coordinates
    """
    # BUGFIX: the previous code computed ynew = x*sin(theta) + y*cos(theta),
    # which is not an orthogonal transformation (its determinant is
    # cos^2(theta) - sin^2(theta), not 1) and therefore distorted distances.
    # A proper rotation of axes negates the x*sin(theta) term.
    costheta = math.cos(theta)
    sintheta = math.sin(theta)
    xnew = x * costheta + y * sintheta
    ynew = y * costheta - x * sintheta
    return (xnew, ynew)
def distance_squared(x1, y1, x2, y2):
    """
    Return the square of the distance between points (x1,y1) and
    (x2,y2), i.e. (x1-x2)**2 + (y1-y2)**2.

    :Parameters:

    x1: float
        X coordinate of first point
    y1: float
        Y coordinate of first point
    x2: float
        X coordinate of second point
    y2: float
        Y coordinate of second point

    :Returns:

    distsq: float
        The square of the distance between the points.
    """
    dx = float(x1) - float(x2)
    dy = float(y1) - float(y2)
    return (dx * dx) + (dy * dy)
def closer_than(x1, y1, x2, y2, limitsq):
    """
    Return True when the square of the distance between points (x1,y1)
    and (x2,y2) is less than or equal to limitsq. Squared distances are
    compared to save computation time (no square root needed).

    :Parameters:

    x1: float
        X coordinate of first point
    y1: float
        Y coordinate of first point
    x2: float
        X coordinate of second point
    y2: float
        Y coordinate of second point
    limitsq: float
        The square of the limiting distance.

    :Returns:

    True or False
    """
    # Inline of distance_squared(), kept local to avoid the extra call.
    dx = float(x1) - float(x2)
    dy = float(y1) - float(y2)
    return ((dx * dx) + (dy * dy)) <= limitsq
def arms_intersect(line1, line2, half_width, maxangle=None, thinfactor=100.0,
                   quick=True):
    """
    Helper function to determine whether two arm
    segments intersect.

    NOTE: This function decides whether to call the lines_intersect
    function or the quadrangles_intersect depending on the "quick"
    parameter and the arm width given.

    :Parameters:

    line1: tuple of (xstart, ystart, xend, yend)
        First line segment
    line2: tuple of (xstart, ystart, xend, yend)
        Second line segment
    half_width: float
        Half the arm width in the same units as the line coordinates.
        This is used to define the tolerance for a simple line intersection
        check, or to define the corner coordinates for a full quadrangle
        intersection check.
    maxangle: float (optional)
        The maximum angle (in degrees) at which a conflict is possible.
        Two arms only conflict if the relative angle between their
        orientations is less than this limit.
        The default value of None means that all angles can conflict.
    thinfactor: float (optional)
        The definition of a thin line. A line is thin if its length
        is more than thinfactor times its width (default 100).
        Thin lines are always tested using the "lines_intersect"
        function. Thick lines are tested using the much slower
        "quadrangles_intersect" function unless the "quick" parameter
        (below) is True.
    quick: bool (optional)
        Set True for a quick but simple intersection test
        where arms are always considered lines with a tolerance.
        Set False for an accurate but very slow intersection test
        where all thick arms are treated as 2-D quadrangles.

    :Returns:

    True or False
    """
    # Extents and squared lengths of both arm centre-lines.
    ydiff1 = line1[3] - line1[1]
    xdiff1 = line1[2] - line1[0]
    length1sq = xdiff1 * xdiff1 + ydiff1 * ydiff1
    ydiff2 = line2[3] - line2[1]
    xdiff2 = line2[2] - line2[0]
    length2sq = xdiff2 * xdiff2 + ydiff2 * ydiff2
    # Check for relative orientations greater than the conflict limit.
    if maxangle is not None:
        angle1 = math.atan2(ydiff1, xdiff1)
        angle2 = math.atan2(ydiff2, xdiff2)
        # NOTE(review): this is a raw difference, not wrapped to
        # [-pi, pi], so arms straddling the atan2 branch cut may be
        # treated as far apart -- confirm intended.
        if abs(angle2 - angle1) > math.radians(maxangle):
            return False
    thinfactsq = thinfactor * thinfactor
    maxlengthsq = max(length1sq, length2sq)
    half_widthsq = half_width * half_width
    if quick or ((half_widthsq * thinfactsq) < maxlengthsq):
        # Treat thin lines as lines with a tolerance.
        # This will tend to round off the ends of the line.
        return lines_intersect(line1, line2, tolerance=half_width)
    else:
        # Treat thick lines as quadrangles. Useful for lines with sharp
        # boundaries, but a much slower option.
        # Expand each centre-line into a rectangle by offsetting its end
        # points perpendicular to the arm orientation by half_width.
        angle1 = math.atan2(ydiff1, xdiff1)
        xdelta1 = half_width * math.sin(angle1)
        ydelta1 = half_width * math.cos(angle1)
        quad1 = [(line1[0]-xdelta1,line1[1]+ydelta1),
                 (line1[0]+xdelta1,line1[1]-ydelta1),
                 (line1[2]-xdelta1,line1[3]+ydelta1),
                 (line1[2]+xdelta1,line1[3]-ydelta1)]
        angle2 = math.atan2(ydiff2, xdiff2)
        xdelta2 = half_width * math.sin(angle2)
        ydelta2 = half_width * math.cos(angle2)
        quad2 = [(line2[0]-xdelta2,line2[1]+ydelta2),
                 (line2[0]+xdelta2,line2[1]-ydelta2),
                 (line2[2]-xdelta2,line2[3]+ydelta2),
                 (line2[2]+xdelta2,line2[3]-ydelta2)]
        return quadrangles_intersect(quad1, quad2)
def lines_intersect(line1, line2, tolerance=0.0):
    """
    Helper function to determine whether two line segments intersect.

    This function is most suitable for thin lines. Intersection is
    tested by surrounding each point with a tolerance of the given
    radius. Thick lines requiring a large tolerance are effectively
    given rounded ends, which causes a slight overestimate in the
    number of intersections. A more accurate, but much slower, way
    of testing thick lines is to use the "quadrangles_intersect"
    function.

    :Parameters:

    line1: tuple of (xstart, ystart, xend, yend)
        First line segment
    line2: tuple of (xstart, ystart, xend, yend)
        Second line segment
    tolerance: float (optional)
        The tolerance of the intersection, to account for floating
        point rounding errors and a finite width of the lines.
        The tolerance should be roughly half the line width.
        Defaults to the smallest possible floating point value.
        NOTE: The tolerance should be much less than the
        line length - otherwise use the quadrangles_intersect
        function.

    :Returns:

    True or False
    """
    assert isinstance(line1, (list,tuple))
    # NOTE(review): line1 is asserted twice; the next assert was
    # probably intended to validate line2.
    assert isinstance(line1, (list,tuple))
    assert len(line1) > 3
    assert len(line2) > 3
    # Pad the tolerance so exact coincidences survive rounding error.
    tolerance = float(tolerance) + EPS
    xlength1 = line1[2]-line1[0]
    ylength1 = line1[3]-line1[1]
    xlength2 = line2[2]-line2[0]
    ylength2 = line2[3]-line2[1]
    if line1 == line2:
        # Identical lines intersect by definition
        return True
    if abs(xlength1) < EPS and abs(xlength2) < EPS:
        # Both slopes are infinite. The lines intersect only
        # if they have exactly the same X coordinate, to within
        # the tolerance.
        # NOTE(review): two collinear vertical segments with the same X
        # are reported as intersecting even when their Y ranges do not
        # overlap -- confirm this is acceptable.
        if abs(line1[0] - line2[0]) < tolerance:
            return True
        else:
            return False
    elif abs(xlength1) < EPS:
        # Infinite line1 slope special case.
        # The lines intersect where the X coordinate of line1 is
        # substituted into the equation for line2
        slope2 = ylength2/xlength2
        const2 = line2[1] - slope2 * line2[0]
        xintersect = line1[0]
        yintersect = slope2 * xintersect + const2
    elif abs(xlength2) < EPS:
        # Infinite line2 slope special case
        # The lines intersect where the X coordinate of line2 is
        # substituted into the equation for line1
        slope1 = ylength1/xlength1
        const1 = line1[1] - slope1 * line1[0]
        xintersect = line2[0]
        yintersect = slope1 * xintersect + const1
    else:
        # Determine the slopes and constants of the two lines
        slope1 = ylength1/xlength1
        slope2 = ylength2/xlength2
        const1 = line1[1] - slope1 * line1[0]
        const2 = line2[1] - slope2 * line2[0]
        # If the slopes are the same, the lines are parallel and cannot
        # intersect (overlapping collinear segments also fall through
        # here and are reported as non-intersecting unless identical).
        slopediff = slope1 - slope2
        if abs(slopediff) < EPS:
            # Parallel lines
            return False
        # Find the point of intersection between the two lines
        # when they are extrapolated to infinity.
        xintersect = (const2 - const1) / (slope1 - slope2)
        yintersect = slope1 * xintersect + const1
    # The line segments intersect if this intersection point
    # occurs inside the range (bounding box, padded by the tolerance)
    # of both segments.
    left1 = min(line1[0],line1[2]) - tolerance
    right1 = max(line1[0],line1[2]) + tolerance
    left2 = min(line2[0],line2[2]) - tolerance
    right2 = max(line2[0],line2[2]) + tolerance
    bottom1 = min(line1[1],line1[3]) - tolerance
    top1 = max(line1[1],line1[3]) + tolerance
    bottom2 = min(line2[1],line2[3]) - tolerance
    top2 = max(line2[1],line2[3]) + tolerance
    if xintersect > left1 and xintersect < right1 and \
       xintersect > left2 and xintersect < right2 and \
       yintersect > bottom1 and yintersect < top1 and \
       yintersect > bottom2 and yintersect < top2:
        return True
    return False
def line_intersects_circle(x1, y1, x2, y2, xcen, ycen, radius):
    """
    Determine whether the line segment from (x1,y1) to (x2,y2)
    intersects the circle described by (xcen,ycen,radius).

    :Parameters:

    x1: float
        X coordinate of first point
    y1: float
        Y coordinate of first point
    x2: float
        X coordinate of second point
    y2: float
        Y coordinate of second point
    xcen: float
        X coordinate of centre of circle
    ycen: float
        Y coordinate of centre of circle
    radius: float
        Radius of circle

    :Returns:

    True or False
    """
    # The segment intersects the circle when its closest approach to
    # the circle centre is smaller than the radius.
    closest = distance_from_point_to_line(xcen, ycen, x1, y1, x2, y2)
    return closest < radius
def distance_from_point_to_line(xpoint, ypoint, x1, y1, x2, y2):
    """
    Helper function which calculates the closest approach distance
    between a point (xpoint,ypoint) and the line segment from (x1,y1)
    to (x2,y2). When the perpendicular foot falls outside the segment,
    the distance to the nearer end point is returned instead.
    """
    # Determine line equation from the end points
    (slope, intercept) = line_from_two_points(x1, y1, x2, y2)
    if slope is not None:
        # Evaluate the line at the X coordinate of the point and
        # calculate the closest approach distance for an infinite
        # line.
        yline = intercept + slope * xpoint
        ydiff = ypoint - yline
        theta = math.atan(slope)
        distance = abs(ydiff) * math.cos(theta)
        # Calculate the coordinates of this closest point.
        # NOTE(review): the offset direction does not account for the
        # sign of ydiff, so xclosest may land on the wrong side when
        # the point is below the line -- confirm.
        xclosest = xpoint - (distance * math.sin(theta))
        # The infinite line distance is valid only if this closest
        # point actually lies on the line segment. If it doesn't,
        # the distance needs to be recalculated.
        if xclosest < min(x1,x2):
            # The closest point is off the left hand end. The real
            # distance is between the point and the left hand end.
            if x1 < x2:
                distsq = distance_squared(x1, y1, xpoint, ypoint)
            else:
                distsq = distance_squared(x2, y2, xpoint, ypoint)
            distance = math.sqrt(distsq)
        elif xclosest > max(x1,x2):
            # The closest point is off the right hand end. The real
            # distance is between the point and the right hand end.
            if x1 > x2:
                distsq = distance_squared(x1, y1, xpoint, ypoint)
            else:
                distsq = distance_squared(x2, y2, xpoint, ypoint)
            distance = math.sqrt(distsq)
    else:
        # Infinite slope: the segment is vertical at X = x1.
        if ypoint < min(y1,y2):
            # Point is below the bottom of the line
            if y1 < y2:
                distsq = distance_squared(x1, y1, xpoint, ypoint)
            else:
                distsq = distance_squared(x2, y2, xpoint, ypoint)
            distance = math.sqrt(distsq)
        elif ypoint > max(y1,y2):
            # Point is above the top of the line
            if y1 > y2:
                distsq = distance_squared(x1, y1, xpoint, ypoint)
            else:
                distsq = distance_squared(x2, y2, xpoint, ypoint)
            distance = math.sqrt(distsq)
        else:
            # Point is within the Y range of the line, so the closest
            # approach is simply the horizontal offset.
            distance = abs(x1 - xpoint)
    return distance
def quadrangles_intersect(quad1, quad2):
    """
    Helper function to determine whether two quadrangles
    intersect.

    :Parameters:

    quad1: tuple of ((x1,y1),(x2,y2),(x3,y3),(x4,y4))
        The corner coordinates of the first quadrangle
    quad2: tuple of ((x1,y1),(x2,y2),(x3,y3),(x4,y4))
        The corner coordinates of the second quadrangle

    :Returns:

    True or False
    """
    assert isinstance(quad1, (list,tuple))
    assert isinstance(quad2, (list,tuple))
    assert len(quad1) > 3
    assert len(quad2) > 3
    assert isinstance(quad1[0], (list,tuple))
    assert isinstance(quad2[0], (list,tuple))
    assert len(quad1[0]) > 1
    assert len(quad2[0]) > 1
    # Two quadrangles intersect either if any of the lines bordering
    # those quadrangles intersect.
    line11 = (quad1[0][0], quad1[0][1], quad1[1][0], quad1[1][1])
    line12 = (quad1[1][0], quad1[1][1], quad1[2][0], quad1[2][1])
    line13 = (quad1[2][0], quad1[2][1], quad1[3][0], quad1[3][1])
    line14 = (quad1[3][0], quad1[3][1], quad1[0][0], quad1[0][1])
    line21 = (quad2[0][0], quad2[0][1], quad2[1][0], quad2[1][1])
    line22 = (quad2[1][0], quad2[1][1], quad2[2][0], quad2[2][1])
    line23 = (quad2[2][0], quad2[2][1], quad2[3][0], quad2[3][1])
    line24 = (quad2[3][0], quad2[3][1], quad2[0][0], quad2[0][1])
    for line1 in (line11, line12, line13, line14):
        for line2 in (line21, line22, line23, line24):
            if lines_intersect(line1, line2):
                # Two edges intersect
                return True
    # None of the edges intersect, but the quadrangles might still
    # intersect if one of them is entirely inside the other.
    # NOTE(review): containment is tested with axis-aligned bounding
    # boxes, which is only exact for convex quadrangles -- confirm.
    xmin1 = min( quad1[0][0], quad1[1][0], quad1[2][0], quad1[3][0])
    xmax1 = max( quad1[0][0], quad1[1][0], quad1[2][0], quad1[3][0])
    ymin1 = min( quad1[0][1], quad1[1][1], quad1[2][1], quad1[3][1])
    ymax1 = max( quad1[0][1], quad1[1][1], quad1[2][1], quad1[3][1])
    xmin2 = min( quad2[0][0], quad2[1][0], quad2[2][0], quad2[3][0])
    xmax2 = max( quad2[0][0], quad2[1][0], quad2[2][0], quad2[3][0])
    ymin2 = min( quad2[0][1], quad2[1][1], quad2[2][1], quad2[3][1])
    ymax2 = max( quad2[0][1], quad2[1][1], quad2[2][1], quad2[3][1])
    if (xmin1 < xmin2) and (xmax1 > xmax2) and \
       (ymin1 < ymin2) and (ymax1 > ymax2):
        # Quadrangle 2 entirely within quadrangle 1
        return True
    if (xmin2 < xmin1) and (xmax2 > xmax1) and \
       (ymin2 < ymin1) and (ymax2 > ymax1):
        # Quadrangle 1 entirely within quadrangle 2
        return True
    # If all the above tests have failed the quadrangles do not intersect.
    return False
def triangles_intersect(tri1, tri2):
    """
    Helper function to determine whether two triangles
    intersect.

    :Parameters:

    tri1: tuple of ((x1,y1),(x2,y2),(x3,y3))
        The corner coordinates of the first triangle
    tri2: tuple of ((x1,y1),(x2,y2),(x3,y3))
        The corner coordinates of the second triangle

    :Returns:

    True or False
    """
    assert isinstance(tri1, (list,tuple))
    assert isinstance(tri2, (list,tuple))
    assert len(tri1) > 2
    assert len(tri2) > 2
    assert isinstance(tri1[0], (list,tuple))
    assert isinstance(tri2[0], (list,tuple))
    assert len(tri1[0]) > 1
    assert len(tri2[0]) > 1
    # Two triangles intersect if any point from one triangle lies within
    # the other.
    # NOTE(review): configurations where edges cross but no vertex of
    # either triangle lies inside the other (e.g. a six-pointed star)
    # are not detected by this vertex-containment test -- confirm
    # acceptable.
    for xpoint,ypoint in tri1:
        if point_inside_triangle(xpoint, ypoint, tri2[0][0], tri2[0][1],
                                 tri2[1][0], tri2[1][1], tri2[2][0], tri2[2][1]):
            return True
    for xpoint,ypoint in tri2:
        if point_inside_triangle(xpoint, ypoint, tri1[0][0], tri1[0][1],
                                 tri1[1][0], tri1[1][1], tri1[2][0], tri1[2][1]):
            return True
    return False
def triangle_intersects_circle(triang, xcen, ycen, radius):
    """
    Helper function to determine whether a triangle and a
    circle intersect.

    :Parameters:

    triang: tuple of ((x1,y1),(x2,y2),(x3,y3))
        The corner coordinates of the triangle
    xcen: float
        X coordinate of centre of circle
    ycen: float
        Y coordinate of centre of circle
    radius: float
        Radius of circle

    :Returns:

    True or False
    """
    # NOTE(review): the same argument is asserted twice below; the
    # duplicates look like copy/paste leftovers.
    assert isinstance(triang, (list,tuple))
    assert isinstance(triang, (list,tuple))
    assert len(triang) > 2
    assert len(triang) > 2
    # A circle cannot intersect a triangle if it lies entirely outside
    # the rectangle bounding the triangle.
    xmin = min(triang[0][0], triang[1][0], triang[2][0]) - radius
    xmax = max(triang[0][0], triang[1][0], triang[2][0]) + radius
    ymin = min(triang[0][1], triang[1][1], triang[2][1]) - radius
    ymax = max(triang[0][1], triang[1][1], triang[2][1]) + radius
    if xcen < xmin or xcen > xmax or ycen < ymin or ycen > ymax:
        return False
    # A circle intersects a triangle if it intersects any of the
    # straight lines making up the edges of the triangle.
    # NOTE(review): a circle lying entirely inside the triangle touches
    # no edge and is reported as NOT intersecting -- confirm intended.
    if line_intersects_circle(triang[0][0], triang[0][1],
                              triang[1][0], triang[1][1],
                              xcen, ycen, radius):
        # Side 1-2 intersects the circle.
        return True
    if line_intersects_circle(triang[1][0], triang[1][1],
                              triang[2][0], triang[2][1],
                              xcen, ycen, radius):
        # Side 2-3 intersects the circle.
        return True
    if line_intersects_circle(triang[0][0], triang[0][1],
                              triang[2][0], triang[2][1],
                              xcen, ycen, radius):
        # Side 1-3 intersects the circle.
        return True
    return False
def polygons_intersect(poly1, poly2):
    """
    Helper function to determine whether two polygons
    intersect.

    NOTE: Not yet implemented - after validating its arguments this
    function always raises NotImplementedError.

    :Parameters:

    poly1: tuple of ((x1,y1),(x2,y2),...)
        The coordinates of the vertices of the first polygon.
        There must be at least 3 vertices.
    poly2: tuple of ((x1,y1),(x2,y2),(x3,y3),...)
        The coordinates of the vertices of the second polygon
        There must be at least 3 vertices.

    :Raises:

    NotImplementedError: always (pending implementation).
    """
    assert isinstance(poly1, (list,tuple))
    assert isinstance(poly2, (list,tuple))
    assert len(poly1) > 2
    assert len(poly2) > 2
    assert isinstance(poly1[0], (list,tuple))
    assert isinstance(poly2[0], (list,tuple))
    assert len(poly1[0]) > 1
    assert len(poly2[0]) > 1
    # TO BE IMPLEMENTED
    # (The unreachable "return True" that followed this raise has been
    # removed.)
    raise NotImplementedError("Polygon intersection not implemented yet.")
def line_from_two_points(x1, y1, x2, y2):
    """
    Return the equation of the straight line passing through two points.

    :Parameters:

    x1: float
        X coordinate of first point
    y1: float
        Y coordinate of first point
    x2: float
        X coordinate of second point
    y2: float
        Y coordinate of second point

    :Returns:

    (slope, intercept), or (None, xposition) when the line is vertical
    and the slope would be infinite.
    """
    run = x2 - x1
    if abs(run) > 0.0:
        gradient = (y2 - y1) / run
        # Intercept follows from y = slope * x + intercept at point 1.
        return (gradient, y1 - gradient * x1)
    # Vertical line: report its constant X position instead of a slope.
    return (None, x1)
def y_coord_on_circle(xcoord, xcen, ycen, radius):
    """
    Return the pair of Y coordinates where the vertical line at xcoord
    crosses the circle of centre (xcen, ycen) and given radius, or None
    when the line misses the circle entirely.
    """
    dx = xcoord - xcen
    # No solution when the vertical line lies outside the circle.
    if -radius <= dx <= radius:
        half_chord = math.sqrt((radius * radius) - (dx * dx))
        return (ycen - half_chord, ycen + half_chord)
    return None
def point_inside_boundary(xpoint, ypoint, xmin, xmax, ymin, ymax):
    """
    Return True when the point (xpoint,ypoint) lies strictly inside the
    rectangle bounded by (xmin,xmax,ymin,ymax); points exactly on the
    boundary count as outside.
    """
    return (xmin < xpoint < xmax) and (ymin < ypoint < ymax)
def point_inside_circle(xpoint, ypoint, xcen, ycen, radius, left_only=False,
                        right_only=False, bottom_only=False, top_only=False):
    """
    Determine whether the point (xpoint,ypoint) lies inside the circle
    described by (xcen,ycen,radius).

    The left_only, right_only, bottom_only and top_only flags restrict
    the test to one half or one quarter of the circle.
    """
    # Quick rejection: a point outside the bounding rectangle cannot be
    # inside the circle. The flags shrink the rectangle to the selected
    # half or quarter.
    xlo = xcen if right_only else xcen - radius
    xhi = xcen if left_only else xcen + radius
    ylo = ycen if top_only else ycen - radius
    yhi = ycen if bottom_only else ycen + radius
    if not point_inside_boundary(xpoint, ypoint, xlo, xhi, ylo, yhi):
        return False
    # Compare the squared distance from the centre with the squared
    # radius (avoids a square root).
    dx = xpoint - xcen
    dy = ypoint - ycen
    return (dx * dx) + (dy * dy) < radius * radius
def point_inside_ellipse(xpoint, ypoint, xcen, ycen, xsemimajor, ysemiminor,
                         orient,
                         left_only=False, right_only=False, bottom_only=False,
                         top_only=False):
    """
    Determine whether the point (xpoint,ypoint) lies inside the ellipse
    described by (xcen,ycen,xsemimajor,ysemiminor,orient).

    The left_only, right_only, bottom_only and top_only flags restrict
    the test to one half or one quarter of the ellipse.
    """
    # FIXME: This function doesn't work properly when the ellipse is tilted.
    # Quick rejection: a point outside the bounding rectangle cannot be
    # inside the ellipse. The flags shrink the rectangle to the selected
    # half or quarter.
    xlo = xcen if right_only else xcen - xsemimajor
    xhi = xcen if left_only else xcen + xsemimajor
    ylo = ycen if top_only else ycen - ysemiminor
    yhi = ycen if bottom_only else ycen + ysemiminor
    if not point_inside_boundary(xpoint, ypoint, xlo, xhi, ylo, yhi):
        return False
    # De-rotate the offset from the centre into the ellipse frame and
    # apply the standard ellipse inequality.
    (xrot, yrot) = rotate_coordinates(xpoint - xcen, ypoint - ycen, -orient)
    xratio = (xrot * xrot) / (xsemimajor * xsemimajor)
    yratio = (yrot * yrot) / (ysemiminor * ysemiminor)
    return (xratio + yratio) < 1.0
def point_inside_triangle(xpoint, ypoint, ax, ay, bx, by, cx, cy):
    """
    Determine whether the point (xpoint,ypoint) lies inside the triangle
    with corners A (ax,ay), B (bx,by) and C (cx,cy).

    :Parameters:

    xpoint: float
        X coordinate of point
    ypoint: float
        Y coordinate of point
    ax, ay: float
        Coordinates of first corner of triangle
    bx, by: float
        Coordinates of second corner of triangle
    cx, cy: float
        Coordinates of third corner of triangle

    :Returns:

    True or False
    """
    # The point is inside the triangle when, for each edge, it lies on
    # the same side of the edge as the opposite corner. The edges are
    # checked in the order A-B (vs C), B-C (vs A), A-C (vs B).
    edges = ((ax, ay, bx, by, cx, cy),
             (bx, by, cx, cy, ax, ay),
             (ax, ay, cx, cy, bx, by))
    for (x1, y1, x2, y2, xopp, yopp) in edges:
        (slope, intercept) = line_from_two_points(x1, y1, x2, y2)
        if not test_point(slope, intercept, xopp, yopp, xpoint, ypoint):
            # Wrong side of this edge: outside the triangle.
            return False
    # On the inner side of all three edges.
    return True
def point_inside_avoidance_zone(xpoint, ypoint, xfibre, yfibre, favoid,
                                length1, length2, avoid2, halfwidth2):
    """
    Helper function to determine if a point (xpoint,ypoint)
    is inside the avoidance zone for a positioner arm assigned
    to a target (xfibre,yfibre). All coordinates are referenced
    to an origin at the positioner centre (same as the alpha axis).

    OBSOLETE FUNCTION.

    :Parameters:

    xpoint: float
        X coordinate of point
    ypoint: float
        Y coordinate of point
    xfibre: float
        X coordinate of fibre
    yfibre: float
        Y coordinate of fibre.
    favoid: float
        Radius of fibre avoidance circle. No two targets can come closer
        than this limit.
    length1: float
        Length of the alpha arm (determining the location of the elbow joint).
    length2: float
        Length of the beta arm (determining the distance of the fibre
        from the elbow joint).
    avoid2: float
        Length of the outer portion of the beta arm which can collide with
        another beta arm.
    halfwidth2: float
        Width of the outer portion of the beta arm, which can collide with
        another beta arm.

    :Returns:

    True or False
    """
    # If xfibre is negative, negate the X coordinates so the fibre lies
    # in the +X half plane.
    if xfibre < 0.0:
        xfibre = -xfibre
        xpoint = -xpoint
    # If yfibre is not zero, rotate all the coordinates into a frame of
    # reference where the fibre lies on the +X axis (yfibre == 0).
    if abs(yfibre) > EPS:
        angle = math.atan2(yfibre, xfibre)
        # BUG FIX: the radial distance is sqrt(x*x + y*y); the original
        # code computed sqrt(x*y + y*y).
        xfibre = math.sqrt( (xfibre * xfibre) + (yfibre * yfibre) )
        # BUG FIX: a rotation by -angle requires a minus sign on the
        # x*sin(angle) term of the new Y coordinate. With this form the
        # fibre maps exactly onto (xfibre, 0).
        newxpoint = xpoint * math.cos(angle) + ypoint * math.sin(angle)
        newypoint = -xpoint * math.sin(angle) + ypoint * math.cos(angle)
        xpoint = newxpoint
        ypoint = newypoint
    # Calculate some significant locations.
    # Left-most extent of beta arm when tucked in, which is also the
    # left-most extent of ellipse swept out by beta arm.
    xleft = (length2 - length1) - avoid2
    # Right-most extent of ellipse swept out by beta arm
    xright = length2 + length1 - avoid2
    # Centre of avoidance ellipse
    xboundary = (xleft + xright)/2.0
    # Bottom of avoidance ellipse, determined by the Y extent of the
    # beta arm when the alpha arm is vertical.
    ybottom = -length1 * avoid2 / length2
    # The avoidance zone is oversized either by the fibre avoidance
    # or by half the width of the beta arm.
    xfarleft = xleft - favoid
    xfarright = xfibre + favoid
    yfarbottom = ybottom - halfwidth2
    yfartop = halfwidth2
    # If the test point is outside this extreme bounding rectangle then it
    # can't be inside the avoidance zone.
    if not point_inside_boundary(xpoint, ypoint, xfarleft, xfarright,
                                 yfarbottom, yfartop):
        return False
    # A more detailed check is needed. If the test point is inside any
    # of the following zones then it is inside the avoidance zone. The
    # easiest zones are tested first for efficiency.
    # (1) Rectangle along the beta arm's travel.
    if point_inside_boundary(xpoint, ypoint, xleft, xfibre, -halfwidth2,
                             halfwidth2):
        return True
    # (2) Circles at either end of the travel
    if point_inside_circle(xpoint, ypoint, xleft, 0.0, favoid):
        return True
    # NOTE(review): the original author queried whether favoid or
    # halfwidth2 is the correct radius here -- confirm.
    if point_inside_circle(xpoint, ypoint, xfibre, 0.0, favoid):
        return True
    # (3) Quarter ellipse swept out below the travel rectangle.
    semi_major = favoid + (xright - xleft)/2.0
    semi_minor = abs(ybottom) + halfwidth2
    if point_inside_ellipse(xpoint, ypoint, xboundary, 0.0, semi_major,
                            semi_minor, 0.0, left_only=True, bottom_only=True):
        return True
    # (4) Triangle filling the gap between ellipse and rectangle.
    if point_inside_triangle(xpoint, ypoint, xboundary, yfarbottom,
                             xboundary, -halfwidth2, xfibre, -halfwidth2):
        return True
    return False
def generate_avoidance_perimeter(xfibre, length1, length2, mlength, mwidth,
                                 favoid, padding=0.0, inc1=1.0, inc2=0.5):
    """
    Generate a list of X,Y coordinates defining the perimeter of the
    avoidance zone swept out by the metrology avoidance zone and the
    fibre avoidance zone when the fibre holder is moved radially from
    xfibre to the minimum radius dictated by the arm lengths.

    OBSOLETE FUNCTION.

    :Parameters:

    xfibre: float
        The fibre X coordinate for which to generate an avoidance
        perimeter. If None, the maximum reach of the arm is assumed.
    length1: float
        Length of inner (alpha) arm
    length2: float
        Length of outer (beta) arm
    mlength: float
        Length of metrology zone at end of beta arm.
    mwidth: float
        Width of metrology zone at end of beta arm.
    favoid: float
        Radius of the avoidance zone surrounding the fibre.
    padding: float (optional)
        An optional padding to be applied to the edges of the
        avoidance zone, increasing the clearance.
        By default the padding is zero.
    inc1: float (optional)
        The X coordinate increment for the avoidance zone.
        By default this is 1.0.
    inc2: float (optional)
        The X coordinate increment for the xfibre tests.
        By default this is 0.5.

    :Returns:

    (xpoints, ypoints): tuple of tuples
        A list of x,y coordinates defining the perimeter of the
        avoidance zone at the given xfibre.
    """
    # First generate a list of X coordinates to be tested.
    maxlength = length2 + length1
    minlength = length2 - length1
    xmin = 0.0
    xmax = maxlength + padding + favoid
    xcoords = np.arange(xmin, xmax+inc1, inc1)
    # Fill the Y extremities with dummy sentinel values (+/-1000.0),
    # well outside any reachable Y; columns never touched by the sweep
    # keep these sentinels and are filtered out at the end.
    yminlist = 1000.0 * np.ones_like(xcoords)
    ymaxlist = -1000.0 * np.ones_like(xcoords)
    # Generate the avoidance zone swept out by the metrology zone
    # as the fibre moves inwards (from xfibre down to the minimum
    # radius reachable by the arm).
    if xfibre is None:
        xfibre = maxlength
    xtest = xfibre
    while (xtest >= minlength):
        # Corners of the (padded) metrology rectangle at this radius.
        (corner_bottom, corner_left, corner_top, corner_right) = \
            solve_metrology_edge(xtest, length1, length2, mlength, mwidth,
                                 padding=padding)
        # (slope, intercept) equations of the four rectangle edges.
        bottom_line = line_from_two_points(corner_bottom[0], corner_bottom[1],
                                           corner_right[0], corner_right[1])
        left_line = line_from_two_points(corner_left[0], corner_left[1],
                                         corner_bottom[0], corner_bottom[1])
        top_line = line_from_two_points(corner_left[0], corner_left[1],
                                        corner_top[0], corner_top[1])
        right_line = line_from_two_points(corner_top[0], corner_top[1],
                                          corner_right[0], corner_right[1])
        # Update the running min/max Y at every sampled X column spanned
        # by each edge. A None slope marks a vertical edge, which cannot
        # bound a Y range at a sampled X.
        for ii in range(0, len(xcoords)):
            if bottom_line[0] is not None:
                if xcoords[ii] >= corner_bottom[0] and \
                   xcoords[ii] <= corner_right[0]:
                    ybottom = bottom_line[1] + bottom_line[0] * xcoords[ii]
                    if ybottom < yminlist[ii]:
                        yminlist[ii] = ybottom
            if left_line[0] is not None:
                if xcoords[ii] >= corner_left[0] and \
                   xcoords[ii] <= corner_bottom[0]:
                    ybottom = left_line[1] + left_line[0] * xcoords[ii]
                    if ybottom < yminlist[ii]:
                        yminlist[ii] = ybottom
            if top_line[0] is not None:
                if xcoords[ii] >= corner_left[0] and \
                   xcoords[ii] <= corner_top[0]:
                    ytop = top_line[1] + top_line[0] * xcoords[ii]
                    if ytop > ymaxlist[ii]:
                        ymaxlist[ii] = ytop
            if right_line[0] is not None:
                if xcoords[ii] >= corner_top[0] and \
                   xcoords[ii] <= corner_right[0]:
                    ytop = right_line[1] + right_line[0] * xcoords[ii]
                    if ytop > ymaxlist[ii]:
                        ymaxlist[ii] = ytop
        xtest -= inc2
    # Add the fibre avoidance zone if needed.
    if favoid > 0.0:
        for ii in range(0, len(xcoords)):
            result = y_coord_on_circle(xcoords[ii], xfibre, 0.0, favoid+padding)
            if result is not None:
                if result[0] < yminlist[ii]:
                    yminlist[ii] = result[0]
                if result[1] > ymaxlist[ii]:
                    ymaxlist[ii] = result[1]
    # Build a closed perimeter: walk the upper edge left to right, then
    # the lower edge right to left, skipping untouched sentinel columns.
    xpoints = []
    ypoints = []
    for ii in range(0, len(xcoords)):
        if ymaxlist[ii] > -999.0:
            xpoints.append(xcoords[ii])
            ypoints.append(ymaxlist[ii])
    for ii in range(0, len(xcoords)):
        jj = -ii - 1
        if yminlist[jj] < 999.0:
            xpoints.append(xcoords[jj])
            ypoints.append(yminlist[jj])
    # Close the polygon by repeating the first point.
    xpoints.append(xpoints[0])
    ypoints.append(ypoints[0])
    return (xpoints,ypoints)
def generate_avoidance_perimeter2(xstart, ystart, xfinish, yfinish,
                                  length1, length2, mlength, mwidth,
                                  favoid, padding=0.0, inc1=1.0, inc2=0.5):
    """
    Generate a list of X,Y coordinates defining the perimeter of the
    avoidance zone swept out by the metrology avoidance zone and the
    fibre avoidance zone when the fibre holder moves from
    (xstart, ystart) to (xfinish, yfinish).

    OBSOLETE FUNCTION.

    :Parameters:

    xstart: float
        The fibre X coordinate at the beginning of travel.
    ystart: float
        The fibre Y coordinate at the beginning of travel.
    xfinish: float
        The fibre X coordinate at the end of travel.
    yfinish: float
        The fibre Y coordinate at the end of travel.
    length1: float
        Length of inner (alpha) arm
    length2: float
        Length of outer (beta) arm
    mlength: float
        Length of metrology zone at end of beta arm.
    mwidth: float
        Width of metrology zone at end of beta arm.
    favoid: float
        Radius of the avoidance zone surrounding the fibre.
    padding: float (optional)
        An optional padding to be applied to the edges of the
        avoidance zone, increasing the clearance.
        By default the padding is zero.
    inc1: float (optional)
        The X coordinate increment for the avoidance zone.
        By default this is 1.0.
    inc2: float (optional)
        The X coordinate increment for the xfibre tests.
        By default this is 0.5.

    :Returns:

    (xpoints, ypoints): tuple of tuples
        A list of x,y coordinates defining the perimeter of the
        avoidance zone at the given xfibre.
    """
    # First generate a list of X coordinates to be tested.
    maxlength = length2 + length1
    minlength = length2 - length1
    xmin = min( minlength, xstart, xfinish ) - mlength - padding - favoid
    xmax = max(maxlength, xstart, xfinish) + mlength + padding + favoid
    xcoords = np.arange(xmin, xmax+inc1, inc1)
    # Fill the Y extremities with dummy sentinel values (+/-1000.0);
    # columns never touched by the sweep keep these sentinels and are
    # filtered out at the end.
    yminlist = 1000.0 * np.ones_like(xcoords)
    ymaxlist = -1000.0 * np.ones_like(xcoords)
    # Generate the avoidance zone swept out by the metrology zone
    # as the fibre moves from start to finish.
    xtest = xstart
    ytest = ystart
    # Number of sampling steps along the X travel, rounded to nearest.
    # NOTE(review): npoints is 0 when xstart == xfinish, so the division
    # below would raise ZeroDivisionError -- confirm callers never
    # request a purely vertical move.
    npoints = int(0.5 + abs(xfinish - xstart) / inc2)
    if xstart > xfinish:
        xinc = -inc2
    else:
        xinc = inc2
    yinc = (yfinish - ystart) / float(npoints)
    for point in range(0,npoints):
        # Convert to polar coordinates
        (rtest, thtest) = cartesian_to_polar( xtest, ytest )
        # Solve the metrology edge for a nominal location at x=rtest,
        # theta=0 and then rotate the corner coordinates by thtest.
        (cb, cl, ct, cr) = \
            solve_metrology_edge(rtest, length1, length2, mlength, mwidth,
                                 padding=padding)
        corner_bottom = rotate_coordinates( cb[0], cb[1], thtest)
        corner_left = rotate_coordinates( cl[0], cl[1], thtest)
        corner_top = rotate_coordinates( ct[0], ct[1], thtest)
        corner_right = rotate_coordinates( cr[0], cr[1], thtest)
        # (slope, intercept) equations of the four rectangle edges.
        bottom_line = line_from_two_points(corner_bottom[0], corner_bottom[1],
                                           corner_right[0], corner_right[1])
        left_line = line_from_two_points(corner_left[0], corner_left[1],
                                         corner_bottom[0], corner_bottom[1])
        top_line = line_from_two_points(corner_left[0], corner_left[1],
                                        corner_top[0], corner_top[1])
        right_line = line_from_two_points(corner_top[0], corner_top[1],
                                          corner_right[0], corner_right[1])
        # Update the running min/max Y at every sampled X column spanned
        # by each edge. A None slope marks a vertical edge.
        for ii in range(0, len(xcoords)):
            if bottom_line[0] is not None:
                if xcoords[ii] >= corner_bottom[0] and \
                   xcoords[ii] <= corner_right[0]:
                    ybottom = bottom_line[1] + bottom_line[0] * xcoords[ii]
                    if ybottom < yminlist[ii]:
                        yminlist[ii] = ybottom
            if left_line[0] is not None:
                if xcoords[ii] >= corner_left[0] and \
                   xcoords[ii] <= corner_bottom[0]:
                    ybottom = left_line[1] + left_line[0] * xcoords[ii]
                    if ybottom < yminlist[ii]:
                        yminlist[ii] = ybottom
            if top_line[0] is not None:
                if xcoords[ii] >= corner_left[0] and \
                   xcoords[ii] <= corner_top[0]:
                    ytop = top_line[1] + top_line[0] * xcoords[ii]
                    if ytop > ymaxlist[ii]:
                        ymaxlist[ii] = ytop
            if right_line[0] is not None:
                if xcoords[ii] >= corner_top[0] and \
                   xcoords[ii] <= corner_right[0]:
                    ytop = right_line[1] + right_line[0] * xcoords[ii]
                    if ytop > ymaxlist[ii]:
                        ymaxlist[ii] = ytop
        xtest += xinc
        ytest += yinc
    # Add the fibre avoidance zone if needed, sampled along the whole
    # move and then at the exact finishing position.
    if favoid > 0.0:
        xtest = xstart
        ytest = ystart
        for point in range(0,npoints):
            for ii in range(0, len(xcoords)):
                result = y_coord_on_circle(xcoords[ii], xtest, ytest, favoid+padding)
                if result is not None:
                    if result[0] < yminlist[ii]:
                        yminlist[ii] = result[0]
                    if result[1] > ymaxlist[ii]:
                        ymaxlist[ii] = result[1]
            xtest += xinc
            ytest += yinc
        for ii in range(0, len(xcoords)):
            result = y_coord_on_circle(xcoords[ii], xfinish, yfinish, favoid+padding)
            if result is not None:
                if result[0] < yminlist[ii]:
                    yminlist[ii] = result[0]
                if result[1] > ymaxlist[ii]:
                    ymaxlist[ii] = result[1]
    # Build a closed perimeter: walk the upper edge left to right, then
    # the lower edge right to left, skipping untouched sentinel columns.
    xpoints = []
    ypoints = []
    for ii in range(0, len(xcoords)):
        if ymaxlist[ii] > -999.0:
            xpoints.append(xcoords[ii])
            ypoints.append(ymaxlist[ii])
    for ii in range(0, len(xcoords)):
        jj = -ii - 1
        if yminlist[jj] < 999.0:
            xpoints.append(xcoords[jj])
            ypoints.append(yminlist[jj])
    # Close the polygon by repeating the first point.
    xpoints.append(xpoints[0])
    ypoints.append(ypoints[0])
    return (xpoints,ypoints)
# def xfibre_to_avoidance(xfibre, length1, length2, mlength, mwidth, padding=0.0):
# """
#
# Given a particular X fibre coordinate, return the coordinates of
# the left-most, right-most, bottom-most and top-most corners of
# the metrology avoidance zone.
#
# :Parameters:
#
# xfibre: float
# X coordinate of the centre location of the fibre.
# The Y coordinate is assumed to be zero (i.e. the avoidance
# zone is predicted for fibre movement along the X axis - other
# zones may be calculated by rotating this one).
# length1: float
# Length of inner (alpha) arm
# length2: float
# Length of outer (beta) arm
# mlength: float
# Length of metrology zone at end of beta arm.
# mwidth: float
# Width of metrology zone at end of beta arm.
# padding: float (optional)
# An optional padding to be applied to the edges of the
# avoidance zone, increasing the clearance.
# By default the padding is zero.
#
# :Returns:
#
# quad: tuple of ((x1,y1),(x2,y2),(x3,y3),(x4,y4))
# The corner coordinates of the metrology zone, sorted
# into left-most, right-most, bottom-most and top-most.
#
# """
# # First solve the rectangle
# quad = solve_metrology_edge(xfibre, length1, length2, mlength, mwidth,
# padding=padding)
#
# # Sort the corners into order.
# corner_left = quad[0]
# corner_right = quad[0]
# corner_bottom = quad[0]
# corner_top = quad[0]
# for corner in quad:
# if corner[0] < corner_left[0]:
# corner_left = corner
# elif corner[0] > corner_right[0]:
# corner_right = corner
# if corner[1] < corner_bottom[0]:
# corner_bottom = corner
# elif corner[1] > corner_top[0]:
# corner_top = corner
#
# return (corner_left, corner_right, corner_bottom, corner_top)
def solve_metrology_edge(xfibre, length1, length2, mlength, mwidth,
                         padding=0.0):
    """
    Given a fibre location (assumed to be at the end of the metrology
    zone) and the positioner arm parameters, find the corner locations
    of a metrology rectangle of the given length and width.

    :Parameters:

    xfibre: float
        X coordinate of the centre location of the fibre.
        The Y coordinate is assumed to be zero (i.e. the avoidance
        zone is predicted for fibre movement along the X axis - other
        zones may be calculated by rotating this one).
    length1: float
        Length of inner (alpha) arm
    length2: float
        Length of outer (beta) arm
    mlength: float
        Length of metrology zone at end of beta arm.
    mwidth: float
        Width of metrology zone at end of beta arm.
    padding: float (optional)
        An optional padding to be applied to the edges of the
        avoidance zone, increasing the clearance.
        By default the padding is zero.

    :Returns:

    quad: tuple of ((x1,y1),(x2,y2),(x3,y3),(x4,y4))
        The corner coordinates of the metrology zone

    :Raises:

    ValueError
        If the arm geometry cannot place the fibre at xfibre.
    """
    # Solve the triangle cosine rule for the orientation angle of the
    # beta arm when the fibre sits at (xfibre, 0).
    length2sq = length2 * length2
    xfibresq = xfibre * xfibre
    length1sq = length1 * length1
    angle = solve_triangle(length2, length2sq, xfibre, xfibresq, length1sq)
    if angle is None:
        # The requested radius is outside the arm's reach.
        raise ValueError("Triangle equation cannot be solved")
    # Locate the corners of the metrology rectangle, padded on all
    # sides except the fibre (upper) end.
    return solve_tilted_rectangle(xfibre, 0.0, mlength, mwidth, angle,
                                  padding=padding, pad_upper=False)
def solve_tilted_rectangle(xend, yend, length, width, angle, padding=0.0,
                           pad_upper=True):
    """
    Given a rectangle of a certain length, width and orientation, and
    the coordinates of the centre of its upper (far) edge, return the
    coordinates of its four corners.

    :Parameters:

    xend: float
        X coordinate of the centre of the upper edge (at the extremity
        of the length) of the rectangle.
    yend: float
        Y coordinate of the centre of the same edge of the rectangle.
    length: float
        Length of the rectangle
    width: float
        Width of the rectangle
    angle: float
        Angle of the rectangle (radians).
    padding: float (optional)
        An optional padding to be applied to the edges of the
        rectangle, increasing the length and the width by
        2 * padding. This parameter can be used to determine the
        corners of a new rectangle which avoids the edges of the
        original rectangle by at least this amount.
        By default the padding is zero and the corners of the
        original rectangle are returned.
    pad_upper: boolean (optional)
        Set True (the default) to pad the upper edge of the rectangle.
        Setting this to False allows one end of the rectangle to
        have a much smaller padding.

    :Returns:

    quad: tuple of ((x1,y1),(x2,y2),(x3,y3),(x4,y4))
        The corner coordinates of the rectangle
    """
    assert float(length) > 0.0
    assert float(width) > 0.0
    # Hoist the trigonometry: the same sine and cosine are reused for
    # every corner and padding term.
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    # Centre of the lower (near) edge, one length back along the axis.
    xother = xend - length * cos_a
    yother = yend - length * sin_a
    # Half-width offsets perpendicular to the rectangle axis.
    half_w_x = width * sin_a / 2.0
    half_w_y = width * cos_a / 2.0
    # The four corners: two about the lower edge, two about the upper.
    x1 = xother + half_w_x
    y1 = yother - half_w_y
    x2 = xother - half_w_x
    y2 = yother + half_w_y
    x3 = xend - half_w_x
    y3 = yend + half_w_y
    x4 = xend + half_w_x
    y4 = yend - half_w_y
    # If required, push each corner outwards by the padding.
    if padding > 0.0:
        pad_len_x = padding * cos_a
        pad_len_y = padding * sin_a
        pad_wid_x = padding * sin_a
        pad_wid_y = padding * cos_a
        x1 = x1 - pad_len_x + pad_wid_x
        y1 = y1 - pad_len_y - pad_wid_y
        x2 = x2 - pad_len_x - pad_wid_x
        y2 = y2 - pad_len_y + pad_wid_y
        if pad_upper:
            x3 = x3 + pad_len_x - pad_wid_x
            y3 = y3 + pad_len_y + pad_wid_y
            x4 = x4 + pad_len_x + pad_wid_x
            y4 = y4 + pad_len_y - pad_wid_y
        else:
            # Only pad the width at the upper end of the rectangle.
            x3 = x3 - pad_wid_x
            y3 = y3 + pad_wid_y
            x4 = x4 + pad_wid_x
            y4 = y4 - pad_wid_y
    return [(x1, y1), (x2, y2), (x3, y3), (x4, y4)]
def solve_tangent_angle(distance, radius):
    """
    Helper function to calculate the angle between the
    centre of a circle and the tangent point, as seen from
    a point a certain distance from the circle.

    :Parameters:

    distance: float
        Distance of point from centre of circle.
    radius: float
        Radius of circle

    :Returns:

    tangent_angle: float
        The tangent angle in radians, or None if there is
        no solution.
    """
    dist = float(distance)
    # BUG FIX: guard the division. No tangent can be drawn from the
    # centre of the circle itself, so return None (the documented
    # "no solution" value) instead of raising ZeroDivisionError.
    if dist == 0.0:
        return None
    sinangle = float(radius) / dist
    if abs(sinangle) <= 1.0:
        angle = math.asin(sinangle)
    else:
        # The viewing point is inside the circle: no tangent exists.
        angle = None
    return angle
def solve_triangle(side1, side1sq, side2, side2sq, side3sq):
    """
    Solve the triangular cosine rule for an angle given all three sides:

        c**2 = a**2 + b**2 - 2 * a * b cos( C )

    :Parameters:

    side1: float
        Length of first side of triangle
    side1sq: float
        Square of the length of the first side of the triangle
        (given as well to save computation time).
    side2: float
        Length of the second side of the triangle
    side2sq: float
        Square of the length of the second side of the triangle
        (given as well to save computation time).
    side3sq: float
        Square of the length of the third side of the triangle

    :Returns:

    angle3: float
        Angle subtended by the third side of the triangle (radians),
        or None (with a warning) when the equation cannot be solved.
    """
    # Describe the inputs once; reused in both failure messages.
    params = "Triangle equation with side1=%f, side1sq=%f, " \
             "side2=%f, side2sq=%f, side3sq=%f " % \
             (side1, side1sq, side2, side2sq, side3sq)
    # A degenerate (near zero-length) side would divide by zero below.
    if abs(side1) < EPS or abs(side2) < EPS:
        warnings.warn(params + "cannot be solved. Divide by zero!")
        return None
    cosangle = (side1sq + side2sq - side3sq) / \
               (2.0 * side1 * side2)
    # Outside [-1, 1] the sides cannot form a triangle.
    if -1.0 <= cosangle <= 1.0:
        return math.acos(cosangle)
    warnings.warn(params + "cannot be solved. cos(angle) = %f" % cosangle)
    return None
def solve_shoulder(theta_local, shoulder_fibre, parity):
    """
    Convert a fibre angle and a shoulder-to-fibre angle into a shoulder
    (alpha motor) angle for the requested elbow parity. In the polar
    coordinates, theta is an angle measured clockwise from the Y axis.

    :Parameters:

    theta_local: float
        Angular coordinate of fibre holder, clockwise from Y axis (radians)
    shoulder_fibre: float
        Shoulder to fibre angle (radians)
    parity: int
        The elbow option to be calculated:

        * 1 means elbow right armed
        * -1 means elbow left armed

    :Returns:

    angle1: float
        The shoulder angle (or alpha motor angle) (radians)
    """
    # Convert the clockwise-from-Y angle to anticlockwise-from-X.
    base_angle = math.radians(90.0) - theta_local
    # The parity chooses between the two mirror-image arm solutions.
    if parity == PARITY_RIGHT:
        angle1 = base_angle - shoulder_fibre
    else:
        angle1 = base_angle + shoulder_fibre
    return angle1
def solve_shoulder_xy(xpos, ypos, shoulder_fibre, parity):
    """
    Calculate the shoulder (alpha motor) angle from a fibre location
    (xpos, ypos) and the fibre angle at the shoulder joint.

    :Parameters:

    xpos: float
        X coordinate of fibre holder
    ypos: float
        Y coordinate of fibre holder
    shoulder_fibre: float
        Shoulder to fibre angle (or beta motor angle) (radians)
    parity: int
        The elbow parity to be calculated:

        * 1 means elbow right armed
        * -1 means elbow left armed

    :Returns:

    angle1: float
        The shoulder angle (or alpha motor angle) (radians)
    """
    # Direction of the fibre as seen from the shoulder axis.
    fibre_angle = math.atan2(ypos, xpos)
    # The parity chooses between the two mirror-image arm solutions.
    if parity == PARITY_RIGHT:
        return fibre_angle - shoulder_fibre
    return fibre_angle + shoulder_fibre
def solve_elbow(r_local, theta_local, parity, length1, length1sq, length2, length2sq):
    """
    Solve the elbow location for a target position given in polar
    coordinates (R, theta), a parity choice and the arm lengths.
    In the polar coordinates, theta is an angle measured clockwise
    from the Y axis.

    :Parameters:

    r_local: float
        Radial coordinate of fibre holder
    theta_local: float
        Angular coordinate of fibre holder (radians)
    parity: int
        The elbow option to be adopted:

        * 1 means elbow right armed
        * -1 means elbow left armed

    length1: float
        Length of inner (alpha) arm
    length1sq: float
        Square of the length of the inner (alpha) arm
        (given as well to save computation time).
    length2: float
        Length of outer (beta) arm
    length2sq: float
        Square of the length of the outer (beta) arm
        (given as well to save computation time).

    :Returns:

    (r_elbow,theta_elbow) - the location of the elbow joint,
    or (None,None) if the equation cannot be solved.
    """
    # Angle at the shoulder between the target direction and the alpha
    # arm, from the triangle formed by the two arms and the reach.
    r_squared = r_local * r_local
    shoulder_fibre = solve_triangle(r_local, r_squared, length1, length1sq,
                                    length2sq)
    if shoulder_fibre is None:
        # The triangle cannot close: target out of reach.
        return (None, None)
    # The elbow sits at radius length1 along the alpha arm direction.
    angle1 = solve_shoulder(theta_local, shoulder_fibre, parity)
    return (length1, angle1)
def solve_elbow_xy(xpos, ypos, parity, length1, length1sq, length2, length2sq):
    """
    Solve the elbow location for a target position given in Cartesian
    coordinates (xpos, ypos), a parity choice and the arm lengths.

    :Parameters:

    xpos: float
        X coordinate of fibre holder
    ypos: float
        Y coordinate of fibre holder
    parity: int
        The elbow parity to be adopted:

        * 1 means elbow right armed
        * -1 means elbow left armed

    length1: float
        Length of inner (alpha) arm
    length1sq: float
        Square of the length of the inner (alpha) arm
        (given as well to save computation time).
    length2: float
        Length of outer (beta) arm
    length2sq: float
        Square of the length of the outer (beta) arm
        (given as well to save computation time).

    :Returns:

    (xelbow,yelbow) - the location of the elbow joint,
    or (None,None) if the equation cannot be solved.
    """
    # Distance from the shoulder axis to the requested fibre position:
    # the third side of the shoulder/elbow/fibre triangle.
    reachsq = (xpos * xpos) + (ypos * ypos)
    reach = math.sqrt(reachsq)
    # Angle at the shoulder between the target direction and alpha arm.
    shoulder_fibre = solve_triangle(reach, reachsq, length1, length1sq,
                                    length2sq)
    if shoulder_fibre is None:
        # The triangle cannot close: target out of reach.
        return (None, None)
    # Project the alpha arm to find the elbow position.
    angle1 = solve_shoulder_xy(xpos, ypos, shoulder_fibre, parity)
    return (length1 * math.cos(angle1), length1 * math.sin(angle1))
def read_grid_configuration(filename, pitch, ignore_header=True):
    """
    Read a CSV file and return a configuration list describing the locations
    of fibre positioners on a hexagonal grid.

    The CSV columns used are: column 0 (integer ID), column 4 (X centre
    in mm) and column 5 (Y centre in mm).

    :Parameters:

    filename: string
        Name of the CSV file to be read.
    pitch: float
        Distance between neighbouring positioners (in mm)
    ignore_header: bool, optional
        If True (default), ignore the first line of the file.

    :Returns:

    config_list: list of (index, xcen, ycen, orient, column, row)
        List of configuration parameters describing the IDs,
        locations and orientations of the fibre positioners.
    """
    import csv
    config_list = []
    mincol = 0
    minrow = 0
    with open(filename, 'r') as csvfile:
        cfile = csv.reader(csvfile)
        if ignore_header:
            # BUG FIX: csv reader objects have no .next() method in
            # Python 3; use the builtin next() to skip the header row.
            next(cfile)
        # Use a name other than "row" for each record: "row" is also
        # the hexagonal grid row computed below.
        for record in cfile:
            ident = int(record[0])
            xcen = float(record[4])
            ycen = float(record[5])
            orient = 0.0
            (column, row) = cartesian_to_hexagonal(xcen, ycen, pitch)
            mincol = min(column, mincol)
            minrow = min(row, minrow)
            config = [ident, xcen, ycen, orient, column, row]
            config_list.append(config)
    # The "with" statement closes the file; the redundant explicit
    # close() has been removed.
    # Adjust to make the minimum row and column zero.
    for config in config_list:
        config[4] -= mincol
        config[5] -= minrow
    return config_list
def remove_grid_cells( config_list, skip_cells ):
    """
    Remove the given list of cells from a configuration list

    :Parameters:

    config_list: list of (index, xcen, ycen, orient, column, row)
        List of configuration parameters describing the IDs,
        locations and orientations of the fibre positioners.
    skip_cells: list of tuples of 2 ints (optional)
        A list of (column,row) combinations to be removed. This allows
        gaps to be left in the grid to simulate the location of
        acquisition cameras. If empty, the list is unchanged.

    :Returns:

    new_config_list: list of (index, xcen, ycen, orient, column, row)
        List of configuration parameters describing the IDs,
        locations and orientations of the fibre positioners, with
        the skipped cells removed.
    """
    # An empty or None skip list means there is nothing to remove;
    # return the original list object unchanged.
    if not skip_cells:
        return config_list
    # Keep only the entries whose (column,row) pair, stored at indices
    # 4 and 5 of each configuration, matches none of the skipped cells.
    return [entry for entry in config_list
            if not any(entry[4] == skipcol and entry[5] == skiprow
                       for (skipcol, skiprow) in skip_cells)]
def generate_grid_configuration(columns, rows, pitch, shape='circle',
                                xyerror=0.0, oerror=0.0, curvrad=None,
                                origin_at_centre=True, gridradius=None,
                                skip_cells=None):
    """
    Generate a configuration list describing the locations
    of fibre positioners on a hexagonal grid of a given size.

    :Parameters:

    columns: int
        Number of columns in the hexagonal grid.
    rows: int
        Number of rows in the hexagonal grid.
    pitch: float
        Distance between neighbouring positioners (in mm)
    shape: str (optional)
        The shape of the positioner grid.

        * rectangle - A fully populated rectangular grid.
        * circle - A grid that is only populated inside a boundary
          defined by the largest inscribed circle. NOTE: For a fully
          populated circle, the number of rows needs to be 2/SQRT(3)
          times greater than the number of columns.

        The default shape is 'circle'
    xyerror: float (optional)
        An uncertainty in the location of the centre of each
        positioner (in mm). This parameter is used to simulate
        manufacturing differences. Each positioner will be randomly offset
        by a small amount based on this uncertainty. The default of
        0.0 keeps each positioner exactly aligned with the hexagonal
        grid.
    oerror: float (optional)
        An uncertainty in the orientation of each positioner (in radians).
        This parameter is used to simulate manufacturing differences.
        Each positioner will be randomly oriented by a small amount
        based on this uncertainty. The default of 0.0 keeps each
        positioner exactly oriented along the X axis.
    curvrad: float (optional)
        The radius of curvature of the focal plane.
        If a value is given, grid locations are adjusted to project
        onto a curved focal plane.
        By default curvrad=None and the focal plane is assumed flat.
    origin_at_centre: boolean (optional)
        If True, the origin of the grid coordinate system is at
        its centre.
        If False the origin is at the bottom left corner.
        The default is True.
    gridradius: float (optional)
        If shape is 'circle', the maximum radius into which the grid
        must fit. If None (the default) the maximum is dictated by
        the number of rows and columns given.
    skip_cells: list of tuples of 2 ints (optional)
        A list of (column,row) combinations to skip. This allows
        gaps to be left in the grid to simulate the location of
        acquisition cameras.

    :Returns:

    config_list: list of (index, xcen, ycen, orient, column, row)
        List of configuration parameters describing the IDs,
        locations and orientations of the fibre positioners.
    """
    def _skipped(column, row):
        # Return True if the given (column,row) cell is listed in
        # skip_cells. Factored out so both grid shapes share one check.
        if skip_cells is not None:
            for (testcol, testrow) in skip_cells:
                if column == testcol and row == testrow:
                    return True
        return False

    config_list = []
    count = 1
    # The layout of the positioners depends on the shape requested.
    # A circle shape will fill the largest inscribed circle with
    # positioners. A positioner is regarded as being inside if its
    # centre lies inside the circle.
    # The rows and columns outside the circle will be empty.
    #
    # NOTE: To completely fill the circle, the number of rows should
    # be 2/SQRT(3) times greater than the number of columns, since the
    # rows are spaced more closely together.
    #
    if shape == 'rectangle':
        # 'rectangle'. rows x columns filled grid of positioners.
        for row in range(0, rows):
            for column in range(0, columns):
                if not _skipped(column, row):
                    (xpos, ypos) = hexagonal_to_cartesian(column, row, pitch)
                    if xyerror > 0.0:
                        xpos += xyerror * np.random.randn()
                        ypos += xyerror * np.random.randn()
                    if oerror > 0.0:
                        orient = oerror * np.random.randn()
                    else:
                        orient = 0.0
                    # BUGFIX: include column and row in the tuple, as the
                    # docstring promises and as the focal-plane curvature
                    # adjustment below requires (it unpacks 6 elements).
                    config = (count, xpos, ypos, orient, column, row)
                    config_list.append(config)
                    count += 1
    else:
        # Assume 'circle'. Circular boundary.
        # Grid half-extents measured in units of grid cells.
        length1 = float(columns) / 2.0
        length2 = ROOT3BY2 * float(rows) / 2.0
        if gridradius is not None:
            # NOTE(review): gridradius is converted to whole cells by
            # truncation — confirm int() rather than rounding is intended.
            length3 = int(gridradius / pitch)
            radius = min(max(length1, length2), length3)
        else:
            radius = max(length1, length2)
        radiussq = radius * radius
        colcen = columns // 2
        rowcen = rows // 2
        # Centre of the grid in cell units (pitch of 1.0).
        (xcen, ycen) = hexagonal_to_cartesian(colcen, rowcen, 1.0)
        # A circular grid can have its origin at the centre or at the
        # bottom left corner of the grid.
        if origin_at_centre:
            (xzero, yzero) = hexagonal_to_cartesian(colcen, rowcen, pitch)
        else:
            xzero = 0.0
            yzero = 0.0
        for row in range(0, rows):
            for column in range(0, columns):
                if not _skipped(column, row):
                    # Test for inclusion in cell units so the boundary is
                    # independent of the physical pitch.
                    (xtest, ytest) = hexagonal_to_cartesian(column, row, 1.0)
                    xdiff = (xtest - xcen)
                    ydiff = (ytest - ycen)
                    if (xdiff * xdiff) + (ydiff * ydiff) < radiussq:
                        # Inside the circle.
                        (xpos, ypos) = hexagonal_to_cartesian(column, row, pitch)
                        if xyerror > 0.0:
                            xpos = xpos - xzero + xyerror * np.random.randn()
                            ypos = ypos - yzero + xyerror * np.random.randn()
                        else:
                            xpos -= xzero
                            ypos -= yzero
                        if oerror > 0.0:
                            orient = oerror * np.random.randn()
                        else:
                            orient = 0.0
                        config = (count, xpos, ypos, orient, column, row)
                        config_list.append(config)
                        count += 1
    # If necessary, adjust the coordinates for the curvature of the
    # focal plane.
    if curvrad is not None and curvrad > 0.0:
        new_list = []
        for (count, xpos, ypos, orient, column, row) in config_list:
            (flat_r, flat_theta) = cartesian_to_polar(xpos, ypos)
            curved_r = flat_to_curved_r(flat_r, curvrad)
            # Points that cannot be projected onto the curved focal
            # plane are silently dropped from the new list.
            if curved_r is not None:
                (new_x, new_y) = polar_to_cartesian(curved_r, flat_theta)
                new_list.append((count, new_x, new_y, orient, column, row))
        del config_list
        return new_list
    return config_list
def polygon_to_points( poly ):
    """
    Plotting helper, which rearranges polygon vertices into lists
    of X and Y coordinates. The first point is duplicated at the end
    of each list, to make a closed path.

    :Parameters:

    poly: tuple of ((x1,y1),(x2,y2),...)
        The coordinates of the vertices of the polygon.

    :Returns:

    (xlist, ylist): list of 2 tuples
        ((x1, x2, ..., x1), (y1, y2, ..., y1))
    """
    # Split the vertices into separate coordinate lists.
    xlist = [vertex[0] for vertex in poly]
    ylist = [vertex[1] for vertex in poly]
    # Close the path by repeating the first vertex at the end.
    xlist.append(xlist[0])
    ylist.append(ylist[0])
    return (xlist, ylist)
if __name__ == '__main__':
    # Manual self-test and demonstration script. It exercises the grid
    # generation and geometry helpers defined above and, when PLOTTING is
    # True, displays the results via the project-local "plotting" module.
    import plotting
    PLOTTING = True
    print("\nTesting utility functions")
    print("ROOT3BY2=", ROOT3BY2)
    print("PIBY2=", PIBY2)
    # Generate a curved, circular positioner grid and report its size.
    positioner_configs = generate_grid_configuration(33, 39, 25.0,
                                shape='circle', xyerror=0.0, oerror=0.0,
                                curvrad=4212.0, gridradius=432.0)
    print("positioner_configs=", positioner_configs)
    print("There are=", len(positioner_configs), "positioners.")
    # import controller.fps_classes_control as fps
    # grid = fps.PositionerGrid( positioner_configs )
    # if PLOTTING:
    #     grid.plot(trivial=True)
    #     plotting.show_plot()
    #     plotting.close()
    # del grid
    del positioner_configs
    print("\nTesting simple coordinate conversion")
    # Round-trip a selection of (x,y) points through polar coordinates.
    x_list = [3.0, -3.0, 3.0, -3.0, 0.0, 0.0, 3.0, -3.0, 1.0]
    y_list = [4.0, 4.0, -4.0, -4.0, 4.0, -4.0, 0.0, 0.0, 1.0]
    for x,y in zip(x_list, y_list):
        (r, theta) = cartesian_to_polar(x,y)
        print("(%.2f,%.2f) converted to polar coordinates is (%.2f,%.2f deg)." % \
            (x,y,r,math.degrees(theta)))
        print("Converted back to Cartesian is (%.2f,%.2f)." % \
            polar_to_cartesian(r,theta))
    print("\nTesting hexagonal conversion")
    pitch = 25.0
    xzero = 314.8657
    yzero = 283.5884
    column_list = [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]
    row_list = [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7]
    # NOTE(review): the loop below unpacks column_list into "row" and
    # row_list into "col" — the names look swapped relative to the list
    # names; confirm this is intended.
    for row,col in zip(column_list, row_list):
        (x,y) = hexagonal_to_cartesian(col, row, pitch, xzero=xzero, yzero=yzero)
        print("hexagonal grid point (%d,%d) is centred at (%.2f,%.2f)." % \
            (col, row, x, y))
        print("Converted back to hexagonal grid is (%d,%d)." % \
            cartesian_to_hexagonal(x, y, pitch, xzero=xzero, yzero=yzero))
    print("\nTesting geometric intersection functions")
    # # -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    # point1 = (1.4, 1.8)
    # point2 = (6.0, 1.9)
    # line1 = [1.0, 1.0, 10.0, 3.0]
    # line2 = [1.0, 1.0, 4.0, 1.7]
    # xlist = [line1[0], line1[2]]
    # ylist = [line1[1], line1[3]]
    # dist = distance_from_point_to_line(point1[0], point1[1],
    #                                    line1[0], line1[1], line1[2], line1[3])
    # strg = "Distance from point to line is %f" % dist
    # print strg
    # if PLOTTING:
    #     plotaxis = plotting.plot_xy(xlist, ylist, title=strg, showplot=False)
    #     plotting.plot_xy( point1[0], point1[1], plotaxis=plotaxis,
    #                       linefmt='b+', linestyle=' ',
    #                       showplot=True )
    #     plotting.close()
    # dist = distance_from_point_to_line(point2[0], point2[1],
    #                                    line1[0], line1[1], line1[2], line1[3])
    # strg = "Distance from point to line is %f" % dist
    # print strg
    # if PLOTTING:
    #     plotaxis = plotting.plot_xy(xlist, ylist, title=strg, showplot=False)
    #     plotting.plot_xy( point2[0], point2[1], plotaxis=plotaxis,
    #                       linefmt='b+', linestyle=' ',
    #                       showplot=True )
    #     plotting.close()
    # xlist = [line2[0], line2[2]]
    # ylist = [line2[1], line2[3]]
    # dist = distance_from_point_to_line(point2[0], point2[1],
    #                                    line2[0], line2[1], line2[2], line2[3])
    # strg = "Distance from point to line is %f" % dist
    # print strg
    # if PLOTTING:
    #     plotaxis = plotting.plot_xy(xlist, ylist, title=strg, showplot=False)
    #     plotting.plot_xy( point2[0], point2[1], plotaxis=plotaxis,
    #                       linefmt='b+', linestyle=' ',
    #                       showplot=True )
    #     plotting.close()
    # -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    # Point-in-triangle tests: one point inside, one outside.
    point1 = (1.4, 1.8)
    point2 = (6.0, 1.9)
    xlist = [1.0, 10.0, 3.0, 1.0]
    ylist = [1.0, 3.0, 7.0, 1.0]
    inside = point_inside_triangle(point1[0], point1[1],
                                   xlist[0], ylist[0],
                                   xlist[1], ylist[1],
                                   xlist[2], ylist[2])
    if inside:
        strg = "Point inside triangle"
    else:
        strg = "Point not inside triangle"
    if PLOTTING:
        plotaxis = plotting.plot_xy(xlist, ylist, title=strg, showplot=False)
        plotting.plot_xy( point1[0], point1[1], plotaxis=plotaxis,
                          linefmt='b+', linestyle=' ',
                          showplot=True )
        plotting.close()
    inside = point_inside_triangle(point2[0], point2[1],
                                   xlist[0], ylist[0],
                                   xlist[1], ylist[1],
                                   xlist[2], ylist[2])
    if inside:
        strg = "Point inside triangle"
    else:
        strg = "Point not inside triangle"
    if PLOTTING:
        plotaxis = plotting.plot_xy(xlist, ylist, title=strg, showplot=False)
        plotting.plot_xy( point2[0], point2[1], plotaxis=plotaxis,
                          linefmt='b+', linestyle=' ',
                          showplot=True )
        plotting.close()
    # -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    # Triangle/triangle intersection tests.
    triang1 = [(1.0,1.0), (10.0,3.0), (3.0,7.0)]
    triang2 = [(5.0,0.0), (5.5,2.5), (12.0,2.0)]
    triang3 = [(7.0,0.0), (7.5,2.3), (14.0,2.0)]
    (xlist1, ylist1) = polygon_to_points(triang1)
    (xlist2, ylist2) = polygon_to_points(triang2)
    (xlist3, ylist3) = polygon_to_points(triang3)
    intersect = triangles_intersect(triang1, triang2)
    if intersect:
        strg = "Triangles intersect"
    else:
        strg = "Triangles do not intersect"
    if PLOTTING:
        plotaxis = plotting.plot_xy(xlist1, ylist1, title=strg, showplot=False)
        plotting.plot_xy( xlist2, ylist2, plotaxis=plotaxis, showplot=True )
        plotting.close()
    intersect = triangles_intersect(triang1, triang3)
    if intersect:
        strg = "Triangles intersect"
    else:
        strg = "Triangles do not intersect"
    if PLOTTING:
        plotaxis = plotting.plot_xy(xlist1, ylist1, title=strg, showplot=False)
        plotting.plot_xy( xlist3, ylist3, plotaxis=plotaxis, showplot=True )
        plotting.close()
    # -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    # Quadrangle/quadrangle intersection tests.
    quadrang1 = [(1.0,1.0), (10.0,3.0), (8.0, 9.0), (3.0,7.0)]
    quadrang2 = [(5.0,0.0), (5.5,2.5), (9.0,5.0), (12.0,2.0)]
    quadrang3 = [(7.0,0.0), (7.5,2.3), (11.0,3.0), (14.0,2.0)]
    (xlist1, ylist1) = polygon_to_points(quadrang1)
    (xlist2, ylist2) = polygon_to_points(quadrang2)
    (xlist3, ylist3) = polygon_to_points(quadrang3)
    intersect = quadrangles_intersect(quadrang1, quadrang2)
    if intersect:
        strg = "Quadrangles intersect"
    else:
        strg = "Quadrangles do not intersect"
    if PLOTTING:
        plotaxis = plotting.plot_xy(xlist1, ylist1, title=strg, showplot=False)
        plotting.plot_xy( xlist2, ylist2, plotaxis=plotaxis, showplot=True )
        plotting.close()
    intersect = quadrangles_intersect(quadrang1, quadrang3)
    if intersect:
        strg = "Quadrangles intersect"
    else:
        strg = "Quadrangles do not intersect"
    if PLOTTING:
        plotaxis = plotting.plot_xy(xlist1, ylist1, title=strg, showplot=False)
        plotting.plot_xy( xlist3, ylist3, plotaxis=plotaxis, showplot=True )
        plotting.close()
    # -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    # Triangle/circle intersection tests.
    xcen = 5.0
    ycen = 5.0
    radius = 2.0
    triang1 = [(1.0,1.0), (10.0,3.0), (3.0,7.0)]
    triang2 = [(5.0,0.0), (5.5,2.5), (12.0,2.0)]
    triang3 = [(7.0,0.0), (7.5,2.3), (5.0,3.0)]
    (xlist1, ylist1) = polygon_to_points(triang1)
    (xlist2, ylist2) = polygon_to_points(triang2)
    (xlist3, ylist3) = polygon_to_points(triang3)
    intersect = triangle_intersects_circle(triang1, xcen, ycen, radius)
    if intersect:
        strg = "Triangle intersects circle"
    else:
        strg = "Triangle does not intersect circle"
    if PLOTTING:
        plotaxis = plotting.plot_circles([xcen], [ycen], radius, title=strg,
                                         showplot=False)
        plotting.plot_xy( xlist1, ylist1, plotaxis=plotaxis, showplot=True )
        plotting.close()
    intersect = triangle_intersects_circle(triang2, xcen, ycen, radius)
    if intersect:
        strg = "Triangle intersects circle"
    else:
        strg = "Triangle does not intersect circle"
    if PLOTTING:
        plotaxis = plotting.plot_circles([xcen], [ycen], radius, title=strg,
                                         showplot=False)
        plotting.plot_xy( xlist2, ylist2, plotaxis=plotaxis, showplot=True )
        plotting.close()
    intersect = triangle_intersects_circle(triang3, xcen, ycen, radius)
    if intersect:
        strg = "Triangle intersects circle"
    else:
        strg = "Triangle does not intersect circle"
    if PLOTTING:
        plotaxis = plotting.plot_circles([xcen], [ycen], radius, title=strg,
                                         showplot=False)
        plotting.plot_xy( xlist3, ylist3, plotaxis=plotaxis, showplot=True )
        plotting.close()
    # -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    # Point-in-ellipse tests: one point inside, one outside.
    xcen = 5.0
    ycen = 5.0
    semiminor = 2.0
    semimajor = 3.0
    tilt = math.radians(0.0)
    xpoint = 4.0
    ypoint = 4.0
    intersect = point_inside_ellipse(xpoint, ypoint, xcen, ycen, semimajor,
                                     semiminor, tilt)
    if intersect:
        strg = "Point inside ellipse"
    else:
        strg = "Point not inside ellipse"
    if PLOTTING:
        plotaxis = plotting.plot_ellipses([xcen], [ycen], semimajor, semiminor,
                                          tilt, title=strg, showplot=False)
        plotting.plot_xy( xpoint, ypoint, plotaxis=plotaxis, linefmt='b+',
                          linestyle=' ', showplot=True )
        plotting.close()
    xpoint = 7.0
    ypoint = 4.0
    intersect = point_inside_ellipse(xpoint, ypoint, xcen, ycen, semimajor,
                                     semiminor, tilt)
    if intersect:
        strg = "Point inside ellipse"
    else:
        strg = "Point not inside ellipse"
    if PLOTTING:
        plotaxis = plotting.plot_ellipses([xcen], [ycen], semimajor, semiminor,
                                          tilt, title=strg, showplot=False)
        plotting.plot_xy( xpoint, ypoint, plotaxis=plotaxis, linefmt='b+',
                          linestyle=' ', showplot=True )
        plotting.close()
    # # -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    # print "\nTesting generation of radial avoidance zone"
    # LENGTH1 = 8.0
    # LENGTH2 = 17.0
    # MLENGTH = 6.5
    # MWIDTH = 5.5
    # FAVOID = 2.0
    # maxlength = LENGTH1 + LENGTH2
    # for xfibre in (25.0, 20.0, 15.0, 10.0):
    #     (xpoints,ypoints) = generate_avoidance_perimeter(xfibre, LENGTH1, LENGTH2,
    #                                                      MLENGTH, MWIDTH, FAVOID,
    #                                                      padding=2.0, inc1=0.5, inc2=0.1)
    #     if PLOTTING:
    #         plotting.plot_xy(xpoints,ypoints,
    #                          title="Avoidance zone for xfibre=%.2f" % xfibre)
    #         plotting.close()
    #     for (x,y) in zip(xpoints,ypoints):
    #         print x,y
    # print "Testing generation of direct avoidance zone"
    # LENGTH1 = 8.0
    # LENGTH2 = 17.0
    # MLENGTH = 6.5
    # MWIDTH = 5.5
    # FAVOID = 2.0
    # maxlength = LENGTH1 + LENGTH2
    # tests = [(25.0, 0.0, 0.0, 25.0),
    #          (25.0, 0.0, 15.0, 9.0),
    #          (15.0, 9.0, 0.0, 9.0)]
    # for (xstart, ystart, xfinish, yfinish) in tests:
    #     (xpoints,ypoints) = generate_avoidance_perimeter2(xstart, ystart,
    #                                 xfinish, yfinish, LENGTH1, LENGTH2,
    #                                 MLENGTH, MWIDTH, FAVOID,
    #                                 padding=2.0, inc1=0.5, inc2=0.1)
    #     if PLOTTING:
    #         title="Avoidance zone for (%.2f,%.2f) -> (%.2f,%.2f)" % \
    #             (xstart, ystart, xfinish, yfinish)
    #         plotting.plot_xy(xpoints, ypoints, title=title)
    #         plotting.close()
    #
    # # for (x,y) in zip(xpoints,ypoints):
    # #     print x,y
    # print "\nSolving xfibre avoidance zone."
    # LENGTH1 = 8.0
    # LENGTH2 = 17.0
    # MAXLENGTH = LENGTH2 + LENGTH1
    # MINLENGTH = LENGTH2 - LENGTH1
    # XINC = 0.25
    # MLENGTH = 6.5
    # MWIDTH = 5.5
    # if PLOTTING:
    #     plotfig = plotting.new_figure(1,
    #                 stitle="Avoidance as a function of X (min red, max blue)")
    #     plotaxis = plotting.add_subplot(plotfig, 1, 1, 1)
    # for padding in (0.0, 2.0):
    #     print "padding=", padding
    #     xminplot = []
    #     xmaxplot = []
    #     yminplot = []
    #     ymaxplot = []
    #     xfibre = MAXLENGTH
    #     while (xfibre >= MINLENGTH):
    #         # (left, right, bottom, top) = xfibre_to_avoidance(xfibre,
    #         #                                     LENGTH1, LENGTH2,
    #         #                                     MLENGTH, MWIDTH,
    #         #                                     padding=padding)
    #         (bottom, left, top, right) = solve_metrology_edge(xfibre,
    #                                             LENGTH1, LENGTH2,
    #                                             MLENGTH, MWIDTH,
    #                                             padding=padding)
    #         print "xfibre=%.2f: bottom=(%.2f,%.2f), left=(%.2f,%.2f), top=(%.2f,%.2f), right=(%.2f,%.2f)" % \
    #             (xfibre, bottom[0], bottom[1], left[0], left[1], top[0], top[1], right[0], right[1])
    #         xfibre -= XINC
    #
    #         xrect = [bottom[0], left[0], top[0], right[0], bottom[0]]
    #         yrect = [bottom[1], left[1], top[1], right[1], bottom[1]]
    #         if PLOTTING and padding > 0.0:
    #             plotaxis = plotting.plot_xy(xrect, yrect, linefmt='k ',
    #                                         linestyle=':', plotfig=plotfig,
    #                                         plotaxis=plotaxis, showplot=False)
    #
    #         xminplot.append(bottom[0])
    #         xmaxplot.append(top[0])
    #         yminplot.append(bottom[1])
    #         ymaxplot.append(top[1])
    #     if PLOTTING:
    #         plotaxis = plotting.plot_xy(xminplot, yminplot, linefmt='r+',
    #                                     linestyle='-', linewidth=2,
    #                                     plotfig=plotfig,
    #                                     plotaxis=plotaxis, showplot=False)
    #         plotaxis = plotting.plot_xy(xmaxplot, ymaxplot, linefmt='b+',
    #                                     linestyle='-', linewidth=2,
    #                                     plotfig=plotfig,
    #                                     plotaxis=plotaxis, showplot=False)
    #
    # if PLOTTING:
    #     plotting.show_plot()
    #     plotting.close()
    print("Tests finished")
| [
37811,
198,
198,
11770,
19213,
41566,
260,
23158,
278,
4482,
34030,
19937,
198,
198,
4264,
1299,
257,
4947,
286,
4888,
5499,
290,
20081,
13,
198,
198,
1731,
5979,
1946,
25,
15622,
1262,
5499,
21242,
422,
32977,
62,
37724,
13,
9078,
198,... | 2.167447 | 43,590 |
import json
import os.path
from haikunator import Haikunator
from .. import CLOUD_PIPE_TEMPLATES_FOLDER
name_generator = Haikunator()
# TODO: add instance_type
| [
11748,
33918,
198,
11748,
28686,
13,
6978,
198,
198,
6738,
387,
1134,
403,
1352,
1330,
9398,
1134,
403,
1352,
198,
198,
6738,
11485,
1330,
7852,
2606,
35,
62,
47,
4061,
36,
62,
51,
3620,
6489,
29462,
62,
37,
3535,
14418,
198,
198,
3... | 2.619048 | 63 |
#!/usr/bin/python3
# Takes as input wps.txt, created by words_per_second.sh
# Does not show outliers, i.e. segments with wps > 5
import sys
import matplotlib.pyplot as plt
import numpy as np
with open(sys.argv[1],'r',encoding='utf-8') as f:
# wps per segment
wps = [float(line.strip().split()[0]) for line in f.readlines()]
plt.hist(wps, 100)
plt.xlabel('Words/second')
plt.ylabel('# Segments')
plt.title('Words per second in Althingi segments')
plt.axis([0, 5, 0, 50000])
plt.grid(True)
plt.show()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
2,
33687,
355,
5128,
266,
862,
13,
14116,
11,
2727,
416,
2456,
62,
525,
62,
12227,
13,
1477,
198,
2,
8314,
407,
905,
41528,
3183,
11,
1312,
13,
68,
13,
17894,
351,
266,
862,
18... | 2.377273 | 220 |
"""
RDB 2015
User Interface
CustomTable Widget
Author: Tomas Krizek
"""
from PyQt5.QtCore import Qt, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import (QTableWidget, QTableWidgetItem, QWidget,
QAbstractItemView, QHeaderView)
| [
37811,
198,
49,
11012,
1853,
198,
198,
12982,
26491,
198,
198,
15022,
10962,
370,
17484,
198,
198,
13838,
25,
42884,
509,
380,
43130,
198,
37811,
198,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
33734,
11,
12972,
39568,
1171... | 2.678161 | 87 |
import os
import librosa
import librosa.feature
import numpy as np
import scipy.stats
from fastdtw import fastdtw
"""
**************************************** INSTRUCTIONS ***************************************
* *
* Usage: python mcd_computer.py --language german --model simple *
* *
* For each utterance in a meta-file, find the ground-truth spectrogram and a synthesized *
* spectrogram and compute Mel Cepstral Distorsion of them, saves into a file with basic *
* statistics. *
* *
*********************************************************************************************
"""
if __name__ == '__main__':
import argparse
import re
parser = argparse.ArgumentParser()
parser.add_argument("--language", type=str, required=True, help="Language to be synthesized.")
parser.add_argument("--model", type=str, required=True, help="Model specific folder.")
parser.add_argument('--num_mfcc', type=int, default=13, help="Number of MFCC coefficients.")
parser.add_argument("--where", type=str, required=True, help="Data specific folder.")
args = parser.parse_args()
mcds = []
meta_file = os.path.join(args.where, 'all_meta_files', f'{args.language}.txt')
with open(meta_file, 'r', encoding='utf-8') as f:
for l in f:
tokens = l.rstrip().split('|')
idx = tokens[0]
spec_path = os.path.join(args.where, args.model, 'spectrograms', args.language, f'{idx}.npy')
if not os.path.exists(spec_path):
print(f'Missing spectrogram of {idx}!')
continue
gen = np.load(spec_path)
ref_path = os.path.join(args.where, 'ground-truth', 'spectrograms', f'{idx}.npy')
ref = np.load(ref_path)
mcd = mel_cepstral_distorision(gen, ref, args.num_mfcc)
mcds.append((idx, mcd))
values = [x for i, x in mcds]
mcd_mean = np.mean(values)
mcd_std = np.std(values)
output_path = os.path.join(args.where, args.model, 'mcd')
if not os.path.exists(output_path):
os.makedirs(output_path)
mcd_lower, mcd_upper = confidence_interval(values)
output_file = os.path.join(output_path, f'{args.language}.txt')
with open(output_file, 'w+', encoding='utf-8') as of:
for i, c in mcds:
print(f'{c}', file=of) # {i}|
print(f'Total mean MCD: {mcd_mean}', file=of)
print(f'Std. dev. of MCD: {mcd_std}', file=of)
print(f'Conf. interval: ({mcd_lower}, {mcd_upper})', file=of) | [
11748,
28686,
198,
11748,
9195,
4951,
64,
198,
11748,
9195,
4951,
64,
13,
30053,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
34242,
198,
6738,
3049,
67,
4246,
1330,
3049,
67,
4246,
198,
198,
37811,
198,
198,
1717... | 2.054149 | 1,422 |
from django.conf.urls import url, include, patterns
urlpatterns = [
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
2291,
11,
7572,
198,
198,
6371,
33279,
82,
796,
685,
198,
198,
60,
198
] | 2.88 | 25 |
import unittest
import argparse
from fbx_parser.fbx_parser_rework import FbxParser | [
11748,
555,
715,
395,
198,
11748,
1822,
29572,
198,
198,
6738,
277,
65,
87,
62,
48610,
13,
21855,
87,
62,
48610,
62,
1809,
967,
1330,
376,
65,
87,
46677
] | 2.862069 | 29 |
from abc import ABC, abstractmethod
import sys
import json
import pandas as pd
import string
import csv
import time
from textClustPy import Input
from textClustPy import Observation
## implementation of abstract class
class InMemInput(Input):
'''
:param pdframe: pandas data frame that serves as stream input
:type pdframe: DataFrame
:param col_id: Column index that contains the text id
:type col_id: int
:param col_time: Column index that contains the time
:type col_time: int
:param col_text: Column index that contains the text
:type col_text: int
:param col_text: Column index that contains the true cluster belonging
:type col_label: int
'''
def run(self):
'''
Update the textclust algorithm with the complete data in the data frame
'''
for row in self.reader:
data = self.getObservation(row)
self.processdata(data)
def update(self, n):
'''
Update the textclust algorithm on new observations
:param n: Number of observations that should be used by textclust
:type n: int
'''
for i in range(0,n):
row = next(self.reader)
data = self.getObservation(row)
self.processdata(data)
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
198,
11748,
25064,
198,
198,
11748,
33918,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
11748,
4731,
198,
11748,
269,
21370,
198,
11748,
640,
628,
198,
6738,
2420,
2601,
436,
20519,
... | 2.475926 | 540 |
import fire
import os
import csv
import numpy as np
from copy import deepcopy
from tqdm import tqdm
from scipy.stats import loguniform
from functools import partial
from multiprocessing import Pool
from scipy.stats import median_absolute_deviation as mad
# Borrow utils
from infomercial.utils import save_checkpoint
from infomercial.utils import load_checkpoint
# Borrow tune utils
from infomercial.exp.tune_bandit import get_best_trial
from infomercial.exp.tune_bandit import get_sorted_trials
from infomercial.exp.tune_bandit import get_best_result
from infomercial.exp.tune_bandit import get_configs
from infomercial.exp.tune_bandit import get_metrics
from infomercial.exp.tune_bandit import save_csv
# Our target
from parkid.run import change_bandits
def tune(name,
model_name="parkid",
env_name1="BanditUniform4",
env_name2="BanditChange4",
change=100,
num_episodes=40,
share_update=False,
num_repeats=10,
num_samples=10,
num_processes=1,
metric="change_R",
stat="median",
master_seed=None,
**config_kwargs):
"""Tune hyperparameters for change_bandits."""
# -
# Init:
# Separate name from path
path, name = os.path.split(name)
# Look up the bandit run function were using in this tuning.
exp_func = getattr(change_bandits, model_name)
# Build the parallel callback
trials = []
# generate sep prngs for each kwargs
prngs = []
for i in range(len(config_kwargs)):
if master_seed is not None:
prng = np.random.RandomState(master_seed + i)
else:
prng = np.random.RandomState()
prngs.append(prng)
# Setup default params
params = dict(exp_func=exp_func,
env_name1=env_name1,
env_name2=env_name2,
change=change,
share_update=share_update,
num_episodes=num_episodes,
num_repeats=num_repeats,
metric=metric,
master_seed=master_seed,
config={})
# -
# Run!
# Setup the parallel workers
workers = []
pool = Pool(processes=num_processes)
for n in range(num_samples):
# Reset param sample for safety
params["config"] = {}
params["config"]["write_to_disk"] = False
# Make a new sample
for i, (k, par) in enumerate(config_kwargs.items()):
try:
mode, low, high = par
mode = str(mode)
if mode == "loguniform":
params["config"][k] = loguniform(
low, high).rvs(random_state=prngs[i])
elif mode == "uniform":
params["config"][k] = prngs[i].uniform(low=low, high=high)
elif mode == "linspace":
# Its ineff. to generate this everytime; oh well
values = np.linspace(low, high, num=num_samples)
params["config"][k] = values[n]
else:
raise ValueError(f"mode {mode} not understood")
except TypeError: # number?
params["config"][k] = float(par)
except ValueError: # string?
params["config"][k] = str(par)
# A worker gets the new sample
workers.append(
pool.apply_async(_train,
kwds=deepcopy(params),
callback=append_to_results))
# Get the worker's result (blocks until complete)
for worker in tqdm(workers):
worker.get()
pool.close()
pool.join()
# Cleanup - dump write_to_disk arg
for trial in trials:
del trial["config"]["write_to_disk"]
# -
# Sort and save the configs of all trials
sorted_configs = {}
for i, trial in enumerate(get_sorted_trials(trials, metric)):
sorted_configs[i] = trial["config"]
if stat == "median":
sorted_configs[i].update({metric: np.median(trial["scores"])})
sorted_configs[i].update({"mad_" + metric: mad(trial["scores"])})
elif stat == "mean":
sorted_configs[i].update({metric: np.mean(trial["scores"])})
sorted_configs[i].update(
{"std_" + metric: np.std(trial["scores"])})
else:
raise ValueError("stat must be median or mean")
save_csv(sorted_configs, filename=os.path.join(path, name + "_sorted.csv"))
return get_best_trial(trials, metric)
# ----------------------------------------------------------------------------
# !
if __name__ == "__main__":
# Create CL interface
fire.Fire(tune)
| [
11748,
2046,
198,
11748,
28686,
198,
11748,
269,
21370,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
629,
541,
88,
13,
34242,
1330,
2604,
403,
... | 2.142728 | 2,214 |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 26 16:41:04 2019
@author: Utilisateur
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
8621,
2608,
1467,
25,
3901,
25,
3023,
13130,
198,
198,
31,
9800,
25,
7273,
346,
271,
15093,
198,
37811,
198
] | 2.307692 | 39 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
198,
6738,
42625,
14208,
13,
10414,
1330,
... | 3.086957 | 46 |
import errno
import os
from os.path import join, exists
from os import stat, makedirs
# create folder for cache:
# mkdir -p /tmp/climate_data_portal/images/recent/
# mkdir -p /tmp/climate_data_portal/images/older/
MAX_CACHE_FOLDER_SIZE = 2**24 # 16 MiB
| [
198,
11748,
11454,
3919,
198,
11748,
28686,
198,
6738,
28686,
13,
6978,
1330,
4654,
11,
7160,
198,
6738,
28686,
1330,
1185,
11,
285,
4335,
17062,
198,
198,
2,
2251,
9483,
329,
12940,
25,
198,
2,
33480,
15908,
532,
79,
1220,
22065,
14,... | 2.705263 | 95 |
import os
from indexing.pathanalyzer import PathAnalyzer
from indexing.pathanalyzerstore import PathAnalyzerStore
class Indexer:
"""
Traverses the given directory using the DFS algorithm. Allows registering different rules for handling different
file types and calls the associated PathAnalyzers and Collectors indirectly for each type.
"""
####################################################################################################################
# Constructor.
####################################################################################################################
def __init__(self, max_depth=10):
"""
Initializes attributes and checks the maximum depth provided.
Parameters
----------
max_depth : int
The maximum depth to look in.
"""
### Validate parameters.
if max_depth < 1:
raise Exception('max_depth must be greater than or equal to 1.')
### Attributes from outside.
self._max_depth = max_depth
### Private attributes.
# A collection of analyzers which handle different file types.
self._analyzers = []
# The depth we are currently in.
self._current_depth = 0
# The list of directories to index.
self._rules = {}
####################################################################################################################
# Public methods.
####################################################################################################################
def add_rule(self, directory, policy):
"""
Registers a new directory to index. Does nothing if the given directory is already added.
Parameters
----------
directory : str
The directory to be indexed.
policy : IndexerPolicy
A policy that applies to this directory.
"""
analyzer = self._create_analyzer(policy)
analyzer_store = self._create_analyzerstore(directory)
analyzer_store.add_analyzer(policy.extensions, analyzer)
def index(self):
"""
Initializes filters, initiates indexing and after the indexing process has finished, cleans filters.
"""
for analyzer in self._analyzers:
analyzer.init_filters()
for directory, analyzer_store in self._rules.items():
if os.path.exists(directory):
self._scan_directory(directory, analyzer_store)
for analyzer in self._analyzers:
analyzer.clean_filters()
####################################################################################################################
# Auxiliary methods.
####################################################################################################################
def _enter(self, directory):
"""
Indicates for the analyzers that we entered into the given directory.
Parameters
----------
directory : str
The directory we entered.
"""
for analyzer in self._analyzers:
analyzer.enter(directory)
self._current_depth = self._current_depth + 1
def _leave(self):
"""
Indicates for the analyzers that we are leaving the last directory.
"""
for analyzer in self._analyzers:
analyzer.leave()
self._current_depth = self._current_depth - 1
def _scan_directory(self, path, analyzer_store):
"""
Does the real indexing. Iterates through the directory using DFS, and invokes the registered analyzers to
analyze and store the data.
Parameters
----------
path : str
The path to enumerate.
analyzers : PathAnalyzerStore
The PathAnalyzerStore to use.
"""
for current_file in os.listdir(path):
current_path = os.path.join(path, current_file)
if self._current_depth >= self._max_depth:
return
if os.path.isdir(current_path):
self._enter(current_file)
self._scan_directory(current_path, analyzer_store)
self._leave()
else:
self._analyze_file(current_path, analyzer_store)
| [
11748,
28686,
198,
198,
6738,
6376,
278,
13,
79,
6696,
3400,
9107,
1330,
10644,
37702,
9107,
198,
6738,
6376,
278,
13,
79,
6696,
3400,
9107,
8095,
1330,
10644,
37702,
9107,
22658,
198,
198,
4871,
12901,
263,
25,
198,
220,
220,
220,
37... | 2.826003 | 1,546 |
# Generated by Django 2.2.12 on 2020-04-27 08:54
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1065,
319,
12131,
12,
3023,
12,
1983,
8487,
25,
4051,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.8 | 30 |
# -*- coding:utf-8 -*-
# @Script: settings.py
# @Author: Andre Litty
# @Email: alittysw@gmail.com
# @Create At: 2020-03-21 13:45:12
# @Last Modified By: Andre Litty
# @Last Modified At: 2020-04-08 14:23:05
# @Description: Class-based Flask settings.
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
2488,
7391,
25,
6460,
13,
9078,
198,
2,
2488,
13838,
25,
10948,
406,
9760,
198,
2,
2488,
15333,
25,
435,
715,
893,
86,
31,
14816,
13,
785,
198,
2,
2488,
16447,
1629,... | 2.625 | 96 |
import torch
from ..math.norm import *
def normalize_scale(P):
    """
    Returns the rescaled points set in range [-1,1]

    Parameters
    ----------
    P : Tensor
        the input points set tensor

    Returns
    -------
    Tensor
        the rescaled points set
    """
    # Per-dimension bounding box of the point set; keepdim keeps the results
    # broadcastable against P. Renamed from min/max to avoid shadowing the
    # Python builtins of the same name.
    lo = torch.min(P, dim=0, keepdim=True)[0]
    hi = torch.max(P, dim=0, keepdim=True)[0]
    # Half the bounding-box diagonal, as measured by the module's distance().
    # NOTE(review): a degenerate input (all points identical) makes this zero
    # and the division yields inf/nan — confirm callers never pass such a set.
    half_diagonal = torch.mul(distance(lo, hi), 0.5)
    return torch.div(P, half_diagonal)
| [
11748,
28034,
198,
6738,
11485,
11018,
13,
27237,
1330,
1635,
628,
198,
4299,
3487,
1096,
62,
9888,
7,
47,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
16409,
262,
6811,
3021,
2173,
900,
287,
2837,
25915,
16,
11,
16,
60,
628... | 2.34375 | 192 |
'''OpenGL extension ARB.texture_rectangle
This module customises the behaviour of the
OpenGL.raw.GL.ARB.texture_rectangle to provide a more
Python-friendly API
Overview (from the spec)
OpenGL texturing is limited to images with power-of-two dimensions
and an optional 1-texel border. The ARB_texture_rectangle extension
adds a new texture target that supports 2D textures without requiring
power-of-two dimensions.
Non-power-of-two sized (NPOTS) textures are useful for storing video
images that do not have power-of-two sized (POTS). Re-sampling
artifacts are avoided and less texture memory may be required by
using non-power-of-two sized textures. Non-power-of-two sized
textures are also useful for shadow maps and window-space texturing.
However, non-power-of-two sized textures have limitations that
do not apply to power-of-two sized textures. NPOTS textures may
not use mipmap filtering; POTS textures support both mipmapped
and non-mipmapped filtering. NPOTS textures support only the
GL_CLAMP, GL_CLAMP_TO_EDGE, and GL_CLAMP_TO_BORDER wrap modes;
POTS textures support GL_CLAMP_TO_EDGE, GL_REPEAT, GL_CLAMP,
GL_MIRRORED_REPEAT, and GL_CLAMP_TO_BORDER (and GL_MIRROR_CLAMP_ATI
and GL_MIRROR_CLAMP_TO_EDGE_ATI if ATI_texture_mirror_once is
supported) . NPOTS textures do not support an optional 1-texel
border; POTS textures do support an optional 1-texel border.
NPOTS textures are accessed by dimension-dependent (aka
non-normalized) texture coordinates. So instead of thinking of
the texture image lying in a [0..1]x[0..1] range, the NPOTS texture
image lies in a [0..w]x[0..h] range.
This extension adds a new texture target and related state (proxy,
binding, max texture size).
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/texture_rectangle.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ARB.texture_rectangle import *
### END AUTOGENERATED SECTION | [
7061,
6,
11505,
8763,
7552,
5923,
33,
13,
41293,
62,
2554,
9248,
198,
198,
1212,
8265,
2183,
2696,
262,
9172,
286,
262,
220,
198,
11505,
8763,
13,
1831,
13,
8763,
13,
37304,
13,
41293,
62,
2554,
9248,
284,
2148,
257,
517,
220,
198,
... | 3.244548 | 642 |
""" director account specific methods """
import argparse
from api_request.request import *
| [
37811,
3437,
1848,
2176,
5050,
37227,
198,
198,
11748,
1822,
29572,
198,
6738,
40391,
62,
25927,
13,
25927,
1330,
1635,
628,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198,
220,
220,
220,
198,
220,
220,
628,
198,
220,
220,
220,
62... | 2.688889 | 45 |
# coding: utf-8
# In[7]:
# IPython notebook magics (only meaningful when run under IPython):
# render matplotlib figures inline, in SVG format.
get_ipython().magic(u'matplotlib inline')
get_ipython().magic(u"config InlineBackend.figure_formats = {'svg',}")
import numpy as np
import pandas as pd
from IPython.display import display
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from sklearn import cross_validation
import matplotlib.pyplot as plt
from sklearn import preprocessing
import seaborn as sns
from sklearn.cross_validation import StratifiedKFold
# # Cross-Language Rap Detector
# A previous project of mine, RapItalia, was designed to approximately track the growth in popularity of rap in Italy, based on the publication date of rap songs that turned up in an Italian lyrics database. Did rap suddenly get popular there 10 years ago? I didn't have any genre information, only lyrical content, and so I made the assertion that rap songs could be distinguished by the number of words in the lyrics. I used a quite arbitrary cutoff for the number of words, specifically 500 words, and verified it only with a quick look at the songs that were identified as rap (I did made sure not to tune it based on the final output of my analysis). The results of this classification were then fed into my subsequent analysis of publication date by genre
#
# Making an assumption like that without evidence is pretty sloppy, so I set out to do better. I did two things:
# 1. Linking a large body of lyrics with high quality genre information
# 2. Evaluated whether songs can be correctly classified as rap or non-rap based on non-language-specific properties of the lyrics (like the number of words)
#
# Why non-language-specific? If I train on surface features of rap songs in every language, then I can identify rap songs in every language. This could also shed light generally on the surface-level lyrical differences between genres.
#
# First, some helper functions:
# In[42]:
def feature_distribution(data, which_feature, xlim=None):
    """Plot a comparison of the distribution of a particular feature between rap and non-rap.

    Parameters
    ----------
    data : DataFrame
        Track data with an ``is_rap`` boolean column and the feature column.
    which_feature : str
        Name of the feature column to plot.
    xlim : tuple, optional
        Explicit (min, max) x-axis range; when omitted it is derived from the
        data's median and interquartile range.
    """
    # Compute the range of feature values to use, if not specified in xlim.
    # BUG FIX: these statistics previously read the global `train_data` instead
    # of the `data` argument, so plotting any other frame used the wrong range.
    med = np.median(data.loc[:, which_feature])
    q75, q25 = np.percentile(data.loc[:, which_feature], [75, 25])
    iqr = q75 - q25
    minx = med - (iqr * 2.5)
    if minx < 0:
        minx = 0
    maxx = med + (iqr * 2.5)
    if xlim:
        minx = xlim[0]
        maxx = xlim[1]
    nbins = 20
    bins = np.linspace(minx, maxx, nbins + 1)

    # Plot the two histograms on shared bins so they are directly comparable.
    plt.figure()
    sns.distplot(data.loc[data.is_rap == False, which_feature], bins=bins, label='Non-rap')
    sns.distplot(data.loc[data.is_rap == True, which_feature], bins=bins, label='Rap')
    plt.xlim(minx, maxx)
    plt.title(which_feature)
    plt.legend()
def plot_feature_importance(features, fitted_forest):
    """Using a fitted random forest, make a cleveland dot plot of the computed feature importances. """
    plt.figure()
    importances = fitted_forest.feature_importances_
    # argsort is ascending, so flip it to get most-important-first ordering.
    order = np.flipud(np.argsort(importances))
    feature_names = np.array(features)
    with sns.axes_style("whitegrid"):
        sns.stripplot(y=feature_names[order], x=importances[order], orient="h", color='red', size=10)
    # Anchor the x axis at zero, keeping the auto-computed right edge.
    right_edge = plt.xlim()[1]
    plt.xlim(0, right_edge)
    plt.grid(axis='y', linestyle=':')
    plt.xlabel('Feature importance score')
def examine_prediction(y, prediction, data, features, show_misidentified=True):
    """Given a prediction and ground truth (y), output statistics about the quality of the prediction.

    Parameters
    ----------
    y : array-like
        Ground-truth labels (True/1 = rap).
    prediction : array-like
        Predicted labels, same length as y.
    data : DataFrame
        Track data used to display the misclassified tracks.
    features : list or ndarray
        Feature column names shown alongside misclassified tracks.
    show_misidentified : bool
        When True, also display the individual misclassified tracks.
    """
    # isinstance is the idiomatic type test (was: type(features) == np.ndarray).
    if isinstance(features, np.ndarray):
        features = features.tolist()
    cm = confusion_matrix(y, prediction)
    np.set_printoptions(precision=2)
    # Fraction of true non-rap tracks that were labelled rap.
    nonrap_misidentified = float(cm[0, 1]) / (cm[0, 0] + cm[0, 1])
    # Single-argument print() calls behave identically on Python 2 and 3;
    # the original mixed statement-form and call-form prints in this block.
    print("Accuracy =\t%.1f%%" % (100 * metrics.accuracy_score(y, prediction)))
    print("Rap songs correctly identified =\t%.1f%%" % (100 * metrics.recall_score(y, prediction)))
    print("Songs incorrectly identified as rap =\t%.1f%%" % (100 * (1 - metrics.precision_score(y, prediction))))
    print("Non-rap songs identified as rap =\t%.1f%%" % (100 * nonrap_misidentified))
    print("F1 score =\t%.3f" % metrics.f1_score(y, prediction))
    print('Confusion matrix')
    print(cm)
    if show_misidentified:
        print("Misidentified as rap: ")
        display(data.loc[(prediction == 1) & (y == 0), ['artist_name', 'title'] + features])
        print("Misidentified as nonrap: ")
        display(data.loc[(prediction == 0) & (y == 1), ['artist_name', 'title'] + features])
def compute_features(lyrics, tdm_indices):
    """Create new superficial lyrics features. Return df with the new features in columns and one row per track.

    Parameters
    ----------
    lyrics : dict
        Must hold 'tdm' (a sparse term-document matrix, tracks x terms, whose
        toarray() yields counts; columns sorted by decreasing corpus frequency)
        and 'unstemmed_terms' (one term string per tdm column).
    tdm_indices : sequence of int
        tdm row index for each track, in output order.

    Returns
    -------
    DataFrame
        Columns: total_num_words, mean_word_length, median_word_rank,
        mean_word_instances.
    """
    # Dead timing code (import time / start / commented prints) removed, and
    # the four separate passes over the dense rows merged into a single loop
    # so each row is sliced once.
    tdm = lyrics['tdm'].toarray()
    n_tracks = len(tdm_indices)
    word_lens = np.array([len(term) for term in lyrics['unstemmed_terms']], dtype=float)

    total_num_words = np.zeros(n_tracks)
    mean_word_length = np.zeros(n_tracks)
    median_word_rank = np.zeros(n_tracks)
    mean_word_instances = np.zeros(n_tracks)
    for i in range(n_tracks):
        row = tdm[tdm_indices[i], :]
        # Total (truncated) word count of the track.
        total_num_words[i] = row.sum()
        # Column indices of the distinct words occurring in the track.
        word_indices = row.nonzero()[0]
        # Mean length of the distinct words (not weighted by frequency).
        mean_word_length[i] = np.mean(word_lens[word_indices])
        # Median corpus-frequency rank of the distinct words.
        median_word_rank[i] = np.median(word_indices)
        # Mean repetitions per distinct word (normalized by length below).
        mean_word_instances[i] = np.mean(row[word_indices])
    mean_word_instances = np.divide(mean_word_instances, total_num_words)

    additional_features = pd.DataFrame(data={'total_num_words': total_num_words,
                                             'mean_word_length': mean_word_length,
                                             'median_word_rank': median_word_rank,
                                             'mean_word_instances': mean_word_instances})
    return additional_features
# ## Creating the dataset
# My source for lyrics was the [musicXmatch Dataset](http://labrosa.ee.columbia.edu/millionsong/musixmatch), which contains entries for 237,662 songs from the Million Songs Dataset. The MSD is a selection of one million songs based on [loose criteria](http://labrosa.ee.columbia.edu/millionsong/pages/how-did-you-choose-million-tracks) that included as many songs as possible by popular artists, and "extreme" songs in terms of audio characteristics. However the complete lyrics are not included, for copyright reasons:
#
# > The lyrics come in bag-of-words format: each track is described as the word-counts for a dictionary of the top 5,000 words across the set.
#
# This eliminates at least two surface-level properties I was interested in, the line lengths and the occurrence of extremely rare (or made-up) words. But it retains many more. I stored lyrics information in a dict called lyrics, which has at the heart of it a sparse matrix of counts of words (columns) by tracks (rows), sorted in decreasing order of word frequency across the corpus.
# ```
# print lyrics['terms'][0:10]
# print(lyrics['tdm'][:5,:].toarray())
#
# ['i', 'the', 'you', 'to', 'and', 'a', 'me', 'it', 'not', 'in']
# [[10 0 17 ..., 0 0 0]
# [28 15 2 ..., 0 0 0]
# [ 5 4 3 ..., 0 0 0]
# [16 4 0 ..., 0 0 0]
# [39 30 10 ..., 0 0 0]]```
#
# Although the Million Songs Dataset contains a large amount of metadata and data about the acoustic properties of songs (based on data compiled by [The Echo Nest](https://en.wikipedia.org/wiki/The_Echo_Nest), it does not have genre information. I got that from the [tagtraum genre annotations](www.tagtraum.com/msd_genre_datasets.html) to the Million Songs Dataset. It determines genre based on human-generated annotations from the All Music Guide, Last.fm, and the beaTunes Genre Dataset (BGD). There are up to two genres listed for every song, and I defined a track as being rap if it had "Rap" in either of the two genre slots.
#
# The tagtraum genre annotations covered 133,676 tracks, of which 55,726 intersected with the tracks in the musicXmatch lyrics training set, and 6,967 with the lyrics test set (the musicXmatch dataset has a standard train-test split). `generate_track_info.py` does this merge, and also adds track names and artist names by querying the MSD's sqlite3 database track_metadata.db, and saves the result as pickles.
# In[15]:
import pickle

# Pickle streams are binary: open in 'rb' (text mode 'r' corrupts the stream
# on Windows in Python 2 and fails outright on Python 3).
with open('train_track_info.pickle', 'rb') as f:
    track_info = pickle.load(f)
with open('train_lyrics_data.pickle', 'rb') as f:
    lyrics = pickle.load(f)
# ## Feature engineering: Surface text features
# Compute new features for each track based on the lyrics.
# In[44]:
# Create features
# Derive the superficial lyric features for every training track and append
# them as new columns next to the track metadata.
new_features = compute_features(lyrics, track_info.tdm_row)
train_data = pd.concat([track_info, new_features],axis=1)
# The model feature names are exactly the newly computed columns.
features = new_features.columns.values
# Examining the distribution of these variables between the two classes shows promising separation of tracks.
#
# `total_num_words` is the number of words in the track, which will be an underestimate of the true number of words because of all words beyond the 5000 most frequent in the lyrics dataset being eliminated. Nevertheless, it should have a very strong linear correlation with the true number of words.
# In[51]:
# Compare rap vs non-rap distributions of total_num_words (x axis fixed to [0, 1000]).
feature_distribution(train_data,'total_num_words',[0,1000])
# `mean_word_length` is the mean of the word lengths in a track, not weighting by frequency of the word. Again, not precisely the real values, since the lyrics have been stemmed (although I used the provided unstemming dictionary) but should correlate strongly.
# In[52]:
# Compare distributions of mean_word_length (x range derived from the data's IQR).
feature_distribution(train_data,'mean_word_length')
# `median_word_rank` is the median of the horizontal index of the words in the term-document matrix, which reflects the rarity of the words used.
# In[53]:
# Compare distributions of median_word_rank (x axis fixed to [0, 500]).
feature_distribution(train_data,'median_word_rank',[0,500])
# `mean_word_instances` is the mean number of times a word is repeated in a track, divided by the total number of words in the track. It should reflect how repetitive the song is lyrically (e.g. because of a high ratio of choruses to verses)
# In[54]:
# Compare distributions of mean_word_instances (x range derived from the data's IQR).
feature_distribution(train_data,'mean_word_instances')
# ## How I Got to 95% Accuracy Without Really Trying: The Problem of Imbalanced Datasets
# All my initial attempts to correctly detect rap songs using the features I created seemed to be very successful: 95% accuracy. But then I realized that this was due to rap songs being much less common than non-rap.
# In[55]:
# Show the class balance. NOTE: this bare expression only displays its value
# when run as a notebook cell; as a script it is a no-op.
pd.value_counts(track_info.is_rap)
# In fact a dumb model that predicts that no songs will ever be rap achieves this accuracy, thanks to the imbalanced dataset.
# In[56]:
# Baseline model: none are rap
# A constant all-zeros prediction scores the dataset's majority-class rate,
# which is what any real model must beat. (print() call form works on
# Python 2 and 3 alike for a single argument.)
prediction = np.zeros(len(train_data))
print("Accuracy = %.1f%%" % (100* np.mean(prediction == train_data.is_rap)))
# But this was very unsatisfactory for the puposes of my rap detector. I needed a) a better way to measure performance and b) a way to deal with training on this imbalanced data.
#
# ## Imbalanced dataset therapy #1: Undersampling the classes to be equal
# First, for a metric that is relevant to the performance I care about, which includes correctly identifying rap as well as not incorrectly identifying songs as rap. (aka the recall and 1-the precision). I decided to focus on the F1 score, which combines the two. It correctly measures my rap-doesn't-exist baseline as terrible:
# In[57]:
# Score the all-zeros baseline with the full metric suite (accuracy, recall,
# precision, F1, confusion matrix).
examine_prediction(train_data.is_rap, prediction, train_data, features, show_misidentified=False)
# So I equalized the number of non-rap and rap tracks in my training set by selecting a random subset of the non-rap tracks.
# In[58]:
# Reduce the number of non-rap training samples so that it is balanced with the rap training samples
# BUG FIX: `seed` was first assigned only further down in the script, yet
# ShuffleSplit below already uses it — a NameError when run top-to-bottom
# (notebook cell-execution order masked this). Define it before first use.
seed = 7
num_rap_tracks = np.sum(train_data.is_rap)
non_rap_tracks = train_data.loc[np.invert(train_data.is_rap),:]
# Draw a random subset of non-rap tracks exactly the size of the rap class.
rs = cross_validation.ShuffleSplit(len(non_rap_tracks), n_iter=1, test_size=num_rap_tracks,random_state=seed)
sampled_nonrap_tracks = next(iter(rs))[1]
non_rap_tracks = non_rap_tracks.iloc[sampled_nonrap_tracks,:]
# Rebuild train_data as the balanced union of the subsample and all rap tracks.
train_data = pd.concat([non_rap_tracks,train_data.loc[train_data.is_rap,:]],ignore_index=True)
y = train_data.loc[:,'is_rap']
print("There are now %d non-rap tracks in the training set" % len(non_rap_tracks))
# ### Random Forest cross-validation
# With the non-rap and rap tracks equalized, and therefore the chance level of 50%, we can start training classifiers. Here's random forest, which did much better than chance, and also had a good F1 score:
# In[59]:
# Prepare for cross validation fittings
num_folds=5
# NOTE(review): num_instances is never used below — kept for reference.
num_instances = len(train_data)
seed = 7
# Stratified folds keep the rap/non-rap ratio constant across folds.
kfold = StratifiedKFold(y, shuffle=True, n_folds=num_folds, random_state=seed)

# Random forest fit
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100)
# Out-of-fold predictions for every sample, then score them.
prediction = cross_validation.cross_val_predict(clf,train_data.loc[:,features], y, cv=kfold)
examine_prediction(y, prediction, train_data, features, show_misidentified=False)
# The next few algorithms require the features to be scaled (and I save this scaling so I can apply it to the test data):
# In[60]:
# Fit a standardizer on the (balanced) training features; the same fitted
# scaler is reused later on the test set so both share one transformation.
scaler = preprocessing.StandardScaler().fit(train_data.loc[:,features])
train_data_scaled = scaler.transform(train_data.loc[:,features])
# ### Logistic regression cross-validation
# The logistic regression fit is almost as good, and much faster.
# In[61]:
# Cross validate Logistic regression
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
# Out-of-fold predictions on the scaled features, scored with the same folds
# as the random forest for a fair comparison.
prediction = cross_validation.cross_val_predict(clf,train_data_scaled, y, cv=kfold)
examine_prediction(y, prediction, train_data, features, show_misidentified=False)
# ### Support vector machine cross-validation
# The best performance (although probably not statistically significantly better) is from SVM
# In[62]:
# Cross validate SVM
from sklearn import svm
# Default SVC (RBF kernel); requires the scaled features.
clf = svm.SVC()
prediction = cross_validation.cross_val_predict(clf,train_data_scaled, y, cv=kfold)
examine_prediction(y, prediction, train_data, features, show_misidentified=False)
# ### Choice of number of features and feature importance
# We can use [recursive feature elimination with cross-validation](http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFECV.html#sklearn.feature_selection.RFECV) to see how many features to use, and then by fitting a random forest, rank their importance.
# In[63]:
# Recursive feature selection and feature importance
from sklearn.feature_selection import RFECV
#clf = svm.SVC(kernel='linear')
clf = RandomForestClassifier(n_estimators=100)
# Recursively drop the weakest feature, cross-validating F1 at each step to
# find how many features are actually worth keeping.
rfecv = RFECV(estimator=clf, step=1, cv=StratifiedKFold(y, 3, random_state=seed), scoring='f1')
rfecv.fit(train_data_scaled, y)
print("Optimal number of features : %d" % rfecv.n_features_)

# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (f1)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
# Refit on all features and rank their importances in a dot plot.
fitted_forest = clf.fit(train_data_scaled, y)
plot_feature_importance(features, fitted_forest)
# ### Embedding of the points in 2D (t-SNE)
# Just to get more insight into the separability of rap and non-rap using our features, I visualized the two classes embedded in 2D space using the [t-distributed stochastic neighbor embedding (t-SNE)](https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding).
# In[65]:
# Compute the t-SNE embedding of the points onto a 2 plane
from sklearn.manifold import TSNE
tsne = TSNE()
# Embed the (unscaled) feature vectors into 2D for visual inspection of
# class separability.
proj = tsne.fit_transform(train_data.loc[:,features])
# In[66]:
# Plot the t-SNE embedding
plt.figure()
plt.set_cmap("coolwarm")
# Color each embedded point by its class label (rap vs non-rap).
plt.scatter(proj[:, 0], proj[:, 1],s=2, c=y, alpha=1, edgecolors='face')
# ### Evaluating the undersampled models on the test set
# However, what really counts is performance on the test set. It's time to load that in, add the features, and then try it out with our trained models.
# In[67]:
# Pickle streams are binary: open in 'rb' (text mode 'r' corrupts the stream
# on Windows in Python 2 and fails outright on Python 3).
with open('test_track_info.pickle', 'rb') as f:
    test_track_info = pickle.load(f)
with open('test_lyrics_data.pickle', 'rb') as f:
    test_lyrics = pickle.load(f)
# In[68]:
# Build the same superficial features for the held-out test tracks.
new_features = compute_features(test_lyrics, test_track_info.tdm_row)
test_data = pd.concat([test_track_info, new_features],axis=1)
test_data_scaled = scaler.transform(test_data.loc[:,features]) # Use scaler that was fit on train_data
# First up is SVM. Although the accuracy is still fairly high, and the F1 score is much higher than the no-rap baseline, the accuracy is actually lower, 93.1% vs 95.2%. Furthermore, the F1 score is lower than our cross-validation predicted, and full 46.8% of the songs identified as rap were not rap songs.
# In[69]:
# Train SVM on the whole training set
clf = svm.SVC()
fitinfo = clf.fit(train_data_scaled, y)
# Evaluate the balanced-trained SVM on the (imbalanced) test set.
prediction = clf.predict(test_data_scaled)
examine_prediction(test_data.is_rap, prediction, test_data, features, show_misidentified=False)
# The random forest does even worse, with an at below 90% and even more of the songs identified as rap being misclassified - in fact the majority.
# In[70]:
# Just for interest, a random forest
clf = RandomForestClassifier(n_estimators=100)
fitinfo = clf.fit(train_data_scaled, y)
# Same evaluation on the test set with the random-forest model.
prediction = clf.predict(test_data_scaled)
examine_prediction(test_data.is_rap, prediction, test_data, features, show_misidentified=False)
# Clearly, undersampling so that the dataset is 50/50 non-rap and rap songs biases the detector towards saying it is a rap song, which causes its performance to suffer on the test set, a mixed sample of tracks that contains less than 10% rap songs.
# ## Imbalanced dataset therapy #2: Training on all data with SVM class weights
# Next, I tried another option for imbalanced data sets, using the full training set but weighting the rap tracks a little higher. I focused on SVM, since it is faster than random forest and had some of the best performances.
#
# ### No class weighting
#
# Before trying the weighting, however, I decided to just try the whole training set without any class weighting. When I perform a cross-validation, the accuracy is higher, but the F1 score is lower than with the 50/50 trained models. We are only detecting about half of the rap songs.
# In[71]:
# Create un-subsampled training set (i.e. all the training data)
un_train_data = track_info.copy()
new_features = compute_features(lyrics, un_train_data.tdm_row)
un_train_data = pd.concat([un_train_data, new_features],axis=1)

# Cross-validation of SVM on imbalanced data with no class weighting.
scaler_un = preprocessing.StandardScaler().fit(un_train_data.loc[:,features])
un_train_data_scaled = scaler_un.transform(un_train_data.loc[:,features])
un_y = un_train_data.is_rap
# BUG FIX: the folds were previously stratified on `y` (the balanced subset's
# labels, a different, shorter vector) instead of `un_y`, so the fold indices
# did not match the full data being cross-validated.
un_kfold = StratifiedKFold(un_y, shuffle=True, n_folds=num_folds, random_state=seed)
clf = svm.SVC()
prediction = cross_validation.cross_val_predict(clf, un_train_data_scaled, un_y, cv=un_kfold)
examine_prediction(un_y, prediction, un_train_data, features, show_misidentified=False)
# ### "Balance" class weighting
# Adding "balanced" class weighting (where classes are weighted based on the inverse of how often they appear) didn't make things better - although they greatly increased the number of rap songs that were identified as such, they made both accuracy and F1 worse.
# In[72]:
# Cross-validation of SVM on unbalanced data with class weightings
# 'balanced' weights each class inversely to its frequency.
clf = svm.SVC(class_weight='balanced')
prediction = cross_validation.cross_val_predict(clf, un_train_data_scaled, un_y, cv=un_kfold)
examine_prediction(un_y, prediction, un_train_data, features, show_misidentified=False)
# I had a hunch that finding an intermediate level of class weighting, somewhere between none and balanced, would give me the best possible F1 on the test set. I started by recreating manually the balanced class weightings.
# In[73]:
# Recreate the class weightings that 'balanced' produces
# (n_samples / (n_classes * class_count) for each class).
cw = len(un_y)/(2. * np.bincount(un_y))
# print() call form: identical output on Python 2 and 3 for a single argument.
print(cw)
# Then I used grid search cross validation to take 10 steps between completely unweighted and weighted, to find the class weights that optimize F1.
# In[ ]:
# Search to optimize the class weightings, with 10 steps between no weighting and balanced weighting
from sklearn import grid_search
import time
# Candidate rap-class ratios, from the true class frequency up to 0.5 (balanced).
ratios = np.linspace(float(np.sum(un_y))/len(un_y),0.5,10)
# One {class: weight} dict per candidate ratio; used by the (commented) grid
# search below and by the final model further down via cws[5].
cws = [{0:len(un_y)/(2 * len(un_y)*(1-r)),1:len(un_y)/(2 * len(un_y)*r)} for r in ratios]
# The grid search itself is left commented out because it is slow; its output
# is reproduced in the markdown block that follows.
#start = time.time()
#param_grid = {'class_weight':cws}
#clf = svm.SVC()
#gs = grid_search.GridSearchCV(estimator=clf, param_grid=param_grid, cv=3, verbose=0, scoring='f1',n_jobs=-1)
#gs.fit(un_train_data_scaled, un_y)
#for params, mean_score, scores in gs.grid_scores_:
#    print("%0.3f (+/-%0.03f) for %r"
#          % (mean_score, scores.std() * 2, params))
#print time.time()-start
# ```
# # Output of the previous cell when uncommented, since it takes too long and produces a lot of ugly warning messages
#
# 0.500 (+/-0.015) for {'class_weight': {0: 0.52539975863629507, 1: 10.342613214550854}}
# 0.596 (+/-0.021) for {'class_weight': {0: 0.55464822314479156, 1: 5.0747141556207627}}
# 0.625 (+/-0.033) for {'class_weight': {0: 0.58734512237966974, 1: 3.3622090528799751}}
# 0.636 (+/-0.033) for {'class_weight': {0: 0.62413852322533925, 1: 2.5138792818261102}}
# 0.637 (+/-0.027) for {'class_weight': {0: 0.66584973394368752, 1: 2.0073886104929475}}
# 0.638 (+/-0.018) for {'class_weight': {0: 0.71353533858975715, 1: 1.6707664017162922}}
# 0.631 (+/-0.014) for {'class_weight': {0: 0.76857794369149857, 1: 1.4308284833960971}}
# 0.618 (+/-0.019) for {'class_weight': {0: 0.83282243735573969, 1: 1.2511512805033203}}
# 0.599 (+/-0.029) for {'class_weight': {0: 0.90878682013220458, 1: 1.1115657053697283}}
# 0.573 (+/-0.020) for {'class_weight': {0: 1.0, 1: 1.0}}```
# ### Evaluating the optimal class-weighted SVM model on test data
# Based on the results of the grid search, I chose an intermediate class weighting, where non-rap entries are weighted at 0.71 and rap entries at 1.67, or 2.3 times as much. I then applied the trained SVM model to the test data.
#
# The accuracy was 96.5%, better than the baseline, but more importantly the F1 score was 0.75, much higher than the 0.66 obtained via the undersampling. 68.4% of rap songs were correctly identified as such, with less than 1.2% of non-rap songs mis-identified.
# In[75]:
# Final model: SVM with the intermediate class weighting chosen from the grid
# search (cws[5]), trained on the full imbalanced training set.
clf = svm.SVC(class_weight=cws[5])
test_data_scaled_un = scaler_un.transform(test_data.loc[:,features]) # Different scaler
fitinfo = clf.fit(un_train_data_scaled, un_y)
prediction = clf.predict(test_data_scaled_un)
examine_prediction(test_data.is_rap, prediction, test_data, features, show_misidentified=True)
# ### Error analysis
# The procedure above also outputted some examples of misclassification, and it's interesting to look at these. I see two types of songs that tend to be misclassified as rap: exceptionally wordy ballads, because of the high total_num_words, and foreign-language songs, because of high median_word_rank - because words are represented in terms of their rank in the overall corpus, and the overall corpus appears to be majority English songs, non-English words have a higher average rank. This would seem to be an impediment to building a cross-language classifier with this feature. However, median_word_rank would be a reliable indicator if the training corpus was built with an equal number of tracks for each language, or within only one language. As for the wordy ballads, this could be partially compensated for by including metadata about the track duration - I expect that words per minute would be a more reliable predictor.
#
# As for songs that are misclassified as non-rap, many of these are not in fact rap: the very short total_num_word values, e.g. 29 or 53, indicate that they are in fact primarily instrumental with a spoken word sample, or album skits. And Never Going Back Again by Lindsey Buckingham (of Fleetwood Mac) is labelled as rap, which suggests that there are data quality issues in the initial labelling that are lowering performance scores slightly.
# ## Conclusion
# For a diverse, primarily English-language dataset of lyrics, I achieved 96.5% accuracy in classifying tracks as rap, with an F1 score of 0.75 corresponding to detecting 68.4% of rap songs and misclassifying 1.2% of non-rap songs as rap. Depending on the needs of the application, the model could be biased towards detecting more rap songs, at a cost of misclassfying more non-rap songs.
#
# I dealt with the problem of imbalanced datasets by using a SVM model with class weights tuned using crossvalidation. This strategy and the performance would change if the composition of the target data were different - for example if it consisted of equal numbers rap and non-rap, we could achieve at least 84% accuracy (above a chance of 50%) with these four superficial text features.
#
# I could achieve even better genre classification based on lyrics by using semantic content, but the current superficial text features - `total_num_words`, `mean_word_length`, `median_word_rank`, and `mean_word_instances` - are both sufficient and helpful for classifying songs as rap at that performance level, and these features are language and vocabulary independent. So that even when trained on a primarily English-language corpus, most of them could be used to classify Italian-language songs as rap or not.
# In[ ]:
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
22,
5974,
198,
198,
1136,
62,
541,
7535,
22446,
32707,
7,
84,
1101,
265,
29487,
8019,
26098,
11537,
198,
1136,
62,
541,
7535,
22446,
32707,
7,
84,
1,
11250,
554,
1370,
7... | 3.106339 | 8,219 |
"""
Port a native function or object to Python.
"""
from typing import Iterable, Optional
class PyPortSignature:
    """
    Port a native function or object to Python.
    """

    # NOTE(review): presumably the names of other ports this one depends on
    # (None when there are none) — confirm against users of this class.
    dependencies: Optional[Iterable[str]] = None
    # NOTE(review): presumably the names of ports linked with this one
    # (None when there are none) — confirm against users of this class.
    linked_ports: Optional[Iterable[str]] = None
| [
37811,
198,
13924,
257,
6868,
2163,
393,
2134,
284,
11361,
13,
198,
37811,
198,
6738,
19720,
1330,
40806,
540,
11,
32233,
628,
198,
4871,
9485,
13924,
11712,
1300,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
4347,
257,
6868,
21... | 3.247059 | 85 |
from pyhop_anytime import *
# Satellite-observation planning problem for pyhop_anytime (presumably
# machine-generated from a PDDL instance -- note the uniform formatting).
# NOTE: 'global' at module top level is a no-op; kept as emitted.
global state, goals
# --- Initial state ---------------------------------------------------------
state = State('state')
# (instrument, calibration target) pairs.
state.calibration_target = Oset([('instrument0','groundstation3'),('instrument1','groundstation3'),('instrument2','groundstation3'),('instrument3','star1'),('instrument4','groundstation3'),('instrument5','groundstation3'),('instrument6','star4'),('instrument7','star0'),('instrument8','groundstation3')])
# (instrument, satellite it is mounted on) pairs.
state.on_board = Oset([('instrument0','satellite0'),('instrument1','satellite0'),('instrument2','satellite1'),('instrument3','satellite2'),('instrument4','satellite2'),('instrument5','satellite3'),('instrument6','satellite4'),('instrument7','satellite4'),('instrument8','satellite4')])
# (satellite, direction it currently points at) pairs.
state.pointing = Oset([('satellite0','star19'),('satellite1','planet17'),('satellite2','planet7'),('satellite3','star4'),('satellite4','phenomenon5')])
# Satellites whose power supply is currently free.
state.power_avail = Oset(['satellite0','satellite1','satellite2','satellite3','satellite4'])
# (instrument, image mode it supports) pairs.
state.supports = Oset([('instrument0','image4'),('instrument1','image4'),('instrument1','thermograph1'),('instrument2','image4'),('instrument2','thermograph0'),('instrument2','thermograph2'),('instrument3','image3'),('instrument3','image4'),('instrument4','image3'),('instrument5','image4'),('instrument5','thermograph1'),('instrument6','image3'),('instrument6','thermograph0'),('instrument6','thermograph1'),('instrument7','thermograph0'),('instrument7','thermograph2'),('instrument8','image3'),('instrument8','thermograph2')])
# No instrument is calibrated or powered, and no image exists yet.
state.calibrated = Oset()
state.have_image = Oset()
state.power_on = Oset()
# --- Goal state ------------------------------------------------------------
goals = State('goals')
# Required (target, image mode) captures.
goals.have_image = Oset([('phenomenon12','image3'),('phenomenon18','image3'),('phenomenon27','thermograph1'),('phenomenon5','thermograph1'),('planet10','thermograph1'),('planet11','thermograph2'),('planet13','thermograph1'),('planet15','thermograph0'),('planet16','image3'),('planet17','image4'),('planet23','thermograph1'),('planet24','thermograph2'),('planet25','thermograph1'),('planet28','thermograph2'),('planet29','thermograph0'),('planet6','image4'),('planet7','image3'),('planet8','image3'),('planet9','thermograph0'),('star14','image3'),('star19','thermograph0'),('star21','thermograph1'),('star22','image4'),('star26','thermograph0')])
# Required final (satellite, direction) orientations.
goals.pointing = Oset([('satellite1','phenomenon5'),('satellite2','planet11'),('satellite4','planet11')])
| [
6738,
12972,
8548,
62,
1092,
2435,
1330,
1635,
198,
20541,
1181,
11,
4661,
198,
5219,
796,
1812,
10786,
5219,
11537,
198,
5219,
13,
9948,
571,
1358,
62,
16793,
796,
440,
2617,
26933,
10786,
259,
43872,
15,
41707,
2833,
17529,
18,
33809,... | 3.027778 | 756 |
from flask import Flask, Response
from conf import s
from conntext import before_request, ServiceResponse, error_handler, after_request
from init_service import start_task
from log import logger
from route_list import ROUTE_LIST
import utils
import traceback
# Special handling of headers is performed by the WSGI wrapper below.
# NOTE(review): ServiceCentre and WsgiApp are not imported in this file --
# presumably they come from a project module; confirm the missing imports.
app = ServiceCentre()
app.wsgi_app = WsgiApp(app.wsgi_app)
app.auto_find_instance_path()


@app.route("/favicon.ico")
def favicon():
    """Answer browsers' favicon probes with an empty 204 instead of a 404.

    The original file left this decorator dangling (no function followed it),
    which is a SyntaxError; this handler completes the route definition.
    """
    return Response(status=204)


if __name__ == "__main__":
    # Host/port and flags come from configuration, with sensible defaults.
    app.run(
        s.get_conf_str("HOST", default="0.0.0.0"),
        port=s.get_conf_int("PORT", default=5000),
        threaded=True,
        debug=s.get_conf_bool("LOG_DEBUG", default=False),
    )
    # Alternative: serve via gevent's WSGIServer instead of the dev server.
    # http_server = WSGIServer(
    #     (
    #         g.get_conf_str("HOST", default="0.0.0.0"),
    #         g.get_conf_int("PORT", default=5000),
    #     ),
    #     app,
    # )
    # http_server.serve_forever()
| [
6738,
42903,
1330,
46947,
11,
18261,
198,
198,
6738,
1013,
1330,
264,
198,
6738,
369,
429,
2302,
1330,
878,
62,
25927,
11,
4809,
31077,
11,
4049,
62,
30281,
11,
706,
62,
25927,
198,
6738,
2315,
62,
15271,
1330,
923,
62,
35943,
198,
... | 2.228571 | 385 |
import json
# Registry of use cases, presumably populated by code later in the file.
useCases = dict()
11748,
33918,
198,
198,
1904,
34,
1386,
796,
23884
] | 2.888889 | 9 |
import network
| [
11748,
3127,
628,
198
] | 4.25 | 4 |
import logging
import math
class PFLocalization():
    """Performs the robot localization using the PF (particle filter) approach.

    NOTE(review): the class body is empty in this file -- presumably the
    implementation lives elsewhere or was stripped; confirm before use.
    """
| [
198,
11748,
18931,
198,
11748,
10688,
628,
198,
4871,
350,
3697,
4374,
1634,
33529,
198,
220,
220,
220,
37227,
2448,
23914,
262,
9379,
42842,
1262,
262,
28223,
3164,
37227,
198
] | 4 | 30 |
# coding=utf-8
# date: 2018/12/19, 13:49
# name: smz
import tensorflow as tf
def demo_one():
    """Show the difference between softmax and log_softmax on a small tensor."""
    values = tf.convert_to_tensor([1, 2, 3, 4, 5], dtype=tf.float32)
    # Named ops so they are easy to find in a graph visualisation.
    soft = tf.nn.softmax(values, name='single_softmax')
    log_soft = tf.nn.log_softmax(values, name="log_softmax")
    logged = tf.log(values, name='log_values')
    log_of_soft = tf.log(soft, name="log_single_softmax")
    # Evaluate all four tensors in one session run.
    with tf.Session() as sess:
        results = sess.run(fetches=[soft, log_soft, logged, log_of_soft])
    single_softmax_value, log_softmax_value, log_array_values, log_single_softmax_value = results
    print("single_softmax_value:{}\n".format(single_softmax_value))
    print("log_softmax_value:{}\n".format(log_softmax_value))
    print("log_array_values:{}\n".format(log_array_values))
    print("log_single_softmax:{}\n".format(log_single_softmax_value))


if __name__ == "__main__":
    demo_one()
2,
19617,
28,
40477,
12,
23,
198,
2,
3128,
25,
2864,
14,
1065,
14,
1129,
11,
1511,
25,
2920,
198,
2,
1438,
25,
895,
89,
628,
198,
11748,
11192,
273,
11125,
355,
48700,
628,
198,
4299,
13605,
62,
505,
33529,
198,
220,
220,
220,
3... | 2.378505 | 428 |
#!/usr/bin/env python
# Charles Lambelet - 20.07.18
# charles.lambelet88@gmail.com
# Connect Myo to Raspberry Pi 3/Zero W with Python over BLE
import pexpect
import time
import serial
# import common
# import filtering
from common import *
from filtering import *
# UART link used to drive the BLE module that talks to the Myo.
ser = serial.Serial(
    # port='/dev/ttyAMA0', # use ttyAMA0 when not using bluetooth on RPi3/Zero W
    port='/dev/ttyS0', # use miniUART and leave main UART to bluetooth module
    baudrate=115200,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS,
    timeout=1
)
# Reset the port to a known state: close if already open, then (re)open.
if ser.isOpen():
    ser.close()
ser.open()
# Result discarded -- presumably a final sanity check; TODO confirm intent.
ser.isOpen()
# function to connect to the Myo.
def connect(child, device):
    '''Connect to the Myo at BLE address `device` via a gatttool session.

    child: pexpect spawn driving gatttool; device: BLE address string.
    Raises a pexpect timeout if no "Connection successful" within 5 s.
    '''
    # Trailing commas are Python 2 print-statement syntax (suppress newline);
    # this module targets Python 2 -- keep them as-is.
    print("Connecting to "),
    print(device),
    child.sendline("connect {0}".format(device))
    child.expect("Connection successful", timeout=5)
    print(" Connected!")
# function to disconnect from the Myo.
def disconnect(child, device):
    '''Disconnect from the Myo at BLE address `device`.

    child: pexpect spawn driving gatttool; device: BLE address string.
    '''
    # Trailing commas are Python 2 print-statement syntax (suppress newline).
    # Messages fixed: was "Disconnecting to" / "Disonnected!".
    print("Disconnecting from "),
    print(device),
    child.sendline("disconnect {0}".format(device))
    print(" Disconnected!")
# function to get the name of the Myo.
def get_name(child):
    '''Read and print the Myo's device name (GATT handle 0x03).

    child: pexpect spawn driving gatttool.
    '''
    child.sendline("char-read-hnd 0x03")
    child.expect("Characteristic value/descriptor:", timeout=10)
    # look for end of line with \r\n
    child.expect("\r\n", timeout=10)
    # get returned string (space separated) up to expected string pattern
    name_str = child.before
    # remove all spaces in string
    name_str = name_str.replace(' ', '')
    # decode hex string into ASCII
    # NOTE: str.decode('hex') exists only in Python 2; this module targets py2.
    name_myo = name_str.decode('hex')
    # Trailing comma = Python 2 print statement, suppresses the newline.
    print("Device name: "),
    print(name_myo)
# function to get firmware version.
def get_firmware(child):
    '''Read and print the firmware version (GATT handle 0x17).

    child: pexpect spawn driving gatttool.
    '''
    child.sendline("char-read-hnd 0x17")
    child.expect("Characteristic value/descriptor:", timeout=10)
    child.expect("\r\n", timeout=10)
    fw_str = child.before
    # remove last space in string (= 1 character)
    fw_str = fw_str[:-1]
    # replace spaces in string with \x ("\" is escape character)
    # NOTE: the 'string-escape' codec exists only in Python 2.
    fw_str = fw_str.replace(' ', r'\x').decode('string-escape')
    # decode string with unpack() function -- four little-endian shorts
    # (presumably major.minor.patch.hw-rev; TODO confirm against Myo spec).
    v0, v1, v2, v3 = unpack('4h', fw_str)
    print('Firmware version: %d.%d.%d.%d' % (v0, v1, v2, v3))
# function to get battery level.
def get_battery_level(child):
    '''Read and print the battery level in percent (GATT handle 0x0011).

    child: pexpect spawn driving gatttool.
    '''
    child.sendline("char-read-hnd 0x0011")
    child.expect("Characteristic value/descriptor:", timeout=10)
    child.expect("\r\n", timeout=10)
    # Trailing commas = Python 2 print statements, suppress the newline.
    print("Battery percentage: "),
    # convert hex string into int and then into float
    print(float(int(child.before, 16))),
    print("%")
# Make the Myo vibrate; the firmware supports durations of 1 to 3 seconds.
def vibrate(child, length):
    '''Make the Myo vibrate for `length` seconds.

    child: pexpect spawn driving gatttool; length: duration in seconds.
    '''
    print("The Myo will vibrate during " + str(length) + " second(s)")
    # The duration is encoded as the final digit of the command payload.
    command = 'char-write-req 0x19 03010{0}'.format(length)
    child.sendline(command)
# Change the colour of the Myo's logo and line LEDs.
# Each argument is an RGB colour as a 6-digit hex string: the first three
# bytes drive the logo, the last three drive the line.  For instance
# logo='0000ff' (blue) and line='ff0000' (red).
def set_leds(child, logo, line):
    '''Set the colour and intensity of the Myo's logo and line LEDs.

    child: pexpect spawn driving gatttool; logo/line: RGB hex strings.
    '''
    payload = 'char-write-req 0x19 0606{0}{1}'.format(logo, line)
    child.sendline(payload)
# Start streaming raw data and pose notifications.
def start_raw(child):
    '''Enable raw data and pose notifications (v1.0 firmware sequence).

    child: pexpect spawn driving gatttool.
    '''
    # enable IMU data
    # child.sendline("char-write-req 0x1d 0100")
    # Subscribe to raw data notifications (IMU if enabled above, and EMG).
    child.sendline("char-write-req 0x28 0100")
    child.sendline("char-write-req 0x19 0103010100")
    # Re-issuing the command with the classifier bit set keeps the Myo from
    # disconnecting after about one minute.
    child.sendline("char-write-req 0x19 0103010101")
# Start streaming raw EMG data at the fast (200 Hz) rate.
def start_raw_fast(child):
    '''Enable fast (200 Hz) raw EMG streaming (v1.0 firmware sequence).

    child: pexpect spawn driving gatttool.
    '''
    # Turn off what we do not need: IMU data and on/off-arm notifications.
    # (Send 0100 / 0200 respectively to turn them back on.)
    child.sendline("char-write-req 0x1d 0000")
    child.sendline("char-write-req 0x24 0000")
    # Subscribe to the four EmgData0..3 characteristics by writing 0x0100 to
    # each corresponding client-configuration handle.
    for handle in ("0x2c", "0x2f", "0x32", "0x35"):
        child.sendline("char-write-req {0} 0100".format(handle))
    # Command characteristic (0x19) payload layout:
    #   [command, payload_size, EMG mode, IMU mode, classifier mode]
    # Per the Myo BLE specification:
    #   0x01 -> set EMG and IMU modes
    #   0x03 -> 3 bytes of payload
    #   0x02 -> send 50Hz filtered EMG signals
    #   0x00 -> IMU data off, 0x00 -> classifier events off
    child.sendline("char-write-req 0x19 0103020000")
    # Disable battery notifications (write 0110 to enable them).
    child.sendline("char-write-req 0x12 0010")
# function to collect raw data
def collect_raw(child):
    '''Collect one EMG notification at the low sampling rate.

    child: pexpect spawn driving gatttool.
    Returns a tuple of 8 signed shorts, one per EMG channel.
    '''
    # for IMU data
    # child.expect("Notification handle = 0x001c value:", timeout=10)
    # for EMG data
    child.expect("Notification handle = 0x0027 value:", timeout=10)
    child.expect("\r\n", timeout=10)
    emg_str = child.before
    # emg_str looks like this: cc 00 2f 00 40 00 bb 01 14 01 f9 00 63 00 65 00 00
    # remove two last spaces and last byte in string (= 4 characters)
    emg_str = emg_str[:-4]
    # emg_str looks like this: cc 00 2f 00 40 00 bb 01 14 01 f9 00 63 00 65 00
    # replace spaces in string with \x ("\" is escape character)
    # NOTE: the 'string-escape' codec exists only in Python 2.
    emg_str = emg_str.replace(' ', r'\x').decode('string-escape')
    # decode string with unpack() function (see common.py)
    emg_raw = unpack('8h', emg_str)
    # print(emg_raw)
    return emg_raw
# function to collect raw data at 200Hz
# received packets look like: Notification handle = 0x002b value: ff 01 06 31 12 0f 01 ff 00 01 03 eb 05 f0 fd 00
# there are 4 different attributes to look for: 0x002b, 0x002e, 0x0031, 0x0034
# it seems these attributes are not always coming in the same order...
# def collect_raw_fast(child):
# '''Function to collect raw date at high sampling rate (200Hz)'''
# i = child.expect(["Notification handle = 0x002b value:",
# "Notification handle = 0x002e value:",
# "Notification handle = 0x0031 value:",
# "Notification handle = 0x0034 value:"], timeout=10)
# if i == 0:
# child.expect("\r\n", timeout=10)
# emg_str0 = child.before
# # remove last space at the end of the string (= 1 character)
# emg_str0 = emg_str0[:-1]
# # print(emg_str0)
# # replace spaces in string with \x ("\" is escape character) to use unpack() function.
# emg_str0 = emg_str0.replace(' ', r'\x').decode('string-escape')
# # decode string with unpack() function (see common.py)
# emg_raw0a = unpack('8b', emg_str0[:8])
# emg_raw0b = unpack('8b', emg_str0[8:])
# print("emg_raw0a: ", emg_raw0a)
# print("emg_raw0b: ", emg_raw0b)
#
# return [emg_raw0a, emg_raw0b]
#
# elif i == 1:
# child.expect("\r\n", timeout=10)
# emg_str1 = child.before
# # remove last space at the end of the string (= 1 character)
# emg_str1 = emg_str1[:-1]
# # print(emg_str1)
# # replace spaces in string with \x ("\" is escape character) to use unpack() function.
# emg_str1 = emg_str1.replace(' ', r'\x').decode('string-escape')
# # decode string with unpack() function (see common.py)
# emg_raw1a = unpack('8b', emg_str1[:8])
# emg_raw1b = unpack('8b', emg_str1[8:])
# print("emg_raw1a: ", emg_raw1a)
# print("emg_raw1b: ", emg_raw1b)
#
# elif i == 2:
# child.expect("\r\n", timeout=10)
# emg_str2 = child.before
# # remove last space at the end of the string (= 1 character)
# emg_str2 = emg_str2[:-1]
# # print(emg_str2)
# # replace spaces in string with \x ("\" is escape character) to use unpack() function.
# emg_str2 = emg_str2.replace(' ', r'\x').decode('string-escape')
# # decode string with unpack() function (see common.py)
# emg_raw2a = unpack('8b', emg_str2[:8])
# emg_raw2b = unpack('8b', emg_str2[8:])
# print("emg_raw2a: ", emg_raw2a)
# print("emg_raw2b: ", emg_raw2b)
#
# elif i == 3:
# child.expect("\r\n", timeout=10)
# emg_str3 = child.before
# # remove last space at the end of the string (= 1 character)
# emg_str3 = emg_str3[:-1]
# # print(emg_str3)
# # replace spaces in string with \x ("\" is escape character) to use unpack() function.
# emg_str3 = emg_str3.replace(' ', r'\x').decode('string-escape')
# # decode string with unpack() function (see common.py)
# emg_raw3a = unpack('8b', emg_str3[:8])
# emg_raw3b = unpack('8b', emg_str3[8:])
# print("emg_raw3a: ", emg_raw3a)
# print("emg_raw3b: ", emg_raw3b)
# simpler version of collect_raw_fast()
def collect_raw_fast(child):
    '''Collect one EMG notification at the high sampling rate (200 Hz).

    child: pexpect spawn driving gatttool.
    Each fast notification carries two consecutive 8-channel samples; the
    four EmgData characteristics (handles 0x2b/0x2e/0x31/0x34) can arrive
    in any order, so all four patterns are matched at once.
    Returns a list of two tuples of 8 signed bytes (one tuple per sample).
    '''
    i = child.expect(["Notification handle = 0x002b value:",
                      "Notification handle = 0x002e value:",
                      "Notification handle = 0x0031 value:",
                      "Notification handle = 0x0034 value:"], timeout=10)
    if i == 0 or i == 1 or i == 2 or i == 3:
        child.expect("\r\n", timeout=10)
        emg_str = child.before
        # remove last space at the end of the string (= 1 character)
        emg_str = emg_str[:-1]
        # print(emg_str)
        # replace spaces in string with \x ("\" is escape character) to use unpack() function.
        # NOTE: the 'string-escape' codec exists only in Python 2.
        emg_str = emg_str.replace(' ', r'\x').decode('string-escape')
        # decode string with unpack() function (see common.py)
        emg_raw_a = unpack('8b', emg_str[:8])
        emg_raw_b = unpack('8b', emg_str[8:])
        # print("emg_raw_a: ", emg_raw_a)
        # print("emg_raw_b: ", emg_raw_b)
        return [emg_raw_a, emg_raw_b]
# Select the Myo's sleep behaviour:
#   0: the Myo goes to sleep after some inactivity.
#   1: the Myo never goes to sleep.
def sleep_mode(child, mode):
    '''Put the Myo into the given sleep mode (0 or 1).

    child: pexpect spawn driving gatttool; mode: 0 or 1.
    '''
    child.sendline('char-write-req 0x19 09010{0}'.format(mode))
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
7516,
10923,
1350,
1616,
532,
1160,
13,
2998,
13,
1507,
198,
2,
1149,
829,
13,
2543,
1350,
1616,
3459,
31,
14816,
13,
785,
198,
198,
2,
8113,
2011,
78,
284,
24244,
13993,
5... | 2.449433 | 4,677 |
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from db_utils import init_db
import sys
import time
# Require the four positional CLI arguments described below (Python 2 script).
if len(sys.argv) < 5:
    print 'You must specify 4 arguments:'
    print '1. The MySQL DB user name'
    print '2. The MySQL DB password'
    print '3. The start datetime in the format: %Y-%m-%d %H:%M:%S'
    print '4. The finish datetime in the format: %Y-%m-%d %H:%M:%S'
    sys.exit(1)
# Connect to the local 'spe' MySQL database with the given credentials.
db = init_db(
    'mysql://' + sys.argv[1] + ':' + sys.argv[2] + '@localhost/spe')
# Parse the analysis window boundaries from the CLI arguments.
start_time = datetime.fromtimestamp(
    time.mktime(time.strptime(sys.argv[3], '%Y-%m-%d %H:%M:%S')))
finish_time = datetime.fromtimestamp(
    time.mktime(time.strptime(sys.argv[4], '%Y-%m-%d %H:%M:%S')))
total_time = 0
total_idle_time = 0
# For each host, split the window into intervals spent off (state 0) and
# on (state 1), then aggregate across hosts.
for hostname, host_id in db.select_host_ids().items():
    prev_timestamp = start_time
    # Hosts are assumed to start the window in the "on" state.
    prev_state = 1
    states = {0: [], 1: []}
    for timestamp, state in db.select_host_states(
            host_id, start_time, finish_time):
        if prev_timestamp:
            # NOTE(review): total_seconds() is neither defined nor imported in
            # this chunk -- presumably a project helper; confirm it exists.
            states[prev_state].append(total_seconds(
                timestamp - prev_timestamp))
        prev_timestamp = timestamp
        prev_state = state
    # Close the final open interval at the end of the window.
    states[prev_state].append(total_seconds(
        finish_time - prev_timestamp))
    off_time = sum(states[0])
    on_time = sum(states[1])
    total_time += off_time + on_time
    total_idle_time += off_time
print "Total time: " + str(total_time)
print "Total idle time: " + str(total_idle_time)
print "Idle time fraction: " + str(
    float(total_idle_time) / total_time)
| [
2,
15069,
2321,
9261,
3944,
28678,
1031,
709,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 2.581886 | 806 |
from conans import ConanFile, CMake, tools
import os
| [
6738,
369,
504,
1330,
31634,
8979,
11,
327,
12050,
11,
4899,
198,
11748,
28686,
628
] | 3.6 | 15 |
import pandas as pd
# Export the 'logger_data' table from the HDF5 store to CSV.
# Open read-only and guarantee the store is closed even if the read fails
# (the original never closed the store, leaking the file handle).
hdf = pd.HDFStore('signals_data.h5', mode='r')
try:
    key = 'logger_data'
    df1 = hdf.get(key)
finally:
    hdf.close()
df1.to_csv('signals_data.csv', encoding='utf-8', index=False, sep=',')
11748,
19798,
292,
355,
279,
67,
198,
198,
71,
7568,
796,
279,
67,
13,
39,
8068,
22658,
10786,
12683,
874,
62,
7890,
13,
71,
20,
3256,
14171,
11639,
81,
11537,
198,
198,
2539,
796,
705,
6404,
1362,
62,
7890,
6,
198,
198,
7568,
16,... | 2.182927 | 82 |
"""
This is an extract configuration for a S3 Scrape file.
See template definitions here:
https://docs.google.com/spreadsheets/d/1ugcw1Rh3e7vXnc7OWlR4J7bafiBjGnfd4-rEThI-BNI
"""
from kf_lib_data_ingest.common import constants
from kf_lib_data_ingest.common.constants import GENOMIC_FILE, COMMON
from kf_lib_data_ingest.common.concept_schema import CONCEPT
from kf_lib_data_ingest.etl.extract.operations import (
keep_map,
row_map,
value_map,
constant_map,
)
def genomic_file_ext(x):
    """
    Return the known genomic file extension that `x` ends with, or None
    if no extension in FILE_EXT_FORMAT_MAP matches.
    """
    candidates = [ext for ext in FILE_EXT_FORMAT_MAP if x.endswith(ext)]
    if not candidates:
        return None
    # Prefer the most specific match, e.g. ".vcf.gz.tbi" over ".tbi".
    return max(candidates, key=len)
# Recognised genomic file extensions mapped to their standard file format.
# Overlapping suffixes (e.g. ".bai" vs ".bam.bai") are disambiguated by
# genomic_file_ext(), which picks the longest match.
FILE_EXT_FORMAT_MAP = {
    ".fq": GENOMIC_FILE.FORMAT.FASTQ,
    ".fastq": GENOMIC_FILE.FORMAT.FASTQ,
    ".fq.gz": GENOMIC_FILE.FORMAT.FASTQ,
    ".fastq.gz": GENOMIC_FILE.FORMAT.FASTQ,
    ".bam": GENOMIC_FILE.FORMAT.BAM,
    ".hgv.bam": GENOMIC_FILE.FORMAT.BAM,
    ".cram": GENOMIC_FILE.FORMAT.CRAM,
    ".bam.bai": GENOMIC_FILE.FORMAT.BAI,
    ".bai": GENOMIC_FILE.FORMAT.BAI,
    ".cram.crai": GENOMIC_FILE.FORMAT.CRAI,
    ".crai": GENOMIC_FILE.FORMAT.CRAI,
    ".g.vcf.gz": GENOMIC_FILE.FORMAT.GVCF,
    ".g.vcf.gz.tbi": GENOMIC_FILE.FORMAT.TBI,
    ".vcf.gz": GENOMIC_FILE.FORMAT.VCF,
    ".vcf": GENOMIC_FILE.FORMAT.VCF,
    ".vcf.gz.tbi": GENOMIC_FILE.FORMAT.TBI,
    ".peddy.html": GENOMIC_FILE.FORMAT.HTML,
    ".md5": COMMON.OTHER,
}

# File format (or raw extension, for formats shared by several data types)
# mapped to the standard data type; see data_type() for the lookup order.
DATA_TYPES = {
    GENOMIC_FILE.FORMAT.FASTQ: GENOMIC_FILE.DATA_TYPE.UNALIGNED_READS,
    GENOMIC_FILE.FORMAT.BAM: GENOMIC_FILE.DATA_TYPE.ALIGNED_READS,
    GENOMIC_FILE.FORMAT.CRAM: GENOMIC_FILE.DATA_TYPE.ALIGNED_READS,
    GENOMIC_FILE.FORMAT.BAI: GENOMIC_FILE.DATA_TYPE.ALIGNED_READS_INDEX,
    GENOMIC_FILE.FORMAT.CRAI: GENOMIC_FILE.DATA_TYPE.ALIGNED_READS_INDEX,
    GENOMIC_FILE.FORMAT.VCF: GENOMIC_FILE.DATA_TYPE.VARIANT_CALLS,
    GENOMIC_FILE.FORMAT.GVCF: GENOMIC_FILE.DATA_TYPE.GVCF,
    GENOMIC_FILE.FORMAT.HTML: COMMON.OTHER,
    # Different TBI types share the same format in FILE_EXT_FORMAT_MAP above
    ".g.vcf.gz.tbi": GENOMIC_FILE.DATA_TYPE.GVCF_INDEX,
    ".vcf.gz.tbi": GENOMIC_FILE.DATA_TYPE.VARIANT_CALLS_INDEX,
    ".md5": COMMON.OTHER,
}
def filter_df_by_file_ext(df):
    """
    Only keep rows where file extension is one of those in
    FILE_EXT_FORMAT_MAP.keys

    Also materialises the FILE_FORMAT concept column as a side effect, so
    later operations can simply keep it.
    """
    df[CONCEPT.GENOMIC_FILE.FILE_FORMAT] = df["Key"].apply(file_format)
    # file_format() returns None for unknown extensions; drop those rows.
    return df[df[CONCEPT.GENOMIC_FILE.FILE_FORMAT].notnull()]
source_data_url = "{{ download_url }}"
do_after_read = filter_df_by_file_ext
def s3_url(row):
    """
    Build the s3:// URL of a row's object from its Bucket and Key columns.
    """
    bucket = row["Bucket"]
    key = row["Key"]
    return "s3://{}/{}".format(bucket, key)
def file_format(x):
    """
    Map a filename to its genomic file format, or None when its extension
    is not present in FILE_EXT_FORMAT_MAP.
    """
    ext = genomic_file_ext(x)
    return FILE_EXT_FORMAT_MAP.get(ext)
def data_type(x):
    """
    Look up the data type for a filename: first by its file format, then by
    the raw extension (some extensions, e.g. .tbi, share one format but map
    to different data types).
    """
    by_format = DATA_TYPES.get(file_format(x))
    if by_format:
        return by_format
    return DATA_TYPES.get(genomic_file_ext(x))
def fname(key):
    """
    Return the filename portion of an S3 key (everything after the last '/';
    the whole key when it contains no '/').
    """
    _, _, tail = key.rpartition("/")
    return tail
# Extraction operations applied to each row of the S3 scrape: map source
# columns (Bucket, Key, Size, ETag) onto standard ingest concepts.
operations = [
    # Use the full s3:// URL as the unique genomic-file ID.
    row_map(out_col=CONCEPT.GENOMIC_FILE.ID, m=s3_url),
    row_map(
        out_col=CONCEPT.GENOMIC_FILE.URL_LIST, m=lambda row: [s3_url(row)]
    ),
    value_map(in_col="Key", out_col=CONCEPT.GENOMIC_FILE.FILE_NAME, m=fname),
    keep_map(in_col="Size", out_col=CONCEPT.GENOMIC_FILE.SIZE),
    # ETags arrive wrapped in literal double quotes; strip them.
    value_map(
        in_col="ETag",
        out_col=CONCEPT.GENOMIC_FILE.HASH_DICT,
        m=lambda x: {constants.FILE.HASH.S3_ETAG.lower(): x.replace('"', "")},
    ),
    constant_map(
        out_col=CONCEPT.GENOMIC_FILE.AVAILABILITY,
        m=constants.GENOMIC_FILE.AVAILABILITY.IMMEDIATE,
    ),
    # FILE_FORMAT was already computed by filter_df_by_file_ext(); keep it.
    keep_map(
        in_col=CONCEPT.GENOMIC_FILE.FILE_FORMAT,
        out_col=CONCEPT.GENOMIC_FILE.FILE_FORMAT,
    ),
    value_map(
        in_col="Key",
        out_col=CONCEPT.GENOMIC_FILE.DATA_TYPE,
        m=data_type,
    ),
]
| [
37811,
198,
1212,
318,
281,
7925,
8398,
329,
257,
311,
18,
1446,
13484,
2393,
13,
198,
198,
6214,
11055,
17336,
994,
25,
198,
5450,
1378,
31628,
13,
13297,
13,
785,
14,
43639,
42011,
14,
67,
14,
16,
1018,
66,
86,
16,
38576,
18,
68... | 2.11333 | 2,003 |
import yaml
import dolfin
from dolfin import assemble
from utils import get_versions
# Capture version information once at import time (see utils.get_versions),
# presumably so it can be attached to results for reproducibility.
code_parameters = get_versions()
| [
11748,
331,
43695,
198,
11748,
288,
4024,
259,
198,
198,
6738,
288,
4024,
259,
1330,
25432,
628,
198,
6738,
3384,
4487,
1330,
651,
62,
47178,
198,
8189,
62,
17143,
7307,
796,
651,
62,
47178,
3419,
198
] | 3.361111 | 36 |
from __future__ import unicode_literals
import collections
import functools
import logging
import operator
from functools import reduce
from django.apps import apps
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import messages
from django.db import models
from django.db.models import Case, F, Q, Sum, When
from django.forms.models import _get_foreign_key
from django.http import Http404, HttpResponse, HttpResponseGone
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _, ungettext
from touchtechnology.admin.base import AdminComponent
from touchtechnology.admin.sites import site
from touchtechnology.common.decorators import csrf_exempt_m, staff_login_required_m
from touchtechnology.common.prince import prince
from tournamentcontrol.competition.dashboard import (
BasicResultWidget, DetailResultWidget, MostValuableWidget, ProgressStageWidget,
ScoresheetWidget,
)
from tournamentcontrol.competition.decorators import competition_by_pk_m, registration
from tournamentcontrol.competition.forms import (
ClubAssociationForm, ClubRoleForm, CompetitionForm, DivisionForm, DrawFormatForm,
DrawGenerationFormSet, DrawGenerationMatchFormSet, GroundForm, MatchEditForm,
MatchScheduleFormSet, MatchWashoutFormSet, PersonEditForm, PersonMergeForm,
ProgressMatchesFormSet, ProgressTeamsFormSet, RescheduleDateFormSet,
SeasonAssociationFormSet, SeasonForm, SeasonMatchTimeFormSet, StageForm,
StageGroupForm, TeamAssociationForm, TeamAssociationFormSet, TeamForm, TeamRoleForm,
UndecidedTeamForm, VenueForm,
)
from tournamentcontrol.competition.models import (
Club, ClubAssociation, ClubRole, Competition, Division, DivisionExclusionDate,
DrawFormat, Ground, LadderEntry, LadderSummary, Match, MatchScoreSheet, Person,
Season, SeasonAssociation, SeasonExclusionDate, SeasonMatchTime, SeasonReferee,
SimpleScoreMatchStatistic, Stage, StageGroup, Team, TeamAssociation, TeamRole,
UndecidedTeam, Venue,
)
from tournamentcontrol.competition.sites import CompetitionAdminMixin
from tournamentcontrol.competition.tasks import generate_pdf_scorecards
from tournamentcontrol.competition.utils import (
generate_fixture_grid, generate_scorecards, legitimate_bye_match, match_unplayed,
team_needs_progressing,
)
from tournamentcontrol.competition.wizards import DrawGenerationWizard
# Wait time (presumably seconds) used when generating scorecard PDFs;
# overridable via settings.TOURNAMENTCONTROL_SCORECARD_PDF_WAIT. TODO confirm
# the unit against generate_pdf_scorecards usage.
SCORECARD_PDF_WAIT = getattr(settings, "TOURNAMENTCONTROL_SCORECARD_PDF_WAIT", 5)
# Module-level logger named after this module.
log = logging.getLogger(__name__)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
17268,
198,
11748,
1257,
310,
10141,
198,
11748,
18931,
198,
11748,
10088,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
198,
6738,
42625,
14208,
13,
18211,
1330... | 3.505249 | 762 |
from .core import remove_and_inpaint, bank_of_structuring_elements | [
6738,
764,
7295,
1330,
4781,
62,
392,
62,
259,
79,
2913,
11,
3331,
62,
1659,
62,
7249,
870,
62,
68,
3639
] | 3.142857 | 21 |
import requests
import json
import sys
# NOTE(review): main() is not defined or imported in this module -- running
# the file directly will raise NameError; confirm the intended entry point.
if __name__ == '__main__':
	main()
11748,
7007,
198,
11748,
33918,
198,
11748,
25064,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
12417,
3419
] | 3.04 | 25 |
import unittest
from django.test import TestCase
from django.conf import settings
from django.db import IntegrityError
from icontact.adapter import IContactData, IContactAdapter
class IContactDataTests(TestCase):
    """
    Tests for the IContactData class.
    """
class IContactAdapterTests(TestCase):
    """
    Tests for the IContactAdapter class.
    """
| [
11748,
555,
715,
395,
198,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
39348,
12331,
198,
198,
6738,
14158,
756,
529,
13,
324,
3429,
1330... | 2.876923 | 130 |
# Roll 'n' Jump
# Written in 2020, 2021 by Samuel Arsac, Hugo Buscemi,
# Matteo Chencerel, Rida Lali
# To the extent possible under law, the author(s) have dedicated all
# copyright and related and neighboring rights to this software to the
# public domain worldwide. This software is distributed without any warranty.
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
"""Fichier de test pour score."""
import os
from hypothesis import given
from hypothesis.strategies import characters, integers, text, lists, tuples
import rollnjump.main as main
import rollnjump.conf as cf
import rollnjump.score as scre
# Point the score module at a test-local file so tests never touch the real
# score board.
cf.SCORES = os.path.join(os.path.dirname(__file__), "test_score.txt")
@given(integers())
def test_print(number):
    """Test for the display functions."""
    # Simple smoke calls to the functions: only checks that nothing raises.
    main.initialization(False)
    scre.score(number)
    scre.score_endgame(number)
    # Exercise the winner screen in both supported languages.
    cf.LANG = "fr"
    scre.winner_endgame()
    cf.LANG = "en"
    scre.winner_endgame()
# Hypothesis strategy: characters in 0x30-0x7A (digits through lowercase
# letters), excluding the punctuation that would clash with the separator
# format of the score file.
alphanum_char = characters(min_codepoint=0x30,
                           max_codepoint=0x7A,
                           blacklist_characters=[':', ';', '<',
                                                 '=', '>', '?',
                                                 '@', '[', '\\',
                                                 ']', '^', '_',
                                                 '`'])
# Hypothesis strategy: a (non-negative score, player name) pair.
score_list = tuples(integers(min_value=0), text(alphanum_char))
@given(lists(score_list, min_size=1, max_size=5))
def test_scoreboard(scores):
    """Test for the scoreboard-related functions."""
    scre.init_best_score()
    # Record every (score, name) pair on the board.
    for (score, name) in scores:
        scre.PLAYER = name
        scre.set_best_score(score)
    read_scores = scre.get_scores()
    # The board must return the pairs sorted by descending score.
    scores = list(sorted(scores, key=lambda x: -x[0]))
    assert read_scores == scores
    last_score = scre.get_last_best_score()
    assert last_score == scores[-1][0]
    # Beating the current top score must take over first place.
    scre.PLAYER = scores[0][1]
    assert scre.maj(scores[0][0] + 1)
    assert scre.get_scores()[0] == scores[0]
    # maj(10) presumably reports whether 10 would enter the board -- record
    # it when it does; TODO confirm maj() semantics against the score module.
    for _ in range(5):
        if scre.maj(10):
            scre.set_best_score(10)
    assert not scre.maj(1)
    scre.init_best_score()
@given(lists(text()))
def test_corrupted_board_random(contents):
    """Robustness test: get_scores() must survive a corrupted score file."""
    # Overwrite the score file with arbitrary lines, then just check that
    # reading it back does not raise.
    with open(cf.SCORES, 'w') as board:
        for line in contents:
            board.write(line + '\n')
    scre.get_scores()
def test_corrupted_board():
    """Non-random variant of the corruption test, kept to guarantee coverage."""
    garbage = ["fsdq;0;vd", "s;s", "bcds"]
    # Overwrite the score file with malformed entries.
    with open(cf.SCORES, 'w') as board:
        board.write('\n'.join(garbage) + '\n')
    # A corrupted board must read back as empty, not raise.
    assert scre.get_scores() == []
| [
2,
8299,
705,
77,
6,
15903,
198,
2,
22503,
287,
12131,
11,
33448,
416,
17100,
24230,
330,
11,
25930,
5869,
344,
11632,
11,
198,
2,
38789,
78,
12555,
344,
2411,
11,
371,
3755,
406,
7344,
198,
2,
1675,
262,
6287,
1744,
739,
1099,
11... | 2.209412 | 1,275 |
#!/usr/bin/python3
""" class Check module
Functions:
inherits_from: checks inheritance
"""
def inherits_from(obj, a_class):
    """ Returns booleanType response
        for class inheritance test:
        True when obj is an instance of a strict subclass of a_class,
        False otherwise (including when type(obj) is exactly a_class)
    Args:
        obj: object to evaluate
        a_class: class value for testing
    """
    return type(obj) != a_class and isinstance(obj, a_class)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
37811,
1398,
6822,
8265,
198,
220,
220,
220,
40480,
25,
198,
220,
220,
220,
220,
220,
220,
220,
10639,
896,
62,
6738,
25,
8794,
24155,
198,
37811,
628,
198,
4299,
10639,
896,
62,
6738,
... | 2.390805 | 174 |
'''
Handy extensions for documentation
'''
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from docutils.writers.latex2e import LaTeXTranslator
# LaTeX snippet for a right-aligned floating figure.  The two %s slots are
# filled with (figure width as a fraction of \textwidth, graphics file path);
# the inner minipage is 5pt narrower to leave a horizontal gutter.
FloaterTemplate = '''
\\setlength{\\scratchlengthouter}{%s\\textwidth}
\\setlength{\\scratchlengthinner}{\\scratchlengthouter}
\\addtolength{\\scratchlengthinner}{-5pt}
\\begin{floatingfigure}[r]{\\scratchlengthouter}
\\noindent\\begin{minipage}{\\scratchlengthouter}\\hspace{5pt}\\includegraphics[width=\\scratchlengthinner]{%s}
\\end{minipage}
\\end{floatingfigure}
'''
| [
7061,
6,
198,
220,
220,
220,
7157,
88,
18366,
329,
10314,
198,
7061,
6,
198,
198,
6738,
2205,
26791,
1330,
13760,
198,
6738,
2205,
26791,
13,
79,
945,
364,
13,
81,
301,
1330,
34819,
11,
34736,
198,
6738,
2205,
26791,
13,
34422,
13,
... | 2.88601 | 193 |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# file: cop.py
import re
from collections import OrderedDict
from .romanizer import romanizer
# The Coptic script has distinct upper/lower case forms, so the romanizer
# must generate mappings for both cases.
has_capitals = True
# Ordered mapping: key -> letter(s), display name, phonetic segment,
# Latin transliteration and traditional numeric order.
data = OrderedDict()
# http://en.wikipedia.org/wiki/Coptic_alphabet
# letters from ⲁ to ⲑ (1 - 9)
# alef:http://en.wiktionary.org/wiki/
data['alpha'] = dict(letter=[u'ⲁ'], name=u'ⲁ', segment='vowel', subsegment='', transliteration=u'a', order=1)
# beth:http://en.wiktionary.org/wiki/
data['beth'] = dict(letter=[u'ⲃ'], name=u'ⲃ', segment='consonant', subsegment='', transliteration=u'b', order=2)
# gimel:http://en.wiktionary.org/wiki/
data['gamma'] = dict(letter=[u'ⲅ'], name=u'ⲅ', segment='consonant', subsegment='', transliteration=u'g', order=3)
# daleth:http://en.wiktionary.org/wiki/
data['delta'] = dict(letter=[u'ⲇ'], name=u'ⲇ', segment='consonant', subsegment='', transliteration=u'd', order=4)
# he:http://en.wiktionary.org/wiki/
# (fixed: name accidentally contained a stray Hebrew yod, u'ⲉי')
data['ei'] = dict(letter=[u'ⲉ'], name=u'ⲉ', segment='vowel', subsegment='', transliteration=u'e', order=5)
# vau:http://en.wikipedia.org/wiki/
data['so'] = dict(letter=[u'ⲋ'], name=u'ⲋ', segment='numeral', subsegment='', transliteration=u'w', order=6)
# zayin:http://en.wiktionary.org/wiki/
data['zeta'] = dict(letter=[u'ⲍ'], name=u'ⲍ', segment='consonant', subsegment='', transliteration=u'z', order=7)
# heth:http://en.wiktionary.org/wiki/
data['eta'] = dict(letter=[u'ⲏ'], name=u'ⲏ', segment='vowel', subsegment='', transliteration=u'ê', order=8)
# teth:http://en.wiktionary.org/wiki/
data['theta'] = dict(letter=[u'ⲑ'], name=u'ⲑ', segment='consonant', subsegment='', transliteration=u'h', order=9)
# letters from ⲓ to ϥ (10 - 90)
# yod:http://en.wiktionary.org/wiki/
data['yota'] = dict(letter=[u'ⲓ'], name=u'ⲓ', segment='vowel', subsegment='', transliteration=u'i', order=10)
# kaph:http://en.wiktionary.org/wiki/
data['kappa'] = dict(letter=[u'ⲕ'], name=u'ⲕ', segment='consonant', subsegment='', transliteration=u'k', order=11)
# lamed:http://en.wiktionary.org/wiki/
data['lambda'] = dict(letter=[u'ⲗ'], name=u'ⲗ', segment='consonant', subsegment='', transliteration=u'l', order=12)
# mem:http://en.wiktionary.org/wiki/
data['me'] = dict(letter=[u'ⲙ'], name=u'ⲙ', segment='consonant', subsegment='', transliteration=u'm', order=13)
# num:http://en.wiktionary.org/wiki/
data['ne'] = dict(letter=[u'ⲛ'], name=u'ⲛ', segment='consonant', subsegment='', transliteration=u'n', order=14)
# samekh:http://en.wiktionary.org/wiki/
data['eksi'] = dict(letter=[u'ⲝ'], name=u'ⲝ', segment='consonant', subsegment='', transliteration=u'x', order=15)
# ayin:http://en.wiktionary.org/wiki/
data['o'] = dict(letter=[u'ⲟ'], name=u'ⲟ', segment='consonant', subsegment='', transliteration=u'o', order=16)
# pe:http://en.wiktionary.org/wiki/
data['pi'] = dict(letter=[u'ⲡ'], name=u'ⲡ', segment='consonant', subsegment='', transliteration=u'p', order=17)
# tsade:http://en.wikipedia.org/wiki/
data['fay'] = dict(letter=[u'ϥ'], name=u'ϥ', segment='numeral', subsegment='', transliteration=u'q', order=18)
# letters from ⲣ to ⳁ (100 - 900)
# resh:http://en.wiktionary.org/wiki/
data['ro'] = dict(letter=[u'ⲣ'], name=u'ⲣ', segment='consonant', subsegment='', transliteration=u'r', order=19)
# shin:http://en.wiktionary.org/wiki/
data['sima'] = dict(letter=[u'ⲥ'], name=u'ⲥ', segment='consonant', subsegment='', transliteration=u's', order=20)
# tau:http://en.wiktionary.org/wiki/
# (fixed: name accidentally contained a stray Hebrew vav, u'ⲧו')
data['taw'] = dict(letter=[u'ⲧ'], name=u'ⲧ', segment='consonant', subsegment='', transliteration=u't', order=21)
# final_tsade:http://en.wiktionary.org/wiki/Tsade
data['epsilon'] = dict(letter=[u'ⲩ'], name=u'ⲩ', segment='vowel', subsegment='', transliteration=u'u', order=22)
# final_kaph:http://en.wiktionary.org/wiki/
data['fi'] = dict(letter=[u'ⲫ'], name=u'ⲫ', segment='consonant', subsegment='', transliteration=u'f', order=23)
# final_mem, chi:http://en.wiktionary.org/wiki/
data['khe'] = dict(letter=[u'ⲭ'], name=u'ⲭ', segment='consonant', subsegment='', transliteration=u'c', order=24)
# final_nun:http://en.wiktionary.org/wiki/
data['epsi'] = dict(letter=[u'ⲯ'], name=u'ⲯ', segment='consonant', subsegment='', transliteration=u'y', order=25)
# final_pe:http://en.wiktionary.org/wiki/
data['ou'] = dict(letter=[u'ⲱ'], name=u'ⲱ', segment='vowel', subsegment='', transliteration=u'ô', order=26)
# final_tsade:http://en.wiktionary.org/wiki/Tsade
data['nine'] = dict(letter=[u'ⳁ'], name=u'ⳁ', segment='numeral', subsegment='', transliteration=u'j', order=27)

r = romanizer(data, has_capitals)

# collect coptic and transliteration letters from data dictionary for preprocessing function
letters = ''.join([''.join(d['letter'])+d['transliteration']+''.join(d['letter']).upper()+d['transliteration'].upper() for key, d in data.items()])
regex = re.compile('[^%s ]+' % letters)
regex2 = re.compile('[^%s\s]' % ''.join([''.join(d['letter'])+''.join(d['letter']).upper() for key, d in data.items()]))
def filter(string):
    """Replace every character that is not a Coptic letter or whitespace
    with a single space.

    NOTE: intentionally shadows the built-in ``filter`` (kept for
    backward compatibility with existing callers).

    :param string: text to clean
    :return: the cleaned string
    """
    cleaned = regex2.sub(' ', string)
    return cleaned
def preprocess(string):
    """Drop every character that is neither a Coptic/transliteration
    letter nor a plain space (diacritics and other specials included).

    :param string: text to normalise
    :return: the stripped string
    """
    stripped = regex.sub('', string)
    return stripped
def convert(string, sanitize=False):
    """Swap characters between the Coptic script and its transliteration
    (and vice versa).

    :param sanitize: when True, run ``preprocess`` over the input first
    :param string: text to convert
    :return: the converted string
    """
    cleaner = preprocess if sanitize else False
    return r.convert(string, cleaner)
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2393,
25,
2243,
13,
9078,
198,
198,
11748,
302,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
764,
... | 2.419844 | 2,308 |
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Component registry export / import support unit tests.
"""
import unittest
from AccessControl.class_init import InitializeClass
from AccessControl.SecurityInfo import ClassSecurityInfo
from Acquisition import aq_base
from OFS.Folder import Folder
from OFS.SimpleItem import SimpleItem
from Products.Five.component import enableSite
from Products.Five.component.interfaces import IObjectManagerSite
from zope.component import getGlobalSiteManager
from zope.component import getMultiAdapter
from zope.component import getSiteManager
from zope.component import handle
from zope.component import queryAdapter
from zope.component import queryUtility
from zope.component import subscribers
from zope.component.globalregistry import base
from zope.component.hooks import clearSite
from zope.component.hooks import setHooks
from zope.component.hooks import setSite
from zope.interface import Interface
from zope.interface import implementer
from ..interfaces import IBody
from ..interfaces import IComponentsHandlerBlacklist
from ..testing import BodyAdapterTestCase
from ..testing import DummySetupEnviron
from ..testing import ExportImportZCMLLayer
from ..tests.common import DummyImportContext
# ``PersistentComponents`` only exists when five.localsitemanager is
# installed; fall back to None so dependent code can be skipped.
try:
    from five.localsitemanager.registry import PersistentComponents
except ImportError:
    # Avoid generating a spurious dependency
    PersistentComponents = None
class IDummyInterface(Interface):
    """A dummy interface used as a registration target in these tests."""
    def verify():
        """Returns True."""
class IDummyInterface2(Interface):
    """A second dummy interface, distinct from IDummyInterface."""
    def verify():
        """Returns True."""
@implementer(IDummyInterface)
class DummyUtility(object):
    """A dummy utility providing IDummyInterface."""
class IAnotherDummy(Interface):
    """A third dummy interface."""
    def inc():
        """Increments handle count"""
class IAnotherDummy2(Interface):
    """A fourth dummy interface (provided by DummyAdapter)."""
    def verify():
        """Returns True."""
@implementer(IAnotherDummy)
class DummyObject(object):
    """A dummy object to pass to the handler."""
    # number of times the event handler has been invoked
    handled = 0
@implementer(IAnotherDummy2)
class DummyAdapter(object):
    """A dummy adapter providing IAnotherDummy2."""
def dummy_handler(context):
    """A dummy event handler: increments the context's handle count."""
    context.inc()
@implementer(IDummyInterface)
class DummyTool(SimpleItem):
    """A dummy tool."""
    # id/meta_type identify the tool when stored in the OFS folder
    id = 'dummy_tool'
    meta_type = 'dummy tool'
    security = ClassSecurityInfo()
    @security.public
# NOTE(review): the method decorated by @security.public appears to have been
# stripped from this copy of the file; as shown, the decorator dangles.
InitializeClass(DummyTool)
@implementer(IDummyInterface2)
class DummyTool2(SimpleItem):
    """A second dummy tool."""
    id = 'dummy_tool2'
    meta_type = 'dummy tool2'
    security = ClassSecurityInfo()
    @security.public
# NOTE(review): the method decorated by @security.public appears to have been
# stripped from this copy of the file; as shown, the decorator dangles.
InitializeClass(DummyTool2)
@implementer(IComponentsHandlerBlacklist)
class DummyBlacklist(object):
    """A dummy IComponentsHandlerBlacklist implementation."""
_COMPONENTS_BODY = b"""\
<?xml version="1.0" encoding="utf-8"?>
<componentregistry>
<adapters>
<adapter factory="Products.GenericSetup.tests.test_components.DummyAdapter"
for="zope.interface.Interface"
provides="Products.GenericSetup.tests.test_components.IAnotherDummy2"/>
<adapter name="foo"
factory="Products.GenericSetup.tests.test_components.DummyAdapter"
for="zope.interface.Interface"
provides="Products.GenericSetup.tests.test_components.IAnotherDummy2"/>
</adapters>
<subscribers>
<subscriber
factory="Products.GenericSetup.tests.test_components.DummyAdapter"
for="Products.GenericSetup.tests.test_components.IAnotherDummy"
provides="Products.GenericSetup.tests.test_components.IAnotherDummy2"/>
<subscriber for="Products.GenericSetup.tests.test_components.IAnotherDummy"
handler="Products.GenericSetup.tests.test_components.dummy_handler"/>
</subscribers>
<utilities>
<utility factory="Products.GenericSetup.tests.test_components.DummyUtility"
id="dummy_utility"
interface="Products.GenericSetup.tests.test_components.IDummyInterface"/>
<utility name="dummy tool name"
interface="Products.GenericSetup.tests.test_components.IDummyInterface"
object="dummy_tool"/>
<utility name="dummy tool name2"
interface="Products.GenericSetup.tests.test_components.IDummyInterface2"
object="dummy_tool2"/>
<utility name="foo"
factory="Products.GenericSetup.tests.test_components.DummyUtility"
interface="Products.GenericSetup.tests.test_components.IDummyInterface2"/>
</utilities>
</componentregistry>
"""
_REMOVE_IMPORT = b"""\
<?xml version="1.0" encoding="utf-8"?>
<componentregistry>
<adapters>
<adapter factory="Products.GenericSetup.tests.test_components.DummyAdapter"
provides="Products.GenericSetup.tests.test_components.IAnotherDummy2"
for="*" remove="True"/>
</adapters>
<subscribers>
<subscriber
factory="Products.GenericSetup.tests.test_components.DummyAdapter"
for="Products.GenericSetup.tests.test_components.IAnotherDummy"
provides="Products.GenericSetup.tests.test_components.IAnotherDummy2"
remove="True"/>
<subscriber
for="Products.GenericSetup.tests.test_components.IAnotherDummy"
handler="Products.GenericSetup.tests.test_components.dummy_handler"
remove="True"/>
</subscribers>
<utilities>
<utility id="dummy_utility"
factory="Products.GenericSetup.tests.test_components.DummyUtility"
interface="Products.GenericSetup.tests.test_components.IDummyInterface"
remove="True"/>
<utility name="dummy tool name"
interface="Products.GenericSetup.tests.test_components.IDummyInterface"
object="dummy_tool" remove="True"/>
<utility name="foo"
factory="Products.GenericSetup.tests.test_components.DummyUtility"
interface="Products.GenericSetup.tests.test_components.IDummyInterface2"
remove="True"/>
</utilities>
</componentregistry>
"""
if PersistentComponents is not None:
else:
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
198,
2,
15069,
357,
66,
8,
4793,
1168,
3008,
5693,
290,
25767,
669,
13,
198,
2,
198,
2,
770,
3788,
318,
2426,
284,
262,
8617,
286,
262,
1168,
3008,
5094,
13789,
11,
198,
2,
10628,
362,
13,
... | 3.164933 | 2,019 |
import logging
import numpy as np
from spike_swarm_sim.register import decoding_registry, decoders
from spike_swarm_sim.utils import softmax, sigmoid, tanh
from spike_swarm_sim.globals import global_states
from spike_swarm_sim.algorithms.interfaces import GET, SET, LEN, INIT
class DecodingWrapper:
    """ Wrapper for gathering all the decoders of each ANN output.
    It contains a dict mapping output names to Decoder instances.
    =============================================================================
    - Params:
        topology [dict] : configuration dict of the overall topology.
    - Attributes:
        motor_ensembles [dict] : dict mapping motor ensembles to num of neurons.
        decoding_config [dict] : config. dict of the decoding (extracted from
                topology config.).
        act_ens_map [dict] : dict mapping output names to motor ensemble names.
    =============================================================================
    """
    # NOTE(review): self._decoders is presumably initialized in __init__
    # (not visible in this excerpt) and populated by build().
    def step(self, spikes):
        """ Steps all the decoders with the corresponding spikes or activities. """
        # The spike matrix must provide one column per motor neuron across
        # all motor ensembles; otherwise the column slicing below is wrong.
        if spikes.shape[1] != sum(tuple(self.motor_ensembles.values())):
            # BUGFIX: the original raised Exception(logging.error(...));
            # logging.error returns None, so the exception carried no message.
            msg = ('The dim. of spikes to be decoded must be the same '
                   'as the number of motor neurons.')
            logging.error(msg)
            raise Exception(msg)
        actions = {}
        current_idx = 0
        for name, decoder in self._decoders.items():
            # Slice the columns belonging to this decoder's motor ensemble.
            dec_spikes = spikes[:, current_idx : current_idx + self.motor_ensembles[self.act_ens_map[name]]]
            actions.update({name : decoder.step(dec_spikes)})
            current_idx += self.motor_ensembles[self.act_ens_map[name]]
        return actions

    def build(self, topology):
        """ Builds the decoders using the topology config. dict. """
        # Map each output's motor ensemble to its neuron count.
        self.motor_ensembles = {out['ensemble'] : topology['ensembles'][out['ensemble']]['n']\
                        for out in topology['outputs'].values()}
        self.decoding_config = topology['decoding'].copy()
        self.act_ens_map = {out_name : out['ensemble'] for out_name, out in topology['outputs'].items()}
        # Instantiate one Decoder per entry of the 'decoding' section, looked
        # up in the registry by its 'scheme' name.
        for decoder_name, decoder in topology['decoding'].items():
            self._decoders.update({decoder_name : decoders[decoder['scheme']]({
                    self.act_ens_map[decoder_name] : self.motor_ensembles[self.act_ens_map[decoder_name]]},\
                    **decoder['params'])})

    @property
    def all(self):
        """ Return all the decoders. """
        return self._decoders

    def get(self, key):
        """ Return the decoder corresponding to key. """
        if key not in self._decoders.keys():
            # BUGFIX: same logging.error-returns-None pattern as in step().
            msg = 'Decoder corresponding to key {} does not exist.'.format(key)
            logging.error(msg)
            raise Exception(msg)
        return self._decoders[key]

    def reset(self):
        """ Resets all the decoders. """
        for decoder in self._decoders.values():
            decoder.reset()
# The GET/SET/INIT/LEN interface decorators expose the decoding weights to
# the optimization algorithms under the query name 'decoders:weights'.
@GET('decoders:weights')
@SET('decoders:weights')
@INIT('decoders:weights')
@LEN('decoders:weights')
class Decoder:
    """ Base class for decoding ANN outputs (spikes, activities, ...)
    into actions.
    ===================================================================
    - Params :
        out_ensembles [dict] : dict mapping output/motor ensembles to
            number of neurons.
        trainable [bool] : whether there are trainable/optimizable
            variables in the decoder. # TODO check
        is_cat [bool] : whether the decoded actions are categorical or
            numerical.
    ===================================================================
    """
@decoding_registry
class IdentityDecoding(Decoder):
    """ Identity (dummy) decoding of activities (not supported for spikes).
    It outputs the same value as the input. If time_scale > 1 then it returns the
    input value at the last time step.
    """
@decoding_registry
class ThresholdDecoding(Decoder):
    """ Decodes activities into binary actions by means of applying a
    heaviside function.
    ====================================================================
    - Args:
        threshold [float] : cut-off value of the heaviside step.
    """
@decoding_registry
class ArgmaxDecoding(Decoder):
    """ #TODO
    NOTE(review): implementation not visible in this excerpt; presumably
    selects the most active output — confirm against the full source.
    """
@decoding_registry
class SoftmaxDecoding(Decoder):
    """ Decodes activities into categorical actions by transforming activities
    into probabilities with softmax and sampling the action with a categorical
    dist.
    """
@decoding_registry
class FirstToSpike(Decoder):
    """ Temporal decoding process in which the selected action in a time
    window is defined by the output neuron that spiked first.
    It is used when the action is categorical and low dimensional.
    """
@decoding_registry
class RankOrderDecoding(Decoder):
    # TODO: Not tested. May produce errors
    """ Temporal decoding process in which the selected action within a time window is defined
    based on a linear combination of the order of neuron spikes.
    """
@decoding_registry
class LinearPopulationDecoding(Decoder):
    """ Decodes spikes as a linear transformation of the computed activities of
    each motor neuron. More specifically, it is computed as follows:
                    -- actions = sigmoid(3 * W.dot(u(t)) --
    where W is a weight matrix and u(t) is the vector of activities/rates computed
    as the filtering of the spike trains.
    The weights are normally optimized and can be addressed by the evolutionary alg.
    with the query "decoders:weights:all".
    ====================================================================================
    - Params:
        num_outputs [int] : number of output actions.
        tau_decay [float] : decaying time constant of the neuron's activity after spike.
        tau_rise [float] : rise time constant of the neuron's activity after spike.
    - Attributes:
        dt [float] : Euler step to compute the activities.
        activities [np.ndarray] : current activities of the neurons of the ensemble.
        decoded_activities [np.ndarray] : decoded activities.
        x [np.ndarray] : auxiliary variable to filter spikes.
        w [np.ndarray] : flattened or vectorized decoding weight matrix.
        action_recodings [np.ndarray] : Data recodings of the decoded activities.
                # TODO: Provisional, pasar a monitor.
    ====================================================================================
    """
@property
| [
11748,
18931,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
20240,
62,
2032,
1670,
62,
14323,
13,
30238,
1330,
39938,
62,
2301,
4592,
11,
875,
375,
364,
201,
198,
6738,
20240,
62,
2032,
1670,
62,
14323,
13,
26791,
1330,
270... | 2.746071 | 2,418 |
# -*- coding: utf-8 -*-
"""Sphinx extension for documenting the GraphQL schema."""
# pylint: disable=import-outside-toplevel
from typing import TYPE_CHECKING, List
from graphql.utils.schema_printer import print_schema
from .main import SCHEMA
if TYPE_CHECKING:
from docutils.nodes import literal_block
from sphinx.application import Sphinx
def setup(app: "Sphinx") -> None:
    """Register the ``aiida-graphql-schema`` directive with Sphinx."""
    from docutils.nodes import Element, literal_block
    from sphinx.util.docutils import SphinxDirective

    class SchemaDirective(SphinxDirective):
        """Directive rendering the GraphQL schema as a literal block."""

        def run(self) -> List[Element]:
            """Build and return the schema node."""
            schema_text = print_schema(SCHEMA)
            # TODO for lexing tried: https://gitlab.com/marcogiusti/pygments-graphql/-/blob/master/src/pygments_graphql.py
            # but it failed
            node = literal_block(schema_text, schema_text)  # , language="graphql")
            self.set_source_info(node)
            return [node]

    app.add_directive("aiida-graphql-schema", SchemaDirective)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
50,
746,
28413,
7552,
329,
33045,
262,
29681,
9711,
32815,
526,
15931,
198,
2,
279,
2645,
600,
25,
15560,
28,
11748,
12,
43435,
12,
83,
643,
626,
198,
6738,
197... | 2.542141 | 439 |
# The national swimming confederation needs a program that reads an athlete's
# birth year and prints the category according to age:
# - up to 9 years: MIRIM
# - up to 14 years: INFANTIL
# - up to 19 years: JUNIOR
# - up to 20 years: SENIOR
# - above: MASTER
from datetime import date

current_year = date.today().year
birth_year = int(input('Digite o ano em que você nasceu!!!'))
age = current_year - birth_year
if age <= 9:
    print('Quem nasceu em {} tem {} anos e é um atleta MIRIM'.format(birth_year, age))
elif age <= 14:
    # fixed typo in the message: 'alteta' -> 'atleta'
    print('Quem nasceu em {} tem {} anos e é atleta INFANTIL'.format(birth_year, age))
elif age <= 19:
    print('Quem nasceu em {} tem {} anos e é atleta JÚNIOR'.format(birth_year, age))
elif age <= 20:
    # fixed bound: was '<= 25', contradicting the spec ('up to 20: SENIOR')
    print('Quem nasceu em {} tem {} anos e é atleta SÊNIOR'.format(birth_year, age))
else:
    # added the missing word 'anos' to match the other messages
    print('Quem nasceu em {} tem {} anos e é atleta MASTER'.format(birth_year, age))
2,
32,
1013,
5702,
64,
16175,
28749,
299,
330,
1538,
390,
299,
1045,
16175,
28749,
3718,
9160,
390,
23781,
1430,
64,
8358,
443,
544,
267,
281,
78,
390,
299,
3372,
3681,
78,
390,
23781,
379,
1616,
64,
201,
198,
2,
68,
749,
260,
424... | 2.304233 | 378 |
"""
[References]
- https://stackoverflow.com/questions/8505651/non-repetitive-random-number-in-numpy
- https://algorithmist.com/wiki/Modular_inverse
- https://stackoverflow.com/questions/16044553/solving-a-modular-equation-python
"""
import random
import numpy as np
def get_bitwidth(n):
    """Calculate the bit width (size) for a given number of data elements.

    The result is the smallest power-of-two number of bits that is at least
    log2(2*n), i.e. enough bits to index 2*n items, rounded up to a power
    of two.
    """
    exponent = np.ceil(np.log2(np.log2(2 * n)))
    return int(2 ** exponent)
def get_bytewidth(n):
    """Calculate the byte width (size) for a given number of data elements.

    Simply the corresponding bit width expressed in whole bytes.
    """
    bits = get_bitwidth(n)
    return int(bits // 8)
| [
37811,
198,
58,
19927,
60,
198,
12,
3740,
1378,
25558,
2502,
11125,
13,
785,
14,
6138,
507,
14,
25764,
20,
40639,
14,
13159,
12,
260,
6449,
1800,
12,
25120,
12,
17618,
12,
259,
12,
77,
32152,
198,
12,
3740,
1378,
282,
7727,
37980,
... | 2.595455 | 220 |
import numpy as np
import pytest
from lab2.ball_window import BallWindow
# checks if the dimension of the ball window is correct
# NOTE(review): none of the parametrize/fixture decorators below is followed
# by a test function in this excerpt — the decorated defs appear to have
# been stripped; as shown, each decorator dangles.
@pytest.mark.parametrize(
    "center, expected",
    [
        (np.array([0, 5]), 2),
        (np.array([2.5]), 1),
        (np.array([0, 5, 6]), 3),
    ],
)
# checks if the volume of the ball window is correct
@pytest.mark.parametrize(
    "center, radius, expected",
    [
        (np.array([0, 5]), 2, np.pi * 4),
        (np.array([2.5]), 3, 6),
        (np.array([0, 5, 6]), 2, (4 / 3) * np.pi * 2 ** 3),
    ],
)
# checks if, for the ball_2d, the point is in the ball window
@pytest.fixture
@pytest.mark.parametrize(
    "point, expected",
    [
        (np.array([0, 0]), True),
        (np.array([2.5, 2.5]), True),
        (np.array([10, 3]), False),
    ],
)
# checks if for the ball_2d, the point is in the ball window. Returns 1 if it is the case, 0 otherwise.
@pytest.fixture
@pytest.mark.parametrize(
    "point, expected",
    [
        (np.array([0, 0]), 1),
        (np.array([2.5, 2.5]), 1),
        (np.array([10, 3]), 0),
    ],
)
# checks if the point(s) taken randomly is in the ball window
@pytest.mark.parametrize(
    "center, radius, expected",
    [
        (np.array([0, 0]), 5, True),
        (np.array([1]), 3, True),
        (np.array([0, 0, 3]), 1, True),
    ],
)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
6738,
2248,
17,
13,
1894,
62,
17497,
1330,
6932,
27703,
628,
198,
198,
2,
8794,
611,
262,
15793,
286,
262,
2613,
4324,
318,
3376,
198,
31,
9078,
9288,
13,
4102,
13,
1... | 2.143541 | 627 |
import datetime
# date, datetime, time and timezone objects share these common traits:
# 1. objects of these types are immutable; 2. they are hashable, so they
# can be used as dictionary keys; 3. they support efficient pickling via
# the pickle module.
print(datetime.MINYEAR)
print(datetime.MAXYEAR)
print(datetime.date(2020, 5, 7))
print(datetime.time(20, 5, 7, 123000))
print(datetime.datetime(2020, 5, 7, 20, 5, 7))
print(datetime.datetime.now()) # now is a @classmethod, so no instance is needed
print(datetime.datetime.utcnow())
print(datetime.datetime(2020, 5, 7, 20, 5, 8) - datetime.datetime(2020, 5, 7, 20, 5, 7))
print(datetime.datetime.now().strftime("%A"))
print(datetime.datetime.strptime('2020-01-01 0:0:0', '%Y-%m-%d %H:%M:%S'))
# Using datetime.strptime(date_string, format) is equivalent to:
# datetime(*(time.strptime(date_string, format)[0:6]))
day20 = datetime.datetime.strptime('2021-01-01 0:0:0', '%Y-%m-%d %H:%M:%S')
nowdate = datetime.datetime.today()
dela = day20 - nowdate
day = dela.days
hour = int(dela.seconds / 60 / 60)
minute = int((dela.seconds - hour * 60 * 60) / 60)
second = dela.seconds - hour * 60 * 60 - minute * 60
print('到2021年元旦还有:' + str(day) + '天' + str(hour) + '小时' + str(minute) + '分' + str(second) + '秒')
print("----------")
from datetime import datetime
from datetime import timedelta
# 1) get the current date and time
today = datetime.today() # NOTE(review): the original comment claimed h/m/s are zero; datetime.today() actually returns the full current time
print("当前时间")
print(today)
today1 = datetime.now() # returns the current date and time
# now.hour # hour
# now.minute # minute
# now.isoweekday() returns 1-7 for Monday..Sunday
# now.weekday() returns 0-6 for Monday..Sunday
# whereas in strftime format, %w yields 1-6 for Monday..Saturday and 0 for Sunday
print(today1)
today2 = datetime.utcnow() # returns UTC, i.e. 8 hours behind China (UTC+8) local time
print(today2)
# 2) build a specific datetime and do add/subtract arithmetic
time = datetime(2019, 5, 12, 12, 13, 14)
d = time + timedelta(weeks=0, days=0, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0, )
# the fields are, in order: weeks, days, hours, minutes, seconds, milliseconds, microseconds
print(time)
print(d)
time1 = "2019-5-12 12:13:14" # date given as a string
d1 = datetime.strptime(str(time1), '%Y-%m-%d %H:%M:%S')
plus = d1 + timedelta(days=1) # add one day
minus = d1 - timedelta(days=1) # subtract one day
print(time1)
print(d1)
print(plus)
print(minus)
time2 = 20190512121314
d2 = datetime.strptime(str(time2), '%Y%m%d%H%M%S')
delta = d2 + timedelta(days=1)
print(time2)
print(d2)
print(delta)
# 3) datetime <-> timestamp conversion
# NOTE(review): ``time`` here is the datetime built above, not the time module
now_stamp = time.timestamp()
print('指定时间对应时间戳 :', now_stamp)
print('对应本地时间 :', datetime.fromtimestamp(now_stamp))
print('UTC标准时间 :', datetime.utcfromtimestamp(now_stamp))
print('本周的第几天:', datetime.fromtimestamp(now_stamp).weekday())
# 4) convert a datetime to a str
now = datetime.now()
print('当前时间 :', now)
print(now.strftime('%Y%m%d%H%M%S'))
import pytz
print(datetime(2011, 11, 11, 0, 0, 0, tzinfo=pytz.utc))
print(datetime(2011, 11, 11, 0, 0, 0, tzinfo=pytz.timezone("Asia/Shanghai")))
from datetime import datetime, timedelta, timezone
print(datetime.utcnow())
utc_dt = datetime.utcnow().replace(tzinfo=timezone.utc)
print(utc_dt)
cn_dt = utc_dt.astimezone(timezone(timedelta(hours=8)))
print(cn_dt)
jan_dt = utc_dt.astimezone(timezone(timedelta(hours=9)))
print(jan_dt)
cn_2_jan_dt = cn_dt.astimezone(timezone(timedelta(hours=9)))
print(cn_2_jan_dt)
| [
11748,
4818,
8079,
198,
198,
2,
3128,
11,
4818,
8079,
11,
640,
10263,
240,
234,
640,
11340,
13328,
109,
119,
161,
252,
233,
17739,
109,
12859,
104,
32573,
247,
12859,
249,
34460,
248,
18796,
101,
31965,
117,
45250,
100,
25,
352,
13,
... | 1.678232 | 1,787 |
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from os import path
import numpy as np
from wisdem.ccblade.ccblade import CCBlade, CCAirfoil
#
# Omega is fixed at 0 so no need to run derivatives test
#
if __name__ == "__main__":
    # NOTE(review): suite() is not defined in this excerpt; it is presumably
    # provided earlier in the full file (stripped from this copy).
    unittest.TextTestRunner().run(suite())
| [
37811,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
1639,
743,
7330,
257,
4866,
286,
262,
13789,
379,... | 3.418103 | 232 |
import json
from jwt import encode
import time
import requests
import logging
from os import getenv
| [
11748,
33918,
198,
6738,
474,
46569,
1330,
37773,
198,
11748,
640,
198,
11748,
7007,
198,
11748,
18931,
198,
6738,
28686,
1330,
651,
24330,
628,
628,
628
] | 4.038462 | 26 |
from .mnist import get_mnist
from .mnistm import get_mnistm
from .svhn import get_svhn

# __all__ must contain *names* (strings): the original tuple of function
# objects breaks ``from package import *``, which requires string entries.
__all__ = ("get_mnist", "get_mnistm", "get_svhn")
| [
6738,
764,
10295,
396,
1330,
651,
62,
10295,
396,
198,
6738,
764,
10295,
396,
76,
1330,
651,
62,
10295,
396,
76,
198,
6738,
764,
21370,
21116,
1330,
651,
62,
21370,
21116,
198,
198,
834,
439,
834,
796,
357,
1136,
62,
10295,
396,
11,... | 2.4 | 55 |
from TwitchPlay import TwitchPlay
import sys

# Usage: start.py [mode] — exactly one positional argument is required.
if len(sys.argv) != 2:
    print("wrong number of arguments")
    print("usage: start.py [mode]")
    # sys.exit raises SystemExit and works even when the ``site`` module
    # (which provides the bare ``exit`` helper) is disabled.
    sys.exit(1)

app = TwitchPlay()
app.start(sys.argv[1])
| [
6738,
23835,
11002,
1330,
23835,
11002,
198,
11748,
25064,
198,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
14512,
362,
1058,
198,
220,
220,
220,
3601,
7203,
36460,
1271,
286,
7159,
4943,
198,
220,
220,
220,
3601,
7203,
26060,
25,
923,... | 2.653333 | 75 |
# Generated by Django 3.0.8 on 2020-09-19 17:16
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
23,
319,
12131,
12,
2931,
12,
1129,
1596,
25,
1433,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |