content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
# MP3 stream header information support for Mutagen.
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
"""MPEG audio stream information and tags."""
import os
import struct
from ._compat import endswith
from mutagen import StreamInfo
from mutagen._util import MutagenError
from mutagen.id3 import ID3FileType, BitPaddedInt, delete
__all__ = ["MP3", "Open", "delete", "MP3"]
# Mode values.
STEREO, JOINTSTEREO, DUALCHANNEL, MONO = range(4)
class MPEGInfo(StreamInfo):
"""MPEG audio stream information
Parse information about an MPEG audio file. This also reads the
Xing VBR header format.
This code was implemented based on the format documentation at
http://mpgedit.org/mpgedit/mpeg_format/mpeghdr.htm.
Useful attributes:
* length -- audio length, in seconds
* bitrate -- audio bitrate, in bits per second
* sketchy -- if true, the file may not be valid MPEG audio
Useless attributes:
* version -- MPEG version (1, 2, 2.5)
* layer -- 1, 2, or 3
* mode -- One of STEREO, JOINTSTEREO, DUALCHANNEL, or MONO (0-3)
* protected -- whether or not the file is "protected"
* padding -- whether or not audio frames are padded
* sample_rate -- audio sample rate, in Hz
"""
# Map (version, layer) tuples to bitrates.
__BITRATE = {
(1, 1): [0, 32, 64, 96, 128, 160, 192, 224,
256, 288, 320, 352, 384, 416, 448],
(1, 2): [0, 32, 48, 56, 64, 80, 96, 112, 128,
160, 192, 224, 256, 320, 384],
(1, 3): [0, 32, 40, 48, 56, 64, 80, 96, 112,
128, 160, 192, 224, 256, 320],
(2, 1): [0, 32, 48, 56, 64, 80, 96, 112, 128,
144, 160, 176, 192, 224, 256],
(2, 2): [0, 8, 16, 24, 32, 40, 48, 56, 64,
80, 96, 112, 128, 144, 160],
}
__BITRATE[(2, 3)] = __BITRATE[(2, 2)]
for i in range(1, 4):
__BITRATE[(2.5, i)] = __BITRATE[(2, i)]
# Map version to sample rates.
__RATES = {
1: [44100, 48000, 32000],
2: [22050, 24000, 16000],
2.5: [11025, 12000, 8000]
}
sketchy = False
def __init__(self, fileobj, offset=None):
"""Parse MPEG stream information from a file-like object.
If an offset argument is given, it is used to start looking
for stream information and Xing headers; otherwise, ID3v2 tags
will be skipped automatically. A correct offset can make
loading files significantly faster.
"""
try:
size = os.path.getsize(fileobj.name)
except (IOError, OSError, AttributeError):
fileobj.seek(0, 2)
size = fileobj.tell()
# If we don't get an offset, try to skip an ID3v2 tag.
if offset is None:
fileobj.seek(0, 0)
idata = fileobj.read(10)
try:
id3, insize = struct.unpack('>3sxxx4s', idata)
except struct.error:
id3, insize = '', 0
insize = BitPaddedInt(insize)
if id3 == b'ID3' and insize > 0:
offset = insize + 10
else:
offset = 0
# Try to find two valid headers (meaning, very likely MPEG data)
# at the given offset, 30% through the file, 60% through the file,
# and 90% through the file.
for i in [offset, 0.3 * size, 0.6 * size, 0.9 * size]:
try:
self.__try(fileobj, int(i), size - offset)
except error:
pass
else:
break
# If we can't find any two consecutive frames, try to find just
# one frame back at the original offset given.
else:
self.__try(fileobj, offset, size - offset, False)
self.sketchy = True
class MP3(ID3FileType):
"""An MPEG audio (usually MPEG-1 Layer 3) file.
:ivar info: :class:`MPEGInfo`
:ivar tags: :class:`ID3 <mutagen.id3.ID3>`
"""
_Info = MPEGInfo
_mimes = ["audio/mpeg", "audio/mpg", "audio/x-mpeg"]
@property
@staticmethod
Open = MP3
class EasyMP3(MP3):
"""Like MP3, but uses EasyID3 for tags.
:ivar info: :class:`MPEGInfo`
:ivar tags: :class:`EasyID3 <mutagen.easyid3.EasyID3>`
"""
from mutagen.easyid3 import EasyID3 as ID3
ID3 = ID3
| [
2,
4904,
18,
4269,
13639,
1321,
1104,
329,
13859,
11286,
13,
198,
2,
15069,
4793,
5689,
370,
411,
1349,
328,
198,
2,
198,
2,
770,
1430,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,
340,
739,
262,
... | 2.264484 | 1,985 |
from bika.lims.workflow import skip
from bika.lims.workflow import doActionFor
| [
6738,
275,
9232,
13,
2475,
82,
13,
1818,
11125,
1330,
14267,
198,
6738,
275,
9232,
13,
2475,
82,
13,
1818,
11125,
1330,
466,
12502,
1890,
198
] | 3.038462 | 26 |
from pygame import *
from time import sleep, time as timer
font.init()
font2 = font.Font(None, 36)
score1 = 0
score2 = 0
win_width = 700
win_height = 500
window = display.set_mode((win_width,win_height))
display.set_caption("Лабиринт")
background = transform.scale(image.load("galaxy.jpg"),(win_width,win_height))
Finish = False
game = True
speed_x = 3
speed_y = 3
hero1 = Player("images.jpg", 20,250,50,100,7)
hero2 = Player("images.jpg", 630,250,50,100,7)
ball =GameSprite("Ball.png", 350,250,40,40,5)
clock = time.Clock()
FPS = 60
while game:
for e in event.get():
if e.type == QUIT:
game = False
if Finish != True:
window.blit(background,(0,0))
ball.rect.x += speed_x
ball.rect.y += speed_y
ball.reset()
hero1.update1()
hero1.reset()
hero2.update2()
hero2.reset()
text1 = font2.render("Счёт: " + str(score1), 1, (255, 255, 255))
window.blit(text1, (10,10))
text2 = font2.render("Счёт: " + str(score2), 1, (255, 255, 255))
window.blit(text2, (10,40))
display.update()
if ball.rect.y < 0 or ball.rect.y > win_height - 50:
speed_y *= -1
if sprite.collide_rect(hero1,ball) or sprite.collide_rect(hero2,ball):
speed_x *= -1
if ball.rect.x > 700:
score1 += 1
ball.kill()
ball.reset()
if ball.rect.x < 0:
score2 += 1
ball.kill()
ball.reset()
clock.tick(FPS)
| [
6738,
12972,
6057,
1330,
1635,
201,
198,
6738,
640,
1330,
3993,
11,
640,
355,
19781,
201,
198,
201,
198,
10331,
13,
15003,
3419,
201,
198,
10331,
17,
796,
10369,
13,
23252,
7,
14202,
11,
4570,
8,
201,
198,
201,
198,
26675,
16,
796,
... | 1.989583 | 768 |
from collections import defaultdict
from itertools import permutations
people = defaultdict(dict)
names = set()
for l in open('input.txt').readlines():
a = l.rstrip('.\r\n').split(' ')
#names is a set, no need to check if name already there
names.add(a[0])
amount = int(a[3])
if a[2] == 'lose':
amount = -amount
people[a[0]][a[10]] = amount
print seat()
#adding myself for part 2
for person in names:
people[person]['me'] = 0
people['me'][person] = 0
names.add('me')
print seat() | [
6738,
17268,
1330,
4277,
11600,
201,
198,
6738,
340,
861,
10141,
1330,
9943,
32855,
201,
198,
201,
198,
15332,
796,
4277,
11600,
7,
11600,
8,
201,
198,
14933,
796,
900,
3419,
201,
198,
201,
198,
1640,
300,
287,
1280,
10786,
15414,
13,... | 2.385281 | 231 |
#coding=utf8
from uliweb.utils.pyini import *
def test_sorteddict():
"""
>>> d = SortedDict()
>>> d
<SortedDict {}>
>>> d.name = 'limodou'
>>> d['class'] = 'py'
>>> d
<SortedDict {'class':'py', 'name':'limodou'}>
>>> d.keys()
['name', 'class']
>>> d.values()
['limodou', 'py']
>>> d['class']
'py'
>>> d.name
'limodou'
>>> d.get('name', 'default')
'limodou'
>>> d.get('other', 'default')
'default'
>>> 'name' in d
True
>>> 'other' in d
False
>>> print (d.other)
None
>>> try:
... d['other']
... except Exception as e:
... print (e)
'other'
>>> del d['class']
>>> del d['name']
>>> d
<SortedDict {}>
>>> d['name'] = 'limodou'
>>> d.pop('other', 'default')
'default'
>>> d.pop('name')
'limodou'
>>> d
<SortedDict {}>
>>> d.update({'class':'py', 'attribute':'border'})
>>> d
<SortedDict {'attribute':'border', 'class':'py'}>
"""
def test_section():
"""
>>> s = Section('default', "#comment")
>>> print (s)
#comment
[default]
<BLANKLINE>
>>> s.name = 'limodou'
>>> s.add_comment('name', '#name')
>>> s.add_comment(comments='#change')
>>> print (s)
#change
[default]
#name
name = 'limodou'
<BLANKLINE>
>>> del s.name
>>> print (s)
#change
[default]
<BLANKLINE>
"""
def test_ini1():
"""
>>> x = Ini()
>>> s = x.add('default')
>>> print (x)
#coding=utf-8
[default]
<BLANKLINE>
>>> s['abc'] = 'name'
>>> print (x)
#coding=utf-8
[default]
abc = 'name'
<BLANKLINE>
"""
def test_ini2():
"""
>>> x = Ini()
>>> x['default'] = Section('default', "#comment")
>>> x.default.name = 'limodou'
>>> x.default['class'] = 'py'
>>> x.default.list = ['abc']
>>> print (x)
#coding=utf-8
#comment
[default]
name = 'limodou'
class = 'py'
list = ['abc']
<BLANKLINE>
>>> x.default.list = ['cde'] #for mutable object will merge the data, including dict type
>>> print (x.default.list)
['abc', 'cde']
>>> x.default.d = {'a':'a'}
>>> x.default.d = {'b':'b'}
>>> print (x.default.d)
{'a': 'a', 'b': 'b'}
"""
def test_gettext():
"""
>>> from uliweb.i18n import gettext_lazy as _
>>> x = Ini(env={'_':_})
>>> x['default'] = Section('default')
>>> x.default.option = _('Hello')
>>> x.keys()
['_', 'gettext_lazy', 'set', 'default']
"""
def test_replace():
"""
>>> x = Ini()
>>> x['default'] = Section('default')
>>> x.default.option = ['a']
>>> x.default.option
['a']
>>> x.default.option = ['b']
>>> x.default.option
['a', 'b']
>>> x.default.add('option', ['c'], replace=True)
>>> x.default.option
['c']
>>> print (x.default)
[default]
option <= ['c']
<BLANKLINE>
"""
def test_set_var():
"""
>>> x = Ini()
>>> x.set_var('default/key', 'name')
True
>>> print (x)
#coding=utf-8
[default]
key = 'name'
<BLANKLINE>
>>> x.set_var('default/key/name', 'hello')
True
>>> print (x)
#coding=utf-8
[default]
key = 'name'
key/name = 'hello'
<BLANKLINE>
>>> x.get_var('default/key')
'name'
>>> x.get_var('default/no')
>>> x.get_var('defaut/no', 'no')
'no'
>>> x.del_var('default/key')
True
>>> print (x)
#coding=utf-8
[default]
key/name = 'hello'
<BLANKLINE>
>>> x.get_var('default/key/name')
'hello'
>>> x.get_var('default')
<Section {'key/name':'hello'}>
"""
def test_update():
"""
>>> x = Ini()
>>> x.set_var('default/key', 'name')
True
>>> d = {'default/key':'limodou', 'default/b':123}
>>> x.update(d)
>>> print (x)
#coding=utf-8
[default]
key = 'limodou'
b = 123
<BLANKLINE>
"""
def test_uni_print():
"""
>>> a = ()
>>> uni_prt(a, 'utf-8')
'()'
>>> a = (1,2)
>>> uni_prt(a)
'(1, 2)'
"""
def test_triple_string():
"""
>>> from io import StringIO
>>> buf = StringIO(\"\"\"
... #coding=utf8
... [DEFAULT]
... a = '''hello
... 中文
... '''
... \"\"\")
>>> x = Ini()
>>> x.read(buf)
>>> print (repr(x.DEFAULT.a))
'hello\\n\\u4e2d\\u6587\\n'
"""
def test_save():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from io import StringIO
>>> x = Ini(env={'_':_}, convertors=i18n_ini_convertor)
>>> buf = StringIO(\"\"\"
... [default]
... option = _('English')
... str = 'str'
... str1 = "str"
... float = 1.2
... int = 1
... list = [1, 'str', 0.12]
... dict = {'a':'b', 1:2}
... s = 'English'
... [other]
... option = 'default'
... options1 = '{{option}} xxx'
... options2 = '{{default.int}}'
... options3 = option
... options4 = '-- {{default.option}} --'
... options5 = '-- {{default.s}} --'
... options6 = 'English {{default.s}} --'
... options7 = default.str + default.str1
... \"\"\")
>>> x.read(buf)
>>> print (x)
#coding=UTF-8
<BLANKLINE>
[default]
option = _('English')
str = 'str'
str1 = 'str'
float = 1.2
int = 1
list = [1, 'str', 0.12]
dict = {'a': 'b', 1: 2}
s = 'English'
[other]
option = 'default'
options1 = 'default xxx'
options2 = '1'
options3 = 'default'
options4 = '-- English --'
options5 = '-- English --'
options6 = 'English English --'
options7 = 'strstr'
<BLANKLINE>
"""
def test_merge_data():
"""
>>> from uliweb.utils.pyini import merge_data
>>> a = [[1,2,3], [2,3,4], [4,5]]
>>> b = [{'a':[1,2], 'b':{'a':[1,2]}}, {'a':[2,3], 'b':{'a':['b'], 'b':2}}]
>>> c = [set([1,2,3]), set([2,4])]
>>> print (merge_data(a))
[1, 2, 3, 4, 5]
>>> print (merge_data(b))
{'a': [1, 2, 3], 'b': {'a': [1, 2, 'b'], 'b': 2}}
>>> print (merge_data(c))
{1, 2, 3, 4}
>>> print (merge_data([2]))
2
"""
def test_lazy():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from io import StringIO
>>> x = Ini(env={'_':_}, convertors=i18n_ini_convertor, lazy=True)
>>> buf = StringIO(\"\"\"
... [default]
... option = _('English')
... str = 'str'
... str1 = "str"
... float = 1.2
... int = 1
... list = [1, 'str', 0.12]
... dict = {'a':'b', 1:2}
... s = 'English'
... [other]
... option = 'default'
... options1 = '{{option}} xxx'
... options2 = '{{default.int}}'
... options3 = option
... options4 = '-- {{default.option}} --'
... options5 = '-- {{default.s}} --'
... options6 = 'English {{default.s}} --'
... options7 = default.str + default.str1
... \"\"\")
>>> x.read(buf)
>>> x.freeze()
>>> print (x)
#coding=UTF-8
<BLANKLINE>
[default]
option = _('English')
str = 'str'
str1 = 'str'
float = 1.2
int = 1
list = [1, 'str', 0.12]
dict = {'a': 'b', 1: 2}
s = 'English'
[other]
option = 'default'
options1 = 'default xxx'
options2 = '1'
options3 = 'default'
options4 = '-- English --'
options5 = '-- English --'
options6 = 'English English --'
options7 = 'strstr'
<BLANKLINE>
"""
def test_multiple_read():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from io import StringIO
>>> x = Ini(env={'_':_}, convertors=i18n_ini_convertor, lazy=True)
>>> buf = StringIO(\"\"\"
... [default]
... option = 'abc'
... [other]
... option = default.option
... option1 = '{{option}} xxx'
... option2 = '{{default.option}}'
... option3 = '{{other.option}}'
... \"\"\")
>>> x.read(buf)
>>> buf1 = StringIO(\"\"\"
... [default]
... option = 'hello'
... \"\"\")
>>> x.read(buf1)
>>> x.freeze()
>>> print (x)
#coding=UTF-8
<BLANKLINE>
[default]
option = 'hello'
[other]
option = 'hello'
option1 = 'hello xxx'
option2 = 'hello'
option3 = 'hello'
<BLANKLINE>
"""
def test_chinese():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from io import StringIO
>>> x = Ini(env={'_':_}, convertors=i18n_ini_convertor)
>>> buf = StringIO(\"\"\"#coding=utf-8
... [default]
... option = '中文'
... option2 = _('中文')
... option3 = '{{option}}'
... [other]
... x = '中文 {{default.option}}'
... x1 = '中文 {{default.option}}'
... x2 = 'xbd {{default.option}}'
... \"\"\")
>>> x.read(buf)
>>> print (x)
#coding=utf-8
[default]
option = '中文'
option2 = _('中文')
option3 = '中文'
[other]
x = '中文 中文'
x1 = '中文 中文'
x2 = 'xbd 中文'
<BLANKLINE>
>>> print (repr(x.other.x1))
'中文 中文'
>>> x.keys()
['_', 'gettext_lazy', 'set', 'default', 'other']
"""
def test_set():
"""
>>> from io import StringIO
>>> x = Ini()
>>> buf = StringIO(\"\"\"#coding=utf-8
... [default]
... set1 = {1,2,3}
... set2 = set([1,2,3])
... \"\"\")
>>> x.read(buf)
>>> print (x)
#coding=utf-8
[default]
set1 = {1, 2, 3}
set2 = {1, 2, 3}
<BLANKLINE>
>>> buf2 = StringIO(\"\"\"#coding=utf-8
... [default]
... set1 = {5,3}
... \"\"\")
>>> x.read(buf2)
>>> print (x.default.set1)
{1, 2, 3, 5}
"""
| [
2,
66,
7656,
28,
40477,
23,
198,
6738,
334,
4528,
12384,
13,
26791,
13,
9078,
5362,
1330,
1635,
198,
198,
4299,
1332,
62,
30619,
6048,
713,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
13163,
288,
796,
311,
9741,
35,
713,
... | 2.045881 | 4,686 |
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import os
import json
import pandas as pd
import xmltodict
import shutil
# RiBuild Modules
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_2d_1d as auth_dict
from delphin_6_automation.sampling import inputs
from delphin_6_automation.database_interactions import material_interactions
from delphin_6_automation.delphin_setup import delphin_permutations
from delphin_6_automation.file_parsing import delphin_parser
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
server = mongo_setup.global_init(auth_dict)
def create_sampling_strategy(path: str, design_option: list) -> dict:
"""
Create a sampling strategy for WP6 Delphin Automation. The sampling strategy will be name 'sampling_strategy.json'
and be located at the given folder.
"""
design = [design_.split('.')[0] for design_ in design_option]
scenario = {'generic_scenario': None}
distributions = {'exterior_climate':
{'type': 'discrete', 'range': ['Weimar', 'Bremen', 'MuenchenAirp']},
'exterior_heat_transfer_coefficient_slope':
{'type': 'uniform', 'range': [1, 4], },
'exterior_moisture_transfer_coefficient':
{'type': 'discrete', 'range': [7.7*10**-9]},
'solar_absorption':
{'type': 'uniform', 'range': [0.4, 0.8], },
'rain_scale_factor':
{'type': 'uniform', 'range': [0, 2], },
'interior_climate':
{'type': 'discrete', 'range': ['a', 'b'], },
'wall_orientation':
{'type': 'uniform', 'range': [0, 360], },
'start_year':
{'type': 'discrete', 'range': [i for i in range(2020, 2046)], },
}
sampling_settings = {'initial samples per set': 1,
'add samples per run': 1,
'max samples': 500,
'sequence': 10,
'standard error threshold': 0.1,
'raw sample size': 2 ** 9}
combined_dict = {'design': design, 'scenario': scenario,
'distributions': distributions, 'settings': sampling_settings}
with open(os.path.join(path, 'sampling_strategy.json'), 'w') as file:
json.dump(combined_dict, file)
return combined_dict
folder_ = r'C:\Users\ocni\OneDrive - Danmarks Tekniske Universitet\Shared WP6 DTU-SBiAAU'
folder_1d = os.path.join(folder_, '1D')
folder_2d = os.path.join(folder_, '2D')
folder_strategy = os.path.join(folder_, 'sampling_strategy')
folder_design = os.path.join(folder_, 'designs')
#create_1d_designs(folder_1d)
create_2d_designs(folder_2d)
copy_designs(folder_)
design_options = os.listdir(folder_design)
create_sampling_strategy(folder_strategy, design_options)
mongo_setup.global_end_ssh(server)
| [
834,
9800,
834,
796,
366,
20298,
9071,
82,
36232,
1,
198,
834,
43085,
834,
796,
705,
36393,
6,
198,
198,
2,
16529,
3880,
19351,
1303,
198,
2,
30023,
33002,
198,
198,
2,
3401,
5028,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
1... | 2.289365 | 1,448 |
import torch
from netharn import util
from netharn.analytic import output_shape_for
class Reshape(torch.nn.Module, util.ModuleMixin):
"""
Wrapper class around `torch.view` that implements `output_shape_for`
TODO:
[ ] - Can we implement receptive_feild_for for this layer?
Args:
*shape: same ars that would be passed to view.
if an item in shape is None it means that the output
shape should keep the input shape value in that dimension
Example:
>>> import netharn as nh
>>> nh.OutputShapeFor(Reshape(-1, 3))._check_consistency((20, 6, 20))
(800, 3)
>>> nh.OutputShapeFor(Reshape(100, -1, 5))._check_consistency((10, 10, 15))
(100, 3, 5)
>>> Reshape(7, -1, 3).output_shape_for((None, 1)) # weird case
(7, None, 3)
>>> nh.OutputShapeFor(Reshape(None, -1, 4))._check_consistency((10, 32, 32, 16))
(10, 4096, 4)
>>> Reshape(None, -1, 4).output_shape_for((None, 32, 32, 16))
(None, 4096, 4)
>>> import netharn as nh
>>> nh.OutputShapeFor(Reshape(-1, 3))._check_consistency((20, 6, 20))
Ignore:
>>> from netharn.layers.reshape import *
>>> self = Reshape(None, 1600)
>>> input_shape = (10, 64, 5, 5)
>>> nh.OutputShapeFor(self)._check_consistency(input_shape)
"""
def forward(self, input):
"""
Example:
>>> import netharn as nh
>>> self = Reshape(None, -1, 4)
>>> input_shape = (10, 32, 32, 16)
>>> input = torch.rand(input_shape)
>>> output = self.forward(input)
>>> print(tuple(output.shape))
(10, 4096, 4)
>>> print(tuple(self.output_shape_for(input_shape)))
>>> nh.OutputShapeFor(self)._check_consistency(input_shape)
"""
if not self._none_dims:
output_shape = self.shape
else:
output_shape = list(self.shape)
input_shape = input.shape
for i in self._none_dims:
if i >= len(input_shape):
raise ValueError('input shape does not correspond')
output_shape[i] = input_shape[i]
return input.view(*output_shape)
def extra_repr(self):
"""
Example:
>>> print(Reshape(-1, 10))
Reshape(-1, 10)
>>> print(Reshape(5, 5, 5))
Reshape(5, 5, 5)
"""
return '{}'.format(', '.join(str(s) for s in self.shape))
| [
11748,
28034,
198,
6738,
2010,
71,
1501,
1330,
7736,
198,
6738,
2010,
71,
1501,
13,
38200,
13370,
1330,
5072,
62,
43358,
62,
1640,
628,
198,
4871,
1874,
71,
1758,
7,
13165,
354,
13,
20471,
13,
26796,
11,
7736,
13,
26796,
35608,
259,
... | 2.049075 | 1,243 |
# -*- coding: utf-8 -*-
# Python3 compatibility
from __future__ import absolute_import, division, print_function, unicode_literals
from QOpenScienceFramework.widgets import LoginWindow
from QOpenScienceFramework.compat import *
from QOpenScienceFramework import events
import QOpenScienceFramework.connection as osf
from qtpy import QtCore, QtGui, QtNetwork, QtWidgets
import json
# Import basics
import logging
import os
import time
# UUID generation
import uuid
# Python warnings
import warnings
# Easier function decorating
from functools import wraps
import logging
logger = logging.getLogger()
def _(s):
""" Dummy function later to be replaced for translation. """
return s
class ConnectionManager(QtNetwork.QNetworkAccessManager):
"""
The connection manager does most of the heavy lifting in communicating with the
OSF. It is responsible for all the HTTP requests and the correct treatment of
responses from the OSF. """
# The maximum number of allowed redirects
MAX_REDIRECTS = 5
error_message = QtCore.Signal('QString', 'QString')
"""PyQt signal to send an error message."""
warning_message = QtCore.Signal('QString', 'QString')
"""PyQt signal to send a warning message."""
info_message = QtCore.Signal('QString', 'QString')
"""PyQt signal to send an info message."""
success_message = QtCore.Signal('QString', 'QString')
"""PyQt signal to send a success message."""
def __init__(self, *args, **kwargs):
""" Constructor
Parameters
----------
tokenfile : str (default: 'token.json')
The path to the file in which the token information should be stored.
notifier : QtCore.QObject (default: None)
The object containing pyqt slots / callables to which this object's
message signals can be connected. The object should contain the following
slots / functions: info, error, success, warning. Each of these
should expect two strings. This object is then repsonsible for displaying
the messages, or passing them on to another object responsible for
the display.
If ``None`` is passed, then a events.Notifier object is
created which simply displays all messages in QDialog boxes
"""
# See if tokenfile and notifier are specified as keyword args
tokenfile = kwargs.pop("tokenfile", "token.json")
notifier = kwargs.pop("notifier", None)
# Call parent's constructor
super(ConnectionManager, self).__init__(*args, **kwargs)
self.tokenfile = tokenfile
self.dispatcher = events.EventDispatcher()
# Notifications
if notifier is None:
self.notifier = events.Notifier()
else:
if not isinstance(notifier, QtCore.QObject):
raise TypeError('notifier needs to be a class that inherits '
'from QtCore.QObject')
if not hasattr(notifier, 'info'):
raise AttributeError('notifier object is missing a pyqt slot '
' named info(str, str)')
if not hasattr(notifier, 'error'):
raise AttributeError('notifier object is missing a pyqt slot '
' named error(str, str)')
if not hasattr(notifier, 'success'):
raise AttributeError('notifier object is missing a pyqt slot '
' named success(str, str)')
if not hasattr(notifier, 'warning'):
raise AttributeError('notifier object is missing a pyqt slot '
' named warning(str, str)')
self.notifier = notifier
self.error_message.connect(self.notifier.error)
self.info_message.connect(self.notifier.info)
self.success_message.connect(self.notifier.success)
self.warning_message.connect(self.notifier.warning)
# Init browser in which login page is displayed
self.browser = LoginWindow()
self.browser.setWindowTitle(_(u"Log in to OSF"))
# Make sure browser closes if parent QWidget closes
if isinstance(self.parent(), QtWidgets.QWidget):
self.parent().destroyed.connect(self.browser.close)
# Connect browsers logged in event to that of dispatcher's
self.browser.logged_in.connect(self.dispatcher.dispatch_login)
self.logged_in_user = {}
self.config_mgr = QtNetwork.QNetworkConfigurationManager(self)
# The icon to show on the progress dialog
self._progress_icon = None
# Dictionary holding requests in progress, so that they can be repeated if
# mid-request it is discovered that the OAuth2 token is no longer valid.
self.pending_requests = {}
# properties
@property
def progress_icon(self):
""" The icon to show on the progress dialog."""
return self._progress_icon
@progress_icon.setter
# Private functions
def __logout_succeeded(self, data, *args):
""" Callback for logout().
Called when logout has succeeded. This function
dispatches the logout signal to all other connected elements. """
self.dispatcher.dispatch_logout()
def __logout_failed(self, data, *args):
""" Callback for logout().
Called when logout has failed. """
self.dispatcher.dispatch_login()
# Login and Logout functions
def login(self):
""" Logs in a user. Checks if a token file is stored which can be used to
login a user. If not or the token file is invalid, it opens a browser
window through which a user can log in. After a successful login, the
browser widget fires the 'logged_in' event. """
# If a valid stored token is found, read that in an dispatch login event
if self.check_for_stored_token(self.tokenfile):
self.dispatcher.dispatch_login()
return
# Otherwise, do the whole authentication dance
self.show_login_window()
def check_for_stored_token(self, tokenfile):
""" Checks for stored token information. Checks if a token.json file can be
found at the supplied location and inspects if it is not expired.
Parameters
----------
tokenfile : str
Path to the token file
Returns
-------
bool
True if a valid token was found at tokenfile's location, False otherwise
"""
if not os.path.isfile(tokenfile):
return False
try:
token = json.load(open(tokenfile))
except IOError:
logger.warning("Token file could not be opened.")
return False
# Check if token has not yet expired
if token["expires_at"] > time.time():
# Load the token information in the session object
osf.session.token = token
return True
else:
osf.session = osf.create_session()
os.remove(tokenfile)
logger.info("Token expired; need log-in")
return False
def show_login_window(self):
""" Shows the login page on OSF. """
auth_url, state = osf.get_authorization_url()
# Set up browser
browser_url = get_QUrl(auth_url)
self.browser.load(browser_url)
self.browser.show()
self.browser.raise_()
self.browser.activateWindow()
def logout(self):
""" Logs the current user out from OSF. """
if osf.is_authorized() and osf.session.access_token:
self.post(
osf.logout_url,
self.__logout_succeeded,
{'token': osf.session.access_token},
errorCallback=self.__logout_failed
)
# Communication with OSF API
def buffer_network_request(func):
""" Decorator function, not to be called directly.
Checks if network is accessible and buffers the network request so
that it can be sent again if it fails the first time, for instance due to
an invalidated OAuth2 token. In this case the user will be presented with
the login screen again. If the same user successfully logs in again, the
request will be resent. """
@wraps(func)
return func_wrapper
def clear_pending_requests(self):
""" Resets the pending network requests that still need to be executed.
Network requests
"""
self.pending_requests = {}
def add_token(self, request):
"""Adds the OAuth2 token to a HTTP request.
Parameters
----------
request : QtNetwork.QNetworkRequest
The network request item in whose header to add the OAuth2 token
Returns
-------
bool
True if token could successfully be added to the request, False if not
"""
if osf.is_authorized():
name = safe_encode("Authorization")
value = safe_encode("Bearer {}".format(osf.session.access_token))
request.setRawHeader(name, value)
return True
else:
return False
# Basic HTTP Functions
def __check_request_parameters(self, url, callback):
""" Check if the supplied url is of the correct type and if the callback
parameter is really a callable
Parameters
----------
url : string or QtCore.QUrl
The target url/endpoint to perform the request on
callback : callable
The callback function
Returns
-------
QtCore.QUrl
The url to send the request to in QUrl format (does nothing if url \
was already supplied as a QUrl)
Raises
------
TypeError
if url is not a QUrl or string, or if callback is not a callable
"""
if not isinstance(url, QtCore.QUrl) and not isinstance(url, basestring):
raise TypeError("url should be a string or QUrl object")
if not isinstance(url, QtCore.QUrl):
url = QtCore.QUrl(url)
if not callable(callback):
raise TypeError("callback should be a function or callable.")
return url
@buffer_network_request
def get(self, url, callback, *args, **kwargs):
""" Performs a HTTP GET request.
The OAuth2 token is automatically added to the
header if the request is going to an OSF server.
Parameters
----------
url : string / QtCore.QUrl
The target url/endpoint to perform the GET request on
callback : callable
The function to call once the request is finished successfully.
downloadProgess : function (defualt: None)
The slot (callback function) for the downloadProgress signal of the
reply object. This signal is emitted after a certain amount of bytes
is received, and can be used for instance to update a download progress
dialog box. The callback function should have two parameters to which
the transfered and total bytes can be assigned.
readyRead : function (default : None)
The slot (callback function) for the readyRead signal of the
reply object.
errorCallback : function (default: None)
function to call whenever an error occurs. Should be able to accept
the reply object as an argument. This function is also called if the
operation is aborted by the user him/herself.
progressDialog : QtWidgets.QProgressDialog (default: None)
The dialog to send the progress indication to. Will be included in the
reply object so that it is accessible in the downloadProgress slot, by
calling self.sender().property('progressDialog')
abortSignal : QtCore.Signal
This signal will be attached to the reply objects abort() slot, so that
the operation can be aborted from outside if necessary.
*args (optional)
Any other arguments that you want to have passed to the callback
**kwargs (optional)
Any other keywoard arguments that you want to have passed to the callback
Returns
-------
QtNetwork.QNetworkReply
The reply object for the current request. Note that if a 301 or 302
redirect has occurred, a new reply object has been made for the redirect
and the one returned here is no longer valid.
"""
# First check the correctness of the url and callback parameters
url = self.__check_request_parameters(url, callback)
# Create network request
request = QtNetwork.QNetworkRequest(url)
# Add OAuth2 token
if not self.add_token(request):
warnings.warn(_(u"Token could not be added to the request"))
# Check if this is a redirect and keep a count to prevent endless
# redirects. If redirect_count is not set, init it to 0
kwargs['redirect_count'] = kwargs.get('redirect_count', 0)
reply = super(ConnectionManager, self).get(request)
# If provided, connect the abort signal to the reply's abort() slot
abortSignal = kwargs.get('abortSignal', None)
if not abortSignal is None:
abortSignal.connect(reply.abort)
# Check if a QProgressDialog has been passed to which the download status
# can be reported. If so, add it as a property of the reply object
progressDialog = kwargs.get('progressDialog', None)
if isinstance(progressDialog, QtWidgets.QProgressDialog):
progressDialog.canceled.connect(reply.abort)
reply.setProperty('progressDialog', progressDialog)
# Check if a callback has been specified to which the downloadprogress
# is to be reported
dlpCallback = kwargs.get('downloadProgress', None)
if callable(dlpCallback):
reply.downloadProgress.connect(dlpCallback)
# Check if a callback has been specified for reply's readyRead() signal
# which emits as soon as data is available on the buffer and doesn't wait
# till the whole transfer is finished as the finished() callback does
# This is useful when downloading larger files
rrCallback = kwargs.get('readyRead', None)
if callable(rrCallback):
reply.readyRead.connect(
lambda: rrCallback(*args, **kwargs)
)
reply.finished.connect(
lambda: self.__reply_finished(
callback, *args, **kwargs
)
)
return reply
@buffer_network_request
def post(self, url, callback, data_to_send, *args, **kwargs):
    """ Perform a HTTP POST request.

    The OAuth2 token is automatically added to the header if the request is
    going to an OSF server. This request is mainly used to send small
    amounts of data to the OSF framework (use PUT for larger files, as this
    is also required by the WaterButler service used by the OSF).

    Parameters
    ----------
    url : string / QtCore.QUrl
        The target url/endpoint to perform the POST request on.
    callback : function
        The function to call once the request is finished.
    data_to_send : dict
        The data to send with the POST request. Keys will be used as
        variable names and values will be used as the variable values.
    *args (optional)
        Any other arguments that you want to have passed to callable.
    **kwargs (optional)
        Any other keyword arguments that you want to have passed to the
        callback.

    Returns
    -------
    QtNetwork.QNetworkReply
        The reply object for the current request (returned for consistency
        with get() and delete(), e.g. to connect extra signals).

    Raises
    ------
    TypeError
        If data_to_send is not a dict (or dict subclass).
    """
    # First check the correctness of the url and callback parameters
    url = self.__check_request_parameters(url, callback)

    # isinstance instead of an exact type() check so dict subclasses
    # (e.g. OrderedDict) are accepted too.
    if not isinstance(data_to_send, dict):
        raise TypeError("The POST data should be passed as a dict")

    request = QtNetwork.QNetworkRequest(url)
    request.setHeader(request.ContentTypeHeader,
                      "application/x-www-form-urlencoded")

    # Add OAuth2 token
    if not self.add_token(request):
        warnings.warn(_(u"Token could not be added to the request"))

    # Sadly, Qt4 and Qt5 show some incompatibility in that QUrl no longer
    # has the addQueryItem function in Qt5. This has moved to a different
    # QUrlQuery object.
    if QtCore.PYQT_VERSION_STR < '5':
        postdata = QtCore.QUrl()
    else:
        postdata = QtCore.QUrlQuery()

    # Add data
    for varname in data_to_send:
        postdata.addQueryItem(varname, data_to_send.get(varname))

    # Convert to QByteArray for transport
    if QtCore.PYQT_VERSION_STR < '5':
        final_postdata = postdata.encodedQuery()
    else:
        final_postdata = safe_encode(
            postdata.toString(QtCore.QUrl.FullyEncoded))

    # Fire!
    reply = super(ConnectionManager, self).post(request, final_postdata)
    reply.finished.connect(
        lambda: self.__reply_finished(callback, *args, **kwargs))
    # Return the reply like get() and delete() do, so the caller can attach
    # additional signals or abort the request.
    return reply
@buffer_network_request
def put(self, url, callback, *args, **kwargs):
    """ Perform a HTTP PUT request.

    The OAuth2 token is automatically added to the header if the request is
    going to an OSF server. This method should be used to upload larger
    sets of data such as files.

    Parameters
    ----------
    url : string / QtCore.QUrl
        The target url/endpoint to perform the PUT request on.
    callback : function
        The function to call once the request is finished.
    data_to_send : QIODevice (default: None)
        The file to upload (QFile or other QIODevice type).
    uploadProgress : callable (default: None)
        The slot (callback function) for the uploadProgress signal of the
        reply object. This signal is emitted after a certain amount of
        bytes is sent, and can be used for instance to update an upload
        progress dialog box. The callback function should have two
        parameters to which the transferred and total bytes can be
        assigned.
    errorCallback : callable (default: None)
        function to call whenever an error occurs. Should be able to accept
        the reply object as an argument. This function is also called if
        the operation is aborted by the user him/herself.
    progressDialog : QtWidgets.QProgressDialog (default: None)
        The dialog to send the progress indication to. Will be included in
        the reply object so that it is accessible in the uploadProgress
        slot, by calling self.sender().property('progressDialog')
    abortSignal : QtCore.Signal
        This signal will be attached to the reply objects abort() slot, so
        that the operation can be aborted from outside if necessary.
    *args (optional)
        Any other arguments that you want to have passed to the callback.
    **kwargs (optional)
        Any other keyword arguments that you want to have passed to the
        callback.

    Returns
    -------
    QtNetwork.QNetworkReply
        The reply object for the current request (returned for consistency
        with get() and delete()).

    Raises
    ------
    TypeError
        If data_to_send is set but is not a QtCore.QIODevice.
    """
    # First check the correctness of the url and callback parameters
    url = self.__check_request_parameters(url, callback)

    # Don't use pop() here as it will cause a segmentation fault!
    data_to_send = kwargs.get('data_to_send')
    if data_to_send is not None and not isinstance(data_to_send, QtCore.QIODevice):
        raise TypeError(
            "The data_to_send should be of type QtCore.QIODevice")

    request = QtNetwork.QNetworkRequest(url)
    request.setHeader(request.ContentTypeHeader,
                      "application/x-www-form-urlencoded")
    if data_to_send is None:
        # Empty PUT body; announce a zero content length explicitly.
        request.setHeader(request.ContentLengthHeader, '0')

    # Add OAuth2 token
    if not self.add_token(request):
        warnings.warn(_(u"Token could not be added to the request"))

    reply = super(ConnectionManager, self).put(request, data_to_send)
    reply.finished.connect(
        lambda: self.__reply_finished(callback, *args, **kwargs))

    # Check if a QProgressDialog has been passed to which the upload status
    # can be reported. If so, add it as a property of the reply object
    progressDialog = kwargs.get('progressDialog', None)
    if isinstance(progressDialog, QtWidgets.QProgressDialog):
        progressDialog.canceled.connect(reply.abort)
        reply.setProperty('progressDialog', progressDialog)
    elif progressDialog is not None:
        logging.error("progressDialog is not a QtWidgets.QProgressDialog")

    # If provided, connect the abort signal to the reply's abort() slot
    abortSignal = kwargs.get('abortSignal', None)
    if abortSignal is not None:
        abortSignal.connect(reply.abort)

    # Check if a callback has been specified to which the upload progress
    # is to be reported
    ulpCallback = kwargs.get('uploadProgress', None)
    if callable(ulpCallback):
        reply.uploadProgress.connect(ulpCallback)
    # Return the reply like get() and delete() do.
    return reply
@buffer_network_request
def delete(self, url, callback, *args, **kwargs):
    """ Perform a HTTP DELETE request.

    The OAuth2 token is automatically added to the header if the request is
    going to an OSF server.

    Parameters
    ----------
    url : string / QtCore.QUrl
        The target url/endpoint to perform the DELETE request on.
    callback : function
        The function to call once the request is finished successfully.
    errorCallback : function (default: None)
        function to call whenever an error occurs. Should be able to accept
        the reply object as an argument. This function is also called if
        the operation is aborted by the user him/herself.
    abortSignal : QtCore.Signal
        This signal will be attached to the reply objects abort() slot, so
        that the operation can be aborted from outside if necessary.
    *args (optional)
        Any other arguments that you want to have passed to the callback.
    **kwargs (optional)
        Any other keyword arguments that you want to have passed to the
        callback.

    Returns
    -------
    QtNetwork.QNetworkReply
        The reply object for the current request.
    """
    # First check the correctness of the url and callback parameters
    url = self.__check_request_parameters(url, callback)

    request = QtNetwork.QNetworkRequest(url)

    # Add OAuth2 token
    if not self.add_token(request):
        warnings.warn(_(u"Token could not be added to the request"))

    # Check if this is a redirect and keep a count to prevent endless
    # redirects. If redirect_count is not set, init it to 0
    kwargs['redirect_count'] = kwargs.get('redirect_count', 0)

    reply = super(ConnectionManager, self).deleteResource(request)

    # If provided, connect the abort signal to the reply's abort() slot
    abortSignal = kwargs.get('abortSignal', None)
    if abortSignal is not None:
        abortSignal.connect(reply.abort)

    reply.finished.connect(
        lambda: self.__reply_finished(
            callback, *args, **kwargs
        )
    )
    return reply
# Convenience HTTP Functions
def get_logged_in_user(self, callback, *args, **kwargs):
    """Fetch the data of the user that is currently logged in at the OSF.

    Parameters
    ----------
    callback : function
        Called with the server's reply once the request has finished.

    Returns
    -------
    QtNetwork.QNetworkReply or None if something went wrong
    """
    return self.get(osf.api_call("logged_in_user"), callback, *args, **kwargs)
def get_user_projects(self, callback, *args, **kwargs):
    """Fetch the list of projects owned by the currently logged-in user.

    Parameters
    ----------
    callback : function
        Called with the server's reply once the request has finished.

    Returns
    -------
    QtNetwork.QNetworkReply or None if something went wrong
    """
    return self.get(osf.api_call("projects"), callback, *args, **kwargs)
def get_project_repos(self, project_id, callback, *args, **kwargs):
    """Fetch the list of repositories belonging to an OSF project.

    Parameters
    ----------
    project_id : string
        The project id that OSF uses for this project (e.g. the node id).
    callback : function
        Called with the server's reply once the request has finished.

    Returns
    -------
    QtNetwork.QNetworkReply or None if something went wrong
    """
    return self.get(
        osf.api_call("project_repos", project_id), callback, *args, **kwargs)
def get_repo_files(self, project_id, repo_name, callback, *args, **kwargs):
    """Fetch the files stored in one repository of an OSF project.

    Parameters
    ----------
    project_id : string
        The project id that OSF uses for this project (e.g. the node id).
    repo_name : string
        The repository to get the files from, e.g. osfstorage, github,
        dropbox. Check the OSF documentation for a full list.
    callback : function
        Called with the server's reply once the request has finished.

    Returns
    -------
    QtNetwork.QNetworkReply or None if something went wrong
    """
    return self.get(
        osf.api_call("repo_files", project_id, repo_name),
        callback, *args, **kwargs)
def get_file_info(self, file_id, callback, *args, **kwargs):
    """Fetch the OSF metadata of a single file.

    Parameters
    ----------
    file_id : string
        The OSF file identifier (e.g. the node id).
    callback : function
        Called with the server's reply once the request has finished.

    Returns
    -------
    QtNetwork.QNetworkReply or None if something went wrong
    """
    return self.get(osf.api_call("file_info", file_id), callback, *args, **kwargs)
def download_file(self, url, destination, *args, **kwargs):
    """ Downloads a file by using a HTTP GET request.

    The OAuth2 token is automatically added to the header if the request is
    going to an OSF server.

    Parameters
    ----------
    url : string / QtCore.QUrl
        The target url that points to the file to download.
    destination : string
        The path and filename with which the file should be saved.
    finishedCallback : function (default: None)
        The function to call once the download is finished.
    downloadProgress : function (default: None)
        The slot (callback function) for the downloadProgress signal of the
        reply object. This signal is emitted after a certain amount of
        bytes is received, and can be used for instance to update a
        download progress dialog box. The callback function should have two
        parameters to which the transferred and total bytes can be
        assigned.
    errorCallback : function (default: None)
        function to call whenever an error occurs. Should be able to accept
        the reply object as an argument.
    progressDialog : dict (default: None)
        A dictionary containing data about the file to be transferred. It
        should have two entries:
        filename: The name of the file
        filesize: the size of the file in bytes
    *args (optional)
        Any other arguments that you want to have passed to the callback.
    **kwargs (optional)
        Any other keyword arguments that you want to have passed to the
        callback.

    Raises
    ------
    ValueError
        If destination is not a string.
    """
    # Check if destination is a string (isinstance also accepts str
    # subclasses, unlike an exact type() comparison).
    if not isinstance(destination, str):
        raise ValueError("destination should be a string")
    # Check if the specified folder exists. However, because a situation is
    # possible in which the user has selected a destination but deletes the
    # folder in some other program in the meantime, show a message box, but
    # do not raise an exception, because we don't want this to completely
    # crash our program.
    if not os.path.isdir(os.path.split(os.path.abspath(destination))[0]):
        self.error_message.emit(
            _("{} is not a valid destination").format(destination))
        return
    kwargs['destination'] = destination
    kwargs['download_url'] = url
    # Extra call to get() to make sure OAuth2 token is still valid before
    # download is initiated. If not, this way the request can be repeated
    # after the user reauthenticates
    self.get_logged_in_user(self.__download, *args, **kwargs)
def upload_file(self, url, source_file, *args, **kwargs):
    """ Uploads a file.

    The file will be stored at the specified destination on the OSF.

    Parameters
    ----------
    url : string / QtCore.QUrl
        The target url that points to the endpoint handling the upload.
    source_file : string / QtCore.QtFile
        The path to the file which should be uploaded.
    finishedCallback : function (default: None)
        The function to call once the upload is finished.
    uploadProgress : function (default: None)
        The slot (callback function) for the uploadProgress signal of the
        reply object. This signal is emitted after a certain amount of
        bytes is sent, and can be used for instance to update an upload
        progress dialog box. The callback function should have two
        parameters to which the transferred and total bytes can be
        assigned.
    errorCallback : function (default: None)
        function to call whenever an error occurs. Should be able to accept
        the reply object as an argument.
    progressDialog : dict (default: None)
        A dictionary containing data about the file to be transferred. It
        should have two entries:
        filename: The name of the file
        filesize: the size of the file in bytes
    *args (optional)
        Any other arguments that you want to have passed to the callback.
    **kwargs (optional)
        Any other keyword arguments that you want to have passed to the
        callback.
    """
    # Stash the target url and source file in kwargs so that __upload can
    # pick them up after the token check below.
    kwargs.update(upload_url=url, source_file=source_file)
    # Extra call to get() to make sure the OAuth2 token is still valid
    # before the upload is initiated. If not, this way the request can be
    # repeated after the user reauthenticates.
    self.get_logged_in_user(self.__upload, *args, **kwargs)
# PyQt Slots
def __reply_finished(self, callback, *args, **kwargs):
    """ Callback for any HTTP request.

    Central exit point for every reply: handles authentication failures,
    other network errors, 301/302 redirects (GET only), and finally
    forwards the reply to the user-supplied callback. Always marks the
    reply object for deletion when done.
    """
    reply = self.sender()
    request = reply.request()
    # Get the error callback function, if set
    errorCallback = kwargs.get('errorCallback', None)
    # Get the request id, if set (only for authenticated requests, if a user
    # is logged in), so it can be repeated if the user is required to
    # reauthenticate.
    current_request_id = kwargs.pop('_request_id', None)
    # If an error occured, just show a simple QMessageBox for now
    if reply.error() != reply.NoError:
        # User not/no longer authenticated to perform this request
        # Show login window again
        if reply.error() == reply.AuthenticationRequiredError:
            # If access is denied, the user's token must have expired
            # or something like that. Dispatch the logout signal and
            # show the login window again
            self.dispatcher.dispatch_logout()
            self.show_login_window()
        # For all other errors
        else:
            # Don't show error notification if user manually cancelled operation.
            # This is undesirable most of the time, and when it is required, it
            # can be implemented by using the errorCallback function
            if reply.error() != reply.OperationCanceledError:
                self.error_message.emit(
                    str(reply.attribute(request.HttpStatusCodeAttribute)),
                    reply.errorString()
                )
        # Remove this request from pending requests because it should not
        # be repeated upon reauthentication of the user
        if not current_request_id is None:
            self.pending_requests.pop(current_request_id, None)
        # Close any remaining file handles that were created for upload
        # or download
        self.__close_file_handles(*args, **kwargs)
        # Call error callback, if set
        if callable(errorCallback):
            kwargs.pop('errorCallback')
            errorCallback(reply, *args, **kwargs)
        reply.deleteLater()
        return
    # For all other options that follow below, this request can be erased
    # from pending requests.
    if not current_request_id is None:
        self.pending_requests.pop(current_request_id, None)
    # Check if the reply indicates a redirect
    if reply.attribute(request.HttpStatusCodeAttribute) in [301, 302]:
        # To prevent endless redirects, make a count of them and only
        # allow a preset maximum
        if kwargs['redirect_count'] < self.MAX_REDIRECTS:
            kwargs['redirect_count'] += 1
        else:
            self.error_message.emit(
                _("Whoops, something is going wrong"),
                _("Too Many redirects")
            )
            if callable(errorCallback):
                kwargs.pop('errorCallback')
                errorCallback(reply, *args, **kwargs)
            # Close any remaining file handles that were created for upload
            # or download
            self.__close_file_handles(*args, **kwargs)
            reply.deleteLater()
            return
        # Truncate the temp file. This will delete any contents that have been streamed
        # to the file during the redirect request.
        if 'tmp_file' in kwargs and isinstance(kwargs['tmp_file'], QtCore.QTemporaryFile):
            kwargs['tmp_file'].resize(0)
        # Perform another request with the redirect_url and pass on the callback
        redirect_url = reply.attribute(request.RedirectionTargetAttribute)
        # For now, the redirects only work for GET operations (but to my
        # knowledge, those are the only operations they occur for)
        if reply.operation() == self.GetOperation:
            self.get(redirect_url, callback, *args, **kwargs)
    else:
        # Remove (potentially) internally used kwargs before passing
        # data on to the callback
        kwargs.pop('redirect_count', None)
        kwargs.pop('downloadProgress', None)
        kwargs.pop('uploadProgress', None)
        kwargs.pop('readyRead', None)
        kwargs.pop('errorCallback', None)
        kwargs.pop('abortSignal', None)
        callback(reply, *args, **kwargs)
    # Cleanup, mark the reply object for deletion
    reply.deleteLater()
def __create_progress_dialog(self, text, filesize):
    """ Build a progress dialog for a file transfer.

    Uses self._progress_icon (if set) as the window icon of the dialog.

    Parameters
    ----------
    text : str
        The label to display on the dialog.
    filesize : int
        The size of the file being transferred in bytes; used as the
        dialog's maximum value.

    Returns
    -------
    QtWidgets.QProgressDialog
    """
    dialog = QtWidgets.QProgressDialog()
    dialog.hide()
    dialog.setWindowTitle(_(u"Transferring"))
    dialog.setLabelText(text)
    dialog.setMinimum(0)
    dialog.setMaximum(filesize)
    if self._progress_icon:
        dialog.setWindowIcon(self._progress_icon)
    return dialog
def __transfer_progress(self, transfered, total):
    """ Slot for a reply's progress signal; forwards the transferred byte
    count to the progress dialog attached to the reply. """
    dialog = self.sender().property('progressDialog')
    dialog.setValue(transfered)
def __download(self, reply, download_url, *args, **kwargs):
    """ The real download function, that is a callback for get_logged_in_user()
    in download_file().

    Creates a temporary file to stream the data into, optionally wires up a
    progress dialog, and fires the GET request.
    """
    # Create tempfile that __download_readyRead will stream chunks into.
    tmp_file = QtCore.QTemporaryFile()
    tmp_file.open(QtCore.QIODevice.WriteOnly)
    kwargs['tmp_file'] = tmp_file
    progressDialog = kwargs.get('progressDialog', None)
    if isinstance(progressDialog, dict):
        try:
            text = _("Downloading") + " " + progressDialog['filename']
            size = progressDialog['filesize']
        except KeyError as e:
            raise KeyError("progressDialog missing field {}".format(e))
        # Replace the dict with a real QProgressDialog and route progress
        # updates to __transfer_progress.
        progress_indicator = self.__create_progress_dialog(text, size)
        kwargs['progressDialog'] = progress_indicator
        kwargs['downloadProgress'] = self.__transfer_progress
    # Callback function for when bytes are received
    kwargs['readyRead'] = self.__download_readyRead
    # Download the file with a get request
    self.get(download_url, self.__download_finished, *args, **kwargs)
def __download_readyRead(self, *args, **kwargs):
    """ Callback for a reply object to indicate that data is ready to be
    written to a buffer.

    Streams the received chunk into the temporary file created by
    __download(), so a large download never has to fit in memory.

    Raises
    ------
    AttributeError
        If no valid temporary file handle is present in kwargs.
    """
    reply = self.sender()
    data = reply.readAll()
    if 'tmp_file' not in kwargs or not isinstance(kwargs['tmp_file'], QtCore.QTemporaryFile):
        raise AttributeError('Missing file handle to write to')
    kwargs['tmp_file'].write(data)
def __download_finished(self, reply, *args, **kwargs):
    """ Callback for a reply object of a GET request, indicating that all
    expected data has been received.

    Moves the downloaded data from the temporary file to its final
    destination and invokes the optional finishedCallback.

    Raises
    ------
    AttributeError
        If no destination or no valid temporary file handle was passed.
    """
    progressDialog = kwargs.pop('progressDialog', None)
    if isinstance(progressDialog, QtWidgets.QWidget):
        progressDialog.deleteLater()
    # Do some checks to see if the required data has been passed.
    if 'destination' not in kwargs:
        raise AttributeError("No destination passed")
    if 'tmp_file' not in kwargs or not isinstance(kwargs['tmp_file'], QtCore.QTemporaryFile):
        raise AttributeError(
            "No valid reference to temp file where data was saved")
    kwargs['tmp_file'].close()
    # If a file with the same name already exists at the location, try to
    # delete it.
    if QtCore.QFile.exists(kwargs['destination']):
        if not QtCore.QFile.remove(kwargs['destination']):
            # If the destination file could not be deleted, notify the user
            # of this and stop the operation
            self.error_message.emit(
                _("Error saving file"),
                _("Could not replace {}").format(kwargs['destination'])
            )
            return
    # Copy the temp file to its destination
    if not kwargs['tmp_file'].copy(kwargs['destination']):
        self.error_message.emit(
            _("Error saving file"),
            _("Could not save file to {}").format(kwargs['destination'])
        )
        return
    fcb = kwargs.pop('finishedCallback', None)
    if callable(fcb):
        fcb(reply, *args, **kwargs)
def __upload(self, reply, upload_url, source_file, *args, **kwargs):
    """ Callback for get_logged_in_user() in upload_file(). Does the real
    uploading.

    Accepts source_file either as a path string (opened here) or as an
    already constructed QIODevice, optionally wires up a progress dialog,
    and fires the PUT request.
    """
    # Put checks for the url to be a string or QUrl
    # Check source file
    # NOTE(review): `basestring` is a Python-2-only builtin; on Python 3
    # this relies on a py2/3 compatibility shim being in scope -- confirm.
    if isinstance(source_file, basestring):
        # Check if the specified file exists, because a situation is possible in which
        # the user has deleted the file in the meantime in another program.
        # show a message box, but do not raise an exception, because we don't want this
        # to completely crash our program.
        if not os.path.isfile(os.path.abspath(source_file)):
            self.error_message.emit(
                _("{} is not a valid source file").format(source_file))
            return
        # Open source file for reading
        source_file = QtCore.QFile(source_file)
    elif not isinstance(source_file, QtCore.QIODevice):
        self.error_message.emit(
            _("{} is not a string or QIODevice instance").format(source_file))
        return
    progressDialog = kwargs.pop('progressDialog', None)
    if isinstance(progressDialog, dict):
        try:
            text = _("Uploading") + " " + \
                os.path.basename(progressDialog['filename'])
            size = progressDialog['filesize']
        except KeyError as e:
            raise KeyError("progressDialog is missing field {}".format(e))
        # Replace the dict with a real QProgressDialog and route progress
        # updates to __transfer_progress.
        progress_indicator = self.__create_progress_dialog(text, size)
        kwargs['progressDialog'] = progress_indicator
        kwargs['uploadProgress'] = self.__transfer_progress
    source_file.open(QtCore.QIODevice.ReadOnly)
    self.put(upload_url, self.__upload_finished, data_to_send=source_file,
             *args, **kwargs)
def __upload_finished(self, reply, *args, **kwargs):
    """ Callback for the reply object of a PUT request, indicating that all
    data has been sent.

    Cleans up the progress dialog and the source file handle, then invokes
    the optional finishedCallback.

    Raises
    ------
    AttributeError
        If no valid open file handle is present in kwargs.
    """
    progressDialog = kwargs.pop('progressDialog', None)
    if isinstance(progressDialog, QtWidgets.QWidget):
        progressDialog.deleteLater()
    if 'data_to_send' not in kwargs or not isinstance(kwargs['data_to_send'],
                                                      QtCore.QIODevice):
        raise AttributeError("No valid open file handle")
    # Close the source file
    kwargs['data_to_send'].close()
    # If another external callback function was provided, call it below
    fcb = kwargs.pop('finishedCallback', None)
    if callable(fcb):
        fcb(reply, *args, **kwargs)
def __close_file_handles(self, *args, **kwargs):
    """ Closes any open file handles after a failed transfer.

    Called by __reply_finished when a HTTP response code indicating an
    error has been received. Downloads keep their handle in 'tmp_file',
    uploads in 'data_to_send'; both entries are removed from kwargs.
    """
    for key in ('tmp_file', 'data_to_send'):
        handle = kwargs.pop(key, None)
        if isinstance(handle, QtCore.QIODevice):
            handle.close()
# Other callbacks
def handle_login(self):
    """ Handles the login event received after login.

    Requests the logged-in user's data from the OSF and stores it locally
    via the set_logged_in_user callback. """
    self.get_logged_in_user(self.set_logged_in_user)
def handle_logout(self):
    """ Handles the logout event received after a logout.

    Clears the locally cached data of the previously logged-in user. """
    self.logged_in_user = {}
def set_logged_in_user(self, user_data):
    """ Callback function, not to be called directly.

    Locally saves the data of the currently logged-in user, replays any
    requests that were queued for this user while logged out, and then
    clears the pending-request queue. """
    raw_payload = user_data.readAll().data()
    self.logged_in_user = json.loads(safe_decode(raw_payload))
    # If user had any pending requests from previous login, execute them now
    current_user_id = self.logged_in_user['data']['id']
    for owner_id, queued_request in self.pending_requests.values():
        if owner_id == current_user_id:
            queued_request()
    # Clear the pending actions queue, just to be sure.
    self.clear_pending_requests()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
11361,
18,
17764,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
1195,
115... | 2.457857 | 18,532 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
# pip install opentelemetry-api
# pip install opentelemetry-sdk
# pip install opentelemetry-exporter-otlp-proto-http
# pip install opentelemetry-instrumentation-wsgi
# python3 oteltest.py & disown
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.resources import Resource
from opentelemetry.propagate import extract
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
import http.server
import json
# Build an OpenTelemetry tracing pipeline and serve HTTP on port 9090.
resource = Resource(attributes={
    "service.name": "python-otel-service"
})
# Install a tracer provider tagged with the service name above.
trace.set_tracer_provider(TracerProvider(resource=resource))
tracer = trace.get_tracer(__name__)
# Exporter that ships spans to a Dynatrace OTLP endpoint; the <YOUR>
# placeholders must be replaced with the tenant id and an API token.
otlp_exporter = OTLPSpanExporter(
    endpoint="https://<YOUR>.live.dynatrace.com/api/v2/otlp/v1/traces",
    headers={"Authorization" : "Api-Token <YOUR>"},
)
# Batch spans in the background before exporting them.
span_processor = BatchSpanProcessor(otlp_exporter)
trace.get_tracer_provider().add_span_processor(span_processor)
# NOTE(review): `Handler` is not defined in this chunk -- presumably a
# http.server.BaseHTTPRequestHandler subclass defined elsewhere; confirm.
s = http.server.HTTPServer( ('', 9090), Handler )
s.serve_forever()
| [
2,
7347,
2721,
1034,
298,
11129,
41935,
12,
15042,
198,
2,
7347,
2721,
1034,
298,
11129,
41935,
12,
21282,
74,
198,
2,
7347,
2721,
1034,
298,
11129,
41935,
12,
1069,
26634,
12,
313,
34431,
12,
1676,
1462,
12,
4023,
198,
2,
7347,
272... | 2.848806 | 377 |
import logging
from src.Settings import WIKI_SETTINGS, SEARCH_SETTINGS, ASSISTANT_SETTINGS, PLAY_SETTINGS, OPEN_SETTINGS
from src.Actions.Open import open_page_or_file
from src.Actions.Play import play_youtube_video_for
from src.Actions.Search import search_web_for
from src.Actions.Wiki import open_wiki_results_for
from src.Actions.Catchall import do_catchall_action
from src.Tools.process_command import get_first_word_and_phrase_from, listen_for_commands, cut_wake_word_from_command
from src.Tools.wake_triggers import check_for_wake_word
def run_assistant():
    """
    Listen for a voice command and act on it.

    Flow: listen for a command; check whether the wake word was spoken
    (check_for_wake_word also logs the outcome); if so, strip the wake word
    and perform the action. Without the wake word, the action is still
    performed unless the 'Require Wake Word' setting is enabled, in which
    case the command is only logged and dropped.

    :return: None
    """
    command = listen_for_commands().lower()
    if check_for_wake_word(command):
        # Wake word heard: strip it and act on the remainder.
        perform_action(cut_wake_word_from_command(command))
        return
    if ASSISTANT_SETTINGS.get('Require Wake Word', False):
        logging.error(f'Wake Word required by settings; action {command} will not be performed.')
        return
    # Wake word optional (configurable in the Settings file): act anyway.
    logging.info(f'Wake Word not required by settings; continuing to perform action')
    perform_action(command)
def perform_action(command_action):
    """
    Dispatch a command to the matching Assistant action.

    The first word of the command selects the action; the remaining phrase
    (helper words removed, whitespace stripped) is handed to the chosen
    handler. Unrecognised commands fall through to the catchall action.

    :param command_action: str
    :return: None
    """
    command_word, phrase = get_first_word_and_phrase_from(command_action)
    action = determine_command_type(command_word)
    if action == 'play':
        # Play a YouTube video via pywhatkit.playonyt()
        logging.debug(f'Recognized {command_word} as Play; playing {phrase}')
        play_youtube_video_for(phrase)
        return
    if action == 'wiki':
        # Read Wikipedia via wikipedia.summary() and open the Wikipedia
        # website in the browser.
        logging.debug(f'Recognized {command_word} as Wiki; searching Wiki for {phrase}')
        open_wiki_results_for(phrase)
        return
    if action == 'search':
        # Search on Google via pywhatkit.search()
        logging.debug(f'Recognized {command_word} as Search; searching Google for {phrase}')
        search_web_for(phrase)
        return
    if action == 'open':
        # Open various locations; currently limited to e-mail only,
        # based on the url in Settings.
        logging.info(f'Recognized {command_word} as open; attempting to open {phrase[5:]}')
        open_page_or_file(phrase)
        return
    # Catchall: the first word was not recognized. By default the catchall
    # action searches the web for the phrase.
    logging.info(f'Failed to recognize {command_word} (which is set to catchall by default); reverting to search')
    do_catchall_action(phrase, command_word)
def determine_command_type(command_word):
    """
    Map a command word onto the canonical action name.

    Each action's command list comes from Settings, with a built-in default
    list as fallback; the first entry of a list doubles as the action's
    canonical name. Words that appear in no list map to 'catchall'.

    :param command_word: str
    :return: str
    """
    actions_list = [
        PLAY_SETTINGS.get('Commands', ['play']),
        WIKI_SETTINGS.get('Commands', ['wiki', 'what', 'who']),
        SEARCH_SETTINGS.get('Commands', ['search', 'find', 'google']),
        OPEN_SETTINGS.get('Commands', ['open']),
    ]
    # First matching list wins; its head is the canonical action name.
    return next(
        (command_list[0] for command_list in actions_list
         if command_word in command_list),
        'catchall',
    )
| [
11748,
18931,
198,
198,
6738,
12351,
13,
26232,
1330,
370,
18694,
40,
62,
28480,
51,
20754,
11,
7946,
31315,
62,
28480,
51,
20754,
11,
24994,
8808,
8643,
62,
28480,
51,
20754,
11,
28180,
62,
28480,
51,
20754,
11,
38303,
62,
28480,
51,... | 2.745348 | 1,881 |
from easyprocess import EasyProcess
from pyvirtualdisplay.smartdisplay import SmartDisplay
from discogui.imgutil import grab_no_blink
# Launch a zenity entry dialog inside a virtual display and save a
# screenshot of it to blink.png.
with SmartDisplay() as disp:
    with EasyProcess(["zenity", "--entry"]) as p:
        # Block until something has actually been rendered on the display.
        disp.waitgrab()
        # NOTE(review): grab_no_blink presumably grabs a screenshot while
        # avoiding cursor-blink artifacts -- confirm in discogui.imgutil.
        im = grab_no_blink()
        im.save("blink.png")
| [
6738,
2562,
14681,
1330,
16789,
18709,
198,
6738,
12972,
32844,
13812,
13,
27004,
13812,
1330,
10880,
23114,
198,
198,
6738,
1221,
519,
9019,
13,
9600,
22602,
1330,
5552,
62,
3919,
62,
2436,
676,
198,
198,
4480,
10880,
23114,
3419,
355,
... | 2.7 | 110 |
import pytest
from sqlalchemy import create_engine
import db
from model import model
from model.model import *
from db import Session, sessionScope
from sqlalchemy.exc import IntegrityError
import bcrypt
import pdb
import inspect, os
@pytest.yield_fixture(scope="module")
@pytest.yield_fixture
| [
11748,
12972,
9288,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
11748,
20613,
198,
6738,
2746,
1330,
2746,
198,
6738,
2746,
13,
19849,
1330,
1635,
198,
6738,
20613,
1330,
23575,
11,
6246,
43642,
198,
6738,
44161,
282,
26599,... | 3.448276 | 87 |
from forms_api.fields.select_field import SelectField
from tests.fields.utils import FieldTestCase
| [
6738,
5107,
62,
15042,
13,
25747,
13,
19738,
62,
3245,
1330,
9683,
15878,
198,
6738,
5254,
13,
25747,
13,
26791,
1330,
7663,
14402,
20448,
628
] | 4 | 25 |
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt,cstr
| [
2,
15069,
357,
66,
8,
2211,
11,
39313,
27768,
21852,
18367,
83,
13,
12052,
13,
290,
20420,
198,
2,
1114,
5964,
1321,
11,
3387,
766,
5964,
13,
14116,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
11748,
5... | 3.634921 | 63 |
from . import tritam_tracking
from . import tritam_api_constant
from . import tritam_sms
| [
6738,
764,
1330,
491,
270,
321,
62,
36280,
198,
6738,
764,
1330,
491,
270,
321,
62,
15042,
62,
9979,
415,
198,
6738,
764,
1330,
491,
270,
321,
62,
82,
907,
198
] | 2.870968 | 31 |
import click
from pycman import config
import eyapm
from eyapm import transaction
@click.command('reinstall')
@click.pass_context
@click.option(
'--config-file', 'config_file',
default=eyapm.default.config_file)
@click.option(
'-y', '--yes', 'quiet', is_flag=True, default=False
)
@click.argument('pkgnames', nargs=-1, required=True)
| [
201,
198,
201,
198,
11748,
3904,
201,
198,
6738,
12972,
66,
805,
1330,
4566,
201,
198,
201,
198,
11748,
1926,
499,
76,
201,
198,
6738,
1926,
499,
76,
1330,
8611,
201,
198,
201,
198,
201,
198,
201,
198,
31,
12976,
13,
21812,
10786,
... | 2.440789 | 152 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 25 09:41:54 2017
@author: ozsanos
"""
from ModTest import Pi
print(Pi())
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3300,
2365,
1679,
7769,
25,
3901,
25,
4051,
2177,
201,
198,
201,
198,
31,
9800,
25,
15649,
12807,
418,
201,
198,
37811,
201,
198,
673... | 2.220339 | 59 |
import select
import socket
import threading
import wx
import wx.lib.pubsub
if __name__ == "__main__":
app = wx.App(False)
frame = MainFrame()
app.MainLoop()
| [
11748,
2922,
198,
11748,
17802,
198,
11748,
4704,
278,
198,
11748,
266,
87,
198,
11748,
266,
87,
13,
8019,
13,
12984,
7266,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
598,
796,
266,
87,
13... | 2.606061 | 66 |
from django.shortcuts import render, redirect
from .models import video
from .forms import VideoForm
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
764,
27530,
1330,
2008,
198,
6738,
764,
23914,
1330,
7623,
8479,
198,
2,
13610,
534,
5009,
994,
13,
628
] | 4.129032 | 31 |
import abc
from functools import partial
import typing
from .tree import Node, LiteralNode, RuleNode, MultiNode
from ..validation.validity import Validity
from ..validation import group_types as gt
from ..validation.transform_validation import get_return_type
if typing.TYPE_CHECKING:
from .transform import LanguageTransformation # pylint: disable=unused-import
RuleName = str
MatchResult = typing.Tuple['_SmartText', Node]
RuleReferenceCache = typing.MutableMapping[
typing.Tuple[int, '_SmartText', int],
typing.Sequence[MatchResult]
]
T = typing.TypeVar('T')
class Language(object):
"""Collection of rules"""
@classmethod
class Rule(object):
"""Syntax rule
each transformation in the map must match the format of the syntax
"""
class Syntax(object):
"""Collection of Term lists
a syntax matches if one of its term lists matches the plaintext
"""
@classmethod
class TermGroup(object):
"""Collection of Terms"""
@classmethod
class Term(object, metaclass=abc.ABCMeta):
"""Unit of a syntax"""
@abc.abstractmethod
@abc.abstractmethod
@abc.abstractmethod
@abc.abstractmethod
class RuleReference(Term):
"""Term that represents a nested rule"""
class Literal(Term):
"""Term that represents a plaintext literal"""
| [
11748,
450,
66,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
11748,
19720,
198,
198,
6738,
764,
21048,
1330,
19081,
11,
25659,
1691,
19667,
11,
14330,
19667,
11,
15237,
19667,
198,
6738,
11485,
12102,
341,
13,
12102,
414,
1330,
3254,
... | 3.174641 | 418 |
# Generated by Django 3.1.6 on 2021-02-04 17:27
from django.db import migrations, models
import elearn.models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
21,
319,
33448,
12,
2999,
12,
3023,
1596,
25,
1983,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
198,
11748,
9766,
1501,
13,
27530,
628
] | 2.897436 | 39 |
from django.conf.urls import url, include
from quiz import views
from rest_framework import routers
router = routers.DefaultRouter()
urlpatterns = [
url(r'^api/', include(router.urls)),
url(r'^$', views.welcome, name="welcome"),
url(r'^create/', views.create_user, name="create_user"),
url(r'^validate_login/', views.log_in, name="log_user"),
url(r'^update_result/', views.update_result, name="update_result"),
url(r'^test', views.get_data, name="getdata"),
url(r'^logout', views.log_out, name="log_out"),
url(r'^validate_username/', views.validate_username, name='validate_username'),
url(r'tops/', views.top10, name='tops'),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
2291,
198,
6738,
38964,
1330,
5009,
198,
198,
6738,
1334,
62,
30604,
1330,
41144,
198,
198,
472,
353,
796,
41144,
13,
19463,
49,
39605,
3419,
628,
198,
6371,
33279,
82,
796... | 2.586873 | 259 |
'''
for- in 구문 연습
'''
# 피보나치 수열(fibonacci sequence)
# f[0] = 0, f[1] = 1
# f[n] = f[n-1] +f[n-2], n >=2
# 피보나치 수열 원소 20개 짜리 리스트를 생성
f = [0, 1]
for n in range(2,20):
f.append(f[n - 1] + f[n - 2])
print(f)
# 소수(prime number): 1과 자기자신으로 나누어지는 정수
# 2부터 10까지의 정수들 중에서 소수를 찾아서 출력
for n in range(2,11):
isprime = True
for divider in range(2,n):
if n % divider == 0:
print(f'{n} = {divider} x {n / divider}')
isprime = False
break
if isprime:
print(f'{n}은 소수!')
# for/ while 반복문과 else가 함께 사용되는 경우:
# 반복문이 break를 만나지 않고 범위 전체를 반복했을 때
# else 블록이 실행
# 반복문 중간에 break를 만나면 else는 실행되지 않음.
for i in range(5):
if i ==3:
break
print(i, end = ' ')
else:
print('모든 반복을 끝냄')
print()
# for-else 구문을 사용한 소수 찾기
for n in range(2,11):
for divider in range(2,n):
if n % divider == 0: # 약수가 존재 -> 소수가 아님
break
else: #break를 만나지 않았을 때 -> 약수가 없음 -> 소수
print(f'{n}은 소수') | [
7061,
6,
198,
1640,
12,
287,
220,
166,
113,
105,
167,
105,
116,
23821,
245,
108,
168,
232,
113,
198,
7061,
6,
198,
2,
220,
169,
242,
120,
167,
111,
112,
167,
224,
246,
168,
117,
246,
23821,
230,
246,
168,
245,
112,
7,
69,
571,... | 1.085746 | 898 |
#!/usr/bin/env python3
# coding:utf-8
'''
n + 100 = i^2, n + 268 = j^2
=> n -> i^2 -> j^2
i^2 - 100 <= 10000
=> i < 100.5
又 i 为整数
=> i 的上限为 100
同理,由 j^2 - 268 <= 10000
=> j 的上限为 101
其实,借鉴 004_04.py 可知 i 与 j 的上限分别为 83 与 84
算式 sqrt(n + 100) 与 sqrt(n + 268) 成立,且分别对应 i 与 j
=> n >= -100
=> sqrt(n + 100) >= 0,即 i >= 0
=> sqrt(n + 268) = sqrt(168)
又 j 为整数
=> j >= 13
=> i 的下限为 0,j 的下限为 13
'''
a, b = set(), set()
for i in range(84): # 0 <= i <= 83
a.add(i**2 - 100) # 集合 a 中的数 n 均满足 (n + 100) 为完全平方数
for j in range(13, 85): # 13 <= j <= 84
b.add(j**2 - 268) # 集合 b 中的数 n 均满足 (n + 268) 为完全平方数
c = list(a & b) # 将 a 与 b 的交集放入列表
c.sort() # 排序
print(c)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
19617,
25,
40477,
12,
23,
198,
198,
7061,
6,
198,
77,
1343,
1802,
796,
1312,
61,
17,
11,
299,
1343,
36678,
796,
474,
61,
17,
198,
14804,
299,
220,
4613,
220,
1312,
61,
17,... | 1.291429 | 525 |
"""
@brief test log(time=93s)
"""
import sys
import os
import unittest
from pyquickhelper.loghelper.flog import fLOG
from pyquickhelper.filehelper import explore_folder_iterfile
from pyquickhelper.pycode import ExtTestCase
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
if __name__ == "__main__":
unittest.main()
| [
37811,
198,
31,
65,
3796,
220,
220,
220,
220,
220,
1332,
2604,
7,
2435,
28,
6052,
82,
8,
198,
37811,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
6738,
12972,
24209,
2978,
525,
13,
6404,
2978,
525,
13,
... | 2.132075 | 265 |
from src.server import Quotes
from src.text import get_text
from static import consts
SERVER = Quotes.get_instance()
| [
6738,
12351,
13,
15388,
1330,
2264,
6421,
198,
6738,
12351,
13,
5239,
1330,
651,
62,
5239,
198,
198,
6738,
9037,
1330,
1500,
82,
198,
198,
35009,
5959,
796,
2264,
6421,
13,
1136,
62,
39098,
3419,
628
] | 3.333333 | 36 |
import argparse
import json
from tqdm import tqdm
from common.dataset.reader import JSONLineReader
from common.util.log_helper import LogHelper
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input', help='/path/to/input/file')
parser.add_argument('output', help='/path/to/output/file')
args = parser.parse_args()
LogHelper.setup()
logger = LogHelper.get_logger("separate_scores")
jlr = JSONLineReader()
lines = jlr.read(args.input)
with open(args.output, 'w') as f:
for obj in tqdm(lines):
predicted_evidence = obj['predicted_evidence']
new_predicted_evidence = []
scores = []
for evidence in predicted_evidence:
new_predicted_evidence.append(evidence[0])
scores.append(evidence[1])
obj['predicted_evidence'] = new_predicted_evidence
obj['scores'] = scores
f.write(json.dumps(obj) + '\n')
| [
11748,
1822,
29572,
198,
11748,
33918,
198,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
6738,
2219,
13,
19608,
292,
316,
13,
46862,
1330,
19449,
13949,
33634,
198,
6738,
2219,
13,
22602,
13,
6404,
62,
2978,
525,
1330,
5... | 2.357995 | 419 |
if __name__=="__main__":
array= []
size = int(input())
for i in range(size) :
ele = int(input())
array.append(ele)
print("Original array : ",array)
print("Array with zero at the end of it: ",moveZero(array))
if __name__=="__main__":
size = int(input())
array1 = []
left = []
right = []
for i in range(size):
ele = int(input())
array1.append(ele)
print("Original array : ", array1)
print("Array with Even at the end of it: ",moveEven(array1))
if __name__=="__main__":
size = int(input())
array2 = []
nonprime = []
prime = []
for i in range(size):
ele = int(input())
array2.append(ele)
print("Original array : ", array2)
print("Array with Even at the end of it: ",movePrime(array2))
| [
361,
11593,
3672,
834,
855,
1,
834,
12417,
834,
1298,
201,
198,
220,
220,
220,
7177,
28,
17635,
220,
201,
198,
220,
220,
220,
2546,
796,
493,
7,
15414,
28955,
220,
220,
201,
198,
220,
220,
220,
329,
1312,
287,
2837,
7,
7857,
8,
... | 2.071101 | 436 |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 04
Copyright (c) 2018, Vu Hoang Minh. All rights reserved.
@author: Vu Hoang Minh
@email: minh.vu@umu.se
@license: BSD 3-clause.
"""
import os
import ntpath
from unet3d.utils.print_utils import print_processing, print_section, print_separator
import numpy as np
from unet3d.utils.utils import str2bool
from brats.config import config_dict
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
4280,
8702,
198,
15269,
357,
66,
8,
2864,
11,
35816,
9544,
648,
1855,
71,
13,
1439,
2489,
10395,
13,
198,
31,
9800,
25,
220,
35816,
9544... | 2.703226 | 155 |
import cx_Freeze
import sys
import matplotlib
base = None
if sys.platform == 'Win32':
base = 'Win32'
executables = [cx_Freeze.Executable("VasoTracker.py", base=base, icon="ICON.ICO")]
additional_mods = ['cv2','atexit','numpy.core._methods', 'numpy.lib.format', "matplotlib.backends.backend_tkagg"]
excludes = ["winpty"]
#buildOptions = dict(include_files = ['SampleData/']) #folder,relative path. Use tuple like in the single file to set a absolute path.
cx_Freeze.setup(
name = "VasoTracker",
options = {"build_exe": {"excludes": excludes,'includes': additional_mods,
"packages":['skimage',"tkFileDialog","scipy","cv2","Tkinter", "matplotlib", "Queue"],
"include_files":["ICON.ICO", 'SampleData/', 'Results/']}},
version = "1.0.1",
description = "Vasotracker Diameter Tracking",
executables = executables )
| [
11748,
43213,
62,
11146,
2736,
198,
11748,
25064,
198,
11748,
2603,
29487,
8019,
198,
198,
8692,
796,
6045,
198,
198,
361,
25064,
13,
24254,
6624,
705,
16643,
2624,
10354,
198,
220,
220,
220,
2779,
796,
705,
16643,
2624,
6,
198,
198,
... | 2.487603 | 363 |
from django.contrib import admin
from cardapio.models import Product, Category
admin.site.register(Product, ProductAdmin)
admin.site.register(Category, CategoryAdmin) | [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
2657,
499,
952,
13,
27530,
1330,
8721,
11,
21743,
198,
198,
28482,
13,
15654,
13,
30238,
7,
15667,
11,
8721,
46787,
8,
198,
28482,
13,
15654,
13,
30238,
7,
27313,
11,
21743,
... | 3.795455 | 44 |
"""
This module contains a task for starting and monitoring Fivetran connector sync jobs
"""
| [
37811,
198,
1212,
8265,
4909,
257,
4876,
329,
3599,
290,
9904,
10579,
2213,
272,
21716,
17510,
3946,
198,
37811,
198
] | 4.65 | 20 |
import unittest
from pax import core, datastructure
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
279,
897,
1330,
4755,
11,
4818,
459,
5620,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.641026 | 39 |
import requests
import json
import time
# urls
independent_reserve_url = "https://api.independentreserve.com/Public/GetMarketSummary?primaryCurrencyCode=eth&secondaryCurrencyCode=aud"
acx_url = "https://acx.io//api/v2/tickers/ethaud.json"
btcm_url = "https://api.btcmarkets.net/market/ETH/AUD/tick"
coinspot_url = "https://www.coinspot.com.au/sell/eth/rate/aud"#"https://www.coinspot.com.au/sell/eth"
coinbase_url = "https://api.coinbase.com/v2/exchange-rates?currency=ETH"
if __name__ == "__main__":
print time.strftime("%H:%M:%S")
get_sell_prices()
| [
11748,
7007,
198,
11748,
33918,
198,
11748,
640,
198,
198,
2,
2956,
7278,
198,
34750,
62,
411,
3760,
62,
6371,
796,
366,
5450,
1378,
15042,
13,
34750,
411,
3760,
13,
785,
14,
15202,
14,
3855,
27470,
22093,
30,
39754,
34,
13382,
10669,... | 2.583333 | 216 |
import os
| [
11748,
28686,
198
] | 3.333333 | 3 |
# -*- coding: utf-8 -*-
from ....Methods.Slot.Slot import SlotCheckError
from ....Methods.Slot.SlotW26 import *
def check(self):
"""Check that the SlotW26 object is correct
Parameters
----------
self : SlotW26
A SlotW26 object
Returns
-------
None
Raises
-------
S26_WCheckError
You must have W0 < 2*R1
"""
if self.R1 * 2 <= self.W0:
raise S26_WCheckError("You must have W0 < 2*R1")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
19424,
46202,
13,
38963,
13,
38963,
1330,
32026,
9787,
12331,
198,
6738,
19424,
46202,
13,
38963,
13,
38963,
54,
2075,
1330,
1635,
628,
198,
4299,
2198,
7,
94... | 2.352041 | 196 |
# Copyright 2013 by Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous utilities.
This module provides misc. utility functions for apps and the Falcon
framework itself. These functions are hoisted into the front-door
`falcon` module for convenience::
import falcon
now = falcon.http_now()
"""
import datetime
import functools
import inspect
import warnings
import six
from falcon import status_codes
__all__ = (
'deprecated',
'http_now',
'dt_to_http',
'http_date_to_dt',
'to_query_str',
'get_bound_method',
'get_argnames',
'get_http_status'
)
# PERF(kgriffs): Avoid superfluous namespace lookups
strptime = datetime.datetime.strptime
utcnow = datetime.datetime.utcnow
# NOTE(kgriffs): We don't want our deprecations to be ignored by default,
# so create our own type.
#
# TODO(kgriffs): Revisit this decision if users complain.
def deprecated(instructions):
"""Flags a method as deprecated.
This function returns a decorator which can be used to mark deprecated
functions. Applying this decorator will result in a warning being
emitted when the function is used.
Args:
instructions (str): Specific guidance for the developer, e.g.:
'Please migrate to add_proxy(...)''
"""
return decorator
def http_now():
"""Returns the current UTC time as an IMF-fixdate.
Returns:
str: The current UTC time as an IMF-fixdate,
e.g., 'Tue, 15 Nov 1994 12:45:26 GMT'.
"""
return dt_to_http(utcnow())
def dt_to_http(dt):
"""Converts a ``datetime`` instance to an HTTP date string.
Args:
dt (datetime): A ``datetime`` instance to convert, assumed to be UTC.
Returns:
str: An RFC 1123 date string, e.g.: "Tue, 15 Nov 1994 12:45:26 GMT".
"""
# Tue, 15 Nov 1994 12:45:26 GMT
return dt.strftime('%a, %d %b %Y %H:%M:%S GMT')
def http_date_to_dt(http_date, obs_date=False):
"""Converts an HTTP date string to a datetime instance.
Args:
http_date (str): An RFC 1123 date string, e.g.:
"Tue, 15 Nov 1994 12:45:26 GMT".
Keyword Arguments:
obs_date (bool): Support obs-date formats according to
RFC 7231, e.g.:
"Sunday, 06-Nov-94 08:49:37 GMT" (default ``False``).
Returns:
datetime: A UTC datetime instance corresponding to the given
HTTP date.
Raises:
ValueError: http_date doesn't match any of the available time formats
"""
if not obs_date:
# PERF(kgriffs): This violates DRY, but we do it anyway
# to avoid the overhead of setting up a tuple, looping
# over it, and setting up exception handling blocks each
# time around the loop, in the case that we don't actually
# need to check for multiple formats.
return strptime(http_date, '%a, %d %b %Y %H:%M:%S %Z')
time_formats = (
'%a, %d %b %Y %H:%M:%S %Z',
'%a, %d-%b-%Y %H:%M:%S %Z',
'%A, %d-%b-%y %H:%M:%S %Z',
'%a %b %d %H:%M:%S %Y',
)
# Loop through the formats and return the first that matches
for time_format in time_formats:
try:
return strptime(http_date, time_format)
except ValueError:
continue
# Did not match any formats
raise ValueError('time data %r does not match known formats' % http_date)
def to_query_str(params, comma_delimited_lists=True, prefix=True):
"""Converts a dictionary of parameters to a query string.
Args:
params (dict): A dictionary of parameters, where each key is
a parameter name, and each value is either a ``str`` or
something that can be converted into a ``str``, or a
list of such values. If a ``list``, the value will be
converted to a comma-delimited string of values
(e.g., 'thing=1,2,3').
comma_delimited_lists (bool): Set to ``False`` to encode list
values by specifying multiple instances of the parameter
(e.g., 'thing=1&thing=2&thing=3'). Otherwise, parameters
will be encoded as comma-separated values (e.g.,
'thing=1,2,3'). Defaults to ``True``.
prefix (bool): Set to ``False`` to exclude the '?' prefix
in the result string (default ``True``).
Returns:
str: A URI query string, including the '?' prefix (unless
`prefix` is ``False``), or an empty string if no params are
given (the ``dict`` is empty).
"""
if not params:
return ''
# PERF: This is faster than a list comprehension and join, mainly
# because it allows us to inline the value transform.
query_str = '?' if prefix else ''
for k, v in params.items():
if v is True:
v = 'true'
elif v is False:
v = 'false'
elif isinstance(v, list):
if comma_delimited_lists:
v = ','.join(map(str, v))
else:
for list_value in v:
if list_value is True:
list_value = 'true'
elif list_value is False:
list_value = 'false'
else:
list_value = str(list_value)
query_str += k + '=' + list_value + '&'
continue
else:
v = str(v)
query_str += k + '=' + v + '&'
return query_str[:-1]
def get_bound_method(obj, method_name):
"""Get a bound method of the given object by name.
Args:
obj: Object on which to look up the method.
method_name: Name of the method to retrieve.
Returns:
Bound method, or ``None`` if the method does not exist on
the object.
Raises:
AttributeError: The method exists, but it isn't
bound (most likely a class was passed, rather than
an instance of that class).
"""
method = getattr(obj, method_name, None)
if method is not None:
# NOTE(kgriffs): Ensure it is a bound method
if six.get_method_self(method) is None:
# NOTE(kgriffs): In Python 3 this code is unreachable
# because the above will raise AttributeError on its
# own.
msg = '{0} must be a bound method'.format(method)
raise AttributeError(msg)
return method
def _get_func_if_nested(callable):
"""Returns the function object of a given callable."""
if isinstance(callable, functools.partial):
return callable.func
if inspect.isroutine(callable):
return callable
return callable.__call__
def _get_argspec(func):
"""Returns an inspect.ArgSpec instance given a function object.
We prefer this implementation rather than the inspect module's getargspec
since the latter has a strict check that the passed function is an instance
of FunctionType. Cython functions do not pass this check, but they do implement
the `func_code` and `func_defaults` attributes that we need to produce an Argspec.
This implementation re-uses much of inspect.getargspec but removes the strict
check allowing interface failures to be raised as AttributeError.
(See also: https://github.com/python/cpython/blob/2.7/Lib/inspect.py)
"""
if inspect.ismethod(func):
func = func.im_func
args, varargs, varkw = inspect.getargs(func.func_code)
return inspect.ArgSpec(args, varargs, varkw, func.func_defaults)
def get_argnames(func):
"""Introspecs the arguments of a callable.
Args:
func: The callable to introspect
Returns:
A list of argument names, excluding *arg and **kwargs
arguments.
"""
if six.PY2:
func_object = _get_func_if_nested(func)
spec = _get_argspec(func_object)
# NOTE(kgriffs): inspect.signature does not include 'self',
# so remove it under PY2 if it is present.
args = [arg for arg in spec.args if arg != 'self']
else:
sig = inspect.signature(func)
args = [
param.name
for param in sig.parameters.values()
if param.kind not in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)
]
return args
def get_http_status(status_code, default_reason='Unknown'):
"""Gets both the http status code and description from just a code
Args:
status_code: integer or string that can be converted to an integer
default_reason: default text to be appended to the status_code
if the lookup does not find a result
Returns:
str: status code e.g. "404 Not Found"
Raises:
ValueError: the value entered could not be converted to an integer
"""
# sanitize inputs
try:
code = float(status_code) # float can validate values like "401.1"
code = int(code) # converting to int removes the decimal places
if code < 100:
raise ValueError
except ValueError:
raise ValueError('get_http_status failed: "%s" is not a '
'valid status code', status_code)
# lookup the status code
try:
return getattr(status_codes, 'HTTP_' + str(code))
except AttributeError:
# not found
return str(code) + ' ' + default_reason
| [
2,
15069,
2211,
416,
37927,
13200,
14504,
278,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
1... | 2.503413 | 3,955 |
from random import choice, sample
import numpy as np
from discord.ext.commands import Cog, Context, command
from xythrion.bot import Xythrion
from xythrion.utils import DefaultEmbed
class Randoms(Cog):
"""Picking a bunch of different things at random (games based on random chance)."""
@command(aliases=("roll",))
async def dice(self, ctx: Context, rolls: int = 1) -> None:
"""Rolls a die anywhere between 1 and 100."""
if 1 < rolls < 100:
s = round(np.sum(sample(range(1, 6), rolls)) / rolls, 3)
msg = f"Die was rolled {rolls} time(s). Average output: {s}"
else:
msg = "Integer gives for rolls is invalid."
embed = DefaultEmbed(ctx, description=msg)
await ctx.send(embed=embed)
@command(aliases=("pick",))
async def choose(self, ctx: Context, *choices) -> None:
"""Returns only one of the items that the user gives."""
embed = DefaultEmbed(ctx, description=f"A choice was made. Fate landed on {choice(choices)}.")
await ctx.send(embed=embed)
| [
6738,
4738,
1330,
3572,
11,
6291,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
36446,
13,
2302,
13,
9503,
1746,
1330,
327,
519,
11,
30532,
11,
3141,
198,
198,
6738,
2124,
5272,
81,
295,
13,
13645,
1330,
1395,
5272,
81,
295,
1... | 2.63145 | 407 |
import argparse
import csv
import os
import sys
import traceback
from pynus.modes import classes, forums
from pynus.utils import progress, webbrowser
VERSION = 'v0.2.2'
BUILD = '20210524'
# Check for positive integer in argument
if __name__ == '__main__':
main()
| [
11748,
1822,
29572,
198,
11748,
269,
21370,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
12854,
1891,
198,
6738,
279,
2047,
385,
13,
76,
4147,
1330,
6097,
11,
14216,
198,
6738,
279,
2047,
385,
13,
26791,
1330,
4371,
11,
3992,
40259... | 2.915789 | 95 |
n = float(input('massa da substancia em gramas: '))
meiavida = n
tempo = 0
while meiavida >= 0.05:
meiavida *= 0.5
tempo +=50
print('para q a substancia de massa {}g atinja 0.05g , levou {:.0f}s'.format(n,tempo)) | [
77,
796,
12178,
7,
15414,
10786,
22208,
64,
12379,
3293,
1192,
544,
795,
14599,
292,
25,
705,
4008,
198,
1326,
544,
85,
3755,
796,
299,
198,
11498,
7501,
796,
657,
198,
4514,
502,
544,
85,
3755,
18189,
657,
13,
2713,
25,
198,
220,
... | 2.2 | 100 |
from ..db_interface.read import ReadFromDatabase as ReadDb
from ..db_interface.write import WriteToDatabase as WriteDb
import os
__all__ = ["ReadDb", "WriteDb"]
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
__congress__ = os.path.join(__location__, "congress.pickle")
| [
6738,
11485,
9945,
62,
39994,
13,
961,
1330,
4149,
4863,
38105,
355,
4149,
43832,
198,
6738,
11485,
9945,
62,
39994,
13,
13564,
1330,
19430,
2514,
38105,
355,
19430,
43832,
198,
11748,
28686,
198,
198,
834,
439,
834,
796,
14631,
5569,
4... | 2.855856 | 111 |
list1 = []
n = int(input("Enter number of elements :"))
for i in range(0,n):
ele = int(input())
if ele > 0:
list1.append(ele)
print("The positive numbers are: ",list1, end='')
| [
4868,
16,
796,
17635,
220,
628,
198,
77,
796,
493,
7,
15414,
7203,
17469,
1271,
286,
4847,
1058,
48774,
628,
198,
198,
1640,
1312,
287,
2837,
7,
15,
11,
77,
2599,
198,
220,
9766,
796,
493,
7,
15414,
28955,
198,
220,
611,
9766,
187... | 2.567568 | 74 |
from mesh import Mesh1D
from mesh import QuadMesh
from fem import DofHandler
from function import Function
from function import Nodal
from fem import QuadFE
from assembler import Kernel
from assembler import Form
from fem import Basis
from assembler import Assembler
from solver import LinearSystem
from solver import LS
from plot import Plot
import numpy as np
from mesh import HalfEdge
import matplotlib.pyplot as plt
from scipy import linalg
from sksparse.cholmod import cholesky, cholesky_AAt, Factor
from sklearn.datasets import make_sparse_spd_matrix
import scipy.sparse as sp
from gmrf import modchol_ldlt
from gmrf import KLField
from gmrf import CovKernel
import TasmanianSG
'''
# Eigenvectors
oort = 1/np.sqrt(2)
V = np.array([[0.5, oort, 0, 0.5],
[0.5, 0, -oort, -0.5],
[0.5, -oort, 0, 0.5],
[0.5, 0, oort, -0.5]])
# Eigenvalues
d = np.array([4,3,2,0], dtype=float)
Lmd = np.diag(d)
# Covariance matrix
K = V.dot(Lmd.dot(V.T))
# Transformation
A = np.array([[1,2,3,4],
[2,4,6,8]], dtype=float)
# Nullspace of covariance
Vn = V[:,3][:,None]
for v in A:
u = v - Vn.dot(Vn.T.dot(v))
if not np.allclose(u,0):
u = u/linalg.norm(u)
Vn = np.append(Vn, u[:,None], axis=1)
Q,R = linalg.qr(A.T, mode='economic')
print(Q)
print(R)
Q,R = linalg.qr_insert(Vn,Lmdn,A[0,:], -1, which='col')
#Q,R = linalg.qr_insert(Q,R,A[1,:], -1, which='col')
Q.T.dot(A[1,:].T)
pp = A[1,:] - Q.dot(Q.T.dot(A[1,:]))
print(pp)
print('R\n',R)
print(A.dot(V))
Q,R = linalg.qr(A, mode='economic')
r = np.diag(R)
print(len(r[np.abs(r)>1e-13]))
print(Q,'\n',R)
'''
print("TasmanianSG version: {0:s}".format(TasmanianSG.__version__))
print("TasmanianSG license: {0:s}".format(TasmanianSG.__license__))
mesh = Mesh1D(resolution=(2,))
element = QuadFE(mesh.dim(),'Q1')
dofhandler = DofHandler(mesh, element)
phi_x = Basis(dofhandler, 'ux')
problems = [Form(1, test=phi_x, trial=phi_x)]
assembler = Assembler(problems, mesh)
assembler.assemble()
A = assembler.af[0]['bilinear'].get_matrix()
n = dofhandler.n_dofs()
b = np.ones((n,1))
mesh.mark_region('left',lambda x: np.abs(x)<1e-9)
mesh.mark_region('right',lambda x: np.abs(1-x)<1e-9)
print('A before constraint', A.toarray())
system = LS(phi_x)
system.add_dirichlet_constraint('left',1)
system.add_dirichlet_constraint('right',0)
system.set_matrix(sp.csr_matrix(A, copy=True))
system.set_rhs(b)
system.solve_system()
print('A after constraint\n', system.get_matrix().toarray())
print('column records\n', system.column_records)
print('rhs after constraint\n', system.get_rhs().toarray())
y = system.get_solution()
plot = Plot()
plot.line(y)
b = np.zeros((n,1))
system.set_matrix(sp.csr_matrix(A, copy=True))
system.set_rhs(b)
system.solve_system()
print('column records\n')
print([c.toarray() for c in system.column_records])
print('rhs after constraint\n', system.get_rhs().toarray())
y = system.get_solution()
plot = Plot()
plot.line(y)
| [
6738,
19609,
1330,
47529,
16,
35,
198,
6738,
19609,
1330,
20648,
37031,
198,
6738,
2796,
1330,
360,
1659,
25060,
198,
6738,
2163,
1330,
15553,
198,
6738,
2163,
1330,
399,
375,
282,
198,
6738,
2796,
1330,
20648,
15112,
198,
6738,
11156,
... | 2.229385 | 1,334 |
from dnnsvg.layers.layer import Layer
from dnnsvg.layers.fully_connected import FullyConnected
from dnnsvg.svgeables import Tensor3D
from dnnsvg.svgeables import Line
from dnnsvg.svgeables import Text
from dnnsvg.svgeables import ArcArrow
import dnnsvg.svgeables
import math
| [
6738,
288,
77,
5907,
45119,
13,
75,
6962,
13,
29289,
1330,
34398,
198,
6738,
288,
77,
5907,
45119,
13,
75,
6962,
13,
2759,
62,
15236,
1330,
40234,
13313,
276,
198,
6738,
288,
77,
5907,
45119,
13,
21370,
469,
2977,
1330,
309,
22854,
... | 2.875 | 96 |
from .base import *
SECRET_KEY = env('DJANGO_SECRET_KEY', default='0j=^hh2ehem36vg7l%j7g^1)d(h=s2na2#z6=812l^95+%wify')
DEBUG = env.bool('DJANGO_DEBUG', default=True)
| [
6738,
764,
8692,
1330,
1635,
198,
198,
23683,
26087,
62,
20373,
796,
17365,
10786,
35028,
1565,
11230,
62,
23683,
26087,
62,
20373,
3256,
4277,
11639,
15,
73,
28,
61,
12337,
17,
68,
4411,
2623,
45119,
22,
75,
4,
73,
22,
70,
61,
16,
... | 1.988372 | 86 |
from kivy.uix.label import Label
from kivy.uix.image import AsyncImage
from kivy.uix.widget import Widget
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.graphics import Color, Rectangle
#from kivy.loader import Loader
#Loader.loading_image = ''
import sys
import math
import urllib.parse
from .PresentationRenderer import presentation_renderer
from .Action import Action
| [
6738,
479,
452,
88,
13,
84,
844,
13,
18242,
1330,
36052,
198,
6738,
479,
452,
88,
13,
84,
844,
13,
9060,
1330,
1081,
13361,
5159,
198,
6738,
479,
452,
88,
13,
84,
844,
13,
42655,
1330,
370,
17484,
198,
6738,
479,
452,
88,
13,
84... | 3.102941 | 136 |
import numpy as np
import matplotlib.pyplot as plt
greyhounds = 500 # dog type
labs = 500 # dog type
grey_height = 28 + 4*np.random.randn(greyhounds) # average height plus random
lab_height = 24 + 4*np.random.randn(labs) # average height plus random
# vis plot will show us overlapping in the middle with about 50/50 probability
vis_out = plt.hist([grey_height, lab_height], stacked=True, color=['r', 'b'])
plt.show(vis_out) | [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
49502,
71,
3733,
796,
5323,
220,
1303,
3290,
2099,
198,
75,
8937,
796,
5323,
220,
1303,
3290,
2099,
198,
198,
49502,
62,
17015,
796... | 2.931973 | 147 |
from .learn import RepairModel
__all__ = ['RepairModel']
| [
6738,
764,
35720,
1330,
28912,
17633,
198,
198,
834,
439,
834,
796,
37250,
6207,
958,
17633,
20520,
198
] | 3.222222 | 18 |
'''
Created on Jan 14, 2012
@author: ntd
'''
import urllib2
from BeautifulSoup import *
from urlparse import urljoin
from sqlite3 import dbapi2 as sqlite
ignorewords=set(['the', 'of', 'to', 'and', 'a', 'in', 'is', 'it'])
# get entry-id: the first column content
# index page(url)
# get text (no tag) only from HTML content
# split words
# check if a page(url) is indexed or not
# add link between 2 pages(urls)
# crawling by BFS(depth = 2 by default) from list of page(pages)
# make score value to be in [0,1]
# below scores = {(url, score)}
if __name__ == '__main__':
'''
test_crawler = crawler('searchindex.db')
test_crawler.createindextables()
pages = ['http://www.titech.ac.jp']
test_crawler.crawl(pages)
'''
test_searcher = searcher('searchindex.db')
test_searcher.calculatepagerank()
test_searcher.query('schedule 2012') | [
7061,
6,
198,
41972,
319,
2365,
1478,
11,
2321,
198,
31,
9800,
25,
299,
8671,
198,
7061,
6,
198,
198,
11748,
2956,
297,
571,
17,
198,
6738,
23762,
50,
10486,
1330,
1635,
198,
6738,
19016,
29572,
1330,
19016,
22179,
198,
6738,
44161,
... | 2.324519 | 416 |
while True:
try:
EPR = 0
EHD = 0
INTRUSOS = 0
for i in range(int(input())):
a = input().split()
c = a[1]
if c == "EPR":
EPR += 1
elif c == "EHD":
EHD += 1
else:
INTRUSOS += 1
print("EPR:",EPR)
print("EHD:",EHD)
print("INTRUSOS:",INTRUSOS)
except EOFError:
break
| [
4514,
6407,
25,
201,
198,
220,
220,
220,
1949,
25,
201,
198,
220,
220,
220,
220,
220,
220,
220,
412,
4805,
796,
657,
201,
198,
220,
220,
220,
220,
220,
220,
220,
412,
10227,
796,
657,
201,
198,
220,
220,
220,
220,
220,
220,
220,... | 1.448378 | 339 |
"""
Test of importing a model from PySCes systems biology tool. Requires PySCes to have been installed.
R. Clewley, 2012
"""
from PyDSTool import *
from PyDSTool.Toolbox.PySCes_SBML import *
print("Modify the path variable to indicate where your PySCes models are...")
path = '/pysces/pscmodels/'
#fname = 'pysces_test_linear1.psc'
fname = 'pysces_test_branch1.psc'
#fname = 'pysces_test_pitcon.psc'
gen = get_pysces_model(path+fname, 'Vode')
gen.set(tdata=[0,10])
gen.set(algparams={'init_step': 0.03})
traj=gen.compute('test')
pts=traj.sample()
for x in pts.coordnames:
plot(pts['t'],pts[x])
| [
37811,
198,
14402,
286,
33332,
257,
2746,
422,
9485,
6173,
274,
3341,
17219,
2891,
13,
26848,
9485,
6173,
274,
284,
423,
587,
6589,
13,
198,
198,
49,
13,
3779,
86,
1636,
11,
2321,
198,
37811,
198,
6738,
9485,
35,
2257,
970,
1330,
16... | 2.493776 | 241 |
from typing import Union
from boa3.builtin import public
@public
| [
6738,
19720,
1330,
4479,
198,
198,
6738,
1489,
64,
18,
13,
18780,
259,
1330,
1171,
628,
198,
31,
11377,
198
] | 3.4 | 20 |
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from django.db import models, migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
220,
15069,
1584,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
220,
220,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
341... | 3.47343 | 207 |
import numpy as np
def linear_divide(x, a, b, c):
"""
Find the best two dividing point for a linear array made up with three different characters.
"""
n = len(x)
ab_penalty = (np.append(0, (np.array(x) == b).cumsum()) +
np.append((np.array(x) == a)[::-1].cumsum()[::-1], 0))
bc_penalty = (np.append(0, (np.array(x) == c).cumsum()) +
np.append((np.array(x) == b)[::-1].cumsum()[::-1], 0))
return (ab_penalty.argmin(), n - bc_penalty[::-1].argmin()), ab_penalty.min() + bc_penalty.min()
def circular_divide(x, a, b, c):
"""
Find the best three dividing point for a circular array made up with three different characters.
"""
n = len(x)
best_loc = None
best_penalty = float("inf")
for i in range(n):
loc, penalty = linear_divide(np.append(x[i:], x[:i]), a, b, c)
if loc[1] > loc[0] and penalty < best_penalty:
best_penalty = penalty
best_loc = (i, (loc[0] + i) % n, (loc[1] + i) % n)
return best_loc, best_penalty
def circular_divide2(x, a, b, c):
"""
Find the best three dividing point for a circular array made up with three different characters,
in either direction.
"""
n = len(x)
best_loc, best_penalty = circular_divide(x, a, b, c)
reverse_best_loc, reverse_best_penalty = circular_divide(x[::-1], a, b, c)
res = np.array([' ' * max(len(a), len(b), len(c))] * n)
if best_penalty <= reverse_best_penalty:
values_assigned = (a, b, c)
else:
best_loc = [n - i for i in reverse_best_loc[::-1]]
best_penalty = reverse_best_penalty
values_assigned = (b, a, c)
for i in range(3):
wrap_around_assign(res, best_loc[i], best_loc[(i + 1) % 3], values_assigned[i])
return res
def refine_labels(pseudotime, original_labels):
"""
Refine labels using pseudotime.
"""
if len(pseudotime) != len(original_labels):
raise ValueError("Lengths of pseudotime and labels must be the same.")
unique_labels = sorted(np.unique(original_labels))
if len(unique_labels) != 3:
raise ValueError("Only supports 3 classes.")
order = np.argsort(pseudotime)
refined_labels = np.array([' ' * max(len(i) for i in unique_labels)] * len(pseudotime))
refined_labels[order] = circular_divide2(np.array(original_labels)[order], *unique_labels)
return refined_labels | [
11748,
299,
32152,
355,
45941,
628,
198,
4299,
14174,
62,
7146,
485,
7,
87,
11,
257,
11,
275,
11,
269,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
9938,
262,
1266,
734,
27241,
966,
329,
257,
14174,
7177,
925,
510,
351,
11... | 2.294787 | 1,055 |
from flask import current_app as app
from flask import redirect
from flask import render_template as rt
from flask import request, send_from_directory, url_for
from htmlmin.main import minify
@app.route("/")
def create():
"""View to create a secret."""
return rt("create.html")
@app.route("/c")
def created():
"""View to see the link for the created secret."""
link, expires_on = request.args.get("link"), request.args.get("expires_on")
if not link or not expires_on:
return redirect(url_for("create"))
return rt("created.html", link=link, expires_on=expires_on)
@app.route("/r/<slug>")
def read(slug):
"""View to read a secret."""
return rt("read.html", slug=slug)
@app.errorhandler(404)
def not_found(error):
"""404 handler."""
return rt("404.html", error=error), 404
@app.route("/robots.txt")
def robots():
"""Robots handler."""
return send_from_directory(app.static_folder, request.path[1:])
@app.after_request
def html_minify(response):
"""Minify html responses."""
if response.content_type == u"text/html; charset=utf-8":
response.set_data(minify(response.get_data(as_text=True)))
return response
| [
6738,
42903,
1330,
1459,
62,
1324,
355,
598,
198,
6738,
42903,
1330,
18941,
198,
6738,
42903,
1330,
8543,
62,
28243,
355,
374,
83,
198,
6738,
42903,
1330,
2581,
11,
3758,
62,
6738,
62,
34945,
11,
19016,
62,
1640,
198,
198,
6738,
27711... | 2.752887 | 433 |
# Generated by Django 2.0.9 on 2018-11-25 15:36
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
24,
319,
2864,
12,
1157,
12,
1495,
1315,
25,
2623,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# -*- coding: utf-8 -*-
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
this file contain modifications of the original code, in hope new developers,
can have better understanding how gpt2 Language Modeling is constructed from scratch.
"""
"""PyTorch OpenAI GPT-2 model."""
import sys
import os
import math
import random
import numpy as np
import pickle
import time
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn import CrossEntropyLoss, MSELoss
class GPT2Config(nn.Module):
"""Configuration class to store the configuration of a `GPT2Model`.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model` or a configuration json file.
n_positions: Number of positional embeddings.
n_ctx: Size of the causal mask (usually same as n_positions).
n_embd: Dimensionality of the embeddings and hidden states.
n_layer: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
layer_norm_epsilon: epsilon to use in the layer norm layers
resid_pdrop: The dropout probabilitiy for all fully connected
layers in the embeddings, encoder, and pooler.
attn_pdrop: The dropout ratio for the attention
probabilities.
embd_pdrop: The dropout ratio for the embeddings.
initializer_range: The sttdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
def __init__(
self,
vocab_size_or_config_json_file=50257,
n_positions=1024, #1024, 512 768
n_ctx=1024, #1024, 512 768
n_embd=600, #768, 256, 512, 1024 --> make sure n_embd%n_head==0
n_layer=12, # 6, 32
n_head=10, #12, 8, 16
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-9,
initializer_range=0.02,
num_labels=1,
summary_type='cls_index',
summary_use_proj=True,
summary_activation=None,
summary_proj_to_labels=True,
summary_first_dropout=0.1,
**kwargs
):
"""Constructs GPT2Config.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model` or a configuration json file.
n_positions: Number of positional embeddings.
n_ctx: Size of the causal mask (usually same as n_positions).
n_embd: Dimensionality of the embeddings and hidden states.
n_layer: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
layer_norm_epsilon: epsilon to use in the layer norm layers
resid_pdrop: The dropout probabilitiy for all fully connected
layers in the embeddings, encoder, and pooler.
attn_pdrop: The dropout ratio for the attention
probabilities.
embd_pdrop: The dropout ratio for the embeddings.
initializer_range: The sttdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
super(GPT2Config, self).__init__(**kwargs)
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.num_labels = num_labels
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_first_dropout = summary_first_dropout
self.summary_proj_to_labels = summary_proj_to_labels
###
self.output_attentions = False
self.output_hidden_states = False
else:
raise ValueError(
"First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)"
)
@property
@property
@property
@property
def prune_conv1d_layer(layer, index, dim=1):
""" Prune a Conv1D layer (a model parameters) to keep only entries in index.
A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if dim == 0:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
class GPT2Model(nn.Module):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the last layer of the model.
**past**:
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
that contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
"""
def init_weights(self):
""" Initialize and prunes weights if needed. """
# Initialize weights
self.apply(self._init_weights)
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
class GPT2LMHeadModel(nn.Module):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
Indices are selected in ``[-1, 0, ..., config.vocab_size]``
All labels set to ``-1`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Language modeling loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**past**:
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
that contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
"""
def init_weights(self):
""" Initialize and prunes weights if needed. """
# Initialize weights
self.apply(self._init_weights)
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.lm_head,
self.transformer.wte)
def _tie_or_clone_weights(self, first_module, second_module):
""" Tie or clone module weights depending of weither we are using TorchScript or not
"""
first_module.weight = second_module.weight
if hasattr(first_module, 'bias') and first_module.bias is not None:
first_module.bias.data = torch.nn.functional.pad(
first_module.bias.data,
(0, first_module.weight.shape[0] - first_module.bias.shape[0]),
'constant',
0
) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
2864,
383,
4946,
20185,
4816,
46665,
290,
12905,
2667,
32388,
3457,
13,
1074,
13,
198,
2,
15069,
357,
66,
8,
2864,
11,
15127,
23929,
44680,
6234,
13,
220,
14... | 2.436757 | 5,218 |
show_rate_scale()
my_wine = Wine("X", "X", "X", "X", "X", 9, 8, 10, 9, 9, 9, 10, 10, "X")
print(round(my_wine.get_rating()))
| [
201,
198,
197,
201,
198,
12860,
62,
4873,
62,
9888,
3419,
201,
198,
201,
198,
1820,
62,
39002,
796,
20447,
7203,
55,
1600,
366,
55,
1600,
366,
55,
1600,
366,
55,
1600,
366,
55,
1600,
860,
11,
807,
11,
838,
11,
860,
11,
860,
11,
... | 1.901408 | 71 |
### ybyra v0.2
###
### simple exploratory Y-chromosome caller
###
### TP 06/21 (thomaz.pinotti AT gmail.com)
BAM = [line.rstrip() for line in open("ysamples.bam")] # list bam files
SITES = "/path/to/SNPindex/10Mb_callable/SNPindex_all-10Mb.angsd" # change as desired
MIN_DEPTH = "1" # minimum read depth at a region to call a variant
TRIM = "0" # number of bases to trim at read termini
MIN_MAPQ = "30" # minimum mapQ
MIN_BQ = "20" # minimum baseQ
ISOGG_NAME = "/path/to/SNPindex/SNP-names-hap.pos"
rule all:
input:
"no_hits.list"
rule angsd:
input:
"/path/to/{bam}.bam"
output:
mafs = temp("{bam}.mafs.gz"),
arg = temp("{bam}.arg"),
params:
sites = SITES,
mindepth = MIN_DEPTH,
trim = TRIM,
minmapq = MIN_MAPQ,
minbq = MIN_BQ,
outname = "{bam}",
shell:
"angsd -sites {params.sites} -DoMajorMinor 3 -doMaf 8 -doCounts 1 -trim {params.trim} -setMinDepthInd {params.mindepth} -minMapQ {params.minmapq} -minQ {params.minbq} -out {params.outname} -i {input} -r Y: "
rule filter:
input:
"{bam}.mafs.gz"
output:
temp("{bam}.pos")
shell:
"""zless -S {input} | awk '$5>0.9' | cut -f 2 > {output}"""
rule panel_match:
input:
"{bam}.pos"
output:
"{bam}.hits"
params:
isogg = ISOGG_NAME,
shell:
"""
awk 'NR==FNR{{a[$1];next}} ($1) in a' {input} {params.isogg} |
awk '{{print ($(NF-2) " "$0)}}'|
sort -k 1,1 -k 2,2 -u > {output}
"""
rule no_hits:
input:
expand("{bam}.hits", bam=BAM)
output:
"no_hits.list"
shell:
"""echo "List of bam files with 0 hits in used SNP index/filters:" > no_hits.list ;"""
"find *hits -size 0 -print >> no_hits.list ;"
"find *hits -size 0 -delete" | [
21017,
197,
88,
1525,
430,
410,
15,
13,
17,
198,
21017,
198,
21017,
2829,
39180,
2870,
575,
12,
28663,
418,
462,
24955,
198,
21017,
198,
21017,
24525,
9130,
14,
2481,
357,
400,
296,
1031,
13,
11635,
26380,
5161,
308,
4529,
13,
785,
... | 2.12987 | 770 |
import inspect
class Event(object):
"""
A simple implementation of the Observer-Pattern.
The user can specify an event signature upon inizializazion,
defined by kwargs in the form of argumentname=class (e.g. id=int).
The arguments' types are not checked in this implementation though.
Callables with a fitting signature can be added with += or removed with -=.
All listeners can be notified by calling the EventHook class with fitting
arguments.
>>> event = EventHook(id=int, data=dict)
>>> event += lambda id, data: print("%d %s" % (id, data))
>>> event(id=5, data={"foo": "bar"})
5 {'foo': 'bar'}
>>> event = EventHook(id=int)
>>> event += lambda wrong_name: None
Traceback (most recent call last):
...
ValueError: Listener must have these arguments: (id=int)
>>> event = EventHook(id=int)
>>> event += lambda id: None
>>> event(wrong_name=0)
Traceback (most recent call last):
...
ValueError: This EventHook must be called with these arguments: (id=int)
"""
| [
11748,
10104,
628,
198,
4871,
8558,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
317,
2829,
7822,
286,
262,
27058,
12,
47546,
13,
198,
220,
220,
220,
383,
2836,
460,
11986,
281,
1785,
9877,
2402,
287,
528,
498,
528... | 2.923497 | 366 |
from framework import *
root.title("Rotated text")
for rot in range(0, 360, 45):
canv.create_text(100, 200,
text = "Rotated %s deg" % rot,
angle = rot,
fill = random_color(),
font = ("Helvetica", 8),
anchor = W,
)
for rot in range(0, 360, 45):
canv.create_text(300, 200,
text = "Rotated %s deg" % rot,
angle = rot,
fill = random_color(),
font = ("Helvetica", 8),
anchor = E,
)
thread.start_new_thread(test, (canv, __file__, True))
root.mainloop()
| [
6738,
9355,
1330,
1635,
198,
198,
15763,
13,
7839,
7203,
24864,
515,
2420,
4943,
198,
198,
1640,
5724,
287,
2837,
7,
15,
11,
11470,
11,
4153,
2599,
198,
197,
5171,
85,
13,
17953,
62,
5239,
7,
3064,
11,
939,
11,
198,
197,
197,
5239... | 2.234742 | 213 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
from enum import Enum
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
24843,
362,
13,
15,
13789,
13,
198,
198,
6738,
33829,
1330,
2039,
388,
628
] | 4.032258 | 31 |
"""Change ISP deposit API method."""
from ibsng.handler.handler import Handler
class changeISPDeposit(Handler):
"""Change ISP deposit method class."""
def control(self):
"""Validate inputs after method setup.
:rtype: None
:return: None
"""
self.is_valid(self.isp_name, str)
self.is_valid(self.deposit_amount, float)
self.is_valid(self.comment, str)
def setup(self, isp_name, deposit_amount, comment=""):
"""Setup required parameters.
:param str isp_name: isp name
:param float deposit_amount: deposit amount
:param str comment: comment
:return: None
:rtype: None
"""
self.isp_name = isp_name
self.deposit_amount = deposit_amount
self.comment = comment
| [
37811,
19400,
33086,
14667,
7824,
2446,
526,
15931,
198,
6738,
24283,
82,
782,
13,
30281,
13,
30281,
1330,
32412,
628,
198,
4871,
1487,
1797,
5760,
538,
7434,
7,
25060,
2599,
198,
220,
220,
220,
37227,
19400,
33086,
14667,
2446,
1398,
5... | 2.416168 | 334 |
import numpy as np
from scipy.ndimage.measurements import label
from scipy.ndimage.morphology import distance_transform_edt
from scipy import ndimage as ndi
from skimage.feature import peak_local_max
from skimage.segmentation import random_walker, watershed
from scipy.signal import convolve2d
from skimage.transform import resize
from itertools import permutations
| [
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
358,
9060,
13,
1326,
5015,
902,
1330,
6167,
198,
6738,
629,
541,
88,
13,
358,
9060,
13,
24503,
1435,
1330,
5253,
62,
35636,
62,
276,
83,
198,
6738,
629,
541,
88,
1330,
29... | 3.495238 | 105 |
from ..base import BaseFeature
import numpy as np
import torch
from .. import register_feature
@register_feature("graph")
| [
6738,
11485,
8692,
1330,
7308,
38816,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
11485,
1330,
7881,
62,
30053,
628,
198,
31,
30238,
62,
30053,
7203,
34960,
4943,
198
] | 3.875 | 32 |
# -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Griffin-Lim phase reconstruction algorithm from mel spectrogram."""
import os
import librosa
import numpy as np
import soundfile as sf
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
def griffin_lim_lb(
mel_spec, stats_path, dataset_config, n_iter=32, output_dir=None, wav_name="lb"
):
"""Generate wave from mel spectrogram with Griffin-Lim algorithm using Librosa.
Args:
mel_spec (ndarray): array representing the mel spectrogram.
stats_path (str): path to the `stats.npy` file containing norm statistics.
dataset_config (Dict): dataset configuration parameters.
n_iter (int): number of iterations for GL.
output_dir (str): output directory where audio file will be saved.
wav_name (str): name of the output file.
Returns:
gl_lb (ndarray): generated wave.
"""
scaler = StandardScaler()
scaler.mean_, scaler.scale_ = np.load(stats_path)
mel_spec = np.power(10.0, scaler.inverse_transform(mel_spec)).T
mel_basis = librosa.filters.mel(
dataset_config["sampling_rate"],
n_fft=dataset_config["fft_size"],
n_mels=dataset_config["num_mels"],
fmin=dataset_config["fmin"],
fmax=dataset_config["fmax"],
)
mel_to_linear = np.maximum(1e-10, np.dot(np.linalg.pinv(mel_basis), mel_spec))
gl_lb = librosa.griffinlim(
mel_to_linear,
n_iter=n_iter,
hop_length=dataset_config["hop_size"],
win_length=dataset_config["win_length"] or dataset_config["fft_size"],
)
if output_dir:
output_path = os.path.join(output_dir, f"{wav_name}.wav")
sf.write(output_path, gl_lb, dataset_config["sampling_rate"], "PCM_16")
return gl_lb
class TFGriffinLim(tf.keras.layers.Layer):
"""Griffin-Lim algorithm for phase reconstruction from mel spectrogram magnitude."""
def __init__(self, stats_path, dataset_config):
"""Init GL params.
Args:
stats_path (str): path to the `stats.npy` file containing norm statistics.
dataset_config (Dict): dataset configuration parameters.
"""
super().__init__()
scaler = StandardScaler()
scaler.mean_, scaler.scale_ = np.load(stats_path)
self.scaler = scaler
self.ds_config = dataset_config
self.mel_basis = librosa.filters.mel(
self.ds_config["sampling_rate"],
n_fft=self.ds_config["fft_size"],
n_mels=self.ds_config["num_mels"],
fmin=self.ds_config["fmin"],
fmax=self.ds_config["fmax"],
) # [num_mels, fft_size // 2 + 1]
def save_wav(self, gl_tf, output_dir, wav_name):
"""Generate WAV file and save it.
Args:
gl_tf (tf.Tensor): reconstructed signal from GL algorithm.
output_dir (str): output directory where audio file will be saved.
wav_name (str): name of the output file.
"""
encode_fn = lambda x: tf.audio.encode_wav(x, self.ds_config["sampling_rate"])
gl_tf = tf.expand_dims(gl_tf, -1)
if not isinstance(wav_name, list):
wav_name = [wav_name]
if len(gl_tf.shape) > 2:
bs, *_ = gl_tf.shape
assert bs == len(wav_name), "Batch and 'wav_name' have different size."
tf_wav = tf.map_fn(encode_fn, gl_tf, dtype=tf.string)
for idx in tf.range(bs):
output_path = os.path.join(output_dir, f"{wav_name[idx]}.wav")
tf.io.write_file(output_path, tf_wav[idx])
else:
tf_wav = encode_fn(gl_tf)
tf.io.write_file(os.path.join(output_dir, f"{wav_name[0]}.wav"), tf_wav)
@tf.function(
input_signature=[
tf.TensorSpec(shape=[None, None, None], dtype=tf.float32),
tf.TensorSpec(shape=[], dtype=tf.int32),
]
)
def call(self, mel_spec, n_iter=32):
"""Apply GL algorithm to batched mel spectrograms.
Args:
mel_spec (tf.Tensor): normalized mel spectrogram.
n_iter (int): number of iterations to run GL algorithm.
Returns:
(tf.Tensor): reconstructed signal from GL algorithm.
"""
# de-normalize mel spectogram
mel_spec = tf.math.pow(10.0, mel_spec * self.scaler.scale_ + self.scaler.mean_)
inverse_mel = tf.linalg.pinv(self.mel_basis)
# [:, num_mels] @ [fft_size // 2 + 1, num_mels].T
mel_to_linear = tf.linalg.matmul(mel_spec, inverse_mel, transpose_b=True)
mel_to_linear = tf.cast(tf.math.maximum(1e-10, mel_to_linear), tf.complex64)
init_phase = tf.cast(
tf.random.uniform(tf.shape(mel_to_linear), maxval=1), tf.complex64
)
phase = tf.math.exp(2j * np.pi * init_phase)
for _ in tf.range(n_iter):
inverse = tf.signal.inverse_stft(
mel_to_linear * phase,
frame_length=self.ds_config["win_length"] or self.ds_config["fft_size"],
frame_step=self.ds_config["hop_size"],
fft_length=self.ds_config["fft_size"],
window_fn=tf.signal.inverse_stft_window_fn(self.ds_config["hop_size"]),
)
phase = tf.signal.stft(
inverse,
self.ds_config["win_length"] or self.ds_config["fft_size"],
self.ds_config["hop_size"],
self.ds_config["fft_size"],
)
phase /= tf.cast(tf.maximum(1e-10, tf.abs(phase)), tf.complex64)
return tf.signal.inverse_stft(
mel_to_linear * phase,
frame_length=self.ds_config["win_length"] or self.ds_config["fft_size"],
frame_step=self.ds_config["hop_size"],
fft_length=self.ds_config["fft_size"],
window_fn=tf.signal.inverse_stft_window_fn(self.ds_config["hop_size"]),
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
12131,
1855,
71,
42379,
4275,
67,
776,
2507,
457,
430,
72,
8,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,... | 2.188152 | 2,971 |
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import json
from collections import OrderedDict
import click
from six import string_types
from ..console import CONTEXT_SETTINGS, abort, echo_failure, echo_info, echo_success
from ...constants import get_root
from ...utils import parse_version_parts
from ....compat import JSONDecodeError
from ....utils import file_exists, read_file
REQUIRED_ATTRIBUTES = {
'agent_version',
'check',
'description',
'groups',
'integration',
'name',
'statuses',
}
@click.command(
    'service-checks',
    context_settings=CONTEXT_SETTINGS,
    short_help='Validate `service_checks.json` files'
)
def service_checks():
    """Validate all `service_checks.json` files.

    Walks every integration directory under the repository root, parses its
    `service_checks.json` when present, and reports each file that contains
    invalid JSON or an entry with missing/unknown attributes, a malformed
    `agent_version`, a wrongly typed field, or a duplicated `check`/`name`.
    Aborts (non-zero exit) if any file is invalid.
    """
    root = get_root()
    echo_info("Validating all service_checks.json files...")
    failed_checks = 0
    ok_checks = 0

    for check_name in sorted(os.listdir(root)):
        service_checks_file = os.path.join(root, check_name, 'service_checks.json')
        if not file_exists(service_checks_file):
            # Not every integration ships service checks.
            continue

        try:
            decoded = json.loads(read_file(service_checks_file).strip(), object_pairs_hook=OrderedDict)
        except JSONDecodeError as e:
            failed_checks += 1
            echo_info("{}/service_checks.json... ".format(check_name), nl=False)
            echo_failure("FAILED")
            echo_failure(' invalid json: {}'.format(e))
            continue

        # Uniqueness of `check` and `name` is enforced per file.
        unique_names = set()
        unique_checks = set()
        display_queue = []
        for service_check in decoded:
            _validate_service_check(service_check, unique_checks, unique_names, display_queue)

        if display_queue:
            failed_checks += 1
            # Display detailed info if file invalid
            echo_info("{}/service_checks.json... ".format(check_name), nl=False)
            echo_failure("FAILED")
            for display_func, message in display_queue:
                display_func(message)
        else:
            ok_checks += 1

    if ok_checks:
        echo_success("{} valid files".format(ok_checks))
    if failed_checks:
        echo_failure("{} invalid files".format(failed_checks))
        abort()


def _validate_service_check(service_check, unique_checks, unique_names, display_queue):
    """Validate one service-check entry, appending problems to `display_queue`.

    Each problem is appended as an `(echo_func, message)` tuple; an empty queue
    therefore means the entry is valid. `unique_checks` and `unique_names`
    accumulate values already seen in the current file so duplicates can be
    flagged.
    """
    # attributes are valid
    attrs = set(service_check)
    for attr in sorted(attrs - REQUIRED_ATTRIBUTES):
        display_queue.append((echo_failure, ' Attribute `{}` is invalid'.format(attr)))
    for attr in sorted(REQUIRED_ATTRIBUTES - attrs):
        display_queue.append((echo_failure, ' Attribute `{}` is required'.format(attr)))

    # agent_version must parse into exactly major.minor.patch
    agent_version = service_check.get('agent_version')
    version_parts = parse_version_parts(agent_version)
    if len(version_parts) != 3:
        if not agent_version:
            output = ' required non-null string: agent_version'
        else:
            output = ' invalid `agent_version`: {}'.format(agent_version)
        display_queue.append((echo_failure, output))

    # check: non-null string, unique within the file
    check = service_check.get('check')
    if not check or not isinstance(check, string_types):
        display_queue.append((echo_failure, ' required non-null string: check'))
    elif check in unique_checks:
        display_queue.append((echo_failure, ' {} is not a unique check'.format(check)))
    else:
        unique_checks.add(check)

    # description
    description = service_check.get('description')
    if not description or not isinstance(description, string_types):
        display_queue.append((echo_failure, ' required non-null string: description'))

    # groups: a list (empty is allowed)
    groups = service_check.get('groups')
    if groups is None or not isinstance(groups, list):
        display_queue.append((echo_failure, ' required list: groups'))

    # integration
    integration = service_check.get('integration')
    if integration is None or not isinstance(integration, string_types):
        display_queue.append((echo_failure, ' required non-null string: integration'))

    # name: non-null string, unique within the file
    name = service_check.get('name')
    if not name or not isinstance(name, string_types):
        display_queue.append((echo_failure, ' required non-null string: name'))
    elif name in unique_names:
        display_queue.append((echo_failure, ' {} is not a unique name'.format(name)))
    else:
        unique_names.add(name)

    # statuses: a non-empty list
    statuses = service_check.get('statuses')
    if not statuses or not isinstance(statuses, list):
        display_queue.append((echo_failure, ' required non empty list: statuses'))
| [
2,
357,
34,
8,
16092,
324,
519,
11,
3457,
13,
2864,
198,
2,
1439,
2489,
10395,
198,
2,
49962,
739,
257,
513,
12,
565,
682,
347,
10305,
3918,
5964,
357,
3826,
38559,
24290,
8,
198,
11748,
28686,
198,
11748,
33918,
198,
6738,
17268,
... | 2.052228 | 2,738 |
import json
from flask import request, current_app, make_response
from jsonschema import validate, ValidationError
import sqlalchemy as sa
import sqlalchemy.orm as orm
from flask_mrest.mrest import Application
from flask_mrest.models import SABase, SuperModel, UserModel, query_to_json
from flask_mrest.errorhandlers import page_not_found, unauthorized_page
import example_cfg
class CoinSA(SABase):
    """model for coin

    SQLAlchemy declarative model for the `coin` table: each coin records its
    metal, its mint, and the owning user (foreign key into `user`).
    """
    __tablename__ = "coin"
    # Surrogate integer primary key.
    id = sa.Column(sa.Integer, primary_key=True, doc="primary key")
    metal = sa.Column(sa.String(255), nullable=False)
    mint = sa.Column(sa.String(255), nullable=False)
    # Owning user; matches the UserSA primary key type.
    user_id = sa.Column(sa.String(120), sa.ForeignKey('user.id'), nullable=False)
    user = orm.relationship("UserSA")
# Register the model handlers exposed by this example application.
# NOTE(review): `CoinModel` is not defined in this module as shown --
# presumably a SuperModel subclass wrapping CoinSA that was lost from this
# excerpt; confirm against the full source.
example_cfg.MODELS = {'coin': CoinModel(),
                      'user': UserModel()}

if __name__ == '__main__':
    # run app directly for debugging
    Application(example_cfg).run(host='0.0.0.0', port=8002)
11748,
33918,
198,
6738,
42903,
1330,
2581,
11,
1459,
62,
1324,
11,
787,
62,
26209,
198,
6738,
44804,
684,
2395,
2611,
1330,
26571,
11,
3254,
24765,
12331,
198,
11748,
44161,
282,
26599,
355,
473,
198,
11748,
44161,
282,
26599,
13,
579,... | 2.794203 | 345 |
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
1334,
62,
30604,
1330,
3722,
198
] | 3.96 | 25 |
from sqlalchemy import ForeignKey
from app import db
class All_mixin(object):
    """ This mixin is included in all of the sqlalchemy models.

    It routes every model that inherits it to the secondary database
    registered under the "db2" bind key.
    """
    __bind_key__ = "db2"  # SQLALCHEMY_BINDS in config.py
| [
6738,
44161,
282,
26599,
1330,
8708,
9218,
198,
6738,
598,
1330,
20613,
198,
198,
4871,
1439,
62,
19816,
259,
7,
15252,
2599,
198,
220,
220,
220,
37227,
770,
5022,
259,
318,
3017,
287,
477,
286,
262,
44161,
282,
26599,
4981,
13,
198,
... | 2.776316 | 76 |
##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Free-busy-URL resources.
"""
__all__ = [
"FreeBusyURLResource",
]
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twext.python.log import Logger
from txweb2 import responsecode
from txweb2.dav.http import ErrorResponse
from txweb2.dav.util import joinURL
from txweb2.http import HTTPError
from txweb2.http import Response
from txweb2.http import StatusResponse
from txweb2.http_headers import MimeType
from txweb2.stream import MemoryStream
from twistedcaldav import caldavxml
from twistedcaldav.config import config
from twistedcaldav.customxml import calendarserver_namespace
from twistedcaldav.ical import Property
from twistedcaldav.resource import CalDAVResource, ReadOnlyNoCopyResourceMixIn
from twistedcaldav.scheduling_store.caldav.resource import deliverSchedulePrivilegeSet
from txdav.caldav.datastore.scheduling.cuaddress import LocalCalendarUser
from txdav.caldav.datastore.scheduling.freebusy import FreebusyQuery
from txdav.xml import element as davxml
from pycalendar.datetime import DateTime
from pycalendar.duration import Duration
from pycalendar.period import Period
log = Logger()
class FreeBusyURLResource (ReadOnlyNoCopyResourceMixIn, CalDAVResource):
"""
Free-busy URL resource.
Extends L{DAVResource} to provide free-busy URL functionality.
"""
    def __init__(self, parent):
        """
        Initialize the free-busy resource under its parent.

        @param parent: the parent resource of this one.
        """
        assert parent is not None
        # Share the parent's principal collections so this resource resolves
        # principals against the same hierarchy as its parent.
        CalDAVResource.__init__(self, principalCollections=parent.principalCollections())
        self.parent = parent
    def http_GET(self, request):
        """
        The free-busy URL GET method; GET and POST are handled identically,
        both delegating to _processFBURL.
        """
        return self._processFBURL(request)
    def http_POST(self, request):
        """
        The free-busy URL POST method; delegates to _processFBURL.
        """
        return self._processFBURL(request)
@inlineCallbacks
##
# ACL
##
| [
2235,
198,
2,
15069,
357,
66,
8,
5075,
12,
5539,
4196,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 3.055422 | 830 |
from .ManagerBase import ManagerBase
from .Document import Document
| [
6738,
764,
13511,
14881,
1330,
9142,
14881,
198,
6738,
764,
24941,
1330,
16854,
198
] | 4.857143 | 14 |
from dataclasses import dataclass
import pandas as pd
import xarray as xr
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from seaborn import utils
from seaborn.palettes import blend_palette
# Named hex colours used in the plots.
GREEN = "#679436"
RED = "#A01914"
BLUE = "#4F6DB8"
YELLOW = "#FABC3C"
GREY = "#C0C0C0"
COLOR = BLUE  # default plot colour

# Technology names treated as hydro power generation.
HYDRO_TECHS = ["hydro_run_of_river", "hydro_reservoir"]
# Scenario used as the comparison baseline.
BASE_SCENARIO = "continental-autarky-100-continental-grid"

# Unit conversion factors to terawatts.
GW_TO_TW = 1e-3
MW_TO_TW = 1e-6

# Column order of the aggregated scenario-results table.
COLUMN_HEADER = [
    "y-continental-scale-cost-eur",
    "y-national-scale-cost-eur",
    "y-cost-diff-eur",
    "y-cost-diff-relative",
    "y-supply-diff-relative",
    "y-wind-diff-relative",
    "y-balancing-diff-relative",
    "y-continental-scale-pv-gw",
    "y-national-scale-pv-gw",
    "y-continental-scale-wind-gw",
    "y-national-scale-wind-gw",
    "y-continental-scale-hydro-gw",
    "y-national-scale-hydro-gw",
    "y-continental-scale-biofuel-gw",
    "y-national-scale-biofuel-gw",
    "y-continental-scale-storage-gw",
    "y-national-scale-storage-gw",
    "y-continental-scale-storage-gwh",
    "y-national-scale-storage-gwh",
    "y-continental-scale-transmission-gwkm",
    "y-regional-scale-cost-eur",
    "y-regional-scale-pv-gw",
    "y-regional-scale-wind-gw",
    "y-regional-scale-hydro-gw",
    "y-regional-scale-biofuel-gw",
    "y-regional-scale-storage-gw",
    "y-regional-scale-storage-gwh",
    "y-regional-scale-transmission-gwkm"
]
@dataclass
if __name__ == "__main__":
plot_cost_variability(
path_to_large_scales=snakemake.input.large_scales,
path_to_small_scale=snakemake.input.small_scale,
path_to_scenario_results=snakemake.input.aggregate,
path_to_plot=snakemake.output[0]
)
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2124,
18747,
355,
2124,
81,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
355,
285,
489,
198,
11748,
2603,
29487... | 2.159204 | 804 |
# coding=utf-8

# Swedish thank-you text inserted into the receipt. This is user-facing
# runtime output (including the contact addresses) -- do not translate.
THANKS = "Tack för att du sålt på S:t Pers barnloppis, 30% av intäkterna skänks till välgörande ändamål genom Svenska kyrkan och diakonin i Uppsalas arbete med utsatta barnfamiljer. <br><br> Vid frågor kontakta oss på loppis.stper@gmail.com för allmänna frågor,<br>saljnummer.stper@gmail.com för säljrelaterade ärenden,<br>volontar.stper@gmail.com för volontärrelaterade ärenden.<br>Facebook: Barnloppis i S:t Pers kyrka"

# HTML template for the sales receipt. The placeholders [[RECEIPT_DATE]],
# [[RECEIPT_ITEMS]] and [[RECEIPT_TOTAL]] are substituted by the caller;
# [[THANKS]] is substituted immediately below via .replace().
TEMPLATE = """
<!doctype html>
<html>
<head>
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
    <title>Kvitto på dina sålda varor</title>
    <style>
    .invoice-box {
        max-width: 800px;
        margin: auto;
        padding: 30px;
        border: 1px solid #eee;
        box-shadow: 0 0 10px rgba(0, 0, 0, .15);
        font-size: 16px;
        line-height: 24px;
        font-family: \'Helvetica Neue\', \'Helvetica\', Helvetica, Arial, sans-serif;
        color: #555;
    }
    .invoice-box table {
        width: 100%;
        line-height: inherit;
        text-align: left;
    }
    .invoice-box table td {
        padding: 5px;
        vertical-align: top;
    }
    .invoice-box table tr td:nth-child(2) {
        text-align: right;
    }
    .invoice-box table tr.top table td {
        padding-bottom: 20px;
    }
    .invoice-box table tr.top table td.title {
        font-size: 45px;
        line-height: 45px;
        color: #333;
    }
    .invoice-box table tr.information table td {
        padding-bottom: 40px;
    }
    .invoice-box table tr.heading td {
        background: #eee;
        border-bottom: 1px solid #ddd;
        font-weight: bold;
    }
    .invoice-box table tr.details td {
        padding-bottom: 20px;
    }
    .invoice-box table tr.item td{
        border-bottom: 1px solid #eee;
    }
    .invoice-box table tr.item.last td {
        border-bottom: none;
    }
    .invoice-box table tr.total td:nth-child(2) {
        border-top: 2px solid #eee;
        font-weight: bold;
    }
    @media only screen and (max-width: 600px) {
        .invoice-box table tr.top table td {
            width: 100%;
            display: block;
            text-align: center;
        }
        .invoice-box table tr.information table td {
            width: 100%;
            display: block;
            text-align: center;
        }
    }
    /** RTL **/
    .rtl {
        direction: rtl;
        font-family: Tahoma, \'Helvetica Neue\', \'Helvetica\', Helvetica, Arial, sans-serif;
    }
    .rtl table {
        text-align: right;
    }
    .rtl table tr td:nth-child(2) {
        text-align: left;
    }
    </style>
</head>
<body>
    <div class="invoice-box">
        <table cellpadding="0" cellspacing="0">
            <tr class="top">
                <td colspan="2">
                    <table>
                        <tr>
                            <td class="title"><img src="https://loppis-time.firebaseapp.com/img/loppis.png" style="width:400px; max-width:300px;"></td>
                            <td>
                                Utskriven: [[RECEIPT_DATE]]<br>
                            </td>
                        </tr>
                    </table>
                </td>
            </tr>
            <tr class="information">
                <td colspan="2">
                    <table>
                        <tr>
                            <td> [[THANKS]] </td>
                        </tr>
                    </table>
                </td>
            </tr>
            <tr class="heading">
                <td>Vara</td>
                <td>Pris</td>
            </tr>
            [[RECEIPT_ITEMS]]
            [[RECEIPT_TOTAL]]
        </table>
    </div>
</body>
</html>
""".replace("[[THANKS]]", THANKS)
2,
19617,
28,
40477,
12,
23,
198,
198,
4221,
15154,
50,
796,
366,
51,
441,
277,
30570,
708,
7043,
264,
29090,
2528,
279,
29090,
311,
25,
83,
9467,
25203,
39590,
271,
11,
1542,
4,
1196,
493,
11033,
74,
759,
64,
1341,
11033,
77,
591... | 1.789952 | 2,090 |
"""Utils for ViViT-based regression models."""
from typing import Any
from absl import logging
import flax
import jax.numpy as jnp
import ml_collections
import numpy as np
from scenic.common_lib import debug_utils
from scenic.projects.vivit import model_utils as vivit_model_utils
# Re-export the ViViT frame initialisers so callers can import them from
# this module directly.
average_frame_initializer = vivit_model_utils.average_frame_initializer
central_frame_initializer = vivit_model_utils.central_frame_initializer
def initialise_from_train_state(
    config,
    train_state: Any,
    restored_train_state: Any,
    restored_model_cfg: ml_collections.ConfigDict,
    restore_output_proj: bool,
    vivit_transformer_key: str = 'Transformer',
    log_initialised_param_shapes: bool = True) -> Any:
  """Updates the train_state with data from restored_train_state.

  We do not reuse this from vivit/model_utils in order to handle position
  embeddings and input embeddings differently in init_posemb and
  init_embedding, respectively.

  This function is written to be used for 'fine-tuning' experiments. Here, we
  do some surgery to support larger resolutions (longer sequence length) in
  the transformer block, with respect to the learned pos-embeddings.

  Args:
    config: Configurations for the model being updated.
    train_state: A raw TrainState for the model.
    restored_train_state: A TrainState that is loaded with parameters/state of a
      pretrained model.
    restored_model_cfg: Configuration of the model from which the
      restored_train_state come from. Usually used for some asserts.
    restore_output_proj: If true, load the final output projection. Set to False
      if finetuning to a new dataset.
    vivit_transformer_key: The key used for storing the subtree in the
      parameters that keeps Transformer weights, that are supposed to be
      initialized from the given pre-trained model.
    log_initialised_param_shapes: If true, print tabular summary of all the
      variables in the model once they have been initialised.

  Returns:
    Updated train_state.
  """
  # Inspect and compare the parameters of the model with the init-model.
  params = flax.core.unfreeze(train_state.optimizer.target)
  # big_vision checkpoints expose the target by key rather than attribute.
  if config.init_from.get('checkpoint_format', 'scenic') == 'big_vision':
    restored_params = restored_train_state.optimizer['target']
  else:
    restored_params = restored_train_state.optimizer.target
  restored_params = flax.core.unfreeze(restored_params)

  # Start moving parameters, one-by-one and apply changes if needed.
  for m_key, m_params in restored_params.items():
    if m_key == 'output_projection':
      # The classifier head is only reused when staying on the same task.
      if restore_output_proj:
        params[m_key] = m_params
      else:
        logging.info('Not restoring output projection.')
        pass
    elif m_key == 'pre_logits':
      if config.model.representation_size is None:
        # We don't have representation_size in the new model, so let's ignore
        # if from the pretained model, in case it has it.
        # Note, removing the key from the dictionary is necessary to prevent
        # obscure errors from the Flax optimizer.
        params.pop(m_key, None)
      else:
        assert restored_model_cfg.model.representation_size
        params[m_key] = m_params
    elif m_key in {'Transformer', 'SpatialTransformer', 'TemporalTransformer'}:
      # Factorised models have separate spatial/temporal encoders; the
      # temporal one keeps its own key, everything else maps onto
      # `vivit_transformer_key`.
      key_to_load = vivit_transformer_key
      is_temporal = False
      if m_key == 'TemporalTransformer':
        key_to_load = m_key
        is_temporal = True
      for tm_key, tm_params in m_params.items():
        if tm_key == 'posembed_input':  # Might need resolution change.
          init_posemb(params[key_to_load], m_params, config, restored_model_cfg,
                      is_temporal=is_temporal)
        elif 'encoderblock' in tm_key:
          vivit_model_utils.init_encoderblock(
              params[key_to_load], m_params, tm_key, config)
        else:  # Other parameters of the Transformer encoder.
          params[key_to_load][tm_key] = tm_params
    elif m_key == 'embedding':
      # Input (patch) embedding may need its temporal axis collapsed.
      init_embedding(params, m_params, config)
    else:
      if m_key in train_state.optimizer.target:
        params[m_key] = m_params
      else:
        logging.info('Skipping %s. In restored model but not in target', m_key)

  if log_initialised_param_shapes:
    logging.info('Parameter summary after initialising from train state')
    debug_utils.log_param_shapes(params)
  return train_state.replace(
      optimizer=train_state.optimizer.replace(target=flax.core.freeze(params)))
def init_posemb(to_params, from_params, config, restored_model_cfg,
                is_temporal):
  """Initializes positional embeddings, resizing them when needed.

  Copies the positional embeddings from `from_params` into `to_params`
  (in place). When the restored embeddings have a different sequence length
  than the target (e.g. fine-tuning at a different resolution or number of
  frames), they are interpolated or tiled according to
  `config.init_from.positional_embed_size_change`.

  Args:
    to_params: Parameter (sub)tree of the target model; modified in place.
    from_params: Parameter (sub)tree restored from the pretrained model.
    config: Configuration of the target model.
    restored_model_cfg: Configuration of the model `from_params` comes from.
    is_temporal: Whether these are the temporal (as opposed to spatial)
      positional embeddings of a factorised model.
  """
  if config.init_from.restore_positional_embedding:
    posemb = to_params['posembed_input']['pos_embedding']
    restored_posemb = from_params['posembed_input']['pos_embedding']
    if restored_posemb.shape != posemb.shape:
      # Rescale the grid of pos embeddings.
      # Default parameter shape is (1, N, 768).
      logging.info('Adapting positional embeddings from %s to %s',
                   restored_posemb.shape, posemb.shape)
      ntok = posemb.shape[1]
      if restored_model_cfg.model.classifier == 'token':
        # The first token is the CLS token.
        cls_tok = restored_posemb[:, :1]
        restored_posemb_grid = restored_posemb[0, 1:]
      else:
        cls_tok = restored_posemb[:, :0]  # Empty slice keeps concat uniform.
        restored_posemb_grid = restored_posemb[0]
      if config.model.classifier == 'token':
        ntok -= 1
      if ((config.model.classifier == 'token') !=
          (restored_model_cfg.model.classifier == 'token')):
        logging.warning('Only one of target and restored model uses '
                        'classification token')
      # BUGFIX: the original compared the array itself to `ntok`
      # (`restored_posemb_grid == ntok`), which is an elementwise comparison,
      # not a length check. Compare lengths, mirroring the `if` below.
      if len(restored_posemb_grid) == ntok:
        # In case the following `if` is not going to run, add the batch dim
        # so the result matches the target parameter shape.
        restored_posemb = restored_posemb_grid[None, ...]
      if len(restored_posemb_grid) != ntok:  # We need a resolution change.
        if is_temporal:
          if config.init_from.restore_temporal_embedding_for_goal:
            restored_posemb_grid = (
                vivit_model_utils.interpolate_1d_positional_embeddings(
                    restored_posemb_grid, ntok))
          else:
            # Leave one slot free: the goal embedding is appended below.
            restored_posemb_grid = (
                vivit_model_utils.interpolate_1d_positional_embeddings(
                    restored_posemb_grid, ntok - 1))
        elif config.init_from.positional_embed_size_change == 'resize':
          restored_posemb_grid = (
              vivit_model_utils.interpolate_positional_embeddings(
                  restored_posemb_grid, ntok))
        elif config.init_from.positional_embed_size_change == 'tile':
          restored_posemb_grid = (
              vivit_model_utils.tile_positional_embeddings(
                  restored_posemb_grid, ntok))
        elif config.init_from.positional_embed_size_change == 'resize_tile':
          # Interpolate within a single frame, then tile across frames.
          temp_encoding = config.model.temporal_encoding_config
          if temp_encoding.method == 'temporal_sampling':
            tokens_per_frame = int(ntok / temp_encoding.n_sampled_frames)
          elif temp_encoding.method == '3d_conv':
            n_frames = (
                config.dataset_configs.num_frames //
                config.model.patches.size[2])
            tokens_per_frame = ntok // n_frames
          else:
            raise AssertionError(
                f'Unknown temporal encoding {temp_encoding.method}')
          restored_posemb_grid = (
              vivit_model_utils.interpolate_positional_embeddings(
                  restored_posemb_grid, tokens_per_frame))
          restored_posemb_grid = restored_posemb_grid[0]
          restored_posemb_grid = vivit_model_utils.tile_positional_embeddings(
              restored_posemb_grid, ntok)
        else:
          raise AssertionError(
              'Unknown positional embedding size changing method')
        # Attach the CLS token again.
        if config.model.classifier == 'token':
          restored_posemb = jnp.array(
              np.concatenate([cls_tok, restored_posemb_grid], axis=1))
        else:
          restored_posemb = restored_posemb_grid
      if is_temporal and not config.init_from.restore_temporal_embedding_for_goal:
        logging.info('Not restoring temporal embedding for goal')
        # Keep the target's (randomly initialised) goal embedding as the
        # final token.
        restored_posemb = jnp.array(
            np.concatenate(
                [restored_posemb,
                 to_params['posembed_input']['pos_embedding'][:, -1:]], axis=1))
    to_params['posembed_input']['pos_embedding'] = restored_posemb
  else:
    logging.info('Not restoring positional encodings from pretrained model')
def init_embedding(to_params, from_params, config):
  """Initialise the input (patch) embedding from a checkpoint, in place.

  When the restored kernel has extra temporal frames, they are collapsed to
  a single frame according to
  `config.model.temporal_encoding_config.kernel_init_method`.
  """
  if not config.init_from.get('restore_input_embedding', True):
    logging.info('Not restoring input embedding parameters')
    return

  target_kernel = to_params['embedding']['kernel']
  source_kernel = from_params['kernel']
  source_bias = from_params['bias']

  if target_kernel.shape != source_kernel.shape:
    # Kernel dimensions are [t, h, w, c_in, c_out]; only the temporal
    # (first) axis may differ between the checkpoint and the target.
    assert (target_kernel.shape[1:] == source_kernel.shape
            or target_kernel.shape[1:] == source_kernel.shape[1:]), (
                'All filter dimensions besides the temporal dimension should '
                'be equal. {} vs {}'.format(
                    target_kernel.shape, source_kernel.shape))

    how = config.model.temporal_encoding_config.kernel_init_method
    if how == 'reduce_mean_initializer':
      logging.info('Initializing 2D input kernel with mean temporal frame.')
      source_kernel = np.expand_dims(np.mean(source_kernel, axis=0), axis=0)
    elif how == 'reduce_sum_initializer':
      logging.info(
          'Initializing 2D input kernel with sum of temporal frames.')
      source_kernel = np.expand_dims(np.sum(source_kernel, axis=0), axis=0)
    elif how == 'last_frame_initializer':
      logging.info('Initializing 2D input kernel with last temporal frame.')
      source_kernel = source_kernel[:1]
    else:
      raise AssertionError(
          'Unknown input kernel initialization {}'.format(how))

  to_params['embedding']['kernel'] = source_kernel
  to_params['embedding']['bias'] = source_bias
| [
37811,
18274,
4487,
329,
16049,
38432,
51,
12,
3106,
20683,
4981,
526,
15931,
198,
198,
6738,
19720,
1330,
4377,
198,
198,
6738,
2352,
75,
1330,
18931,
198,
11748,
781,
897,
198,
11748,
474,
897,
13,
77,
32152,
355,
474,
37659,
198,
1... | 2.485153 | 4,277 |
#!/usr/bin/env python
################################################################################
# Build script for slick-reporter
################################################################################
__author__ = 'Jason Corbett'

# Bootstrap setuptools via the (legacy) distribute shim so the build works on
# machines without setuptools pre-installed.
import distribute_setup
distribute_setup.use_setuptools()

from setuptools import setup, find_packages

setup(
    name="slick-reporter",
    description="A command line utility that can run other commands and turn their output into slick results.",
    # Version suffix is read from build.txt so CI can stamp build numbers
    # without editing this file.
    version="1.0" + open("build.txt").read(),
    license="License :: OSI Approved :: Apache Software License",
    long_description=open('README.txt').read(),
    packages=find_packages(exclude=['distribute_setup']),
    package_data={'': ['*.txt', '*.rst', '*.html']},
    include_package_data=True,
    install_requires=['slickqa>=2.0.27',],
    author="Slick Developers",
    url="http://code.google.com/p/slickqa",
    # Installs a `slick-reporter` console command pointing at slickreporter.main.
    entry_points={
        'console_scripts': ['slick-reporter = slickreporter:main',],
    }
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
29113,
29113,
14468,
198,
2,
10934,
4226,
329,
29395,
12,
260,
26634,
198,
29113,
29113,
14468,
198,
198,
834,
9800,
834,
796,
705,
26497,
2744,
48138,
6,
198,
198,
11748,
14983,
62,
4... | 3.294498 | 309 |
from collections import OrderedDict
import importlib
import inspect
import six
import warnings
calculators = OrderedDict() # active calculators
# all the calculators including those cannot be activated
# (not disclosed to outside, but used by make_details_md.py)
all_calculators = OrderedDict()
def check_signature(func):
    """Check cost calculator's signature

    A valid cost calculator is a callable that accepts exactly the following
    parameters (names may differ).

    - func
    - in_data
    - **kwargs
    """
    if not callable(func):
        return False
    if six.PY2:
        # getargspec reports positional args separately from **kwargs.
        spec = inspect.getargspec(func)
        return (len(spec.args) == 2 and spec.varargs is None
                and spec.keywords is not None)
    params = inspect.signature(func).parameters
    if len(params) != 3:
        return False
    # The third (last) parameter has to be a **kwargs catch-all.
    last_param = list(params.values())[-1]
    return last_param.kind == inspect.Parameter.VAR_KEYWORD
def register(func):
    """A decorator to register cost calculator function (internal use only)

    This registers the function as a cost calculator function for the specified
    type of Chainer Function.

    You can specify the target Chainer Function by the following ways.

    (1) Type of Chainer Function (FunctionNode)
    You can directly pass the type object to the decorator.
    If the type may not exist in some Chainer versions, try the second way.

    (2) Fully qualified name of a Chainer Function.
    chainer-computational-cost tries to import it and registers the cost
    calculator for the Function.
    In case the specified Chainer Function is not found, for example the
    current chainer version doesn't support that Function yet,
    the cost calculator will not be registered.
    For example, `"chainer.functions.activation.relu.ReLU"`

    args:
        func: Chainer Function that you want the cost calculator function to be
            registered for.
    """
    if type(func) is str:
        func_name = func
        try:
            # F.activation.relu.ReLU -> ['F.activation.relu', 'ReLU']
            func_module, func_class = func.rsplit('.', 1)
            m = importlib.import_module(func_module)
            func = getattr(m, func_class)
        except ImportError:
            # Module missing in this Chainer version; nothing is registered.
            func = None
        except AttributeError:
            # Module exists but the named class does not.
            func = None
    else:
        func_name = func.__name__
    # NOTE(review): `reg` -- the inner decorator that presumably records the
    # calculator in `calculators`/`all_calculators` -- is not defined in this
    # excerpt; the body appears truncated. Confirm against the full source.
    return reg
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
1330,
8019,
198,
11748,
10104,
198,
11748,
2237,
198,
11748,
14601,
198,
198,
9948,
3129,
2024,
796,
14230,
1068,
35,
713,
3419,
220,
220,
220,
220,
1303,
4075,
5204,
2024,
198,
198,
... | 2.715909 | 880 |
import traceback
from PySide2.QtCore import QFile
from PySide2.QtWidgets import QApplication
| [
11748,
12854,
1891,
198,
6738,
9485,
24819,
17,
13,
48,
83,
14055,
1330,
1195,
8979,
198,
6738,
9485,
24819,
17,
13,
48,
83,
54,
312,
11407,
1330,
1195,
23416,
628,
198
] | 3.064516 | 31 |
import logging
import requests
import time
from datetime import datetime
import gevent
from gevent.event import Event
from gevent.queue import Queue
from ..base.service import BaseService
INITIAL_TIMEOUT = 5
MAX_TIMEOUT = 600
WORKER_COUNT = 20
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
11748,
7007,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
11748,
4903,
1151,
198,
6738,
4903,
1151,
13,
15596,
1330,
8558,
198,
6738,
4903,
1151,
13,
36560,
1330,
4670,
518,
198,
198,
6738,
1148... | 3.235955 | 89 |
import re
from django import forms
from dal import autocomplete
from django.forms.formsets import BaseFormSet
from data_wrapper.models import LSTSData, SeqData, ResFinderData, SeqTracking, OLN, CultureData
| [
11748,
302,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
288,
282,
1330,
1960,
42829,
6677,
198,
6738,
42625,
14208,
13,
23914,
13,
23914,
1039,
1330,
7308,
8479,
7248,
198,
6738,
1366,
62,
48553,
13,
27530,
1330,
406,
2257,
50,
6601... | 3.185714 | 70 |
import os
def check_path(path: str):
	"""Check if a path exists, if some directory is missing it creates it.

	Args:
		path (str): Path to a file; every missing directory component of it
			is created (the final component is treated as the file name).
	"""
	directory = os.path.split(path)[0]  # Drop the file name, keep the dirs
	# os.makedirs creates every missing intermediate directory in one call;
	# exist_ok makes repeated calls (or pre-existing dirs) a no-op. This
	# replaces the hand-rolled split/accumulate/mkdir loop.
	if directory and not os.path.isdir(directory):
		os.makedirs(directory, exist_ok=True)
def accumulate_list(my_list: (list, tuple), separator: str="") -> list:
	"""["a", "b", "c"] -> ["a", "ab", "abc"]

	Each output element is the previous output joined to the next input
	element with `separator`; the first element is kept unchanged.
	"""
	accumulated = []
	running = None
	for element in my_list:
		# The first element starts the chain; later ones are glued onto it.
		running = element if running is None else f"{running}{separator}{element}"
		accumulated.append(running)
	return accumulated
def remove_comments(string: str, comment_char: str="#") -> str:
	"""Remove comments from strings.

	Note:
		Iterates through the given string tracking whether the current
		position is inside a quoted string (a quote preceded by a backslash
		does not toggle that state). The first `comment_char` found outside
		a string marks the start of the comment; everything from there on is
		cut off.

	Args:
		string (str): A string to remove the comments from
		comment_char: (str, optional="#"): Which character represents a comment

	Returns:
		The same string without comments.
	"""
	in_string = False  # While inside a quoted string, comment chars are ignored
	quote = ""  # Which quote opened the string: a string opened with ' cannot be closed by "
	for e, char in enumerate(string):  # Iterate through the string
		if char == "'" or char == '"':  # Current character is a quote
			if e != 0:  # A quote in the first position can never be escaped
				if string[e -1] == "\\":  # Preceded by a backslash: escaped quote
					# NOTE(review): a literal backslash right before a closing
					# quote (e.g. "x\\") is also treated as an escape -- edge
					# case inherited from the original logic.
					continue  # Escaped quotes don't toggle string state
			if quote == char or not in_string:  # Closing quote, or opening a new string
				quote = char  # Remember which quote character is active
				in_string = not in_string  # Toggle in/out of string
		if char == comment_char and not in_string:  # Comment start outside any string
			string = string[:e]  # Cut the string here
			break  # Done
	return string  # Return the string
| [
11748,
28686,
198,
198,
4299,
2198,
62,
6978,
7,
6978,
25,
965,
2599,
198,
197,
37811,
9787,
611,
257,
3108,
7160,
11,
611,
617,
8619,
318,
4814,
340,
8075,
340,
13,
198,
197,
37811,
198,
197,
361,
407,
28686,
13,
6978,
13,
9409,
... | 3.188802 | 768 |
from flask import Flask
from app.api.controllers import api
# Define the WSGI application object
app = Flask(__name__)
# Configurations
app.config.from_object('config')
# Register blueprint(s)
app.register_blueprint(api)
# global routes
@app.errorhandler(400)
@app.errorhandler(404)
@app.errorhandler(405)
| [
6738,
42903,
1330,
46947,
198,
6738,
598,
13,
15042,
13,
3642,
36667,
1330,
40391,
198,
198,
2,
2896,
500,
262,
25290,
18878,
3586,
2134,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
198,
2,
17056,
20074,
198,
1324,
13,
11250,
... | 3.088235 | 102 |
"""Has binance enums."""
from typing import Final, Literal
class BinanceOrderType:
    """Binance order types.

    Plain string constants annotated `Final[Literal[...]]` so type checkers
    treat each attribute as its exact string value.
    """
    LIMIT: Final[Literal["LIMIT"]] = "LIMIT"
    MARKET: Final[Literal["MARKET"]] = "MARKET"
    STOP_LOSS: Final[Literal["STOP_LOSS"]] = "STOP_LOSS"
    STOP_LOSS_LIMIT: Final[Literal["STOP_LOSS_LIMIT"]] = "STOP_LOSS_LIMIT"
    TAKE_PROFIT: Final[Literal["TAKE_PROFIT"]] = "TAKE_PROFIT"
    TAKE_PROFIT_LIMIT: Final[Literal["TAKE_PROFIT_LIMIT"]
                             ] = "TAKE_PROFIT_LIMIT"
    LIMIT_MAKER: Final[Literal["LIMIT_MAKER"]] = "LIMIT_MAKER"
class BinanceOrderStatus:
    """Binance order statuses (string constants typed as exact literals)."""
    NEW: Final[Literal["NEW"]] = "NEW"
    PARTIALLY_FILLED: Final[Literal["PARTIALLY_FILLED"]] = "PARTIALLY_FILLED"
    FILLED: Final[Literal["FILLED"]] = "FILLED"
    CANCELED: Final[Literal["CANCELED"]] = "CANCELED"
    PENDING_CANCEL: Final[Literal["PENDING_CANCEL"]] = "PENDING_CANCEL"
    REJECTED: Final[Literal["REJECTED"]] = "REJECTED"
    EXPIRED: Final[Literal["EXPIRED"]] = "EXPIRED"
class BinanceOrderSide:
    """Binance order sides (string constants typed as exact literals)."""
    BUY: Final[Literal["BUY"]] = "BUY"
    SELL: Final[Literal["SELL"]] = "SELL"
class BinanceOrderResponseType:
    """Binance order response type (level of detail of an order response)."""
    ACK: Final[Literal["ACK"]] = "ACK"
    RESULT: Final[Literal["RESULT"]] = "RESULT"
    FULL: Final[Literal["FULL"]] = "FULL"
class BinanceTimeInForce:
    """Binance time in force (GTC/IOC/FOK string constants)."""
    GTC: Final[Literal["GTC"]] = "GTC"
    IOC: Final[Literal["IOC"]] = "IOC"
    FOK: Final[Literal["FOK"]] = "FOK"
class BinanceSymbolPermission:
    """Binance symbol permissions (string constants typed as exact literals)."""
    SPOT: Final[Literal["SPOT"]] = "SPOT"
    MARGIN: Final[Literal["MARGIN"]] = "MARGIN"
class BinanceSymbolStatus:
"""Binance symbol statuses."""
TRADING: Final[Literal["TRADING"]] = "TRADING"
PRE_TRADING: Final[Literal["PRE_TRADING"]] = "PRE_TRADING"
POST_TRADING: Final[Literal["POST_TRADING"]] = "POST_TRADING"
END_OF_DAY: Final[Literal["END_OF_DAY"]] = "END_OF_DAY"
HALT: Final[Literal["HALT"]] = "HALT"
AUCTION_MATCH: Final[Literal["AUCTION_MATCH"]] = "AUCTION_MATCH"
BREAK: Final[Literal["BREAK"]] = "BREAK"
class BinanceRateLimitInterval:
"""Binance rate limit intervals."""
SECOND: Final[Literal["SECOND"]] = "SECOND"
MINUTE: Final[Literal["MINUTE"]] = "MINUTE"
DAY: Final[Literal["DAY"]] = "DAY"
class BinanceRateLimitType:
"""Binance rate limit types."""
REQUEST_WEIGHT: Final[Literal["REQUEST_WEIGHT"]] = "REQUEST_WEIGHT"
ORDERS: Final[Literal["ORDERS"]] = "ORDERS"
RAW_REQUESTS: Final[Literal["RAW_REQUESTS"]] = "RAW_REQUESTS"
class BinanceSymbolFilterType:
"""Binance symbol filter types."""
PRICE: Final[Literal["PRICE_FILTER"]] = "PRICE_FILTER"
PERCENT_PRICE: Final[Literal["PERCENT_PRICE"]] = "PERCENT_PRICE"
LOT_SIZE: Final[Literal["LOT_SIZE"]] = "LOT_SIZE"
MIN_NOTIONAL: Final[Literal["MIN_NOTIONAL"]] = "MIN_NOTIONAL"
ICEBERG_PARTS: Final[Literal["ICEBERG_PARTS"]] = "ICEBERG_PARTS"
MARKET_LOT_SIZE: Final[Literal["MARKET_LOT_SIZE"]] = "MARKET_LOT_SIZE"
MAX_NUM_ORDERS: Final[Literal["MAX_NUM_ORDERS"]] = "MAX_NUM_ORDERS"
MAX_NUM_ALGO_ORDERS: Final[Literal["MAX_NUM_ALGO_ORDERS"]
] = "MAX_NUM_ALGO_ORDERS"
MAX_NUM_ICEBERG_ORDERS: Final[Literal["MAX_NUM_ICEBERG_ORDERS"]
] = "MAX_NUM_ICEBERG_ORDERS"
MAX_POSITION: Final[Literal["MAX_POSITION"]] = "MAX_POSITION"
class BinanceExchangeFilterType:
"""Binance exchange filter types."""
MAX_NUM_ORDERS: Final[Literal["EXCHANGE_MAX_NUM_ORDERS"]
] = "EXCHANGE_MAX_NUM_ORDERS"
MAX_NUM_ALGO_ORDERS: Final[Literal["EXCHANGE_MAX_ALGO_ORDERS"]
] = "EXCHANGE_MAX_ALGO_ORDERS"
class BinanceAccountType:
"""Binance account types."""
SPOT: Final[Literal["SPOT"]] = "SPOT"
class BinanceErrorCode:
"""Binance error codes."""
UNKNOWN: Final[Literal[-1000]] = -1000
DISCONNECTED: Final[Literal[-1001]] = -1001
UNAUTHORIZED: Final[Literal[-1002]] = -1002
TOO_MANY_REQUESTS: Final[Literal[-1003]] = -1003
SERVER_BUSY: Final[Literal[-1004]] = -1004
UNEXPECTED_RESP: Final[Literal[-1006]] = -1006
TIMEOUT: Final[Literal[-1007]] = -1007
UNKNOWN_ORDER_COMPOSITION: Final[Literal[-1014]] = -1014
TOO_MANY_ORDERS: Final[Literal[-1015]] = -1015
SERVICE_SHUTTING_DOWN: Final[Literal[-1016]] = -1016
UNSUPPORTED_OPERATION: Final[Literal[-1020]] = -1020
INVALID_TIMESTAMP: Final[Literal[-1021]] = -1021
INVALID_SIGNATURE: Final[Literal[-1022]] = -1022
# TODO -1099
# 11xx - 2xxx Request issues
ILLEGAL_CHARS: Final[Literal[-1100]] = -1100
TOO_MANY_PARAMETERS: Final[Literal[-1101]] = -1101
MANDATORY_PARAM_EMPTY_OR_MALFORMED: Final[Literal[-1102]] = -1102
UNKNOWN_PARAM: Final[Literal[-1103]] = -1103
UNREAD_PARAMETERS: Final[Literal[-1104]] = -1104
PARAM_EMPTY: Final[Literal[-1105]] = -1105
PARAM_NOT_REQUIRED: Final[Literal[-1106]] = -1106
BAD_PRECISION: Final[Literal[-1111]] = -1111
NO_DEPTH: Final[Literal[-1112]] = -1112
TIF_NOT_REQUIRED: Final[Literal[-1114]] = -1114
INVALID_TIF: Final[Literal[-1115]] = -1115
INVALID_ORDER_TYPE: Final[Literal[-1116]] = -1116
INVALID_SIDE: Final[Literal[-1117]] = -1117
EMPTY_NEW_CL_ORD_ID: Final[Literal[-1118]] = -1118
EMPTY_ORG_CL_ORD_ID: Final[Literal[-1119]] = -1119
BAD_CANDLE_INTERVAL: Final[Literal[-1120]] = -1120
BAD_SYMBOL: Final[Literal[-1121]] = -1121
INVALID_LISTEN_KEY: Final[Literal[-1125]] = -1125
MORE_THAN_XX_HOURS: Final[Literal[-1127]] = -1127
OPTIONAL_PARAMS_BAD_COMBO: Final[Literal[-1128]] = -1128
INVALID_PARAMETER: Final[Literal[-1130]] = -1130
BAD_RECV_WINDOW: Final[Literal[-1131]] = -1131
NEW_ORDER_REJECTED: Final[Literal[-2010]] = -2010
CANCEL_REJECTED: Final[Literal[-2011]] = -2011
NO_SUCH_ORDER: Final[Literal[-2013]] = -2013
BAD_API_KEY_FMT: Final[Literal[-2014]] = -2014
REJECTED_MBX_KEY: Final[Literal[-2015]] = -2015
NO_TRADING_WINDOW: Final[Literal[-2016]] = -2016
# 3xxx - 5xxx SAPI-specific issues
INNER_FAILURE: Final[Literal[-3000]] = -3000
NEED_ENABLE_2FA: Final[Literal[-3001]] = -3001
ASSET_DEFICIENCY: Final[Literal[-3002]] = -3002
NO_OPENED_MARGIN_ACCOUNT: Final[Literal[-3003]] = -3003
TRADE_NOT_ALLOWED: Final[Literal[-3004]] = -3004
TRANSFER_OUT_NOT_ALLOWED: Final[Literal[-3005]] = -3005
EXCEED_MAX_BORROWABLE: Final[Literal[-3006]] = -3006
HAS_PENDING_TRANSACTION: Final[Literal[-3007]] = -3007
BORROW_NOT_ALLOWED: Final[Literal[-3008]] = -3008
VALIDATION_FAILED: Final[Literal[-3026]] = -3026
LISTEN_KEY_NOT_FOUND: Final[Literal[-3038]] = -3038
BALANCE_IS_NOT_ENOUGH: Final[Literal[-3041]] = -3041
SYSTEM_BUSY: Final[Literal[-3044]] = -3044
| [
37811,
19242,
9874,
590,
551,
5700,
526,
15931,
198,
198,
6738,
19720,
1330,
8125,
11,
25659,
1691,
628,
198,
4871,
347,
14149,
18743,
6030,
25,
198,
220,
220,
220,
37227,
33,
14149,
1502,
3858,
526,
15931,
628,
220,
220,
220,
27564,
... | 2.077871 | 3,326 |
import functools
import asyncio
| [
11748,
1257,
310,
10141,
198,
11748,
30351,
952,
628,
198
] | 3.4 | 10 |
from django import forms
from paper_forms.composers.bootstrap4 import Bootstrap4
from ._models import SampleModel
from .utils import get_bound_field
| [
6738,
42625,
14208,
1330,
5107,
198,
198,
6738,
3348,
62,
23914,
13,
785,
1930,
364,
13,
18769,
26418,
19,
1330,
18892,
26418,
19,
198,
198,
6738,
47540,
27530,
1330,
27565,
17633,
198,
6738,
764,
26791,
1330,
651,
62,
7784,
62,
3245,
... | 3.511111 | 45 |
# TODO update all 2p stim related attr's to naparm submodule
from dataclasses import dataclass
import glob
import os
import signal
import time
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
import tifffile as tf
from packerlabimaging import TwoPhotonImaging
from packerlabimaging.main.subcore import ImagingMetadata, ImagingData, CellAnnotations
from packerlabimaging.main.core import Experiment, ImagingTrial
from packerlabimaging.main.paq import PaqData
from packerlabimaging.processing.naparm import Targets
# %%
from packerlabimaging.processing.anndata import AnnotatedData
# grabbing functions from .utils_funcs that are used in this script - Prajay's edits (review based on need)
# Default imaging plane index (single-plane datasets use plane 0).
PLANE = 0
# Path to folder holding bad-frames files used by tests — machine-specific;
# TODO confirm this location is still valid / should be configurable.
BADFRAMESLOC = '/home/pshah/Documents/code/packerlabimaging/tests/'
# Module-level defaults mirroring AllOpticalTrial's peri-stim window attributes.
# NOTE(review): nothing in this chunk reads these module-level names — presumably
# kept as reference defaults; verify before removing.
prestim_sec: float = 1.0 #: length of pre stim trace collected (secs)
poststim_sec: float = 3.0 #: length of post stim trace collected (secs)
pre_stim_response_window: float = 0.500 #: time window for collecting pre-stim measurement (units: msec)
post_stim_response_window: float = 0.500 #: time window for collecting post-stim measurement (units: msec)
# todo think about creating a custom class to hold directly taken coords targets imaging cellsdata - would work well for SLM targets, might even be able to extend in the naparm--> Targets class
class AllOpticalTrial(TwoPhotonImaging):
"""All Optical Experimental Data Analysis Workflow."""
def __init__(self, naparm_path, dataPath: str, saveDir: str, date: str, trialID: str, expID: str,
expGroup: str = '',
comment: str = '', imparams: ImagingMetadata = None, cells: CellAnnotations = None,
tmdata: PaqData = None):
"""
:param metainfo: TypedDict containing meta-information field needed for this experiment. Please see TwoPhotonImagingMetainfo for type hints on accepted keys.
:param paq_options: TypedDict containing meta-information about .paq file associated with current trial
:param naparm_path: path to folder containing photostimulation setup built by NAPARM
:param analysis_save_path: path of where to save the experiment analysis object
:param microscope: name of microscope used to record imaging (options: "Bruker" (default), "other")
:param prestim_sec: pre-photostimulation timeperiod for collecting photostimulation timed signals
:param poststim_sec: post-photostimulation timeperiod for collecting photostimulation timed signals
:param pre_stim_response_window: pre-photostimulation time window for measurement of photostimulation evoked responses
:param post_stim_response_window: post-photostimulation time window for measurement of photostimulation evoked responses
"""
initialization_dict = {'date': date,
'trialID': trialID,
'dataPath': dataPath,
'saveDir': saveDir,
'expID': expID,
'expGroup': expGroup,
'comment': comment}
# 1) initialize object as a TwoPhotonImagingTrial
super().__init__(imparams=imparams, cells=cells, tmdata=tmdata, **initialization_dict)
# Initialize Attributes:
# PHOTOSTIM PROTOCOL
self.stim_start_times = None
self.nomulti_sig_units = None
# attr's for processing/analysis of photostim experiments
self.__prestim_sec: Union[float, int] = 1.0 #: length of pre stim trace collected (in secs)
self.__poststim_sec: Union[float, int] = 3.0 #: length of post stim trace collected (in secs)
self.__prestim_response_window: Union[
float, int] = 0.500 #: time window for collecting pre-stim measurement (units: msec)
self.__poststim_response_window: Union[
float, int] = 0.500 #: time window for collecting post-stim measurement (units: msec)
# attr's for statistical analysis of photostim trials responses
self.photostimResponsesData = None # anndata object for collecting photostim responses and associated metadata for experiment and cells
# TODO update comment descriptions
self.all_trials = [] # all trials for each cell, dff detrended
self.all_amplitudes = [] # all amplitudes of response between dff test periods
self.stas = [] # avg of all trials for each cell, dff
self.sta_amplitudes = [] # avg amplitude of response between dff test periods
self.prob_response = None # probability of response of cell to photostim trial; obtained from single trial significance (ROB suggestion)
self.t_tests = [] # result from related samples t-test between dff test periods
self.wilcoxons = [] #:
self.sig_units = None #:
self.sta_sig = [] # based on t-test between dff test periods
self.sta_sig_nomulti = [] # as above, no multiple comparisons correction
########
# initializing cellsdata processing, cellsdata analysis and/or results associated attr's
# PHOTOSTIM SLM TARGETS
# TODO add attr's related to numpy array's and pandas dataframes for photostim trials - SLM targets
self.responses_SLMtargets = [] # dF/prestimF responses for all SLM targets for each photostim trial
self.responses_SLMtargets_tracedFF = [] # poststim dFF - prestim dFF responses for all SLM targets for each photostim trial
# ALL CELLS (from suite2p ROIs)
# TODO add attr's related to numpy array's and pandas dataframes for photostim trials - suite2p ROIs
# FUNCTIONS TO RUN AFTER init's of ALL ATTR'S
# 3) process 2p stim protocol and collect imaging frames at stim starts and during photostimulation
# set naparm path
self.twopstim, self.twopstim.stim_start_frames, self.twopstim.photostim_frames = self.photostimProcessing(
naparm_path=naparm_path)
# 5) todo collect Flu traces from SLM targets - probably leave out of the init right??
if hasattr(self, 'Suite2p'):
self.raw_SLMTargets, self.dFF_SLMTargets, self.meanFluImg_registered = self.collectSignalFromCoords(
curr_trial_frames=self.Suite2p.trial_frames, save=True, target_coords_masks=np.array(self.twopstim.target_areas))
self.targets_snippets = self.getTargetsStimTraceSnippets()
else:
Warning('NO Flu traces collected from any SLM targets because Suite2p not found for trial.')
# 6) Collect Flu traces from Suite2p ROIs:
# create:
# 1) array of dFF pre + post stim Flu snippets for each stim and cell [num cells x num peri-stim frames collected x num stims]
# 2) photostim response amplitudes in a dataframe for each cell and each photostim
# 3) save photostim response amplitudes to AnnotatedData
self.photostimFluArray, self.photostimResponseAmplitudes, self.photostimResponsesData = self.photostimProcessingAllCells()
# extend annotated imaging cellsdata object with imaging frames in photostim and stim_start_frames as additional keys in vars
__frames_in_stim = [False] * self.imparams.n_frames
__stim_start_frame = [False] * self.imparams.n_frames
for frame in self.twopstim.photostim_frames: __frames_in_stim[frame] = True
for frame in self.twopstim.stim_start_frames: __stim_start_frame[frame] = True
self.data.add_var(var_name='photostim_frame', values=__frames_in_stim)
self.data.add_var(var_name='stim_start_frame', values=__stim_start_frame)
# save final object
self.save()
print(f'\----- CREATED AllOpticalTrial cellsdata object for {self.t_series_name}')
@classmethod
def AllOpticalTrialfromImagingTrial(cls, naparm_path, imaging_trial: ImagingTrial):
"""Alternative constructor for AllOpticalTrial.
Creates an all optical trial from an existing imaging trial.
"""
initialization_dict = {'naparm_path': naparm_path, 'dataPath': imaging_trial.dataPath,
'saveDir': imaging_trial.saveDir,
'expID': imaging_trial.expID, 'group': imaging_trial.expGroup,
'comment': imaging_trial.comment}
aotrial = cls(**initialization_dict)
return aotrial
@property
def twopstim_path(self):
"""path to folder containing photostimulation protocols output by NAPARM"""
if self.twopstim:
return self.twopstim.path
@property
def prestim_sec(self):
"""length of pre stim trace collected (secs)"""
return self.__prestim_sec
@prestim_sec.setter
@property
def poststim_sec(self):
"""length of post stim trace collected (secs)"""
return self.__poststim_sec
@poststim_sec.setter
@property
def prestim_response_window(self):
"""time window for collecting pre-stim measurement (units: msec)"""
return self.__prestim_response_window
@prestim_response_window.setter
@property
def prestim_response_frames_window(self):
"""num frames for measuring Flu trace before each photostimulation trial during photostim response measurement (units: frames)"""
return int(self.imparams.fps * self.prestim_response_window)
@property
def poststim_response_window(self):
"""time window for collecting post-stim measurement (units: msec)"""
return self.__poststim_response_window
@poststim_response_window.setter
@property
def poststim_response_frames_window(self):
"""num frames for measuring Flu response after each photostimulation trial during photostim response measurement (units: frames)"""
return int(self.imparams.fps * self.poststim_response_window)
@property
def prestim_frames(self):
"""num frames for collecting Flu trace after each photostimulation trial (units: frames)"""
return int(self.__prestim_sec * self.imparams.fps)
@property
def poststim_frames(self):
"""num frames for collecting Flu trace after each photostimulation trial (units: frames)"""
return int(self.__poststim_sec * self.imparams.fps)
@property
def n_stims(self):
#: TODO set property using anndata responses shape property assigning from array: cells x Flu frames x # of photostim trials
"""number of photostimulation trials """
return
@property
def stim_start_frames(self):
"""Imaging frames corresponding to start of photostimulation trials."""
# todo use anndata for getting this
return []
@property
def photostim_frames(self):
"""Imaging frames during photostimulation trials."""
# todo just use anndata for getting this
return []
@property
def stim_duration_frames(self):
"""Duration of photostimulation as number of frames.
Note: Removing 1 more frame than the stim duration, as the stim isn't perfectly aligned with the start of the imaging frame
"""
if not hasattr(self, 'twopstim'):
raise ValueError(
'cannot retrieve stim_duration_frames. photostimulation analysis module cannot be found under .twopstim')
else:
duration_ms = self.twopstim.stim_dur
frame_rate = self.imparams.fps
duration_frames = np.ceil((duration_ms / 1000) * frame_rate)
return int(duration_frames) + 1
@property
def prestim_test_slice(self):
"""slice representing prestim response frames"""
return np.s_[self.prestim_frames - self.prestim_response_frames_window: self.prestim_frames]
@property
def poststim_test_slice(self):
"""slice representing poststim response frames"""
stim_end = self.prestim_frames + self.stim_duration_frames
return np.s_[stim_end: stim_end + self.poststim_response_frames_window]
def photostimProcessing(self, naparm_path):
"""
Processing alloptical trial photostimulation protocol.
- parse NAPARM protocol and SLM Targets information under .twopstim
- collect stimulation timing synchronized to imaging cellsdata frame timings
"""
self.twopstim = Targets(naparm_path=naparm_path, frame_x=self.imparams.frame_x,
frame_y=self.imparams.frame_y,
pix_sz_x=self.imparams.pix_sz_x, pix_sz_y=self.imparams.pix_sz_y)
# find stim frames
stim_start_frames = []
for stim in self.tmdata.data['stim_start_times']:
# the index of the frame immediately preceeding stim
stim_start_frame = next(i - 1 for i, sample in enumerate(self.tmdata.frame_times) if sample - stim >= 0)
stim_start_frames.append(stim_start_frame)
# find all photostimulation frames in imaging series
print('\n\----- Finding photostimulation frames in imaging frames ...')
print('# of photostim frames calculated per stim. trial: ', self.stim_duration_frames)
photostim_frames = []
for j in stim_start_frames:
for i in range(
self.stim_duration_frames):
photostim_frames.append(j + i)
print('\t|- # of Imaging frames:', self.imparams.n_frames,
'frames') #: todo set this to n_frames property from trialdata
print('\t|- # of Photostimulation frames:', len(self.photostim_frames), 'frames')
return self.twopstim, stim_start_frames, photostim_frames
#### TODO review attr's and write docs from the following functions: // start
### ALLOPTICAL PROCESSING OF TRACES
## ... no methods determined here yet...
### ALLOPTICAL ANALYSIS - FOCUS ON SLM TARGETS RELATED METHODS - TEST all methods below
# todo test
def getTargetsStimTraceSnippets(self, targets_idx: Union[list, str] = 'all', pre_stim: Union[float, int] = 0.5,
post_stim: Union[float, int] = 4.0, stims: list = None):
"""
Collect photostimulation timed snippets of signal from selected targets.
:param stims:
:param targets_idx: integer for the index of target cell to process
:param subselect_cells: ls of cells to subset from the overall set of traces (use in place of targets_idx if desired)
:param pre_stim: number of frames to use as pre-stim
:param post_stim: number of frames to use as post-stim
:param filter_sz: whether to filter out stims that are occuring seizures
:return: lists of individual targets dFF traces, and averaged targets dFF over all stims for each target
"""
self.prestim_sec = pre_stim
self.poststim_sec = post_stim
pre_stim = self.prestim_frames
post_stim = self.poststim_frames
if stims is None:
stim_timings = self.stim_start_frames
else:
stim_timings = stims
process = 'trace dFF'
if targets_idx == 'all':
data_to_process = self.dFF_SLMTargets
else:
assert type(targets_idx) == list, 'provide targets_idx as list on cell indexes to collect.'
data_to_process = np.asarray([self.dFF_SLMTargets[idx] for idx in targets_idx])
num_cells = data_to_process.shape[0]
print(f'collecting stim traces for {num_cells} cells ', num_cells)
# collect photostim timed trace snippets traces of photostim targets
flu = np.asarray([data_to_process[:, stim - pre_stim: stim + self.stim_duration_frames + post_stim] for stim in
stim_timings])
print(f"shape photostim. trials trace snippets array: {flu.shape}")
return flu
# todo test
def findTargetedCells(self, plot: bool = True):
"""finding s2p cell ROIs that were also SLM targets (or more specifically within the target areas as specified by _findTargetAreas - include 15um radius from center coordinate of spiral)
Make a binary mask of the targets and multiply by an image of the cells
to find cells that were targeted
--- LAST UPDATED NOV 6 2021 - copied over from Vape ---
"""
print('\n\----- Searching for targeted cells in annotated cells...')
## TODO add necessary edits for multi-plane experiments
targets = list(['non-target' for cell in
range(self.cells.n_cells)]) # initialize all cells as non-target, add as annotation to .cells
##### IDENTIFYING S2P ROIs THAT ARE WITHIN THE SLM TARGET SPIRAL AREAS
# make all target area coords in to a binary mask
targ_img = np.zeros([self.imparams.frame_x, self.imparams.frame_y], dtype='uint16')
target_areas = np.array(self.twopstim.target_areas)
targ_img[target_areas[:, :, 1], target_areas[:, :, 0]] = 1
# make an image of every cell area, filled with the index of that cell
cell_img = np.zeros_like(targ_img)
cell_x = self.cells.cell_coords[:, 0]
cell_y = self.cells.cell_coords[:, 1]
for i, coord in enumerate(zip(cell_x, cell_y)):
cell_img[coord] = i + 1
# binary mask x cell image to get the cells that overlap with target areas
targ_cell = cell_img * targ_img
targ_cell_ids = np.unique(targ_cell)[1:] - 1 # correct the cell id due to zero indexing
self.targeted_cells = np.zeros([self.cells.n_cells], dtype='bool')
self.targeted_cells[targ_cell_ids] = True
self.cell_targets = [self.cells.cell_id[i] for i in np.where(self.targeted_cells)[
0]] # get list of cells that were photostim targetted -- todo turn into property accessing targets annotations of .cells
self.n_targeted_cells = np.sum(
self.targeted_cells) # todo turn into property accessing targets annotations of .cells
# add targeted cells to targets
for idx, cell in enumerate(self.cells.cell_id):
if cell in self.cell_targets:
targets[idx] = 'target'
print('\t|- Search completed.')
self.save()
print('\t|- Number of targeted cells: ', self.n_targeted_cells)
# IDENTIFYING S2P ROIs THAT ARE WITHIN THE EXCLUSION ZONES OF THE SLM TARGETS
# make all target area coords in to a binary mask
targ_img = np.zeros([self.imparams.frame_x, self.imparams.frame_y], dtype='uint16')
target_areas_exclude = np.array(self.twopstim.target_areas_exclude)
targ_img[target_areas_exclude[:, :, 1], target_areas_exclude[:, :, 0]] = 1
# make an image of every cell area, filled with the index of that cell
cell_img = np.zeros_like(targ_img)
cell_x = self.cells.cell_coords[:, 0]
cell_y = self.cells.cell_coords[:, 1]
for i, coord in enumerate(zip(cell_x, cell_y)):
cell_img[coord] = i + 1
# binary mask x cell image to get the cells that overlap with target areas
targ_cell = cell_img * targ_img
targ_cell_ids = np.unique(targ_cell)[1:] - 1 # correct the cell id due to zero indexing
exclude_cells = np.zeros([self.cells.n_cells], dtype='bool')
exclude_cells[targ_cell_ids] = True
cells_exclude = [self.cells.n_cells[i] for i in
np.where(exclude_cells)[0]] # get ls of s2p cells that were photostim targetted
self.n_exclude_cells = np.sum(exclude_cells)
print('\t|-Search completed.')
self.save()
print(f"\t|-Number of exclude Suite2p ROIs: {self.n_exclude_cells}")
# define non targets from suite2p ROIs (exclude cells in the SLM targets exclusion - .s2p_cells_exclude)
for idx, cell in enumerate(self.cells.cell_id):
if cell not in cells_exclude:
targets[idx] = 'exclude'
if plot:
fig, ax = plt.subplots(figsize=[6, 6])
targ_img = np.zeros([self.imparams.frame_x, self.imparams.frame_y], dtype='uint16')
target_areas = np.array(self.twopstim.target_areas)
targ_img[target_areas[:, :, 1], target_areas[:, :, 0]] = 1
ax.imshow(targ_img, cmap='Greys_r', zorder=0)
ax.set_title('SLM targets areas')
# for (x, y) in self.twopstim.target_coords_all:
# ax.scatter(x=x, y=y, edgecolors='white', facecolors='none', linewidths=1.0)
fig.show()
# add targets classification as observations annotation to .cellsdata anndata
self.data.add_obs(obs_name='photostim_class', values=targets)
self.cells.cellsdata['photostim_class'] = targets
print(f"\t|- Number of non-target ROIs: {len(self.cells.cellsdata['photostim_class'] == 'non-target')}")
# todo code and test
def _makePhotostimTrialFluSnippets(self, plane_flu: np.ndarray,
stim_frames: list = None) -> np.ndarray: # base code copied from Vape's _makeFluTrials
"""
Make Flu snippets timed on photostimulation, for each cell, for each stim instance. [cells x Flu frames x stims] # TODO triple check order of this array's dimensions
Inputs:
plane_flu - array of dff traces for all cells for this plane only
plane - imaging plane corresponding to plane_flu, default = 0 (for one plane datasets)
stim_frames - optional, if provided then only use these photostim frames to collect photostim_array
Outputs:
photostim_array - dFF peri-photostim Flu array [cell x Flu frames x trial]
"""
print('\n\- Collecting peri-stim traces ...')
trial_array = []
_stims = self.stim_start_frames if stim_frames is None else stim_frames
assert plane_flu.ndim == 2, 'plane_flu needs to be of ndim: 2'
assert _stims == self.stim_start_frames, "stims not found in the stim frames list of this plane"
for i, stim in enumerate(_stims):
# get frame indices of entire trial from pre-stim start to post-stim end
trial_frames = np.s_[stim - self.prestim_frames: stim + self.poststim_frames]
# use trial frames to extract this trial for every cell
flu_trial = plane_flu[:, trial_frames]
flu_trial_len = self.prestim_frames + self.stim_duration_frames + self.poststim_frames
# todo test if needed or better way to implement: catch timeseries which ended in the middle of an ongoing photostim instance
if flu_trial.shape[1] == flu_trial_len:
# only append trials of the correct length - will catch corrupt/incomplete cellsdata and not include
if len(trial_array) == 0:
trial_array = flu_trial
else:
trial_array = np.dstack((trial_array, flu_trial))
else:
print('**incomplete trial detected and not appended to trial_array**', end='\r')
print(f'\nFinished collecting peri-stim traces, out shape: {trial_array.shape}')
return trial_array
# todo test
def _normalize_snippets_prestim(self, snippets: np.ndarray = None):
"""
Normalize each trace snippet to pre-stim period.
:return:
"""
snippets = snippets if snippets else self.targets_snippets
num_cells = snippets.shape[0]
targets_dff = np.zeros([num_cells, len(self.stim_start_frames),
self.prestim_frames + self.stim_duration_frames + self.poststim_frames])
# create pre-stim period mean substracted photostim trials trace snippets
for j, traces in enumerate(snippets):
for i, trace in enumerate(traces):
mean_pre = np.mean(trace[0: self.prestim_frames])
trace_dff = (trace - mean_pre)
targets_dff[j, i] = trace_dff
print(f"shape photostim. trials trace snippets array: {targets_dff.shape}")
return targets_dff
# todo test
def calculatePhotoresponses(self, snippets: np.ndarray, stims_to_use: Union[list, str] = 'all'):
"""
Calculations of responses (post-stim - pre-stim) to photostimulation of SLM Targets of the provided snippets array.
:param stims_to_use: ls of stims to retrieve photostim trial dFF responses
:return:
"""
if stims_to_use is 'all':
stims_to_use = range(len(self.stim_start_frames))
stims_idx = [self.stim_start_frames.index(stim) for stim in stims_to_use]
else:
stims_idx = [self.stim_start_frames.index(stim) for stim in stims_to_use]
# method 1) initializing pandas df that collects responses of stimulations
d = {}
for stim in stims_idx:
d[stim] = [None] * snippets.shape[0]
df = pd.DataFrame(d, index=range(snippets.shape[0])) # population dataframe
# calculate photostimulation responses
cell_ids = df.index
for target_idx in range(len(cell_ids)):
responses = []
for stim_idx in stims_idx:
dff_trace = snippets[target_idx][stim_idx]
response_result = np.mean(dff_trace[self.prestim_frames + self.stim_duration_frames + 1:
self.prestim_frames + self.stim_duration_frames +
self.poststim_response_frames_window]) # calculate the dF over pre-stim mean F response within the response window
responses.append(np.round(response_result, 2))
df.loc[target_idx, stim_idx] = response_result
# method 2) alternate method for calculating photostim responses
self.__analysis_array = snippets
self.__pre_array = np.mean(self.__analysis_array[:, self.prestim_test_slice, :],
axis=1) # [cells x prestim frames] (avg'd taken over all stims)
self.__post_array = np.mean(self.__analysis_array[:, self.poststim_test_slice, :],
axis=1) # [cells x poststim frames] (avg'd taken over all stims)
# Vape's version for collection photostim response amplitudes
# calculate amplitude of response for all cells, all trials
all_amplitudes = self.__post_array - self.__pre_array
df = pd.DataFrame(index=range(self.Suite2p.n_units), columns=self.stim_start_frames, data=all_amplitudes)
# todo compare two methods
return df
# todo code up
def _TargetsPhotostimResponsesAnndata(self):
"""
Create an anndata table for photostimulation responses of Targets.
:return:
"""
# todo test
def _CellsPhotostimResponsesAnndata(self, photostimResponseAmplitudes: pd.DataFrame):
"""
Creates annotated cellsdata (see anndata library) object based around the Ca2+ matrix of the imaging trial.
"""
# try:
# SETUP THE OBSERVATIONS (CELLS) ANNOTATIONS TO USE IN anndata
# build dataframe for obs_meta from suite2p stat information
obs_meta = self.cells.cellsdata
# SETUP THE VARIABLES ANNOTATIONS TO USE IN anndata
# build dataframe for var annot's from Paq file
var_meta = self.tmdata.data
# var_meta.columns = photostimResponseAmplitudes.columns
print(f"\n\----- CREATING annotated cellsdata object for photostim responses using AnnData:")
photostimResponseAmplitudes.columns = var_meta.columns
adata = AnnotatedData(X=np.asarray(photostimResponseAmplitudes), obs=obs_meta, var=var_meta.T)
print(f"\t{adata}")
return adata
def photostimProcessingAllCells(self, plane: int = 0): # NOTE: not setup for multi-plane imaging processing yet...
"""
Take dfof trace for entire timeseries and break it up in to individual trials, calculate
the mean amplitudes of response and statistical significance across all trials
Inputs:
plane - imaging plane n
"""
print('\n----------------------------------------------------------------')
print('running trial Processing for all cells ')
print('----------------------------------------------------------------')
# make trial arrays from dff cellsdata shape: [cells x stims x frames]
if hasattr(self, 'Suite2p'):
photostimFluArray = self._makePhotostimTrialFluSnippets(plane_flu=self.normalize_dff(self.Suite2p.raw))
photostimResponseAmplitudes = self.collectPhotostimResponses(photostimFluArray)
## create new anndata object for storing measured photostim responses from cellsdata, with other relevant cellsdata
photostim_responses_adata = self._CellsPhotostimResponsesAnndata(
photostimResponseAmplitudes=photostimResponseAmplitudes)
return photostimFluArray, photostimResponseAmplitudes, photostim_responses_adata
else:
NotImplementedError('Photostim processing cannot be performed without Suite2p cellsdata.')
def statisticalProcessingAllCells(self):
"""Runs statistical processing on photostim response arrays"""
from packerlabimaging.processing.stats import runWilcoxonsTest
self.wilcoxons = runWilcoxonsTest(array1=self.__pre_array, array2=self.__post_array)
from packerlabimaging.processing.stats import sigTestAvgResponse
self.sig_units = sigTestAvgResponse(trial=self, p_vals=self.wilcoxons, alpha=0.1)
def staSignificance(self, test):
"""
TODO docstring
:param test:
"""
self.sta_sig = []
for plane in range(self.imparams.n_planes):
# set this to true if you want to multiple comparisons correct for the number of cells
multi_comp_correction = True
if not multi_comp_correction:
divisor = 1
else:
divisor = self.n_units[plane]
if test == 't_test':
p_vals = [t[1] for t in self.t_tests[plane]]
if test == 'wilcoxon':
p_vals = [t[1] for t in self.wilcoxons[plane]]
if multi_comp_correction:
print('performing t-test on cells with mutliple comparisons correction')
else:
print('performing t-test on cells without mutliple comparisons correction')
sig_units = []
for i, p in enumerate(p_vals):
if p < (0.05 / divisor):
unit_index = self.cell_id[plane][i]
# print('stimulation has significantly changed fluoresence of s2p unit {}, its P value is {}'.format(unit_index, p))
sig_units.append(unit_index) # significant units
self.sta_sig.append(sig_units)
def singleTrialSignificance(self):
"""
TODO docstring
"""
self.single_sig = [] # single trial significance value for each trial for each cell in each plane
for plane in range(self.imparams.n_planes):
single_sigs = []
for cell, _ in enumerate(self.cell_id[plane]):
single_sig = []
for trial in range(self.n_trials):
pre_f_trial = self.all_trials[plane][cell][trial][: self.pre_frames]
std = np.std(pre_f_trial)
if np.absolute(self.all_amplitudes[plane][cell][trial]) >= 2 * std:
single_sig.append(True)
else:
single_sig.append(False)
single_sigs.append(single_sig)
self.single_sig.append(single_sigs)
## NOT REVIEWED FOR USAGE YET
# other useful functions for all-optical analysis
def run_stamm_nogui(self, numDiffStims, startOnStim, everyXStims, preSeconds=0.75, postSeconds=1.25):
    """
    run STAmoviemaker for current trial

    Builds the STAMovieMaker argument dictionary for this trial and locates the
    resulting MaxResponseImage tif (the actual STAMM call is currently
    commented out below).

    :param numDiffStims: number of distinct stimulus types -- assumption, TODO confirm
    :param startOnStim: stimulus index to start from -- assumption, TODO confirm
    :param everyXStims: process every X-th stimulus -- assumption, TODO confirm
    :param preSeconds: seconds before each stim included in the STA window (default 0.75)
    :param postSeconds: seconds after each stim included in the STA window (default 1.25)
    """
    # NOTE(review): hard-coded QNAP mount point -- only valid on the lab machine.
    qnap_path = os.path.expanduser('/home/pshah/mnt/qnap')
    # cellsdata path
    movie_path = self.tiff_path
    # paq synchronization file recorded alongside the imaging data
    sync_path = self._paq_path
    # stamm save path: <qnap>/Analysis/<date>/STA_Movies/<date>_<expID>_<trialID>
    stam_save_path = os.path.join(qnap_path, 'Analysis', self.metainfo['date'], 'STA_Movies',
                                  '%s_%s_%s' % (self.metainfo['date'],
                                                self.metainfo['expID'],
                                                self.metainfo['trialID']))
    os.makedirs(stam_save_path, exist_ok=True)
    ##
    assert os.path.exists(stam_save_path)
    print('QNAP_path:', qnap_path,
          '\ncellsdata path:', movie_path,
          '\nsync path:', sync_path,
          '\nSTA movie save s2pResultsPath:', stam_save_path)
    # define STAmm parameters
    frameRate = int(self.imparams.fps)
    arg_dict = {'moviePath': movie_path,  # hard-code this
                'savePath': stam_save_path,
                'syncFrameChannel': "frame_clock",
                'syncStimChannel': 'packio2markpoints',
                'syncStartSec': 0,
                'syncStopSec': 0,
                'numDiffStims': numDiffStims,
                'startOnStim': startOnStim,
                'everyXStims': everyXStims,
                'preSeconds': preSeconds,
                'postSeconds': postSeconds,
                'frameRate': frameRate,
                'averageImageStart': 0.5,
                'averageImageStop': 1.5,
                'methodDF': False,
                'methodDFF': True,
                'methodZscore': False,
                'syncPath': sync_path,
                'zPlanes': 1,
                'useStimOrder': False,
                'stimOrder': [],
                'useSingleTrials': False,
                'doThreshold': False,
                'threshold': 0,
                'colourByTime': False,
                'useCorrelationImage': False,
                'blurHandS': False,
                'makeMaxImage': True,
                'makeColourImage': False
                }
    # # run STAmm
    # STAMM.STAMovieMaker(arg_dict)
    # show the MaxResponseImage
    # NOTE(review): raises IndexError if no *MaxResponseImage.tif exists yet,
    # and `img` is currently unused since the plotting call below is commented out.
    img = glob.glob(stam_save_path + '/*MaxResponseImage.tif')[0]
    # plot_single_tiff(img, frame_num=0)
#### // end
if __name__ == '__main__':
    # Hard-coded machine-specific data roots; BASE_PATH selects which one is used.
    LOCAL_DATA_PATH = '/Users/prajayshah/data/oxford-data-to-process/'
    REMOTE_DATA_PATH = '/home/pshah/mnt/qnap/Data/'
    BASE_PATH = LOCAL_DATA_PATH
    # Metadata describing the experiment/trial to load.
    ExperimentMetainfo = {
        'dataPath': f'{BASE_PATH}/2020-12-19/2020-12-19_t-013/2020-12-19_t-013_Cycle00001_Ch3.tif',
        'saveDir': f'{BASE_PATH}/2020-12-19/',
        'expID': 'RL109',
        'comment': 'two photon imaging + alloptical trials',
    }
    expobj = Experiment(**ExperimentMetainfo)
    # NOTE(review): alloptical_trial_fixture and test_AllOpticalClass are not
    # defined in this chunk -- presumably defined elsewhere in the file; confirm.
    idict = alloptical_trial_fixture()
    aotrial = test_AllOpticalClass(idict)
# archive
# def collectPhotostimResponses(self, photostimFluArray):
# """
# TODO docstring
#
# :param photostimFluArray:
# :return:
# """
# # create parameters, slices, and subsets for making pre-stim and post-stim arrays to use in stats comparison
# # test_period = self.prestim_response_window / 1000 # sec
# # self.test_frames = int(self.imparams.fps * test_period) # test period for stats
#
# # mean pre and post stimulus (within post-stim response window) flu trace values for all cells, all trials
# self.__analysis_array = photostimFluArray
# self.__pre_array = np.mean(self.__analysis_array[:, self.prestim_test_slice, :],
# axis=1) # [cells x prestim frames] (avg'd taken over all stims)
# self.__post_array = np.mean(self.__analysis_array[:, self.poststim_test_slice, :],
# axis=1) # [cells x poststim frames] (avg'd taken over all stims)
#
# # Vape's version for collection photostim response amplitudes
# # calculate amplitude of response for all cells, all trials
# all_amplitudes = self.__post_array - self.__pre_array
#
# df = pd.DataFrame(index=range(self.Suite2p.n_units), columns=self.stim_start_frames, data=all_amplitudes)
#
# return df
| [
2,
16926,
46,
4296,
477,
362,
79,
7132,
3519,
708,
81,
338,
284,
25422,
1670,
850,
21412,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
6737,
198,
11748,
640,
198,
6738,
19720,
... | 2.359859 | 15,595 |
"""Models for Ambee."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any
@dataclass
class AirQuality:
    """Air quality measurements reported by the Ambee API."""

    particulate_matter_2_5: float | None
    particulate_matter_10: float | None
    sulphur_dioxide: float | None
    nitrogen_dioxide: float | None
    ozone: float | None
    carbon_monoxide: float | None
    air_quality_index: int | None

    @staticmethod
    def from_dict(data: dict[str, Any]) -> AirQuality:
        """Build an AirQuality object from an Ambee API response.

        Args:
            data: The decoded JSON body returned by the Ambee API.

        Returns:
            An AirQuality instance for the first reported station.
        """
        station = data["stations"][0]
        # Map each dataclass attribute to its key in the station record;
        # keys absent from the payload become None via .get().
        keys = {
            "particulate_matter_2_5": "PM25",
            "particulate_matter_10": "PM10",
            "sulphur_dioxide": "SO2",
            "nitrogen_dioxide": "NO2",
            "ozone": "OZONE",
            "carbon_monoxide": "CO",
            "air_quality_index": "AQI",
        }
        return AirQuality(**{attr: station.get(key) for attr, key in keys.items()})
@dataclass
class Pollen:
    """Pollen counts and risk levels reported by the Ambee API."""

    grass_poaceae: int | None
    grass_risk: str | None
    grass: int | None
    tree_alder: int | None
    tree_birch: int | None
    tree_cypress: int | None
    tree_elm: int | None
    tree_hazel: int | None
    tree_oak: int | None
    tree_pine: int | None
    tree_plane: int | None
    tree_poplar: int | None
    tree_risk: str | None
    tree: int | None
    weed_chenopod: int | None
    weed_mugwort: int | None
    weed_nettle: int | None
    weed_ragweed: int | None
    weed_risk: str | None
    weed: int | None

    @staticmethod
    def from_dict(data: dict[str, Any]) -> Pollen:
        """Build a Pollen object from an Ambee API response.

        Args:
            data: The decoded JSON body returned by the Ambee API.

        Returns:
            A Pollen instance for the first data record.
        """
        record = data["data"][0]
        # Totals and risk labels per pollen family, plus per-species breakdowns.
        count = record.get("Count", {})
        risk = record.get("Risk", {})
        species = record.get("Species", {})
        grass_src = species.get("Grass", {})
        tree_src = species.get("Tree", {})
        weed_src = species.get("Weed", {})
        values: dict[str, Any] = {
            "grass_poaceae": grass_src.get("Grass / Poaceae"),
            "grass_risk": risk.get("grass_pollen"),
            "grass": count.get("grass_pollen"),
            "tree_alder": tree_src.get("Alder"),
            "tree_birch": tree_src.get("Birch"),
            "tree_cypress": tree_src.get("Cypress"),
            "tree_elm": tree_src.get("Elm"),
            "tree_hazel": tree_src.get("Hazel"),
            "tree_oak": tree_src.get("Oak"),
            "tree_pine": tree_src.get("Pine"),
            "tree_plane": tree_src.get("Plane"),
            "tree_poplar": tree_src.get("Poplar / Cottonwood"),
            "tree_risk": risk.get("tree_pollen"),
            "tree": count.get("tree_pollen"),
            "weed_chenopod": weed_src.get("Chenopod"),
            "weed_mugwort": weed_src.get("Mugwort"),
            "weed_nettle": weed_src.get("Nettle"),
            "weed_ragweed": weed_src.get("Ragweed"),
            "weed_risk": risk.get("weed_pollen"),
            "weed": count.get("weed_pollen"),
        }
        return Pollen(**values)
@dataclass
class Weather:
    """Weather conditions reported by the Ambee API."""

    apparent_temperature: float | None
    cloud_cover: float | None
    dew_point: float | None
    humidity: float | None
    ozone: float | None
    pressure: float | None
    temperature: float | None
    time: int | None
    visibility: int | None
    wind_bearing: int | None
    wind_gust: float | None
    wind_speed: float | None

    @staticmethod
    def from_dict(data: dict[str, Any]) -> Weather:
        """Build a Weather object from an Ambee API response.

        Args:
            data: The decoded JSON body returned by the Ambee API.

        Returns:
            A Weather instance for the single data record.
        """
        record = data["data"]
        # Map each snake_case attribute to its camelCase key in the payload;
        # keys absent from the record become None via .get().
        keys = {
            "apparent_temperature": "apparentTemperature",
            "cloud_cover": "cloudCover",
            "dew_point": "dewPoint",
            "humidity": "humidity",
            "ozone": "ozone",
            "pressure": "pressure",
            "temperature": "temperature",
            "time": "time",
            "visibility": "visibility",
            "wind_bearing": "windBearing",
            "wind_gust": "windGust",
            "wind_speed": "windSpeed",
        }
        return Weather(**{attr: record.get(key) for attr, key in keys.items()})
| [
37811,
5841,
1424,
329,
1703,
20963,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
4377,
628,
198,
31,
19608,
330,
31172,
198,
4871,
3701,
35013,
... | 2.193 | 2,057 |
# Read two values into A and B, then swap them so that A ends up holding B's
# original value and B holds A's; print both values after the swap.
A = input('valor de A: ')
B = input('Valor de B: ')
# Tuple unpacking swaps the two names without a temporary variable.
A, B = B, A
print(' Valor de A: {} \n Valor de B: {}'.format(A, B))
2,
31831,
466,
271,
1188,
2850,
31215,
355,
5553,
6557,
303,
271,
317,
304,
347,
304,
304,
34045,
84,
283,
257,
4161,
6888,
23430,
1188,
2850,
390,
1296,
64,
8358,
257,
5553,
6557,
626,
317,
279,
21612,
257,
1184,
84,
343,
267,
1188... | 2.453416 | 161 |
import logging
import numpy as np
import pandas as pd
# PCA
from sklearn import decomposition
# THIS IS WHAT WE NEED TO LOOK AT!
from sklearn import preprocessing as preproc
# from sklearn.preprocessing import StandardScaler
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
2,
4217,
32,
198,
6738,
1341,
35720,
1330,
26969,
9150,
628,
198,
2,
12680,
3180,
25003,
12887,
36465,
5390,
48045,
5161,
0,
198,
6738,
... | 3.467532 | 77 |
from flask import Flask
from flask_cors import CORS
from flask_graphql import GraphQLView
from schema import Schema
def create_app(graphiql=True):
    """Create the Flask application and mount the GraphQL endpoint at /graphql.

    Was missing entirely: the __main__ block called an undefined ``create_app``
    (NameError) while ``Flask``, ``GraphQLView`` and ``Schema`` were imported
    but unused.

    :param graphiql: when True, serve the in-browser GraphiQL IDE on GET.
    :return: the configured Flask application.
    """
    app = Flask(__name__)
    app.add_url_rule(
        '/graphql',
        view_func=GraphQLView.as_view('graphql', schema=Schema, graphiql=graphiql),
    )
    return app


if __name__ == '__main__':
    app = create_app(graphiql=True)
    # Allow cross-origin requests to the GraphQL endpoint only.
    CORS(app, resources={r'/graphql': {'origins': '*'}})
    app.run()
| [
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,
198,
6738,
42903,
62,
34960,
13976,
1330,
29681,
9711,
7680,
198,
6738,
32815,
1330,
10011,
2611,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
83... | 2.72043 | 93 |
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, print_function, division,
absolute_import)
"""
Bridge between this package and the nose package
"""
__copyright__ = "Copyright 2020, Datera, Inc."
def _prepare_sys_path():
    """
    Raises ImportError on failure.

    Make the ``nose`` package importable: first try the default sys.path;
    if that fails, add our bundled third_party/ folder and import again.
    """
    try:
        import nose  # noqa: F401
    except ImportError:
        pass
    else:
        return
    import os
    import sys
    vendor_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "third_party")
    sys.path.insert(0, vendor_dir)
    import nose  # noqa: F401
_prepare_sys_path()
import nose
import nose.plugins
from nose.plugins.attrib import attr
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
357,
46903,
1098,
62,
17201,
874,
11,
3601,
62,
8818,
11,
7297,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 2.584375 | 320 |
import numpy as np
import matplotlib.pyplot as plt
import pcl
import sys
import os.path as path
import argparse

# Convert a VR-3000 CSV height-map export into a PCL point cloud file.
parser = argparse.ArgumentParser(description='Generate Pointclouds out of VR-3000 csv data')
parser.add_argument('--infile', type=argparse.FileType('r', encoding='ISO-8859-1'),
                    required=True, dest="infile")
parser.add_argument('--outfile',
                    required=True, dest="outfile")
parser.add_argument('--outdir',
                    required=False, dest="outdir")
args = parser.parse_args()

inputfile = args.infile
outputfile = args.outfile
outputdir = "."
if (args.outdir):
    print("bla", args.outdir)  # TODO: debug leftover -- consider removing
    outputdir = args.outdir

print("input file: {}".format(inputfile))
print("output file: {}".format(outputfile))

headerLines = 22  # number of metadata lines to skip at the top of the export
resolutionX = 24  # scan extent along X -- presumably mm, TODO confirm units
resolutionY = 18  # scan extent along Y -- presumably mm, TODO confirm units

matrix = []  # the complete scan as a matrix
with inputfile as fin:
    for _ in range(headerLines):
        next(fin)
    for line in fin:  # iterate over every line in the file
        tmpElementsPerLine = []  # temporary array holding the parsed values of one line
        for element in line.strip().split(";"):  # every data point looks like "value,value"
            # only the first element is needed
            if element == '':  # if there is no data, add a zero
                tmpElementsPerLine.append(0)
            else:
                try:  # extract the first value per data point and convert it to a float
                    tmpElementsPerLine.append(float(element.split(",")[0]))
                # Was a bare `except:` which also swallowed SystemExit and
                # KeyboardInterrupt; float() only raises ValueError here.
                except ValueError:
                    print("could not convert element to float, skipping")
                    tmpElementsPerLine.append(0)
        matrix.append(tmpElementsPerLine)

matrix = np.array(matrix)
print("read in {} lines with {} pixels".format(len(matrix), len(matrix[0, :])))

# scaling: map pixel indices to physical coordinates, keep only non-zero heights
pointCloud = []  # the actual point cloud, containing only valid coordinates
for row in range(len(matrix)):  # the x coordinate corresponds to the row position
    for col in range(len(matrix[row, :])):  # the y coordinate corresponds to the col position
        if matrix[row, col] != 0:  # skip all zero values
            # divide heights by 1000 -- presumably mm -> m, TODO confirm
            pointCloud.append([(float(row) / 1024) * resolutionX,
                               (float(col) / 768) * resolutionY,
                               matrix[row, col] / 1000])
pointCloud = np.array(pointCloud, dtype=np.float32)

# done! all relevant coordinates are now stored in the pointCloud list
print("overall point cloud has {} elements".format(len(pointCloud)))

pclCloud = pcl.PointCloud(pointCloud)
pcl.save(pclCloud, path.join(outputdir, outputfile))
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
279,
565,
198,
11748,
25064,
198,
11748,
28686,
13,
6978,
355,
3108,
198,
11748,
1822,
29572,
628,
198,
21017,
16926,
46,
198,
2,
... | 2.492615 | 1,151 |
#!/usr/local/bin/python3.0
import apache_tools
import json
import log_tools
import os
import sql_connector
import sys
import urllib.request
from datetime import datetime
# NOTE(review): main() is not defined anywhere in this chunk -- this call raises
# NameError at import time. Confirm whether main should be imported from one of
# the modules above or its definition is missing from this file.
main()
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
18,
13,
15,
198,
198,
11748,
2471,
4891,
62,
31391,
198,
11748,
33918,
198,
11748,
2604,
62,
31391,
198,
11748,
28686,
198,
11748,
44161,
62,
8443,
273,
198,
11748,
25064,
198,
11748,
2... | 3.067797 | 59 |
# -*- coding: utf-8 -*-
from django import forms
from children.functions import get_last_date
from dictionaries.models import Dictionary
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
1330,
5107,
198,
198,
6738,
1751,
13,
12543,
2733,
1330,
651,
62,
12957,
62,
4475,
198,
6738,
48589,
3166,
13,
27530,
1330,
28261,
628
] | 3.390244 | 41 |
"""
=======================================================================================================================
Filename: c03_ex02-saudacoes.py
Author: Henrique Rodrigues
Description: CURSO INTENSIVO DE PYTHON - Eric Matthes
3.2 Saudações: Comece com a lista usada no Exercício 3.1, mas em vez de simplesmente exibir o nome de
cada pessoa, apresente uma mensagem a elas. O texto de cada mensagem deve ser o mesmo, porém cada
mensagem deve estar personalizada com o nome da pessoa.
Date: 02/01/2022
=======================================================================================================================
"""
patriarcas = ["Abraão", "Isaque", "Jacó"]
# Greet each of the three patriarchs by name with the same welcome message.
for nome in patriarcas:
    print(f"Bem-vindo, {nome}, ao Curso Intensivo do Python!")
| [
37811,
198,
23926,
10052,
4770,
1421,
18604,
198,
35063,
25,
220,
220,
220,
220,
220,
220,
269,
3070,
62,
1069,
2999,
12,
82,
3885,
330,
3028,
13,
9078,
198,
13838,
25,
220,
220,
220,
220,
220,
220,
220,
220,
6752,
33865,
16114,
947... | 2.632219 | 329 |
import abc
| [
11748,
450,
66,
628
] | 3 | 4 |
# numpy ndarray
import numpy as np
# 2x3 array of samples from the standard normal distribution
data=np.random.randn(2,3)
# build an ndarray from a plain Python list
data1=[6,7.5,8,0,1]
arr1=np.array(data1)
print(arr1)
# array = a multi-dimensional container whose elements all share one dtype
np.arange(15)  # NOTE(review): result is discarded -- this expression has no effect
arrayList=np.arange(10)
numList=[]
for i in range(0,10):
    numList.append(i)
numList[5:8]  # NOTE(review): result is discarded -- this expression has no effect
# ndarray slice assignment broadcasts the scalar 12 into positions 5..7;
# the plain list numList is unchanged
arrayList[5:8]=12
print(numList,"\n",arrayList)
# array[0][2] == array[0,2] -- both index the same position
names=np.array(["B","J","W","B","W","J","J"])
data=np.random.rand(7,4)
2,
299,
32152,
299,
67,
18747,
198,
11748,
299,
32152,
355,
45941,
198,
198,
7890,
28,
37659,
13,
25120,
13,
25192,
77,
7,
17,
11,
18,
8,
198,
198,
7890,
16,
41888,
21,
11,
22,
13,
20,
11,
23,
11,
15,
11,
16,
60,
198,
3258,
... | 1.729258 | 229 |
"""
Streamlit App for displaying results.
"""
import logging
import requests
import streamlit as st
from stock import search
logger = logging.getLogger(__name__)
pexels_reference = """
<a href="https://www.pexels.com">Photos provided by Pexels</a>
"""
@st.cache(show_spinner=False)
def load_image(url) -> bytes:
    """
    Fetch an image from a url.

    Returns:
        bytes: The raw bytes of the HTTP response body.
    """
    logger.debug(f"Loading image from {url}")
    return requests.get(url).content
if __name__ == "__main__":
main()
| [
37811,
198,
12124,
18250,
2034,
329,
19407,
2482,
13,
198,
37811,
198,
198,
11748,
18931,
198,
198,
11748,
7007,
198,
11748,
4269,
18250,
355,
336,
198,
198,
6738,
4283,
1330,
2989,
628,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
136... | 2.785366 | 205 |