content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from hops import constants
class HopsfsTrainingDataset():
"""
Represents a training dataset on hopsfs in the feature store
"""
def __init__(self, hopsfs_training_dataset_json):
"""
Initalizes the hopsfs training dataset from JSON payload
Args:
:external_training_dataset_json: JSON data about the training dataset returned from Hopsworks REST API
"""
self.hopsfs_connector_id = hopsfs_training_dataset_json[
constants.REST_CONFIG.JSON_TRAINING_DATASET_HOPSFS_CONNECTOR_ID]
self.hopsfs_connector_name = hopsfs_training_dataset_json[
constants.REST_CONFIG.JSON_TRAINING_DATASET_HOPSFS_CONNECTOR_NAME]
self.size = hopsfs_training_dataset_json[constants.REST_CONFIG.JSON_TRAINING_DATASET_SIZE]
self.hdfs_store_path = hopsfs_training_dataset_json[constants.REST_CONFIG.JSON_TRAINING_DATASET_HDFS_STORE_PATH]
self.inode_id = hopsfs_training_dataset_json[constants.REST_CONFIG.JSON_TRAINING_DATASET_INODE_ID]
| [
6738,
29438,
1330,
38491,
198,
198,
4871,
367,
2840,
9501,
44357,
27354,
292,
316,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1432,
6629,
257,
3047,
27039,
319,
29438,
9501,
287,
262,
3895,
3650,
198,
220,
220,
220,
37227,
... | 2.301339 | 448 |
from . import utils
import datetime
import glob
import matplotlib
import matplotlib.image
import numpy as np
import os
import requests
import subprocess
import tables
import time
import torch.utils.data
movie_cache = {}
class PVC1(torch.utils.data.Dataset):
"""
Loads a segment from the Ringach crcns pvc-1 dataset.
Each call to __get_item__ returns a tuple ((X, experiment), y)
X: a numpy array with size (3, nt, ny, nx)
experiment: a string corresponding to which experiment we're talking about.
y: a numpy array with size (nt - ntau + 1, nelectrodes[experiment]).
Arguments:
root: the root folder where the data is stored
nx: the number of x values (will center crop)
ny: the number of y value (will center crop)
nt: the number of images per micro-batch
ntau: the number of time lags that the y response listens to
nframedelay: the number of frames the neural response is delayed by compared to the neural data.
nframestart: the number of frames after the onset of a sequence to start at. 15 by default ~ 500ms
split: either train, tune or report (if tune or report, returns a 1 / 10 tune/report set, if train, 8/10)
"""
def _movie_info(root):
"""
Build up a hashmap from tuples of (movie, segment) to info about the
movie, including location and duration
"""
path = os.path.join(root, 'movies.h5')
h5file = tables.open_file(path, 'r')
movie_info = {}
for i in range(30):
for j in range(4):
node = f'/movie{j:03}_{i:03}'
nframes = len(h5file.get_node(node))
movie_info[(j, i)] = {'nframes': nframes}
h5file.close()
return movie_info
def download(root, url=None):
"""Download the dataset to disk.
Arguments:
root: root folder to download to.
url: the root URL to grab the data from.
Returns:
True if downloaded correctly
"""
if url is None:
url = os.getenv('GCS_ROOT')
zip_name = 'crcns-pvc1.zip'
out_file = os.path.join(root, 'zip', zip_name)
if os.path.exists(out_file) and os.stat(out_file).st_size == 1798039870:
print(f"Already fetched {zip_name}")
else:
try:
os.makedirs(os.path.join(root, 'zip'))
except FileExistsError:
pass
# Instead of downloading in Python and taking up a bunch of memory, use curl.
process = subprocess.Popen(['wget',
'--quiet',
url + zip_name,
'-O',
out_file],
stdout=subprocess.DEVNULL)
t0 = datetime.datetime.now()
progress = '|\\-/'
while process.poll() is None:
dt = (datetime.datetime.now() - t0) / datetime.timedelta(seconds=.5)
char = progress[int(dt) % 4]
print('\r' + char, end='')
time.sleep(.5)
print('\n')
# Check everything good
if not os.path.exists(out_file):
# Something bad happened during download
print(f"Failed to download {zip_name}")
return False
# Now unzip the data if necessary.
if os.path.exists(os.path.join(root, 'crcns-ringach-data')):
print("Already unzipped")
return True
else:
process = subprocess.Popen(['unzip',
out_file,
'-d',
root],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process.communicate()
return True
if __name__ == '__main__':
dataset_train = PVC1(split='train')
train_electrodes = dataset_train.total_electrodes
train_len = len(dataset_train)
print("Len(train_dataset): ", train_len)
for i in range(0, len(dataset_train), 100):
d = dataset_train[i]
assert len(d) == 2
assert len(d[0]) == 2
assert d[0][1].ndim == 1
assert d[0][1].size >= 1
assert d[1].shape[0] == d[0][0].shape[-1]
dataset_test = PVC1(split='test')
test_electrodes = dataset_test.total_electrodes
test_len = len(dataset_test)
assert train_electrodes == test_electrodes
print("Len(test_dataset): ", test_len)
for i in range(0, len(dataset_test), 100):
d = dataset_test[i]
assert abs((train_len + test_len) / test_len - 7) < .1 | [
6738,
764,
1330,
3384,
4487,
201,
198,
201,
198,
11748,
4818,
8079,
201,
198,
11748,
15095,
201,
198,
11748,
2603,
29487,
8019,
201,
198,
11748,
2603,
29487,
8019,
13,
9060,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
2868... | 2.027327 | 2,342 |
"""
Approach-1:
If we observe the given 2d array.Array is increasing from left to right as well as increasing from top to bottom.so
approach is to start from top right and
(1)If the current element is greater than target which means all the elements in the current column is greater than
the target.So exclude that column and go to previous column i.e move left
(2)If the current element is less than target which mean all the elements in the current row is less than
the target.so exclude that column and go to next row
"""
if __name__=="__main__":
"""
m,n=map(int,input().split())
matrix=[[int(input()) for c in range(n)]for r in range(m)]
target=int(input())
print(solve(matrix,target))
"""
matrix = [
[1, 3, 9],
[2, 5, 10],
[5, 7, 13]
]
target = 7
print(solve(matrix,target))
"""
Approach-2:
Traverse across each row and use the Binary search algorithm to find the target required
"""
if __name__=="__main__":
"""
m,n=map(int,input().split())
matrix=[[int(input()) for c in range(n)]for r in range(m)]
target=int(input())
print(solve(matrix,target))
"""
matrix = [
[1, 3, 9],
[2, 5, 10],
[5, 7, 13]
]
target = 7
print(solve(matrix,target)) | [
37811,
198,
4677,
28562,
12,
16,
25,
198,
1532,
356,
12414,
262,
1813,
362,
67,
7177,
13,
19182,
318,
3649,
422,
1364,
284,
826,
355,
880,
355,
3649,
422,
1353,
284,
4220,
13,
568,
198,
21064,
620,
318,
284,
923,
422,
1353,
826,
2... | 2.701299 | 462 |
# -*- coding: utf-8 -*-
import sys
from os import makedirs, path
from typing import NoReturn, Text
# ==============================================================================
# CLASS
# ==============================================================================
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
11748,
25064,
201,
198,
6738,
28686,
1330,
285,
4335,
17062,
11,
3108,
201,
198,
6738,
19720,
1330,
1400,
13615,
11,
8255,
201,
198,
201,
198,
2,
38093,
... | 4.7 | 60 |
from anthill.framework.handlers.streaming.uploadfile import UploadFileStreamHandler
from anthill.platform.handlers import UserHandlerMixin
# noinspection PyAbstractClass
| [
6738,
26794,
359,
13,
30604,
13,
4993,
8116,
13,
5532,
278,
13,
25850,
7753,
1330,
36803,
8979,
12124,
25060,
198,
6738,
26794,
359,
13,
24254,
13,
4993,
8116,
1330,
11787,
25060,
35608,
259,
628,
198,
2,
645,
1040,
14978,
9485,
23839,
... | 4 | 43 |
#!/usr/bin/env python
# encoding: utf-8
from gi.repository import GLib
from urllib.parse import unquote
import sys
import dbus
import dbus.service
import dbus.exceptions
import dbus.mainloop.glib
def decompose_uri(uri):
"""
extract folder part & filename part.
"""
last_slash = uri.rfind('/')
uri_part = uri[:last_slash]
filename = uri[last_slash + 1:]
colon = uri_part.find(':')
folder = uri_part[colon+3:]
return (unquote(folder), unquote(filename))
filemanager1_bus_name = 'org.freedesktop.FileManager1'
filemanager1_bus_path = '/org/freedesktop/FileManager1'
if __name__ == '__main__':
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
session_bus = dbus.SessionBus()
bus_name = dbus.service.BusName(filemanager1_bus_name, session_bus)
filemanager = FileManager1(session_bus, filemanager1_bus_path)
print('Service started.')
mainloop = GLib.MainLoop()
mainloop.run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
6738,
308,
72,
13,
260,
1930,
37765,
1330,
10188,
571,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
555,
22708,
198,
198,
11748,
25064,
198,
... | 2.440506 | 395 |
import collections
import os
import re
import time
| [
11748,
17268,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
640,
628,
628
] | 4.153846 | 13 |
import glob
from pathlib import Path
import json
import re
import os
import numpy as np
import pandas as pd
from isochrones.models import StellarModelGrid, ModelGridInterpolator
from isochrones.mist.models import MISTEvolutionTrackGrid
from isochrones.mist.bc import MISTBolometricCorrectionGrid
from isochrones.mags import interp_mag_4d, interp_mags_4d
from isochrones.priors import FlatPrior
| [
11748,
15095,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
33918,
198,
11748,
302,
198,
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
318,
5374,
9821,
13,
27530,
1330,
39... | 3.325 | 120 |
# -*-coding:utf-8 -*-
u"""
:创建时间: 2022/3/18 19:04
:作者: 苍之幻灵
:我的主页: https://cpcgskill.com
:QQ: 2921251087
:爱发电: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
"""
from __future__ import unicode_literals, print_function
import array
from cpapi.all import *
import cpapi.all as om
import cpapi.utils as omtl
import maya.api.OpenMaya as om2
import maya.api.OpenMayaAnim as om2aim
from cpmel._object_types.core import Node, Transform, Shape, other_node_cls, new_object, Attr, Component
from cpmel._args_conv import arg_conv
from cpmel.exc import *
@other_node_cls("kJoint")
@other_node_cls("kMesh")
@other_node_cls('kNurbsCurve')
@other_node_cls('kNurbsSurface')
@other_node_cls("kSkinClusterFilter")
| [
2,
532,
9,
12,
66,
7656,
25,
40477,
12,
23,
532,
9,
12,
198,
84,
37811,
198,
25,
26344,
249,
161,
119,
118,
33768,
114,
29785,
112,
25,
33160,
14,
18,
14,
1507,
678,
25,
3023,
198,
25,
43291,
38519,
25,
5525,
233,
235,
45298,
... | 2.238764 | 356 |
import numpy as np
import cv2
import sys, Image, ImageDraw
import colorsys
import allproc as one
import math
import kNN
cap = cv2.VideoCapture(0) #cap is an object that stores the camera properties. Cap is used to retrieve camera options.
list_coord = []
xValues = []
yValues = []
count_points = 0
sumX = 0
sumY = 0
sumX2 = 0
sumXY = 0
while(cap.isOpened()):
# Capture frame-by-frame
ret, frame = cap.read() #reads a frame and returns to ret, frame.
frame = one.flip_frame(cap, frame)
image_clusters = one.find_clusters(frame, (20, 140, 140), (30, 255, 255))
one.form_rectangles(image_clusters, frame)
one.display_boundary(frame)
coord = one.find_largest(image_clusters, frame)
count_points, sumX, sumY, sumX2, sumXY, flag = one.line_distinguish(coord, count_points, sumX, sumY, sumX2, sumXY)
if(flag):
list_coord.append(coord)
xValues.append(coord[0])
yValues.append(coord[1])
# Display the resulting frame
cv2.imshow('frame name',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
# slope = one.find_the_line(count_points, sumX, sumY, sumX2, sumXY)
# one.kNN_Classifier(slope)
one.kNN_integration(xValues, yValues)
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
25064,
11,
7412,
11,
7412,
25302,
198,
11748,
7577,
893,
198,
11748,
477,
36942,
355,
530,
198,
11748,
10688,
198,
11748,
479,
6144,
198,
198,
11128,
796,
269,
85,
17... | 2.660907 | 463 |
#!/usr/bin/python
import argparse
import base64
import json
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Dump all certificates out of Traefik's acme.json file")
parser.add_argument('acme_json', help='path to the acme.json file')
parser.add_argument('dest_dir',
help='path to the directory to store the certificate')
args = parser.parse_args()
#check cert path for right permissions
if os.path.exists(args.dest_dir):
os.chmod(args.dest_dir, 0o755)
certs = read_certs(args.acme_json)
print('Found certs for %d domains' % (len(certs),))
for domain, cert in certs.items():
if(has_changes(args.dest_dir, domain, cert)):
print('Writing cert for domain %s' % (domain,))
write_cert(args.dest_dir, domain, cert)
print('Done')
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
1822,
29572,
198,
11748,
2779,
2414,
198,
11748,
33918,
198,
11748,
28686,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
30751,
796,
1822,
29572,... | 2.560831 | 337 |
# !/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-nhm
# Created by the Natural History Museum in London, UK
from ckan.plugins import toolkit
from flask import Blueprint
blueprint = Blueprint(name='vfactor-iiif', import_name=__name__, url_prefix='/vfactor_iiif')
@blueprint.route('/')
def index():
'''
Render the vfactor iiif page.
'''
return toolkit.render('vfactor_iiif.html')
| [
2,
5145,
14,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
269,
74,
1531,
742,
12,
77,
23940,
198,
2,
15622,
416,
262,
12068,
7443,
9594,
287,
3576,
11,
3482,
... | 2.806667 | 150 |
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the StringRegexHandler class."""
import re
from future.utils import string_types
from .handler import Handler
class StringRegexHandler(Handler):
"""Handler class to handle string updates based on a regex which checks the update content.
Read the documentation of the ``re`` module for more information. The ``re.match`` function is
used to determine if an update should be handled by this handler.
Note:
This handler is not used to handle Telegram :attr:`telegram.Update`, but strings manually
put in the queue. For example to send messages with the bot using command line or API.
Attributes:
pattern (:obj:`str` | :obj:`Pattern`): The regex pattern.
callback (:obj:`callable`): The callback function for this handler.
pass_groups (:obj:`bool`): Determines whether ``groups`` will be passed to the
callback function.
pass_groupdict (:obj:`bool`): Determines whether ``groupdict``. will be passed to
the callback function.
pass_update_queue (:obj:`bool`): Determines whether ``update_queue`` will be
passed to the callback function.
pass_job_queue (:obj:`bool`): Determines whether ``job_queue`` will be passed to
the callback function.
Args:
pattern (:obj:`str` | :obj:`Pattern`): The regex pattern.
callback (:obj:`callable`): The callback function for this handler. Will be called when
:attr:`check_update` has determined that an update should be processed by this handler.
Callback signature for context based API:
``def callback(update: Update, context: CallbackContext)``
The return value of the callback is usually ignored except for the special case of
:class:`telegram.ext.ConversationHandler`.
pass_groups (:obj:`bool`, optional): If the callback should be passed the result of
``re.match(pattern, data).groups()`` as a keyword argument called ``groups``.
Default is ``False``
DEPRECATED: Please switch to context based callbacks.
pass_groupdict (:obj:`bool`, optional): If the callback should be passed the result of
``re.match(pattern, data).groupdict()`` as a keyword argument called ``groupdict``.
Default is ``False``
DEPRECATED: Please switch to context based callbacks.
pass_update_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called
``update_queue`` will be passed to the callback function. It will be the ``Queue``
instance used by the :class:`telegram.ext.Updater` and :class:`telegram.ext.Dispatcher`
that contains new updates which can be used to insert updates. Default is ``False``.
DEPRECATED: Please switch to context based callbacks.
pass_job_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called
``job_queue`` will be passed to the callback function. It will be a
:class:`telegram.ext.JobQueue` instance created by the :class:`telegram.ext.Updater`
which can be used to schedule new jobs. Default is ``False``.
DEPRECATED: Please switch to context based callbacks.
"""
def check_update(self, update):
"""Determines whether an update should be passed to this handlers :attr:`callback`.
Args:
update (:obj:`str`): An incoming command.
Returns:
:obj:`bool`
"""
if isinstance(update, string_types):
match = re.match(self.pattern, update)
if match:
return match
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
317,
5888,
326,
3769,
257,
11361,
7071,
284,
262,
50203,
18579,
7824,
198,
2,
15069,
357,
34,
8,
1853,
12,
7908,
198,
2,
1004,
28092,
36026,
390,
22862,
4496,
1279,
7959,
... | 2.885971 | 1,561 |
from functools import lru_cache
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import numpy as np
import plotly.graph_objs as go
from app import app
from db import session
from models import SingleAgentWellResult, WellResult
from utils import inhibition_colorscale, viability_colorscale, get_matrix_from_url, \
float_formatter
@lru_cache()
@app.callback(
[dash.dependencies.Output('viability-heatmap', 'figure'),
dash.dependencies.Output('viability-surface', 'figure'),
dash.dependencies.Output('lib1-viability-heatmap','figure'),
dash.dependencies.Output('lib2-viability-heatmap','figure'),],
[dash.dependencies.Input('viability-heatmap-zvalue', 'value')],
[dash.dependencies.State('url', 'pathname')]
)
@lru_cache(maxsize=10000)
@lru_cache() | [
6738,
1257,
310,
10141,
1330,
300,
622,
62,
23870,
198,
198,
11748,
14470,
198,
11748,
14470,
62,
18769,
26418,
62,
5589,
3906,
355,
288,
15630,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
198,
11748,
14470,
62,
6494,
... | 2.956376 | 298 |
"""Miscellaneous utility functions."""
from functools import reduce
from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
def letterbox_image(image, size):
'''resize image with unchanged aspect ratio using padding'''
image_w, image_h = image.size
w, h = size
new_w = int(image_w * min(w*1.0/image_w, h*1.0/image_h))
new_h = int(image_h * min(w*1.0/image_w, h*1.0/image_h))
resized_image = image.resize((new_w,new_h), Image.BICUBIC)
boxed_image = Image.new('RGB', size, (128,128,128))
boxed_image.paste(resized_image, ((w-new_w)//2,(h-new_h)//2))
return boxed_image
def get_random_data_(image,box, input_shape, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5):
'''random preprocessing for real-time data augmentation'''
iw, ih = image.size
h, w = input_shape
dx=0
dy=0
nw = w
nh = h
hue = rand(-hue, hue)
sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
val = rand(1, val) if rand()<.5 else 1/rand(1, val)
x = rgb_to_hsv(np.array(image)/255.)
x[..., 0] += hue
x[..., 0][x[..., 0]>1] -= 1
x[..., 0][x[..., 0]<0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x>1] = 1
x[x<0] = 0
image_data = hsv_to_rgb(x) # numpy array, 0 to 1
# correct boxes
box_data = np.zeros((max_boxes,5))
if len(box)>0:
np.random.shuffle(box)
box[:, 0:2][box[:, 0:2]<0] = 0
box[:, 2][box[:, 2]>=w] = w-1
box[:, 3][box[:, 3]>=h] = h-1
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box
if len(box)>max_boxes: box = box[:max_boxes]
box_data[:len(box)] = box
return image_data, box_data
| [
37811,
31281,
25673,
10361,
5499,
526,
15931,
198,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
13,
4033,
669,
1330,
46140,
62,
1462,
6... | 2.100952 | 1,050 |
from ..converter_utils import let_user_pick
from ..converter_utils import print_message as prmsg
from ..converter_utils import run_ffmpeg
from .base import Base
class Audio(Base):
"""Get user input to execute different video conversions"""
def run(self):
"""Run the Audio command."""
chosen_option = let_user_pick(self.conversion_map)
source_paths, output_paths, params = self.get_user_input(
self.conversion_map[chosen_option]
)
for (source_path, output_path) in list(zip(source_paths, output_paths)):
run_ffmpeg(source_path, output_path, params, self.options)
prmsg('completed')
| [
6738,
11485,
1102,
332,
353,
62,
26791,
1330,
1309,
62,
7220,
62,
27729,
198,
6738,
11485,
1102,
332,
353,
62,
26791,
1330,
3601,
62,
20500,
355,
778,
19662,
198,
6738,
11485,
1102,
332,
353,
62,
26791,
1330,
1057,
62,
487,
43913,
198... | 2.573643 | 258 |
"""Tests for the rds scheduler class."""
from moto import mock_rds2
from package.scheduler.rds_handler import RdsScheduler
from .utils import launch_rds_instance
import pytest
@pytest.mark.parametrize(
"aws_region, tag_key, tag_value, result_count", [
("eu-west-1", "tostop", "true", 1),
("eu-west-2", "tostop", "true", 1),
("eu-west-2", "badtagkey", "badtagvalue", 0),
]
)
@mock_rds2
def test_list_rds(aws_region, tag_key, tag_value, result_count):
"""Verify list rds instance function."""
launch_rds_instance(aws_region, "tostop", "true")
rds_scheduler = RdsScheduler(aws_region)
taglist = rds_scheduler.list_instances(tag_key, tag_value)
assert len(list(taglist)) == result_count
| [
37811,
51,
3558,
329,
262,
374,
9310,
6038,
18173,
1398,
526,
15931,
198,
198,
6738,
285,
2069,
1330,
15290,
62,
4372,
82,
17,
198,
198,
6738,
5301,
13,
1416,
704,
18173,
13,
4372,
82,
62,
30281,
1330,
371,
9310,
50,
1740,
18173,
19... | 2.367412 | 313 |
# TODO comment this if need to disable debug logging of config
| [
628,
628,
628,
198,
220,
220,
220,
1303,
16926,
46,
2912,
428,
611,
761,
284,
15560,
14257,
18931,
286,
4566,
628
] | 3.571429 | 21 |
from face_recog_gui import *
import logging
import os
from flask import Flask
from flask_ask import Ask, request, session, question, statement
app = Flask(__name__)
ask = Ask(app, "/")
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
@ask.launch
@ask.intent('HelloWorldIntent')
@ask.intent('AuthenticateIntent')
@ask.intent('AMAZON.HelpIntent')
@ask.intent('AMAZON.StopIntent')
@ask.intent('AMAZON.CancelIntent')
@ask.session_ended
if __name__ == '__main__':
if 'ASK_VERIFY_REQUESTS' in os.environ:
verify = str(os.environ.get('ASK_VERIFY_REQUESTS', '')).lower()
if verify == 'false':
app.config['ASK_VERIFY_REQUESTS'] = False
app.run(debug=True)
| [
6738,
1986,
62,
8344,
519,
62,
48317,
1330,
1635,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
2093,
1330,
16981,
11,
2581,
11,
6246,
11,
1808,
11,
2643,
198,
198,
1324,
796,
46947,
7... | 2.432526 | 289 |
from typing import Type, List, Tuple, Dict, Any
import pytest
from pydantic import BaseModel, ValidationError
from ensysmod.schemas.energy_transmission_distance import EnergyTransmissionDistanceCreate
schemas_with_region_to_or_ref_required: List[Tuple[Type[BaseModel], Dict[str, Any]]] = [
(EnergyTransmissionDistanceCreate, {"distance": 4, "ref_region_from": 42})
]
schemas_with_region_to_or_ref_optional: List[Tuple[Type[BaseModel], Dict[str, Any]]] = []
schemas_with_region_to_or_ref = schemas_with_region_to_or_ref_required + schemas_with_region_to_or_ref_optional
@pytest.mark.parametrize("schema,data", schemas_with_region_to_or_ref_required)
def test_error_missing_region_to_or_ref(schema: Type[BaseModel], data: Dict[str, Any]):
"""
Test that a region_to_or_ref is required for a schema
"""
with pytest.raises(ValidationError) as exc_info:
schema(**data)
assert len(exc_info.value.errors()) == 1
assert exc_info.value.errors()[0]["loc"] == ("__root__",)
assert exc_info.value.errors()[0]["msg"] == "Either region_to or ref_region_to must be provided."
assert exc_info.value.errors()[0]["type"] == "value_error"
@pytest.mark.parametrize("schema,data", schemas_with_region_to_or_ref_optional)
def test_ok_missing_region_to_or_ref(schema: Type[BaseModel], data: Dict[str, Any]):
"""
Test that a region_to_or_ref is optional for a schema
"""
schema(**data)
@pytest.mark.parametrize("schema,data", schemas_with_region_to_or_ref)
def test_error_on_negative_ref(schema: Type[BaseModel], data: Dict[str, Any]):
"""
Test that a region_to_or_ref is not under zero
"""
with pytest.raises(ValidationError) as exc_info:
schema(ref_region_to=-1, **data)
assert len(exc_info.value.errors()) == 1
assert exc_info.value.errors()[0]["loc"] == ("__root__",)
assert exc_info.value.errors()[0]["msg"] == "Reference to the region_to must be positive."
assert exc_info.value.errors()[0]["type"] == "value_error"
@pytest.mark.parametrize("schema,data", schemas_with_region_to_or_ref)
def test_error_on_zero_ref(schema: Type[BaseModel], data: Dict[str, Any]):
"""
Test that a region_to_or_ref is not under zero
"""
with pytest.raises(ValidationError) as exc_info:
schema(ref_region_to=0, **data)
assert len(exc_info.value.errors()) == 1
assert exc_info.value.errors()[0]["loc"] == ("__root__",)
assert exc_info.value.errors()[0]["msg"] == "Reference to the region_to must be positive."
assert exc_info.value.errors()[0]["type"] == "value_error"
@pytest.mark.parametrize("schema,data", schemas_with_region_to_or_ref)
def test_error_on_long_region_to(schema: Type[BaseModel], data: Dict[str, Any]):
"""
Test that a region_to_or_ref is not under zero
"""
with pytest.raises(ValidationError) as exc_info:
schema(region_to='a' * 101, **data)
assert len(exc_info.value.errors()) == 1
assert exc_info.value.errors()[0]["loc"] == ("__root__",)
assert exc_info.value.errors()[0]["msg"] == "The region_to must not be longer than 100 characters."
assert exc_info.value.errors()[0]["type"] == "value_error"
@pytest.mark.parametrize("schema,data", schemas_with_region_to_or_ref)
def test_ok_region_to_or_ref(schema: Type[BaseModel], data: Dict[str, Any]):
"""
Test that a region_to_or_ref with everything over 0 is valid
"""
schema(region_to='a', **data)
schema(region_to='a' * 100, **data)
schema(ref_region_to=1, **data)
| [
6738,
19720,
1330,
5994,
11,
7343,
11,
309,
29291,
11,
360,
713,
11,
4377,
198,
198,
11748,
12972,
9288,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
11,
3254,
24765,
12331,
198,
198,
6738,
3140,
893,
4666,
13,
1416,
4411,
292,
13,
... | 2.558545 | 1,375 |
print(physics([1,2,3],[1,2,3])) | [
198,
4798,
7,
746,
23154,
26933,
16,
11,
17,
11,
18,
38430,
16,
11,
17,
11,
18,
60,
4008
] | 1.684211 | 19 |
from . import models
from . import constants
from . import searchers
from . import utilities | [
6738,
764,
1330,
4981,
198,
6738,
764,
1330,
38491,
198,
6738,
764,
1330,
9622,
3533,
198,
6738,
764,
1330,
20081
] | 4.6 | 20 |
# digite notas até digitar compute e depois faz a média
nota = '0'
soma = 0
incremento = 0
while nota != 'compute':
nota = input ('Digite uma nota de 0 a 10: ')
if nota == 'compute':
print (' ')
else:
soma += float (nota)
incremento += 1
print ('A média será',soma, '/', incremento,'=',soma / incremento)
# mesmo exercicio utilizando listas para armazenar as notas
notas = []
while True:
nota = input ('Digite uma nota (ou digite "compute" para encerrar): ')
if nota == 'compute':
break
valor = float (nota)
notas.append (valor)
print (sum(notas) / len (notas))
| [
2,
3100,
578,
407,
292,
379,
2634,
3100,
7940,
24061,
304,
1207,
10924,
277,
1031,
257,
285,
2634,
67,
544,
220,
198,
198,
1662,
64,
796,
705,
15,
6,
198,
82,
6086,
796,
657,
198,
24988,
434,
78,
796,
657,
198,
4514,
407,
64,
14... | 2.271739 | 276 |
# Author: Atheesh Krishnan
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
axis = pd.read_csv('D:/DS/AXISBANK.csv')
cipla = pd.read_csv('D:/DS/CIPLA.csv')
fortis = pd.read_csv('D:/DS/FORTIS.csv')
jet = pd.read_csv('D:/DS/JETAIRWAYS.csv')
titan = pd.read_csv('D:/DS/TITAN.csv')
axis = axis.rename(index = str, columns={"Close Price": "axis_cp"})
cipla = cipla.rename(index = str, columns={"Close Price": "cipla_cp"})
fortis = fortis.rename(index = str, columns={"Close Price": "fortis_cp"})
jet = jet.rename(index = str, columns={"Close Price": "jet_cp"})
titan = titan.rename(index = str, columns={"Close Price": "titan_cp"})
# create new df with close prices for all 5 stocks
portfolio = pd.DataFrame(axis.axis_cp)
cipla, portfolio = [d.reset_index(drop=True) for d in (cipla, portfolio)]
portfolio = portfolio.join(cipla['cipla_cp'])
fortis, portfolio = [d.reset_index(drop=True) for d in (fortis, portfolio)]
portfolio = portfolio.join(fortis['fortis_cp'])
jet, portfolio = [d.reset_index(drop=True) for d in (jet, portfolio)]
portfolio = portfolio.join(jet['jet_cp'])
titan, portfolio = [d.reset_index(drop=True) for d in (titan, portfolio)]
portfolio = portfolio.join(titan['titan_cp'])
# create the correlation matrix
corr_matrix = np.corrcoef(portfolio)
#convert daily stock prices into daily returns
returns = portfolio.pct_change()
#calculate mean daily return and covariance of daily returns
mean_daily_returns = returns.mean()
cov_matrix = returns.cov()
volatility = returns.std()
#set number of runs of random portfolio weights
num_portfolios = 50000
stock = ['Axis', 'Cipla', 'Fortis', 'Jet', 'Titan']
#increased the size of the array to hold the weight values for each stock
results = np.zeros((4+len(stock)-1,num_portfolios))
for i in range(num_portfolios):
#select random weights for portfolio holdings
w = np.array(np.random.random(5))
#rebalance weights to sum to 1, so that total portfolio alloc stays within 100%
w /= np.sum(w)
portfolio_return = np.sum(mean_daily_returns * w) * 252
portfolio_vol = np.sqrt(np.dot(w.T,np.dot(cov_matrix, w))) * np.sqrt(252)
results[0,i] = portfolio_return
results[1,i] = portfolio_vol
#portfolio_sharpe = portfolio_return / portfolio_vol
results[2,i] = results[0,i]/results[1,i]
#iterate through the weight vector and add data to results array
for j in range(len(w)):
results[j+3,i] = w[j]
result_frame = pd.DataFrame(results.T,columns=['ret','stdev','sharpe',stock[0],stock[1],stock[2],stock[3],stock[4]])
#locate position of portfolio with highest Sharpe Ratio
max_sharpe_port = result_frame.iloc[result_frame['sharpe'].idxmax()]
#locate positon of portfolio with minimum standard deviation
min_vol_port = result_frame.iloc[result_frame['stdev'].idxmin()]
stdev = np.asarray(result_frame.stdev)
ret = np.asarray(result_frame.ret)
sharpe = np.asarray(result_frame.sharpe)
# plotting the efficient frontier
plt.xlabel('Volatility')
plt.ylabel('Returns')
plt.title('Efficient Frontier')
plt.scatter(stdev, ret,c =sharpe,cmap='viridis')
plt.colorbar()
plt.scatter(max_sharpe_port[1],max_sharpe_port[0],marker=(5,1,0),color='r',s=1000)
plt.scatter(min_vol_port[1],min_vol_port[0],marker=(5,1,0),color='g',s=1000)
print(plt.plot())
# percentage allocation
print('Portfolio Allocation for Minimum Volatility: ')
print(min_vol_port)
print('Portfolio Alocation for Maximum Sharpe Ratio: ')
print(max_sharpe_port)
| [
2,
6434,
25,
27751,
5069,
31372,
12647,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
198,
... | 2.665644 | 1,304 |
from . import mirror_graph
from . import mirror_graph_sbu
| [
6738,
764,
1330,
10162,
62,
34960,
198,
6738,
764,
1330,
10162,
62,
34960,
62,
82,
11110,
198
] | 3.411765 | 17 |
# -*- coding: utf8 -*-
# Copyright (c) 2021 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import abc
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, Generic, Type, TypeVar
from shut.utils.io.virtual import VirtualFiles
from shut.utils.type_registry import TypeRegistry
if TYPE_CHECKING:
from shut.model.abstract import AbstractProjectModel
T = TypeVar('T')
T_co = TypeVar('T_co', covariant=True)
T_AbstractProjectModel = TypeVar('T_AbstractProjectModel', bound='AbstractProjectModel')
__all__ = [
'VersionRef',
'Renderer',
'register_renderer',
'get_files',
'get_version_refs',
]
@dataclass
registry = TypeRegistry[Renderer[T_AbstractProjectModel]]()
def register_renderer(t: Type[T_AbstractProjectModel], renderer: Type[Renderer[T_AbstractProjectModel]]) -> None:
  """
  Register the *renderer* implementation to run when creating files for *t*.
  """
  # Renderers are looked up by model type later, in get_files() / get_version_refs().
  registry.put(t, renderer)
def get_files(obj: T_AbstractProjectModel) -> VirtualFiles:
  """
  Collect the files produced for *obj* by every renderer registered for its
  type, plus the object's own auxiliary renderers.
  """
  vfiles = VirtualFiles()
  # Registered renderers are factories and must be instantiated first;
  # auxiliary renderers come back as ready-to-use instances.
  producers = [factory() for factory in registry.for_type(type(obj))]
  producers.extend(obj.get_auxiliary_renderers())
  for producer in producers:
    producer.get_files(vfiles, obj)
  return vfiles
def get_version_refs(obj: T_AbstractProjectModel) -> Iterable[VersionRef]:
  """
  Yield every version ref reported by the renderers registered for the
  type of *obj*.
  """
  for factory in registry.for_type(type(obj)):
    producer = factory()
    yield from producer.get_version_refs(obj)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
33448,
11271,
21921,
41916,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
4... | 3.337629 | 776 |
import pkg_resources
import pandas as pd
def charity_donation():
    """Load the bundled charity-donation dataset as a DataFrame.

    354 rows, no missing values. Columns: n_donations, total_donations,
    time_donating, recent_donation, last_donation, age (int64) and
    gender, reside (object).
    """
    resource = pkg_resources.resource_stream(__name__, 'data/charity_donation.csv')
    return pd.read_csv(resource)
def employee_survey():
    """Load the bundled employee-survey dataset as a DataFrame.

    2833 rows, no missing values; all columns are int64: Happiness,
    Ben1-Ben3, Work1-Work3, Man1-Man3 and Car1-Car4.
    """
    resource = pkg_resources.resource_stream(__name__, 'data/employee_survey.csv')
    return pd.read_csv(resource)
| [
11748,
279,
10025,
62,
37540,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
4299,
11016,
62,
9099,
341,
33529,
198,
220,
220,
220,
37227,
13615,
257,
1366,
14535,
546,
262,
11016,
13784,
13,
628,
220,
220,
220,
49850,
262,
1708,
703... | 1.960309 | 907 |
# Python program to draw a star
# using Turtle graphics
import turtle

star = turtle.Turtle()

for i in range(50):
    star.forward(50)
    # A 144-degree exterior turn traces a five-pointed star;
    # 50 iterations simply retrace the 5-segment star several times.
    star.right(144)
turtle.done()  # keep the window open until it is closed by the user
| [
2,
11361,
1430,
284,
3197,
3491,
220,
201,
198,
2,
1262,
33137,
30297,
220,
201,
198,
11748,
28699,
220,
201,
198,
201,
198,
7364,
796,
28699,
13,
51,
17964,
3419,
220,
201,
198,
201,
198,
1640,
1312,
287,
2837,
7,
1120,
2599,
220,
... | 2.447368 | 76 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from podium_api.asyncreq import get_json_header_token, make_request_custom_success
from podium_api.types.paged_response import get_paged_response_from_json
from podium_api.types.venue import get_venue_from_json
def make_venue_get(
    token,
    endpoint,
    expand=False,
    quiet=None,
    success_callback=None,
    redirect_callback=None,
    failure_callback=None,
    progress_callback=None,
):
    """
    Request that returns a PodiumVenue that represents a specific
    venue found at the URI.

    Args:
        token (PodiumToken): The authentication token for this session.

        endpoint (str): The URI for the venue.

    Kwargs:
        expand (bool): Expand all objects in response output.
        Defaults to False.

        quiet (object): If not None HTML layout will not render endpoint
        description. Defaults to None.

        success_callback (function): Callback for a successful request,
        will have the signature:
            on_success(PodiumVenue)
        Defaults to None.

        failure_callback (function): Callback for failures and errors.
        Will have the signature:
            on_failure(failure_type (string), result (dict), data (dict))
        Values for failure type are: 'error', 'failure'. Defaults to None.

        redirect_callback (function): Callback for redirect,
        Will have the signature:
            on_redirect(result (dict), data (dict))
        Defaults to None.

        progress_callback (function): Callback for progress updates,
        will have the signature:
            on_progress(current_size (int), total_size (int), data (dict))
        Defaults to None.

    Return:
        UrlRequest: The request being made.

    """
    # Only forward the optional query parameters that were actually set.
    params = {}
    if expand is not None:
        params["expand"] = expand
    if quiet is not None:
        params["quiet"] = quiet
    header = get_json_header_token(token)
    return make_request_custom_success(
        endpoint,
        venue_success_handler,
        method="GET",
        success_callback=success_callback,
        failure_callback=failure_callback,
        progress_callback=progress_callback,
        redirect_callback=redirect_callback,
        params=params,
        header=header,
    )
def make_venues_get(
    token,
    endpoint,
    start=None,
    per_page=None,
    expand=False,
    quiet=None,
    success_callback=None,
    redirect_callback=None,
    failure_callback=None,
    progress_callback=None,
):
    """
    Request that returns a PodiumPagedRequest of venues.

    Args:
        token (PodiumToken): The authentication token for this session.

        endpoint (str): The endpoint to make the request too.

    Kwargs:
        expand (bool): Expand all objects in response output.
        Defaults to False.

        quiet (object): If not None HTML layout will not render endpoint
        description. Defaults to None.

        success_callback (function): Callback for a successful request,
        will have the signature:
            on_success(PodiumPagedResponse)
        Defaults to None.

        failure_callback (function): Callback for failures and errors.
        Will have the signature:
            on_failure(failure_type (string), result (dict), data (dict))
        Values for failure type are: 'error', 'failure'. Defaults to None.

        redirect_callback (function): Callback for redirect,
        Will have the signature:
            on_redirect(result (dict), data (dict))
        Defaults to None.

        progress_callback (function): Callback for progress updates,
        will have the signature:
            on_progress(current_size (int), total_size (int), data (dict))
        Defaults to None.

        start (int): Starting index for events list. 0 indexed.
        Defaults to None.

        per_page (int): Number per page of results, max of 100 (larger
        values are clamped). Defaults to None.

    Return:
        UrlRequest: The request being made.

    """
    # Only forward the optional query parameters that were actually set.
    params = {}
    if expand is not None:
        params["expand"] = expand
    if quiet is not None:
        params["quiet"] = quiet
    if start is not None:
        params["start"] = start
    if per_page is not None:
        # The API caps page size at 100.
        per_page = min(per_page, 100)
        params["per_page"] = per_page
    header = get_json_header_token(token)
    return make_request_custom_success(
        endpoint,
        venues_success_handler,
        method="GET",
        success_callback=success_callback,
        failure_callback=failure_callback,
        progress_callback=progress_callback,
        redirect_callback=redirect_callback,
        params=params,
        header=header,
    )
def venues_success_handler(req, results, data):
    """
    Build a PodiumPagedResponse of venues from *results* and hand it to the
    'success_callback' stored in *data*, if one was registered.

    Called automatically by **make_venues_get**.

    Args:
        req (UrlRequest): Instance of the request that was made.

        results (dict): Dict returned by the request.

        data (dict): Wildcard dict carrying per-request state; contains at
        least a 'success_callback' key.

    Return:
        None, this function instead calls a callback.
    """
    callback = data["success_callback"]
    if callback is None:
        return
    callback(get_paged_response_from_json(results, "venues"))
def venue_success_handler(req, results, data):
    """
    Build a PodiumVenue from *results* and hand it to the
    'success_callback' stored in *data*, if one was registered.

    Called automatically by **make_venue_get**.

    Args:
        req (UrlRequest): Instance of the request that was made.

        results (dict): Dict returned by the request.

        data (dict): Wildcard dict carrying per-request state; contains at
        least a 'success_callback' key.

    Return:
        None, this function instead calls a callback.
    """
    callback = data["success_callback"]
    if callback is None:
        return
    callback(get_venue_from_json(results["venue"]))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
6738,
27941,
62,
15042,
13,
292,
2047,
7513,
80,
1330,
651,
62,
17752,
62,
25677,
62,
30001,
11,
787,
62,
... | 2.492376 | 2,492 |
from urllib.request import urlopen # b_soup_1.py
from bs4 import BeautifulSoup
import xlwt
if __name__ == "__main__":
main() | [
6738,
2956,
297,
571,
13,
25927,
1330,
19016,
9654,
220,
1303,
275,
62,
82,
10486,
62,
16,
13,
9078,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
2124,
75,
46569,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417... | 2.591837 | 49 |
# Generated by Django 2.2 on 2021-09-02 23:16
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
319,
33448,
12,
2931,
12,
2999,
2242,
25,
1433,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.966667 | 30 |
from django.contrib.auth import logout, login
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.views.generic import CreateView, FormView
from petstagram.accounts.forms import LoginForm, RegisterForm, ProfileForm
from petstagram.accounts.models import Profile
from petstagram.pets.models import Pet
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
2604,
448,
11,
17594,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
19816,
1040,
1330,
23093,
37374,
35608,
259,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
189... | 3.584071 | 113 |
#!/usr/bin/env python
import string
import math
from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxySubscriberCached
from geometry_msgs.msg import Point, Pose
from sensor_msgs.msg import PointCloud2
from sensor_msgs import point_cloud2
'''
Created on 03.06.2019
@author: Quentin Gaillot
'''
class GetPositionToPlaceOnTable(EventState):
"""
Renvoie la position d'un endroit sur la table pour poser un objet.
### InputKey
># angle msg.ranges
### OutputKey
<= done Angle de l'obstacle
"""
def __init__(self):
'''
Constructor
'''
super(GetPositionToPlaceOnTable, self).__init__(outcomes=['done', 'not_found'], input_keys=['distanceFromEdge'], output_keys=['position'])
self.planeTopic = "/segment_table/plane"
self.planeSub = ProxySubscriberCached({self.planeTopic: PointCloud2})
self.robotposeTopic = "/robot_pose"
self.robotposeSub = ProxySubscriberCached({self.robotposeTopic: Pose})
self.plane = None
def execute(self, userdata):
'''
Execute this state
'''
# Get the latest robot pose
mypose = self.robotposeSub.get_last_msg(self.robotposeTopic)
if self.planeSub.has_msg(self.planeTopic):
Logger.loginfo('getting table point cloud')
self.plane = self.planeSub.get_last_msg(self.planeTopic)
self.planeSub.remove_last_msg(self.planeTopic)
if self.plane is not None and mypose is not None:
gen = point_cloud2.read_points(self.plane)
closest_point = Point()
closest_point.x = 0
closest_point.y = 0
closest_point.z = 0
min_dist = 99999
numberOfPoints = 0
sum_x = 0
sum_y = 0
sum_z = 0
# find the closest point and center
point = Point()
for p in gen:
numberOfPoints = numberOfPoints + 1
point.x = p[0]
point.y = p[1]
point.z = p[2]
sum_x += point.x
sum_y += point.y
sum_z += point.z
if self.dist(mypose.position, point) < min_dist:
min_dist = self.dist(mypose.position, point)
closest_point = point
center = Point()
center.x = sum_x / numberOfPoints
center.y = sum_y / numberOfPoints
center.z = sum_z / numberOfPoints
#find point to return
distanceClosestPointCenter = self.dist(closest_point, center)
if distanceClosestPointCenter < userdata.distanceFromEdge:
userdata.position = center
return "done"
else:
rapportProportionnel = userdata.distanceFromEdge/distanceClosestPointCenter
pointToReturn = Point()
pointToReturn.x = closest_point.x + (center.x - closest_point.x) * rapportProportionnel
pointToReturn.y = closest_point.y + (center.y - closest_point.y) * rapportProportionnel
pointToReturn.z = closest_point.z + (center.z - closest_point.z) * rapportProportionnel
userdata.position = pointToReturn
return "done"
else:
if self.plane is None:
Logger.loginfo('plane is none')
if mypose is None:
Logger.loginfo('pose is none') | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
4731,
198,
11748,
10688,
198,
6738,
7059,
1350,
62,
7295,
1330,
8558,
9012,
11,
5972,
1362,
198,
6738,
7059,
1350,
62,
7295,
13,
36436,
1330,
38027,
7004,
1416,
24735,
34,
2317,
... | 2.104665 | 1,672 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def soft_thresholding(x, soft_eta, mode):
    """Apply soft thresholding (shrinkage) to one block of the target matrix.

    Row mode shrinks each row by its L2 norm (used for the E(k+1) update);
    element mode shrinks every entry independently (used for the Z(k+1)
    update).

    :param x: one block of target matrix, shape [num_nodes, num_features]
    :param soft_eta: threshold scalar stored in a torch tensor
    :param mode: shrinkage type, "row" or "element"
    :return: shrunk block, shape [num_nodes, num_features]
    """
    assert mode in ('element', 'row'), 'shrinkage type is invalid (element or row)'
    if mode == 'element':
        # Classic elementwise soft threshold: sign(x) * max(|x| - eta, 0).
        return F.relu(x - soft_eta) - F.relu(-x - soft_eta)
    norms = torch.linalg.norm(x, dim=1).unsqueeze(1)
    # Guard against division by zero for all-zero rows.
    norms.clamp_(1e-12)
    scale = F.relu(norms - soft_eta) / norms
    return x * scale
# TV regularization filter
# Edge denoising filter
# Node denoising filter
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
201,
198,
201,
198,
201,
198,
4299,
2705,
62,
400,
10126,
278,
7,
87,
11... | 2.449772 | 438 |
from orator.migrations import Migration
| [
6738,
393,
1352,
13,
76,
3692,
602,
1330,
36991,
628
] | 4.1 | 10 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
# Copyright Commvault Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""File for operating on a Database Server Subclient
DatabaseSubclient is the only class defined in this file.
DatabaseSubclient: Derived class from Subclient Base class, representing a Database server subclient,
and to perform operations on that subclient
DatabaseSubclient:
log_backup_storage_policy() -- updpates the log backup storage policy for this
subclient
"""
from __future__ import unicode_literals
from ..subclient import Subclient
from ..exception import SDKException
class DatabaseSubclient(Subclient):
    """Derived class from Subclient Base class, representing a file system subclient,
        and to perform operations on that subclient."""

    @property
    def log_backup_storage_policy(self):
        """Log backup storage policy name of this subclient, or None if unset."""
        device = self._subclient_properties['commonProperties']['storageDevice']
        if ('logBackupStoragePolicy' in device
                and 'storagePolicyName' in device['logBackupStoragePolicy']):
            return str(device['logBackupStoragePolicy']['storagePolicyName'])

    @log_backup_storage_policy.setter
    def log_backup_storage_policy(self, value):
        """Sets the log backup storage policy of subclient as the value provided as input.

            Args:
                value   (str)   -- Log backup Storage policy name to be assigned to subclient

            Raises:
                SDKException:
                    if failed to update log backup storage policy name

                    if log backup storage policy name is not in string format
        """
        # Reject non-string values up front, then delegate the update.
        if not isinstance(value, str):
            raise SDKException(
                'Subclient', '102', 'Subclient log backup storage policy should be a string value'
            )
        self._set_subclient_properties(
            "_commonProperties['storageDevice']['logBackupStoragePolicy']['storagePolicyName']",
            value,
        )
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
2,
16529,
35937,
201,
198,
2,
15069,
1520,
85,
1721,
11998,
11,
3457,
13,
201,
198,
2,
201,
198... | 2.747664 | 1,070 |
import numpy as np
import math
from ..miniworld import MiniWorldEnv, Room
from ..entity import Box
from gym import spaces
class Hallway(MiniWorldEnv):
"""
Environment in which the goal is to go to a red box
at the end of a hallway
"""
| [
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
6738,
11485,
45313,
6894,
1330,
12558,
10603,
4834,
85,
11,
10096,
198,
6738,
11485,
26858,
1330,
8315,
198,
6738,
11550,
1330,
9029,
198,
198,
4871,
4789,
1014,
7,
39234,
10603,
4834... | 3.189873 | 79 |
from dbstream.DBStream import DBStream
| [
6738,
20613,
5532,
13,
11012,
12124,
1330,
20137,
12124,
198
] | 3.9 | 10 |
import pandas as pd
def with_func(df, columns, func):
    """
    Transform the given columns of *df* with *func*.

    :param df: DataFrame to be transformed (never mutated; ``assign`` copies)
    :param columns: iterable of column names to transform; ``None`` means
        all columns of *df*. Names not present in *df* are silently skipped.
    :param func: callable ``func(df, column)`` returning the new column values
    :return: a new DataFrame with the selected columns replaced
    """
    columns = df.columns if columns is None else columns
    for column in columns:
        if column in df.columns:
            # assign() returns a copy, so the caller's frame stays untouched.
            df = df.assign(**{column: func(df, column)})
    return df
def as_date(df, columns=None, *args, **kwargs):
    """
    Convert the given columns of *df* to datetime values.

    Extra positional/keyword arguments are forwarded to ``pd.to_datetime``.

    :param df: table to be transformed
    :param columns: columns to convert; ``None`` means all columns
    :return: transformed table
    """
    return with_func(
        df,
        columns,
        lambda frame, name: pd.to_datetime(df[name], *args, **kwargs),
    )
def as_set_len_code(df, columns=None):
    """
    Convert the given columns into fixed-length, zero-padded codes (e.g. '001').

    :param df: table to be transformed
    :param columns: dict mapping each column name to its expected code length
    :return: transformed table
    """
    for name, width in columns.items():
        df = with_func(
            df,
            [name],
            lambda frame, col: frame[col].astype(str).str.zfill(width),
        )
    return df
| [
11748,
19798,
292,
355,
279,
67,
628,
198,
4299,
351,
62,
20786,
7,
7568,
11,
15180,
11,
25439,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
833,
272,
23914,
262,
1813,
15180,
1912,
319,
262,
1813,
2163,
198,
220,
220,
220,
... | 2.829365 | 504 |
"""
Author: Benny
Date: Nov 2019
"""
import argparse
import os
from data_utils.S3DISDataLoader import ScannetDatasetWholeScene,S3DISDataset
from data_utils.indoor3d_util import g_label2color
import torch
import logging
from pathlib import Path
import sys
import importlib
from tqdm import tqdm
import provider
import numpy as np
# Resolve repository paths and make the bundled model definitions importable.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))

classes = ["Terrain", "Building", "UrbanAsset", "Vegetation", "Car"]
# Bidirectional label lookups: name -> index and index -> name.
class2label = {name: index for index, name in enumerate(classes)}
seg_classes = class2label
seg_label_to_cat = {index: name for index, name in enumerate(seg_classes)}
def parse_args():
    '''Parse the command-line PARAMETERS for semantic-segmentation testing.'''
    parser = argparse.ArgumentParser('Model')
    add = parser.add_argument
    add('--batch_size', type=int, default=1, help='batch size in testing [default: 32]')
    add('--gpu', type=str, default='0', help='specify gpu device')
    add('--num_point', type=int, default=4096, help='Point Number [default: 4096]')
    add('--log_dir', type=str, default='semseg', help='Experiment root')
    add('--visual', action='store_true', default=False, help='Whether visualize result [default: False]')
    add('--test_area', type=int, default=5, help='Which area to use for test, option: 1-6 [default: 5]')
    add('--num_votes', type=int, default=1, help='Aggregate segmentation scores with voting [default: 5]')
    add('--data_path', default='/content/gdrive/My Drive/AI/Swiss/sampling/*', help='path to data')
    return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(args)
| [
37811,
198,
13838,
25,
44275,
198,
10430,
25,
5267,
13130,
198,
37811,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
6738,
1366,
62,
26791,
13,
50,
18,
26288,
6601,
17401,
1330,
1446,
1236,
316,
27354,
292,
316,
1199,
2305,
36542,
11... | 2.689655 | 667 |
# Generated by Django 3.1.7 on 2021-07-23 21:06
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
22,
319,
33448,
12,
2998,
12,
1954,
2310,
25,
3312,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628,
198
] | 2.787879 | 33 |
from sklearn import metrics
from seqeval.metrics import classification_report, f1_score
from seqeval.scheme import IOBES
def calcuate_ne_f1(EVAL_TRUE,EVAL_PRED):
    """
    Compute a strict, IOBES-scheme, entity-level evaluation report.

    EVAL_TRUE : List[List[str]] , size = (all sentences, sentence len)
    EVAL_PRED : List[List[str]] , size = (all sentences, sentence len)
    """
    # NOTE(review): despite the f1-ish name, this returns seqeval's full
    # classification report (per-type precision/recall/F1), not a single score.
    return classification_report(EVAL_TRUE, EVAL_PRED, mode='strict', scheme=IOBES)
| [
6738,
1341,
35720,
1330,
20731,
220,
201,
198,
6738,
33756,
18206,
13,
4164,
10466,
1330,
17923,
62,
13116,
11,
277,
16,
62,
26675,
201,
198,
201,
198,
6738,
33756,
18206,
13,
15952,
1326,
1330,
314,
9864,
1546,
201,
198,
201,
198,
42... | 2.440678 | 177 |
from .cabinet_object import CabinetObject
from .button_object import ButtonObject
from .serving_region import ServingRegionObject
from .stove_object import StoveObject
from .pot_object import PotObject
| [
6738,
764,
66,
6014,
316,
62,
15252,
1330,
20384,
10267,
198,
6738,
764,
16539,
62,
15252,
1330,
20969,
10267,
198,
6738,
764,
31293,
62,
36996,
1330,
49208,
47371,
10267,
198,
6738,
764,
301,
659,
62,
15252,
1330,
520,
659,
10267,
198,... | 4.06 | 50 |
import numpy as np
import pandas as pd
from pydantic import BaseModel
from rdkit.Chem import AllChem
from typing import List
from icolos.core.containers.compound import Conformer
from icolos.core.workflow_steps.step import StepBase
from icolos.utils.enums.step_enums import StepRMSFilterEnum
_SRF = StepRMSFilterEnum()  # module-level handle to the RMS-filter step's settings enum
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
198,
6738,
374,
67,
15813,
13,
41829,
1330,
1439,
41829,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
14158,
349,
... | 3.028037 | 107 |
import csv
from spacy.lang.en import English
nlp = English()
tokenizer = nlp.Defaults.create_tokenizer(nlp)
if __name__ == "__main__":
write_data('data/to_label_abstracts.txt', 'data/papers/labeled_dev.csv')
write_data('data/unlabeled_abstracts.txt', 'data/papers/unlabeled.csv')
| [
11748,
269,
21370,
198,
6738,
599,
1590,
13,
17204,
13,
268,
1330,
3594,
198,
198,
21283,
79,
796,
3594,
3419,
198,
30001,
7509,
796,
299,
34431,
13,
7469,
13185,
13,
17953,
62,
30001,
7509,
7,
21283,
79,
8,
628,
628,
198,
361,
1159... | 2.547826 | 115 |
#!/usr/bin/env python
# Copyright 2015,2016 Nir Cohen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# BUG FIX: the script called pprint() but only imported pformat, which made
# every pprint(...) call raise NameError; import pprint instead.
from pprint import pprint

import distro

# Dump every piece of distribution information the `distro` package exposes.
print("os_release_info:")
pprint(distro.os_release_info())
print("lsb_release_info:")
pprint(distro.lsb_release_info())
print("distro_release_info:")
pprint(distro.distro_release_info())
print(f"id: {distro.id()}")
print(f"name: {distro.name()}")
print(f"name_pretty: {distro.name(True)}")
print(f"version: {distro.version()}")
print(f"version_pretty: {distro.version(True)}")
print(f"like: {distro.like()}")
print(f"codename: {distro.codename()}")
print(f"linux_distribution_full: {distro.linux_distribution()}")
print(f"linux_distribution: {distro.linux_distribution(False)}")
print(f"major_version: {distro.major_version()}")
print(f"minor_version: {distro.minor_version()}")
print(f"build_number: {distro.build_number()}")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
1853,
11,
5304,
31487,
17154,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 2.976087 | 460 |
# -*- coding: utf-8 -*-
from src.Ui.Ui_MainWindow import Ui_MainWindow
from src.NGA_Spider import *
from src.CheckDialog import *
from PyQt5.QtWidgets import *
import json
from bs4 import BeautifulSoup
import bs4
import re
import os
import time
data_dir = './data'
duplicate_str = '!@#err!duplicate!err!!@#'  # sentinel string marking a duplicate vote (translated from Chinese)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
12351,
13,
52,
72,
13,
52,
72,
62,
13383,
27703,
1330,
471,
72,
62,
13383,
27703,
198,
6738,
12351,
13,
45,
9273,
62,
41294,
1330,
1635,
198,
6738,
12351,
13,
... | 2.219178 | 146 |
from IPython.html.widgets import ContainerWidget
from IPython.utils.traitlets import CInt
class GridWidget(ContainerWidget):
    """Grid widget

    Allows you to easily align widgets to a Bootstrap fluid grid."""

    # Traitlets: changing either at runtime re-applies the layout (see __init__).
    height = CInt(820, help='Pixel height of the entire grid.')
    padding = CInt(10, help='Pixel padding between cells in the grid.')

    def __init__(self, columns, rows, debug=False, **kwargs):
        """Public constructor

        PARAMETERS
        ----------
        columns: int
            Number of columns in the grid.
        rows: int
            Number of rows in the grid.
        debug: bool
            Use rainbow background colors for the cells.
        """
        ContainerWidget.__init__(self)
        # Create the grid
        self.children = self._create_grid(columns, rows, debug=debug)
        # The grid should occupy the entire widget-subarea by default.
        self.set_css('width', '100%')
        # Manually handle height and padding changes.
        self.on_trait_change(self._update_layout, ['height', 'padding'])
        # Update the layout once to set initial values.
        self._update_layout(None, self.height)

    def __getitem__(self, dimensions):
        """Get cell via ``grid[column, row]`` tuple indexing."""
        for dim in dimensions:
            if isinstance(dim, slice):
                raise TypeError('GridWidget does not support slicing')
        if len(dimensions) != 2:
            raise ValueError('GridWidget only supports two dimensions')
        return self.get_cell(*dimensions)

    def get_cell(self, column, row):
        """Get a cell by column and row indicies (0 based)."""
        # children is a list of rows; each row's children are its cells.
        return self.children[row].children[column]

    def _update_layout(self, name, new):
        """Update the height/padding values of the child widgets.

        Basically this method applies the height and padding traitlet values."""
        rows = len(self.children)
        for row in self.children:
            row.set_css({
                # The height of the cell is a fraction of the total height,
                # minus all of the padding used vertically between cells.
                'height': str(self.height/rows + (rows-1)*self.padding) + 'px',
                'margin-bottom': str(self.padding) + 'px',
            })
            for cell in row.children:
                cell.set_css({
                    'margin-left': str(self.padding) + 'px',
                })

    def _create_cell(self, color=''):
        """Create a grid cell"""
        cell = ContainerWidget()
        cell.set_css({
            'background': color,
            'height': '100%',
            'margin-right': '0px',
            'margin-top': '0px',
            'margin-bottom': '0px',
        })
        return cell

    def _create_grid(self, columns, rows, debug=False):
        """Create the grid rows and cells.

        Returns a list of rows."""
        # In debug mode, each cell is tinted with a distinct rainbow color so
        # the layout can be inspected visually.
        debug_colors = ['red', 'orange', 'yellow', 'green', 'blue', 'indigo']
        return [ContainerWidget(children=[self._create_cell(color=debug_colors[min(i*columns+j, len(debug_colors)-1)] if debug else '') for j in range(columns)]) for i in range(rows)]

    def _ipython_display_(self, *pargs, **kwargs):
        """Rich display repr for this widget."""
        # Call the normal display logic.
        ContainerWidget._ipython_display_(self, *pargs, **kwargs)

        # Call custom add/remove class logic AFTER display.
        # Swap the default vbox classes for Bootstrap fluid-grid classes so
        # rows lay out horizontally; cells get equal span widths (12-column grid).
        for row in self.children:
            row.remove_class('widget-container')
            row.remove_class('vbox')
            row.add_class('row-fluid')
            [c.add_class('span%d' % int(12/len(row.children))) for c in row.children]
| [
6738,
6101,
7535,
13,
6494,
13,
28029,
11407,
1330,
43101,
38300,
198,
6738,
6101,
7535,
13,
26791,
13,
9535,
2578,
912,
1330,
327,
5317,
198,
198,
4871,
24846,
38300,
7,
29869,
38300,
2599,
198,
220,
220,
220,
37227,
41339,
26295,
198,... | 2.352057 | 1,531 |
"""
Do a couple basic tests to check that the default keys are returned correctly.
"""
from fence import keys
def test_default_public_key(app, rsa_public_key):
    """Check that the app's default public key matches the RSA fixture."""
    result = keys.default_public_key(app)
    assert result == rsa_public_key
def test_default_private_key(app, rsa_private_key):
    """Check that the app's default private key matches the RSA fixture."""
    result = keys.default_private_key(app)
    assert result == rsa_private_key
| [
37811,
198,
5211,
257,
3155,
4096,
5254,
284,
2198,
326,
262,
4277,
8251,
389,
4504,
9380,
13,
198,
37811,
198,
198,
6738,
13990,
1330,
8251,
628,
198,
4299,
1332,
62,
12286,
62,
11377,
62,
2539,
7,
1324,
11,
374,
11400,
62,
11377,
... | 3.185714 | 140 |
import configparser
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication
def create_azdo_connection(org_name, org_pat):
    """
    Build the authenticated Azure DevOps connection used by the clients.

    :param str org_name: Name of the organization
    :param str org_pat: Personal access token for the organization
    :return: azure.devops.connection.Connection
    """
    # PAT auth uses an empty username with the token as the password.
    credentials = BasicAuthentication('', org_pat)
    url = f'https://dev.azure.com/{org_name}'
    return Connection(base_url=url, creds=credentials)
def load_settings(filename='settings.ini'):
    """
    Load the settings.ini file which contains the organization name and
    personal access token.

    :param str filename: location of the file
    :return: configparser.ConfigParser whose [org] section holds 'name'
             and 'pat', or None when the file is missing or incomplete
    """
    config = configparser.ConfigParser()
    # ConfigParser.read() silently ignores missing/unreadable files,
    # leaving the parser empty, so "no sections" means "no settings".
    config.read(filename)
    if not config.sections():
        return None
    # The original compared the values against None, but configparser
    # values are always strings; the actual failure mode is a missing
    # [org] section or option, which we check for explicitly instead
    # of relying on a KeyError.
    if config.has_option('org', 'name') and config.has_option('org', 'pat'):
        return config
    return None
| [
11748,
4566,
48610,
198,
198,
6738,
35560,
495,
13,
7959,
2840,
13,
38659,
1330,
26923,
198,
198,
6738,
13845,
2118,
13,
41299,
3299,
1330,
14392,
47649,
3299,
628,
198,
198,
4299,
2251,
62,
1031,
4598,
62,
38659,
7,
2398,
62,
3672,
1... | 2.963145 | 407 |
import subprocess, re
import json
import itertools
from time import perf_counter
import numpy as np
# Load the ground-truth cardinalities, the benchmark queries, and the
# per-attribute domain sizes produced by preprocessing.
with open("true_cardinality.json", "r") as j:
    true_cardinalities = json.load(j)

with open("queries.json", "r") as j2:
    queries = json.load(j2)

with open("attr_range.json", "r") as j3:
    attr_range = json.load(j3)

# Evaluate a single hard-coded query from the benchmark set.
i = 43
query = queries[i]

print(f"Predicting cardinality for query {i}: {query}")
card_start_t = perf_counter()

# Drop attribute values outside the attribute's valid domain.
# BUGFIX: the original removed items from the list while iterating it,
# which silently skips elements; rebuilding the list is correct.
for q in query.keys():
    limit = attr_range[q]
    query[q] = [num for num in query[q] if 0 <= num < limit]

# An attribute that allows its entire domain is no constraint at all.
for q in list(query.keys()):
    if len(query[q]) == attr_range[q]:
        query.pop(q)

# BUGFIX: the original called query.remove(q), but dicts have no
# remove() method (AttributeError); pop() over a key snapshot is the
# intended operation.
for q in list(query.keys()):
    if not query[q]:
        query.pop(q)

vals = list(query.values())

# Run the Dice inference binary and sum the probabilities of every
# value combination that satisfies the (cleaned) query.
# NOTE(review): the binary path is hard-coded to the author's machine.
output = subprocess.getoutput("~/Desktop/dice/Dice.native bayescard_test.dice").split("\n")[1:-2]
probs = 0
for out_line in output:
    fields = out_line.split("\t")
    prob = float(fields[-1].strip())
    combo = [int(n) for n in re.findall("[0-9]+", fields[0])]
    if all(combo[k] in vals[k] for k in range(len(vals))):
        probs += prob
card_end_t = perf_counter()

latency_ms = (card_end_t - card_start_t) * 1000
cardinality_predict = probs * 2458285  # total row count of the table
cardinality_true = true_cardinalities[i]

# q-error: symmetric ratio between predicted and true cardinality,
# with both zero cases guarded so the ratio stays well defined.
if cardinality_predict == 0 and cardinality_true == 0:
    q_error = 1.0
else:
    if np.isnan(cardinality_predict) or cardinality_predict == 0:
        cardinality_predict = 1
    if cardinality_true == 0:
        cardinality_true = 1
    q_error = max(cardinality_predict / cardinality_true,
                  cardinality_true / cardinality_predict)
print(f"latency: {latency_ms} and error: {q_error}") | [
11748,
850,
14681,
11,
302,
198,
11748,
33918,
198,
11748,
340,
861,
10141,
198,
6738,
640,
1330,
23035,
62,
24588,
198,
11748,
299,
32152,
355,
45941,
198,
198,
4480,
1280,
7203,
7942,
62,
9517,
1292,
414,
13,
17752,
2430,
81,
4943,
... | 2.467879 | 825 |
class Solution:
    """
    Solve the problem over a boolean 2D matrix using DFS.

    @param grid: a boolean 2D matrix
    @return: an integer
    @ Time: O(n^2)  Space: O(1)
    @ DFS
    """
'''
class Solution:
"""
@param grid: a boolean 2D matrix
@return: an integer
"""
| [
4871,
28186,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2488,
17143,
10706,
25,
257,
25131,
362,
35,
17593,
198,
220,
220,
220,
2488,
7783,
25,
281,
18253,
198,
220,
220,
220,
2488,
3862,
25,
440,
7,
77,
18237,
17,
8,
220,... | 2.294118 | 102 |
import csv
import logging
from typing import Optional, Sequence
from sqltask.base.lookup_source import BaseLookupSource
from sqltask.base.row_source import BaseRowSource
from sqltask.utils.file import detect_encode
logger = logging.getLogger(__name__)
class CsvRowSource(BaseRowSource):
    """
    Row source backed by a CSV file.

    The first row of the file is taken as the list of column names;
    every following row supplies the column values in the same order.
    """

    def __init__(self,
                 file_path: str,
                 name: Optional[str] = None,
                 delimiter: str = ",",
                 encoding: Optional[str] = None):
        """
        :param file_path: path to the csv file.
        :param name: name of data source.
        :param delimiter: csv file delimiter.
        :param encoding: Character encoding of csv file; autodetected
            from the file contents when omitted.
        """
        super().__init__(name)
        self.file_path = file_path
        self.delimiter = delimiter
        if encoding is None:
            logger.debug(
                f"Autodetecting encoding for CSV row source: "
                f"{name or file_path or '<undefined>'}"
            )
            encoding = detect_encode(file_path)["encoding"]
            logger.debug(f"Detected file encoding: {encoding}")
        self.encoding = encoding

    def __iter__(self):
        """
        Yield one dict per data row, keyed by the header columns.

        :raises Exception: when a data row's width differs from the header's.
        """
        logger.debug(
            f"Start reading CSV row source: {self}")
        columns = None
        row_number = 0
        with open(self.file_path, newline="", encoding=self.encoding) as csvfile:
            for in_row in csv.reader(csvfile, delimiter=self.delimiter):
                row_number += 1
                if columns is None:
                    # First row carries the column names.
                    columns = list(in_row)
                    continue
                if len(in_row) != len(columns):
                    raise Exception(
                        f"Error reading row {row_number} of CSV file {self}: "
                        f"Expected {len(columns)} columns, found {len(in_row)}")
                yield dict(zip(columns, in_row))
        logger.info(
            f"Finished reading {row_number - 1} rows for CSV row source: {self}"
        )
| [
11748,
269,
21370,
198,
11748,
18931,
198,
6738,
19720,
1330,
32233,
11,
45835,
198,
198,
6738,
19862,
2528,
2093,
13,
8692,
13,
5460,
929,
62,
10459,
1330,
7308,
8567,
929,
7416,
198,
6738,
19862,
2528,
2093,
13,
8692,
13,
808,
62,
1... | 2.078547 | 1,184 |
"""Add city table
Revision ID: 96dfc07259e3
Revises: 9e76e10b7227
Create Date: 2022-04-23 10:04:42.716365
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '96dfc07259e3'  # this migration's unique ID
down_revision = '9e76e10b7227'  # migration that must run before this one
branch_labels = None  # no named branch
depends_on = None  # no cross-branch dependency
| [
37811,
4550,
1748,
3084,
198,
198,
18009,
1166,
4522,
25,
9907,
7568,
66,
2998,
25191,
68,
18,
198,
18009,
2696,
25,
860,
68,
4304,
68,
940,
65,
22,
24403,
198,
16447,
7536,
25,
33160,
12,
3023,
12,
1954,
838,
25,
3023,
25,
3682,
... | 2.533333 | 135 |
import pandas as pd
import pytest
from etl_vouchers import etl
@pytest.mark.parametrize(
"df_orders, df_barcodes, df_expected_vouchers, allow_useless",
[
(
pd.DataFrame(
{
"customer_id": [
1,
1,
1,
1,
2,
2,
2,
3,
3,
3,
3,
4,
4,
5,
6,
7,
8,
],
"order_id": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
],
}
),
pd.DataFrame(
{
"barcode": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, None, 7, 8, 10],
"order_id": [1, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
}
),
pd.DataFrame(
{
"customer_id": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6, 7, 8],
"order_id": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
],
"barcodes": [
[1, 2],
[3],
[4],
[5, 6],
[],
[],
[9],
[],
[11],
[12],
[],
[],
[],
[],
[],
[],
[],
],
}
),
True,
),
(
pd.DataFrame(
{
"customer_id": [
1,
1,
1,
1,
2,
2,
2,
3,
3,
3,
3,
4,
4,
5,
6,
7,
8,
],
"order_id": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
],
}
),
pd.DataFrame(
{
"barcode": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, None, 7, 8, 10],
"order_id": [1, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
}
),
pd.DataFrame(
{
"customer_id": [1, 1, 1, 1, 2, 3, 3],
"order_id": [1, 2, 3, 4, 7, 9, 10],
"barcodes": [[1, 2], [3], [4], [5, 6], [9], [11], [12]],
}
),
False,
),
],
)
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
12972,
9288,
198,
6738,
2123,
75,
62,
85,
280,
3533,
1330,
2123,
75,
628,
198,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7,
198,
220,
220,
220,
366,
7568,
62,
6361,
11,
... | 1.153767 | 3,902 |
#!/usr/bin/env python3
############################################################
## Jose F. Sanchez ##
## Copyright (C) 2019-2020 Lauro Sumoy Lab, IGTP, Spain ##
############################################################
## useful imports
import time
import io
import os
import re
import sys
from sys import argv
import subprocess
import traceback
from HCGB.functions import system_call_functions
from HCGB.functions import files_functions
############################################################
def create_genomeDir(folder, STAR_exe, num_threads, fasta_file, limitGenomeGenerateRAM):
    """Create the STAR_index genome dir

    Runs ``STAR --runMode genomeGenerate`` and aborts the process when
    the call fails.

    :param folder: folder to store the results
    :param STAR_exe: Executable path for STAR binary
    :param num_threads: number of threads to do the computation
    :param fasta_file: path to the genome fasta file
    :param limitGenomeGenerateRAM: limit RAM bytes to be used in the computation
    :type folder: string
    :type num_threads: int
    :type STAR_exe: string
    :type fasta_file: string
    :type limitGenomeGenerateRAM: int
    :returns: genomeDir
    """
    genomeDir = files_functions.create_subfolder("STAR_index", folder)
    cmd_create = "%s --runMode genomeGenerate --limitGenomeGenerateRAM %s --runThreadN %s --genomeDir %s --genomeFastaFiles %s" %(
        STAR_exe, limitGenomeGenerateRAM, num_threads, genomeDir, fasta_file)
    print ('\t+ genomeDir generation for STAR mapping')
    create_code = system_call_functions.system_call(cmd_create, False, True)
    if not create_code:
        print ("** ERROR: Some error occurred during genomeDir creation... **")
        # BUGFIX: exit() is injected by the site module and is not
        # guaranteed outside interactive sessions; sys.exit() is the
        # correct way to abort (sys is imported at module level).
        sys.exit()
    return genomeDir
############################################################
def load_Genome(folder, STAR_exe, genomeDir, num_threads):
    """Pre-load a STAR_index genome into shared memory.

    Invokes ``STAR --genomeLoad LoadAndExit`` so later mapping runs can
    reuse the loaded genome.

    :param folder: folder where the genome will be loaded
    :param STAR_exe: Executable path for STAR binary
    :param genomeDir: path to the genome directory
    :param num_threads: number of threads to do the computation
    :returns: exit status of the STAR call
    """
    mem_folder = files_functions.create_subfolder('LoadMem', folder)
    command = "%s --genomeDir %s --runThreadN %s --outFileNamePrefix %s --genomeLoad LoadAndExit" %(
        STAR_exe, genomeDir, num_threads, mem_folder)
    print ('\t+ Loading memory for STAR mapping')
    return system_call_functions.system_call(command, False, True)
############################################################
def remove_Genome(STAR_exe, genomeDir, folder, num_threads):
    """Unload a genome previously loaded into shared memory by STAR.

    Invokes ``STAR --genomeLoad Remove``; no reads are mapped.

    :param STAR_exe: Executable path for STAR binary
    :param genomeDir: path to the genome directory
    :param folder: folder where the genome was loaded
    :param num_threads: number of threads to do the computation
    :returns: exit status of the STAR call
    """
    out_folder = files_functions.create_subfolder('RemoveMem', folder)
    command = "%s --genomeDir %s --outFileNamePrefix %s --runThreadN %s --genomeLoad Remove" %(
        STAR_exe, genomeDir, out_folder, num_threads)
    print ('\t+ Removing memory loaded for STAR mapping')
    return system_call_functions.system_call(command, False, True)
############################################################
def mapReads(option, reads, folder, name, STAR_exe, genomeDir, limitRAM_option, num_threads, Debug):
    """
    Map reads using STAR software. Some parameters are set for small RNA Seq.

    Parameters set according to ENCODE Project directives for small RNAs
    https://www.encodeproject.org/rna-seq/small-rnas/

    :param option: If multiple files to map, use loaded genome (LoadAndKeep) if only one map, anything else.
    :param reads: List containing absolute path to reads (SE or PE)
    :param folder: Path for output results
    :param name: Sample name
    :param STAR_exe: Executable path for STAR binary
    :param genomeDir: path to the genome directory
    :param limitRAM_option: maximum available RAM (bytes) for map reads process. Default: 40000000000
    :param num_threads: number of threads to do the computation
    :param Debug: unused here; kept for call-site compatibility
    :returns: mapping_code
    """
    print("\t+ Mapping sample %s using STAR" %name)
    if not os.path.isdir(folder):
        folder = files_functions.create_folder(folder)

    ## reads is a list with 1 or 2 read fastq files (SE or PE)
    jread = " ".join(reads)

    ## prepare command
    ## (BUGFIX: dropped an unused bam_file_name local from the original)
    cmd = "%s --genomeDir %s --runThreadN %s " %(STAR_exe, genomeDir, num_threads)
    cmd = cmd + "--limitBAMsortRAM %s --outFileNamePrefix %s " %(limitRAM_option, folder + '/')

    ## ENCODE small-RNA filtering/alignment options
    cmd = cmd + "--alignSJDBoverhangMin 1000 --outFilterMultimapNmax 1 --outFilterMismatchNoverLmax 0.03 "
    cmd = cmd + "--outFilterScoreMinOverLread 0 --outFilterMatchNminOverLread 0 --outFilterMatchNmin 16 "
    cmd = cmd + "--alignIntronMax 1 --outSAMheaderHD @HD VN:1.4 SO:coordinate --outSAMtype BAM SortedByCoordinate "

    ## Reuse a genome pre-loaded in shared memory when mapping multiple
    ## samples; otherwise use a private copy.
    if option == 'LoadAndKeep':
        cmd = cmd + "--genomeLoad LoadAndKeep"
    else:
        cmd = cmd + "--genomeLoad NoSharedMemory"

    ## ReadFiles
    cmd = cmd + " --readFilesIn %s " %jread

    ## redirect stdout/stderr of STAR into per-sample log files
    logfile = os.path.join(folder, 'STAR.log')
    errfile = os.path.join(folder, 'STAR.err')
    cmd = cmd + ' > ' + logfile + ' 2> ' + errfile

    ## send command
    mapping_code = system_call_functions.system_call(cmd, False, True)
    return (mapping_code)
###############
###########
######
if __name__== "__main__":
    # NOTE(review): main() is not defined anywhere in this module, so
    # running this file as a script raises NameError. Confirm whether a
    # main() entry point was lost or this guard should be removed.
    main()
#######
# Usage: STAR [options]... --genomeDir /path/to/genome/index/ --readFilesIn R1.fq R2.fq
# Spliced Transcripts Alignment to a Reference (c) Alexander Dobin, 2009-2019
########
#######
# For more details see:
# <https://github.com/alexdobin/STAR>
# <https://github.com/alexdobin/STAR/blob/master/doc/STARmanual.pdf>
#
# ### versions
# versionGenome 2.7.1a
# string: earliest genome index version compatible with this STAR release. Please do not change this value!
#
# ### Parameter Files
# parametersFiles -
# string: name of a user-defined parameters file, "-": none. Can only be defined on the command line.
#
# ### System
# sysShell -
# string: path to the shell binary, preferably bash, e.g. /bin/bash.
# - ... the default shell is executed, typically /bin/sh. This was reported to fail on some Ubuntu systems - then you need to specify path to bash.
#
# ### Run Parameters
# runMode alignReads
# string: type of the run.
#
# alignReads ... map reads
# genomeGenerate ... generate genome files
# inputAlignmentsFromBAM ... input alignments from BAM. Presently only works with --outWigType and --bamRemoveDuplicates.
# liftOver ... lift-over of GTF files (--sjdbGTFfile) between genome assemblies using chain file(s) from --genomeChainFiles.
#
# runThreadN 1
# int: number of threads to run STAR
#
# runDirPerm User_RWX
# string: permissions for the directories created at the run-time.
# User_RWX ... user-read/write/execute
# All_RWX ... all-read/write/execute (same as chmod 777)
#
# runRNGseed 777
# int: random number generator seed.
#
#
# ### Genome Parameters
# genomeDir ./GenomeDir/
# string: path to the directory where genome files are stored (for --runMode alignReads) or will be generated (for --runMode generateGenome)
#
# genomeLoad NoSharedMemory
# string: mode of shared memory usage for the genome files. Only used with --runMode alignReads.
# LoadAndKeep ... load genome into shared and keep it in memory after run
# LoadAndRemove ... load genome into shared but remove it after run
# LoadAndExit ... load genome into shared memory and exit, keeping the genome in memory for future runs
# Remove ... do not map anything, just remove loaded genome from memory
# NoSharedMemory ... do not use shared memory, each job will have its own private copy of the genome
#
# genomeFastaFiles -
# string(s): path(s) to the fasta files with the genome sequences, separated by spaces. These files should be plain text FASTA files, they *cannot* be zipped.
# Required for the genome generation (--runMode genomeGenerate). Can also be used in the mapping (--runMode alignReads) to add extra (new) sequences to the genome (e.g. spike-ins).
#
# genomeChainFiles -
# string: chain files for genomic liftover. Only used with --runMode liftOver .
#
# genomeFileSizes 0
# uint(s)>0: genome files exact sizes in bytes. Typically, this should not be defined by the user.
#
# genomeConsensusFile -
# string: VCF file with consensus SNPs (i.e. alternative allele is the major (AF>0.5) allele)
#
# ### Genome Indexing Parameters - only used with --runMode genomeGenerate
# genomeChrBinNbits 18
# int: =log2(chrBin), where chrBin is the size of the bins for genome storage: each chromosome will occupy an integer number of bins. For a genome with large number of contigs, it is recommended to scale this parameter as min(18, log2[max(GenomeLength/NumberOfReferences,ReadLength)]).
#
# genomeSAindexNbases 14
# int: length (bases) of the SA pre-indexing string. Typically between 10 and 15. Longer strings will use much more memory, but allow faster searches. For small genomes, the parameter --genomeSAindexNbases must be scaled down to min(14, log2(GenomeLength)/2 - 1).
#
# genomeSAsparseD 1
# int>0: suffux array sparsity, i.e. distance between indices: use bigger numbers to decrease needed RAM at the cost of mapping speed reduction
#
# genomeSuffixLengthMax -1
# int: maximum length of the suffixes, has to be longer than read length. -1 = infinite.
#
#
# ### Splice Junctions Database
# sjdbFileChrStartEnd -
# string(s): path to the files with genomic coordinates (chr <tab> start <tab> end <tab> strand) for the splice junction introns. Multiple files can be supplied wand will be concatenated.
#
# sjdbGTFfile -
# string: path to the GTF file with annotations
#
# sjdbGTFchrPrefix -
# string: prefix for chromosome names in a GTF file (e.g. 'chr' for using ENSMEBL annotations with UCSC genomes)
#
# sjdbGTFfeatureExon exon
# string: feature type in GTF file to be used as exons for building transcripts
#
# sjdbGTFtagExonParentTranscript transcript_id
# string: GTF attribute name for parent transcript ID (default "transcript_id" works for GTF files)
#
# sjdbGTFtagExonParentGene gene_id
# string: GTF attribute name for parent gene ID (default "gene_id" works for GTF files)
#
# sjdbGTFtagExonParentGeneName gene_name
# string(s): GTF attrbute name for parent gene name
#
# sjdbGTFtagExonParentGeneType gene_type gene_biotype
# string(s): GTF attrbute name for parent gene type
#
# sjdbOverhang 100
# int>0: length of the donor/acceptor sequence on each side of the junctions, ideally = (mate_length - 1)
#
# sjdbScore 2
# int: extra alignment score for alignmets that cross database junctions
#
# sjdbInsertSave Basic
# string: which files to save when sjdb junctions are inserted on the fly at the mapping step
# Basic ... only small junction / transcript files
# All ... all files including big Genome, SA and SAindex - this will create a complete genome directory
#
# ### Variation parameters
# varVCFfile -
# string: path to the VCF file that contains variation data.
#
# ### Input Files
# inputBAMfile -
# string: path to BAM input file, to be used with --runMode inputAlignmentsFromBAM
#
# ### Read Parameters
# readFilesType Fastx
# string: format of input read files
# Fastx ... FASTA or FASTQ
# SAM SE ... SAM or BAM single-end reads; for BAM use --readFilesCommand samtools view
# SAM PE ... SAM or BAM paired-end reads; for BAM use --readFilesCommand samtools view
#
# readFilesIn Read1 Read2
# string(s): paths to files that contain input read1 (and, if needed, read2)
#
# readFilesPrefix -
# string: preifx for the read files names, i.e. it will be added in front of the strings in --readFilesIn
# -: no prefix
#
# readFilesCommand -
# string(s): command line to execute for each of the input file. This command should generate FASTA or FASTQ text and send it to stdout
# For example: zcat - to uncompress .gz files, bzcat - to uncompress .bz2 files, etc.
#
# readMapNumber -1
# int: number of reads to map from the beginning of the file
# -1: map all reads
#
# readMatesLengthsIn NotEqual
# string: Equal/NotEqual - lengths of names,sequences,qualities for both mates are the same / not the same. NotEqual is safe in all situations.
#
# readNameSeparator /
# string(s): character(s) separating the part of the read names that will be trimmed in output (read name after space is always trimmed)
#
# readQualityScoreBase 33
# int>=0: number to be subtracted from the ASCII code to get Phred quality score
#
# clip3pNbases 0
# int(s): number(s) of bases to clip from 3p of each mate. If one value is given, it will be assumed the same for both mates.
#
# clip5pNbases 0
# int(s): number(s) of bases to clip from 5p of each mate. If one value is given, it will be assumed the same for both mates.
#
# clip3pAdapterSeq -
# string(s): adapter sequences to clip from 3p of each mate. If one value is given, it will be assumed the same for both mates.
#
# clip3pAdapterMMp 0.1
# double(s): max proportion of mismatches for 3p adpater clipping for each mate. If one value is given, it will be assumed the same for both mates.
#
# clip3pAfterAdapterNbases 0
# int(s): number of bases to clip from 3p of each mate after the adapter clipping. If one value is given, it will be assumed the same for both mates.
#
#
# ### Limits
# limitGenomeGenerateRAM 31000000000
# int>0: maximum available RAM (bytes) for genome generation
#
# limitIObufferSize 150000000
# int>0: max available buffers size (bytes) for input/output, per thread
#
# limitOutSAMoneReadBytes 100000
# int>0: max size of the SAM record (bytes) for one read. Recommended value: >(2*(LengthMate1+LengthMate2+100)*outFilterMultimapNmax
#
# limitOutSJoneRead 1000
# int>0: max number of junctions for one read (including all multi-mappers)
#
# limitOutSJcollapsed 1000000
# int>0: max number of collapsed junctions
#
# limitBAMsortRAM 0
# int>=0: maximum available RAM (bytes) for sorting BAM. If =0, it will be set to the genome index size. 0 value can only be used with --genomeLoad NoSharedMemory option.
#
# limitSjdbInsertNsj 1000000
# int>=0: maximum number of junction to be inserted to the genome on the fly at the mapping stage, including those from annotations and those detected in the 1st step of the 2-pass run
#
# limitNreadsSoft -1
# int: soft limit on the number of reads
#
# ### Output: general
# outFileNamePrefix ./
# string: output files name prefix (including full or relative path). Can only be defined on the command line.
#
# outTmpDir -
# string: path to a directory that will be used as temporary by STAR. All contents of this directory will be removed!
# - the temp directory will default to outFileNamePrefix_STARtmp
#
# outTmpKeep None
# string: whether to keep the tempporary files after STAR runs is finished
# None ... remove all temporary files
# All .. keep all files
#
# outStd Log
# string: which output will be directed to stdout (standard out)
# Log ... log messages
# SAM ... alignments in SAM format (which normally are output to Aligned.out.sam file), normal standard output will go into Log.std.out
# BAM_Unsorted ... alignments in BAM format, unsorted. Requires --outSAMtype BAM Unsorted
# BAM_SortedByCoordinate ... alignments in BAM format, unsorted. Requires --outSAMtype BAM SortedByCoordinate
# BAM_Quant ... alignments to transcriptome in BAM format, unsorted. Requires --quantMode TranscriptomeSAM
#
# outReadsUnmapped None
# string: output of unmapped and partially mapped (i.e. mapped only one mate of a paired end read) reads in separate file(s).
# None ... no output
# Fastx ... output in separate fasta/fastq files, Unmapped.out.mate1/2
#
# outQSconversionAdd 0
# int: add this number to the quality score (e.g. to convert from Illumina to Sanger, use -31)
#
# outMultimapperOrder Old_2.4
# string: order of multimapping alignments in the output files
# Old_2.4 ... quasi-random order used before 2.5.0
# Random ... random order of alignments for each multi-mapper. Read mates (pairs) are always adjacent, all alignment for each read stay together. This option will become default in the future releases.
#
# ### Output: SAM and BAM
# outSAMtype SAM
# strings: type of SAM/BAM output
# 1st word:
# BAM ... output BAM without sorting
# SAM ... output SAM without sorting
# None ... no SAM/BAM output
# 2nd, 3rd:
# Unsorted ... standard unsorted
# SortedByCoordinate ... sorted by coordinate. This option will allocate extra memory for sorting which can be specified by --limitBAMsortRAM.
#
# outSAMmode Full
# string: mode of SAM output
# None ... no SAM output
# Full ... full SAM output
# NoQS ... full SAM but without quality scores
#
# outSAMstrandField None
# string: Cufflinks-like strand field flag
# None ... not used
# intronMotif ... strand derived from the intron motif. Reads with inconsistent and/or non-canonical introns are filtered out.
#
# outSAMattributes Standard
# string: a string of desired SAM attributes, in the order desired for the output SAM
# NH HI AS nM NM MD jM jI XS MC ch ... any combination in any order
# None ... no attributes
# Standard ... NH HI AS nM
# All ... NH HI AS nM NM MD jM jI MC ch
# vA ... variant allele
# vG ... genomic coordiante of the variant overlapped by the read
# vW ... 0/1 - alignment does not pass / passes WASP filtering. Requires --waspOutputMode SAMtag
# STARsolo:
# CR CY UR UY ... sequences and quality scores of cell barcodes and UMIs for the solo* demultiplexing
# CB UB ... error-corrected cell barcodes and UMIs for solo* demultiplexing. Requires --outSAMtype BAM SortedByCoordinate.
# sM ... assessment of CB and UMI
# sS ... sequence of the entire barcode (CB,UMI,adapter...)
# sQ ... quality of the entire barcode
# Unsupported/undocumented:
# rB ... alignment block read/genomic coordinates
# vR ... read coordinate of the variant
#
# outSAMattrIHstart 1
# int>=0: start value for the IH attribute. 0 may be required by some downstream software, such as Cufflinks or StringTie.
#
# outSAMunmapped None
# string(s): output of unmapped reads in the SAM format
# 1st word:
# None ... no output
# Within ... output unmapped reads within the main SAM file (i.e. Aligned.out.sam)
# 2nd word:
# KeepPairs ... record unmapped mate for each alignment, and, in case of unsorted output, keep it adjacent to its mapped mate. Only affects multi-mapping reads.
#
# outSAMorder Paired
# string: type of sorting for the SAM output
# Paired: one mate after the other for all paired alignments
# PairedKeepInputOrder: one mate after the other for all paired alignments, the order is kept the same as in the input FASTQ files
#
# outSAMprimaryFlag OneBestScore
# string: which alignments are considered primary - all others will be marked with 0x100 bit in the FLAG
# OneBestScore ... only one alignment with the best score is primary
# AllBestScore ... all alignments with the best score are primary
#
# outSAMreadID Standard
# string: read ID record type
# Standard ... first word (until space) from the FASTx read ID line, removing /1,/2 from the end
# Number ... read number (index) in the FASTx file
#
# outSAMmapqUnique 255
# int: 0 to 255: the MAPQ value for unique mappers
#
# outSAMflagOR 0
# int: 0 to 65535: sam FLAG will be bitwise OR'd with this value, i.e. FLAG=FLAG | outSAMflagOR. This is applied after all flags have been set by STAR, and after outSAMflagAND. Can be used to set specific bits that are not set otherwise.
#
# outSAMflagAND 65535
# int: 0 to 65535: sam FLAG will be bitwise AND'd with this value, i.e. FLAG=FLAG & outSAMflagOR. This is applied after all flags have been set by STAR, but before outSAMflagOR. Can be used to unset specific bits that are not set otherwise.
#
# outSAMattrRGline -
# string(s): SAM/BAM read group line. The first word contains the read group identifier and must start with "ID:", e.g. --outSAMattrRGline ID:xxx CN:yy "DS:z z z".
# xxx will be added as RG tag to each output alignment. Any spaces in the tag values have to be double quoted.
# Comma separated RG lines correspons to different (comma separated) input files in --readFilesIn. Commas have to be surrounded by spaces, e.g.
# --outSAMattrRGline ID:xxx , ID:zzz "DS:z z" , ID:yyy DS:yyyy
#
# outSAMheaderHD -
# strings: @HD (header) line of the SAM header
#
# outSAMheaderPG -
# strings: extra @PG (software) line of the SAM header (in addition to STAR)
#
# outSAMheaderCommentFile -
# string: path to the file with @CO (comment) lines of the SAM header
#
# outSAMfilter None
# string(s): filter the output into main SAM/BAM files
# KeepOnlyAddedReferences ... only keep the reads for which all alignments are to the extra reference sequences added with --genomeFastaFiles at the mapping stage.
# KeepAllAddedReferences ... keep all alignments to the extra reference sequences added with --genomeFastaFiles at the mapping stage.
#
#
# outSAMmultNmax -1
# int: max number of multiple alignments for a read that will be output to the SAM/BAM files.
# -1 ... all alignments (up to --outFilterMultimapNmax) will be output
#
# outSAMtlen 1
# int: calculation method for the TLEN field in the SAM/BAM files
# 1 ... leftmost base of the (+)strand mate to rightmost base of the (-)mate. (+)sign for the (+)strand mate
# 2 ... leftmost base of any mate to rightmost base of any mate. (+)sign for the mate with the leftmost base. This is different from 1 for overlapping mates with protruding ends
#
# outBAMcompression 1
# int: -1 to 10 BAM compression level, -1=default compression (6?), 0=no compression, 10=maximum compression
#
# outBAMsortingThreadN 0
# int: >=0: number of threads for BAM sorting. 0 will default to min(6,--runThreadN).
#
# outBAMsortingBinsN 50
# int: >0: number of genome bins fo coordinate-sorting
#
# ### BAM processing
# bamRemoveDuplicatesType -
# string: mark duplicates in the BAM file, for now only works with (i) sorted BAM fed with inputBAMfile, and (ii) for paired-end alignments only
# - ... no duplicate removal/marking
# UniqueIdentical ... mark all multimappers, and duplicate unique mappers. The coordinates, FLAG, CIGAR must be identical
# UniqueIdenticalNotMulti ... mark duplicate unique mappers but not multimappers.
#
# bamRemoveDuplicatesMate2basesN 0
# int>0: number of bases from the 5' of mate 2 to use in collapsing (e.g. for RAMPAGE)
#
# ### Output Wiggle
# outWigType None
# string(s): type of signal output, e.g. "bedGraph" OR "bedGraph read1_5p". Requires sorted BAM: --outSAMtype BAM SortedByCoordinate .
# 1st word:
# None ... no signal output
# bedGraph ... bedGraph format
# wiggle ... wiggle format
# 2nd word:
# read1_5p ... signal from only 5' of the 1st read, useful for CAGE/RAMPAGE etc
# read2 ... signal from only 2nd read
#
# outWigStrand Stranded
# string: strandedness of wiggle/bedGraph output
# Stranded ... separate strands, str1 and str2
# Unstranded ... collapsed strands
#
# outWigReferencesPrefix -
# string: prefix matching reference names to include in the output wiggle file, e.g. "chr", default "-" - include all references
#
# outWigNorm RPM
# string: type of normalization for the signal
# RPM ... reads per million of mapped reads
# None ... no normalization, "raw" counts
#
# ### Output Filtering
# outFilterType Normal
# string: type of filtering
# Normal ... standard filtering using only current alignment
# BySJout ... keep only those reads that contain junctions that passed filtering into SJ.out.tab
#
# outFilterMultimapScoreRange 1
# int: the score range below the maximum score for multimapping alignments
#
# outFilterMultimapNmax 10
# int: maximum number of loci the read is allowed to map to. Alignments (all of them) will be output only if the read maps to no more loci than this value.
# Otherwise no alignments will be output, and the read will be counted as "mapped to too many loci" in the Log.final.out .
#
# outFilterMismatchNmax 10
# int: alignment will be output only if it has no more mismatches than this value.
#
# outFilterMismatchNoverLmax 0.3
# real: alignment will be output only if its ratio of mismatches to *mapped* length is less than or equal to this value.
#
# outFilterMismatchNoverReadLmax 1.0
# real: alignment will be output only if its ratio of mismatches to *read* length is less than or equal to this value.
#
#
# outFilterScoreMin 0
# int: alignment will be output only if its score is higher than or equal to this value.
#
# outFilterScoreMinOverLread 0.66
# real: same as outFilterScoreMin, but normalized to read length (sum of mates' lengths for paired-end reads)
#
# outFilterMatchNmin 0
# int: alignment will be output only if the number of matched bases is higher than or equal to this value.
#
# outFilterMatchNminOverLread 0.66
# real: sam as outFilterMatchNmin, but normalized to the read length (sum of mates' lengths for paired-end reads).
#
# outFilterIntronMotifs None
# string: filter alignment using their motifs
# None ... no filtering
# RemoveNoncanonical ... filter out alignments that contain non-canonical junctions
# RemoveNoncanonicalUnannotated ... filter out alignments that contain non-canonical unannotated junctions when using annotated splice junctions database. The annotated non-canonical junctions will be kept.
#
# outFilterIntronStrands RemoveInconsistentStrands
# string: filter alignments
# RemoveInconsistentStrands ... remove alignments that have junctions with inconsistent strands
# None ... no filtering
#
# ### Output Filtering: Splice Junctions
# outSJfilterReads All
# string: which reads to consider for collapsed splice junctions output
# All: all reads, unique- and multi-mappers
# Unique: uniquely mapping reads only
#
# outSJfilterOverhangMin 30 12 12 12
# 4 integers: minimum overhang length for splice junctions on both sides for: (1) non-canonical motifs, (2) GT/AG and CT/AC motif, (3) GC/AG and CT/GC motif, (4) AT/AC and GT/AT motif. -1 means no output for that motif
# does not apply to annotated junctions
#
# outSJfilterCountUniqueMin 3 1 1 1
# 4 integers: minimum uniquely mapping read count per junction for: (1) non-canonical motifs, (2) GT/AG and CT/AC motif, (3) GC/AG and CT/GC motif, (4) AT/AC and GT/AT motif. -1 means no output for that motif
# Junctions are output if one of outSJfilterCountUniqueMin OR outSJfilterCountTotalMin conditions are satisfied
# does not apply to annotated junctions
#
# outSJfilterCountTotalMin 3 1 1 1
# 4 integers: minimum total (multi-mapping+unique) read count per junction for: (1) non-canonical motifs, (2) GT/AG and CT/AC motif, (3) GC/AG and CT/GC motif, (4) AT/AC and GT/AT motif. -1 means no output for that motif
# Junctions are output if one of outSJfilterCountUniqueMin OR outSJfilterCountTotalMin conditions are satisfied
# does not apply to annotated junctions
#
# outSJfilterDistToOtherSJmin 10 0 5 10
# 4 integers>=0: minimum allowed distance to other junctions' donor/acceptor
# does not apply to annotated junctions
#
# outSJfilterIntronMaxVsReadN 50000 100000 200000
# N integers>=0: maximum gap allowed for junctions supported by 1,2,3,,,N reads
# i.e. by default junctions supported by 1 read can have gaps <=50000b, by 2 reads: <=100000b, by 3 reads: <=200000. by >=4 reads any gap <=alignIntronMax
# does not apply to annotated junctions
#
# ### Scoring
# scoreGap 0
# int: splice junction penalty (independent on intron motif)
#
# scoreGapNoncan -8
# int: non-canonical junction penalty (in addition to scoreGap)
#
# scoreGapGCAG -4
# GC/AG and CT/GC junction penalty (in addition to scoreGap)
#
# scoreGapATAC -8
# AT/AC and GT/AT junction penalty (in addition to scoreGap)
#
# scoreGenomicLengthLog2scale -0.25
# extra score logarithmically scaled with genomic length of the alignment: scoreGenomicLengthLog2scale*log2(genomicLength)
#
# scoreDelOpen -2
# deletion open penalty
#
# scoreDelBase -2
# deletion extension penalty per base (in addition to scoreDelOpen)
#
# scoreInsOpen -2
# insertion open penalty
#
# scoreInsBase -2
# insertion extension penalty per base (in addition to scoreInsOpen)
#
# scoreStitchSJshift 1
#     maximum score reduction while searching for SJ boundaries in the stitching step
#
#
# ### Alignments and Seeding
#
# seedSearchStartLmax 50
# int>0: defines the search start point through the read - the read is split into pieces no longer than this value
#
# seedSearchStartLmaxOverLread 1.0
# real: seedSearchStartLmax normalized to read length (sum of mates' lengths for paired-end reads)
#
# seedSearchLmax 0
#     int>=0: defines the maximum length of the seeds, if =0 max seed length is infinite
#
# seedMultimapNmax 10000
# int>0: only pieces that map fewer than this value are utilized in the stitching procedure
#
# seedPerReadNmax 1000
# int>0: max number of seeds per read
#
# seedPerWindowNmax 50
# int>0: max number of seeds per window
#
# seedNoneLociPerWindow 10
# int>0: max number of one seed loci per window
#
# seedSplitMin 12
# int>0: min length of the seed sequences split by Ns or mate gap
#
# alignIntronMin 21
# minimum intron size: genomic gap is considered intron if its length>=alignIntronMin, otherwise it is considered Deletion
#
# alignIntronMax 0
# maximum intron size, if 0, max intron size will be determined by (2^winBinNbits)*winAnchorDistNbins
#
# alignMatesGapMax 0
# maximum gap between two mates, if 0, max intron gap will be determined by (2^winBinNbits)*winAnchorDistNbins
#
# alignSJoverhangMin 5
# int>0: minimum overhang (i.e. block size) for spliced alignments
#
# alignSJstitchMismatchNmax 0 -1 0 0
# 4*int>=0: maximum number of mismatches for stitching of the splice junctions (-1: no limit).
# (1) non-canonical motifs, (2) GT/AG and CT/AC motif, (3) GC/AG and CT/GC motif, (4) AT/AC and GT/AT motif.
#
# alignSJDBoverhangMin 3
# int>0: minimum overhang (i.e. block size) for annotated (sjdb) spliced alignments
#
# alignSplicedMateMapLmin 0
# int>0: minimum mapped length for a read mate that is spliced
#
# alignSplicedMateMapLminOverLmate 0.66
# real>0: alignSplicedMateMapLmin normalized to mate length
#
# alignWindowsPerReadNmax 10000
# int>0: max number of windows per read
#
# alignTranscriptsPerWindowNmax 100
# int>0: max number of transcripts per window
#
# alignTranscriptsPerReadNmax 10000
# int>0: max number of different alignments per read to consider
#
# alignEndsType Local
# string: type of read ends alignment
# Local ... standard local alignment with soft-clipping allowed
# EndToEnd ... force end-to-end read alignment, do not soft-clip
# Extend5pOfRead1 ... fully extend only the 5p of the read1, all other ends: local alignment
# Extend5pOfReads12 ... fully extend only the 5p of the both read1 and read2, all other ends: local alignment
#
# alignEndsProtrude 0 ConcordantPair
# int, string: allow protrusion of alignment ends, i.e. start (end) of the +strand mate downstream of the start (end) of the -strand mate
# 1st word: int: maximum number of protrusion bases allowed
# 2nd word: string:
# ConcordantPair ... report alignments with non-zero protrusion as concordant pairs
# DiscordantPair ... report alignments with non-zero protrusion as discordant pairs
#
# alignSoftClipAtReferenceEnds Yes
# string: allow the soft-clipping of the alignments past the end of the chromosomes
# Yes ... allow
# No ... prohibit, useful for compatibility with Cufflinks
#
# alignInsertionFlush None
# string: how to flush ambiguous insertion positions
# None ... insertions are not flushed
# Right ... insertions are flushed to the right
#
# ### Paired-End reads
# peOverlapNbasesMin 0
# int>=0: minimum number of overlap bases to trigger mates merging and realignment
#
# peOverlapMMp 0.01
# real, >=0 & <1: maximum proportion of mismatched bases in the overlap area
#
# ### Windows, Anchors, Binning
#
# winAnchorMultimapNmax 50
# int>0: max number of loci anchors are allowed to map to
#
# winBinNbits 16
# int>0: =log2(winBin), where winBin is the size of the bin for the windows/clustering, each window will occupy an integer number of bins.
#
# winAnchorDistNbins 9
# int>0: max number of bins between two anchors that allows aggregation of anchors into one window
#
# winFlankNbins 4
# int>0: log2(winFlank), where win Flank is the size of the left and right flanking regions for each window
#
# winReadCoverageRelativeMin 0.5
# real>=0: minimum relative coverage of the read sequence by the seeds in a window, for STARlong algorithm only.
#
# winReadCoverageBasesMin 0
# int>0: minimum number of bases covered by the seeds in a window , for STARlong algorithm only.
#
# ### Chimeric Alignments
# chimOutType Junctions
# string(s): type of chimeric output
# Junctions ... Chimeric.out.junction
# SeparateSAMold ... output old SAM into separate Chimeric.out.sam file
# WithinBAM ... output into main aligned BAM files (Aligned.*.bam)
#                         WithinBAM HardClip  ... (default) hard-clipping in the CIGAR for supplemental chimeric alignments (default if no 2nd word is present)
# WithinBAM SoftClip ... soft-clipping in the CIGAR for supplemental chimeric alignments
#
# chimSegmentMin 0
# int>=0: minimum length of chimeric segment length, if ==0, no chimeric output
#
# chimScoreMin 0
# int>=0: minimum total (summed) score of the chimeric segments
#
# chimScoreDropMax 20
# int>=0: max drop (difference) of chimeric score (the sum of scores of all chimeric segments) from the read length
#
# chimScoreSeparation 10
# int>=0: minimum difference (separation) between the best chimeric score and the next one
#
# chimScoreJunctionNonGTAG -1
# int: penalty for a non-GT/AG chimeric junction
#
# chimJunctionOverhangMin 20
# int>=0: minimum overhang for a chimeric junction
#
# chimSegmentReadGapMax 0
# int>=0: maximum gap in the read sequence between chimeric segments
#
# chimFilter banGenomicN
# string(s): different filters for chimeric alignments
# None ... no filtering
# banGenomicN ... Ns are not allowed in the genome sequence around the chimeric junction
#
# chimMainSegmentMultNmax 10
# int>=1: maximum number of multi-alignments for the main chimeric segment. =1 will prohibit multimapping main segments.
#
# chimMultimapNmax 0
# int>=0: maximum number of chimeric multi-alignments
# 0 ... use the old scheme for chimeric detection which only considered unique alignments
#
# chimMultimapScoreRange 1
# int>=0: the score range for multi-mapping chimeras below the best chimeric score. Only works with --chimMultimapNmax > 1
#
# chimNonchimScoreDropMin 20
# int>=0: to trigger chimeric detection, the drop in the best non-chimeric alignment score with respect to the read length has to be greater than this value
#
# chimOutJunctionFormat 0
# int: formatting type for the Chimeric.out.junction file
# 0 ... no comment lines/headers
# 1 ... comment lines at the end of the file: command line and Nreads: total, unique, multi
#
# ### Quantification of Annotations
# quantMode -
# string(s): types of quantification requested
# - ... none
# TranscriptomeSAM ... output SAM/BAM alignments to transcriptome into a separate file
# GeneCounts ... count reads per gene
#
# quantTranscriptomeBAMcompression 1 1
# int: -2 to 10 transcriptome BAM compression level
# -2 ... no BAM output
# -1 ... default compression (6?)
# 0 ... no compression
# 10 ... maximum compression
#
# quantTranscriptomeBan IndelSoftclipSingleend
# string: prohibit various alignment type
# IndelSoftclipSingleend ... prohibit indels, soft clipping and single-end alignments - compatible with RSEM
# Singleend ... prohibit single-end alignments
#
# ### 2-pass Mapping
# twopassMode None
# string: 2-pass mapping mode.
# None ... 1-pass mapping
# Basic ... basic 2-pass mapping, with all 1st pass junctions inserted into the genome indices on the fly
#
# twopass1readsN -1
# int: number of reads to process for the 1st step. Use very large number (or default -1) to map all reads in the first step.
#
#
# ### WASP parameters
# waspOutputMode None
# string: WASP allele-specific output type. This is re-implemenation of the original WASP mappability filtering by Bryce van de Geijn, Graham McVicker, Yoav Gilad & Jonathan K Pritchard. Please cite the original WASP paper: Nature Methods 12, 1061–1063 (2015), https://www.nature.com/articles/nmeth.3582 .
# SAMtag ... add WASP tags to the alignments that pass WASP filtering
#
# ### STARsolo (single cell RNA-seq) parameters
# soloType None
# string(s): type of single-cell RNA-seq
# CB_UMI_Simple ... (a.k.a. Droplet) one UMI and one Cell Barcode of fixed length in read2, e.g. Drop-seq and 10X Chromium
# CB_UMI_Complex ... one UMI of fixed length, but multiple Cell Barcodes of varying length, as well as adapters sequences are allowed in read2 only, e.g. inDrop.
#
# soloCBwhitelist -
# string(s): file(s) with whitelist(s) of cell barcodes. Only one file allowed with
#
# soloCBstart 1
# int>0: cell barcode start base
#
# soloCBlen 16
# int>0: cell barcode length
#
# soloUMIstart 17
# int>0: UMI start base
#
# soloUMIlen 10
# int>0: UMI length
#
# soloBarcodeReadLength 1
# int: length of the barcode read
# 1 ... equal to sum of soloCBlen+soloUMIlen
# 0 ... not defined, do not check
#
# soloCBposition -
# strings(s) position of Cell Barcode(s) on the barcode read.
# Presently only works with --soloType CB_UMI_Complex, and barcodes are assumed to be on Read2.
# Format for each barcode: startAnchor_startDistance_endAnchor_endDistance
# start(end)Anchor defines the anchor base for the CB: 0: read start; 1: read end; 2: adapter start; 3: adapter end
# start(end)Distance is the distance from the CB start(end) to the Anchor base
# String for different barcodes are separated by space.
# Example: inDrop (Zilionis et al, Nat. Protocols, 2017):
# --soloCBposition 0_0_2_-1 3_1_3_8
#
# soloUMIposition -
# string position of the UMI on the barcode read, same as soloCBposition
# Example: inDrop (Zilionis et al, Nat. Protocols, 2017):
# --soloCBposition 3_9_3_14
#
# soloAdapterSequence -
# string: adapter sequence to anchor barcodes.
#
# soloAdapterMismatchesNmax 1
# int>0: maximum number of mismatches allowed in adapter sequence.
#
# soloCBmatchWLtype 1MM_multi
# string: matching the Cell Barcodes to the WhiteList
# Exact ... only exact matches allowed
# 1MM ... only one match in whitelist with 1 mismatched base allowed. Allowed CBs have to have at least one read with exact match.
# 1MM_multi ... multiple matches in whitelist with 1 mismatched base allowed, posterior probability calculation is used choose one of the matches.
# Allowed CBs have to have at least one read with exact match. Similar to CellRanger 2.2.0
# 1MM_multi_pseudocounts ... same as 1MM_Multi, but pseudocounts of 1 are added to all whitelist barcodes.
# Similar to CellRanger 3.x.x
#
# soloStrand Forward
# string: strandedness of the solo libraries:
# Unstranded ... no strand information
# Forward ... read strand same as the original RNA molecule
# Reverse ... read strand opposite to the original RNA molecule
#
# soloFeatures Gene
# string(s): genomic features for which the UMI counts per Cell Barcode are collected
# Gene ... genes: reads match the gene transcript
# SJ ... splice junctions: reported in SJ.out.tab
# GeneFull ... full genes: count all reads overlapping genes' exons and introns
# Transcript3p ... quantification of transcript for 3' protocols
#
# soloUMIdedup 1MM_All
# string(s): type of UMI deduplication (collapsing) algorithm
# 1MM_All ... all UMIs with 1 mismatch distance to each other are collapsed (i.e. counted once)
# 1MM_Directional ... follows the "directional" method from the UMI-tools by Smith, Heger and Sudbery (Genome Research 2017).
# Exact ... only exactly matching UMIs are collapsed
#
# soloUMIfiltering -
# string(s) type of UMI filtering
# - ... basic filtering: remove UMIs with N and homopolymers (similar to CellRanger 2.2.0)
# MultiGeneUMI ... remove lower-count UMIs that map to more than one gene (introduced in CellRanger 3.x.x)
#
# soloOutFileNames Solo.out/ features.tsv barcodes.tsv matrix.mtx
# string(s) file names for STARsolo output:
# file_name_prefix gene_names barcode_sequences cell_feature_count_matrix
#
# soloCellFilter CellRanger2.2 3000 0.99 10
# string(s): cell filtering type and parameters
#                 CellRanger2.2 ... simple filtering of CellRanger 2.2, followed by three numbers: number of expected cells, robust maximum percentile for UMI count, maximum to minimum ratio for UMI count
#                 TopCells ... only report top cells by UMI count, followed by the exact number of cells
# None ... do not output filtered cells
#
#######
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
29113,
14468,
7804,
4242,
198,
2235,
5264,
376,
13,
21909,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 2.326707 | 21,631 |
#!/usr/bin/env python3
# tubestats/youtube_presenter.py - main script for displaying data and graphs
#
# by Shivan Sivakumaran
from datetime import datetime, timedelta
import streamlit as st
from tubestats.youtube_data import YouTubeData
# Settings
ALI_ABDAAL_CHANNEL_ID = "UCoOae5nYA7VqaXzerajD0lg"
SHIVAN_SIVAKUMARAN_CHANNEL_ID = "UCrbYXWUmeCy4GqArthu4hCw"
DEBUG = False
DEFAULT_CHANNEL_ID = ALI_ABDAAL_CHANNEL_ID
@st.cache
def main():
"""
# TubeStats
*Analysis for YouTube Channel Consistency*
"""
# User input
user_input = st.text_input(
"Please enter YouTube channel ID or URL to a YouTube video:",
ALI_ABDAAL_CHANNEL_ID,
)
if not user_input:
st.warning(
"Please input a YouTube channel ID (e.g. {example_ID}) or a link to a YouTube video.".format(
example_ID=ALI_ABDAAL_CHANNEL_ID
)
)
st.stop()
youtuber_data = fetch_data(user_input)
if DEBUG == True:
raw_df = youtuber_data.raw_dataframe()
st.write(raw_df)
df = youtuber_data.dataframe()
st.header(youtuber_data.channel_name())
img_col, stat_col = st.columns(2)
with img_col:
st.image(youtuber_data.thumbnail_url())
with stat_col:
st.subheader("Quick Statistics")
st.markdown(
"Total Number of Videos: `"
+ "{:,}".format(int(youtuber_data.video_count()))
+ "`"
)
st.markdown("Join Date: `" + str(youtuber_data.start_date()) + "`")
st.markdown(
"Total View Count: `"
+ "{:,}".format(int(youtuber_data.total_channel_views()))
+ "`"
)
st.markdown(
"Total Comments: `"
+ "{:,}".format(int(youtuber_data.total_comments()))
+ "`"
)
st.markdown(
"Total Watch Time: `" + str(youtuber_data.total_watchtime()) + "`"
)
st.write(youtuber_data.channel_description())
st.header("Videos")
"""
Below is a graph plotting the views of each video over time. Please note:
- colour represents the like and dislike
- size represents the number of views.
- a natural log axis is applied to the view count due to its 'viral' nature
"""
first_video_date = (
df["snippet.publishedAt_REFORMATED"].min().to_pydatetime()
)
last_video_date = (
df["snippet.publishedAt_REFORMATED"].max().to_pydatetime()
)
date_start, date_end = date_slider()
transformed_df = youtuber_data.transform_dataframe(
date_start=date_start, date_end=date_end
)
c = youtuber_data.scatter_all_videos(transformed_df)
st.altair_chart(c, use_container_width=True)
st.subheader("Videos by Time Difference")
"""
This looks at the time difference between the current video and the previous video.
"""
time_df = youtuber_data.time_difference_calculate(df=transformed_df)
time_diff = youtuber_data.list_time_difference_ranked(df=time_df)
st.altair_chart(
youtuber_data.time_difference_plot(df=time_df),
use_container_width=True,
)
quantiles = youtuber_data.time_difference_statistics(df=time_df)
st.subheader("Time Difference Statistics:")
st.markdown(
"25th Percentile: `" + "{:0.1f}".format(quantiles[0.25]) + "` days"
)
st.markdown("Median: `" + "{:0.1f}".format(quantiles[0.50]) + "` days")
st.markdown(
"75th Percentile: `" + "{:0.1f}".format(quantiles[0.75]) + "` days"
)
st.markdown(
"Longest Hiatus: `" + "{:0.1f}".format(quantiles[1.0]) + "` days"
)
vid_list = youtuber_data.greatest_time_difference_video(time_df)
st.subheader("Longest Hiatus:")
st.video("https://www.youtube.com/watch?v=" + str(vid_list["greatest"]))
prev_col, next_col = st.columns(2)
with prev_col:
st.subheader("Previous:")
st.video("https://www.youtube.com/watch?v=" + str(vid_list["prev"]))
with next_col:
st.subheader("Next:")
st.video("https://www.youtube.com/watch?v=" + str(vid_list["_next"]))
st.write(time_diff)
st.header("Most Popular Videos")
"""
Hypothesis: view count indicates well performing videos. \
The content is engaging enough and liked to be \
recommended and viewed more often to other viewers.
"""
most_viewed_info = youtuber_data.most_viewed_videos(df=transformed_df)
st.write(most_viewed_info["preserved_df"])
display_vid_links(most_viewed_info)
# dislike_num = st.slider('Number of videos', 5, 20, key=0)
st.header("Most Unpopular Videos")
"""
Remaining a hypothesis, people actively show their digust for \
a video by hitting dislike video. Hence, we are provided \
with a like-dislike ratio. We also have the sum to ensure \
we have enough likes/dislikes for fair comparison.
"""
most_disliked_info = youtuber_data.most_disliked_videos(df=transformed_df)
st.write(most_disliked_info["preserved_df"])
display_vid_links(most_disliked_info)
st.header("List of Video")
"""
List of videos and all relevant features.
"""
st.write(df)
st.header("Feedback")
"""
This was made by [Shivan Sivakumaran](https://shivansivakumaran.com).
[Here is the code](https://github.com/ShivanS93/tubestats).
[Please get in touch if you have any feedback](mailto:shivan@shivansivakumaran.com).
"""
if __name__ == "__main__":
st.set_page_config(page_title="TubeStats")
if DEBUG == True:
main()
try:
main()
except Exception as e:
st.error("Error: {e}".format(e=e))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
12202,
395,
1381,
14,
11604,
62,
25579,
263,
13,
9078,
532,
1388,
4226,
329,
19407,
1366,
290,
28770,
198,
2,
198,
2,
416,
911,
13809,
311,
452,
461,
388,
19173,
198,
198,
6... | 2.281713 | 2,499 |
import os
from fabric.api import cd, sudo
from dploy.context import ctx, get_project_dir
| [
11748,
28686,
198,
198,
6738,
9664,
13,
15042,
1330,
22927,
11,
21061,
198,
198,
6738,
288,
1420,
13,
22866,
1330,
269,
17602,
11,
651,
62,
16302,
62,
15908,
628,
628,
198
] | 3.064516 | 31 |
# Generated by Django 2.2.13 on 2020-07-08 14:51
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1485,
319,
12131,
12,
2998,
12,
2919,
1478,
25,
4349,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
"""GANomaly: Semi-Supervised Anomaly Detection via Adversarial Training.
https://arxiv.org/abs/1805.06725
"""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
from typing import Dict, List, Union
import torch
from omegaconf import DictConfig, ListConfig
from pytorch_lightning.callbacks import EarlyStopping
from torch import Tensor, nn, optim
from anomalib.models.components import AnomalyModule
from .torch_model import Discriminator, Generator
class GanomalyLightning(AnomalyModule):
"""PL Lightning Module for the GANomaly Algorithm.
Args:
hparams (Union[DictConfig, ListConfig]): Model parameters
"""
def _reset_min_max(self):
"""Resets min_max scores."""
self.min_scores = torch.tensor(float("inf"), dtype=torch.float32) # pylint: disable=not-callable
self.max_scores = torch.tensor(float("-inf"), dtype=torch.float32) # pylint: disable=not-callable
@staticmethod
def weights_init(module: nn.Module):
"""Initialize DCGAN weights.
Args:
module (nn.Module): [description]
"""
classname = module.__class__.__name__
if classname.find("Conv") != -1:
nn.init.normal_(module.weight.data, 0.0, 0.02)
elif classname.find("BatchNorm") != -1:
nn.init.normal_(module.weight.data, 1.0, 0.02)
nn.init.constant_(module.bias.data, 0)
def configure_callbacks(self):
"""Configure model-specific callbacks."""
early_stopping = EarlyStopping(
monitor=self.hparams.model.early_stopping.metric,
patience=self.hparams.model.early_stopping.patience,
mode=self.hparams.model.early_stopping.mode,
)
return [early_stopping]
def configure_optimizers(self) -> List[optim.Optimizer]:
"""Configure optimizers for generator and discriminator.
Returns:
List[optim.Optimizer]: Adam optimizers for discriminator and generator.
"""
optimizer_d = optim.Adam(
self.discriminator.parameters(),
lr=self.hparams.model.lr,
betas=(self.hparams.model.beta1, self.hparams.model.beta2),
)
optimizer_g = optim.Adam(
self.generator.parameters(),
lr=self.hparams.model.lr,
betas=(self.hparams.model.beta1, self.hparams.model.beta2),
)
return [optimizer_d, optimizer_g]
def training_step(self, batch, _, optimizer_idx): # pylint: disable=arguments-differ
"""Training step.
Args:
batch (Dict): Input batch containing images.
optimizer_idx (int): Optimizer which is being called for current training step.
Returns:
Dict[str, Tensor]: Loss
"""
images = batch["image"]
loss: Dict[str, Tensor]
# Discriminator
if optimizer_idx == 0:
# forward pass
fake, _, _ = self.generator(images)
pred_real, _ = self.discriminator(images)
pred_fake, _ = self.discriminator(fake.detach())
error_discriminator_real = self.loss_bce(
pred_real, torch.ones(size=pred_real.shape, dtype=torch.float32, device=pred_real.device)
)
error_discriminator_fake = self.loss_bce(
pred_fake, torch.zeros(size=pred_fake.shape, dtype=torch.float32, device=pred_fake.device)
)
loss_discriminator = (error_discriminator_fake + error_discriminator_real) * 0.5
loss = {"loss": loss_discriminator}
# Generator
else:
# forward pass
fake, latent_i, latent_o = self.generator(images)
pred_real, _ = self.discriminator(images)
pred_fake, _ = self.discriminator(fake)
error_enc = self.loss_enc(latent_i, latent_o)
error_con = self.loss_con(images, fake)
error_adv = self.loss_adv(pred_real, pred_fake)
loss_generator = (
error_adv * self.hparams.model.wadv
+ error_con * self.hparams.model.wcon
+ error_enc * self.hparams.model.wenc
)
loss = {"loss": loss_generator}
return loss
def on_validation_start(self) -> None:
"""Reset min and max values for current validation epoch."""
self._reset_min_max()
return super().on_validation_start()
def validation_step(self, batch, _) -> Dict[str, Tensor]: # type: ignore # pylint: disable=arguments-differ
"""Update min and max scores from the current step.
Args:
batch (Dict[str, Tensor]): Predicted difference between z and z_hat.
Returns:
Dict[str, Tensor]: batch
"""
self.generator.eval()
_, latent_i, latent_o = self.generator(batch["image"])
batch["pred_scores"] = torch.mean(torch.pow((latent_i - latent_o), 2), dim=1).view(-1) # convert nx1x1 to n
self.max_scores = max(self.max_scores, torch.max(batch["pred_scores"]))
self.min_scores = min(self.min_scores, torch.min(batch["pred_scores"]))
return batch
def validation_epoch_end(self, outputs):
"""Normalize outputs based on min/max values."""
for prediction in outputs:
prediction["pred_scores"] = self._normalize(prediction["pred_scores"])
super().validation_epoch_end(outputs)
return outputs
def on_test_start(self) -> None:
"""Reset min max values before test batch starts."""
self._reset_min_max()
return super().on_test_start()
def test_step(self, batch, _):
"""Update min and max scores from the current step."""
super().test_step(batch, _)
self.max_scores = max(self.max_scores, torch.max(batch["pred_scores"]))
self.min_scores = min(self.min_scores, torch.min(batch["pred_scores"]))
return batch
def test_epoch_end(self, outputs):
"""Normalize outputs based on min/max values."""
for prediction in outputs:
prediction["pred_scores"] = self._normalize(prediction["pred_scores"])
super().test_epoch_end(outputs)
return outputs
def _normalize(self, scores: Tensor) -> Tensor:
"""Normalize the scores based on min/max of entire dataset.
Args:
scores (Tensor): Un-normalized scores.
Returns:
Tensor: Normalized scores.
"""
scores = (scores - self.min_scores.to(scores.device)) / (
self.max_scores.to(scores.device) - self.min_scores.to(scores.device)
)
return scores
| [
37811,
45028,
24335,
25,
35525,
12,
12442,
16149,
1052,
24335,
46254,
2884,
1215,
690,
36098,
13614,
13,
198,
198,
5450,
1378,
283,
87,
452,
13,
2398,
14,
8937,
14,
1507,
2713,
13,
15,
3134,
1495,
198,
37811,
198,
198,
2,
15069,
357,
... | 2.314839 | 3,100 |
from dali.utils import apply_recursively_on_type
__all__ = [
"VocabEncoded","Vocab"
]
| [
6738,
288,
7344,
13,
26791,
1330,
4174,
62,
8344,
1834,
2280,
62,
261,
62,
4906,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
366,
53,
420,
397,
27195,
9043,
2430,
53,
420,
397,
1,
198,
60,
198
] | 2.275 | 40 |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
628
] | 3.5 | 26 |
from PIL import ImageTk
import tkinter as tk
import sys
root = tk.Tk() # トップレベルウィンドウの作成
root.title("画像 viewer") # タイトル
img = ImageTk.PhotoImage(file=sys.argv[1]) # 画像の読み込み コマンドライン引数の1番目のファイル名
tk.Label(root, image=img).pack() # ラベルウィジェットの作成、画像指定
root.mainloop() | [
171,
119,
123,
6738,
350,
4146,
1330,
7412,
51,
74,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
11748,
25064,
198,
15763,
796,
256,
74,
13,
51,
74,
3419,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
2... | 1.508108 | 185 |
first_name=input()
last_name=input()
age=int(input())
town=input()
print(f"You are {first_name} {last_name}, a {age}-years old person from {town}.") | [
11085,
62,
3672,
28,
15414,
3419,
198,
12957,
62,
3672,
28,
15414,
3419,
198,
496,
28,
600,
7,
15414,
28955,
198,
12735,
28,
15414,
3419,
198,
198,
4798,
7,
69,
1,
1639,
389,
1391,
11085,
62,
3672,
92,
1391,
12957,
62,
3672,
5512,
... | 2.660714 | 56 |
# Generated by Django 3.0.12 on 2021-02-24 20:42
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
1065,
319,
33448,
12,
2999,
12,
1731,
1160,
25,
3682,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.8 | 30 |
import inspect
import os
import resource
from functools import lru_cache
from importlib import import_module
from main.utils import get_suggestions_for_exception
def patch_cwd():
"""
os.getcwd() requires opening a file, which fails under the limits,
so this removes the need for that.
"""
cwd = os.getcwd()
os.getcwd = getcwd
os.chdir = chdir
@lru_cache
| [
11748,
10104,
198,
11748,
28686,
198,
11748,
8271,
198,
6738,
1257,
310,
10141,
1330,
300,
622,
62,
23870,
198,
6738,
1330,
8019,
1330,
1330,
62,
21412,
198,
198,
6738,
1388,
13,
26791,
1330,
651,
62,
47811,
507,
62,
1640,
62,
1069,
4... | 2.805755 | 139 |
from twisted.application import service, strports
from nevow import appserver
from environment import env as e
import store
import dispatcher
import itodo
application = service.Application('todo')
db = store.Todos(e.dbname, e.user, e.password, e.host)
disp = dispatcher.Dispatch()
site = appserver.NevowSite(resource=disp)
site.remember(db, itodo.ITodos)
site.remember(e, itodo.IEnv)
webserver = strports.service("tcp:8080", site)
webserver.setServiceParent(application)
| [
6738,
19074,
13,
31438,
1330,
2139,
11,
965,
3742,
198,
6738,
497,
85,
322,
1330,
598,
15388,
198,
198,
6738,
2858,
1330,
17365,
355,
304,
198,
11748,
3650,
198,
11748,
49952,
198,
11748,
340,
24313,
198,
198,
31438,
796,
2139,
13,
23... | 3.064516 | 155 |
#!/usr/bin/env python
from binance.client import Client
import json
import os.path
import sys
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
9874,
590,
13,
16366,
1330,
20985,
198,
11748,
33918,
198,
11748,
28686,
13,
6978,
198,
11748,
25064,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
... | 2.833333 | 48 |
"""
Description: cost measurement functions
Contributors: Simon Schwär, Sebastian Rosenzweig, Meinard Müller
License: The MIT license, https://opensource.org/licenses/MIT
This file is part of the Differentiable Intonation Tools
https://github.com/simonschwaer/differentiable-intonation-tools/
"""
import numpy as np
def tonal(f, K=12, f_ref=440., gradient=False):
    """Cost for frequencies deviating from a K-division equal-temperament grid.

    Parameters
    ----------
    f : float scalar or 1D numpy array
        Frequencies of interest in Hz
    K : int
        Number of equal-temperament divisions per octave (default: 12)
    f_ref : float
        Reference frequency anchoring the ET grid in Hz (default: 440 Hz)
    gradient : bool
        If True, return the derivative of the cost with unit
        'cost change per cent shifted' instead of the cost itself

    Returns
    -------
    numpy array with the same dimension as ``f`` (scalars become length-1
    arrays).  The cost is 0 exactly on the ET grid and 1 exactly halfway
    between two grid frequencies.
    """
    freqs = np.atleast_1d(np.asarray(f, dtype=np.float32))
    # Cosine phase: one full period per grid step of 1/K octave.
    phase = 2 * np.pi * K * np.log2(freqs / f_ref)
    if gradient:
        # Derivative of (1 - cos(phase)) / 2 w.r.t. cents (1200 cents/octave).
        return np.pi * K * np.sin(phase) / 1200
    return (1 - np.cos(phase)) / 2
def harmonic(f1, f2, fixed_wc=None, gradient=False):
    """Pairwise harmonic cost between two frequency sets.

    Based on the Plomp/Levelt perceptual dissonance curve, using the
    parametrization of J. Berezovsky, "The structure of musical harmony as an
    ordered phase of sound", Science Advances, vol. 5, p. eaav8490, May 2019.

    Parameters
    ----------
    f1 : float scalar or 1D numpy array
        Frequencies of the first set in Hz (length N)
    f2 : float scalar or 1D numpy array
        Frequencies of the second set in Hz (length M)
    fixed_wc : float
        Optional fixed width of the dissonance curve; by default the width is
        derived from the lower frequency of each pair.
    gradient : bool
        If True, return the cost gradient w.r.t. f1 with unit 'cost change
        per cent shifted' instead of the cost itself.

    Returns
    -------
    NxM 2D numpy array of pairwise costs (or gradients); identical frequency
    pairs cost 0, and the maximum cost of 1 is reached when
    log2(f1/f2) equals the curve width.
    """
    set_a = np.atleast_1d(np.asarray(f1, dtype=np.float32))
    set_b = np.atleast_1d(np.asarray(f2, dtype=np.float32))
    assert len(set_a.shape) == 1, \
        "Input frequencies to 'dit.cost.harmonic' should be scalar or a 1D array."
    assert len(set_b.shape) == 1, \
        "Input frequencies to 'dit.cost.harmonic' should be scalar or a 1D array."
    assert np.all(set_a > 0), \
        "Input frequencies to 'dit.cost.harmonic' must be strictly positive."
    assert np.all(set_b > 0), \
        "Input frequencies to 'dit.cost.harmonic' must be strictly positive."
    cost = np.zeros((set_a.shape[0], set_b.shape[0]))
    # The nested loops are deliberate: np.vectorize is not really faster here
    # and (also with numba) plain loops beat vectorization for this kernel.
    for row, fa in enumerate(set_a):
        for col, fb in enumerate(set_b):
            if fa == fb:
                cost[row, col] = 0.
                continue
            if fixed_wc is None:
                # Frequency-dependent curve width from the lower partner.
                width = 6.7 * min(fa, fb)**(-0.68)
            else:
                width = fixed_wc
                assert width > 0, "fixed_wc input to 'dit.cost.harmonic' must be strictly positive."
            log_ratio = np.log2(fa / fb)
            scaled = np.log(np.abs(log_ratio) / width)
            if gradient:
                cost[row, col] = -1 * np.exp(-1 * scaled**2) * scaled / (600 * log_ratio)
            else:
                cost[row, col] = np.exp(-1 * scaled**2)
    return cost
def tonal_for_frames(P1, P2, K=12, f_ref=440., fit_grid=True, gradient=False):
    """Calculate total and weighted tonal cost for sets of pure tones

    Parameters
    ----------
    P1 : 2D or 3D float numpy array
        (T, N, 2) numpy array with N frequency/amplitude pairs (f_n, a_n), as for example
        returned by 'utils.find_peaks'. 'T' is an optional time dimension and must be equal
        to 'P2' if given.
    P2 : 2D or 3D float numpy array
        (T, M, 2) numpy array with M frequency/amplitude pairs (f_m, a_m), as for example
        returned by 'utils.find_peaks'. 'T' is an optional time dimension and must be equal
        to 'P1' if given.
        Only used to find the best grid shift automatically when 'fit_grid' is True.
    K : int
        Number of ET divisions per octave (default: 12)
    f_ref : float
        Reference frequency for alignment of the ET grid in Hz (default: 440 Hz)
    fit_grid : bool
        Whether or not to find the best fitting reference frequency for P2 (default True)
    gradient : bool
        Whether to return the cost value (default) or the gradient at each f

    Returns
    -------
    cost : 1D float numpy array (dimensions: (T))
        Total tonal cost or tonal cost gradient for all frequencies in P1, weighted and
        normalized by amplitude. T is 1 if P1 and P2 have only two dimensions.
    """
    P_lead, P_backing = _ensure_dimensions_peak_sets(P1, P2)
    T = P_lead.shape[0]
    result = np.zeros((T))
    for t in range(T):
        if len(P_lead[t]) == 0 or np.sum(P_lead[t,:,0]) < 0.0001:
            # return zero cost if the set is empty or practically silent
            # (happens e.g. when a voice is quiet in the signal analyzed by 'utils.find_peaks')
            continue
        # find best reference frequency for et penalty
        # TODO: more elegant way to find minimum
        # The candidates span exactly one ET grid step around 440 Hz; since the
        # grid repeats every 1/K octave this covers every possible alignment.
        if fit_grid and len(P_backing[t]) > 0:
            ref_candidates = np.linspace(440 * np.power(2, -0.5/K), 440 * np.power(2, 0.5/K), 100)
            ref_results = np.zeros_like(ref_candidates)
            # Amplitude-weighted tonal cost of the backing peaks for each
            # candidate; rows with frequency 0 are skipped (presumably padding).
            for i in range(len(ref_candidates)):
                for j in range(len(P_backing[t])):
                    if P_backing[t,j,0] == 0: continue
                    ref_results[i] += np.abs(P_backing[t,j,1]) * tonal(P_backing[t,j,0], K=K, f_ref=ref_candidates[i])[0]
            opt_ref = ref_candidates[np.argmin(ref_results)]
        else:
            opt_ref = f_ref
        # with found reference, calculate cost for lead voice, weighted and normalized by amplitude
        ampl = 0
        for i in range(len(P_lead[t])):
            if P_lead[t,i,0] == 0: continue
            result[t] += np.abs(P_lead[t,i,1]) * tonal(P_lead[t,i,0], K=K, f_ref=opt_ref, gradient=gradient)[0]
            ampl += np.abs(P_lead[t,i,1])
        # NOTE(review): if every lead peak has zero frequency, `ampl` stays 0
        # and this divides by zero -- confirm upstream peaks always have f > 0.
        result[t] /= ampl
    return result
def harmonic_for_frames(P1, P2, fixed_wc=None, gradient=False):
    """Calculate harmonic cost between all pairs of pure tones in two sets

    Parameters
    ----------
    P1 : 2D or 3D float numpy array
        (T, N, 2) numpy array with N frequency/amplitude pairs (f_n, a_n), as for example
        returned by 'utils.find_peaks'. 'T' is an optional time dimension and must be equal
        to 'P2' if given.
    P2 : 2D or 3D float numpy array
        (T, M, 2) numpy array with M frequency/amplitude pairs (f_m, a_m), as for example
        returned by 'utils.find_peaks'. 'T' is an optional time dimension and must be equal
        to 'P1' if given.
    fixed_wc : float
        Optional fixed dissonance curve width parameter
        (default "None": automatic width based on frequency)
    gradient : bool
        Whether to return the cost value (default) or the gradient at f

    Returns
    -------
    Total harmonic cost or harmonic cost gradient for all frequencies in P1 w.r.t. P2,
    weighted by amplitude

    TODO: Amplitude weighting would not be elegant here, because the average cost
    of one frequency pair is very low. Currently, the sum of all weighted pairings
    is divided only by the summed amplitudes in P1. This follows from the assumption
    that on average, each frequency in P1 is close to one or two frequencies in P2,
    so that the division results in a cost range comparable to the tonal cost.
    """
    P_lead, P_backing = _ensure_dimensions_peak_sets(P1, P2)
    T = P_lead.shape[0]
    result = np.zeros((T))
    for t in range(T):
        if len(P_lead[t]) == 0 or len(P_backing[t]) == 0 or \
            np.sum(P_lead[t,:,0]) < 0.0001 or np.sum(P_backing[t,:,0]) < 0.0001:
            # return zero cost if P1 or P2 is empty or practically silent
            # (happens e.g. when a voice is quiet in the signal analyzed by 'utils.find_peaks')
            continue
        # NOTE(review): `count` is never used -- leftover from an earlier version?
        count = 0.
        # Pairwise accumulation; rows with frequency 0 are skipped
        # (presumably padding from the peak picker -- confirm).
        for i in range(len(P_lead[t])):
            if P_lead[t,i,0] == 0: continue
            for j in range(len(P_backing[t])): # TODO: use vectorization for this loop?
                if P_backing[t,j,0] == 0: continue
                # Each pair is weighted by the smaller of the two amplitudes.
                ampl = min(abs(P_lead[t,i,1]), abs(P_backing[t,j,1]))
                result[t] += ampl * harmonic(P_lead[t,i,0], P_backing[t,j,0], fixed_wc, gradient)[0,0]
        # NOTE(review): normalizes by the signed amplitude sum (not abs); see
        # the weighting TODO in the docstring.
        result[t] /= np.sum(P_lead[t,:,1])
    return result
| [
37811,
198,
11828,
25,
1575,
15558,
5499,
198,
37146,
669,
25,
11288,
20469,
11033,
81,
11,
26190,
15564,
89,
732,
328,
11,
2185,
259,
446,
40790,
6051,
198,
34156,
25,
383,
17168,
5964,
11,
3740,
1378,
44813,
1668,
13,
2398,
14,
677,... | 2.294353 | 4,250 |
#!/bin/sh
#
# Name: src_rebrand.sh
# Main: JimmyBot(jmfgdev)
# Lisc: GPLV3
# Desc: Rebranding OBSD base sources for use in LBSD.
#
# Fix: this is a POSIX shell script -- the previous `#!/usr/bin/env python3`
# shebang and Python-style triple-quoted docstring were invalid here (the
# interpreter was wrong and `"""` is an unterminated quote in sh).
#
# Usage: src_rebrand.sh $SRC_DIR
# Shared helpers (rep/filecp/dircp/lineadd/linedel/space/apply,
# self_destruct_sequence) come from the deblob library.
. ./libdeblob.sh
PATCH_DIR=/tmp/src_rebrand
# Start from a clean scratch directory for generated patches.
if [ -e $PATCH_DIR ]
then
self_destruct_sequence $PATCH_DIR
else
mkdir $PATCH_DIR
fi
# Default to /usr/src unless a source tree was given as $1.
if test -z $1
then
SRC_DIR=/usr/src
else
SRC_DIR=$1
fi
arch_list="amd64 i386"
rep "export OBSD=\"OpenBSD/\$ARCH \$VNAME\"" "export OBSD=\"LibertyBSD/\$ARCH \$VNAME\"" distrib/miniroot/dot.profile
#iso_list="alpha amd64 hppa i386 sgi sparc sparc64 vax"
iso_list="amd64 i386"
# Rebrand the install-CD image makefiles for each supported arch.
for arch in $(echo $iso_list)
do
rep "OpenBSD \${OSREV} ${arch} Install CD" "LibertyBSD \${OSREV} ${arch} Install CD" distrib/$arch/iso/Makefile
rep "Copyright (c) `date +%Y` Theo de Raadt, The OpenBSD project" "Copyright (c) `date +%Y` The *OpenBSD* and LibertyBSD projects" distrib/$arch/iso/Makefile
rep "Theo de Raadt <deraadt@openbsd.org>" "Riley Baird <riley@openmailbox.org>" distrib/$arch/iso/Makefile
rep "OpenBSD/\${MACHINE} \${OSREV} Install CD" "LibertyBSD/\${MACHINE} \${OSREV} Install CD" distrib/$arch/iso/Makefile
done
#cdfs_list="alpha amd64 i386 loongson sgi sparc sparc64 vax"
cdfs_list="amd64 i386"
# Same rebranding for the boot-only CD makefiles.
for arch in $(echo $cdfs_list)
do
rep "OpenBSD \${OSREV} ${arch} bootonly CD" "LibertyBSD \${OSREV} ${arch} bootonly CD" distrib/$arch/cdfs/Makefile
rep "Copyright (c) `date +%Y` Theo de Raadt, The OpenBSD project" "Copyright (c) `date +%Y` The *OpenBSD* and LibertyBSD projects" distrib/$arch/cdfs/Makefile
rep "Theo de Raadt <deraadt@openbsd.org>" "Riley Baird <riley@openmailbox.org>" distrib/$arch/cdfs/Makefile
rep "OpenBSD/${arch} \${OSREV} boot-only CD" "LibertyBSD/${arch} \${OSREV} boot CD" distrib/$arch/cdfs/Makefile
done
# Distrib changes for all archs
for arch in $(echo $arch_list)
do
rep "${arch}-openbsd" "${arch}-libertybsd" distrib/sets/lists/base/md.$arch
rep "You will not be able to boot OpenBSD from \${1}." "You will not be able to boot LibertyBSD from \${1}." distrib/$arch/common/install.md
done
# Make bundled GNU config.guess report OpenBSD without shelling out to uname.
dir_list="lib/libiberty lib/libobjc lib/libstdc++ share usr.bin/binutils usr.bin/binutils-2.17 usr.bin/gcc usr.bin/texinfo ../usr.sbin/bind ../usr.sbin/unbound"
for dir in $dir_list
do
rep "UNAME_SYSTEM=\`(uname -s) 2>/dev/null\`" "UNAME_SYSTEM=\`(echo OpenBSD) 2>/dev/null\`" gnu/${dir}/config.guess
done
# Ship an OpenBSD-compatible uname alongside the rebranded one.
dircp files/uname-obsd usr.bin/uname-obsd
lineadd "./usr/bin/uname" "./usr/bin/uname-obsd" distrib/sets/lists/base/mi
rep "uname " "uname" distrib/sets/lists/base/mi
rep "uname-obsd " "uname-obsd" distrib/sets/lists/base/mi
rep "uname" "uname uname-obsd" usr.bin/Makefile
lineadd "uname.1" "./usr/share/man/man1/uname-obsd.1" distrib/sets/lists/man/mi
# Teach perl's Configure about the libertybsd osname.
lineadd "openbsd) osname=openbsd" "$(space 15) libertybsd) osname=libertybsd" gnu/usr.bin/perl/Configure
lineadd "openbsd) osname=openbsd" "$(space 23) ;;" gnu/usr.bin/perl/Configure
lineadd "openbsd) osname=openbsd" "$(space 23) osvers=\"\$3\"" gnu/usr.bin/perl/Configure
rep "osname=openbsd" "osname=libertybsd" gnu/usr.bin/perl/Configure
rep "interix|dragonfly|bitrig" "libertybsd|interix|dragonfly|bitrig" gnu/usr.bin/perl/Configure
rep "dragonfly\*|bitrig*" "libertybsd\*|dragonfly\*|bitrig\*" gnu/usr.bin/perl/Makefile.SH
rep "-openbsd" "-libertybsd" gnu/usr.bin/perl/Makefile.bsd-wrapper
filecp gnu/usr.bin/perl/hints/openbsd.sh gnu/usr.bin/perl/hints/libertybsd.sh
# Rebrand sendbug's dmesg marker and bug-report address.
rep "#define DMESG_START \"OpenBSD \"" "#define DMESG_START \"LibertyBSD \"" usr.bin/sendbug/sendbug.c
rep "bugs@openbsd.org" "bugs@libertybsd.net" usr.bin/sendbug/sendbug.c
# Adding LBSD keys
filecp files/keys/libertybsd-61-base.pub etc/signify/libertybsd-61-base.pub
filecp files/keys/libertybsd-61-pkg.pub etc/signify/libertybsd-61-pkg.pub
filecp files/keys/libertybsd-61-syspatch.pub etc/signify/libertybsd-61-syspatch.pub
filecp files/keys/libertybsd-62-base.pub etc/signify/libertybsd-62-base.pub
filecp files/keys/libertybsd-62-pkg.pub etc/signify/libertybsd-62-pkg.pub
filecp files/keys/libertybsd-62-syspatch.pub etc/signify/libertybsd-62-syspatch.pub
rep "openbsd-" "libertybsd-" distrib/sets/lists/base/mi
rep "-59-base.pub" "-59.pub" distrib/sets/lists/base/mi
linedel "./etc/signify/openbsd-59-pkg.pub" distrib/sets/lists/base/mi
linedel "./etc/signify/openbsd-60-base.pub" distrib/sets/lists/base/mi
linedel "./etc/signify/openbsd-60-fw.pub" distrib/sets/lists/base/mi
linedel "./etc/signify/openbsd-60-pkg.pub" distrib/sets/lists/base/mi
# Branded login banner, root mail and installer script.
filecp files/motd etc/motd
filecp files/root.mail etc/root/root.mail
filecp files/install.sub distrib/miniroot/install.sub
rep "openbsd-" "libertybsd-" usr.sbin/syspatch/syspatch.sh
rep "OpenBSD" "LibertyBSD" usr.sbin/syspatch/syspatch.sh
# Emit all queued changes as patches (helper from libdeblob.sh).
apply
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
5376,
25,
12351,
62,
260,
17938,
13,
9078,
198,
13383,
25,
12963,
20630,
7,
73,
76,
40616,
7959,
8,
198,
43,
2304,
25,
38644,
53,
18,
198,
24564,
25,
797,
1793... | 2.34648 | 2,003 |
from __future__ import print_function, division
import numpy as np
from openmdao.api import ExplicitComponent
from openaerostruct.aerodynamics.utils import _assemble_AIC_mtx, _assemble_AIC_mtx_b, \
_assemble_AIC_mtx_d
# Optional compiled-Fortran acceleration: use the OAS_API extension when it
# is importable, otherwise fall back to the pure-Python code paths.
# NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit;
# `except ImportError:` would be safer.
try:
    from openaerostruct.fortran import OAS_API
    fortran_flag = True
except:
    fortran_flag = False

# Scalar dtype used for array allocation throughout this module.
data_type = float
class AssembleAIC(ExplicitComponent):
    """
    Compute the circulations based on the AIC matrix and the panel velocities.
    Note that the flow tangency condition is enforced at the 3/4 chord point.
    There are multiple versions of the first four inputs with one
    for each surface defined.
    Each of these four inputs has the name of the surface prepended on the
    actual input name.
    Parameters
    ----------
    def_mesh[nx, ny, 3] : numpy array
        Array defining the nodal coordinates of the lifting surface.
    b_pts[nx-1, ny, 3] : numpy array
        Bound points for the horseshoe vortices, found along the 1/4 chord.
    c_pts[nx-1, ny-1, 3] : numpy array
        Collocation points on the 3/4 chord line where the flow tangency
        condition is satisfied. Used to set up the linear system.
    normals[nx-1, ny-1, 3] : numpy array
        The normal vector for each panel, computed as the cross of the two
        diagonals from the mesh points.
    v : float
        Freestream air velocity in m/s.
    alpha : float
        Angle of attack in degrees.
    Returns
    -------
    AIC[(nx-1)*(ny-1), (nx-1)*(ny-1)] : numpy array
        The aerodynamic influence coefficient matrix. Solving the linear system
        of AIC * circulations = n * v gives us the circulations for each of the
        horseshoe vortices.
    rhs[(nx-1)*(ny-1)] : numpy array
        The right-hand-side of the linear system that yields the circulations.
    """
    # NOTE(review): this excerpt appears truncated -- the method bodies are
    # missing and the `if fortran_flag:` / `if 0:` statements below have no
    # bodies, which is a SyntaxError as written.  Restore the original
    # OpenAeroStruct method implementations before using this class.
    if fortran_flag:
        if 0:
        else:
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
7297,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
6738,
1280,
9132,
5488,
13,
15042,
1330,
11884,
21950,
201,
198,
201,
198,
6738,
1280,
25534,
455,
1356,
13,
25534,
... | 2.51292 | 774 |
# coding: utf-8
# Para ejecutar el script invocar a:
#
# ```python pascal2yolo_1class -d <dir>```
#
# donde ``<dir>`` es el path absoluto (sin el / final) donde se encuentran las imágenes las anotaciones en formato pascal voc.
# Cargamos lo primero las librerías que vamos a necesitar.
# In[35]:
import xml.etree.ElementTree as ET
import os
import argparse
from imutils import paths
import cv2
#import tqdm
# ### Código para transformar ficheros PascalVOC a YOLO
#
# Función para extraer los cuadros de un fichero en formato pascal voc.
# In[19]:
# Transformar un box de PascalVOC a YOLO.
# In[30]:
# Transformar fichero PascalVOC asociado a una imagen a YOLO, la función toma como parámetro la imagen (debe haber un fichero anotado con el mismo nombre en la misma carpeta pero con extensión xml) y la carpeta de salida.
# In[39]:
# ### Función para generar el fichero con la lista de imágenes.
# In[16]:
# ### Para leer el path del dataset como un parámetro del script.
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
2547,
64,
304,
73,
721,
315,
283,
1288,
4226,
800,
420,
283,
257,
25,
198,
2,
220,
198,
2,
7559,
63,
29412,
279,
27747,
17,
88,
14057,
62,
16,
4871,
532,
67,
1279,
15908,
29,
... | 2.564885 | 393 |
import traceback
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import QCoreApplication, QFile, Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QAction, QFileDialog,QMainWindow, QMessageBox, QTableWidget,QTableWidgetItem, QTreeWidgetItem,QWidget | [
11748,
12854,
1891,
198,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
14055,
11,
33734,
54,
312,
11407,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
1195,
14055,
23416,
11,
1195,
8979,
11,
33734,
198,
6738,
9485,
48,
83,
20,
... | 2.8 | 95 |
import pickle
import os
from tqdm import tqdm
from torch.utils.data import DataLoader, Dataset
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import argparse
import math
import torch
import sys
from global_parameters import answers_dir, QG_REPO_DIR, HOWTO_PATH, TRANSFORMERS_PATH
sys.path.insert(0, os.path.join(QG_REPO_DIR, "question_generation"))
from pipelines import pipeline
# Command-line knobs for batched GPU inference.
parser = argparse.ArgumentParser("")
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--n_workers", type=int, default=4)
parser.add_argument("--max_length", type=int, default=32)
args = parser.parse_args()

# Load inferred sentences from speech transcripts.
# NOTE(review): file handle from open() is never closed -- harmless in a
# one-shot script, but `with open(...)` would be cleaner.
videos = pickle.load(
    open(
        os.path.join(
            HOWTO_PATH, "caption_howto100m_sw_nointersec_norepeat_punctuated.pickle"
        ),
        "rb",
    )
)
# Skip videos whose answers were already dumped by a previous/parallel run.
# NOTE(review): x[:11] looks like an 11-character video id prefix
# (presumably YouTube ids) -- confirm against the output file naming below.
done = os.listdir(answers_dir)
doneset = set(x[:11] for x in done)
videos = {x: y for x, y in videos.items() if x not in doneset}

# Answer extraction transformer model (highlight-based QA/QG T5 checkpoint).
ans_tokenizer = AutoTokenizer.from_pretrained(
    "valhalla/t5-small-qa-qg-hl", cache_dir=TRANSFORMERS_PATH
)
ans_model = AutoModelForSeq2SeqLM.from_pretrained(
    "valhalla/t5-small-qa-qg-hl", cache_dir=TRANSFORMERS_PATH
)
ans_model.cuda()

# Dataloader - shuffle so that this script can be parallelized on an
# arbitrary number of GPUs (each worker processes videos in a random order
# and skips ones already done).
dataset = Answer_Extraction_Dataset(caption=videos, tokenizer=ans_tokenizer)
dataloader = DataLoader(dataset, batch_size=1, num_workers=args.n_workers, shuffle=True)

# Inference: one video per dataloader item, internally re-batched below.
for i, batch in tqdm(enumerate(dataloader)):
    text, input_ids, attention_mask, video_id = (
        batch["text"],
        batch["input_ids"].squeeze(0).cuda(),
        batch["attention_mask"].squeeze(0).cuda(),
        batch["video_id"][0],
    )
    # Verify if the video has already been processed (another worker may have
    # finished it since the initial scan).
    if os.path.exists(os.path.join(answers_dir, video_id + ".pkl")):
        continue
    # Batch inference over all sentences of this video.
    n_iter = int(math.ceil(len(input_ids) / float(args.batch_size)))
    outs = torch.zeros(len(input_ids), args.max_length).long()
    with torch.no_grad():
        for k in range(n_iter):
            batch_outputs = (
                ans_model.generate(
                    input_ids=input_ids[
                        k * args.batch_size : (k + 1) * args.batch_size
                    ],
                    attention_mask=attention_mask[
                        k * args.batch_size : (k + 1) * args.batch_size
                    ],
                    max_length=args.max_length,
                )
                .detach()
                .cpu()
            )
            # Left-align generated tokens into the preallocated buffer
            # (rows stay zero-padded past the generated length).
            outs[
                k * args.batch_size : (k + 1) * args.batch_size, : batch_outputs.size(1)
            ] = batch_outputs
    # Decoding: model emits "<sep>"-separated answer spans per sentence.
    dec = [ans_tokenizer.decode(ids, skip_special_tokens=False) for ids in outs]
    answers = [item.split("<sep>") for item in dec]
    # Drop the trailing fragment after the last separator.
    answers = [i[:-1] for i in answers]
    answers = [
        list(set([y.strip() for y in x if len(y.strip())])) for x in answers
    ] # remove duplicates
    answers = [
        [x for x in y if x in text[l] or x.capitalize() in text[l]]
        for l, y in enumerate(answers)
    ] # remove answers that we cannot find back in the original sentence
    # Save (re-check first: another parallel worker may have written it).
    if os.path.exists(os.path.join(answers_dir, video_id + ".pkl")):
        continue
    pickle.dump(answers, open(os.path.join(answers_dir, video_id + ".pkl"), "wb"))
| [
11748,
2298,
293,
198,
11748,
28686,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
11,
16092,
292,
316,
198,
6738,
6121,
364,
1330,
11160,
30642,
7509,
11,
11160,
17633,
1890,... | 2.248047 | 1,536 |
# Copyright (c) 2019. Partners HealthCare and other members of
# Forome Association
#
# Developed by Sergey Trifonov based on contributions by Joel Krier,
# Michael Bouzinier, Shamil Sunyaev and other members of Division of
# Genetics, Brigham and Women's Hospital
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from forome_tools.inventory import loadDatasetInventory
#===============================================
| [
2,
220,
15069,
357,
66,
8,
13130,
13,
14205,
3893,
17784,
290,
584,
1866,
286,
198,
2,
220,
1114,
462,
5396,
198,
2,
198,
2,
220,
6013,
276,
416,
36106,
833,
361,
261,
709,
1912,
319,
9284,
416,
18623,
509,
5277,
11,
198,
2,
220... | 3.757937 | 252 |
# Note: this can be ignored for now -- it is here to support future package management
# For now, requirements.txt handles the dependencies
from setuptools import setup
# Packaging metadata for the Wikimedia Research API template service.
# NOTE(review): these install_requires overlap with requirements.txt (see the
# header comment) -- keep the two lists in sync.
setup(
    name='model',
    version='0.0.1',
    url='https://github.com/geohci/research-api-endpoint-template',
    license='MIT License',
    maintainer='Isaac J.',
    maintainer_email='isaac@wikimedia.org',
    description='Generic API template for Wikimedia Research',
    long_description='',
    packages=['model'],
    install_requires=['fasttext',
                      'flask',
                      'flask_cors',
                      'mwapi',
                      'pyyaml',
                      'uwsgi'],
    # Ship the config files bundled inside the 'model' package.
    package_data={'model': ['config/*']},
    )
2,
5740,
25,
428,
460,
307,
9514,
329,
783,
1377,
340,
318,
994,
284,
1104,
2003,
5301,
4542,
198,
2,
1114,
783,
11,
5359,
13,
14116,
17105,
262,
20086,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
... | 2.272446 | 323 |
# Print a short numbered message ("Liczba to: <n>") for n = 0, 1, 2.
for x in range(3):
    print("Liczba to: %d" % x)
| [
1640,
2124,
287,
2837,
7,
15,
11,
513,
2599,
198,
220,
220,
220,
3601,
7203,
26656,
89,
7012,
284,
25,
220,
4064,
67,
1,
4064,
357,
87,
4008,
198
] | 1.931034 | 29 |
from kingfisher_scrapy.spiders.digiwhist_base import DigiwhistBase
| [
6738,
5822,
69,
4828,
62,
1416,
2416,
88,
13,
2777,
4157,
13,
12894,
72,
1929,
396,
62,
8692,
1330,
7367,
72,
1929,
396,
14881,
628
] | 2.72 | 25 |
#!/usr/bin/env python
import mock
import math
import time
import robocar
from position import Position
# Fixed square route (corners of a 2x2 square, counter-clockwise from the
# origin) used as the test track for the robocar tests below.
TEST_ROUTE = [Position(0.0, 0.0), Position(2.0, 0.0), Position(2.0, 2.0), Position(0.0, 2.0)]
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
15290,
198,
11748,
10688,
198,
11748,
640,
198,
198,
11748,
3857,
420,
283,
198,
6738,
2292,
1330,
23158,
198,
198,
51,
6465,
62,
49,
2606,
9328,
796,
685,
26545,
7,
15,
... | 2.5 | 80 |
import logging
import sys
from sanic.log import logger
# Partial logging configuration overlaying Sanic's defaults: one shared
# "generic" formatter plus stderr streams for both console handlers.
LOGGING_CONFIG_PATCH = {
    "formatters": {
        "generic": {
            "format": "%(asctime)s [%(name)s] [%(levelname)s] %(message)s",
            "datefmt": "[%Y-%m-%d %H:%M:%S]",
        },
    },
    "handlers": {
        "console": {"stream": sys.stderr},
        "access_console": {"stream": sys.stderr},
    },
}

# Shallow copy of the generic formatter settings, usable as a base config.
BASE_LOGGING = dict(LOGGING_CONFIG_PATCH["formatters"]["generic"])
| [
11748,
18931,
198,
11748,
25064,
198,
198,
6738,
5336,
291,
13,
6404,
1330,
49706,
628,
198,
198,
25294,
38,
2751,
62,
10943,
16254,
62,
47,
11417,
796,
8633,
7,
628,
220,
220,
220,
5794,
1010,
34758,
198,
220,
220,
220,
220,
220,
2... | 1.828467 | 274 |
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
from matplotlib import colors
from matplotlib.ticker import FormatStrFormatter
from scipy.signal import resample
from MagniPy.util import confidence_interval
from MagniPy.Analysis.Visualization.barplot import bar_plot
from MagniPy.Analysis.Statistics.routines import build_densities
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
4033,
669,
355,
7577,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
1330,
7577,
198,
6738,
2603,
29487,
8019,
13,
83,
15799,... | 3.490566 | 106 |
#!/usr/bin/env python
"""
This script gathers settings and dictionaries from friso (a chinese
tokenization library) and generates a C source file that can later be
compiled into RediSearch, allowing the module to have a built-in chinese
dictionary. By default this script will generate a C source file of
compressed data but there are other options to control output (mainly for
debugging).
The `read_friso` script can be used to analyze the dumped data for debugging
purposes
"""
import zlib
import errno
import os
import re
import struct
import sys
import time
import string
from argparse import ArgumentParser
# Load the ini file
# Command-line interface: ini location, output mode (C source vs raw
# compressed/uncompressed dumps), lex-file directory override, output dir.
ap = ArgumentParser()
ap.add_argument('-i', '--ini', default='friso/friso.ini',
                help='ini file to use for initialization')
ap.add_argument('-m', '--mode', default='c', help='output mode',
                choices=['c', 'raw_z', 'raw_u'])
ap.add_argument('-d', '--dir', default='.',
                help='Override directory of lex files')
ap.add_argument('-o', '--out', help='Name of destination directory',
                default='cndict_generated')
opts = ap.parse_args()
lexdir = opts.dir

# Names of the C symbols emitted into the generated source file.
# NOTE(review): SIZE_FULL_VARNME is missing the 'A' (VARNME vs VARNAME); it
# is used consistently below, so renaming must be done everywhere at once.
DICT_VARNAME = 'ChineseDict'
SIZE_COMP_VARNAME = 'ChineseDictCompressedLength'
SIZE_FULL_VARNME = 'ChineseDictFullLength'

# friso settings forwarded verbatim from the ini into the generated
# configuration function; ConfigEntry is defined elsewhere in this file.
configs = [
    ConfigEntry('max_len', 'max_len', int),
    ConfigEntry('r_name', 'r_name', int),
    ConfigEntry('mix_len', 'mix_len', int),
    ConfigEntry('lna_len', 'lna_len', int),
    ConfigEntry('add_syn', 'add_syn', int),
    ConfigEntry('clr_stw', 'clr_stw', int),
    ConfigEntry('keep_urec', 'keep_urec', int),
    ConfigEntry('spx_out', 'spx_out', int),
    ConfigEntry('nthreshold', 'nthreshold', int),
    ConfigEntry('mode', 'mode', int),
    ConfigEntry('charset', 'charset', int),
    ConfigEntry('en_sseg', 'en_sseg', int),
    ConfigEntry('st_minl', 'st_minl', int),
    ConfigEntry('kpuncs', 'kpuncs', str)
]
# Read the friso .ini configuration: remember the lexicon directory and
# forward every other setting to its matching ConfigEntry.
with open(opts.ini, 'r') as fp:
    for line in fp:
        line = line.strip()
        # Skip blank lines and comments.
        if not line or line.startswith('#'):
            continue
        # Split on the first '=' only, so values may themselves contain '='
        # (the old unbounded split raised ValueError on such lines).
        key, value = line.split('=', 1)
        key = key.strip()
        value = value.strip()
        if key == 'friso.lex_dir':
            # The -d/--dir command-line override wins over the ini value.
            if not lexdir:
                lexdir = value
        else:
            set_key_value(key, value)
# Parse the header snippet in order to emit the correct constant.
# The snippet mirrors the friso C enum of lexicon entry types.
_LEXTYPE_MAP_STRS = \
r'''
__LEX_CJK_WORDS__ = 0,
__LEX_CJK_UNITS__ = 1,
__LEX_ECM_WORDS__ = 2, //english and chinese mixed words.
__LEX_CEM_WORDS__ = 3, //chinese and english mixed words.
__LEX_CN_LNAME__ = 4,
__LEX_CN_SNAME__ = 5,
__LEX_CN_DNAME1__ = 6,
__LEX_CN_DNAME2__ = 7,
__LEX_CN_LNA__ = 8,
__LEX_STOPWORDS__ = 9,
__LEX_ENPUN_WORDS__ = 10,
__LEX_EN_WORDS__ = 11,
__LEX_OTHER_WORDS__ = 15,
__LEX_NCSYN_WORDS__ = 16,
__LEX_PUNC_WORDS__ = 17, //punctuations
__LEX_UNKNOW_WORDS__ = 18 //unrecognized words.
'''
# Map each __LEX_*__ name to its numeric value.
# NOTE(review): the pattern is not a raw string; '\s'/'\d' work today but
# emit DeprecationWarning on newer Pythons -- prefer r'...'.
LEXTYPE_MAP = {}
for m in re.findall('\s*(__[^=]*__)\s*=\s*([\d]*)', _LEXTYPE_MAP_STRS):
    LEXTYPE_MAP[m[0]] = int(m[1])
# Lex type currently occupies the low 5 bits of the record byte; bits 5 and
# 6 flag the presence of synonym and frequency data respectively.
TYPE_MASK = 0x1F
F_SYNS = 0x01 << 5
F_FREQS = 0x02 << 5
# Index file maps lexicon types to the lex files backing them, in the form
# "type:[file1,file2,...]".
lexre = re.compile(r'([^:]+)\w*:\w*\[([^\]]*)\]', re.MULTILINE)
lexindex = os.path.join(lexdir, 'friso.lex.ini')
lexinfo = open(lexindex, 'r').read()
matches = lexre.findall(lexinfo)
# print matches
# Pick the output filename by mode: C source for 'c', raw dump otherwise.
dstdir = opts.out
if opts.mode == 'c':
    dstfile = 'cndict_data.c'
else:
    dstfile = 'cndict_data.out'
# Create the destination directory, tolerating an existing one.
try:
    os.makedirs(dstdir)
except OSError as e:
    if e.errno != errno.EEXIST:
        raise
dstfile = os.path.join(dstdir, dstfile)
ofp = open(dstfile, 'w')
# In C mode, emit the file preamble and open the dictionary char array.
if opts.mode == 'c':
    ofp.write(r'''
// Compressed chinese dictionary
// Generated by {}
// at {}
#include "friso/friso.h"
#include <stdlib.h>
#include <string.h>
const char {}[] =
'''.format(' '.join(sys.argv), time.ctime(), DICT_VARNAME))
    ofp.flush()
# Stream every lex file through the encoder/buffer pipeline
# (SourceEncoder, LexBuffer and the helpers are defined elsewhere in this file).
lexout = SourceEncoder(ofp)
lexbuf = LexBuffer(lexout)
for m in matches:
    typestr, filestr = sanitize_file_entry(m[0], m[1])
    # print typestr
    # print filestr
    for filename in filestr:
        # Lex paths are relative to the directory of friso.lex.ini.
        filename = os.path.join(os.path.dirname(lexindex), filename)
        process_lex_entry(typestr, filename, lexbuf)
lexbuf.flush(is_final=True)
# Close the array literal and emit the size constants.
ofp.write(';\n')
ofp.write('const size_t {} = {};\n'.format(SIZE_COMP_VARNAME, lexbuf.compressed_size))
ofp.write('const size_t {} = {};\n'.format(SIZE_FULL_VARNME, lexbuf.full_size))
# Emit the generated configuration function applying the ini settings.
config_lines = write_config_init('frisoConfig', configs)
config_fn = '\n'.join(config_lines)
friso_config_txt = '''
void ChineseDictConfigure(friso_t friso, friso_config_t frisoConfig) {
'''
friso_config_txt += config_fn
friso_config_txt += '\n}\n'
ofp.write(friso_config_txt)
ofp.flush()
ofp.close()
# hdrfile = os.path.join(dstdir, 'cndict_data.h')
# hdrfp = open(hdrfile, 'w')
# hdrfp.write(r'''
#ifndef CNDICT_DATA_H
#define CNDICT_DATA_H
# extern const char {data_var}[];
# extern const size_t {uncomp_len_var};
# extern const size_t {comp_len_var};
# {config_fn_txt}
# #endif
# '''.format(
# data_var=DICT_VARNAME,
# uncomp_len_var=SIZE_FULL_VARNME,
# comp_len_var=SIZE_COMP_VARNAME,
# config_fn_txt=friso_config_txt
# ))
# hdrfp.flush()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
1212,
4226,
43609,
6460,
290,
48589,
3166,
422,
1216,
26786,
357,
64,
442,
3762,
198,
30001,
1634,
5888,
8,
290,
18616,
257,
327,
2723,
2393,
326,
460,
1568,
307,
198,... | 2.247191 | 2,314 |
"""
DICpy: digital imahe correlation with python
========================================
"""
import pkg_resources
import DICpy.DIC_2D
import DICpy.utils
from DICpy.DIC_2D import *
from DICpy.utils import *
# Resolve the installed distribution's version; fall back to None when the
# package is not installed (e.g. running from a source checkout).
# Fix: the original repeated this identical try/except block twice -- the
# second copy was redundant and has been removed.
try:
    __version__ = pkg_resources.get_distribution("DICpy").version
except pkg_resources.DistributionNotFound:
    __version__ = None
| [
37811,
198,
35,
2149,
9078,
25,
4875,
545,
64,
258,
16096,
351,
21015,
198,
10052,
2559,
198,
37811,
198,
198,
11748,
279,
10025,
62,
37540,
198,
198,
11748,
360,
2149,
9078,
13,
35,
2149,
62,
17,
35,
198,
11748,
360,
2149,
9078,
13... | 2.963415 | 164 |
from .. import main
| [
6738,
11485,
1330,
1388,
198
] | 4 | 5 |
"""
Message module docstring.
"""
import abc
class Message(object, metaclass=abc.ABCMeta):
"""
Message class docstring.
"""
@abc.abstractmethod
| [
37811,
198,
12837,
8265,
2205,
8841,
13,
198,
37811,
198,
11748,
450,
66,
628,
198,
4871,
16000,
7,
15252,
11,
1138,
330,
31172,
28,
39305,
13,
24694,
48526,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
16000,
1398,
2205,
8841... | 2.745763 | 59 |
# Run from home directory with python -m pytest tests
import pytest
import random
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Dense, Concatenate, BatchNormalization, GRU, LSTM
from nn_builder.tensorflow.RNN import RNN
# Shared synthetic fixture: 250 random samples of shape (3, 5) in [0, 1).
N = 250
X = np.random.random((N, 3, 5))
X = X.astype('float32')
# Shift feature index 3 of the first half by +10 so it cleanly separates
# the two classes.
X[0:125, :, 3] += 10.0
# Binary labels: True exactly for the shifted first half (values > 5).
y = X[:, 2, 3] > 5.0
def test_user_hidden_layers_input_rejections():
    """Tests whether network rejects invalid hidden_layers inputted from user"""
    # Malformed specs: unknown layer names, and 3-element entries where the
    # RNN builder expects [name, size] pairs.
    # NOTE(review): loop variable `input` shadows the builtin of the same name.
    inputs_that_should_fail = [[["linearr", 33]], [["linear", 12, 33]], [["gru", 2, 33]], [["lstm", 2, 33]], [["lstmr", 33]],
                               [["gruu", 33]], [["gru", 33], ["xxx", 33]], [["linear", 33], ["gru", 12], ["gru", 33]] ]
    for input in inputs_that_should_fail:
        with pytest.raises(AssertionError):
            RNN(layers_info=input, hidden_activations="relu",
                output_activation="relu")
def test_user_hidden_layers_input_acceptances():
    """Tests whether network accepts valid hidden_layers inputted from user"""
    # Well-formed [name, size] specs must construct without raising; the
    # constructor returning a truthy instance is the whole assertion.
    inputs_that_should_work = [[["linear", 33]], [["linear", 12]], [["gru", 2]], [["lstm", 2]], [["lstm", 1]],
                               [["gru", 330]], [["gru", 33], ["linear", 2]] ]
    for input in inputs_that_should_work:
        assert RNN(layers_info=input, hidden_activations="relu",
                   output_activation="relu")
def test_hidden_layers_created_correctly():
    """Tests that create_hidden_layers works correctly"""
    # The last ["linear", 10] entry is the output layer; the first three
    # become hidden layers of the matching Keras type and width.
    layers = [["gru", 25], ["lstm", 23], ["linear", 5], ["linear", 10]]
    rnn = RNN(layers_info=layers, hidden_activations="relu",
              output_activation="relu")
    assert type(rnn.hidden_layers[0]) == GRU
    assert rnn.hidden_layers[0].units == 25
    assert type(rnn.hidden_layers[1]) == LSTM
    assert rnn.hidden_layers[1].units == 23
    assert type(rnn.hidden_layers[2]) == Dense
    assert rnn.hidden_layers[2].units == 5
    assert type(rnn.output_layers[0]) == Dense
    assert rnn.output_layers[0].units == 10
def test_output_layers_created_correctly():
"""Tests that create_output_layers works correctly"""
layers = [["gru", 25], ["lstm", 23], ["linear", 5], ["linear", 10]]
rnn = RNN(layers_info=layers, hidden_activations="relu", output_activation="relu")
assert rnn.output_layers[0].units == 10
layers = [["gru", 25], ["lstm", 23], ["lstm", 10]]
rnn = RNN(layers_info=layers, hidden_activations="relu",
output_activation="relu")
assert rnn.output_layers[0].units == 10
layers = [["gru", 25], ["lstm", 23], [["lstm", 10], ["linear", 15]]]
rnn = RNN(layers_info=layers, hidden_activations="relu",
output_activation=["relu", "softmax"])
assert rnn.output_layers[0].units == 10
assert rnn.output_layers[1].units == 15
def test_output_dim_user_input():
"""Tests whether network rejects an invalid output_dim input from user"""
inputs_that_should_fail = [-1, "aa", ["dd"], [2], 0, 2.5, {2}]
for input_value in inputs_that_should_fail:
with pytest.raises(AssertionError):
RNN(layers_info=[2, input_value], hidden_activations="relu", output_activation="relu")
with pytest.raises(AssertionError):
RNN(layers_info=input_value, hidden_activations="relu", output_activation="relu")
def test_activations_user_input():
"""Tests whether network rejects an invalid hidden_activations or output_activation from user"""
inputs_that_should_fail = [-1, "aa", ["dd"], [2], 0, 2.5, {2}, "Xavier_"]
for input_value in inputs_that_should_fail:
with pytest.raises(AssertionError):
RNN(layers_info=[["linear", 2]], hidden_activations=input_value,
output_activation="relu")
RNN(layers_info=[["linear", 2]], hidden_activations="relu",
output_activation=input_value)
def test_initialiser_user_input():
"""Tests whether network rejects an invalid initialiser from user"""
inputs_that_should_fail = [-1, "aa", ["dd"], [2], 0, 2.5, {2}, "Xavier_"]
for input_value in inputs_that_should_fail:
with pytest.raises(AssertionError):
RNN(layers_info=[["linear", 2]], hidden_activations="relu",
output_activation="relu", initialiser=input_value)
RNN(layers_info=[["linear", 2], ["linear", 2]], hidden_activations="relu",
output_activation="relu", initialiser="xavier")
def test_batch_norm_layers():
"""Tests whether batch_norm_layers method works correctly"""
layers = [["gru", 20], ["lstm", 3], ["linear", 4], ["linear", 10]]
rnn = RNN(layers_info=layers, hidden_activations="relu",
output_activation="relu", initialiser="xavier", batch_norm=True)
assert len(rnn.batch_norm_layers) == 3
for layer in rnn.batch_norm_layers:
assert isinstance(layer, BatchNormalization)
def test_linear_layers_only_come_at_end():
"""Tests that it throws an error if user tries to provide list of hidden layers that include linear layers where they
don't only come at the end"""
layers = [["gru", 20], ["linear", 4], ["lstm", 3], ["linear", 10]]
with pytest.raises(AssertionError):
rnn = RNN(layers_info=layers, hidden_activations="relu",
output_activation="relu", initialiser="xavier", batch_norm=True)
layers = [["gru", 20], ["lstm", 3], ["linear", 4], ["linear", 10]]
assert RNN(layers_info=layers, hidden_activations="relu",
output_activation="relu", initialiser="xavier", batch_norm=True)
def test_output_activation():
"""Tests whether network outputs data that has gone through correct activation function"""
RANDOM_ITERATIONS = 20
for _ in range(RANDOM_ITERATIONS):
data = np.random.random((25, 10, 30))
data = data.astype('float32')
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu",
output_activation="relu", initialiser="xavier", batch_norm=True)
out = RNN_instance(data)
assert all(tf.reshape(out, [-1]) >= 0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5]],
hidden_activations="relu",
output_activation="relu", initialiser="xavier")
out = RNN_instance(data)
assert all(tf.reshape(out, [-1]) >= 0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu",
output_activation="relu", initialiser="xavier")
out = RNN_instance(data)
assert all(tf.reshape(out, [-1]) >= 0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu",
output_activation="sigmoid", initialiser="xavier")
out = RNN_instance(data)
assert all(tf.reshape(out, [-1]) >= 0)
assert all(tf.reshape(out, [-1]) <= 1)
summed_result = tf.reduce_sum(out, axis=1)
summed_result = tf.reshape(summed_result, [-1, 1])
assert summed_result != 1.0
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu",
output_activation="softmax", initialiser="xavier")
out = RNN_instance(data)
assert all(tf.reshape(out, [-1]) >= 0)
assert all(tf.reshape(out, [-1]) <= 1)
summed_result = tf.reduce_sum(out, axis=1)
assert (np.round(summed_result, 3) == 1.0).all()
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["lstm", 25]],
hidden_activations="relu",
output_activation="softmax", initialiser="xavier")
out = RNN_instance(data)
assert all(tf.reshape(out, [-1]) >= 0)
assert all(tf.reshape(out, [-1]) <= 1)
summed_result = tf.reduce_sum(out, axis=1)
assert (np.round(summed_result, 3) == 1.0).all()
RNN_instance = RNN(layers_info=[["linear", 20], ["linear", 50]],
hidden_activations="relu")
out = RNN_instance(data)
assert not all(tf.reshape(out, [-1]) >= 0)
assert not all(tf.reshape(out, [-1]) <= 1)
summed_result = tf.reduce_sum(out, axis=1)
assert not (np.round(summed_result, 3) == 1.0).all()
RNN_instance = RNN(layers_info=[ ["lstm", 25], ["linear", 10]],
hidden_activations="relu")
out = RNN_instance(data)
assert not all(tf.reshape(out, [-1]) >= 0)
assert not all(tf.reshape(out, [-1]) <= 0)
summed_result = tf.reduce_sum(out, axis=1)
assert not (np.round(summed_result, 3) == 1.0).all()
def test_output_activation():
"""Tests whether network outputs data that has gone through correct activation function"""
RANDOM_ITERATIONS = 20
for _ in range(RANDOM_ITERATIONS):
data = np.random.random((25, 10, 30))
data = data.astype('float32')
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu",
output_activation="relu", initialiser="xavier", batch_norm=True)
out = RNN_instance(data)
assert all(tf.reshape(out, [-1]) >= 0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5]],
hidden_activations="relu",
output_activation="relu", initialiser="xavier")
out = RNN_instance(data)
assert all(tf.reshape(out, [-1]) >= 0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu",
output_activation="relu", initialiser="xavier")
out = RNN_instance(data)
assert all(tf.reshape(out, [-1]) >= 0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu",
output_activation="sigmoid", initialiser="xavier")
out = RNN_instance(data)
assert all(tf.reshape(out, [-1]) >= 0)
assert all(tf.reshape(out, [-1]) <= 1)
summed_result = tf.reduce_sum(out, axis=1)
summed_result = tf.reshape(summed_result, [-1, 1])
assert summed_result != 1.0
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu",
output_activation="softmax", initialiser="xavier")
out = RNN_instance(data)
assert all(tf.reshape(out, [-1]) >= 0)
assert all(tf.reshape(out, [-1]) <= 1)
summed_result = tf.reduce_sum(out, axis=1)
assert (np.round(summed_result, 3) == 1.0).all()
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["lstm", 25]],
hidden_activations="relu",
output_activation="softmax", initialiser="xavier")
out = RNN_instance(data)
assert all(tf.reshape(out, [-1]) >= 0)
assert all(tf.reshape(out, [-1]) <= 1)
summed_result = tf.reduce_sum(out, axis=1)
assert (np.round(summed_result, 3) == 1.0).all()
RNN_instance = RNN(layers_info=[["linear", 20], ["linear", 50]],
hidden_activations="relu")
out = RNN_instance(data)
assert not all(tf.reshape(out, [-1]) >= 0)
assert not all(tf.reshape(out, [-1]) <= 1)
summed_result = tf.reduce_sum(out, axis=1)
assert not (np.round(summed_result, 3) == 1.0).all()
RNN_instance = RNN(layers_info=[ ["lstm", 25], ["linear", 10]],
hidden_activations="relu")
out = RNN_instance(data)
assert not all(tf.reshape(out, [-1]) >= 0)
assert not all(tf.reshape(out, [-1]) <= 0)
summed_result = tf.reduce_sum(out, axis=1)
assert not (np.round(summed_result, 3) == 1.0).all()
def test_y_range():
"""Tests whether setting a y range works correctly"""
for _ in range(20):
val1 = random.random() - 3.0*random.random()
val2 = random.random() + 2.0*random.random()
lower_bound = min(val1, val2)
upper_bound = max(val1, val2)
rnn = RNN(layers_info=[["lstm", 20], ["gru", 5], ["lstm", 25]],
hidden_activations="relu", y_range=(lower_bound, upper_bound), initialiser="xavier")
random_data = np.random.random((10, 11, 22))
random_data = random_data.astype('float32')
out = rnn(random_data)
assert all(tf.reshape(out, [-1]) > lower_bound)
assert all(tf.reshape(out, [-1]) < upper_bound)
def test_deals_with_None_activation():
"""Tests whether is able to handle user inputting None as output activation"""
assert RNN(layers_info=[["lstm", 20], ["gru", 5], ["lstm", 25]],
hidden_activations="relu", output_activation=None,
initialiser="xavier")
def test_y_range_user_input():
"""Tests whether network rejects invalid y_range inputs"""
invalid_y_range_inputs = [ (4, 1), (2, 4, 8), [2, 4], (np.array(2.0), 6.9)]
for y_range_value in invalid_y_range_inputs:
with pytest.raises(AssertionError):
print(y_range_value)
rnn = RNN(layers_info=[["lstm", 20], ["gru", 5], ["lstm", 25]],
hidden_activations="relu", y_range=y_range_value,
initialiser="xavier")
def solves_simple_problem(X, y, nn_instance):
"""Checks if a given network is able to solve a simple problem"""
print("X shape ", X.shape)
print("y shape ", y.shape)
nn_instance.compile(optimizer='adam',
loss='mse')
nn_instance.fit(X, y, epochs=25)
results = nn_instance.evaluate(X, y)
print("FINAL RESULT ", results)
return results < 0.1
def test_model_trains():
"""Tests whether a small range of networks can solve a simple task"""
for output_activation in ["sigmoid", "None"]:
rnn = RNN(layers_info=[["gru", 20], ["lstm", 8], ["linear", 1]],
hidden_activations="relu", output_activation=output_activation,
initialiser="xavier")
assert solves_simple_problem(X, y, rnn)
def test_model_trains_part_2():
"""Tests whether a small range of networks can solve a simple task"""
z = X[:, 2:3, 3:4] > 5.0
z = np.concatenate([z == 1, z == 0], axis=1)
z = z.reshape((-1, 2))
rnn = RNN(layers_info=[["gru", 20], ["lstm", 2]],
hidden_activations="relu", output_activation="softmax", dropout=0.01,
initialiser="xavier")
assert solves_simple_problem(X, z, rnn)
rnn = RNN(layers_info=[["lstm", 20], ["linear", 1]],
hidden_activations="relu", output_activation=None,
initialiser="xavier")
assert solves_simple_problem(X, y, rnn)
rnn = RNN(layers_info=[["lstm", 20], ["gru", 10], ["linear", 20], ["linear", 1]],
hidden_activations="relu", output_activation=None,
initialiser="xavier")
assert solves_simple_problem(X, y, rnn)
def test_model_trains_with_batch_norm():
"""Tests whether a model with batch norm on can solve a simple task"""
rnn = RNN(layers_info=[["lstm", 20], ["linear", 20], ["linear", 1]],
hidden_activations="relu", output_activation=None,
initialiser="xavier", batch_norm=True)
assert solves_simple_problem(X, y, rnn)
def test_dropout():
"""Tests whether dropout layer reads in probability correctly"""
rnn = RNN(layers_info=[["lstm", 20], ["gru", 10], ["linear", 20], ["linear", 1]],
hidden_activations="relu", output_activation="sigmoid", dropout=0.9999,
initialiser="xavier")
assert rnn.dropout_layer.rate == 0.9999
assert not solves_simple_problem(X, y, rnn)
rnn = RNN(layers_info=[["lstm", 20], ["gru", 10], ["linear", 20], ["linear", 1]],
hidden_activations="relu", output_activation=None, dropout=0.0000001,
initialiser="xavier")
assert rnn.dropout_layer.rate == 0.0000001
assert solves_simple_problem(X, y, rnn)
def test_all_activations_work():
"""Tests that all activations get accepted"""
nn_instance = RNN(layers_info=[["lstm", 20], ["gru", 10], ["linear", 20], ["linear", 1]],
hidden_activations="relu", output_activation=None, dropout=0.0000001,
initialiser="xavier")
for key in nn_instance.str_to_activations_converter.keys():
assert RNN(layers_info=[["lstm", 20], ["gru", 10], ["linear", 20], ["linear", 1]],
hidden_activations=key, output_activation=key, dropout=0.0000001,
initialiser="xavier")
def test_all_initialisers_work():
"""Tests that all initialisers get accepted"""
nn_instance = RNN(layers_info=[["lstm", 20], ["gru", 10], ["linear", 20], ["linear", 1]],
hidden_activations="relu", output_activation=None, dropout=0.0000001,
initialiser="xavier")
for key in nn_instance.str_to_initialiser_converter.keys():
assert RNN(layers_info=[["lstm", 20], ["gru", 10], ["linear", 20], ["linear", 1]],
dropout=0.0000001,
initialiser=key)
def test_output_shapes():
"""Tests whether network outputs of correct shape"""
rnn = RNN(layers_info=[["gru", 20], ["lstm", 8], ["linear", 3]],
hidden_activations="relu", initialiser="xavier")
output = rnn(X)
assert output.shape == (N, 3)
rnn = RNN(layers_info=[["gru", 20], ["lstm", 8], ["linear", 7]],
hidden_activations="relu", initialiser="xavier", return_final_seq_only=False)
output = rnn(X)
assert output.shape == (N, 3, 7)
rnn = RNN(layers_info=[["gru", 20], ["lstm", 8], ["lstm", 3]],
hidden_activations="relu", initialiser="xavier")
output = rnn(X)
assert output.shape == (N, 3)
rnn = RNN(layers_info=[["gru", 20], ["lstm", 8], ["lstm", 7]],
hidden_activations="relu", initialiser="xavier", return_final_seq_only=False)
output = rnn(X)
assert output.shape == (N, 3, 7)
def test_return_final_seq_user_input_valid():
"""Checks whether network only accepts a valid boolean value for return_final_seq_only"""
for valid_case in [True, False]:
assert RNN(layers_info=[["gru", 20], ["lstm", 8], ["linear", 7]],
hidden_activations="relu", initialiser="xavier", return_final_seq_only=valid_case)
for invalid_case in [[True], 22, [1, 3], (True, False), (5, False)]:
with pytest.raises(AssertionError):
print(invalid_case)
RNN(layers_info=[["gru", 20], ["lstm", 8], ["linear", 7]],
hidden_activations="relu", initialiser="xavier", return_final_seq_only=invalid_case)
def test_embedding_layers():
"""Tests whether create_embedding_layers_info method works correctly"""
for embedding_in_dim_1, embedding_out_dim_1, embedding_in_dim_2, embedding_out_dim_2 in zip(range(5, 8), range(3, 6), range(1, 4), range(24, 27)):
nn_instance = RNN( layers_info=[["gru", 20], ["lstm", 8], ["linear", 7]], columns_of_data_to_be_embedded=[0, 1],
embedding_dimensions =[[embedding_in_dim_1, embedding_out_dim_1], [embedding_in_dim_2, embedding_out_dim_2]])
for layer in nn_instance.embedding_layers:
assert isinstance(layer, tf.keras.layers.Embedding)
assert len(nn_instance.embedding_layers) == 2
assert nn_instance.embedding_layers[0].input_dim == embedding_in_dim_1
assert nn_instance.embedding_layers[0].output_dim == embedding_out_dim_1
assert nn_instance.embedding_layers[1].input_dim == embedding_in_dim_2
assert nn_instance.embedding_layers[1].output_dim == embedding_out_dim_2
def test_incorporate_embeddings():
"""Tests the method incorporate_embeddings"""
X_new = X
X_new[:, [0, 2], :] = tf.round(X_new[:, [0, 2], :])
nn_instance = RNN(layers_info=[["gru", 20], ["lstm", 8], ["linear", 7]],
columns_of_data_to_be_embedded=[0, 2],
embedding_dimensions=[[50, 3], [55, 4]])
out = nn_instance.incorporate_embeddings(X)
assert out.shape == (N, 3, 10)
nn_instance = RNN(layers_info=[["gru", 20], ["lstm", 8], ["linear", 7]],
columns_of_data_to_be_embedded=[0, 1, 2],
embedding_dimensions=[[50, 3], [55, 4], [55, 4]])
out = nn_instance.incorporate_embeddings(X)
assert out.shape == (N, 3, 13)
nn_instance = RNN(layers_info=[["gru", 20], ["lstm", 8], ["linear", 7]],
columns_of_data_to_be_embedded=[2],
embedding_dimensions=[[150, 30]])
out = nn_instance.incorporate_embeddings(X)
assert out.shape == (N, 3, 34)
def test_embedding_network_can_solve_simple_problem():
"""Tests whether network can solve simple problem using embeddings"""
X = (np.random.random((N, 4, 5)) - 0.5) * 5.0 + 20.0
y = (X[:, :, 0] >= 25) * (X[:, :, 1] <= 25) * 1.0
nn_instance = RNN(layers_info=[["gru", 20], ["lstm", 8], ["linear", 1]],
columns_of_data_to_be_embedded=[0, 1],
embedding_dimensions=[[50, 3],
[55, 3]])
assert solves_simple_problem(X, y, nn_instance)
nn_instance = RNN(layers_info=[["gru", 20], ["lstm", 8], ["linear", 1]],
columns_of_data_to_be_embedded=[1],
embedding_dimensions=[[55, 3]])
assert solves_simple_problem(X, y, nn_instance)
nn_instance = RNN(layers_info=[["gru", 20], ["lstm", 8], ["linear", 1]],
columns_of_data_to_be_embedded=[1, 3, 0],
embedding_dimensions=[[55, 3], [55, 5], [55, 2]])
assert solves_simple_problem(X, y, nn_instance)
def test_output_heads_error_catching():
"""Tests that having multiple output heads catches errors from user inputs"""
output_dims_that_should_break = [["linear", 2, 2, "SAME", "conv", 3, 4, "SAME"], [[["lstm", 3], ["gru", 4]]],
[[2, 8]], [-33, 33, 33, 33, 33]]
for output_dim in output_dims_that_should_break:
with pytest.raises(AssertionError):
RNN(layers_info=[["gru", 20], ["lstm", 8], output_dim],
hidden_activations="relu", output_activation="relu")
output_activations_that_should_break = ["relu", ["relu"], ["relu", "softmax"]]
for output_activation in output_activations_that_should_break:
with pytest.raises(AssertionError):
RNN(layers_info=[["gru", 20], ["lstm", 8], [["linear", 5], ["linear", 2], ["linear", 5]]],
hidden_activations="relu", output_activation=output_activation)
def test_output_head_layers():
"""Tests whether the output head layers get created properly"""
for output_dim in [[["linear", 3],["linear", 9]], [["linear", 4], ["linear", 20]], [["linear", 1], ["linear", 1]]]:
nn_instance = RNN(layers_info=[["gru", 20], ["lstm", 8], output_dim],
hidden_activations="relu", output_activation=["softmax", None])
assert nn_instance.output_layers[0].units == output_dim[0][1]
assert nn_instance.output_layers[1].units == output_dim[1][1]
def test_output_head_activations_work():
"""Tests that output head activations work properly"""
output_dim = [["linear", 5], ["linear", 10], ["linear", 3]]
nn_instance = RNN(layers_info=[["gru", 20], ["lstm", 8], output_dim],
hidden_activations="relu", output_activation=["softmax", None, "relu"])
x = np.random.random((20, 10, 4)) * -20.0
x = x.astype('float32')
out = nn_instance(x)
assert out.shape == (20, 18)
sums = tf.reduce_sum(out[:, :5], axis=1)
sums_others = tf.reduce_sum(out[:, 5:], axis=1)
sums_others_2 = tf.reduce_sum(out[:, 5:15], axis=1)
sums_others_3 = tf.reduce_sum(out[:, 15:18], axis=1)
for row in range(out.shape[0]):
assert tf.math.equal(np.round(sums[row], 4), 1.0), sums[row]
assert not tf.math.equal(np.round(sums_others[row], 4), 1.0), np.round(sums_others[row], 4)
assert not tf.math.equal(np.round(sums_others_2[row], 4), 1.0), np.round(sums_others_2[row], 4)
assert not tf.math.equal(np.round(sums_others_3[row], 4), 1.0), np.round(sums_others_3[row], 4)
for col in range(3):
assert out[row, 15 + col] >= 0.0, out[row, 15 + col]
def test_output_head_shapes_correct():
"""Tests that the output shape of network is correct when using multiple outpout heads"""
N = 20
X = np.random.random((N, 10, 4)) * -20.0
X = X.astype('float32')
for _ in range(25):
nn_instance = RNN(
layers_info=[["gru", 20], ["lstm", 8], ["linear", 1], ["linear", 12]],
hidden_activations="relu")
out = nn_instance(X)
assert out.shape[0] == N
assert out.shape[1] == 12
for output_dim in [[ ["linear", 10], ["linear", 4], ["linear", 6]], [["linear", 3], ["linear", 8], ["linear", 9]]]:
nn_instance = RNN(
layers_info=[["gru", 20], ["lstm", 8], ["linear", 1], ["linear", 12], output_dim],
hidden_activations="relu", output_activation=["softmax", None, "relu"])
out = nn_instance(X)
assert out.shape[0] == N
assert out.shape[1] == 20
| [
2,
5660,
422,
1363,
8619,
351,
21015,
532,
76,
12972,
9288,
5254,
198,
11748,
12972,
9288,
198,
11748,
4738,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,... | 2.173365 | 11,969 |
from datetime import datetime
if __name__ == "__main__":
currentage = input("What is your current age? ")
retirementage = input("At what age would you like to retire? ")
retirementCalculator = RetirementCalculator()
yearsleft = retirementCalculator.yearsleft(int(currentage), int(retirementage))
print("You have " + str(yearsleft) + " years left until you can retire.")
print("It's " + str(retirementCalculator.currentyear()) + ", so you can retire in " + str(
retirementCalculator.retirementyear(yearsleft)))
| [
6738,
4818,
8079,
1330,
4818,
8079,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1459,
496,
796,
5128,
7203,
2061,
318,
534,
1459,
2479,
30,
366,
8,
198,
220,
220,
220,
10737,
496,
796,... | 3.150289 | 173 |
import cmath
import math
from tkinter import W
polos = 8
freq = 60
t1 = 380
t2 = 220
r1 = 0.19
r2L = 0.11
x1 = 0.45
x2L = 0.21
rp = 130
xm = 22
perdastotais = 1735
rpm = 400
ns = 120*freq/polos
print("ns = ", ns)
s = (ns - rpm)/ns
print("s = ", s)
aux1=[x1+x2L]
Zs = complex(r1+r2L/s,sum(list(aux1)))
print("Zs = ", Zs)
Zsp = cmath.polar(Zs)
print("Zsp = ", Zsp)
Zp = rp*complex(0,xm)/complex(rp,xm)
print("Zp RECTANGULAR = ", Zp)
Zpp = cmath.polar(Zp)
print("Zp POLAR = ", Zpp)
I2 = complex(t2,0)/Zs
print("I2 RECT= ", I2)
I2p = cmath.polar(I2)
print("I2 POLAR= ", I2p)
If = complex(t2,0)/Zp
print("If RECT= ", If)
I1 = I2 + If
print("I1 RECT= ", I1)
I1p = cmath.polar(I1)
print("I1 POLAR= ", I1p)
print("\nA - a corrente de entrada\n")
Zeq = (Zs*Zp)/(Zs+Zp)
print("Zeq RECT= ", Zeq)
cmath.polar(Zeq)
print("Zeq POLAR= ", Zeq)
print("\nB - A potencia consumida da rede\n")
p1 = 3*t2*I1p[0]*cmath.cos(I1p[1])
p1real = round(p1.real,4)
print("p1 = ", p1real,"W")
print("\nC - A potencia no eixo da maquina\n")
p2 = 3*(1-s)*(r2L/s)*(I2p[0])*(I2p[0])
p2 = round(p2,4)
print("p2 =", p2,"W")
print("\nD - O torque no eixo da maquina\n")
w = 2*cmath.pi*rpm/freq
w = round(w,4)
print("w = ", w,"rad/s")
t=p2/w
t = round(t,4)
print("t = ", t,"N.m")
print("\nE - rendimento\n")
eni = (p2/p1real)*100
eni = round(eni,4)
print("eni = ", eni,"%") | [
11748,
269,
11018,
198,
11748,
10688,
198,
6738,
256,
74,
3849,
1330,
370,
628,
198,
16104,
418,
796,
807,
198,
19503,
80,
796,
3126,
198,
83,
16,
796,
29101,
198,
83,
17,
796,
15629,
198,
81,
16,
796,
657,
13,
1129,
198,
81,
17,
... | 1.853224 | 729 |
from configuration import *
file_name = '4pages.pdf'
result_file_name = "result.xml"
opts = {
"file": test_data_path + file_name
}
response = pdf_api.put_pdf_in_request_to_xml(
temp_folder + '/' + result_file_name, **opts)
pprint(response)
| [
6738,
8398,
1330,
1635,
198,
198,
7753,
62,
3672,
796,
705,
19,
31126,
13,
12315,
6,
198,
20274,
62,
7753,
62,
3672,
796,
366,
20274,
13,
19875,
1,
198,
198,
404,
912,
796,
1391,
198,
220,
220,
220,
366,
7753,
1298,
1332,
62,
7890... | 2.51 | 100 |
#
# $Id$
#
from BeautifulSoup import BeautifulSoup
import sys, string
import HTMLParser
from utils import commaSep
parser = HTMLParser.HTMLParser()
soup = BeautifulSoup(file(sys.argv[1]).read())
#def commaSep(items):
# return ",".join(['"' + i + '"' for i in items])
for division in soup.findAll('tr', {'bgcolor':'silver'}):
parent = division.parent
rows = parent.findAll('tr')
division_name = rows[0].contents[0].contents[0].contents[0]
division = division_name.split(' ')[1]
headers = [str(cell.contents[0]) for cell in rows[0].findAll('b')]
headers[0] = 'Team Name'
headers.append('Division')
headers.append('Position')
if division == "1":
print commaSep(headers)
position = 1
for row in rows[1:]:
data = [parser.unescape(str(cell.contents[0])) for cell in row.findAll('td') if str(cell.contents[0]) != ' ']
data.append(division)
data.append(str(position))
position = position + 1
print commaSep(data)
| [
2,
198,
2,
720,
7390,
3,
198,
2,
198,
6738,
23762,
50,
10486,
1330,
23762,
50,
10486,
198,
11748,
25064,
11,
4731,
198,
11748,
11532,
46677,
198,
6738,
3384,
4487,
1330,
39650,
19117,
198,
198,
48610,
796,
11532,
46677,
13,
28656,
466... | 2.740741 | 351 |
# question can be found on leetcode.com/problems/valid-palindrome
| [
2,
1808,
460,
307,
1043,
319,
443,
316,
8189,
13,
785,
14,
1676,
22143,
14,
12102,
12,
18596,
521,
5998,
628
] | 3.190476 | 21 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
# Poskrapeovat iba jazyk - slovensky
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
2896,
500,
994,
262,
4981,
329,
534,
15881,
276,
3709,
198,
2,
198,
2,
4091,
10314,
287,
25,
198,
2,
2638,
1378,
15390,
13,
1416,
2416,
88,
13,
2398,
14,
2... | 2.582278 | 79 |
import codecs
import os
import pathlib
import platform
import re
from setuptools import setup, find_packages
def clean_html(raw_html):
"""
Args:
raw_html:
Returns:
"""
cleanr = re.compile("<.*?>")
cleantext = re.sub(cleanr, "", raw_html).strip()
return cleantext
# Single sourcing code from here:
# https://packaging.python.org/guides/single-sourcing-package-version/
def find_version(*file_paths):
"""
Args:
*file_paths:
Returns:
"""
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
Args:
*parts:
Returns:
"""
with codecs.open(os.path.join(here, *parts), "r") as fp:
return fp.read()
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def fetch_long_description():
"""
Returns:
"""
with open("README.md", encoding="utf8") as f:
readme = f.read()
# https://stackoverflow.com/a/12982689
readme = clean_html(readme)
return readme
def fetch_requirements():
"""
Returns:
"""
requirements_file = "requirements.txt"
if platform.system() == "Windows":
DEPENDENCY_LINKS.append("https://download.pytorch.org/whl/torch_stable.html")
with open(requirements_file) as f:
reqs = f.read()
reqs = reqs.strip().split("\n")
return reqs
HERE = pathlib.Path(__file__).parent
DISTNAME = "self_attention_cv"
DESCRIPTION = "Self-attention building blocks for computer vision applications in PyTorch"
LONG_DESCRIPTION = (HERE / "README.md").read_text()
LONG_DESCRIPTION_CONTENT_TYPE = "text/markdown"
URL = "https://github.com/The-AI-Summer/self_attention_cv"
AUTHOR = "Adaloglou Nikolas"
AUTHOR_EMAIL = "nikolas@theaiusummer.com"
LICENSE = "MIT"
DEPENDENCY_LINKS = []
REQUIREMENTS = (fetch_requirements())
EXCLUDES = ("examples")
EXT_MODULES = []
if __name__ == "__main__":
setup(
name=DISTNAME,
install_requires=REQUIREMENTS,
url=URL,
license=LICENSE,
include_package_data=True,
version=find_version("self_attention_cv", "version.py"),
packages=find_packages(exclude=EXCLUDES),
python_requires=">=3.6",
ext_modules=EXT_MODULES,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
dependency_links=DEPENDENCY_LINKS,
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
]
)
| [
11748,
40481,
82,
198,
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
3859,
198,
11748,
302,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
628,
198,
4299,
3424,
62,
6494,
7,
1831,
62,
6494,
2599,
198,
220,
220... | 2.294586 | 1,256 |
from scrappy.persistor.in_memory import InMemoryPersistor
from scrappy.persistor.document import Document
| [
6738,
19320,
14097,
13,
19276,
32380,
13,
259,
62,
31673,
1330,
554,
30871,
30946,
32380,
198,
6738,
19320,
14097,
13,
19276,
32380,
13,
22897,
1330,
16854,
628
] | 3.962963 | 27 |
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2019) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from unittest import TestCase
import mock
from hpOneView.connection import connection
from hpOneView.resources.networking.fcoe_networks import FcoeNetworks
from hpOneView.resources.resource import Resource, ResourceHelper, ResourcePatchMixin
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
21017,
198,
2,
357,
34,
8,
15069,
357,
6999,
12,
23344,
8,
30446,
15503,
6400,
446,
14973,
7712,
18470,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
... | 3.787062 | 371 |
from __future__ import (absolute_import, division, print_function)
import sys
| [
6738,
11593,
37443,
834,
1330,
357,
48546,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
8,
198,
198,
11748,
25064,
198
] | 3.761905 | 21 |
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import mean_squared_error
"""
线性回归:梯度下降法
:return:None
"""
# 1.获取数据
data = load_boston()
# 2.数据集划分
x_train, x_test, y_train, y_test = train_test_split(data.data, data.target, random_state=22)
# 3.特征工程-标准化
transfer = StandardScaler()
x_train = transfer.fit_transform(x_train)
x_test = transfer.fit_transform(x_test)
# 4.机器学习-线性回归(特征方程)
estimator = SGDRegressor(max_iter=1000)
estimator.fit(x_train, y_train)
# 5.模型评估
# 5.1 获取系数等值
y_predict = estimator.predict(x_test)
print("预测值为:\n", y_predict)
print("模型中的系数为:\n", estimator.coef_)
print("模型中的偏置为:\n", estimator.intercept_)
# 5.2 评价
# 均方误差
error = mean_squared_error(y_test, y_predict)
print("误差为:\n", error) | [
6738,
1341,
35720,
13,
19608,
292,
1039,
1330,
3440,
62,
65,
5744,
201,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
201,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
8997,
3351,
36213,
201,
198,
67... | 1.766537 | 514 |
import unittest
from zeppos_data_manager.series_enricher import SeriesEnricher
if __name__ == '__main__':
unittest.main() | [
11748,
555,
715,
395,
198,
6738,
41271,
381,
418,
62,
7890,
62,
37153,
13,
25076,
62,
268,
1173,
372,
1330,
7171,
4834,
1173,
372,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715... | 2.723404 | 47 |
#!/usr/bin/env python3
# The arrow library is used to handle datetimes
import arrow
import json
# The request library is used to fetch content through HTTP
import requests
import re
tz_bo = 'America/La_Paz'
def extract_xsrf_token(html):
"""Extracts XSRF token from the source code of the generation graph page."""
return re.search(r'var ttoken = "([a-f0-9]+)";', html).group(1)
def fetch_production(zone_key='BO', session=None, target_datetime=None, logger=None) -> dict:
"""Requests the last known production mix (in MW) of a given country."""
if target_datetime is not None:
now = arrow.get(target_datetime)
else:
now = arrow.now(tz=tz_bo)
r = session or requests.session()
# Define actual and previous day (for midnight data).
formatted_date = now.format('YYYY-MM-DD')
# initial path for url to request
url_init = 'https://www.cndc.bo/gene/dat/gene.php?fechag={0}'
# XSRF token for the initial request
xsrf_token = extract_xsrf_token(r.get("https://www.cndc.bo/gene/index.php").text)
resp = r.get(url_init.format(formatted_date), headers={
"x-csrf-token": xsrf_token
})
hour_rows = json.loads(resp.text.replace('', ''))["data"]
payload = []
for hour_row in hour_rows:
[hour, forecast, _total, thermo, hydro, wind, _unknown] = hour_row
if target_datetime is None and hour > now.hour:
continue
if hour == 24:
timestamp = now.shift(days=1)
else:
timestamp = now
if target_datetime is not None and hour < 24:
timestamp = timestamp.replace(hour=hour-1)
hour_resp = template_response(zone_key, timestamp.datetime, "cndc.bo")
hour_resp["production"]["unknown"] = thermo
hour_resp["production"]["hydro"] = hydro
hour_resp["production"]["wind"] = wind
payload.append(hour_resp)
return payload
if __name__ == '__main__':
"""Main method, never used by the Electricity Map backend, but handy for testing."""
print('fetch_production() ->')
print(fetch_production())
print('fetch_generation_forecast() ->')
print(fetch_generation_forecast())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
383,
15452,
5888,
318,
973,
284,
5412,
4818,
46874,
198,
11748,
15452,
198,
11748,
33918,
198,
2,
383,
2581,
5888,
318,
973,
284,
21207,
2695,
832,
14626,
198,
11748,
7007,... | 2.564761 | 857 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from utils.fever_db import convert_brc
from chaonan_src._utils.wiki_pageview_utils import WikiPageviews
from chaonan_src._doc_retrieval.item_rules_spiral import ItemRuleBuilderSpiral
__author__ = ['chaonan99']
class ItemRuleRawPageview(ItemRuleBuilderSpiral):
"""docstring for ItemRuleRawPageview"""
def pageview_rule(self):
"""Assign high priority to frequently viewed pages
"""
if not hasattr(self, 'wiki_pv'):
print("Reload wiki pageview dict")
self.wiki_pv = WikiPageviews()
item = self.item
docid_groups = [[i[0] for i in it] \
for _, it in item['structured_docids'].items()]
for key, group_prio_docids in item['structured_docids'].items():
group_docids = [it[0] for it in group_prio_docids]
all_scores = map(lambda x: self.wiki_pv[convert_brc(x)],
group_docids)
all_scores = np.array(list(all_scores))
prios = np.argsort(all_scores)[::-1]
new_gpd = []
for i, p in enumerate(prios):
# new_gpd.append((group_prio_docids[p][0],
# group_prio_docids[p][1] + \
# max(1.0 - i*0.2, 0)))
new_gpd.append((group_prio_docids[p][0],
int(all_scores[p])))
item['structured_docids'][key] = new_gpd
try:
finded_keys = item['structured_docids'].values()
finded_keys = set([i for ii in finded_keys for i in ii]) \
if len(finded_keys) > 0 else set(finded_keys)
item['prioritized_docids'] = list(finded_keys)
except Exception as e:
from IPython import embed; embed(); import os; os._exit(1)
return self
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
3384,
4487,
13,
69,
964,
62,
9945,
1330,
10385,
62,
1671,
66,
198... | 1.947211 | 1,004 |