content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python3
#coding=utf-8
import fcntl,subprocess,socket,struct,multiprocessing,queue,threading
sock_dict={}
sock_dict_lock=threading.Lock()
Buffer=2048
accept_access()
for k,v in zip(sock_dict.keys(),sock_dict.values()):
print(k,v)
th1=threading.Thread(target=router,args=(socket.inet_aton('172.16.10.100'),socket.inet_aton('172.16.10.101')),daemon=1)
th2=threading.Thread(target=router,args=(socket.inet_aton('172.16.10.101'),socket.inet_aton('172.16.10.100')),daemon=1)
th1.start()
th2.start()
try:
while 1:
input()
except KeyboardInterrupt:
print('\rexit...')
finally:
for client in sock_dict.values():
client.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
66,
7656,
28,
40477,
12,
23,
198,
198,
11748,
277,
66,
429,
75,
11,
7266,
14681,
11,
44971,
11,
7249,
11,
16680,
541,
305,
919,
278,
11,
36560,
11,
16663,
278,
198,
198,
8... | 2.382353 | 272 |
__author__ = 'noe'
from .api import *
| [
834,
9800,
834,
796,
705,
77,
2577,
6,
198,
198,
6738,
764,
15042,
1330,
1635,
198
] | 2.4375 | 16 |
"""Alter OAuth2Token.token_type to Enum
Revision ID: 82184d7d1e88
Revises: 5e2954a2af18
Create Date: 2016-11-10 21:14:33.787194
"""
# revision identifiers, used by Alembic.
revision = '82184d7d1e88'
down_revision = '5e2954a2af18'
from alembic import op
import sqlalchemy as sa
| [
37811,
2348,
353,
440,
30515,
17,
30642,
13,
30001,
62,
4906,
284,
2039,
388,
198,
198,
18009,
1166,
4522,
25,
9415,
22883,
67,
22,
67,
16,
68,
3459,
198,
18009,
2696,
25,
642,
68,
1959,
4051,
64,
17,
1878,
1507,
198,
16447,
7536,
... | 2.378151 | 119 |
import os
import signal
from os.path import join
from sys import argv
from utils.csv_table import CsvTable
from utils.fasta_map import FastaMap
from utils.hierarchy_tree import HierarchyTree
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
if __name__ == '__main__':
if len(argv) == 2:
pid_h = os.fork()
if pid_h == 0:
main()
else:
try:
os.wait()
except KeyboardInterrupt:
os.kill(pid_h, signal.SIGKILL)
print("\nshutdown")
else:
print("python sarscovhierarchy.py <data_path>")
| [
11748,
28686,
198,
11748,
6737,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
6738,
25064,
1330,
1822,
85,
198,
198,
6738,
3384,
4487,
13,
40664,
62,
11487,
1330,
327,
21370,
10962,
198,
6738,
3384,
4487,
13,
7217,
64,
62,
8899,
1330,
... | 2.009967 | 301 |
from rest_framework import status
from rest_framework.response import Response
| [
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
628,
628,
198
] | 4.611111 | 18 |
from datastructures.stack import Stack
import unittest
if __name__ == '__main__':
unittest.main() | [
6738,
4818,
459,
1356,
942,
13,
25558,
1330,
23881,
198,
11748,
555,
715,
395,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419
] | 2.810811 | 37 |
from PIL import Image
import requests
url = "https://www.yr.no/place/Norway/Viken/Halden/Halden//meteogram.png"
response = requests.get(url, stream = True)
img = Image.open(response.raw)
#TODO! Test image size 800, x
img.thumbnail((800, 262)) #Resizing
#TODO! Convert better
img = img.convert("L")
#img.show()
img.save("meteogram.png")
| [
6738,
350,
4146,
1330,
7412,
198,
11748,
7007,
628,
198,
6371,
796,
366,
5450,
1378,
2503,
13,
2417,
13,
3919,
14,
5372,
14,
21991,
1014,
14,
53,
29943,
14,
39,
1940,
268,
14,
39,
1940,
268,
1003,
4164,
68,
21857,
13,
11134,
1,
19... | 2.589552 | 134 |
# Copyright 2018-2019 David Corbett
# Copyright 2019-2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['Builder']
import collections
import enum
import functools
import itertools
import io
import math
import re
import unicodedata
import fontforge
import fontTools.agl
import fontTools.feaLib.ast
import fontTools.feaLib.builder
import fontTools.feaLib.parser
import fontTools.misc.transform
import fontTools.otlLib.builder
import anchors
import schema
from schema import Ignorability
from schema import MAX_DOUBLE_MARKS
from schema import MAX_HUB_PRIORITY
from schema import NO_PHASE_INDEX
from schema import Schema
from shapes import AnchorWidthDigit
from shapes import Bound
from shapes import Carry
from shapes import ChildEdge
from shapes import Circle
from shapes import CircleRole
from shapes import Complex
from shapes import ContextMarker
from shapes import ContinuingOverlap
from shapes import ContinuingOverlapS
from shapes import Curve
from shapes import DigitStatus
from shapes import Dot
from shapes import Dummy
from shapes import End
from shapes import EntryWidthDigit
from shapes import GlyphClassSelector
from shapes import Hub
from shapes import InitialSecantMarker
from shapes import InvalidDTLS
from shapes import InvalidOverlap
from shapes import InvalidStep
from shapes import LINE_FACTOR
from shapes import LeftBoundDigit
from shapes import Line
from shapes import MarkAnchorSelector
from shapes import Notdef
from shapes import Ou
from shapes import ParentEdge
from shapes import RADIUS
from shapes import RightBoundDigit
from shapes import RomanianU
from shapes import RootOnlyParentEdge
from shapes import SeparateAffix
from shapes import Space
from shapes import Start
from shapes import TangentHook
from shapes import ValidDTLS
from shapes import Wa
from shapes import Wi
from shapes import WidthNumber
from shapes import XShape
import sifting
from utils import CAP_HEIGHT
from utils import CLONE_DEFAULT
from utils import CURVE_OFFSET
from utils import Context
from utils import DEFAULT_SIDE_BEARING
from utils import EPSILON
from utils import GlyphClass
from utils import MAX_TREE_DEPTH
from utils import MAX_TREE_WIDTH
from utils import NO_CONTEXT
from utils import Type
from utils import WIDTH_MARKER_PLACES
from utils import WIDTH_MARKER_RADIX
from utils import mkmk
BRACKET_HEIGHT = 1.27 * CAP_HEIGHT
BRACKET_DEPTH = -0.27 * CAP_HEIGHT
SHADING_FACTOR = 12 / 7
REGULAR_LIGHT_LINE = 70
MINIMUM_STROKE_GAP = 70
STRIKEOUT_POSITION = 258
CONTINUING_OVERLAP_CLASS = 'global..cont'
HUB_CLASS = 'global..hub'
CONTINUING_OVERLAP_OR_HUB_CLASS = 'global..cont_or_hub'
PARENT_EDGE_CLASS = 'global..pe'
CHILD_EDGE_CLASSES = [f'global..ce{child_index + 1}' for child_index in range(MAX_TREE_WIDTH)]
INTER_EDGE_CLASSES = [[f'global..edge{layer_index}_{child_index + 1}' for child_index in range(MAX_TREE_WIDTH)] for layer_index in range(MAX_TREE_DEPTH)]
assert WIDTH_MARKER_RADIX % 2 == 0, 'WIDTH_MARKER_RADIX must be even'
| [
2,
15069,
2864,
12,
23344,
3271,
2744,
48138,
198,
2,
15069,
13130,
12,
1238,
2481,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
... | 3.398239 | 1,022 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-05 04:42
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
22,
319,
2177,
12,
2919,
12,
2713,
8702,
25,
3682,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.736842 | 57 |
import asyncio
from .Client import CLIENT
| [
11748,
30351,
952,
201,
198,
6738,
764,
11792,
1330,
45148,
201,
198,
201,
198,
201,
198
] | 3 | 16 |
import numpy as np, math
| [
11748,
299,
32152,
355,
45941,
11,
10688,
198
] | 3.125 | 8 |
#!/usr/bin/env python
# coding: utf-8
import argparse
import base64
import glob
import os
import platform
import re
import sys
import tempfile
import json
import time
import logging
from pymongo import MongoClient
import requests
log = logging.getLogger(name=__name__)
if platform.system() != 'Windows':
from Crypto.PublicKey import RSA
from Crypto import Random
from Crypto import Random
from Crypto.Cipher import AES
BLOCK_SIZE = 16
DEFAULT_KEY_PATH = '~/.ssh/id_rsa'
_keys = {}
def encrypt(message, public_key=None, width=60, **kwargs):
"""
Encrypt a string using Asymmetric and Symmetric encryption.
:param width:
:param message: message to encrypt
:param public_key: public key to use in encryption
:return: encrypted string
"""
random = Random.new()
key = random.read(AES.key_size[0])
passphrase = base64.b64encode(key)
iv = Random.new().read(AES.block_size)
aes = AES.new(passphrase, AES.MODE_CBC, iv)
message = read_value(message, kwargs)
data = aes.encrypt(pad(message))
token = rsa_encrypt(key + iv, public_key)
enc_str = base64.b64encode(data + token).decode()
if width > 0:
x = split2len(enc_str, width)
return '\n'.join(x)
else:
return enc_str
def decrypt(encrypted, private_key=None, **kwargs):
"""
Decrypt a string using Asymmetric and Symmetric encryption.
:param encrypted: message to decrypt
:param private_key: private key to use in decryption
:return: decrypted string
"""
encrypted = ''.join(encrypted.split('\n'))
data = base64.b64decode(encrypted)
payload = data[:-256]
token = rsa_decrypt(data[-256:], private_key)
passphrase = base64.b64encode(token[:AES.key_size[0]])
iv = token[AES.key_size[0]:]
aes = AES.new(passphrase, AES.MODE_CBC, iv)
return aes.decrypt(payload).rstrip(b'\0').decode()
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
1822,
29572,
198,
11748,
2779,
2414,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
3859,
198,
11748,
302,
198,
11748,
25064,
198,
1... | 2.612218 | 753 |
from osp.corpus.syllabus import Syllabus
def test_log_path(mock_osp):
"""
Syllabus#log_path should return the .log file path.
"""
path = mock_osp.add_file()
syllabus = Syllabus(path)
assert syllabus.log_path == path+'.log'
| [
198,
198,
6738,
267,
2777,
13,
10215,
79,
385,
13,
1837,
297,
46844,
1330,
1632,
297,
46844,
628,
198,
4299,
1332,
62,
6404,
62,
6978,
7,
76,
735,
62,
2117,
2599,
628,
220,
220,
220,
37227,
198,
220,
220,
220,
1632,
297,
46844,
2,... | 2.419048 | 105 |
#!/usr/bin/env python
# coding: utf-8
# # Title : 1985 Auto Imports Database Analyses
# <img src='Large10.jpg'>
# ## <font color='green'>Data Dictionary</font>
# ### Input variables
#
# 01. **symboling**: [its assigned insurance risk rating -> [-3, -2, -1, 0, 1, 2, 3]]
# 02. **normalized-losses**: [average loss payment per insured vehicle year -> continuous from 65 to 256.]
# 03. make: [ Manufacturer name eg : alfa-romero, audi, bmw, chevrolet, dodge, honda,isuzu etc. ]
# 04. fuel-type: [diesel, gas]
# 05. aspiration: [std, turbo]
# 06. num-of-doors: [four, two].
# 07. body-style: [hardtop, wagon, sedan, hatchback, convertible]
# 08. drive-wheels: [4wd, fwd, rwd]
# 09. engine-location: [front, rear]
# 10. wheel-base: [continuous from 86.6 120.9]
# 11. length: [continuous from 141.1 to 208.1]
# 12. width: [continuous from 60.3 to 72.3]
# 13. height: [continuous from 47.8 to 59.8]
# 14. curb-weight: [continuous from 1488 to 4066]
# 15. engine-type: [dohc, dohcv, l, ohc, ohcf, ohcv, rotor]
# 16. num-of-cylinders: [eight, five, four, six, three, twelve, two]
# 17. engine-size: [continuous from 61 to 326]
# 18. fuel-system: [1bbl, 2bbl, 4bbl, idi, mfi, mpfi, spdi, spfi]
# 19. bore: [continuous from 2.54 to 3.94]
# 20. stroke: [continuous from 2.07 to 4.17]
# 21. compression-ratio: [continuous from 7 to 23]
# 22. horsepower: [continuous from 48 to 288]
# 23. peak-rpm: [continuous from 4150 to 6600]
# 24. city-mpg: [continuous from 13 to 49]
# 25. highway-mpg: [continuous from 16 to 54]
#
# ## Output Variable
# price: [continuous from 5118 to 45400]
#
# ## Import libraries
# In[ ]:
# Numerical libraries
import numpy as np
# Import Linear Regression machine learning library
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import Normalizer
# to handle data in form of rows and columns
import pandas as pd
# importing ploting libraries
from matplotlib import pyplot as plt
import matplotlib.pyplot as plt
#importing seaborn for statistical plots
import seaborn as sns
# ## Load data
#
# In[ ]:
df = pd.read_csv("../../../input/toramky_automobile-dataset/Automobile_data.csv",na_values=['?'])
# In[ ]:
df.head()
# ##### This data set consists of three types of entities:
# ##### (a) the specification of an auto in terms of various characteristics
# ##### (b)its assigned insurance risk rating
# ##### (c) its normalized losses in use as compared to other cars.
# ## Exploratory Data Analysis
# ### a. Analyse Data
#
# In[ ]:
df.info()
# In[ ]:
df.describe()
# In[ ]:
na_cols = {}
for col in df.columns:
missed = df.shape[0] - df[col].dropna().shape[0]
if missed > 0:
na_cols[col] = missed
na_cols
# In[ ]:
sum(df.isnull().any())
#sum(df.isnull().any())
# In[ ]:
df[np.any(df[df.columns[2:]].isnull(), axis=1)]
# #### This clearly shows the number of rows and columns having missing or NA values.
# In[ ]:
df[['normalized-losses','bore','stroke','horsepower','peak-rpm']] = df[['normalized-losses','bore','stroke','horsepower','peak-rpm']].astype('float64')
# In[ ]:
df.info()
# In[ ]:
df_1 = df.copy()
# In[ ]:
df_1.head()
# ### b. Refine & Transform
#
# In[ ]:
# Imputting Missing value
imp = Imputer(missing_values='NaN', strategy='mean' )
df_1[['normalized-losses','bore','stroke','horsepower','peak-rpm','price']] = imp.fit_transform(df_1[['normalized-losses','bore','stroke','horsepower','peak-rpm','price']])
df_1.head()
#########################################################################################################################
# In[ ]:
df_1['num-of-doors'] = df_1['num-of-doors'].fillna('four')
# In[ ]:
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder = LabelEncoder()
for i in ['make','fuel-type','aspiration', 'num-of-doors','body-style','drive-wheels','engine-location','engine-type','num-of-cylinders','fuel-system']:
df_1[i] = labelencoder.fit_transform(df_1[i])
df_1.head()
# ### Analyse Dataset -
# ##### 4 .How many records are available in the data set and how many attributes. Do you think the depth (number of records) is sufficient given the breadth? In other words, is the sample likely to be a good representative of the universe?
# In[ ]:
df_1.shape
# #### The above dataset has 205 rows and 26 columns which is not a good sample. We can say that it is not a good representative of the universe
# ### d. Visualize data
# ### <font color='red'> 5.Analyse the data distribution for the various attributes and share your observations. <\font>
# In[ ]:
# In[ ]:
from matplotlib import pyplot as plt
# ### *****Top Selling Car Manufacturer is **Toyota**
#
# #### Categorical features distributions:
# In[ ]:
categorical = ['make', 'fuel-type', 'aspiration', 'num-of-doors', 'body-style', 'engine-location', 'drive-wheels', 'engine-type', 'num-of-cylinders', 'fuel-system']
fig, axs = plt.subplots(nrows=3, ncols=3, figsize=(18, 12))
for col, ax in zip(categorical[1:], axs.ravel()):
sns.countplot(x=col, data=df, ax=ax)
# #### Max Cars are running on Gas
# #### Max Cars have engine in front
# #### Max Cars have 4 cylinders
# #### Max Cars have mpfi as fuel system
# In[ ]:
df_1.corr()
# In[ ]:
from matplotlib import pyplot as plt
plt.figure(figsize=(15, 15))
print()
plt.title('Cross correlation between numerical')
print()
# In[ ]:
## Above graph shows Wheel base , Length , Width are highly correlated.
## Highway mpg and city mpg is also highly correlated.
## Compression ratio and fuel type is also correlated
## Engine size and horse power is also correlated
df_2 = df_1.drop(['length','width','city-mpg','fuel-type','horsepower'],axis=1)
df_2.head()
# In[ ]:
from matplotlib import pyplot as plt
plt.figure(figsize=(15, 15))
print()
plt.title('Cross correlation between numerical')
print()
# ## Above graphs and HeatMap shows that -
# ### Wheel base , Length , Width are highly correlated.
# ### Highway mpg and city mpg is also highly correlated.
# ### Compression ratio and fuel type is also correlated
# ### Engine size and horse power is also correlated
# ## Attributes which has stronger relationship with price -
#
# ## 1. Curb-Weight
# ## 2. Engine-Size
# ## 3. Horsepower
# ## 4. Mpg(City / Highway mpg)
# ## 5. Lenght/ Width
# In[ ]:
sns.lmplot(x= 'curb-weight' , y='price', data=df_2)
# In[ ]:
sns.lmplot(x= 'engine-size' , y='price', hue = 'num-of-doors', data=df_2)
# In[ ]:
sns.lmplot(x= 'horsepower' , y='price',hue = 'fuel-system', data=df)
# In[ ]:
sns.lmplot(x= 'highway-mpg' , y='price', data=df)
# ## Split data into training and test data
# In[ ]:
X = df_2.drop('price',axis =1)
X.head()
# In[ ]:
# Lets use 80% of data for training and 20% for testing
import sklearn
Y = df_2['price']
X = df_2.drop('price',axis =1)
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(X, Y,train_size=0.8, test_size=0.2, random_state=0)
# ### Linear Regression could be the best algorithm to solve such problem with better accuracy as most of the attributes (Independent Variables) follow Linear pattern with Dependent variable i.e. (Price)
# ## Training of the model
# In[ ]:
# Fitting Multiple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
lm_1 = regressor.fit(x_train, y_train)
# In[ ]:
lm_1.score(x_train,y_train)
# In[ ]:
lm_1.score(x_test,y_test)
# In[ ]:
df_2.shape
# In[ ]:
df_3 = df_2.copy()
# In[ ]:
# Replace '-' in column names with '_'
names = []
for name in df_3.columns:
names.append(name.replace('-', '_'))
df_3.columns = names
# In[ ]:
df_3.info()
# In[ ]:
import statsmodels.formula.api as smf
lm0 = smf.ols(formula= 'price ~ symboling+normalized_losses+make+aspiration+num_of_doors+body_style+drive_wheels+engine_location+wheel_base+height+curb_weight+engine_type+num_of_cylinders+engine_size+fuel_system+bore+stroke+compression_ratio+peak_rpm' , data =df_3).fit()
# In[ ]:
lm0.params
# In[ ]:
print(lm0.summary())
# ## Model Builduing Part -2
# In[ ]:
from sklearn.preprocessing import Normalizer
# Normalizing Data
nor = Normalizer()
df_4 = nor.fit_transform(df_2)
# In[ ]:
col = []
for i in df_2.columns:
col.append(i.replace('-', '_'))
# In[ ]:
df_4 = pd.DataFrame(df_4 , columns = col)
df_4.head()
# In[ ]:
# Lets use 80% of data for training and 20% for testing
import sklearn
Y_1 = df_4['price']
X_1 = df_4.drop('price',axis =1)
x_train_1, x_test_1, y_train_1, y_test_1 = sklearn.model_selection.train_test_split(X_1, Y_1,train_size=0.8, test_size=0.2, random_state=0)
# In[ ]:
# Fitting Multiple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
lm_2 = regressor.fit(x_train_1, y_train_1)
# In[ ]:
pred_train_y = regressor.predict(x_train_1)
pred_test_y = regressor.predict(x_test_1)
# In[ ]:
lm_2.score(x_train_1,y_train_1)
# ## R^2 = 0.98 for Train data
# In[ ]:
lm_2.score(x_test_1,y_test_1)
# ## R^2 = 0.96 for Test data
# In[ ]:
mse = np.mean((pred_test_y -y_test_1)**2)
mse
# In[ ]:
## Residual Vs fitted plot -
x_plot = plt.scatter(pred_test_y,(pred_test_y - y_test_1),c='b')
plt.hlines(y=0,xmin = 0 , xmax = 1)
plt.title('Residual plot')
# ### There is no pattern so we can infer that data is linear and there is no Heteroskedasticity issue
# ## Linear model using OLS -
# In[ ]:
import statsmodels.formula.api as smf
lm1 = smf.ols(formula= 'price ~ symboling+normalized_losses+make+aspiration+num_of_doors+body_style+drive_wheels+engine_location+wheel_base+height+curb_weight+engine_type+num_of_cylinders+engine_size+fuel_system+bore+stroke+compression_ratio+peak_rpm' , data =df_4).fit()
# In[ ]:
lm2 = smf.ols(formula= 'price ~ symboling+normalized_losses+make+aspiration+num_of_doors+drive_wheels+engine_location+wheel_base+height+curb_weight+engine_type+num_of_cylinders+engine_size+fuel_system+bore+stroke+compression_ratio+peak_rpm' , data =df_4).fit()
# In[ ]:
lm3 = smf.ols(formula= 'price ~ aspiration+num_of_doors+wheel_base+curb_weight+engine_size+fuel_system+bore+stroke+peak_rpm' , data =df_4).fit()
# In[ ]:
lm3.params
# In[ ]:
print(lm3.summary())
# ## The Above results shows Multi Linear Regression Model with R^2 = 0.974
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
11851,
1058,
12863,
11160,
1846,
3742,
24047,
1052,
43710,
220,
198,
198,
2,
1279,
9600,
12351,
11639,
21968,
940,
13,
9479,
44167,
... | 2.512661 | 4,344 |
import discord
from discord.ext import commands
from motor.motor_asyncio import AsyncIOMotorClient
import json
with open('config.json') as f:
config_var = json.load(f)
cluster = AsyncIOMotorClient(config_var['mango_link'])
cursor = cluster["custom_prefix"]["prefix"]
bcursor = cluster['bot']['blacklist']
intents = discord.Intents.all()
bot = commands.Bot(command_prefix=get_prefix, intents=intents, help_command=CustomHelp(),
description="One bot Many functionality", owner_id=860876181036335104, enable_debug_events=True,
case_insensitive=True, activity=discord.Streaming(name="Happy new Year!", url="https://www.twitch.tv/dvieth"))
@bot.event
@bot.check
bot.add_check(block_blacklist_user)
@bot.event
@bot.event
@bot.event
cog_list = ['audio', 'economy', 'entertainment', 'leveling', 'moderation', 'owner', 'rtfm', 'settings', 'tag', 'utilities']
if __name__ == '__main__':
# Load extension
for folder in cog_list:
bot.load_extension(f'cogs.{folder}')
bot.load_extension('jishaku')
bot.run(config_var['token'])
| [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
5584,
13,
76,
20965,
62,
292,
13361,
952,
1330,
1081,
13361,
40,
2662,
20965,
11792,
198,
11748,
33918,
628,
198,
4480,
1280,
10786,
11250,
13,
17752,
11537,
355,
277,
25,... | 2.607565 | 423 |
"""
imjayigpayatinlay.py
Jim's pig latin
See ./readme.md.
Here are tests based on the design constraints,
in roughly order of increasing difficulty.
--- tests --------------------------------------
>>> text_to_piglatin("apple") # word starts with vowel
'appleway'
>>> text_to_piglatin("cat") # word starts with consonant
'atcay'
>>> text_to_piglatin("strike") # word starts with consonant cluster
'ikestray'
>>> text_to_piglatin("style") # "y" within word is vowel
'ylstay'
>>> text_to_piglatin("yellow") # "y" starts word is consonant
'ellowyay'
>>> text_to_piglatin("quiet") # "qu" is treated as a single letter
'ietquay'
>>> text_to_piglatin("one two three") # multiple words
'oneway otway eethreway'
>>> text_to_piglatin("one, two, three!") # puncuation
'oneway, otway, eethreway!'
>>> text_to_piglatin("South Bend Indiana") # capitalization
'Outhsay Endbay Indianaway'
>>> text_to_piglatin('The cat said "meow".') # sentence, more punctuation
'Ethay atcay aidsay "eowmay".'
>>> text_to_piglatin("an off-campus apartment") # hyphenated word
'anway offway-ampuscay apartmentway'
>>> text_to_piglatin("(foo) [bar]") # parens and brackets
'(oofay) [arbay]'
>>> text_to_piglatin("It is 7.3 inches high.") # words and numbers
'Itway isway 7.3 inchesway ighhay."
>>> text_to_piglatin("17 23.2 one2 s78 7th") # pure and mixed numbers
'17 23.2 one2way 78say 7thway'
>>> text_to_piglatin("Célébrons la 10e saison de BIXI en 2018!") # diacritic
'Élébronsay laway 10eway aisonsay eday enway 2018!'
>>> text_to_piglatin("And I can't stand him.") # contraction
'Andway Iway an'tcay andstay imhay.'
>>> text_to_piglatin("His name is Dr. Jones.") # words with only consonants
'Ishay amenay isway Adray. Onesjay.'
>>> text_to_piglatin('He said "Сказки братьев Гримм" on the 12th of month 7.')
'Ehay aidsay "Сказки братьев Гримм" onway ethay 12thway ofway onthmay 7.'
----------------------------------------------------------
Jim Mahoney | Feb 2018 | cs.marlboro.college | MIT License
"""
vowels = set(['a', 'e', 'i', 'o', 'u'])
def split_word(word):
""" Return leading consonant cluster (leading)
and the rest of the characters
>>> split_word("scratch")
('scr', 'atch')
"""
leading = ''
rest = word
while rest and rest[0] not in vowels:
leading += rest[0]
rest = rest[1:]
return (leading, rest)
def word_to_piglatin(word):
""" Convert one word to piglatin
>>> word_to_piglatin('card')
'ardcay'
>>> word_to_piglatin('oops')
'oopsway'
"""
if word[0] in vowels:
return word + 'way'
else:
(leading, rest) = split_word(word)
return rest + leading + 'ay'
def text_to_piglatin(text):
""" Return text translated to pig latin. """
# TODO: Handle more than the simplest case ...
words = text.split(' ')
pig_words = map(word_to_piglatin, words)
pig_text = ' '.join(pig_words)
return pig_text
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
37811,
198,
545,
33708,
328,
15577,
10680,
10724,
13,
9078,
628,
5395,
338,
12967,
3042,
259,
628,
4091,
24457,
961,
1326,
13,
9132,
13,
220,
220,
628,
3423,
389,
5254,
1912,
319,
262,
1486,
17778,
11,
220,
198,
287,
7323,
1502,
286,
... | 2.54288 | 1,236 |
import torch
import torch.nn as nn
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
628,
198
] | 3.083333 | 12 |
# Generated by Django 3.2.5 on 2021-07-21 15:15
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
20,
319,
33448,
12,
2998,
12,
2481,
1315,
25,
1314,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import random
import copy
import json
import time
import sys
players = [QLearnerModelAgent(), RandomAgent()]
game = XOGame()
'''print players[1].observe([[['X', 'O', 'X'],['O', 'O', '.'],['.', 'X', '.']], 'O'], 0, game)
sys.exit(0)'''
totalx = 0
totalo = 0
total_games = 10000
for x in range(2):
for i in range(total_games):
game = XOGame()
current_state = game.getInitialState()
player_in_turn = 0
while not game.isTerminalState(current_state):
player_action = players[player_in_turn].observe(current_state, 0, game)
current_state = game.getNextStateFromStateAndAction(current_state, player_action)
if x == 1:
game.printBoardFromState(current_state)
if player_in_turn == 1:
print players[0].v_values.get(json.dumps(current_state), 0)
player_in_turn += 1
player_in_turn %= 2
winner = game.getBoardWinner(current_state[0])
score = 0
if winner == 'X':
score = 10000
totalx += 1
elif winner == 'O':
score = -100
totalo += 1
players[0].observe(current_state, score , game)
if x == 1:
print winner, ' wins'
print 'X wins', totalx
print 'O wins', totalo
print 1.0 * totalo / total_games
players[1] = HumanAgent()
players[0].learning_rate = 0.7
| [
11748,
4738,
198,
11748,
4866,
198,
11748,
33918,
198,
11748,
640,
198,
11748,
25064,
628,
628,
628,
198,
32399,
796,
685,
9711,
451,
1008,
17633,
36772,
22784,
14534,
36772,
3419,
60,
198,
6057,
796,
1395,
7730,
480,
3419,
198,
7061,
6... | 2.131343 | 670 |
# Clone Graph
# https://www.interviewbit.com/problems/clone-graph/
#
# Clone an undirected graph. Each node in the graph contains a label and a list of its neighbors.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Definition for a undirected graph node
# class UndirectedGraphNode:
# def __init__(self, x):
# self.label = x
# self.neighbors = []
# @param node, a undirected graph node
# @return a undirected graph node | [
2,
30698,
29681,
198,
2,
3740,
1378,
2503,
13,
3849,
1177,
2545,
13,
785,
14,
1676,
22143,
14,
21018,
12,
34960,
14,
198,
2,
198,
2,
30698,
281,
3318,
1060,
276,
4823,
13,
5501,
10139,
287,
262,
4823,
4909,
257,
6167,
290,
257,
13... | 2.571429 | 196 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand-.- XBMC Plugin
# Canale casacinema
# ------------------------------------------------------------
import re
import urlparse
from core import httptools
from core import config
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoSod
__channel__ = "casacinema"
host = 'https://www.casacinema.news'
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0'],
['Accept-Encoding', 'gzip, deflate'],
['Referer', '%s/genere/serie-tv' % host],]
# ==============================================================================================================================================================================
# ==============================================================================================================================================================================
# ==============================================================================================================================================================================
# ==============================================================================================================================================================================
# ==============================================================================================================================================================================
# ==============================================================================================================================================================================
# ==============================================================================================================================================================================
# ==============================================================================================================================================================================
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
20368,
1783,
10541,
198,
2,
4269,
623,
368,
392,
12,
7874,
1395,
33,
9655,
42636,
198,
2,
1680,
1000,
6124,
330,
7749,
64,
198,
198,
2,
20368,
1783,
10541,
198,
1... | 7.114478 | 297 |
try:
# Python 2 old-style classes
from types import ClassType as class_type # type: ignore
class_types = (class_type, type)
string_types = (unicode, str) # type: ignore # pylint: disable=undefined-variable
except ImportError:
class_types = (type,) # type: ignore
string_types = (str,) # type: ignore
| [
28311,
25,
198,
220,
220,
220,
1303,
11361,
362,
1468,
12,
7635,
6097,
198,
220,
220,
220,
422,
3858,
1330,
5016,
6030,
355,
1398,
62,
4906,
220,
1303,
2099,
25,
8856,
628,
220,
220,
220,
1398,
62,
19199,
796,
357,
4871,
62,
4906,
... | 2.869565 | 115 |
"""
This is an equality comparator for hdf5 files.
"""
import h5py
import itertools
import numpy
import sys
def files_match(filename1, filename2):
"Checks that two files have the same HDF5 structure."
f1 = h5py.File(filename1, mode='r')
f2 = h5py.File(filename2, mode='r')
for k in iter(f1):
# special case for the top level: skip randomly-generated refs
if k in '#refs#':
print >>sys.stderr, 'skip: ' + k
continue
print >>sys.stderr, 'check: ' + k
if not subset(f1, f2, f1[k], f2[k], path=[k], verbose=True):
return False
if not subset(f2, f1, f2[k], f1[k], path=[k]):
return False
return True
def subset(f1, f2, a, b, path=None, verbose=False):
"""Returns true if object a in f1 is a subset of object b in f2.
path, if passed, tracks the location within the HDF5 structure.
"""
if not path:
path = []
a_t = type(a)
b_t = type(b)
if not type_equiv(a_t, b_t):
print_diff(a_t, b_t, path)
return False
elif a_t == h5py.h5r.Reference:
return subset(f1, f2, f1[a], f2[b], path + ['<r>'], verbose)
elif a_t == h5py.Dataset:
return subset(f1, f2, a.value, b.value, path + ['<d>'], verbose)
elif a_t == numpy.ndarray:
for i, (x, y) in enumerate(itertools.izip_longest(a, b)):
cur = '<arr[{}]>'.format(i)
if not subset(f1, f2, x, y, path + [cur], verbose):
return False
return True
elif a_t in [numpy.int8, numpy.int16, numpy.int32, numpy.int64,
numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64,
numpy.float, numpy.float64]:
if not a == b:
print_diff(a, b, path)
return False
return True
elif a_t == h5py._hl.group.Group:
if verbose:
print_path(path)
for k in a.keys():
cur = k
if verbose:
print >>sys.stderr, ' ' + str(k)
if not k in b:
print_diff(a[k], None, path + [cur])
return False
if not subset(f1, f2, a[k], b[k], path + [cur], verbose):
return False
return True
else:
print >>sys.stderr, 'Unknown type: ' + str(a_t)
print_path(path)
return False
| [
37811,
198,
1212,
318,
281,
10537,
4616,
1352,
329,
289,
7568,
20,
3696,
13,
198,
37811,
198,
11748,
289,
20,
9078,
198,
11748,
340,
861,
10141,
198,
11748,
299,
32152,
198,
11748,
25064,
198,
198,
4299,
3696,
62,
15699,
7,
34345,
16,... | 1.932023 | 1,221 |
print("This is pandas basics")
brics = pd.DataFrame(dict)
print(brics)
| [
198,
4798,
7203,
1212,
318,
19798,
292,
19165,
4943,
198,
198,
1671,
873,
796,
279,
67,
13,
6601,
19778,
7,
11600,
8,
198,
4798,
7,
1671,
873,
8,
198
] | 2.517241 | 29 |
import os
from django.conf import settings
from django.core.management.base import BaseCommand
import gspread
from conferences.models import (
ConferenceEmailTemplate,
ConferenceEmailRegistration,
ConferenceEmailLogs,
)
| [
11748,
28686,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
198,
11748,
308,
43639,
628,
198,
6738,
19993,
13,
27530,
1330,
357,
198,
220,
220,
220,
... | 3.646154 | 65 |
from django.urls import path
from .views import index, store, update
urlpatterns = [
path('tallas/', index, name='sizes.index'),
path('crear-nueva-talla/', store, name='sizes.store'),
path('actualizar-talla/<id>/', update, name='sizes.update'),
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
201,
198,
6738,
764,
33571,
1330,
6376,
11,
3650,
11,
4296,
201,
198,
201,
198,
6371,
33279,
82,
796,
685,
201,
198,
220,
220,
220,
3108,
10786,
83,
7826,
14,
3256,
6376,
11,
1438,
11639... | 2.509434 | 106 |
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import KBinsDiscretizer
pipe_cat = OneHotEncoder(handle_unknown='ignore')
pipe_num = KBinsDiscretizer()
| [
6738,
1341,
35720,
13,
3866,
36948,
1330,
1881,
21352,
27195,
12342,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
14204,
1040,
15642,
1186,
7509,
198,
198,
34360,
62,
9246,
796,
1881,
21352,
27195,
12342,
7,
28144,
62,
34680,
11639,
464... | 3.396226 | 53 |
import unittest
from RegExpBuilder import RegExpBuilder
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
201,
198,
6738,
3310,
16870,
32875,
1330,
3310,
16870,
32875,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
201,
198
] | 2.658537 | 41 |
from __future__ import unicode_literals
import os
from django.conf import settings
from mayan.apps.converter.classes import Layer
from mayan.apps.converter.layers import layer_saved_transformations
from ..literals import PAGE_RANGE_ALL
from ..models import DocumentType
from .literals import (
TEST_DOCUMENT_TYPE_DELETE_PERIOD, TEST_DOCUMENT_TYPE_DELETE_TIME_UNIT,
TEST_DOCUMENT_TYPE_LABEL, TEST_DOCUMENT_TYPE_LABEL_EDITED,
TEST_DOCUMENT_TYPE_QUICK_LABEL, TEST_DOCUMENT_TYPE_QUICK_LABEL_EDITED,
TEST_SMALL_DOCUMENT_FILENAME, TEST_SMALL_DOCUMENT_PATH,
TEST_TRANSFORMATION_ARGUMENT, TEST_TRANSFORMATION_CLASS,
TEST_VERSION_COMMENT
)
__all__ = ('DocumentTestMixin',)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
28686,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6738,
743,
272,
13,
18211,
13,
1102,
332,
353,
13,
37724,
1330,
34398,
198,
6738,
743,
... | 2.609665 | 269 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from oslo_log import log as logging
from stackalytics.processor import utils
LOG = logging.getLogger(__name__)
INDEPENDENT = '*independent'
ROBOTS = '*robots'
def get_company_by_email(domains_index, email):
"""Get company based on email domain
Automatically maps email domain into company name. Prefers
subdomains to root domains.
:param domains_index: dict {domain -> company name}
:param email: valid email. may be empty
:return: company name or None if nothing matches
"""
if not email:
return None
name, at, domain = email.partition('@')
if domain:
parts = domain.split('.')
for i in range(len(parts), 1, -1):
m = '.'.join(parts[len(parts) - i:])
if m in domains_index:
return domains_index[m]
return None
def update_user_affiliation(domains_index, user):
"""Update user affiliation
Affiliation is updated only if user is currently independent
but makes contribution from company domain.
:param domains_index: dict {domain -> company name}
:param user: user profile
"""
for email in user.get('emails'):
company_name = get_company_by_email(domains_index, email)
uc = user['companies']
if (company_name and (len(uc) == 1) and
(uc[0]['company_name'] == INDEPENDENT)):
LOG.debug('Updating affiliation of user %s to %s',
user['user_id'], company_name)
uc[0]['company_name'] = company_name
break
def merge_user_profiles(domains_index, user_profiles):
"""Merge user profiles into one
The function merges list of user profiles into one figures out which
profiles can be deleted.
:param domains_index: dict {domain -> company name}
:param user_profiles: user profiles to merge
:return: tuple (merged user profile, [user profiles to delete])
"""
LOG.debug('Merge profiles: %s', user_profiles)
# check of there are more than 1 launchpad_id
lp_ids = set(u.get('launchpad_id') for u in user_profiles
if u.get('launchpad_id'))
if len(lp_ids) > 1:
LOG.debug('Ambiguous launchpad ids: %s on profiles: %s',
lp_ids, user_profiles)
merged_user = {} # merged user profile
# collect ordinary fields
for key in ['seq', 'user_name', 'user_id', 'github_id', 'launchpad_id',
'companies', 'static', 'zanata_id']:
value = next((v.get(key) for v in user_profiles if v.get(key)),
None)
if value:
merged_user[key] = value
# update user_id, prefer it to be equal to launchpad_id
merged_user['user_id'] = (merged_user.get('launchpad_id') or
merged_user.get('user_id'))
# always preserve `user_name` since its required field
if 'user_name' not in merged_user:
merged_user['user_name'] = merged_user['user_id']
# merge emails
emails = set([])
core_in = set([])
for u in user_profiles:
emails |= set(u.get('emails', []))
core_in |= set(u.get('core', []))
merged_user['emails'] = sorted(list(emails))
if core_in:
merged_user['core'] = sorted(list(core_in))
gerrit_ids = _merge_gerrit_ids(user_profiles)
if gerrit_ids:
merged_user['gerrit_ids'] = gerrit_ids
# merge companies
merged_companies = merged_user['companies']
for u in user_profiles:
companies = u.get('companies')
if companies:
if (companies[0]['company_name'] != INDEPENDENT or
len(companies) > 1):
merged_companies = companies
break
merged_user['companies'] = merged_companies
update_user_affiliation(domains_index, merged_user)
users_to_delete = []
seqs = set(u.get('seq') for u in user_profiles if u.get('seq'))
if len(seqs) > 1:
# profiles are merged, keep only one, remove others
seqs.remove(merged_user['seq'])
for u in user_profiles:
if u.get('seq') in seqs:
users_to_delete.append(u)
return merged_user, users_to_delete
def are_users_same(users):
"""True if all users are the same and not Nones"""
x = set(u.get('seq') for u in users)
return len(x) == 1 and None not in x
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 2.485555 | 1,973 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
import math
import random as rand
from itertools import product
import numpy as np
from mapel.voting.models_main import store_ordinal_election
from mapel.voting.metrics.main_ordinal_distances import compute_swap_bf_distance
from mapel.voting.objects.OrdinalElection import OrdinalElection
try:
from sympy.utilities.iterables import multiset_permutations
except:
pass
def generate_all_ordinal_elections(experiment, num_candidates, num_voters):
""" At the same time generate elections and compute distances """
id_ctr = 0
experiment.elections = {}
a = [i for i in range(num_candidates)]
A = list(multiset_permutations(a))
if num_voters == 3:
X = [p for p in product([a], A, A)]
elif num_voters == 4:
X = [tuple(p) for p in product([a], A, A, A)]
elif num_voters == 5:
X = [tuple(p) for p in product([a], A, A, A, A)]
Y = []
for votes in X:
ordered_votes = sorted(votes)
Y.append(ordered_votes)
Z = []
tmp_ctr = 0
for ordered_votes in Y:
if ordered_votes not in Z:
model_id = 'all'
election_id = f'{model_id}_{id_ctr}'
params = {'id_ctr': id_ctr}
ballot = 'ordinal'
new_election = OrdinalElection(experiment.experiment_id, election_id, votes=ordered_votes,
num_voters=num_voters, num_candidates=num_candidates)
for target_election in experiment.elections.values():
if target_election.election_id != new_election.election_id:
obj_value, _ = compute_swap_bf_distance(target_election, new_election)
if obj_value == 0:
print('dist == 0')
break
else:
print(id_ctr, tmp_ctr)
store_ordinal_election(experiment, model_id, election_id, num_candidates, num_voters, params,
ballot, votes=ordered_votes)
id_ctr += 1
experiment.elections[election_id] = new_election
Z.append(ordered_votes)
tmp_ctr += 1
print(len(X), len(Y), len(Z))
# Compute distances between current election and all previous elections
# for i in range(id_ctr):
# # experiment.elections[election_id]
#
#
# if a dist=0 break
# for
# else:
# store the election
# model_id =''
# election_id = ''
# store_ordinal_election(experiment, model_id, election_id, num_candidates, num_voters,
# params, ballot)
# Store the distances
| [
11748,
10688,
198,
11748,
4738,
355,
43720,
198,
198,
6738,
340,
861,
10141,
1330,
1720,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
3975,
417,
13,
85,
10720,
13,
27530,
62,
12417,
1330,
3650,
62,
585,
1292,
62,
14300,
198,
67... | 2.148594 | 1,245 |
from django.conf.urls import url, include
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register(r'budget-detail', views.BudgetViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
2291,
198,
6738,
1334,
62,
30604,
1330,
41144,
198,
198,
6738,
764,
1330,
5009,
628,
198,
472,
353,
796,
41144,
13,
19463,
49,
39605,
3419,
198,
472,
353,
13,
30238,
7,
8... | 2.939759 | 83 |
# Copyright 2020 AUI, Inc. Washington DC, USA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
this module will be included in the api
"""
##########################
def chansmooth(xds, type='triang', size=3, gain=1.0, window=None):
"""
Apply a smoothing kernel to the channel axis
Parameters
----------
xds : xarray.core.dataset.Dataset
input Visibility Dataset
type : str or tuple
type of window function to use: 'boxcar', 'triang', 'hann' etc. Default is 'triang'. Scipy.signal is used to generate the
window weights, refer to https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows for a
complete list of supported windows. If your window choice requires additional parameters, use a tuple e.g. ('exponential', None, 0.6)
size : int
width of window (# of channels). Default is 3
gain : float
gain factor after convolution. Used to set weights. Default is unity gain (1.0)
window : list of floats
user defined window weights to apply (all other options ignored if this is supplied). Default is None
Returns
-------
xarray.core.dataset.Dataset
New Visibility Dataset with updated data
"""
import xarray
import numpy as np
from scipy.signal import get_window
if window is None:
window = gain * get_window(type, size, False) / (np.sum(get_window(type, size, False)))
else:
window = np.atleast_1d(window)
window = xarray.DataArray(window, dims=['window'])
# save names of coordinates, then reset them all to variables
coords = [cc for cc in list(xds.coords) if cc not in xds.dims]
new_xds = xds.reset_coords()
# create rolling window view of dataset along channel dimension
rolling_xds = new_xds.rolling(chan=size, min_periods=1, center=True).construct('window')
for dv in rolling_xds.data_vars:
xda = rolling_xds.data_vars[dv]
# apply chan smoothing to compatible variables
if ('window' in xda.dims) and (new_xds[dv].dtype.type != np.str_) and (new_xds[dv].dtype.type != np.bool_):
new_xds[dv] = xda.dot(window).astype(new_xds[dv].dtype)
# return the appropriate variables to coordinates and stick attributes back in
new_xds = new_xds.set_coords(coords).assign_attrs(xds.attrs)
return new_xds
| [
2,
220,
220,
15069,
12131,
317,
10080,
11,
3457,
13,
2669,
6257,
11,
4916,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
220,
345,
743,
407,
779,
428,
... | 2.701287 | 1,088 |
import os
from setuptools import find_packages, setup
# TODO random links...
# https://docs.pytest.org/en/latest/goodpractices.html?highlight=src#tests-outside-application-code
# https://blog.ionelmc.ro/2014/05/25/python-packaging/#the-structure
# https://realpython.com/pypi-publish-python-package/
# https://github.com/navdeep-G/samplemod/blob/master/setup.py
# https://github.com/navdeep-G/setup.py/blob/master/setup.py
# https://packaging.python.org/guides/distributing-packages-using-setuptools/
# https://github.com/tobgu/pyrsistent
# https://github.com/tobgu/pyrsistent/blob/master/requirements.txt
# https://setuptools.readthedocs.io/en/latest/setuptools.html
# TODO also look at pytest for package layout, they have a nice almost-everything-private code layout
# TODO set up codecov
VERSION = "0.1.0"
PYTHON_REQUIRES = "~=3.6"
setup(
name="ccs-py",
use_scm_version={"write_to": "src/ccs/_version.py"},
description="CCS language for config files",
long_description=read("README.md"),
long_description_content_type="text/markdown",
author="Matt Hellige",
author_email="matt@immute.net",
url="https://github.com/hellige/ccs-py",
python_requires=PYTHON_REQUIRES,
classifiers=[
"License :: OSI Approved :: MIT License",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="ccs config configuration",
install_requires=["pyrsistent"],
packages=find_packages("src"),
package_dir={"": "src"},
setup_requires=["setuptools-scm",],
entry_points={"console_scripts": ["ccs = ccs.cli:main",]},
)
| [
11748,
28686,
198,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
198,
2,
16926,
46,
4738,
6117,
986,
198,
2,
3740,
1378,
31628,
13,
9078,
9288,
13,
2398,
14,
268,
14,
42861,
14,
11274,
29152,
1063,
13,
6494,
30... | 2.654062 | 714 |
# Python3
from solution1 import floatRange as f
qa = [
(-0.9, 0.45, 0.2,
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3]),
(1.5, 1.5, 10,
[]),
(1, 2, 1.5,
[1]),
(-21.11, 21.11, 1.11,
[-21.11, -20, -18.89, -17.78, -16.67, -15.56, -14.45, -13.34, -12.23, -11.12, -10.01, -8.9, -7.79, -6.68, -5.57, -4.46, -3.35, -2.24, -1.13, -0.02, 1.09, 2.2, 3.31, 4.42, 5.53, 6.64, 7.75, 8.86, 9.97, 11.08, 12.19, 13.3, 14.41, 15.52, 16.63, 17.74, 18.85, 19.96, 21.07]),
(0, 1, 0.05,
[0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])
]
for *q, a in qa:
for i, e in enumerate(q):
print('input{0}: {1}'.format(i + 1, e))
ans = [ round(i, 5) for i in f(*q) ]
if ans != a:
print(' [failed]')
print(' output:', ans)
print(' expected:', a)
else:
print(' [ok]')
print(' output:', ans)
print()
| [
2,
11361,
18,
198,
198,
6738,
4610,
16,
1330,
12178,
17257,
355,
277,
198,
198,
20402,
796,
685,
198,
220,
220,
220,
13841,
15,
13,
24,
11,
657,
13,
2231,
11,
657,
13,
17,
11,
198,
220,
220,
220,
220,
25915,
15,
13,
24,
11,
53... | 1.612903 | 589 |
import asyncio
from typing import List
import discord
import discord.ext
from osu.game import Game
from settings import tourney_name, rulebook_url, footer_icon, footer_note, \
veto_timeout, newline
from utils.checks import beatmapCheck, playerCheck
class Match:
"""
Represents an osu! match
"""
@property
@property
@property
def swap_players(self):
"""
Swap player1 and player2
"""
# swap player objects
p1 = self.player2
p1w = self.player2_wins
p2 = self.player1
p2w = self.player1_wins
# set new players
self.player1 = p1
self.player1_wins = p1w
self.player2 = p2
self.player2_wins = p2w
| [
11748,
30351,
952,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
36446,
198,
11748,
36446,
13,
2302,
198,
198,
6738,
267,
2385,
13,
6057,
1330,
3776,
198,
6738,
6460,
1330,
4205,
1681,
62,
3672,
11,
3896,
2070,
62,
6371,
11,
2366,
26... | 2.282609 | 322 |
from typing import Dict, Optional
import base64
import hashlib
import hmac
import json
import re
import arrow
from pydantic import conint
RequestContent = Dict
def verify_signed_request(
signed_request,
app_secret,
acceptable_freshness_sec: Optional[conint(ge=0)] = None,
) -> Optional[RequestContent]:
"""
Verify Signed Request from Context object retrieves from webview, frontend
https://developers.facebook.com/docs/messenger-platform/webview/context
fork from https://gist.github.com/adrienjoly/1373945/0434b4207a268bdd9cbd7d45ac22ec33dfaad199
"""
encoded_signature, payload = signed_request.split(".")
signature = base64_url_decode(encoded_signature)
request_content = json.loads(base64_url_decode(payload))
issued_at = arrow.get(request_content["issued_at"])
if request_content.get("algorithm").upper() != "HMAC-SHA256":
raise NotImplementedError("Unknown algorithm")
elif (
acceptable_freshness_sec
and issued_at.shift(seconds=acceptable_freshness_sec) < arrow.utcnow()
):
raise Exception(
f"This signed request was too old. It was issue at {issued_at.format()}"
)
else:
calculated_signature = hmac.new(
str.encode(app_secret), str.encode(payload), hashlib.sha256
).digest()
if signature != calculated_signature:
return None
else:
return request_content
pattern = r"(.+)\.(.+)"
signed_request_regex = re.compile(pattern)
def verify_webhook_body(signature, app_secret, body):
"""
https://developers.facebook.com/docs/messenger-platform/webhook#security
"""
# signature = request.headers["X-Hub-Signature"]
assert len(signature) == 45
assert signature.startswith("sha1=")
signature = signature[5:]
# body = await request.body()
expected_signature = hmac.new(
str.encode(app_secret), body, hashlib.sha1
).hexdigest()
if expected_signature != signature:
return False
return True
| [
6738,
19720,
1330,
360,
713,
11,
32233,
198,
198,
11748,
2779,
2414,
198,
11748,
12234,
8019,
198,
11748,
289,
20285,
198,
11748,
33918,
198,
11748,
302,
198,
198,
11748,
15452,
198,
6738,
279,
5173,
5109,
1330,
369,
600,
628,
198,
198,... | 2.64026 | 770 |
"""
Undo/redo framework for anim.edit
"""
from x7.geom.typing import *
from x7.geom.model import ControlPoint
__all__ = ['Command', 'CommandDummy', 'CommandStack', 'CommandEditCP']
class Command(object):
"""ABC for Command pattern"""
def do(self):
"""Apply the change and call .update() or .erase() on impacted objects"""
raise NotImplementedError
def undo(self):
"""Apply the change and call .update() or .erase() on impacted objects"""
raise NotImplementedError
class CommandDummy(Command):
"""Placeholder command that does nothing"""
| [
37811,
198,
31319,
78,
14,
48454,
9355,
329,
2355,
13,
19312,
198,
37811,
198,
198,
6738,
2124,
22,
13,
469,
296,
13,
774,
13886,
1330,
1635,
198,
6738,
2124,
22,
13,
469,
296,
13,
19849,
1330,
6779,
12727,
628,
198,
834,
439,
834,
... | 2.994975 | 199 |
#
# COPYRIGHT (C) 2012-2013 TCS Ltd
#
"""
.. module:: fpvariants
:platform: Unix, Windows, MacOSX
:synopsis: Module to list out variants downstream to frameshift/stopgain
mutation also present in same chromatid.
.. moduleauthor:: Kunal Kundu (kunal@atc.tcs.com); modified by changjin.hong@gmail.com
Module to list out variants downstream to frameshift/stopgain mutation also
present in same chromatid.
INPUT -
Input to this module -
i. VCF file
ii. Child SampleID in VCF
iii. Father SampleID in VCF
iv. Mother SampleID in VCF
v. Threshold GQ (Genotype Quality)
This module also works if the parent information is not known.
OUTPUT -
The output is in tsv format and is printed to console.
"""
from gcn.lib.io import vcf
import sys
import argparse
from gcn.lib.databases.refgene import Refgene
from gcn.lib.utils.phase import phase
from gcn.lib.varann.vartype.varant import varant_parser as vp
def check_genotype(rec, pedigree, GQ_THRES):
"""Checks for presence of genotype and its quality
for the Child SampleID, Father SampleID and Mother
SampleID.
Args:
- rec(dictionary): Parsed vcf record as generated by VCF parser.
- pedigree(list): [Father SampleID, Mother SampleID,
Child SampleID]. Expects the order in which
the SampleIDs are mentioned above.
- GQ_THRES(int): Threshold Genotype Quality
Returns:
- genotypes(tuple): Genotypes of the pedigree.
For e.g. genotypes=('0/1', '0/0', '0/1')
Genotypes are in order
- Father, Mother, Child in the tuple.
"""
genotypes = []
c = pedigree[2] # Child
if rec[c]['GT'] != './.' and rec[c]['GQ'] >= GQ_THRES:
if rec[c]['GT'] != '0/0':
genotypes.append(rec[c]['GT'])
if pedigree[0]: # Father
p1 = pedigree[0]
if rec[p1]['GT'] != './.' and rec[p1]['GQ'] >= GQ_THRES:
genotypes.insert(0, rec[p1]['GT'])
else:
genotypes.insert(0, './.')
else:
genotypes.insert(0, './.')
if pedigree[1]: # Mother
p2 = pedigree[1]
if rec[p2]['GT'] != './.' and rec[p2]['GQ'] >= GQ_THRES:
genotypes.insert(1, rec[p2]['GT'])
else:
genotypes.insert(1, './.')
else:
genotypes.insert(1, './.')
else:
return genotypes
else:
return genotypes
return tuple(genotypes)
def get_gene_data(vcffile, pedigree, GQ_THRES):
"""Retrieves gene_transcript wise variants where there exits at least one
frameshift/stopgain mutation.
Args:
- vcffile(str): Input VCF file.
Note - VCF should be VARANT annotated.
- pedigree(list): [Father SampleID, Mother SampleID,
Child SampleID]. Expects the order in which
the SampleIDs are mentioned above.
- GQ_THRES(int): Threshold Genotype Quality
Returns:
- gene_data_phased(dictionary): Genotype Phased gene_transcript
wise variants where there is
at least one Frameshift/
Stopgain mutation.
- gene_data_unphased(dictionary): Genotype Unphased gene_transcript
wise variants where there is
at least one Frameshift/Stopgain
mutation in homozygous state.
"""
data1 = {}
data2 = {}
FILTER = ['PASS', 'VQSRTrancheSNP99.00to99.90']
v = vcf.VCFParser(vcffile)
for rec in v:
v.parseinfo(rec)
v.parsegenotypes(rec)
varfltr = rec['filter']
if len([True for flt in FILTER if flt in varfltr]) > 0:
genotypes = check_genotype(rec, pedigree, GQ_THRES)
if genotypes:
pg = phase(*genotypes)
if pg[1] == '|':
c1, c2 = int(pg[0]), int(pg[-1])
va = vp.parse(rec.info)
for idx, altid in enumerate([c1, c2]):
if altid != 0:
if altid in va:
gene = va[altid].keys()[0]
if len(va[altid][gene]) > 0:
for ta in va[altid][gene]['TRANSCRIPTS']:
if ta.region == 'CodingExonic':
trans_id = ta.trans_id
key = (rec.chrom, rec.pos, \
','.join(rec.id), rec.ref, \
rec.alt[altid - 1], altid)
gi = (gene, trans_id)
if gi not in data1:
data1[gi] = [{}, {}]
data1[gi][idx][key] = \
[ta.mutation,
pg,
genotypes[0],
genotypes[1]]
else:
data1[gi][idx][key] = \
[ta.mutation,
pg,
genotypes[0],
genotypes[1]]
else:
c1, c2 = int(pg[0]), int(pg[-1])
va = vp.parse(rec.info)
for altid in [c1, c2]:
if altid != 0:
if altid in va:
gene = va[altid].keys()[0]
if len(va[altid][gene]) > 0:
for ta in va[altid][gene]['TRANSCRIPTS']:
if ta.region == 'CodingExonic':
trans_id = ta.trans_id
key = (rec.chrom, rec.pos, \
','.join(rec.id), rec.ref, \
rec.alt[altid - 1], altid)
gi = (gene, trans_id)
if gi not in data2:
data2[gi] = [{}]
data2[gi][0][key] = \
[ta.mutation,
pg,
genotypes[0],
genotypes[1]]
else:
data2[gi][0][key] = \
[ta.mutation,
pg,
genotypes[0],
genotypes[1]]
gene_data_phased = {}
for k, v in data1.items():
for e in v:
if len(e) > 0:
if len(e.values()) > 1:
if len([True for mut in [x[0] for x in e.values()] \
if mut.startswith('FrameShift') \
or mut == 'StopGain']) > 0:
if k not in gene_data_phased:
gene_data_phased[k] = [e]
else:
gene_data_phased[k].append(e)
del data1
gene_data_unphased = {}
for k, v in data2.items():
for e in v:
if len(e) > 0:
if len(e.values()) > 1:
if len([True for y in [(x[0], x[1]) for x in e.values()] \
if (y[0].startswith('FrameShift') or \
y[0] == 'StopGain') and \
int(y[1][0]) == int(y[1][2])]) > 0:
if k not in gene_data_unphased:
gene_data_unphased[k] = [e]
else:
gene_data_unphased[k].append(e)
del data2
return gene_data_phased, gene_data_unphased
def filter_dwnmut(gene_data):
"""Removes the variants upstream to Frameshift/StopGain mutation.
Args:
- gene_data(dictionary): gene_transcript wise variants where
there is at least one Frameshift/Stopgain
mutation.
Returns:
- flt_data(dictionary): gene_transcript wise variants where there
is at least one Frameshift/StopGain mutation
and at least one downstream coding exonic
variant.
"""
rfgene = Refgene()
flt_gene_data = {}
for gene_info, val in gene_data.items():
trans_id = gene_info[1]
strand = rfgene.get_strand(trans_id)
if not strand:
continue
for e in val:
t = {}
variants = e.keys()
if strand == '+':
variants.sort()
elif strand == '-':
variants.sort(reverse=True)
size = 0
mut_type = ''
flag = False
for var in variants:
if flag == False and e[var][0] == 'StopGain':
mut_type = 'StopGain'
t[tuple(list(var) + ['#'])] = e[var]
flag = True
elif flag == False and e[var][0].startswith('FrameShift'):
if e[var][0][10:] == 'Insert':
size += len(var[4]) - 1
elif e[var][0][10:] == 'Delete':
size -= len(var[3]) - 1
t[tuple(list(var) + ['#'])] = e[var]
flag = True
elif flag == True:
if mut_type == 'StopGain':
t[var] = e[var]
elif e[var][0].startswith('FrameShift'):
if e[var][0][10:] == 'Insert':
size += len(var[4]) - 1
elif e[var][0][10:] == 'Delete':
size -= len(var[3]) - 1
t[var] = e[var]
if size == 0 or divmod(size, 3)[1] == 0:
flag = False
elif e[var][0].startswith('NonFrameShift'):
if e[var][0][13:] == 'Insert':
size += len(var[4]) - 1
elif e[var][0][13:] == 'Delete':
size -= len(var[3]) - 1
t[var] = e[var]
if size == 0 or divmod(size, 3)[1] == 0:
flag = False
else:
t[var] = e[var]
if len(t) > 1:
key = tuple(list(gene_info) + [strand])
if key not in flt_gene_data:
flt_gene_data[key] = [t]
else:
if t != flt_gene_data[key][0]:
flt_gene_data[key].append(t)
return flt_gene_data
def display(d1, d2, pedigree, vcffile):
"""Prints to console the Coding Exonic variants downstream to
Frameshift/StopGain Mutation."""
print '## VCF file used %s' % vcffile
print '## Pedigree used %s' % ','.join([e for e in pedigree if e])
print '## Details about list of variants downstream to \
FrameShift/StopGain Mutation.'
header = ['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'ALT_ID', 'GENE',
'TRANSCRIPT', 'STRAND', 'MUTATION', 'TYPE', 'CHROMATID',
'CHILD-%s' % pedigree[-1]]
if pedigree[0]:
header.append('FATHER-%s' % pedigree[0])
if pedigree[1]:
header.append('MOTHER-%s' % pedigree[1])
print '\t'.join(header)
for d in [d1, d2]:
gene_info = d.keys()
gene_info.sort()
for gi in gene_info:
gene, trans_id, strand = gi
val = d[gi]
chrom_pair = 0
for e in val:
chrom_pair += 1
variants = e.keys()
if strand == '+':
variants.sort()
else:
variants.sort(reverse=True)
for variant in variants:
if int(e[variant][1][0]) == int(e[variant][1][-1]):
chromatid = 'BOTH_CHROM'
elif e[variant][1][1] == '|':
if variant[5] == int(e[variant][1][0]):
chromatid = 'FATHER_CHROM'
elif variant[5] == int(e[variant][1][-1]):
chromatid = 'MOTHER_CHROM'
else:
chromatid = 'UNKNOWN_CHROM'
if variant[-1] == '#':
print '\n'
print '\t'.join([str(x) for x in variant[:-1]] + \
[gene, trans_id, strand,
e[variant][0],
e[variant][0].upper(),
chromatid] + e[variant][1:])
else:
print '\t'.join([str(x) for x in variant] + \
[gene, trans_id, strand,
e[variant][0], 'DOWNSTREAM',
chromatid] + e[variant][1:])
def compute(vcffile, GQ_THRES, pedigree):
"""Identifies the coding exonic variants downstream to frameshift/
stopgain mutation and prints the output to console."""
# Get the coding exonic variants transcript wise where for a transcript
# there is atleast one frameshift/stopgain causing variant.
gene_data_phased, gene_data_unphased = get_gene_data(vcffile,
pedigree, GQ_THRES)
# Remove the variants upstream to Frameshift/stopgain causing variant
# for phased data
dwnmut_data_phased = filter_dwnmut(gene_data_phased)
# Remove the variants upstream to Frameshift/stopgain causing variant
# for unphased data
dwnmut_data_unphased = filter_dwnmut(gene_data_unphased)
# Print the output to console
display(dwnmut_data_phased, dwnmut_data_unphased, pedigree, vcffile)
def main():
"""Main script to extract exoding exonic variants downstream to Frameshift/
StopGain mutation and also present in same chromatid."""
desc = 'Script to extract all CodingExonic variants downstream to\
FrameShift mutation and also occuring in same chromatid.'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-i', '--input', dest='vcffile', type=str,
help='VCF file')
parser.add_argument('-f', '--father', dest='father', type=str,
help='Sample Name for Father as mentioned in VCF')
parser.add_argument('-m', '--mother', dest='mother', type=str,
help='Sample Name for Mother as mentioned in VCF')
parser.add_argument('-c', '--child', dest='child', type=str,
help='Sample Name for Child as mentioned in VCF')
parser.add_argument('-GQ', '--genotype_quality', dest='gq', type=str,
default=30, help='Genotype Quality of the Samples')
args = parser.parse_args()
pedigree = [args.father, args.mother, args.child]
compute(args.vcffile, float(args.gq), pedigree)
sys.exit(0)
if __name__ == '__main__':
main()
| [
2,
198,
2,
27975,
38162,
9947,
357,
34,
8,
2321,
12,
6390,
309,
7902,
12052,
198,
2,
198,
37811,
198,
492,
8265,
3712,
277,
79,
25641,
1187,
198,
220,
220,
220,
1058,
24254,
25,
33501,
11,
3964,
11,
4100,
2640,
55,
198,
220,
220,
... | 1.644023 | 10,113 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import streamlit as st
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
## Loading & preparing data
# Boston Housing: X holds 13 numeric house features, y the price target.
X, y = datasets.load_boston(return_X_y=True)
# Standardise features and target to zero mean / unit variance; the y
# scaler is kept so predictions can be mapped back to dollars later.
X_scaler = StandardScaler()
X = X_scaler.fit_transform(X)
y_scaler = StandardScaler()
y = y_scaler.fit_transform(pd.DataFrame(y)).squeeze()
# First split off 33% for evaluation, then split that part again into a
# test set and a validation set (fixed seeds keep the splits stable).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
X_test, X_val, y_test, y_val = train_test_split(X_test, y_test, test_size=0.33, random_state=42)
## Making & training the model
# Hyper-parameters are driven live from the Streamlit sidebar widgets.
LAYERS = st.sidebar.slider('Number of layers', min_value = 0, max_value = 5, value = 1, step = 1)
UNITS = st.sidebar.slider('Number of units per layer', min_value = 10, max_value = 100, value = 50, step = 5)
DROPOUT = st.sidebar.slider('Dropout rate', min_value = 0.0, max_value = 0.5, value = 0.2, step = 0.05)
ACTIVATION = st.sidebar.selectbox('Activation function', ('relu', 'tanh', 'sigmoid'))
OPTIMIZER = st.sidebar.selectbox('Optimizer', ('adam', 'sgd'))
LOSS = st.sidebar.selectbox('Loss function', ('mse', 'mae'))
BATCH_SIZE = st.sidebar.slider('Batch size', min_value = 1, max_value = 48, value = 12, step = 1)
model = make_model(layers = LAYERS, units = UNITS, dropout = DROPOUT, activation = ACTIVATION, optimizer = OPTIMIZER, loss = LOSS)
summary = model.summary()
# BUG FIX: train on the training split only.  The original called
# model.fit(X, y, ...), which trained on the full dataset (including the
# test samples) and contradicted the page text claiming all analysis uses
# data the network has never seen.  X_train/y_train were created above
# but never used anywhere else -- they are what fit() was meant to get.
history = model.fit(X_train, y_train, batch_size = BATCH_SIZE, epochs = 1000,
                    callbacks = [EarlyStopping(patience=10),
                                 ModelCheckpoint(filepath='model.h5',
                                                 monitor='val_loss',
                                                 mode='min',
                                                 save_best_only=True)],
                    validation_data = (X_val, y_val))
train_history = pd.DataFrame(history.history)
# Reload the checkpointed weights: the best (lowest val_loss) model, not
# the state after the final epoch when EarlyStopping fired.
model = load_model('model.h5')
## Making predictions
# Predict on the held-out test split and map both predictions and truth
# back to original price units with the fitted y scaler.
predictions = model.predict(X_test).squeeze()
validation = pd.DataFrame({'measured': y_scaler.inverse_transform(y_test), 'predicted': y_scaler.inverse_transform(predictions)})
## Plotting predictions
# Per-sample error (measured minus predicted) and its summary statistics.
validation['measured-predicted'] = validation['measured'] - validation['predicted']
mmp_stats = validation['measured-predicted'].describe()
# --- Streamlit page layout: intro text followed by one section per figure. ---
st.title('Dense neural network explorer')
st.write("""
## Using the Boston Housing dataset available through scikit-learn
This app allows you to explore the effect of 6 different hyper parameter settings on a Dense neural network's accuracy when predicting the price of a house using 13 datapoints about different aspects of the property.
The Dense neural network is created using Tensorflow & the Keras API, every time a hyper-parameter value is changed a new network is trained until its performance worsens (overfitting), using the callback API available through Keras
the best version of the new network is used for predictions.
## All analysis is done using a third set of data the network has never seen or been evaluated against.
### The network's training progress
The X axis shows the epoch number (a complete training cycle against all available training data) and the y axis shows the loss as defined by the "Loss Function" parameter in the sidebar.
The "loss" is the score of the network against data is has seen before, and the val_loss is the networks accuracy against data it hasn't seen before.
""")
st.line_chart(train_history)
st.write("""
### Lineplot of the measured and predicted housing prices
the X axis shows the sample number and the y axis the price in $
""")
st.line_chart(validation[['measured', 'predicted']])
st.write("""
### Kde plot and histogram of the forecasted and measured price
""")
# plot_histogram/plot_error/scatter are helpers defined elsewhere in this app.
plot_histogram()
st.write("""
### Kde plot and histogram of the difference between the predicted and the measured price
The vertical lines show the model's bias
""")
plot_error()
st.write("""
### Scatter plot of measured V predicted values
For each dot the corresponding value on the X axis is the measured price and the corresponding value on the y axis is the predicted price. A perfect forecast is represented by a straight diagonal line from bottom left to top right.
""")
scatter()
| [
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
11748,
384,
397,
1211,
355,
3013,
82,
201,
198,
201,
198,
11748,
4269,
18250,
355,
... | 2.836243 | 1,661 |
import logging
from typing import (
Iterator,
List,
Optional,
Tuple,
)
from quakestats.core.game.qlmatch import (
FullMatchInfo,
)
from quakestats.core.q3parser.api import (
Q3ParserAPI,
)
from quakestats.core.q3toql.api import (
Q3toQLAPI,
QuakeGame,
)
from quakestats.core.ql import (
QLGame,
)
from quakestats.core.qlparser.api import (
QLFeed,
QLParserAPI,
)
from quakestats.core.wh import (
Warehouse,
WarehouseItem,
)
from quakestats.dataprovider import (
analyze,
)
from quakestats.dataprovider.analyze import (
AnalysisResult,
)
from quakestats.datasource.entities import (
Q3Match,
)
from quakestats.system.context import (
SystemContext,
)
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
6738,
19720,
1330,
357,
198,
220,
220,
220,
40806,
1352,
11,
198,
220,
220,
220,
7343,
11,
198,
220,
220,
220,
32233,
11,
198,
220,
220,
220,
309,
29291,
11,
198,
8,
198,
198,
6738,
627,
461,
395,
1381,
13,
7295... | 2.406349 | 315 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2013-2014 Reinhard Stampp
# This file is part of fortrace - http://fortrace.fbi.h-da.de
# See the file 'docs/LICENSE' for copying permission.
"""This python script destroy the networks local and internet using libvirt.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
# Import the network helpers; report (but do not abort on) a broken
# fortrace installation so the error is visible when the script is run.
try:
    from fortrace.common.network import setup_networks
    from fortrace.common.network import stop_and_delete_networks
except ImportError as e:
    print("Import error in main.py! " + str(e))

if __name__ == "__main__":
    try:
        main()
    # BUG FIX: was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt; catch only real errors and signal failure.
    except Exception:
        sys.exit(1)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
34,
8,
2211,
12,
4967,
22299,
10424,
30097,
381,
198,
2,
770,
2393,
318,
636,
286,
329,
40546,
532,
... | 2.849785 | 233 |
import itertools
import re
from collections import defaultdict
from dataclasses import dataclass
from typing import Iterable, Optional
import common.input_data as input_data
@dataclass
PASSPORTS: list[str] = input_data.read("input/input4.txt")
if __name__ == "__main__":
    # Report both puzzle answers: passports with all required fields, and
    # passports whose field values are also valid.  The two helpers are
    # defined elsewhere in this module.
    print(f"Number of valid passports: "
          f"{get_number_of_valid_passports(PASSPORTS)}")
    print(f"Number of data-valid passports: "
          f"{get_number_of_valid_data_passports(PASSPORTS)}")
| [
11748,
340,
861,
10141,
198,
11748,
302,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
40806,
540,
11,
32233,
198,
198,
11748,
2219,
13,
15414,
62,
7890,
355,
5128,
62,... | 2.707865 | 178 |
from http.server import HTTPServer, SimpleHTTPRequestHandler
import ssl
import os
import argparse
if __name__ == '__main__':
    top_parser = argparse.ArgumentParser(description='Simple HTTPS server')
    # FIX: default was the string "443"; use an int so args.port is always
    # an int regardless of how argparse handles the default.
    top_parser.add_argument('--port', action="store", dest="port", type=int,
                            help="The port to listen on", default=443)
    args = top_parser.parse_args()
    # Generate a throw-away self-signed certificate/key pair for this run.
    os.system("openssl req -nodes -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -subj '/CN=mylocalhost'")
    httpd = HTTPServer(('0.0.0.0', args.port), SimpleHTTPRequestHandler)
    # FIX: ssl.SSLContext() with no protocol argument is deprecated (since
    # Python 3.10); PROTOCOL_TLS_SERVER selects server-side TLS with the
    # library's secure defaults.
    sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    sslctx.check_hostname = False  # self-signed server cert; nothing to verify
    sslctx.load_cert_chain(certfile='cert.pem', keyfile="key.pem")
    httpd.socket = sslctx.wrap_socket(httpd.socket, server_side=True)
    print(f"Server running on https://0.0.0.0:{args.port}")
    httpd.serve_forever()
| [
6738,
2638,
13,
15388,
1330,
38288,
18497,
11,
17427,
40717,
18453,
25060,
198,
11748,
264,
6649,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
220,
1353,
62,
48610,
... | 2.812287 | 293 |
#!/usr/bin/python
# Python 2 filter: reads lines from stdin whose second whitespace-separated
# field is an HH:MM:SS(.fff) timestamp, and echoes each line prefixed with
# the delta since the previous line and the absolute time, both in
# milliseconds.
import sys, os, re;
dTs = 0      # delta between consecutive timestamps, in milliseconds
oldTs=-1     # previous timestamp in ms; -1 marks "no line seen yet"
for l in sys.stdin:
    l2=l.rstrip()
    t = l2.split(' ')
    # Skip lines without at least two fields (no timestamp to parse).
    if len(t) < 2:
        continue
    ts = t[1].split(':')
    # Hours*3600 + minutes*60 + seconds gives seconds; *1000 gives ms.
    tsInMs = (int(ts[0])*3600 + int(ts[1])*60 + float(ts[2]))*1000
    #print '%f -- %s ffffffffffffff\n' % ( tsInMs, t[1])
    if (oldTs > 0):
        dTs = tsInMs - oldTs
    #t[0]=tsInMs
    # Output: "<delta_ms> <abs_ms> <original line>".
    oline = "%f %f %s" % (dTs, tsInMs, " ".join(t))
    print oline
    oldTs = tsInMs
| [
2,
48443,
14629,
14,
8800,
14,
29412,
201,
198,
201,
198,
11748,
25064,
11,
28686,
11,
302,
26,
201,
198,
201,
198,
67,
33758,
796,
657,
201,
198,
727,
33758,
10779,
16,
201,
198,
201,
198,
1640,
300,
287,
25064,
13,
19282,
259,
2... | 1.772 | 250 |
"""
Programmer: Trinav Bhattacharyya
Date of Development: 18/10/2020
This code has been developed according to the procedures mentioned in the following research article:
X.-S. Yang, S. Deb, “Cuckoo search via Levy flights”, in: Proc. of
World Congress on Nature & Biologically Inspired Computing (NaBIC 2009),
December 2009, India. IEEE Publications, USA, pp. 210-214 (2009).
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import datasets
from Py_FS.datasets import get_dataset
from Py_FS.wrapper.nature_inspired.algorithm import Algorithm
from Py_FS.wrapper.nature_inspired._utilities_test import compute_accuracy, compute_fitness, initialize, sort_agents
from Py_FS.wrapper.nature_inspired._transfer_functions import get_trans_function
# Cuckoo Search Algorithm
############################### Parameters ####################################
# #
# num_agents: number of agents #
# max_iter: maximum number of generations #
# train_data: training samples of data #
# train_label: class labels for the training samples #
# obj_function: the function to maximize while doing feature selection #
# trans_function_shape: shape of the transfer function used #
# save_conv_graph: boolean value for saving convergence graph #
# #
###############################################################################
if __name__ == '__main__':
    # Smoke test: run Cuckoo Search feature selection on sklearn's digits
    # dataset.  CS is defined elsewhere in this module.
    data = datasets.load_digits()
    algo = CS(num_agents=20, max_iter=30, train_data=data.data, train_label=data.target, save_conv_graph=True)
    algo.run()
37811,
198,
15167,
647,
25,
833,
26802,
347,
11653,
620,
560,
3972,
198,
10430,
286,
7712,
25,
1248,
14,
940,
14,
42334,
198,
1212,
2438,
468,
587,
4166,
1864,
284,
262,
9021,
4750,
287,
262,
1708,
2267,
2708,
25,
198,
55,
7874,
50,... | 2.383109 | 817 |
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables._c_m_a_p import CmapSubtable
# Translation tables between ASCII halfwidth characters (U+0021-U+007E) and
# their fullwidth forms (U+FF01-U+FF5E); the two ranges differ by 0xFEE0.
# Dict comprehensions replace the older dict(generator) spelling (C402).
HALF2FULLWIDTH = {i: i + 0xFEE0 for i in range(0x21, 0x7F)}
FULL2HALFWIDTH = {i + 0xFEE0: i for i in range(0x21, 0x7F)}
if __name__ == "__main__":
    # Demo: measure a two-line sample string at 12 pt in Courier New, first
    # via the module-level helpers, then via the FontGeom class (both are
    # defined elsewhere in this module).
    text = 'This is a test\nABCDEFGHIJKLMNOPQRSTUVW'
    fontSpec = fetchFontSpec('/Library/Fonts/Courier New.ttf')
    print(getTextDimensions(text, 12, fontSpec, [0,0]))
    fg = FontGeom('/Library/Fonts/Courier New.ttf', 12)
    print(fg.getTextDimensions(text))
| [
6738,
10369,
33637,
13,
926,
25835,
1330,
309,
10234,
756,
198,
6738,
10369,
33637,
13,
926,
25835,
13,
83,
2977,
13557,
66,
62,
76,
62,
64,
62,
79,
1330,
327,
8899,
7004,
11487,
198,
198,
39,
1847,
37,
17,
37,
9994,
54,
2389,
422... | 2.169421 | 242 |
# Election years, in chronological order.
ELECTION_YEAR_LIST = [1989, 1994, 2000, 2001, 2004, 2010, 2015, 2020]
| [
36,
16779,
2849,
62,
56,
17133,
62,
45849,
796,
685,
198,
220,
220,
220,
11104,
11,
198,
220,
220,
220,
9162,
11,
198,
220,
220,
220,
4751,
11,
198,
220,
220,
220,
5878,
11,
198,
220,
220,
220,
5472,
11,
198,
220,
220,
220,
3050... | 1.721311 | 61 |
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix,plot_confusion_matrix, ConfusionMatrixDisplay
from catboost import CatBoostClassifier
import matplotlib.pyplot as plt
from explore import all_crop_codes
import math
import numpy as np
from pathlib import Path
import json
# TODO: invalid reference (https://github.com/openEOPlatform/openeo-classification/issues/2)
# Load the precomputed per-sample feature table; the first CSV column is
# the row index.
df = pd.read_csv("resources/training_data/final_features.csv",index_col=0)
print(df.head())
## The "Other" class should in any case not be trained on Other cereals
### Left open for now: split seasonally or split the classes apart (wheat VS rye VS barley etc. or spring vs. winter)
## Remove NaN values
# Rows where B06_p50 is 65535 are treated as missing -- presumably the
# uint16 nodata marker of the export; TODO confirm.
df = df[df["B06_p50"].astype(int) != 65535]
# band_names = ["B06", "B12"] + ["NDVI", "NDMI", "NDGI", "ANIR", "NDRE1", "NDRE2", "NDRE5"] + ["ratio", "VV", "VH"]
# tstep_labels = ["t" + str(4 * index) for index in range(0, 6)]
# all_bands = [band + "_" + stat for band in band_names for stat in ["p10", "p50", "p90", "sd"] + tstep_labels]
# Feature-column names: s2 = optical bands/indices, s1 = radar bands
# (likely Sentinel-2/-1 -- confirm), each combined with percentile/std
# statistics plus six time-step labels.
band_names_s2 = ["B06", "B12"] + ["NDVI", "NDMI", "NDGI", "ANIR", "NDRE1", "NDRE2", "NDRE5"]
band_names_s1 = ["ratio", "VV", "VH"]
tstep_labels_s2 = ["t4","t7","t10","t13","t16","t19"]
tstep_labels_s1 = ["t2","t5","t8","t11","t14","t17"]
features_s2 = [band + "_" + stat for band in band_names_s2 for stat in ["p25", "p50", "p75", "sd"] + tstep_labels_s2]
features_s1 = [band + "_" + stat for band in band_names_s1 for stat in ["p25", "p50", "p75", "sd"] + tstep_labels_s1]
all_bands = features_s2 + features_s1
# Features are integral; the stratification ids are categorical strings.
df[all_bands] = df[all_bands].astype(int)
df[["groupID","zoneID"]] = df[["groupID","zoneID"]].astype(str)
# num // 10 ** (int(math.log(num, 10)) - 4 + 1)
### TODO: Group this per class that I want to predict + the "Other" class
# Map the numeric crop id onto its human-readable label.
df["y"] = df["id"].apply(lambda num: all_crop_codes[num])
# ### TEST CASE 1: TRAIN CEREALS SEPARATELY, WITHOUT TRAINING ON GRASS SPECIFICALLY
# def crop_codes_y1(num):
# crop_list = [1110, 1510, 1910, # "Winter wheat", "Winter barley", "Winter cereal", # Winter cereals
# 1120, 1520, 1920, #"Spring wheat", "Spring barley", "Spring cereal", # Spring / summer cereals
# 4351, 1200, 5100, 8100, #"Winter rapeseed", "Maize", "Potatoes", "Sugar beet",
# # "Grasses and other fodder crops", "Temporary grass crops", "Permanent grass crops" # Grasses : 9100, 9110, 9120
# ]
# if num in crop_list:
# return all_crop_codes[num]
# else:
# return "Other"
# df["y1"] = df["ids"].apply(crop_codes_y1)
# ### TEST CASE 2: TRAIN CEREALS SEPARATELY, WITH TRAINING ON GRASS SPECIFICALLY
# def crop_codes_y2(num):
# crop_list = [1110, 1510, 1910, # "Winter wheat", "Winter barley", "Winter cereal", # Winter cereals
# 1120, 1520, 1920, #"Spring wheat", "Spring barley", "Spring cereal", # Spring / summer cereals
# 4351, 1200, 5100, 8100, #"Winter rapeseed", "Maize", "Potatoes", "Sugar beet",
# 9100, 9110, 9120, # "Grasses and other fodder crops", "Temporary grass crops", "Permanent grass crops" # Grasses
# ]
# if num in crop_list:
# return all_crop_codes[num]
# else:
# return "Other"
# df["y2"] = df["ids"].apply(crop_codes_y2)
# ### TEST CASE 3: TRAIN CEREALS JOINTLY, WITHOUT TRAINING ON GRASS SPECIFICALLY
# def crop_codes_y3(num):
# crop_list = [
# 4351, 1200, 5100, 8100, #"Winter rapeseed", "Maize", "Potatoes", "Sugar beet",
# # 9100, 9110, 9120, # "Grasses and other fodder crops", "Temporary grass crops", "Permanent grass crops" # Grasses
# ]
# if num in crop_list:
# return all_crop_codes[num]
# elif num in [1110, 1510, 1910]: # "Winter wheat", "Winter barley", "Winter cereal", # Winter cereals
# return "Winter cereals"
# elif num in [1120, 1520, 1920]: #"Spring wheat", "Spring barley", "Spring cereal", # Spring / summer cereals
# return "Spring cereals"
# else:
# return "Other"
# df["y3"] = df["ids"].apply(crop_codes_y3)
### TEST CASES
## DIFFERENT GROUPINGS OF THE AEZ STRATIFICATION
## ONE WITHOUT STRATIFICATION, ALSO LOOK AT FEATURE IMPORTANCE
# NOTE(review): X1 appears unused below.
X1 = df[all_bands]
## WITH STRATIFICATION ADDED AS A FEATURE
# X2 = df[all_bands+["groupID"]+["zoneID"]]
## WITH STRATIFICATION AS SEPARATE MODELS
# print([col for col in df.columns if col not in all_bands])
## TRAIN GRASS TOGETHER WITH THE REST OR SEPARATELY
## GROUP CEREALS LATER: MAY YOU ADD UP THE PROBABILITIES? E.G. WINTER WHEAT HAS 0.1 AND WINTER BARLEY 0.2, SO THE TOTAL IS 0.3 AND THAT ONE WINS
# Model artefacts are written under resources/model1.
out_path = Path.cwd() / "resources" / "model1"
out_path.mkdir(parents=True, exist_ok=True)
## Model training
X = df[all_bands]
y = df["y"]
# Single-value grids: effectively fixed hyper-parameters; the commented
# values are the wider ranges tried earlier.
param_grid = {'learning_rate': [0.07],#[0.03, 0.1],
              'depth': [6],#[4, 6, 10]
              'l2_leaf_reg': [10],#[1, 3, 5,],
              'iterations': [10]}#, 100, 150]}
# train_classifier is defined elsewhere in this project.
train_classifier(X,y,param_grid,out_path)
### TEST CASES
## EENTJE ZONDER STRATIFICATIE, KIJK OOK FEATURE IMPORTANCE
## MET STRATIFICATIE ERBIJ ALS FEATURE
## MET STRATIFICATIE ALS IN LOSSE MODELLEN
## VERSCHILLENDE GROEPERINGEN VAN DE STRATIFICATIE
## GRAS ERBIJ TRAINEN OF LOS
## CEREALS LATER GROEPEREN: MAG JE DE PROBABILITIES OPTELLEN? E.G. WINTER WHEAT HEEFT 0.1 EN WINTER BARLEY 0.2 NOU DAN IS TOTAAL 0.3 EN DIE IS T
| [
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
11,
24846,
18243,
33538,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
9922,
62,
26675,
11,
15440,
62,
8344,
439,
62... | 2.325231 | 2,386 |
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
import select_data as sd
'''
See paper: Sensors 2018, 18(4), 1055; https://doi.org/10.3390/s18041055
"Divide and Conquer-Based 1D CNN Human Activity Recognition Using Test Data Sharpening"
by Heeryon Cho & Sang Min Yoon
This code outputs the UPPER body sensors data HAR performance using
other baseline machine learning techniques, such as
logistic regression and random forest,
given in the bar graph of Figure 15 (blue bars indicating Upper Body Sensors).
(Sensors 2018, 18(4), 1055, page 17 of 24)
'''
# Python 2 script.  Evaluates logistic regression and random forest on the
# upper-body sensor data for four label groupings; each section loads its
# split, fits both classifiers, and prints accuracy + confusion matrix.
print "========================================================="
print " Outputs performance of other ML techniques, namely,"
print " Logistic Regression & Random Forest"
print " Using UPPER body sensors data."
print "========================================================="
# --- 4-class task ("end2end") ---
print "\n==========================="
print "      [UPPER] 4-Class"
print "===========================\n"
X_train, y_train, X_valid, y_valid, X_test, y_test = sd.load_data("upper", "end2end")
# Fixed random_state keeps the reported numbers reproducible.
clf_lr = LogisticRegression(random_state=2018)
clf_lr.fit(X_train, y_train)
pred_lr = clf_lr.predict(X_test)
print "--- Logistic Regression ---"
print "Test Acc: ", accuracy_score(y_test, pred_lr)
print confusion_matrix(y_test, pred_lr), '\n'
clf_dt = RandomForestClassifier(random_state=2018, max_depth=5, n_estimators=10, max_features=1)
clf_dt.fit(X_train, y_train)
pred_dt = clf_dt.predict(X_test)
print "\n------ Random Forest ------"
print "Test Acc: ", accuracy_score(y_test, pred_dt)
print confusion_matrix(y_test, pred_dt)
#---------------------------------------------
# --- Abstract-class task ("abst"): two-way grouping (the sample output
# below shows a 2x2 confusion matrix) ---
print "\n==========================="
print "   [UPPER] Abstract Class"
print "===========================\n"
X_train, y_train, X_valid, y_valid, X_test, y_test = sd.load_data("upper", "abst")
clf_lr = LogisticRegression(random_state=2018)
clf_lr.fit(X_train, y_train)
pred_lr = clf_lr.predict(X_test)
print "--- Logistic Regression ---"
print "Test ACC: ", accuracy_score(y_test, pred_lr)
print confusion_matrix(y_test, pred_lr), '\n'
clf_dt = RandomForestClassifier(random_state=2018, max_depth=5, n_estimators=10, max_features=1)
clf_dt.fit(X_train, y_train)
pred_dt = clf_dt.predict(X_test)
print "------ Random Forest ------"
print "Test Acc: ", accuracy_score(y_test, pred_dt)
print confusion_matrix(y_test, pred_dt)
#---------------------------------------------
# --- UP-class task ("up") ---
print "\n==========================="
print "     [UPPER] UP Class"
print "===========================\n"
X_train, y_train, X_valid, y_valid, X_test, y_test = sd.load_data("upper", "up")
clf_lr = LogisticRegression(random_state=2018)
clf_lr.fit(X_train, y_train)
pred_lr = clf_lr.predict(X_test)
print "--- Logistic Regression ---"
print "Test Acc: ", accuracy_score(y_test, pred_lr)
print confusion_matrix(y_test, pred_lr), '\n'
clf_dt = RandomForestClassifier(random_state=2018, max_depth=5, n_estimators=10, max_features=1)
clf_dt.fit(X_train, y_train)
pred_dt = clf_dt.predict(X_test)
print "------ Random Forest ------"
print "Test Acc: ", accuracy_score(y_test, pred_dt)
print confusion_matrix(y_test, pred_dt)
#---------------------------------------------
# --- DOWN-class task ("down") ---
print "\n==========================="
print "    [UPPER] DOWN Class"
print "===========================\n"
X_train, y_train, X_valid, y_valid, X_test, y_test = sd.load_data("upper", "down")
clf_lr = LogisticRegression(random_state=2018)
clf_lr.fit(X_train, y_train)
pred_lr = clf_lr.predict(X_test)
print "--- Logistic Regression ---"
print "Test Acc: ", accuracy_score(y_test, pred_lr)
print confusion_matrix(y_test, pred_lr), '\n'
clf_dt = RandomForestClassifier(random_state=2018, max_depth=5, n_estimators=10, max_features=1)
clf_dt.fit(X_train, y_train)
pred_dt = clf_dt.predict(X_test)
print "------ Random Forest ------"
print "Test Acc: ", accuracy_score(y_test, pred_dt)
print confusion_matrix(y_test, pred_dt)
print "\n--- End Output ---"
'''
/usr/bin/python2.7 /home/hcilab/Documents/OSS/sensors2018cnnhar/opp/baseline_lrrf_upper.py
=========================================================
Outputs performance of other ML techniques, namely,
Logistic Regression & Random Forest
Using UPPER body sensors data.
=========================================================
===========================
[UPPER] 4-Class
===========================
--- Logistic Regression ---
Test Acc: 0.833184789067
[[4860 333 133 0]
[1379 2497 9 0]
[ 316 76 3068 0]
[ 0 0 0 793]]
------ Random Forest ------
Test Acc: 0.80830362448
[[4959 218 149 0]
[1620 2199 66 0]
[ 32 12 3416 0]
[ 9 0 475 309]]
===========================
[UPPER] Abstract Class
===========================
--- Logistic Regression ---
Test ACC: 0.973336304219
[[9131 80]
[ 279 3974]]
------ Random Forest ------
Test Acc: 0.982174688057
[[9176 35]
[ 205 4048]]
===========================
[UPPER] UP Class
===========================
--- Logistic Regression ---
Test Acc: 0.812289653675
[[4875 451]
[1278 2607]]
------ Random Forest ------
Test Acc: 0.809358375855
[[5064 262]
[1494 2391]]
===========================
[UPPER] DOWN Class
===========================
--- Logistic Regression ---
Test Acc: 1.0
[[3460 0]
[ 0 793]]
------ Random Forest ------
Test Acc: 0.981189748413
[[3460 0]
[ 80 713]]
--- End Output ---
Process finished with exit code 0
''' | [
6738,
1341,
35720,
13,
4164,
10466,
1330,
9922,
62,
26675,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
10802,
62,
6759,
8609,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
5972,
2569,
8081,
2234,
198,
6738,
1341,
35720,
13,
107... | 2.854872 | 1,950 |
# Copyright 2019 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import sys
import time
import urllib
import urllib.error
import urllib.request
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Usage: python3 test_web_service_rate_limiting.py <WEB_SERVICE_HOST_URL> "
              "(ex: python3 test_web_service_rate_limiting.py http://www.ebi.ac.uk) [nosleep]")
        sys.exit(1)
    # Small variant query used for every request in both test phases.
    urlString = "{0}/eva/webservices/rest/v1/segments/1:105000001-105500000/variants?species=mmusculus_grcm38&limit=5"\
        .format(sys.argv[1])
    print("To test parallel requests from multiple IP addresses, "
          "please run this script with the nosleep argument within 1 minute from other machines...")
    # Without the extra "nosleep" argument, wait so copies of this script
    # started on other machines can line up their requests.
    if len(sys.argv) == 2:
        time.sleep(60)  # Allow some time for the script to be invoked in multiple machines
    # Phase 1: within-limit traffic; success_use_case is defined elsewhere
    # in this module.
    print("****************************************************")
    print("All the service requests below should be successful!")
    success_use_case(urlString)
    print("*****************************************************")
    # Let the rate-limiting window reset before the failure phase.
    time.sleep(30)
    # Phase 2: traffic expected to trip the rate limiter.
    print("**********************************************************************")
    print("Some of the following service requests below should NOT be successful!")
    print("**********************************************************************")
    failure_use_case(urlString)
| [
2,
15069,
13130,
17228,
9148,
532,
3427,
16024,
259,
18982,
873,
5136,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351... | 3.295608 | 592 |
# -*- coding: utf-8 -*-
import sys
import os
import os.path
import subprocess
import JTutils
# Optional CLI arguments: destination procno (default "999") and whether
# to display the result ("y"/"n", default "y").
if len(sys.argv)>1:
    destproc = sys.argv[1]
else: destproc = "999"
if len(sys.argv) >2:
    showRES = sys.argv[2]
else: showRES = "y"
# Copy the current dataset descriptor and point the copy at the
# destination procno (element 2 of the descriptor list).
# NOTE(review): CURDATA/RSR/NEWWIN/RE look like TopSpin built-ins provided
# by the execution environment -- confirm.
data2d = CURDATA()
data1d = data2d[:]
data1d[2] = destproc
fulld2d = JTutils.fullpath(data2d)
fulld1d = JTutils.fullpath(data1d)
# Extract row "1" into the destination procno without displaying it.
RSR("1", procno=destproc, show="n")
# Run the external stacking helper on the 2D source and 1D destination.
JTutils.run_CpyBin_script('stack2D_.py', [fulld2d, fulld1d])
# Optionally open the result in a new window.
if showRES == 'y':
    NEWWIN()
    RE(data1d)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
11748,
850,
14681,
198,
11748,
449,
51,
26791,
198,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
29,
1... | 2.082305 | 243 |
"""Initialization of version_query package."""
__all__ = ['VersionComponent', 'Version',
'query_folder', 'query_caller', 'query_version_str', 'predict_git_repo',
'predict_caller', 'predict_version_str']
from .version import VersionComponent, Version
from .query import query_folder, query_caller, query_version_str
from .git_query import predict_git_repo
from .query import predict_caller, predict_version_str
| [
37811,
24243,
1634,
286,
2196,
62,
22766,
5301,
526,
15931,
198,
198,
834,
439,
834,
796,
37250,
14815,
21950,
3256,
705,
14815,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
705,
22766,
62,
43551,
3256,
705,
22766,
62,... | 2.993103 | 145 |
"""Private utilities."""
import numpy as np
# from sklearn.utils.validation import NotFittedError
# Copied from scikit-learn 0.19.
def _validate_covars(covars, covariance_type, n_components):
    """Do basic checks on matrix covariance sizes and values.

    Raises ``ValueError`` when ``covars`` does not match the layout implied
    by ``covariance_type``; returns ``None`` when everything checks out.
    """
    from scipy import linalg

    if covariance_type == 'spherical':
        # One positive variance per mixture component.
        if len(covars) != n_components:
            raise ValueError("'spherical' covars have length n_components")
        if np.any(covars <= 0):
            raise ValueError("'spherical' covars must be positive")
        return

    if covariance_type == 'tied':
        # A single symmetric positive-definite (n_dim, n_dim) matrix.
        if covars.shape[0] != covars.shape[1]:
            raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
        symmetric = np.allclose(covars, covars.T)
        if not symmetric or np.any(linalg.eigvalsh(covars) <= 0):
            raise ValueError("'tied' covars must be symmetric, "
                             "positive-definite")
        return

    if covariance_type == 'diag':
        # Positive per-dimension variances for every component.
        if covars.ndim != 2:
            raise ValueError("'diag' covars must have shape "
                             "(n_components, n_dim)")
        if np.any(covars <= 0):
            raise ValueError("'diag' covars must be positive")
        return

    if covariance_type == 'full':
        # One symmetric positive-definite matrix per component.
        if covars.ndim != 3:
            raise ValueError("'full' covars must have shape "
                             "(n_components, n_dim, n_dim)")
        if covars.shape[1] != covars.shape[2]:
            raise ValueError("'full' covars must have shape "
                             "(n_components, n_dim, n_dim)")
        for index, component_cv in enumerate(covars):
            symmetric = np.allclose(component_cv, component_cv.T)
            if not symmetric or np.any(linalg.eigvalsh(component_cv) <= 0):
                raise ValueError("component %d of 'full' covars must be "
                                 "symmetric, positive-definite" % index)
        return

    raise ValueError("covariance_type must be one of " +
                     "'spherical', 'tied', 'diag', 'full'")
# Copied from scikit-learn 0.19.
def distribute_covar_matrix_to_match_covariance_type(
        tied_cv, covariance_type, n_components):
    """Create all the covariance matrices from a given template.

    ``tied_cv`` is a single (n_dim, n_dim) covariance matrix; the result
    replicates it into the layout expected for ``covariance_type``.
    """
    # Builders are lazy so only the requested layout is ever computed.
    builders = {
        # One scalar variance per dimension, shared by every component.
        'spherical': lambda: np.tile(
            tied_cv.mean() * np.ones(tied_cv.shape[1]), (n_components, 1)),
        # The single template matrix itself.
        'tied': lambda: tied_cv,
        # The template's diagonal, repeated for each component.
        'diag': lambda: np.tile(np.diag(tied_cv), (n_components, 1)),
        # The full template matrix, repeated for each component.
        'full': lambda: np.tile(tied_cv, (n_components, 1, 1)),
    }
    if covariance_type not in builders:
        raise ValueError("covariance_type must be one of " +
                         "'spherical', 'tied', 'diag', 'full'")
    return builders[covariance_type]()
# Adapted from scikit-learn 0.21.
| [
37811,
29067,
20081,
526,
15931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
2,
422,
1341,
35720,
13,
26791,
13,
12102,
341,
1330,
1892,
37,
2175,
12331,
628,
198,
2,
6955,
798,
422,
629,
1134,
270,
12,
35720,
657,
13,
1129,
13,
1... | 2.04056 | 1,356 |
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required, permission_required
from django.template.context_processors import csrf
from django.shortcuts import render, render_to_response, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect, Http404, HttpResponseNotFound, HttpResponseBadRequest
from django.core.urlresolvers import reverse
from django.db.models import Count
from django.views.generic.edit import CreateView
from django.views.generic import TemplateView
from django.views.generic.base import RedirectView
#from django.template.context_processors import csrf
from models import *
import string
import random
import datetime
import time
from forms import *
from pprint import pprint
class LoginRequiredMixin(object):
"""A mixin that forces a login to view the CBTemplate."""
@classmethod
#
# conditions
# 1 - All Codes
# 2 - First Level only
# 3 - Second level only
# 4 - (later: uncertainty yes/no)
#
# map of the conditions
#
# current_page : page # : condition : { page: template_name, next: link for next page }
#
# Maps page name -> page number -> condition id -> template/link details.
# Most entries are identical for the three experimental conditions, so
# they are generated with dict comprehensions; only instruction pages 3/4
# and the per-condition help summaries actually vary by condition.
_CONDITIONS = ("1", "2", "3")

condition_map = {
    "instructions": {
        "1": {c: {"page": "instructions/instructions1-1.html",
                  "next": "/instructions/2/"} for c in _CONDITIONS},
        "2": {c: {"page": "instructions/instructions2-1.html",
                  "next": "/instructions/3/"} for c in _CONDITIONS},
        # Pages 3 and 4 show a different template per condition.
        "3": {c: {"page": "instructions/instructions3-%s.html" % c,
                  "next": "/instructions/4/"} for c in _CONDITIONS},
        "4": {c: {"page": "instructions/instructions4-%s.html" % c,
                  "next": "/instructioncheck/"} for c in _CONDITIONS},
    },
    "pre_survey": {c: {"next": "/instructions/1/"} for c in _CONDITIONS},
    "post_survey": {c: {"next": "/thanks/"} for c in _CONDITIONS},
    "validate": {
        # After dataset 0 a passing worker pauses; after dataset 1 the
        # worker proceeds to the post survey either way.
        "0": {c: {"positive_redirect": "/pause/",
                  "negative_redirect": "/survey/post/"} for c in _CONDITIONS},
        "1": {c: {"positive_redirect": "/survey/post/",
                  "negative_redirect": "/survey/post/"} for c in _CONDITIONS},
    },
    "coding": {
        page: {c: {"page": "coding.html",
                   "next": "/validate/%s/" % page,
                   "help": "instructions/summary%s.html" % c}
               for c in _CONDITIONS}
        for page in ("0", "1")
    },
    "bonus_check": {c: {"yes": "/coding/1/", "no": "/survey/post/"}
                    for c in _CONDITIONS},
}

default_password = "password!"
######################
#
# VIEWS
#
######################
# Create your views here.
def validate(request, page):
    """
    Validate a worker's coding of one dataset page.

    Scores the worker on two kinds of checks:
      * attention-check tweets, whose expected codes are stored as Answer
        rows for the condition, and
      * duplicated tweets (same external tweet_id), whose applied codes
        must be mutually consistent (forward pairwise comparison only).
    Every check outcome is persisted as a UserValidatedInstance. The
    worker passes when strictly more than half of all checks are correct;
    the redirect target for pass/fail comes from ``condition_map``.
    ``page`` is the index of the dataset within the condition's datasets.
    """
    _start = time.time()
    c = build_user_cookie(request)
    # Debug trace (Python 2 print statements).
    print "validate--- (%s)"%(page)
    print request.user.id
    print request.user.turkuser.id
    print "authenticated", request.user.is_authenticated()
    assignment_id = int(c["assignment"])
    condition_id = int(c["condition"])
    condition = Condition.objects.get(pk=condition_id)
    datasets = condition.dataset.all()
    correct = set()    # ids of checks the worker got right
    all_items = set()  # ids of every check performed
    # verify the page is in range
    page_num = int(page)
    if page_num < 0 or page_num >= datasets.count():
        return HttpResponseBadRequest()
    dataset = datasets[page_num]
    # find the attention checks
    attention_checks = Tweet.objects.filter(dataset = dataset, attention_check=True)
    ac_ids = [ac.id for ac in attention_checks]
    #print "ac_ids: ", repr(ac_ids)
    #print "condition id: ", condition_id
    # Expected code sets for each attention-check tweet in this condition.
    answers = Answer.objects.filter(tweet_id__in=ac_ids, condition=condition)
    answer_dict = make_instance_struct(answers)
    #print "answer_dict: ", repr(answer_dict)
    # grab instances -- the codes this worker actually applied.
    instances = CodeInstance.objects.filter(tweet_id__in=ac_ids, assignment=assignment_id, deleted=False)
    instance_dict = make_instance_struct(instances)
    #print "instance_dict: ", repr(instance_dict)
    # check each one of the attention checks
    for ac in ac_ids:
        # add the attention check to our items list
        all_items.add(ac)
        # A check passes only when the applied code set matches the
        # expected answer set exactly (set equality).
        answer_set = answer_dict.get(ac, set())
        instance_set = instance_dict.get(ac,set())
        is_correct = (instance_set == answer_set)
        if is_correct:
            correct.add(ac)
        # Persist the outcome for later analysis.
        uvi = UserValidatedInstance(
            user = request.user,
            kind = UserValidatedInstance.ATTENTION_CHECK,
            correct = is_correct,
            tweet_1_id = ac,
            tweet_2_id = None,
            tweet_1_codes = "answers: " + repr(answer_dict),
            tweet_2_codes = "instances: " + repr(instance_dict)
        )
        uvi.save()
        print "tweet %d: %s"%(ac, str(is_correct))
    # find duplicates: external tweet_ids that occur more than once in
    # this dataset. Their codings should agree with one another.
    duplicate_tweet_ids = Tweet.objects \
        .filter(dataset=dataset) \
        .values("tweet_id") \
        .annotate(num=Count("tweet_id")) \
        .order_by() \
        .filter(num__gt=1)
    #print "dupes: ", duplicate_tweet_ids
    duplicate_tweet_ids = [t["tweet_id"] for t in duplicate_tweet_ids]
    #print "dupes: ", duplicate_tweet_ids
    duplicate_tweets = Tweet.objects.filter(dataset=dataset, tweet_id__in=duplicate_tweet_ids)
    duplicate_ids = [t.id for t in duplicate_tweets]
    dup_instances = CodeInstance.objects.filter(tweet_id__in=duplicate_ids, assignment=assignment_id, deleted=False)
    # Map external tweet_id -> set of internal Tweet row ids sharing it.
    dup_dict = {}
    for dt in duplicate_tweets:
        if dt.tweet_id not in dup_dict:
            dup_dict[dt.tweet_id] = set()
        dup_dict[dt.tweet_id].add(dt.id)
    #print "dup dict: ", dup_dict
    # validate the duplicates
    # this will only do forward comparisons. So each dupe's codes is compared to the last
    # it does NOT do full pairwise comparisons
    # so the total # of comparisons will be N-1 (number of dupes - 1)
    dinst_dict = make_instance_struct(dup_instances)
    for tid, dup_set in dup_dict.iteritems():
        last_instance = None
        last_id = None
        for id in dup_set:
            cur_instance = dinst_dict.get(id, set())
            if last_instance is not None:
                # add it to the entire set. do not add the first one as it isn't a check
                all_items.add(id)
                is_consistent = (cur_instance == last_instance)
                if is_consistent:
                    print "%d is consistent with %d (%s,%s)"%(
                        id, last_id,
                        repr(cur_instance), repr(last_instance))
                    correct.add(id)
                else:
                    print "%d is INCONSISTENT with %d (%s,%s)"%(
                        id, last_id,
                        repr(cur_instance), repr(last_instance))
                # Record the pairwise comparison outcome.
                uvi = UserValidatedInstance(
                    user=request.user,
                    kind=UserValidatedInstance.DUPLICATE_CHECK,
                    correct=is_consistent,
                    tweet_1_id=last_id,
                    tweet_2_id=id,
                    tweet_1_codes=repr(last_instance),
                    tweet_2_codes=repr(cur_instance)
                )
                uvi.save()
            last_instance = cur_instance
            last_id = id
    print "%d of %d correct"%(len(correct), len(all_items))
    #_end = time.time()
    #_total_time = _end - _start
    #print "total_time: ", _total_time
    # Majority rule: strictly more than half of all checks must pass.
    cnd_map_entry = condition_map["validate"][page][str(condition.id)]
    if len(correct) > (len(all_items)//2):
        return HttpResponseRedirect(cnd_map_entry["positive_redirect"])
    else:
        return HttpResponseRedirect(cnd_map_entry["negative_redirect"])
#def start(request):
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
8323,
5344,
11,
17594,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
... | 2.509997 | 3,351 |
import nibabel as nib
import numpy as np
from util.util import transform_single, warning, error, remove_outliers, normalize_with_opt
| [
11748,
33272,
9608,
355,
33272,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
7736,
13,
22602,
1330,
6121,
62,
29762,
11,
6509,
11,
4049,
11,
4781,
62,
448,
75,
3183,
11,
3487,
1096,
62,
4480,
62,
8738,
628
] | 3.461538 | 39 |
import pytest
from django.conf import settings
from django.urls import reverse, resolve
from django.test import RequestFactory, Client
from django.http import Http404
from rsscraper.feeds.models import Feed, FeedItem
from rsscraper.feeds.views import FeedDetailView, FeedItemDetailView,\
FeedDeleteView
from rsscraper.feeds.tests.factories import FeedFactory
pytestmark = pytest.mark.django_db
| [
11748,
12972,
9288,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
11,
10568,
198,
6738,
42625,
14208,
13,
9288,
1330,
19390,
22810,
11,
20985,
198,
6738,
42625,
14208,
13,
4023,
1330,
... | 3.214286 | 126 |
# subprocess to return the information of the directory
# Runs the ``dir`` command through a shell and prints the listing of the
# current working directory to stdout (Windows shell builtin; on POSIX a
# ``dir`` binary must be on PATH).
import subprocess
# shell=True is required because ``dir`` is a shell builtin on Windows;
# the command string is a constant, so there is no injection concern.
subprocess.call('dir',shell=True)
| [
2,
850,
14681,
284,
1441,
262,
1321,
286,
262,
8619,
198,
11748,
850,
14681,
198,
7266,
14681,
13,
13345,
10786,
15908,
3256,
29149,
28,
17821,
8,
198
] | 4 | 27 |
from src.utils.program3.node import Node
from src.utils.program3.statements.statement import Statement
| [
6738,
12351,
13,
26791,
13,
23065,
18,
13,
17440,
1330,
19081,
198,
6738,
12351,
13,
26791,
13,
23065,
18,
13,
14269,
3196,
13,
26090,
1330,
21983,
628,
628
] | 3.785714 | 28 |
""" OpenstackDriver for Compute
based on BaseDriver for Compute Resource
"""
import mock
from keystoneauth1.exceptions.base import ClientException
from calplus.tests import base
from calplus.v1.compute.drivers.openstack import OpenstackDriver
fake_config_driver = {
'os_auth_url': 'http://controller:5000/v2_0',
'os_username': 'test',
'os_password': 'veryhard',
'os_project_name': 'demo',
'os_endpoint_url': 'http://controller:9696',
'os_driver_name': 'default',
'os_project_domain_name': 'default',
'os_user_domain_name': 'default',
'tenant_id': 'fake_tenant_id',
'limit': {
"subnet": 10,
"network": 10,
"floatingip": 50,
"subnetpool": -1,
"security_group_rule": 100,
"security_group": 10,
"router": 10,
"rbac_policy": -1,
"port": 50
}
}
class OpenstackDriverTest(base.TestCase):
"""docstring for OpenstackDriverTest"""
| [
37811,
4946,
25558,
32103,
329,
3082,
1133,
198,
220,
220,
220,
1912,
319,
7308,
32103,
329,
3082,
1133,
20857,
198,
37811,
628,
198,
11748,
15290,
198,
198,
6738,
1994,
6440,
18439,
16,
13,
1069,
11755,
13,
8692,
1330,
20985,
16922,
19... | 2.3875 | 400 |
from six.moves import range
NO_MORE_SENTINEL = object()
def take(seq, count):
    """
    Take up to ``count`` elements from a sequence or generator.

    Args
    ----
    seq : sequence or generator
        The sequence to take elements from.
    count : int
        The number of elements to take.

    Yields
    ------
    The first ``count`` elements of ``seq``; fewer are yielded if
    ``seq`` is exhausted first.
    """
    # BUG FIX: the docstring promises plain sequences as well as
    # generators, but the old body called next() on ``seq`` directly,
    # which raises TypeError for lists/tuples. iter() accepts both and
    # passes iterators through unchanged.
    it = iter(seq)
    # A local sentinel distinguishes "exhausted" from a legitimate None
    # value in the stream.
    sentinel = object()
    for _ in range(count):
        item = next(it, sentinel)
        if item is sentinel:
            return
        yield item
def chunks(obj, size):
    """
    Yield successive chunks of at most ``size`` elements from a list.

    Args
    ----
    obj : list
        List to split up.
    size : int
        Size of chunks to split list into.
    """
    # Slicing past the end of the sequence is safe, so the final chunk
    # simply contains whatever elements remain.
    for offset in range(0, len(obj), size):
        chunk = obj[offset:offset + size]
        yield chunk
def one_or_many(f):
    """
    Wraps a function so that it will either take a single argument, or a
    variable number of args.

    When the wrapper is called with exactly one positional argument, that
    argument is passed to ``f`` unchanged; when called with several, they
    are passed to ``f`` as a single tuple.
    """
    # BUG FIX: the original body was ``return _f`` with ``_f`` never
    # defined, so every call raised NameError. The wrapper below
    # implements the behaviour described by the docstring.
    def _f(*args):
        if len(args) == 1:
            return f(args[0])
        return f(args)
    return _f
| [
6738,
2237,
13,
76,
5241,
1330,
2837,
198,
198,
15285,
62,
23346,
62,
50,
3525,
1268,
3698,
796,
2134,
3419,
628,
198,
4299,
1011,
7,
41068,
11,
954,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
7214,
954,
867,
4847,
422,
... | 2.330623 | 369 |
# Backwards-compatibility shim: the ``vital`` package moved inside
# ``kwiver``. Importing this module warns the caller and re-exports
# everything from the new location so old ``vital.X`` references keep
# working.
import warnings
warnings.warn(
    '`import vital` is deprecated, use import kwiver.vital instead',
    UserWarning
)
# Star re-export of the relocated package.
from kwiver.vital import *  # NOQA
| [
11748,
14601,
198,
40539,
654,
13,
40539,
7,
198,
220,
220,
220,
705,
63,
11748,
9204,
63,
318,
39224,
11,
779,
1330,
479,
86,
1428,
13,
85,
1287,
2427,
3256,
198,
220,
220,
220,
11787,
20361,
198,
8,
198,
198,
6738,
479,
86,
1428... | 2.8 | 55 |
# -*- coding: utf-8 -*-
"""Модуль, описывающий CLI-утилиту пакета gvapi"""
import sys
from os import environ
from pathlib import Path
from hashlib import md5
import pickle
import click
from gvapi import Hero, errors
@click.command()
@click.option('-g', '--god', required=False, default=environ.get('GVAPI_GOD'), help='Имя божества')
@click.option('-t', '--token', required=False, default=environ.get('GVAPI_TOKEN'), help='Токен')
@click.option('--drop-cache', is_flag=True, default=False, help='Сбросить кэш при выполнении')
@click.argument('property_name', required=True)
def cli(god, token, drop_cache, property_name):
    """CLI interface for accessing the Godville game API.

    Arguments:

        PROPERTY_NAME    Name of the hero property to print

    The full list of properties and usage examples for this
    CLI are available in the documentation."""
    # God name must come from -g or the GVAPI_GOD env var (option default).
    if not god:
        raise errors.GVAPIException('Не получено имя божества.')
    # Hero objects are cached on disk under ~/.cache/gvapi so repeated
    # invocations do not re-hit the API.
    cache_dir = Path(Path.joinpath(Path.home(), '.cache', 'gvapi'))
    cache_dir.mkdir(parents=True, exist_ok=True)
    # Cache file name: md5 of "god:token" (authenticated) or of the god
    # name alone, so the two modes never share a cache entry.
    if token:
        cache_filename = md5('{}:{}'.format(god, token).encode()).hexdigest()
    else:
        cache_filename = md5(god.encode()).hexdigest()
    cache = Path(Path.joinpath(cache_dir, cache_filename))
    if cache.is_file() and not drop_cache:
        # NOTE(review): unpickling a file from the user's own cache
        # directory -- safe only while the cache is not attacker-writable.
        with open(cache, 'rb') as dump:
            hero = pickle.loads(dump.read())
    else:
        if token:
            hero = Hero(god, token)
        else:
            hero = Hero(god)
    try:
        value = getattr(hero, property_name)
    except AttributeError:
        # Unknown property name.
        click.echo("Получено некорректное свойство {}".format(property_name))
        sys.exit(1)
    except errors.NeedToken:
        # The property requires an authenticated session.
        click.echo('Для доступа к данному свойству необходим токен')
        sys.exit(1)
    except errors.InvalidToken:
        # The token is invalid or has been revoked.
        click.echo("Токен невалиден или был сброшен")
        sys.exit(1)
    click.echo(value)
    # Refresh the cache with the (possibly newly fetched) hero state.
    with open(cache, 'wb') as dump:
        dump.write(pickle.dumps(hero))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
140,
250,
25443,
112,
35072,
30143,
45367,
11,
12466,
122,
140,
123,
18849,
21727,
45035,
38857,
16142,
141,
236,
141,
231,
18849,
140,
117,
43749,
12,
35072,
20375... | 1.80523 | 1,109 |
# Download ERA-Interim surface data from the ECMWF MARS archive, one
# (roughly year-long) NetCDF file per request.
#
# The 36 original ``server.retrieve`` calls differed only in "date" and
# "target" (the target is always named after the range's start date), so
# they are collapsed into a single loop over a table of date ranges. The
# request sequence issued to the server is identical to the original.
from ecmwfapi import ECMWFDataServer

server = ECMWFDataServer()

# (start, end) pairs exactly as issued by the original per-year requests.
DATE_RANGES = [
    ("1979-01-01", "1979-12-31"),
    ("1980-01-01", "1980-12-30"),
    ("1980-12-31", "1981-12-30"),
    ("1981-12-31", "1982-12-30"),
    ("1982-12-31", "1983-12-30"),
    ("1983-12-31", "1984-12-29"),
    ("1984-12-30", "1985-12-29"),
    ("1985-12-30", "1986-12-29"),
    ("1986-12-30", "1987-12-29"),
    ("1987-12-30", "1988-12-28"),
    ("1988-12-29", "1989-12-28"),
    ("1989-12-29", "1990-12-28"),
    ("1990-12-29", "1991-12-28"),
    ("1991-12-29", "1992-12-27"),
    ("1992-12-28", "1993-12-27"),
    ("1993-12-28", "1994-12-27"),
    ("1994-12-28", "1995-12-27"),
    ("1995-12-28", "1996-12-26"),
    ("1996-12-27", "1997-12-26"),
    ("1997-12-27", "1998-12-26"),
    ("1998-12-27", "1999-12-26"),
    ("1999-12-27", "2000-12-25"),
    ("2000-12-26", "2001-12-25"),
    ("2001-12-26", "2002-12-25"),
    ("2002-12-26", "2003-12-25"),
    ("2003-12-26", "2004-12-24"),
    ("2004-12-25", "2005-12-24"),
    ("2005-12-25", "2006-12-24"),
    ("2006-12-25", "2007-12-24"),
    ("2007-12-25", "2008-12-23"),
    ("2008-12-24", "2009-12-23"),
    ("2009-12-24", "2010-12-23"),
    ("2010-12-24", "2011-12-23"),
    ("2011-12-24", "2012-12-22"),
    ("2012-12-23", "2013-12-22"),
    ("2013-12-23", "2014-12-22"),
]

for start, end in DATE_RANGES:
    server.retrieve({
        "class": "ei",            # ERA-Interim
        "dataset": "interim",
        "date": "%s/to/%s" % (start, end),
        "expver": "1",
        "grid": "0.75/0.75",      # 0.75-degree lat/lon grid
        "levtype": "sfc",         # surface-level fields
        "param": "34.128/49.128/134.128/143.128/164.128/165.128/166.128/167.128/168.128/169.128/228.128",
        "step": "6",
        "stream": "oper",
        "time": "00:00:00/12:00:00",
        "type": "fc",             # forecast fields
        "format": "netcdf",
        "target": "%s.nc" % start,   # output file named after range start
    })
| [
6738,
9940,
76,
86,
69,
15042,
1330,
13182,
14326,
37,
6601,
10697,
198,
198,
15388,
796,
13182,
14326,
37,
6601,
10697,
3419,
198,
15388,
13,
1186,
30227,
15090,
198,
197,
220,
220,
220,
366,
4871,
1298,
366,
20295,
1600,
198,
197,
2... | 1.815306 | 8,663 |
# 后台
from flask import Blueprint
from flask.views import MethodView
from flask import render_template, session, g
from apps.cms.forms import UserForm, ResetPwdForm, ResetEailForm, ResetEmailSendCode,URL
from flask import request, jsonify
from apps.common.baseResp import *
from ext import db, mail
from flask_mail import Message
from apps.cms.models import *
from config import REMBERME, LOGIN, CURRENT_USER_ID, CURRENT_USER
import string
import random
from apps.common.memcachedUtil import saveCache, getCache
from functools import wraps
from apps.common.models import Banner,Board
from apps.cms.forms import BannerForm,BannerUpdate,addBoaderFrom,updateboardFrom,deleteboardFrom
from qiniu import Auth
bp = Blueprint('cms', __name__, url_prefix="/cms")
def loginDecotor(func):
    """Decorator restricting a view to logged-in users."""
    # NOTE(review): the wrapper body is missing in this excerpt --
    # ``inner`` is never defined, so the function is broken as shown.
    # Presumably ``inner`` checks the session LOGIN flag and redirects
    # anonymous users before calling ``func``; confirm in the original.
    @wraps(func)
    return inner
@bp.route("/")
@bp.route('/login/', methods=['post'])
@bp.route('/index/')
@loginDecotor
@bp.route("/logout/")
@loginDecotor
@bp.route("/user_infor/")
@loginDecotor
@checkPermission(Permission.USER_INFO)
@bp.route("/send_email_code/", methods=['post'])
@loginDecotor
@checkPermission(Permission.USER_INFO)
def sendEmailCode():
    '''Send an email-verification code for the address-change flow.

    Generates a random 6-character alphanumeric code, caches its
    upper-cased form against the target address for 30 minutes, and
    mails the code to that address. Returns a JSON success/error body.
    '''
    fm = ResetEmailSendCode(formdata=request.form)
    if fm.validate():
        # Commented-out duplicate-address check kept from the original:
        # user = User.query.filter(User.email == fm.email.data).first()
        # if user :
        #     return jsonify(respParamErr(msg='邮箱已注册'))
        # else: # send the mail
        r = string.ascii_letters + string.digits
        r = ''.join(random.sample(r, 6))
        # NOTE(review): random.sample is not cryptographically secure;
        # consider the ``secrets`` module for verification codes. Also,
        # the cached copy is upper-cased while the mailed code is
        # mixed-case -- verification presumably upper-cases user input;
        # confirm.
        saveCache(fm.email.data, r.upper(), 30 * 60)
        msg = Message("破茧科技更新邮箱验证码", recipients=[fm.email.data], body="验证码为" + r)
        mail.send(msg)
        return jsonify(respSuccess(msg='发送成功,请查看邮箱'))
    else:
        # Form validation failed: report the collected errors.
        return jsonify(respParamErr(msg=fm.err))
#轮播图管理
@bp.route('/banner/')
@loginDecotor
@checkPermission(Permission.BANNER)
#添加轮播图
@bp.route("/addbanner/",methods=['post'])
@loginDecotor
@checkPermission(Permission.BANNER)
@bp.route("/deletebanner/",methods=['post'])
@loginDecotor
@checkPermission(Permission.BANNER)
@bp.route("/updatebanner/",methods=['post'])
@checkPermission(Permission.BANNER)
# 给客户端返回上传的令牌(token),因为
@bp.route("/qiniu_token/")
# 每次请求的时候都会执行,返回字典可以直接在模板中使用
@bp.context_processor
@bp.route("/board/")
@loginDecotor
@checkPermission(Permission.PLATE)
@bp.route("/addboard/",methods=["post"])
@loginDecotor
@checkPermission(Permission.PLATE)
@bp.route("/updateboard/",methods=["post"])
@loginDecotor
@checkPermission(Permission.PLATE)
@bp.route("/deleteboard/",methods=["post"])
@loginDecotor
@checkPermission(Permission.PLATE)
# 每次请求的时候都会执行,返回字典可以直接在模板中使用
@bp.context_processor
@bp.route("/send_email/",methods=["get"])
bp.add_url_rule('/resetpwd/', endpoint='resetpwd', view_func=ResetPwd.as_view('resetpwd'))
bp.add_url_rule('/resetemail/', endpoint='resetemail', view_func=ResetEmail.as_view('resetemail'))
| [
2,
10263,
238,
236,
20998,
108,
198,
6738,
42903,
1330,
39932,
198,
6738,
42903,
13,
33571,
1330,
11789,
7680,
198,
6738,
42903,
1330,
8543,
62,
28243,
11,
6246,
11,
308,
198,
6738,
6725,
13,
46406,
13,
23914,
1330,
11787,
8479,
11,
3... | 2.093974 | 1,394 |
"""
Solve the unique lowest-cost assignment problem using the
Hungarian algorithm (also known as Munkres algorithm).
"""
# Based on original code by Brain Clapper, adapted to numpy by Gael Varoquaux
# Copyright (c) 2008 Brian M. Clapper <bmc@clapper.org>, Gael Varoquaux
# Author: Brian M. Clapper, Gael Varoquaux
# LICENSE: BSD
import numpy as np
###############################################################################
# Object-oriented form of the algorithm
class _Hungarian(object):
"""Hungarian algorithm
Calculate the Munkres solution to the classical assignment problem.
Warning: this code is not following scikit-learn standards and will be
refactored.
"""
def compute(self, cost_matrix):
"""
Compute the indices for the lowest-cost pairings.
Parameters
----------
cost_matrix : 2D matrix
The cost matrix. Does not have to be square.
Returns
-------
indices : 2D array of indices
The pairs of (row, col) indices in the original array giving
the original ordering.
"""
cost_matrix = np.atleast_2d(cost_matrix)
# If there are more rows (n) than columns (m), then the algorithm
# will not be able to work correctly. Therefore, we
# transpose the cost function when needed. Just have to
# remember to swap the result columns later in this function.
doTranspose = (cost_matrix.shape[1] < cost_matrix.shape[0])
if doTranspose:
self.C = (cost_matrix.T).copy()
else:
self.C = cost_matrix.copy()
# At this point, m >= n.
self.n = n = self.C.shape[0]
self.m = m = self.C.shape[1]
self.row_uncovered = np.ones(n, dtype=np.bool)
self.col_uncovered = np.ones(m, dtype=np.bool)
self.Z0_r = 0
self.Z0_c = 0
self.path = np.zeros((n+m, 2), dtype=int)
self.marked = np.zeros((n, m), dtype=int)
done = False
step = 1
steps = {1: self._step1,
3: self._step3,
4: self._step4,
5: self._step5,
6: self._step6}
if m == 0 or n == 0:
# No need to bother with assignments if one of the dimensions
# of the cost matrix is zero-length.
done = True
while not done:
try:
func = steps[step]
step = func()
except KeyError:
done = True
# Look for the starred columns
results = np.array(np.where(self.marked == 1)).T
# We need to swap the columns because we originally
# did a transpose on the input cost matrix.
if doTranspose:
results = results[:, ::-1]
return results.tolist()
    def _step1(self):
        """ Steps 1 and 2 in the wikipedia page.

        Returns 3, the number of the next step to run.
        """
        # Step1: For each row of the matrix, find the smallest element and
        # subtract it from every element in its row.
        self.C -= self.C.min(axis=1)[:, np.newaxis]
        # Step2: Find a zero (Z) in the resulting matrix. If there is no
        # starred zero in its row or column, star Z. Repeat for each element
        # in the matrix.
        for i, j in zip(*np.where(self.C == 0)):
            if self.col_uncovered[j] and self.row_uncovered[i]:
                # Star this zero and cover its row and column so no other
                # zero in the same row/column gets starred.
                self.marked[i, j] = 1
                self.col_uncovered[j] = False
                self.row_uncovered[i] = False
        # NOTE(review): _clear_covers is defined outside this excerpt;
        # presumably it resets row_uncovered/col_uncovered to all-True.
        self._clear_covers()
        return 3
    def _step3(self):
        """
        Cover each column containing a starred zero. If n columns are
        covered, the starred zeros describe a complete set of unique
        assignments. In this case, Go to DONE, otherwise, Go to Step 4.

        Returns 7 (done) or 4, the number of the next step to run.
        """
        marked = (self.marked == 1)
        # Cover every column that contains a star.
        self.col_uncovered[np.any(marked, axis=0)] = False
        # Each star marks one assignment, so n stars == full assignment.
        if marked.sum() >= self.n:
            return 7 # done
        else:
            return 4
def _step4(self):
"""
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
"""
# We convert to int as numpy operations are faster on int
C = (self.C == 0).astype(np.int)
covered_C = C*self.row_uncovered[:, np.newaxis]
covered_C *= self.col_uncovered.astype(np.int)
n = self.n
m = self.m
while True:
# Find an uncovered zero
row, col = np.unravel_index(np.argmax(covered_C), (n, m))
if covered_C[row, col] == 0:
return 6
else:
self.marked[row, col] = 2
# Find the first starred element in the row
star_col = np.argmax(self.marked[row] == 1)
if not self.marked[row, star_col] == 1:
# Could not find one
self.Z0_r = row
self.Z0_c = col
return 5
else:
col = star_col
self.row_uncovered[row] = False
self.col_uncovered[col] = True
covered_C[:, col] = C[:, col] * (
self.row_uncovered.astype(np.int))
covered_C[row] = 0
    def _step5(self):
        """
        Construct a series of alternating primed and starred zeros as
        follows. Let Z0 represent the uncovered primed zero found in Step 4.
        Let Z1 denote the starred zero in the column of Z0 (if any).
        Let Z2 denote the primed zero in the row of Z1 (there will always
        be one). Continue until the series terminates at a primed zero
        that has no starred zero in its column. Unstar each starred zero
        of the series, star each primed zero of the series, erase all
        primes and uncover every line in the matrix. Return to Step 3.

        Returns 3, the number of the next step to execute.
        """
        count = 0
        path = self.path
        # The augmenting path starts at the primed zero found in step 4.
        path[count, 0] = self.Z0_r
        path[count, 1] = self.Z0_c
        done = False
        while not done:
            # Find the first starred element in the col defined by
            # the path.
            row = np.argmax(self.marked[:, path[count, 1]] == 1)
            if not self.marked[row, path[count, 1]] == 1:
                # Could not find one
                done = True
            else:
                count += 1
                path[count, 0] = row
                path[count, 1] = path[count-1, 1]
            if not done:
                # Find the first prime element in the row defined by the
                # first path step
                col = np.argmax(self.marked[path[count, 0]] == 2)
                if self.marked[row, col] != 2:
                    col = -1
                count += 1
                path[count, 0] = path[count-1, 0]
                path[count, 1] = col
        # Convert paths: along the path, stars become unstarred and primes
        # become stars, which grows the matching by one.
        for i in range(count+1):
            if self.marked[path[i, 0], path[i, 1]] == 1:
                self.marked[path[i, 0], path[i, 1]] = 0
            else:
                self.marked[path[i, 0], path[i, 1]] = 1
        self._clear_covers()
        # Erase all prime markings
        self.marked[self.marked == 2] = 0
        return 3
def _step6(self):
"""
Add the value found in Step 4 to every element of each covered
row, and subtract it from every element of each uncovered column.
Return to Step 4 without altering any stars, primes, or covered
lines.
"""
# the smallest uncovered value in the matrix
if np.any(self.row_uncovered) and np.any(self.col_uncovered):
minval = np.min(self.C[self.row_uncovered], axis=0)
minval = np.min(minval[self.col_uncovered])
self.C[np.logical_not(self.row_uncovered)] += minval
self.C[:, self.col_uncovered] -= minval
return 4
def _find_prime_in_row(self, row):
"""
Find the first prime element in the specified row. Returns
the column index, or -1 if no starred element was found.
"""
col = np.argmax(self.marked[row] == 2)
if self.marked[row, col] != 2:
col = -1
return col
def _clear_covers(self):
"""Clear all covered matrix cells"""
self.row_uncovered[:] = True
self.col_uncovered[:] = True
###############################################################################
# Functional form for easier use
def linear_assignment(X):
    """Solve the linear assignment problem with the Hungarian algorithm.

    Also known as maximum weight matching in bipartite graphs, solved by
    the Munkres (Kuhn-Munkres) algorithm.

    Parameters
    ----------
    X : array
        The cost matrix of the bipartite graph.

    Returns
    -------
    indices : array
        The (row, col) index pairs of the optimal assignment, sorted and
        shaped (n, 2) -- even when the solution is empty.

    References
    ----------
    1. http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html
    2. Harold W. Kuhn. The Hungarian Method for the assignment problem.
       *Naval Research Logistics Quarterly*, 2:83-97, 1955.
    3. Harold W. Kuhn. Variants of the Hungarian method for assignment
       problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956.
    4. Munkres, J. Algorithms for the Assignment and Transportation Problems.
       *Journal of the Society of Industrial and Applied Mathematics*,
       5(1):32-38, March, 1957.
    5. http://en.wikipedia.org/wiki/Hungarian_algorithm
    """
    solver = _Hungarian()
    pairs = sorted(solver.compute(X))
    # Force an int dtype even for an empty result, then a fixed (n, 2)
    # shape so an empty solution still has two columns.
    result = np.array(pairs, dtype=int)
    result.shape = (-1, 2)
    return result
| [
37811,
198,
50,
6442,
262,
3748,
9016,
12,
15805,
16237,
1917,
1262,
262,
198,
39505,
3699,
11862,
357,
14508,
1900,
355,
337,
2954,
411,
11862,
737,
198,
198,
37811,
198,
2,
13403,
319,
2656,
2438,
416,
14842,
44834,
11,
16573,
284,
... | 2.254207 | 4,516 |
import logging
import sys
import time
import torch
from model import MatchModel
from data import TripletTextDataset
from util import seed_all
# Silence all logging output so only the predictions reach stdout/ouf.
logging.disable(sys.maxsize)
start_time = time.time()
# Default locations; both may be overridden by exactly two CLI arguments.
input_path = "./data/test/test.json"
output_path = "./data/test/output.txt"
if len(sys.argv) == 3:
    input_path = sys.argv[1]
    output_path = sys.argv[2]
# NOTE(review): `inf` is opened but never read below -- it only verifies the
# input file exists/is readable before prediction. TODO confirm intent.
inf = open(input_path, "r", encoding="utf-8")
ouf = open(output_path, "w", encoding="utf-8")
# Fix all RNG seeds for reproducible predictions.
seed_all(42)
MODEL_DIR = "./output/model"
# Load the trained matching model onto the CPU.
model = MatchModel.load(MODEL_DIR, torch.device("cpu"))
print('Model: ' + MODEL_DIR)
test_set = TripletTextDataset.from_jsons(input_path)
results = model.predict(test_set)
# Write one predicted label per line to the output file.
for label, _ in results:
    print(str(label), file=ouf)
inf.close()
ouf.close()
end_time = time.time()
spent = end_time - start_time
print("numbers of samples: %d" % len(results))
print("time spent: %.2f seconds" % spent)
| [
11748,
18931,
198,
11748,
25064,
198,
11748,
640,
198,
198,
11748,
28034,
198,
198,
6738,
2746,
1330,
13225,
17633,
198,
6738,
1366,
1330,
19817,
83,
8206,
27354,
292,
316,
198,
6738,
7736,
1330,
9403,
62,
439,
198,
198,
6404,
2667,
13,... | 2.622807 | 342 |
'''
Analysis and figures for research notes

Requires running OH_1665_narrowchannel_imaging
'''
from os.path import join as osjoin
from paths import c_path
# Masking and moment-making scripts
# Make velocity corrected cubes
# NOTE(review): execfile() exists only in Python 2; under Python 3 this
# would need exec(open(path).read()) -- confirm the target interpreter.
execfile(osjoin(c_path, "Lines/OH_maser_luminosity.py"))
execfile(osjoin(c_path, "Lines/OH_maser_figure.py"))
# This creates 3 figures, which are combined into the paper version using
# keynote
| [
198,
7061,
6,
198,
32750,
290,
5538,
329,
2267,
4710,
198,
198,
39618,
2491,
18723,
62,
1433,
2996,
62,
77,
6018,
17620,
62,
320,
3039,
198,
7061,
6,
198,
198,
6738,
28686,
13,
6978,
1330,
4654,
355,
28686,
22179,
198,
198,
6738,
13... | 3.180451 | 133 |
from functools import reduce, wraps
from typing import Any
import requests
import tinder
from tinder.recs import Rec, TimeOutException, RetryException
| [
6738,
1257,
310,
10141,
1330,
4646,
11,
27521,
201,
198,
6738,
19720,
1330,
4377,
201,
198,
201,
198,
11748,
7007,
201,
198,
201,
198,
11748,
256,
5540,
201,
198,
6738,
256,
5540,
13,
260,
6359,
1330,
3311,
11,
3862,
7975,
16922,
11,
... | 3.354167 | 48 |
import abc
if __name__ == '__main__':
    # Create the different files that will form part of the directory tree.
    # NOTE(review): `directory` and `Hoja` (Spanish for "leaf/sheet") are
    # presumably Composite-pattern classes defined elsewhere in this file --
    # they are not visible here; confirm against the full module.
    root = directory('/')
    etc = directory('/etc')
    var = directory('/var')
    usr = directory('/usr')
    include = directory('/include')
    home = directory('/home')
    users = directory('/users')
    salguer = directory('/salguer')
    documentos = directory('/documentos')
    archivo1 = Hoja('ensayo', 'txt')
    tarea = Hoja('presentacion', 'txt')
    tarea2 = Hoja('DAS', 'txt')
    # Assemble the tree: `agregar` ("add") nests a node under its parent.
    root.agregar(etc)
    root.agregar(var)
    root.agregar(usr)
    root.agregar(home)
    usr.agregar(include)
    home.agregar(users)
    users.agregar(salguer)
    salguer.agregar(archivo1)
    salguer.agregar(documentos)
    salguer.agregar(tarea)
    salguer.agregar(tarea2)
    # Walk/print the whole hierarchy starting at the root.
    root.path()
| [
11748,
450,
66,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1303,
325,
1126,
272,
22346,
288,
361,
9100,
274,
3934,
452,
418,
8358,
1296,
19173,
636,
68,
1619,
3437,
952,
198,
22... | 2.298592 | 355 |
import time
if __name__ == '__main__':
    # NOTE(review): enc_test/dec_test are not visible in this chunk --
    # presumably encode/decode self-tests defined earlier in the file.
    print(enc_test())
    print(dec_test())
| [
11748,
640,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
3601,
7,
12685,
62,
9288,
28955,
198,
220,
220,
220,
3601,
7,
12501,
62,
9288,
28955,
198
] | 2.388889 | 36 |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
PagesView controls the normal Pages of the System
@author Kevin Lucas Simon, Christina Bernhardt ,Nelson Morais
Projekt OOAD Hausarbeit WiSe 2020/21
"""
from django.shortcuts import render
from django.views import View
class PagesView(View):
    """View class serving the plain (static) pages of the system."""
    # NOTE(review): get_welcome takes `request` as its first positional
    # argument with no `self`; it is presumably routed as a plain function
    # (e.g. PagesView.get_welcome in urls.py) -- confirm before adding self.
    def get_welcome(request):
        """
        displays a welcome page
        :param request: HTTP Request
        :return: renders a page
        """
        return render(request, "welcome.html")
| [
37811,
198,
26656,
15385,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
273,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
17080,
6169,
351,
428,
670,
329,
3224,
1321,
198,
2301,
13493,
6634,
9238,
13... | 3.564841 | 347 |
# -*- coding: utf-8 -*-
"""module containg the API wrapper"""
import io
import requests
import yaml
from pysolar import solar
import pandas as pd
class EndpointWrapper:
    """Base class for endpoint wrapper
    The usage of the API requires an API key that can be requested for free at:
    "https://developer.nrel.gov/signup/"
    List of all Endpoints at:
    "https://developer.nrel.gov/docs/solar/nsrdb/"
    Parameters
    ----------
    latlong : tuple of list with latitude and longitude as decimal numbers
    config_path : absolute filepath, optional(default=None)
    request_attr_dict : dictonary, optinal(default=empty dict)
        Should contain parameters and values for the API call. Space character
        is not allowed and should be replaced with "+" in all string values
        Needs to contain Values for:
            api_key: string
            full_name: string
            email: string
            affiliation: string
            reason: string
            names: int or list of int, years that should be extracted
            mailing_list: string, possible values "true" and "false"
    """
    # NOTE(review): methods below read self.endpoint_url, self.request_attr,
    # self.latitude and self.longitude, which are presumably set by an
    # __init__ that is not visible in this chunk -- confirm.
    def request_data(self, parse_datetime=True):
        """Requests data from NSRDB server and converts it to a pandas
        dataframe
        Parameters
        ----------
        parse_datetime: Boolean, optional(default=True)
        If parse_datetime is set to True the original datetime defining columns
        are transformed to a pandas datetime column.
        """
        # NSRDB api does not support %formated url payloads
        payload_str = "&".join("%s=%s" % (k, v)
                               for k, v in self.request_attr.items())
        response = requests.get(self.endpoint_url, params=payload_str)
        if response.status_code != 200:
            raise Exception('''Request error with status code: {}\n
            REsponse message:\n{}'''.format(
                response.status_code, response.content))
        # The CSV payload starts after a 2-line metadata header.
        buffer = io.BytesIO(response.content)
        buffer.seek(0)
        self.df = pd.read_csv(buffer, skiprows=[0, 1])
        if parse_datetime:
            self.parse_datetime()
    def parse_datetime(self, drop_original=True):
        """Parsing the 5 datetime columns from the original NSRDB data to one
        pandas datetime column.
        Parameters
        ----------
        drop_original: Boolean, optional(default=True)
        If drop_original is set to True the original datetime defining columns
        are droped from the dataframe.
        """
        time_columns = ['Year', 'Month', 'Day', 'Hour', 'Minute']
        self.df[time_columns] = self.df[time_columns].astype(str)
        # Zero-pad each component to two digits and concatenate into the
        # fixed %Y%m%d%H%M layout before parsing.
        self.df['dt'] = pd.to_datetime(self.df.Year +
                                       self.df.Month.apply('{:0>2}'.format) +
                                       self.df.Day.apply('{:0>2}'.format) +
                                       self.df.Hour.apply('{:0>2}'.format) +
                                       self.df.Minute.apply('{:0>2}'.format),
                                       format='%Y%m%d%H%M')
        if drop_original:
            self.df = self.df.drop(time_columns, axis=1)
    def add_zenith_azimuth(self):
        """Adds zenith and azimuth from location and datetime with using
        pysolar. Datetime needs to be timezone aware.
        """
        self.df['zenith'] = \
            self.df.dt.apply(lambda x: solar.get_altitude(self.latitude,
                                                          self.longitude,
                                                          x))
        self.df['azimuth'] = \
            self.df.dt.apply(lambda x: solar.get_azimuth(self.latitude,
                                                         self.longitude,
                                                         x))
class SpectralTMYWrapper(EndpointWrapper):
"""Wrapper for Endpoint to download Spectral TMY Data
The usage of the API requires an API key that can be requested for free at:
"https://developer.nrel.gov/signup/"
Documentation of the Endpoint at:
"https://developer.nrel.gov/docs/solar/nsrdb/spectral_tmy_data_download/"
Parameters
----------
latlong : tuple of list with latitude and longitude as decimal numbers#
config_path : absolute filepath, optional(default=None)
request_attr_dict : dictonary, optinal(default=empty dict)
Should contain parameters and values for the API call. Space character
is not allowed and should be replaced with "+" in all string values
Needs to contain Values for:
api_key: string
full_name: string
email: string
affiliation: string
reason: string
mailing_list: string, possible values "true" and "false"
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
21412,
542,
64,
278,
262,
7824,
29908,
37811,
628,
198,
11748,
33245,
198,
11748,
7007,
198,
11748,
331,
43695,
198,
6738,
279,
893,
6192,
1330,
6591,
198,
11748,
... | 2.180424 | 2,217 |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.testutils import APITestCase
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
198,
6738,
1908,
563,
13,
9288,
26791,
1330,
3486,
2043,
395,
20448,
628
] | 3.555556 | 36 |
# Handlers
# NOTE(review): PreciseF32 and Pthreads are not defined in this chunk --
# presumably handler classes declared earlier in the file; confirm.
handlers = [PreciseF32, Pthreads]
# client-side asm code modification
| [
198,
2,
7157,
8116,
198,
198,
4993,
8116,
796,
685,
6719,
37561,
37,
2624,
11,
350,
16663,
82,
60,
198,
198,
2,
5456,
12,
1589,
355,
76,
2438,
17613,
628
] | 2.833333 | 30 |
import skimage as sk
import numpy as np
from rectpack import newPacker
## not commented
| [
198,
11748,
1341,
9060,
355,
1341,
220,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
13621,
8002,
1330,
649,
47,
10735,
198,
198,
2235,
407,
16476,
628,
198
] | 3.241379 | 29 |
"""
Created on Sep 22, 2018
@author: Yizhe Sun
"""
import os
import uuid
from werkzeug.utils import secure_filename
from redis import Redis
from rq import Queue
import sqlalchemy as db
from .config import ALLOWED_EXTENSIONS, DATABASE_URI
# Initialise the task queue for background tasks
q = Queue(connection=Redis())
# connect to database
engine = db.create_engine(DATABASE_URI)
connection = engine.connect()
metadata = db.MetaData()
# frame_analysis table
# NOTE(review): autoload/autoload_with reflect the table from the live DB;
# this keyword style was removed in SQLAlchemy 2.0 (use autoload_with only)
# -- confirm the pinned SQLAlchemy version.
frame_analysis = db.Table(
    'frame_analysis', metadata, autoload=True, autoload_with=engine)
# video table
video = db.Table('video', metadata, autoload=True, autoload_with=engine)
# load the pre-trained model, including the CNN model and the LSTM model
# Check whether the file is within the supported format
# Save the uploaded video file to disk
# Silently remove a file
| [
37811,
198,
41972,
319,
8621,
2534,
11,
2864,
198,
198,
31,
9800,
25,
575,
528,
258,
3825,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
334,
27112,
198,
198,
6738,
266,
9587,
2736,
1018,
13,
26791,
1330,
5713,
62,
34345,
198,
6738,... | 3.249042 | 261 |
import argparse
import warnings
import json
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True,
                help="path to the JSON configuration file")
args = vars(ap.parse_args())
# filter warnings, load the configuration and initialize the Dropbox
# client
warnings.filterwarnings("ignore")
# NOTE(review): the file object from open() is never closed -- harmless in
# a short script but a `with` block would be cleaner.
conf = json.load(open(args["conf"]))
client = None
198,
11748,
1822,
29572,
198,
11748,
14601,
198,
11748,
33918,
198,
198,
2,
5678,
262,
4578,
30751,
290,
21136,
262,
7159,
198,
499,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
499,
13,
2860,
62,
49140,
7203,
12,
66,
1600,
36... | 3.57265 | 117 |
# -*- coding: utf-8 -*-
"""
This script is a help for checking if the new NUMBA functions are correctly
integrated into acoular.
One has to make a savefile (see 'all_bfWeave.sav') for both, the old acoular
version an the new one. In section '#%% Compare Weave vs Numba' both versions
are compared.
This script uses essentially 'example3.py', so therefor 'example_data.h5' and
'example_calib.xml' are needed.
Copyright (c) 2006-2015 The Acoular developers.
All rights reserved.
"""
# imports from acoular
import acoular
from acoular import L_p, TimeSamples, Calib, MicGeom, EigSpectra,\
RectGrid3D, BeamformerBase, BeamformerFunctional, BeamformerEig, BeamformerOrth, \
BeamformerCleansc, BeamformerCapon, BeamformerMusic, BeamformerCMF, PointSpreadFunction, BeamformerClean, BeamformerDamas
# other imports
from os import path
#from mayavi import mlab
from numpy import amax
#from cPickle import dump, load
from pickle import dump, load
# see example3
t = TimeSamples(name='example_data.h5')
cal = Calib(from_file='example_calib.xml')
m = MicGeom(from_file=path.join(\
path.split(acoular.__file__)[0], 'xml', 'array_56.xml'))
g = RectGrid3D(x_min=-0.6, x_max=-0.0, y_min=-0.3, y_max=0.3, \
z_min=0.48, z_max=0.88, increment=0.1)
f = EigSpectra(time_data=t, window='Hanning', overlap='50%', block_size=128, ind_low=5, ind_high=15)
csm = f.csm[:]
eva = f.eva[:]
eve = f.eve[:]
#""" Creating the beamformers
bb1Rem = BeamformerBase(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='classic')
bb2Rem = BeamformerBase(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='inverse')
bb3Rem = BeamformerBase(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='true level')
bb4Rem = BeamformerBase(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='true location')
bb1Full = BeamformerBase(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='classic')
bb2Full = BeamformerBase(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='inverse')
bb3Full = BeamformerBase(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='true level')
bb4Full = BeamformerBase(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='true location')
Lbb1Rem = L_p(bb1Rem.synthetic(4000,1))
Lbb2Rem = L_p(bb2Rem.synthetic(4000,1))
Lbb3Rem = L_p(bb3Rem.synthetic(4000,1))
Lbb4Rem = L_p(bb4Rem.synthetic(4000,1))
Lbb1Full = L_p(bb1Full.synthetic(4000,1))
Lbb2Full = L_p(bb2Full.synthetic(4000,1))
Lbb3Full = L_p(bb3Full.synthetic(4000,1))
Lbb4Full = L_p(bb4Full.synthetic(4000,1))
bf1Rem = BeamformerFunctional(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='classic', gamma=3)
bf2Rem = BeamformerFunctional(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='inverse', gamma=3)
bf3Rem = BeamformerFunctional(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='true level', gamma=3)
bf4Rem = BeamformerFunctional(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='true location', gamma=3)
bf1Full = BeamformerFunctional(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='classic', gamma=3)
bf2Full = BeamformerFunctional(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='inverse', gamma=3)
bf3Full = BeamformerFunctional(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='true level', gamma=3)
bf4Full = BeamformerFunctional(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='true location', gamma=3)
Lbf1Rem = L_p(bf1Rem.synthetic(4000,1))
Lbf2Rem = L_p(bf2Rem.synthetic(4000,1))
Lbf3Rem = L_p(bf3Rem.synthetic(4000,1))
Lbf4Rem = L_p(bf4Rem.synthetic(4000,1))
Lbf1Full = L_p(bf1Full.synthetic(4000,1))
Lbf2Full = L_p(bf2Full.synthetic(4000,1))
Lbf3Full = L_p(bf3Full.synthetic(4000,1))
Lbf4Full = L_p(bf4Full.synthetic(4000,1))
bca1Full = BeamformerCapon(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='classic')
bca2Full = BeamformerCapon(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='inverse')
bca3Full = BeamformerCapon(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='true level')
bca4Full = BeamformerCapon(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='true location')
Lbca1Full = L_p(bca1Full.synthetic(4000,1))
Lbca2Full = L_p(bca2Full.synthetic(4000,1))
Lbca3Full = L_p(bca3Full.synthetic(4000,1))
Lbca4Full = L_p(bca4Full.synthetic(4000,1))
be1Rem = BeamformerEig(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='classic', n=12)
be2Rem = BeamformerEig(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='inverse', n=12)
be3Rem = BeamformerEig(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='true level', n=12)
be4Rem = BeamformerEig(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='true location', n=12)
be1Full = BeamformerEig(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='classic', n=12)
be2Full = BeamformerEig(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='inverse', n=12)
be3Full = BeamformerEig(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='true level', n=12)
be4Full = BeamformerEig(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='true location', n=12)
Lbe1Rem = L_p(be1Rem.synthetic(4000,1))
Lbe2Rem = L_p(be2Rem.synthetic(4000,1))
Lbe3Rem = L_p(be3Rem.synthetic(4000,1))
Lbe4Rem = L_p(be4Rem.synthetic(4000,1))
Lbe1Full = L_p(be1Full.synthetic(4000,1))
Lbe2Full = L_p(be2Full.synthetic(4000,1))
Lbe3Full = L_p(be3Full.synthetic(4000,1))
Lbe4Full = L_p(be4Full.synthetic(4000,1))
bm1Full = BeamformerMusic(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='classic', n=12)
bm2Full = BeamformerMusic(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='inverse', n=12)
bm3Full = BeamformerMusic(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='true level', n=12)
bm4Full = BeamformerMusic(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='true location', n=12)
Lbm1Full = L_p(bm1Full.synthetic(4000,1))
Lbm2Full = L_p(bm2Full.synthetic(4000,1))
Lbm3Full = L_p(bm3Full.synthetic(4000,1))
Lbm4Full = L_p(bm4Full.synthetic(4000,1))
bcsc1Rem = BeamformerCleansc(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='classic')
bcsc2Rem = BeamformerCleansc(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='inverse')
bcsc3Rem = BeamformerCleansc(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='true level')
bcsc4Rem = BeamformerCleansc(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='true location')
bcsc1Full = BeamformerCleansc(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='classic')
bcsc2Full = BeamformerCleansc(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='inverse')
bcsc3Full = BeamformerCleansc(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='true level')
bcsc4Full = BeamformerCleansc(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='true location')
Lbcsc1Rem = L_p(bcsc1Rem.synthetic(4000,1))
Lbcsc2Rem = L_p(bcsc2Rem.synthetic(4000,1))
Lbcsc3Rem = L_p(bcsc3Rem.synthetic(4000,1))
Lbcsc4Rem = L_p(bcsc4Rem.synthetic(4000,1))
Lbcsc1Full = L_p(bcsc1Full.synthetic(4000,1))
Lbcsc2Full = L_p(bcsc2Full.synthetic(4000,1))
Lbcsc3Full = L_p(bcsc3Full.synthetic(4000,1))
Lbcsc4Full = L_p(bcsc4Full.synthetic(4000,1))
bort1Rem = BeamformerOrth(beamformer=be1Rem, eva_list=list(range(4,8)))
bort2Rem = BeamformerOrth(beamformer=be2Rem, eva_list=list(range(4,8)))
bort3Rem = BeamformerOrth(beamformer=be3Rem, eva_list=list(range(4,8)))
bort4Rem = BeamformerOrth(beamformer=be4Rem, eva_list=list(range(4,8)))
bort1Full = BeamformerOrth(beamformer=be1Full, eva_list=list(range(4,8)))
bort2Full = BeamformerOrth(beamformer=be2Full, eva_list=list(range(4,8)))
bort3Full = BeamformerOrth(beamformer=be3Full, eva_list=list(range(4,8)))
bort4Full = BeamformerOrth(beamformer=be4Full, eva_list=list(range(4,8)))
Lbort1Rem = L_p(bort1Rem.synthetic(4000,1))
Lbort2Rem = L_p(bort2Rem.synthetic(4000,1))
Lbort3Rem = L_p(bort3Rem.synthetic(4000,1))
Lbort4Rem = L_p(bort4Rem.synthetic(4000,1))
Lbort1Full = L_p(bort1Full.synthetic(4000,1))
Lbort2Full = L_p(bort2Full.synthetic(4000,1))
Lbort3Full = L_p(bort3Full.synthetic(4000,1))
Lbort4Full = L_p(bort4Full.synthetic(4000,1))
bcmf1Rem = BeamformerCMF(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='classic')
bcmf2Rem = BeamformerCMF(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='inverse')
bcmf3Rem = BeamformerCMF(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='true level')
bcmf4Rem = BeamformerCMF(freq_data=f, grid=g, mpos=m, r_diag=True, c=346.04, steer='true location')
bcmf1Full = BeamformerCMF(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='classic')
bcmf2Full = BeamformerCMF(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='inverse')
bcmf3Full = BeamformerCMF(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='true level')
bcmf4Full = BeamformerCMF(freq_data=f, grid=g, mpos=m, r_diag=False, c=346.04, steer='true location')
Lbcmf1Rem = L_p(bcmf1Rem.synthetic(4000,1))
Lbcmf2Rem = L_p(bcmf2Rem.synthetic(4000,1))
Lbcmf3Rem = L_p(bcmf3Rem.synthetic(4000,1))
Lbcmf4Rem = L_p(bcmf4Rem.synthetic(4000,1))
Lbcmf1Full = L_p(bcmf1Full.synthetic(4000,1))
Lbcmf2Full = L_p(bcmf2Full.synthetic(4000,1))
Lbcmf3Full = L_p(bcmf3Full.synthetic(4000,1))
Lbcmf4Full = L_p(bcmf4Full.synthetic(4000,1))
##==============================================================================
## There are various variations to calculate the psf: Need to be checked individually
## #psfSingle = PointSpreadFunction(grid=g, mpos=m, calcmode='single')
## #LPsfSingle = L_p(psfSingle.psf[:])
##
## #psfBlock = PointSpreadFunction(grid=g, mpos=m, calcmode='block')
## #LPsfBlock = L_p(psfBlock.psf[:])
##
## #psfFull = PointSpreadFunction(grid=g, mpos=m, calcmode='full')
## #LPsfFull = L_p(psfFull.psf[:])
##
## #all_bf = (LPsfFull,)
##==============================================================================
psf1 = PointSpreadFunction(grid=g, mpos=m, c=346.04, steer='classic')
psf2 = PointSpreadFunction(grid=g, mpos=m, c=346.04, steer='inverse')
psf3 = PointSpreadFunction(grid=g, mpos=m, c=346.04, steer='true level')
psf4 = PointSpreadFunction(grid=g, mpos=m, c=346.04, steer='true location')
Lpsf1 = L_p(psf1.psf[:])
Lpsf2 = L_p(psf2.psf[:])
Lpsf3 = L_p(psf3.psf[:])
Lpsf4 = L_p(psf4.psf[:])
bcpsf1Rem = BeamformerClean(beamformer=bb1Rem)
bcpsf2Rem = BeamformerClean(beamformer=bb2Rem)
bcpsf3Rem = BeamformerClean(beamformer=bb3Rem)
bcpsf4Rem = BeamformerClean(beamformer=bb4Rem)
bcpsf1Full = BeamformerClean(beamformer=bb1Full)
bcpsf2Full = BeamformerClean(beamformer=bb2Full)
bcpsf3Full = BeamformerClean(beamformer=bb3Full)
bcpsf4Full = BeamformerClean(beamformer=bb4Full)
Lbcpsf1Rem = L_p(bcpsf1Rem.synthetic(4000,1))
Lbcpsf2Rem = L_p(bcpsf2Rem.synthetic(4000,1))
Lbcpsf3Rem = L_p(bcpsf3Rem.synthetic(4000,1))
Lbcpsf4Rem = L_p(bcpsf4Rem.synthetic(4000,1))
Lbcpsf1Full = L_p(bcpsf1Full.synthetic(4000,1))
Lbcpsf2Full = L_p(bcpsf2Full.synthetic(4000,1))
Lbcpsf3Full = L_p(bcpsf3Full.synthetic(4000,1))
Lbcpsf4Full = L_p(bcpsf4Full.synthetic(4000,1))
bd1Rem = BeamformerDamas(beamformer=bb1Rem, n_iter=100)
bd2Rem = BeamformerDamas(beamformer=bb2Rem, n_iter=100)
bd3Rem = BeamformerDamas(beamformer=bb3Rem, n_iter=100)
bd4Rem = BeamformerDamas(beamformer=bb4Rem, n_iter=100)
bd1Full = BeamformerDamas(beamformer=bb1Full, n_iter=100)
bd2Full = BeamformerDamas(beamformer=bb2Full, n_iter=100)
bd3Full = BeamformerDamas(beamformer=bb3Full, n_iter=100)
bd4Full = BeamformerDamas(beamformer=bb4Full, n_iter=100)
Lbd1Rem = L_p(bd1Rem.synthetic(4000,1))
Lbd2Rem = L_p(bd2Rem.synthetic(4000,1))
Lbd3Rem = L_p(bd3Rem.synthetic(4000,1))
Lbd4Rem = L_p(bd4Rem.synthetic(4000,1))
Lbd1Full = L_p(bd1Full.synthetic(4000,1))
Lbd2Full = L_p(bd2Full.synthetic(4000,1))
Lbd3Full = L_p(bd3Full.synthetic(4000,1))
Lbd4Full = L_p(bd4Full.synthetic(4000,1))
all_bf = (Lbb1Rem, Lbb2Rem, Lbb3Rem, Lbb4Rem, Lbb1Full, Lbb2Full, Lbb3Full, Lbb4Full,
Lbf1Rem, Lbf2Rem, Lbf3Rem, Lbf4Rem, Lbf1Full, Lbf2Full, Lbf3Full, Lbf4Full,
Lbca1Full, Lbca2Full, Lbca3Full, Lbca4Full,
Lbe1Rem, Lbe2Rem, Lbe3Rem, Lbe4Rem, Lbe1Full, Lbe2Full, Lbe3Full, Lbe4Full,
Lbm1Full, Lbm2Full, Lbm3Full, Lbm4Full,
Lbcsc1Rem, Lbcsc2Rem, Lbcsc3Rem, Lbcsc4Rem, Lbcsc1Full, Lbcsc2Full, Lbcsc3Full, Lbcsc4Full,
Lbort1Rem, Lbort2Rem, Lbort3Rem, Lbort4Rem, Lbort1Full, Lbort2Full, Lbort3Full, Lbort4Full,
Lbcmf1Rem, Lbcmf2Rem, Lbcmf3Rem, Lbcmf4Rem, Lbcmf1Full, Lbcmf2Full, Lbcmf3Full, Lbcmf4Full,
Lpsf1, Lpsf2, Lpsf3, Lpsf4,
Lbcpsf1Rem, Lbcpsf2Rem, Lbcpsf3Rem, Lbcpsf4Rem, Lbcpsf1Full, Lbcpsf2Full, Lbcpsf3Full, Lbcpsf4Full,
Lbd1Rem, Lbd2Rem, Lbd3Rem, Lbd4Rem, Lbd1Full, Lbd2Full, Lbd3Full, Lbd4Full)
# Pickle protocol -1 produces a *binary* stream, and this file imports the
# Python 3 `pickle` module above, so the savefiles must be opened in binary
# mode ('wb'/'rb'); text mode raises TypeError under Python 3.
fi = open('all_bfWeave.sav','wb') # This file saves the outputs of the current acoular version
#fi = open('all_bfNumba.sav','wb') # This file saves the outputs of the new acoular version, which has to be validated
dump(all_bf,fi,-1) # uses newest pickle protocol -1 (default = 0)
fi.close()
#"""
#%% Compare Weave vs Numba
fi = open('all_bfWeave.sav','rb')
all_bfWeave = load(fi)
fi.close()
fi = open('all_bfNumba.sav','rb')
all_bfNumba = load(fi)
fi.close()
# remove all negative levels
err = [] # keep in mind that these are levels!!!
for cnt in range(len(all_bfNumba)):
    # Clamp negative levels in BOTH result sets to a tiny floor so the
    # symmetric relative difference below cannot blow up at zeros.
    all_bfNumba[cnt][all_bfNumba[cnt] < 0] = all_bfWeave[cnt][all_bfWeave[cnt] < 0] = 1e-20
    # Symmetric relative difference: 2*(a-b)/(a+b).
    relDiff = (all_bfWeave[cnt] - all_bfNumba[cnt]) / (all_bfWeave[cnt] + all_bfNumba[cnt]) * 2
    err.append(amax(amax(amax(abs(relDiff), 0), 0), 0))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
1212,
4226,
318,
257,
1037,
329,
10627,
611,
262,
649,
36871,
4339,
5499,
389,
9380,
220,
201,
198,
18908,
4111,
656,
936,
2852,
283,
13,
201,
19... | 2.115944 | 6,529 |
from django import template
register = template.Library()
@register.filter(name='percent') | [
6738,
42625,
14208,
1330,
11055,
198,
198,
30238,
796,
11055,
13,
23377,
3419,
198,
198,
31,
30238,
13,
24455,
7,
3672,
11639,
25067,
11537
] | 3.833333 | 24 |
"""
Styling for prompt_toolkit applications.
"""
from __future__ import unicode_literals
from .base import *
from .defaults import *
from .from_dict import *
from .from_pygments import *
from .utils import *
#: The default built-in style.
#: (For backwards compatibility, when Pygments is installed, this includes the
#: default Pygments style.)
# Use the Pygments-derived default style when Pygments is importable;
# otherwise fall back to the pure prompt_toolkit extension styles.
try:
    import pygments
except ImportError:
    DEFAULT_STYLE = style_from_dict(DEFAULT_STYLE_EXTENSIONS)
else:
    DEFAULT_STYLE = style_from_pygments()
| [
37811,
198,
18716,
1359,
329,
6152,
62,
25981,
15813,
5479,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
764,
8692,
1330,
1635,
198,
6738,
764,
12286,
82,
1330,
1635,
198,
6738,
764,
... | 3.111111 | 162 |
from sqlalchemy.ext.declarative import declarative_base
# Base sqlalchemy
# Declarative base class that all ORM model classes should inherit from.
Base = declarative_base()
| [
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
1330,
2377,
283,
876,
62,
8692,
198,
198,
2,
7308,
44161,
282,
26599,
198,
14881,
796,
2377,
283,
876,
62,
8692,
3419,
198
] | 3.060606 | 33 |
# -*- coding: utf-8 -*-
from typing import Iterable, ClassVar
from .actions import GenerateCertAction, RemoveCertAction, GenerateSignerCertificateAction
from .schema import CertsSchema
from ..feature import Feature
from ..schema import FeatureSchema
from ...action import Action
class CertsFeature(Feature):
"""
Generate SSL certificates for your project.
"""
@property
@property
@property
@property
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
19720,
1330,
40806,
540,
11,
5016,
19852,
198,
198,
6738,
764,
4658,
1330,
2980,
378,
37608,
12502,
11,
17220,
37608,
12502,
11,
2980,
378,
11712,
263,
37608,
22460,... | 3.320611 | 131 |
"""
Majordomo Protocol client example. Uses the mdcli API to hide all MDP aspects
Author : Min RK <benjaminrk@gmail.com>
"""
import sys
from mdcliapi2 import MajorDomoClient
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this chunk -- presumably the
    # Majordomo client loop defined earlier in the file; confirm.
    main()
| [
37811,
198,
44,
1228,
585,
17902,
20497,
5456,
1672,
13,
36965,
262,
45243,
44506,
7824,
284,
7808,
477,
337,
6322,
7612,
198,
198,
13838,
1058,
1855,
371,
42,
1279,
11722,
13337,
81,
74,
31,
14816,
13,
785,
29,
198,
198,
37811,
198,
... | 2.88 | 75 |
# Make a program that reads a person's name and displays a welcome message.
| [
2,
6889,
257,
1430,
326,
9743,
257,
1048,
338,
1438,
290,
11298,
257,
7062,
3275,
13,
198
] | 4.470588 | 17 |
from . import ppo
# Registry mapping an algorithm name to its implementation class.
AGENTS = {
    "PPO": ppo.PPOAlgo,
}
| [
6738,
764,
1330,
279,
7501,
198,
198,
4760,
15365,
796,
1391,
198,
220,
220,
220,
366,
10246,
46,
1298,
279,
7501,
13,
10246,
46,
2348,
2188,
11,
198,
92,
628
] | 1.9 | 30 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from typing import Optional
from django.apps import apps
from django.db import models
from modules.packages.consts import UserPackageStatus, USER_PACKAGE_STATUSES
from modules.packages.models.utils import get_reward_token
from users.models.end_workers import EndWorker
class UserPackageProgress(models.Model):
"""
Stores information about current progress of `EndWorker`'s annotation
for selected `Package`.
It generates `reward_token`, which is an unique token that can be used
in mturk-like scenarios. If `EndWorker` finish annotations, the code will
be available int the `reward` variable.
"""
user = models.ForeignKey(EndWorker, on_delete=models.CASCADE)
package = models.ForeignKey("Package", on_delete=models.CASCADE,
related_name="progress")
items_done = models.IntegerField(default=0)
status = models.CharField(choices=USER_PACKAGE_STATUSES,
default=UserPackageStatus.NONE,
max_length=32)
reward_token = models.CharField(max_length=32, default=get_reward_token)
@property
def is_completed(self):
"""
If True, it means annotations for this Package should not be continued.
"""
return self.status in [UserPackageStatus.CLOSED, UserPackageStatus.FINISHED]
def close(self):
"""
Manually closes the Package for this user, regardless of annotation progress.
"""
if not self.is_completed:
self.status = UserPackageStatus.CLOSED
self.save()
def update(self):
"""
Run after each annotation finished by the EndWorker.
Updates `items_done` and `status`.
"""
Annotation = apps.get_model("tasks.Annotation")
self.items_done = Annotation.objects.filter(
annotated=True, rejected=False,
item__package=self.package, user=self.user
).values("item").distinct().count()
self.update_status(False)
self.save()
def update_status(self, commit=True):
"""
Updates status based on items annotated by the EndWorker.
:param commit: if True, it will save changes to database
"""
last_status = self.status
if self.status == UserPackageStatus.NONE:
if self.items_done > 0:
self.status = UserPackageStatus.IN_PROGRESS
if self.status == UserPackageStatus.IN_PROGRESS:
if self.items_done == self.items_count:
self.status = UserPackageStatus.FINISHED
if commit and last_status != self.status:
self.save()
@property
def progress(self):
"""
Percentage value of how many items were already annotated by this user
"""
if self.items_count:
return self.items_done / self.items_count
@property
@property
def reward(self) -> Optional[str]:
"""
If the user finished annotation for this package, it will return an unique code.
This code can be used to award the price in systems like `mturk`.
"""
if self.progress >= 1.0:
return self.reward_token
return None
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
42625,
14208,
13,
18211,
1330,
6725,
198,
6738,
42625,... | 2.482733 | 1,332 |
import os
import h5py
import numpy as np
import pytest
from torch.testing import assert_allclose
from loguru import logger
from skimage.data import binary_blobs
import survos
import survos2.frontend.control
from survos2.frontend.control import Launcher
from survos2.entity.pipeline import Patch
import survos2.frontend.control
from survos2.model import DataModel
from survos2.improc.utils import DatasetManager
from survos2.entity.pipeline import run_workflow
from survos2.server.state import cfg
from survos2.server.superseg import sr_predict
from survos2.api.superregions import supervoxels
from survos2.server.superseg import sr_predict
from survos2.frontend.nb_utils import view_dataset
@pytest.fixture(scope="session")
# def test_rasterize_points(self, datamodel):
# DataModel = datamodel
# src = DataModel.g.dataset_uri("__data__", None)
# dst = DataModel.g.dataset_uri("001_gaussian_blur", group="features")
# result = survos.run_command(
# "features", "gaussian_blur", uri=None, src=src, dst=dst
# )
# assert result[0]["id"] == "001_gaussian_blur"
# result = survos.run_command(
# "objects",
# "create",
# uri=None,
# workspace=DataModel.g.current_workspace,
# fullname="test.csv",
# scale=1.0,
# offset=0.0,
# )
# assert result[0]["id"] == "001_points"
# dst = src = DataModel.g.dataset_uri(result[0]["id"], group="objects")
# # add data to workspace
# result = survos.run_command(
# "objects",
# "points",
# uri=None,
# workspace=DataModel.g.current_workspace,
# dtype="float32",
# fullname="test.csv",
# dst=dst,
# scale=1.0,
# offset=(0.0, 0.0, 0.0),
# crop_start=(0.0, 0.0, 0.0),
# crop_end=(0.0, 0.0, 0.0),
# )
# result = survos.run_command(
# "pipelines",
# "create",
# uri=None,
# workspace=DataModel.g.current_workspace,
# pipeline_type="rasterize_points",
# )
# src = DataModel.g.dataset_uri("__data__", None)
# dst = DataModel.g.dataset_uri(result[0]["id"], group="pipelines")
# params = {
# "feature_id": "001_gaussian_blur",
# "object_id": "001_points",
# "acwe": False,
# "size": (2, 2, 2),
# "balloon": 0,
# "threshold": 0,
# "iterations": 1,
# "smoothing": 0,
# }
# result = survos.run_command(
# "pipelines",
# "rasterize_points",
# workspace=DataModel.g.current_workspace,
# src=src,
# dst=dst,
# **params
# )
# def test_objects(self, datamodel):
# DataModel = datamodel
# # add data to workspace
# result = survos.run_command(
# "objects",
# "create",
# uri=None,
# workspace=DataModel.g.current_workspace,
# fullname="test.csv",
# )
# # assert result[0]["id"] == "001_points"
# result = survos.run_command(
# "objects",
# "create",
# uri=None,
# workspace=DataModel.g.current_workspace,
# fullname="test.csv",
# )
# # assert result[0]["id"] == "002_points"
# result = survos.run_command(
# "objects",
# "existing",
# uri=None,
# workspace=DataModel.g.current_workspace,
# dtype="float32",
# )
# assert len(result[0]) == 2
# def test_analyzers(self, datamodel):
# DataModel = datamodel
# add data to workspace
# result = survos.run_command(
# "analyzer", "create", uri=None, workspace=DataModel.g.current_workspace
# )
# assert result[0]["id"] == "001_label_splitter"
# result = survos.run_command(
# "analyzer",
# "existing",
# uri=None,
# workspace=DataModel.g.current_workspace,
# dtype="float32",
# )
# assert len(result[0]) == 1
if __name__ == "__main__":
pytest.main()
| [
11748,
28686,
198,
198,
11748,
289,
20,
9078,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
6738,
28034,
13,
33407,
1330,
6818,
62,
439,
19836,
198,
6738,
2604,
14717,
1330,
49706,
198,
6738,
1341,
9060,
13,
7890,
13... | 1.961261 | 2,220 |
import math
import random
from itertools import product
from chalk import *
random.seed(1337)
h = math.sqrt(3) / 2
h1 = math.cos(math.pi / 3)
dia = hex_variation(12).line_width(0.02).rotate_by(1 / 4)
dia.render("examples/output/hex-variation.png", height=512)
dia.render_svg("examples/output/hex-variation.svg", height=512)
| [
11748,
10688,
198,
11748,
4738,
198,
198,
6738,
340,
861,
10141,
1330,
1720,
198,
198,
6738,
30860,
1330,
1635,
628,
198,
25120,
13,
28826,
7,
1485,
2718,
8,
628,
198,
71,
796,
10688,
13,
31166,
17034,
7,
18,
8,
1220,
362,
198,
71,
... | 2.488889 | 135 |
# -*- coding: utf-8 -
#
# This file is part of restkit released under the MIT license.
# See the NOTICE for more information.
"""
TeeInput replace old FileInput. It use a file
if size > MAX_BODY or memory. It's now possible to rewind
read or restart etc ... It's based on TeeInput from Gunicorn.
"""
import copy
import os
try:
from io import StringIO
except ImportError:
from io import StringIO
import tempfile
from restkit import conn
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
1334,
15813,
2716,
739,
262,
17168,
5964,
13,
198,
2,
4091,
262,
28536,
329,
517,
1321,
13,
628,
198,
37811,
198,
51,
1453,
20560,
6330,
... | 3.318519 | 135 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-10-18 02:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
17,
319,
2177,
12,
940,
12,
1507,
7816,
25,
4349,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
1... | 2.73913 | 69 |
"""
Example 1 of logging usage
"""
import logging
logging.basicConfig(filename="logs.log", filemode="w", level=logging.DEBUG)
def process1():
"""
Process 1, okay?
"""
logging.info("Process 1 is complete...")
return
def process2():
"""
Process 2, okay?
"""
logging.info("Process 2 is complete...")
return
def process3():
"""
Process 3, okay?
"""
logging.info("Process 3 is complete...")
return
logging.info("Started program execution")
while True:
try:
process = input("Choose the process you want to complete: ")
if process == "1":
logging.info("User chose process 1")
process1()
elif process == "2":
logging.info("User chose process 2")
process2()
elif process == "3":
logging.info("User chose process 3")
process3()
else:
logging.warning(f"User chose a process that is not in the list of processes. Input is {process}")
except KeyboardInterrupt:
logging.info("User has exited the program")
break
logging.info("Finished the program execution") | [
37811,
198,
16281,
352,
286,
18931,
8748,
198,
37811,
198,
11748,
18931,
198,
198,
6404,
2667,
13,
35487,
16934,
7,
34345,
2625,
6404,
82,
13,
6404,
1600,
2393,
14171,
2625,
86,
1600,
1241,
28,
6404,
2667,
13,
30531,
8,
198,
198,
4299... | 2.50108 | 463 |
#!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from mock import patch, Mock
from diamond.collector import Collector
from kafka_consumer_lag import KafkaConsumerLagCollector
##########################################################################
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
19617,
28,
40477,
12,
23,
198,
29113,
29113,
7804,
2235,
198,
198,
6738,
1332,
1330,
17573,
14402,
20448,
198,
6738,
1332,
1330,
651,
62,
33327,
273,
62,
11250,
198,
6738,
15290,
1330,
852... | 5.078947 | 76 |
# Unit 3 | Assignment - Py Me Up, Charlie (PyPoll)
# Import Modules/Dependencies
import os
import csv
# Variables
total_votes = 0
khan_votes = 0
correy_votes = 0
li_votes = 0
otooley_votes = 0
# Set Path For File
csvpath = os.path.join('.', 'PyPoll', 'Resources', 'election_data.csv')
# Open & Read CSV File
with open(csvpath, newline='') as csvfile:
# CSV Reader Specifies Delimiter & Variable That Holds Contents
csvreader = csv.reader(csvfile, delimiter=',')
# Read The Header Row First (Skip This Step If There Is No Header)
csv_header = next(csvfile)
# Read Each Row Of Data After The Header
for row in csvreader:
# Calculate Total Number Of Votes Cast
total_votes += 1
# Calculate Total Number Of Votes Each Candidate Won
if (row[2] == "Khan"):
khan_votes += 1
elif (row[2] == "Correy"):
correy_votes += 1
elif (row[2] == "Li"):
li_votes += 1
else:
otooley_votes += 1
# Calculate Percentage Of Votes Each Candidate Won
kahn_percent = khan_votes / total_votes
correy_percent = correy_votes / total_votes
li_percent = li_votes / total_votes
otooley_percent = otooley_votes / total_votes
# Calculate Winner Of The Election Based On Popular Vote
winner = max(khan_votes, correy_votes, li_votes, otooley_votes)
if winner == khan_votes:
winner_name = "Khan"
elif winner == correy_votes:
winner_name = "Correy"
elif winner == li_votes:
winner_name = "Li"
else:
winner_name = "O'Tooley"
# Print Analysis
print(f"Election Results")
print(f"---------------------------")
print(f"Total Votes: {total_votes}")
print(f"---------------------------")
print(f"Kahn: {kahn_percent:.3%}({khan_votes})")
print(f"Correy: {correy_percent:.3%}({correy_votes})")
print(f"Li: {li_percent:.3%}({li_votes})")
print(f"O'Tooley: {otooley_percent:.3%}({otooley_votes})")
print(f"---------------------------")
print(f"Winner: {winner_name}")
print(f"---------------------------")
# Specify File To Write To
output_file = os.path.join('.', 'PyPoll', 'Resources', 'election_data_revised.text')
# Open File Using "Write" Mode. Specify The Variable To Hold The Contents
with open(output_file, 'w',) as txtfile:
# Write New Data
txtfile.write(f"Election Results\n")
txtfile.write(f"---------------------------\n")
txtfile.write(f"Total Votes: {total_votes}\n")
txtfile.write(f"---------------------------\n")
txtfile.write(f"Kahn: {kahn_percent:.3%}({khan_votes})\n")
txtfile.write(f"Correy: {correy_percent:.3%}({correy_votes})\n")
txtfile.write(f"Li: {li_percent:.3%}({li_votes})\n")
txtfile.write(f"O'Tooley: {otooley_percent:.3%}({otooley_votes})\n")
txtfile.write(f"---------------------------\n")
txtfile.write(f"Winner: {winner_name}\n")
txtfile.write(f"---------------------------\n") | [
2,
11801,
513,
930,
50144,
532,
9485,
2185,
3205,
11,
11526,
357,
20519,
39176,
8,
198,
198,
2,
17267,
3401,
5028,
14,
35,
2690,
3976,
198,
11748,
28686,
198,
11748,
269,
21370,
198,
198,
2,
15965,
2977,
198,
23350,
62,
29307,
796,
... | 2.457711 | 1,206 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: hzsunshx
# Created: 2015-03-23 14:42
"""
sift
"""
import aircv as ac
if __name__ == '__main__':
# sift_test()
# tmpl_test()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
6434,
25,
220,
289,
89,
19155,
1477,
87,
198,
2,
15622,
25,
1853,
12,
3070,
12,
1954,
1478,
25,
3682... | 2.030928 | 97 |
import re
from typing import (
TYPE_CHECKING,
Any,
Tuple,
)
from urllib import (
parse,
)
from platon_typing import (
URI,
BlockNumber,
HexStr,
)
from platon_utils import (
add_0x_prefix,
is_integer,
remove_0x_prefix,
)
from hexbytes import (
HexBytes,
)
from platonpm.constants import (
SUPPORTED_CHAIN_IDS,
)
if TYPE_CHECKING:
from platon import Web3
BLOCK = "block"
BIP122_URL_REGEX = (
"^"
"blockchain://"
"(?P<chain_id>[a-zA-Z0-9]{64})"
"/"
"(?P<resource_type>block|transaction)"
"/"
"(?P<resource_hash>[a-zA-Z0-9]{64})"
"$"
)
BLOCK_OR_TRANSACTION_HASH_REGEX = "^(?:0x)?[a-zA-Z0-9]{64}$"
def create_BIP122_uri(
chain_id: HexStr, resource_type: str, resource_identifier: HexStr
) -> URI:
"""
See: https://github.com/bitcoin/bips/blob/master/bip-0122.mediawiki
"""
if resource_type != BLOCK:
raise ValueError("Invalid resource_type. Must be one of 'block'")
elif not is_block_or_transaction_hash(resource_identifier):
raise ValueError(
"Invalid resource_identifier. Must be a hex encoded 32 byte value"
)
elif not is_block_or_transaction_hash(chain_id):
raise ValueError("Invalid chain_id. Must be a hex encoded 32 byte value")
return URI(
parse.urlunsplit(
[
"blockchain",
remove_0x_prefix(chain_id),
f"{resource_type}/{remove_0x_prefix(resource_identifier)}",
"",
"",
]
)
)
| [
11748,
302,
198,
6738,
19720,
1330,
357,
198,
220,
220,
220,
41876,
62,
50084,
2751,
11,
198,
220,
220,
220,
4377,
11,
198,
220,
220,
220,
309,
29291,
11,
198,
8,
198,
6738,
2956,
297,
571,
1330,
357,
198,
220,
220,
220,
21136,
11... | 2.0625 | 768 |
# terrascript/resource/vmware/vmc.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:30:35 UTC)
import terrascript
__all__ = [
"vmc_cluster",
"vmc_public_ip",
"vmc_sddc",
"vmc_site_recovery",
"vmc_srm_node",
]
| [
2,
8812,
15961,
14,
31092,
14,
14761,
1574,
14,
14761,
66,
13,
9078,
198,
2,
17406,
4142,
7560,
416,
4899,
14,
15883,
8189,
13,
9078,
357,
1731,
12,
19117,
12,
1238,
2481,
1315,
25,
1270,
25,
2327,
18119,
8,
198,
11748,
8812,
15961,... | 2.135593 | 118 |
###########################################################################
# ____ _____________ __ __ __ _ _____ ___ _ #
# / __ \/ ____/ ___/\ \/ / | \/ (_)__ _ _ __|_ _/ __| /_\ (R) #
# / / / / __/ \__ \ \ / | |\/| | / _| '_/ _ \| || (__ / _ \ #
# / /_/ / /___ ___/ / / / |_| |_|_\__|_| \___/|_| \___/_/ \_\ #
# /_____/_____//____/ /_/ T E C H N O L O G Y L A B #
# #
# Copyright 2021 Deutsches Elektronen-Synchrotron DESY. #
# SPDX-License-Identifier: BSD-3-Clause #
# #
###########################################################################
import argparse
import os
import sys
import yaml
from datetime import datetime
import logging
from frugy.__init__ import __version__
from frugy.fru import Fru
from frugy.fru_registry import FruRecordType, rec_enumerate, rec_lookup_by_name, rec_info, schema_entry_info
from frugy.types import FruAreaChecksummed
from frugy.multirecords import MultirecordEntry
if __name__ == '__main__':
main()
| [
29113,
29113,
7804,
21017,
198,
2,
220,
220,
220,
220,
220,
220,
1427,
220,
220,
2602,
29343,
220,
11593,
220,
220,
220,
11593,
220,
11593,
4808,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
29343,
46444,
220,
220,
4808,
2... | 1.96519 | 632 |
"""Tests for ietf.py"""
from pathlib import Path
from dataplaybook.const import ATable
import dataplaybook.tasks.ietf as ietf
def test_extract_standards():
"""Test starting from string."""
txt = "IEEE 802.3ah"
std = list(ietf.extract_standards(txt))
assert std == ["IEEE 802.3ah"]
txt = "draft-ietf-l3vpn-2547bis-mcast-bgp-08.txt"
std = list(ietf.extract_standards(txt))
assert std == ["draft-ietf-l3vpn-2547bis-mcast-bgp-08"]
assert std[0].key == "draft-ietf-l3vpn-2547bis-mcast-bgp"
def test_extract_standards_pad():
"""Test starting from string."""
txt = "RFC1 RFC11 RFC111 RFC1111 RFC11116"
std = list(ietf.extract_standards(txt))
assert std == ["RFC0001", "RFC0011", "RFC0111", "RFC1111", "RFC11116"]
def test_extract_standards_version():
"""Test starting from string."""
txt = "draft-ietf-standard-01 draft-ietf-std--zz draft-ietf-std-01--zz"
std = list(ietf.extract_standards(txt))
assert std == ["draft-ietf-standard-01", "draft-ietf-std", "draft-ietf-std-01"]
assert std[0].key == "draft-ietf-standard"
assert std[1].key == "draft-ietf-std"
assert std[2].key == "draft-ietf-std"
def test_extract_standards_ordered():
"""Test starting from string."""
txt = "RFC 1234 draft-ietf-standard-01 "
std = list(ietf.extract_standards(txt))
assert std == ["draft-ietf-standard-01", "RFC1234"]
std = list(ietf.extract_standards_ordered(txt))
assert std == ["RFC1234", "draft-ietf-standard-01"]
def test_extract_standards_unique():
"""Test duplicates are removed."""
txt = "RFC1234 RFC1234"
std = list(ietf.extract_standards(txt))
assert std == ["RFC1234"]
assert std[0].start == 0
def test_extract_x_all():
"""Test all know variants."""
allitems = (
"RFC1234",
("RFC 2345", "RFC2345"),
"IEEE 802.1x",
("801.2x", "IEEE 801.2x"),
"ITU-T G.1111.1",
"3GPP Release 11",
"GR-1111-CORE",
"ITU-T I.111",
"gnmi.proto version 0.0.1",
"a-something-mib",
"openconfig-a-global.yang version 1.1.1",
"ANSI T1.101.11",
)
txt = ""
exp = []
for itm in allitems:
if isinstance(itm, tuple):
txt += itm[0] + " "
exp.append(itm[1])
else:
txt += itm + " "
exp.append(itm)
std = list(ietf.extract_standards_ordered(txt))
assert std == exp
def test_task_add_std_col():
"""Add column."""
table = [{"ss": "rfc 1234 rfc 5678"}]
ietf.add_standards_column(table=table, rfc_col="r", columns=["ss"])
assert "r" in table[0]
assert table[0]["r"] == "RFC1234, RFC5678"
def test_extract_std():
"""Extract std."""
table = [{"ss": "rfc 1234 rfc 5678 rfc 3GPP Release 10"}, {"ss": "rfc 9999"}]
resttt = ietf.extract_standards_from_table(
table=table, extract_columns=["ss"] # , include_columns=[], # rfc_col="r",
)
assert isinstance(resttt, list)
res = list(resttt)
assert len(res) == 4
assert "name" in res[0]
assert "key" in res[0]
assert "lineno" in res[0]
assert res[0] == {"name": "RFC1234", "key": "RFC1234", "lineno": 1}
assert res[1] == {"name": "RFC5678", "key": "RFC5678", "lineno": 1}
assert res[2] == {"name": "3GPP Release 10", "key": "3GPP Release 10", "lineno": 1}
assert res[3] == {"name": "RFC9999", "key": "RFC9999", "lineno": 2}
table = ATable(table)
table.name = "ttt"
resttt = ietf.extract_standards_from_table(table=table, extract_columns=["ss"])
res = list(resttt)
assert res[0] == {"name": "RFC1234", "key": "RFC1234", "table": "ttt", "lineno": 1}
assert res[1] == {"name": "RFC5678", "key": "RFC5678", "table": "ttt", "lineno": 1}
assert res[2] == {
"name": "3GPP Release 10",
"key": "3GPP Release 10",
"table": "ttt",
"lineno": 1,
}
assert res[3] == {"name": "RFC9999", "key": "RFC9999", "table": "ttt", "lineno": 2}
def test_extract_standards_case():
"""Test starting from string."""
txt = "mfa fORUM 0.0.0 gNMI.Proto vERSION 0.1.0 file.Proto vERSION 0.0.1"
std = list(ietf.extract_standards(txt))
assert std[0].key == "gnmi.proto"
assert std[1].key == "file.proto"
assert std[2].key == "MFA Forum 0.0.0"
assert std == [
"gnmi.proto version 0.1.0",
"file.proto version 0.0.1",
"MFA Forum 0.0.0",
]
txt = "rfc openconfig-isis-policy.yang vErsion 0.3.0, a"
std = list(ietf.extract_standards(txt))
assert std[0].key == "openconfig-isis-policy.yang"
assert std[0] == "openconfig-isis-policy.yang version 0.3.0"
def test_compliance_file():
"""Test a local compliance file."""
Path("../../testcases.xlsx").resolve()
# Path("../../testcases.xlsx").resolve(strict=True)
| [
37811,
51,
3558,
329,
220,
1155,
69,
13,
9078,
37811,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
1366,
1759,
2070,
13,
9979,
1330,
5161,
540,
198,
11748,
1366,
1759,
2070,
13,
83,
6791,
13,
1155,
69,
355,
220,
1155,
69,
628... | 2.203729 | 2,199 |