content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import requests
from apikey import apikey # Your API key, it's better not to store it in the program
# Enter the WoS search query to evaluate its self-citation percentage:
search_query = '(TS=("self citation*" or selfcitation*)) AND (TP==("HIGHLY CITED PAPERS"))'
headers = {
'X-APIKey': apikey
}
endpoint = "https://api.clarivate.com/api/wos"
# This will save several API queries/records received by storing the already checked citing papers locally
checked_citing_papers = [('ut', 'cited_paper')]
# This is the function that performs the self-citation calculation for every cited reference. If the self-citation event
# has been identified by the above calculation() function, then the citing document is analyzed for the number of
# references to that particular cited document. This is required because the number of citations and the number of
# citing documents are not the same thing. One citing document can have multiple cited references leading to the cited
# one, so the total amount of citations to a paper can sometimes be significantly higher than the number of citing
# records.
a = cited_papers()
b = citing_papers(a)
self_citations(a)
| [
11748,
7007,
198,
6738,
2471,
522,
88,
1330,
2471,
522,
88,
220,
220,
1303,
3406,
7824,
1994,
11,
340,
338,
1365,
407,
284,
3650,
340,
287,
262,
1430,
198,
198,
2,
6062,
262,
22173,
50,
2989,
12405,
284,
13446,
663,
2116,
12,
66,
... | 3.748408 | 314 |
from abc import ABCMeta
from types import TracebackType
from typing import ContextManager
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing_extensions import Literal
from typing_extensions import Protocol
from fbsrankings.common import Command
from fbsrankings.common import CommandBus
from fbsrankings.common import EventBus
from fbsrankings.common import Query
from fbsrankings.common import QueryBus
from fbsrankings.domain import RaiseBehavior
from fbsrankings.domain import ValidationError
from fbsrankings.domain import ValidationService
from fbsrankings.infrastructure import QueryManagerFactory
from fbsrankings.infrastructure import TransactionFactory
from fbsrankings.infrastructure.memory import DataSource as MemoryDataSource
from fbsrankings.infrastructure.sportsreference import SportsReference
from fbsrankings.infrastructure.sqlite import DataSource as SqliteDataSource
from fbsrankings.service.command import CommandManager
from fbsrankings.service.config import Config
from fbsrankings.service.config import ConfigStorageType
R = TypeVar("R", covariant=True)
| [
6738,
450,
66,
1330,
9738,
48526,
198,
6738,
3858,
1330,
34912,
1891,
6030,
198,
6738,
19720,
1330,
30532,
13511,
198,
6738,
19720,
1330,
7343,
198,
6738,
19720,
1330,
32233,
198,
6738,
19720,
1330,
5994,
198,
6738,
19720,
1330,
5994,
198... | 4.067376 | 282 |
import inspect
import sys
import typing
from dataclasses import dataclass
if sys.version_info < (3, 9):
from typing_extensions import Annotated, get_args, get_origin
else:
from typing import Annotated, get_origin, get_args
from di.typing import get_markers_from_parameter
from xpresso._utils.typing import model_field_from_param
from xpresso.binders.api import ModelNameMap, OpenAPIBody, OpenAPIBodyMarker, Schemas
from xpresso.binders.dependants import BodyBinderMarker
from xpresso.openapi import models as openapi_models
@dataclass(frozen=True)
@dataclass(frozen=True)
| [
11748,
10104,
198,
11748,
25064,
198,
11748,
19720,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
361,
25064,
13,
9641,
62,
10951,
1279,
357,
18,
11,
860,
2599,
198,
220,
220,
220,
422,
19720,
62,
2302,
5736,
1330,
10... | 2.994898 | 196 |
from operator import itemgetter
import time
import math
import random
import numpy as np
import datetime
from osgeo import ogr, osr
latlongToAlbers = getCoordConverter(4326,5070)
albersToLatlong = getCoordConverter(5070,4326)
start_date = datetime.datetime(1992,1,1)
end_date = datetime.datetime(2017,12,31)
current_date = start_date
increment = datetime.timedelta(minutes=15)
sample_point = (-41.8822705,28.4248646) # (Long, Lat)
travel_path = [sample_point]
while current_date < end_date:
# while line != "":
# line = sea_file.readline()
# point_data = line.split(',')
# try:
# print(type(point_data))
# print(type(point_data[1]))
# print(datetime.datetime.strptime(point_data[1][1],"%Y-%m-%d"))
# # sorted(point_data, key=lambda e: datetime.datetime.strptime(e[1], "%Y-%m-%d"))
# except Exception:
# print("sorting didn't work")
# print(point_data)
# line = ""
bin_file = f"ecco_{str(current_date.year).zfill(4)}-{str(current_date.month).zfill(2)}_000.npy"
curr_vector_field = np.load(f"../images/{bin_file}")
[y,x] = latlongToIndex(sample_point)
# print(f"Index: {[y,x]}")
# print(f"Possible Index: {curr_vector_field[y,x]}")
# print(f"Possible Index: {curr_vector_field[x,y]}")
# print(f"Does this shit even exist???? {curr_vector_field[360-y-1,x]}")
curr_vector = curr_vector_field[y,x]
if np.isnan(curr_vector[0]):
neighbors = get_neighbors(curr_vector_field, x, y)
if len(neighbors) is not 0:
curr_vector = random.choice(neighbors)
sample_point = move_point(sample_point, curr_vector)
travel_path.append(sample_point)
current_date += increment
| [
6738,
10088,
1330,
2378,
1136,
353,
198,
11748,
640,
198,
11748,
10688,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4818,
8079,
198,
6738,
28686,
469,
78,
1330,
267,
2164,
11,
267,
27891,
198,
198,
15460,
6511,
251... | 2.197484 | 795 |
from .example import my_func
| [
6738,
764,
20688,
1330,
616,
62,
20786,
201,
198
] | 3.333333 | 9 |
import time
from Xboxcmd import *
import pygame
pygame.init()
pygame.joystick.init()
#查看现在有几个遥控器
joycount = pygame.joystick.get_count()
print("joycount:"+str(joycount))
#连接第一个控制器
joystick = pygame.joystick.Joystick(0)
while True:
#接收事件
pygame.event.get()
axis = get_axis(joystick=joystick)
button = get_button(joystick=joystick)
hats = get_hats(joystick=joystick)
print("_____________")
print(" axis_value:")
print(axis)
print(" button_value")
print(button[3])
print("hat_value")
print(hats)
print("_____________")
time.sleep(3)
| [
11748,
640,
198,
6738,
9445,
28758,
1330,
1635,
198,
11748,
12972,
6057,
198,
198,
9078,
6057,
13,
15003,
3419,
198,
9078,
6057,
13,
2633,
13915,
13,
15003,
3419,
198,
198,
2,
162,
253,
98,
40367,
233,
163,
236,
108,
28839,
101,
17312... | 2.073684 | 285 |
'''
@author:yk7333
last modified:2021-4-7
language:python
'''
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import os
if __name__ == "__main__":
os.chdir("C:\\Users\\m\\Desktop\\第四次作业")
for i in range(3,8,2): #3,5,7
img=read("test2.tif") #第一问
gaussion=Blur(img,i,"Gaussion")
median=Blur(img,i,"Median")
save("gaussion2{0}x{1}.jpg".format(i,i),gaussion)
save("medium2{0}x{1}.jpg".format(i,i),median)
for i in range(3,8,2):
print(Gaussion(i,1.5)) #第二问
print("\n")
img3=read("test3_corrupt.pgm")
img4=read("test4 copy.bmp")
#unshape masking
img3_blur=Blur(img3,5,sigma=1) #采用5x5高斯滤波进行模糊处理
img4_blur=Blur(img4,5,sigma=1)
mask3=img3-img3_blur
mask4=img4-img4_blur
save("img3_unmask.jpg",mask3)
save("img4_unmask.jpg",mask4)
#Sobel edge detector
sobelx=cv.Sobel(img3,cv.CV_64F,0,1,ksize=3)
sobelx=cv.convertScaleAbs(sobelx)
sobely=cv.Sobel(img3,cv.CV_64F,1,0,ksize=3)
sobely=cv.convertScaleAbs(sobely)
sobelxy=cv.addWeighted(sobelx,0.5,sobely,0.5,0)
save("img3_sobel.jpg",sobelxy)
sobelx=cv.Sobel(img4,cv.CV_64F,0,1,ksize=3)
sobelx=cv.convertScaleAbs(sobelx)
sobely=cv.Sobel(img4,cv.CV_64F,1,0,ksize=3)
sobely=cv.convertScaleAbs(sobely)
sobelxy=cv.addWeighted(sobelx,0.5,sobely,0.5,0)
save("img4_sobel.jpg",sobelxy)
#laplace edge detection
laplacian = cv.Laplacian(img3,cv.CV_64F)
laplacian = cv.convertScaleAbs(laplacian)
save("img3_lap.jpg",laplacian)
laplacian = cv.Laplacian(img4,cv.CV_64F)
laplacian = cv.convertScaleAbs(laplacian)
save("img4_lap.jpg",laplacian)
#canny algorithm
canny=cv.Canny(img3,50,80)
save("img3_canny.jpg",canny)
canny=cv.Canny(img4,50,80)
save("img4_canny.jpg",canny)
| [
7061,
6,
198,
31,
9800,
25,
48361,
22,
20370,
220,
198,
938,
9518,
25,
1238,
2481,
12,
19,
12,
22,
220,
220,
198,
3303,
25,
29412,
198,
7061,
6,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
11748,
299,
32152,
355,
45941,
198,
117... | 1.716216 | 1,110 |
from pymongo import MongoClient
if __name__ == '__main__':
print(CustomerRepository().get_customers())
| [
6738,
279,
4948,
25162,
1330,
42591,
11792,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
3601,
7,
44939,
6207,
13264,
22446,
1136,
62,
23144,
364,
28955,
20... | 2.697674 | 43 |
import os
import json
import logging
import logging.config
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import Filters
from config import config
from jobs import JOBS_CALLBACKS
import utils as u
logger = logging.getLogger(__name__)
load_logging_config()
@u.restricted
@u.restricted
@u.restricted
if __name__ == '__main__':
main()
| [
11748,
28686,
201,
198,
11748,
33918,
201,
198,
11748,
18931,
201,
198,
11748,
18931,
13,
11250,
201,
198,
201,
198,
6738,
573,
30536,
13,
2302,
1330,
3205,
67,
729,
201,
198,
6738,
573,
30536,
13,
2302,
1330,
9455,
25060,
201,
198,
6... | 2.607362 | 163 |
import os
import re
import json
import requests
from datetime import datetime
github_headers = {'Authorization': 'token %s' % os.environ.get("GITHUB_TOKEN")}
repo_info_table = {
"vouch-proxy": {
"name": "vouch-proxy",
"type": "github",
"owner": "vouch",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"redis_exporter": {
"name": "redis_exporter",
"type": "github",
"owner": "oliver006",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"mysqld_exporter": {
"name": "mysqld_exporter",
"type": "github",
"owner": "prometheus",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"postgres_exporter": {
"name": "postgres_exporter",
"type": "github",
"owner": "prometheus-community",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"caddy": {
"name": "caddy",
"type": "github",
"owner": "caddyserver",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"envtpl": {
"name": "envtpl",
"type": "github",
"owner": "subfuzion",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"erlang": {
"name": "otp",
"type": "github",
"owner": "erlang",
"match": "^OTP-[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"fluentd": {
"name": "fluentd",
"type": "github",
"owner": "fluent",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"go": {
"name": "go",
"type": "github",
"owner": "golang",
"match": "^go[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"gosu": {
"name": "gosu",
"type": "github",
"owner": "tianon",
"match": "^[0-9]{1,}\.[0-9]{1,}$",
},
"grafana": {
"name": "grafana",
"type": "github",
"owner": "grafana",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"helm": {
"name": "helm",
"type": "github",
"owner": "helm",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"influxdb": {
"name": "influxdb",
"type": "github",
"owner": "influxdata",
"match": "^v[2-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"ini-file": {
"name": "ini-file",
"type": "github",
"owner": "bitnami",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"java": {
"name": "jdk",
"type": "github",
"owner": "openjdk",
"match": "^jdk-[0-9]{1,}\+[0-9]{1,}$",
},
"jq": {
"name": "jq",
"type": "github",
"owner": "stedolan",
"match": "^jq-[0-9]{1,}\.[0-9]{1,}\.?[0-9]{0}$",
},
"kubectl": {
"name": "kubectl",
"type": "github",
"owner": "kubernetes",
"match": "^kubernetes-[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"mariadb": {
"name": "server",
"type": "github",
"owner": "MariaDB",
"match": "^mariadb-[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"mc": {
"name": "mc",
"type": "github",
"owner": "minio",
"match": "^RELEASE\.[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}-[0-9]{2}-[0-9]{2}Z$",
},
"minio": {
"name": "minio",
"type": "github",
"owner": "minio",
"match": "^RELEASE\.[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}-[0-9]{2}-[0-9]{2}Z$",
},
"nginx": {
"name": "nginx",
"type": "github",
"owner": "nginx",
"match": "^release-[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"node": {
"name": "node",
"type": "github",
"owner": "nodejs",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"pack": {
"name": "pack",
"type": "github",
"owner": "buildpacks",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"php": {
"name": "php-src",
"type": "github",
"owner": "php",
"match": "^php-[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"podman": {
"name": "podman",
"type": "github",
"owner": "containers",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"postgresql": {
"name": "postgres",
"type": "github",
"owner": "postgres",
"match": "^REL_[0-9]{1,}_[0-9]{1,}$",
},
"python": {
"name": "cpython",
"type": "github",
"owner": "python",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"rabbitmq": {
"name": "rabbitmq-server",
"type": "github",
"owner": "rabbitmq",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"redis": {
"name": "redis",
"type": "github",
"owner": "redis",
"match": "^[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"redis-sentinel": {
"name": "redis",
"type": "github",
"owner": "redis",
"match": "^[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"registry": {
"name": "distribution",
"type": "github",
"owner": "distribution",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"ruby": {
"name": "ruby",
"type": "github",
"owner": "ruby",
"match": "^v[0-9]{1,}_[0-9]{1,}_[0-9]{1,}$",
},
"rust": {
"name": "rust",
"type": "github",
"owner": "rust-lang",
"match": "^[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"telegraf": {
"name": "telegraf",
"type": "github",
"owner": "influxdata",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"wait-for-port": {
"name": "wait-for-port",
"type": "github",
"owner": "bitnami",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
"wal-g": {
"name": "wal-g",
"type": "github",
"owner": "wal-g",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.?[0-9]{0}$",
},
"yj": {
"name": "yj",
"type": "github",
"owner": "sclevine",
"match": "^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$",
},
}
github_tags_graphql = """
query {
repository(owner: "{owner}", name: "{name}") {
refs(refPrefix: "refs/tags/", first: 10, orderBy: {field: TAG_COMMIT_DATE, direction: DESC}) {
edges {
node {
name
target {
oid
... on Tag {
commitUrl
tagger {
date
}
}
}
}
}
}
}
}
"""
if __name__ == "__main__":
main()
| [
11748,
28686,
198,
11748,
302,
198,
11748,
33918,
198,
11748,
7007,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
12567,
62,
50145,
796,
1391,
6,
13838,
1634,
10354,
705,
30001,
4064,
82,
6,
4064,
28686,
13,
268,
2268,
13,
1136,
... | 1.589418 | 4,177 |
from .args import ArgsWrapper
from .dataset import Dataset
from .consts import DATA_PATH, TRAINING_DATASET | [
6738,
764,
22046,
1330,
943,
14542,
36918,
2848,
198,
6738,
764,
19608,
292,
316,
1330,
16092,
292,
316,
198,
6738,
764,
1102,
6448,
1330,
42865,
62,
34219,
11,
29125,
1268,
2751,
62,
35,
1404,
1921,
2767
] | 2.944444 | 36 |
from Dataset import *
from datetime import *
import time
dataset = Dataset('TestData/Dados.csv')
begin_date = datetime.strptime('2021-08-2 12:00',"%Y-%m-%d %H:%M")
end_date = datetime.strptime('2021-08-7 12:00',"%Y-%m-%d %H:%M")
main_var = 'TU-11C:SS-HLS-Ax48NW5:Level-Mon'
start = time.time()
delays, corrs, names = dataset.correlate(main_var, begin_date, end_date, 0.2)
end = time.time()
print(end - start)
print(delays)
print(corrs)
| [
6738,
16092,
292,
316,
1330,
1635,
198,
6738,
4818,
8079,
1330,
1635,
198,
11748,
640,
198,
198,
19608,
292,
316,
796,
16092,
292,
316,
10786,
14402,
6601,
14,
35,
22484,
13,
40664,
11537,
198,
27471,
62,
4475,
796,
4818,
8079,
13,
25... | 2.211055 | 199 |
import pytest
from textx_ls_core import utils
@pytest.mark.parametrize("uri, expected_ext", [
(None, ''),
('', ''),
('/test/path/file.txt', 'txt'),
('Textxfile', 'Textxfile')
])
| [
11748,
12972,
9288,
198,
6738,
2420,
87,
62,
7278,
62,
7295,
1330,
3384,
4487,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7203,
9900,
11,
2938,
62,
2302,
1600,
685,
198,
220,
220,
220,
357,
14202,
11,
10148,
828,... | 2.305882 | 85 |
from setuptools import setup
setup(
name='openmrsapi',
version='0.1',
description='a library for interacting with openmrs api in python',
url='https://github.com/isears/openmrsapi',
author='Isaac Sears',
author_email='isaac.j.sears@gmail.com',
license='MIT',
packages=['openmrsapi'],
zip_safe=False,
install_requires=[
'requests'
]
) | [
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
9654,
76,
3808,
15042,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
16,
3256,
198,
220,
220,
220,
6764,
11639,
64,
5888,
329,
24986,
35... | 2.449367 | 158 |
from blogposts import app
if __name__ == '__main__':
app.run(host='192.168.43.57',debug=True)
| [
6738,
4130,
24875,
1330,
598,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
598,
13,
5143,
7,
4774,
11639,
17477,
13,
14656,
13,
3559,
13,
3553,
3256,
24442,
28,
17821,
8,
201,
198
] | 2.404762 | 42 |
import asyncio
import logging
import random
import time
from datetime import datetime
from typing import Any, Dict, Optional, Tuple
import reddit_adapter
import subscriptions_manager
import telegram_adapter
workers: Dict[Tuple[int, str], asyncio.Task[Any]] = {}
async def check_exceptions(refresh_period: int = 24 * 60 * 60):
"""
Check whether private or banned subs are now available
"""
while True:
unavailable_subs = subscriptions_manager.unavailable_subreddits()
for sub in unavailable_subs:
try:
try:
await reddit_adapter.new_posts(sub)
except (
reddit_adapter.SubredditPrivate,
reddit_adapter.SubredditBanned,
):
continue
old_subscribers = subscriptions_manager.get_old_subscribers(sub)
for chat_id in old_subscribers:
subscriptions_manager.subscribe(chat_id, sub, 31)
await telegram_adapter.send_message(
chat_id, f"{sub} is now available again"
)
subscriptions_manager.delete_exception(sub)
except Exception as e:
await telegram_adapter.send_exception(
e, f"Exception while checking unavailability of {sub}"
)
await asyncio.sleep(refresh_period)
| [
11748,
30351,
952,
198,
11748,
18931,
198,
11748,
4738,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
32233,
11,
309,
29291,
198,
198,
11748,
18374,
62,
324,
3429,
198,
11748,
... | 2.125369 | 678 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Implementation of the Trigger Unit communication."""
import logging
import re
import socket
_log = logging.getLogger(__name__)
physical_names = {
'A2_Delay': r'Simmer_delay(1uS)',
'A4_Delay': r'Burst_delay(1uS)',
'A4_Number': r'Burst_number',
'A4_Period': r'Burst_period(1uS)',
'A5_Pulse': r'Trigger_Enable_pulse(1uS)',
'B1_Delay': r'ADC_Enable_delay(1uS)',
'B1_Pulse': r'ADC_Enable_pulse(1uS)',
'B2_Delay': r'CMOS_plasma_delay(1uS)',
'B2_Number': r'CMOS_Plasma_number',
'B2_Period': r'CMOS_Plasma_period(1uS)',
'B2_Pulse': r'CMOS_Plasma_pulse(1uS)',
'B4_Delay': r'CMOS_Laser_delay(0.1uS)',
'B4_Pulse': r'CMOS_Laser_pulse(0.1uS)',
'B5_Delay': r'II_Gate_Plasma_delay(0.1uS)',
'B5_Number': r'II_Gate_Plasma_number',
'B5_Period': r'II_Gate_Plasma_period(0.1uS)',
'B5_Pulse': r'II_Gate_Plasma_pulse(0.1uS)',
'B6_Delay': r'II_Plasma_Delay_delay(0.1uS)',
'B6_Pulse': r'II_Plasma_Delay_pulse(0.1uS)',
'B7_Delay': r'II_Gate_Laser_delay(0.1uS)',
'B7_Pulse': r'II_Gate_Laser_pulse(0.1uS)',
'B8_Delay': r'II_Flash_Bool_delay(1uS)',
'B8_Pulse': r'II_Flash_Bool_pulse(1uS)',
'B9_Delay': r'Flash_delay(1uS)',
'B9_Pulse': r'Flash_pulse(1uS)',
'B12_Delay': r'Pockels_delay(1uS)',
'B12_Number': r'Pockels_number',
'B12_Period': r'Pockels_period(1uS)',
'B12_Pulse': r'Pockels_pulse(1uS)',
'TS0_Delay': r'TS0_Delay(1uS)',
'TS0_Period': r'TS0_Period(1uS)',
'Enable_IOs': r'Enable_IOs',
'A1_SW_enable': r'A1_SW_enable',
'A2_SW_enable': r'A2_SW_enable',
'A4_SW_enable': r'A4_SW_enable',
'CMOSPOn': r'CMOSPOn',
'CMOSLOn': r'CMOSLOn'
}
try:
# For Python 3
logical_names = {v: k for k, v in physical_names.items()}
except:
# For Python 2
logical_names = dict((v, k) for k, v in physical_names.iteritems())
regex = re.compile('(\S+)[\s*]=[\s*]"(\S+)"')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
3546,
32851,
286,
262,
24593,
11801,
6946,
526,
15931,
198,
198,
11748,
18931,
198,
11748,
302,
198,
11748,
17802,... | 1.815299 | 1,072 |
store.set_global_value('hotkey', '<ctrl>+e')
engine.set_return_value('<end>')
engine.run_script('chromium')
| [
8095,
13,
2617,
62,
20541,
62,
8367,
10786,
8940,
2539,
3256,
705,
27,
44755,
29,
10,
68,
11537,
198,
18392,
13,
2617,
62,
7783,
62,
8367,
10786,
27,
437,
29,
11537,
198,
18392,
13,
5143,
62,
12048,
10786,
28663,
1505,
11537,
198
] | 2.571429 | 42 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Very weak testing of the basic functionality using unittest and QTest"""
from __future__ import division
__author__ = "Ivan Luchko (luchko.ivan@gmail.com)"
__version__ = "1.0a1"
__date__ = "Apr 4, 2017"
__copyright__ = "Copyright (c) 2017, Ivan Luchko and Project Contributors "
import sys
import os
import subprocess
import unittest
# define pyQt version
try:
from PyQt4.QtGui import QApplication, QDialogButtonBox, QTextCursor
from PyQt4.QtTest import QTest
from PyQt4.QtCore import Qt
except ImportError:
try:
from PyQt5.QtWidgets import QApplication, QDialogButtonBox
from PyQt5.QtGui import QTextCursor
from PyQt5.QtTest import QTest
from PyQt5.QtCore import Qt
except ImportError:
raise ImportError("neither PyQt4 or PyQt5 is found")
from latticegraph_designer.app.main import MainWindow
from latticegraph_designer.app.dialogs import (DialogImportCryst, DialogDistSearch,
MyDialogPreferences, DialogEditXML)
from mpl_animationmanager import QDialogAnimManager
app = QApplication(sys.argv)
test_folder = "./latticegraph_designer/test/"
from latticegraph_designer.app.core import Vertex, Edge, UnitCell, Lattice, CrystalCluster
from latticegraph_designer.app.mpl_pane import GraphEdgesEditor
from matplotlib.backend_bases import KeyEvent, MouseEvent
import matplotlib.pyplot as plt
import numpy as np
class GeeMethodsTest(unittest.TestCase):
'''Test the mpl_pane GraphEdgesEditor methods'''
def test_USE_COLLECTIONS(self):
'''testing the usage of lineCollection for depicting edges'''
GraphEdgesEditor.USE_COLLECTIONS = True
self.setUp()
try:
self.assertEqual(self.gee.UC.num_vertices, 2)
self.assertEqual(self.gee.UC.num_edges, 6)
self.assertEqual(len(self.ax.artists), 6+1) # arrows + new edge
self.assertEqual(len(self.gee.edges_lines), 6)
# collections: vertices, lattice, edges
self.assertEqual(len(self.ax.collections), 1+1+6)
# select edge
_id = 3
self.gee.select_edge(_id)
self.assertTrue(self.gee.e_active_ind == _id)
# remove edge
self.gee.delete_active_edge_callback()
self.assertEqual(self.gee.UC.num_edges, 5)
self.assertEqual(len(self.gee.edges_lines), 5)
# collections: vertices, lattice, edges
self.assertEqual(len(self.ax.collections), 1+1+5)
# clear edges
self.gee.clearEdges_callback()
self.assertEqual(self.gee.UC.num_edges, 0)
self.assertEqual(len(self.ax.artists), 6+1) # arrows + new edge
self.assertEqual(len(self.gee.edges_lines), 0)
# collections: vertices, lattice, edges
self.assertEqual(len(self.ax.collections), 1+1+0)
# add edge
self.addEdge(0, 4)
self.assertEqual(self.gee.UC.num_edges, 1)
self.assertEqual(len(self.gee.edges_lines), 1)
# collections: vertices, lattice, edges
self.assertEqual(len(self.ax.collections), 1+1+1)
except: # we have to set USE_COLLECTIONS=False for other tests
GraphEdgesEditor.USE_COLLECTIONS = False
raise
finally:
GraphEdgesEditor.USE_COLLECTIONS = False
class GeeInteractionTest(unittest.TestCase):
'''Test the mpl_pane keybounding and mouse manipulation'''
class MainWindowTest(unittest.TestCase):
'''Test the MainWindow GUI'''
def setUp(self):
'''Create the GUI'''
self.mainWindow = MainWindow(TEXT_MODE=True)
# def test_terminalLaunch(self):
#
# p = subprocess.Popen(['graphdesigner','&'],
# stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#
# output, error = p.communicate()
#
## p = subprocess.call("graphdesigner", shell=True)
# p.kill()
#
# if p.returncode == 0:
# return output
# else:
# raise Exception(error)
# return "Error"
class PreferencesTest(unittest.TestCase):
'''Test the Preferences manager'''
def setUp(self):
'''Create the GUI'''
self.mainWindow = MainWindow(TEXT_MODE=False)
class AnimaManagerTest(unittest.TestCase):
'''Test the Animation manager'''
def setUp(self):
'''Create the GUI'''
self.mainWindow = MainWindow(TEXT_MODE=False)
class CodeEditorTest(unittest.TestCase):
'''Test the Animation manager'''
def setUp(self):
'''Create the GUI'''
self.mainWindow = MainWindow(TEXT_MODE=True)
if __name__ == "__main__":
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
16371,
4939,
4856,
286,
262,
4096,
11244,
1262,
555,
715,
395,
290,
1195,
14402,
37811,
198,
198,
6738,
1159... | 2.152972 | 2,288 |
# file: config_gen/admin.py
from django.contrib import admin
# Register your models here.
| [
2,
2393,
25,
4566,
62,
5235,
14,
28482,
13,
9078,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
2,
17296,
534,
4981,
994,
13,
198
] | 3.172414 | 29 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from confluent_kafka import TopicPartition
from confluent_kafka.error import ConsumeError, ValueSerializationError
from confluent_kafka.schema_registry.json_schema import (JSONSerializer,
JSONDeserializer)
def _testProduct_to_dict(product_obj, ctx):
"""
Returns testProduct instance in dict format.
Args:
product_obj (_TestProduct): testProduct instance.
ctx (SerializationContext): Metadata pertaining to the serialization
operation.
Returns:
dict: product_obj as a dictionary.
"""
return {"productId": product_obj.product_id,
"productName": product_obj.name,
"price": product_obj.price,
"tags": product_obj.tags,
"dimensions": product_obj.dimensions,
"warehouseLocation": product_obj.location}
def _testProduct_from_dict(product_dict, ctx):
"""
Returns testProduct instance from its dict format.
Args:
product_dict (dict): testProduct in dict format.
ctx (SerializationContext): Metadata pertaining to the serialization
operation.
Returns:
_TestProduct: product_obj instance.
"""
return _TestProduct(product_dict['productId'],
product_dict['productName'],
product_dict['price'],
product_dict['tags'],
product_dict['dimensions'],
product_dict['warehouseLocation'])
def test_json_record_serialization(kafka_cluster, load_file):
"""
Tests basic JsonSerializer and JsonDeserializer basic functionality.
product.json from:
https://json-schema.org/learn/getting-started-step-by-step.html
Args:
kafka_cluster (KafkaClusterFixture): cluster fixture
load_file (callable(str)): JSON Schema file reader
"""
topic = kafka_cluster.create_topic("serialization-json")
sr = kafka_cluster.schema_registry({'url': 'http://localhost:8081'})
schema_str = load_file("product.json")
value_serializer = JSONSerializer(schema_str, sr)
value_deserializer = JSONDeserializer(schema_str)
producer = kafka_cluster.producer(value_serializer=value_serializer)
record = {"productId": 1,
"productName": "An ice sculpture",
"price": 12.50,
"tags": ["cold", "ice"],
"dimensions": {
"length": 7.0,
"width": 12.0,
"height": 9.5
},
"warehouseLocation": {
"latitude": -78.75,
"longitude": 20.4
}}
producer.produce(topic, value=record, partition=0)
producer.flush()
consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
consumer.assign([TopicPartition(topic, 0)])
msg = consumer.poll()
actual = msg.value()
assert all([actual[k] == v for k, v in record.items()])
def test_json_record_serialization_incompatible(kafka_cluster, load_file):
"""
Tests Serializer validation functionality.
product.json from:
https://json-schema.org/learn/getting-started-step-by-step.html
Args:
kafka_cluster (KafkaClusterFixture): cluster fixture
load_file (callable(str)): JSON Schema file reader
"""
topic = kafka_cluster.create_topic("serialization-json")
sr = kafka_cluster.schema_registry({'url': 'http://localhost:8081'})
schema_str = load_file("product.json")
value_serializer = JSONSerializer(schema_str, sr)
producer = kafka_cluster.producer(value_serializer=value_serializer)
record = {"contractorId": 1,
"contractorName": "David Davidson",
"contractRate": 1250,
"trades": ["mason"]}
with pytest.raises(ValueSerializationError,
match=r"(.*) is a required property"):
producer.produce(topic, value=record, partition=0)
def test_json_record_serialization_no_title(kafka_cluster, load_file):
"""
Ensures ValueError raise if JSON Schema definition lacks Title annotation.
Args:
kafka_cluster (KafkaClusterFixture): cluster fixture
load_file (callable(str)): JSON Schema file reader
"""
sr = kafka_cluster.schema_registry({'url': 'http://localhost:8081'})
schema_str = load_file('not_title.json')
with pytest.raises(ValueError,
match="Missing required JSON schema annotation title"):
JSONSerializer(schema_str, sr)
def test_json_record_serialization_custom(kafka_cluster, load_file):
"""
Ensures to_dict and from_dict hooks are properly applied by the serializer.
Args:
kafka_cluster (KafkaClusterFixture): cluster fixture
load_file (callable(str)): JSON Schema file reader
"""
topic = kafka_cluster.create_topic("serialization-json")
sr = kafka_cluster.schema_registry({'url': 'http://localhost:8081'})
schema_str = load_file("product.json")
value_serializer = JSONSerializer(schema_str, sr,
to_dict=_testProduct_to_dict)
value_deserializer = JSONDeserializer(schema_str,
from_dict=_testProduct_from_dict)
producer = kafka_cluster.producer(value_serializer=value_serializer)
record = _TestProduct(product_id=1,
name="The ice sculpture",
price=12.50,
tags=["cold", "ice"],
dimensions={"length": 7.0,
"width": 12.0,
"height": 9.5},
location={"latitude": -78.75,
"longitude": 20.4})
producer.produce(topic, value=record, partition=0)
producer.flush()
consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
consumer.assign([TopicPartition(topic, 0)])
msg = consumer.poll()
actual = msg.value()
assert all([getattr(actual, attribute) == getattr(record, attribute)
for attribute in vars(record)])
def test_json_record_deserialization_mismatch(kafka_cluster, load_file):
"""
Ensures to_dict and from_dict hooks are properly applied by the serializer.
Args:
kafka_cluster (KafkaClusterFixture): cluster fixture
load_file (callable(str)): JSON Schema file reader
"""
topic = kafka_cluster.create_topic("serialization-json")
sr = kafka_cluster.schema_registry({'url': 'http://localhost:8081'})
schema_str = load_file("contractor.json")
schema_str2 = load_file("product.json")
value_serializer = JSONSerializer(schema_str, sr)
value_deserializer = JSONDeserializer(schema_str2)
producer = kafka_cluster.producer(value_serializer=value_serializer)
record = {"contractorId": 2,
"contractorName": "Magnus Edenhill",
"contractRate": 30,
"trades": ["pickling"]}
producer.produce(topic, value=record, partition=0)
producer.flush()
consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
consumer.assign([TopicPartition(topic, 0)])
with pytest.raises(
ConsumeError,
match="'productId' is a required property"):
consumer.poll()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
12131,
7326,
28216,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
... | 2.333912 | 3,459 |
"""
pyexcel_io.utils
~~~~~~~~~~~~~~~~~~~
utility functions
:copyright: (c) 2014-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
import pyexcel_io.constants as constants
# Names of the plugin packages that back each file format.
XLS_PLUGIN = "pyexcel-xls"
XLSX_PLUGIN = "pyexcel-xlsx"
ODS_PLUGIN = "pyexcel-ods"
ODS3_PLUGIN = "pyexcel-ods3"
XLSXW_PLUGIN = "pyexcel-xlsxw"
IO_ITSELF = "pyexcel-io"

# File format -> plugins that can read that format.
AVAILABLE_READERS = {
    constants.FILE_FORMAT_XLS: [XLS_PLUGIN],
    constants.FILE_FORMAT_XLSX: [XLS_PLUGIN, XLSX_PLUGIN],
    constants.FILE_FORMAT_XLSM: [XLS_PLUGIN, XLSX_PLUGIN],
    constants.FILE_FORMAT_ODS: [ODS_PLUGIN, ODS3_PLUGIN],
    constants.FILE_FORMAT_CSV: [IO_ITSELF],
    constants.FILE_FORMAT_TSV: [IO_ITSELF],
    constants.FILE_FORMAT_CSVZ: [IO_ITSELF],
    constants.FILE_FORMAT_TSVZ: [IO_ITSELF],
}

# File format -> plugins that can write that format.
AVAILABLE_WRITERS = {
    constants.FILE_FORMAT_XLS: [XLS_PLUGIN],
    constants.FILE_FORMAT_XLSX: [XLSX_PLUGIN, XLSXW_PLUGIN],
    constants.FILE_FORMAT_XLSM: [XLSX_PLUGIN],
    constants.FILE_FORMAT_ODS: [ODS_PLUGIN, ODS3_PLUGIN],
    constants.FILE_FORMAT_CSV: [IO_ITSELF],
    constants.FILE_FORMAT_TSV: [IO_ITSELF],
    constants.FILE_FORMAT_CSVZ: [IO_ITSELF],
    constants.FILE_FORMAT_TSVZ: [IO_ITSELF],
}
def is_empty_array(array):
    """
    Check whether *array* contains nothing but empty strings.

    Returns True for an empty array as well, matching the behaviour of
    the previous list-building implementation.
    """
    # any() short-circuits on the first non-empty element instead of
    # materialising a filtered list just to measure its length.
    return not any(element != "" for element in array)
def swap_empty_string_for_none(array):
    """Return a copy of *array* with every empty-string field replaced by None.

    Non-empty values (including other falsy ones such as 0) pass through
    unchanged; only the exact value ``""`` is swapped.
    """
    return [None if value == "" else value for value in array]
| [
37811,
198,
220,
220,
220,
12972,
1069,
5276,
62,
952,
13,
26791,
198,
220,
220,
220,
220,
27156,
4907,
93,
628,
220,
220,
220,
10361,
5499,
628,
220,
220,
220,
1058,
22163,
4766,
25,
357,
66,
8,
1946,
12,
5539,
416,
1550,
8461,
1... | 2.194872 | 780 |
import unittest
from cube import RubiksCube
# rename that class
# test solution funcs <- make sure the tests arent interfering with each other
# def test_bottom_layer_robustness(self, n=50):
# for _ in range(n):
# self.cube.initialize_cube()
# self.test_bottom_layer()
# print("Success")
# def test_middle_layer_robustness(self, n=50):
# for _ in range(n):
# self.cube.initialize_cube()
# self.cube._solve_mid_layer()
# def test_top_cross_robustness(self, n=50):
# for _ in range(n):
# self.cube.initialize_cube()
# self.test_top_cross()
# def test_top_face_robustness(self, n=50):
# for _ in range(n):
# self.cube.initialize_cube()
# self.test_top_face()
# def test_top_corners_robustness(self, n=50):
# for _ in range(n):
# self.cube.initialize_cube()
# self.test_top_corners()
# Run the test suite when this module is executed as a script.
if __name__ == '__main__':
    unittest.main()
11748,
555,
715,
395,
198,
6738,
23441,
1330,
6256,
72,
591,
29071,
628,
198,
2,
36265,
326,
1398,
198,
198,
2,
1332,
4610,
1257,
6359,
24293,
787,
1654,
262,
5254,
389,
429,
32874,
351,
1123,
584,
628,
220,
220,
220,
1303,
825,
133... | 2.075099 | 506 |
#coding:utf-8
'''
filename:mysequence.py
chap:6
subject:20
conditions:inherit collections.abc.Sequence
新容器内的对象必须按照一定顺序排列
solution:class MySequence
'''
import collections
import numbers
class MySequence(collections.abc.Sequence):
    '''Read-only sequence that keeps its items ordered by type category.

    ``collections.abc.Sequence`` requires ``__getitem__`` and ``__len__``
    (as this class's original docstring itself noted); they are implemented
    here so the class can actually be instantiated — the previous version
    omitted them and therefore raised TypeError on ``MySequence(...)``.

    Items are grouped category by category (real numbers, strings, tuples,
    lists, dicts, sets, then anything else in arrival order) and sorted
    within each category.
    '''

    # Insertion order of this tuple fixes the category order of the result.
    _CATEGORIES = (numbers.Real, str, tuple, list, dict, set)

    def __init__(self, seq=()):
        # The sequence is immutable: compute the ordered list once.
        self._items = self.order(seq)

    def __getitem__(self, index):
        return self._items[index]

    def __len__(self):
        return len(self._items)

    @staticmethod
    def order(seq):
        '''Return the items of *seq* as a list, sorted within type category.

        Replaces the previous ``eval(cls_string)`` lookup with direct type
        references — same dispatch order, no dynamic evaluation.
        '''
        buckets = {cls: [] for cls in MySequence._CATEGORIES}
        others = []  # items matching no category keep their arrival order
        for item in seq:
            for cls, bucket in buckets.items():
                if isinstance(item, cls):
                    bucket.append(item)
                    break
            else:
                others.append(item)
        ordered = []
        for bucket in buckets.values():
            bucket.sort()  # sort in place inside each category
            ordered += bucket
        return ordered + others
if __name__ == '__main__':
    # Demo: a mixed-type list gets grouped by category and sorted per group.
    l = [1,2,(3,4,55),{'a','b'},{(11,11):111,'name':'laoqi'},(33,5),62,'python',9,'age']
    a = MySequence(l)
    print(l)
    print(a)
    print(len(a))
    print(list(a))
| [
2,
66,
7656,
25,
40477,
12,
23,
198,
198,
7061,
6,
198,
220,
220,
220,
29472,
25,
1820,
43167,
13,
9078,
198,
220,
220,
220,
220,
220,
220,
220,
28022,
25,
21,
198,
220,
220,
220,
2426,
25,
1238,
198,
220,
220,
220,
3403,
25,
... | 1.700219 | 914 |
"""starts a sync remote server
"""
import os
import getpass
import pathlib
import logging
import click
from . import cli
import paramiko
import paramiko.sftp_client
import syncro.support as support
import syncro.cli as cli
logger = logging.getLogger(__name__)
@click.command()
@click.argument("host")
# NOTE(review): hide_input only affects prompting; without prompt=True the
# user is never asked for the password — confirm the intent.
@click.option('--password', hide_input=True)
@click.option('--username', default=lambda: getpass.getuser())
@cli.standard(quiet=True)
def main(host, username, password):
    """Connect to HOST over SSH and print the output of a remote `ls -la`."""
    # Exercise each log level once (presumably to verify the logging setup).
    logger.debug("A")
    logger.info("B")
    logger.warning("C")
    port = 22  # standard SSH port
    print("one", username, password)  # NOTE(review): echoes the password in clear text
    client = paramiko.client.SSHClient()
    client.load_system_host_keys()
    client.load_host_keys(pathlib.Path("~/.ssh/known_hosts").expanduser())
    client.connect(host, port, username=username, password=password)
    transport = client.get_transport()
    transport.set_keepalive(2)  # keepalive ping every 2 seconds
    # Index 1 of support.remote's return value — presumably the command's
    # stdout; verify against syncro.support.
    print(support.remote(transport, ["ls", "-la",])[1])
# @cli.add_logging()
# def two(*args, **kwargs):
# print("two", args, kwargs)
#
# @cli.add_logging(1, b=2)
# def three(*args, **kwargs):
# print("three", args, kwargs)
# Script entry point; click performs the argument parsing.
if __name__ == '__main__':
    main()
| [
37811,
301,
5889,
257,
17510,
6569,
4382,
198,
37811,
198,
11748,
28686,
198,
11748,
651,
6603,
198,
11748,
3108,
8019,
198,
11748,
18931,
198,
198,
11748,
3904,
198,
6738,
764,
1330,
537,
72,
198,
198,
11748,
5772,
12125,
198,
11748,
5... | 2.684091 | 440 |
from typing import List, Tuple
#fenzhi1xiugai
def n31(a: int) -> Tuple[List[int], int]:
    """
    Return the Collatz sequence of a positive integer and its length.

    Args:
        a: a positive integer; 1 is valid and yields ([1], 1).

    Raises:
        TypeError: if *a* is not an int.
        ValueError: if *a* is smaller than 1.

    >>> n31(4)
    ([4, 2, 1], 3)
    """
    if not isinstance(a, int):
        raise TypeError("Must be int, not {}".format(type(a).__name__))
    if a < 1:
        # Bug fix: the old message said "greater than 1" although 1 is a
        # valid input; the actual requirement is a >= 1 (i.e. positive).
        raise ValueError(f"Given integer must be positive, not {a}")
    path = [a]
    while a != 1:
        # Collatz rule: halve even numbers, map odd n to 3n + 1.
        if a % 2 == 0:
            a //= 2
        else:
            a = 3 * a + 1
        path.append(a)
    return path, len(path)
def test_n31():
    """
    Self-test: a short sequence, a medium one, and the 107-step n=31 case.

    >>> test_n31()
    """
    assert n31(4) == ([4, 2, 1], 3)
    assert n31(11) == ([11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1], 15)
    # n=31 is a classic long trajectory: 107 terms, peaking at 9232.
    assert n31(31) == (
        [
            31,
            94,
            47,
            142,
            71,
            214,
            107,
            322,
            161,
            484,
            242,
            121,
            364,
            182,
            91,
            274,
            137,
            412,
            206,
            103,
            310,
            155,
            466,
            233,
            700,
            350,
            175,
            526,
            263,
            790,
            395,
            1186,
            593,
            1780,
            890,
            445,
            1336,
            668,
            334,
            167,
            502,
            251,
            754,
            377,
            1132,
            566,
            283,
            850,
            425,
            1276,
            638,
            319,
            958,
            479,
            1438,
            719,
            2158,
            1079,
            3238,
            1619,
            4858,
            2429,
            7288,
            3644,
            1822,
            911,
            2734,
            1367,
            4102,
            2051,
            6154,
            3077,
            9232,
            4616,
            2308,
            1154,
            577,
            1732,
            866,
            433,
            1300,
            650,
            325,
            976,
            488,
            244,
            122,
            61,
            184,
            92,
            46,
            23,
            70,
            35,
            106,
            53,
            160,
            80,
            40,
            20,
            10,
            5,
            16,
            8,
            4,
            2,
            1,
        ],
        107,
    )
if __name__ == "__main__":
    # Demo run: print the Collatz trajectory of a small starting value.
    start_value = 4
    sequence, steps = n31(start_value)
    print(f"The Collatz sequence of {start_value} took {steps} steps. \nPath: {sequence}")
| [
6738,
19720,
1330,
7343,
11,
309,
29291,
198,
198,
2,
69,
19471,
5303,
16,
29992,
1018,
1872,
198,
4299,
299,
3132,
7,
64,
25,
493,
8,
4613,
309,
29291,
58,
8053,
58,
600,
4357,
493,
5974,
198,
220,
220,
220,
37227,
198,
220,
220,... | 1.382968 | 2,008 |
from nltk import Tree
import nltk
import argparse
import pandas as pandas
import pandas as pd
# Command-line interface: source tree bank and processed output destination.
parser = argparse.ArgumentParser()
parser.add_argument('--infile', default='./ptb-collins.merge.txt', help="preprocessing tree")
#parser.add_argument('--seed', type=int, default=2004, help="random seed for initialization")
parser.add_argument('--outfile', default='./processed_ptb-collins.merge1.txt', help="file containing logs")
if (__name__ == "__main__"):
    args = parser.parse_args()
    # Read every parse-tree line from the input file.
    trees_file = open(args.infile, 'r')
    lines = trees_file.readlines()
    list_lines = [line for line in lines]
    trees_file.close()
    processed_lines = []
    for list_line in list_lines:
        ls=[]
        for tokens in list_line.split():
            # Only opening-bracket tokens carry a nonterminal label.
            if tokens[0] == "(":
                try:
                    # NOTE(review): `string` is never imported and `rmsym` is
                    # not defined anywhere in this file, so this branch always
                    # falls through to the except clause — the intended label
                    # cleanup (presumably stripping '-', '=', '|', '$', '#',
                    # '+' annotations) never runs. Confirm the missing pieces.
                    if tokens[1] in string.ascii_letters:
                        tokens = rmsym('-',tokens)
                        tokens = rmsym('=', tokens)
                        tokens = rmsym('|', tokens)
                        tokens = rmsym('$', tokens)
                        tokens = rmsym('#', tokens)
                        tokens = rmsym('+', tokens)
                except:
                    print("some bugs")
            ls.append(tokens)
        processed_line = " ".join(ls)
        processed_lines.append(processed_line)
    # Write one processed tree per line.
    f=open(args.outfile,'w')
    for ele in processed_lines:
        f.write(ele+'\n')
    f.close()
    print("Pre-processing is done")
6738,
299,
2528,
74,
1330,
12200,
198,
11748,
299,
2528,
74,
198,
11748,
1822,
29572,
198,
11748,
19798,
292,
355,
19798,
292,
198,
11748,
19798,
292,
355,
279,
67,
628,
220,
220,
220,
220,
198,
48610,
796,
1822,
29572,
13,
28100,
171... | 2.069156 | 723 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Efficient Walsh-Hadamard transform in JAX."""
import math
from typing import Tuple, Union
import jax
import jax.numpy as jnp
import scipy
from fedjax.core.typing import PRNGKey, Params
@jax.jit
def walsh_hadamard_transform(
    x: jnp.ndarray,
    small_n: int = 2**7,
    precision: Union[jax.lax.Precision, str] = 'highest') -> jnp.ndarray:
  """Efficient Walsh-Hadamard transform in JAX.

  An accelerator friendly O(n log n) Walsh-Hadamard transform.

  Args:
    x: A vector. len(x) must be a power of 2.
    small_n: Size to break x into. The default value is tuned on TPUv3. Must be
      a power of 2 and > 1.
    precision: Precision for general dot products.

  Returns:
    Transformed vector.

  Raises:
    ValueError: if small_n <= 1, or small_n is too small for len(x).
  """
  if small_n <= 1:
    raise ValueError(f'small_n must be > 1, got {small_n}')
  # Let
  # - A ⊗ B be the Kronecker product of A and B;
  # - flat(X) be the vector obtained by flattening the rows of X of shape
  # [M, N].
  #
  # We can show the following:
  #
  #   (A ⊗ B^T) flat(X) = flat(A X B)
  #
  # The Hadamard matrix H_{2^M 2^N} = H_{2^M} ⊗ H_{2^N}, and Hadamard
  # matrices are symmetrical, therefore for a [2^M, 2^N] matrix X,
  #
  #   H_{2^M 2^N} flat(X) = flat(H_{2^M} X H_{2^N})
  #
  # Generalizing, we break the Hadamard matrix into a Kronecker product of
  # many small (at most small_n x small_n) Hadamard matrices, reshape the
  # input into a many-dimensional array, and run one einsum per dimension.
  # Each einsum is O(D) for an input of length D, and there are O(log D) of
  # them, giving the same O(D log D) as the classical divide and conquer —
  # but with far better accelerator utilisation and a simpler XLA HLO graph.
  input_size = len(x)
  n = input_size
  # Find out the shape to reshape x into.
  shape = []
  while n > 1:
    shape.append(min(n, small_n))
    n //= small_n
  shape.reverse()
  num_dims = len(shape)
  if num_dims + 1 >= 10:
    # We will run out of single-digit dimension names in the einsums below.
    # Bug fix: the previous message interpolated `n`, which the loop above
    # has already reduced to 0 or 1; report the true input size instead.
    raise ValueError(
        f'small_n={small_n} is too small for input size {input_size}')
  y = x.reshape(shape)
  # Hadamard matrices we will need.
  hadamards = dict((d, hadamard_matrix(d, x.dtype)) for d in set(shape))
  # einsum on each dimension.
  for i, d in enumerate(shape):
    y_dims = ''.join(str(j) for j in range(num_dims))
    h_dims = f'{i}{num_dims + 1}'
    out_dims = y_dims.replace(str(i), str(num_dims + 1), 1)
    operands = f'{y_dims},{h_dims}->{out_dims}'
    y = jnp.einsum(operands, y, hadamards[d], precision=precision)
  return y.flatten()


def hadamard_matrix(n: int, dtype: jnp.dtype) -> jnp.ndarray:
  """Generates the Hadamard matrix.

  Because there are JAX dtypes not supported in numpy, the equivalent
  function in scipy can't be used directly.

  Args:
    n: Number of rows/columns of the Hadamard matrix. Must be a power of 2.
    dtype: Output dtype.

  Returns:
    The Hadamard matrix of the given size and type.
  """
  return jnp.array(scipy.linalg.hadamard(n), dtype)
@jax.jit
def structured_rotation(x: jnp.ndarray,
                        rng: PRNGKey) -> Tuple[jnp.ndarray, jnp.ndarray]:
  """Rotates x, computing HD(x)/sqrt(d).

  H is the Walsh-Hadamard matrix, d the padded dimensionality of x, and D a
  random Rademacher (+/-1 diagonal) matrix derived from rng.

  Args:
    x: array to be rotated.
    rng: PRNGKey used for rotation.

  Returns:
    A pair: the rotated flat array and the original shape of x as an array.
  """
  flat = jnp.reshape(x, [-1])
  # Pad up to the next power of two, as required by the Hadamard transform.
  padded_size = 2**math.ceil(math.log2(flat.size))
  padded = jnp.pad(flat, (0, padded_size - x.size))
  signs = jax.random.rademacher(rng, padded.shape)
  rotated = walsh_hadamard_transform(padded * signs) / jnp.sqrt(padded_size)
  return rotated, jnp.array(x.shape)
def inverse_structured_rotation(x: jnp.ndarray, rng: PRNGKey,
                                original_shape: jnp.ndarray) -> jnp.ndarray:
  """Undoes structured_rotation, computing (HD)^(-1)(x)/sqrt(d).

  The Rademacher diagonal D is regenerated from the same rng that produced
  the forward rotation.

  Args:
    x: rotated array, which needs to be unrotated.
    rng: PRNGKey used for rotation.
    original_shape: desired shape of the output.

  Returns:
    The unrotated array reshaped to original_shape.
  """
  signs = jax.random.rademacher(rng, x.shape)
  unrotated = walsh_hadamard_transform(x) * signs / jnp.sqrt(x.size)
  # Drop the zero-padding added by the forward rotation before reshaping.
  target_size = jnp.prod(original_shape)
  prefix = unrotated.take(jnp.arange(target_size))
  return jnp.reshape(prefix, original_shape)
def structured_rotation_pytree(params: Params,
                               rng: PRNGKey) -> Tuple[Params, Params]:
  """Applies structured_rotation to every leaf of a pytree.

  Args:
    params: pytree to be rotated.
    rng: jax random key.

  Returns:
    A pair of pytrees: the rotated leaves and their original shapes.
  """
  leaves, treedef = jax.tree_util.tree_flatten(params)
  keys = jax.random.split(rng, len(leaves))
  # Rotate each leaf with its own subkey, collecting (leaf, shape) pairs.
  pairs = [structured_rotation(leaf, key) for leaf, key in zip(leaves, keys)]
  rotated = [leaf for leaf, _ in pairs]
  shapes = [shape for _, shape in pairs]
  return (jax.tree_util.tree_unflatten(treedef, rotated),
          jax.tree_util.tree_unflatten(treedef, shapes))
def inverse_structured_rotation_pytree(params: Params, rng: PRNGKey,
                                       shapes: Params) -> Params:
  """Applies inverse_structured_rotation to every leaf of a pytree.

  Args:
    params: pytree to be unrotated.
    rng: jax random key.
    shapes: pytree holding the target shape of every leaf.

  Returns:
    A pytree of unrotated leaves reshaped per `shapes`.
  """
  leaves, treedef = jax.tree_util.tree_flatten(params)
  shape_leaves, _ = jax.tree_util.tree_flatten(shapes)
  keys = jax.random.split(rng, len(leaves))
  restored = [
      inverse_structured_rotation(leaf, key, shape)
      for leaf, key, shape in zip(leaves, keys, shape_leaves)
  ]
  return jax.tree_util.tree_unflatten(treedef, restored)
| [
2,
15069,
33448,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 2.605293 | 2,607 |
import sweeper.utils as utils
import unittest
from pprint import PrettyPrinter
from scheduler.manager import create_schedule_plan
from sweeper import Workflow
pp = PrettyPrinter(indent=1)
# Discover and run the tests defined in this module.
if __name__ == '__main__':
    unittest.main()
| [
11748,
3490,
5723,
13,
26791,
355,
3384,
4487,
198,
11748,
555,
715,
395,
198,
198,
6738,
279,
4798,
1330,
20090,
6836,
3849,
198,
6738,
6038,
18173,
13,
37153,
1330,
2251,
62,
15952,
5950,
62,
11578,
198,
6738,
3490,
5723,
1330,
5521,
... | 3.037975 | 79 |
# Training hyper-parameters.
BATCH_SIZE = 128
NUM_CLASSES = 10
EPOCHS = 20

# input image dimensions
IMG_ROWS, IMG_COLS = 28, 28

# Set to False to skip training and reuse previously trained weights.
TO_TRAIN = True
| [
33,
11417,
62,
33489,
796,
13108,
198,
41359,
62,
31631,
1546,
796,
838,
198,
8905,
46,
3398,
50,
796,
1160,
198,
198,
2,
5128,
2939,
15225,
198,
3955,
38,
62,
49,
22845,
11,
8959,
38,
62,
25154,
50,
796,
2579,
11,
2579,
198,
198,... | 2.609375 | 64 |
'''
Calculates the 13C(a,n) cross section
"Free" parameters:
* partial width BGP (1/2+, neutron)
* level energy (3/2+)
* partial width (3/2+, neutron)
* partial width (3/2+, alpha)
'''
import os
import sys
from multiprocessing import Pool
import emcee
import numpy as np
from scipy import stats
import model
########################################
# We'll set up the sampler and get it started.
nw = 4*model.nd # number of walkers = 4 * number of sampled parameters
# Pick a point (theta) in parameter space around which we'll start each walker.
theta0 = [1.87, 2.3689, 35000, -0.61, 3.5002, 57500, -0.67, 3.5451, 45200,
          1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
# Each walker needs its own starting position: draw it from a narrow
# Gaussian ball (sigma = 1% of |theta0|) around theta0.
p0 = np.zeros((nw, model.nd))
for i in range(nw):
    mu = theta0
    sig = np.abs(theta0) * 0.01
    p0[i, :] = stats.norm(mu, sig).rvs()
# We'll store the chain in test_mcmc.h5. (See emcee Backends documentation.)
backend = emcee.backends.HDFBackend('test_mcmc.h5')
backend.reset(nw, model.nd)
nsteps = 1000 # How many steps should each walker take?
nthin = 10 # How often should the walker save a step?
nprocs = 4 # How many Python processes do you want to allocate?
# AZURE2 and emcee are both parallelized. We'll restrict AZURE2 to 1 thread to
# simplify things.
os.environ['OMP_NUM_THREADS'] = '1'
# emcee allows the user to specify the way the ensemble generates proposals:
# 80% snooker moves, 20% differential-evolution moves.
moves = [(emcee.moves.DESnookerMove(), 0.8), (emcee.moves.DEMove(), 0.2)]
with Pool(processes=nprocs) as pool:
    sampler = emcee.EnsembleSampler(nw, model.nd, model.lnP, moves=moves, pool=pool,
                                    backend=backend)
    state = sampler.run_mcmc(p0, nsteps, thin_by=nthin, progress=True, tune=True)
| [
7061,
6,
198,
220,
220,
220,
27131,
689,
262,
1511,
34,
7,
64,
11,
77,
8,
3272,
2665,
198,
220,
220,
220,
366,
11146,
1,
10007,
25,
198,
220,
220,
220,
220,
220,
220,
220,
1635,
13027,
9647,
347,
16960,
357,
16,
14,
17,
28200,
... | 2.461326 | 724 |
import subprocess
import os
import time
import re
runPath = os.path.realpath(os.path.dirname(os.path.abspath(__file__)) + '/../../')
| [
11748,
850,
14681,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
302,
198,
198,
5143,
15235,
796,
28686,
13,
6978,
13,
5305,
6978,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
834,
7753,
834,
4008,
... | 2.755102 | 49 |
from wasmer import engine, wat2wasm, Store, Module, Instance
from wasmer_compiler_cranelift import Compiler
TEST_BYTES = wat2wasm(
"""
(module
(memory 16)
(export "memory" (memory 0)))
"""
)
| [
6738,
373,
647,
1330,
3113,
11,
4383,
17,
86,
8597,
11,
9363,
11,
19937,
11,
2262,
590,
198,
6738,
373,
647,
62,
5589,
5329,
62,
66,
2596,
417,
2135,
1330,
3082,
5329,
198,
198,
51,
6465,
62,
17513,
51,
1546,
796,
4383,
17,
86,
... | 2.340426 | 94 |
A[p]= max( A[i]+ A[i:j]+ f(j,p) + f(i,p) ) +f(1,p)
for p in range(N):
| [
198,
198,
32,
58,
79,
22241,
3509,
7,
317,
58,
72,
48688,
317,
58,
72,
25,
73,
48688,
277,
7,
73,
11,
79,
8,
1343,
277,
7,
72,
11,
79,
8,
220,
220,
1267,
1343,
69,
7,
16,
11,
79,
8,
628,
198,
1640,
279,
287,
2837,
7,
45,... | 1.473684 | 57 |
if __name__ == '__main__':
    # remove_lines() is expected to be defined elsewhere in this module.
    remove_lines()
    print ("done")
| [
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
4781,
62,
6615,
3419,
198,
220,
220,
220,
3601,
5855,
28060,
4943,
628
] | 2.392857 | 28 |
import numpy as np
from .observation import PyBulletObservationType
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
764,
672,
3168,
341,
1330,
9485,
33481,
1616,
31310,
13208,
6030,
628
] | 3.5 | 20 |
from typing import List
from plenum.server.replica_freshness_checker import FreshnessChecker
from crypto.bls.bls_bft_replica import BlsBftReplica
from plenum.common.config_util import getConfig
from plenum.common.event_bus import InternalBus, ExternalBus
from plenum.common.messages.node_messages import Checkpoint
from plenum.common.stashing_router import StashingRouter
from plenum.common.timer import TimerService
from plenum.server.consensus.checkpoint_service import CheckpointService
from plenum.server.consensus.consensus_shared_data import ConsensusSharedData
from plenum.server.consensus.ordering_service import OrderingService
from plenum.server.consensus.view_change_service import ViewChangeService
from plenum.server.request_managers.write_request_manager import WriteRequestManager
from plenum.test.testing_utils import FakeSomething
class ReplicaService:
    """
    This is a wrapper for consensus-related services. Now it is intended
    mostly for simulation tests; however, in the future it can replace the
    actual Replica in plenum.
    """
| [
6738,
19720,
1330,
7343,
198,
198,
6738,
458,
44709,
13,
15388,
13,
35666,
3970,
62,
48797,
1108,
62,
9122,
263,
1330,
20138,
1108,
9787,
263,
198,
198,
6738,
21473,
13,
2436,
82,
13,
2436,
82,
62,
65,
701,
62,
35666,
3970,
1330,
10... | 3.624138 | 290 |
# Script entry point; main() is expected to be defined elsewhere in this module.
if __name__ == '__main__':
    main()
| [
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.105263 | 19 |
'''
Utilities for training TM-Glow in parallel as well as calculating
the loss in parallel on different GPUs for memory purposes.
Original Implementation by Zhang, Rutgers University
https://medium.com/huggingface/training-larger-batches-practical-tips-on-1-gpu-multi-gpu-distributed-setups-ec88c3e51255
=====
Distributed by: Notre Dame SCAI Lab (MIT Liscense)
- Associated publication:
url: http://aimsciences.org//article/id/3a9f3d14-3421-4947-a45f-a9cc74edd097
doi: https://dx.doi.org/10.3934/fods.2020019
github: https://github.com/zabaras/deep-turbulence
=====
'''
import threading
import functools
from itertools import chain
from typing import Optional
import torch
from torch.autograd import Variable, Function
import torch.cuda.comm as comm
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel.parallel_apply import get_a_var
from torch.nn.parallel.scatter_gather import gather
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
from torch._utils import ExceptionWrapper
from torch.cuda._utils import _get_device_index
# First three characters of the torch version string, e.g. "1.9".
torch_ver = torch.__version__[:3]
# Public API of this module.
__all__ = ['allreduce', 'DataParallelCriterion']
def allreduce(*inputs):
    """Cross-GPU all-reduce autograd operation for calculating mean and
    variance in SyncBN.
    """
    # NOTE(review): `AllReduce` is neither defined nor imported in this file
    # as shown — confirm it is provided elsewhere, otherwise this call raises
    # NameError.
    return AllReduce.apply(*inputs)
class DataParallelINNModel(DataParallel):
    """Implements data parallelism at the module level.

    This container parallelizes the application of the given module by
    splitting the input across the specified devices by chunking in the
    batch dimension.
    In the forward pass, the module is replicated on each device,
    and each replica handles a portion of the input. During the backwards pass,
    gradients from each replica are summed into the original module.
    Note that the outputs are not gathered, please use compatible
    :class:`encoding.parallel.DataParallelCriterion`.
    The batch size should be larger than the number of GPUs used. It should
    also be an integer multiple of the number of GPUs so that each chunk is
    the same size (so that each GPU processes the same number of samples).

    Args:
        module: module to be parallelized
        device_ids: CUDA devices (default: all devices)

    Reference:
        Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
        Amit Agrawal. “Context Encoding for Semantic Segmentation.
        *The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*

    Example::

        >>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])
        >>> y = net(x)
    """
    # NOTE(review): the gather() override below is commented out, so the
    # parent DataParallel gather applies — confirm whether outputs are meant
    # to stay on their own devices as the docstring states.
    # def gather(self, outputs, output_device):
    #     return outputs
def inn_parallel_apply(modules, inputs, kwargs_tup=None, devices=None, forward=True):
    r"""Applies each `module` in parallel on arguments
    contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
    on each of :attr:`devices`.

    Args:
        modules (Module): modules to be parallelized
        inputs (tensor): inputs to the modules
        devices (list of int or torch.device): CUDA devices
        forward (bool): if True each module is invoked as ``module(...)``;
            otherwise its ``sample(...)`` method is used.

    :attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
    :attr:`devices` (if given) should all have same length. Moreover, each
    element of :attr:`inputs` can either be a single object as the only argument
    to a module, or a collection of positional arguments.
    """
    assert len(modules) == len(inputs)
    if kwargs_tup is not None:
        assert len(modules) == len(kwargs_tup)
    else:
        kwargs_tup = ({},) * len(modules)
    if devices is not None:
        assert len(modules) == len(devices)
    else:
        devices = [None] * len(modules)
    devices = list(map(lambda x: _get_device_index(x, True), devices))
    lock = threading.Lock()
    results = {}
    grad_enabled = torch.is_grad_enabled()

    # Bug fix: the worker function targeted by the threads below was missing
    # from this file, so every call raised NameError. Restored following the
    # upstream torch.nn.parallel.parallel_apply pattern.
    def _worker(i, module, input, kwargs, device=None):
        # Propagate the caller's grad mode into the worker thread.
        torch.set_grad_enabled(grad_enabled)
        if device is None:
            device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                # Avoid accidental unpacking if `input` is a single tensor.
                if not isinstance(input, (list, tuple)):
                    input = (input,)
                if forward:
                    output = module(*input, **kwargs)
                else:
                    # NOTE(review): presumed from the original TM-Glow code —
                    # non-forward mode dispatches to the module's sample().
                    output = module.sample(*input, **kwargs)
            with lock:
                results[i] = output
        except Exception:
            # Capture the exception with its traceback so it can be re-raised
            # in the main thread.
            with lock:
                results[i] = ExceptionWrapper(
                    where="in replica {} on device {}".format(i, device))

    # Start thread for each GPU worker
    # Distribute scattered inputs and arguments to each GPU
    if len(modules) > 1:
        threads = [threading.Thread(target=_worker,
                                    args=(i, module, input, kwargs, device))
                   for i, (module, input, kwargs, device) in
                   enumerate(zip(modules, inputs, kwargs_tup, devices))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single replica: run inline, no thread overhead.
        _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])

    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            output.reraise()
        outputs.append(output)
    return outputs
class DataParallelCriterion(DataParallel):
    """
    Calculate loss in multiple-GPUs, which balance the memory usage.
    The targets are splitted across the specified devices by chunking in
    the batch dimension.

    Reference:
        Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
        Amit Agrawal. “Context Encoding for Semantic Segmentation.
        *The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*

    Example::

        >>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])
        >>> criterion = encoding.nn.DataParallelCriterion(criterion, device_ids=[0, 1, 2])
        >>> y = net(x)
        >>> loss = criterion(y, target)
    """
    # NOTE(review): no forward() override appears in this chunk, so the class
    # currently behaves like plain DataParallel — confirm the custom forward
    # (which should consume per-device outputs) was not lost.
def execute_replication_callbacks(modules):
    """
    Execute a replication callback `__data_parallel_replicate__` on each module
    created by original replication.

    The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`

    Note that, as all modules are isomorphic, we assign each sub-module with a
    context (shared among multiple copies of this module on different devices).
    Through this context, different copies can share some information.

    We guarantee that the callback on the master copy (the first copy) will be
    called ahead of calling the callback of any slave copies.
    """
    master_copy = modules[0]
    # One shared context per sub-module of the master copy.
    nr_modules = len(list(master_copy.modules()))
    # NOTE(review): `CallbackContext` is not defined or imported in this file
    # as shown — upstream implementations define it as an empty holder class;
    # confirm it exists here.
    ctxs = [CallbackContext() for _ in range(nr_modules)]
    for i, module in enumerate(modules):
        for j, m in enumerate(module.modules()):
            if hasattr(m, '__data_parallel_replicate__'):
                # Replicas are visited in order, so the master (i == 0) runs
                # its callback before any other copy.
                m.__data_parallel_replicate__(ctxs[j], i)
m.__data_parallel_replicate__(ctxs[j], i) | [
7061,
6,
198,
18274,
2410,
329,
3047,
21232,
12,
38,
9319,
287,
10730,
355,
880,
355,
26019,
198,
1169,
2994,
287,
10730,
319,
1180,
32516,
329,
4088,
4959,
13,
198,
198,
20556,
46333,
416,
19439,
11,
30595,
2059,
198,
5450,
1378,
241... | 2.828434 | 2,279 |
import requests
#possible creds generated by level
#one of them is valid for one of your web app
from credentials import creds
#url='http://YOUR_INTERNAL_IP/login'
urls=['http://10.138.0.58/login', 'http://10.138.0.59/login','http://10.138.0.60/login']
# Try every generated credential pair against every login endpoint.
for url in urls:
    for u in creds:
        # prepare data for post request
        payload={'username':u,'password':creds[u]}
        # send username and password through post method to web app url
        # NOTE(review): a new Session is created per request, so no cookies or
        # connections are reused — confirm whether a single shared Session was
        # intended.
        post=requests.Session().post(url, data=payload)
        # check if response text contains invalid credentials
        if 'Invalid credentials' not in post.text:
            # print valid username and password
            print(u+' '+creds[u]+' ' + url )
| [
11748,
7007,
198,
2,
79,
4733,
2600,
82,
7560,
416,
1241,
198,
2,
505,
286,
606,
318,
4938,
329,
530,
286,
534,
3992,
598,
220,
220,
198,
6738,
18031,
1330,
2600,
82,
628,
628,
198,
198,
2,
6371,
11639,
4023,
1378,
56,
11698,
62,
... | 2.661538 | 260 |
from collections import Counter
text = "hubba bubba"

# Bug fix: get_char_count was commented out but still called below, which
# raised NameError at import time. Restored as a hand-rolled counterpart to
# collections.Counter for the comparison demo.
def get_char_count(text):
    """Return a dict mapping each character of *text* to its frequency.

    Note: text.count(letter) inside the loop is a hidden O(n^2); fine for a
    tiny demo string.
    """
    letters = {}
    for letter in text:
        letters[letter] = text.count(letter)
    return letters

print(get_char_count(text))

# The stdlib equivalent: Counter does the same in one call.
count = Counter(text)
print(count)
print(count.most_common())
| [
6738,
17268,
1330,
15034,
198,
5239,
796,
366,
40140,
7012,
10015,
7012,
1,
198,
2,
825,
651,
62,
10641,
62,
9127,
7,
5239,
2599,
198,
2,
220,
220,
220,
220,
7475,
796,
23884,
198,
220,
198,
2,
220,
220,
220,
220,
329,
3850,
287,
... | 2.507692 | 130 |
# Copyright (c) 2018 Forschungszentrum Juelich GmbH
# Author: Yann Leprince <y.leprince@fz-juelich.de>
#
# This software is made available under the MIT licence, see LICENCE.txt.
import pathlib
import pytest
from neuroglancer_scripts.file_accessor import FileAccessor
from neuroglancer_scripts.accessor import (
DataAccessError,
)
@pytest.mark.parametrize("flat", [False, True])
@pytest.mark.parametrize("gzip", [False, True])
| [
2,
15069,
357,
66,
8,
2864,
27325,
354,
2150,
82,
89,
298,
6582,
449,
2731,
488,
402,
2022,
39,
198,
2,
6434,
25,
575,
1236,
42957,
81,
924,
1279,
88,
13,
293,
1050,
924,
31,
69,
89,
12,
73,
2731,
488,
13,
2934,
29,
198,
2,
... | 2.832258 | 155 |
// Shader-karate (live shader coding) piece
//# https://thebookofshaders.com/05/kynd.png

#define BPM 90.0

// Derive pi from acos(-1) so no literal precision is lost.
const float PI = acos(-1.0);
const float TAU = PI * 2.0;

/* sound common */

// Convert seconds to beats and back at the fixed BPM above.
float timeToBeat(float t) {return t / 60.0 * BPM;}
float beatToTime(float b) {return b / BPM * 60.0;}

// Unit-phase sine: sine(p) completes one full period as p goes 0 -> 1.
float sine(float phase) {
    return sin(TAU * phase);
}

// Equal-tempered pitch: `scale` is in semitones relative to A4 = 440 Hz.
float pitch(float scale) {
    return 440.0 * pow(2.0, scale / 12.0);
}
// Shadertoy-style audio entry point: returns the sample (duplicated to both
// channels) for the given time in seconds.
vec2 mainSound(float time) {
    float bpm = timeToBeat(time);
    // Metronome click: 880 Hz on the first beat of each 4-beat bar, 440 Hz
    // otherwise, with a fast exponential decay inside every beat.
    float tempo = sine((mod(bpm, 4.0) >= 1.0 ? 440.0 : 880.0) * time) * exp(-1e2 * fract(bpm));
    float sound = 0.0;
    //#float tone = sin( 6.2831 * 440.0 * time );
    //#float env = fract(-bpm);
    float f = fract(bpm);
    float s = sin(PI * bpm / 2.0);
    float tone = 0.0;
    float env = 0.0;
    //tone = sine(beatToTime(bpm) * pitch(0.0));
    // beatToTime(timeToBeat(t)) == t, so this is a plain 64 Hz sine.
    tone = sine(beatToTime(bpm) * 64.0);
    // The commented variants below appear to be alternative envelope shapes
    // kept for live experimentation (cf. the kynd.png easing-curve sheet
    // linked at the top of the file).
    env = 1.0 - pow(abs(s), 0.5);
    //env = 1.0 - pow(abs(s), 1.0);
    //env = 1.0 - pow(abs(s), 3.5);
    //env = pow(cos(PI * s / 2.0), 0.5);
    //env = pow(cos(PI * s / 2.0), 1.0);
    //env = pow(cos(PI * s / 2.0), 3.5);
    //env = 1.0 - pow(abs(sin(PI * s / 2.0)), 0.5);
    //env = 1.0 - pow(abs(sin(PI * s / 2.0)), 1.0);
    //env = 1.0 - pow(abs(sin(PI * s / 2.0)), 3.5);
    //env = pow(min(cos(PI * s / 2.0), 1.0 - abs(s)), 0.5);
    //env = pow(min(cos(PI * s / 2.0), 1.0 - abs(s)), 1.0);
    //env = pow(min(cos(PI * s / 2.0), 1.0 - abs(s)), 3.5);
    //env = 1.0 - pow(max(0.0, abs(s) * 2.0 - 1.0), 0.5);
    //env = 1.0 - pow(max(0.0, abs(s) * 2.0 - 1.0), 1.0);
    //env = 1.0 - pow(max(0.0, abs(s) * 2.0 - 1.0), 3.5);
    // The active envelope: w sweeps with tan(bpm*PI), smoothstepped into
    // [0,1], then wrapped through a full sine cycle.
    float w = smoothstep(1.0, -1.0, tan(bpm * PI));
    env = sin(w * TAU);
    sound += tone * env;
    sound += tempo;
    //#if (abs(sound) > 1.0) sound /= abs(sound);
    return vec2(sound);
}
| [
1003,
220,
15661,
24806,
12045,
222,
6312,
163,
102,
118,
33699,
233,
5641,
1792,
226,
2515,
97,
198,
1003,
2,
3740,
1378,
1169,
2070,
1659,
1477,
9972,
13,
785,
14,
2713,
14,
2584,
358,
13,
11134,
198,
198,
2,
13086,
347,
5868,
410... | 1.994337 | 883 |
"""Handles incoming ebs requests, invokes methods, returns responses."""
import json
from moto.core.responses import BaseResponse
from .models import ebs_backends
class EBSResponse(BaseResponse):
    """Handler for EBS requests and responses."""

    @property
    def ebs_backend(self):
        """Return backend instance specific for this region."""
        return ebs_backends[self.region]

    def start_snapshot(self):
        """
        The following parameters are not yet implemented: ParentSnapshotId, ClientToken, Encrypted, KmsKeyArn, Timeout
        """
        params = json.loads(self.body)
        volume_size = params.get("VolumeSize")
        tags = params.get("Tags")
        description = params.get("Description")
        snapshot = self.ebs_backend.start_snapshot(
            volume_size=volume_size,
            tags=tags,
            description=description,
        )
        return 200, {}, json.dumps(snapshot.to_json())

    def complete_snapshot(self, request, full_url, headers):
        """
        The following parameters are not yet supported: ChangedBlocksCount, Checksum, ChecksumAlgorithm, ChecksumAggregationMethod
        """
        self.setup_class(request, full_url, headers)
        # The snapshot id is the last component of the URL path.
        snapshot_id = full_url.split("/")[-1]
        status = self.ebs_backend.complete_snapshot(snapshot_id=snapshot_id)
        return 200, {}, json.dumps(status)

    def put_snapshot_block(self, full_url, headers):
        """
        The following parameters are currently not taken into account: DataLength, Progress.
        The Checksum and ChecksumAlgorithm are taken at face-value, but no validation takes place.
        """
        # NOTE(review): unlike complete_snapshot, this handler never calls
        # setup_class yet reads self.body — confirm the dispatcher prepares
        # the request object before reaching this method.
        # URL shape: .../snapshots/<snapshot_id>/blocks/<block_index>
        snapshot_id = full_url.split("/")[-3]
        block_index = full_url.split("/")[-1]
        block_data = self.body
        # Normalise header names to lower case for case-insensitive lookup.
        headers = {k.lower(): v for k, v in headers.items()}
        checksum = headers.get("x-amz-checksum")
        checksum_algorithm = headers.get("x-amz-checksum-algorithm")
        data_length = headers.get("x-amz-data-length")
        checksum, checksum_algorithm = self.ebs_backend.put_snapshot_block(
            snapshot_id=snapshot_id,
            block_index=block_index,
            block_data=block_data,
            checksum=checksum,
            checksum_algorithm=checksum_algorithm,
            data_length=data_length,
        )
        return (
            200,
            {
                "x-amz-Checksum": checksum,
                "x-amz-Checksum-Algorithm": checksum_algorithm,
            },
            "{}",
        )

    def list_snapshot_blocks(self):
        """
        The following parameters are not yet implemented: NextToken, MaxResults, StartingBlockIndex
        """
        snapshot_id = self.path.split("/")[-2]
        snapshot = self.ebs_backend.list_snapshot_blocks(
            snapshot_id=snapshot_id,
        )
        blocks = [
            {"BlockIndex": idx, "BlockToken": b.block_token}
            for idx, b in snapshot.blocks.items()
        ]
        return (
            200,
            {},
            json.dumps(
                dict(
                    Blocks=blocks,
                    VolumeSize=snapshot.volume_size,
                    BlockSize=snapshot.block_size,
                )
            ),
        )
| [
37811,
12885,
829,
15619,
304,
1443,
7007,
11,
800,
3369,
5050,
11,
5860,
9109,
526,
15931,
198,
11748,
33918,
198,
198,
6738,
285,
2069,
13,
7295,
13,
16733,
274,
1330,
7308,
31077,
198,
6738,
764,
27530,
1330,
304,
1443,
62,
1891,
2... | 2.174116 | 1,499 |
import unittest
import torch.nn
from rl_starterpack import AC, OpenAIGym, experiment
| [
11748,
555,
715,
395,
198,
198,
11748,
28034,
13,
20471,
198,
198,
6738,
374,
75,
62,
12339,
8002,
1330,
7125,
11,
4946,
32,
3528,
4948,
11,
6306,
628
] | 3.142857 | 28 |
"""Centec OS Support"""
from netmiko.cisco_base_connection import CiscoBaseConnection
import time
| [
37811,
19085,
721,
7294,
7929,
37811,
198,
6738,
2010,
76,
12125,
13,
66,
4861,
62,
8692,
62,
38659,
1330,
28289,
14881,
32048,
198,
11748,
640,
628,
628
] | 3.740741 | 27 |
#Challenge 3
#The program asks the user to input their surname and then their first name.
#The program then outputs the user's first name and then their surname separately.
surname = input("please enter your surname: ")
first_name = input("please enter your first name: ")
# Bug fix: the spec above says first name is printed first, but the
# original printed the surname first — print order corrected.
print(first_name)
print(surname)
| [
2,
41812,
3540,
513,
201,
198,
2,
464,
1430,
7893,
262,
2836,
284,
5128,
24571,
40358,
290,
788,
511,
717,
1438,
13,
201,
198,
2,
464,
1430,
788,
5072,
301,
258,
2836,
447,
247,
82,
717,
1438,
290,
788,
511,
40358,
13869,
13,
201,... | 3.271739 | 92 |
# pylint: disable=line-too-long
from __future__ import print_function
import json
import re
import traceback
import zipfile
import arrow
import pytz
from passive_data_kit.models import DataPoint
from passive_data_kit_external_data.models import annotate_field
from ..utils import hash_content, encrypt_content, create_engagement_event, queue_batch_insert, include_data
# Older format?
| [
2,
279,
2645,
600,
25,
15560,
28,
1370,
12,
18820,
12,
6511,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
33918,
198,
11748,
302,
198,
11748,
12854,
1891,
198,
11748,
19974,
7753,
198,
198,
11748,
15452,
1... | 3.438596 | 114 |
"""Packaging script for the ``use_logging`` distribution.

Fix: the original imported ``setup`` from the deprecated ``distutils.core``
(removed in Python 3.12) while also importing from setuptools; both now
come from setuptools.
"""
import os

from setuptools import find_packages, setup

# Resolve README.md relative to this file so the build works from any CWD.
DIR = os.path.dirname(__file__)

with open(os.path.join(DIR, "README.md")) as f:
    readme = f.read().splitlines()

setup(
    name='use_logging',
    version='0.0.1',
    packages=find_packages(include='use_logging*'),
    url='https://github.com/GambitResearch/use_logging',
    author='Daniel Royde',
    author_email='danielroyde@gmail.com',
    # Line 7 of the README is used as the short description; lines 4+
    # (sans leading whitespace) as the long description.
    description=readme[6],
    long_description='\n'.join(readme[3:]).lstrip(),
    keywords=['Python', 'Logging'],
    scripts=['bin/use_logging'],
    license='MIT',
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
11748,
28686,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
198,
198,
34720,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
8,
198,
198,
4480,
1280,
7,
418,
13,
69... | 2.630841 | 214 |
from pathlib import Path
| [
6738,
3108,
8019,
1330,
10644,
628
] | 4.333333 | 6 |
import math
import torch
import torch.nn as nn
import models
import utils
from .models import register
@register('classifier')
@register('linear-classifier')
@register('nn-classifier')
@register('moco')
class MoCo(nn.Module):
    """
    Build a MoCo model with: a query encoder, a key encoder, and a queue
    https://arxiv.org/abs/1911.05722
    """
    def __init__(self, encoder, encoder_args, K=65536, m=0.999, T=0.07, mlp=False):
        """
        encoder: name of the encoder to build via models.make
        encoder_args: kwargs forwarded to models.make
        K: queue size; number of negative keys (default: 65536)
        m: moco momentum of updating key encoder (default: 0.999)
        T: softmax temperature (default: 0.07)
        mlp: replace the final fc with a 2-layer MLP projection head
        """
        super(MoCo, self).__init__()

        self.K = K
        self.m = m
        self.T = T

        # create the encoders
        # feature embedding size is the output fc dimension
        self.encoder_q = models.make(encoder, **encoder_args)
        self.encoder_k = models.make(encoder, **encoder_args)
        dim = self.encoder_q.out_dim
        self.encoder = self.encoder_q  # use encoder_q for downstream tasks

        if mlp:  # hack: brute-force replacement
            dim_mlp = self.encoder_q.fc.weight.shape[1]
            self.encoder_q.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.fc)
            self.encoder_k.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.fc)

        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
            param_k.data.copy_(param_q.data)  # initialize
            param_k.requires_grad = False  # not update by gradient

        # create the queue
        self.register_buffer("queue", torch.randn(dim, K))
        self.queue = nn.functional.normalize(self.queue, dim=0)

        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

    @torch.no_grad()
    def _momentum_update_key_encoder(self):
        """
        Momentum update of the key encoder
        """
        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
            param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)

    @torch.no_grad()
    def _dequeue_and_enqueue(self, keys):
        """
        Replace the oldest entries in the negative-key queue with *keys*.

        Bug fix: forward() calls this method but it was missing from the
        class; restored from the reference MoCo implementation.
        """
        batch_size = keys.shape[0]
        ptr = int(self.queue_ptr)
        assert self.K % batch_size == 0  # for simplicity
        # replace the keys at ptr (dequeue and enqueue)
        self.queue[:, ptr:ptr + batch_size] = keys.T
        ptr = (ptr + batch_size) % self.K  # move pointer
        self.queue_ptr[0] = ptr

    @torch.no_grad()
    def _batch_shuffle_ddp(self, x):
        """
        Batch shuffle, for making use of BatchNorm.

        NOTE(review): despite the _ddp suffix this variant shuffles only the
        local batch (no all_gather) — confirm single-process use is intended.
        """
        # Fix: the decorator was applied twice in the original; once suffices.
        batch_size = x.shape[0]
        # random shuffle index
        idx_shuffle = torch.randperm(batch_size).long().cuda()
        # index for restoring
        idx_unshuffle = torch.argsort(idx_shuffle)
        return x[idx_shuffle], idx_unshuffle

    @torch.no_grad()
    def _batch_unshuffle_ddp(self, x, idx_unshuffle):
        """
        Undo batch shuffle.
        """
        return x[idx_unshuffle]

    def forward(self, im_q, im_k):
        """
        Input:
            im_q: a batch of query images
            im_k: a batch of key images
        Output:
            logits, targets
        """
        # compute query features
        q = self.encoder_q(im_q)  # queries: NxC
        q = nn.functional.normalize(q, dim=1)

        # compute key features
        with torch.no_grad():  # no gradient to keys
            self._momentum_update_key_encoder()  # update the key encoder

            # shuffle for making use of BN
            im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)

            k = self.encoder_k(im_k)  # keys: NxC
            k = nn.functional.normalize(k, dim=1)

            # undo shuffle
            k = self._batch_unshuffle_ddp(k, idx_unshuffle)

        # compute logits
        # Einstein sum is more intuitive
        # positive logits: Nx1
        l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
        # negative logits: NxK
        l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()])

        # logits: Nx(1+K)
        logits = torch.cat([l_pos, l_neg], dim=1)

        # apply temperature
        logits /= self.T

        # labels: positive key indicators
        labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()

        # dequeue and enqueue
        self._dequeue_and_enqueue(k)

        return logits, labels
| [
11748,
10688,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
11748,
4981,
198,
11748,
3384,
4487,
198,
6738,
764,
27530,
1330,
7881,
628,
198,
31,
30238,
10786,
4871,
7483,
11537,
628,
198,
31,
30238,
1078... | 2.110212 | 1,978 |
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
import re
import unittest
class TestDeploy(unittest.TestCase):
    """
    Hadoop/Hive deployment and smoke test for the Apache Bigtop Hive service.
    """

    # Bug fix: the original decorated this instance test method (which takes
    # ``self`` and reads instance/class state) with a stray ``@classmethod``.
    def test_hive(self):
        """
        Validate Hive by running the smoke-test action.

        NOTE(review): relies on ``self.hive`` and ``self.d`` being provided
        by a deployment fixture (e.g. setUpClass) that is not present in
        this file — confirm where the bundle is deployed.
        """
        uuid = self.hive.run_action('smoke-test')
        result = self.d.action_fetch(uuid, full_output=True)
        # action status=completed on success
        if (result['status'] != "completed"):
            self.fail('Hive smoke-test failed: %s' % result)


if __name__ == '__main__':
    unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
393,
517,
198,
2,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
9387,
351,
198,
2,
428,
670,
329... | 3.128319 | 452 |
import pytest
from BlueKumquatAutoDiff.autodiff import *
| [
11748,
12972,
9288,
198,
6738,
4518,
42,
388,
421,
265,
27722,
28813,
13,
2306,
375,
733,
1330,
1635,
628,
628,
628
] | 2.952381 | 21 |
# -*- coding: utf-8 -*-
from aenum import Flag
from .results import Results
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
257,
44709,
1330,
19762,
198,
6738,
764,
43420,
1330,
15691,
628,
198
] | 2.888889 | 27 |
from django.core.management.base import BaseCommand
import time
from core.services.update import Updater
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
11748,
640,
198,
198,
6738,
4755,
13,
30416,
13,
19119,
1330,
3205,
67,
729,
628
] | 3.821429 | 28 |
import nltk
import sys
sentence = """At eight o'clock on Thursday morning Arthur didn't feel very good."""

# Reference tokenization expected from nltk.word_tokenize for the sentence above.
expected_tokens = ['At', 'eight', "o'clock", 'on', 'Thursday', 'morning',
                   'Arthur', 'did', "n't", 'feel', 'very', 'good', '.']

tokens = nltk.word_tokenize(sentence)
if tokens != expected_tokens:
    # Report the failure on stderr and exit non-zero so callers can detect it.
    sys.stderr.write("Error in tokenization")
    sys.exit(1)
| [
11748,
299,
2528,
74,
198,
11748,
25064,
198,
198,
34086,
594,
796,
37227,
2953,
3624,
267,
6,
15750,
319,
3635,
3329,
13514,
1422,
470,
1254,
845,
922,
526,
15931,
198,
83,
482,
641,
796,
299,
2528,
74,
13,
4775,
62,
30001,
1096,
7... | 2.57037 | 135 |
import os
_data_path_prefix = lambda name:os.sep.join(['www.VyperLogix.com',name])
| [
11748,
28686,
198,
198,
62,
7890,
62,
6978,
62,
40290,
796,
37456,
1438,
25,
418,
13,
325,
79,
13,
22179,
7,
17816,
2503,
13,
53,
88,
525,
11187,
844,
13,
785,
3256,
3672,
12962,
198
] | 2.4 | 35 |
"""Packaging configuration for the ``sammy`` distribution."""
import pathlib

import pkg_resources
from setuptools import find_packages, setup

# Pin runtime dependencies from requirements.txt so the install metadata
# matches the development environment.
with pathlib.Path('requirements.txt').open() as requirements_txt:
    install_requires = [
        str(parsed_requirement)
        for parsed_requirement in pkg_resources.parse_requirements(requirements_txt)
    ]

version = '0.4.2'

setup(
    name='sammy',
    version=version,
    description="Python library for generating AWS SAM "
                "(Serverless Application Model) templates with validation.",
    classifiers=[
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Environment :: Web Environment",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3 :: Only"
    ],
    keywords='serverless, cloudformation, sam',
    author='Brian Jinwright',
    author_email='opensource@ipoots.com',
    maintainer='Brian Jinwright',
    packages=find_packages(),
    url='https://github.com/capless/sammy',
    license='GNU General Public License v3.0',
    install_requires=install_requires,
    include_package_data=True,
    zip_safe=False,
)
| [
11748,
3108,
8019,
198,
11748,
279,
10025,
62,
37540,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
4480,
3108,
8019,
13,
15235,
10786,
8897,
18883,
13,
14116,
27691,
9654,
3419,
355,
5359,
62,
14116,
25,
198,... | 2.833333 | 402 |
import numpy
import sklearn.naive_bayes
import sklearn.feature_extraction.text
import sklearn.pipeline

# New additions
import mlflow.sklearn

# Point the MLflow client at the remote tracking server and group the
# runs under a named experiment before training starts.
mlflow.set_tracking_uri("http://atrium.datmo.com")
mlflow.set_experiment("training_module")

...

# NOTE(review): train_and_evaluate_model is not defined in this snippet —
# presumably elided by the "..." placeholder above; confirm in the full file.
train_and_evaluate_model()
11748,
299,
32152,
198,
11748,
1341,
35720,
13,
2616,
425,
62,
24406,
274,
198,
11748,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
5239,
198,
11748,
1341,
35720,
13,
79,
541,
4470,
198,
198,
2,
968,
19885,
198,
11748,
285,
1652,
9319,... | 2.860215 | 93 |
'''
Created on Jun 14, 2017

@author: xinguan
'''
# NOTE(review): this file is Python 2 syntax ("print err" below) and will
# not run under Python 3 without changes.
# import mysql.connector
import mysql.connector

# DDL for the job-listings table; job_unique_id is the primary key, so
# re-inserting an already-scraped job fails instead of duplicating rows.
create_dice_jobs = (
    "CREATE TABLE IF NOT EXISTS `dice_jobs` ("
    " `job_unique_id` varchar(50) NOT NULL,"
    " `job_title` text NOT NULL,"
    " `job_url` text NOT NULL,"
    " `company` text NOT NULL,"
    " `post_date` date NOT NULL,"
    " `job_description` text NOT NULL,"
    " PRIMARY KEY (`job_unique_id`)"
    ") ENGINE=InnoDB")

# SECURITY(review): database credentials are hard-coded in source; consider
# loading them from the environment or a config file.
cnx = mysql.connector.connect(user='root', password='u6a3pwhe',
                              host='127.0.0.1',
                              database='dice_test')
cursor = cnx.cursor()

try:
    cursor.execute(create_dice_jobs)
    cnx.commit()
except mysql.connector.Error as err:
    # Python 2 print statement; under Python 3 this would be print(err).
    print err
    cnx.rollback()
finally:
    # Close the cursor and connection whether or not the DDL succeeded.
    cursor.close()
    cnx.close()
| [
7061,
6,
198,
41972,
319,
7653,
1478,
11,
2177,
198,
198,
31,
9800,
25,
2124,
6680,
272,
198,
7061,
6,
198,
2,
1330,
48761,
13,
8443,
273,
198,
11748,
48761,
13,
8443,
273,
198,
198,
17953,
62,
67,
501,
62,
43863,
796,
357,
198,
... | 2.123037 | 382 |
# !/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import logging
import asyncio as aio
from multiprocessing import set_start_method
def get_logger(print_format: str = '[%(asctime)s.%(msecs)03d: %(levelname).1s %(filename)s:%(lineno)s] %(message)s',
               date_format: str = '%Y-%m-%d %H:%M:%S',
               print: bool = True,
               save: bool = True,
               save_path: str = 'upbit-trader.log'):
    """Configure the root logger and return it.

    NOTE(review): the ``print`` parameter shadows the built-in of the same
    name; kept as-is for keyword-argument compatibility with existing callers.
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)

    record_format = logging.Formatter(fmt=print_format, datefmt=date_format)

    # Console handler (enabled by default).
    if print:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(record_format)
        root_logger.addHandler(console_handler)

    # File handler (enabled by default).
    if save:
        # On Windows the default filename falls through to the save_path
        # branch, which opens the same file anyway.
        if save_path == 'upbit-trader.log' and not sys.platform.startswith('win'):
            file_handler = logging.FileHandler('upbit-trader.log')
        else:
            file_handler = logging.FileHandler(save_path)
        file_handler.setFormatter(record_format)
        root_logger.addHandler(file_handler)

    return root_logger
| [
2,
5145,
14,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
18931,
198,
11748,
30351,
952,
355,
257,
952,
198,
6738,
220,
18540,
305,
919,... | 2.254335 | 519 |
import os
def load_idmap(idmap_file):
    """Load tab-separated idmap file containing label index and label string

    Args:
        idmap_file (str): filepath to idmap

    Returns:
        dict: labelmap (key=index, value=string)

    Raises:
        FileNotFoundError: if ``idmap_file`` does not exist
    """
    if not os.path.exists(idmap_file):
        # Bug fix: the original raised FileExistsError, which signals the
        # opposite condition; a missing path is FileNotFoundError (both are
        # OSError subclasses, so broad handlers still work).
        raise FileNotFoundError(idmap_file)
    labelmap = {}
    with open(idmap_file, "r") as rf:
        for row in rf:
            # Each line: "<index>\t<label>"; trailing newline stripped below.
            fields = row.split("\t")
            labelmap[int(fields[0])] = fields[1].strip()
    return labelmap
| [
11748,
28686,
628,
198,
4299,
3440,
62,
312,
8899,
7,
312,
8899,
62,
7753,
2599,
198,
220,
220,
220,
37227,
8912,
7400,
12,
25512,
515,
4686,
8899,
2393,
7268,
6167,
6376,
290,
6167,
4731,
628,
220,
220,
220,
943,
14542,
25,
198,
22... | 2.227074 | 229 |
from unittest import TestCase
from unittest.mock import Mock, patch
import pytest
from pytest import approx
from functions import mpc_to_mly
from voevent import VOEventFromXml, VOEventFromEventId
import tests.voevent_test_data as test_data
import ligo
from ligo.gracedb.exceptions import HTTPError
@patch("ligo.gracedb.rest.GraceDb.voevents")
@patch("ligo.gracedb.rest.GraceDb.get")
@patch("ligo.gracedb.rest.GraceDb.get")
@pytest.fixture(scope="class")
@pytest.mark.usefixtures("event_id")
@pytest.fixture(scope="class")
@pytest.mark.usefixtures("mock_event_file")
@pytest.fixture(scope="class")
@pytest.mark.usefixtures("real_event_file")
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
44123,
11,
8529,
198,
198,
11748,
12972,
9288,
198,
6738,
12972,
9288,
1330,
5561,
198,
198,
6738,
5499,
1330,
285,
14751,
62,
1462,
62,
76,
306,
1... | 2.673387 | 248 |
__author__ = 'Justin McClure'
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from random import choice
from lib.api_calls import APIException
# Note: Wait view will probably be removed in the future
| [
834,
9800,
834,
796,
705,
33229,
23780,
495,
6,
198,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
11,
20985,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
6738,
4738,
1330,
3572,
198,
6738,
9195... | 3.513889 | 72 |
import requests
import lxml.html
import json

# tutorial from An Intro to Web Scraping With lxml and Python – Python Tips
# https://pythontips.com/2018/06/20/an-intro-to-web-scraping-with-lxml-and-python/

# html = requests.get("https://www.beatport.com/genre/psy-trance/13/top-100")
html = requests.get("https://store.steampowered.com/explore/new/")
doc = lxml.html.fromstring(html.content)

# Fix: the original parsed the page twice and ran the same xpath twice;
# parse once and index into the first match (printed output is unchanged).
new_releases = doc.xpath('//div[@id="tab_newreleases_content"]')
print(new_releases)
new_releases = new_releases[0]

# Game titles inside the "new releases" tab.
titles = new_releases.xpath('.//div[@class="tab_item_name"]/text()')
print(titles)

# Discounted/final prices, as displayed.
prices = new_releases.xpath(
    './/div[@class="discount_final_price"]/text()')
print(prices)

# Comma-separated genre tags per game, split into lists.
tags = [tag.text_content() for tag in new_releases.xpath(
    './/div[@class="tab_item_top_tags"]')]
tags = [tag.split(', ') for tag in tags]
print(tags)

# Platform icons: the last CSS class of each span names the platform.
platforms_div = new_releases.xpath('.//div[@class="tab_item_details"]')
total_platforms = []

for game in platforms_div:
    temp = game.xpath('.//span[contains(@class, "platform_img")]')
    platforms = [t.get('class').split(' ')[-1] for t in temp]
    if 'hmd_separator' in platforms:
        # Separator between flat-screen and VR platforms — not a platform.
        platforms.remove('hmd_separator')
    total_platforms.append(platforms)
print(total_platforms)

# Zip the parallel lists into one record per game.
output = []
for info in zip(titles, prices, tags, total_platforms):
    resp = {}
    resp['title'] = info[0]
    resp['price'] = info[1]
    resp['tags'] = info[2]
    resp['platforms'] = info[3]
    output.append(resp)
print(output)

with open('output.json', 'w') as outfile:
    json.dump(output, outfile)
| [
11748,
7007,
198,
11748,
300,
19875,
13,
6494,
198,
11748,
33918,
198,
198,
2,
11808,
422,
1052,
37219,
284,
5313,
1446,
2416,
278,
2080,
300,
19875,
290,
11361,
784,
11361,
27558,
198,
2,
3740,
1378,
79,
5272,
756,
2419,
13,
785,
14,... | 2.517532 | 713 |
# flake8: noqa
from .aen import AdaptiveElasticNet
from .aencv import AdaptiveElasticNetCV
| [
2,
781,
539,
23,
25,
645,
20402,
198,
198,
6738,
764,
64,
268,
1330,
30019,
425,
9527,
3477,
7934,
198,
6738,
764,
64,
12685,
85,
1330,
30019,
425,
9527,
3477,
7934,
33538,
198
] | 2.787879 | 33 |
from ..misc import binary_digitize
import numpy as np
import pandas as pd | [
6738,
11485,
44374,
1330,
13934,
62,
27003,
1096,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67
] | 3.363636 | 22 |
# This example requires the `message_content` privileged intent for access to message content.

import discord

# Start from the default intent set and additionally opt in to message content.
intents = discord.Intents.default()
intents.message_content = True

# NOTE(review): MyClient is not defined in this snippet — in the upstream
# discord.py example it subclasses discord.Client; confirm in the full file.
client = MyClient(intents=intents)
client.run("TOKEN")
| [
2,
770,
1672,
4433,
262,
4600,
20500,
62,
11299,
63,
21929,
6824,
329,
1895,
284,
3275,
2695,
13,
198,
198,
11748,
36446,
628,
198,
198,
600,
658,
796,
36446,
13,
5317,
658,
13,
12286,
3419,
198,
600,
658,
13,
20500,
62,
11299,
796,... | 3.590909 | 66 |
import torch.nn as nn
from PIL import Image
from torch.utils.data import Dataset
import numpy as np
from transform_file import cut
# Root directory of the Caltech-256 image dataset on this machine; the
# commented alternative below is a previously-used mount point.
root='/home/wang/Dataset/Caltech256/'
#root='/media/this/02ff0572-4aa8-47c6-975d-16c3b8062013/Caltech256/'
| [
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
6121,
62,
7753,
1330,
2005,
198,
198,
15763,
11639,
... | 2.580645 | 93 |
"""Data type models"""
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import datetime
import enum
from typing import Any, Dict, List, Optional
import deserialize
def iso8601parse(date_string: Optional[str]) -> Optional[datetime.datetime]:
    """Parse an ISO8601 date string into a datetime.

    :param date_string: The date string to parse (``None`` passes through)

    :returns: The parsed datetime, or ``None`` when no string was given
    """
    if date_string is None:
        return None
    # Whole-second timestamps first ("...T12:34:56Z") …
    try:
        return datetime.datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%SZ")
    except ValueError:
        pass
    # … then fall back to fractional seconds ("...T12:34:56.789Z").
    return datetime.datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S.%fZ")
# pylint: disable=missing-docstring
@deserialize.parser("firstOccurrence", iso8601parse)
@deserialize.parser("lastOccurrence", iso8601parse)
@deserialize.parser("firstOccurrence", iso8601parse)
@deserialize.parser("lastOccurrence", iso8601parse)
@deserialize.parser("timestamp", iso8601parse)
@deserialize.parser("timestamp", iso8601parse)
@deserialize.parser("appLaunchTimestamp", iso8601parse)
@deserialize.key("identifier", "id")
@deserialize.key("store_type", "type")
@deserialize.key("identifier", "id")
@deserialize.parser("uploaded_at", iso8601parse)
@deserialize.key("identifier", "id")
@deserialize.parser("provisioning_profile_expiry_date", iso8601parse)
@deserialize.parser("uploaded_at", iso8601parse)
@deserialize.key("identifier", "id")
@deserialize.key("identifier", "id")
@deserialize.key("identifier", "id")
@deserialize.key("identifier", "id")
@deserialize.key("identifier", "id")
@deserialize.key("identifier", "id")
@deserialize.parser("expiration_date", iso8601parse)
@deserialize.key("identifier", "id")
@deserialize.key("identifier", "id")
@deserialize.parser("created_at", iso8601parse)
| [
37811,
6601,
2099,
4981,
37811,
198,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
49962,
739,
262,
17168,
5964,
13,
198,
198,
11748,
4818,
8079,
198,
11748,
33829,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
11,
322... | 2.705357 | 672 |
import os
import pandas as pd
import collections
import re
import pickle
from basic_util.files import *
import argparse
if __name__ =='__main__':
    # Build the CLI parser and run the id-map workflow end to end.
    parser = get_parser()
    args = parser.parse_args()
    # NOTE(review): get_parser and IMap are not defined in this file —
    # presumably they come from the star import of basic_util.files; confirm.
    imap = IMap(args.dir_path, args.base_name)
    imap.learn_dic(args.count_names, args.check_names)
    imap.convert_and_save(args.convert_names)
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
17268,
198,
11748,
302,
198,
11748,
2298,
293,
198,
6738,
4096,
62,
22602,
13,
16624,
1330,
1635,
198,
11748,
1822,
29572,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
... | 2.637037 | 135 |
# Generated by Django 3.0.3 on 2020-11-03 07:43
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
18,
319,
12131,
12,
1157,
12,
3070,
8753,
25,
3559,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
import argparse
import requests
from os import getenv
import sys
from influxdb import InfluxDBClient
from datetime import datetime, timedelta
# Base URL of the SolarEdge monitoring REST API.
solaredge_api_url = "https://monitoringapi.solaredge.com"
# Minimum release this script expects — presumably checked inside main();
# TODO confirm against the full module.
required_version = dict(release="1.0.0")

if __name__ == "__main__":
    # NOTE(review): main() is not defined in this snippet; confirm it
    # exists in the full source file.
    main()
| [
11748,
1822,
29572,
198,
11748,
7007,
198,
6738,
28686,
1330,
651,
24330,
198,
11748,
25064,
198,
198,
6738,
25065,
9945,
1330,
4806,
22564,
11012,
11792,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
628,
198,
34453,
1144,
4... | 3.020408 | 98 |
import urllib3

# NOTE(review): main is not defined in this snippet — the file appears
# truncated; confirm the definition in the full source.
print(main())
| [
11748,
2956,
297,
571,
18,
198,
198,
4798,
7,
12417,
28955,
198
] | 2.5 | 12 |
from lib.dynamo.client import DynamoClientManager
async def table_exists(name: str) -> bool:
    """Check if table exists."""
    async with DynamoClientManager() as dynamodb:
        try:
            await dynamodb.describe_table(TableName=name)
        except dynamodb.exceptions.ResourceNotFoundException:
            # describe_table raising "not found" is the absent-table signal;
            # returning here still lets the context manager exit cleanly.
            return False
    return True
async def ensure_table(schema: dict):
    """Ensure the table exists."""
    table_name = schema.get('TableName')
    # Nothing to do without a name, or when the table is already there.
    if not table_name or await table_exists(table_name):
        return
    async with DynamoClientManager() as dynamodb:
        await dynamodb.create_table(**schema)
        # Block until DynamoDB reports the table as live.
        await dynamodb.get_waiter('table_exists').wait(TableName=table_name)
async def delete_table(schema: dict):
    """Deletes the table."""
    table_name = schema.get('TableName')
    # Nothing to do without a name, or when the table is already gone.
    if not table_name or not await table_exists(table_name):
        return
    async with DynamoClientManager() as dynamodb:
        await dynamodb.delete_table(TableName=table_name)
        # Block until DynamoDB confirms the table is fully removed.
        await dynamodb.get_waiter('table_not_exists').wait(TableName=table_name)
| [
6738,
9195,
13,
67,
4989,
78,
13,
16366,
1330,
41542,
11792,
13511,
628,
198,
292,
13361,
825,
3084,
62,
1069,
1023,
7,
3672,
25,
965,
8,
4613,
20512,
25,
198,
220,
220,
220,
37227,
9787,
611,
3084,
7160,
526,
15931,
198,
220,
220,
... | 2.501923 | 520 |
import numpy as np
import torch
from lib.models.backbone.models.hypernet import _gen_supernet
def build_supernet_DP(flops_maximum=600):
    """Backbone with Dynamic output position

    Builds the hypernet supernet with DP=True under a FLOPs budget of
    ``flops_maximum`` and returns the model plus its per-stage block counts.
    """
    # NOTE(review): set_seed is not defined or imported in this file —
    # confirm where it comes from before running.
    set_seed()
    model, sta_num, size_factor = _gen_supernet(
        flops_minimum=0,
        flops_maximum=flops_maximum,
        DP=True,
        num_classes=1000,
        drop_rate=0.0,
        global_pool='avg',
        resunit=False,
        dil_conv=False,
        slice=4)
    # size_factor is computed by _gen_supernet but discarded here.
    return model, sta_num
if __name__ == '__main__':
    # NOTE(review): build_supernet (without the _DP suffix) is not defined
    # in this file — presumably this should call build_supernet_DP; confirm.
    _, sta_num = build_supernet(flops_maximum=600)
    print(sta_num)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
9195,
13,
27530,
13,
1891,
15992,
13,
27530,
13,
49229,
3262,
1330,
4808,
5235,
62,
16668,
3262,
628,
628,
198,
4299,
1382,
62,
16668,
3262,
62,
6322,
7,
2704,
2840,
62,
470... | 2.194853 | 272 |
"Utilities for asking for and processing Morse Code signals."
from typing import Final
from ktane import ask
__all__ = ["valid_morse", "decode", "ask_word"]
# Forward table: letter/digit -> dot-dash sequence, built from two parallel
# sequences instead of a literal mapping.
_PLAIN = "abcdefghijklmnopqrstuvwxyz0123456789"
_CODES = (
    ".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..", ".---",
    "-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.", "...", "-",
    "..-", "...-", ".--", "-..-", "-.--", "--..",
    "-----", ".----", "..---", "...--", "....-", ".....", "-....", "--...",
    "---..", "----.",
)
MORSE_ALPHABET: Final = dict(zip(_PLAIN, _CODES))

# Reverse table: dot-dash sequence -> letter/digit.
INVERSE_MORSE_ALPHABET: Final = {code: char for char, code in MORSE_ALPHABET.items()}


def valid_morse(text: str) -> bool:
    "Determine whether a string is valid Morse code."
    return all(symbol in INVERSE_MORSE_ALPHABET for symbol in text.split())


def decode(code: str) -> str:
    "Convert a Morse code string into regular text."
    return "".join(INVERSE_MORSE_ALPHABET[symbol] for symbol in code.split())
def ask_word() -> str:
    "Get a Morse code string from the user and convert it to a word."
    # ask.str_from_func keeps prompting until valid_morse accepts the input.
    return decode(ask.str_from_func(valid_morse))
| [
1,
18274,
2410,
329,
4737,
329,
290,
7587,
44049,
6127,
10425,
526,
198,
198,
6738,
19720,
1330,
8125,
198,
198,
6738,
479,
83,
1531,
1330,
1265,
198,
198,
834,
439,
834,
796,
14631,
12102,
62,
4491,
325,
1600,
366,
12501,
1098,
1600,... | 2.08953 | 659 |
import npyscreen
import pyperclip
import createVm
import main
import popup
import selectableGrid
import virtualMachine
| [
11748,
45941,
28349,
1361,
198,
11748,
12972,
525,
15036,
198,
198,
11748,
2251,
53,
76,
198,
11748,
1388,
198,
11748,
46207,
198,
11748,
2922,
540,
41339,
198,
11748,
7166,
37573,
198
] | 3.870968 | 31 |
import hashlib
import socket
import unittest
from io import BytesIO
from os import remove as rm
from os.path import exists
from time import sleep
import tests.test_helpers as h
if __name__ == '__main__':
    # Run this module's test cases when executed directly.
    unittest.main()
| [
11748,
12234,
8019,
198,
11748,
17802,
198,
11748,
555,
715,
395,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
6738,
28686,
1330,
4781,
355,
42721,
198,
6738,
28686,
13,
6978,
1330,
7160,
198,
6738,
640,
1330,
3993,
198,
198,
11748,
5... | 3.150685 | 73 |
#!/usr/bin/env python2.7
import sys
import pymongo
import os
import click
import datetime
import rvo.utils as utils
from rvo import __version__
import rvo.config
command_folder = os.path.join(os.path.dirname(__file__), 'commands')
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
# rvo command class
# base help message
@click.command(cls=rvoCommands, context_settings=CONTEXT_SETTINGS,
help="""
Manage text data on commandline
\b
888,8, Y8b Y888P e88 88e
888 " Y8b Y8P d888 888b
888 Y8b " Y888 888P
888 Y8P "88 88"
For the sake of your own data being managed
by you and only you!
""")
@click.version_option(version=__version__, prog_name="rvo")
@click.pass_context
if __name__ == '__main__':
cli()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
13,
22,
198,
198,
11748,
25064,
198,
11748,
279,
4948,
25162,
198,
11748,
28686,
198,
11748,
3904,
198,
11748,
4818,
8079,
198,
11748,
374,
13038,
13,
26791,
355,
3384,
4487,
198,
6738,
... | 2.488294 | 299 |
# -*- coding: utf-8 -*-
"""
This module contains variables that can be changed, but are not exposed to non-expert users.
"""
import os
import multiprocessing
#==============================================================================
#==============================================================================
# Scenario / sector / technology combinations the model iterates over.
SCENARIOS = ['india_base']
SECTORS = ['res','com','ind']
SECTOR_NAMES = {'res':'Residential','com':'Commercial','ind':'Industrial'}
# Nested list: each inner list is a group of techs run together.
TECHS = [['solar']]
TECH_MODES = ['elec']
BA_COLUMN = 'state_id' #geo id column that data is available at such as control_reg_id, state_id, district_id etc.

#==============================================================================
# get the path of the current file
#==============================================================================
MODEL_PATH = os.path.dirname(os.path.abspath(__file__))

#==============================================================================
# model start year
#==============================================================================
START_YEAR = 2016

#==============================================================================
# local cores
#==============================================================================
# Use half of the available logical cores by default.
LOCAL_CORES = int(multiprocessing.cpu_count() / 2)

#==============================================================================
# silence some output
#==============================================================================
VERBOSE = False

#==============================================================================
# run a smaller agent_df for debugging
#==============================================================================
# NOTE(review): whether 1 means 100% or a 1% sample depends on the consumer
# of this flag — confirm before changing.
SAMPLE_PCT = 1

#==============================================================================
# Runtime Tests
#==============================================================================
# Columns allowed to contain nulls when output frames are validated.
NULL_COLUMN_EXCEPTIONS = ['state_incentives', 'pct_state_incentives', 'batt_dispatch_profile', 'export_tariff_results','carbon_price_cents_per_kwh']
# 'market_share_last_year', 'max_market_share_last_year', 'adopters_cum_last_year', 'market_value_last_year', 'initial_number_of_adopters', 'initial_pv_kw', 'initial_market_share', 'initial_market_value', 'system_kw_cum_last_year', 'new_system_kw', 'batt_kw_cum_last_year', 'batt_kwh_cum_last_year',
CHANGED_DTYPES_EXCEPTIONS = []
MISSING_COLUMN_EXCEPTIONS = []
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
1212,
8265,
4909,
9633,
326,
460,
307,
3421,
11,
475,
389,
407,
7362,
284,
1729,
12,
1069,
11766,
2985,
13,
198,
37811,
198,
11748,
28686,
198,
11748,
18540,... | 4.16323 | 582 |
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
# (stored value, human-readable label) pairs for Django model "choices".
# Only one state for now; settings.NC_KEY is the stored key for North Carolina.
STATE_CHOICES = ((settings.NC_KEY, "North Carolina"),)
# Lifecycle states — presumably for a long-running job model defined further
# down this file; confirm against the model that uses it.
STATUS_CHOICES = (
    ("running", "Running"),
    ("error", "Error"),
    ("finished", "Finished"),
)
# Geographic aggregation level choices.
GEOGRAPHY_CHOICES = (
    ("county", "County"),
    ("place", "Place"),
)
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
12102,
2024,
1330,
5436,
11395,
47139,
1352,
11,
1855,
11395,
47139,
1352,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
44724,
62,
44899,
34444,... | 2.791045 | 134 |
from __future__ import absolute_import
import cStringIO
import types
# NOTE(review): assigning to __name__ at module level is unusual; presumably
# deliberate for this package — confirm before changing.
__name__ = 'pyrehol'
__author__ = 'James Brown <jbrown@uber.com>'
version_info = (0, 3)
__version__ = '.'.join(map(str, version_info))
# Indentation unit used when emitting nested config blocks.
INDENT_CHAR = ' '
# Service names Firehol already knows about; they can be referenced without a
# preceding define_service() call.
PREDEFINED_SERVICES = frozenset([
    'any', 'anystateless', 'all',
    'AH', 'apcupsd', 'apcupsdnis', 'aptproxy', 'asterisk', 'cups',
    'cvspserver', 'darkstat', 'daytime', 'dcc', 'dcpp', 'dhcprelay', 'dict',
    'distcc', 'dns', 'echo', 'eserver', 'ESP', 'finger', 'gift', 'giftui',
    'gkrellmd', 'GRE', 'h323', 'heartbeat', 'http', 'https', 'iax', 'iax2',
    'icmp', 'ICMP', 'icp', 'ident', 'imap', 'imaps', 'irc', 'isakmp',
    'jabber', 'jabberd', 'ldap', 'ldaps', 'lpd', 'mms', 'msn', 'msnp',
    'mysql', 'netbackup', 'nfs', 'nntp', 'nntps', 'ntp', 'nut', 'nxserver', 'openvpn',
    'oracle', 'OSPF', 'pop3', 'pop3s', 'portmap', 'postgres', 'privoxy',
    'radius', 'radiusold', 'radiusoldproxy', 'radiusproxy', 'rdp', 'rndc',
    'rsync', 'rtp', 'sip', 'smtp', 'smtps', 'snmp', 'snmptrap', 'socks',
    'squid', 'ssh', 'stun', 'submission', 'sunrpc', 'swat', 'syslog', 'telnet',
    'time', 'upnp', 'uucp', 'vmware', 'vmwareauth', 'vmwareweb', 'vnc',
    'webcache', 'webmin', 'whois', 'xdmcp',
])
class Pyrehol(object):
"""Top-level wrapper for a Firehol config"""
    def emit(self, out_fo=None):
        """Write out to a file descriptor. If one isn't passed, prints to standard out.
        :param out_fo: A file-like object or None
        """
        # With no file object, buffer in memory and print at the end
        # (Python 2: cStringIO + print statement).
        print_it = False
        if out_fo is None:
            out_fo = cStringIO.StringIO()
            print_it = True
        # Firehol config header line: "version <n>".
        out_fo.write('version %d\n\n' % self.version)
        # Free-form lines placed before any definitions.
        if self.leader_lines:
            out_fo.write('\n'.join(self.leader_lines))
            out_fo.write('\n\n')
        # Service definitions first, sorted for deterministic output ...
        for thing in sorted(self.service_defines.values()):
            thing.emit(out_fo)
            out_fo.write('\n')
        # ... then the body contents in insertion order.
        for thing in self.contents:
            thing.emit(out_fo)
            out_fo.write('\n')
        # Free-form lines appended after everything else.
        if self.trailer_lines:
            out_fo.write('\n'.join(self.trailer_lines))
            out_fo.write('\n\n')
        if print_it:
            print out_fo.getvalue()
    def define_service(self, service_name, server_portspec,
                       client_portspec='default'):
        """Add a new service to Firehol (for use in server/client blocks later).
        :param service_name: Name for the service, suitable for use as a bash variable name
        :param server_portspec: Port specification for the server side (example: "tcp/80 tcp/443")
        :param client_portspec: Port specification for the client side (example: "any")
        """
        new_define = _PyreholService(
            service_name, server_portspec, client_portspec, root=self
        )
        if service_name in self.services:
            # Re-defining an existing service is only allowed when the new
            # definition is identical to the already-registered one.
            assert new_define == self.service_defines[service_name],\
                '%s != %s' % (new_define, self.service_defines[service_name])
        else:
            self.service_defines[service_name] = new_define
            self.services.add(service_name)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
269,
10100,
9399,
198,
11748,
3858,
198,
198,
834,
3672,
834,
796,
705,
9078,
260,
3937,
6,
198,
834,
9800,
834,
796,
705,
14731,
4373,
1279,
73,
33282,
31,
18478,
13,
... | 2.185524 | 1,423 |
# Tests (scarce) for win32print module
import os
import unittest
import win32print as wprn
# NOTE(review): os and wprn are unused in this visible portion; the actual
# TestCase definitions presumably live elsewhere in the file — confirm.
if __name__ == "__main__":
    unittest.main()
| [
2,
30307,
357,
13034,
344,
8,
329,
1592,
2624,
4798,
8265,
198,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
198,
11748,
1592,
2624,
4798,
355,
266,
1050,
77,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1... | 2.648148 | 54 |
#!/usr/bin/env python3
"""
Create dataset and experiments.
A dataset is a directory with subdirectories, one subdir per class.
An experiment is a directory subdirectories, one subdir per participant.
"""
import os
from os.path import join as pjoin
from os import listdir as ld
import numpy as np
import shutil
import sys
from PIL import Image
import numpy as np
import math
from torchvision import transforms
from ..helper import human_categories as hc
from .. import constants as consts
def resize_crop_image(input_file,
                      resize_size,
                      crop_size):
    """Replace input_file with a resized, center-cropped PNG version.

    The original file is deleted; the result is written next to it with the
    ".JPEG" suffix (if present) replaced by ".png".
    """
    # Build the torchvision pipeline once: resize, then center-crop.
    pipeline = transforms.Compose([
        transforms.Resize(resize_size),
        transforms.CenterCrop(crop_size),
    ])
    source = Image.open(input_file)
    processed = pipeline(source)
    os.remove(input_file)
    processed.save(input_file.replace(".JPEG", ".png"), 'png')
def create_experiment(expt_name,
                      expt_abbreviation,
                      expt_source_dir,
                      expt_target_dir,
                      only_dnn=True,
                      num_subjects=1,
                      rng=None):
    """Create human / CNN experiment.
    parameters:
    - only_dnn: boolean indicating whether this is a DNN experiment
      or not (if not, a human experiment will be created.)
    """
    # A human experiment shuffles per subject, so it needs an rng.
    if not only_dnn:
        assert rng is not None, "Please specify random number generator (rng)!"
    # "_" is the field separator in the generated file names below.
    assert("_" not in expt_name), "no '_' in experiment name!"
    assert(os.path.exists(expt_source_dir)), "directory "+expt_source_dir+" does not exist."
    # Subject 0 is always the DNN; subjects 1..num_subjects are humans.
    for i in range(0, num_subjects+1):
        if i==0:
            subject_abbreviation = "dnn"
            subject_name="dnn"
        else:
            subject_abbreviation = "s"+get_leading_zeros(i, 2)
            subject_name = "subject-"+get_leading_zeros(i, 2)
        print("Creating experiment for subject: '"+subject_name+"'")
        target_dir = pjoin(expt_target_dir, expt_name,
                           subject_name, "session-1")
        # Refuse to overwrite an existing session directory.
        if os.path.exists(target_dir):
            print("Error: target directory "+target_dir+" does already exist.")
            sys.exit(1)
        else:
            os.makedirs(target_dir)
        # Collect all images, sorted by category then file name, so the
        # unshuffled (DNN) order is deterministic.
        img_list = []
        for c in sorted(hc.get_human_object_recognition_categories()):
            for x in sorted(ld(pjoin(expt_source_dir, c))):
                input_file = pjoin(expt_source_dir, c, x)
                img_list.append(input_file)
        order = np.arange(len(img_list))
        # Human subjects get a per-subject shuffled presentation order.
        if i != 0:
            rng.shuffle(order)
        # NOTE(review): this inner loop reuses the name `i`, shadowing the
        # subject index above; harmless here because it is the last statement
        # of the outer iteration, but worth renaming.
        for i, img_index in enumerate(order):
            input_file = img_list[img_index]
            imgname = input_file.split("/")[-1]
            correct_category = input_file.split("/")[-2]
            condition = "0"
            # File name fields: position_expt_subject_condition_category_00_originalname
            target_image_path = pjoin(target_dir,
                                      (get_leading_zeros(i+1)+"_"+
                                       expt_abbreviation+"_"+
                                       subject_abbreviation+"_"+
                                       condition+"_"+
                                       correct_category+"_"+
                                       "00_"+
                                       imgname))
            shutil.copyfile(input_file, target_image_path)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
16447,
27039,
290,
10256,
13,
198,
32,
27039,
318,
257,
8619,
351,
850,
12942,
1749,
11,
530,
850,
15908,
583,
1398,
13,
198,
2025,
6306,
318,
257,
8619,
850,
12... | 1.95807 | 1,741 |
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017-2018 Roxanne Gibson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import asyncio
import discord
import datetime
import youtube_dl
from math import ceil
from discord.ext import commands
import roxbot
from roxbot import guild_settings
def _clear_cache():
	"""Clears the cache folder for the music bot. Ignores the ".gitignore" file to avoid deleting versioned files."""
	cache_dir = "roxbot/cache"
	for entry in os.listdir(cache_dir):
		if entry == ".gitignore":
			continue
		os.remove("{}/{}".format(cache_dir, entry))
# Suppress noise about console usage from errors
youtube_dl.utils.bug_reports_message = lambda: ''
# youtube-dl download options: best audio only, cached under roxbot/cache
# with a name built from extractor/id/title.
ytdl_format_options = {
    'format': 'bestaudio/best',
    'outtmpl': './roxbot/cache/%(extractor)s-%(id)s-%(title)s.%(ext)s',
    'restrictfilenames': True,
    'noplaylist': True,
    'nocheckcertificate': True,
    'ignoreerrors': False,
    'logtostderr': False,
    'quiet': True,
    'no_warnings': True,
    'default_search': 'auto',
}
# ffmpeg flags: no stdin, audio only (-vn), quiet logs, force IPv4.
ffmpeg_options = {
    'before_options': '-nostdin',
    'options': '-vn -loglevel panic --force-ipv4'
}
# Shared downloader instance used by the cog below.
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
class ModifiedFFmpegPMCAudio(discord.FFmpegPCMAudio):
"""Modifies the read function of FFmpegPCMAudio to add a timer.
Thanks to eliza(nearlynon#3292) for teaching me how to do this"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
36393,
13789,
198,
198,
15269,
357,
66,
8,
2177,
12,
7908,
34821,
21952,
20400,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
... | 3.165289 | 726 |
"""
Slack Bot Untrack Command
"""
import logging
from ebr_trackerbot.bot import register_command, get_storage
def untrack_command(text, result, payload, config, commands):
    """Stop tracking a test for the requesting Slack user.

    The test name is taken from the first capture group of the trigger
    regexp; a confirmation is posted back in the originating thread.
    """
    logging.debug("Untrack command")
    test_name = result.group(1)
    delete_for_user = get_storage()["delete_for_user"]
    delete_for_user(payload["data"]["user"], test_name)
    payload["web_client"].chat_postMessage(
        channel=payload["data"]["channel"],
        text="Tracking was stopped for test *" + test_name + "*",
        thread_ts=payload["data"]["ts"],
    )
# Register the handler: (name, help text, trigger regexp, callback).
# The regexp captures the full test name as group(1) for untrack_command.
register_command(
    "untrack", "Stops test tracking. Command syntax: untrack full_testname", "^untrack ([^ ]+)$", untrack_command
)
logging.info("Untrack command registered")
| [
37811,
198,
11122,
441,
18579,
26970,
39638,
9455,
198,
37811,
198,
11748,
18931,
198,
6738,
304,
1671,
62,
2213,
10735,
13645,
13,
13645,
1330,
7881,
62,
21812,
11,
651,
62,
35350,
628,
198,
4299,
1418,
39638,
62,
21812,
7,
5239,
11,
... | 2.782443 | 262 |
# EGM skimmer
# Author: Rafael Lopes de Sa
import FWCore.ParameterSet.Config as cms
# Run with the 2017 detector
from Configuration.Eras.Era_Run2_2017_cff import Run2_2017
process = cms.Process('SKIM',Run2_2017)
# Import the standard packages for reconstruction and digitization
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.Digi_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('RecoEgamma.EgammaMCTools.pfClusterMatchedToPhotonsSelector_cfi')
# Global Tag configuration ... just using the same as in the RelVal
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '81X_upgrade2017_realistic_v26', '')
# Only report errors; print a progress line every 1000 events.
process.MessageLogger.cerr.threshold = 'ERROR'
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.options = cms.untracked.PSet( allowUnscheduled = cms.untracked.bool(True) )
# This is where users have some control.
# Define which collections to save and which dataformat we are using
# Start from 'drop *' and explicitly keep only what the skim needs.
savedCollections = cms.untracked.vstring('drop *',
                                         # The commented ones are large collections that can be kept for debug
                                         # 'keep EcalRecHitsSorted_*_*_*',
                                         # 'keep recoPFClusters_*_*_*',
                                         # 'keep recoCaloClusters_*_*_*',
                                         # 'keep recoSuperClusters_*_*_*',
                                         # 'keep recoGsfElectron*_*_*_*',
                                         # 'keep recoPhoton*_*_*_*',
                                         # 'keep *_mix_MergedTrackTruth_*',
                                         'keep *_reducedEcalRecHits*_*_*',
                                         'keep double_fixedGridRho*_*_*',
                                         'keep recoGenParticles_*_*_*',
                                         'keep GenEventInfoProduct_*_*_*',
                                         'keep PileupSummaryInfos_*_*_*',
                                         'keep *_ecalDigis_*_*',
                                         'keep *_offlinePrimaryVertices_*_*',
                                         'keep *_particleFlowCluster*_*_*')
# NOTE(review): only 15 events are processed — presumably a test setting;
# raise for a production skim.
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(15))
# AODSIM primary input plus its parent GEN-SIM-RAW files (needed to re-run
# digitization/reconstruction below).
process.source = cms.Source("PoolSource",
                            fileNames = cms.untracked.vstring(
                               '/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/AODSIM/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/005AB6CE-27ED-E611-98CA-E0071B7A8590.root'
                            ),
                            secondaryFileNames = cms.untracked.vstring(
                               '/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/0416D6B7-04ED-E611-B342-E0071B7A8550.root',
                               '/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/14829DD8-04ED-E611-8049-A0000420FE80.root',
                               '/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/54AFE9C4-04ED-E611-952D-A0000420FE80.root',
                               '/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/5A32C6B9-04ED-E611-B1EB-E0071B7A8550.root',
                               '/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/60E162B8-04ED-E611-898D-E0071B7A58F0.root',
                               '/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/6A47DD1A-FEEC-E611-81EB-A0000420FE80.root',
                               '/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/92B923B6-04ED-E611-9DC9-24BE05C48821.root',
                               '/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/B40E77B4-04ED-E611-9E30-E0071B7A45D0.root',
                               '/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/C48157B5-04ED-E611-BEC1-E0071B7A45D0.root',
                               '/store/mc/PhaseIFall16DR/GluGluHToGG_M-125_13TeV_powheg_pythia8/GEN-SIM-RAW/FlatPU28to62HcalNZSRAW_81X_upgrade2017_realistic_v26-v1/100000/CAED3A16-FEEC-E611-8262-24BE05CEFB41.root'
                            )
)
# Output module writing the skimmed event content defined above.
process.PFCLUSTERoutput = cms.OutputModule("PoolOutputModule",
                                           dataset = cms.untracked.PSet(dataTier = cms.untracked.string('RECO'),
                                                                        filterName = cms.untracked.string('')
                                           ),
                                           eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
                                           fileName = cms.untracked.string('skimEGMobjects_fromRAW.root'),
                                           outputCommands = savedCollections,
                                           splitLevel = cms.untracked.int32(0)
)
# Run the digitizer to make the trackingparticles
process.mix.digitizers = cms.PSet(process.theDigitizersValid)
process.trackingtruth_step = cms.Path(process.pdigi_valid)
# Remake the PFClusters
process.pfclusters_step = cms.Path(process.bunchSpacingProducer *
                                   process.ecalDigis *
                                   process.ecalPreshowerDigis *
                                   process.ecalPreshowerRecHit *
                                   process.ecalMultiFitUncalibRecHit *
                                   process.ecalDetIdToBeRecovered *
                                   process.ecalRecHit *
                                   process.particleFlowRecHitPS *
                                   process.particleFlowRecHitECAL *
                                   process.particleFlowClusterECALUncorrected *
                                   process.particleFlowClusterPS *
                                   process.particleFlowClusterECAL)
# Select the PFClusters we want to calibrate
process.particleFlowClusterECALMatchedToPhotons = process.pfClusterMatchedToPhotonsSelector.clone()
process.selection_step = cms.Path(process.particleFlowClusterECALMatchedToPhotons)
# Ends job and writes our output
process.endjob_step = cms.EndPath(process.endOfProcess)
process.output_step = cms.EndPath(process.PFCLUSTERoutput)
# Schedule definition, rebuilding rechits
process.schedule = cms.Schedule(process.trackingtruth_step,process.pfclusters_step,process.selection_step,process.endjob_step,process.output_step)
| [
2,
412,
15548,
1341,
10957,
198,
2,
6434,
25,
31918,
406,
13920,
390,
10318,
198,
198,
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
2,
5660,
351,
262,
2177,
31029,
198,
6738,
28373,
13,
36,
8847,
13,
36,... | 1.878007 | 4,074 |
"""
"""
import unittest
from runesanalyzer import data
__author__ = ["Clément Besnier <clemsciences@aol.com>", ]
| [
37811,
198,
198,
37811,
198,
198,
11748,
555,
715,
395,
198,
6738,
32326,
38200,
9107,
1330,
1366,
198,
198,
834,
9800,
834,
796,
14631,
2601,
2634,
434,
30837,
77,
959,
1279,
2375,
907,
979,
3007,
31,
64,
349,
13,
785,
29,
1600,
23... | 2.659091 | 44 |
"""
spec_uploader.py
A tool for uploading apigee specs
Usage:
spec_uploader.py <apigee_org> <specs_dir> -u <username> -p <password> [-t <apigee_token>]
spec_uploader.py (-h | --help)
Options:
-h --help Show this screen
-u Which username to log in with
-p Password for login
-t Access Token from apigee
"""
import os
from docopt import docopt
from apigee_client import ApigeeClient
ENV_NAMES = {
'nhsd-prod': ['sandbox', 'dev', 'int', 'prod'],
'nhsd-nonprod': ['internal-dev', 'internal-qa-sandbox', 'internal-qa', 'ref']
}
FRIENDLY_ENV_NAMES = {
'prod': '(Production)',
'int': '(Integration Testing)',
'dev': '(Development)',
'ref': '(Reference)',
'internal-qa': '(Internal QA)',
'internal-dev': '(Internal Development)'
}
FRIENDLY_API_NAMES = {
'personal-demographics': 'Personal Demographics Service API'
}
if __name__ == "__main__":
args = docopt(__doc__)
client = ApigeeClient(args['<apigee_org>'], args['<username>'], args['<password>'], args['<apigee_token>'])
upload_specs(ENV_NAMES[args['<apigee_org>']], args['<specs_dir>'], client)
| [
37811,
198,
16684,
62,
25850,
263,
13,
9078,
198,
198,
32,
2891,
329,
33794,
2471,
328,
1453,
25274,
198,
198,
28350,
25,
198,
220,
1020,
62,
25850,
263,
13,
9078,
1279,
499,
328,
1453,
62,
2398,
29,
1279,
4125,
6359,
62,
15908,
29,... | 2.411017 | 472 |
from .simple import (
SimpleNER,
SimpleMultiLabel,
SimpleClassification,
)
from .simple_t5 import SimpleT5
| [
6738,
764,
36439,
1330,
357,
198,
220,
220,
220,
17427,
21479,
11,
198,
220,
220,
220,
17427,
29800,
33986,
11,
198,
220,
220,
220,
17427,
9487,
2649,
11,
198,
8,
198,
6738,
764,
36439,
62,
83,
20,
1330,
17427,
51,
20,
198
] | 2.833333 | 42 |
import argparse
from allennlp.common.params import Params
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.models.archival import load_archive
from summarus.readers import *
if __name__ == "__main__":
    # CLI entry point; the real work happens in target_to_lines, defined
    # elsewhere in this file.
    parser = argparse.ArgumentParser()
    parser.add_argument('--archive-file', type=str, required=True)
    parser.add_argument('--input-file', type=str, required=True)
    parser.add_argument('--output-file', type=str, required=True)
    args = parser.parse_args()
    # argparse dest names are archive_file/input_file/output_file; these must
    # match target_to_lines' keyword parameters.
    target_to_lines(**vars(args))
| [
11748,
1822,
29572,
198,
198,
6738,
477,
1697,
34431,
13,
11321,
13,
37266,
1330,
2547,
4105,
198,
6738,
477,
1697,
34431,
13,
7890,
13,
19608,
292,
316,
62,
961,
364,
13,
19608,
292,
316,
62,
46862,
1330,
16092,
292,
316,
33634,
198,... | 2.878307 | 189 |
__author__ = 'sabe6191'
import json
import datetime
from tempest.common import rest_client
| [
834,
9800,
834,
796,
705,
82,
11231,
21,
26492,
6,
198,
198,
11748,
33918,
198,
11748,
4818,
8079,
198,
198,
6738,
20218,
395,
13,
11321,
1330,
1334,
62,
16366,
198
] | 3.1 | 30 |
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
"""
Date: 2019/11/27
Author: Xiao-Le Deng
Email: xiaoledeng at gmail.com
Function: remove duplicates in a given list
"""
# List1 = [1,1,1]
# List2 = ["John","John","John","Mark","David","David","Shalom","Shalom","Shalom"]
# print(list_remove_duplicate(List1))
# print(list_remove_duplicate(List2)) | [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
220,
220,
198,
37811,
198,
10430,
25,
13130,
14,
1157,
14,
1983,
198,
13838,
25,
28249,
12,
3123,
41985,
198,
1533... | 2.48227 | 141 |
from django.test import TestCase
from django.contrib.auth.models import User
from .models import healthservices,neighbourhood
import datetime as dt
# Create your tests here.
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
764,
27530,
1330,
1535,
30416,
11,
710,
394,
6084,
2894,
198,
198,
11748,
4818,
8079,
355,
288,
83,
... | 3.591837 | 49 |
#!/usr/bin/env python
from codecs import open
# NOTE(review): ez_setup is the legacy setuptools bootstrapper — obsolete on
# modern toolchains; kept for compatibility with old environments.
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
import re
# Pull __version__/__author__ etc. out of the package source so the metadata
# has a single source of truth.
main_py = open('morfessor/__init__.py', encoding='utf-8').read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", main_py))
requires = [
    # 'progressbar',
]
setup(name='Morfessor',
      version=metadata['version'],
      author=metadata['author'],
      author_email='morpho@aalto.fi',
      url='http://morpho.aalto.fi',
      description='Morfessor',
      packages=['morfessor', 'morfessor.test'],
      classifiers=[
          'Development Status :: 4 - Beta',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Topic :: Scientific/Engineering',
      ],
      license="BSD",
      scripts=['scripts/morfessor',
               'scripts/morfessor-train',
               'scripts/morfessor-segment',
               'scripts/morfessor-evaluate',
               ],
      install_requires=requires,
      extras_require={
          'docs': [l.strip() for l in open('docs/build_requirements.txt')]
      }
      )
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
40481,
82,
1330,
1280,
198,
6738,
304,
89,
62,
40406,
1330,
779,
62,
2617,
37623,
10141,
198,
1904,
62,
2617,
37623,
10141,
3419,
198,
198,
6738,
900,
37623,
10141,
1330,
90... | 2.264815 | 540 |
# -*- coding: utf-8 -*-
from yapconf.docs import build_markdown_table
# flake8: noqa
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
331,
499,
10414,
13,
31628,
1330,
1382,
62,
4102,
2902,
62,
11487,
628,
198,
2,
781,
539,
23,
25,
645,
20402,
628
] | 2.342105 | 38 |
# --depends-on channel_access
# --depends-on check_mode
# --depends-on commands
# --depends-on permissions
import enum
from src import ModuleManager, utils
| [
2,
1377,
10378,
2412,
12,
261,
6518,
62,
15526,
198,
2,
1377,
10378,
2412,
12,
261,
2198,
62,
14171,
198,
2,
1377,
10378,
2412,
12,
261,
9729,
198,
2,
1377,
10378,
2412,
12,
261,
21627,
198,
198,
11748,
33829,
198,
198,
6738,
12351,... | 3.09434 | 53 |
import binascii
import csv
import gzip
import io
import sys
from sqlalchemy import MetaData, Table
from pytest_mock_resources.compat import boto3
def _parse_s3_command(statement):
    """Parse a Redshift 'COPY ... FROM s3://...' statement.

    Returns a dict with schema_name, table_name, optional columns, s3_uri and
    the AWS credentials — the keyword arguments _mock_s3_copy expects.
    """
    statement = strip(statement)
    params = dict()
    # Drop the leading COPY keyword; the rest is whitespace-tokenized.
    tokens = statement.split()[1:]
    # Fetching table name
    params["schema_name"], params["table_name"] = _split_table_name(tokens.pop(0))
    # Checking for columns: an optional "(col1, col2, ...)" list follows the
    # table name; consume tokens up to and including the closing ")".
    if tokens[0][0] == "(":
        ending_index = 0
        for index, arg in enumerate(tokens):
            if arg.endswith(")"):
                ending_index = index
                break
        ending_index += 1
        columns = tokens[0:ending_index]
        columns[0] = columns[0].replace("(", "")
        columns[-1] = columns[-1].replace(")", "")
        columns = [x.replace(",", "") for x in columns]
        columns = [x for x in columns if x != ""]
        tokens = tokens[ending_index:]
        params["columns"] = columns
    # Fetching s3_uri: the next token must be the FROM keyword.
    if tokens.pop(0).lower() != "from":
        raise ValueError(
            (
                "Possibly malformed S3 URI Format. "
                "Statement = {statement}"
                "Redshift fixture only supports S3 Copy statments with the following syntax: "
                "COPY <table_name> FROM [(column 1, [column2, [..]])] '<file path on S3 bucket>' "
                "credentials 'aws_access_key_id=<aws_access_key_id>;"
                "aws_secret_access_key=<aws_secret_access_key>'"
            ).format(statement=statement)
        )
    params["s3_uri"] = strip(tokens.pop(0))
    # Fetching credentials
    for token in tokens:
        if "aws_access_key_id" in token.lower() or "aws_secret_access_key" in token.lower():
            # This is because of the following possibilities:
            # ... [with ]credentials[ AS] 'aws_access_key_id=x;aws_secret_access_key=y'
            # OR
            # ... [with ]credentials[ AS] 'aws_secret_access_key=y;aws_access_key_id=x'
            # OR
            # ... [with ]credentials[ AS] 'aws_secret_access_key=y;\naws_access_key_id=x'
            # OR
            # ... [with ]credentials[ AS] 'aws_secret_access_key=y; aws_access_key_id=x'
            # Supported AWS credentials format:
            # [with ]credentials[ AS] 'aws_secret_access_key=y; aws_access_key_id=x'
            # No support for additional credential formats, e.g. IAM roles, etc, yet.
            credentials_list = token.split(";")
            for credentials in credentials_list:
                if "aws_access_key_id" in credentials:
                    params["aws_access_key_id"] = credentials.split("=")[-1]
                elif "aws_secret_access_key" in credentials:
                    params["aws_secret_access_key"] = credentials.split("=")[-1]
                else:
                    raise ValueError(
                        (
                            "Possibly malformed AWS Credentials Format. "
                            "Statement = {statement}"
                            "Redshift fixture only supports S3 Copy statments with the following "
                            "syntax: COPY <table_name> FROM [(column 1, [column2, [..]])] '"
                            "<file path on S3 bucket>' "
                            "credentials 'aws_access_key_id=<aws_access_key_id>;"
                            "aws_secret_access_key=<aws_secret_access_key>' "
                            "Supportred AWS credentials format: "
                            "[with ]credentials[ AS] 'aws_secret_access_key=y; aws_access_key_id=x'"
                            " No Support for additional credential formats, eg IAM roles, etc, yet."
                        ).format(statement=statement)
                    )
    return params
def _split_table_name(table_name):
"""Split 'schema_name.table_name' to (schema_name, table_name)."""
table_name_items = table_name.split(".")
if len(table_name_items) == 1:
schema_name = None
elif len(table_name_items) == 2:
schema_name, table_name = table_name_items
else:
raise ValueError("Cannot determine schema/table name from input {}".format(table_name))
return schema_name, table_name
def _mock_s3_copy(
    table_name, s3_uri, schema_name, aws_secret_access_key, aws_access_key_id, columns, engine
):
    """Execute patched 'copy' command.

    Downloads the object behind s3_uri with boto3, detects gzip via the
    file's magic number, hands the bytes to read_data_csv (defined elsewhere
    in this module) and bulk-inserts the resulting rows via SQLAlchemy.
    """
    s3 = boto3.client(
        "s3", aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key
    )
    # Assumes s3_uri starts with "s3://" (5 chars) — the slice strips the
    # scheme, leaving "bucket/key...".
    ending_index = len(s3_uri)
    path_to_file = s3_uri[5:ending_index]
    bucket, key = path_to_file.split("/", 1)
    response = s3.get_object(Bucket=bucket, Key=key)
    # the following lines of code are used to check if the file is gzipped or not.
    # To do so we use magic numbers.
    # A magic number is a constant numerical or text value used to identify a file format or protocol
    # The magic number for gzip compressed files is 1f 8b.
    is_gzipped = binascii.hexlify(response["Body"].read(2)) == b"1f8b"
    # The first GetObject's stream was partially consumed by the magic-number
    # check, so fetch the object again for the full read.
    response = s3.get_object(Bucket=bucket, Key=key)
    data = read_data_csv(response["Body"].read(), is_gzipped, columns)
    meta = MetaData()
    table = Table(table_name, meta, autoload=True, schema=schema_name, autoload_with=engine)
    engine.execute(table.insert(data))
def strip(input_string):
    """Remove surrounding whitespace, a trailing ';', and surrounding single/double quotes."""
    cleaned = input_string.strip()
    cleaned = cleaned.rstrip(";")
    return cleaned.strip('"').strip("'")
| [
11748,
9874,
292,
979,
72,
198,
11748,
269,
21370,
198,
11748,
308,
13344,
198,
11748,
33245,
198,
11748,
25064,
198,
198,
6738,
44161,
282,
26599,
1330,
30277,
6601,
11,
8655,
198,
198,
6738,
12972,
9288,
62,
76,
735,
62,
37540,
13,
... | 2.170991 | 2,544 |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_bcrypt import Bcrypt
from iotools.config import Config
# Extension singletons created unbound (application-factory pattern); Flask
# and Config are presumably consumed by a create_app() further down this
# file — confirm.
db = SQLAlchemy()
bcrypt = Bcrypt()
login_manager = LoginManager()
# Endpoint users are redirected to when login is required.
login_manager.login_view = 'sessions.login'
# Flash-message category used for the login prompt.
login_manager.login_message_category = 'info'
| [
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
6738,
42903,
62,
38235,
1330,
23093,
13511,
198,
6738,
42903,
62,
15630,
6012,
1330,
347,
29609,
198,
6738,
1312,
313,
10141,
13,
11250,
133... | 3.478723 | 94 |