content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from enum import Enum
from typing import Optional
from pydantic import BaseModel, Field, validator
from bson.objectid import ObjectId
def generate_objectid():
"""Returns ObjectId as str."""
return str(ObjectId())
class TaskId(BaseModel):
"""
Id of a Task.
Despites the TaskId value represents an ObjectId,
we will threat it in our domain as an string.
It will be parsed to ObjectId on the repos.
We will check the supplied string is valid.
"""
value:str = Field(default_factory=generate_objectid)
@validator('value')
| [
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
11,
7663,
11,
4938,
1352,
198,
6738,
275,
1559,
13,
15252,
312,
1330,
9515,
7390,
628,
198,
4299,
7716,
62,
15252,
312,
3... | 3.081522 | 184 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def galton_families(path):
"""Galton's data on the heights of parents and their children, by child
This data set lists the individual observations for 934 children in 205
families on which Galton (1886) based his cross-tabulation.
In addition to the question of the relation between heights of parents
and their offspring, for which this data is mainly famous, Galton had
another purpose which the data in this form allows to address: Does
marriage selection indicate a relationship between the heights of
husbands and wives, a topic he called *assortative mating*? Keen [p.
297-298](2010) provides a brief discussion of this topic.
A data frame with 934 observations on the following 8 variables.
`family`
family ID, a factor with levels `001`-`204`
`father`
height of father
`mother`
height of mother
`midparentHeight`
mid-parent height, calculated as `(father + 1.08*mother)/2`
`children`
number of children in this family
`childNum`
number of this child within family. Children are listed in
decreasing order of height for boys followed by girls
`gender`
child gender, a factor with levels `female` `male`
`childHeight`
height of child
Galton's notebook,
http://www.medicine.mcgill.ca/epidemiology/hanley/galton/notebook/,
transcribed by Beverley Shipley in 2001.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `galton_families.csv`.
Returns:
Tuple of np.ndarray `x_train` with 934 rows and 8 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'galton_families.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/HistData/GaltonFamilies.csv'
maybe_download_and_extract(path, url,
save_file_name='galton_families.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
269,
21370,
... | 2.975118 | 844 |
from osbot_aws.apis.Lambda import Lambda
from pbx_gs_python_utils.utils.Dev import Dev
from pbx_gs_python_utils.utils.Lambdas_Helpers import slack_message
from pbx_gs_python_utils.utils.Misc import Misc
from osbot_jupyter.api.CodeBuild_Jupyter_Helper import CodeBuild_Jupyter_Helper
from osbot_jupyter.api.Live_Notebook import Live_Notebook
| [
6738,
28686,
13645,
62,
8356,
13,
499,
271,
13,
43,
4131,
6814,
1330,
21114,
6814,
198,
6738,
279,
65,
87,
62,
14542,
62,
29412,
62,
26791,
13,
26791,
13,
13603,
1330,
6245,
198,
6738,
279,
65,
87,
62,
14542,
62,
29412,
62,
26791,
... | 2.658915 | 129 |
#!/usr/bin/env python3
import scrape_common as sc
print('BS')
# The list of articles is also available on https://www.gd.bs.ch/medienseite/medienmitteilungen.html
URL = sc.download("https://www.gd.bs.ch/")
URL = sc.filter(r'Tagesbulletin.*Corona', URL)
# 2020-03-25, List of sub-articles:
"""
<a href="/nm/2020-tagesbulletin-coronavirus-466-bestaetigte-faelle-im-kanton-basel-stadt-gd.html" target="_self">Tagesbulletin Coronavirus: 466 bestätigte Fälle im Kanton Basel-Stadt</a>
<a href="/nm/2020-tagesbulletin-coronavirus-414-bestaetigte-faelle-im-kanton-basel-stadt-gd.html" target="_self">Tagesbulletin Coronavirus: 414 bestätigte Fälle im Kanton Basel-Stadt</a>
<a href="/nm/2020-tagesbulletin-coronavirus-376-bestaetigte-faelle-im-kanton-basel-stadt-gd.html" target="_self">Tagesbulletin Coronavirus: 376 bestätigte Fälle im Kanton Basel-Stadt</a>
"""
URL = sc.filter(r'href', URL)
URL = URL.split('"')[1]
d = sc.download(f'https://www.gd.bs.ch/{URL}')
sc.timestamp()
d = d.replace('ä', 'ä')
d = d.replace('ö', 'ö')
d = d.replace(' ', ' ')
# 2020-03-25
"""
<p>Das Gesundheitsdepartement Basel-Stadt meldet mit Stand Mittwoch, 25. März 2020, 10 Uhr, insgesamt 466 positive Fälle von Personen mit Wohnsitz im Kanton Basel-Stadt sowie drei weitere Todesfälle. </p>
"""
# There are some extra (or repeated) information in the previous / next paragraphs:
# 2020-03-25
"""
<h1>Tagesbulletin Coronavirus: 466 bestätigte Fälle im Kanton Basel-Stadt</h1>
<div class="meta" role="contentinfo">
<ul>
<li class="date">25.03.2020 <span class="time">(11:15)</span></li>
...
<div class="lead">
<p>Das Gesundheitsdepartement Basel-Stadt meldet mit Stand Mittwoch, 25. März 2020, 10 Uhr, insgesamt 466 positive Fälle von Personen mit Wohnsitz im Kanton Basel-Stadt sowie drei weitere Todesfälle. </p>
</div>
<div class="text">
<p>Mit Stand Mittwoch, 25. März 2020, 10 Uhr, liegen insgesamt 466 positive Fälle von Personen mit Wohnsitz im Kanton Basel-Stadt vor. Dies sind 52 mehr als am Vortag. 128 Personen der 466 positiv Getesteten und somit über ein Viertel sind wieder genesen. 58 erkrankte Baslerinnen und Basler sind aktuell aufgrund einer Infektion mit Covid-19 (Coronavirus) hospitalisiert.</p>
<p>Im Kanton Basel-Stadt werden nebst den Tests der Kantonsbewohnerinnen und -bewohner auch Tests von Verdachtsfällen aus anderen Schweizer Kantonen und dem grenznahen Ausland durchgeführt. Bisher sind die Tests von 773 Personen positiv ausgefallen (inklusive der 466 Basler Fälle).</p>
"""
# 2020-04-01
"""
<div class="lead">
<p>Das Gesundheitsdepartement Basel-Stadt meldet mit Stand Mittwoch, 1. April 2020, 10 Uhr, 691 positive Fälle von Personen mit Wohnsitz im Kanton Basel-Stadt und zwei weitere Todesfälle. Aufgrund einer Labornachmeldung muss die Zahl der positiven Fälle einmalig nach oben korrigiert werden.</p>
</div>
<div class="text">
<p>Mit Stand Mittwoch, 1. April 2020, 10 Uhr, liegen insgesamt 691 positive Fälle von Personen mit Wohnsitz im Kanton Basel-Stadt vor. 323 Personen der 691 positiv Getesteten und damit über 45 Prozent sind wieder genesen.</p>
"""
# Use non-greedy matching.
print('Date and time:', sc.find(r'Stand\s*[A-Za-z]*,?\s*(.+?),\s*(?:liegen\s*)?insgesamt', d))
print('Confirmed cases:', sc.find(r'(?:insgesamt\s*)?([0-9]+)\s*positive', d))
print('Recovered:', sc.find(r'([0-9]+) Personen der [0-9]+ positiv Getesteten .+ sind wieder genesen', d))
print('Hospitalized:', sc.find(r'Aktuell befinden sich ([0-9]+) Einwohnerinnen und Einwohner des Kantons Basel-Stadt aufgrund einer Covid-19-Infektion in Spitalpflege', d))
print('ICU:', sc.find(r'Insgesamt ([0-9]+) Personen benötigen Intensivpflege', d))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
42778,
62,
11321,
355,
629,
198,
198,
4798,
10786,
4462,
11537,
198,
2,
383,
1351,
286,
6685,
318,
635,
1695,
319,
3740,
1378,
2503,
13,
21287,
13,
1443,
13,
354,
14,... | 2.178282 | 1,851 |
import numpy as np
import math
import cv2
import matplotlib.pyplot as plt
import time
import numpy.ma as ma
import gym
from gym import spaces
from gym.envs.toy_text import discrete
from stable_baselines.common.policies import MlpPolicy
from stable_baselines import PPO2
from stable_baselines.common.env_checker import check_env
from stable_baselines.common.cmd_util import make_vec_env
from pathlib import Path
import os, sys, inspect
import random
def load_image_mask(path, img_path, msk_path):
""" Load image and mask (in final prototype will be received from previous step in pipeline)
Parameters:
path: relative path to folder with image and mask files
img_path: image file name (with extension)
msk_path: mask file name (with extension)
Returns:
image: image loaded
mask: mask loaded
"""
image = cv2.cvtColor(cv2.imread(os.path.join(path, img_path)), cv2.COLOR_BGR2RGB)
mask = cv2.imread(os.path.join(path, msk_path))
thrshd = 100 ### to delete artifacts
mask[mask > thrshd] = 255
mask[mask <= thrshd] = 0
return image, mask
def analyze_image(image, mask):
""" Given image and mask calc the "projection alignment" as std over mean in our ROI
Parameters:
image: image (int n*m matrix). grayscale
mask: image (int n*m matrix). grayscale
Returns:
align: metric for alignment
"""
vett = np.array(np.nonzero(mask))
roi = [vett[0].min(), vett[0].max(), vett[1].min(), vett[1].max()]
target_cut = mask[roi[0]:roi[1], roi[2]:roi[3]]
mx = ma.masked_array(image[roi[0]:roi[1], roi[2]:roi[3]], [target_cut == 0])
align = np.round(mx.std(ddof=1) / mx.mean(), 4)
return align
def mask_red(mask):
""" Given mask generate a red mask over white background
Parameters:
mask: image (int n*m matrix).
Returns:
red_mask: mask with red colored veins
"""
vett = np.array(np.nonzero(mask))
roi = [vett[0].min(), vett[0].max(), vett[1].min(), vett[1].max()]
cut = mask[roi[0]:roi[1], roi[2]:roi[3]]
red_mask = np.zeros((cut.shape[0], cut.shape[1], 3), dtype=np.uint8)
red_mask.fill(255)
if (cut[:, :].shape[2] == 1):
red_mask[..., 1] -= cut[:, :]
red_mask[..., 0] -= cut[:, :]
else:
red_mask[..., 1] -= cut[:, :, 1]
red_mask[..., 0] -= cut[:, :, 0]
return red_mask
def rotate_scale(image, angle=0, scale=1):
""" rotate and scale an image, for rotation add also white background
Parameters:
image: image (int n*m matrix).
angle: angle of rotation in degrees
scale: scaling value
Returns:
ret: image rotated and or scaled
"""
# grab the dimensions of the image and then determine the
# center
img = image.copy()
(h, w) = img.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix then grab the sine and cosine
M = cv2.getRotationMatrix2D((cX, cY), angle, scale)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
return cv2.warpAffine(img, M, (nW, nH), borderValue=(255, 255, 255))
def sim_projection(image, mask, rows, cols, angle, scale=1.0):
""" merge the mask with the image, (simulation of projector use)
Parameters:
image: image (int n*m*c matrix).
mask: image (int n*m matrix).
rows: int row position (inside image) where to merge the mask
cols: int column position (inside image) where to merge the mask
angle: angle of rotation in degrees
scale: scaling value
Returns:
merge: image, (int n*m*c) image with mask transformed and merged onto
"""
merge = image.copy()
rows = int(rows)
cols = int(cols)
# rotation
rotated = rotate_scale(mask, angle, scale)
### for recalculation of vertices of bounding box
center_r = int((rotated.shape[0] - mask.shape[0]) / 2)
center_c = int((rotated.shape[1] - mask.shape[1]) / 2)
# coordinates where to position the transformed mask
prj_crds = [rows - center_r, rows - center_r + rotated.shape[0], cols - center_c, cols - center_c + rotated.shape[1]]
# merge the 2 images
img_overlap = cv2.addWeighted(merge[prj_crds[0]:prj_crds[1], prj_crds[2]:prj_crds[3]], 0.8, rotated, 0.5, 0)
merge[prj_crds[0]:prj_crds[1], prj_crds[2]:prj_crds[3]] = img_overlap
# return
return merge
## Complete Version (translation, rotation and scaling)
class ProjectionEnv(gym.Env):
"""
Custom Environment that follows gym interface.
"""
metadata = {'render.modes': ['console', 'rgb_array']}
# Define constants for clearer code ### for 1 pixel
UP = 0
DOWN = 1
LEFT = 2
RIGHT = 3
CLOCK = 4 ### for clock rotation
COUNT = 5 ### for counterclock rotation
INCR = 6
DECR = 7
nA = 8 ### number of actions
| [
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
269,
85,
17,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
640,
198,
11748,
299,
32152,
13,
2611,
355,
17266,
198,
198,
11748,
11550,
198,
6738,
... | 2.468034 | 2,096 |
import typing as tp
import numpy as np
from nevergrad.common import testing
from . import game
@testing.parametrized(**{name: (name,) for name in game._Game().get_list_of_games()})
| [
11748,
19720,
355,
256,
79,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1239,
9744,
13,
11321,
1330,
4856,
198,
6738,
764,
1330,
983,
628,
198,
31,
33407,
13,
17143,
316,
380,
8863,
7,
1174,
90,
3672,
25,
357,
3672,
35751,
329,
1... | 3.155172 | 58 |
#!/usr/bin/env python
""" pg.examples.testsprite
Like the testsprite.c that comes with libsdl, this pygame version shows
lots of sprites moving around.
It is an abomination of ugly code, and mostly used for testing.
See pg.examples.aliens for some prettyier code.
"""
import sys
import os
from random import randint
from time import time
import pygame as pg
from pygame.compat import xrange_
if "-psyco" in sys.argv:
try:
import psyco
psyco.full()
except Exception:
print("No psyco for you! psyco failed to import and run.")
main_dir = os.path.split(os.path.abspath(__file__))[0]
data_dir = os.path.join(main_dir, "data")
# use this to use update rects or not.
# If the screen is mostly full, then update rects are not useful.
update_rects = True
if "-update_rects" in sys.argv:
update_rects = True
if "-noupdate_rects" in sys.argv:
update_rects = False
use_static = False
if "-static" in sys.argv:
use_static = True
use_layered_dirty = False
if "-layered_dirty" in sys.argv:
update_rects = True
use_layered_dirty = True
flags = 0
if "-flip" in sys.argv:
flags ^= pg.DOUBLEBUF
if "-fullscreen" in sys.argv:
flags ^= pg.FULLSCREEN
if "-sw" in sys.argv:
flags ^= pg.SWSURFACE
use_rle = True
if "-hw" in sys.argv:
flags ^= pg.HWSURFACE
use_rle = False
if "-scaled" in sys.argv:
flags ^= pg.SCALED
screen_dims = [640, 480]
if "-height" in sys.argv:
i = sys.argv.index("-height")
screen_dims[1] = int(sys.argv[i + 1])
if "-width" in sys.argv:
i = sys.argv.index("-width")
screen_dims[0] = int(sys.argv[i + 1])
if "-alpha" in sys.argv:
use_alpha = True
else:
use_alpha = False
print(screen_dims)
##class Thingy(pg.sprite.Sprite):
## images = None
## def __init__(self):
## pg.sprite.Sprite.__init__(self)
## self.image = Thingy.images[0]
## self.rect = self.image.get_rect()
## self.rect.x = randint(0, screen_dims[0])
## self.rect.y = randint(0, screen_dims[1])
## #self.vel = [randint(-10, 10), randint(-10, 10)]
## self.vel = [randint(-1, 1), randint(-1, 1)]
##
## def move(self):
## for i in [0, 1]:
## nv = self.rect[i] + self.vel[i]
## if nv >= screen_dims[i] or nv < 0:
## self.vel[i] = -self.vel[i]
## nv = self.rect[i] + self.vel[i]
## self.rect[i] = nv
def main(
update_rects=True,
use_static=False,
use_layered_dirty=False,
screen_dims=[640, 480],
use_alpha=False,
flags=0,
):
"""Show lots of sprites moving around
Optional keyword arguments:
update_rects - use the RenderUpdate sprite group class (default True)
use_static - include non-moving images (default False)
use_layered_dirty - Use the FastRenderGroup sprite group (default False)
screen_dims - Pygame window dimensions (default [640, 480])
use_alpha - use alpha blending (default False)
flags - additional display mode flags (default no additional flags)
"""
if use_layered_dirty:
update_rects = True
# pg.init()
pg.display.init()
# if "-fast" in sys.argv:
screen = pg.display.set_mode(screen_dims, flags, vsync="-vsync" in sys.argv)
# this is mainly for GP2X, so it can quit.
pg.joystick.init()
num_joysticks = pg.joystick.get_count()
if num_joysticks > 0:
stick = pg.joystick.Joystick(0)
stick.init() # now we will receive events for the joystick
screen.fill([0, 0, 0])
pg.display.flip()
sprite_surface = pg.image.load(os.path.join(data_dir, "asprite.bmp"))
sprite_surface2 = pg.image.load(os.path.join(data_dir, "static.png"))
if use_rle:
sprite_surface.set_colorkey([0xFF, 0xFF, 0xFF], pg.SRCCOLORKEY | pg.RLEACCEL)
sprite_surface2.set_colorkey([0xFF, 0xFF, 0xFF], pg.SRCCOLORKEY | pg.RLEACCEL)
else:
sprite_surface.set_colorkey([0xFF, 0xFF, 0xFF], pg.SRCCOLORKEY)
sprite_surface2.set_colorkey([0xFF, 0xFF, 0xFF], pg.SRCCOLORKEY)
if use_alpha:
sprite_surface = sprite_surface.convert_alpha()
sprite_surface2 = sprite_surface2.convert_alpha()
else:
sprite_surface = sprite_surface.convert()
sprite_surface2 = sprite_surface2.convert()
Thingy.images = [sprite_surface]
if use_static:
Static.images = [sprite_surface2]
if len(sys.argv) > 1:
try:
numsprites = int(sys.argv[-1])
except Exception:
numsprites = 100
else:
numsprites = 100
sprites = None
if use_layered_dirty:
## sprites = pg.sprite.FastRenderGroup()
sprites = pg.sprite.LayeredDirty()
else:
if update_rects:
sprites = pg.sprite.RenderUpdates()
else:
sprites = pg.sprite.Group()
for i in xrange_(0, numsprites):
if use_static and i % 2 == 0:
sprites.add(Static())
sprites.add(Thingy())
frames = 0
start = time()
background = pg.Surface(screen.get_size())
background = background.convert()
background.fill([0, 0, 0])
going = True
while going:
if not update_rects:
screen.fill([0, 0, 0])
## for sprite in sprites:
## sprite.move()
if update_rects:
sprites.clear(screen, background)
sprites.update()
rects = sprites.draw(screen)
if update_rects:
pg.display.update(rects)
else:
pg.display.flip()
for event in pg.event.get():
if event.type in [pg.QUIT, pg.KEYDOWN, pg.QUIT, pg.JOYBUTTONDOWN]:
going = False
frames += 1
end = time()
print("FPS: %f" % (frames / ((end - start))))
pg.quit()
if __name__ == "__main__":
main(update_rects, use_static, use_layered_dirty, screen_dims, use_alpha, flags)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
23241,
13,
1069,
12629,
13,
41989,
1050,
578,
198,
198,
7594,
262,
5254,
1050,
578,
13,
66,
326,
2058,
351,
9195,
21282,
75,
11,
428,
12972,
6057,
2196,
2523,
198,
75,
1747,
2... | 2.226401 | 2,659 |
import pkgutil
import sys
import bankinator.bank
import bankinator.output
import getpass
| [
11748,
279,
10025,
22602,
198,
11748,
25064,
198,
11748,
3331,
20900,
13,
17796,
198,
11748,
3331,
20900,
13,
22915,
198,
11748,
651,
6603,
628,
628
] | 3.68 | 25 |
# -*- coding: utf-8 -*-
import datetime
import pathlib
import pickle
import os
import logging
import numpy as np
import torch as t
from torch.optim import Adagrad, lr_scheduler
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from train_utils import save_model, configure_weights, UserBatchIncrementDataset, set_random_seed
from dataset import generate_train_files
import models
import argparse
import optuna
if __name__ == '__main__':
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
4818,
8079,
198,
11748,
3108,
8019,
198,
11748,
2298,
293,
198,
11748,
28686,
198,
11748,
18931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
355,
25... | 3.223602 | 161 |
#!/usr/bin/python
import argparse
from apiclient.discovery import build
import httplib2
from oauth2client import client
from oauth2client import file
from oauth2client import tools
from apiclient.errors import HttpError
import pprint
from config import CREDENTIALS_JSON
def get_service(api_name, api_version, scope, client_secrets_path):
"""Get a service that communicates to a Google API.
Args:
api_name: string The name of the api to connect to.
api_version: string The api version to connect to.
scope: A list of strings representing the auth scopes to authorize for the
connection.
client_secrets_path: string A path to a valid client secrets file.
Returns:
A service that is connected to the specified API.
"""
# Parse command-line arguments.
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[tools.argparser])
flags = parser.parse_args([])
# Set up a Flow object to be used if we need to authenticate.
flow = client.flow_from_clientsecrets(
client_secrets_path, scope=scope,
message=tools.message_if_missing(client_secrets_path))
# Prepare credentials, and authorize HTTP object with them.
# If the credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# credentials will get written back to a file.
storage = file.Storage(api_name + '.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = tools.run_flow(flow, storage, flags)
http = credentials.authorize(http=httplib2.Http())
# Build the service object.
service = build(api_name, api_version, http=http)
return service
@default_fields(('name', 'id'))
@default_fields(('name', 'id'))
@default_fields(('name', 'id'))
"""
Set up command line argument parser
"""
parser = argparse.ArgumentParser(description="User management tool for Google Analytics")
parser.add_argument("--fields", help="Override fields to output", type=str, default=None)
subparsers = parser.add_subparsers()
"""
Set up for user subcommands
"""
parser_user = subparsers.add_parser("user", description="subcommands relavant to single users")
parser_user.set_defaults(object="user")
user_subparsers = parser_user.add_subparsers()
#sub parser to list users
user_parser_list = user_subparsers.add_parser("list", description="list all the users for an id")
user_parser_list.set_defaults(action="list")
user_parser_list.add_argument("account", help="id for the relevant account", type=str)
user_parser_list.add_argument("--property", help="id for the relevant property", type=str, default="~all")
user_parser_list.add_argument("--profile", help="id for the relevant profile", type=str, default="~all")
#sub parser to add new user
user_parser_add = user_subparsers.add_parser("add", description="add a user to an account")
user_parser_add.set_defaults(action="add")
user_parser_add.add_argument("account", help="id for the relevant account", type=str)
user_parser_add.add_argument("email", help="email of the user to add", type=str)
user_parser_add.add_argument("--property", "-wp", help="id for the relevant property", type=str, default="~all")
user_parser_add.add_argument("--profile", "-p", help="id for the relevant profile", type=str, default="~all")
user_parser_add.add_argument("--permissions", "-perms", help="permissions", type=str, nargs="*", choices=["COLLABORATE", "EDIT", "READ_AND_ANALYZE", "MANAGE_USERS"], default=["READ_AND_ANALYZE"])
#sub parser to delete user
user_parser_del = user_subparsers.add_parser("delete", description="delete a user from an account")
user_parser_del.set_defaults(action="delete")
user_parser_del.add_argument("account", help="id for the relevant account", type=str)
user_parser_del.add_argument("email", help="email of the user to delete", type=str)
"""
Set up for accounts subcommands
"""
parser_account = subparsers.add_parser("accounts", description="subcommands relevant to accounts")
parser_account.set_defaults(object="account")
account_subparsers = parser_account.add_subparsers()
#sub parser to list accounts
account_parser_list = account_subparsers.add_parser("list", description="list accounts")
account_parser_list.set_defaults(action="list")
"""
Set up for property subcommands
"""
parser_property = subparsers.add_parser("properties", description="subcommands relevant to properties")
parser_property.set_defaults(object="property")
property_subparsers = parser_property.add_subparsers()
#sub parser to list properties
property_parser_list = property_subparsers.add_parser("list", description="list properties")
property_parser_list.set_defaults(action="list")
property_parser_list.add_argument("--account", "-a", help="id for account to get properties for", type=str, default="~all")
"""
Set up for profiles subcommands
"""
parser_profile = subparsers.add_parser("profiles", description="subcommands relevant to profiles")
parser_profile.set_defaults(object="profile")
profile_subparsers = parser_profile.add_subparsers()
#sub parser to list profiles
profile_parser_list = profile_subparsers.add_parser("list", description="list profiles")
profile_parser_list.set_defaults(action="list")
profile_parser_list.add_argument("--account", "-a", help="id for account to get profiles for", type=str, default="~all")
profile_parser_list.add_argument("--property", "-wp", help="id for property to get profiles for", type=str, default="~all")
args = parser.parse_args()
if args.fields:
fields = args.fields.split(',')
else:
fields = None
scope = ['https://www.googleapis.com/auth/analytics.readonly', 'https://www.googleapis.com/auth/analytics.manage.users']
# Authenticate and construct service.
service = get_service('analytics', 'v3', scope, CREDENTIALS_JSON)
if args.object == "user":
if args.action == "list":
list_users(service, args.account, args.property, args.profile, fields=fields)
elif args.action == "add":
add_user(service, args.account, args.email, args.permissions)
elif args.action == "delete":
delete_user(service, args.account, args.email)
elif args.object == "account":
if args.action == "list":
list_accounts(service, fields=fields)
elif args.object == "property":
if args.action == "list":
list_properties(service, args.account, fields=fields)
elif args.object == "profile":
if args.action == "list":
list_profiles(service, args.account, args.property, fields=fields)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
1822,
29572,
198,
198,
6738,
2471,
291,
75,
1153,
13,
67,
40821,
1330,
1382,
198,
11748,
1841,
489,
571,
17,
198,
6738,
267,
18439,
17,
16366,
1330,
5456,
198,
6738,
267,
18439,
... | 3.14689 | 2,090 |
"""
列名称、数据类型、单位辅助转换工具
列名称规则:
1. 去除前导大写字符; 如"一、营业总收入" -> "营业总收入"
"(一)基本每股收益" -> "基本每股收益"
2. 列名称 (上限) -> 列名称_上限_ -> 列名称_上限
3. 去除名称中的单位;如"分配股本基数(董 )万股" -> "分配股本基数_董"
4. 名称中含":",转换为"_";如"其中:营业收入" -> "其中_营业收入"
"""
import re
import pandas as pd
from .base import DB_DATE_FIELD, TS_DATE_FIELD
DATE_COL_PAT = re.compile('时间$|日$|日A$|日B$|日期$|年度$|报告期$')
UNIT_PAT = re.compile(r'[)((]?(单位:)?(\w*[币股元%‰])[))]?$')
CODE_PAT = re.compile(r'([.-]\w{1,3}$)')
# 去除前导数字
PREFIX_PAT = re.compile(r"^\d、|^[(]?[一二三四五六七八九].*?[、)]|^[(]\d[)]")
MID_PAT = re.compile(r"([))]$|\b[()()、::-])")
# 尾部单位
# SUFFIX_PAT = re.compile(r'[)((]?(单位:)?(\w*[^美][股元%‰])[))]?$')
SUFFIX_PAT = re.compile(
r'\s.*?[股元]|[(()]单位[::].*?[))]|[(()][万亿]?[股元][))]|[(( )]?[%‰][))]?$')
FIN_PAT = re.compile(r"(_{1,})$")
UNIT_MAPS = {
'%': 0.01,
'‰': 0.001,
'元': 1.0,
'人民币': 1.0,
'港币': 1.0,
'美元': 1.0,
'股': 1,
'万元': 10000.0,
'万股': 10000,
'亿股': 100000000,
'亿元': 100000000.0,
}
def parse_unit(col_name):
"""自列名称中解析数量单位,返回dict"""
f = UNIT_PAT.findall(col_name)
if len(f) == 1:
try:
return {col_name: UNIT_MAPS[f[0][1]]}
except KeyError:
# 如解析到'国家持股','B股'
return {}
else:
return {}
def get_unit_dict(df):
"""解析数据框的单位词典"""
units = {}
for col_name in df.columns:
units.update(parse_unit(col_name))
return units
def _fix_code(df):
"""修复代码"""
cols = ['证券代码', '股票代码', '上市代码', '转板代码', '基金代码']
# 股票行数数据 代码 000001-SZE
for c in cols:
if c in df.columns:
df[c] = df[c].map(f)
return df
def _fix_date(df):
"""修复日期"""
for col in df.columns:
if re.search(DATE_COL_PAT, col):
df[col] = pd.to_datetime(
df[col], infer_datetime_format=True, errors='coerce')
return df
# 以下部分处理 -----专题统计-----
def _special_fix(df, level, db_name):
"""针对特定项目的特殊处理"""
func = _factory(level, db_name)
df = func(df)
return df
def _fix_num_unit(df):
"""修复列数量单位"""
units = get_unit_dict(df)
for col, unit in units.items():
if not pd.api.types.is_numeric_dtype(df[col]):
raise TypeError(f'应为数字类型。列"{col}"实际为"{df[col].dtype}"')
df[col] = df[col] * unit
return df
def _remove_prefix_num(x):
"""去除列名称中的前导数字部分"""
return PREFIX_PAT.sub('', x)
def _remove_suffix_unit(x):
"""去除列名称中的尾部单位部分"""
return SUFFIX_PAT.sub('', x)
def _fix_col_name(df):
"""修复列名称"""
# 更名
if ("股票代码" in df.columns) and ("股票简称" in df.columns):
df.rename(columns={"股票代码": "证券代码",
"股票简称": "证券简称"},
inplace=True)
origin = df.columns
df.columns = map(f, origin)
return df
def fixed_data(input_df, level, db_name):
"""修复日期、股票代码、数量单位及规范列名称"""
# 避免原地修改
df = input_df.copy()
df = _special_fix(df, level, db_name)
df = _fix_code(df)
df = _fix_date(df)
df = _fix_num_unit(df)
df = _fix_col_name(df)
return df
| [
37811,
198,
198,
26344,
245,
28938,
235,
163,
100,
108,
23513,
46763,
108,
162,
235,
106,
163,
109,
119,
161,
252,
233,
23513,
39355,
243,
19526,
235,
164,
122,
227,
27950,
102,
164,
121,
105,
162,
235,
95,
32432,
98,
17739,
115,
19... | 1.262032 | 2,431 |
"""
---> Reveal Cards In Increasing Order
---> Medium
"""
import collections
in_deck = [17, 13, 11, 2, 3, 5, 7]
a = Solution()
# print(a.deckRevealedIncreasing(in_deck))
print(a.deckRevealedIncreasing_sol2(in_deck))
"""
Reference - https://leetcode.com/problems/reveal-cards-in-increasing-order/discuss/200515/JavaC%2B%2BPython-Simulate-the-Reversed-Process
Approach 1:
Add the next number ahead after keeping the last element of the list in first because it wll be shifted to last when
hand is shown
Complexities:
Time -> O(N^2)
Space -> O(N)
Approach 2:
when adding an element rotate the queue by one to right and add the element in left
Complexities:
Time -> O(NlogN)
Space -> O(N)
"""
| [
37811,
198,
198,
438,
3784,
31091,
282,
15824,
554,
38921,
8284,
198,
438,
3784,
13398,
198,
198,
37811,
198,
11748,
17268,
628,
198,
198,
259,
62,
35875,
796,
685,
1558,
11,
1511,
11,
1367,
11,
362,
11,
513,
11,
642,
11,
767,
60,
... | 2.857724 | 246 |
#!/usr/local/bin/python3.3
a, *b = 'spam'
print(a)
print(b)
nudge = 1
wink = 2
print(nudge, wink)
A, B = nudge, wink
print([A, B])
[C, D] = [nudge, wink]
nudge, wink = wink, nudge
print(nudge, wink)
[a, b, c] = (1, 2, 3)
print(a, c)
(a, b, c, d) = "SPAM"
print(a, c)
D = {'a': 'lala', 'b': 'haha'}
[g, y] = D
print([g, y])
string = 'SPAM'
a, b, c = string[0], string[1], string[2:]
print(a, b, c)
a, b, c = list(string[:2]) + [string[2:]]
print(a, b, c)
(a, b), c = string[:2], string[2:]
print(a, b, c)
red, blue, green = range(3)
print(red, blue)
L = [1, 2, 3, 4]
while L:
front, L = L[0], L[1:]
print(front, L)
seq = [1, 2, 3, 4]
a, b, c, d = seq
print(a, b, c, d)
a, *b = seq
print(a)
print(b)
*a, b = seq
print(a)
print(b)
a, *b, c = seq
print(a)
print(b)
print(c)
L = [1, 2, 3, 4]
while L:
front, *L = L
print(front, L)
a, b, c, *d = seq
print(a, b, c, d)
a, b, c, d, *e = seq
print(a, b, c, d, e)
# These are errors
# a, *b, c, *d = seq
*a, = seq
print(a)
a, *b = seq
print(a, b)
a, b = seq[0], seq[1:]
print(a, b)
*a, b = seq
print(a, b)
a, b = seq[:-1], seq[-1]
print(a, b)
a = b = c = 'spam'
print(a, b, c)
a = b = []
print(a, b)
b.append(42)
print(a, b)
a, b = [], []
print(a, b)
b.append(42)
print(a, b)
L = [1, 2]
M = L
L = L + [3, 4]
print(L, M)
L = [1, 2]
M = L
L += [3, 4]
print(L, M)
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
18,
13,
18,
198,
198,
64,
11,
1635,
65,
796,
705,
2777,
321,
6,
198,
4798,
7,
64,
8,
198,
4798,
7,
65,
8,
198,
198,
77,
12587,
796,
352,
198,
86,
676,
796,
362,
198,
4798,
7,
... | 1.771164 | 756 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pyu2f.convenience.customauthenticator."""
import base64
import json
import struct
import sys
import mock
from pyu2f import errors
from pyu2f import model
from pyu2f.convenience import customauthenticator
if sys.version_info[:2] < (2, 7):
import unittest2 as unittest # pylint: disable=g-import-not-at-top
else:
import unittest # pylint: disable=g-import-not-at-top
# Input/ouput values recorded from a successful signing flow
SIGN_SUCCESS = {
'app_id': 'test_app_id',
'app_id_hash_encoded': 'TnMguTdPn7OcIO9f-0CgfQdY254bvc6WR-DTPZnJ49w',
'challenge': b'asdfasdf',
'challenge_hash_encoded': 'qhJtbTQvsU0BmLLpDWes-3zFGbegR2wp1mv5BJ2BwC0',
'key_handle_encoded': ('iBbl9-VYt-XSdWeHVNX-gfQcXGzlrAQ7BcngVNUxWijIQQlnZEI'
'4Vb0Bp2ydBCbIQu_5rNlKqPH6NK1TtnM7fA'),
'origin': 'test_origin',
'signature_data_encoded': ('AQAAAI8wRQIhALlIPo6Hg8HwzELdYRIXnAnpsiHYCSXHex'
'CS34eiS2ixAiBt3TRmKE1A9WyMjc3JGrGI7gSPg-QzDSNL'
'aIj7JwcCTA'),
'client_data_encoded': ('eyJjaGFsbGVuZ2UiOiAiWVhOa1ptRnpaR1kiLCAib3JpZ2luI'
'jogInRlc3Rfb3JpZ2luIiwgInR5cCI6ICJuYXZpZ2F0b3IuaW'
'QuZ2V0QXNzZXJ0aW9uIn0'),
'u2f_version': 'U2F_V2',
'registered_key': model.RegisteredKey(base64.urlsafe_b64decode(
'iBbl9-VYt-XSdWeHVNX-gfQcXGzlrAQ7BcngVNUxWijIQQlnZEI4Vb0Bp2ydBCbIQu'
'_5rNlKqPH6NK1TtnM7fA=='
))
}
@mock.patch.object(sys, 'stderr', new=mock.MagicMock())
if __name__ == '__main__':
unittest.main()
| [
2,
15069,
1584,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 2.009166 | 1,091 |
#!/usr/bin/env python
"""
Contains the sqlintf.Input class definition
Please note that this module is private. The sqlintf.Input class is
available in the ``wpipe.sqlintf`` namespace - use that instead.
"""
from .core import sa, orm
from .DPOwner import DPOwner
__all__ = ['Input']
class Input(DPOwner):
"""
A Input object represents a row of the `inputs` table.
DO NOT USE CONSTRUCTOR: constructing a Input object adds a new row
to the database: USE INSTEAD ITS WPIPE COUNTERPART.
"""
__tablename__ = 'inputs'
id = sa.Column(sa.Integer, sa.ForeignKey('dpowners.id'), primary_key=True)
name = sa.Column(sa.String(256))
rawspath = sa.Column(sa.String(256))
confpath = sa.Column(sa.String(256))
pipeline_id = sa.Column(sa.Integer, sa.ForeignKey('pipelines.id'))
pipeline = orm.relationship("Pipeline", back_populates="inputs", foreign_keys=[pipeline_id])
targets = orm.relationship("Target", back_populates="input")
__mapper_args__ = {
'polymorphic_identity': 'input',
}
__table_args__ = (sa.UniqueConstraint('pipeline_id', 'name'),
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
4264,
1299,
262,
44161,
600,
69,
13,
20560,
1398,
6770,
198,
198,
5492,
3465,
326,
428,
8265,
318,
2839,
13,
383,
44161,
600,
69,
13,
20560,
1398,
318,
198,
15182,
287,
2... | 2.605923 | 439 |
import logging
_log = logging.getLogger(__name__)
import asyncio
from functools import partial, wraps
from . import raw
from .raw import Disconnected, RemoteError, Cancelled, Finished, LazyRepr
from ..wrapper import Value, Type
from .._p4p import (logLevelAll, logLevelTrace, logLevelDebug,
logLevelInfo, logLevelWarn, logLevelError,
logLevelFatal, logLevelOff)
__all__ = [
'Context',
'Value',
'Type',
'RemoteError',
'timeout',
]
def timesout(deftimeout=5.0):
"""Decorate a coroutine to implement an overall timeout.
The decorated coroutine will have an additional keyword
argument 'timeout=' which gives a timeout in seconds,
or None to disable timeout.
:param float deftimeout: The default timeout= for the decorated coroutine.
It is suggested perform one overall timeout at a high level
rather than multiple timeouts on low-level operations. ::
@timesout()
@asyncio.coroutine
def dostuff(ctxt):
yield from ctxt.put('msg', 'Working')
A, B = yield from ctxt.get(['foo', 'bar'])
yield from ctxt.put('bar', A+B, wait=True)
yield from ctxt.put('msg', 'Done')
@asyncio.coroutine
def exec():
with Context('pva') as ctxt:
yield from dostuff(ctxt, timeout=5)
"""
return decorate
class Context(raw.Context):
"""
:param str provider: A Provider name. Try "pva" or run :py:meth:`Context.providers` for a complete list.
:param conf dict: Configuration to pass to provider. Depends on provider selected.
:param bool useenv: Allow the provider to use configuration from the process environment.
:param dict nt: Controls :ref:`unwrap`. None uses defaults. Set False to disable
:param dict unwrap: Legacy :ref:`unwrap`.
The methods of this Context will block the calling thread until completion or timeout
The meaning, and allowed keys, of the configuration dictionary depend on the provider.
The "pva" provider understands the following keys:
* EPICS_PVA_ADDR_LIST
* EPICS_PVA_AUTO_ADDR_LIST
* EPICS_PVA_SERVER_PORT
* EPICS_PVA_BROADCAST_PORT
Timeout and Cancellation
^^^^^^^^^^^^^^^^^^^^^^^^
All coroutines/Futures returned by Context methods can be cancelled.
The methods of Context do not directly implement a timeout.
Instead :py:meth:`asyncio.wait_for` should be used.
It is suggested perform one overall timeout at a high level
rather than multiple timeouts on low-level operations. ::
@timesout()
@asyncio.coroutine
def dostuff(ctxt):
yield from ctxt.put('msg', 'Working')
A, B = yield from ctxt.get(['foo', 'bar'])
yield from ctxt.put('bar', A+B, wait=True)
yield from ctxt.put('msg', 'Done')
@asyncio.coroutine
def exec():
with Context('pva') as ctxt:
yield from dostuff(ctxt, timeout=5)
"""
@asyncio.coroutine
def get(self, name, request=None):
"""Fetch current value of some number of PVs.
:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:returns: A p4p.Value, or list of same. Subject to :py:ref:`unwrap`.
When invoked with a single name then returns is a single value.
When invoked with a list of name, then returns a list of values. ::
with Context('pva') as ctxt:
V = yield from ctxt.get('pv:name')
A, B = yield from ctxt.get(['pv:1', 'pv:2'])
"""
singlepv = isinstance(name, (bytes, str))
if singlepv:
return (yield from self._get_one(name, request=request))
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
futs = [self._get_one(N, request=R) for N, R in zip(name, request)]
ret = yield from asyncio.gather(*futs, loop=self.loop)
return ret
@asyncio.coroutine
@asyncio.coroutine
def put(self, name, values, request=None, process=None, wait=None, get=True):
"""Write a new value of some number of PVs.
:param name: A single name string or list of name strings
:param values: A single value, a list of values, a dict, a `Value`. May be modified by the constructor nt= argument.
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable
will be initialized with recent PV values. eg. use this with NTEnum to find the enumeration list.
When invoked with a single name then returns is a single value.
When invoked with a list of name, then returns a list of values
If 'wait' or 'process' is specified, then 'request' must be omitted or None. ::
with Context('pva') as ctxt:
yield from ctxt.put('pv:name', 5.0)
yield from ctxt.put(['pv:1', 'pv:2'], [1.0, 2.0])
yield from ctxt.put('pv:name', {'value':5})
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict, it is assumed to be a plain value
and an attempt is made to store it in '.value' field.
"""
if request and (process or wait is not None):
raise ValueError("request= is mutually exclusive to process= or wait=")
elif process or wait is not None:
request = 'field()record[block=%s,process=%s]' % ('true' if wait else 'false', process or 'passive')
singlepv = isinstance(name, (bytes, str))
if singlepv:
return (yield from self._put_one(name, values, request=request, get=get))
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
assert len(name) == len(values), (name, values)
futs = [self._put_one(N, V, request=R, get=get) for N, V, R in zip(name, values, request)]
yield from asyncio.gather(*futs, loop=self.loop)
@asyncio.coroutine
@asyncio.coroutine
def rpc(self, name, value, request=None):
"""Perform a Remote Procedure Call (RPC) operation
:param str name: PV name string
:param Value value: Arguments. Must be Value instance
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:returns: A Value. Subject to :py:ref:`unwrap`.
For example: ::
uri = NTURI(['A','B'])
with Context('pva') as ctxt:
result = yield from ctxt.rpc('pv:name:add', uri.wrap('pv:name:add', 5, B=6))
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict or Value, it is assumed to be a plain value
and an attempt is made to store it in '.value' field.
"""
F = asyncio.Future(loop=self.loop)
cb = partial(self.loop.call_soon_threadsafe, cb)
op = super(Context, self).rpc(name, cb, value, request=request)
try:
return (yield from F)
finally:
op.close()
def monitor(self, name, cb, request=None, notify_disconnect=False):
"""Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In additional to Values, the callback may also be call with instances of Exception.
Specifically: Disconnected , RemoteError, or Cancelled
:returns: a :py:class:`Subscription` instance
The callable will be invoked with one argument which is either.
* A p4p.Value (Subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected , RemoteError, or Cancelled)
"""
assert asyncio.iscoroutinefunction(cb), "monitor callback must be coroutine"
R = Subscription(name, cb, notify_disconnect=notify_disconnect, loop=self.loop)
cb = partial(self.loop.call_soon_threadsafe, R._E.set)
R._S = super(Context, self).monitor(name, cb, request)
return R
class Subscription(object):
"""An active subscription.
"""
def close(self):
"""Begin closing subscription.
"""
if self._S is not None:
# after .close() self._event should never be called
self._S.close()
self._S = None
self._run = False
self._E.set()
@property
def done(self):
'Has all data for this subscription been received?'
return self._S is None or self._S.done()
@property
def empty(self):
'Is data pending in event queue?'
return self._S is None or self._S.empty()
@asyncio.coroutine
def wait_closed(self):
"""Wait until subscription is closed.
"""
assert self._S is None, "Not close()'d"
yield from self._T
@asyncio.coroutine
| [
198,
11748,
18931,
198,
62,
6404,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
11748,
30351,
952,
198,
198,
6738,
1257,
310,
10141,
1330,
13027,
11,
27521,
198,
198,
6738,
764,
1330,
8246,
198,
6738,
764,
1831,
... | 2.505407 | 3,884 |
import carbon
import asyncio
loop = asyncio.get_event_loop() # Setting up asyncio
code = """
defmodule Something do
def anything() do
IO.puts "Hello, World"
end
end
""" # Any kind of code-block in any language
options = carbon.CarbonOptions(code)
cb = carbon.Carbon()
image = loop.run_until_complete(cb.generate(options)) # Returns a CarbonImage object
loop.run_until_complete(image.save('something-script'))
| [
11748,
6588,
198,
11748,
30351,
952,
198,
198,
26268,
796,
30351,
952,
13,
1136,
62,
15596,
62,
26268,
3419,
220,
1303,
25700,
510,
30351,
952,
198,
198,
8189,
796,
37227,
198,
4299,
21412,
13742,
466,
198,
220,
220,
220,
825,
1997,
3... | 2.97931 | 145 |
from Annotations import annotate_modification
"""
This functions deletes/filters sequences and columns/positions on the MSA on the
following order:
- Removes all the columns/position on the MSA with gaps on the reference
sequence (first sequence)
- Removes all the sequences with a coverage with respect to the number of
columns/positions on the MSA **less** than a `coveragelimit` (default to
`0.75`: sequences with 25% of gaps)
- Removes all the columns/position on the MSA with **more** than a `gaplimit`
(default to `0.5`: 50% of gaps)
"""
| [
6738,
47939,
1330,
24708,
378,
62,
4666,
2649,
628,
628,
628,
628,
198,
37811,
198,
1212,
5499,
28128,
274,
14,
10379,
1010,
16311,
290,
15180,
14,
1930,
1756,
319,
262,
337,
4090,
319,
262,
198,
27780,
278,
1502,
25,
628,
532,
3982,
... | 3.5 | 160 |
import torch
import os
import glob
from torch.utils.data import Dataset
import numpy as np
import scipy.stats as scipy_stats
import numpy.matlib
from PIL import Image
from torchvision import transforms
from xmuda.data.utils.preprocess import create_img_grid, create_voxel_grid, select_points_in_frustum, compute_local_frustums, vox2pix, compute_CP_mega_matrix, compute_mega_context
from xmuda.models.ssc_loss import construct_ideal_affinity_matrix
import pickle
import imageio
from tqdm import tqdm
from itertools import combinations
import time
import random
import xmuda.common.utils.fusion as fusion
import torch.nn.functional as F
from xmuda.data.NYU.params import NYU_class_cluster_4, NYU_class_cluster_6
seg_class_map = [0, 1, 2, 3, 4, 11, 5, 6, 7, 8, 8, 10, 10, 10, 11, 11, 9, 8, 11, 11, 11,
11, 11, 11, 11, 11, 11, 10, 10, 11, 8, 10, 11, 9, 11, 11, 11]
# print(cnts[1:] * 100 / np.sum(cnts[1:]))
if __name__ == '__main__':
main()
| [
11748,
28034,
198,
11748,
28686,
198,
11748,
15095,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
34242,
355,
629,
541,
88,
62,
34242,
198,
11748,
299,
... | 2.564987 | 377 |
"""Locations where we look for configs, install stuff, etc"""
from __future__ import absolute_import
import os
import os.path
import platform
import site
import sys
import sysconfig
from distutils import sysconfig as distutils_sysconfig
from distutils.command.install import SCHEME_KEYS # type: ignore
from pip._internal.utils import appdirs
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.virtualenv import running_under_virtualenv
if MYPY_CHECK_RUNNING:
from typing import Any, Union, Dict, List, Optional
# Application Directories
USER_CACHE_DIR = appdirs.user_cache_dir("pip")
# FIXME doesn't account for venv linked to global site-packages
site_packages = sysconfig.get_path("purelib") # type: Optional[str]
# This is because of a bug in PyPy's sysconfig module, see
# https://bitbucket.org/pypy/pypy/issues/2506/sysconfig-returns-incorrect-paths
# for more information.
if platform.python_implementation().lower() == "pypy":
site_packages = distutils_sysconfig.get_python_lib()
try:
# Use getusersitepackages if this is present, as it ensures that the
# value is initialised properly.
user_site = site.getusersitepackages()
except AttributeError:
user_site = site.USER_SITE
if WINDOWS:
bin_py = os.path.join(sys.prefix, 'Scripts')
bin_user = os.path.join(user_site, 'Scripts')
# buildout uses 'bin' on Windows too?
if not os.path.exists(bin_py):
bin_py = os.path.join(sys.prefix, 'bin')
bin_user = os.path.join(user_site, 'bin')
else:
bin_py = os.path.join(sys.prefix, 'bin')
bin_user = os.path.join(user_site, 'bin')
# Forcing to use /usr/local/bin for standard macOS framework installs
# Also log to ~/Library/Logs/ for use with the Console.app log viewer
if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
bin_py = '/usr/local/bin'
def distutils_scheme(dist_name, user=False, home=None, root=None,
isolated=False, prefix=None):
# type:(str, bool, str, str, bool, str) -> dict
"""
Return a distutils install scheme
"""
from distutils.dist import Distribution
scheme = {}
if isolated:
extra_dist_args = {"script_args": ["--no-user-cfg"]}
else:
extra_dist_args = {}
dist_args = {'name': dist_name} # type: Dict[str, Union[str, List[str]]]
dist_args.update(extra_dist_args)
d = Distribution(dist_args)
# Ignoring, typeshed issue reported python/typeshed/issues/2567
d.parse_config_files()
# NOTE: Ignoring type since mypy can't find attributes on 'Command'
i = d.get_command_obj('install', create=True) # type: Any
assert i is not None
# NOTE: setting user or home has the side-effect of creating the home dir
# or user base for installations during finalize_options()
# ideally, we'd prefer a scheme class that has no side-effects.
assert not (user and prefix), "user={} prefix={}".format(user, prefix)
assert not (home and prefix), "home={} prefix={}".format(home, prefix)
i.user = user or i.user
if user or home:
i.prefix = ""
i.prefix = prefix or i.prefix
i.home = home or i.home
i.root = root or i.root
i.finalize_options()
for key in SCHEME_KEYS:
scheme[key] = getattr(i, 'install_' + key)
# install_lib specified in setup.cfg should install *everything*
# into there (i.e. it takes precedence over both purelib and
# platlib). Note, i.install_lib is *always* set after
# finalize_options(); we only want to override here if the user
# has explicitly requested it hence going back to the config
# Ignoring, typeshed issue reported python/typeshed/issues/2567
if 'install_lib' in d.get_option_dict('install'): # type: ignore
scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))
if running_under_virtualenv():
scheme['headers'] = os.path.join(
sys.prefix,
'include',
'site',
'python' + sys.version[:3],
dist_name,
)
if root is not None:
path_no_drive = os.path.splitdrive(
os.path.abspath(scheme["headers"]))[1]
scheme["headers"] = os.path.join(
root,
path_no_drive[1:],
)
return scheme
| [
37811,
43,
20968,
810,
356,
804,
329,
4566,
82,
11,
2721,
3404,
11,
3503,
37811,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
11748,
3859,
198,
11748,
2524,
198,
11748,
... | 2.606868 | 1,689 |
import os.path
import smtplib
import socket
import ssl
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Copyright (c) 2021. Xin Yang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python ip_updater.py
# Please replace the information included in the brackets {}
cache_path = "{./ip_cache.txt}" # Path for the IP cache file
sender_email = "{sender@gmail.com}"
receiver_email = "{receiver@gmail.com}"
sender_pswd = "{password}" # Password of the sender email, only run in a trusted environment or replace with a secure authentication API
msg_subject = "[IP Updater] {IP Change Detected}" # Email subject
# get current ip using socket
# Detect the existence of the IP cache file
# Compose email content and send out
# If IP change detected, overwrite the cached IP, close cache file and send out email notification
if __name__ == "__main__":
curr_ip = get_curr_ip()
if not cache_exist(cache_path):
# Cache file doesn't exist, create cache file
f = open(cache_path, "w")
print("[IP Updater] Cache file created!")
# Send out email notification for initialization
update_ip(f, curr_ip)
print("[IP Updater] Initialized.\n", curr_ip)
else:
# Cache file exist, read cached IP and compare with the current one
f = open(cache_path, "r+")
cached_ip = f.readline()
if cached_ip != curr_ip:
update_ip(f, curr_ip)
print("[IP Updater] New IP detected!\n", cached_ip, "->", curr_ip)
else:
print("[IP Updater] IP unchanged.\n", curr_ip) | [
11748,
28686,
13,
6978,
198,
11748,
895,
83,
489,
571,
198,
11748,
17802,
198,
11748,
264,
6649,
198,
6738,
3053,
13,
76,
524,
13,
16680,
541,
433,
1330,
337,
3955,
3620,
586,
541,
433,
198,
6738,
3053,
13,
76,
524,
13,
5239,
1330,
... | 3.011613 | 775 |
from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = 'loc_123'
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
39858,
628,
198,
51,
6465,
62,
19535,
31033,
62,
2389,
796,
705,
17946,
62,
10163,
6,
628
] | 3.294118 | 34 |
"""
백준 11549번 : Identifying tea
"""
T = int(input())
print(list(map(int, input().split())).count(T)) | [
37811,
198,
167,
108,
109,
168,
97,
222,
12279,
2920,
167,
110,
230,
1058,
11440,
4035,
8887,
198,
37811,
198,
198,
51,
796,
493,
7,
15414,
28955,
198,
4798,
7,
4868,
7,
8899,
7,
600,
11,
5128,
22446,
35312,
28955,
737,
9127,
7,
5... | 2.244444 | 45 |
from random import randint
from time import sleep
cor = {31 : '\033[31m', 32 : '\033[32m', 33 : '\033[33m', 34 : '\033[34m', 'f' : '\033[m'}
n = randint(1, 10)
print(cor[33], '=*=+=*='*14, cor['f'])
print('Tente adivinhar o número {}entre 0 a 10 {}escolhido pelo computador e vença o jogo '.format(cor[31], cor['f']))
print(cor[33], '=*=+=*='*14, cor['f'])
jog = int(input('Escolha seu número te tente vencer: '))
print(cor[31],'PROCESSANDO....',cor['f'])
sleep(3)
if n == jog:
print(cor[32], '=0='*30, cor['f'])
print('O computador escolheu o Nº {}{}{} e você o Nº {}{}{} PARABENS VOCÊ VENCEU!!!'.format(cor[31], n,cor['f'],cor[33], jog,cor['f']))
else:
print('O computador escolheu o Nº {}{}{} e você o Nº {}{}{} Você é muito pato PERDEU TENTE NOVAMENTE!!!'.format(cor[31], n,cor['f'],cor[33], jog,cor['f'])) | [
6738,
4738,
1330,
43720,
600,
198,
6738,
640,
1330,
3993,
198,
10215,
796,
1391,
3132,
1058,
705,
59,
44427,
58,
3132,
76,
3256,
3933,
1058,
705,
59,
44427,
58,
2624,
76,
3256,
4747,
1058,
705,
59,
44427,
58,
2091,
76,
3256,
4974,
1... | 2.140625 | 384 |
# In for loops, 'item' is the loop variable.
# We don't need a loop counter
# Loop through each character
for item in 'Python':
print(item)
# Loop through each item in a list
for item in ['Eric', 'Nancy', 'River']:
print(item)
# Loop through each item in a list
for item in [1, 2, 3, 4]:
print(item)
# Loop through each item in a range of numbers
for item in range(10):
print(item)
# Loop through each item in a range of numbers from 5 to 10 (exclusive)
for item in range(5,10):
print(item)
# Loop through each item in a range of numbers from 5 to 10 (exclusive), stepping by 2
for item in range(5,10,2):
print(item)
#exercise - using a for loop, loop through items in a cart, calculating the total and then printing it out
prices = [10,20,30]
total = 0
for price in prices:
total += price
print(f"Total is: {total}") | [
198,
2,
554,
329,
23607,
11,
705,
9186,
6,
318,
262,
9052,
7885,
13,
198,
2,
775,
836,
470,
761,
257,
9052,
3753,
198,
198,
2,
26304,
832,
1123,
2095,
198,
1640,
2378,
287,
705,
37906,
10354,
198,
220,
220,
220,
3601,
7,
9186,
8... | 3.024823 | 282 |
#!/usr/bin/python3
"""Module with database connection classess"""
# System libraries
import os, copy
# Third-party libraries
import psycopg2
from pymongo import MongoClient
from config import Config
VARS = [
Config.PSQL_DB,
Config.DB_HOST,
Config.PSQL_PASSWORD,
Config.PSQL_PORT,
Config.PSQL_USER,
Config.MONGO_PORT,
Config.MONGO_DB
]
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
37811,
26796,
351,
6831,
4637,
1398,
408,
37811,
198,
198,
2,
4482,
12782,
198,
11748,
28686,
11,
4866,
198,
198,
2,
10467,
12,
10608,
12782,
198,
11748,
17331,
22163,
70,
17,
198,
6738,
... | 2.582192 | 146 |
try:
while True:
cipo = input().split("x")
maior = max(len(cipo[0]), len(cipo[-1]))
for segmento in cipo:
if len(segmento) // 2 > maior:
maior = len(segmento) // 2
print(maior)
except EOFError:
pass
| [
28311,
25,
201,
198,
220,
220,
220,
981,
6407,
25,
201,
198,
220,
220,
220,
220,
220,
220,
220,
269,
541,
78,
796,
5128,
22446,
35312,
7203,
87,
4943,
201,
198,
220,
220,
220,
220,
220,
220,
220,
17266,
1504,
796,
3509,
7,
11925,
... | 1.720497 | 161 |
import unittest
import uuid
import py3crdt
from py3crdt.sequence import Sequence
from datetime import datetime
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
334,
27112,
198,
11748,
12972,
18,
66,
4372,
83,
198,
6738,
12972,
18,
66,
4372,
83,
13,
43167,
1330,
45835,
198,
6738,
4818,
8079,
1330,
4818,
8079,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
... | 2.824561 | 57 |
# 创建一个人事系统类
hrSystem.record('bob',2000,98)
hrSystem.print_record()
#homework2
calendar.new()
| [
2,
10263,
230,
249,
161,
119,
118,
31660,
10310,
103,
21689,
12859,
233,
163,
111,
119,
163,
119,
253,
163,
109,
119,
198,
198,
11840,
11964,
13,
22105,
10786,
65,
672,
3256,
11024,
11,
4089,
8,
198,
11840,
11964,
13,
4798,
62,
2210... | 1.694915 | 59 |
#!/usr/bin/env python
# coding: utf-8
# Dernier TP (mini projet) : exemples de calcul numérique
# - Prof : Lilian Besson
# - Site du cours : [https://perso.crans.org/besson/teach/intro_num_DEM_2020/](https://perso.crans.org/besson/teach/intro_num_DEM_2020/)
# - Date : mercredi 14/10/2020 et vendredi 16/10/2020.
# ----
# ## Cosinus et sinus (bonus)
# ### Cosinus
# Une des définitions de la fonction cosinus est la suivante :
#
# $$\cos(x) = \sum_{n=0}^{+\infty} \frac{(-1)^n * x^{2n}}{(2n)!}$$
#
# On va pouvoir calculer une approximation de $\cos(x)$ en calculant la somme des $N$ premiers termes, par exemple pour $N=30$ :
# $$\cos(x) \simeq \sum_{n=0}^{N=30} \frac{(-1)^n x^{2n}}{(2n)!} = \frac{x^0}{0!} - \frac{x^2}{2!} + \dots - \frac{x^{2*29}}{(2*29)!} + \frac{x^{2*30}}{(2*30)!}$$
#
#
# - Question : en vous inspirant de votre code pour `exp(x)`, écrire une fonction `cos(x)`.
# - Sur quelques valeurs que vous connaissez peut-être ($x=0, \pi/4, \pi/2$), comparez la avec la fonction `math.sin` (ou avec celle de votre calculatrice).
def cos(x):
""" Approximation de cos(x) avec sa série calculée aux N=30 premiers termes."""
N = 30
n = 1
cos_x = 1.0
while n < N:
# pour l'instant cos_x = x**0/0! - x**1/1! +
# ... + (-1)**(n-1) x**(2*(n-1))/(2*(n-1))!
cos_x = ... # # /!\ à vous d'écrire quelque chose ici
# désormais cos_x = x**0/0! - x**1/1! + ...
# ... + (-1)**(n-1) x**(2*(n-1))/(2*(n-1))! + (-1)**n x**(2*n)/(2*n)!
n = n + 1
return cos_x
x = 0
print("Pour x = 0, cos(x) =", cos(x)) # expected: 1
x = math.pi / 4
print("Pour x = pi/4, cos(x) =", cos(x)) # expected: sqrt(2)
x = math.pi / 2
print("Pour x = pi/2, cos(x) =", cos(x)) # expected: 0
x = 10*2*math.pi
print("Pour x = 10*2*pi, cos(x) =", cos(x)) # expected: 1
x = 10*2*math.pi + math.pi / 4
print("Pour x = 10*2*pi + pi/4, cos(x) =", cos(x)) # expected: sqrt(2)
x = 10*2*math.pi + math.pi / 2
print("Pour x = 10*2*pi + pi/2, cos(x) =", cos(x)) # expected: 0
# Commentez sur la perte de précision observée entre les deux calculs de $\cos(\pi/2)$ et $\cos(10*2*\pi + \pi/2)$ alors que leurs valeurs exactes (mathématiques) sont égales.
# ### Sinus
# Pour la fonction sinus, la définition est très similaire :
#
# $$\sin(x) = \sum_{n=0}^{+\infty} \frac{(-1)^n * x^{2n+1}}{(2n+1)!}$$
#
# - Question : en vous inspirant de votre code pour `exp(x)` et `cos(x)`, écrire une fonction `sin(x)`.
# - Sur quelques valeurs que vous connaissez peut-être ($x=0, \pi/4, \pi/2$), comparez la avec la fonction `math.sin` (ou avec celle de votre calculatrice).
def sin(x):
""" Approximation de sin(x) avec sa série calculée aux N=30 premiers termes."""
N = 30
n = 1
sin_x = 1.0
while n < N:
# pour l'instant sin_x = x*1/1! - x*3/3! + ...
# ... + (-1)*(n-1) x*(2*(n-1)+1)/(2*(n-1)+1)!
sin_x = ... # # /!\ à vous d'écrire quelque chose ici
# désormais sin_x = x*1/1! - x*3/3! + ...
# ... + (-1)*(n-1) x*(2*(n-1)+1)/(2*(n-1)+1)! + (-1)*n x*(2*n+1)/(2*n+1)!
n = n + 1
return sin_x
x = 0
print("Pour x = 0, sin(x) =", sin(x)) # expected: 0
x = math.pi / 4
print("Pour x = pi/4, sin(x) =", sin(x)) # expected: sqrt(2)
x = math.pi / 2
print("Pour x = pi/2, sin(x) =", sin(x)) # expected: 1
x = 10*2*math.pi
print("Pour x = 10*2*pi, sin(x) =", sin(x)) # expected: 0
x = 10*2*math.pi + math.pi / 4
print("Pour x = 10*2*pi + pi/4, sin(x) =", sin(x)) # expected: sqrt(2)
x = 10*2*math.pi + math.pi / 2
print("Pour x = 10*2*pi + pi/2, sin(x) =", sin(x)) # expected: 1
print("Vous devez remplir le fichier et faire l'exercice")
print("Fin du fichier squelette_cossin.py")
# ## Conclusion
#
# J'espère que cette activité vous aura plu.
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
360,
1142,
959,
24525,
357,
45313,
386,
31173,
8,
1058,
409,
368,
2374,
390,
5204,
997,
2634,
33865,
198,
2,
532,
4415,
1058,
16342,
66... | 2.014949 | 1,873 |
from django.contrib import admin
from like.models import Like
admin.site.register(Like, LikeAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
588,
13,
27530,
1330,
4525,
628,
198,
198,
28482,
13,
15654,
13,
30238,
7,
7594,
11,
4525,
46787,
8,
198
] | 3.4 | 30 |
"""
General Constants
"""
__all__ = [
]
| [
37811,
198,
12218,
4757,
1187,
198,
37811,
198,
198,
834,
439,
834,
796,
685,
198,
198,
60,
198
] | 2.333333 | 18 |
# -*- coding: utf-8 -*-
# 019_cleaner.py
# CODED TO BE EXECUTED SERVER SIDE :
# cd /home/common/shade
# python3 manage.py shell
import sys
from apis.voca import *
##################################
# Init des paths et noms de fichiers
AddLog('title' , 'Début du nettoyage du fichier')
work_dir = '/home/common/shade/apis/raw/019_raw/'
# Nom du fichier source
raw_file = 'src'
##################################
# Création de la liste brute
with open(work_dir + raw_file , 'r') as file:
raw_list = [i for i in file.read().splitlines()]
##################################
# Elimination des strings surnuméraires
middle_list = []
to_elim = ['0','1','2','3','4','5','6','7','8','9',',']
for line in raw_list:
middle_list.append(''.join([i for i in line if i not in to_elim]))
# Elimination des espaces àlakon
ref_list = [i.strip() for i in middle_list]
##################################
# Enregistrement des fichiers sortie
AddLog('subtitle' , 'Début de la fonction OutFileCreate')
OutFileCreate('/home/common/shade/apis/out/','019_src',ref_list,'AssetPlace;Empire du Roi-Lune') | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
5534,
24,
62,
27773,
263,
13,
9078,
201,
198,
201,
198,
2,
327,
3727,
1961,
5390,
9348,
7788,
2943,
3843,
1961,
18871,
5959,
311,
14114,
1058,
201,
198,
2,
2... | 2.633178 | 428 |
from app.api.classes.observation.models import Observation
from app.db import db
from flask import jsonify
from sqlalchemy.sql import text
| [
6738,
598,
13,
15042,
13,
37724,
13,
672,
3168,
341,
13,
27530,
1330,
11086,
13208,
198,
6738,
598,
13,
9945,
1330,
20613,
198,
6738,
42903,
1330,
33918,
1958,
198,
6738,
44161,
282,
26599,
13,
25410,
1330,
2420,
628
] | 3.684211 | 38 |
from collections import defaultdict
from tests.data_handler.data_handler_tests_utils import DataHandlerTestsUtils
from covid19_il.data_handler.data_handlers.tested_individuals_scores import TestedIndividualsScores
from covid19_il.data_handler.enums.resource_id import ResourceId
class TestTestedIndividualsScores(DataHandlerTestsUtils):
""" Tests for Tested Individuals Scores Data Handler Class.
Methods:
setUp(self): Announce of starting the class's tests, initialize & verify Age Gender data handler's instance.
test_get_statistics(self): Tests results data & type of total statistics.
test_get_statistics_by_date(self): Tests results data & type of statistics by given_first_week_day.
"""
def setUp(self) -> None:
""" Announce of starting the class's tests, initialize & verify Tested Individuals Scores data handler's
instance """
print("testing Tested Individuals Scores Class...")
self.data_handler_1 = \
self._init_mocked_data_handler(json_file_path="json_files/tested_individuals_scores_mocked_data.json",
resource_id_enum=ResourceId.TESTED_INDIVIDUALS_SCORES_RESOURCE_ID)
self._check_base_step_of_all_methods(data_handler=self.data_handler_1, class_type=TestedIndividualsScores)
def test_get_statistics(self) -> None:
""" Tests results data & type of total statistics """
# Get Data
data = self.data_handler_1.get_statistics()
results = defaultdict(None,
{'male': defaultdict(int, {'NULL': 6378, 'No': 257010, 'Yes': 54325}),
'female': defaultdict(int, {'NULL': 5661, 'No': 288084, 'Yes': 75234}),
'NULL': defaultdict(int, {'NULL': 589, 'No': 922, 'Yes': 350})})
# Data Validation
self._test_two_level_depth_nested_dictionaries(data, results)
def test_get_statistics_by_date(self) -> None:
""" Tests results data & type of statistics by given_first_week_day """
# Get Data
data = self.data_handler_1.get_statistics_by_date('2020-10-05')
results = defaultdict(None,
{'NULL':
{'male': 296, 'female': 330, 'NULL': 45},
'No': {'male': 17578, 'female': 21223, 'NULL': 130},
'Yes': {'male': 4222, 'female': 6725, 'NULL': 8}})
# Data Validation
self._test_two_level_depth_nested_dictionaries(data, results)
| [
6738,
17268,
1330,
4277,
11600,
198,
198,
6738,
5254,
13,
7890,
62,
30281,
13,
7890,
62,
30281,
62,
41989,
62,
26791,
1330,
6060,
25060,
51,
3558,
18274,
4487,
198,
6738,
39849,
312,
1129,
62,
346,
13,
7890,
62,
30281,
13,
7890,
62,
... | 2.320755 | 1,113 |
import re
from unidecode import unidecode
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import StandardScaler
import numpy as np
_pad = 'pad'
_start = 'start'
_eos = 'eos'
_punctuation = "!'(),.:;? "
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
_small_letters = 'abcdefghijklmnopqrstuvwxyz'
_rejected = '\'():;"'
_punct = ':;,.?'
TTS_SYMBOLS = (
[_pad, _start, _eos] + list(_special) + list(_punctuation) + list(_letters)
)
FORCE_ALIGNMENT_SYMBOLS = (
[_pad, _start, _eos] + list(_special) + list(_small_letters)
)
def put_spacing_num(string):
"""
'ni1996' -> 'ni 1996'
"""
string = re.sub('[A-Za-z]+', lambda ele: ' ' + ele[0] + ' ', string)
return re.sub(r'[ ]+', ' ', string).strip()
| [
11748,
302,
198,
6738,
555,
485,
8189,
1330,
555,
485,
8189,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
5239,
1330,
2764,
38469,
7509,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
8997,
3351,
36213,
198,
11748,
299,
32152,... | 2.323699 | 346 |
from __future__ import absolute_import, division, print_function
import os
if __name__ == "__main__":
run()
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
28686,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
1057,
3419,
198
] | 3.2 | 35 |
#-*- coding: UTF-8 -*-
# 01107267
# 03/24/2017
from flask import Flask, Response, redirect
from flask_login import (LoginManager, login_required, login_user,
current_user, logout_user, UserMixin)
from itsdangerous import URLSafeTimedSerializer
from datetime import timedelta
from datetime import datetime
from hashlib import md5
from bson.json_util import dumps
from SfcsmError import SfcsmError
from pymongo import MongoClient
from pymongo import MongoReplicaSetClient
from bson import ObjectId
version = "1.4.0"
app = Flask(__name__)
app.secret_key = "Mon Nov 30 17:20:29 2015"
app.config["REMEMBER_COOKIE_DURATION"] = timedelta(days=14)
#Login_serializer used to encryt and decrypt the cookie token for the remember
#me option of flask-login
login_serializer = URLSafeTimedSerializer(app.secret_key)
login_manager = LoginManager()
login_manager.init_app(app)
from subprocess import CalledProcessError
import mongoJuiceCore
import time
import httplib
from poolsCtrl import PoolsCtrl,Pools
from osdsCtrl import OsdsCtrl,Osds
from monsCtrl import MonitorsCtrl,Monitors
from rbdCtrl import RbdCtrl
import subprocess
from StringIO import StringIO
#import probesCtrl
from S3Ctrl import S3Ctrl, S3Error
from S3ObjectCtrl import *
import sys
import os
sys.path.append(os.path.split(sys.path[0])[0])
from sfcsmUtil.OperateLog import OperateLog
def hash_pass(password):
"""
Return the md5 hash of the password+salt
"""
salted_password = password + app.secret_key
return md5(salted_password).hexdigest()
# Load configuration from file
configfile = "/opt/sfcsm/etc/sfcsm.conf"
datasource = open(configfile, "r")
conf = json.load(datasource)
datasource.close()
client = None;
#ceph_rest_api = None
# get a field value from global conf according to the specifpsutil_versionied ceph conf
client = getDbClient()
# control sfcsm users collection in mongo
# dbsfcsm = mongoJuiceCore.getClient(conf, 'sfcsm')
dbsfcsm = client['sfcsm']
fsid = getfsid()
dbcluster = client[fsid]
if dbsfcsm.sfcsm_users.count() == 0:
print "list users is empty: populating with default users"
user = {"name":"sfcsmAdm",
"password": hash_pass("sf01107267."),
"roles":["admin"],
"createTime":int(round(time.time() * 1000)),
"creator":"system"}
dbsfcsm.sfcsm_users.insert(user)
user = {"name":"guest",
"password": hash_pass(""),
"roles":["general"],
"createTime": int(round(time.time() * 1000)),
"creator": "system"}
dbsfcsm.sfcsm_users.insert(user)
#
# Security
# User类作为系统用户类,用户名,用户类型,创建时间,创建人
#
@app.route("/syslogs/<string:_id>", methods=["DELETE"])
# @app.route("/syslogs/<string:_id>", methods=["PUT"])
# def update_syslog_from_db(_id):
# oplog = {}
# # oplog['operator'] = current_user
# oplog['operator'] = "test"
# oplog['description '] = 'delete syslog, _id is' + _id
# oplog['optype'] = 'N'
# oplog['destip'] = 'all'
# syslog = dbcluster.syslog.find({"_id":ObjectId(_id)})
# if syslog.count() !=0:
# syslog[0]['']
# dbcluster.syslog.remove({"_id":ObjectId(_id)})
# if dbcluster.syslog.find({"_id": ObjectId(_id)}).count() != 0:
# return Response('delete fail', status=600)
# else:
# oplog['operateTime'] = int(round(time.time() * 1000))
# dbcluster.operationlog.insert(oplog)
# return Response('success', status=200)
# else:
# return Response('update fail, document is not found', status=600)
@app.route("/syslogs/", methods=["GET"])
@app.route("/radosgws/", methods=["GET"])
@login_manager.user_loader
def load_user(userid):
"""
Flask-Login user_loader callback.
The user_loader function asks this function to get a User Object or return
None based on the userid.
The userid was stored in the session environment by Flask-Login.
user_loader stores the returned User object in current_user during every
flask request.
"""
return User.get(userid)
@login_manager.token_loader
def load_token(token):
"""
Flask-Login token_loader callback.
The token_loader function asks this function to take the token that was
stored on the users computer process it to check if its valid and then
return a User Object if its valid or None if its not valid.
"""
#The Token itself was generated by User.get_auth_token. So it is up to
#us to known the format of the token data itself.
#The Token was encrypted using itsdangerous.URLSafeTimedSerializer which
#allows us to have a max_age on the token itself. When the cookie is stored
#on the users computer it also has a exipry date, but could be changed by
#the user, so this feature allows us to enforce the exipry date of the token
#server side and not rely on the users cookie to exipre.
max_age = app.config["REMEMBER_COOKIE_DURATION"].total_seconds()
#Decrypt the Security Token, data = [username, hashpass]
data = login_serializer.loads(token, max_age=max_age)
#Find the User
user = User.get(data[0])
#Check Password and return user or None
if user and data[1] == user.password:
return user
return None
@app.route("/login/", methods=["GET", "POST"])
def login_page():
"""
Web Page to Display Login Form and process form.
"""
if request.method == "POST":
user = User.get(request.form['name'])
# If we found a user based on username then compare that the submitted
# password matches the password in the database. The password is stored
# is a slated hash format, so you must hash the password before comparing
# it.
if user and hash_pass(request.form['password']) == user.password:
login_user(user, remember=True)
return redirect(request.args.get("next") or "/sfcsmViz/index.html")
else:
return redirect('/sfcsmViz/login.html?result=failed')
return redirect("/sfcsmViz/login.html", code=302)
@app.route('/logout')
#
# global management
#
@app.route('/conf.json', methods=['GET'])
@login_required # called by every page, so force to be identified
@app.route('/flags', methods=['POST','PUT'])
# /<string:op>/<string:key>/<string:destip>
#
# sfcsm users management
#
@app.route('/sfcsm_user/', methods=['GET'])
# 平台用户管理
@app.route('/sfcsm_user/<id>', methods=['GET', 'POST', 'PUT', 'DELETE'])
@login_required
@app.route('/sfcsm_user_role/', methods=['GET'])
#
# mongoDB query facility
#
@app.route('/<db>/<collection>', methods=['GET', 'POST'])
@app.route('/<db>', methods=['POST'])
#
# Pools management
#
@app.route('/poolList/', methods=['GET'])
@app.route('/pools/', methods=['GET', 'POST'])
@app.route('/pools/<int:id>', methods=['GET', 'DELETE', 'PUT'])
@app.route('/pools/<int:id>/snapshot', methods=['POST'])
@app.route('/pools/<int:id>/snapshot/<namesnapshot>', methods=['DELETE'])
@app.route('/mons/', methods=['GET'])
@app.route('/daemons/', methods=['POST'])
#
# Probes management
#
#@app.route('/probes/<string:probe_type>/<string:probe_name>/<string:action>', methods=['POST'])
#def actionOnProbe(probe_type, probe_name, action):
# print "Calling probesCtrl.action_on_probe() method", action
# try:
# return Response(probesCtrl.action_on_probe(probe_type, probe_name, action), mimetype='application/json')
# except CalledProcessError, e:
# return Response(e.output, status=500)
#
#
# Osds management
#
@app.route('/cluster/', methods=['GET'])
@app.route('/osds', methods=['PUT'])
@app.route('/osds/stat/', methods=['POST'])
@app.route('/osdsList/', methods=['GET'])
#
# Object storage management
#
# This method return a S3 Object that id is "objId".
# An exception is trhown if the object does not exist or there an issue
@app.route('/S3/object', methods=['GET'])
# User management
@app.route('/S3/user', methods=['GET'])
@app.route('/S3/user', methods=['POST'])
@app.route('/S3/user/<string:uid>', methods=['GET'])
@app.route('/S3/user/<string:uid>', methods=['PUT'])
@app.route('/S3/user/<string:uid>', methods=['DELETE'])
@app.route('/S3/user/<string:uid>/key/<string:key>', methods=['DELETE'])
@app.route('/S3/user/<string:uid>/subuser', methods=['PUT'])
@app.route('/S3/user/<string:uid>/subuser/<string:subuser>', methods=['DELETE'])
@app.route('/S3/user/<string:uid>/subuser/<string:subuser>/key', methods=['PUT'])
@app.route('/S3/user/<string:uid>/subuser/<string:subuser>/key', methods=['DELETE'])
@app.route('/S3/user/<string:uid>/caps', methods=['PUT', 'POST'])
@app.route('/S3/user/<string:uid>/caps', methods=['DELETE'])
@app.route('/S3/user/<string:uid>/qos', methods=['PUT', 'POST'])
@app.route('/S3/user/<string:uid>/quota', methods=['PUT', 'POST'])
# bucket management
@app.route('/S3/user/<string:uid>/buckets', methods=['GET'])
@app.route('/S3/bucket', methods=['PUT'])
@app.route('/S3/bucket', methods=['GET'])
@app.route('/S3/bucket/<string:bucket>', methods=['GET'])
@app.route('/S3/bucket/<string:bucket>', methods=['DELETE'])
@app.route('/S3/bucket/<string:bucket>/link', methods=['DELETE','PUT'])
@app.route('/S3/bucket/<string:bucketName>/list', methods=['GET'])
| [
2,
12,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
201,
198,
2,
5534,
15982,
25674,
201,
198,
2,
7643,
14,
1731,
14,
5539,
201,
198,
201,
198,
201,
198,
6738,
42903,
1330,
46947,
11,
18261,
11,
18941,
201,
198,
6738,
42903,
62,... | 2.364668 | 4,053 |
import numpy as np
import tensorflow as tf
from utils import preprocess_flags, save_arch
from utils import arch_folder
if __name__ == '__main__':
f = tf.compat.v1.app.flags
from utils import define_default_flags
define_default_flags(f)
tf.compat.v1.app.run()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
3384,
4487,
1330,
662,
14681,
62,
33152,
11,
3613,
62,
998,
198,
6738,
3384,
4487,
1330,
3934,
62,
43551,
198,
198,
361,
11593,
3672,
834,
6624,
... | 2.67619 | 105 |
import unittest
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from django.db import models
from django.http import HttpRequest
from django.core.signals import request_finished, request_started
from django.core.cache import cache
from djangae.contrib import sleuth
from djangae.test import TestCase
from djangae.db import unique_utils
from djangae.db import transaction
from djangae.db.backends.appengine.context import ContextStack
from djangae.db.backends.appengine import caching
from djangae.db.caching import disable_cache, clear_context_cache
class MemcacheCachingTests(TestCase):
"""
We need to be pretty selective with our caching in memcache, because unlike
the context caching, this stuff is global.
For that reason, we have the following rules:
- save/update caches entities outside transactions
- Inside transactions save/update wipes out the cache for updated entities (a subsequent read by key will populate it again)
- Inside transactions filter/get does not hit memcache (that just breaks transactions)
- filter/get by key caches entities (consistent)
- filter/get by anything else does not (eventually consistent)
"""
@disable_cache(memcache=False, context=True)
@disable_cache(memcache=False, context=True)
@disable_cache(memcache=False, context=True)
@disable_cache(memcache=False, context=True)
@disable_cache(memcache=False, context=True)
@disable_cache(memcache=False, context=True)
@disable_cache(memcache=False, context=True)
@disable_cache(memcache=False, context=True)
@disable_cache(memcache=False, context=True)
@disable_cache(memcache=False, context=True)
class ContextCachingTests(TestCase):
"""
We can be a bit more liberal with hitting the context cache as it's
thread-local and request-local
The context cache is actually a stack. When you start a transaction we push a
copy of the current context onto the stack, when we finish a transaction we pop
the current context and apply the changes onto the outer transaction.
The rules are thus:
- Entering a transaction pushes a copy of the current context
- Rolling back a transaction pops the top of the stack
- Committing a transaction pops the top of the stack, and adds it to a queue
- When all transactions exit, the queue is applied to the current context one at a time
- save/update caches entities
- filter/get by key caches entities (consistent)
- filter/get by anything else does not (eventually consistent)
"""
@disable_cache(memcache=True, context=False)
@disable_cache(memcache=True, context=False)
@unittest.skip("The datastore seems broken, see: https://code.google.com/p/googleappengine/issues/detail?id=11631&thanks=11631&ts=1422376783")
@disable_cache(memcache=True, context=False)
@disable_cache(memcache=True, context=False)
@disable_cache(memcache=True, context=False)
@disable_cache(memcache=True, context=False)
@disable_cache(memcache=True, context=False)
@disable_cache(memcache=True, context=False)
@disable_cache(memcache=True, context=False)
@disable_cache(memcache=True, context=False)
@disable_cache(memcache=True, context=False)
def test_context_cache_cleared_after_request(self):
""" The context cache should be cleared between requests. """
CachingTestModel.objects.create(field1="test")
with sleuth.watch("google.appengine.api.datastore.Query.Run") as query:
CachingTestModel.objects.get(field1="test")
self.assertEqual(query.call_count, 0)
# Now start a new request, which should clear the cache
request_started.send(HttpRequest(), keep_disabled_flags=True)
CachingTestModel.objects.get(field1="test")
self.assertEqual(query.call_count, 1)
# Now do another call, which should use the cache (because it would have been
# populated by the previous call)
CachingTestModel.objects.get(field1="test")
self.assertEqual(query.call_count, 1)
# Now clear the cache again by *finishing* a request
request_finished.send(HttpRequest(), keep_disabled_flags=True)
CachingTestModel.objects.get(field1="test")
self.assertEqual(query.call_count, 2)
| [
11748,
555,
715,
395,
198,
198,
6738,
23645,
13,
1324,
18392,
13,
15042,
1330,
4818,
459,
382,
198,
6738,
23645,
13,
1324,
18392,
13,
15042,
1330,
4818,
459,
382,
62,
48277,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
673... | 2.896396 | 1,554 |
# -*- coding: utf-8 -*-
"""
wechatpy.client.jsapi
~~~~~~~~~~~~~~~~~~~~
This module provides some APIs for JS SDK
:copyright: (c) 2014 by messense.
:license: MIT, see LICENSE for more details.
"""
from __future__ import absolute_import, unicode_literals
import hashlib
import time
from wechatpy.utils import WeChatSigner, random_string
from wechatpy.client.api.base import BaseWeChatAPI
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
356,
17006,
9078,
13,
16366,
13,
8457,
15042,
198,
220,
220,
220,
220,
27156,
8728,
628,
220,
220,
220,
770,
8265,
3769,
617,
23113,
329,
26... | 3 | 137 |
def select_tcw(table, field=("*",), where=None):
"""
示例内容::
select_tcw("table", ("id", "name"), where="id='2' and name='3'")
转换sql: select id,name from table where id='2' and name='3'
:param table: 查询的表名称
:param field: 需要查询的字段,放入元祖中,默认值("*",)
:param where: 筛选的内容,如 id='2' and name='3',注意'用来声明字符串
:return: 查询sql语句
"""
sql = "select {} from {}".format(",".join(field), table)
if where:
sql += " where " + where
return sql
def insert_tc(table, content, many=False, ph="%s"):
"""
示例内容::
insert_tc("table", [1, 2, 3, 4, 5])
转换内容 : ('insert into table values(%s,%s,%s,%s,%s)', [1, 2, 3, 4, 5])
insert_tc("table", [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], many=True, ph="?")
转换内容 : ('insert into table values(?,?,?,?,?)', [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
insert_tc("table", {"id": 12, "name": "SystemLight"}, many=False, ph="%s")
转换内容 : ('insert into table(name,id) values(%s,%s)', ['SystemLight', 12])
insert_tc("table", {"key": ["id", "name"], "value": [["1", "lisys"], ["2", "sl"]]}, many=True, ph="%s")
转换内容 : ('insert into table(id,name) values(%s,%s)', [['1', 'lisys'], ['2', 'sl']])
:param table: 插入内容的表名称
:param content: 需要插入的内容,有多种类型方式供选择
:param many: 是否进行多行插入,默认值:False
:param ph: 预查询模板占位符,默认值:%s
:return: 元祖(插入预查询模板,预查询参数)
"""
if isinstance(content, list):
content_len = len(content[0]) if many else len(content)
sql = "insert into {} values({})".format(table, ",".join([ph] * content_len))
elif isinstance(content, dict):
if many:
field = "(" + ",".join(content["key"]) + ")"
sql = "insert into {}{} values({})".format(table, field, ",".join([ph] * len(content["key"])))
content = content["value"]
else:
field = "(" + ",".join(content.keys()) + ")"
sql = "insert into {}{} values({})".format(table, field, ",".join([ph] * len(content.values())))
content = list(content.values())
else:
raise TypeError("content is not a dict or list")
return sql, content
def insert_update_tc(table, content, many=False, ph="%s"):
"""
插入即更新,这条sql语句在mysql中是有效的,不同数据系统可能有所不同
示例内容::
insert_update_tc("table", {"id": 12, "name": "SystemLight"}, many=False, ph="%s")
转换内容 : ('insert into table(id,name) values(%s,%s) on duplicate key update
id = values(id),name = values(name)', [12, 'SystemLight'])
insert_update_tc("table", {"key": ["id", "name"], "value": [["1", "lisys"], ["2", "sl"]]}, many=True, ph="%s")
转换内容 : ('insert into table(id,name) values(%s,%s) on duplicate key update
id = values(id),name = values(name)', [['1', 'lisys'], ['2', 'sl']])
:param table: 插入即更新的table名称
:param content: 需要插入即更新的内容,有两种类型方式供选择
:param many: 是否进行多行插入,默认值:False
:param ph: 预查询模板占位符,默认值:%s
:return: 元祖(插入预查询模板,预查询参数)
"""
if isinstance(content, dict):
if many:
field = "(" + ",".join(content["key"]) + ")"
sql = "insert into {}{} values({}) on duplicate key update ".format(table, field, ",".join(
[ph] * len(content["key"])))
sql += ",".join(map(lambda x: "{} = values({})".format(x, x), content["key"]))
content = content["value"]
else:
field = "(" + ",".join(content.keys()) + ")"
sql = "insert into {}{} values({}) on duplicate key update ".format(table, field, ",".join(
[ph] * len(content.values())))
sql += ",".join(map(lambda x: "{} = values({})".format(x, x), content.keys()))
content = list(content.values())
else:
raise TypeError("content is not a dict")
return sql, content
def update_tcw(table, content, where=None, where_arg=None, ph="%s"):
"""
生成更新sql语句
示例内容::
update_tcw("table", {"id": 12, "name": "SystemLight"}, ph="%s")
转换内容 : ('update table set name=%s,id=%s', ['SystemLight', 12])
:param table: 更新的table名称
:param content: 需要修改的值,字典类型
:param where: 用于筛选,如id=2
:param where_arg: 预查询参数,列表类型
:param ph: 预查询模板占位符
:return: 元祖
"""
arg_list = list(content.values())
sql = "update {} set {}".format(table, ",".join(map(lambda x: x + "=" + ph, content.keys())))
if where:
sql += " where " + where
if where_arg:
arg_list.extend(where_arg)
return sql, arg_list
def delete_tw(table, where=None):
"""
示例内容::
delete_tw("table", where="id=1")
转换sql: delete from table where id=1
:param table: 需要删除的表的名称
:param where: 用于筛选,如id=2
:return: 删除sql
"""
sql = "delete from {}".format(table)
if where:
sql += " where " + where
return sql
def truncate_t(table):
"""
生成清空表sql语句
:param table: 需要清空的表的名称
:return: ['set foreign_key_checks=0', 'truncate table tabble', 'set foreign_key_checks=1']
"""
return ["set foreign_key_checks=0", "truncate table {}".format(table), "set foreign_key_checks=1"]
def limit(sql, start, total):
"""
生成限制返回数量的sql语句
:param sql: 现有sql语句
:param start: 开始位置
:param total: 总计条数
:return: 附件limit的sql语句
"""
return sql + " limit {},{}".format(start, total)
class InputResult:
"""
该类是当数据库module执行输入语句系列时,出现错误会默认返回的错误对象
status : 标识返回状态是否正确,如果处理sql语句时报错且回滚了数据,status标识为False
err_info : 错误信息
affect : sql语句影响到的行数
last_rowid : 返回自增ID的号码
"""
| [
4299,
2922,
62,
23047,
86,
7,
11487,
11,
2214,
28,
7203,
9,
1600,
828,
810,
28,
14202,
2599,
198,
220,
220,
220,
37227,
628,
220,
220,
220,
13328,
97,
118,
160,
122,
233,
37863,
227,
22522,
117,
3712,
628,
220,
220,
220,
220,
220,... | 1.660688 | 3,345 |
import json
from surprise import dump
_, algo = dump.load('knn.algo')
with open('movies.json') as file:
movies = json.load(file)
movie_name_to_raw_ids = dict()
for movie_id, movie_info in movies.items():
movie_name_to_raw_ids[movie_info['name']] = movie_id
'''
>>> import knowledge
>>> from pprint import pprint
>>> pprint(knowledge.run(data={'movie_name': 'Dunkirk (2017)', 'k': 10}), indent=4)
[ { u'genres': [], u'name': u'Blade Runner 2049 (2017)'},
{ u'genres': [u'Adventure', u'Drama', u'Thriller'],
u'name': u'The Revenant (2015)'},
{ u'genres': [u'Comedy', u'Mystery'], u'name': u'Hail, Caesar! (2016)'},
{ u'genres': [u'Drama'], u'name': u'Blue Jasmine (2013)'},
{ u'genres': [u'Comedy', u'Drama', u'Musical'],
u'name': u'La La Land (2016)'},
{ u'genres': [u'Action', u'Adventure', u'Sci-Fi'],
u'name': u'Mad Max: Fury Road (2015)'},
{ u'genres': [u'Comedy', u'Drama'], u'name': u'Birdman (2014)'},
{ u'genres': [u'Action', u'Crime', u'Drama'], u'name': u'Sicario (2015)'},
{ u'genres': [u'Sci-Fi', u'Thriller'], u'name': u'Gravity (2013)'},
{ u'genres': [u'Drama', u'Mystery', u'Sci-Fi'], u'name': u'Arrival (2016)'}]
>>> pprint(knowledge.run(data={'movie_name': 'Toy Story (1995)', 'k': 10}), indent=4)
[ { u'genres': [u'Animation', u'Adventure', u'Comedy'],
u'name': u'Toy Story 3 (2010)'},
{ u'genres': [u'Animation', u'Adventure', u'Comedy'],
u'name': u'Monsters University (2013)'},
{ u'genres': [u'Animation', u'Adventure', u'Comedy'],
u'name': u'Monsters, Inc. (2001)'},
{ u'genres': [u'Action', u'Adventure', u'Fantasy'],
u'name': u'Star Wars: Episode V - The Empire Strikes Back (1980)'},
{ u'genres': [u'Adventure', u'Comedy', u'Sci-Fi'],
u'name': u'Back to the Future (1985)'},
{ u'genres': [u'Animation', u'Adventure', u'Comedy'],
u'name': u'Up (2009)'},
{ u'genres': [u'Animation', u'Adventure', u'Family'],
u'name': u'Rise of the Guardians (2012)'},
{ u'genres': [u'Animation', u'Adventure', u'Comedy'],
u'name': u'Wreck-It Ralph (2012)'},
{ u'genres': [u'Adventure', u'Drama', u'Fantasy'],
u'name': u'Life of Pi (2012)'},
{ u'genres': [u'Action', u'Crime', u'Thriller'],
u'name': u'John Wick: Chapter 2 (2017)'}]
'''
| [
11748,
33918,
198,
6738,
5975,
1330,
10285,
198,
198,
62,
11,
435,
2188,
796,
10285,
13,
2220,
10786,
15418,
77,
13,
282,
2188,
11537,
198,
4480,
1280,
10786,
76,
20526,
13,
17752,
11537,
355,
2393,
25,
198,
220,
220,
220,
6918,
796,
... | 2.164234 | 1,096 |
class Solution:
"""
@param pid: the process id
@param ppid: the parent process id
@param kill: a PID you want to kill
@return: a list of PIDs of processes that will be killed in the end
""" | [
4871,
28186,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2488,
17143,
46514,
25,
262,
1429,
4686,
198,
220,
220,
220,
2488,
17143,
9788,
312,
25,
262,
2560,
1429,
4686,
198,
220,
220,
220,
2488,
17143,
1494,
25,
257,
37022,
3... | 2.958333 | 72 |
import sys
import shlex
import math
| [
11748,
25064,
198,
11748,
427,
2588,
198,
11748,
10688,
198
] | 3.6 | 10 |
# https://py.checkio.org/en/mission/sun-angle/
'''
Every true traveler must know how to do 3 things: fix the fire, find the water and extract useful information from the nature around him.
Programming won't help you with the fire and water, but when it comes to the information extraction - it might be just the thing you need.
Your task is to find the angle of the sun above the horizon knowing the time of the day.
Input data: the sun rises in the East at 6:00 AM, which corresponds to the angle of 0 degrees.
At 12:00 PM the sun reaches its zenith, which means that the angle equals 90 degrees.
6:00 PM is the time of the sunset so the angle is 180 degrees. If the input will be the time of the night (before 6:00 AM or after 6:00 PM),
your function should return - "I don't see the sun!".
'''
if __name__ == '__main__':
print("Example:")
print(sun_angle("07:00"))
#These "asserts" using only for self-checking and not necessary for auto-testing
assert sun_angle("07:00") == 15
assert sun_angle("01:23") == "I don't see the sun!"
print("Coding complete? Click 'Check' to earn cool rewards!")
| [
2,
3740,
1378,
9078,
13,
9122,
952,
13,
2398,
14,
268,
14,
3411,
14,
19155,
12,
9248,
14,
201,
198,
201,
198,
7061,
6,
201,
198,
6109,
2081,
40168,
1276,
760,
703,
284,
466,
513,
1243,
25,
4259,
262,
2046,
11,
1064,
262,
1660,
2... | 3.274286 | 350 |
import socket
from concurrent.futures import ThreadPoolExecutor, wait
import tkinter as tk
from scapy.all import *
if __name__ == "__main__":
top = tk.Tk()
myPortScanner = PortScanner(top)
# odic, cdic = myPortScanner.portScanner(['127.0.0.1'], 8086, 8200)
myPortScanner.window()
top.mainloop()
# print(odic, cdic)
| [
11748,
17802,
198,
6738,
24580,
13,
69,
315,
942,
1330,
14122,
27201,
23002,
38409,
11,
4043,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
629,
12826,
13,
439,
1330,
1635,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
... | 2.425532 | 141 |
# Nícolas Ramos
# desenvolvido para ser igual ao pedido no desafio
print('====== DESAFIO 1 ======')
primeiro = int(input('Primeiro número '))
segundo = int(input('Segundo número '))
print(f'A soma é {primeiro + segundo}')
| [
2,
399,
8836,
4033,
292,
36692,
198,
2,
748,
268,
10396,
16921,
78,
31215,
1055,
45329,
723,
257,
78,
7190,
17305,
645,
748,
1878,
952,
198,
198,
4798,
10786,
50155,
22196,
8579,
9399,
352,
29335,
28,
11537,
198,
198,
35505,
7058,
796... | 2.472527 | 91 |
# flake8: noqa
from stactools.core.io import use_fsspec
from stactools.core.copy import (move_asset_file_to_item, move_assets,
move_all_assets, copy_catalog)
from stactools.core.layout import layout_catalog
from stactools.core.merge import (merge_items, merge_all_items)
| [
2,
781,
539,
23,
25,
645,
20402,
198,
198,
6738,
336,
529,
10141,
13,
7295,
13,
952,
1330,
779,
62,
69,
824,
43106,
198,
6738,
336,
529,
10141,
13,
7295,
13,
30073,
1330,
357,
21084,
62,
562,
316,
62,
7753,
62,
1462,
62,
9186,
1... | 2.328244 | 131 |
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import subprocess
if 'PUPPETBOARD_SETTINGS' not in os.environ:
os.environ['PUPPETBOARD_SETTINGS'] = os.path.join(
os.getcwd(), 'settings.py'
)
from puppetboard.app import app
if __name__ == '__main__':
# Start CoffeeScript to automatically compile our coffee source.
# We must be careful to only start this in the parent process as
# Werkzeug will create a secondary process when using the reloader.
if os.environ.get('WERKZEUG_RUN_MAIN') is None:
try:
subprocess.Popen([
app.config['DEV_COFFEE_LOCATION'], '-w', '-c',
'-o', 'puppetboard/static/js',
'puppetboard/static/coffeescript'
])
except OSError:
app.logger.error(
'The coffee executable was not found, disabling automatic '
'CoffeeScript compilation'
)
# Start the Flask development server
app.debug = True
app.run(app.config['DEV_LISTEN_HOST'], app.config['DEV_LISTEN_PORT'])
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
198,
361,
705,
5105,
10246,
2767,
8202,
9795,
62,
28480,
51,
20754,
6,
407,... | 2.313278 | 482 |
# Copyright 2018 Amazon.com, Inc. or its affiliates.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import sys, boto3, os
from awsglue.utils import getResolvedOptions
from awsglue.context import GlueContext | [
2,
15069,
2864,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
198,
2,
921,
743,
407,
779,
42... | 3.643243 | 185 |
# -*- coding: utf-8 -*-
"""
Editor: Zhao Xinlu
School: BUPT
Date: 2018-03-31
算法思想: 三数之和最接近某值
"""
if __name__ == '__main__':
nums = [-1, 2, 1, -4]
target = 1
print Solution().threeSumClosest(nums, target) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
17171,
25,
29436,
25426,
2290,
198,
26130,
25,
347,
8577,
51,
198,
10430,
25,
2864,
12,
3070,
12,
3132,
198,
163,
106,
245,
37345,
243,
45250,
251,
46349,
... | 1.728 | 125 |
#-*- coding: utf-8 -*-
#File: config.py
#Author: yobobobo(zhouboacmer@qq.com)
import tensorflow as tf
from tensorgo.utils import logger
__all__ = ['TrainConfig']
| [
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
8979,
25,
4566,
13,
9078,
198,
2,
13838,
25,
331,
672,
672,
20391,
7,
38536,
2127,
330,
647,
31,
38227,
13,
785,
8,
198,
198,
11748,
11192,
273,
11125,
355,
48700,... | 2.484848 | 66 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_tests.gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
| [
2,
15069,
1946,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
198,
6738,... | 3.628205 | 78 |
import time
| [
11748,
640,
628
] | 4.333333 | 3 |
"""
Given an array nums containing n distinct numbers in the range [0, n], return the only number in the range that is missing from the array.
Follow up: Could you implement a solution using only O(1) extra space complexity and O(n) runtime complexity?
Example 1:
Input: nums = [3,0,1]
Output: 2
Explanation: n = 3 since there are 3 numbers, so all numbers are in the range [0,3]. 2 is the missing number in the range since it does not appear in nums.
Example 2:
Input: nums = [0,1]
Output: 2
Explanation: n = 2 since there are 2 numbers, so all numbers are in the range [0,2]. 2 is the missing number in the range since it does not appear in nums.
Example 3:
Input: nums = [9,6,4,2,3,5,7,0,1]
Output: 8
Explanation: n = 9 since there are 9 numbers, so all numbers are in the range [0,9]. 8 is the missing number in the range since it does not appear in nums.
Example 4:
Input: nums = [0]
Output: 1
Explanation: n = 1 since there is 1 number, so all numbers are in the range [0,1]. 1 is the missing number in the range since it does not appear in nums.
Constraints:
n == nums.length
1 <= n <= 104
0 <= nums[i] <= n
All the numbers of nums are unique.
"""
from typing import List
sol = Solution()
print(sol.missingNumber([3, 0, 1]))
| [
37811,
198,
198,
15056,
281,
7177,
997,
82,
7268,
299,
7310,
3146,
287,
262,
2837,
685,
15,
11,
299,
4357,
1441,
262,
691,
1271,
287,
262,
2837,
326,
318,
4814,
422,
262,
7177,
13,
198,
198,
7155,
510,
25,
10347,
345,
3494,
257,
4... | 3.114713 | 401 |
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
# import os
def create_plots(setup, cwd=''):
"""
Function to create detailed heatmaps and the iteration plot for a single fault
Args:
setup (str): name of the setup (heat or advection)
cwd: current working directory (for testing)
"""
# basic plotting setup
axis_font = {'fontname': 'Arial', 'size': '8', 'family': 'serif'}
fs = 8 # fontsize
# assemble list of setups
setup_list = [(setup + '_steps_vs_iteration_hf_NOFAULT.npz', 'NOFAULT', 'no fault', 'k', '^'),
(setup + '_steps_vs_iteration_hf_SPREAD.npz', 'SPREAD', '1-sided', 'red', 'v'),
(setup + '_steps_vs_iteration_hf_INTERP.npz', 'INTERP', '2-sided', 'orange', 'o'),
(setup + '_steps_vs_iteration_hf_SPREAD_PREDICT.npz', 'SPREAD_PREDICT',
'1-sided + corr', 'blue', 's'),
(setup + '_steps_vs_iteration_hf_INTERP_PREDICT.npz', 'INTERP_PREDICT',
'2-sided + corr', 'green', 'd')]
maxres = -1
minres = -11
maxiter = 0
maxsteps = 0
# find axis limits
for file, _, _, _, _ in setup_list:
infile = np.load(cwd + 'data/' + file)
residual = infile['residual']
maxiter = max(maxiter, len(residual[:, 0]))
maxsteps = max(maxsteps, len(residual[0, :]))
# create heatmaps
for file, strategy, _, _, _ in setup_list:
residual = np.zeros((maxiter, maxsteps))
residual[:] = -99
infile = np.load(cwd + 'data/' + file)
input = infile['residual']
step = infile['ft_step']
iter = infile['ft_iter']
residual[0:len(input[:, 0]), 0:len(input[0, :])] = input
rcParams['figure.figsize'] = 3.0, 2.5
fig, ax = plt.subplots()
cmap = plt.get_cmap('Reds')
pcol = plt.pcolor(residual.T, cmap=cmap, vmin=minres, vmax=maxres)
pcol.set_edgecolor('face')
plt.axis([0, maxiter, 0, maxsteps])
cax = plt.colorbar(pcol)
cax.set_label('log10(residual)', **axis_font)
cax.ax.tick_params(labelsize=fs)
plt.tick_params(axis='both', which='major', labelsize=fs)
ax.set_xlabel('iteration', labelpad=1, **axis_font)
ax.set_ylabel('step', labelpad=1, **axis_font)
ax.set_xticks(np.arange(maxiter) + 0.5, minor=False)
ax.set_yticks(np.arange(maxsteps) + 0.5, minor=False)
ax.set_xticklabels(np.arange(maxiter) + 1, minor=False)
ax.set_yticklabels(np.arange(maxsteps), minor=False)
# Set every second label to invisible
for labelx in ax.xaxis.get_ticklabels()[::2]:
labelx.set_visible(False)
for labely in ax.yaxis.get_ticklabels()[::2]:
labely.set_visible(False)
ax.tick_params(pad=2)
# plt.tight_layout()
if strategy != 'NOFAULT':
plt.text(step - 1 + 0.5, iter + 0.5, 'x', horizontalalignment='center', verticalalignment='center')
plt.title(strategy.replace('_', '-'), **axis_font)
fname = 'data/' + setup + '_steps_vs_iteration_hf_' + str(step) + 'x' + str(iter) + '_' + strategy + '.png'
plt.savefig(fname, bbox_inches='tight')
# os.system('pdfcrop ' + fname + ' ' + fname)
rcParams['figure.figsize'] = 6.0, 3.0
fig, ax = plt.subplots()
maxiter = 0
lw = 2
ms = 8
# create iteration vs. residual plot
for file, _, label, color, marker in setup_list:
infile = np.load(cwd + 'data/' + file)
residual = infile['residual']
step = infile['ft_step']
iter = infile['ft_iter'] - 1
yvals = residual[residual[:, step] > -99, step]
maxiter = max(maxiter, len(yvals))
xvals = range(1, len(yvals) + 1)
plt.plot(xvals[0:iter], yvals[0:iter], color=color, linewidth=lw, linestyle='-', markersize=ms, marker=marker,
markeredgecolor='k', markerfacecolor=color, label=label)
plt.plot(xvals[iter:len(yvals)], yvals[iter:], color=color, linewidth=lw, linestyle='-', markersize=ms,
marker=marker,
markeredgecolor='k', markerfacecolor=color)
xvals = range(1, maxiter + 1)
plt.plot(xvals, [-9 for _ in range(maxiter)], 'k--')
plt.annotate('tolerance', xy=(1, -9.4), fontsize=fs)
left = 6.15
bottom = -12
width = 0.7
height = 12
right = left + width
top = bottom + height
rect = plt.Rectangle(xy=(left, bottom), width=width, height=height, color='lightgrey')
plt.text(0.5 * (left + right), 0.5 * (bottom + top), 'node failure', horizontalalignment='center',
verticalalignment='center', rotation=90, color='k', fontsize=fs)
fig.gca().add_artist(rect)
plt.xlim(1 - 0.25, maxiter + 0.25)
plt.ylim(minres - 0.25, maxres + 0.25)
plt.xlabel('iteration', **axis_font)
plt.ylabel('log10(residual)', **axis_font)
plt.title('ALL', **axis_font)
ax.xaxis.labelpad = 0
ax.yaxis.labelpad = 0
plt.tick_params(axis='both', which='major', labelsize=fs)
plt.legend(numpoints=1, fontsize=fs)
plt.xticks(range(1, maxiter + 1))
plt.yticks(range(minres, maxres + 1))
ax.tick_params(pad=2)
# plt.tight_layout()
fname = 'data/' + setup + '_residuals_allstrategies.png'
plt.savefig(fname, bbox_inches='tight')
# os.system('pdfcrop ' + fname + ' ' + fname)
plt.close('all')
if __name__ == "__main__":
create_plots(setup='HEAT')
create_plots(setup='ADVECTION')
| [
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
46384,
11537,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
279,
2645,
397,
1330,
48321,
10044,
4105,
... | 2.121775 | 2,636 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Monitors a directory tree for changes."""
import sys
import types
from google.appengine.tools.devappserver2 import inotify_file_watcher
from google.appengine.tools.devappserver2 import mtime_file_watcher
from google.appengine.tools.devappserver2 import win32_file_watcher
class _MultipleFileWatcher(object):
"""A FileWatcher than can watch many directories."""
def get_file_watcher(directories, use_mtime_file_watcher):
"""Returns an instance that monitors a hierarchy of directories.
Args:
directories: A list representing the paths of the directories to monitor.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the current
platform.
Returns:
A FileWatcher appropriate for the current platform. start() must be called
before has_changes().
"""
assert not isinstance(directories, types.StringTypes), 'expected list got str'
if len(directories) != 1:
return _MultipleFileWatcher(directories, use_mtime_file_watcher)
directory = directories[0]
if use_mtime_file_watcher:
return mtime_file_watcher.MtimeFileWatcher(directory)
elif sys.platform.startswith('linux'):
return inotify_file_watcher.InotifyFileWatcher(directory)
elif sys.platform.startswith('win'):
return win32_file_watcher.Win32FileWatcher(directory)
return mtime_file_watcher.MtimeFileWatcher(directory)
# NOTE: The Darwin-specific watcher implementation (found in the deleted file
# fsevents_file_watcher.py) was incorrect - the Mac OS X FSEvents
# implementation does not detect changes in symlinked files or directories. It
# also does not provide file-level change precision before Mac OS 10.7.
#
# It is still possible to provide an efficient implementation by watching all
# symlinked directories and using mtime checking for symlinked files. On any
# change in a directory, it would have to be rescanned to see if a new
# symlinked file or directory was added. It also might be possible to use
# kevents instead of the Carbon API to detect files changes.
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
4343,
3012,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 3.459873 | 785 |
import betamax
import requests
import unittest
from requests_toolbelt import SSLAdapter
| [
11748,
731,
321,
897,
198,
11748,
7007,
198,
11748,
555,
715,
395,
198,
198,
6738,
7007,
62,
25981,
37976,
1330,
25952,
47307,
628
] | 3.913043 | 23 |
import appdaemon.plugins.hass.hassapi as hass
| [
11748,
598,
6814,
7966,
13,
37390,
13,
71,
562,
13,
71,
562,
15042,
355,
468,
82,
198
] | 2.705882 | 17 |
# Copyright 2016 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License'): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
This module contains simple interfaces for File compression and decompression.
'''
import gzip
import zipfile
from io import BytesIO
__all__ = ['GzipHandler',
'ZipHandler']
class GzipHandler(object):
'''
Class for handling gzip-formatted string content.
'''
@classmethod
def check_format(cls, data):
'''Validate `data` whether it is in gzip format.
Bytes 0 and 1 should be (per RFC 1952):
data[0] = 31 (0x1f), data[1] = 139 (0x8b).
:param data: Data to check.
:type data: ``bytes``
:returns: True if it is in gzip format else False.
:rtype: ``bool``
'''
return data[0:2] == b'\x1f\x8b'
@classmethod
def decompress(cls, data):
'''Decompress gzip-compressed data `data`.
It will perform basic validation, then return the decompressed
data or raises ValueError exception for invalid `data`.
:param data: Gzip-compressed data to decompress.
:type data: ``bytes``
:returns: decompressed data.
:rtype: ``string``
:raises ValueError: If `data` is not in gzip format
'''
if not cls.check_format(data):
raise ValueError('File is not gzip format.')
return gzip.GzipFile(fileobj=BytesIO(data),
mode='rb').read()
class ZipHandler(object):
'''
Class for handling zip files.
'''
@classmethod
def check_format(cls, data):
'''Validate `data` whether it is in zip format.
:param data: Data to check.
:type data: ``bytes``
:returns: True if it is in zip format else False.
:rtype: ``bool``
'''
return zipfile.is_zipfile(BytesIO(data))
@classmethod
def decompress(cls, data):
'''Decompress zip-compressed data `data`.
It will perform basic validation, then return the decompressed
data or raises ValueError exception with error message.
:param data: Zip-compressed data to decompress.
:type data: ``bytes``
:returns: decompressed data.
:rtype: ``string``
:raises ValueError: If decompress data failed.
'''
if not cls.check_format(data):
raise ValueError('File is not zip format.')
fh = BytesIO(data)
decompressor = zipfile.ZipFile(fh)
files = decompressor.infolist()
if len(files) > 1:
raise ValueError(
'Zip files containing multiple files not supported by this '
'handler.')
try:
text = decompressor.read(files[0].filename)
except:
raise ValueError('Unknown exception when extracting zip file.')
if len(text) != files[0].file_size:
raise ValueError('Zip file size does not match actual size.')
return text
| [
2,
15069,
1584,
13341,
2954,
11,
3457,
13,
201,
198,
2,
201,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
705,
34156,
6,
2599,
345,
743,
201,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 2.333333 | 1,536 |
#!/usr/bin/env python
# coding: utf-8
# In[7]:
import csv
import matplotlib.pyplot as plt
import pandas as pd
from sqlalchemy import create_engine
# In[8]:
engine = create_engine('postgres://aruns2@mlpolicylab.db.dssg.io:5432/bills3_database')
# In[5]:
#test command
sql = "SELECT status_id FROM catalogs.bill_status"
result_set = engine.execute(sql)
for rec in result_set:
print(rec)
# In[4]:
# total number of bills
sql = "select count(distinct bill_id) from ml_policy_class.bill_progress"
result_set = engine.execute(sql)
for rec in result_set:
total_bills = rec
print(total_bills)
#total number of bills passed
sql = "select count(distinct bill_id) from ml_policy_class.bill_progress where bill_status =4"
result_set = engine.execute(sql)
for rec in result_set:
total_passed_bills = rec
print(total_passed_bills)
# In[5]:
#total number of bills in NY
sql = "select count(distinct bp.bill_id) from (select distinct bill_id from ml_policy_class.bill_progress) bp join ml_policy_class.bills b on b.bill_id = bp.bill_id join ml_policy_class.sessions s on s.session_id = b.session_id where s.state_id = 32"
result_set = engine.execute(sql)
for rec in result_set:
total_passed_bills = rec
print(total_passed_bills)
break
#total number of bills passed in NY
sql = "select count(distinct bp.bill_id) from (select distinct bill_id from ml_policy_class.bill_progress where bill_status =4) bp join ml_policy_class.bills b on b.bill_id = bp.bill_id join ml_policy_class.sessions s on s.session_id = b.session_id where s.state_id = 32"
result_set = engine.execute(sql)
for rec in result_set:
total_passed_bills = rec
print(total_passed_bills)
break
# In[18]:
#bills labels
sql = "select distinct m.bill_id, m.final_status from (select bill_id, (case when bill_status = 4 then 1 else 0 end) as final_status from ml_policy_class.bill_progress) m"
result_set = engine.execute(sql)
for rec in result_set:
print(rec)
break
# In[34]:
#bills details
sql = "select * from (select bp.bill_id,bp.final_status,s.session_id, s.state_id, s.special, s.year_start , s.year_end , b.bill_type , b.subjects, b.introduced_date, b.introduced_body, b.url from (select distinct m.bill_id as bill_id, m.final_status as final_status from (select bill_id, (case when bill_status = 4 then 1 else 0 end) as final_status from ml_policy_class.bill_progress) m) bp join ml_policy_class.bills b on b.bill_id = bp.bill_id join ml_policy_class.sessions s on s.session_id = b.session_id where s.state_id = 32) bill_details join ml_policy_class.bill_sponsors bs on bill_details.bill_id = bs.bill_id "
result_set = engine.execute(sql)
for rec in result_set:
print(rec)
break
# In[12]:
for rec in result_set:
total_passed_bills = rec
print(total_passed_bills[9])
break
# In[35]:
all_data = [{column: value for column, value in rowproxy.items()} for rowproxy in result_set]
all_data[0]
# In[37]:
#headers
headers = [i for i in all_data[0].keys()]
headers
len(all_data)
# In[40]:
csv_file= 'output_csv'
with open(csv_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=headers)
writer.writeheader()
for row in all_data:
writer.writerow(row)
csvfile.close()
# In[2]:
sql1 = "select * from (select bp.bill_id,bp.final_status,s.session_id, s.state_id, s.special, s.year_start , s.year_end , b.bill_type , b.subjects, b.introduced_date, b.introduced_body, b.url from (select distinct m.bill_id as bill_id, m.final_status as final_status from (select bill_id, (case when bill_status = 4 then 1 else 0 end) as final_status from ml_policy_class.bill_progress) m) bp join ml_policy_class.bills b on b.bill_id = bp.bill_id join ml_policy_class.sessions s on s.session_id = b.session_id where s.state_id = 32) bill_details join ml_policy_class.bill_sponsors bs on bill_details.bill_id = bs.bill_id "
#sql2 = "select * from ml_policy_class.bill_progress bp"
sql2 = 'select bill_id, session_id, introduced_date, final_date, present_date, (final_date - present_date) as "days_to_final", label from sketch.bill_processed order by present_date'
#data_extractor(sql)
# In[10]:
#getting bills progress
sql = "select * from ml_policy_class.bill_progress bp"
result_set = engine.execute(sql)
for rec in result_set:
print(rec)
break
#convert to dictionary
all_data = [{column: value for column, value in rowproxy.items()} for rowproxy in result_set]
headers = [i for i in all_data[0].keys()]
csv_file= 'billprogress_csv'
with open(csv_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=headers)
writer.writeheader()
for row in all_data:
writer.writerow(row)
csvfile.close()
# getting the new date labels
sql = 'select bill_id, session_id, introduced_date, final_date, present_date, (final_date - present_date) as "days_to_final", label from sketch.bill_processed order by present_date'
result_set = engine.execute(sql)
for rec in result_set:
print(rec)
break
#convert to dictionary
all_data = [{column: value for column, value in rowproxy.items()} for rowproxy in result_set]
headers = [i for i in all_data[0].keys()]
csv_file= 'bill_date_label_csv'
with open(csv_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=headers)
writer.writeheader()
for row in all_data:
writer.writerow(row)
csvfile.close() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
22,
5974,
628,
198,
11748,
269,
21370,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
19798,
292,
3... | 2.658668 | 2,042 |
"""
2017 Day 9
https://adventofcode.com/2017/day/9
"""
from typing import Tuple
import aocd # type: ignore
def main() -> None:
"""
Calculate and output the solutions based on the real puzzle input.
"""
data = aocd.get_data(year=2017, day=9)
part1, part2 = parse_stream(data)
print(f"Part 1: {part1}")
print(f"Part 2: {part2}")
if __name__ == "__main__":
main()
| [
37811,
198,
5539,
3596,
860,
198,
5450,
1378,
324,
1151,
1659,
8189,
13,
785,
14,
5539,
14,
820,
14,
24,
198,
37811,
198,
198,
6738,
19720,
1330,
309,
29291,
198,
11748,
257,
420,
67,
220,
1303,
2099,
25,
8856,
628,
198,
198,
4299,
... | 2.445122 | 164 |
from django.test import TestCase
from django.test import Client
import json
# Create your tests here.
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
9288,
1330,
20985,
198,
11748,
33918,
198,
198,
2,
13610,
534,
5254,
994,
13,
628
] | 3.714286 | 28 |
#! /usr/bin/env python3
# -*-coding:utf-8 -*-
# @Time : 2019/06/18 17:04:28
# @Author : che
# @Email : ch1huizong@gmail.com
import imaplib
username = ""
password = ""
mail_server = "imap.qq.com"
i = imaplib.IMAP4_SSL(mail_server)
print(i.login(username, password))
print(i.select("INBOX"))
for msg_id in i.search(None, "ALL")[1][0].decode().split():
print(msg_id)
outf = open("/tmp/email/%s.eml" % msg_id, "w")
outf.write(i.fetch(msg_id, "(RFC822)")[1][0][1].decode())
outf.close()
i.logout()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
66,
7656,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
13130,
14,
3312,
14,
1507,
1596,
25,
3023,
25,
2078,
198,
2,
2488,
13838,... | 2.096774 | 248 |
from module.interface import *
from os import system
system('cls')
menu('List Comprehension Nomes')
mulheres = ['Jeanne', 'Lisa', 'Gina', 'Aurora', 'Monica', 'Grisália', 'Natália', 'Julia', 'Rosilene', 'Tabata']
homens = ['Samuel', 'Gustavo', 'Bob', 'André', 'David', 'Idelfonso', 'João', 'Julius', 'Pedro', 'José']
titulo('Homens 4 Letras')
homem_4l = [nome for nome in homens if len(nome) <= 4]
for pos, nome in enumerate(homem_4l):
print(f' {pos + 1} - {nome}')
system('pause')
system('cls')
titulo('Homens Duplas')
homens_dupla = [(nome[0], nome) for nome in homens]
for pos, tupla in enumerate(homens_dupla):
print(f'{pos + 1} - {tupla[0]} | {tupla[1]}')
system('pause')
system('cls')
titulo('Homens Dupla Dict')
hom_dupla_dict = {tupla[0]: tupla[1] for tupla in homens_dupla}
for k, v in hom_dupla_dict.items():
print(f'{k}: {v}')
print(f'Total: {len(hom_dupla_dict)} homens')
system('pause')
system('cls')
titulo('Homem Com Mulher')
h_m_zip = zip(homens, mulheres)
homem_mulher = {tupla[0]: tupla[1] for tupla in h_m_zip}
cont = 1
for homem, mulher in homem_mulher.items():
print(f'{cont} -> {homem} S2 {mulher}')
cont += 1
print(f'Total: {len(homem_mulher)} casais')
system('pause')
system('cls')
print('\033[36mPrograma Finalizado!\033[m')
| [
6738,
8265,
13,
39994,
1330,
1635,
198,
6738,
28686,
1330,
1080,
198,
198,
10057,
10786,
565,
82,
11537,
198,
26272,
10786,
8053,
3082,
7345,
295,
399,
2586,
11537,
198,
198,
76,
377,
19079,
796,
37250,
38248,
710,
3256,
705,
44203,
325... | 2.216638 | 577 |
#encoding=utf8
import os
# define redis connection, default is localhost and port is 6379
REDIS_URL = os.environ.get("REDIS_URL", 'redis://localhost:6379') | [
2,
12685,
7656,
28,
40477,
23,
198,
198,
11748,
28686,
198,
198,
2,
8160,
2266,
271,
4637,
11,
4277,
318,
1957,
4774,
290,
2493,
318,
718,
29088,
198,
22083,
1797,
62,
21886,
796,
28686,
13,
268,
2268,
13,
1136,
7203,
22083,
1797,
6... | 2.907407 | 54 |
#!/usr/bin/env python
"""
Solution to Project Euler Problem 14
http://projecteuler.net/
by Apalala <apalala@gmail.com>
(cc) Attribution-ShareAlike
http://creativecommons.org/licenses/by-sa/3.0/
The following iterative sequence is defined for the set of positive integers:
n → n/2 (n is even)
n → 3n + 1 (n is odd)
Using the rule above and starting with 13, we generate the following sequence:
13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
It can be seen that this sequence (starting at 13 and finishing at 1) contains
10 terms. Although it has not been proved yet (Collatz Problem), it is thought
that all starting numbers finish at 1.
Which starting number, under one million, produces the longest chain?
NOTE: Once the chain starts the terms are allowed to go above one million.
"""
__count = {1: 1}
if __name__ == "__main__":
test()
run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
46344,
284,
4935,
412,
18173,
20647,
1478,
198,
4023,
1378,
16302,
68,
18173,
13,
3262,
14,
198,
198,
1525,
5949,
282,
6081,
1279,
499,
282,
6081,
31,
14816,
13,
785,
29,
... | 3.208178 | 269 |
#
# nes-keypress.py
#
# Usage: python nes-keypress.py [--verbose] -if INPUT_FILE
#
# Input File Format:
# {
# "latch_pin": 10,
# "clock_pin": 3,
# "data_pin": 7,
# "key_mapping": {
# "a": "KEY_Z",
# "b": "KEY_X",
# "select": "KEY_Q",
# "start": "KEY_E",
# "up": "KEY_W",
# "down": "KEY_S",
# "left": "KEY_A",
# "right": "KEY_D",
# "menu": "KEY_P"
# }
# }
##
#!/usr/bin/env python
"""
Thanks to:
https://github.com/WiringPi/WiringPi/
http://little-scale.blogspot.ca/2007/07/nes-controller-to-arduino.html
http://blog.thestateofme.com/2012/08/10/raspberry-pi-gpio-joystick/
"""
import argparse
import uinput
import time
import atexit
import sys
import os
import json
import RPi.GPIO as GPIO
verbose = False
MENU_TIMER = 1
MENU_TIMER_WAIT = 50
# The controller button bit masks
NES_RIGHT = 0x01
NES_LEFT = 0x02
NES_DOWN = 0x04
NES_UP = 0x08
NES_START = 0x10
NES_SELECT = 0x20
NES_B = 0x40
NES_A = 0x80
##
# Retrieve and validate the command line parameters and intialize the config
#
# @return Dict
##
##
# Setup the NES GPIO ports
#
# @param Dict config The NES configuration dictionary
##
##
# Get the key mapping. This dictionary will map the presses on the NES
# controller to the keyboard presses
#
# @param Dict config The NES config dictionary
#
# @return Dict
##
##
# Read the state of the NES controller
#
# @param Dict config The NES config dictionary
#
# @return integer
##
##
# Send out keyboard presses
#
# @param integer buttons The bit string representing the buttons state
# @param Device device The keyboard Device object
# @param Dict keyMapping The mapping of NES presses to keyboard presses
##
##
# Clear the state of the GPIO
##
if __name__ == "__main__":
main()
| [
2,
198,
2,
299,
274,
12,
2539,
8439,
13,
9078,
198,
2,
198,
2,
29566,
25,
21015,
299,
274,
12,
2539,
8439,
13,
9078,
685,
438,
19011,
577,
60,
532,
361,
3268,
30076,
62,
25664,
198,
2,
198,
2,
23412,
9220,
18980,
25,
198,
2,
1... | 2.531519 | 698 |
# simpleshare
import sys
import os
import tkinter as tk
from simpleshare.gui import Simpleshare
from simpleshare.cli import cli_main
from simpleshare.util import MCASTGROUP, PORT
# center_window
# main
| [
2,
985,
2374,
43466,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
198,
6738,
985,
2374,
43466,
13,
48317,
1330,
3184,
2374,
43466,
198,
6738,
985,
2374,
43466,
13,
44506,
1330,
537,
72,
62,
12417... | 3.059701 | 67 |
# Copyright 2014 in medias res Gesellschaft fuer Informationstechnologie mbH
# The ddb project licenses this file to you under the Apache License,
# version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
requires = [
'pyramid==1.4.5',
'SQLAlchemy==0.9.3',
'transaction==1.4.3',
'pyramid_tm==0.7',
'zope.sqlalchemy==0.7.4',
'waitress==0.8.8',
'psycopg2==2.5.2',
'requests==2.2.1',
'httpagentparser==1.6.0',
'pyramid_beaker==0.8',
'geoalchemy2==0.2.3',
'sphinx==1.2.2'
]
setup(name='ddb',
version='0.6',
description='ddb map showcase',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='in medias res GmbH',
author_email='info@webgis.de',
url='http://www.webgis.de',
keywords='web wsgi bfg pylons pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='ddb',
install_requires=requires,
entry_points="""\
[paste.app_factory]
main = ddb:main
[console_scripts]
initialize_ddb_db = ddb.scripts.initializedb:main
""",
)
| [
2,
15069,
1946,
287,
1117,
4448,
581,
45371,
19187,
11693,
701,
14035,
263,
6188,
4169,
1349,
928,
494,
285,
65,
39,
198,
2,
383,
288,
9945,
1628,
16625,
428,
2393,
284,
345,
739,
262,
24843,
13789,
11,
198,
2,
2196,
362,
13,
15,
... | 2.453202 | 812 |
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.sites.models import Site
from django.db import models
from socialregistration.signals import connect, login
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
connect.connect(save_facebook_token, sender=FacebookProfile,
dispatch_uid='socialregistration.facebook.connect')
login.connect(save_facebook_token, sender = FacebookProfile,
dispatch_uid = 'socialregistration.facebook.login')
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
8323,
5344,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
49315,
13,
27530,
1330,
14413,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
19... | 3.452703 | 148 |
from calamari_ocr.ocr.dataset.textprocessors.text_processor import TextProcessor
from calamari_ocr.ocr.dataset.textprocessors.text_normalizer import (
TextNormalizerProcessorParams,
)
from calamari_ocr.ocr.dataset.textprocessors.text_regularizer import (
TextRegularizerProcessorParams,
)
from calamari_ocr.ocr.dataset.textprocessors.basic_text_processors import (
StripTextProcessorParams,
BidiTextProcessorParams,
)
from calamari_ocr.ocr.dataset.textprocessors.str_to_char_list import (
StrToCharListProcessorParams,
)
from calamari_ocr.ocr.dataset.textprocessors.text_synchronizer import synchronize
| [
6738,
35765,
2743,
62,
1696,
13,
1696,
13,
19608,
292,
316,
13,
5239,
14681,
669,
13,
5239,
62,
41341,
1330,
8255,
18709,
273,
198,
198,
6738,
35765,
2743,
62,
1696,
13,
1696,
13,
19608,
292,
316,
13,
5239,
14681,
669,
13,
5239,
62,... | 2.906977 | 215 |
"""
The builtin `datetime` module provides classes for points in time (`date`, and
`datetime`) as well as durations (`timedelta`), but it does not account for
time durations at a specific point. This module provides `Interval`, which
contains a start and end `date` or `datetime`, and a duration `timedelta`.
This is useful for representing calendar events. This module also provides
`PeriodicInterval` which can be used for repeating events, by containing a
period `timedelta` and a count of occurrences (either an `int` or `forever`).
"""
from datetime_interval.interval import Interval
from datetime_interval.periodic_interval import forever, PeriodicInterval
| [
37811,
198,
464,
3170,
259,
4600,
19608,
8079,
63,
8265,
3769,
6097,
329,
2173,
287,
640,
357,
63,
4475,
47671,
290,
198,
63,
19608,
8079,
63,
8,
355,
880,
355,
288,
20074,
357,
63,
16514,
276,
12514,
63,
828,
475,
340,
857,
407,
... | 3.730337 | 178 |
#!/usr/bin/env python3
from diagrams import Cluster, Diagram
from diagrams.aws.database import RDS
from diagrams.aws.general import User
from diagrams.aws.network import APIGateway
from diagrams.k8s.compute import Pod
with Diagram("apiks", show=False):
topic = User("client")
with Cluster("AWS"):
with Cluster("Node"):
pod = Pod("Nginx")
kong = APIGateway("Kong")
flask = Pod("Flask")
RDS = RDS("RDS PostgreSQL")
topic >> pod
pod >> kong >>flask
flask >> RDS
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
37067,
1330,
38279,
11,
6031,
6713,
198,
6738,
37067,
13,
8356,
13,
48806,
1330,
371,
5258,
198,
6738,
37067,
13,
8356,
13,
24622,
1330,
11787,
198,
6738,
37067,
13,
835... | 2.481481 | 216 |
from user_handle import views
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
urlpatterns = [
url(r'^register/$', views.Register.as_view(), name='Register'),
url(r'^login/$', views.Login.as_view(), name='Login'),
url(r'^logout/$', views.Logout.as_view(), name='Logout'),
url(r'^$', login_required(views.Index.as_view()), name='Index'),
url(r'^interest/$', login_required(views.ManageInterested.as_view()), name='Interests'),
url(r'^inbox/$', login_required(views.MessageInbox.as_view()), name='MessageInbox'),
url(r'^message/(?P<message_id>[0-9]+)/$',
login_required(views.MessageView.as_view()),
name='Message'),
]
| [
6738,
2836,
62,
28144,
1330,
5009,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
198,
198,
6371,
33279,
82,
796,
685,
19... | 2.588235 | 272 |
"""
Simple functions for calling commands as subprocesses.
"""
import logging
import signal
import subprocess
__all__ = ["capture_command", "predicate_command", "action_command", "CommandError"]
logger = logging.getLogger(__name__)
| [
37811,
198,
26437,
5499,
329,
4585,
9729,
355,
850,
14681,
274,
13,
198,
37811,
198,
11748,
18931,
198,
11748,
6737,
198,
11748,
850,
14681,
198,
198,
834,
439,
834,
796,
14631,
27144,
495,
62,
21812,
1600,
366,
28764,
5344,
62,
21812,
... | 3.449275 | 69 |
#many ways to do this, this is only one example
d = dict(G.degree)
1 in d.values()
# answer: none | [
198,
2,
21834,
2842,
284,
466,
428,
11,
428,
318,
691,
530,
1672,
198,
67,
796,
8633,
7,
38,
13,
16863,
8,
198,
16,
287,
288,
13,
27160,
3419,
198,
198,
2,
3280,
25,
4844
] | 2.828571 | 35 |
import pandas as pd
from py_expression_eval import Parser
math_parser = Parser()
def _get_minintensity(qualifier):
"""
Returns absolute min and relative min
Args:
qualifier ([type]): [description]
Returns:
[type]: [description]
"""
min_intensity = 0
min_percent_intensity = 0
min_tic_percent_intensity = 0
if qualifier is None:
min_intensity = 0
min_percent_intensity = 0
return min_intensity, min_percent_intensity, min_tic_percent_intensity
if "qualifierintensityvalue" in qualifier:
min_intensity = float(qualifier["qualifierintensityvalue"]["value"])
if "qualifierintensitypercent" in qualifier:
min_percent_intensity = float(qualifier["qualifierintensitypercent"]["value"]) / 100
if "qualifierintensityticpercent" in qualifier:
min_tic_percent_intensity = float(qualifier["qualifierintensityticpercent"]["value"]) / 100
# since the subsequent comparison is a strict greater than, if people set it to 100, then they won't get anything.
min_percent_intensity = min(min_percent_intensity, 0.99)
return min_intensity, min_percent_intensity, min_tic_percent_intensity
def _get_intensitymatch_range(qualifiers, match_intensity):
"""
Matching the intensity range
Args:
qualifiers ([type]): [description]
match_intensity ([type]): [description]
Returns:
[type]: [description]
"""
min_intensity = 0
max_intensity = 0
if "qualifierintensitytolpercent" in qualifiers:
tolerance_percent = qualifiers["qualifierintensitytolpercent"]["value"]
tolerance_value = float(tolerance_percent) / 100 * match_intensity
min_intensity = match_intensity - tolerance_value
max_intensity = match_intensity + tolerance_value
return min_intensity, max_intensity
def ms2prod_condition(condition, ms1_df, ms2_df, reference_conditions_register):
"""
Filters the MS1 and MS2 data based upon MS2 peak conditions
Args:
condition ([type]): [description]
ms1_df ([type]): [description]
ms2_df ([type]): [description]
reference_conditions_register ([type]): Edits this in place
Returns:
ms1_df ([type]): [description]
ms2_df ([type]): [description]
"""
exclusion_flag = _get_exclusion_flag(condition.get("qualifiers", None))
if len(ms2_df) == 0:
return ms1_df, ms2_df
ms2_list = []
for mz in condition["value"]:
if mz == "ANY":
# Checking defect options
massdefect_min, massdefect_max = _get_massdefect_min(condition.get("qualifiers", None))
ms2_filtered_df = ms2_df
ms2_filtered_df["mz_defect"] = ms2_filtered_df["mz"] - ms2_filtered_df["mz"].astype(int)
min_int, min_intpercent, min_tic_percent_intensity = _get_minintensity(condition.get("qualifiers", None))
ms2_filtered_df = ms2_filtered_df[
(ms2_filtered_df["mz_defect"] > massdefect_min) &
(ms2_filtered_df["mz_defect"] < massdefect_max) &
(ms2_filtered_df["i"] > min_int) &
(ms2_filtered_df["i_norm"] > min_intpercent) &
(ms2_filtered_df["i_tic_norm"] > min_tic_percent_intensity)
]
else:
mz_tol = _get_mz_tolerance(condition.get("qualifiers", None), mz)
mz_min = mz - mz_tol
mz_max = mz + mz_tol
min_int, min_intpercent, min_tic_percent_intensity = _get_minintensity(condition.get("qualifiers", None))
ms2_filtered_df = ms2_df[(ms2_df["mz"] > mz_min) &
(ms2_df["mz"] < mz_max) &
(ms2_df["i"] > min_int) &
(ms2_df["i_norm"] > min_intpercent) &
(ms2_df["i_tic_norm"] > min_tic_percent_intensity)]
# Setting the intensity match register
_set_intensity_register(ms2_filtered_df, reference_conditions_register, condition)
# Applying the intensity match
ms2_filtered_df = _filter_intensitymatch(ms2_filtered_df, reference_conditions_register, condition)
ms2_list.append(ms2_filtered_df)
if len(ms2_list) == 1:
ms2_filtered_df = ms2_list[0]
else:
ms2_filtered_df = pd.concat(ms2_list)
# Apply the negation operator
if exclusion_flag:
filtered_scans = set(ms2_filtered_df["scan"])
original_scans = set(ms2_df["scan"])
negation_scans = original_scans - filtered_scans
ms2_filtered_df = ms2_df[ms2_df["scan"].isin(negation_scans)]
if len(ms2_filtered_df) == 0:
return pd.DataFrame(), pd.DataFrame()
# Filtering the actual data structures
filtered_scans = set(ms2_filtered_df["scan"])
ms2_df = ms2_df[ms2_df["scan"].isin(filtered_scans)]
# Filtering the MS1 data now
ms1_scans = set(ms2_df["ms1scan"])
ms1_df = ms1_df[ms1_df["scan"].isin(ms1_scans)]
return ms1_df, ms2_df
def ms2nl_condition(condition, ms1_df, ms2_df, reference_conditions_register):
"""
Filters the MS1 and MS2 data based upon MS2 neutral loss conditions
Args:
condition ([type]): [description]
ms1_df ([type]): [description]
ms2_df ([type]): [description]
reference_conditions_register ([type]): Edits this in place
Returns:
ms1_df ([type]): [description]
ms2_df ([type]): [description]
"""
exclusion_flag = _get_exclusion_flag(condition.get("qualifiers", None))
if len(ms2_df) == 0:
return ms1_df, ms2_df
ms2_list = []
for mz in condition["value"]:
if mz == "ANY":
# Checking defect options
massdefect_min, massdefect_max = _get_massdefect_min(condition.get("qualifiers", None))
ms2_filtered_df = ms2_df
ms2_filtered_df["mz_defect"] = ms2_filtered_df["mz"] - ms2_filtered_df["mz"].astype(int)
min_int, min_intpercent, min_tic_percent_intensity = _get_minintensity(condition.get("qualifiers", None))
ms2_filtered_df = ms2_filtered_df[
(ms2_filtered_df["mz_defect"] > massdefect_min) &
(ms2_filtered_df["mz_defect"] < massdefect_max) &
(ms2_filtered_df["i"] > min_int) &
(ms2_filtered_df["i_norm"] > min_intpercent) &
(ms2_filtered_df["i_tic_norm"] > min_tic_percent_intensity)
]
else:
mz_tol = _get_mz_tolerance(condition.get("qualifiers", None), mz) #TODO: This is incorrect logic if it comes to PPM accuracy
nl_min = mz - mz_tol
nl_max = mz + mz_tol
min_int, min_intpercent, min_tic_percent_intensity = _get_minintensity(condition.get("qualifiers", None))
ms2_filtered_df = ms2_df[
((ms2_df["precmz"] - ms2_df["mz"]) > nl_min) &
((ms2_df["precmz"] - ms2_df["mz"]) < nl_max) &
(ms2_df["i"] > min_int) &
(ms2_df["i_norm"] > min_intpercent) &
(ms2_df["i_tic_norm"] > min_tic_percent_intensity)
]
# Setting the intensity match register
_set_intensity_register(ms2_filtered_df, reference_conditions_register, condition)
# Applying the intensity match
ms2_filtered_df = _filter_intensitymatch(ms2_filtered_df, reference_conditions_register, condition)
ms2_list.append(ms2_filtered_df)
if len(ms2_list) == 1:
ms2_filtered_df = ms2_list[0]
else:
ms2_filtered_df = pd.concat(ms2_list)
# Apply the negation operator
if exclusion_flag:
filtered_scans = set(ms2_filtered_df["scan"])
original_scans = set(ms2_df["scan"])
negation_scans = original_scans - filtered_scans
ms2_filtered_df = ms2_df[ms2_df["scan"].isin(negation_scans)]
if len(ms2_filtered_df) == 0:
return pd.DataFrame(), pd.DataFrame()
# Filtering the actual data structures
filtered_scans = set(ms2_filtered_df["scan"])
ms2_df = ms2_df[ms2_df["scan"].isin(filtered_scans)]
# Filtering the MS1 data now
ms1_scans = set(ms2_df["ms1scan"])
ms1_df = ms1_df[ms1_df["scan"].isin(ms1_scans)]
return ms1_df, ms2_df
def ms2prec_condition(condition, ms1_df, ms2_df, reference_conditions_register):
"""
Filters the MS1 and MS2 data based upon MS2 precursor conditions
Args:
condition ([type]): [description]
ms1_df ([type]): [description]
ms2_df ([type]): [description]
reference_conditions_register ([type]): Edits this in place
Returns:
ms1_df ([type]): [description]
ms2_df ([type]): [description]
"""
exclusion_flag = _get_exclusion_flag(condition.get("qualifiers", None))
if len(ms2_df) == 0:
return ms1_df, ms2_df
ms2_list = []
for mz in condition["value"]:
if mz == "ANY":
# Checking defect options
massdefect_min, massdefect_max = _get_massdefect_min(condition.get("qualifiers", None))
ms2_filtered_df = ms2_df
ms2_filtered_df["precmz_defect"] = ms2_filtered_df["precmz"] - ms2_filtered_df["precmz"].astype(int)
ms2_filtered_df = ms2_filtered_df[(
ms2_filtered_df["precmz_defect"] > massdefect_min) &
(ms2_filtered_df["precmz_defect"] < massdefect_max)
]
else:
mz_tol = _get_mz_tolerance(condition.get("qualifiers", None), mz)
mz_min = mz - mz_tol
mz_max = mz + mz_tol
ms2_filtered_df = ms2_df[(
ms2_df["precmz"] > mz_min) &
(ms2_df["precmz"] < mz_max)
]
ms2_list.append(ms2_filtered_df)
if len(ms2_list) == 1:
ms2_filtered_df = ms2_list[0]
else:
ms2_filtered_df = pd.concat(ms2_list)
# Apply the negation operator
if exclusion_flag:
filtered_scans = set(ms2_filtered_df["scan"])
original_scans = set(ms2_df["scan"])
negation_scans = original_scans - filtered_scans
ms2_filtered_df = ms2_df[ms2_df["scan"].isin(negation_scans)]
if len(ms2_filtered_df) == 0:
return pd.DataFrame(), pd.DataFrame()
# Filtering the actual data structures
filtered_scans = set(ms2_filtered_df["scan"])
ms2_df = ms2_df[ms2_df["scan"].isin(filtered_scans)]
# Filtering the MS1 data now
if len(ms1_df) > 0:
ms1_scans = set(ms2_df["ms1scan"])
ms1_df = ms1_df[ms1_df["scan"].isin(ms1_scans)]
return ms1_df, ms2_df
def ms1_condition(condition, ms1_df, ms2_df, reference_conditions_register):
"""
Filters the MS1 and MS2 data based upon MS1 peak conditions
Args:
condition ([type]): [description]
ms1_df ([type]): [description]
ms2_df ([type]): [description]
reference_conditions_register ([type]): Edits this in place
Returns:
ms1_df ([type]): [description]
ms2_df ([type]): [description]
"""
exclusion_flag = _get_exclusion_flag(condition.get("qualifiers", None))
if len(ms1_df) == 0:
return ms1_df, ms2_df
ms1_list = []
for mz in condition["value"]:
if mz == "ANY":
# Checking defect options
massdefect_min, massdefect_max = _get_massdefect_min(condition.get("qualifiers", None))
ms1_filtered_df = ms1_df
ms1_filtered_df["mz_defect"] = ms1_filtered_df["mz"] - ms1_filtered_df["mz"].astype(int)
min_int, min_intpercent, min_tic_percent_intensity = _get_minintensity(condition.get("qualifiers", None))
ms1_filtered_df = ms1_filtered_df[
(ms1_filtered_df["mz_defect"] > massdefect_min) &
(ms1_filtered_df["mz_defect"] < massdefect_max) &
(ms1_filtered_df["i"] > min_int) &
(ms1_filtered_df["i_norm"] > min_intpercent) &
(ms1_filtered_df["i_tic_norm"] > min_tic_percent_intensity)
]
else:
# Checking defect options
massdefect_min, massdefect_max = _get_massdefect_min(condition.get("qualifiers", None))
mz_tol = _get_mz_tolerance(condition.get("qualifiers", None), mz)
mz_min = mz - mz_tol
mz_max = mz + mz_tol
min_int, min_intpercent, min_tic_percent_intensity = _get_minintensity(condition.get("qualifiers", None))
ms1_filtered_df = ms1_df[
(ms1_df["mz"] > mz_min) &
(ms1_df["mz"] < mz_max) &
(ms1_df["i"] > min_int) &
(ms1_df["i_norm"] > min_intpercent) &
(ms1_df["i_tic_norm"] > min_tic_percent_intensity)]
if massdefect_min > 0 or massdefect_max < 1:
ms1_filtered_df["mz_defect"] = ms1_filtered_df["mz"] - ms1_filtered_df["mz"].astype(int)
ms1_filtered_df = ms1_filtered_df[
(ms1_filtered_df["mz_defect"] > massdefect_min) &
(ms1_filtered_df["mz_defect"] < massdefect_max)
]
# Setting the intensity match register
_set_intensity_register(ms1_filtered_df, reference_conditions_register, condition)
# Applying the intensity match
ms1_filtered_df = _filter_intensitymatch(ms1_filtered_df, reference_conditions_register, condition)
ms1_list.append(ms1_filtered_df)
if len(ms1_list) == 1:
ms1_filtered_df = ms1_list[0]
else:
ms1_filtered_df = pd.concat(ms1_list)
# Apply the negation operator
if exclusion_flag:
filtered_scans = set(ms1_filtered_df["scan"])
original_scans = set(ms1_df["scan"])
negation_scans = original_scans - filtered_scans
ms1_filtered_df = ms1_df[ms1_df["scan"].isin(negation_scans)]
if len(ms1_filtered_df) == 0:
return pd.DataFrame(), pd.DataFrame()
# Filtering the actual data structures
filtered_scans = set(ms1_filtered_df["scan"])
ms1_df = ms1_df[ms1_df["scan"].isin(filtered_scans)]
if "ms1scan" in ms2_df:
ms2_df = ms2_df[ms2_df["ms1scan"].isin(filtered_scans)]
return ms1_df, ms2_df
def ms1_filter(condition, ms1_df):
"""
Filters the MS1 and MS2 data based upon MS1 peak filters
Args:
condition ([type]): [description]
ms1_df ([type]): [description]
Returns:
ms1_df ([type]): [description]
"""
if len(ms1_df) == 0:
return ms1_df
ms1_list = []
for mz in condition["value"]:
if mz == "ANY":
# Checking defect options
massdefect_min, massdefect_max = _get_massdefect_min(condition.get("qualifiers", None))
ms1_filtered_df = ms1_df
ms1_filtered_df["mz_defect"] = ms1_filtered_df["mz"] - ms1_filtered_df["mz"].astype(int)
min_int, min_intpercent, min_tic_percent_intensity = _get_minintensity(condition.get("qualifiers", None))
ms1_filtered_df = ms1_filtered_df[
(ms1_filtered_df["mz_defect"] > massdefect_min) &
(ms1_filtered_df["mz_defect"] < massdefect_max) &
(ms1_filtered_df["i"] > min_int) &
(ms1_filtered_df["i_norm"] > min_intpercent) &
(ms1_filtered_df["i_tic_norm"] > min_tic_percent_intensity)
]
else:
# Checking defect options
massdefect_min, massdefect_max = _get_massdefect_min(condition.get("qualifiers", None))
mz_tol = _get_mz_tolerance(condition.get("qualifiers", None), mz)
mz_min = mz - mz_tol
mz_max = mz + mz_tol
min_int, min_intpercent, min_tic_percent_intensity = _get_minintensity(condition.get("qualifiers", None))
ms1_filtered_df = ms1_df[
(ms1_df["mz"] > mz_min) &
(ms1_df["mz"] < mz_max) &
(ms1_df["i"] > min_int) &
(ms1_df["i_norm"] > min_intpercent) &
(ms1_df["i_tic_norm"] > min_tic_percent_intensity)]
if massdefect_min > 0 or massdefect_max < 1:
ms1_filtered_df["mz_defect"] = ms1_filtered_df["mz"] - ms1_filtered_df["mz"].astype(int)
ms1_filtered_df = ms1_filtered_df[
(ms1_filtered_df["mz_defect"] > massdefect_min) &
(ms1_filtered_df["mz_defect"] < massdefect_max)
]
ms1_list.append(ms1_filtered_df)
if len(ms1_list) == 1:
ms1_filtered_df = ms1_list[0]
else:
ms1_filtered_df = pd.concat(ms1_list)
if len(ms1_filtered_df) == 0:
return pd.DataFrame()
return ms1_filtered_df
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
12972,
62,
38011,
62,
18206,
1330,
23042,
263,
198,
11018,
62,
48610,
796,
23042,
263,
3419,
628,
198,
4299,
4808,
1136,
62,
1084,
47799,
7,
13255,
7483,
2599,
198,
220,
220,
220,
37227,
198... | 2.048617 | 8,207 |
from django.shortcuts import render
from django.template import loader
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from datetime import datetime
# Create your views here.
@login_required | [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
28243,
1330,
40213,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
11,
367,
29281,
31077,
7738,
1060,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
... | 3.708861 | 79 |
import os
import pytest
import caproto as ca
from caproto._headers import MessageHeader
_incr_sends = [
[(b'abc', b'def', b'ghi'),
0,
(b'abc', b'def', b'ghi')
],
[(b'abc', b'def', b'ghi'),
1,
(b'bc', b'def', b'ghi')
],
[(b'abc', b'def', b'ghi'),
3,
(b'def', b'ghi')
],
[(MessageHeader(0, 1, 2, 3, 4, 5), b'def'),
0,
(bytes(MessageHeader(0, 1, 2, 3, 4, 5)), b'def'),
],
[(MessageHeader(0, 1, 2, 3, 4, 5), b'def'),
5,
(bytes(MessageHeader(0, 1, 2, 3, 4, 5))[5:], b'def'),
],
]
@pytest.mark.parametrize('buffers, offset, expected', _incr_sends)
@pytest.mark.parametrize('buffers, offset, expected', _incr_sends)
records_to_check = [
['x.NAME', ('x.NAME', 'x', 'NAME', None)],
['x.', ('x', 'x', None, None)],
['x', ('x', 'x', None, None)],
['x.NAME$',
('x.NAME', 'x', 'NAME',
ca.RecordModifier(ca.RecordModifiers.long_string, None),
)],
['x.VAL{"ts":true}',
('x.VAL', 'x', 'VAL',
ca.RecordModifier(ca.RecordModifiers.filtered, '{"ts":true}')
)],
['x.{}',
('x', 'x', None,
ca.RecordModifier(ca.RecordModifiers.filtered, '{}'),
)],
['x.VAL{}',
('x.VAL', 'x', 'VAL',
ca.RecordModifier(ca.RecordModifiers.filtered, '{}'),
)],
['x.NAME${}',
('x.NAME', 'x', 'NAME',
ca.RecordModifier(ca.RecordModifiers.filtered |
ca.RecordModifiers.long_string, '{}'),
)],
]
@pytest.mark.parametrize('pvname, expected_tuple', records_to_check)
bad_filters = [
["x.{not-json}",
('x', 'x', None,
ca.RecordModifier(ca.RecordModifiers.filtered, '{not-json}'),
)],
['x.{"none":null}',
('x', 'x', None,
ca.RecordModifier(ca.RecordModifiers.filtered, '{"none":null}'),
)],
]
@pytest.mark.parametrize('pvname, expected_tuple', bad_filters)
| [
11748,
28686,
198,
11748,
12972,
9288,
198,
11748,
1451,
305,
1462,
355,
1275,
198,
6738,
1451,
305,
1462,
13557,
50145,
1330,
16000,
39681,
628,
628,
198,
62,
1939,
81,
62,
82,
2412,
796,
685,
198,
220,
220,
220,
47527,
65,
6,
39305,... | 1.976141 | 964 |
from .kde_corner import * | [
6738,
764,
74,
2934,
62,
10215,
1008,
1330,
1635
] | 2.777778 | 9 |
import json
import urllib.request
from bs4 import BeautifulSoup
main() | [
11748,
33918,
198,
11748,
2956,
297,
571,
13,
25927,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
628,
198,
12417,
3419
] | 3.272727 | 22 |
from runPy_iBatchLearn import main
import sys
gpuid = int(sys.argv[1])
# gpuid=3
### Permuted-MNIST incremental class
offline_training = ''
boost_scale = 0
############## DATASET SELECT ##############
dataset='EMNIST'
### CNN
mlp_mha=10
batch_size=600
print_freq=100
### pretrained
# mlp_mha=11
# batch_size=256
# print_freq=50
# dataset='CIFAR100'
# batch_size=256
n_permutation=0
# n_permutation=10
first_split_size=10
other_split_size=10
# n_permutation=2
repeat=10 # How many experiments
# schedule=4 # Epoch
# batch_size=128
schedule=1 # Epoch
schedule=2 # Epoch
schedule=3 # Epoch
schedule=4 # Epoch
# schedule=5 # Epoch
# schedule=10 # Epoch
# schedule=20 # Epoch
# schedule=40 # Epoch
# schedule=1 # Epoch
# batch_size=2048
learning_rate=0.001
# agent_name="Fed_Memory_4000"
# arg_input = "iBatchLearn.py --gpuid {gpuid} --repeat {repeat} --incremental_class --optimizer Adam --n_permutation {n_permutation} --force_out_dim 100 --schedule {schedule} --batch_size {batch_size} --model_name MLP1000 --agent_type customization --agent_name {agent_name} --lr 0.0001 | tee ${OUTDIR}/Naive_Rehearsal_4000.log".format(gpuid=gpuid, repeat=repeat, n_permutation=n_permutation, schedule=schedule, batch_size=batch_size, agent_name=agent_name, OUTDIR="outputs/permuted_MNIST_incremental_class")
# ---------Memory Embedding Rehearsal-------
# agent_name = "Noise_Rehearsal_4400"
# agent_name = "No_Rehearsal_4400"
# agent_name = "Memory_Embedding_Rehearsal_1100"
# agent_name = "Memory_Embedding_Rehearsal_4400"
# agent_name = "Model_Generating_Rehearsal_8800"
# boost_scale = 1
agent_name = "Model_Generating_Rehearsal_4400"
boost_scale = 1
# agent_name = "Model_Generating_Rehearsal_2200"
# boost_scale = 1
# agent_name = "Model_Generating_Rehearsal_1100"
# boost_scale = 1
model_type="mlp"
model_name="MLP1000_img_sz"
# ------------Naive_Rehearsal--------------
# agent_name="Naive_Rehearsal_1100"
# agent_name="Naive_Rehearsal_2200"
agent_name="Naive_Rehearsal_4400"
boost_scale = 1
model_type="cnn"
model_name="CNN1000_img_sz"
# model_type="pretrained"
# model_name="PRETRAINED1000_img_sz"
# offline_training = '--offline_training'
arg_input = "iBatchLearn.py --gpuid {gpuid} --print_freq {print_freq} --dataset {dataset} {offline_training} --repeat {repeat} --first_split_size {first_split_size} --boost_scale {boost_scale} --mlp_mha {mlp_mha} --other_split_size {other_split_size} --optimizer Adam --n_permutation {n_permutation} --force_out_dim 10 --schedule {schedule} --batch_size {batch_size} --model_type {model_type} --model_name {model_name} --agent_type customization --agent_name {agent_name} --lr {learning_rate}".format(gpuid=gpuid, print_freq=print_freq, dataset=dataset, offline_training=offline_training, repeat=repeat, first_split_size=first_split_size, boost_scale=boost_scale, mlp_mha=mlp_mha, other_split_size=other_split_size, n_permutation=n_permutation, schedule=schedule, batch_size=batch_size, model_type=model_type, model_name=model_name, agent_name=agent_name, learning_rate=learning_rate)
# python -u iBatchLearn.py --gpuid $GPUID --repeat $REPEAT --optimizer Adam --n_permutation 10 --no_class_remap --force_out_dim 10 --schedule $SCHEDULE --batch_size $BS --model_name MLP1000 --agent_type customization --agent_name Naive_Rehearsal_4000 --lr 0.0001 | tee ${OUTDIR}/Naive_Rehearsal_4000.log
arg_list = arg_input.split("|")[0].split()
main(arg_list)
| [
6738,
1057,
20519,
62,
72,
33,
963,
20238,
1330,
1388,
198,
11748,
25064,
198,
198,
46999,
312,
796,
493,
7,
17597,
13,
853,
85,
58,
16,
12962,
198,
2,
308,
19944,
312,
28,
18,
198,
198,
21017,
2448,
76,
7241,
12,
39764,
8808,
294... | 2.590977 | 1,330 |
'''
Code uses standard trained model for prediction on transformed wavelet.
Uses MAE to calculate accuracy.
'''
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
import numpy as np
from copy import copy
import seaborn as sns
from pomegranate import *
import pywt
from matplotlib.pyplot import xlabel
from sklearn import model_selection
from sklearn.preprocessing.tests import test_label
import random
#cluster_accuracies = get_clusterwise_accuracy()
if __name__ == "__main__":
filepath = "ECGDATA\\Truncated\\Normative\\"
readfiles(filepath)
| [
7061,
6,
201,
198,
10669,
3544,
3210,
8776,
2746,
329,
17724,
319,
14434,
6769,
1616,
13,
201,
198,
5842,
274,
8779,
36,
284,
15284,
9922,
13,
220,
201,
198,
7061,
6,
201,
198,
201,
198,
6738,
28686,
1330,
1351,
15908,
201,
198,
673... | 2.478261 | 276 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Name : __init__.py.py
Created on : 2017/06/19 13:20
Author : Liuker <liu@liuker.xyz>
Version : 1.0.0
Copyright : Copyright (C) 2016 - 2017, Liuker's Blog, https://liuker.org.
Description : .
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
6530,
220,
220,
220,
220,
220,
220,
220,
1058,
11593,
15003,
834,
13,
9078,
13,
9078,
198,
15622,
31... | 2.211382 | 123 |
#!/usr/bin/env python
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script takes a Clang git revision as an argument, it then
creates a feature branch, puts this revision into update.py, uploads
a CL, triggers Clang Upload try bots, and tells what to do next"""
from __future__ import print_function
import argparse
import fnmatch
import itertools
import os
import re
import shutil
import subprocess
import sys
from build import CheckoutLLVM, GetCommitDescription, LLVM_DIR
from update import CHROMIUM_DIR
# Path constants.
THIS_DIR = os.path.dirname(__file__)
UPDATE_PY_PATH = os.path.join(THIS_DIR, "update.py")
CHROMIUM_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..'))
# Keep lines in here at <= 72 columns, else they wrap in gerrit.
COMMIT_FOOTER = \
'''
Bug: TODO
Cq-Include-Trybots: chromium/try:chromeos-amd64-generic-cfi-thin-lto-rel
Cq-Include-Trybots: chromium/try:dawn-win10-x86-deps-rel
Cq-Include-Trybots: chromium/try:linux-chromeos-dbg
Cq-Include-Trybots: chromium/try:linux_angle_deqp_rel_ng
Cq-Include-Trybots: chromium/try:linux_chromium_cfi_rel_ng
Cq-Include-Trybots: chromium/try:linux_chromium_chromeos_asan_rel_ng
Cq-Include-Trybots: chromium/try:linux_chromium_chromeos_msan_rel_ng
Cq-Include-Trybots: chromium/try:linux_chromium_compile_dbg_32_ng
Cq-Include-Trybots: chromium/try:linux_chromium_msan_rel_ng
Cq-Include-Trybots: chromium/try:mac-arm64-rel,mac_chromium_asan_rel_ng
Cq-Include-Trybots: chromium/try:win-angle-deqp-rel-64
Cq-Include-Trybots: chromium/try:win-asan,win7-rel,win-angle-deqp-rel-32
Cq-Include-Trybots: chrome/try:iphone-device,ipad-device
Cq-Include-Trybots: chrome/try:linux-chromeos-chrome
Cq-Include-Trybots: chrome/try:win-chrome,win64-chrome,mac-chrome
'''
is_win = sys.platform.startswith('win32')
if __name__ == '__main__':
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
66,
8,
1584,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
... | 2.630728 | 742 |
import sys
import os
from codecs import open
from setuptools import setup, find_packages
if sys.version_info < (3, 5):
print ("At least Python 3.5 is required. Please install Python 3.5.")
exit(1)
try:
from setuptools import setup
except ImportError as e:
sys.stderr.write("Could not import setuptools. Please install setuptools and try again to install htseq-clip. \n Error: {}".format(e))
sys.exit(1)
#try:
# import Cython
#except ImportError as e:
# sys.stderr.write("Could not import HTSeq dependency 'Cython'. Please install it with pip install Cython and then try again to install htseq-clip. \n Exception: {}".format(e))
# sys.exit(1)
try:
import numpy
except ImportError as e:
sys.stderr.write("Could not import numpy. Please install it with pip install numpy and then try again to install htseq-clip. \n Exception: {}".format(e))
sys.exit(1)
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='htseq-clip',
version='2.11.0b0',
description='htseq-clip: a toolset for the analysis of eCLIP/iCLIP datasets',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/EMBL-Hentze-group/htseq-clip',
author='Thomas Schwarzl, Sudeep Sahadevan, Marko Fritz, Nadia Ashraf',
author_email='schwarzl@embl.de, sudeep.sahadevan@embl.de',
zip_safe=False,
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
install_requires=['HTSeq==0.13.5', 'pysam'],
packages=['clip','tests'],
test_suite = 'tests',
entry_points = {
'console_scripts': ['htseq-clip=clip.command_line:main'],
}
)
| [
201,
198,
11748,
25064,
201,
198,
11748,
28686,
201,
198,
6738,
40481,
82,
1330,
1280,
201,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
201,
198,
201,
198,
361,
25064,
13,
9641,
62,
10951,
1279,
357,
18,
11,
642,
... | 2.565632 | 838 |
from cloudmesh.configuration.Config import Config
config = Config()
profile = config["cloudmesh.profile"]
print(profile)
##
"""
flow = flow_from_clientsecrets(filename, scope, message=message, #Change this to use jason as oblect
cache=cache, redirect_uri=redirect_uri,
device_uri=device_uri)
credentials = flow.step2_exchange(code, http=http)
return credentials
""" | [
6738,
6279,
76,
5069,
13,
11250,
3924,
13,
16934,
1330,
17056,
198,
11250,
796,
17056,
3419,
198,
13317,
796,
4566,
14692,
17721,
76,
5069,
13,
13317,
8973,
198,
4798,
7,
13317,
8,
198,
198,
2235,
198,
37811,
198,
220,
220,
220,
5202,... | 2.319797 | 197 |
from _Framework.Capabilities import CONTROLLER_ID_KEY, PORTS_KEY, NOTES_CC, SCRIPT, REMOTE, controller_id, inport, \
outport
from APC_mini_mle import APC_mini_mle
| [
6738,
4808,
21055,
6433,
13,
15610,
5738,
1330,
27342,
46,
3069,
1137,
62,
2389,
62,
20373,
11,
350,
33002,
62,
20373,
11,
5626,
1546,
62,
4093,
11,
6374,
46023,
11,
22657,
23051,
11,
10444,
62,
312,
11,
287,
634,
11,
3467,
198,
220... | 2.560606 | 66 |
"""
mcpython - a minecraft clone written in python licenced under the MIT-licence
(https://github.com/mcpython4-coding/core)
Contributors: uuk, xkcdjerry (inactive)
Based on the game of fogleman (https://github.com/fogleman/Minecraft), licenced under the MIT-licence
Original game "minecraft" by Mojang Studios (www.minecraft.net), licenced under the EULA
(https://account.mojang.com/documents/minecraft_eula)
Mod loader inspired by "Minecraft Forge" (https://github.com/MinecraftForge/MinecraftForge) and similar
This project is not official by mojang and does not relate to it.
"""
import asyncio
import io
import itertools
import json
import os
import sys
import typing
import zipfile
from abc import ABC
import aiofiles
from aiofiles import os as aio_os
from aiofiles import ospath as async_path
# servers don't need textures, so pillow is not required
# WARNING: this M A Y break other stuff
try:
import PIL.Image as PIL_Image
except ImportError:
if not typing.TYPE_CHECKING:
else:
import PIL.Image as PIL_Image
import mcpython.common.config
import mcpython.util.texture
from mcpython import shared
from mcpython.engine import logger
"""
---------------------------------------------
Specifications for the resource loader system
---------------------------------------------
On startup / on reload, so called ResourceLocation's are created for every archive / directory in resourcepack-folder
and other asset sources (mod files)
functions to access data:
to_filename(representation: str) -> str: returns the transformed name (for example block/dirt gets
assets/minecraft/textures/block/dirt.png)
exists(filename: str) -> bool: returns if an directory exists somewhere
read_<xy>(filename: str) -> object: loads the file in the speicified mode
How mods do interact with these?
Mod files are automatically added to these system to make it easier to add own resources
There is a special class for simulating files in-memory
"""
class IResourceLoader(ABC):
"""
Base class for a class holding a link to a resource source, like and directory or zip-file
(but in theory can be anything, even over network)
"""
@staticmethod
def is_valid(path: str) -> bool:
"""
Checks if a location is valid as a source to load via the constructor
:param path: the path to check
:return: if it is valid or not
"""
raise NotImplementedError()
def get_path_info(self) -> str:
"""
Returns a unique identifier for this loader, like a path loaded from, or some mod name
"""
raise NotImplementedError()
async def is_in_path(self, path: str) -> bool:
"""
Checks if a local file-name is in the given path, so it can be loaded
:param path: the file path to check
:return: if it is in the path
"""
raise NotImplementedError()
async def read_raw(self, path: str) -> bytes:
"""
Will read a file in binary mode
:param path: the file name to use
:return: the content of the file loaded in binary
"""
raise NotImplementedError()
async def read_image(self, path: str) -> PIL_Image.Image:
"""
Will read a file as a PIL.Image.Image
:param path: the file name to use
:return: the content of the file loaded as image
"""
data = await self.read_raw(path)
return PIL_Image.open(io.BytesIO(data))
async def read_decoding(self, path: str, encoding: str = "utf-8") -> str:
"""
Will read a file into the system as a string, decoding the raw bytes in the given encoding
:param path: the file name to use
:param encoding: the encoding to use
:return: the content of the file loaded as string
"""
return (await self.read_raw(path)).decode(encoding)
def close(self):
"""
Called when the resource path should be closed
Should be used for cleanup
"""
def get_all_entries_in_directory(
self, directory: str, go_sub=True
) -> typing.Iterator[str]:
"""
Should return all entries in a local directory
:param directory: the directory to check
:param go_sub: if sub directories should be iterated or not
:return: a list of data
todo: add a regex variant
"""
raise NotImplementedError()
class ResourceZipFile(IResourceLoader):
"""
Implementation for zip-archives
"""
@staticmethod
class ResourceDirectory(IResourceLoader):
"""
Implementation for raw directories
"""
@staticmethod
class SimulatedResourceLoader(IResourceLoader):
"""
In-memory resource loader instance
"""
SIMULATOR_ID = 0
@staticmethod
# data loaders for the resource locations, SimulatedResourceLoader is not a default loader;
# order matters: load_resource_packs() uses the first loader whose is_valid() accepts a path
RESOURCE_PACK_LOADERS = [
    ResourceZipFile,
    ResourceDirectory,
]
RESOURCE_LOCATIONS = []  # a list of all resource locations in the system, searched in order by read_raw()/read_image()
# todo: add manager class for this
def load_resource_packs():
    """
    Loads the resource packs found in the configured resource-pack paths
    and registers the default game directories afterwards.
    Fires the "resources:load" event when finished.
    todo: add a way to add resource locations persistent to reloads
    """
    close_all_resources()
    pack_folder = shared.home + "/resourcepacks"
    if not os.path.exists(pack_folder):
        os.makedirs(pack_folder)
    if shared.ENABLE_RESOURCE_PACK_LOADER:
        for entry in os.listdir(pack_folder):
            # the base game archives are handled separately; skip them here
            if entry in (
                "{}.jar".format(mcpython.common.config.MC_VERSION_BASE),
                "minecraft.zip",
            ):
                continue
            entry = shared.home + "/resourcepacks/" + entry
            # first loader accepting the path wins
            for loader in RESOURCE_PACK_LOADERS:
                if loader.is_valid(entry):
                    RESOURCE_LOCATIONS.append(loader(entry))
                    break
            else:
                logger.println(
                    "[ResourceLocator][WARNING] can't load path {}. No valid loader found!".format(
                        entry
                    )
                )
    # honour "--add-resource-path <path>" pairs from the command line
    index = 0
    while index < len(sys.argv):
        if sys.argv[index] == "--add-resource-path":
            extra_path = sys.argv[index + 1]
            if zipfile.is_zipfile(extra_path):
                RESOURCE_LOCATIONS.append(ResourceZipFile(extra_path))
            else:
                RESOURCE_LOCATIONS.append(ResourceDirectory(extra_path))
            index += 2
        else:
            index += 1
    # for local accessing the various directories used by the game
    # todo: this might need tweaks for build executables
    RESOURCE_LOCATIONS.append(ResourceDirectory(shared.local))
    RESOURCE_LOCATIONS.append(ResourceDirectory(shared.home))
    RESOURCE_LOCATIONS.append(ResourceDirectory(shared.build))
    if shared.dev_environment:
        # only in dev-environment we need these special folders
        # todo: strip when building
        # todo: use the .jar file for source resources instead of extracting them
        RESOURCE_LOCATIONS.append(ResourceDirectory(shared.local + "/resources/main"))
        RESOURCE_LOCATIONS.append(
            ResourceDirectory(shared.local + "/resources/generated")
        )
        RESOURCE_LOCATIONS.append(ResourceZipFile(shared.local + "/source.zip"))
    shared.event_handler.call("resources:load")
def close_all_resources():
    """
    Closes every opened resource location via its close() handler and empties
    RESOURCE_LOCATIONS; fires the "resources:close" event afterwards.
    """
    logger.println("[RESOURCE LOADER] clearing resource system...")
    for location in RESOURCE_LOCATIONS:
        location.close()
    RESOURCE_LOCATIONS.clear()
    if shared.event_handler:
        shared.event_handler.call("resources:close")
# prefixes that identify MC texture resource names (first path segment under textures/)
MC_IMAGE_LOCATIONS = [
    "block",
    "gui",
    "item",
    "entity",
    "model",
]


async def transform_name(file: str, raise_on_error=True) -> str:
    """
    Converts an MC-style ResourceLocation string into a local texture path.

    :param file: the resource name to convert, optionally "namespace:name"
    :param raise_on_error: when True, unknown .png names fall back to the
        missing texture and other unknown names raise; when False, the name
        is returned unchanged
    :return: the converted path
    :raises FileNotFoundError: when the name is invalid and raise_on_error is set
    """
    parts = file.split(":")
    if any(parts[-1].startswith(prefix) for prefix in MC_IMAGE_LOCATIONS):
        # without an explicit namespace, "minecraft" is assumed
        if len(parts) == 1:
            namespace, local = "minecraft", parts[0]
        else:
            namespace, local = parts[0], parts[1]
        segments = local.split("/")
        return "assets/{}/textures/{}/{}.png".format(
            namespace, segments[0], "/".join(segments[1:])
        )
    if raise_on_error:
        if file.endswith(".png"):
            logger.println(
                "can't find '{}' in resource system. Replacing with missing texture image...".format(
                    file
                )
            )
            return "assets/missing_texture.png"
        raise FileNotFoundError(file)
    return file
async def exists(file: str, transform=True):
    """
    Checks if a given file exists in the resource system.

    :param file: the file to check
    :param transform: if the transformed (texture-path) form should be checked too
    :return: if it exists or not
    """
    if file.startswith("build/"):
        file = file.replace("build/", shared.build + "/", 1)
    if file.startswith(
        "@"
    ):  # special resource notation, can be used for accessing special ResourceLocations
        data = file.split("|")
        resource = data[0][1:]
        file = "|".join(data[1:])
        for x in RESOURCE_LOCATIONS:
            # bug fix: match via get_path_info(), consistent with read_raw()/read_image(),
            # which resolve the same "@<path>|<file>" notation; previously compared
            # against x.path, which is not part of the IResourceLoader interface
            if x.get_path_info() == resource:
                return await x.is_in_path(file)
        return False
    for x in RESOURCE_LOCATIONS:
        if await x.is_in_path(file):
            return True
    if transform:
        try:
            return await exists(await transform_name(file), transform=False)
        except (NotImplementedError, FileNotFoundError):
            pass
    return False
async def read_raw(file: str):
    """
    Reads the binary content of a file from the resource system.

    :param file: the file to load
    :return: the raw bytes
    :raises RuntimeError: when an "@"-addressed resource location is unknown
    :raises ValueError: when no resource location contains the file
    """
    if file.startswith("build/"):
        file = file.replace("build/", shared.build + "/", 1)
    # special "@<path>|<file>" notation addressing one specific ResourceLocation
    if file.startswith("@"):
        head, *tail = file.split("|")
        resource = head[1:]
        file = "|".join(tail)
        if file.startswith("build/"):
            file = file.replace("build/", shared.build + "/", 1)
        for location in RESOURCE_LOCATIONS:
            location: IResourceLoader
            if location.get_path_info() != resource:
                continue
            try:
                return await location.read_raw(file)
            except:
                logger.println("exception during loading file '{}'".format(file))
                raise
        raise RuntimeError("can't find resource named {}".format(resource))
    if not await exists(file, transform=False):
        file = await transform_name(file)
    # iterate over a snapshot so concurrent registration does not disturb the walk
    for location in RESOURCE_LOCATIONS[:]:
        if await location.is_in_path(file):
            try:
                return await location.read_raw(file)
            except:
                logger.println("exception during loading file '{}'".format(file))
                raise
    raise ValueError("can't find resource '{}' in any path".format(file))
async def read_image(file: str):
    """
    Loads a file from the resource system and decodes it as an image.

    :param file: the file to load
    :return: the decoded image
    :raises RuntimeError: when an "@"-addressed resource location is unknown
    :raises ValueError: when file is None or no resource location contains it
    """
    if file is None:
        raise ValueError(file)
    if file.startswith("build/"):
        file = file.replace("build/", shared.build + "/", 1)
    # special "@<path>|<file>" notation addressing one specific ResourceLocation
    if file.startswith("@"):
        head, *tail = file.split("|")
        resource = head[1:]
        file = "|".join(tail)
        if file.startswith("build/"):
            file = file.replace("build/", shared.build + "/", 1)
        for location in RESOURCE_LOCATIONS:
            location: IResourceLoader
            if location.get_path_info() != resource:
                continue
            try:
                return await location.read_image(file)
            except:
                logger.println("exception during loading file '{}'".format(file))
                raise
        raise RuntimeError("can't find resource named {}".format(resource))
    if not await exists(file, transform=False):
        try:
            file = await transform_name(file)
        except FileNotFoundError:
            logger.println("[WARN] could not find texture", file)
            file = "assets/missing_texture.png"
    for location in RESOURCE_LOCATIONS[:]:
        if await location.is_in_path(file):
            try:
                return await location.read_image(file)
            except (SystemExit, KeyboardInterrupt):
                sys.exit(-1)
            except:
                # deliberately logged without re-raising: the next location is tried
                logger.print_exception(
                    "exception during loading file '{}'".format(file)
                )
    raise ValueError("can't find resource '{}' in any path".format(file))
async def read_json(file: str):
    """
    Reads and parses a .json file from the resource system.

    :param file: the file to load
    :return: the parsed json data
    :raises ValueError: when the file is empty or not valid json
    """
    try:
        data = (await read_raw(file)).decode("utf-8")
    except:
        print("during accessing", file)
        raise
    if not data:
        # previously raised a bare ValueError; include the file name for debugging
        raise ValueError("empty json file '{}'".format(file))
    try:
        return json.loads(data)
    except:
        print("during decoding", file)
        raise
async def get_all_entries(directory: str) -> typing.Iterator[str]:
    """
    Gets all files & directories [ending with an "/"] of a given directory
    across all resource locations, lowest-priority location first.

    :param directory: the directory to use
    :return: an iterator over all found entries
    """
    # bug fix: iterate a reversed COPY; previously the global RESOURCE_LOCATIONS
    # list itself was reversed in place (loc = RESOURCE_LOCATIONS; loc.reverse()),
    # flipping the lookup order for every other resource function on each call
    locations = list(reversed(RESOURCE_LOCATIONS))
    return itertools.chain.from_iterable(
        location.get_all_entries_in_directory(directory) for location in locations
    )
async def get_all_entries_special(directory: str) -> typing.Iterator[str]:
    """
    Returns all entries found, each in its corresponding '@<path>|<file>' notation.

    :param directory: the directory to search from
    :return: an iterator over the found resources
    """
    return itertools.chain.from_iterable(
        (
            "@{}|{}".format(location.get_path_info(), entry)
            for entry in location.get_all_entries_in_directory(directory)
        )
        for location in RESOURCE_LOCATIONS
    )
| [
37811,
198,
76,
13155,
7535,
532,
257,
6164,
3323,
17271,
3194,
287,
21015,
3476,
5864,
739,
262,
17168,
12,
677,
594,
220,
198,
7,
5450,
1378,
12567,
13,
785,
14,
76,
13155,
7535,
19,
12,
66,
7656,
14,
7295,
8,
198,
198,
37146,
6... | 2.453736 | 5,955 |
'''
https://leetcode.com/problems/valid-parentheses/
20. Valid Parentheses
Easy
Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.
An input string is valid if:
Open brackets must be closed by the same type of brackets.
Open brackets must be closed in the correct order.
Note that an empty string is also considered valid.
Example 1:
Input: "()"
Output: true
Example 2:
Input: "()[]{}"
Output: true
Example 3:
Input: "(]"
Output: false
Example 4:
Input: "([)]"
Output: false
Example 5:
Input: "{[]}"
Output: true
'''
# per the examples in the docstring above, '{[(]}' is invalid, so this should print False
# NOTE(review): `Solution` is not defined or imported in this snippet — confirm it exists elsewhere
print(Solution().isValid('{[(]}'))
7061,
6,
201,
198,
5450,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
12102,
12,
8000,
39815,
14,
201,
198,
1238,
13,
48951,
16774,
39815,
201,
198,
28406,
201,
198,
201,
198,
15056,
257,
4731,
7268,
655,
262,
3435,
29513,
32... | 2.782427 | 239 |
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import numpy as np
import time
import logging
from preact_resnet import PreActResNet18
from wideresnet import WideResNet
from utils import *
from mart import mart_loss
# command-line options for MART adversarial training
parser = argparse.ArgumentParser(description='PyTorch CIFAR MART Defense')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                    help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=50, metavar='N',
                    help='number of epochs to train')
parser.add_argument('--model', default='pre', type=str, choices=['pre', 'wide'])
parser.add_argument('--wide-factor', default=10, type=int, help='Widen factor')
parser.add_argument('--weight-decay', '--wd', default=5e-4,
                    type=float, metavar='W')
parser.add_argument('--lr', type=float, default=0.05, metavar='LR',
                    help='learning rate')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                    help='SGD momentum')
parser.add_argument('--epsilon', type=float, default=8.,
                    help='perturbation bound')
# bug fix: without type=int a command-line value stayed a string (default was int)
parser.add_argument('--num-steps', type=int, default=10,
                    help='perturb number of steps')
parser.add_argument('--step_size', type=float, default=2.,
                    help='step size')
# bug fix: without type=float a command-line value stayed a string (default was float)
parser.add_argument('--beta', type=float, default=6.0,
                    help='weight before kl (misclassified examples)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--normalization', default='std', type=str, choices=['std', '01','+-1'])
parser.add_argument('--fname', default='output', type=str)
parser.add_argument('--data-dir', default='/mnt/storage0_8/torch_datasets/cifar-data', type=str)
parser.add_argument('--out-dir', default='mart_out', type=str, help='Output directory')
parser.add_argument('--save-model', action='store_true')
args = parser.parse_args()
# settings
# training settings
torch.manual_seed(args.seed)  # seed torch CPU RNG for reproducibility
np.random.seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True  # let cudnn pick the fastest kernels (non-deterministic)
device = torch.device("cuda")
# convert pixel-scale bounds (0-255) to the [0, 1] input scale
epsilon = (args.epsilon / 255.)
step_size = (args.step_size / 255.)
if args.normalization == 'std':
    # per-channel dataset statistics; cifar10_mean/cifar10_std presumably come
    # from the `utils` star import — TODO confirm
    mu = torch.tensor(cifar10_mean).view(3,1,1).cuda()
    std = torch.tensor(cifar10_std).view(3,1,1).cuda()
elif args.normalization == '01':
    # identity normalization: keep inputs in [0, 1]
    mu = torch.tensor((0.,0.,0.)).view(3,1,1).cuda()
    std = torch.tensor((1.,1.,1.)).view(3,1,1).cuda()
elif args.normalization == '+-1':
    # scale inputs to [-1, 1]
    mu = torch.tensor((0.5, 0.5, 0.5)).view(3,1,1).cuda()
    std = torch.tensor((0.5, 0.5, 0.5)).view(3,1,1).cuda()
train_loader, test_loader = get_loaders(args.data_dir, args.batch_size)
def adjust_learning_rate(optimizer, epoch):
    """Decays the learning rate: 10x smaller from epoch 25, 100x from epoch 40.

    Applies the new rate to every parameter group and returns it.
    """
    if epoch >= 40:
        lr = args.lr * 0.01
    elif epoch >= 25:
        lr = args.lr * 0.1
    else:
        lr = args.lr
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
if __name__ == '__main__':
    # NOTE(review): no main() is defined in this chunk — confirm it exists elsewhere in the file
    main()
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
201,
198,
11748,
28686,
201,
198,
11748,
1822,
29572,
201,
198,
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
201,
... | 2.377424 | 1,444 |
# Generated by Django 2.1.1 on 2019-05-16 16:00
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
16,
319,
13130,
12,
2713,
12,
1433,
1467,
25,
405,
201,
198,
201,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
201,
198,
201,
198
] | 2.485714 | 35 |
from .detection import detect_spikes | [
6738,
764,
15255,
3213,
1330,
4886,
62,
2777,
7938
] | 4 | 9 |
from .feeding_learned_reward import FeedingLearnedRewardEnv
from .agents import pr2, baxter, sawyer, jaco, stretch, panda, human, human_mesh
from .agents.pr2 import PR2
from .agents.baxter import Baxter
from .agents.sawyer import Sawyer
from .agents.jaco import Jaco
from .agents.stretch import Stretch
from .agents.panda import Panda
from .agents.human import Human
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.tune.registry import register_env
robot_arm = 'right'
# the simulated human controls only its head joints
human_controllable_joint_indices = human.head_joints
# Register one gym environment per robot type for the feeding task.
# NOTE(review): the FeedingLearnedReward*HumanEnv classes used below are not defined
# or imported in this chunk — presumably declared elsewhere in this module; confirm.
register_env('assistive_gym:FeedingLearnedRewardPR2Human-v1', lambda config: FeedingLearnedRewardPR2HumanEnv())
register_env('assistive_gym:FeedingLearnedRewardBaxterHuman-v1', lambda config: FeedingLearnedRewardBaxterHumanEnv())
register_env('assistive_gym:FeedingLearnedRewardSawyerHuman-v1', lambda config: FeedingLearnedRewardSawyerHumanEnv())
register_env('assistive_gym:FeedingLearnedRewardJacoHuman-v1', lambda config: FeedingLearnedRewardJacoHumanEnv())
register_env('assistive_gym:FeedingLearnedRewardStretchHuman-v1', lambda config: FeedingLearnedRewardStretchHumanEnv())
register_env('assistive_gym:FeedingLearnedRewardPandaHuman-v1', lambda config: FeedingLearnedRewardPandaHumanEnv())
6738,
764,
22824,
62,
35720,
276,
62,
260,
904,
1330,
18272,
278,
14961,
2817,
48123,
4834,
85,
198,
6738,
764,
49638,
1330,
778,
17,
11,
275,
40864,
11,
2497,
9860,
11,
474,
10602,
11,
7539,
11,
279,
5282,
11,
1692,
11,
1692,
62,
... | 3.087282 | 401 |
import matplotlib.pyplot as plt
import torch
from path import Path
import os
from utils import *
import torch.nn as nn
import torch.nn.functional as f
# run on GPU when available, otherwise fall back to CPU
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
import cv2
from path import Path
from tqdm import tqdm
from opts import parse_args_main as parse_args
def get_smooth_loss(img, disp):  # tensors are BCHW
    """Computes the smoothness loss for a disparity image.

    The color image is used for edge-aware smoothness: disparity gradients
    are down-weighted where the image itself has strong gradients (edges).
    Also renders the intermediate terms with matplotlib for debugging.
    """
    # absolute disparity gradients along width (x) and height (y)
    disp_dx = torch.abs(disp[:, :, :, :-1] - disp[:, :, :, 1:])
    disp_dy = torch.abs(disp[:, :, :-1, :] - disp[:, :, 1:, :])
    # mean absolute image gradients, averaged over channels (keepdim keeps BCHW)
    img_dx = torch.mean(torch.abs(img[:, :, :, :-1] - img[:, :, :, 1:]), 1, keepdim=True)
    img_dy = torch.mean(torch.abs(img[:, :, :-1, :] - img[:, :, 1:, :]), 1, keepdim=True)
    # edge-aware weighting: suppress the penalty where the image has edges
    disp_dx = disp_dx * torch.exp(-img_dx)
    disp_dy = disp_dy * torch.exp(-img_dy)
    combined = disp_dx[:, :, :-1, :] + disp_dy[:, :, :, :-1]
    # debug visualization of the individual terms (requires an interactive backend)
    plt.subplot(2, 3, 1)
    plt.imshow(t2arr(disp_dx))
    plt.subplot(2, 3, 2)
    plt.imshow(t2arr(disp_dy))
    plt.subplot(2, 3, 3)
    plt.imshow(t2arr(img_dx))
    plt.subplot(2, 3, 4)
    plt.imshow(t2arr(img_dy))
    plt.subplot(2, 3, 5)
    plt.imshow(t2arr(combined))
    return combined.mean()
if __name__ == '__main__':
    #main()
    #test_rober()
    #
    #caculate_reg_mc_test()
    # NOTE(review): caculate_reg_mc_gt is not defined in this chunk — confirm it exists
    # elsewhere in the file (name also looks like a typo of "calculate")
    caculate_reg_mc_gt()
    # caculate_reg_mc_test()
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
28034,
198,
6738,
3108,
1330,
10644,
198,
11748,
28686,
198,
6738,
3384,
4487,
1330,
1635,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13... | 2.112426 | 676 |
'''/*---------------------------------------------------------------------------------------------
* Copyright (c) VituTech. All rights reserved.
* Licensed under the Apache License 2.0. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'''
import os
| [
7061,
6,
15211,
10097,
1783,
32501,
198,
1635,
220,
15069,
357,
66,
8,
18271,
84,
17760,
13,
1439,
2489,
10395,
13,
198,
1635,
220,
49962,
739,
262,
24843,
13789,
362,
13,
15,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
5... | 6.186441 | 59 |
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yt2m.settings')
app = Celery('yt2m')
# pull celery configuration from Django settings; only keys prefixed with CELERY_ are used
app.config_from_object('django.conf:settings', namespace='CELERY')
# auto-discover tasks.py modules in all installed Django apps
app.autodiscover_tasks()
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
28000,
1098,
62,
17201,
874,
198,
11748,
28686,
198,
6738,
18725,
1924,
1330,
15248,
1924,
198,
198,
2,
900,
262,
4277,
37770,
6460,
8265,
329,
262,
705,
7015,
88,
6,
1430,
13,
198,
... | 3.017699 | 113 |
from django.test import TestCase
from .factories import *
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
6738,
764,
22584,
1749,
1330,
1635,
628,
198
] | 3.388889 | 18 |