content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# modulos
import math # bibliotecas com adicionais para o python
n = int(input('digite o n:'))
raiz = math.sqrt(n)
print('a raiz de {} e {:.2f}'.format(n, raiz))
| [
2,
953,
377,
418,
198,
198,
11748,
10688,
220,
1303,
275,
29142,
313,
721,
292,
401,
512,
47430,
15152,
31215,
267,
21015,
198,
198,
77,
796,
493,
7,
15414,
10786,
12894,
578,
267,
299,
32105,
4008,
198,
430,
528,
796,
10688,
13,
31... | 2.357143 | 70 |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import SubsetRandomSampler
import random
import math
import copy
from tqdm import tqdm
import time
import syft as sy
from torch.utils.tensorboard import SummaryWriter
# In[2]:
args = Arguments()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
# In[3]:
hook = sy.TorchHook(torch) # <-- NEW: hook PyTorch ie add extra functionalities to support Federated Learning
# simulation functions
workers = connect_to_workers(n_workers=args.n_workers)
# In[4]:
temp = torch.utils.data.DataLoader(
datasets.MNIST('~/data', train=True, download=True, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size,
shuffle = True,
pin_memory = True
)
train_loader = []
for i, (data, target) in tqdm(enumerate(temp)):
train_loader.append((data.to(device), target.to(device)))
#send data to all the client first
train_loader_send = []
for n in range(args.n_workers):
unit = len(train_loader)//args.n_workers
if n ==0:
for (data, target) in train_loader[:unit]:
train_loader_send.append((data.send(workers[n]), target.send(workers[n])))
else:
for (data, target) in train_loader[(n-1)*unit:n*unit]:
train_loader_send.append((data.send(workers[n]), target.send(workers[n])))
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('~/data', train=False, download=True, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size,
pin_memory = True
)
# In[5]:
# model is not exactully the same as the paper since it did not mention the unit of fc
# In[6]:
# In[7]:
# In[8]:
# In[9]:
# In[14]:
# In[10]:
# In[11]:
# In[12]:
# In[15]:
# In[ ]:
#%%time
# model = Net().to(device)
args.best_lr_list = []
args.alpha_max = args.init_alpha_max
args.epsilon = 8
p_ng, p_nmax = args.epsilon / (2 * args.split), args.epsilon / (2 * args.split)
# for epoch in range(1, args.epochs + 1):
start = time.time()
# for client in workers
# while epsilon is over 0, keep training
server_model = Net().to(device)
temp_model = Net().to(device)
logdir = "/root/notebooks/tensorflow/logs/DPAGD/F_DPAGD_v2"
writer = SummaryWriter(logdir)
for r in range(args.rounds):
temp_model_list = []
# set model into model list
server_model_list = set_model_list(args, server_model, Net)
#train on all the client
for worker_index, (worker, model) in enumerate(zip(workers, server_model_list)):
print("Now is worker {}".format(worker_index))
args.alpha_max = args.init_alpha_max
args.epsilon = 8
p_ng = args.epsilon / (2 * args.split)
del args.best_lr_list[:]
while args.epsilon > 0:
worker_best_model, p_ng = train(args, device, model, train_loader , r, worker_index, p_ng)
#append model trained by client into list
temp_model.load_state_dict(worker_best_model.state_dict())
temp_model_list.append(temp_model)
temp_stat_dict = aggregate_model(args, temp_model_list)
server_model.load_state_dict(temp_stat_dict)
test(args, device, server_model, test_loader, r, writer)
print("Spend time:{:.1f}".format(time.time() - start))
if (args.save_model):
torch.save(model.state_dict(), "mnist_cnn.pt")
# In[ ]:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
16,
5974,
628,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
37... | 2.381406 | 1,678 |
"""
fit HDDM model, with history terms, to data from IBL mice
Anne Urai, 2019, CSHL
"""
# ============================================ #
# GETTING STARTED
# ============================================ #
import matplotlib as mpl
mpl.use('Agg') # to still plot even when no display is defined
from optparse import OptionParser
import pandas as pd
import os, time
# import HDDM functions, defined in a separate file
import hddm_funcs
# more handy imports
import hddm
import seaborn as sns
sns.set()
# read inputs
usage = "HDDM_run.py [options]"
parser = OptionParser(usage)
parser.add_option("-m", "--model",
default=[0],
type="int",
help="number of the model to run")
parser.add_option("-i", "--trace_id",
default=0,
type="int",
help="number of the trace id to run")
parser.add_option("-d", "--dataset",
default=0,
type="int",
help="dataset nr")
opts, args = parser.parse_args()
# ============================================ #
# READ INPUT ARGUMENTS; DATAFILE
# ============================================ #
# find path depending on location and dataset
usr = os.environ['USER']
if 'aeurai' in usr: # lisa
datapath = '/home/aeurai/Data/'
elif 'urai' in usr: # mbp laptop
datapath = '/Users/urai/Data/projects/0/neurodec/Data/MEG-PL/HDDM'
datasets = ['MEG_HDDM_all_clean']
# select only this dataset
if isinstance(opts.dataset, str):
opts.dataset = [opts.dataset]
dataset = datasets[opts.dataset]
# ============================================ #
# READ INPUT ARGUMENTS; model
# ============================================ #
models = ['nohist', #0
'prevchoice_z',
'prevchoice_dc',
'prevchoice_dcz',
'alpha_ips01stim_vz', #4
'gamma_ips23stim_vz', #5
'beta_motor_vz', # 6
'beta_motor_prestim_vz', # 7
'gammaresid_ips23stim_vz',
'alpharesid_ips01stim_vz',
'gamma_ips23prestim_vz',
'alpha_ips01prestim_vz',
]
if isinstance(opts.model, str):
opts.model = [opts.model]
# select only this model
m = models[opts.model]
print(opts)
print(m)
# ============================================ #
# GET DATA
# ============================================ #
data = pd.read_csv(os.path.join(datapath, dataset, 'allsubjects_megall_4hddm_norm_flip.csv'))
# MAKE A PLOT OF THE RT DISTRIBUTIONS PER ANIMAL
if not os.path.isfile(os.path.join(datapath, dataset, 'figures', 'rtdist.png')):
if not os.path.exists(os.path.join(datapath, dataset, 'figures')):
try:
os.makedirs(os.path.join(datapath, dataset, 'figures'))
except:
pass
g = sns.FacetGrid(data, col='subj_idx', col_wrap=8)
g.map(sns.distplot, "rt", kde=False, rug=True)
g.savefig(os.path.join(datapath, dataset, 'figures', 'rtdist.png'))
# ============================================ #
# FIT THE ACTUAL MODEL
# ============================================ #
# gsq fit, quick
# md = hddm_funcs.run_model_gsq(data, m, datapath)
starttime = time.time()
# regression model; slow but more precise
if not os.path.isfile(os.path.join(datapath, dataset, m, 'results_combined.csv')):
print('starting model %s, %s'%(datapath, dataset))
hddm_funcs.run_model(data, m, os.path.join(datapath, dataset, m),
n_samples=10000, force=False, trace_id=opts.trace_id)
# ============================================ #
# CONCATENATE across chains
# ============================================ #
if opts.trace_id == 14 and not os.path.exists(os.path.join(datapath, dataset, m,
'model_comparison_avg.csv')):
# wait until all the files are present
filelist = []
for t in range(15):
filelist.append(os.path.join(datapath, dataset, m, 'modelfit-md%d.model' % t))
print(filelist)
while True:
if all([os.path.isfile(f) for f in filelist]):
break
else: # wait
print("waiting for files")
# raise ValueError('Not all files present')
time.sleep(60)
# concatenate the different chains, will save disk space
hddm_funcs.concat_models(os.path.join(datapath, dataset), m)
# HOW LONG DID THIS TAKE?
elapsed = time.time() - starttime
print( "Elapsed time for %s, trace %d: %f seconds\n" %(m, opts.trace_id, elapsed))
# also sample posterior predictives (will only do if doesn't already exists
# hddm_funcs.posterior_predictive(os.path.join(datapath, d, m), n_samples=100) | [
37811,
198,
11147,
5572,
23127,
2746,
11,
351,
2106,
2846,
11,
284,
1366,
422,
314,
9148,
10693,
198,
43227,
471,
430,
72,
11,
13130,
11,
9429,
6581,
198,
198,
37811,
198,
198,
2,
46111,
2559,
18604,
1303,
198,
2,
17151,
48996,
33303,... | 2.378462 | 1,950 |
from kernel.Entities import Command
class User:
"""
Class that represents a user entity that stores all functionality related to
application user.
"""
def get_id(self):
"""
Get user id.
:return: The user id.
"""
return self.__id__
def set_id(self, id):
"""
Set user id.
:param id: The user id
"""
self.__id__ = id
def get_name(self):
"""
Get user name.
:return: The user name.
"""
return self.__name__
def set_name(self, name):
"""
Set user name.
:param name: User name to set.
"""
self.__name__ = name
def get_code(self):
"""
Get user code.
:return: The user code.
"""
return self.__code__
def set_code(self, code):
"""
Set user code, this is a special code for this user, like a login.
:param code: User code, generally alphanumeric.
"""
self.__code__ = code
def get_commands(self):
"""
Get all commands related to this user.
:return: All command for this user.
"""
return self.__commands__
def get_command(self, id):
"""
Returns a command id given.
:param id: Command id to look for.
:return: The command found.
"""
for command in self.get_commands():
if command.get_id() == id:
return command
return None
def add_command(self, command: Command):
"""
Adds a command to this user.
:param command: The command to add.
"""
max_id = 0
if len(self.__commands__) > 0:
max_id = max(self.__commands__, key=lambda t: t.get_id()).get_id()
command.set_id(max_id + 1)
self.__commands__.append(command)
def update_command(self, command):
"""
Updates a command, the command will be replaced.
:param command: The command to update
"""
index, _ = (x for i, x in enumerate(self.get_users()) if x.get_id() == command.get_id())
self.__commands__[index] = command
def remove_command(self, command: Command.Command):
"""
Removes a command, the command will be removed from this user, it is based on an id search.
:param command: Command to remove.
:return:
"""
old_command = self.get_command(command.get_id())
if old_command is not None:
self.__commands__.remove(old_command)
| [
6738,
9720,
13,
14539,
871,
1330,
9455,
628,
198,
4871,
11787,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
5016,
326,
6870,
257,
2836,
9312,
326,
7000,
477,
11244,
3519,
284,
198,
220,
220,
220,
3586,
2836,
13,
198,
220,
220,... | 2.190517 | 1,181 |
from bricks_modeling.database import ldraw_colors
from typing import List
import solvers.brick_heads.config as conf
from bricks_modeling.file_IO.model_reader import read_bricks_from_file
import os
from typing import Tuple
from solvers.brick_heads import texture_to_brick
from solvers.brick_heads.lego_util import get_random_string, add_texture_to_brick
from shutil import copyfile
"""
1:西装领带 2:西装无领带 3.领带衬衫 4:夹克衫 5:开怀外套 6:半开怀外套 7:衬衫 8:纯色上衣 9:格子上衣(保留) 10:横条纹上衣(保留) 11:竖条纹上衣(保留) 12:双色上衣(保留) 13:篮球服 14:足球服 15:裙子(待细分)
"""
# get the bricks of the indicate files | [
6738,
28902,
62,
4666,
10809,
13,
48806,
1330,
300,
19334,
62,
4033,
669,
198,
6738,
19720,
1330,
7343,
198,
11748,
1540,
690,
13,
1671,
624,
62,
16600,
13,
11250,
355,
1013,
198,
6738,
28902,
62,
4666,
10809,
13,
7753,
62,
9399,
13,
... | 1.795597 | 318 |
"""
This source code is created based on https://github.com/wohlert/keras-squeezenet
---------------------------------------------------------------------------------------------------------------------------------------
https://github.com/wohlert/keras-squeezenet
MIT License
Copyright (c) 2016 Jesper Wohlert
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from keras.layers import Convolution2D, MaxPooling2D, AveragePooling2D, Activation
from keras.layers import Input, Flatten, Dropout, Concatenate
from keras.models import Model
from keras.utils.data_utils import get_file
WEIGHTS_PATH = 'https://github.com/wohlert/keras-squeezenet/releases/download/v0.1/squeezenet_weights.h5'
| [
37811,
198,
1212,
2723,
2438,
318,
2727,
1912,
319,
3740,
1378,
12567,
13,
785,
14,
86,
48988,
861,
14,
6122,
292,
12,
16485,
1453,
4801,
316,
198,
198,
10097,
10097,
26866,
198,
5450,
1378,
12567,
13,
785,
14,
86,
48988,
861,
14,
6... | 3.748879 | 446 |
#!/usr/bin/env python
import functools
import json
import os
import sys
import typing
import click
import click_log
import tabulate
from natsort import natsorted
from sonic_package_manager.database import PackageEntry, PackageDatabase
from sonic_package_manager.errors import PackageManagerError
from sonic_package_manager.logger import log
from sonic_package_manager.manager import PackageManager
BULLET_UC = '\u2022'
def exit_cli(*args, **kwargs):
""" Print a message and exit with rc 1. """
click.secho(*args, **kwargs)
sys.exit(1)
def show_help(ctx):
""" Show help message and exit process successfully. """
click.echo(ctx.get_help())
ctx.exit(0)
def root_privileges_required(func: typing.Callable) -> typing.Callable:
""" Decorates a function, so that the function is invoked
only if the user is root. """
@functools.wraps(func)
def wrapped_function(*args, **kwargs):
""" Wrapper around func. """
if os.geteuid() != 0:
exit_cli('Root privileges required for this operation', fg='red')
return func(*args, **kwargs)
wrapped_function.__doc__ += '\n\n NOTE: This command requires elevated (root) privileges to run.'
return wrapped_function
def add_options(options):
""" Decorator to append options from
input list to command. """
return _add_options
class MutuallyExclusiveOption(click.Option):
""" This options type is extended with 'mutually_exclusive'
parameter which makes CLI to check if several options are now
used together in single command. """
PACKAGE_SOURCE_OPTIONS = [
click.option('--from-repository',
help='Fetch package directly from image registry repository.',
cls=MutuallyExclusiveOption,
mutually_exclusive=['from_tarball', 'package_expr']),
click.option('--from-tarball',
type=click.Path(exists=True,
readable=True,
file_okay=True,
dir_okay=False),
help='Fetch package from saved image tarball.',
cls=MutuallyExclusiveOption,
mutually_exclusive=['from_repository', 'package_expr']),
click.argument('package-expr',
type=str,
required=False)
]
PACKAGE_COMMON_INSTALL_OPTIONS = [
click.option('--skip-host-plugins',
is_flag=True,
help='Do not install host OS plugins provided by the package (CLI, etc). '
'NOTE: In case when package host OS plugins are set as mandatory in '
'package manifest this option will fail the installation.')
]
PACKAGE_COMMON_OPERATION_OPTIONS = [
click.option('-f', '--force',
is_flag=True,
help='Force operation by ignoring package dependency tree and package manifest validation failures.'),
click.option('-y', '--yes',
is_flag=True,
help='Automatically answer yes on prompts.'),
click_log.simple_verbosity_option(log, help='Either CRITICAL, ERROR, WARNING, INFO or DEBUG. Default is INFO.'),
]
def get_package_status(package: PackageEntry):
""" Returns the installation status message for package. """
if package.built_in:
return 'Built-In'
elif package.installed:
return 'Installed'
else:
return 'Not Installed'
@click.group()
@click.pass_context
def cli(ctx):
""" SONiC Package Manager """
ctx.obj = PackageManager.get_manager()
@cli.group()
@click.pass_context
def repository(ctx):
""" Repository management commands. """
pass
@cli.group()
@click.pass_context
def show(ctx):
""" Package manager show commands. """
pass
@show.group()
@click.pass_context
def package(ctx):
""" Package show commands. """
pass
@cli.command()
@click.pass_context
def list(ctx):
""" List available packages. """
table_header = ['Name', 'Repository', 'Description', 'Version', 'Status']
table_body = []
manager: PackageManager = ctx.obj
try:
for package in natsorted(manager.database):
repository = package.repository or 'N/A'
version = package.version or 'N/A'
description = package.description or 'N/A'
status = get_package_status(package)
table_body.append([
package.name,
repository,
description,
version,
status
])
click.echo(tabulate.tabulate(table_body, table_header))
except PackageManagerError as err:
exit_cli(f'Failed to list repositories: {err}', fg='red')
@package.command()
@add_options(PACKAGE_SOURCE_OPTIONS)
@click.pass_context
def manifest(ctx,
package_expr,
from_repository,
from_tarball):
""" Show package manifest. """
manager: PackageManager = ctx.obj
try:
source = manager.get_package_source(package_expr,
from_repository,
from_tarball)
package = source.get_package()
click.echo(json.dumps(package.manifest.unmarshal(), indent=4))
except Exception as err:
exit_cli(f'Failed to print manifest: {err}', fg='red')
@package.command()
@click.argument('name')
@click.option('--all', is_flag=True, help='Show all available tags in repository.')
@click.option('--plain', is_flag=True, help='Plain output.')
@click.pass_context
def versions(ctx, name, all, plain):
""" Show available versions. """
try:
manager: PackageManager = ctx.obj
versions = manager.get_package_available_versions(name, all)
for version in versions:
if not plain:
click.secho(f'{BULLET_UC} ', bold=True, fg='green', nl=False)
click.secho(f'{version}')
except Exception as err:
exit_cli(f'Failed to get package versions for {name}: {err}', fg='red')
@package.command()
@add_options(PACKAGE_SOURCE_OPTIONS)
@click.pass_context
def changelog(ctx,
package_expr,
from_repository,
from_tarball):
""" Show package changelog. """
manager: PackageManager = ctx.obj
try:
source = manager.get_package_source(package_expr,
from_repository,
from_tarball)
package = source.get_package()
changelog = package.manifest['package']['changelog']
if not changelog:
raise PackageManagerError(f'No changelog for package {package.name}')
for version, entry in changelog.items():
author = entry.get('author') or 'N/A'
email = entry.get('email') or 'N/A'
changes = entry.get('changes') or []
date = entry.get('date') or 'N/A'
click.secho(f'{version}:\n', fg='green', bold=True)
for line in changes:
click.secho(f' {BULLET_UC} {line}', bold=True)
click.secho(f'\n {author} '
f'({email}) {date}', fg='green', bold=True)
click.secho('')
except Exception as err:
exit_cli(f'Failed to print package changelog: {err}', fg='red')
@repository.command()
@click.argument('name', type=str)
@click.argument('repository', type=str)
@click.option('--default-reference', type=str, help='Default installation reference. Can be a tag or sha256 digest in repository.')
@click.option('--description', type=str, help='Optional package entry description.')
@click.pass_context
@root_privileges_required
def add(ctx, name, repository, default_reference, description):
""" Add a new repository to database. """
manager: PackageManager = ctx.obj
try:
manager.add_repository(name,
repository,
description=description,
default_reference=default_reference)
except Exception as err:
exit_cli(f'Failed to add repository {name}: {err}', fg='red')
@repository.command()
@click.argument("name")
@click.pass_context
@root_privileges_required
def remove(ctx, name):
""" Remove repository from database. """
manager: PackageManager = ctx.obj
try:
manager.remove_repository(name)
except Exception as err:
exit_cli(f'Failed to remove repository {name}: {err}', fg='red')
@cli.command()
@click.option('--enable',
is_flag=True,
default=None,
help='Set the default state of the feature to enabled '
'and enable feature right after installation. '
'NOTE: user needs to execute "config save -y" to make '
'this setting persistent.')
@click.option('--set-owner',
type=click.Choice(['local', 'kube']),
default=None,
help='Default owner configuration setting for a feature.')
@click.option('--allow-downgrade',
is_flag=True,
default=None,
help='Allow package downgrade. By default an attempt to downgrade the package '
'will result in a failure since downgrade might not be supported by the package, '
'thus requires explicit request from the user.')
@add_options(PACKAGE_SOURCE_OPTIONS)
@add_options(PACKAGE_COMMON_OPERATION_OPTIONS)
@add_options(PACKAGE_COMMON_INSTALL_OPTIONS)
@click.pass_context
@root_privileges_required
def install(ctx,
package_expr,
from_repository,
from_tarball,
force,
yes,
enable,
set_owner,
skip_host_plugins,
allow_downgrade):
""" Install/Upgrade package using [PACKAGE_EXPR] in format "<name>[=<version>|@<reference>]".
The repository to pull the package from is resolved by lookup in package database,
thus the package has to be added via "sonic-package-manager repository add" command.
In case when [PACKAGE_EXPR] is a package name "<name>" this command will install or upgrade
to a version referenced by "default-reference" in package database. """
manager: PackageManager = ctx.obj
package_source = package_expr or from_repository or from_tarball
if not package_source:
exit_cli('Package source is not specified', fg='red')
if not yes and not force:
click.confirm(f'{package_source} is going to be installed, '
f'continue?', abort=True, show_default=True)
install_opts = {
'force': force,
'skip_host_plugins': skip_host_plugins,
}
if enable is not None:
install_opts['enable'] = enable
if set_owner is not None:
install_opts['default_owner'] = set_owner
if allow_downgrade is not None:
install_opts['allow_downgrade'] = allow_downgrade
try:
manager.install(package_expr,
from_repository,
from_tarball,
**install_opts)
except Exception as err:
exit_cli(f'Failed to install {package_source}: {err}', fg='red')
except KeyboardInterrupt:
exit_cli('Operation canceled by user', fg='red')
@cli.command()
@add_options(PACKAGE_COMMON_OPERATION_OPTIONS)
@add_options(PACKAGE_COMMON_INSTALL_OPTIONS)
@click.argument('name')
@click.pass_context
@root_privileges_required
def reset(ctx, name, force, yes, skip_host_plugins):
""" Reset package to the default version. """
manager: PackageManager = ctx.obj
if not yes and not force:
click.confirm(f'Package {name} is going to be reset to default version, '
f'continue?', abort=True, show_default=True)
try:
manager.reset(name, force, skip_host_plugins)
except Exception as err:
exit_cli(f'Failed to reset package {name}: {err}', fg='red')
except KeyboardInterrupt:
exit_cli('Operation canceled by user', fg='red')
@cli.command()
@add_options(PACKAGE_COMMON_OPERATION_OPTIONS)
@click.option('--keep-config', is_flag=True, help='Keep features configuration in CONFIG DB.')
@click.argument('name')
@click.pass_context
@root_privileges_required
def uninstall(ctx, name, force, yes, keep_config):
""" Uninstall package. """
manager: PackageManager = ctx.obj
if not yes and not force:
click.confirm(f'Package {name} is going to be uninstalled, '
f'continue?', abort=True, show_default=True)
uninstall_opts = {
'force': force,
'keep_config': keep_config,
}
try:
manager.uninstall(name, **uninstall_opts)
except Exception as err:
exit_cli(f'Failed to uninstall package {name}: {err}', fg='red')
except KeyboardInterrupt:
exit_cli('Operation canceled by user', fg='red')
@cli.command()
@add_options(PACKAGE_COMMON_OPERATION_OPTIONS)
@click.option('--dockerd-socket', type=click.Path())
@click.argument('database', type=click.Path())
@click.pass_context
@root_privileges_required
def migrate(ctx, database, force, yes, dockerd_socket):
""" Migrate packages from the given database file. """
manager: PackageManager = ctx.obj
if not yes and not force:
click.confirm('Continue with package migration?', abort=True, show_default=True)
try:
manager.migrate_packages(PackageDatabase.from_file(database), dockerd_socket)
except Exception as err:
exit_cli(f'Failed to migrate packages {err}', fg='red')
except KeyboardInterrupt:
exit_cli('Operation canceled by user', fg='red')
if __name__ == "__main__":
cli()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1257,
310,
10141,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
19720,
198,
198,
11748,
3904,
198,
11748,
3904,
62,
6404,
198,
11748,
7400,
5039,
198,
... | 2.394751 | 5,753 |
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split
from sklearn import metrics
data = np.loadtxt('watermelon_data.csv', delimiter=',')
X = data[:, 1:3]
y = data[:, 3]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=2)
# sover=['lsqr', 'svd', 'eigen'] 最小二乘,奇异值分解,特征分解
model = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(metrics.confusion_matrix(y_test, y_pred))
print(metrics.classification_report(y_test, y_pred))
| [
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
15410,
3036,
42483,
62,
20930,
1330,
44800,
15642,
3036,
42483,
32750,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
198,
6738,
1341,
35720,
1330,... | 2.38403 | 263 |
import logging
logger = logging.getLogger(__name__)
import numpy as np
import cv2
from ipapi.base.ipt_abstract import IptBase
from ipapi.base import ip_common as ipc
| [
11748,
18931,
201,
198,
201,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
201,
198,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
269,
85,
17,
201,
198,
201,
198,
6738,
20966,
15042,
13,
8692... | 2.571429 | 70 |
from typing import Any, List
from core.config import ConfigurableManager, ConfigurableOptions, Configuration
from core.emulator.enumerations import ConfigDataTypes, RegisterTlvs
from core.plugins.sdt import Sdt
class SessionConfig(ConfigurableManager, ConfigurableOptions):
"""
Provides session configuration.
"""
name: str = "session"
options: List[Configuration] = [
Configuration(
_id="controlnet", _type=ConfigDataTypes.STRING, label="Control Network"
),
Configuration(
_id="controlnet0", _type=ConfigDataTypes.STRING, label="Control Network 0"
),
Configuration(
_id="controlnet1", _type=ConfigDataTypes.STRING, label="Control Network 1"
),
Configuration(
_id="controlnet2", _type=ConfigDataTypes.STRING, label="Control Network 2"
),
Configuration(
_id="controlnet3", _type=ConfigDataTypes.STRING, label="Control Network 3"
),
Configuration(
_id="controlnet_updown_script",
_type=ConfigDataTypes.STRING,
label="Control Network Script",
),
Configuration(
_id="enablerj45",
_type=ConfigDataTypes.BOOL,
default="1",
label="Enable RJ45s",
),
Configuration(
_id="preservedir",
_type=ConfigDataTypes.BOOL,
default="0",
label="Preserve session dir",
),
Configuration(
_id="enablesdt",
_type=ConfigDataTypes.BOOL,
default="0",
label="Enable SDT3D output",
),
Configuration(
_id="sdturl",
_type=ConfigDataTypes.STRING,
default=Sdt.DEFAULT_SDT_URL,
label="SDT3D URL",
),
]
config_type: RegisterTlvs = RegisterTlvs.UTILITY
def get_config(
self,
_id: str,
node_id: int = ConfigurableManager._default_node,
config_type: str = ConfigurableManager._default_type,
default: Any = None,
) -> str:
"""
Retrieves a specific configuration for a node and configuration type.
:param _id: specific configuration to retrieve
:param node_id: node id to store configuration for
:param config_type: configuration type to store configuration for
:param default: default value to return when value is not found
:return: configuration value
"""
value = super().get_config(_id, node_id, config_type, default)
if value == "":
value = default
return value
def get_config_bool(self, name: str, default: Any = None) -> bool:
"""
Get configuration value as a boolean.
:param name: configuration name
:param default: default value if not found
:return: boolean for configuration value
"""
value = self.get_config(name)
if value is None:
return default
return value.lower() == "true"
def get_config_int(self, name: str, default: Any = None) -> int:
"""
Get configuration value as int.
:param name: configuration name
:param default: default value if not found
:return: int for configuration value
"""
value = self.get_config(name, default=default)
if value is not None:
value = int(value)
return value
| [
6738,
19720,
1330,
4377,
11,
7343,
198,
198,
6738,
4755,
13,
11250,
1330,
17056,
11970,
13511,
11,
17056,
11970,
29046,
11,
28373,
198,
6738,
4755,
13,
368,
8927,
13,
268,
6975,
602,
1330,
17056,
6601,
31431,
11,
17296,
51,
6780,
82,
... | 2.307233 | 1,507 |
while True:
lengths = list(map(int, input().split()))
lengths.sort()
if lengths[2] == 0:
break
if lengths[0] ** 2 + lengths[1] ** 2 == lengths[2] ** 2:
print('right')
else:
print('wrong')
| [
4514,
6407,
25,
198,
220,
220,
220,
20428,
796,
1351,
7,
8899,
7,
600,
11,
5128,
22446,
35312,
3419,
4008,
198,
220,
220,
220,
20428,
13,
30619,
3419,
628,
220,
220,
220,
611,
20428,
58,
17,
60,
6624,
657,
25,
198,
220,
220,
220,
... | 2.207547 | 106 |
import re
import falcon
RE_CHECKBOX = re.compile(r"(on|yes|1|true)$", re.IGNORECASE)
RE_USERNAME = re.compile(r"[a-z][a-z0-9]{1,31}$")
RE_PASSWORD = re.compile(r"[A-Za-z0-9@#$%^&+=]{8,}$")
RE_DATE = re.compile(r"\d\d\d\d-\d\d-\d\d$")
RE_PHONE = re.compile(r"\+[0-9]+( [0-9]+)*$")
RE_EMAIL = re.compile(r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', re.IGNORECASE) # dom
| [
198,
11748,
302,
198,
11748,
24215,
1102,
198,
198,
2200,
62,
50084,
39758,
796,
302,
13,
5589,
576,
7,
81,
18109,
261,
91,
8505,
91,
16,
91,
7942,
8,
3,
1600,
302,
13,
16284,
1581,
2943,
11159,
8,
198,
2200,
62,
29904,
20608,
796... | 1.555556 | 369 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Performance benchmark on molecular graphs generated from protein crystal
structures."""
import json
import sys
import os
import re
from ase import Atoms
from graphdot.graph import Graph
from graphdot.kernel.molecular import Tang2019MolecularKernel

# Parse "-name=value" command-line options, coercing each value to the
# narrowest type that parses: int, then float, then plain string.
arg_dict = {}
for arg in sys.argv[1:]:
    m = re.fullmatch(r"-([\w\d]+)=(.+)", arg)
    if m:
        try:
            arg_dict[m.group(1)] = int(m.group(2))
        except ValueError:
            try:
                arg_dict[m.group(1)] = float(m.group(2))
            except ValueError:
                arg_dict[m.group(1)] = m.group(2)
    else:
        sys.stderr.write('Unrecognized argument: %s\n' % arg)
        sys.exit(1)
print(arg_dict)

file = arg_dict.pop('file', 'pdb-3kDa-1324.json')
# BUG FIX: the popped value may be an int (e.g. "-active=1" is coerced to int
# above) and the default was the int 1, so the string concatenation below
# raised TypeError. Convert to str before building the JSON list literal.
active = json.loads('[' + str(arg_dict.pop('active', 1)) + ']')
zoom = arg_dict.pop('zoom', 1.5)
# reorder = arg_dict['reorder'] if 'reorder' in arg_dict else 'natural'

# Load the PDB molecule list, trying the CWD first and then the script's
# own directory.
sys.stderr.write('Loading file %s\n' % file)
try:
    pdb_json = json.loads(open(file).read())
except FileNotFoundError:
    pdb_json = json.loads(
        open(os.path.join(os.path.dirname(__file__), file)).read()
    )

# Build one molecular graph per selected entry; 'zoom' scales the adjacency
# bandwidth parameter h.
graph_list = []
for i in active:
    mol = pdb_json[i]
    sys.stderr.write(
        '%5d: %s, %d atoms\n' % (i, mol['pdb_id'], len(mol['sym']))
    )
    atoms = Atoms(mol['sym'], mol['xyz'])
    graph_list.append(Graph.from_ase(atoms, adjacency=dict(h=zoom)))

# Evaluate the full kernel (Gram) matrix over the selected graphs.
kernel = Tang2019MolecularKernel()
print(kernel(graph_list))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
32273,
18335,
319,
18955,
28770,
7560,
422,
7532,
15121,
198,
7249,
942,
526,
15931,
198,
11748,
33918,
198,
11748... | 2.202035 | 688 |
import os
# Entry-point guard: run main() only when executed as a script, not on
# import.  NOTE(review): main() is not defined in this fragment -- it is
# presumably defined earlier in the file.
if __name__ == '__main__':
    main()
| [
11748,
28686,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.318182 | 22 |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
A Python interface to mimic numpy.einsum
'''
import sys
import re
import ctypes
import numpy
from pyscf.lib import misc
# Load the compiled TBLIS extension library shipped with pyscf.
libtblis = misc.load_library('libtblis')
# C signature of as_einsum: three (buffer, ndim, shape, strides, labels)
# tensor descriptors (a, b, c), the TBLIS dtype enum, and alpha/beta scalars.
libtblis.as_einsum.restype = None
libtblis.as_einsum.argtypes = (
    numpy.ctypeslib.ndpointer(), ctypes.c_int,
    ctypes.POINTER(ctypes.c_size_t), ctypes.POINTER(ctypes.c_size_t),
    ctypes.POINTER(ctypes.c_char),
    numpy.ctypeslib.ndpointer(), ctypes.c_int,
    ctypes.POINTER(ctypes.c_size_t), ctypes.POINTER(ctypes.c_size_t),
    ctypes.POINTER(ctypes.c_char),
    numpy.ctypeslib.ndpointer(), ctypes.c_int,
    ctypes.POINTER(ctypes.c_size_t), ctypes.POINTER(ctypes.c_size_t),
    ctypes.POINTER(ctypes.c_char),
    ctypes.c_int,
    numpy.ctypeslib.ndpointer(), numpy.ctypeslib.ndpointer()
)
# Map supported numpy dtypes to the TBLIS dtype enum values.
tblis_dtype = {
    numpy.dtype(numpy.float32) : 0,
    numpy.dtype(numpy.double) : 1,
    numpy.dtype(numpy.complex64) : 2,
    numpy.dtype(numpy.complex128) : 3,
}
# Contractions with operands smaller than this fall back to numpy.einsum
# (the C-call overhead outweighs the benefit for tiny tensors).
EINSUM_MAX_SIZE = getattr(misc.__config__, 'lib_einsum_max_size', 2000)
# Keep a reference to the original numpy implementation for the fallbacks.
_numpy_einsum = numpy.einsum
def _contract(subscripts, *tensors, **kwargs):
    '''
    c = alpha * contract(a, b) + beta * c

    Binary tensor contraction in the spirit of numpy.einsum, dispatched to
    the TBLIS library for large floating-point operands; falls back to
    numpy.einsum otherwise.

    Args:
        tensors (list of ndarray) : Tensors for the operation (exactly the
            first two are used).
    Kwargs:
        out (ndarray) : If provided, the calculation is done into this array.
        dtype (ndarray) : If provided, forces the calculation to use the data
            type specified.
        alpha (number) : Default is 1
        beta (number) :  Default is 0
    '''
    a = numpy.asarray(tensors[0])
    b = numpy.asarray(tensors[1])
    # Small operands: the C-call overhead is not worth it, use numpy directly.
    if not kwargs and (a.size < EINSUM_MAX_SIZE or b.size < EINSUM_MAX_SIZE):
        return _numpy_einsum(subscripts, a, b)
    # TBLIS only handles real/complex floating point; anything else also
    # falls back to numpy.
    c_dtype = kwargs.get('dtype', numpy.result_type(a, b))
    if (not (numpy.issubdtype(c_dtype, numpy.floating) or
             numpy.issubdtype(c_dtype, numpy.complexfloating))):
        return _numpy_einsum(subscripts, a, b)
    # Split "ab,bc->ac" into per-operand label strings.
    sub_idx = re.split(',|->', subscripts)
    indices = ''.join(sub_idx)
    if '->' not in subscripts:
        # Implicit output: keep only characters that appear once in the
        # subscripts for c_descr (einsum's implicit-mode convention).
        for x in set(indices):
            if indices.count(x) > 1:
                indices = indices.replace(x, '')
        sub_idx += [indices]
    alpha = kwargs.get('alpha', 1)
    beta = kwargs.get('beta', 0)
    # Promote the computation dtype so alpha/beta are representable, then
    # cast everything to the common dtype expected by the C kernel.
    c_dtype = numpy.result_type(c_dtype, alpha, beta)
    alpha = numpy.asarray(alpha, dtype=c_dtype)
    beta = numpy.asarray(beta , dtype=c_dtype)
    a = numpy.asarray(a, dtype=c_dtype)
    b = numpy.asarray(b, dtype=c_dtype)
    a_shape = a.shape
    b_shape = b.shape
    a_descr, b_descr, c_descr = sub_idx
    a_shape_dic = dict(zip(a_descr, a_shape))
    b_shape_dic = dict(zip(b_descr, b_shape))
    # Contracted labels shared by a and b must have matching extents.
    if any(a_shape_dic[x] != b_shape_dic[x]
           for x in set(a_descr).intersection(b_descr)):
        raise ValueError('operands dimension error for "%s" : %s %s'
                         % (subscripts, a_shape, b_shape))
    ab_shape_dic = a_shape_dic
    ab_shape_dic.update(b_shape_dic)
    c_shape = tuple([ab_shape_dic[x] for x in c_descr])
    out = kwargs.get('out', None)
    if out is None:
        order = kwargs.get('order', 'C')
        c = numpy.empty(c_shape, dtype=c_dtype, order=order)
    else:
        assert(out.dtype == c_dtype)
        assert(out.shape == c_shape)
        c = out
    # TBLIS expects shapes and strides as size_t arrays, with strides in
    # units of elements (numpy strides are in bytes, hence the division).
    a_shape = (ctypes.c_size_t*a.ndim)(*a_shape)
    b_shape = (ctypes.c_size_t*b.ndim)(*b_shape)
    c_shape = (ctypes.c_size_t*c.ndim)(*c_shape)
    nbytes = c_dtype.itemsize
    a_strides = (ctypes.c_size_t*a.ndim)(*[x//nbytes for x in a.strides])
    b_strides = (ctypes.c_size_t*b.ndim)(*[x//nbytes for x in b.strides])
    c_strides = (ctypes.c_size_t*c.ndim)(*[x//nbytes for x in c.strides])
    # Dispatch to the C kernel; c is written in place.
    libtblis.as_einsum(a, a.ndim, a_shape, a_strides, a_descr.encode('ascii'),
                       b, b.ndim, b_shape, b_strides, b_descr.encode('ascii'),
                       c, c.ndim, c_shape, c_strides, c_descr.encode('ascii'),
                       tblis_dtype[c_dtype], alpha, beta)
    return c
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
1946,
12,
7908,
383,
9485,
6173,
37,
34152,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
... | 2.167745 | 2,164 |
import sqlite3
import tweepy as tw
# Author:Mitos-Kotsis Antonios #
# cs3028@uoi.gr #
#========================================================#
db_connection=sqlite3.connect('tweet.db')
cursor=db_connection.cursor()
update_cursor=db_connection.cursor()
#Defining personal dev keys
consumer_key=''
consumer_secret_key=''
access_token=''
access_secret_token=''
auth=tw.OAuthHandler(consumer_key,consumer_secret_key)
auth.set_access_token(access_token,access_secret_token)
api=tw.API(auth,wait_on_rate_limit=True)
#add a column with the full text of the tweet in the coronavirus table
addDTextCol="ALTER TABLE coronavirus_table ADD COLUMN TWEET_TEXT TEXT"
cursor.execute(addDTextCol)
tweets=cursor.execute("SELECT TWEET_ID FROM coronavirus_table")
for row in tweets:
try:
tweet=api.get_status(row[0],tweet_mode="extended")
except:
continue
if hasattr(tweet, "retweeted_status"): # Check if Retweet
try:
print(tweet.retweeted_status.extended_tweet["full_text"]+'\n')
updateAction="UPDATE coronavirus_table SET TWEET_TEXT=(?) WHERE TWEET_ID= (?)"
update_cursor.execute(updateAction,(tweet.retweeted_status.extended_tweet["full_text"],row[0]))
except AttributeError:
print(tweet.retweeted_status.full_text+'\n')
updateAction="UPDATE coronavirus_table SET TWEET_TEXT=(?) WHERE TWEET_ID= (?)"
update_cursor.execute(updateAction,(tweet.retweeted_status.full_text,row[0]))
else:
try:
print(tweet.extended_tweet["full_text"]+'\n')
updateAction="UPDATE coronavirus_table SET TWEET_TEXT=(?) WHERE TWEET_ID= (?)"
update_cursor.execute(updateAction,(tweet.extended_tweet["full_text"],row[0]))
except AttributeError:
print(tweet.full_text+'\n')
updateAction="UPDATE coronavirus_table SET TWEET_TEXT=(?) WHERE TWEET_ID= (?)"
update_cursor.execute(updateAction,(tweet.full_text,row[0]))
db_connection.commit()
db_connection.close()
| [
11748,
44161,
578,
18,
198,
11748,
4184,
538,
88,
355,
665,
198,
2,
197,
197,
197,
197,
13838,
25,
43339,
418,
12,
42,
1747,
271,
9261,
4267,
197,
197,
220,
220,
220,
220,
1303,
198,
2,
197,
197,
197,
197,
197,
197,
6359,
1270,
... | 2.566216 | 740 |
# Nonlinear regression using variational inference for parameters.
# For simplicity we treat output noise variance as a fixed parameter.
# Adapted from
# https://brendanhasz.github.io/2019/07/23/bayesian-density-net.html
import superimport
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pprint import pprint
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
tf.keras.backend.set_floatx('float32')
import svi_mlp_regression_model_tfp as svimlp
import tensorflow_probability as tfp
import numpy as np
import matplotlib.pyplot as plt
import os
figdir = "../figures"
#from sklearn.metrics import mean_absolute_error, make_scorer

sns.reset_defaults()
#sns.set_style('whitegrid')
#sns.set_context('talk')
sns.set_context(context='talk', font_scale=0.7)

tfd = tfp.distributions
np.random.seed(12345)
tf.random.set_seed(12345)

## Make data
x_range = [-20, 60]  # test range (for plotting)
# choose intervals in which training data is observed
#x_ranges = [[-20, -10], [0, 20], [40, 50]]
#ns = [10, 10, 10]
x_ranges = [[-20, -5], [5, 25], [30, 55]]
ns = [100, 100, 100]
#x_ranges = [ [-20, 60]]
#ns = [1000]

# NOTE(review): load_dataset is not defined in this fragment -- presumably
# defined earlier in the file; it should build noisy regression data from
# x_ranges/ns above. Confirm.
y, x, x_tst = load_dataset()
x_train = x
y_train = y[..., np.newaxis]  # add trailing feature dim: (N,) -> (N, 1)
N = x_train.shape[0]

plt.figure()
plt.plot(x, y, 'b.', label='observed')
plt.show()

# Make a TensorFlow Dataset from training data
BATCH_SIZE = 100
data_train = tf.data.Dataset.from_tensor_slices(
    (x_train, y_train)).shuffle(N).batch(BATCH_SIZE)

# Fit the model. Each config toggles weight sampling, KL annealing and the
# flipout gradient estimator so the two runs can be compared.
configs = [
    {'sampling': True, 'kl_factor': 1.0, 'kl_scaling': True, 'flipout': True},
    {'sampling': False, 'kl_factor': 0.0, 'kl_scaling': False, 'flipout': True},
]
nexpts = len(configs)
models = []
elbo_traces = []
for i in range(nexpts):
    ttl = 'experiment {}'.format(configs[i])
    print(ttl)
    sampling = configs[i]['sampling']
    use_kl_scaling = configs[i]['kl_scaling']
    kl_factor = configs[i]['kl_factor']
    flipout = configs[i]['flipout']
    model = svimlp.BayesianDenseRegression([1, 50, 50, 1], flipout=flipout)
    LR = 0.01
    optimizer = tf.keras.optimizers.Adam(lr=LR)

    # this function relies on 'model' and 'optimizer' being in scope (yuk!)
    # BUG FIX: the decorator below was left dangling on the EPOCHS assignment
    # (a SyntaxError). Reconstructed train_step from its calling convention.
    # NOTE(review): assumes BayesianDenseRegression exposes log_likelihood()
    # and kl_loss -- confirm against svi_mlp_regression_model_tfp.
    @tf.function(experimental_relax_shapes=True)
    def train_step(x_data, y_data, kl_mult, sampling):
        """One ELBO gradient step; returns the (scaled-KL) ELBO loss."""
        with tf.GradientTape() as tape:
            log_likelihoods = model.log_likelihood(x_data, y_data, sampling)
            kl_loss = model.kl_loss / N
            elbo_loss = kl_mult * kl_loss - tf.reduce_mean(log_likelihoods)
        gradients = tape.gradient(elbo_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        return elbo_loss

    EPOCHS = 1000
    elbo_trace = np.zeros(EPOCHS)
    # KL annealing: either ramp the KL weight log-linearly or hold it fixed.
    if use_kl_scaling:
        kl_mults = kl_factor * np.logspace(0.0, 1.0, EPOCHS) / 10.0
    else:
        kl_mults = kl_factor * np.ones(EPOCHS)
    for epoch in range(EPOCHS):
        for x_data, y_data in data_train:
            elbo_trace[epoch] += train_step(x_data, y_data, kl_mults[epoch], sampling)
    # Save trained model so we can plot stuff later
    models += [model]
    elbo_traces += [elbo_trace]

    # Plot the ELBO loss (skip the first 10 noisy epochs)
    plt.figure()
    steps = range(10, EPOCHS)
    plt.plot(steps, elbo_trace[steps])
    plt.xlabel('Epoch')
    plt.ylabel('ELBO Loss')
    plt.title(ttl)
    plt.show()

    # Predictive mean +/- 2 stddev from the closed-form head.
    plt.figure()
    plt.plot(x, y, 'b.', label='observed')
    pred = model(x_tst, sampling=False)
    m = pred[:, 0]
    s = pred[:, 1]
    plt.plot(x_tst, m, 'r', linewidth=4, label='mean')
    plt.plot(x_tst, m + 2 * s, 'g', linewidth=2, label=r'mean + 2 stddev')
    plt.plot(x_tst, m - 2 * s, 'g', linewidth=2, label=r'mean - 2 stddev')
    plt.title(ttl)
    plt.show()

    # Posterior-predictive draws: empirical mean +/- 2 stddev over samples.
    nsamples = 1000
    samples = model.samples(x_tst, nsamples)  # ntst x nsamples
    m = np.mean(samples, axis=1)
    s = np.std(samples, axis=1)
    n_tst = x_tst.shape[0]
    ndx = range(10, n_tst)
    plt.plot(x, y, 'b.', label='observed')
    plt.plot(x_tst[ndx], m[ndx], 'r', linewidth=4, label='mean')
    plt.plot(x_tst[ndx], m[ndx] + 2 * s[ndx], 'g', linewidth=2, label=r'mean + 2 stddev')
    plt.plot(x_tst[ndx], m[ndx] - 2 * s[ndx], 'g', linewidth=2, label=r'mean - 2 stddev')
    plt.title(ttl)
    plt.show()

# Re-plot everything from the saved models and loss traces.
for i in range(nexpts):
    model = models[i]
    elbo_trace = elbo_traces[i]
    ttl = 'experiment {}'.format(configs[i])
    # Plot the ELBO loss
    plt.figure()
    steps = range(10, EPOCHS)
    plt.plot(steps, elbo_trace[steps])
    plt.xlabel('Epoch')
    plt.ylabel('ELBO Loss')
    plt.title(ttl)
    plt.show()

    plt.figure()
    plt.plot(x, y, 'b.', label='observed')
    pred = model(x_tst, sampling=False)
    m = pred[:, 0]
    s = pred[:, 1]
    plt.plot(x_tst, m, 'r', linewidth=4, label='mean')
    plt.plot(x_tst, m + 2 * s, 'g', linewidth=2, label=r'mean + 2 stddev')
    plt.plot(x_tst, m - 2 * s, 'g', linewidth=2, label=r'mean - 2 stddev')
    plt.title(ttl)
    plt.show()

    nsamples = 1000
    samples = model.samples(x_tst, nsamples)  # ntst x nsamples
    m = np.mean(samples, axis=1)
    s = np.std(samples, axis=1)
    n_tst = x_tst.shape[0]
    ndx = range(10, n_tst)
    plt.plot(x, y, 'b.', label='observed')
    plt.plot(x_tst[ndx], m[ndx], 'r', linewidth=4, label='mean')
    plt.plot(x_tst[ndx], m[ndx] + 2 * s[ndx], 'g', linewidth=2, label=r'mean + 2 stddev')
    plt.plot(x_tst[ndx], m[ndx] - 2 * s[ndx], 'g', linewidth=2, label=r'mean - 2 stddev')
    plt.title(ttl)
    plt.show()
| [
2,
8504,
29127,
20683,
1262,
5553,
864,
32278,
329,
10007,
13,
198,
2,
1114,
21654,
356,
2190,
5072,
7838,
24198,
355,
257,
5969,
11507,
13,
198,
2,
30019,
276,
422,
198,
2,
3740,
1378,
4679,
358,
272,
10134,
89,
13,
12567,
13,
952,... | 2.1221 | 2,457 |
from molsysmt._private_tools.exceptions import BadCallError
# Maps each settable argument to the part of a molecular system it lives on
# ('elements', 'bonds', 'coordinates' or 'box').  Built programmatically to
# keep the element-wise name/id/type triads in one place; insertion order
# matches the hand-written original (it is exported via set_arguments).
where_set_argument = {}

for _item in ('atom', 'group', 'component', 'chain', 'molecule', 'entity'):
    for _attr in ('name', 'id', 'type'):
        where_set_argument['{}_{}'.format(_item, _attr)] = ['elements']

for _key in ('bond_name', 'bond_id', 'bond_type', 'bond_order',
             'bonded_atoms', 'inner_bonded_atoms', 'inner_bond_index'):
    where_set_argument[_key] = ['bonds']

for _key in ('step', 'time', 'frame', 'coordinates'):
    where_set_argument[_key] = ['coordinates']

where_set_argument['box'] = ['box']

# Accepted spelling variants, mapped to their canonical argument name.
set_argument_synonym = {}

# Plural forms of every per-element attribute ('atom_names' -> 'atom_name').
for _item in ('atom', 'group', 'component', 'chain', 'molecule', 'entity'):
    for _attr in ('name', 'id', 'type'):
        _canonical = '{}_{}'.format(_item, _attr)
        set_argument_synonym[_canonical + 's'] = _canonical

# Irregular variants: bonds, the residue->group aliases, and time axes.
set_argument_synonym.update({
    'bond_ids': 'bond_id',
    'bond_types': 'bond_type',
    'bonded_atom': 'bonded_atoms',
    'bonds_order': 'bond_order',
    'inner_bonded_atom': 'inner_bonded_atoms',
    'inner_bond_indices': 'inner_bond_index',
    'residue_index': 'group_index',
    'residue_names': 'group_name',
    'residue_name': 'group_name',
    'residue_ids': 'group_id',
    'residue_id': 'group_id',
    'residue_types': 'group_type',
    'residue_type': 'group_type',
    'steps': 'step',
    'times': 'time',
    'frames': 'frame',
})

# Canonical list of every argument accepted by set().
set_arguments = list(where_set_argument.keys())
| [
6738,
285,
10220,
893,
16762,
13557,
19734,
62,
31391,
13,
1069,
11755,
1330,
7772,
14134,
12331,
198,
198,
3003,
62,
2617,
62,
49140,
796,
1391,
628,
220,
220,
220,
705,
37696,
62,
3672,
6,
1058,
37250,
68,
3639,
6,
4357,
198,
220,
... | 2.195946 | 1,036 |
#!C:\Python27_64\python.exe
# -*- coding: utf-8 -*-
# Launcher for Nuitka
# Thin wrapper installed as a console entry point on Windows: delegate
# directly to Nuitka's command-line main().
import nuitka.__main__
nuitka.__main__.main()
| [
2,
0,
34,
7479,
37906,
1983,
62,
2414,
59,
29412,
13,
13499,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
26385,
329,
399,
5013,
4914,
198,
198,
11748,
299,
5013,
4914,
13,
834,
12417,
834,
198,
77,
501... | 2.2 | 55 |
class SimpleLossCompute:
"A simple loss compute and train function."
| [
198,
4871,
17427,
43,
793,
7293,
1133,
25,
198,
220,
220,
220,
366,
32,
2829,
2994,
24061,
290,
4512,
2163,
526,
198
] | 3.363636 | 22 |
# Copyright (C) 2019 Project AGI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DGSAE class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from pagi.components.sparse_autoencoder_component import SparseAutoencoderComponent
from pagi.utils.tf_utils import tf_build_top_k_mask_op
class DGSAE(SparseAutoencoderComponent):
  """Dentate Gyrus (DG) based on the Sparse Autoencoder."""

  # NOTE(review): declared @staticmethod yet takes 'self' and reads
  # self._hparams -- it only works when called unbound with an instance
  # passed explicitly (e.g. DGSAE._build_kernel_init(component, ...)).
  # Confirm the call site before changing the decorator or signature.
  @staticmethod
  def _build_kernel_init(self, input_area, hidden_size):
    """Custom initialization *does* make a big difference to orthogonality, even with inhibition"""
    # Things I've tried:
    # kernel_initializer = None # default
    # kernel_initializer = tf.initializers.orthogonal(gain=10.0)
    # kernel_initializer = tf.initializers.uniform_unit_scaling(factor=1.0)
    # Random weights with a fraction "knocked out" (zeroed) per hparams,
    # then each hidden unit's incoming weights are L1-normalized.
    num_weights = input_area * hidden_size
    random_values = np.random.rand(num_weights)
    #random_values = random_values * 2.0 - 1.0
    knockout_rate = self._hparams.knockout_rate
    keep_rate = 1.0 - knockout_rate
    initial_mask = np.random.choice([0, 1], size=(num_weights), p=[knockout_rate, keep_rate])
    initial_values = random_values * initial_mask * self._hparams.init_scale
    # For each hidden unit i, scale its column so |weights| sum to 1.
    # Weights are laid out column-major w.r.t. hidden units:
    # offset = j * hidden_size + i (input j, hidden i).
    for i in range(0, hidden_size):
      w_sum = 0.0
      for j in range(0, input_area):
        #offset = i * input_area + j
        offset = j * hidden_size + i
        w_ij = initial_values[offset]
        w_sum = w_sum + abs(w_ij)
      # NOTE(review): w_sum can be 0.0 if a whole column is knocked out,
      # which would divide by zero here -- confirm knockout_rate bounds.
      w_norm = 1.0 / w_sum
      for j in range(0, input_area):
        #offset = i * input_area + j
        offset = j * hidden_size + i
        w_ij = initial_values[offset]
        w_ij = w_ij * w_norm
        initial_values[offset] = w_ij
    kernel_initializer = tf.constant_initializer(initial_values)
    return kernel_initializer

  def _build_filtering(self, training_encoding, testing_encoding):
    """Build the encoding filtering."""
    # NOTE(review): top_k2_input is assigned but never used; the testing
    # path simply reuses the training-filtered encoding (see return).
    top_k_input = training_encoding
    top_k2_input = testing_encoding
    hidden_size = self._hparams.filters
    batch_size = self._hparams.batch_size
    k = int(self._hparams.sparsity)
    inhibition_decay = self._hparams.inhibition_decay
    cells_shape = [hidden_size]
    batch_cells_shape = [batch_size, hidden_size]
    # Per-cell inhibition accumulator, decayed each sample; cells that just
    # fired are suppressed for subsequent samples in the batch.
    inhibition = tf.zeros(cells_shape)
    filtered = tf.constant(np.zeros(batch_cells_shape), dtype=tf.float32)
    training_filtered = filtered
    # Inhibit over time within a batch (because we don't bother having repeats for this).
    for i in range(0, batch_size):
      # Create a mask with a 1 for this batch only
      this_batch_mask_np = np.zeros([batch_size,1])
      this_batch_mask_np[i][0] = 1.0
      this_batch_mask = tf.constant(this_batch_mask_np, dtype=tf.float32)
      # Attenuate candidate activity by how refractory each cell is.
      refraction = 1.0 - inhibition
      refraction_2d = tf.expand_dims(refraction, 0) # add batch dim
      refracted = tf.abs(top_k_input) * refraction_2d
      # Find the "winners". The top k elements in each batch sample. this is
      # what top_k does.
      # ---------------------------------------------------------------------
      top_k_mask = tf_build_top_k_mask_op(refracted, k, batch_size, hidden_size)
      # Retrospectively add batch-sparsity per cell: pick the top-k (for now
      # k=1 only). TODO make this allow top N per batch.
      # ---------------------------------------------------------------------
      batch_filtered = training_encoding * top_k_mask # apply mask 3 to output 2
      this_batch_filtered = batch_filtered * this_batch_mask
      this_batch_topk = top_k_mask * this_batch_mask
      # Update inhibition: decay previous values, set just-fired cells to 1.
      fired = tf.reduce_max(this_batch_topk, axis=0) # reduce over batch
      inhibition = inhibition * inhibition_decay + fired # set to 1
      training_filtered = training_filtered + this_batch_filtered
    testing_filtered = training_filtered
    return training_filtered, testing_filtered
| [
2,
15069,
357,
34,
8,
13130,
4935,
13077,
40,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 2.847619 | 1,575 |
import dgl.data
import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
import numpy as np
import scipy.sparse as sp

# Link prediction on the Cora citation graph (standard DGL tutorial setup).
dataset = dgl.data.CoraGraphDataset()
g = dataset[0]

# Split edge set for training and testing: hold out 10% of the edges as
# positive test examples.
u, v = g.edges()
eids = np.arange(g.number_of_edges())
eids = np.random.permutation(eids)
test_size = int(len(eids) * 0.1)
train_size = g.number_of_edges() - test_size
test_pos_u, test_pos_v = u[eids[:test_size]], v[eids[:test_size]]
train_pos_u, train_pos_v = u[eids[test_size:]], v[eids[test_size:]]

# Find all negative (absent) edges, sample as many as there are real edges,
# and split them the same way as the positives.
adj = sp.coo_matrix((np.ones(len(u)), (u.numpy(), v.numpy())))
adj_neg = 1 - adj.todense() - np.eye(g.number_of_nodes())
neg_u, neg_v = np.where(adj_neg != 0)
neg_eids = np.random.choice(len(neg_u), g.number_of_edges())
test_neg_u, test_neg_v = neg_u[neg_eids[:test_size]], neg_v[neg_eids[:test_size]]
# BUG FIX: the training negatives were sliced with [train_size:], which
# leaves only test_size samples; the training split must mirror the positive
# split and take everything after the first test_size entries.
train_neg_u, train_neg_v = neg_u[neg_eids[test_size:]], neg_v[neg_eids[test_size:]]

# Training graph: the original graph minus the held-out test edges.
train_g = dgl.remove_edges(g, eids[:test_size])
print(train_g.ndata['feat'].shape[1])

from dgl.nn import SAGEConv

# ----------- 2. create model -------------- #
# build a two-layer GraphSAGE model.
# NOTE(review): GraphSAGE and DotPredictor are not defined in this fragment
# -- presumably defined elsewhere in the file (as in the DGL tutorial).
model = GraphSAGE(train_g.ndata['feat'].shape[1], 16)
h = model(train_g, train_g.ndata['feat'])

import dgl.function as fn

pred = DotPredictor()

# Wrap each edge split in a graph over the full node set so the predictor
# can score node-pair embeddings.
train_pos_g = dgl.graph((train_pos_u, train_pos_v), num_nodes=g.number_of_nodes())
train_neg_g = dgl.graph((train_neg_u, train_neg_v), num_nodes=g.number_of_nodes())
test_pos_g = dgl.graph((test_pos_u, test_pos_v), num_nodes=g.number_of_nodes())
test_neg_g = dgl.graph((test_neg_u, test_neg_v), num_nodes=g.number_of_nodes())

print(train_pos_g)
# print(train_pos_g.shape)
print(train_pos_g.ndata)
pos_score = pred(train_pos_g, h)
print(pos_score)
print(pos_score.shape)
11748,
288,
4743,
13,
7890,
198,
198,
11748,
288,
4743,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
340,
861,
10141,
198,
11748,
299,
32152,
355,
45941,
... | 2.324015 | 787 |
# -*- coding:utf-8 -*-
import os
import hashlib
import time
import shutil
# NOTE: Python 2 script (print statements, raw_input).
print "*******************************************************"
print "**************AWD_Auto_Defend_Framework****************"
print "*******************************************************"
# NOTE(review): 'global' at module level is a no-op; tgt is simply the
# module-level default backup directory used by the file_* helpers.
global tgt
tgt = './backup'
# Interactive menu loop.  The handlers (file_tree, file_md5_defense, ...)
# are not defined in this fragment -- presumably defined earlier in the file.
while (1):
    print "*******************************************************"
    print "1.Build dir tree."
    print "2.Start file protect module."
    print "3.Start file monitor module."
    print "4.File backup."
    print "5.File backup remove."
    print "6.PHP file add log."
    choose = int(raw_input('Please Input:'))
    print "*******************************************************"
    # Independent if-chain: exactly one branch matches a valid menu choice;
    # any other number falls through and re-displays the menu.
    if choose == 1:
        file_tree('./')
    if choose == 2:
        file_md5_defense()
    if choose == 3:
        file_md5_check()
    if choose == 4:
        file_backup()
    if choose == 5:
        file_backup_remove()
    if choose == 6:
        file_log_add()
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
12234,
8019,
198,
11748,
640,
220,
198,
11748,
4423,
346,
198,
198,
4798,
366,
17174,
8412,
2466,
8162,
1,
198,
4798,
366,
46068,
1174,
12298,
35,
... | 3.207273 | 275 |
import re
# Correct commas inside of a linked field
# Check for valid link
# Correct publication lists
# Create linked objects
# Fetch an object by source_id (numeric identifier used in source DB)
# Quick check of the number of lines
| [
11748,
302,
198,
198,
2,
22941,
725,
292,
2641,
286,
257,
6692,
2214,
198,
198,
2,
6822,
329,
4938,
2792,
198,
198,
2,
22941,
9207,
8341,
198,
198,
2,
13610,
6692,
5563,
198,
198,
2,
376,
7569,
281,
2134,
416,
2723,
62,
312,
357,
... | 3.809524 | 63 |
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
import visvis.backends as backends
from visvis import BaseFigure
def figure(fig=None):
    """ figure(fig=None)

    Set the specified figure to be the current figure, creating it if
    necessary. fig may be a Figure object, a figure number (a positive
    integer), or None. Returns the specified or created figure.

    Raises an Exception if fig refers to a destroyed figure or is not a
    Figure instance, a positive integer, or None.
    """

    # check if backends are loaded
    if not backends.currentBackend.name:
        backends.use()

    # get function to create new figure
    newFigure = backends.currentBackend.newFigure

    # fig can be a Figure instance ...
    if isinstance(fig, BaseFigure):
        if fig._destroyed:
            raise Exception("Figure has already been destroyed.")
        nr = fig.nr
    # ... or a positive integer
    elif fig is not None:
        # test nr
        try:
            nr = int(fig)
            if nr <= 0:
                raise ValueError()
        except (ValueError, TypeError):
            raise Exception("Figure number should be an integer >=1")
    else:
        nr = None

    # does a figure with that number already exist?
    # BUG FIX: was dict.has_key(), which is deprecated in Python 2 and
    # removed in Python 3; the 'in' operator is equivalent in both.
    if nr and nr in BaseFigure._figures:
        # make that figure current and return it
        fig = BaseFigure._figures[nr]
        BaseFigure._currentNr = nr
        return fig
    else:
        if nr:
            # prepare spot; if no nr given, a spot is chosen in the
            # constructor of BaseFigure...
            BaseFigure._figures[nr] = None
        # create new figure and return
        fig = newFigure()
        fig.title = ''  #_SetTitle("Figure " + str(fig.nr))
        return fig
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
34,
8,
2321,
11,
978,
3876,
22864,
198,
2,
198,
2,
6911,
4703,
318,
9387,
739,
262,
2846,
286,
262,
357,
3605,
8,
347,
10305,
13789,
13,
198,
2,
383... | 2.424284 | 733 |
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import datetime
import random
import string
# Root directory for all pcg log output: ~/.pcg/logs
PCG_LOG_DIR_ROOT = os.path.join(os.path.expanduser('~'), '.pcg', 'logs')
# Per-run log directory: current ISO timestamp (':' replaced with '_' for
# filesystem safety) plus a random 3-letter suffix to avoid collisions
# between runs started within the same instant.
PCG_LOG_DIR = os.path.join(
    PCG_LOG_DIR_ROOT,
    datetime.datetime.now().isoformat().replace(':', '_') + '_{}'.format(
        ''.join(random.choice(string.ascii_letters) for i in range(3))))
# NOTE(review): create_logger is not visible in this fragment -- presumably
# defined above; confirm it configures handlers under PCG_LOG_DIR.
PCG_ROOT_LOGGER = create_logger('pcg_gazebo')
| [
2,
15069,
357,
66,
8,
13130,
532,
383,
23652,
1523,
16588,
329,
21347,
1765,
78,
7035,
198,
2,
1114,
1321,
319,
262,
11756,
6634,
4870,
766,
262,
28536,
2393,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
... | 3.186589 | 343 |
import docker
from docker.utils import create_host_config
from docker.utils.types import Ulimit
from requests.exceptions import ConnectionError, ReadTimeout
import ob2.config as config
| [
11748,
36253,
198,
6738,
36253,
13,
26791,
1330,
2251,
62,
4774,
62,
11250,
198,
6738,
36253,
13,
26791,
13,
19199,
1330,
471,
32374,
198,
6738,
7007,
13,
1069,
11755,
1330,
26923,
12331,
11,
4149,
48031,
198,
198,
11748,
909,
17,
13,
... | 4.108696 | 46 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Tyler - a python tail supporting rolling files
# Copyright (C) 2016 Davide Mastromatteo - @mastro35
# ----------------------------------------------------
import sys
import os
import time
import platform
class Tyler(object):
    """
    Creates an iterable object that returns new lines.

    NOTE(review): __init__, __iter__, _get_next_line and
    _getsize_of_current_file are not visible in this fragment -- they are
    presumably defined elsewhere; the attributes referenced below
    (_fh, _os_is_windows, filename, oldsize, opened_before) appear to be
    initialized there. Confirm before modifying.
    """
    def next(self):
        """
        Return the next line in the file, or propagate StopIteration when
        no new line is available yet.
        """
        my_line = None
        try:
            my_line = self._get_next_line()
        except StopIteration:
            raise
        return my_line
    def __next__(self):
        """`__next__` is the Python 3 version of `next`"""
        return self.next()
    def _has_file_rolled(self):
        """Check if the file has been rolled"""
        # if the size is smaller then before, the file has
        # probabilly been rolled
        if self._fh:
            size = self._getsize_of_current_file()
            if size < self.oldsize:
                return True
            self.oldsize = size
        return False
    def _open_file(self, filename):
        """Open a file to be tailed"""
        if not self._os_is_windows:
            # POSIX: a plain binary open is enough; start at the beginning.
            self._fh = open(filename, "rb")
            self.filename = filename
            self._fh.seek(0, os.SEEK_SET)
            self.oldsize = 0
            return
        # if we're in Windows, we need to use the WIN32 API to open the
        # file without locking it (FILE_SHARE_* lets writers keep going).
        import win32file
        import msvcrt
        handle = win32file.CreateFile(filename,
                                      win32file.GENERIC_READ,
                                      win32file.FILE_SHARE_DELETE |
                                      win32file.FILE_SHARE_READ |
                                      win32file.FILE_SHARE_WRITE,
                                      None,
                                      win32file.OPEN_EXISTING,
                                      0,
                                      None)
        # Convert the Win32 handle into a CRT file descriptor so it can be
        # wrapped by a regular Python file object.
        detached_handle = handle.Detach()
        file_descriptor = msvcrt.open_osfhandle(
            detached_handle, os.O_RDONLY)
        self._fh = open(file_descriptor, "rb")
        self.filename = filename
        self._fh.seek(0, os.SEEK_SET)
        self.oldsize = 0
    def _filehandle(self):
        """
        Return a filehandle to the file being tailed, reopening it when the
        file has been rolled (log rotation).
        """
        # if file is opened and it has been rolled we need to close the file
        # and then to reopen it
        if self._fh and self._has_file_rolled():
            try:
                self._fh.close()
            except Exception:
                pass
            self._fh = None
        # if the file is closed (or has been closed right now), open it
        if not self._fh:
            self._open_file(self.filename)
            # On the very first open, skip straight to the end so only new
            # lines are reported (classic "tail -f" behavior).
            if not self.opened_before:
                self.opened_before = True
                self._fh.seek(0, os.SEEK_END)
        return self._fh
def main():
    """Entry point: tail the file given as the first CLI argument forever."""
    args = sys.argv[1:]
    if not args:
        print("Usage: tyler [filename]")
        sys.exit(0)
    target = args[0]
    if not os.path.isfile(target):
        print("Specified file does not exists")
        sys.exit(8)
    tailer = Tyler(filename=target)
    try:
        # Drain any newly appended lines, then sleep briefly and poll again.
        while True:
            for line in tailer:
                print(line)
            time.sleep(1)
    except KeyboardInterrupt:
        print("Quit signal received")
        sys.exit(0)
# Run the CLI entry point only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
14886,
532,
257,
21015,
7894,
6493,
10708,
3696,
198,
2,
15069,
357,
34,
8,
1584,
2544,
485,
30868,
398,
... | 1.94964 | 1,807 |
from .mdtraj_Topology import from_mdtraj_Topology, to_mdtraj_Topology
from .mdtraj_Trajectory import from_mdtraj_Trajectory
from .mmtf_MMTFDecoder import from_mmtf_MMTFDecoder
from .openmm_PDBFile import from_openmm_PDBFile
from .openmm_GromacsGroFile import from_openmm_GromacsGroFile
from .openmm_Topology import from_openmm_Topology, to_openmm_Topology
from .parmed_Structure import from_parmed_Structure
from .openmm_Modeller import from_openmm_Modeller
from .openmm_Simulation import from_openmm_Simulation
from .pdbfixer_PDBFixer import from_pdbfixer_PDBFixer, to_pdbfixer_PDBFixer
from .openexplorer_Explorer import from_openexplorer_Explorer
from .openexplorer_OpenExplorerReporter import from_openexplorer_OpenExplorerReporter
from .mdanalysis_Universe import from_mdanalysis_Universe
from .pytraj_Topology import from_pytraj_Topology
from .pytraj_Trajectory import from_pytraj_Trajectory
from .rdkit_Mol import from_rdkit_Mol
from .openff_Molecule import from_openff_Molecule
from .openff_Topology import from_openff_Topology
from .nglview_NGLWidget import from_nglview_NGLWidget
from .file_pdb import from_file_pdb, to_file_pdb
from .file_gro import from_file_gro, to_file_gro
from .file_prmtop import from_file_prmtop
from .file_crd import from_file_crd
from .file_h5 import from_file_h5
from .file_msmpk import from_file_msmpk
from .string_aminoacids1 import to_string_aminoacids1
from .string_aminoacids3 import to_string_aminoacids3
from .string_pdb_text import from_string_pdb_text, to_string_pdb_text
from .string_pdb_id import from_string_pdb_id
from .string_smiles import from_string_smiles
| [
6738,
764,
9132,
9535,
73,
62,
9126,
1435,
1330,
422,
62,
9132,
9535,
73,
62,
9126,
1435,
11,
284,
62,
9132,
9535,
73,
62,
9126,
1435,
198,
6738,
764,
9132,
9535,
73,
62,
15721,
752,
652,
1330,
422,
62,
9132,
9535,
73,
62,
15721,
... | 2.782383 | 579 |
from flask import render_template
from app import flask_app
@flask_app.route('/')
@flask_app.route('/software')
@flask_app.route('/publications')
@flask_app.errorhandler(404)
| [
6738,
42903,
1330,
8543,
62,
28243,
198,
198,
6738,
598,
1330,
42903,
62,
1324,
628,
198,
31,
2704,
2093,
62,
1324,
13,
38629,
10786,
14,
11537,
628,
198,
31,
2704,
2093,
62,
1324,
13,
38629,
10786,
14,
43776,
11537,
628,
198,
31,
2... | 2.746269 | 67 |
# -*- coding: utf-8 -*-
import mock
from mig3_client import SubmissionBuilder
def test_minimum_viable_submission(converted_tests):
"""Should produce something"""
submission = SubmissionBuilder("t", "b", converted_tests).build()
assert submission is not None
def test_configuration_id(converted_tests):
"""Should contain target configuration ID used to initialize the builder"""
submission = SubmissionBuilder("t", "b", converted_tests).build()
assert submission.get("target") == "t", submission
def test_build_number(converted_tests):
"""Should contain build number used to initialize the builder"""
submission = SubmissionBuilder("t", "b", converted_tests).build()
assert submission.get("number") == "b", submission
def test_tests():
"""Should contain test results used to initialize the builder"""
submission = SubmissionBuilder("t", "b", ["anything"]).build()
assert submission.get("results") == ["anything"], submission
def test_version_details(converted_tests):
"""Should contain version details from git head commit"""
with mock.patch("mig3_client.git") as patched_git:
patched_git.Repo().head.object.hexsha = "a1" * 20
patched_git.Repo().head.object.author.email = "user@example.com"
submission = SubmissionBuilder("t", "b", converted_tests).build()
assert submission.get("version", {}).get("hash") == ("a1" * 20), submission
assert submission.get("version", {}).get("author") == ("user@example.com"), submission
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
15290,
198,
6738,
37011,
18,
62,
16366,
1330,
42641,
32875,
628,
198,
4299,
1332,
62,
39504,
62,
85,
3379,
62,
7266,
3411,
7,
1102,
13658,
62,
41989,
2599,
198,
... | 3.333333 | 456 |
import click
import os
import sys
import subprocess
import termicoder.utils.display as display
import json
lang_map = {
".py": "python",
".c": "c",
".cpp": "cpp",
".cc": "cpp",
".c++": "cpp",
".java": "java"
}
# TODO: a default name for code file
| [
11748,
3904,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
850,
14681,
198,
11748,
3381,
291,
12342,
13,
26791,
13,
13812,
355,
3359,
198,
11748,
33918,
198,
198,
17204,
62,
8899,
796,
1391,
198,
220,
220,
220,
27071,
9078,
1298,
36... | 2.495495 | 111 |
"""Tests for EVMC Python bindings.
Note, these tests rely on a valid EVMC implementation. Set the EVMC_MODULE
environment variable to your EMVC shared library. For example, if using
evmone compiled in the parent directory:
$ EVMC_MODULE=../evmone/build/lib/libevmone.so python3 test.py
"""
import os
import unittest
from typing import Dict, Optional
import evmc
EVMC_MODULE = os.environ.get("EVMC_MODULE")
if EVMC_MODULE is None:
raise Exception(
f"Please set EVMC_MODULE to the filename of a valid EVMC implementation.")
ZERO256 = (0).to_bytes(32, "big")
ALICE = bytes.fromhex("4be9d79ab0685d9c24ec801e26d1233b61832733")
BOB = bytes.fromhex("65f9e07ccb818f9258ed4b9bc2a8613c04f5db75")
def to_uint256(value: int) -> bytes:
"""Encodes an unsigned integer as a big endian uint256."""
return value.to_bytes(32, "big")
def from_uint256(value: bytes) -> int:
"""Decodes a big endian uint256 into a Python int."""
return int.from_bytes(value, "big")
if __name__ == "__main__":
unittest.main()
| [
37811,
51,
3558,
329,
8696,
9655,
11361,
34111,
13,
198,
198,
6425,
11,
777,
5254,
8814,
319,
257,
4938,
8696,
9655,
7822,
13,
5345,
262,
8696,
9655,
62,
33365,
24212,
198,
38986,
7885,
284,
534,
17228,
15922,
4888,
5888,
13,
1114,
16... | 2.689119 | 386 |
from .context import ezparse
import unittest
if __name__ == '__main__':
unittest.main()
| [
6738,
764,
22866,
1330,
304,
89,
29572,
198,
11748,
555,
715,
395,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.685714 | 35 |
"""
This file defines class GumbelSoftmax.
@author: Clemens Rosenbaum :: cgbr@cs.umass.edu
@created: 6/12/18
"""
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from ..Decision import Decision
class GumbelSoftmax(Decision):
"""
Class GumbelSoftmax defines a decision making procedure that uses the GumbelSoftmax reparameterization trick
to perform differentiable sampling from the categorical distribution.
"""
@staticmethod
class GumbelSoftmaxSampling(nn.Module):
"""
This class defines the core functionality to sample from a gumbel softmax distribution
"""
@staticmethod
| [
37811,
198,
1212,
2393,
15738,
1398,
402,
2178,
417,
18380,
9806,
13,
198,
198,
31,
9800,
25,
3779,
45535,
15564,
24738,
7904,
269,
70,
1671,
31,
6359,
13,
388,
562,
13,
15532,
198,
31,
25598,
25,
718,
14,
1065,
14,
1507,
198,
37811... | 3.281407 | 199 |
import os
import sys
from parsers import parse_midas_data
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("subject_id", help="name of subject to process")
args = parser.parse_args()
subject = args.subject_id
subject_sample_time_map = parse_midas_data.parse_subject_sample_time_map()
for visno in subject_sample_time_map[subject].keys(): # loop over samples
print "Subject:", subject, "Samples for visno ", visno
if len(subject_sample_time_map[subject][visno]) >1:
new_sample_name=subject_sample_time_map[subject][visno][0][0] + 'c'
print new_sample_name
os.system('rm /netapp/home/ngarud/shattuck/BenNanditaProject/MIDAS_intermediate_files_hmp/joined_fastq_files_hmp_combine_sample_reps/' + new_sample_name + '_1.fastq.gz')
os.system('rm /netapp/home/ngarud/shattuck/BenNanditaProject/MIDAS_intermediate_files_hmp/joined_fastq_files_hmp_combine_sample_reps/' + new_sample_name + '_2.fastq.gz')
for i in range(0,len(subject_sample_time_map[subject][visno])):
sample = subject_sample_time_map[subject][visno][i][0]
print sample
os.system('cat /netapp/home/ngarud/shattuck/BenNanditaProject/MIDAS_intermediate_files_hmp/joined_fastq_files_hmp_combine_tech_reps/' + sample + '_1.fastq.gz >> /netapp/home/ngarud/shattuck/BenNanditaProject/MIDAS_intermediate_files_hmp/joined_fastq_files_hmp_combine_sample_reps/' + new_sample_name + '_1.fastq.gz')
os.system('cat /netapp/home/ngarud/shattuck/BenNanditaProject/MIDAS_intermediate_files_hmp/joined_fastq_files_hmp_combine_tech_reps/' + sample + '_2.fastq.gz >> /netapp/home/ngarud/shattuck/BenNanditaProject/MIDAS_intermediate_files_hmp/joined_fastq_files_hmp_combine_sample_reps/' + new_sample_name + '_2.fastq.gz' )
| [
11748,
28686,
198,
11748,
25064,
198,
6738,
13544,
364,
1330,
21136,
62,
13602,
292,
62,
7890,
198,
11748,
1822,
29572,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
48610,
13,
2860,
62,
49140,
7203,
32796,
62,
3... | 2.410811 | 740 |
import os, sys
from incidents import db, IncidentStatus
if __name__ == '__main__':
if os.path.exists("incidents/incidents.db"):
os.remove("incidents/incidents.db")
db.create_all()
db.session.add(IncidentStatus("Open", 1))
db.session.add(IncidentStatus("Pending", 2))
db.session.add(IncidentStatus("Closed", 3))
db.session.commit()
| [
11748,
28686,
11,
25064,
201,
198,
201,
198,
6738,
10207,
1330,
20613,
11,
32731,
19580,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
611,
28686,
13,
6978,
13,
1069,
1023,
7203... | 2.378882 | 161 |
# SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# This module implements the functions needed
# to monitor the glidein factory
#
# Author:
# Igor Sfiligoi (Dec 11th 2006)
#
import copy
import json
import math
import os
import pickle
import re
import time
from glideinwms.lib import cleanupSupport, logSupport, rrdSupport, timeConversion, util, xmlFormat
# list of rrd files that each site has
RRD_LIST = (
"Status_Attributes.rrd",
"Log_Completed.rrd",
"Log_Completed_Stats.rrd",
"Log_Completed_WasteTime.rrd",
"Log_Counts.rrd",
)
############################################################
#
# Configuration
#
############################################################
#######################################################################################################################
#
# condorQStats
#
# This class handles the data obtained from condor_q
#
#######################################################################################################################
# TODO: ['Downtime'] is added to the self.data[client_name] dictionary only if logRequest is called before logSchedd, logClientMonitor
# This is inconsistent and should be changed, Redmine [#17244]
######################################################################################################################
#
# condorLogSummary
#
# This class handles the data obtained from parsing the glidein log files
#
######################################################################################################################
class condorLogSummary:
"""
This class handles the data obtained from parsing the glidein log files
"""
def reset(self):
"""
Replaces old_stats_data with current_stats_data
Sets current_stats_data to empty.
This is called every iteration in order to later
compare the diff of the previous iteration and current one
to find any newly changed jobs (ie newly completed jobs)
"""
# reserve only those that has been around this time
new_stats_data = {}
for c in list(self.stats_diff.keys()):
# but carry over all the users... should not change that often
new_stats_data[c] = self.current_stats_data[c]
self.old_stats_data = new_stats_data
self.current_stats_data = {}
# and flush out the differences
self.stats_diff = {}
def logSummary(self, client_name, stats):
"""
log_stats taken during during an iteration of perform_work are
added/merged into the condorLogSummary class here.
@type stats: dictionary of glideFactoryLogParser.dirSummaryTimingsOut
@param stats: Dictionary keyed by "username:client_int_name"
client_int_name is needed for frontends with multiple groups
"""
if client_name not in self.current_stats_data:
self.current_stats_data[client_name] = {}
for username in list(stats.keys()):
if username not in self.current_stats_data[client_name]:
self.current_stats_data[client_name][username] = stats[username].get_simple()
else:
self.current_stats_data[client_name][username].merge(stats[username])
self.updated = time.time()
self.updated_year = time.localtime(self.updated)[0]
def computeDiff(self):
"""
This function takes the current_stats_data from the current iteration
and the old_stats_data from the last iteration (see reset() function)
to create a diff of the data in the stats_diff dictionary.
This stats_diff will be a dictionary with two entries for each
status: "Entered" and "Exited" denoting which job ids have recently
changed status, ie.
stats_diff[frontend][username:client_int_name]["Completed"]["Entered"]
"""
for client_name in list(self.current_stats_data.keys()):
self.stats_diff[client_name] = {}
if client_name in self.old_stats_data:
stats = self.current_stats_data[client_name]
for username in list(stats.keys()):
if username in self.old_stats_data[client_name]:
self.stats_diff[client_name][username] = stats[username].diff(
self.old_stats_data[client_name][username]
)
def get_stats_data_summary(self):
"""
Summarizes current_stats_data:
Adds up current_stats_data[frontend][user:client][status]
across all username keys.
@return: returns dictionary stats_data[frontend][status]=count
"""
stats_data = {}
for client_name in list(self.current_stats_data.keys()):
out_el = {}
for s in self.job_statuses:
if not (s in ("Completed", "Removed")): # I don't have their numbers from inactive logs
count = 0
for username in list(self.current_stats_data[client_name].keys()):
client_el = self.current_stats_data[client_name][username].data
if (client_el is not None) and (s in list(client_el.keys())):
count += len(client_el[s])
out_el[s] = count
stats_data[client_name] = out_el
return stats_data
# in: entered_list=self.stats_diff[*]['Entered']
# out: entered_list[job_id]{'duration','condor_started','condor_duration','jobsnr',wastemill':{'validation','idle','nosuccess','badput'}}
# in: entered_list=get_completed_data()
# out: {'Lasted':{'2hours':...,...},'Sum':{...:12,...},'JobsNr':...,
# 'Waste':{'validation':{'0m':...,...},...},'WasteTime':{...:{...},...}}
def get_data_summary(self):
"""
Summarizes stats_diff data (computeDiff should have
already been called)
Sums over username in the dictionary
stats_diff[frontend][username][entered/exited][status]
to make stats_data[client_name][entered/exited][status]=count
@return: dictionary[client_name][entered/exited][status]=count
"""
stats_data = {}
for client_name in list(self.stats_diff.keys()):
out_el = {"Current": {}, "Entered": {}, "Exited": {}}
for s in self.job_statuses:
entered = 0
entered_list = []
exited = 0
for username in list(self.stats_diff[client_name].keys()):
diff_el = self.stats_diff[client_name][username]
if (diff_el is not None) and (s in list(diff_el.keys())):
entered_list += diff_el[s]["Entered"]
entered += len(diff_el[s]["Entered"])
exited -= len(diff_el[s]["Exited"])
out_el["Entered"][s] = entered
if not (s in ("Completed", "Removed")): # I don't have their numbers from inactive logs
count = 0
for username in list(self.current_stats_data[client_name].keys()):
stats_el = self.current_stats_data[client_name][username].data
if (stats_el is not None) and (s in list(stats_el.keys())):
count += len(stats_el[s])
out_el["Current"][s] = count
# and we can never get out of the terminal state
out_el["Exited"][s] = exited
elif s == "Completed":
completed_stats = self.get_completed_stats(entered_list)
completed_counts = self.summarize_completed_stats(completed_stats)
out_el["CompletedCounts"] = completed_counts
stats_data[client_name] = out_el
return stats_data
def get_stats_total(self):
"""
@return: Dictionary with keys (wait,idle,running,held)
"""
total = {"Wait": None, "Idle": None, "Running": None, "Held": None}
for k in list(total.keys()):
tdata = []
for client_name in list(self.current_stats_data.keys()):
for username in self.current_stats_data[client_name]:
sdata = self.current_stats_data[client_name][username].data
if (sdata is not None) and (k in list(sdata.keys())):
tdata = tdata + sdata[k]
total[k] = tdata
return total
def get_diff_summary(self):
"""
Flattens stats_diff differential data.
@return: Dictionary of client_name with sub_keys Wait,Idle,Running,Held,Completed,Removed
"""
out_data = {}
for client_name in list(self.stats_diff.keys()):
client_el = {"Wait": None, "Idle": None, "Running": None, "Held": None, "Completed": None, "Removed": None}
for k in list(client_el.keys()):
client_el[k] = {"Entered": [], "Exited": []}
tdata = client_el[k]
# flatten all usernames into one
for username in list(self.stats_diff[client_name].keys()):
sdiff = self.stats_diff[client_name][username]
if (sdiff is not None) and (k in list(sdiff.keys())):
if k == "Completed":
# for completed jobs, add the username
# not for the others since there is no adequate place in the object
for sdel in sdiff[k]["Entered"]:
sdel[4]["username"] = username
for e in list(tdata.keys()):
for sdel in sdiff[k][e]:
tdata[e].append(sdel) # pylint: disable=unsubscriptable-object
out_data[client_name] = client_el
return out_data
def aggregate_frontend_data(self, updated, diff_summary):
"""
This goes into each frontend in the current entry and aggregates
the completed/stats/wastetime data into completed_data.json
at the entry level
"""
entry_data = {"frontends": {}}
for frontend in list(diff_summary.keys()):
fe_dir = "frontend_" + frontend
completed_filename = os.path.join(monitoringConfig.monitor_dir, fe_dir) + "/Log_Completed.json"
completed_stats_filename = os.path.join(monitoringConfig.monitor_dir, fe_dir) + "/Log_Completed_Stats.json"
completed_wastetime_filename = (
os.path.join(monitoringConfig.monitor_dir, fe_dir) + "/Log_Completed_WasteTime.json"
)
try:
with open(completed_filename) as completed_fp:
completed_data = json.load(completed_fp)
with open(completed_stats_filename) as completed_stats_fp:
completed_stats_data = json.load(completed_stats_fp)
with open(completed_wastetime_filename) as completed_wastetime_fp:
completed_wastetime_data = json.load(completed_wastetime_fp)
entry_data["frontends"][frontend] = {
"completed": completed_data,
"completed_stats": completed_stats_data,
"completed_wastetime": completed_wastetime_data,
}
except OSError as e:
self.log.info("Could not find files to aggregate in frontend %s" % fe_dir)
self.log.info(str(e))
continue
monitoringConfig.write_completed_json("completed_data", updated, entry_data)
def write_job_info(self, scheddName, collectorName):
"""The method itereates over the stats_diff dictionary looking for
completed jobs and then fills out a dictionary that contains the
monitoring information needed for this job. Those info looks like:
{
'schedd_name': 'name',
'collector_name': 'name',
'joblist' : {
'2994.000': {'condor_duration': 1328, 'glidein_duration': 1334, 'condor_started': 1, 'numjobs': 0,
'2997.000': {'condor_duration': 1328, 'glidein_duration': 1334, 'condor_started': 1, 'numjobs': 0
...
}
}
:param scheddName: The schedd name to update the job
:param collectorName: The collector name to update the job
"""
jobinfo = {
"schedd_name": scheddName,
"collector_name": collectorName,
"joblist": {},
}
for _sec_name, sndata in self.stats_diff.items():
for _frname, frdata in sndata.items():
for state, jobs in frdata.items():
if state == "Completed":
for job in jobs["Entered"]:
jobid = job[0]
jobstats = job[4]
# This is the dictionary that is going to be written out as a monitoring classad
jobinfo["joblist"][jobid] = {
# activation_claims is a new key in 3.2.19. Using "get" For backward compatiobility,
# but it can be removed in future versions
"activation_claims": jobstats.get("activations_claims", "unknown"),
"glidein_duration": jobstats["glidein_duration"],
# condor_duration could be missing if the glidein had problems and condor was not started
# set it to 0
# and ser condor_started to None if missing
"condor_duration": jobstats.get("condor_duration", 0),
"condor_started": jobstats.get("condor_started", None),
"numjobs": jobstats.get("stats", {}).get("Total", {}).get("jobsnr", "unknown"),
}
# cannot use monitorAggregatorConfig.jobsummary_relname, looks like a circular import
monitoringConfig.write_file("job_summary.pkl", pickle.dumps(jobinfo))
###############################################################################
#
# factoryStatusData
# added by C.W. Murphy starting on 08/09/10
# this class handles the data obtained from the rrd files
#
###############################################################################
class FactoryStatusData:
"""this class handles the data obtained from the rrd files"""
def getUpdated(self):
"""returns the time of last update"""
return xmlFormat.time2xml(self.updated, "updated", indent_tab=self.tab, leading_tab=self.tab)
def fetchData(self, rrd_file, pathway, res, start, end):
"""Uses rrdtool to fetch data from the clients. Returns a dictionary of lists of data. There is a list for each element.
rrdtool fetch returns 3 tuples: a[0], a[1], & a[2].
[0] lists the resolution, start and end time, which can be specified as arugments of fetchData.
[1] returns the names of the datasets. These names are listed in the key.
[2] is a list of tuples. each tuple contains data from every dataset. There is a tuple for each time data was collected."""
# use rrdtool to fetch data
baseRRDSupport = rrdSupport.rrdSupport()
try:
fetched = baseRRDSupport.fetch_rrd(pathway + rrd_file, "AVERAGE", resolution=res, start=start, end=end)
except:
# probably not created yet
self.log.debug("Failed to load %s" % (pathway + rrd_file))
return {}
# converts fetched from tuples to lists
fetched_names = list(fetched[1])
fetched_data_raw = fetched[2][
:-1
] # drop the last entry... rrdtool will return one more than needed, and often that one is unreliable (in the python version)
fetched_data = []
for data in fetched_data_raw:
fetched_data.append(list(data))
# creates a dictionary to be filled with lists of data
data_sets = {}
for name in fetched_names:
data_sets[name] = []
# check to make sure the data exists
all_empty = True
for data_set in data_sets:
index = fetched_names.index(data_set)
for data in fetched_data:
if isinstance(data[index], (int, float)):
data_sets[data_set].append(data[index])
all_empty = False
if all_empty:
# probably not updated recently
return {}
else:
return data_sets
def getData(self, input_val, monitoringConfig=None):
"""Return the data fetched by rrdtool as a dictionary
This also modifies the rrd data dictionary for the client (input_val) in all RRD files
and appends the client to the list of frontends
Where this side effect is used:
- totals are updated in Entry.writeStats (writing the XML)
- frontend data in check_and_perform_work
"""
if monitoringConfig is None:
monitoringConfig = globals()["monitoringConfig"]
folder = str(input_val)
if folder == self.total:
client = folder
else:
folder_name = folder.split("@")[-1]
client = folder_name.join(["frontend_", "/"])
if client not in self.frontends:
self.frontends.append(client)
for rrd in RRD_LIST:
self.data[rrd][client] = {}
for res_raw in self.resolution:
# calculate the best resolution
res_idx = 0
rrd_res = monitoringConfig.rrd_archives[res_idx][2] * monitoringConfig.rrd_step
period_mul = int(res_raw / rrd_res)
while period_mul >= monitoringConfig.rrd_archives[res_idx][3]:
# not all elements in the higher bucket, get next lower resolution
res_idx += 1
rrd_res = monitoringConfig.rrd_archives[res_idx][2] * monitoringConfig.rrd_step
period_mul = int(res_raw / rrd_res)
period = period_mul * rrd_res
self.data[rrd][client][period] = {}
end = (
int(time.time() / rrd_res) - 1
) * rrd_res # round due to RRDTool requirements, -1 to avoid the last (partial) one
start = end - period
try:
fetched_data = self.fetchData(
rrd_file=rrd, pathway=self.base_dir + "/" + client, start=start, end=end, res=rrd_res
)
for data_set in fetched_data:
self.data[rrd][client][period][data_set] = self.average(fetched_data[data_set])
except TypeError:
self.log.exception("FactoryStatusData:fetchData: ")
return self.data
def getXMLData(self, rrd):
"""Return a XML formatted string the specific RRD file for the data fetched from a given site (all clients+total).
This also has side effects in the getData(self.total) invocation:
- modifies the rrd data dictionary (all RRDs) for the total for this entry
- and appends the total (self.total aka 'total/') to the list of clients (frontends)
@param rrd:
@return: XML formatted string with stats data
"""
# create a string containing the total data
total_xml_str = self.tab + "<total>\n"
# this is invoked to trigger the side effect but the data is retrieved directly from self.data dict below
get_data_total = self.getData(self.total)
try:
total_data = self.data[rrd][self.total]
total_xml_str += (
xmlFormat.dict2string(
total_data,
dict_name="periods",
el_name="period",
subtypes_params={"class": {}},
indent_tab=self.tab,
leading_tab=2 * self.tab,
)
+ "\n"
)
except (NameError, UnboundLocalError):
self.log.exception("FactoryStatusData:total_data: ")
total_xml_str += self.tab + "</total>\n"
# create a string containing the frontend data
frontend_xml_str = self.tab + "<frontends>\n"
for frontend in self.frontends:
fe_name = frontend.split("/")[0]
frontend_xml_str += 2 * self.tab + '<frontend name="' + fe_name + '">\n'
try:
frontend_data = self.data[rrd][frontend]
frontend_xml_str += (
xmlFormat.dict2string(
frontend_data,
dict_name="periods",
el_name="period",
subtypes_params={"class": {}},
indent_tab=self.tab,
leading_tab=3 * self.tab,
)
+ "\n"
)
except (NameError, UnboundLocalError):
self.log.exception("FactoryStatusData:frontend_data: ")
frontend_xml_str += 2 * self.tab + "</frontend>"
frontend_xml_str += self.tab + "</frontends>\n"
data_str = total_xml_str + frontend_xml_str
return data_str
def writeFiles(self, monitoringConfig=None):
"""Write an xml file for the data fetched from a given site.
Write rrd files
NOTE: writeFiles triggers the side effect of updating the rrd for totals (via getXMLData/getData)
@param monitoringConfig:
@return: None
"""
if monitoringConfig is None:
monitoringConfig = globals()["monitoringConfig"]
for rrd in RRD_LIST:
file_name = "rrd_" + rrd.split(".")[0] + ".xml"
xml_str = (
'<?xml version="1.0" encoding="ISO-8859-1"?>\n\n'
+ "<glideFactoryEntryRRDStats>\n"
+ self.getUpdated()
+ "\n"
+ self.getXMLData(rrd)
+ "</glideFactoryEntryRRDStats>"
)
try:
monitoringConfig.write_file(file_name, xml_str)
except OSError:
self.log.exception("FactoryStatusData:write_file: ")
return
##############################################################################
#
# create an XML file out of glidein.descript, frontend.descript,
# entry.descript, attributes.cfg, and params.cfg
#
#############################################################################
class Descript2XML:
"""
create an XML file out of glidein.descript, frontend.descript,
entry.descript, attributes.cfg, and params.cfg
TODO: The XML is used by ... "the monioring page"?
The file created is descript.xml, w/ glideFactoryDescript and glideFactoryEntryDescript elements
"""
def getUpdated(self):
"""returns the time of last update"""
return xmlFormat.time2xml(time.time(), "updated", indent_tab=self.tab, leading_tab=self.tab)
############### P R I V A T E ################
##################################################
##################################################
##################################################
# def tmp2final(fname):
# """
# KEL this exact method is also in glideinFrontendMonitoring.py
# """
# try:
# os.remove(fname + "~")
# except:
# pass
#
# try:
# os.rename(fname, fname + "~")
# except:
# pass
#
# try:
# os.rename(fname + ".tmp", fname)
# except:
# print "Failed renaming %s.tmp into %s" % (fname, fname)
# return
##################################################
# global configuration of the module
monitoringConfig = MonitoringConfig()
| [
2,
30628,
55,
12,
8979,
15269,
8206,
25,
3717,
376,
7780,
72,
4992,
10302,
11,
11419,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
2,
198,
2,
4935,
25,
198,
2,
220,
220,
48089,
259,
54,
5... | 2.223425 | 10,903 |
# Datetime Packages
from time import mktime
from datetime import timedelta
from datetime import datetime
import pytz
# Celery Imports
from celery.task import Task, PeriodicTask
from celery.utils.log import get_task_logger
# Feedparser
import feedparser
# Models
from models import Feed, FeedItem
# Logger object
LOGGER = get_task_logger(__name__)
class EnqueueFeedUpdates(PeriodicTask):
"""
Updates the research agents for all characters.
"""
# Run every minute
run_every = timedelta(minutes=1)
class UpdateFeed(Task):
"""
Update a feed's items. Remove all old items from the DB.
"""
| [
2,
16092,
8079,
6400,
1095,
198,
6738,
640,
1330,
33480,
2435,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
12972,
22877,
198,
198,
2,
15248,
1924,
1846,
3742,
198,
6738,
18725,
1924,
1... | 3.094059 | 202 |
import random
import yaml
from itertools import chain, groupby
START_TOK = "<START>"
STOP_TOK = "<STOP>"
STOP = object()
START = object()
# instantiate a Markov object with the source file
| [
11748,
4738,
198,
11748,
331,
43695,
198,
6738,
340,
861,
10141,
1330,
6333,
11,
1448,
1525,
198,
198,
2257,
7227,
62,
10468,
42,
796,
33490,
2257,
7227,
24618,
198,
2257,
3185,
62,
10468,
42,
796,
33490,
2257,
3185,
24618,
198,
198,
... | 2.909091 | 66 |
# -*- coding: utf-8 -*-
"""Unit test package for desktop_screen_recorder."""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
26453,
1332,
5301,
329,
11364,
62,
9612,
62,
8344,
2875,
526,
15931,
198
] | 2.689655 | 29 |
import base64
import io
import json
import zlib
from requests.structures import CaseInsensitiveDict
from .compat import HTTPResponse, text_type
| [
11748,
2779,
2414,
198,
11748,
33245,
198,
11748,
33918,
198,
11748,
1976,
8019,
198,
198,
6738,
7007,
13,
7249,
942,
1330,
8913,
20376,
18464,
35,
713,
198,
198,
6738,
764,
5589,
265,
1330,
7154,
51,
4805,
9774,
2591,
11,
2420,
62,
4... | 3.355556 | 45 |
# core django imports
from django.contrib.sitemaps import Sitemap
# app imports
from posts.models import Post
class PostSiteMap(Sitemap):
"""
A sitemap class indicating the objects, change frequency, and
various pages of our website for better search engine indexing
"""
changefreq = 'weekly'
priority = 0.9
def items(self):
"""
Returns the queryset of objects to include in our sitemap
"""
return Post.published.all()
| [
2,
4755,
42625,
14208,
17944,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
82,
9186,
1686,
1330,
311,
9186,
499,
198,
198,
2,
598,
17944,
198,
6738,
6851,
13,
27530,
1330,
2947,
628,
198,
4871,
2947,
29123,
13912,
7,
50,
9186,
499,
2... | 2.831395 | 172 |
# Generated by Django 3.0.5 on 2020-04-18 18:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
20,
319,
12131,
12,
3023,
12,
1507,
1248,
25,
2816,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14... | 3.04918 | 61 |
from ...attrs import LIKE_NUM
# Eleven, twelve etc. are written separate: on bir, on iki
_num_words = [
"bir",
"iki",
"üç",
"dörd",
"beş",
"altı",
"yeddi",
"səkkiz",
"doqquz",
"on",
"iyirmi",
"otuz",
"qırx",
"əlli",
"altmış",
"yetmiş",
"səksən",
"doxsan",
"yüz",
"min",
"milyon",
"milyard",
"trilyon",
"kvadrilyon",
"kentilyon",
]
_ordinal_words = [
"birinci",
"ikinci",
"üçüncü",
"dördüncü",
"beşinci",
"altıncı",
"yedinci",
"səkkizinci",
"doqquzuncu",
"onuncu",
"iyirminci",
"otuzuncu",
"qırxıncı",
"əllinci",
"altmışıncı",
"yetmişinci",
"səksəninci",
"doxsanıncı",
"yüzüncü",
"mininci",
"milyonuncu",
"milyardıncı",
"trilyonuncu",
"kvadrilyonuncu",
"kentilyonuncu",
]
_ordinal_endings = ("inci", "ıncı", "nci", "ncı", "uncu", "üncü")
LEX_ATTRS = {LIKE_NUM: like_num}
| [
6738,
2644,
1078,
3808,
1330,
34178,
62,
41359,
628,
198,
2,
37339,
11,
14104,
3503,
13,
389,
3194,
4553,
25,
319,
35122,
11,
319,
220,
5580,
198,
198,
62,
22510,
62,
10879,
796,
685,
198,
220,
220,
220,
366,
65,
343,
1600,
198,
2... | 1.618182 | 605 |
#Project Euler Queestion 5
#2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
#What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
highest_factor = 20
factor = highest_factor
x = highest_factor
while x >= 0:
if (x % factor == 0):
if (factor == 2):
smallest_multiple = x
factor = highest_factor
break
else:
factor -= 1
else:
x += highest_factor
factor = highest_factor
#for factor in range(highest_factor,0,-1):
#print(smallest_multiple, "/", factor, "=", (int(smallest_multiple/factor)))
#print()
print (smallest_multiple, "is the smallest number that is evenly divisible by every number from 1 to", highest_factor) | [
2,
16775,
412,
18173,
4670,
395,
295,
642,
198,
198,
2,
1495,
1238,
318,
262,
18197,
1271,
326,
460,
307,
9086,
416,
1123,
286,
262,
3146,
422,
352,
284,
838,
1231,
597,
17675,
13,
198,
2,
2061,
318,
262,
18197,
3967,
1271,
326,
3... | 2.8125 | 288 |
"""
Define a function to generate dictionary from matrix
do the same for other matrix
read sequences from fasta file
define function with arguments (seq1, seq2, matrix) to score the alignments
call the function for each matrix and each alignment
print results in some nice way
"""
BLOSUM=matrix_to_dict("./BLOSUM62_square.txt")
PAM=matrix_to_dict("./PAM250_square.txt")
aln1=[]
fasta=open("./alignments.fasta","r")
for line in fasta:
if not line.startswith(">"):
aln1.append(line.rstrip())
fasta.close()
aln2=[aln1[2],aln1[3]]
aln3=[aln1[4],aln1[5]]
aln1=aln1[0:2]
#print aln1
#print aln2
#print aln3
aln=[aln1,aln2,aln3]
PAM_scores=[]
BLOSUM_scores=[]
for list in aln:
p=score(list[0],list[1],PAM)
PAM_scores.append(p)
b=score(list[0],list[1],BLOSUM)
BLOSUM_scores.append(b)
print "PAM scores:\n"
for element in PAM_scores:
print_result(element)
print " "
print "BLOSUM scores:\n"
for element in BLOSUM_scores:
print_result(element)
| [
37811,
198,
7469,
500,
257,
2163,
284,
7716,
22155,
422,
17593,
198,
4598,
262,
976,
329,
584,
17593,
198,
961,
16311,
422,
3049,
64,
2393,
198,
13086,
2163,
351,
7159,
357,
41068,
16,
11,
33756,
17,
11,
17593,
8,
284,
4776,
262,
10... | 2.319905 | 422 |
from django import forms
from post.models import Post
from django.forms import ClearableFileInput | [
6738,
42625,
14208,
1330,
5107,
198,
6738,
1281,
13,
27530,
1330,
2947,
198,
198,
6738,
42625,
14208,
13,
23914,
1330,
11459,
540,
8979,
20560
] | 4.083333 | 24 |
# USAGE
# python cnn_regression.py --dataset Houses-dataset/Houses\ Dataset/
# import the necessary packages
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from pyimagesearch import datasets
from pyimagesearch import models
import numpy as np
import argparse
import locale
import os
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", type=str, required=True,
help="path to input dataset of house images")
args = vars(ap.parse_args())
# construct the path to the input .txt file that contains information
# on each house in the dataset and then load the dataset
print("[INFO] loading house attributes...")
inputPath = os.path.sep.join([args["dataset"], "HousesInfo.txt"])
df = datasets.load_house_attributes(inputPath)
# load the house images and then scale the pixel intensities to the
# range [0, 1]
print("[INFO] loading house images...")
images = datasets.load_house_images(df, args["dataset"])
images = images / 255.0
# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
split = train_test_split(df, images, test_size=0.25, random_state=42)
(trainAttrX, testAttrX, trainImagesX, testImagesX) = split
# find the largest house price in the training set and use it to
# scale our house prices to the range [0, 1] (will lead to better
# training and convergence)
maxPrice = trainAttrX["price"].max()
trainY = trainAttrX["price"] / maxPrice
testY = testAttrX["price"] / maxPrice
# create our Convolutional Neural Network and then compile the model
# using mean absolute percentage error as our loss, implying that we
# seek to minimize the absolute percentage difference between our
# price *predictions* and the *actual prices*
model = models.create_cnn(64, 64, 3, regress=True)
opt = Adam(lr=1e-3, decay=1e-3 / 200)
model.compile(loss="mean_absolute_percentage_error", optimizer=opt)
# train the model
print("[INFO] training model...")
model.fit(x=trainImagesX, y=trainY,
validation_data=(testImagesX, testY),
epochs=200, batch_size=8)
# make predictions on the testing data
print("[INFO] predicting house prices...")
preds = model.predict(testImagesX)
# compute the difference between the *predicted* house prices and the
# *actual* house prices, then compute the percentage difference and
# the absolute percentage difference
diff = preds.flatten() - testY
percentDiff = (diff / testY) * 100
absPercentDiff = np.abs(percentDiff)
# compute the mean and standard deviation of the absolute percentage
# difference
mean = np.mean(absPercentDiff)
std = np.std(absPercentDiff)
# finally, show some statistics on our model
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
print("[INFO] avg. house price: {}, std house price: {}".format(
locale.currency(df["price"].mean(), grouping=True),
locale.currency(df["price"].std(), grouping=True)))
print("[INFO] mean: {:.2f}%, std: {:.2f}%".format(mean, std)) | [
2,
1294,
11879,
198,
2,
21015,
269,
20471,
62,
2301,
2234,
13,
9078,
1377,
19608,
292,
316,
34336,
12,
19608,
292,
316,
14,
39,
11370,
59,
16092,
292,
316,
14,
198,
198,
2,
1330,
262,
3306,
10392,
198,
6738,
11192,
273,
11125,
13,
... | 3.312088 | 910 |
# https://www.jianshu.com/p/ba8a27cf7da1
# https://www.cnblogs.com/goldsunshine/p/15259246.html
import sqlite3
import peewee
import datetime
import core.orm as orm
db = peewee.SqliteDatabase('../db/auto_work.db')
if __name__ == "__main__":
pass
# test_insert_record()
# test_select_record()
# test_update_record()
# test_delete_record()
# 新增工作项
# work_item = orm.WorkItem(name='自动发送消息')
# orm.insert_work_item(work_item)
# 新增工作流程
# work_flow = orm.WorkFlow(wid=1, name='按回车发消息', order=6)
# orm.insert_work_flow(work_flow)
# 查询工作流程
# orm.select_work_flows(1)
# 查询工作监控
# orm.select_work_monitors(1)
# 新增工作操作
# work_operate = orm.WorkOperate(fm_id=1, fm_type=1, op_type=10, op_content='/Applications/微信.app', order=1)
# orm.insert_work_operate(work_operate)
work_operate = orm.WorkOperate(fm_id=7, fm_type=1, op_type=10, op_content='/Applications/微信.app', order=1)
orm.insert_work_operate(work_operate)
| [
2,
3740,
1378,
2503,
13,
73,
1547,
13415,
13,
785,
14,
79,
14,
7012,
23,
64,
1983,
12993,
22,
6814,
16,
198,
2,
3740,
1378,
2503,
13,
31522,
49096,
13,
785,
14,
70,
10119,
403,
19489,
14,
79,
14,
1314,
25191,
26912,
13,
6494,
19... | 1.857678 | 534 |
from .painters import *
from .textures import * | [
6738,
764,
35436,
1010,
1330,
1635,
198,
6738,
764,
5239,
942,
1330,
1635
] | 3.615385 | 13 |
import numpy as np
import matplotlib.pylab as plt
from .basewidget import BaseWidget
from probeinterface.plotting import plot_probe
class ProbeMapWidget(BaseWidget):
"""
Plot the probe of a recording.
Parameters
----------
recording: RecordingExtractor
The recording extractor object
channel_ids: list
The channel ids to display
figure: matplotlib figure
The figure to be used. If not given a figure is created
ax: matplotlib axis
The axis to be used. If not given an axis is created
**plot_probe_kwargs: keyword arguments for probeinterface.plotting.plot_probe() function
Returns
-------
W: ProbeMapWidget
The output widget
"""
plot_probe_map.__doc__ = ProbeMapWidget.__doc__
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
79,
2645,
397,
355,
458,
83,
198,
198,
6738,
764,
12093,
413,
17484,
1330,
7308,
38300,
198,
198,
6738,
12774,
39994,
13,
29487,
889,
1330,
7110,
62,
1676,
1350,
628,
1... | 2.892193 | 269 |
"""circle_pack.py
Compute circle packings according to the Koebe-Thurston-Andreev theory,
Following a numerical algorithm by C. R. Collins and K. Stephenson,
"A Circle Packing Algorithm", Comp. Geom. Theory and Appl. 2003.
"""
from __future__ import division
from math import pi, acos, sin, e
tolerance = 1 + 10e-12 # how accurately to approximate things
# ======================================================
# The main circle packing algorithm
# ======================================================
def circle_pack(internal, external):
"""Find a circle packing for the given data.
The two arguments should be dictionaries with disjoint keys; the
keys of the two arguments are identifiers for circles in the packing.
The internal argument maps each internal circle to its cycle of
surrounding circles; the external argument maps each external circle
to its desired radius. The return function is a mapping from circle
keys to pairs (center,radius) where center is a complex number."""
# Some sanity checks and preprocessing
if min(external.values()) <= 0:
raise ValueError("external radii must be positive")
radii = dict(external)
for k in internal:
if k in external:
raise ValueError("keys are not disjoint")
radii[k] = 1
# The main iteration for finding the correct set of radii
lastChange = 2
while lastChange > tolerance:
lastChange = 1
for k in internal:
theta = flower(radii, k, internal[k])
hat = radii[k] / (1 / sin(theta / (2 * len(internal[k]))) - 1)
newrad = hat * (1 / (sin(pi / len(internal[k]))) - 1)
kc = max(newrad / radii[k], radii[k] / newrad)
lastChange = max(lastChange, kc)
radii[k] = newrad
# Recursively place all the circles
placements = {}
k1 = next(iter(internal)) # pick one internal circle
placements[k1] = 0j # place it at the origin
k2 = internal[k1][0] # pick one of its neighbors
placements[k2] = radii[k1] + radii[k2] # place it on the real axis
place(placements, radii, internal, k1) # recursively place the rest
place(placements, radii, internal, k2)
return dict((k, (placements[k], radii[k])) for k in radii)
# ======================================================
# Invert a collection of circles
# ======================================================
def InvertPacking(packing, center):
"""Invert with specified center"""
result = {}
for k in packing:
z, r = packing[k]
z -= center
if z == 0:
offset = 1j
else:
offset = z / abs(z)
p, q = z - offset * r, z + offset * r
p, q = 1 / p, 1 / q
z = (p + q) / 2
r = abs((p - q) / 2)
result[k] = z, r
return result
def NormalizePacking(packing, k=None, target=1.0):
"""Make the given circle have radius one (or the target if given).
If no circle is given, the minimum radius circle is chosen instead."""
if k is None:
r = min(r for z, r in packing.values())
else:
z, r = packing[k]
s = target / r
return dict((kk, (zz * s, rr * s)) for kk, (zz, rr) in packing.iteritems())
def InvertAround(packing, k, smallCircles=None):
"""Invert so that the specified circle surrounds all the others.
Searches for the inversion center that maximizes the minimum radius.
This can be expressed as a quasiconvex program, but in a related
hyperbolic space, so rather than applying QCP methods it seems
simpler to use a numerical hill-climbing approach, relying on the
theory of QCP to tell us there are no local maxima to get stuck in.
If the smallCircles argument is given, the optimization
for the minimum radius circle will look only at these circles"""
z, r = packing[k]
if smallCircles:
optpack = {k: packing[k] for k in smallCircles}
else:
optpack = packing
q, g = z, r * 0.4
oldrad, ratio = None, 2
while abs(g) > r * (tolerance - 1) or ratio > tolerance:
rr, ignore1, ignore2, q = max(list(testgrid(optpack, k, z, r, q, g)))
if oldrad:
ratio = rr / oldrad
oldrad = rr
g *= 0.53 + 0.1j # rotate so not always axis-aligned
return InvertPacking(packing, q)
# ======================================================
# Utility routines, not for outside callers
# ======================================================
def acxyz(x, y, z):
"""Angle at a circle of radius x given by two circles of radii y and z"""
try:
return acos(((x + y)**2 + (x + z)**2 - (y + z)**2)
/ (2.0 * (x + y) * (x + z)))
except ValueError:
return pi / 3
except ZeroDivisionError:
return pi
def flower(radius, center, cycle):
"""Compute the angle sum around a given internal circle"""
return sum(acxyz(radius[center], radius[cycle[i - 1]], radius[cycle[i]])
for i in range(len(cycle)))
def place(placements, radii, internal, center):
"""Recursively find centers of all circles surrounding k"""
if center not in internal:
return
cycle = internal[center]
for i in range(-len(cycle), len(cycle) - 1):
if cycle[i] in placements and cycle[i + 1] not in placements:
s, t = cycle[i], cycle[i + 1]
theta = acxyz(radii[center], radii[s], radii[t])
offset = (placements[s] - placements[center]) / \
(radii[s] + radii[center])
offset *= e**(-1j * theta)
placements[t] = placements[center] + \
offset * (radii[t] + radii[center])
place(placements, radii, internal, t)
def testgrid(packing, k, z, r, q, g):
"""Build grid of test points around q with grid size g"""
for i in (-2, -1, 0, 1, 2):
for j in (-2, -1, 0, 1, 2):
center = q + i * g + j * 1j * g
if abs(center - z) < r:
newpack = InvertPacking(packing, center)
newpack = NormalizePacking(newpack, k)
minrad = min(r for z, r in newpack.values())
yield minrad, i, j, center
| [
37811,
45597,
62,
8002,
13,
9078,
198,
7293,
1133,
9197,
2353,
654,
1864,
284,
262,
509,
2577,
1350,
12,
817,
333,
3743,
12,
1870,
631,
85,
4583,
11,
198,
14291,
257,
29052,
11862,
416,
327,
13,
371,
13,
14006,
290,
509,
13,
42838,
... | 2.552869 | 2,440 |
import urllib
import os
import threading
import time
import errno
from functools import partial
import weakref
import base64
import json
import socket
from socketserver import ThreadingMixIn
from http.server import SimpleHTTPRequestHandler, HTTPServer
from urllib.parse import unquote
from urllib.parse import urlparse
from urllib.parse import parse_qs
"""
HTTP Server interface
"""
#Serve files from lavavu html dir
#Stifle log output
#Optional thread per request version:
"""
HTTP Server manager class
"""
#Ignore SIGPIPE altogether (does not apply on windows)
import sys
if sys.platform != 'win32':
from signal import signal, SIGPIPE, SIG_IGN
signal(SIGPIPE, SIG_IGN)
"""
Main entry point - run server and open browser interface
"""
if __name__ == '__main__':
import lavavu
lv = lavavu.Viewer()
#lv.animate(1) #Required to show viewer window and handle mouse/keyboard events there too
lv.browser()
lv._thread.join() #Wait for server to quit
| [
11748,
2956,
297,
571,
198,
11748,
28686,
198,
11748,
4704,
278,
198,
11748,
640,
198,
11748,
11454,
3919,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
11748,
4939,
5420,
198,
11748,
2779,
2414,
198,
11748,
33918,
198,
11748,
17802,
198... | 3.200647 | 309 |
from .views import *
from django.urls import path
urlpatterns = [
path('login/', LoginView.as_view()),
path('register/', RegisterView.as_view()),
path('me/', UserView.as_view())
]
| [
6738,
764,
33571,
1330,
1635,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
38235,
14,
3256,
23093,
7680,
13,
292,
62,
1177,
3419,
828,
198,
220,
220,
220,
3... | 2.608108 | 74 |
import numpy as np
from river.utils.skmultiflow_utils import check_random_state
from .. import base
class Agrawal(base.SyntheticDataset):
r"""Agrawal stream generator.
The generator was introduced by Agrawal et al. [^1], and was a common
source of data for early work on scaling up decision tree learners.
The generator produces a stream containing nine features, six numeric and
three categorical.
There are 10 functions defined for generating binary class labels from the
features. Presumably these determine whether the loan should be approved.
Classification functions are listed in the original paper [^1].
**Feature** | **Description** | **Values**
* `salary` | salary | uniformly distributed from 20k to 150k
* `commission` | commission | 0 if `salary` < 75k else uniformly distributed from 10k to 75k
* `age` | age | uniformly distributed from 20 to 80
* `elevel` | education level | uniformly chosen from 0 to 4
* `car` | car maker | uniformly chosen from 1 to 20
* `zipcode` | zip code of the town | uniformly chosen from 0 to 8
* `hvalue` | house value | uniformly distributed from 50k x zipcode to 100k x zipcode
* `hyears` | years house owned | uniformly distributed from 1 to 30
* `loan` | total loan amount | uniformly distributed from 0 to 500k
Parameters
----------
classification_function
The classification function to use for the generation.
Valid values are from 0 to 9.
seed
If int, `seed` is used to seed the random number generator;
If RandomState instance, `seed` is the random number generator;
If None, the random number generator is the `RandomState` instance used
by `np.random`.
balance_classes
If True, the class distribution will converge to a uniform distribution.
perturbation
The probability that noise will happen in the generation. Each new
sample will be perturbed by the magnitude of `perturbation`.
Valid values are in the range [0.0 to 1.0].
Examples
--------
>>> from river import synth
>>> dataset = synth.Agrawal(classification_function=0,
... seed=42)
>>> for x, y in dataset.take(5):
... print(list(x.values()), y)
[68690.2154, 81303.5729, 62, 4, 6, 2, 419982.4410, 11, 433088.0728] 1
[98144.9515, 0, 43, 2, 1, 7, 266488.5281, 6, 389.3829] 0
[148987.502, 0, 52, 3, 11, 8, 79122.9140, 27, 199930.4858] 0
[26066.5362, 83031.6639, 34, 2, 11, 6, 444969.2657, 25, 23225.2063] 1
[98980.8307, 0, 40, 0, 6, 1, 1159108.4298, 28, 281644.1089] 0
Notes
-----
The sample generation works as follows: The 9 features are generated
with the random generator, initialized with the seed passed by the
user. Then, the classification function decides, as a function of all
the attributes, whether to classify the instance as class 0 or class
1. The next step is to verify if the classes should be balanced, and
if so, balance the classes. Finally, add noise if `perturbation` > 0.0.
References
----------
[^1]: Rakesh Agrawal, Tomasz Imielinksi, and Arun Swami. "Database Mining:
A Performance Perspective", IEEE Transactions on Knowledge and
Data Engineering, 5(6), December 1993.
"""
def generate_drift(self):
"""
Generate drift by switching the classification function randomly.
"""
new_function = self._rng.randint(10)
while new_function == self.classification_function:
new_function = self._rng.randint(10)
self.classification_function = new_function
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
7850,
13,
26791,
13,
8135,
16680,
361,
9319,
62,
26791,
1330,
2198,
62,
25120,
62,
5219,
198,
198,
6738,
11485,
1330,
2779,
628,
198,
4871,
2449,
1831,
282,
7,
8692,
13,
13940,
429,
6587,... | 2.910045 | 1,334 |
# -*- coding: utf-8 -*-
import frictionless
from pprint import pprint
class DataSchemaInputBase:
"""A base class for importing a data schema, querying it, and validating with it.
This class assumes imported schema are valid Table Schema.
See here: https://specs.frictionlessdata.io/table-schema/
"""
def __init__(self, source=None):
"""Create a new DataSchemaInputBase object."""
self.source = source
self.schema_fallback = None
self.schema = self.load_all_schema()
def load_all_schema(self):
"""Load the schema for all indicators. This should be overridden by a subclass."""
raise NotImplementedError
def validate(self, indicator):
"""Validate the data for an Indicator object.
Parameters
----------
indicator : Indicator
The instance of Indicator to validate
Returns
-------
boolean
True if validation passes, False otherwise
"""
status = True
if indicator.has_data():
df = indicator.data
schema = self.get_schema_for_indicator(indicator)
if schema is not None:
records = df.to_dict('records')
report = frictionless.validate(records, schema=schema)
status = report.valid
if status == False:
pprint(report)
# TODO: This output needs to be much more concise.
return status
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
23822,
1203,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
198,
4871,
6060,
27054,
2611,
20560,
14881,
25,
198,
220,
220,
220,
37227,
32,
2779,
1398,
329,
3333... | 2.365055 | 641 |
import time
from django.conf import settings
from hawkrest import HawkAuthentication
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from api.core.permissions import IsAuthenticated
from .checks import db_check
| [
11748,
640,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
48710,
2118,
1330,
26698,
47649,
3299,
198,
6738,
1334,
62,
30604,
13,
8612,
873,
1330,
42044,
2969,
3824,
769,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
182... | 4.15873 | 63 |
import asyncio
import os
from Yukki.YukkiUtilities.tgcallsrun import ASS_ACC
from pyrogram import Client, filters
from pyrogram.errors import UserAlreadyParticipant, UserNotParticipant
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, Message
from pytgcalls import StreamType
from pytgcalls.types.input_stream import AudioVideoPiped
from pytgcalls.types.input_stream.quality import (
HighQualityAudio,
HighQualityVideo,
LowQualityVideo,
MediumQualityVideo,
)
from youtubesearchpython import VideosSearch
from Yukki.config import GROUP, CHANNEL
from Yukki import BOT_NAME, BOT_USERNAME, app
from Yukki.YukkiUtilities.tgcallsrun.music import pytgcalls as call_py
from Yukki.YukkiUtilities.helpers.filters import command
from Yukki.YukkiUtilities.helpers.logger import LOG_CHAT
from Yukki.YukkiUtilities.tgcallsrun.queues import (
QUEUE,
add_to_queue,
clear_queue,
get_queue,
)
@app.on_message(command("vplay") & filters.group)
@app.on_message(command("vplaylist") & filters.group)
| [
11748,
30351,
952,
198,
11748,
28686,
198,
6738,
19760,
4106,
13,
56,
2724,
4106,
18274,
2410,
13,
25297,
66,
5691,
5143,
1330,
24994,
62,
26861,
198,
198,
6738,
12972,
39529,
1330,
20985,
11,
16628,
198,
6738,
12972,
39529,
13,
48277,
... | 2.988506 | 348 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
2829,
17752,
355,
33918,
198,
198,
6738,
435,
541,
323,
13,
64,
404,
13,
15042,
13,
9979,
415,
13,
22973,
3418... | 2.58 | 50 |
import numpy as np
from sklearn.linear_model import Lasso, Ridge
from sklearn.preprocessing import scale
from math import floor
class Regression:
"""
Info:
Regression class which uses sklearn. Includes functions to solve:
* Ordinary Least Squares (OLS).
* Ridge regression.
* Lasso regression.
Initialize:
* X: (N x p) design matrix.
* y: array containing (N x 1) data points.
Methods:
* update_X(X), update X, X_temp attributes
* update_Y(Y), update Y, Y_temp attributes
* svd_inv(A), invert A by using SVD
* ols_fit(svd=False) |
* ridge_fit(alpha,svd=False) |> Saves new attributes beta, p
* lasso_fit(alpha) |
* predict(X), return y_prediction (Note: can only be done after fit)
* mean_squared_error(y, y_pred), return MSE
* r2_score(y, y_pred), return R2
* k_fold_cross_validation(k, method, alpha=1e-3, svd=False), apply k-fold CV
Example:
model = Regression(X, y)
model.ols_fit(svd=True)
y_pred = model.predict(X)
MSE_kfold, R2 = model.k_fold_cross_validation(10, "ols", svd=True)
MSE_train = model.mean_squared_error(y, y_pred)
"""
def svd_inv(self, A):
"""
Info:
Invert matrix A by using Singular Value Decomposition
Input:
* A: matrix
Output
* A_inverted: matrix
"""
U, D, VT = np.linalg.svd(A)
return VT.T @ np.linalg.inv(np.diag(D)) @ U.T
def ols_fit(self, svd=False):
"""
Info:
Find the coefficients of beta: An array of shape (p, 1), where p is the
number of features. Beta is calculated using the X, y attributes of the
instance.
Output:
* beta: The coefficient vector for the OLS scheme.
"""
XTX = self.X_temp.T @ self.X_temp
if svd:
XTX_inv = self.svd_inv(XTX)
else:
XTX_inv = np.linalg.inv(XTX)
self.beta = XTX_inv @ self.X_temp.T @ self.y_temp
self.p = self.beta.shape[0]
return None
def ridge_fit(self, alpha=1e-6):
"""
Info:
Find the coefficients of beta: An array of shape (p, 1), where p is the
number of features. Beta is calculated using the X, y attributes of the
instance.
Input:
* alpha: parameter for this regression type
* svd: if True, SVD is used for matrix inversion
Output:
* beta: The coefficient vector for the Ridge scheme
"""
model = Ridge(alpha=alpha, normalize=True)
model.fit(self.X_temp, self.y_temp)
p = self.X_temp.shape[1]
self.beta = np.transpose(model.coef_)
self.beta[0] = model.intercept_
self.p = self.beta.shape[0]
return None
def lasso_fit(self, alpha=1e-6):
"""
Info:
Find the coefficients of beta: An array of shape (p, 1), where p is the
number of features. Beta is calculated using the X, y attributes of the
instance.
Input:
* alpha: parameter for this regression type
Output:
* beta: The coefficient vector for the Lasso scheme.
"""
model = Lasso(alpha=alpha, normalize=True, tol=0.05, max_iter=2500)
model.fit(self.X_temp, self.y_temp)
p = self.X_temp.shape[1]
self.beta = np.transpose(model.coef_)
self.beta[0] = model.intercept_
self.p = self.beta.shape[0]
return None
def predict(self, X):
"""
Info:
This method can only be called after ols/ridge/lasso_regression() has
been called. It will predict y, given X.
Input:
* X: values of which y will be predicted.
Output:
* y_pred: the y prediction values.
"""
if self.p:
if X.shape[1] != self.p:
raise ValueError(
f"Model has produced a beta with {self.p} features"
+ f" and X in predict(X) has {X.shape[1]} columns."
)
y_pred = X @ self.beta
return y_pred
else:
print(
"Warning, cannot predict because nothing has been fitted yet!"
+ " Try using ols_fit(), ridge_fit() or lasso_fit() first."
)
def mean_squared_error(self, y, y_pred):
"""Evaluate the mean squared error for y, y_pred"""
mse = np.mean((y - y_pred) ** 2)
return mse
def r2_score(self, y, y_pred):
"""Evaluate the R2 (R squared) score for y, y_pred"""
y_mean = np.mean(y)
RSS = np.sum((y - y_pred) ** 2) # residual sum of squares
TSS = np.sum((y - y_mean) ** 2) # total sum of squares
r2 = 1 - RSS / TSS
return r2
def k_fold_cross_validation(self, k, method, alpha=1e-3, svd=False):
"""
Info:
Perform the k-fold cross validation and evaluate the mean squared
error and the R squared score.
Input:
* k
* method: "ols", "ridge" or "lasso"
* alpha: parameter for ridge/lasso, can be ignored for ols
Output:
* MSE
* R2
"""
mse = np.zeros(k)
r2 = np.zeros(k)
N = self.X.shape[0]
p = np.random.permutation(N) # permutation array for shuffling of data
length = floor(N / k) # number of indices per interval k.
for i in range(k):
start = i * length
stop = (i + 1) * length
# split
X_test = self.X[p[start:stop]]
y_test = self.y[p[start:stop]]
self.X_temp = np.concatenate((self.X[p[:start]], self.X[p[stop:]]), axis=0)
self.y_temp = np.concatenate((self.y[p[:start]], self.y[p[stop:]]))
# fit
if method == "ols":
self.ols_fit(svd=svd)
elif method == "ridge":
self.ridge_fit(alpha=alpha)
elif method == "lasso":
self.lasso_fit(alpha=alpha)
else:
raise ValueError('method must be "osl"/"lasso"/"ridge".')
# predict
y_pred = self.predict(X_test)
# evaluate
mse[i] = self.mean_squared_error(y_test, y_pred)
r2[i] = self.r2_score(y_test, y_pred)
# Reset temporary arrays
self.X_temp = self.X
self.y_temp = self.y
# Evaluate mean
MSE = np.mean(mse)
R2 = np.mean(r2)
return MSE, R2
| [
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
406,
28372,
11,
20614,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
5046,
198,
6738,
10688,
1330,
4314,
628,
198,
4871,
3310,
2234,
25,
198,
220,
220,
... | 2.048098 | 3,181 |
import pytest
from py_ecc.utils import prime_field_inv
@pytest.mark.parametrize(
'a,n,result',
[
(0, 7, 0),
(7, 7, 0),
(2, 7, 4),
(10, 7, 5),
]
)
| [
11748,
12972,
9288,
198,
198,
6738,
12972,
62,
68,
535,
13,
26791,
1330,
6994,
62,
3245,
62,
16340,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7,
198,
220,
220,
220,
705,
64,
11,
77,
11,
20274,
3256,
198,
220,
... | 1.678261 | 115 |
import googlemaps, requests, scalg, json
from geopy.distance import geodesic
from datetime import datetime
from .utils import find_pollution_coords
| [
11748,
23645,
31803,
11,
7007,
11,
629,
14016,
11,
33918,
198,
6738,
4903,
11081,
13,
30246,
1330,
4903,
4147,
291,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
764,
26791,
1330,
1064,
62,
30393,
1009,
62,
1073,
3669,
198
] | 3.634146 | 41 |
'''
将一本小说内容插入到MYSQL中
'''
import time
import pymysql
from selenium import webdriver
from pyquery import PyQuery as pq
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
#url = 'https://book.tianya.cn/chapter-83237-9175309'
#https://book.tianya.cn/chapter-89223 小说页面
# url = 'https://book.tianya.cn/chapter-89223'
browser = webdriver.Chrome(executable_path='D:\chromedriver_win32\chromedriver.exe')
db = pymysql.connect(host='localhost', user='root', password='test123456', port=3306, db='spiders')
cursor = db.cursor()
sql = "INSERT INTO charpter(charpter_id,charpter_name,novel_id) values(%s,%s,%s)"
#点击目录链接
#将数据插入到数据库
if __name__ == '__main__':
#组成url
ids = get_100_novel_id()
urls = make_novel_url(ids)
count = 0
for url in urls:
count += 1
html = click_href(url)
new_datas = get_charpter_info_and_save(html)
save_to_MYSQL(new_datas)
print('第%s部小说章节信息保存完成'%count) | [
7061,
6,
198,
49546,
31660,
17312,
105,
22887,
237,
46237,
112,
37863,
227,
22522,
117,
162,
237,
240,
17739,
98,
26344,
108,
44,
16309,
9711,
40792,
198,
7061,
6,
198,
11748,
640,
198,
11748,
279,
4948,
893,
13976,
198,
6738,
384,
11... | 2.021236 | 518 |
from .matcher import GenericMatcher
| [
6738,
764,
6759,
2044,
1330,
42044,
19044,
2044,
198
] | 4 | 9 |
from .model import ImageWidth, ImageHeight, Camera, ImageList, Record, RecordList
from .view import OpenDialog, ErrorDialog, MainWindow
import datetime
from functools import reduce
import PIL.ImageQt
from PySide2 import QtCore, QtGui, QtWidgets
ImageViewWidth = 800
ImageViewHeight = 600
| [
6738,
764,
19849,
1330,
7412,
30916,
11,
7412,
23106,
11,
20432,
11,
7412,
8053,
11,
13266,
11,
13266,
8053,
198,
6738,
764,
1177,
1330,
4946,
44204,
11,
13047,
44204,
11,
8774,
27703,
198,
11748,
4818,
8079,
198,
6738,
1257,
310,
10141... | 3.511905 | 84 |
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from functools import update_wrapper
from ssl import PROTOCOL_SSLv23, SSLContext
import flask
from flask import Response
from mo_dots import coalesce, is_data
from mo_files import File, TempFile, URL, mimetype
from mo_future import decorate, text
from mo_json import value2json
from mo_logs import Log
from mo_threads.threads import register_thread, Thread
from pyLibrary.env import git
from pyLibrary.env.big_data import ibytes2icompressed
TOO_SMALL_TO_COMPRESS = 510 # DO NOT COMPRESS DATA WITH LESS THAN THIS NUMBER OF BYTES
def cors_wrapper(func):
"""
Decorator for CORS
:param func: Flask method that handles requests and returns a response
:return: Same, but with permissive CORS headers set
"""
@decorate(func)
output.provide_automatic_options = False
return update_wrapper(output, func)
def dockerflow(flask_app, backend_check):
"""
ADD ROUTING TO HANDLE DOCKERFLOW APP REQUIREMENTS
(see https://github.com/mozilla-services/Dockerflow#containerized-app-requirements)
:param flask_app: THE (Flask) APP
:param backend_check: METHOD THAT WILL CHECK THE BACKEND IS WORKING AND RAISE AN EXCEPTION IF NOT
:return:
"""
global VERSION_JSON
try:
VERSION_JSON = File("version.json").read_bytes()
@cors_wrapper
@cors_wrapper
@cors_wrapper
flask_app.add_url_rule(
str("/__version__"),
None,
version,
defaults={},
methods=[str("GET"), str("POST")],
)
flask_app.add_url_rule(
str("/__heartbeat__"),
None,
heartbeat,
defaults={},
methods=[str("GET"), str("POST")],
)
flask_app.add_url_rule(
str("/__lbheartbeat__"),
None,
lbheartbeat,
defaults={},
methods=[str("GET"), str("POST")],
)
except Exception as e:
Log.error("Problem setting up listeners for dockerflow", cause=e)
VERSION_JSON = None
def add_version(flask_app):
"""
ADD ROUTING TO HANDLE REQUEST FOR /__version__
:param flask_app: THE (Flask) APP
:return:
"""
try:
rev = coalesce(git.get_revision(), "")
branch = "https://github.com/mozilla/ActiveData/tree/" + coalesce(git.get_branch())
version_info = value2json(
{
"source": "https://github.com/mozilla/ActiveData/tree/" + rev,
"branch": branch,
"commit": rev,
},
pretty=True,
).encode('utf8') + text("\n")
Log.note("Using github version\n{{version}}", version=version_info)
@register_thread
@cors_wrapper
flask_app.add_url_rule(
str("/__version__"),
None,
version,
defaults={},
methods=[str("GET"), str("POST")],
)
except Exception as e:
Log.error("Problem setting up listeners for dockerflow", cause=e)
def setup_flask_ssl(flask_app, flask_config):
"""
SPAWN A NEW THREAD TO RUN AN SSL ENDPOINT
REMOVES ssl_context FROM flask_config BEFORE RETURNING
:param flask_app:
:param flask_config:
:return:
"""
if not flask_config.ssl_context:
return
ssl_flask = flask_config.copy()
ssl_flask.debug = False
ssl_flask.port = 443
if is_data(flask_config.ssl_context):
# EXPECTED PEM ENCODED FILE NAMES
# `load_cert_chain` REQUIRES CONCATENATED LIST OF CERTS
with TempFile() as tempfile:
try:
tempfile.write(
File(ssl_flask.ssl_context.certificate_file).read_bytes()
)
if ssl_flask.ssl_context.certificate_chain_file:
tempfile.write(
File(ssl_flask.ssl_context.certificate_chain_file).read_bytes()
)
tempfile.flush()
tempfile.close()
context = SSLContext(PROTOCOL_SSLv23)
context.load_cert_chain(
tempfile.name,
keyfile=File(ssl_flask.ssl_context.privatekey_file).abspath,
)
ssl_flask.ssl_context = context
except Exception as e:
Log.error("Could not handle ssl context construction", cause=e)
Thread.run("SSL Server", runner)
if flask_config.ssl_context and flask_config.port != 80:
Log.warning(
"ActiveData has SSL context, but is still listening on non-encrypted http port {{port}}",
port=flask_config.port,
)
flask_config.ssl_context = None
@register_thread
@cors_wrapper
def options(*args, **kwargs):
"""
USE THIS FOR THE OPTIONS AND HEAD REQUEST TYPES
"""
return Response("", status=200)
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
2,
198,
2,
770,
8090,
6127,
5178,
318,
2426,
284,
262,
2846,
286,
262,
29258,
5094,
198,
2,
13789,
11,
410,
13,
362,
13,
15,
13,
1002,
257,
4866,
286,
262,
4904,
43,
373,
407,
9387,
351,
42... | 2.214862 | 2,355 |
import sys
import re
from collections import OrderedDict
import cipherCheck
import playfair_cipher
import vigenere_cipher
# start of main()
cipherList = ['PLF', 'RTS', 'RFC', 'VIG', 'CES']
# Remember that the first argument of the script is the file name, thus start at index 1
cipherName = str(sys.argv[1])
key = str(sys.argv[2])
encDec = str(sys.argv[3])
inputFile = str(sys.argv[4])
outputFile = str(sys.argv[5])
argumentList = [cipherName, key, encDec, inputFile, outputFile]
cipherCheck.checkCipherName(cipherName, cipherList)
cipherCheck.checkEncDec(encDec)
cipherCheck.checkFileExtension(inputFile, outputFile)
cipherCheck.cipherInterface(cipherName, cipherList, key, encDec, inputFile, outputFile)
| [
11748,
25064,
198,
11748,
302,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
38012,
9787,
198,
11748,
711,
22043,
62,
66,
10803,
198,
11748,
410,
9324,
567,
62,
66,
10803,
198,
198,
2,
923,
286,
1388,
3419,
198,
66,
10803,... | 2.921811 | 243 |
# Generated by Django 2.2 on 2019-04-04 16:21
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
319,
13130,
12,
3023,
12,
3023,
1467,
25,
2481,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.892857 | 28 |
# Generated by Django 2.2.10 on 2020-07-21 17:16
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
940,
319,
12131,
12,
2998,
12,
2481,
1596,
25,
1433,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.840909 | 44 |
from flask import Flask, render_template, request, make_response, send_file, redirect
import os
import secrets
import json
import requests
import re
import difflib
from flask_mysqldb import MySQL
from difflib import get_close_matches
from dotenv import load_dotenv
from googleapiclient.http import MediaFileUpload
from googleapiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
from httplib2 import Http
app = Flask(__name__)
app.config["MYSQL_HOST"] = os.environ.get('MYSQL_HOST')
app.config["MYSQL_USER"] = os.environ.get('MYSQL_USER')
app.config["MYSQL_PASSWORD"] = os.environ.get('MYSQL_PASSWORD')
app.config["MYSQL_DB"] = os.environ.get('MYSQL_DB')
load_dotenv('.env')
mysql = MySQL(app)
# sanitize user input. i hope this helps from SQL injection attacks
default_user_id = ""
credentials = None
service = None
# login to google drive and get drive service object which i will use later to upload and get urls
@app.route("/", methods=['GET'])
@app.route("/search")
@app.route("/course/<id>", methods=['GET', 'POST'])
@app.route("/subject/<id>", methods=['GET'])
FILE_EXTENSION = ["PDF"]
@app.route("/add_data", methods=["GET", "POST"])
@ app.route("/feedback", methods=['GET', 'POST'])
@ app.route("/question-paper/<id>")
@ app.route("/subject_detail/<id>")
@ app.route("/dashboard", methods=["GET", "POST"])
@ app.route("/login", methods=['GET', 'POST'])
@ app.route("/logout")
@ app.errorhandler(404)
@ app.errorhandler(500)
if __name__ == "__main__":
credentials = getDriveCredentials()
service = getDriveService(credentials)
app.run(debug=True)
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
11,
787,
62,
26209,
11,
3758,
62,
7753,
11,
18941,
198,
11748,
28686,
198,
11748,
13141,
198,
11748,
33918,
198,
11748,
7007,
198,
11748,
302,
198,
11748,
814,
8019,
198,
6738,
... | 2.811545 | 589 |
import numpy as np
import warnings
__all__ = ['WavefieldVectorBase']
if __name__ == '__main__':
m = Mesh()
W = WavefieldVectorBase(m)
MW = MyWFV(m)
W._data += np.arange(5).reshape(W._data.shape)
MW._data += np.arange(MW._data.shape[0]).reshape(MW._data.shape) | [
11748,
299,
32152,
355,
45941,
198,
11748,
14601,
628,
198,
834,
439,
834,
796,
37250,
39709,
3245,
38469,
14881,
20520,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
220,
220,
220,
285,
796,
47529,
3419,... | 2.38843 | 121 |
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Tuple
from connection import PrometheusClient
from model import Task, Node
from metrics import Metric, MetricsQueries, Function, FunctionsDescription
class AnalyzerQueries:
"""Class used for namespace"""
def query_performance_metrics(self, time: int, functions_args: List[Tuple[Function, str]],
metrics: List[Metric], window_length: int) -> Dict[Metric, Dict]:
"""performance metrics which needs aggregation over time"""
query_results: Dict[Metric, Dict] = {}
for metric in metrics:
for function, arguments in functions_args:
query_template = "{function}({arguments}{prom_metric}[{window_length}s])"
query = query_template.format(function=function.value,
arguments=arguments,
window_length=window_length,
prom_metric=MetricsQueries[metric])
query_result = self.prometheus_client.instant_query(query, time)
aggregation_name = build_function_call_id(function, arguments)
if metric in query_results:
query_results[metric][aggregation_name] = query_result
else:
query_results[metric] = {aggregation_name: query_result}
return query_results
| [
2,
15069,
357,
66,
8,
12131,
8180,
10501,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
... | 2.50995 | 804 |
from django.shortcuts import render
from django.http import HttpResponse
from .models import Cup
# Create your views here.
#
# TEST FUNCTION
# FUNCTION FOR HELLO, NAME
# FUNCTION FOR NUMBER TIMES 2
# FUNCTION TO ADD NUMBERS TO SUM
# FUNCTION TO CREATE NEW CUP OBJECT
# FILTERS FOR MANUFACTURE DATE
# CHANGE MATERIAL THROUGH MANUFACTURE DATE
# FUNCTION FOR CUP INDEX TO PRINT
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
764,
27530,
1330,
5454,
198,
198,
2,
13610,
534,
5009,
994,
13,
198,
2,
198,
2,
43001,
29397,
4177,
2849,
198,
19... | 2.866667 | 135 |
# Works run by the daemon (using submit)
from aiida import load_dbenv, is_dbenv_loaded
if not is_dbenv_loaded():
load_dbenv()
from aiida.orm import CalculationFactory, DataFactory
from aiida.work.run import run, submit, async
from aiida.orm.data.structure import StructureData
from aiida.orm.data.base import Str, Float, Bool
VaspCalculation = CalculationFactory('vasp.vasp')
PwCalculation = CalculationFactory('quantumespresso.pw')
PhonopyCalculation = CalculationFactory('phonopy')
KpointsData = DataFactory("array.kpoints")
ParameterData = DataFactory('parameter')
# Define structure
import numpy as np
cell = [[ 3.1900000572, 0, 0],
[-1.5950000286, 2.762621076, 0],
[ 0.0, 0, 5.1890001297]]
structure = StructureData(cell=cell)
scaled_positions=[(0.6666669, 0.3333334, 0.0000000),
(0.3333331, 0.6666663, 0.5000000),
(0.6666669, 0.3333334, 0.3750000),
(0.3333331, 0.6666663, 0.8750000)]
symbols=['Ga', 'Ga', 'N', 'N']
positions = np.dot(scaled_positions, cell)
for i, scaled_position in enumerate(scaled_positions):
structure.append_atom(position=np.dot(scaled_position, cell).tolist(),
symbols=symbols[i])
# PHONOPY settings
ph_settings = ParameterData(dict={'supercell': [[2,0,0],
[0,2,0],
[0,0,2]],
'primitive': [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]],
'distance': 0.01,
'mesh': [40, 40, 40],
# 'code': 'phonopy@stern_outside' # comment to use local phonopy
})
# VASP SPECIFIC
if False: # Set TRUE to use VASP or FALSE to use Quantum Espresso
incar_dict = {
# 'PREC' : 'Accurate',
'EDIFF' : 1e-08,
'NELMIN' : 5,
'NELM' : 100,
'ENCUT' : 400,
'ALGO' : 38,
'ISMEAR' : 0,
'SIGMA' : 0.01,
'GGA' : 'PS'
}
es_settings = ParameterData(dict=incar_dict)
from pymatgen.io import vasp as vaspio
#kpoints
#kpoints_pg = vaspio.Kpoints.monkhorst_automatic(
# kpts=[2, 2, 2],
# shift=[0.0, 0.0, 0.0])
#kpoints = ParameterData(dict=kpoints_pg.as_dict())
potcar = vaspio.Potcar(symbols=['Ga', 'N'],
functional='PBE')
settings_dict = {'code': 'vasp541mpi@boston',
'parameters': incar_dict,
'kpoints_per_atom': 1000, # k-point density
'pseudos': potcar.as_dict()}
# pseudos = ParameterData(dict=potcar.as_dict())
es_settings = ParameterData(dict=settings_dict)
# QE SPECIFIC
if False:
parameters_dict = {
'CONTROL': {'calculation': 'scf',
'tstress': True, # Important that this stays to get stress
'tprnfor': True,},
'SYSTEM': {'ecutwfc': 30.,
'ecutrho': 200.,},
'ELECTRONS': {'conv_thr': 1.e-6,}
}
# Kpoints
#kpoints_mesh = 2
#kpoints = KpointsData()
#kpoints.set_kpoints_mesh([kpoints_mesh, kpoints_mesh, kpoints_mesh])
#code = Code.get_from_string('pw@stern_outside')
pseudos = Str('pbe_ps')
settings_dict = {'code': 'pw@stern_outside',
'parameters': parameters_dict,
'kpoints_per_atom': 1000, # k-point density
'pseudos_family': 'pbe_ps'}
es_settings = ParameterData(dict=settings_dict)
# LAMMPS SPECIFIC
if True:
# GaN Tersoff
tersoff_gan = {
'Ga Ga Ga': '1.0 0.007874 1.846 1.918000 0.75000 -0.301300 1.0 1.0 1.44970 410.132 2.87 0.15 1.60916 535.199',
'N N N': '1.0 0.766120 0.000 0.178493 0.20172 -0.045238 1.0 1.0 2.38426 423.769 2.20 0.20 3.55779 1044.77',
'Ga Ga N': '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 0.0 0.00000 0.00000 2.90 0.20 0.00000 0.00000',
'Ga N N': '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 1.0 2.63906 3864.27 2.90 0.20 2.93516 6136.44',
'N Ga Ga': '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 1.0 2.63906 3864.27 2.90 0.20 2.93516 6136.44',
'N Ga N ': '1.0 0.766120 0.000 0.178493 0.20172 -0.045238 1.0 0.0 0.00000 0.00000 2.20 0.20 0.00000 0.00000',
'N N Ga': '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 0.0 0.00000 0.00000 2.90 0.20 0.00000 0.00000',
'Ga N Ga': '1.0 0.007874 1.846 1.918000 0.75000 -0.301300 1.0 0.0 0.00000 0.00000 2.87 0.15 0.00000 0.00000'}
# Silicon(C) Tersoff
# tersoff_si = {'Si Si Si ': '3.0 1.0 1.7322 1.0039e5 16.218 -0.59826 0.78734 1.0999e-6 1.7322 471.18 2.85 0.15 2.4799 1830.8'}
potential = {'pair_style': 'tersoff',
'data': tersoff_gan}
parameters = {'relaxation': 'tri', # iso/aniso/tri
'pressure': 0.0, # kbars
'vmax': 0.000001, # Angstrom^3
'energy_tolerance': 1.0e-25, # eV
'force_tolerance': 1.0e-25, # eV angstrom
'max_evaluations': 1000000,
'max_iterations': 500000}
settings_dict = {'code_forces': 'lammps_force@stern',
'code_optimize': 'lammps_optimize@stern',
'parameters': parameters,
'potential': potential}
es_settings = ParameterData(dict=settings_dict)
# CODE INDEPENDENT
machine_dict = {'resources': {'num_machines': 1,
'parallel_env': 'mpi*',
'tot_num_mpiprocs': 16},
'max_wallclock_seconds': 30 * 60,
}
machine = ParameterData(dict=machine_dict)
from aiida.workflows.wc_phonon import FrozenPhonon
future = submit(FrozenPhonon,
structure=structure,
machine=machine,
es_settings=es_settings,
ph_settings=ph_settings,
# Optional settings
pressure=Float(10),
optimize=Bool(0)
)
print('Running workchain with pk={}'.format(future.pid)) | [
198,
2,
10933,
1057,
416,
262,
33386,
357,
3500,
9199,
8,
198,
198,
6738,
257,
72,
3755,
1330,
3440,
62,
9945,
24330,
11,
318,
62,
9945,
24330,
62,
14578,
198,
361,
407,
318,
62,
9945,
24330,
62,
14578,
33529,
198,
220,
220,
220,
... | 1.814562 | 3,516 |
from utils.mongodb_access import get_references_annotations, get_references_count
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import DBSCAN
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import dendrogram
from itertools import cycle, islice
from scipy.cluster import hierarchy
import seaborn as sns
from sklearn.metrics.pairwise import cosine_similarity
sns.set(color_codes=True)
#print(get_clusters_range('publications', 'zika', 'localhost', 27017, 2010, 1, 2018, 1))
# print('Getting terms')
# references, index = get_references_annotations('publications', 'zika', 'localhost', 27017, 2010, 1, 2014, 12)
# # most_cited = get_references_count('publications', 'zika', 'localhost', 27017, 2010, 1, 2014, 12)
# # to_cluster = []
# # for i, pmid in enumerate(index):
# # if pmid in most_cited:
# # to_cluster.append(references[i])
# t0 = time()
# print('vectorizing')
# vectorizer = TfidfVectorizer(use_idf=True)
# X = vectorizer.fit_transform(references)
# print(vectorizer.get_feature_names())
# print("n_samples: %d, n_features: %d" % X.shape)
# #############################################################################
# #Compute DBSCAN
# db = DBSCAN(metric='cosine', eps=0.5).fit(X)
# core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
# core_samples_mask[db.core_sample_indices_] = True
# labels = db.labels_
#
# # Number of clusters in labels, ignoring noise if present.
# n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
# unique_labels = set(labels)
# colors = [plt.cm.Spectral(each)
# for each in np.linspace(0, 1, len(unique_labels))]
# y_pred = labels.astype(np.int)
# print(len(unique_labels))
# grouped_ids = dict()
# for i, label in enumerate(labels):
# if label == -1:
# continue
# if label not in grouped_ids:
# grouped_ids[label] = []
# grouped_ids[label].append(index[i])
# print(grouped_ids)
# similarities = cosine_similarity(X.todense())
# print(X.todense())
# clusters = hierarchy.linkage(X.todense(), metric="cosine")
# g = sns.clustermap(similarities, row_linkage=clusters, col_linkage=clusters, xticklabels=False, yticklabels=False)
# #ax = sns.heatmap(similarities)
# plt.show()
# X = X.todense()
# threshold = 0.5
# Z = hierarchy.linkage(X, "average", metric="cosine")
# C = hierarchy.fcluster(Z, threshold, criterion="distance")
# dn = hierarchy.dendrogram(Z, labels=index)
# plt.figure()
# plt.show()
# print(len(set(C)))
# grouped_ids = dict()
# for i, label in enumerate(C):
# if label not in grouped_ids:
# grouped_ids[label] = []
# grouped_ids[label].append(index[i])
# print(grouped_ids)
| [
6738,
3384,
4487,
13,
31059,
375,
65,
62,
15526,
1330,
651,
62,
5420,
4972,
62,
34574,
602,
11,
651,
62,
5420,
4972,
62,
9127,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
5239,
1330,
309,
69,
312,
69,
38469,
7509,
198,
... | 2.683817 | 1,069 |
# criando um jogo de adivinha
from random import randint
from time import sleep
# fazendo o computador sortear um numero de 0 ate 5
cpu = randint(0, 5)
print('<< TENTE ADIVINHAR >>')
# pedindo qual numero o jogador vai jogar
jogador = int(input('Jogador tente adivinhar o numero entre (0, 5): '))
# analisando se o jogador acertou ou nao o numero
if jogador == cpu:
print('ACERTOU')
sleep(0.5)
print(f'Computador jogou {cpu} e o Jogador jogou {jogador}')
else:
print('ERROU')
sleep(0.5)
print(f'Computador jogou {cpu} e o Jogador jogou {jogador}') | [
2,
269,
380,
25440,
23781,
474,
24076,
390,
512,
452,
259,
3099,
198,
6738,
4738,
1330,
43720,
600,
198,
6738,
640,
1330,
3993,
198,
2,
277,
1031,
31110,
267,
2653,
7079,
3297,
451,
23781,
997,
3529,
390,
657,
15063,
642,
198,
36166,
... | 2.419492 | 236 |
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from atados_core.models import Nonprofit, Project
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
11,
9455,
12331,
198,
6738,
379,
22484,
62,
7295,
13,
27530,
1330,
8504,
9183,
11,
4935,
628
] | 3.27907 | 43 |
__version__ = '0.9.0.dev1'
| [
834,
9641,
834,
796,
705,
15,
13,
24,
13,
15,
13,
7959,
16,
6,
198
] | 1.8 | 15 |
#!/usr/bin/env python
import sys
sys.path.append('../src')
from random import random
from threading import RLock
from time import sleep
from threadingex.threadpoolexecutor import ThreadPoolExecutor
lock = RLock()
result = {}
executor = ThreadPoolExecutor(10)
for i in range(100):
executor.submit(task, i)
executor.start()
executor.shutdown(False)
print executor._state
print result
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
40720,
10677,
11537,
198,
198,
6738,
4738,
1330,
4738,
198,
6738,
4704,
278,
1330,
371,
25392,
198,
6738,
640,
1330,
3993,
198,... | 3.038462 | 130 |
import setuptools
from distutils.core import setup
setup(
name = 'PqrUploadModule', # How you named your package folder (MyLib)
packages = ['PqrUploadModule'], # Chose the same as "name"
version = '0.0.12', # Start with a small number and increase it with every change you make
license='MIT', # Chose a license from here: https://help.github.com/articles/licensing-a-repository
description = 'Connection AT-TD', # Give a short description about your library
author = 'ppp', # Type in your name
author_email = 'petronije2002@gmail.com', # Type in your E-Mail
url = 'https://github.com/petronije2002/PqrUpload/', # Provide either the link to your github or to your website
download_url = 'https://github.com/petronije2002/PqrUpload/archive/0.0.10.tar.gz', # I explain this later on
keywords = ['Autotask', 'Topdesk', 'Connection'], # Keywords that define your package best
install_requires = [ appnope==0.1.0,
atws==0.5.3,
azure-functions==1.0.4,
backcall==0.1.0,
bleach==3.1.0,
cached-property==1.5.1,
certifi==2019.6.16,
chardet==3.0.4,
decorator==4.4.0,
docutils==0.15.2,
future==0.17.1,
idna==2.8,
imageio==2.5.0,
ipykernel==5.1.2,
ipython==7.8.0,
ipython-genutils==0.2.0,
jedi==0.15.1,
jupyter-client==5.3.1,
jupyter-core==4.5.0,
numpy==1.17.2,
pandas==0.25.1,
parso==0.5.1,
pexpect==4.7.0,
pickleshare==0.7.5,
Pillow==6.1.0,
pkginfo==1.5.0.1,
pqrupload==0.0.1,
prompt-toolkit==2.0.9,
ptyprocess==0.6.0,
Pygments==2.4.2,
python-dateutil==2.8.0,
pytz==2019.2,
pyzmq==18.1.0,
readme-renderer==24.0,
requests==2.22.0,
requests-toolbelt==0.9.1,
six==1.12.0,
suds-jurko==0.6,
tornado==6.0.3,
tqdm==4.36.1,
traitlets==4.3.2,
twine==2.0.0,
urllib3==1.25.3,
wcwidth==0.1.7,
webencodings==0.5.1,
xmltodict==0.12.0] # I get to this in a second
# 'validators',
# 'beautifulsoup4',
# ],
classifiers=[
'Development Status :: 3 - Alpha', # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
'Programming Language :: Python :: 3', #Specify which pyhton versions that you want to support
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
) | [
11748,
900,
37623,
10141,
198,
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
40406,
7,
198,
220,
1438,
796,
705,
47,
80,
81,
41592,
26796,
3256,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
1374,
345,
3706,
534,
5301,
9483,
35... | 2.292237 | 1,095 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train
This sample code is applicable to GPU and Ascend.
"""
import argparse
import os
import mindspore.nn as nn
from mindspore import Model, context
from mindspore.nn import TrainOneStepCell, Accuracy
from mindspore.boost import GradientAccumulation
import mindspore.ops as ops
from mindspore.train.callback import LossMonitor, TimeMonitor
from model_zoo.official.cv.lenet.src.dataset import create_dataset
from model_zoo.official.cv.lenet.src.lenet import LeNet5
class TrainGradAccumulationStepsCell(TrainOneStepCell):
"""construct train accu step cell"""
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='MindSpore Grad Cumulative Example')
parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU'],
help='device where the code will be implemented (default: Ascend)')
parser.add_argument('--data_path', type=str, default="./Data",
help='path where the dataset is saved')
args = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
ds_train = create_dataset(os.path.join(args.data_path, "train"), 32)
net = LeNet5(10)
net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
net_opt = nn.Momentum(net.trainable_params(), 0.01, 0.9)
time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())
train_net = nn.WithLossCell(net, net_loss)
train_net = TrainGradAccumulationStepsCell(train_net, net_opt, 1.0, 5)
model = Model(train_net)
print("============== Starting Training ==============")
model.train(10, ds_train, callbacks=[time_cb, LossMonitor()])
print("============== Starting Testing ==============")
model = Model(net, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
ds_eval = create_dataset(os.path.join(args.data_path, "test"), 32, 1)
if ds_eval.get_dataset_size() == 0:
raise ValueError("Please check dataset size > 0 and batch_size <= dataset size")
acc = model.eval(ds_eval)
print("============== {} ==============".format(acc))
| [
2,
15069,
33448,
43208,
21852,
1766,
1539,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198... | 3.03587 | 920 |
import json
from datetime import date, datetime, timedelta
from pathlib import Path
from typing import Iterator
| [
11748,
33918,
198,
6738,
4818,
8079,
1330,
3128,
11,
4818,
8079,
11,
28805,
12514,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
40806,
1352,
628,
198
] | 4.071429 | 28 |
import pytest
@pytest.mark.asyncio
@pytest.mark.asyncio
| [
11748,
12972,
9288,
628,
198,
31,
9078,
9288,
13,
4102,
13,
292,
13361,
952,
628,
198,
31,
9078,
9288,
13,
4102,
13,
292,
13361,
952,
198
] | 2.307692 | 26 |
import sys
from api.analysis import translators
from api.paths import PROJECT_ROOT
# allow for imports of translator code as "translator.module"
sys.path.append(str(PROJECT_ROOT / "translators"))
# allow for imports of adapter code as "adapter.translator.module"
sys.path.append(str(PROJECT_ROOT / "nlidbTranslator/api"))
# preserve functioning of all original imports in the translators (relative to their root)
for translator in translators:
path_to_translator = str(PROJECT_ROOT / "translators" / translator)
if path_to_translator not in sys.path:
sys.path.append(path_to_translator)
| [
11748,
25064,
198,
198,
6738,
40391,
13,
20930,
1330,
4779,
2024,
198,
6738,
40391,
13,
6978,
82,
1330,
21965,
23680,
62,
13252,
2394,
628,
198,
2,
1249,
329,
17944,
286,
33417,
2438,
355,
366,
7645,
41880,
13,
21412,
1,
198,
17597,
1... | 3.14433 | 194 |
import os
import pathlib
from invoke import (
task,
)
ROOT_PATH = pathlib.Path(__file__).resolve().parent
ROOT_PATH_STR = str(ROOT_PATH)
@task()
def clean(context):
"""
Clean the project
:param context: invoke context
"""
with context.cd(ROOT_PATH_STR):
context.run('git clean -fd')
context.run('git clean -fdX')
@task()
def pytest(context, pty=True):
"""
Run unit tests
:param context: invoke context
:param pty: True to run in a terminal, pass --no-pty to disable
"""
with context.cd(ROOT_PATH_STR):
# HACK: Poke the environment within invoke to make 'invoke pytest' work
# as expected without having to rely on pip or a VE. The
# sys.path.append() and site.addsitedir() suggested workarounds do not
# accomplish this. Pythonista purists will understandably balk at
# this, and I can sympathize, however this is a quick way for me to get
# things moving forward.
# TODO: Organize the project properly so this is no longer necessary.
os.environ["PYTHONPATH"] = ROOT_PATH_STR
context.run('pytest', pty=pty)
@task()
def flake8(context):
"""
Run the flake8 format checker
:param context: invoke context
"""
with context.cd(ROOT_PATH_STR):
context.run('flake8')
@task()
def mypy(context):
"""
Run mypy static code analysis
:param context: invoke context
"""
with context.cd(ROOT_PATH_STR):
context.run('mypy --config-file setup.cfg ./src')
@task(pre=[pytest, flake8, mypy])
def all(context):
"""
Run all tasks other than clean
"""
pass
| [
11748,
28686,
198,
11748,
3108,
8019,
198,
6738,
26342,
1330,
357,
198,
220,
220,
220,
4876,
11,
198,
8,
198,
198,
13252,
2394,
62,
34219,
796,
3108,
8019,
13,
15235,
7,
834,
7753,
834,
737,
411,
6442,
22446,
8000,
198,
13252,
2394,
... | 2.580093 | 643 |
"""Utility functions for manipulating image data.
Author: Ryan Eloff
Contact: ryan.peter.eloff@gmail.com
Date: July 2019
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from skimage.io import imread
def load_image_array(image_path):
"""Read image from file to ndarray."""
return np.asarray(imread(image_path))
# TODO(rpeloff) old code, remove if not using?
# def resize_square_crop(image_arr, size=(224, 224), resample=Image.LANCZOS):
# h, w, _ = image_arr.shape
# short_edge = min(w, h)
# h_shift = int((h - short_edge) / 2)
# w_shift = int((w - short_edge) / 2)
# image_resize = Image.fromarray(image_arr).resize(
# size, box=(w_shift, h_shift, w - w_shift, h - h_shift), resample=resample)
# return np.asarray(image_resize)
| [
37811,
18274,
879,
5499,
329,
29349,
2939,
1366,
13,
198,
198,
13838,
25,
6047,
2574,
2364,
198,
17829,
25,
374,
4121,
13,
79,
2357,
13,
417,
2364,
31,
14816,
13,
785,
198,
10430,
25,
2901,
13130,
198,
37811,
628,
198,
6738,
11593,
... | 2.617021 | 329 |
"""
@author Claas Voelcker
Utils module containing some data utility functions
"""
import numpy as np
from src.util.spn_util import get_categoricals
def get_categorical_data(spn, df, dictionary, header=1, types=False, date=False, assert_nan=False):
"""
:param spn:
:param df:
:param dictionary:
:param header:
:param types:
:param date:
:return:
"""
context = dictionary['context']
categoricals = get_categoricals(spn, context)
df_numerical = df.copy(deep=True)
for i in categoricals:
if df_numerical.iloc[:, i].isnull().values.any():
non_nan = np.where(~np.isnan(df_numerical.iloc[:, i]))
else:
non_nan = np.arange(df_numerical.iloc[:, i].size)
transformed = dictionary['features'][i]['encoder'].transform(
df_numerical.values[non_nan, i].squeeze())
df_numerical.iloc[non_nan, i] = transformed
numerical_data = df_numerical.values.astype(float)
categorical_data = {}
for i in categoricals:
non_nan = np.where(~np.isnan(df_numerical.iloc[:, i]))
data = df_numerical.iloc[non_nan].groupby(context.feature_names[i])
data = [data.get_group(x).values.astype(float) for x in data.groups]
categorical_data[i] = data
return numerical_data, categorical_data
def bin_gradient_data(data, gradients, bins):
"""
Computes a histogram of normalized gradient data
:param data: the underlying data
:param gradients: the gradients
:param bins: number of bins
:return: a histogram object
"""
bin_borders = np.linspace(-1, 1, num=bins+1)
query_list = [np.where((gradients >= bin_borders[i]) & (gradients < bin_borders[i+1])) for i in range(len(bin_borders) - 1)]
binned_data = []
for query in query_list:
binned_data.append(data[query[0],:])
return binned_data
| [
37811,
198,
31,
9800,
27166,
292,
20687,
417,
15280,
198,
198,
18274,
4487,
8265,
7268,
617,
1366,
10361,
5499,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
12351,
13,
22602,
13,
2777,
77,
62,
22602,
1330,
651,
62,
... | 2.385006 | 787 |
#!/usr/bin/env python3
"""Check the status of the Hades database.
Try to select data as the different hades users from the database to check if
the database is running and accessible.
"""
import contextlib
import logging
import os
import pwd
import sys
from typing import Iterable
from sqlalchemy import Table, exists, null, select
from sqlalchemy.engine import Engine
from sqlalchemy.exc import DBAPIError
from sqlalchemy.pool import NullPool
from hades import constants
from hades.common import db
from hades.common.cli import (
ArgumentParser, parser as common_parser, setup_cli_logging,
)
from hades.common.privileges import dropped_privileges
from hades.config.base import ConfigError
from hades.config.loader import load_config, print_config_error
logger = logging.getLogger('hades.bin.check_database')
def check_database(
engine: Engine,
user: pwd.struct_passwd,
tables: Iterable[Table],
):
"""Check a set of tables as a user.
:param engine: The SQLAlchemy engine
:param user: The user to switch to
:param tables: The tables to check
:raises DBAPIError: if errors occur.
"""
logger.info("Checking database access as user %s", user.pw_name)
try:
conn = engine.connect()
except DBAPIError as e:
logger.critical("Could not connect to database as %s: %s",
user.pw_name, exc_info=e)
raise
with contextlib.closing(conn):
for table in tables:
try:
check_table(conn, table)
except DBAPIError as e:
logger.critical("Query check for table %s as user %s failed: "
"%s", table.name, user.pw_name, exc_info=e)
raise
def check_table(conn, table):
"""Perform :sql:`SELECT NULL` on a given table."""
conn.execute(select([exists(select([null()]).select_from(table))])).scalar()
if __name__ == '__main__':
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
9787,
262,
3722,
286,
262,
36857,
6831,
13,
198,
198,
23433,
284,
2922,
1366,
355,
262,
1180,
550,
274,
2985,
422,
262,
6831,
284,
2198,
611,
198,
1169,
6831,
318,
2491,
2... | 2.630522 | 747 |
import itertools
import gpuscheduler
import argparse
import os
from itertools import product
parser = argparse.ArgumentParser(description='Compute script.')
parser.add_argument('--dry', action='store_true')
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
#s = gpuscheduler.Scheduler('/home/tim/data/git/sched/config/')
#log_base = '/home/tim/logs/'
s = gpuscheduler.HyakScheduler('/gscratch/scrubbed/dettmers/git/sched/config/', verbose=args.verbose)
log_base = '/usr/lusers/dettmers/logs/'
s.update_host_config('home', mem_threshold=1700, util_threshold=30)
s.update_host_config('office', mem_threshold=1700, util_threshold=25)
#s.update_host_config('ari', mem_threshold=2500, util_threshold=25)
cmd = 'OMP_NUM_THREADS=1 python train.py --cuda --data ../data/wikitext-2/ --dataset wt103 --adaptive --n_layer 12 --dropatt 0.0 --optim adam --tgt_len 150 --mem_len 150 --eval_tgt_len 150 --fp16 --dynamic-loss-scale --eval-interval 100 --work_dir=LM-TFM-wt103/ITER/ --log-interval 10'
args2 = {}
args2['conv'] = ''
#args2['dim2'] = ''
#args2['shape2'] = 2
args2['kernel-size'] = 3
#args2['downsample-identity'] = ''
args2['d_emb'] = 400
args2['d_model'] = 400
#args2['n_head'] = 10
args2['d_head'] = 40
args2['d_inner'] = 2000
args2['dropout'] = 0.1
args2['batch_chunk'] = 2
#args2['batch_size'] = 32
#args2['use-batchnorm'] = ''
#args2['lr'] = 0.0006
#args2['max_step'] = 3000
#args2['warmup_step'] = 100
logfolder = 'convtransformers/{0}/'.format('kernelsize3_1d_grid6')
time_hours = 1
cores_per_job = 2
for key, value in args2.items():
cmd = cmd + ' --{0} {1}'.format(key, value)
args3 = {}
#args3['d_model'] = [400]
#args3['n_head'] = [5]
#args3['d_head'] = [40]
args3['lr'] = [0.0006]#, 0.000075, 0.0005]
args3['max_step'] = [10, 20]
args3['warmup_step'] = [200, 400, 600]
args3['d_model'] = [400]
args3['d_head'] = [10]
args3['dropout'] = [0.2]
args3['batch_size'] = [32]
#args3[''] = ['use-batchnorm', '']
#args3['kernel-size'] = [3]
args_prod = []
for key, values in args3.items():
if len(key) == 0:
keyvalues = [' --{0}'.format(v) if len(v) > 0 else '{0}'.format(v) for v in values]
else:
keyvalues = [' --{0} {1}'.format(key, v) for v in values]
args_prod.append(keyvalues)
if len(args_prod) >= 2:
args_prod = list(product(*args_prod))
else:
new_args = []
for arg in args_prod[0]:
new_args.append([arg])
args_prod = new_args
num_seeds = 2
seed_offset = 0
jobs = []
for seed in range(num_seeds):
if len(args_prod) == 0: args_prod.append(('', ''))
for i, values in enumerate(args_prod):
fp16 = False
job_cmd = cmd.replace('ITER', str(i)) + ' --seed {0}'.format(seed)
for val in values:
job_cmd += ' {0}' .format(val)
jobs.append(job_cmd)
s.add_job(logfolder, 'convtransformer/pytorch/', job_cmd, time_hours, fp16, cores=cores_per_job)
host2cmd = {}
host2cmd['ofir3'] = ' --max-threads 4'
cmds = []
#cmds = ['git stash', 'git checkout 9bf460346ae133d5632066c4364e7d70437a1559'] # O1
#cmds = ['git stash', 'git checkout 85e6f84d7f5c2e92752f87994e1a71ffca4973d9'] # O2
#cmds = ['git stash', 'git checkout 24f59a80352c512106d0f3134fcf71b49ed6065e'] # O2 no float loss
#cmds = ['git stash', 'git checkout master', 'git pull']
#cmds = ['cd $GIT_HOME', 'git clone git@github.com:TimDettmers/convtransformer.git', 'cd convtransformer', 'git checkout max_out_sim', 'bash getdata.sh', 'cd pytorch']
cmds = cmds + ['git stash', 'git checkout master', 'git pull', 'git checkout conv_replication', 'git pull']
remap = {}
remap[('ofir4', 0)] = 1
remap[('ofir4', 1)] = 0
remap[('ofir1', 0)] = 1
remap[('ofir2', 1)] = 0
remap[('ofir2', 0)] = 1
remap[('ofir1', 1)] = 0
remap[('shoob', 2)] = 0
remap[('shoob', 0)] = 2
if args.dry:
for job in jobs:
print(job)
print('total jobs', len(jobs))
print('Jobs will be written to: {0}'.format(logfolder))
if not args.dry:
s.run_jobs(log_base, cmds=cmds, add_fp16=True, host2cmd_adds=host2cmd, remap=remap)
| [
11748,
340,
861,
10141,
198,
11748,
27809,
385,
1740,
18173,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
6738,
340,
861,
10141,
1330,
1720,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
11213,
11639,
7293,
1133,
4226,... | 2.29328 | 1,756 |
from yesaide import YesaideRuntimeError
class RawWorker(object):
"""Deepest worker, root of all the interactions, only ensure that
a sqlalchemy database session is available in `self._dbsession`."""
class SupervisedWorker(RawWorker):
"""Worker intended to be used inside a foreman."""
class MappingManagingWorker(SupervisedWorker):
"""Worker intended to be used inside a foreman, to take care of a
specific SQLAlchemy mapping.
"""
def _get(self, sqla_obj_id=None, sqla_obj=None, options=None, **kwargs):
"""Unified internal get for a SQLAlchemy object present in
`sqla_obj_id` or `sqla_obj`, whose type is `self._sqla_map`.
Keyword arguments:
sqla_obj_id -- id (could be a "real" integer id or an uuid)
of the requested object
sqla_obj -- SQLAlchemy object (internal use)
options -- list of SQLAchemy options to apply to the SQL
request
"""
if sqla_obj:
if not isinstance(sqla_obj, self._sqla_map):
raise ValueError("`sqla_obj` doesn't match with the " "registered type.")
return sqla_obj
elif sqla_obj_id:
query = self.base_query(**kwargs)
if self._with_id:
query = query.filter(self._sqla_map.id == sqla_obj_id)
elif self._with_uuid:
query = query.filter(self._sqla_map.uuid == sqla_obj_id)
else:
raise YesaideRuntimeError("Can't determine id field.")
if options is None:
options = []
return query.options(*options).one()
raise TypeError("No criteria provided.")
def get(self, sqla_obj_id=None, sqla_obj=None, options=None, **kwargs):
"""Unified external get for an object present in `sqla_obj_id`
or `sqla_obj`.
See also `ObjectManagingWorker._get()`.
"""
if not sqla_obj_id and not sqla_obj:
if not self._sqla_map_name:
raise TypeError("No criteria provided.")
sqla_obj_key = "{}".format(self._sqla_map_name)
if self._with_id:
sqla_obj_id_key = "{}_id".format(self._sqla_map_name)
elif self._with_uuid:
sqla_obj_id_key = "{}_uuid".format(self._sqla_map_name)
else:
raise YesaideRuntimeError("Can't determine id field.")
if sqla_obj_key in kwargs:
sqla_obj = kwargs[sqla_obj_key]
elif sqla_obj_id_key in kwargs:
sqla_obj_id = kwargs[sqla_obj_id_key]
else:
raise TypeError("No criteria provided.")
return self._get(sqla_obj_id, sqla_obj, options, **kwargs)
def base_query(self, **kwargs):
"""Base sqlalchemy query for this kind of object
Subclasses can override this method to implement custom logic
(filtering inactive objects, security features, etc).
"""
return self._dbsession.query(self._sqla_map)
def serialize(self, items, **kwargs):
"""Transform the given item into an easily serializable item.
Most of the time it transforms a sqlalchemy mapped object into a
dict with strings as keys and strings as values.
A simple implementation would be:
return {'id': item.id}
"""
raise NotImplementedError("Subclasses must implement `serialize()`.")
| [
6738,
3763,
64,
485,
1330,
3363,
64,
485,
41006,
12331,
628,
198,
4871,
16089,
12468,
263,
7,
15252,
2599,
198,
220,
220,
220,
37227,
29744,
395,
8383,
11,
6808,
286,
477,
262,
12213,
11,
691,
4155,
326,
198,
220,
220,
220,
257,
441... | 2.228316 | 1,568 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-27 12:52
from __future__ import unicode_literals
from django.db import migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
18,
319,
2177,
12,
940,
12,
1983,
1105,
25,
4309,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.660714 | 56 |
'''
剑指 Offer 16. 数值的整数次方
实现 pow(x, n) ,即计算 x 的 n 次幂函数(即,xn)。不得使用库函数,同时不需要考虑大数问题。
'''
s = Solution()
print(s.myPow(2.0, 10) == 1024.0)
print(s.myPow(2.1, 3) == 9.261)
print(s.myPow(2.0, -2) == 0.25)
| [
7061,
6,
198,
30298,
239,
162,
234,
229,
33085,
1467,
13,
10545,
243,
108,
161,
222,
120,
21410,
46763,
112,
46763,
108,
162,
105,
94,
43095,
198,
22522,
252,
163,
236,
108,
7182,
7,
87,
11,
299,
8,
27332,
120,
234,
39355,
111,
16... | 1.069149 | 188 |
import json
from pygments import highlight
from pygments.formatters.terminal import TerminalFormatter
from pygments.lexers.data import JsonLexer
import taro.client
from taro import dto, util
from taro.jobs.job import JobInfoCollection,JobInfo
from taroapp import printer
from taroapp.view import instance as view_inst
| [
11748,
33918,
198,
198,
6738,
12972,
11726,
1330,
7238,
198,
6738,
12972,
11726,
13,
18982,
1010,
13,
23705,
282,
1330,
24523,
8479,
1436,
198,
6738,
12972,
11726,
13,
2588,
364,
13,
7890,
1330,
449,
1559,
45117,
263,
198,
198,
11748,
2... | 3.606742 | 89 |
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, Tuple, Union
RegisterValue = Union[int, bool]
@dataclass
__all__ = [
"RegisterType",
"ModbusReadSession",
"RegisterValueType",
]
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2214,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
360,
713,
11,
309,
29291,
11,
4479,
628,
628,
198,
38804,
11395,
796,
4479,
58,
600,
11,
20512,
60,
628,
198,
31,
1... | 2.890244 | 82 |
# Copyright (c) 2009-2016 Simon van Heeringen <simon.vanheeringen@gmail.com>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
""" Configuration for GimmeMotifs """
import configparser
import sysconfig
import xdg
import os
import sys
import logging
import pkg_resources
import inspect
from gimmemotifs.shutils import which
logger = logging.getLogger("gimme.config")
### CONSTANTS ###
BG_TYPES = ["random", "genomic", "gc", "promoter"]
FA_VALID_BGS = ["random", "promoter", "gc", "custom", "genomic"]
BED_VALID_BGS = ["random", "genomic", "gc", "promoter", "custom"]
BG_RANK = {"custom":1, "promoter":2, "gc":3, "random":4, "genomic":5}
FASTA_EXT = [".fasta", ".fa", ".fsa"]
DIRECT_NAME = "direct"
INDIRECT_NAME = "indirect"
CACHE_DIR = os.path.join(xdg.XDG_CACHE_HOME, "gimmemotifs")
CONFIG_DIR = os.path.join(xdg.XDG_CONFIG_HOME, "gimmemotifs")
MOTIF_CLASSES = ["MDmodule", "MEME", "MEMEW", "Weeder", "GADEM", "MotifSampler", "Trawler", "Improbizer", "BioProspector", "Posmo", "ChIPMunk", "AMD", "HMS", "Homer", "XXmotif"]
class MotifConfig(object):
"""Configuration object for the gimmemotifs module."""
__shared_state = {}
prefix = sysconfig.get_config_var("prefix")
# Default config that is installed with GimmeMotifs
default_config = pkg_resources.resource_filename(
'data', 'cfg/gimmemotifs.default.cfg')
#
package_dir = os.path.dirname(
os.path.abspath(
inspect.getfile(inspect.currentframe())
))
user_config = os.path.join(CONFIG_DIR, "gimmemotifs.cfg")
config_dir = "share/gimmemotifs/gimmemotifs.cfg"
configs = [
user_config,
]
config = None
TOOL_SECTION = "tools"
def parse_denovo_params(user_params=None):
"""Return default GimmeMotifs parameters.
Defaults will be replaced with parameters defined in user_params.
Parameters
----------
user_params : dict, optional
User-defined parameters.
Returns
-------
params : dict
"""
config = MotifConfig()
if user_params is None:
user_params = {}
params = config.get_default_params()
params.update(user_params)
if params.get("torque"):
logger.debug("Using torque")
else:
logger.debug("Using multiprocessing")
params["background"] = [x.strip() for x in params["background"].split(",")]
logger.debug("Parameters:")
for param, value in params.items():
logger.debug(" %s: %s", param, value)
# Maximum time?
if params["max_time"]:
try:
max_time = params["max_time"] = float(params["max_time"])
except Exception:
logger.debug("Could not parse max_time value, setting to no limit")
params["max_time"] = -1
if params["max_time"] > 0:
logger.debug("Time limit for motif prediction: %0.2f hours", max_time)
params["max_time"] = 3600 * params["max_time"]
logger.debug("Max_time in seconds %0.0f", max_time)
else:
logger.debug("No time limit for motif prediction")
return params
#if __name__ == "__main__":
# m = MotifConfig()
# print m.is_configured("meme")
| [
2,
15069,
357,
66,
8,
3717,
12,
5304,
11288,
5719,
679,
1586,
268,
1279,
14323,
261,
13,
10438,
258,
1586,
268,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
770,
8265,
318,
1479,
3788,
13,
921,
460,
17678,
4163,
340,
290,
14,
273,
13... | 2.451517 | 1,351 |
# Generated by Django 4.0 on 2022-01-08 14:12
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
604,
13,
15,
319,
33160,
12,
486,
12,
2919,
1478,
25,
1065,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
628
] | 2.904762 | 42 |
import pandas as pd
import igraph
from ndex2.nice_cx_network import NiceCXNetwork
import ndex2.client as nc
import ndex2
import networkx as nx
from fa2 import ForceAtlas2
from .utils import *
import math
import json
import os
import numpy as np
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
45329,
1470,
198,
6738,
299,
67,
1069,
17,
13,
44460,
62,
66,
87,
62,
27349,
1330,
18460,
34,
55,
26245,
198,
11748,
299,
67,
1069,
17,
13,
16366,
355,
299,
66,
198,
11748,
299,
67,
106... | 2.940476 | 84 |
# Import library that lets you control the Pi's GPIO pins
import RPi.GPIO as io
# Import time for delays
from time import sleep
# Disables messages about GPIO pins already being in use
io.setwarnings(False)
# Numbering scheme that corresponds to breakout board and pin layout
io.setmode(io.BCM)
led_io_pin = 17
button_io_pin = 23
# Specifies that led_io_pin will be an output
io.setup(led_io_pin, io.OUT)
# Specifies that button_io_pin will be an input
io.setup(button_io_pin, io.IN)
button_on = False
previous_button_input = 0
while True:
# Get the state of the button input
button_input = io.input(button_io_pin)
# Debounce the button
if (previous_button_input == 0 and button_input):
# Toggle the button on and off
button_on = not button_on
previous_button_input = button_input
sleep(0.05)
if button_on:
# Turn the LED on
io.output(led_io_pin, io.HIGH)
else:
# Turn the LED off
io.output(led_io_pin, io.LOW)
| [
2,
17267,
5888,
326,
8781,
345,
1630,
262,
13993,
338,
50143,
20567,
198,
11748,
25812,
72,
13,
16960,
9399,
355,
33245,
220,
198,
2,
17267,
640,
329,
16119,
220,
198,
6738,
640,
1330,
3993,
198,
198,
2,
3167,
2977,
6218,
546,
50143,
... | 2.608808 | 386 |
import urllib2
url = "http://www.google.com/"
# get response from url
response = urllib2.urlopen(url)
fh = open('downloaded_file.html', "w")
fh.write(response.read())
fh.close()
| [
11748,
2956,
297,
571,
17,
198,
6371,
796,
366,
4023,
1378,
2503,
13,
13297,
13,
785,
30487,
198,
2,
651,
2882,
422,
19016,
198,
26209,
796,
2956,
297,
571,
17,
13,
6371,
9654,
7,
6371,
8,
198,
69,
71,
796,
1280,
10786,
2902,
1457... | 2.594203 | 69 |