content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from django.contrib import admin
from .models import Token
@admin.register(Token)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
29130,
628,
198,
31,
28482,
13,
30238,
7,
30642,
8,
198
] | 3.4 | 25 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id$
# Author: David Goodger <goodger@python.org>
# Copyright: (C) 1998-2015 by David J. Goodger
# License: GPL 2 (see __init__.py)
"""
Concrete quasi-polytrigs (order 1-3) puzzles.
"""
from puzzler.puzzles.polytwigs import (
QuasiPolytwigs123, OneSidedQuasiPolytwigs123)
class QuasiPolytwigs123RoundedRectangle9x2(QuasiPolytwigs123):
"""many solutions"""
width = 10
height = 6
svg_rotation = 0
class QuasiPolytwigs123HexagonRing1(QuasiPolytwigs123):
"""many solutions"""
width = 6
height = 6
holes = set(((1,2,1), (4,2,1)))
class QuasiPolytwigs123HexagonRing2(QuasiPolytwigs123HexagonRing1):
"""many solutions"""
holes = set(((1,4,1), (2,4,1)))
class QuasiPolytwigs123HexagonRing3(QuasiPolytwigs123HexagonRing2):
"""many solutions"""
holes = set(((1,4,0), (2,4,2)))
class QuasiPolytwigs123HexagonRing4(QuasiPolytwigs123HexagonRing1):
"""many solutions"""
holes = set(((2,4,1), (3,4,2)))
svg_rotation = 0
class QuasiPolytwigs123_5x3ParallelogramRing(QuasiPolytwigs123):
"""many solutions"""
width = 6
height = 4
class QuasiPolytwigs123_6x3TrapezoidRing(QuasiPolytwigs123):
"""many solutions"""
width = 7
height = 4
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
720,
7390,
3,
198,
198,
2,
6434,
25,
3271,
4599,
1362,
1279,
11274,
1362,
31,
29412,
13,
2398,
29,
198,
2,
15069... | 2.415414 | 532 |
"""
Script that trains MPNN models on qm8 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
# Load QM8 dataset
tasks, datasets, transformers = dc.molnet.load_qm8(featurizer='MP')
train_dataset, valid_dataset, test_dataset = datasets
# Fit models
metric = [dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")]
# Batch size of models
batch_size = 32
n_atom_feat = 70
n_pair_feat = 8
model = dc.models.MPNNTensorGraph(
len(tasks),
n_atom_feat=n_atom_feat,
n_pair_feat=n_pair_feat,
T=5,
M=10,
batch_size=batch_size,
learning_rate=0.0001,
use_queue=False,
mode="regression")
# Fit trained model
model.fit(train_dataset, nb_epoch=100)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, metric, transformers)
valid_scores = model.evaluate(valid_dataset, metric, transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
| [
37811,
198,
7391,
326,
13404,
4904,
6144,
4981,
319,
10662,
76,
23,
27039,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62... | 2.626761 | 426 |
from Instrucciones.Instruccion import Instruccion
from Expresion.Relacional import *
| [
201,
198,
6738,
2262,
622,
535,
295,
274,
13,
6310,
622,
535,
295,
1330,
2262,
622,
535,
295,
201,
198,
201,
198,
6738,
5518,
411,
295,
13,
6892,
330,
1538,
1330,
1635,
201,
198,
201,
198,
201,
198,
201,
198
] | 2.425 | 40 |
import numpy as np
import pandas as pd
import torch
import scipy
import torch
from torch import nn
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
from sklearn.metrics import make_scorer, accuracy_score
from pysembles.Utils import TransformTensorDataset
'''
Some common metrics and helper functions.
TODO: Add detailed documentation for each function
'''
# def diversity(model, x, y):
# # This is basically a copy/paste from the GNCLClasifier regularizer, which can also be used for
# # other classifier. I tried to do it with numpy first and I think it should work but I did not
# # really understand numpy's bmm variant, so I opted for the safe route here.
# # Also, pytorch seems a little faster due to gpu support
# if not hasattr(model, "estimators_"):
# return 0
# model.eval()
# x_tensor = torch.tensor(x)
# y_tensor = torch.tensor(y)
# dataset = TransformTensorDataset(x_tensor, y_tensor, transform=None)
# test_loader = torch.utils.data.DataLoader(dataset, batch_size = model.batch_size)
# diversities = []
# for batch in test_loader:
# data, target = batch[0], batch[1]
# data, target = data.cuda(), target.cuda()
# data, target = Variable(data), Variable(target)
# with torch.no_grad():
# f_bar, base_preds = model.forward_with_base(data)
# if isinstance(model.loss_function, nn.MSELoss):
# n_classes = f_bar.shape[1]
# n_preds = f_bar.shape[0]
# eye_matrix = torch.eye(n_classes).repeat(n_preds, 1, 1).cuda()
# D = 2.0*eye_matrix
# elif isinstance(model.loss_function, nn.NLLLoss):
# n_classes = f_bar.shape[1]
# n_preds = f_bar.shape[0]
# D = torch.eye(n_classes).repeat(n_preds, 1, 1).cuda()
# target_one_hot = torch.nn.functional.one_hot(target, num_classes = n_classes).type(model.get_float_type())
# eps = 1e-7
# diag_vector = target_one_hot*(1.0/(f_bar**2+eps))
# D.diagonal(dim1=-2, dim2=-1).copy_(diag_vector)
# elif isinstance(model.loss_function, nn.CrossEntropyLoss):
# n_preds = f_bar.shape[0]
# n_classes = f_bar.shape[1]
# f_bar_softmax = nn.functional.softmax(f_bar,dim=1)
# D = -1.0*torch.bmm(f_bar_softmax.unsqueeze(2), f_bar_softmax.unsqueeze(1))
# diag_vector = f_bar_softmax*(1.0-f_bar_softmax)
# D.diagonal(dim1=-2, dim2=-1).copy_(diag_vector)
# else:
# D = torch.tensor(1.0)
# batch_diversities = []
# for pred in base_preds:
# diff = pred - f_bar
# covar = torch.bmm(diff.unsqueeze(1), torch.bmm(D, diff.unsqueeze(2))).squeeze()
# div = 1.0/model.n_estimators * 0.5 * covar
# batch_diversities.append(div)
# diversities.append(torch.stack(batch_diversities, dim = 1))
# div = torch.cat(diversities,dim=0)
# return div.sum(dim=1).mean(dim=0).item()
# # dsum = torch.sum(torch.cat(diversities,dim=0), dim = 0)
# # return dsum
# # base_preds = []
# # for e in model.estimators_:
# # ypred = apply_in_batches(e, x, 128)
# # base_preds.append(ypred)
# # f_bar = np.mean(base_preds, axis=0)
# # if isinstance(model.loss_function, nn.MSELoss):
# # n_classes = f_bar.shape[1]
# # n_preds = f_bar.shape[0]
# # eye_matrix = np.eye(n_classes).repeat(n_preds, 1, 1)
# # D = 2.0*eye_matrix
# # elif isinstance(model.loss_function, nn.NLLLoss):
# # n_classes = f_bar.shape[1]
# # n_preds = f_bar.shape[0]
# # D = np.eye(n_classes).repeat(n_preds, 1, 1)
# # target_one_hot = np.zeros((y.size, n_classes))
# # target_one_hot[np.arange(y.size),y] = 1
# # eps = 1e-7
# # diag_vector = target_one_hot*(1.0/(f_bar**2+eps))
# # #D[np.diag_indices(D.shape[0])] = diag_vector
# # for i in range(D.shape[0]):
# # np.fill_diagonal(D[i,:], diag_vector[i,:])
# # elif isinstance(model.loss_function, nn.CrossEntropyLoss):
# # n_preds = f_bar.shape[0]
# # n_classes = f_bar.shape[1]
# # f_bar_softmax = scipy.special.softmax(f_bar,axis=1)
# # D = -1.0 * np.expand_dims(f_bar_softmax, axis=2) @ np.expand_dims(f_bar_softmax, axis=1)
# # # D = -1.0*torch.bmm(f_bar_softmax.unsqueeze(2), f_bar_softmax.unsqueeze(1))
# # diag_vector = f_bar_softmax*(1.0-f_bar_softmax)
# # for i in range(D.shape[0]):
# # np.fill_diagonal(D[i,:], diag_vector[i,:])
# # else:
# # D = np.array([1.0])
# # diversities = []
# # for pred in base_preds:
# # # https://stackoverflow.com/questions/63301019/dot-product-of-two-numpy-arrays-with-3d-vectors
# # # https://stackoverflow.com/questions/51479148/how-to-perform-a-stacked-element-wise-matrix-vector-multiplication-in-numpy
# # diff = pred - f_bar
# # tmp = np.sum(D * diff[:,:,None], axis=1)
# # covar = np.sum(tmp*diff,axis=1)
# # # covar = torch.bmm(diff.unsqueeze(1), torch.bmm(D, diff.unsqueeze(2))).squeeze()
# # div = 1.0/model.n_estimators * 0.5 * covar
# # diversities.append(np.mean(div))
# #return np.sum(diversities)
# def loss(model, x, y):
# model.eval()
# x_tensor = torch.tensor(x)
# y_tensor = torch.tensor(y)
# dataset = TransformTensorDataset(x_tensor, y_tensor, transform=None)
# test_loader = torch.utils.data.DataLoader(dataset, batch_size = model.batch_size)
# losses = []
# for batch in test_loader:
# data, target = batch[0], batch[1]
# data, target = data.cuda(), target.cuda()
# data, target = Variable(data), Variable(target)
# with torch.no_grad():
# pred = model(data)
# losses.append(model.loss_function(pred, target).mean().item())
# return np.mean(losses)
# def avg_loss(model, x, y):
# if not hasattr(model, "estimators_"):
# return 0
# model.eval()
# x_tensor = torch.tensor(x)
# y_tensor = torch.tensor(y)
# dataset = TransformTensorDataset(x_tensor, y_tensor, transform=None)
# test_loader = torch.utils.data.DataLoader(dataset, batch_size = model.batch_size)
# losses = []
# for batch in test_loader:
# data, target = batch[0], batch[1]
# data, target = data.cuda(), target.cuda()
# data, target = Variable(data), Variable(target)
# with torch.no_grad():
# f_bar, base_preds = model.forward_with_base(data)
# ilosses = []
# for base in base_preds:
# ilosses.append(model.loss_function(base, target).mean().item())
# losses.append(np.mean(ilosses))
# return np.mean(losses)
# def avg_accurcay(model, x, y):
# if not hasattr(model, "estimators_"):
# return 0
# model.eval()
# x_tensor = torch.tensor(x)
# y_tensor = torch.tensor(y)
# dataset = TransformTensorDataset(x_tensor, y_tensor, transform=None)
# test_loader = torch.utils.data.DataLoader(dataset, batch_size = model.batch_size)
# accuracies = []
# for batch in test_loader:
# data, target = batch[0], batch[1]
# data, target = data.cuda(), target.cuda()
# data, target = Variable(data), Variable(target)
# with torch.no_grad():
# _, base_preds = model.forward_with_base(data)
# iaccuracies = []
# for base in base_preds:
# iaccuracies.append( 100.0*(base.argmax(1) == target).type(model.get_float_type()) )
# accuracies.append(torch.cat(iaccuracies,dim=0).mean().item())
# return np.mean(accuracies)
# # accuracies = torch.cat(accuracies,dim=0)
# # return accuracies.mean().item() | [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28034,
198,
11748,
629,
541,
88,
198,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
11748,
... | 2.00879 | 3,982 |
"""Provide Exception classes."""
from dataclasses import dataclass
from pathlib import Path
from typing import Any
@dataclass(frozen=True)
class SettingNotFoundError(Exception):
"""Error class for missing config file."""
conf_path: Path
sample_conf_path: Path
def __str__(self) -> str:
"""Create message for print this object."""
msg = (
"Please make sure "
"that you have created the environment setting file."
"The preferences file should be located at this location: "
f"{str(self.conf_path)}"
"\n"
"If you haven't created the file yet, "
"please copy and edit this sample configuration file: "
f"{str(self.sample_conf_path)}"
)
return msg
@dataclass(frozen=True)
class ResourceNotFoundError(Exception):
"""Error class when resource not found."""
target_resource: str
search_condition: dict[str, Any]
def __str__(self) -> str:
"""Create message for print this object."""
return f"{self.target_resource} not found. Search condition is {self.search_condition}."
| [
37811,
15946,
485,
35528,
6097,
526,
15931,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
4377,
628,
198,
31,
19608,
330,
31172,
7,
69,
42005,
28,
17821,
8,
198,
4871,
... | 2.624714 | 437 |
"""
Migration for the Submitty system.
Adds bool to specify if the machine is a worker to Submitty.config
"""
from pathlib import Path
import json
import os
# no need for down as email_enabled is not used in previous builds
| [
37811,
198,
44,
4254,
329,
262,
3834,
76,
9760,
1080,
13,
198,
46245,
20512,
284,
11986,
611,
262,
4572,
318,
257,
8383,
284,
3834,
76,
9760,
13,
11250,
198,
37811,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
33918,
198,
11748,
28... | 3.766667 | 60 |
# Copyright (c) 2009-2017 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
R""" HPMC utilities
"""
from __future__ import print_function
from __future__ import division
# If numpy is unavailable, some utilities will not work
try:
import numpy as np
except ImportError:
np = None
import hoomd
import sys
import colorsys as cs
import re
#replace range with xrange for python3 compatibility
if sys.version_info[0]==2:
range=xrange
# Multiply two quaternions
# Apply quaternion multiplication per http://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation
# (requires numpy)
# \param q1 quaternion
# \param q2 quaternion
# \returns q1*q2
# Rotate a vector by a unit quaternion
# Quaternion rotation per http://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation
# (requires numpy)
# \param q rotation quaternion
# \param v 3d vector to be rotated
# \returns q*v*q^{-1}
# Construct a box matrix from a hoomd data.boxdim object
# (requires numpy)
# \param box hoomd boxdim object
# \returns numpy matrix that transforms lattice coordinates to Cartesian coordinates
# Given a set of lattice vectors, rotate to produce an upper triangular right-handed box
# as a hoomd boxdim object and a rotation quaternion that brings particles in the original coordinate system to the new one.
# The conversion preserves handedness, so it is left to the user to provide a right-handed set of lattice vectors
# (E.g. returns (data.boxdim(Lx=10, Ly=20, Lz=30, xy=1.0, xz=0.5, yz=0.1), q) )
# (requires numpy)
# \param a1 first lattice vector
# \param a2 second lattice vector
# \param a3 third lattice vector
# \returns (box, q) tuple of boxdim object and rotation quaternion
## Read a single (final) frame pos file and return some data structures.
# Returns particle positions, orientations, types, definitions, and a hoomd.data.boxdim object,
# along with the rotation quaternion that rotates the input to output boxes.
# Note that the orientations array will have (meaningless) quaternions for spheres
# If you need to read multiple frames, consider the POS trajectory reader in Freud.
# (requires numpy)
# \param fname input file name
# \param ndim number of dimensions (default 3) for boxdim object
# \returns dict with keys positions,orientations,types,param_dict,box,q
## Given an HPMC NPT system as input, perform compression to search for dense packings
# This class conveniently encapsulates the scripting and heuristics to search
# for densest packings.
# The read_pos() module may also be of use.
# (requires numpy)
## Construct a hpmc.util.compress object.
# Attach to a hoomd hpmc integrator instance with an existing hpmc.update.npt object.
# \param mc hpmc integrator object
# \param npt_updater hpmc.update.npt object
# \param ptypes list of particle type names
# \param pnums list of number of particles of each type
# \param pvolumes list of particle volumes for each type
# \param pverts list of sets of vertices for each particle type (set empty list for spheres, etc)
# \param num_comp_steps number of steps over which to ramps up pressure (default 1e6)
# \param refine_steps number of steps between checking eta at high pressure (default num_comp_steps/10)
# \param log_file file in which to log hpmc stuff
# \param pmin low pressure end of pressure schedule (default 10)
# \param pmax high pressure end of pressure schedule (default 1e6)
# \param pf_tol tolerance allowed in checking for convergence
# \param allowShearing allow box to shear when searching for dense packing (default True)
# \param tuner_period interval sufficient to get statistics on at least ~10,000 particle overlaps and ~100 box changes (default 1000)
# \param relax number of steps to run at initial box size at each sweep before starting pressure schedule (default 10,000)
# \param quiet suppress the noiser aspects of hoomd during compression (default True)
## Run one or more compression cycles
# \param num_comp_cycles number of compression cycles to run (default 1)
# \returns tuple of lists of packing fractions and corresponding snapshot objects.
## snapshot is a python struct for now, will eventually be replaced with by the hoomd snapshot
# For now, this will be used by the compressor. snapshots can be written to file to_pos method
# In order to write out, the snapshot must be given particle data via the integrator's
# setup_pos_writer() method or all particles will be output as spheres.
# (requires numpy)
#
# \par Quick Example
# \code
# system = init.initmethod(...);
# mc = hpmc.integrate.shape(...);
# mc.shape_param[name].set(...);
# run(...);
# mysnap = hpmc.util.snapshot();
# mc.setup_pos_writer(mysnap, colors=dict(A='ff5984ff'));
# mysnap.to_pos(filename);
# \endcode
## constructor
## \internal Set up particle type definition strings for pos file output
# This method is intended only to be called by an integrator instance as a result of
# a call to the integrator's mc.setup_pos_writer() method.
# \param ptype particle type name (string)
# \param shapedef pos file particle macro for shape parameters through color
# \returns None
## write to a pos file
# \param filename string name of output file in injavis/incsim pos format
# \returns None
## write to a zip file
# Not yet implemented
# \param filename string name of output file in injavis/incsim pos format
# \returns None
class tune(object):
R""" Tune mc parameters.
``hoomd.hpmc.util.tune`` provides a general tool to observe Monte Carlo move
acceptance rates and adjust the move sizes when called by a user script. By
default, it understands how to read and adjust the trial move domain for
translation moves and rotation moves for an ``hpmc.integrate`` instance.
Other move types for integrators or updaters can be handled with a customized
tunable map passed when creating the tuner or in a subclass definition. E.g.
see use an implementation of :py:class:`.tune_npt`
Args:
obj: HPMC Integrator or Updater instance
tunables (list): list of strings naming parameters to tune. By default,
allowed element values are 'd' and 'a'.
max_val (list): maximum allowed values for corresponding tunables
target (float): desired acceptance rate
max_scale (float): maximum amount to scale a parameter in a single update
gamma (float): damping factor (>= 0.0) to keep from scaling parameter values too fast
type (str): Name of a single hoomd particle type for which to tune move sizes.
If None (default), all types are tuned with the same statistics.
tunable_map (dict): For each tunable, provide a dictionary of values and methods to be used by the tuner (see below)
args: Additional positional arguments
kwargs: Additional keyword arguments
Example::
mc = hpmc.integrate.convex_polyhedron()
mc.set_params(d=0.01, a=0.01, move_ratio=0.5)
tuner = hpmc.util.tune(mc, tunables=['d', 'a'], target=0.2, gamma=0.5)
for i in range(10):
run(1e4)
tuner.update()
Note:
You should run enough steps to get good statistics for the acceptance ratios. 10,000 trial moves
seems like a good number. E.g. for 10,000 or more particles, tuning after a single timestep should be fine.
For npt moves made once per timestep, tuning as frequently as 1,000 timesteps could get a rough convergence
of acceptance ratios, which is probably good enough since we don't really know the optimal acceptance ratio, anyway.
Warning:
There are some sanity checks that are not performed. For example, you shouldn't try to scale 'd' in a single particle simulation.
Details:
If ``gamma == 0``, each call to :py:meth:`.update` rescales the current
value of the tunable\(s\) by the ratio of the observed acceptance rate to the
target value. For ``gamma > 0``, the scale factor is the reciprocal of
a weighted mean of the above ratio with 1, according to
scale = (1.0 + gamma) / (target/acceptance + gamma)
The names in ``tunables`` must match one of the keys in ``tunable_map``,
which in turn correspond to the keyword parameters of the MC object being
updated.
``tunable_map`` is a :py:class:`dict` of :py:class:`dict`. The keys of the
outer :py:class:`dict` are strings that can be specified in the ``tunables``
parameter. The value of this outer :py:class:`dict` is another :py:class:`dict`
with the following four keys: 'get', 'acceptance', 'set', and 'maximum'.
A default ``tunable_map`` is provided but can be modified or extended by setting
the following dictionary key/value pairs in the entry for tunable.
* get (:py:obj:`callable`): function called by tuner (no arguments) to retrieve curent tunable value
* acceptance (:py:obj:`callable`): function called by tuner (no arguments) to get relevant acceptance rate
* set (:py:obj:`callable`): function to call to set new value (optional). Must take one argument (the new value).
If not provided, ``obj.set_params(tunable=x)`` will be called to set the new value.
* maximum (:py:class:`float`): maximum value the tuner may set for the tunable parameter
The default ``tunable_map`` defines the :py:obj:`callable` for 'set' to call
:py:meth:`hoomd.hpmc.integrate.mode_hpmc.set_params` with ``tunable={type: newval}``
instead of ``tunable=newval`` if the ``type`` argument is given when creating
the ``tune`` object.
"""
def update(self):
R""" Calculate and set tunable parameters using statistics from the run just completed.
"""
hoomd.util.quiet_status()
# Note: we are not doing any checking on the quality of our retrieved statistics
newquantities = dict()
# For each of the tunables we are watching, compute the new value we're setting that tunable to
for tunable in self.tunables:
oldval = self.tunables[tunable]['get']()
acceptance = self.tunables[tunable]['acceptance']()
max_val = self.tunables[tunable]['maximum']
if (acceptance > 0.0):
# find (damped) scale somewhere between 1.0 and acceptance/target
scale = ((1.0 + self.gamma) * acceptance) / (self.target + self.gamma * acceptance)
else:
# acceptance rate was zero. Try a parameter value an order of magnitude smaller
scale = 0.1
if (scale > self.max_scale):
scale = self.max_scale
# find new value
if (oldval == 0):
newval = 1e-5
hoomd.context.msg.warning("Oops. Somehow {0} went to zero at previous update. Resetting to {1}.\n".format(tunable, newval))
else:
newval = float(scale * oldval)
# perform sanity checking on newval
if (newval == 0.0):
newval = float(1e-6)
if (newval > max_val):
newval = max_val
self.tunables[tunable]['set'](float(newval))
hoomd.util.unquiet_status();
class tune_npt(tune):
R""" Tune the HPMC :py:class:`hoomd.hpmc.update.boxmc` using :py:class:`.tune`.
This is a thin wrapper to ``tune`` that simply defines an alternative
``tunable_map`` dictionary. In this case, the ``obj`` argument must be an instance of
:py:class:`hoomd.hpmc.update.boxmc`. Several tunables are defined.
'dLx', 'dLy', and 'dLz' use the acceptance rate of volume moves to set
``delta[0]``, ``delta[1]``, and ``delta[2]``, respectively in a call to :py:meth:`hoomd.hpmc.update.boxmc.length`.
'dV' uses the volume acceptance to call :py:meth:`hoomd.hpmc.update.boxmc.volume`.
'dxy', 'dxz', and 'dyz' tunables use the shear acceptance to set
``delta[0]``, ``delta[1]``, and ``delta[2]``, respectively in a call to
:py:meth:`hoomd.hpmc.update.boxmc.shear`.
Refer to the documentation for :py:class:`hoomd.hpmc.update.boxmc` for
information on how these parameters are used, since they are not all
applicable for a given use of ``boxmc``.
Note:
A bigger damping factor gamma may be appropriate for tuning box volume
changes because there may be multiple parameters affecting each acceptance rate.
Example::
mc = hpmc.integrate.convex_polyhedron()
mc.set_params(d=0.01, a=0.01, move_ratio=0.5)
updater = hpmc.update.boxmc(mc, betaP=10)
updater.length(0.1, weight=1)
tuner = hpmc.util.tune_npt(updater, tunables=['dLx', 'dLy', 'dLz'], target=0.3, gamma=1.0)
for i in range(10):
run(1e4)
tuner.update()
"""
| [
2,
15069,
357,
66,
8,
3717,
12,
5539,
383,
3310,
658,
286,
262,
2059,
286,
7055,
198,
2,
770,
2393,
318,
636,
286,
262,
40115,
2662,
35,
12,
17585,
1628,
11,
2716,
739,
262,
347,
10305,
513,
12,
2601,
682,
13789,
13,
198,
198,
4... | 2.914357 | 4,437 |
#!/usr/bin/env python
from cos_sim import *
from topic_model import *
from vectorizer import *
from algorithm_wrapper import *
if __name__ == '__main__':
print "Algorithm module!"
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
8615,
62,
14323,
1330,
1635,
198,
6738,
7243,
62,
19849,
1330,
1635,
198,
6738,
15879,
7509,
1330,
1635,
198,
6738,
11862,
62,
48553,
1330,
1635,
198,
198,
361,
11593,
3672,
834,
... | 3.206897 | 58 |
"""Methods for normalizing dpayd post metadata."""
#pylint: disable=line-too-long
import math
import ujson as json
from funcy.seqs import first
from hive.utils.normalize import bbd_amount, rep_log10, safe_img_url, parse_time, utc_timestamp
def post_basic(post):
"""Basic post normalization: json-md, tags, and flags."""
md = {}
try:
md = json.loads(post['json_metadata'])
if not isinstance(md, dict):
md = {}
except Exception:
pass
thumb_url = ''
if md and 'image' in md and md['image']:
thumb_url = safe_img_url(first(md['image'])) or ''
if thumb_url:
md['image'] = [thumb_url]
else:
del md['image']
# clean up tags, check if nsfw
tags = [post['category']]
if md and 'tags' in md and isinstance(md['tags'], list):
tags = tags + md['tags']
tags = set(list(map(lambda tag: (str(tag) or '').strip('# ').lower()[:32], tags))[0:5])
tags.discard('')
is_nsfw = 'nsfw' in tags
body = post['body']
if body.find('\x00') > -1:
#url = post['author'] + '/' + post['permlink']
body = body.replace('\x00', '[NUL]')
# payout date is last_payout if paid, and cashout_time if pending.
is_paidout = (post['cashout_time'][0:4] == '1969')
payout_at = post['last_payout'] if is_paidout else post['cashout_time']
# payout is declined if max_payout = 0, or if 100% is burned
is_payout_declined = False
if bbd_amount(post['max_accepted_payout']) == 0:
is_payout_declined = True
elif len(post['beneficiaries']) == 1:
benny = first(post['beneficiaries'])
if benny['account'] == 'null' and int(benny['weight']) == 10000:
is_payout_declined = True
# payout entirely in SP
is_full_power = int(post['percent_dpay_dollars']) == 0
return {
'json_metadata': md,
'image': thumb_url,
'tags': tags,
'is_nsfw': is_nsfw,
'body': body,
'preview': body[0:1024],
'payout_at': payout_at,
'is_paidout': is_paidout,
'is_payout_declined': is_payout_declined,
'is_full_power': is_full_power,
}
def post_legacy(post):
"""Return legacy fields which may be useful to save.
Some UI's may want to leverage these, but no point in indexing.
"""
_legacy = ['id', 'url', 'root_comment', 'root_author', 'root_permlink',
'root_title', 'parent_author', 'parent_permlink',
'max_accepted_payout', 'percent_dpay_dollars',
'curator_payout_value', 'allow_replies', 'allow_votes',
'allow_curation_rewards', 'beneficiaries']
return {k: v for k, v in post.items() if k in _legacy}
def post_payout(post):
"""Get current vote/payout data and recalculate trend/hot score."""
# total payout (completed and/or pending)
payout = sum([
bbd_amount(post['total_payout_value']),
bbd_amount(post['curator_payout_value']),
bbd_amount(post['pending_payout_value']),
])
# `active_votes` was temporarily missing in dev -- ensure this condition
# is caught ASAP. if no active_votes then rshares MUST be 0. ref: dpay#2568
assert post['active_votes'] or int(post['net_rshares']) == 0
# get total rshares, and create comma-separated vote data blob
rshares = sum(int(v['rshares']) for v in post['active_votes'])
csvotes = "\n".join(map(_vote_csv_row, post['active_votes']))
# trending scores
_timestamp = utc_timestamp(parse_time(post['created']))
sc_trend = _score(rshares, _timestamp, 480000)
sc_hot = _score(rshares, _timestamp, 10000)
return {
'payout': payout,
'rshares': rshares,
'csvotes': csvotes,
'sc_trend': sc_trend,
'sc_hot': sc_hot
}
def _vote_csv_row(vote):
"""Convert a vote object into minimal CSV line."""
rep = rep_log10(vote['reputation'])
return "%s,%s,%s,%s" % (vote['voter'], vote['rshares'], vote['percent'], rep)
def _score(rshares, created_timestamp, timescale=480000):
"""Calculate trending/hot score.
Source: calculate_score - https://github.com/dpays/dpay/blob/8cd5f688d75092298bcffaa48a543ed9b01447a6/libraries/plugins/tags/tags_plugin.cpp#L239
"""
mod_score = rshares / 10000000.0
order = math.log10(max((abs(mod_score), 1)))
sign = 1 if mod_score > 0 else -1
return sign * order + created_timestamp / timescale
def post_stats(post):
"""Get post statistics and derived properties.
Source: contentStats - https://github.com/dpays/condenser/blob/master/src/app/utils/StateFunctions.js#L109
"""
net_rshares_adj = 0
neg_rshares = 0
total_votes = 0
up_votes = 0
for vote in post['active_votes']:
if vote['percent'] == 0:
continue
total_votes += 1
rshares = int(vote['rshares'])
sign = 1 if vote['percent'] > 0 else -1
if sign > 0:
up_votes += 1
if sign < 0:
neg_rshares += rshares
# For graying: sum rshares, but ignore neg rep users and dust downvotes
neg_rep = str(vote['reputation'])[0] == '-'
if not (neg_rep and sign < 0 and len(str(rshares)) < 11):
net_rshares_adj += rshares
# take negative rshares, divide by 2, truncate 10 digits (plus neg sign),
# and count digits. creates a cheap log10, stake-based flag weight.
# result: 1 = approx $400 of downvoting stake; 2 = $4,000; etc
flag_weight = max((len(str(neg_rshares / 2)) - 11, 0))
author_rep = rep_log10(post['author_reputation'])
is_low_value = net_rshares_adj < -9999999999
has_pending_payout = bbd_amount(post['pending_payout_value']) >= 0.02
return {
'hide': not has_pending_payout and (author_rep < 0),
'gray': not has_pending_payout and (author_rep < 1 or is_low_value),
'author_rep': author_rep,
'flag_weight': flag_weight,
'total_votes': total_votes,
'up_votes': up_votes
}
| [
37811,
46202,
329,
3487,
2890,
288,
15577,
67,
1281,
20150,
526,
15931,
198,
2,
79,
2645,
600,
25,
15560,
28,
1370,
12,
18820,
12,
6511,
198,
198,
11748,
10688,
198,
11748,
334,
17752,
355,
33918,
198,
6738,
1257,
948,
13,
41068,
82,
... | 2.304132 | 2,614 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from snipsTools import *
from RelaySwitch import RelaySwitch
from SHT31 import SHT31
from SnipsClients import SnipsMPU
VERSION = '0.3.0'
CONFIG_INI = 'config.ini'
I18N_DIR = 'assets/i18n'
config = SnipsConfigParser.read_configuration_file(CONFIG_INI).get('global')
MQTT_ADDR_HOST = str(config.get('mqtt_host'))
MQTT_ADDR_PORT = str(config.get('mqtt_port'))
MQTT_ADDR = "{}:{}".format(MQTT_ADDR_HOST, MQTT_ADDR_PORT)
SITE_ID = str(config.get('site_id'))
RELAY_GPIO = int(config.get('relay_gpio_bcm'))
TEMP_UNIT = str(config.get('temperature_unit'))
LOCALE = str(config.get('locale'))
i18n = SnipsI18n(I18N_DIR, LOCALE)
relay = RelaySwitch.RelaySwitch('screen', RELAY_GPIO)
sht31 = SHT31.SHT31(TEMP_UNIT)
client = SnipsMPU.SnipsMPU(i18n, MQTT_ADDR, SITE_ID, relay, sht31)
if __name__ == "__main__":
try:
client.start_block()
except KeyboardInterrupt:
relay.clear() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
3013,
2419,
33637,
1330,
1635,
198,
6738,
4718,
323,
38978,
1330,
4718,
323,
38978,
198,
6738,
6006,
51,... | 2.224586 | 423 |
import os,sys,subprocess,re
lines = []
with os.popen("tshark -r "+sys.argv[1]+" -T fields -e tcp.analysis.ack_rtt") as sharky:
for line in sharky:
lines.append(line)
cleanList = []
for dirty in lines:
temp = re.sub("[^\d.]","",dirty)
if temp!="":
cleanList.append(temp)#filtering
numbers = list(map(float,cleanList))#convert to float
print(sum(numbers)/len(numbers))#print result. can use numbers list to produce a plot though
'''
run this script to get RTT
tshark -r <.pcap file name> -T fields -e tcp.analysis.ack_rtt
''' | [
11748,
28686,
11,
17597,
11,
7266,
14681,
11,
260,
198,
198,
6615,
796,
17635,
198,
4480,
28686,
13,
79,
9654,
7203,
912,
71,
668,
532,
81,
43825,
17597,
13,
853,
85,
58,
16,
48688,
1,
532,
51,
7032,
532,
68,
48265,
13,
20930,
13,... | 2.504505 | 222 |
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
7295,
13,
12102,
2024,
1330,
5436,
11395,
47139,
1352,
11,
1855,
11395,
47139,
1352,
628
] | 3.777778 | 27 |
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS
from Hope import get_response
app = Flask(__name__)
CORS(app)
@app.get("/")
@app.post("/predict")
if __name__ == "__main__":
app.run(debug=True)
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
11,
33918,
1958,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,
198,
6738,
13408,
1330,
651,
62,
26209,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
34,
20673,
7,
1324,... | 2.727273 | 88 |
# PYQT
import sys
#from ...TabPanel import TabPanel
import sip
from q3.ui.engine import qtw,qtc,qtg
from ... import consts, prop, direction
from ...ui import orientation, colors
from ...moduletype import ModuleType
from ...nodeiotype import NodeIoType
from ...q3vector import Q3Vector
from ...EventSignal import EventProps
from ..driverBase import Q3DriverBase
from enum import Enum
from ...valuetype import ValueType
from .IoLinkView import IoLinkView
from .IoNodeView import IoNodeView
from .ModuleViewImpl import ModuleViewImpl
from .GraphViewImpl import GraphViewImpl
#class IoNode:
# pass
# .checkSyncHandler()
#windowDidResize
| [
198,
2,
350,
56,
48,
51,
198,
198,
11748,
25064,
198,
2,
6738,
2644,
33349,
26639,
1330,
16904,
26639,
198,
198,
11748,
31145,
220,
198,
198,
6738,
10662,
18,
13,
9019,
13,
18392,
1330,
10662,
4246,
11,
80,
23047,
11,
80,
25297,
198... | 2.833333 | 252 |
import pytest
from montydb.errors import OperationFailure
from montydb.types import PY3, bson_ as bson
from datetime import datetime
from ...conftest import skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
@skip_if_no_bson
| [
198,
11748,
12972,
9288,
198,
6738,
40689,
5173,
65,
13,
48277,
1330,
14680,
50015,
198,
6738,
40689,
5173,
65,
13,
19199,
1330,
350,
56,
18,
11,
275,
1559,
62,
355,
275,
1559,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
... | 2.022814 | 263 |
# -*- coding: utf-8 -*-
# ้กน็ฎ่ฏฆๆ
่งๅพ ็ฑป
from django.db.models import Q
from django.urls import reverse_lazy
from django.views.generic import CreateView, ListView, DetailView, UpdateView, DeleteView
from django.core.paginator import Paginator
from project.forms.project_from import ProjectUpdateForm, ProjectCreateForm
from project.models import Project
from util.loginmixin import LoginMixin
class ProjectListView(LoginMixin, ListView):
"""
้กน็ฎๅ่กจ ่งๅพ
"""
model = Project
context_object_name = 'project'
template_name = "project_manage/project/project_list.html"
search_value = ""
order_field = "-updatetime" # ๆๅบๆนๅผ
created_by = ''
pagenum = 5 # ๆฏ้กตๅ้กตๆฐๆฎๆกๆฐ
class ProjectDetailView(LoginMixin, DetailView):
"""
้กน็ฎ่ฏฆๆ
่งๅพ
"""
model = Project
template_name = "project_manage/project/project_detail.html"
class ProjectParticpantDetailView(LoginMixin, DetailView):
"""
้กน็ฎ็ๅๅ ไบบๅ ่ฏฆๆ
"""
model = Project
context_object_name = 'project_particpant'
template_name = "project_manage/project/project_particpant_detail.html"
class ProjectDevDetailView(LoginMixin, DetailView):
"""
้กน็ฎ้จ็ฝฒไฟกๆฏ่ฏฆๆ
่งๅพ
"""
model = Project
context_object_name = 'project_dev'
template_name = "project_manage/project/project_dev_detail.html"
class ProjectCreateView(LoginMixin, CreateView):
"""
ๆทปๅ ้กน็ฎ ่งๅพ
"""
model = Project
form_class = ProjectCreateForm
template_name = "project_manage/project/project_add.html"
class ProjectUpdateView(LoginMixin, UpdateView):
"""
ๆดๆฐ้กน็ฎ ่งๅพ
"""
model = Project
form_class = ProjectUpdateForm
template_name = "project_manage/project/project_update.html"
class ProjectDeleteView(LoginMixin, DeleteView):
"""
ๅ ้ค้กน็ฎ ่งๅพ
"""
# template_name_suffix='_delete'
template_name = "project_manage/project/project_delete.html"
model = Project
success_url = reverse_lazy('prlist')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
16268,
94,
117,
33566,
106,
46237,
99,
46349,
227,
164,
100,
228,
32368,
122,
13328,
109,
119,
198,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
1195,
1... | 2.344994 | 829 |
"""
Example of running PyTorch implementation of DDPG on HalfCheetah.
"""
import copy
from gym.envs.mujoco import HalfCheetahEnv
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.exploration_strategies.base import (
PolicyWrappedWithExplorationStrategy
)
from rlkit.exploration_strategies.ou_strategy import OUStrategy
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.networks import ConcatMlp, TanhMlpPolicy
from rlkit.torch.ddpg.ddpg import DDPGTrainer
import rlkit.torch.pytorch_util as ptu
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
if __name__ == "__main__":
# noinspection PyTypeChecker
variant = dict(
algorithm_kwargs=dict(
num_epochs=1000,
num_eval_steps_per_epoch=5000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=10000,
max_path_length=1000,
batch_size=128,
),
trainer_kwargs=dict(
use_soft_update=True,
tau=1e-2,
discount=0.99,
qf_learning_rate=1e-3,
policy_learning_rate=1e-4,
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
replay_buffer_size=int(1E6),
)
# ptu.set_gpu_mode(True) # optionally set the GPU (default=False)
setup_logger('lrscheduler', variant=variant)
experiment(variant)
| [
37811,
198,
16281,
286,
2491,
9485,
15884,
354,
7822,
286,
360,
6322,
38,
319,
13139,
7376,
316,
993,
13,
198,
37811,
198,
11748,
4866,
198,
198,
6738,
11550,
13,
268,
14259,
13,
76,
23577,
25634,
1330,
13139,
7376,
316,
993,
4834,
85... | 2.154746 | 769 |
from datetime import timedelta
from typing import Optional
from google.protobuf.duration_pb2 import Duration
from feast.protos.feast.core.Aggregation_pb2 import Aggregation as AggregationProto
class Aggregation:
"""
NOTE: Feast-handled aggregations are not yet supported. This class provides a way to register user-defined aggregations.
Attributes:
column: str # Column name of the feature we are aggregating.
function: str # Provided built in aggregations sum, max, min, count mean
time_window: timedelta # The time window for this aggregation.
"""
column: str
function: str
time_window: Optional[timedelta]
@classmethod
| [
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
23645,
13,
11235,
672,
3046,
13,
32257,
62,
40842,
17,
1330,
22920,
198,
198,
6738,
26951,
13,
11235,
418,
13,
5036,
459,
13,
7295,
13,
46384,
43068,
... | 3.260664 | 211 |
import pyximport
pyximport.install(pyimport=True)
from random import randint
import time
import json
from utils import create_rand_array
if __name__ == '__main__':
methods = ['linear', 'binary_search']
# array_sizes = [10**x for x in range(1, 9)]
array_sizes = range(100000, 1000001, 50000)
run_times = 10
times = {x: {} for x in methods}
for arr_size in array_sizes:
arr = create_rand_array(arr_size)
arr_sorted = sorted(arr)
query = -1
for method in methods:
acum_time = 0
for i in range(run_times):
start_time = time.time()
if method == 'binary_search':
idx = find(arr_sorted, query, method)
else:
idx = find(arr, query, method)
end_time = time.time()
acum = round(end_time - start_time, 10)
times[method][arr_size] = acum / run_times
with open('searching_times.json', 'w') as json_file:
json.dump(times, json_file, indent=4, ensure_ascii=False) | [
11748,
12972,
87,
11748,
198,
9078,
87,
11748,
13,
17350,
7,
9078,
11748,
28,
17821,
8,
198,
6738,
4738,
1330,
43720,
600,
198,
11748,
640,
198,
11748,
33918,
198,
198,
6738,
3384,
4487,
1330,
2251,
62,
25192,
62,
18747,
628,
628,
628... | 2.099222 | 514 |
"""Support for LG webOS Smart TV."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from contextlib import suppress
import json
import logging
import os
from pickle import loads
from typing import Any
from aiowebostv import WebOsClient, WebOsTvPairError
import sqlalchemy as db
import voluptuous as vol
from homeassistant.components import notify as hass_notify
from homeassistant.components.automation import AutomationActionType
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_COMMAND,
ATTR_ENTITY_ID,
CONF_CLIENT_SECRET,
CONF_CUSTOMIZE,
CONF_HOST,
CONF_ICON,
CONF_NAME,
CONF_UNIQUE_ID,
EVENT_HOMEASSISTANT_STOP,
Platform,
)
from homeassistant.core import (
Context,
Event,
HassJob,
HomeAssistant,
ServiceCall,
callback,
)
from homeassistant.helpers import config_validation as cv, discovery, entity_registry
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.typing import ConfigType
from .const import (
ATTR_BUTTON,
ATTR_CONFIG_ENTRY_ID,
ATTR_PAYLOAD,
ATTR_SOUND_OUTPUT,
CONF_ON_ACTION,
CONF_SOURCES,
DATA_CONFIG_ENTRY,
DATA_HASS_CONFIG,
DEFAULT_NAME,
DOMAIN,
PLATFORMS,
SERVICE_BUTTON,
SERVICE_COMMAND,
SERVICE_SELECT_SOUND_OUTPUT,
WEBOSTV_CONFIG_FILE,
WEBOSTV_EXCEPTIONS,
)
CUSTOMIZE_SCHEMA = vol.Schema(
{vol.Optional(CONF_SOURCES, default=[]): vol.All(cv.ensure_list, [cv.string])}
)
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Optional(CONF_CUSTOMIZE, default={}): CUSTOMIZE_SCHEMA,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_ON_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_ICON): cv.string,
}
)
],
)
},
),
extra=vol.ALLOW_EXTRA,
)
CALL_SCHEMA = vol.Schema({vol.Required(ATTR_ENTITY_ID): cv.comp_entity_ids})
BUTTON_SCHEMA = CALL_SCHEMA.extend({vol.Required(ATTR_BUTTON): cv.string})
COMMAND_SCHEMA = CALL_SCHEMA.extend(
{vol.Required(ATTR_COMMAND): cv.string, vol.Optional(ATTR_PAYLOAD): dict}
)
SOUND_OUTPUT_SCHEMA = CALL_SCHEMA.extend({vol.Required(ATTR_SOUND_OUTPUT): cv.string})
SERVICE_TO_METHOD = {
SERVICE_BUTTON: {"method": "async_button", "schema": BUTTON_SCHEMA},
SERVICE_COMMAND: {"method": "async_command", "schema": COMMAND_SCHEMA},
SERVICE_SELECT_SOUND_OUTPUT: {
"method": "async_select_sound_output",
"schema": SOUND_OUTPUT_SCHEMA,
},
}
_LOGGER = logging.getLogger(__name__)
def read_client_keys(config_file: str) -> dict[str, str]:
"""Read legacy client keys from file."""
if not os.path.isfile(config_file):
return {}
# Try to parse the file as being JSON
with open(config_file, encoding="utf8") as json_file:
try:
client_keys = json.load(json_file)
if isinstance(client_keys, dict):
return client_keys
return {}
except (json.JSONDecodeError, UnicodeDecodeError):
pass
# If the file is not JSON, read it as Sqlite DB
engine = db.create_engine(f"sqlite:///{config_file}")
table = db.Table("unnamed", db.MetaData(), autoload=True, autoload_with=engine)
results = engine.connect().execute(db.select([table])).fetchall()
db_client_keys = {k: loads(v) for k, v in results}
return db_client_keys
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the LG WebOS TV platform."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN].setdefault(DATA_CONFIG_ENTRY, {})
hass.data[DOMAIN][DATA_HASS_CONFIG] = config
if DOMAIN not in config:
return True
config_file = hass.config.path(WEBOSTV_CONFIG_FILE)
if not (
client_keys := await hass.async_add_executor_job(read_client_keys, config_file)
):
_LOGGER.debug("No pairing keys, Not importing webOS Smart TV YAML config")
return True
ent_reg = entity_registry.async_get(hass)
tasks = []
for conf in config[DOMAIN]:
host = conf[CONF_HOST]
if (key := client_keys.get(host)) is None:
_LOGGER.debug(
"Not importing webOS Smart TV host %s without pairing key", host
)
continue
if entity_id := ent_reg.async_get_entity_id(Platform.MEDIA_PLAYER, DOMAIN, key):
tasks.append(asyncio.create_task(async_migrate_task(entity_id, conf, key)))
async def async_tasks_cancel(_event: Event) -> None:
"""Cancel config flow import tasks."""
for task in tasks:
if not task.done():
task.cancel()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_tasks_cancel)
return True
def _async_migrate_options_from_data(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Migrate options from data."""
if entry.options:
return
config = entry.data
options = {}
# Get Preferred Sources
if sources := config.get(CONF_CUSTOMIZE, {}).get(CONF_SOURCES):
options[CONF_SOURCES] = sources
if not isinstance(sources, list):
options[CONF_SOURCES] = sources.split(",")
hass.config_entries.async_update_entry(entry, options=options)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set the config entry up."""
_async_migrate_options_from_data(hass, entry)
host = entry.data[CONF_HOST]
key = entry.data[CONF_CLIENT_SECRET]
wrapper = WebOsClientWrapper(host, client_key=key)
await wrapper.connect()
for service, method in SERVICE_TO_METHOD.items():
schema = method["schema"]
hass.services.async_register(
DOMAIN, service, async_service_handler, schema=schema
)
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id] = wrapper
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
# set up notify platform, no entry support for notify component yet,
# have to use discovery to load platform.
hass.async_create_task(
discovery.async_load_platform(
hass,
"notify",
DOMAIN,
{
CONF_NAME: entry.title,
ATTR_CONFIG_ENTRY_ID: entry.entry_id,
},
hass.data[DOMAIN][DATA_HASS_CONFIG],
)
)
if not entry.update_listeners:
entry.async_on_unload(entry.add_update_listener(async_update_options))
async def async_on_stop(_event: Event) -> None:
"""Unregister callbacks and disconnect."""
await wrapper.shutdown()
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_on_stop)
)
return True
async def async_update_options(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Update options."""
await hass.config_entries.async_reload(entry.entry_id)
async def async_control_connect(host: str, key: str | None) -> WebOsClient:
"""LG Connection."""
client = WebOsClient(host, key)
try:
await client.connect()
except WebOsTvPairError:
_LOGGER.warning("Connected to LG webOS TV %s but not paired", host)
raise
return client
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
client = hass.data[DOMAIN][DATA_CONFIG_ENTRY].pop(entry.entry_id)
await hass_notify.async_reload(hass, DOMAIN)
await client.shutdown()
# unregister service calls, check if this is the last entry to unload
if unload_ok and not hass.data[DOMAIN][DATA_CONFIG_ENTRY]:
for service in SERVICE_TO_METHOD:
hass.services.async_remove(DOMAIN, service)
return unload_ok
class PluggableAction:
"""A pluggable action handler."""
def __init__(self) -> None:
"""Initialize."""
self._actions: dict[Callable[[], None], tuple[HassJob, dict[str, Any]]] = {}
def __bool__(self) -> bool:
"""Return if we have something attached."""
return bool(self._actions)
@callback
def async_attach(
self, action: AutomationActionType, variables: dict[str, Any]
) -> Callable[[], None]:
"""Attach a device trigger for turn on."""
@callback
job = HassJob(action)
self._actions[_remove] = (job, variables)
return _remove
@callback
def async_run(self, hass: HomeAssistant, context: Context | None = None) -> None:
"""Run all turn on triggers."""
for job, variables in self._actions.values():
hass.async_run_hass_job(job, variables, context)
class WebOsClientWrapper:
"""Wrapper for a WebOS TV client with Home Assistant specific functions."""
def __init__(self, host: str, client_key: str) -> None:
"""Set up the client."""
self.host = host
self.client_key = client_key
self.turn_on = PluggableAction()
self.client: WebOsClient | None = None
async def connect(self) -> None:
"""Attempt a connection, but fail gracefully if tv is off for example."""
self.client = WebOsClient(self.host, self.client_key)
with suppress(*WEBOSTV_EXCEPTIONS, WebOsTvPairError):
await self.client.connect()
async def shutdown(self) -> None:
"""Unregister callbacks and disconnect."""
assert self.client
self.client.clear_state_update_callbacks()
await self.client.disconnect()
| [
37811,
15514,
329,
17370,
3992,
2640,
10880,
3195,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
30351,
952,
198,
6738,
17268,
13,
39305,
1330,
4889,
540,
198,
6738,
4732,
8019,
1330,
18175,
198,
11748,
33918,
19... | 2.295408 | 4,377 |
import os
import regex
import io
from natsort import natsort_keygen, natsorted
nkey = natsort_keygen()
for subdir, dirs, files in os.walk(
r"I:\INOWROCลAW\DANE_IRON_MOUNTAIN\20190614\DฤBROWA BISKUPIA"
):
dirs.sort(key=nkey)
if not any(fname.upper().endswith(".XML") for fname in os.listdir(subdir)):
continue
for file in natsorted(files):
if file.upper().endswith(".XML"):
xml = os.path.join(subdir, file)
with io.open(xml, "r", encoding="utf-8") as xxml:
for line in xxml:
if regex.match("^ <pzg_dataZgloszenia></pzg", line):
with open(
r"D:\_MACIEK_\python_proby\xml_bez_daty.txt", "a"
) as bezdaty:
bezdaty.write(xml + "\n")
elif regex.match(
"^ <celArchiwalny></cel", line
) and regex.match("^ <pzg_cel></pzg", line):
with open(
r"D:\_MACIEK_\python_proby\xml_bez_celu.txt", "a"
) as bezcelu:
bezcelu.write(xml + "\n")
| [
11748,
28686,
198,
11748,
40364,
198,
11748,
33245,
198,
6738,
299,
1381,
419,
1330,
299,
1381,
419,
62,
2539,
5235,
11,
299,
1381,
9741,
198,
198,
77,
2539,
796,
299,
1381,
419,
62,
2539,
5235,
3419,
198,
198,
1640,
850,
15908,
11,
... | 1.641096 | 730 |
Import("env")
env.Replace(LINKFLAGS = [fixLinkFlag(i) for i in env['LINKFLAGS']])
| [
20939,
7203,
24330,
4943,
198,
220,
220,
220,
220,
198,
24330,
13,
3041,
5372,
7,
43,
17248,
38948,
50,
796,
685,
13049,
11280,
34227,
7,
72,
8,
329,
1312,
287,
17365,
17816,
43,
17248,
38948,
50,
6,
11907,
8,
198
] | 2.175 | 40 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2021/10/19 ไธๅ2:38
# @Author : Samge
import datetime
import json
class DateEncoder(json.JSONEncoder):
"""
ๅค็ๆฅๆๆ ผๅผ่ฝฌjson
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
33448,
14,
940,
14,
1129,
220,
10310,
233,
39355,
230,
17,
25,
2548,
198,
2... | 1.902913 | 103 |
# Author: Jimmy Huang (1902161621@qq.com)
# License: WTFPL
import ECY.utils.lsp.language_server_protocol as lsp
import ECY.utils.interface as scope_
import logging
import os
import threading
global g_logger
g_logger = logging.getLogger('ECY_server')
| [
2,
6434,
25,
12963,
31663,
357,
1129,
2999,
1433,
1433,
2481,
31,
38227,
13,
785,
8,
198,
2,
13789,
25,
41281,
5837,
43,
198,
198,
11748,
13182,
56,
13,
26791,
13,
75,
2777,
13,
16129,
62,
15388,
62,
11235,
4668,
355,
300,
2777,
1... | 2.863636 | 88 |
"""
Q005 Longest Palindromic Substring
Medium
08/27/2019
String. Dynamic programming.
Methods:
Dynamic programming;
Manacher's algo;
Palidromic tree;
Given a string s, find the longest palindromic substring in s. You may assume that the maximum length of s is 1000.
Example 1:
Input: "babad"
Output: "bab"
Note: "aba" is also a valid answer.
Example 2:
Input: "cbbd"
Output: "bb"
"""
s = "aaaaa"
sol = Solution()
print(sol.longestPalindrome(s))
| [
37811,
198,
48,
22544,
5882,
395,
3175,
521,
398,
291,
3834,
8841,
198,
31205,
198,
198,
2919,
14,
1983,
14,
23344,
198,
10100,
13,
26977,
8300,
13,
198,
46202,
25,
198,
220,
220,
220,
26977,
8300,
26,
198,
220,
220,
220,
1869,
3493... | 2.75 | 172 |
# 11 February 2018 Miroslav Gasparek
# Python practical on File Input/Output
import os
# Open File
f = open('data/1OLG.pdb','r')
# What is f?
f
f_str = f.read()
f_str[:1000]
f_list = f.readlines()
f_list
# To rewind back to the beginning, we do f.seek(0)
# Go to the beginning of the file
f.seek(0)
# Read the contents in as a list
f_list = f.readlines()
# Check out the first 10 entries
f_list[:10]
# Strip all whitespaces including newlines from the end of a string
f_list[0].rstrip()
# We must close file when we are done with it
f.close()
# Check if file is closed
print('Is file closed?' ,f.closed)
print()
## Use of context managers with files
with open('data/1OLG.pdb', 'r') as f:
f_lines = f.readlines()
print('In the with block, is the file closed?', f.closed)
print('Out of the with block, is the file closed?',f.closed)
# Check the first three lines
f_lines[:3]
# Print the first ten lines of the file
with open('data/1OLG.pdb', 'r') as f:
counter = 0
for line in f:
print(line.rstrip())
counter += 1
if counter >= 10:
break
print()
# Another way to print first ten lines of the file
with open('data/1OLG.pdb', 'r') as f:
counter = 0
while counter < 10:
print(f.readline().rstrip())
counter += 1
# Check if the file already exists:
LogicVal = os.path.isfile('data/1OLG.pdb')
print('Does given file exist? ',LogicVal)
# Otherwise, get ready to open a file to write.
# if os.path.isfile('mastery.txt'):
# raise RuntimeError('File mastery.txt already exists.')
# with open('mastery.txt', 'w') as f:
# f.write('This is my file.')
# f.write('There are many like it, but this one is mine.')
# f.write('I must master my file like I must master my life.')
# f.write() only accepts strings, so numbers must be converted to strings
with open('gimme_phi.txt', 'w') as f:
f.write('The golden ratio is phi = ')
f.write('{phi: .8f}'.format(phi=1.61803398875))
| [
2,
1367,
3945,
2864,
13756,
4951,
18809,
402,
5126,
533,
74,
198,
2,
11361,
8472,
319,
9220,
23412,
14,
26410,
198,
11748,
28686,
198,
198,
2,
4946,
9220,
198,
69,
796,
1280,
10786,
7890,
14,
16,
3535,
38,
13,
79,
9945,
41707,
81,
... | 2.63369 | 748 |
from rest_framework import serializers
from blog.models import Blog
class BlogSerializer(serializers.ModelSerializer):
"""
Serializer class for Blog Model
"""
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
201,
198,
6738,
4130,
13,
27530,
1330,
14001,
201,
198,
201,
198,
4871,
14001,
32634,
7509,
7,
46911,
11341,
13,
17633,
32634,
7509,
2599,
201,
198,
220,
220,
220,
37227,
201,
198,
220,
220,
... | 3.236364 | 55 |
from util.constant import EdgeAttrbutes, EdgeType, NodeAttributes
from util.utils import get_api_qualified_name_from_entity_id
from .common import api_url_match, camel_case_split, longest_common_subsequence
from ..concept_map.common import get_latest_concept_map
from elasticsearch import Elasticsearch
from util.config import Elasticsearch_host, Elasticsearch_port
from util.constant import high_level_node_types
import networkx as nx
import re
concept_map = get_latest_concept_map()
Ntype_attributes = nx.get_node_attributes(concept_map, 'Ntype')
href_attributes = nx.get_node_attributes(concept_map, 'local_href')
api_entities = [
node for node in concept_map if node in Ntype_attributes and node in href_attributes]
es = Elasticsearch(hosts='localhost', port=9200)
splitters = set(
[
",",
".",
"/",
";",
"'",
"`",
"\\",
"[",
"]",
"<",
">",
"?",
":",
'"',
"{",
"}",
"~",
"!",
"@",
"#",
"$",
"%",
"^",
"&",
"(",
")",
"-",
"=",
"_",
"+",
"๏ผ",
"ใ",
"ใ",
"๏ผ",
"โ",
"โ",
"ใ",
"ใ",
"ยท",
"๏ผ",
"โฆ",
"๏ผ",
"๏ผ",
])
def get_gt_candidate(ground_truth_url: str):
'''
ๆ นๆฎSOๅธๅญไธญๅคฉ็ถ็ground-truth๏ผๆ็ดขๅพๅพๅฐๅฏนๅบ็้พๆฅๅฎไฝ
## parameter
`ground_truth_url` : mentionๅฏนๅบ็่ถ
้พๆฅurl
'''
global api_entities
global href_attributes
for entity in api_entities:
if api_url_match(href_attributes[entity], ground_truth_url):
return entity
return None
def simple_candidate_selector(mention: str):
'''
็ฎๅ็ๅ้ๅฎไฝๆฅๆพๅจ๏ผๆฅๆพ่งๅ๏ผ
```
ๅฎไฝ็ๆญฃๅผๅๅญๅmention็ๆ้ฟๅ
ฌๅ
ฑๅญๅบๅ้ฟๅบฆ่พพๅฐmention้ฟๅบฆ็40%ไปฅไธ
```
ๅฎ้ช่ฏๆ่ฟ็ฉๆๅคชๆ
ขไบ
'''
global api_entities
select_threshold = 0.3
for entity in api_entities:
if longest_common_subsequence(entity, mention) >= select_threshold * len(mention):
yield entity
def substring_candidate_selector(mention: str):
'''
ๆ นๆฎๅญไธฒๅบ็ฐ็ๅ้ๅฎไฝๆฅๆพๅจ๏ผๆฅๆพ่งๅ๏ผ
```
mentionไธญ็ไธไธชๅ่ฏๅบ็ฐๅจไบๅฎไฝๅๅญไธญ
```
'''
global api_entities
splitter_pattern = r'[;,\.\s/_\-\(\)\[\]\{\}#]'
temp_tokens = re.split(splitter_pattern, mention)
tokens = []
for token in temp_tokens:
tokens.extend(camel_case_split(token))
for entity in api_entities:
if any([token for token in tokens if token in entity.lower()]):
yield entity
def match_even_one_token(mention: str, entity_name: str):
'''
ๅคๆญmentionไธญๆฏไธๆฏ่ณๅฐๆไธไธชtokenๆฏๅๅฎไฝๅๅฏนๅบ็
ๅ ไธบๅๅงๆฐๆฎไธญๅบ็ฐไบไธไบๅ็็จไปไนmethodไฝๆฏๅผ็จๅดๆฏ็ฑป็ๅผ็จ็ๆ
ๅต๏ผๆไปฅ็จ่ฟไธชไฝฟๅพๆฐๆฎ้ๆดๅ ๆด้ฝไธไบ
'''
tokens = [t.lower() for t in tokenize(mention)]
name = get_api_qualified_name_from_entity_id(entity_name).lower()
return any([t for t in tokens if t in name])
def search_for_possible_ground_truth_entity(mention: str, orignal_ground_truth_entity: str):
'''
ไธๅผๅงๆฝๅๅบ็NEL ground_truthๅฏ่ฝไธ้ ่ฐฑ
ๅฏ่ฝๆ้พๆฅๆฏ็ฑปไฝๆฏmentionๆฏ็ฑปไธญๆนๆณ็ๆ
ๅต
ๆไปฅๅฆๆgrount truthๅน้
ๅคฑ่ดฅๅฐฑ่ฏ็ไธไป็ๆๅ้ๅฏปๆพๆฐ็ground truth
'''
succes = list(concept_map.adj[orignal_ground_truth_entity])
for entity in succes:
if concept_map[orignal_ground_truth_entity][entity][EdgeAttrbutes.Etype] == EdgeType.INCLUDE and match_even_one_token(mention, entity):
return entity
return None
def es_candidate_selector(mention: str):
'''
ๅบไบelastic searchๆจก็ณๅน้
็ๅ้ๅฎไฝๆฅๆพๅจ
'''
mention_tokens = tokenize(mention)
search_term = ' '.join(mention_tokens).lower()
res = es_search(search_term, 'name', 'auto')
for candidate in res:
yield candidate
res = es_search(search_term, 'description', 'auto')
for candidate in res:
yield candidate
res = []
for token in mention_tokens:
if any([s for s in splitters if s in token]):
continue
res.extend(es_wildcard_search(token, 'name'))
for candidate in res:
yield candidate
for token in mention_tokens:
if any([s for s in splitters if s in token]):
continue
res.extend(es_wildcard_search(token, 'description'))
for candidate in res:
yield candidate
return res
def es_candidate_strict_selector(mention: str):
'''
ๅบไบelastic searchไธฅๆ ผๅน้
็ๅ้ๅฎไฝๆฅๆพๅจ
## 2021.4.14ๅชๅจAPI็ๅๅญไธญๅcandidate็ๆ็ดข๏ผ่ไธๆญคๆถไฝฟ็จ็elasticsearchๆฏ็ฑZAMPATHA 1.1็ๆ็๏ผAPIๅๆฏ้ๅธธ็ญ็๏ผๅชๅ
ๅซๅฟ
่ฆไฟกๆฏ
'''
mention_tokens = tokenize(mention)
search_term = ' '.join(mention_tokens).lower()
res = es_search(search_term, 'name')
for candidate in res:
yield candidate
| [
6738,
7736,
13,
9979,
415,
1330,
13113,
8086,
26145,
1769,
11,
13113,
6030,
11,
19081,
29021,
198,
6738,
7736,
13,
26791,
1330,
651,
62,
15042,
62,
22557,
62,
3672,
62,
6738,
62,
26858,
62,
312,
198,
6738,
764,
11321,
1330,
40391,
62,... | 1.788825 | 2,595 |
#! /usr/bin/env python
# coding: utf-8
# This is pos controller for like-car robot
import math
import numpy as np
import rospy
import tf
import tf2_ros
import sensor_msgs.point_cloud2 as pc2
import laser_geometry.laser_geometry as lg
from tf2_sensor_msgs.tf2_sensor_msgs import do_transform_cloud
from geometry_msgs.msg import Twist, Pose, PoseStamped
from sensor_msgs.msg import PointCloud2, LaserScan
from rc_bringup.cfg import PoseControllerConfig
from dynamic_reconfigure.server import Server
from pid_params_saver import YamlParams
#value
velocity = float()
cmd_vel_msg = Twist()
current_pose = Pose()
current_course = float()
goal_pose = Pose()
goal_pose_msg = Pose()
goal_new=Pose()
init_flag = False
max_vel = 1.1 # m/s
min_vel = -1.5 # m/s
max_angle = 25
finish_flag = True
goal_tolerance = 0.5
dist=0.0
#topics
cmd_vel_topic = "/cmd_vel"
vel_topic = "/mavros/local_position/velocity"
goal_topic = "/goal"
pose_topic = "/mavros/local_position/pose"
lidar_topic = "/scan"
point_cloud2_topic="/laserPointCLoud"
target_frame='odom'
source_frame='map'
#cfg_values
pps = YamlParams()
init_server = False
#PointCloud
max_dist_lidar=1.5
sonar_data = list()
#point_cloud = PointCloud2()
pc_msg = PointCloud2()
lp = lg.LaserProjection()
#PID
kP_pose=float
kI_pose=float
kD_pose=float
kP_course=float
kI_course=float
kD_course=float
#reg_functions
v_des=0.0
Ev=0.0
Erot=0.0
upper_limit_of_ki_sum=0.0
lower_limit_of_ki_sum=0.0
u_v=0.0
u_rot=0.0
plot_x=[0]
plot_y=[0]
v=0.0
sumErot=0
sumEv=0
distance=list()
#obs_xy=[list(),list()]
Obs_xy=list()
lid_and_vec=list()
lid_ang_vec_new=list()
phi_new_vec=list()
phi_vec=list()
lidar_arr=list()
x_matrix=list()
y_matrix=list()
nearest_point= list()
i=0
j=0
k=0
Fatt=list()
yn=list()
xn=list()
xn_new=list()
yn_new=list()
phi_new_x=list()
lid_new_x=list()
phi_new_y=list()
lid_new_y=list()
range_dia=None
goal_new_p=list()
step_1=1
aproximation_point=None
point_cloud2=PointCloud2()
#trap function for velocity
#rotatin servo regulator
#velocity motor regulator
def get_distance_to(a,b):
"""
get distance to goal point
:type a: Pose
:type b: Pose
:type dist: float
:param a: current pose
:param b: goal pose
:param dist: distance
:return: dist
"""
pos = np.array([[b.position.x - a.position.x],
[b.position.y - a.position.y]])
dist = np.linalg.norm(pos)
return dist
if __name__ == "__main__":
# init ros node
rospy.init_node('rc_pos_controller', anonymous=True)
rate = rospy.Rate(10) # 10hz
# init dynamic reconfigure server
cfg_srv = Server(PoseControllerConfig, cfg_callback)
set_server_value(cfg_srv)
# Get ros args
if rospy.has_param('~vel_topic'):
vel_topic = rospy.get_param('~vel_topic', vel_topic)
if rospy.has_param('~cmd_vel'):
cmd_vel_topic = rospy.get_param('~cmd_vel', cmd_vel_topic)
if rospy.has_param('~goal_topic'):
goal_topic = rospy.get_param('~goal_topic', goal_topic)
if rospy.has_param('~pose_topic'):
pose_topic = rospy.get_param('~pose_topic', pose_topic)
if rospy.has_param('~max_vel'):
max_vel = rospy.get_param('~max_vel', max_vel)
cfg_srv.update_configuration({"max_vel": max_vel})
if rospy.has_param('~min_vel'):
min_vel = rospy.get_param('~min_vel', min_vel)
cfg_srv.update_configuration({"min_vel": min_vel})
if rospy.has_param('~max_angle'):
max_angle = rospy.get_param('~max_angle', max_angle)
cfg_srv.update_configuration({"max_angle": max_angle})
## PID params
if rospy.has_param('~kP_pose'): #pose means vel in case of repeatativenes but just now
kP_pose = rospy.get_param('~kP_pose', kP_pose)
cfg_srv.update_configuration({"kP_pose": kP_pose})
if rospy.has_param('~kI_pose'):
kI_pose = rospy.get_param('~kI_pose', kI_pose)
cfg_srv.update_configuration({"kI_pose": kI_pose})
if rospy.has_param('~kD_pose'):
kD_pose = rospy.get_param('~kD_pose', kD_pose)
cfg_srv.update_configuration({"kD_pose": kD_pose})
if rospy.has_param('~kP_course'):
kP_course = rospy.get_param('~kP_course', kP_course)
cfg_srv.update_configuration({"kP_course": kP_course})
if rospy.has_param('~kI_course'):
kI_course = rospy.get_param('~kI_course', kI_course)
cfg_srv.update_configuration({"kI_course": kI_course})
if rospy.has_param('~kD_course'):
kD_course = rospy.get_param('~kD_course', kD_course)
cfg_srv.update_configuration({"kD_course": kD_course})
# start subscriber
#rospy.Subscriber(vel_topic, TwistStamped, vel_clb)
rospy.Subscriber(goal_topic, PoseStamped, goal_clb)
rospy.Subscriber(pose_topic, PoseStamped, current_pose_clb)
rospy.Subscriber(lidar_topic, LaserScan, laser_scan_clb)
vec_pub = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)
#new_goal_pub = rospy.Publisher(goal_topic, Pose, queue_size=10)
listener = tf.TransformListener()
old_ros_time = rospy.get_time()
currentTime = 0.0
rate.sleep()
try:
while not rospy.is_shutdown():
dt = rospy.get_time() - old_ros_time
currentTime += dt
if(not init_flag):
if currentTime > 1.0:
print("pose controller: not init")
currentTime = 0.0
continue
old_ros_time = rospy.get_time()
cmd_vel_msg = main()
# goal_pose_msg = plan_virtual_fields()
step_1=step_1+1
if finish_flag:
if currentTime > 1.0:
print("pose controller: finish_flag True")
currentTime = 0.0
cmd_vel_msg.linear.x = 0.0
init_flag = False
vec_pub.publish(cmd_vel_msg)
#new_goal_pub.publish(goal_pose_msg) # publish msgs to the robot
rate.sleep()
except KeyboardInterrupt: # if put ctr+c
exit(0)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
770,
318,
1426,
10444,
329,
588,
12,
7718,
9379,
628,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
686,
2777,
8... | 2.115641 | 2,845 |
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from resource_tracker.filters.resource_pool_filter import ResourcePoolFilter
from resource_tracker.forms import ResourcePoolForm, ResourcePoolAttributeDefinitionForm
from resource_tracker.models import ResourcePool, ResourcePoolAttributeDefinition
from resource_tracker.tables.resource_pool_attribute_definition_table import ResourcePoolAttributeDefinitionTable
@user_passes_test(lambda u: u.is_superuser)
@user_passes_test(lambda u: u.is_superuser)
@user_passes_test(lambda u: u.is_superuser)
@user_passes_test(lambda u: u.is_superuser)
@user_passes_test(lambda u: u.is_superuser)
@user_passes_test(lambda u: u.is_superuser)
@user_passes_test(lambda u: u.is_superuser)
@user_passes_test(lambda u: u.is_superuser)
@user_passes_test(lambda u: u.is_superuser)
@user_passes_test(lambda u: u.is_superuser)
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
2836,
62,
6603,
274,
62,
9288,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
11,
651,
62,
15252,
62,
273,
62,
26429,
198,
6738,
42625,
14208... | 2.996942 | 327 |
import sys
import serial
import pprint
import time
import enum
import queue
from queue import Queue
from os.path import join, dirname, abspath
from qtpy.QtCore import Slot, QTimer, QThread, Signal, QObject, Qt, QMutex
'''
for char in self.serialport.read():
self.line.append(chr(char))
if chr(char) == '\n':
self.rec_data = "".join(self.line)
self.line.clear()
self.signal_pass_encoder.emit(self.rec_data)
#print(self.rec_data)
'''
'''
class WorkerThread(QObject):
signal = Signal(str)
def __init__(self, serialPort, codegen, commandque:Queue):
self.serialPort = serialPort
self.codegen = codegen
self.commandque = commandque
self.codelist = self.codegen.gcodestr.splitlines()
self.linecount = len(self.codelist)
self.flagexit = False
self.flagStop = False
super().__init__()
self.respondQue = Queue()
def Stop(self):
self.flagStop = True
def Resume(self):
self.flagStop = False
def updateGcode(self, codegen):
self.codegen = codegen
self.codelist = self.codegen.gcodestr.splitlines()
@Slot()
def run(self):
lst = []
while 1:
if self.flagStop:
time.sleep(1)
if self.respondQue.qsize() <= 0:
self.respondQue.put("stopped")
continue
if self.commandque.qsize() > 0:
if self.commandque.get() == "exit":
self.flagexit = True
break
try:
for line in self.codelist:
self.serialPort.write((str(line)+"\r\n").encode('utf-8'))
time.sleep(0.1)
in_waiting = self.serialPort.in_waiting
while in_waiting == 0:
time.sleep(0.5) #1
in_waiting = self.serialPort.in_waiting
jMessage = ""
while "ok" not in jMessage:
while self.serialPort.in_waiting:
lst = self.serialPort.readlines()
for itm in lst:
jMessage += itm.decode('ascii')
self.signal.emit(str(line) + " - " + jMessage)
except serial.SerialException as ex:
print("Error In SerialException" + str(ex))
'''
| [
11748,
25064,
198,
11748,
11389,
198,
11748,
279,
4798,
198,
11748,
640,
198,
11748,
33829,
198,
11748,
16834,
198,
6738,
16834,
1330,
4670,
518,
198,
6738,
28686,
13,
6978,
1330,
4654,
11,
26672,
3672,
11,
2352,
6978,
198,
6738,
10662,
... | 1.816068 | 1,419 |
"""The module for handling users."""
import admin
from appengine_config import JINJA_ENVIRONMENT
from ast import literal_eval
import base64
from config import PATHS
from datastore import DomainVerification
from datastore import ProxyServer
from datastore import User
from error_handlers import Handle500
from googleapiclient import errors
from google_directory_service import GoogleDirectoryService
import json
import random
import webapp2
import xsrf
def _GenerateUserPayload(users):
"""Generate the user payload data for all users.
I could just pass through all the user's properties here, but that
would expose the private key we have in the datastore along with
various other user data, so I'm explicitly limiting what we show to
an email and key for modifying values.
Args:
users: A list of users with associated properties from the datastore.
Returns:
user_token_payloads: A dictionary with user key id as key and email
as a value.
"""
user_token_payloads = {}
for user in users:
user_token_payloads[user.key.urlsafe()] = user.email
return user_token_payloads
def _MakeInviteCode(user):
  """Build a base64-encoded invite code for the given user.

  The invite code format is defined by the uproxy team.  Decoded, an
  invite code for a cloud instance looks like:

    {
      "networkName": "Cloud",
      "networkData": "{
        \"host\":\"178.62.123.172\",
        \"user\":\"giver\",
        \"key\":\"base64_key"
      }"
    }

  It carries the host ip (of the proxy server or load balancer) the user
  should connect to, the username (the user's email) to connect with,
  and the credential (private key) needed to authenticate with the host.

  TODO: Guard against any future breakage when the invite code format
  is changed again. Possibly adding a test on the uproxy-lib side
  to fail and point to updating this here.

  Args:
    user: A user from the datastore to generate an invite code for.

  Returns:
    invite_code: The urlsafe base64 encoding of the json document
    described above.
  """
  network_data = {
      'host': _GetInviteCodeIp(),
      'user': user.email,
      'pass': user.private_key,
  }
  invite_code_data = {
      'networkName': 'Cloud',
      'networkData': network_data,
  }
  return base64.urlsafe_b64encode(json.dumps(invite_code_data))
def _GetInviteCodeIp():
  """Get the ip address for placing in the invite code.

  Eventually this method will actually get the load balancer's ip as we
  will want in the final version. For now, it is used as a simple stub
  to just pick a random proxy server's ip.

  Returns:
    ip_address: An ip address for an invite code.
  """
  proxy_servers = ProxyServer.GetAll()
  # random.choice is the idiomatic stdlib replacement for indexing with
  # randint(0, len - 1); it still fails fast if no proxy servers exist.
  return random.choice(proxy_servers).ip_address
def _RenderUserListTemplate():
  """Render the user-list admin page.

  Returns:
    The rendered page as an html string.
  """
  all_users = User.GetAll()
  template = JINJA_ENVIRONMENT.get_template('templates/user.html')
  return template.render({'user_payloads': _GenerateUserPayload(all_users)})
def _RenderLandingTemplate():
  """Render the default landing page.

  Returns:
    The rendered page, including the domain verification content,
    as an html string.
  """
  verification = DomainVerification.GetOrInsertDefault()
  template_values = {'site_verification_content': verification.content}
  return JINJA_ENVIRONMENT.get_template('templates/landing.html').render(
      template_values)
def _RenderAddUsersTemplate(directory_users, error=None):
  """Render the add-users page that lets users be selected for addition.

  Args:
    directory_users: Users fetched from the google directory service.
    error: Optional error to surface on the page; omitted when None.

  Returns:
    The rendered page as an html string.
  """
  template_values = {'directory_users': directory_users}
  if error is not None:
    template_values['error'] = error
  return JINJA_ENVIRONMENT.get_template('templates/add_user.html').render(
      template_values)
def _RenderUserDetailsTemplate(user, invite_code=None):
  """Render the details page for a single user.

  Args:
    user: The datastore user entity to display.
    invite_code: Optional invite code to show; omitted when None.

  Returns:
    The rendered page as an html string.
  """
  template_values = {'user': user, 'key': user.key.urlsafe()}
  if invite_code is not None:
    template_values['invite_code'] = invite_code
  return JINJA_ENVIRONMENT.get_template('templates/user_details.html').render(
      template_values)
class LandingPageHandler(webapp2.RequestHandler):
  """Serve the landing page, which does not require oauth."""

  # pylint: disable=too-few-public-methods
  def get(self):
    """Write the rendered landing page into the response."""
    page = _RenderLandingTemplate()
    self.response.write(page)
class ListUsersHandler(webapp2.RequestHandler):
  """List the current users."""

  # pylint: disable=too-few-public-methods
  @admin.OAUTH_DECORATOR.oauth_required
  @admin.RequireAppOrDomainAdmin
  def get(self):
    """Respond with the rendered list of all current users."""
    page = _RenderUserListTemplate()
    self.response.write(page)
class DeleteUserHandler(webapp2.RequestHandler):
  """Delete a given user."""

  # pylint: disable=too-few-public-methods
  @admin.OAUTH_DECORATOR.oauth_required
  @admin.RequireAppOrDomainAdmin
  def get(self):
    """Delete the user whose urlsafe key is passed in the request.

    A delete verb would be preferable; get is used because the admin
    interface only issues simple links.
    """
    User.DeleteByKey(self.request.get('key'))
    self.response.write(_RenderUserListTemplate())
class GetInviteCodeHandler(webapp2.RequestHandler):
  """Get an invite code for a given user."""

  # pylint: disable=too-few-public-methods
  @admin.OAUTH_DECORATOR.oauth_required
  @admin.RequireAppOrDomainAdmin
  def get(self):
    """Respond with the user's details page including a fresh invite code."""
    user = User.GetByKey(self.request.get('key'))
    code = _MakeInviteCode(user)
    self.response.write(_RenderUserDetailsTemplate(user, code))
class GetNewKeyPairHandler(webapp2.RequestHandler):
  """Create a new key pair for a given user."""

  # pylint: disable=too-few-public-methods
  @admin.OAUTH_DECORATOR.oauth_required
  @admin.RequireAppOrDomainAdmin
  def get(self):
    """Regenerate the key pair for the user matching the passed-in key."""
    urlsafe_key = self.request.get('key')
    User.UpdateKeyPair(urlsafe_key)
    refreshed_user = User.GetByKey(urlsafe_key)
    self.response.write(_RenderUserDetailsTemplate(refreshed_user))
class AddUsersHandler(webapp2.RequestHandler):
  """Add users into the datastore."""

  @admin.OAUTH_DECORATOR.oauth_required
  @admin.RequireAppOrDomainAdmin
  def get(self):
    """Get the form for adding new users.

    If get_all is passed in, all users in the domain will be listed to select.
    If a group_key is passed in and that group is found, all users in that
    group will be listed to select. We do not list groups within groups though.
    If a user_key is passed in, only that user is listed.
    If neither is passed in, the form is still displayed without any users
    listed.
    """
    get_all = self.request.get('get_all')
    group_key = self.request.get('group_key')
    user_key = self.request.get('user_key')
    try:
      directory_service = GoogleDirectoryService(admin.OAUTH_DECORATOR)
      directory_users = []
      if get_all:
        directory_users = directory_service.GetUsers()
      # Bug fix: these comparisons used "is not ''", an identity check
      # against a string literal, which relies on CPython interning and
      # is a SyntaxWarning on Python 3.8+. Use equality instead.
      elif group_key is not None and group_key != '':
        directory_users = directory_service.GetUsersByGroupKey(group_key)
      elif user_key is not None and user_key != '':
        directory_users = directory_service.GetUserAsList(user_key)
      if directory_users != []:
        # Register watches so later directory-side changes are reported.
        for event in ('delete', 'makeAdmin', 'undelete', 'update'):
          directory_service.WatchUsers(event)
      self.response.write(_RenderAddUsersTemplate(directory_users))
    except errors.HttpError as error:
      self.response.write(_RenderAddUsersTemplate([], error))

  @admin.OAUTH_DECORATOR.oauth_required
  @admin.RequireAppOrDomainAdmin
  @xsrf.XSRFProtect
  def post(self):
    """Add all of the selected (or manually entered) users to the datastore."""
    if self.request.get('manual'):
      # A single manually-entered user: build a minimal record shaped
      # like the directory service's user dicts.
      users_to_add = [{
          'name': {'fullName': self.request.get('user_name')},
          'primaryEmail': self.request.get('user_email'),
      }]
    else:
      # selected_user values are python-literal dicts rendered into the
      # form; literal_eval parses them without evaluating arbitrary code.
      users_to_add = [literal_eval(user)
                      for user in self.request.get_all('selected_user')]
    User.InsertUsers(users_to_add)
    self.redirect(PATHS['user_page_path'])
class ToggleKeyRevokedHandler(webapp2.RequestHandler):
  """Toggle the revoked status on a user's keys in the datastore."""

  # pylint: disable=too-few-public-methods
  @admin.OAUTH_DECORATOR.oauth_required
  @admin.RequireAppOrDomainAdmin
  def get(self):
    """Flip the revoked flag for the given user's keys and show the user."""
    urlsafe_key = self.request.get('key')
    User.ToggleKeyRevoked(urlsafe_key)
    toggled_user = User.GetByKey(urlsafe_key)
    self.response.write(_RenderUserDetailsTemplate(toggled_user))
class GetUserDetailsHandler(webapp2.RequestHandler):
  """Display a single user with all associated details and actions."""

  # pylint: disable=too-few-public-methods
  @admin.OAUTH_DECORATOR.oauth_required
  @admin.RequireAppOrDomainAdmin
  def get(self):
    """Respond with the details page for the user key passed in."""
    requested_user = User.GetByKey(self.request.get('key'))
    self.response.write(_RenderUserDetailsTemplate(requested_user))
# Route table mapping the configured paths to their handlers.  The oauth
# decorator's callback route must also be registered here for the oauth
# flow to complete.
APP = webapp2.WSGIApplication([
    (PATHS['landing_page_path'], LandingPageHandler),
    (PATHS['user_page_path'], ListUsersHandler),
    (PATHS['user_delete_path'], DeleteUserHandler),
    (PATHS['user_get_invite_code_path'], GetInviteCodeHandler),
    (PATHS['user_get_new_key_pair_path'], GetNewKeyPairHandler),
    (PATHS['user_add_path'], AddUsersHandler),
    (PATHS['user_toggle_revoked_path'], ToggleKeyRevokedHandler),
    (PATHS['user_details_path'], GetUserDetailsHandler),
    (admin.OAUTH_DECORATOR.callback_path,
     admin.OAUTH_DECORATOR.callback_handler()),
], debug=True)

# This is the only way to catch exceptions from the oauth decorators.
APP.error_handlers[500] = Handle500
| [
37811,
464,
8265,
329,
9041,
2985,
526,
15931,
198,
198,
11748,
13169,
198,
6738,
598,
18392,
62,
11250,
1330,
449,
1268,
37048,
62,
1677,
53,
4663,
1340,
10979,
198,
6738,
6468,
1330,
18875,
62,
18206,
198,
11748,
2779,
2414,
198,
6738... | 2.974155 | 3,521 |
from copy import deepcopy
from hypothesis import strategies as st
from hypothesis import given
import ast
import astor
import renderer
import actions
import core_logic
# TODO: Make a directory with a bunch of kwnow valid .py files
# And test with them all
# I'm assuming dummy.py isn't messed up
tree = ast.parse(open("dummy.py", "r").read())
list_of_action_names_strategy = st.lists(
st.sampled_from(list(actions.actions))
)
@given(st.builds(get_copied_tree), list_of_action_names_strategy)
if __name__ == "__main__":
action_sequence_keeps_ast_valid()
| [
6738,
4866,
1330,
2769,
30073,
198,
6738,
14078,
1330,
10064,
355,
336,
198,
6738,
14078,
1330,
1813,
198,
11748,
6468,
198,
11748,
6468,
273,
198,
198,
11748,
9851,
11882,
198,
11748,
4028,
198,
11748,
4755,
62,
6404,
291,
198,
198,
2,... | 2.798122 | 213 |
# -*- coding: utf-8 -*-
import json
import os
import operator as op
from jsonschema import Draft4Validator
from .db import engine
schema_name = "default_event_schema.json"
schema_path = os.path.join(os.path.dirname(__file__), schema_name)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
10088,
355,
1034,
198,
6738,
44804,
684,
2395,
2611,
1330,
13650,
19,
47139,
1352,
198,
198,
6738,
764,
9945,
1330,
3113,... | 2.741573 | 89 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-07-13 11:27
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
13,
20,
319,
1584,
12,
2998,
12,
1485,
1367,
25,
1983,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
1... | 2.719298 | 57 |
"""
Module with testing utilities.
"""
import numpy
from scipy import sparse
def orthogonal_linear_chain(
        nsites=100,
        contact_size=20,
        coupling=1.0,
        onsite=0.0):
    """
    Build the hamiltonian of an orthogonal nearest neighbor
    linear chain with the correct arrangement for the contacts.

    Note that this simple function goes through a dense matrix and
    it is not suitable for very large matrices.

    Args:
        nsites (int): the number of sites.
        contact_size (int): sites per contact; must be an even number
            of at least 2 (whole principal layers) and smaller than
            half the chain.
        coupling (complex): the hopping matrix element.
        onsite (float): the onsite matrix element.

    Returns:
        scipy.sparse.csr_matrix: the hermitian hamiltonian.

    Raises:
        ValueError: if the contact is too large, smaller than 2 sites,
            or not a multiple of 2 sites.
    """
    if contact_size >= nsites / 2:
        raise ValueError("Contacts are too large")
    # Contacts are made of principal layers of 2 sites, so the size must
    # be an even number >= 2.  Bug fix: the original check used "and",
    # which only rejected contact_size == 1 and wrongly accepted 0 and
    # odd sizes greater than 1.
    if contact_size < 2 or contact_size % 2 != 0:
        raise ValueError("Contacts must have 2 principal layers or multiples.")
    mat = numpy.zeros(shape=(nsites, nsites), dtype='complex128')
    # Upper-triangle hoppings; note that i == 0 writes mat[-1, 0], also
    # coupling the last site to the first (part of the contact layout).
    for i in range(nsites - contact_size):
        mat[i - 1, i] = coupling
    for i in range(nsites - contact_size, nsites):
        mat[i - 1, i] = coupling
    # Couple the first site to the first contact site.
    mat[0, nsites - contact_size] = coupling
    # Half the onsite energy: the hermitization below doubles the diagonal.
    numpy.fill_diagonal(mat, onsite / 2.0)
    mat_csr = sparse.csr_matrix(mat)
    mat_csr = mat_csr + mat_csr.getH()
    mat_csr.sort_indices()
    return mat_csr
def orthogonal_square_2d_lattice(
        nblocks=10,
        block_size=10,
        n_contact_blocks=1,
        coupling=1.0):
    """
    Build a nearest neighbor hamiltonian for a 2d square lattice with contacts
    properly arranged. The modeled lattice is:

    ...*--*--*...
       |  |  |
    ...*--*--*...
       |  |  |
    ...*--*--*...

    The resulting block hamiltonian is:

    0 t     t
    t 0 t   t
      t 0 t
        t 0 t
    t   t 0 t
    t     t 0

    Args:
        nblocks (int): number of blocks (columns) in the lattice.
        block_size (int): number of sites per block.
        n_contact_blocks (int): blocks per contact; must be an even
            number of at least 2.  NOTE(review): the default of 1 is
            rejected by the validation below (and always was) — callers
            must pass an explicit even value; confirm intended default.
        coupling (complex): the hopping matrix element.

    Returns:
        scipy.sparse.csr_matrix: the hermitian hamiltonian.

    Raises:
        ValueError: if n_contact_blocks is smaller than 2 or odd.
    """
    # Contacts consist of principal layers of 2 blocks, so the count must
    # be an even number >= 2.  Bug fix: the original check used "and",
    # which only rejected n_contact_blocks == 1 and wrongly accepted 0
    # and odd counts greater than 1.
    if n_contact_blocks < 2 or n_contact_blocks % 2 != 0:
        raise ValueError("Contacts must have 2 principal layers or multiples.")
    shape = (block_size, block_size)
    onsite_block = numpy.zeros(shape=shape, dtype='complex128')
    hopping_block = numpy.zeros(shape=shape, dtype='complex128')
    # Intra-block bonds (chain within a column) and inter-block bonds
    # (diagonal coupling between adjacent columns).
    for i in range(block_size - 1):
        onsite_block[i, i + 1] = coupling
    for i in range(block_size):
        hopping_block[i, i] = coupling
    norbitals = nblocks * block_size
    mat = numpy.zeros(shape=(norbitals, norbitals), dtype='complex128')
    # Onsite blocks (upper hamiltonian).
    for i in range(nblocks):
        mat[
            i * block_size: (i + 1) * block_size,
            i * block_size: (i + 1) * block_size] = onsite_block
    # Hopping blocks until second contact.
    for i in range(nblocks - n_contact_blocks - 1):
        mat[
            i * block_size: (i + 1) * block_size,
            (i + 1) * block_size: (i + 2) * block_size] = hopping_block
    # Second contact: couple the contact region back to the first block.
    left_contact_index = (nblocks - n_contact_blocks) * block_size
    mat[
        left_contact_index: left_contact_index + block_size,
        0: block_size] = hopping_block
    for i in range(nblocks - n_contact_blocks + 1, nblocks):
        mat[
            (i - 1) * block_size: i * block_size,
            i * block_size: (i + 1) * block_size] = hopping_block
    mat_csr = sparse.csr_matrix(mat)
    mat_csr = mat_csr + mat_csr.conjugate(copy=True).transpose()
    mat_csr.sort_indices()
    return mat_csr
| [
37811,
198,
26796,
351,
4856,
20081,
13,
198,
37811,
198,
11748,
299,
32152,
198,
6738,
629,
541,
88,
1330,
29877,
628,
198,
4299,
29617,
519,
20996,
62,
29127,
62,
7983,
7,
198,
220,
220,
220,
220,
220,
220,
220,
36545,
2737,
28,
3... | 2.332624 | 1,410 |
# -*- coding: utf-8 -*-
"""
Created by susy at 2020/1/27
"""
from utils import singleton, get_payload_from_token, get_now_ts, decrypt_user_id, log as logger
from utils.constant import USER_TYPE
from tornado.web import RequestHandler
@singleton
@singleton
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
416,
2341,
88,
379,
12131,
14,
16,
14,
1983,
198,
37811,
198,
6738,
3384,
4487,
1330,
2060,
1122,
11,
651,
62,
15577,
2220,
62,
6738,
62,
30001,
11,... | 2.827957 | 93 |
from textwrap import dedent
from nose.tools import assert_equal # @UnresolvedImport
from tests import check_as_expected, get_repeated_lines, get_actual_result
from superhelp.helpers.func_help import count_args
excess_args = ', '.join(['arg' + str(i) for i in range(100)])
ROOT = 'superhelp.helpers.func_help.'
def test_arg_count():
    """
    Check count_args totals across positional-only, normal, and
    keyword-only arguments (positional-only needs Python 3.8+).
    """
    xpath = 'descendant-or-self::FunctionDef'
    inc_posonly_func_snippet = dedent("""\
        def multifunc(posonly_arg1=1, posonly_arg2=[], /,
                arg1=2, arg2=3, arg3=[], *, kwonly_arg1={}):
            pass
        """)
    func_snippet1 = dedent("""\
        def multifunc(arg1=2, arg2=3, arg3=[], *, kwonly_arg1={}):
            pass
        """)
    func_snippet2 = dedent("""\
        def multifunc(arg1, arg2, arg3, arg4, arg5, arg6, arg7):
            pass
        """)
    cases = (
        (inc_posonly_func_snippet, 6),
        (func_snippet1, 4),
        (func_snippet2, 7),
    )
    for snippet, expected_result in cases:
        actual_result = get_actual_result(snippet, xpath, count_args)
        assert_equal(expected_result, actual_result)
| [
6738,
2420,
37150,
1330,
4648,
298,
198,
198,
6738,
9686,
13,
31391,
1330,
6818,
62,
40496,
220,
1303,
2488,
3118,
411,
5634,
20939,
198,
198,
6738,
5254,
1330,
2198,
62,
292,
62,
40319,
11,
651,
62,
45956,
515,
62,
6615,
11,
651,
6... | 2.26151 | 543 |
########################################################################
# test/xslt/test_attribute.py
from amara.writers import WriterError, xmlwriter
from amara.xslt import XsltError
PREFIX_TEMPLATE = xmlwriter.xmlwriter.GENERATED_PREFIX
from xslt_support import _run_xml
SOURCE_XML = """<?xml version="1.0"?><dummy/>"""
def test_attribute_1():
    """`xsl:attribute` as child of literal result element"""
    # The attribute created by xsl:attribute must be attached to the
    # enclosing literal result element (<result>) in the serialized output.
    _run_xml(
        source_xml = SOURCE_XML,
        transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
  <xsl:template match='/'>
    <result>
      <xsl:attribute name="foo">bar</xsl:attribute>
    </result>
  </xsl:template>
</xsl:stylesheet>
  """,
        expected = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="bar"/>""")
def test_attribute_2():
"""`xsl:attribute` as child of literal result element"""
_run_xml(
source_xml = SOURCE_XML,
transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match='/'>
<result>
<xsl:attribute name="foo">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
expected = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="bar"/>""")
def test_attribute_3():
"""`xsl:attribute` with namespace"""
_run_xml(
source_xml = SOURCE_XML,
transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match='/'>
<result>
<xsl:attribute name="foo" namespace="http://example.com/spam">bar</xsl:attribute>
<xsl:attribute name="y:foo" namespace="http://example.com/eggs">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
expected = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:%(prefix0)s="http://example.com/spam" xmlns:y="http://example.com/eggs" %(prefix0)s:foo="bar" y:foo="bar"/>""" % {
'prefix0' : PREFIX_TEMPLATE % 0})
def test_attribute_4():
"""adding attributes with the same expanded-name"""
_run_xml(
source_xml = SOURCE_XML,
transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result>
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo">bar</xsl:attribute>
<xsl:attribute name="foo">baz</xsl:attribute>
<xsl:attribute name="foo">maz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
expected = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="maz"/>""")
def test_attribute_5():
"""adding attributes with the same expanded-name"""
_run_xml(
source_xml = SOURCE_XML,
transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result foo="bar">
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo">baz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
expected = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="baz"/>""")
def test_attribute_6():
"""adding attributes with the same expanded-name"""
_run_xml(
source_xml = SOURCE_XML,
transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result>
<xsl:attribute name="foo">bar</xsl:attribute>
<!-- duplicate attrs override previous -->
<!-- we use xsl:if to obscure it a bit -->
<xsl:if test="true()">
<xsl:attribute name="foo">baz</xsl:attribute>
</xsl:if>
</result>
</xsl:template>
</xsl:stylesheet>
""",
expected = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="baz"/>"""
)
def test_attribute_7():
"""adding attributes with the same expanded-name"""
_run_xml(
source_xml = SOURCE_XML,
transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result>
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo" namespace="http://some-ns/">bar</xsl:attribute>
<xsl:attribute name="x:foo" xmlns:x="http://some-ns/">baz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
expected = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:org.4suite.4xslt.ns0="http://some-ns/" org.4suite.4xslt.ns0:foo="baz"/>"""
)
def test_attribute_8():
"""adding attributes with the same expanded-name"""
_run_xml(
source_xml = SOURCE_XML,
transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result x:foo="bar" xmlns:x="http://some-ns/">
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo" namespace="http://some-ns/">baz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
expected = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:x="http://some-ns/" x:foo="baz"/>"""
)
def test_attribute_9():
"""serialization of linefeed in attribute value"""
_run_xml(
source_xml = SOURCE_XML,
transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result>
<!-- linefeed must be serialized as -->
<xsl:attribute name="a">x
y</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
expected = """<?xml version="1.0" encoding="UTF-8"?>
<result a="x y"/>"""
)
def test_attribute_10():
"""substitution of xmlns prefix in attribute name"""
_run_xml(
source_xml = SOURCE_XML,
transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result>
<!-- if an attribute prefix would be xmlns, it must be changed to something else -->
<xsl:attribute name="xmlns:foo" namespace="http://example.com/">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
expected = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:%(prefix0)s="http://example.com/" %(prefix0)s:foo="bar"/>""" % {
'prefix0': PREFIX_TEMPLATE % 0}
)
def test_attribute_11():
"""attributes in various namespaces"""
_run_xml(
source_xml = SOURCE_XML,
transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<result>
<!-- correct results are indicated in the attribute values -->
<xsl:attribute name="foo">local-name foo, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-empty-ns" namespace="">local-name in-empty-ns, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, generated prefix</xsl:attribute>
<xsl:attribute name="pre:foo" xmlns:pre="http://ns-for-pre/">local-name foo, namespace http://ns-for-pre/, preferred prefix pre</xsl:attribute>
<xsl:attribute name="pre:bar" xmlns:pre="http://ns-for-pre/" namespace="http://explicit-ns/">local-name bar, namespace http://explicit-ns/, generated prefix</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
expected = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:pre="http://ns-for-pre/" xmlns:%(prefix0)s="http://foo-ns/" xmlns:%(prefix1)s="http://explicit-ns/" %(prefix1)s:bar="local-name bar, namespace http://explicit-ns/, generated prefix" foo="local-name foo, no namespace, no prefix" in-empty-ns="local-name in-empty-ns, no namespace, no prefix" pre:foo="local-name foo, namespace http://ns-for-pre/, preferred prefix pre" %(prefix0)s:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, generated prefix"/>""" % {'prefix0': PREFIX_TEMPLATE % 0,
'prefix1': PREFIX_TEMPLATE % 1}
)
def test_attribute_12():
"""attributes in empty and in-scope default namespaces"""
_run_xml(
source_xml = SOURCE_XML,
transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<!-- the element should be in the http://foo-ns/ namespace. -->
<!-- the element *may*, but most likely won't, bear the same generated prefix as the in-foo-ns attribute. -->
<result xmlns="http://foo-ns/">
<!-- A default namespace is in scope, but this does not affect the value of 'name' in xsl:attribute. -->
<!-- in-foo-ns attribute does not inherit the default namespace. It *must* have a prefix, bound to http://foo-ns/ -->
<xsl:attribute name="foo">local-name foo, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-empty-ns" namespace="">local-name in-empty-ns, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, generated prefix</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>
""",
expected = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns="http://foo-ns/" xmlns:%(prefix0)s="http://foo-ns/" foo="local-name foo, no namespace, no prefix" in-empty-ns="local-name in-empty-ns, no namespace, no prefix" %(prefix0)s:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, generated prefix"/>""" % {
'prefix0': PREFIX_TEMPLATE % 0}
)
def test_attribute_13():
"""attributes in empty and in-scope non-default namespaces"""
_run_xml(
source_xml = SOURCE_XML,
transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<!-- element should be in http://foo-ns/ namespace, retaining prefix foo -->
<foo:result xmlns:foo="http://foo-ns/">
<xsl:attribute name="foo">local-name foo, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-empty-ns" namespace="">local-name in-empty-ns, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, prefix foo</xsl:attribute>
</foo:result>
</xsl:template>
</xsl:stylesheet>
""",
# it's technically OK for the in-foo-ns attr to have a
# generated prefix, but it really should re-use the foo.
expected = """<?xml version="1.0" encoding="UTF-8"?>
<foo:result xmlns:foo="http://foo-ns/" foo="local-name foo, no namespace, no prefix" in-empty-ns="local-name in-empty-ns, no namespace, no prefix" foo:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, prefix foo"/>"""
)
def test_attribute_14():
"""attributes using in-scope namespaces and duplicate prefixes"""
_run_xml(
source_xml = SOURCE_XML,
transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<!-- element should be in http://foo-ns/ namespace, retaining prefix foo -->
<pre:result xmlns:pre="http://foo-ns/">
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, prefix pre</xsl:attribute>
<xsl:attribute name="pre:bar" xmlns:pre="http://ns-for-pre/" namespace="http://explicit-ns/">local-name bar, namespace http://explicit-ns/, generated prefix</xsl:attribute>
</pre:result>
</xsl:template>
</xsl:stylesheet>
""",
# the bar attribute must have a generated prefix.
# it's technically OK for the in-foo-ns attr to have a
# generated prefix, but it really should re-use the pre.
expected = """<?xml version="1.0" encoding="UTF-8"?>
<pre:result xmlns:pre="http://foo-ns/" xmlns:%(prefix0)s="http://explicit-ns/" pre:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, prefix pre" %(prefix0)s:bar="local-name bar, namespace http://explicit-ns/, generated prefix"/>""" % {'prefix0': PREFIX_TEMPLATE % 0}
)
def test_attribute_error_1():
    """Adding an attribute after non-attribute content must fail."""
    # Per XSLT 1.0, attributes cannot be added to an element once child
    # content (here an xsl:text node) has been emitted; the writer is
    # expected to raise ATTRIBUTE_ADDED_TOO_LATE.
    try:
        _run_xml(
            source_xml = """<?xml version="1.0"?><dummy/>""",
            transform_xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
  <xsl:template match='/'>
    <result>
      <xsl:text>Hello World</xsl:text>
      <xsl:attribute name="foo">bar</xsl:attribute>
    </result>
  </xsl:template>
</xsl:stylesheet>
  """,
            expected = None)
    except WriterError, err:
        assert err.code == WriterError.ATTRIBUTE_ADDED_TOO_LATE
    else:
        raise AssertionError("should have failed!")
def test_attribute_error_2():
    """adding attribute to non-element"""
    # An xsl:attribute with no element being constructed (template output
    # starts directly with the attribute) must be rejected.
    try:
        _run_xml(
            source_xml = """<?xml version="1.0"?><dummy/>""",
            transform_xml = """<?xml version="1.0"?>
    <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
      <xsl:template match='/'>
        <xsl:attribute name="foo">bar</xsl:attribute>
      </xsl:template>
    </xsl:stylesheet>
    """,
            expected = None)
    # Py3-compatible exception syntax (was 'except WriterError, err:').
    except WriterError as err:
        assert err.code == WriterError.ATTRIBUTE_ADDED_TO_NON_ELEMENT
    else:
        raise AssertionError("should have failed!")
def test_attribute_error_3():
    """creating non-text during xsl:attribute instantiation"""
    # Only text may be produced inside xsl:attribute; a comment node in the
    # attribute value template is an error.
    try:
        _run_xml(
            source_xml = """<?xml version="1.0"?><dummy/>""",
            transform_xml = """<?xml version="1.0"?>
    <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
      <xsl:template match='/'>
        <xsl:attribute name="foo">
          <xsl:comment>no-no</xsl:comment>
        </xsl:attribute>
      </xsl:template>
    </xsl:stylesheet>
    """,
            expected = None)
    # Py3-compatible exception syntax (was 'except XsltError, err:').
    except XsltError as err:
        assert err.code == XsltError.NONTEXT_IN_ATTRIBUTE
    else:
        raise AssertionError("should have failed!")
def test_attribute_error_4():
    """illegal attribute name ("xmlns")"""
    # "xmlns" is a namespace declaration, not an attribute name; creating it
    # via xsl:attribute is forbidden by the XSLT spec.
    try:
        _run_xml(
            source_xml = """<?xml version="1.0"?><dummy/>""",
            transform_xml = """<?xml version="1.0"?>
    <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
      <xsl:template match='/'>
        <result>
          <xsl:attribute name="xmlns">http://example.com/</xsl:attribute>
        </result>
      </xsl:template>
    </xsl:stylesheet>
    """,
            expected = None)
    # Py3-compatible exception syntax (was 'except XsltError, err:').
    except XsltError as err:
        assert err.code == XsltError.BAD_ATTRIBUTE_NAME
    else:
        raise AssertionError("should have failed!")
def test_attribute_error_5():
    """illegal attribute name (non-QName)"""
    # "#invalid" is not a lexically valid QName, so attribute creation
    # must fail with INVALID_QNAME_ATTR.
    try:
        _run_xml(
            source_xml = """<?xml version="1.0"?><dummy/>""",
            transform_xml = """<?xml version="1.0"?>
    <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
      <xsl:template match='/'>
        <result>
          <xsl:attribute name="#invalid">bar</xsl:attribute>
        </result>
      </xsl:template>
    </xsl:stylesheet>
    """,
            expected = None)
    # Py3-compatible exception syntax (was 'except XsltError, err:').
    except XsltError as err:
        assert err.code == XsltError.INVALID_QNAME_ATTR
    else:
        raise AssertionError("should have failed!")
def test_attribute_error_6():
    """illegal namespace-uri"""
    # The reserved XML and xmlns namespace URIs may not be assigned to
    # attributes created with xsl:attribute.
    try:
        _run_xml(
            source_xml = """<?xml version="1.0"?><dummy/>""",
            transform_xml = """<?xml version="1.0"?>
    <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
      <xsl:template match='/'>
        <result>
          <xsl:attribute name="foo" namespace="http://www.w3.org/XML/1998/namespace">bar</xsl:attribute>
          <xsl:attribute name="spam" namespace="http://www.w3.org/2000/xmlns/">eggs</xsl:attribute>
        </result>
      </xsl:template>
    </xsl:stylesheet>
    """,
            expected = None)
    # Py3-compatible exception syntax (was 'except XsltError, err:').
    except XsltError as err:
        assert err.code == XsltError.INVALID_NS_URIREF_ATTR
    else:
        raise AssertionError("should have failed!")
# This module is meant to be collected by a test runner, not run directly.
if __name__ == '__main__':
    raise SystemExit("use nosetests")
| [
29113,
29113,
7804,
198,
2,
1332,
14,
34223,
2528,
14,
9288,
62,
42348,
13,
9078,
198,
6738,
716,
3301,
13,
34422,
1330,
26606,
12331,
11,
35555,
16002,
198,
6738,
716,
3301,
13,
34223,
2528,
1330,
1395,
82,
2528,
12331,
198,
198,
47,... | 2.432865 | 6,755 |
import numpy as np
# Add small Gaussian noise to particles to reinvigorate them
| [
11748,
299,
32152,
355,
45941,
198,
198,
2,
3060,
1402,
12822,
31562,
7838,
284,
13166,
284,
6865,
85,
328,
16262,
606,
628,
198
] | 3.608696 | 23 |
from abc import ABC, abstractmethod
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
628
] | 4.111111 | 9 |
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing psping installation and cleanup functions.
psping is a tool made for benchmarking Windows networking.
"""
import json
import ntpath
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
# Fixed TCP port the psping server listens on during the test.
TEST_PORT = 5000
# File on the sending VM where client output is redirected, then read back.
PSPING_OUTPUT_FILE = 'psping_out'
# psping ships inside the Sysinternals PSTools bundle.
PSPING_DIR = 'PSTools'
PSPING_ZIP = PSPING_DIR + '.zip'
PSPING_URL = 'https://download.sysinternals.com/files/' + PSPING_ZIP
flags.DEFINE_integer('psping_packet_size', 1,
                     'The size of the packet to test the ping with.')
flags.DEFINE_integer('psping_bucket_count', 100,
                     'For the results histogram, number of columns')
flags.DEFINE_integer('psping_rr_count', 1000,
                     'The number of pings to attempt')
flags.DEFINE_integer('psping_timeout', 10,
                     'The time to allow psping to run')
def Install(vm):
  """Download the Sysinternals PSTools archive onto the VM and extract it.

  Args:
    vm: Windows virtual machine that psping should be installed on.
  """
  destination = ntpath.join(vm.temp_dir, PSPING_ZIP)
  vm.DownloadFile(PSPING_URL, destination)
  vm.UnzipFile(destination, vm.temp_dir)
@vm_util.Retry(max_retries=3)
def RunLatencyTest(sending_vm, receiving_vm, use_internal_ip=True):
  """Run the psping latency test.
  Uses a TCP request-response time to measure latency.
  Args:
    sending_vm: the vm to send the tcp request.
    receiving_vm: the vm acting as the server.
    use_internal_ip: whether or not to use the private IP or the public IP.
  Returns:
    list of samples representing latency between the two VMs.
  """
  server_ip = (receiving_vm.internal_ip if use_internal_ip
               else receiving_vm.ip_address)
  # Client: run a TCP request/response (-q quiet, -i 0 no interval) of
  # FLAGS.psping_rr_count pings, bucketed into a histogram, redirecting the
  # report to PSPING_OUTPUT_FILE so it can be read back afterwards.
  client_command = (
      'cd {psping_exec_dir}; '
      'sleep 2;'  # sleep to make sure the server starts first.
      '.\\psping.exe /accepteula -l {packet_size} -i 0 -q '
      '-n {rr_count} -h {bucket_count} {ip}:{port}'
      ' > {out_file}').format(
          psping_exec_dir=sending_vm.temp_dir,
          packet_size=FLAGS.psping_packet_size,
          rr_count=FLAGS.psping_rr_count,
          bucket_count=FLAGS.psping_bucket_count,
          ip=server_ip,
          port=TEST_PORT,
          out_file=PSPING_OUTPUT_FILE)
  # PSPing does not have a configurable timeout. To get around this, start the
  # server as a background job, then kill it after 10 seconds
  server_command = (
      '{psping_exec_dir}\\psping.exe /accepteula -s 0.0.0.0:{port};').format(
          psping_exec_dir=sending_vm.temp_dir,
          port=TEST_PORT)
  # _RunPsping is defined elsewhere in this module (not visible here);
  # server and client are launched as parallel processes.
  # NOTE(review): the literal 200 and 1 are presumably max runtime seconds
  # and retry/poll count for RunParallelProcesses — confirm its signature.
  process_args = [(_RunPsping, (receiving_vm, server_command), {}),
                  (_RunPsping, (sending_vm, client_command), {})]
  background_tasks.RunParallelProcesses(process_args, 200, 1)
  # Read the redirected client report back from the sending VM.
  cat_command = 'cd {psping_exec_dir}; cat {out_file}'.format(
      psping_exec_dir=sending_vm.temp_dir,
      out_file=PSPING_OUTPUT_FILE)
  output, _ = sending_vm.RemoteCommand(cat_command)
  return ParsePspingResults(output, sending_vm, receiving_vm, use_internal_ip)
# example output
# PsPing v2.10 - PsPing - ping, latency, bandwidth measurement utility
# Copyright (C) 2012-2016 Mark Russinovich
# Sysinternals - www.sysinternals.com
#
# TCP latency test connecting to 10.138.0.2:47001: Connected
# 15 iterations (warmup 5) sending 8192 bytes TCP latency test: 0%
# Connected
# 15 iterations (warmup 5) sending 8192 bytes TCP latency test: 100%
#
# TCP roundtrip latency statistics (post warmup):
# Sent = 10, Size = 8192, Total Bytes: 81920,
# Minimum = 0.19ms, Maxiumum = 0.58ms, Average = 0.27ms
#
# Latency\tCount
# 0.30\t688
# 0.51\t292
# 0.71\t15
# 0.92\t2
# 1.13\t0
# 1.33\t2
# 1.54\t0
# 1.75\t0
# 1.95\t0
# 2.16\t1
def ParsePspingResults(results, client_vm, server_vm, internal_ip_used):
  """Turn psping output into a list of samples.
  Args:
    results: string of the psping output
    client_vm: the VM performing the latency test
    server_vm: the VM serving the latency test
    internal_ip_used: whether or not the private IP was used.
  Returns:
    list of samples reflecting the psping results
  """
  output_list = [val.rstrip('\r') for val in results.split('\n')]
  # There should be exactly one line like this:
  #   Minimum = 0.19ms, Maxiumum = 0.58ms, Average = 0.27ms
  data_line = [line for line in output_list if 'Minimum' in line][0]
  # split the line up by spaces
  data_line = [val for val in data_line.split(' ') if val]
  # rstrip('ms,') removes the unit suffix and trailing comma.
  minimum = float(data_line[2].rstrip('ms,'))
  maximum = float(data_line[5].rstrip('ms,'))
  average = float(data_line[8].rstrip('ms,'))
  metadata = {
      'internal_ip_used': internal_ip_used,
      'sending_zone': client_vm.zone,
      'sending_machine_type': client_vm.machine_type,
      'receiving_zone': server_vm.zone,
      'receiving_machine_type': server_vm.machine_type,
  }
  samples = [
      sample.Sample('latency', average, 'ms', metadata),
      sample.Sample('latency:maximum', maximum, 'ms', metadata),
      sample.Sample('latency:minimum', minimum, 'ms', metadata),
  ]
  histogram = []
  index = 1
  for line in output_list:
    line_data = [val for val in line.split(' ') if val]
    # the line should look like ['entry\tvalue']
    # BUGFIX: was 'is not 1' — identity comparison against an int literal
    # only works by CPython small-int interning accident; use !=.
    if len(line_data) != 1:
      continue
    entry_data = line_data[0].split('\t')
    if len(entry_data) != 2:
      continue
    # Skip the 'Latency\tCount' header row.
    if 'Latency' in entry_data:
      continue
    # This is a histogram data line
    latency = float(entry_data[0])
    count = int(entry_data[1])
    histogram.append({'latency': latency,
                      'count': count,
                      'bucket_number': index})
    index += 1
  # NOTE(review): this mutates the same dict referenced by the three samples
  # above, so they also gain the histogram entry — presumably intentional,
  # but verify against sample.Sample semantics.
  metadata.update({'histogram': json.dumps(histogram)})
  samples.append(sample.Sample('latency:histogram', 0, 'ms', metadata))
  return samples
| [
2,
15069,
2864,
2448,
69,
20827,
44199,
4102,
263,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 2.614215 | 2,434 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify the ability to #include a file with an absolute path name. (Which
is not strictly a test of using $CPPPATH, but it's in the ball park...)
"""
import os
import TestSCons
test = TestSCons.TestSCons()
test.subdir('include', 'work')
inc1_h = test.workpath('include', 'inc1.h')
inc2_h = test.workpath('include', 'inc2.h')
does_not_exist_h = test.workpath('include', 'does_not_exist.h')
# Verify that including an absolute path still works even if they
# double the separators in the input file. This can happen especially
# on Windows if they use \\ to represent an escaped backslash.
inc2_h = inc2_h.replace(os.sep, os.sep+os.sep)
test.write(['work', 'SConstruct'], """\
Program('prog.c')
""")
test.write(['work', 'prog.c'], """\
#include <stdio.h>
#include "%(inc1_h)s"
#include "%(inc2_h)s"
#if 0
#include "%(does_not_exist_h)s"
#endif
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
printf("%%s\\n", STRING1);
printf("%%s\\n", STRING2);
return 0;
}
""" % locals())
test.write(['include', 'inc1.h'], """\
#define STRING1 "include/inc1.h A\\n"
""")
test.write(['include', 'inc2.h'], """\
#define STRING2 "include/inc2.h A\\n"
""")
test.run(chdir = 'work', arguments = '.')
test.up_to_date(chdir = 'work', arguments = '.')
test.write(['include', 'inc1.h'], """\
#define STRING1 "include/inc1.h B\\n"
""")
test.not_up_to_date(chdir = 'work', arguments = '.')
test.write(['include', 'inc2.h'], """\
#define STRING2 "include/inc2.h B\\n"
""")
test.not_up_to_date(chdir = 'work', arguments = '.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
11593,
34,
3185,
38162,
9947,
834,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
198,
2,
257,
4866,
286,
428,
3788,
290,
... | 2.812067 | 1,011 |
#gui Uses python 2, else uses python 3
import Tkinter as tk
import time
if __name__ == '__main__':
main() | [
2,
48317,
36965,
21015,
362,
11,
2073,
3544,
21015,
513,
220,
201,
198,
11748,
309,
74,
3849,
355,
256,
74,
201,
198,
11748,
640,
201,
198,
220,
220,
220,
220,
201,
198,
220,
220,
220,
220,
201,
198,
201,
198,
361,
11593,
3672,
83... | 2.245614 | 57 |
from typing import Any, List # pragma: no cover
from amortization.amount import calculate_amortization_amount # pragma: no cover
from amortization.schedule import amortization_schedule # pragma: no cover
| [
6738,
19720,
1330,
4377,
11,
7343,
220,
1303,
23864,
2611,
25,
645,
3002,
198,
198,
6738,
716,
419,
1634,
13,
17287,
1330,
15284,
62,
321,
419,
1634,
62,
17287,
220,
1303,
23864,
2611,
25,
645,
3002,
198,
6738,
716,
419,
1634,
13,
1... | 3.542373 | 59 |
import gzip, json, sys
import numpy as np
import time
import argparse
import os
import util
from sqlitedict import SqliteDict
'''usage: (goes in shell script)
python3 -u code/create_word_index.py \
-outDir 'betatest/out/'
'''
'''global argparse'''
parser = argparse.ArgumentParser(description='Processing list of files...')
parser.add_argument('-outDir', required=True, help="Directory where all outfiles will be written to. Example: 'out/'")
args = parser.parse_args()
'''global variables'''
data = open(args.outDir+"shapes.txt","r")
shapes = data.read().splitlines()
shapes_dict = {}
for item in shapes:
fname,length = item.split(" ")
shapes_dict[fname]= int(length)
if __name__ == '__main__':
    ## initialize the amalgamated words dict
    words = SqliteDict(args.outDir+"words.db")
    running_len = 0
    print('---argparser---:')
    for arg in vars(args):
        print(arg, getattr(args, arg))
    # Collect the per-job shard files, sorted so global indices are stable
    # across runs.
    words_fnames = sorted([args.outDir+fname for fname in os.listdir(args.outDir) if fname.startswith('words_job')])
    print('\nnwords_fnames: ', len(words_fnames))
    for fname in words_fnames:
        print(fname)
    start = time.time()
    for words_fname in words_fnames:
        amalgamate_start = time.time()
        # NOTE(review): shapes_dict is keyed by the names read from
        # shapes.txt; here the lookup key includes the outDir prefix —
        # confirm shapes.txt stores full paths.
        cur_len = shapes_dict[words_fname]
        cur_words_dict = SqliteDict(words_fname)
        # Re-key every shard entry with a global index offset by the number
        # of entries merged so far.
        idx_iter = range(running_len, running_len+cur_len)
        values_iter = iter(cur_words_dict.values())
        words.update([(idx, word_tuple) for idx, word_tuple in zip(idx_iter, values_iter)])
        words.commit()
        cur_words_dict.close()
        running_len += cur_len
        cur_amalgamate_time = time.time() - amalgamate_start
        print('\nTime elapsed adding file %s to words_dict:\t%s' % (words_fname, time.strftime("%H:%M:%S", time.gmtime(cur_amalgamate_time))))
    print_start = time.time()
    print('\nlen(words): {}'.format(len(words)))
    print_time = time.time() - print_start
    words.close()
    elapsed_time = time.time() - start
    print('\n--stats--')
    print('\nTime elapsed creating amalgamated word index:\t%s' % (time.strftime("%H:%M:%S", time.gmtime(elapsed_time))))
| [
11748,
308,
13344,
11,
33918,
11,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
7736,
198,
6738,
44161,
863,
713,
1330,
311,
13976,
578,
35,
713,
198,
198,
7061,
6,
... | 2.453423 | 891 |
# open the PDB file
filename = open("./model8.pdb", "r")
# for each model in the file, we would like
# to create a list which contains the coordinates of the Carbon alpha atoms
import math
biglist= [] # one sub-list of CA coordinates per MODEL record
a = -1 # start at -1 so the first "MODEL" line increments the index to 0
for line in filename: # for every line of the file
    if "MODEL" in line: # a new model starts here
        a+= 1# advance to the next model index
        biglist.append([]) # add an empty list that will hold this model's atoms
        # coordinates get appended to it below
    if "CA" in line:
        line= line.split()
        # NOTE(review): when an altLoc/chain field "A" is present the x,y,z
        # columns shift right by one — presumably correct for this PDB's
        # fixed layout; verify against the actual file format.
        if "A" in line:
            inc= 1
        else:
            inc= 0
        line= line[5+inc:8+inc]
        for k in range (len(line)):
            line[k]= float(line[k])
        biglist[a].append(line)
x=0
# NOTE(review): the RMSD below compares only models 0 and 1, while the loop
# bound uses the *last* model's atom count — confirm this is intended.
for i in range(len(biglist[a])):
    #print(biglist[0][i],biglist[1][i])
    for j in range (3):
        cord1= float(biglist[0][i][j])
        cord2= float(biglist[1][i][j])
        t=cord1-cord2
        t=t**2
        x=x+t
rmsd= math.sqrt((x/len(biglist[a])))
print(rmsd)
2,
9654,
262,
2393,
198,
34345,
796,
1280,
7,
1911,
14,
19849,
23,
13,
79,
9945,
1600,
366,
81,
4943,
198,
2,
1640,
1123,
2746,
287,
262,
2393,
11,
356,
561,
588,
198,
2,
1462,
2251,
257,
1351,
543,
4909,
262,
22715,
286,
262,
2... | 2.214953 | 428 |
#!/usr/bin/env python
from ftplib import FTP
import argparse, codes3d, configparser, gzip, os, re, requests, subprocess, sys
if __name__ == "__main__":
    # CLI entry point: each flag selects one default dataset to download.
    # The download_* helpers are defined elsewhere in this module.
    parser = argparse.ArgumentParser(description="Download the default data files for CoDeS3D. These will be downloaded into the default directories specified in the supplied config file (docs/conf.py by default).")
    parser.add_argument("-s","--snps",action="store_true",default=False,help="Download dbSNP data (build 146).")
    parser.add_argument("-i","--hic",nargs='*',help="Download Hi-C data. If no arguments are given, this will download Hi-C datasets for cell-lines\
    GM12878, HeLa, HMEC, HUVEC, IMR90, K562, KBM7, and NHEK. Additionally, any of these can be passed as an argument in a space-separated list, e.g.\
    `-i GM12878 NHEK`. NOTE: THIS COMMAND DOWNLOADS HUNDREDS OF GIGABYTES OF DATA, AND MAY RUN FOR A LONG TIME. Consider downloading individual cell lines.")
    parser.add_argument("-g","--gene",action="store_true",default=False,help="Download GENCODE gene reference from GTEx portal.")
    parser.add_argument("-e","--cis_eqtls",action="store_true",default=False,help="Download cis-eQTLs found in GTEx analysis v6.")
    parser.add_argument("-p","--hg19",action="store_true",default=False,help="Download GRCh37.p13 (hg19) build of the human genome.")
    parser.add_argument("-a","--all",action="store_true",default=False,help="Download all default data files. NOTE: ALL DATA FILES TOTAL HUNDREDS OF GIGABYTES. It may be safest to \
    download default datasets individually in case of failure.")
    parser.add_argument("-c","--config_file",default=os.path.join(os.path.dirname(__file__),"../docs/codes3d.conf"),help="The configuration file to use to resolve library directories.")
    parser.add_argument("-b","--do_not_build_dbs",action="store_true",default=False,help="Do not build associated databases/indices for downloaded data (default: False).")
    parser.add_argument("-t","--do_not_tidy_up",action="store_true",default=False,help="Do not remove intermediate files after building databases (default: False).")
    args = parser.parse_args()
    config = configparser.ConfigParser()
    config.read(args.config_file)
    # --all is shorthand that switches on every individual dataset flag.
    if args.all:
        args.snps = True
        args.hic = True
        args.gene = True
        args.cis_eqtls = True
        args.hg19 = True
    # Dispatch each requested download in turn.
    if args.snps:
        download_snp_data(config,args.do_not_build_dbs,args.do_not_tidy_up)
    if args.hic:
        download_hic_data(args.hic,config,args.do_not_build_dbs,args.do_not_tidy_up)
    if args.gene:
        download_gene_reference(config,args.do_not_build_dbs,args.do_not_tidy_up)
    if args.cis_eqtls:
        download_cis_eqtls(config,args.do_not_build_dbs,args.do_not_tidy_up)
    if args.hg19:
        download_human_genome(config,args.do_not_build_dbs)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
10117,
489,
571,
1330,
45854,
198,
11748,
1822,
29572,
11,
12416,
18,
67,
11,
4566,
48610,
11,
308,
13344,
11,
28686,
11,
302,
11,
7007,
11,
850,
14681,
11,
25064,
628,
19... | 2.871822 | 944 |
import os
import numpy as np
if __name__ == "__main__":
getdataset() | [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
651,
19608,
292,
316,
3419
] | 2.517241 | 29 |
# -*- coding: utf-8 -*-
from simmate.toolkit.transformations.base import Transformation
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
985,
9830,
13,
25981,
15813,
13,
35636,
602,
13,
8692,
1330,
49127,
628
] | 3.103448 | 29 |
import sys
import fileinput
import numpy as np
if __name__ == '__main__':
main() | [
11748,
25064,
198,
11748,
2393,
15414,
198,
11748,
299,
32152,
355,
45941,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419
] | 2.866667 | 30 |
# Scrape the worldometers coronavirus page into a BeautifulSoup tree.
from bs4 import BeautifulSoup
import requests
import json
link='https://www.worldometers.info/coronavirus/'
# Network I/O; no error handling — raises on connection failure.
source = requests.get(link).text
# NOTE(review): the 'lxml' parser requires the lxml package to be installed.
soup = BeautifulSoup(source, 'lxml')
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
7007,
198,
11748,
33918,
198,
8726,
11639,
5450,
1378,
2503,
13,
6894,
40077,
13,
10951,
14,
10215,
261,
615,
19397,
14,
6,
198,
10459,
796,
7007,
13,
1136,
7,
8726,
737,
5239,
... | 3.016667 | 60 |
"""
SatelliteContentHostsCount - command ``psql -d foreman -c 'select count(*) from hosts'``
========================================================================================
The SatelliteContentHostsCount parser reads the output of
``psql -d foreman -c 'select count(*) from hosts'``.
Sample output of ``psql -d foreman -c 'select count(*) from hosts'``::
count
-------
13
(1 row)
Examples::
>>> type(clients)
<class 'insights.parsers.satellite_content_hosts_count.SatelliteContentHostsCount'>
>>> clients.count
13
"""
from insights import parser, CommandParser
from insights.specs import Specs
from insights.parsers import SkipException, ParseException
@parser(Specs.satellite_content_hosts_count)
class SatelliteContentHostsCount(CommandParser):
    """
    Read the ``psql -d foreman -c 'select count(*) from hosts'``
    and set the hosts count to property ``count``.
    Attributes:
        count (int): The count of satellite content hosts
    """
    # NOTE(review): the parse_content implementation is not visible in this
    # excerpt; ``count`` is presumably populated there from the psql output.
| [
37811,
198,
50,
26493,
19746,
17932,
82,
12332,
532,
3141,
7559,
862,
13976,
532,
67,
1674,
805,
532,
66,
705,
19738,
954,
7,
28104,
422,
11453,
6,
15506,
198,
23926,
4770,
2559,
198,
198,
464,
33530,
19746,
17932,
82,
12332,
30751,
9... | 3.230769 | 312 |
import os
import random
import shortuuid
from PIL import Image
from config import config
from model import Game, Pack
| [
11748,
28686,
198,
11748,
4738,
198,
198,
11748,
1790,
12303,
312,
198,
6738,
350,
4146,
1330,
7412,
198,
198,
6738,
4566,
1330,
4566,
198,
6738,
2746,
1330,
3776,
11,
6400,
628,
628,
198
] | 3.757576 | 33 |
# generated by datamodel-codegen:
# filename: test.json
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from pydantic import BaseModel, Field
| [
2,
7560,
416,
4818,
321,
375,
417,
12,
8189,
5235,
25,
198,
2,
220,
220,
29472,
25,
220,
1332,
13,
17752,
198,
2,
220,
220,
41033,
25,
13130,
12,
2998,
12,
2075,
51,
405,
25,
405,
25,
405,
10,
405,
25,
405,
198,
198,
6738,
115... | 2.825397 | 63 |
from setuptools import setup, find_packages
# Packaging metadata for the redis-orm distribution.
setup(
    name="redis-orm",
    description="Redis object relation mapper",
    author="minamorl",
    author_email="minamorl@minamorl.com",
    version="0.4.1",
    packages=find_packages(),
    # Test-only dependency; run the suite via `tox`.
    tests_require=['tox'],
    install_requires=[
        "redis",
        "python-dateutil",
    ]
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
445,
271,
12,
579,
1600,
198,
220,
220,
220,
6764,
2625,
7738,
271,
2134,
8695,
285,
11463,
1600,
198,
220,
220,
220,
1772... | 2.328767 | 146 |
import codecs
from corehq.apps.sms.util import clean_phone_number
from corehq.apps.sms.api import incoming
from corehq.apps.sms.models import SQLSMSBackend
from six.moves.urllib.request import urlopen
from six.moves.urllib.parse import urlencode
from corehq.messaging.smsbackends.unicel.forms import UnicelBackendForm
from django.conf import settings
OUTBOUND_URLBASE = "http://www.unicel.in/SendSMS/sendmsg.php"
class InboundParams(object):
    """
    A constant-defining class for incoming sms params
    """
    # HTTP GET parameter names used by the Unicel gateway when it delivers
    # an inbound SMS to our endpoint.
    SENDER = "send"
    MESSAGE = "msg"
    # 1 if message is multipart message, 0 otherwise
    UDHI = "UDHI"
    # gateway message id
    MID = "MID"
    # 8 if message is a unicode hex string, 0 if ascii
    DCS = "DCS"
class OutboundParams(object):
    """
    A constant-defining class for outbound sms params
    """
    # Query parameter names expected by the Unicel send endpoint
    # (OUTBOUND_URLBASE above).
    SENDER = "send"
    MESSAGE = "msg"
    USERNAME = "uname"
    PASSWORD = "pass"
    DESTINATION = "dest"
    # constant additional parameters when sending a unicode message
    UNICODE_PARAMS = [("udhi", 0),
                      ("dcs", 8)]
def create_from_request(request, backend_id=None):
    """Build and persist a message log entry from an inbound gateway request.

    Reads the Unicel GET parameters off the request, normalizes the sender
    number and message text, and hands them to the SMS framework's
    ``incoming`` handler.

    Args:
        request: the Django request carrying the gateway's GET parameters.
        backend_id: optional id of the backend that received the message.

    Returns:
        The message log object produced by ``incoming``.
    """
    params = request.GET
    phone = params[InboundParams.SENDER]
    text = params[InboundParams.MESSAGE]

    # Bare 10-digit numbers are assumed to be Indian; prepend country code.
    if len(phone) == 10:
        phone = '91' + phone

    # DCS == "8" marks the payload as a hex-encoded UTF-16 (big endian) string.
    if params.get(InboundParams.DCS, "") == "8":
        text = codecs.decode(codecs.decode(text, 'hex'), 'utf_16_be')

    return incoming(
        phone,
        text,
        SQLUnicelBackend.get_api_id(),
        backend_message_id=params.get(InboundParams.MID, None),
        backend_id=backend_id,
    )
| [
11748,
40481,
82,
198,
198,
6738,
4755,
71,
80,
13,
18211,
13,
82,
907,
13,
22602,
1330,
3424,
62,
4862,
62,
17618,
198,
6738,
4755,
71,
80,
13,
18211,
13,
82,
907,
13,
15042,
1330,
15619,
198,
6738,
4755,
71,
80,
13,
18211,
13,
... | 2.541953 | 727 |
import sys
import json
import numpy as np
from typing import Any, Union
from flask import Response, Blueprint, request
sys.path.append("./robot_work_zone_estimation")
from app.extensions import mongo, cache
from app.route_utils import load_classes
from app.fill_databse import FillDatabase
from app.objects_utils import get_wrapper
from app.mongo_controller import MongoController
from app.image_decoding_utils import deocode_image
from app.faces_utils import check_persons, detect_faces
from robot_work_zone_estimation.src.workzone import Workzone
from robot_work_zone_estimation.src.calibrate_camera_utils import CameraParams
from app.nn_inference.faces.wrappers.face_recognition_lib_wrapper import FaceRecognitionLibWrapper
from robot_work_zone_estimation.src.aruco_zone_estimation import ArucoZoneEstimator, ARUCO_MARKER_SIZE
bp_main = Blueprint('blueprint_main', __name__)
# TODO: cache wrappers
@bp_main.route('/face/<target>', methods=['POST'])
def face_processing(target: str) -> Union[Response, Any]:
    """
    Detect or recognize faces on the posted image.

    Parameters
    ----------
    target: str
        'detect'    -> respond with bounding boxes only.
        'recognize' -> respond with recognized ids plus bounding boxes.

    Return
    ------
    JSON Response with the requested result, or a 404 for unknown targets.
    """
    image = deocode_image(request.data)

    if target == "recognize":
        locations = detect_faces(image)
        ids = check_persons(image, MongoController(mongo))
        payload = {'faces_id': list(ids),
                   'faces_loc': list(locations)}
    elif target == "detect":
        payload = {'faces_loc': list(detect_faces(image))}
    else:
        return Response("empty", status=404)

    return Response(json.dumps(payload), status=200, mimetype="application/json")
@bp_main.route("/object/<target>", methods=['POST'])
def objects_processing(target: str) -> Response:
    """
    Run the requested vision task on the posted image.

    Parameters
    ----------
    target: str
        'segmentation' -> semantic segmentation (RLE masks).
        'detection'    -> object detection (boxes and classes).
        'keypoints'    -> human keypoint detection ([x, y] per keypoint).

    Return
    ------
    JSON Response keyed by the task's result name.
    """
    frame = deocode_image(request.data)
    model, result_key = get_wrapper(target)
    prediction = model.predict((frame, ))[0]
    body = json.dumps({result_key: prediction.to_dict(f"/object/{target}")})
    return Response(body, status=200, mimetype="application/json")
# NOTE(review): the cache decorator and the POST "/workzone" route stacked
# here look like leftovers from a removed workzone endpoint — as written they
# all attach to fill_database; verify the intended routing.
@cache.cached(timeout=350, key_prefix="zone_estimator")
@bp_main.route("/workzone", methods=['POST'])
@bp_main.route("/fill_db")
def fill_database() -> str:
    """
    Fill database with descriptors extracted from images
    """
    controller = MongoController(mongo)
    # CNN-based face detector without upsampling (see FaceRecognitionLibWrapper).
    config = {"model_type": "cnn", "number_of_times_to_upsample": 0}
    face_det = FaceRecognitionLibWrapper(config)
    # Extract a descriptor for every image under ./face_database and store it.
    FillDatabase(controller, face_det)("./face_database")
    return "<p>Workers updated</p>"
# TODO : cachle loading
@bp_main.route('/classes/<target>')
def get_availabel_classes(target: str) -> Response:
    """
    Serve the class-name list for the requested task family.

    'all', 'segmentation', 'detection' and 'keypoints' each map to a text
    file of class names; anything else yields a "WRONG TARGET" response.
    (Function name typo is kept: it is part of the public endpoint map.)
    """
    class_files = {
        "all": "classes.txt",
        "segmentation": "segmentation_classes.txt",
        "detection": "detection_classes.txt",
        "keypoints": "keypoints_classes.txt",
    }
    if target in class_files:
        return load_classes(class_files[target])
    return Response("WRONG TARGET")
| [
11748,
25064,
198,
11748,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
19720,
1330,
4377,
11,
4479,
198,
6738,
42903,
1330,
18261,
11,
39932,
11,
2581,
198,
198,
17597,
13,
6978,
13,
33295,
7,
1911,
14,
305,
13645,
62,
1818,
... | 2.783858 | 1,462 |
from app.helpers.units import SpeedUnits
| [
6738,
598,
13,
16794,
364,
13,
41667,
1330,
8729,
3118,
896,
628
] | 3.5 | 12 |
##
# See the file COPYRIGHT for copyright information.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
JSON serialization/deserialization for incident priority
"""
from enum import Enum
from typing import cast
from .._priority import IncidentPriority
from ._json import registerDeserializer, registerSerializer
__all__ = ()
class IncidentPriorityJSONValue(Enum):
    """
    Incident priority JSON values
    """
    # Wire-format values: lower number = higher priority. The serializers
    # registered below map these to/from the IncidentPriority domain enum.
    high = 1
    normal = 3
    low = 5
registerSerializer(IncidentPriority, serializeIncidentPriority)
registerDeserializer(IncidentPriority, deserializeIncidentPriority)
| [
2235,
198,
2,
4091,
262,
2393,
27975,
38162,
9947,
329,
6634,
1321,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.596091 | 307 |
# Tab
TAB_SIZE = 1
tab = ' ' * TAB_SIZE
# Colors
USING_COLOR = 'magenta'
FINAL_COLOR = 'cyan'
ANS_COLOR = 'cyan'
INVALID_COLOR = 'red'
VAR_COLOR = 'red'
FUNC_ERR_COLOR = 'green' | [
2,
16904,
201,
198,
5603,
33,
62,
33489,
796,
352,
201,
198,
8658,
796,
705,
705,
1635,
309,
6242,
62,
33489,
201,
198,
201,
198,
2,
29792,
201,
198,
2937,
2751,
62,
46786,
220,
220,
220,
220,
220,
220,
220,
220,
796,
705,
19726,
... | 1.678571 | 140 |
import numpy as np
import math
import tensorflow as tf
import sys
sys.path.append('../')
from libs.utils.coordinate_convert import backward_convert
if __name__ == "__main__":
    # Three quadrilaterals, each given as 8 corner coordinates
    # (x1, y1, ..., x4, y4).
    quad = np.array([[278, 418, 308, 331, 761, 581, 691, 668],
                     [758, 418, 348, 331, 241, 581, 591, 668],
                     [624, 112, 490, 93, 496, 50, 630, 68]], np.float32)
    # NOTE(review): quad2rbox / quad2rbox_tf are expected to be defined
    # earlier in this module (not visible in this excerpt).
    box = quad2rbox(quad)
    print(box)
    box_tf = quad2rbox_tf(quad)
    # TF1-style session: evaluate the graph version and compare by eye
    # with the numpy result and the library's backward_convert.
    with tf.Session() as sess:
        box_tf_ = sess.run(box_tf)
        print(box_tf_)
    print(backward_convert(quad, False))
| [
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
40720,
11537,
198,
6738,
9195,
82,
13,
26791,
13,
37652,
4559,
62,
1102,
1851,
1330,
... | 2.062937 | 286 |
import subprocess
from setuptools import setup, find_packages, Extension
setup(
name='telegram_fdw',
version='0.1.0',
author='Dickson S. Guedes',
license='PostgreSQL',
packages=['telegram_fdw']
)
| [
11748,
850,
14681,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
11,
27995,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
660,
30536,
62,
16344,
86,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
16,
13,
15,
... | 2.614458 | 83 |
import gym
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from gym import spaces
from gym.utils import seeding
from stable_baselines3.common.vec_env import DummyVecEnv
import decimal
matplotlib.use("Agg")
| [
11748,
11550,
198,
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
11550,
1330,
9029,
198,
6738,
11550,
13,
267... | 3.125 | 80 |
intpt import jaa.lls
| [
600,
457,
1330,
474,
7252,
13,
297,
82,
198
] | 2.333333 | 9 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import shutil
import json
import subprocess
import bincopy
import gendef
import LPC_gendef
sys.path.append(os.path.abspath(".."))
from ui import LPC_uicore
from ui import LPC_uidef
from ui import uidef
from ui import uivar
from ui import uilang
from run import rundef
from run import LPC_rundef
from mem import memdef
from mem import LPC_memdef
from utils import elf
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
33918,
198,
11748,
850,
14681,
198,
11748,
275,
1... | 2.71875 | 160 |
# Tuple of words whose vowels we want to display.
tPalavras = ('aprender', 'programar', 'linguagem', 'python', 'curso', 'gratis', 'try')

for palavra in tPalavras:
    # Header for the current word; no trailing newline so the vowels follow on the same line.
    print(f'\nNa palavra {palavra.upper()} temos: ', end='')
    for letra in palavra:
        if letra.lower() in 'aeiou':
            print(letra, end=' ')
| [
83,
11531,
615,
8847,
796,
357,
201,
198,
220,
220,
220,
705,
499,
13287,
3256,
201,
198,
220,
220,
220,
705,
23065,
283,
3256,
201,
198,
220,
220,
220,
705,
1359,
84,
363,
368,
3256,
201,
198,
220,
220,
220,
705,
29412,
3256,
201... | 1.782353 | 170 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import logging
from random import random
import pandas as pd
import numpy as np
from itertools import compress
import scipy.sparse as sp
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from rdkit.Chem.Scaffolds import MurckoScaffold
import pgl
from pgl.utils import paddle_helper
try:
from dataset.Dataset import Subset
from dataset.Dataset import ChemDataset
except:
from Dataset import Subset
from Dataset import ChemDataset
log = logging.getLogger("logger")
if __name__ == "__main__":
    # Make the project root importable before pulling in project modules.
    file_path = os.path.dirname(os.path.realpath(__file__))
    proj_path = os.path.join(file_path, '../')
    sys.path.append(proj_path)
    from utils.config import Config
    from dataset.Dataset import Subset
    from dataset.Dataset import ChemDataset

    # Load configuration, build the dataset and split it by molecular scaffold.
    config_file = "./finetune_config.yaml"
    args = Config(config_file)
    log.info("loading dataset")
    dataset = ChemDataset(args)
    train_dataset, valid_dataset, test_dataset = scaffold_split(dataset, args)
    log.info("Train Examples: %s" % len(train_dataset))
    log.info("Val Examples: %s" % len(valid_dataset))
    log.info("Test Examples: %s" % len(test_dataset))
    # FIX: removed leftover debugger breakpoint (`import ipdb; ipdb.set_trace()`)
    # that halted every run of this script waiting for interactive input.
    log.info("preprocess finish")
| [
2,
15069,
357,
66,
8,
12131,
350,
37382,
47,
37382,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845... | 3 | 636 |
import torch
__all__ = ['ResidualFeedForwardNet']
class ResidualFeedFowardBlock(torch.nn.Module):
'''Block of two feed-forward layer with a reisdual connection:
f(W1^T x + b1) f(W2^T h1 + b2 ) h2 + x
x ------------------> h1 --------------------> h2 ----------> y
| ^
| Residual connection |
+----------------------------------------------+
'''
| [
11748,
28034,
198,
198,
834,
439,
834,
796,
37250,
4965,
312,
723,
18332,
39746,
7934,
20520,
198,
198,
4871,
1874,
312,
723,
18332,
37,
46138,
12235,
7,
13165,
354,
13,
20471,
13,
26796,
2599,
198,
220,
220,
220,
705,
7061,
12235,
28... | 1.913208 | 265 |
from numpy import NaN
import pandas as pd
import unittest
import numpy as np
'''
The series.count function that we are testing does:
Return number of non-NA/null observations in the Series.
Parameters
----------
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a smaller Series.
Returns
-------
int or Series (if level specified)
Number of non-null values in the Series.
This means that series will count number of things in a series, except for NaN:s.
If the series have multindex (several indexes), you have to pick a level.
A level is a thing used when you have multiple indexes. Say you have two indexes for each entry ,
then level 0 is the first index and level 1 is the second index. Given a multiindex series, count will count
how many values each value in the chosen index holds.
If you have an index [1,2,3] for values [a,b,c], it will return :
1 1
2 1
3 1
If the index for some reason is [1,1,2,3] for [a,b,c,d] it will return:
1 2
2 1
3 1
Finally if one of the values is NaN, index [1,1,2,3] for values [a,b,c,NaN], the NaN isn't counted:
1 2
2 1
3 0
##############################################
Analysis with interface based approach:
The parameters the function takes is a series and maybe a level.
So the input domain can be:
"A series and no level"
"A series with a level"
A note is that you get a warning when you add the level parameter because it will be removed in a future version.
In the future you will have to perform a groupby before you do the count, like this:
s.groupby(level=1).count()
instead of:
pandas.Series.count(s,1)
'''
class TestSeriesCount(unittest.TestCase):
'''Functions for testing the count in pandas series functions. Count returns elements in a series,
but will leave out NaN:s. '''
def test_series_count_blackbox(self):
"A series and no level"
#Want to test just a small ordinary case.
counted_sequence1 = pd.Series.count(pd.Series(self.sequence1))
self.assertEqual(counted_sequence1, 3)
# Testing if NaN is dropped from the count.
counted_sequence2 = pd.Series.count(pd.Series(self.sequence2))
self.assertEqual(counted_sequence2, 2)
# Testing empty series.
counted_sequence3 = pd.Series.count(pd.Series(self.sequence3))
self.assertEqual(counted_sequence3, 0)
# Testing series with just a 0.
counted_sequence4 = pd.Series.count(pd.Series(self.sequence4))
self.assertEqual(counted_sequence4, 1)
# Testing series with just a NaN.
counted_sequence5 = pd.Series.count(pd.Series(self.sequence5))
self.assertEqual(counted_sequence5, 0)
# Testing series with alot of NaN:s and a 2 other.
counted_sequence6 = pd.Series.count(pd.Series(self.sequence6))
self.assertEqual(counted_sequence6, 2)
# Testing series with 1000000 of 0 and a one 1.
counted_sequence7 = pd.Series.count(pd.Series(self.sequence7))
self.assertEqual(counted_sequence7, 1000001)
def test_series_count_multiindex(self):
"A series with a level"
# should count [1,1,1] becaus we have three different indexes
counted_sequence8 = pd.Series.count(self.sequence8,0).values.tolist()
self.assertEqual(counted_sequence8, [1,1,1])
#this one should count index 16 twice, and give us [1,2]
counted_sequence9 = pd.Series.count(self.sequence8,1).values.tolist()
self.assertEqual(counted_sequence9, [1,2])
#this one should count index 16 twice, and give us [1,2]
counted_sequence9 = pd.Series.count(self.sequence8,1).values.tolist()
self.assertEqual(counted_sequence9, [1,2])
if __name__ == '__main__':
unittest.main() | [
6738,
299,
32152,
1330,
11013,
45,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
628,
220,
220,
220,
220,
198,
7061,
6,
198,
464,
2168,
13,
9127,
2163,
326,
356,
389,
4856,
857,... | 2.671683 | 1,462 |
from flask import Flask , render_template, request, jsonify
import Uni_To_Zg
import Zg_To_Uni
import Win_To_Uni
import Uni_To_Win
app = Flask(__name__)
@app.route("/")
@app.route("/about")
@app.route("/convert", methods=["POST"])
@app.route("/zawgyitouni")
@app.route("/convert1", methods=["POST"])
@app.route("/wintouni")
@app.route("/convert2", methods=["POST"])
@app.route("/unitowin")
@app.route("/convert3", methods=["POST"])
@app.route("/wintozawgyi")
@app.route("/convert4", methods=["POST"])
@app.route("/zawgyitowin")
@app.route("/convert5", methods=["POST"])
@app.route("/firstpages")
if __name__ == "__main__":
    # Start the Flask development server; debug=True enables the auto-reloader
    # and interactive debugger (development only, not for production).
    app.run(debug=True)
| [
198,
6738,
42903,
1330,
46947,
837,
8543,
62,
28243,
11,
2581,
11,
33918,
1958,
198,
11748,
43376,
62,
2514,
62,
57,
70,
198,
11748,
1168,
70,
62,
2514,
62,
3118,
72,
198,
11748,
7178,
62,
2514,
62,
3118,
72,
198,
11748,
43376,
62,
... | 2.296552 | 290 |
import asyncio
from pathlib import Path
import uvloop
from terra_sdk.client.lcd import AsyncLCDClient
from terra_sdk.client.lcd.api.tx import CreateTxOptions
from terra_sdk.core.wasm import MsgExecuteContract, MsgInstantiateContract, MsgStoreCode
from terra_sdk.key.mnemonic import MnemonicKey
from terra_sdk.util.contract import get_code_id, get_contract_address, read_file_as_b64
uvloop.install()
asyncio.run(main())
| [
11748,
30351,
952,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
334,
85,
26268,
198,
198,
6738,
1059,
430,
62,
21282,
74,
13,
16366,
13,
75,
10210,
1330,
1081,
13361,
5639,
35,
11792,
198,
6738,
1059,
430,
62,
21282,
74,
13,
... | 2.845638 | 149 |
import threading
import time
from threading import Thread
import numpy as np
import pytest
from ding.utils.data.structure import Cache
@pytest.mark.unittest
| [
11748,
4704,
278,
198,
11748,
640,
198,
6738,
4704,
278,
1330,
14122,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
6738,
44852,
13,
26791,
13,
7890,
13,
301,
5620,
1330,
34088,
628,
198,
31,
9078,
9288,
... | 3.354167 | 48 |
from click.testing import CliRunner
import unittest
from mock import patch, call, Mock
from tests.cli import assert_exit_code
from floyd.cli.experiment import delete
from tests.cli.mocks import mock_exp, mock_task_inst, mock_access_token
class TestExperimentDelete(unittest.TestCase):
"""
Tests Experiment CLI delete functionality `floyd delete`
"""
@patch('floyd.cli.experiment.TaskInstanceClient')
@patch('floyd.cli.experiment.ModuleClient')
@patch('floyd.cli.experiment.ExperimentClient')
@patch('floyd.manager.auth_config.AuthConfigManager.get_access_token', side_effect=mock_access_token)
@patch('floyd.manager.auth_config.AuthConfigManager.get_auth_header', return_value="Bearer " + mock_access_token().token)
@patch('floyd.cli.experiment.TaskInstanceClient')
@patch('floyd.cli.experiment.ModuleClient')
@patch('floyd.cli.experiment.ExperimentClient.get', side_effect=mock_exp)
@patch('floyd.cli.experiment.ExperimentClient.delete')
@patch('floyd.manager.auth_config.AuthConfigManager.get_access_token', side_effect=mock_access_token)
@patch('floyd.manager.auth_config.AuthConfigManager.get_auth_header', return_value="Bearer " + mock_access_token().token)
@patch('floyd.cli.experiment.TaskInstanceClient')
@patch('floyd.cli.experiment.ModuleClient')
@patch('floyd.cli.experiment.ExperimentClient.get', side_effect=mock_exp)
@patch('floyd.cli.experiment.ExperimentClient.delete')
@patch('floyd.manager.auth_config.AuthConfigManager.get_access_token', side_effect=mock_access_token)
@patch('floyd.manager.auth_config.AuthConfigManager.get_auth_header', return_value="Bearer " + mock_access_token().token)
@patch('floyd.cli.experiment.TaskInstanceClient')
@patch('floyd.cli.experiment.ModuleClient')
@patch('floyd.cli.experiment.ExperimentClient.get', side_effect=mock_exp)
@patch('floyd.cli.experiment.ExperimentClient.delete', return_value=False)
@patch('floyd.manager.auth_config.AuthConfigManager.get_access_token', side_effect=mock_access_token)
@patch('floyd.manager.auth_config.AuthConfigManager.get_auth_header', return_value="Bearer " + mock_access_token().token)
@patch('floyd.cli.experiment.ExperimentClient')
@patch('floyd.manager.auth_config.AuthConfigManager.get_access_token', side_effect=mock_access_token)
@patch('floyd.manager.auth_config.AuthConfigManager.get_auth_header', return_value="Bearer " + mock_access_token().token)
@patch('floyd.cli.experiment.ExperimentClient')
@patch('floyd.manager.auth_config.AuthConfigManager.get_access_token', side_effect=mock_access_token)
@patch('floyd.manager.auth_config.AuthConfigManager.get_auth_header', return_value="Bearer " + mock_access_token().token)
@patch('floyd.cli.experiment.TaskInstanceClient.get', side_effect=mock_task_inst)
@patch('floyd.cli.experiment.get_module_task_instance_id', return_value='123')
@patch('floyd.cli.experiment.ModuleClient.delete')
@patch('floyd.cli.experiment.ExperimentClient.delete', return_value=False)
@patch('floyd.cli.experiment.ExperimentClient.get')
| [
6738,
3904,
13,
33407,
1330,
1012,
72,
49493,
198,
11748,
555,
715,
395,
198,
6738,
15290,
1330,
8529,
11,
869,
11,
44123,
198,
198,
6738,
5254,
13,
44506,
1330,
6818,
62,
37023,
62,
8189,
198,
6738,
781,
12192,
13,
44506,
13,
23100,
... | 2.887142 | 1,081 |
import argparse
from pathlib import Path, PurePath
from shutil import copy
import sys
from grascii.appdirs import user_data_dir
from grascii.config import APP_NAME
description = "Install a Grascii Dictionary"
DICTIONARY_PATH = Path(user_data_dir(APP_NAME), "dictionaries")
def main() -> None:
    """Run the install command using arguments from sys.argv.

    Builds the argument parser, parses the command-line arguments (excluding
    the program name), and dispatches to the install handler.
    """
    # FIX: ArgumentParser's first positional parameter is ``prog``, not
    # ``description`` — passing the text positionally replaced the program name
    # instead of setting the help description. Pass it by keyword.
    argparser = argparse.ArgumentParser(description=description)
    build_argparser(argparser)
    args = argparser.parse_args(sys.argv[1:])
    cli_install(args)
if __name__ == "__main__":
    # Allow running this module directly as a script.
    main()
| [
198,
11748,
1822,
29572,
198,
6738,
3108,
8019,
1330,
10644,
11,
17129,
15235,
198,
6738,
4423,
346,
1330,
4866,
198,
11748,
25064,
198,
198,
6738,
1036,
292,
979,
72,
13,
1324,
15908,
82,
1330,
2836,
62,
7890,
62,
15908,
198,
6738,
1... | 2.841837 | 196 |
from django import forms
from django.contrib.admin import widgets
from crispy_forms.helper import FormHelper
from crispy_forms.bootstrap import FormActions, TabHolder, Tab
from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field
from .models import Container, Channel, Transfer, Balance
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
28482,
1330,
40803,
198,
6738,
42807,
62,
23914,
13,
2978,
525,
1330,
5178,
47429,
198,
6738,
42807,
62,
23914,
13,
18769,
26418,
1330,
5178,
32,
2733,
11,
1690... | 3.875 | 80 |
# coding: utf-8
import sys
# Hard interpreter guard: this script targets CPython 2.6 specifically and
# refuses to run on any other version.
if sys.version_info[0:2] != (2, 6):
    raise Exception("Must use python 2.6")
import os
# Inner Reuse
from inner_reuse.configurer import Configurer
from inner_reuse import tic, toc
# App
from _code.gtest_runner import run_gtests
RUNNER_TASK_FILE = os.sep.join(['_code', 'task_files', 'main_task.py'])
BIN_FILE_NAME = os.sep.join(['..', 'statistic', 'exec', 'gtest-extend.bin'])
count_cores = 4
test_run_gtest_no_parallel() | [
2,
19617,
25,
3384,
69,
12,
23,
198,
11748,
25064,
198,
198,
361,
25064,
13,
9641,
62,
10951,
58,
15,
25,
17,
60,
14512,
357,
17,
11,
718,
2599,
198,
220,
220,
220,
5298,
35528,
7203,
34320,
779,
21015,
362,
13,
21,
4943,
198,
1... | 2.449198 | 187 |
import logging
import sys
import os
import traceback
import json
from datetime import datetime
import string
import random
from collections import OrderedDict
import time
import pickle
from functools import partial
import ipdb
import torch
from torch.utils.data import DataLoader
import numpy as np
import sklearn
from utils import utils, analysis
from models.loss import l2_reg_loss
from datasets.dataset import ImputationDataset, TransductionDataset, ClassiregressionDataset, collate_unsuperv, collate_superv
# Module-level logger; named '__main__' so records appear under the entry-point logger.
logger = logging.getLogger('__main__')
NEG_METRICS = {'loss'}  # metrics for which "better" is less
# Process-wide accumulators used by `validate` to report average validation time.
val_times = {"total_time": 0, "count": 0}
def pipeline_factory(config):
    """Map the configured task name to its (Dataset, collate_fn, Runner) triple.

    Returns the Dataset class (pre-configured via ``functools.partial`` where
    the task needs extra options), the matching collate function and the
    Runner class for ``config['task']``. Raises NotImplementedError for an
    unknown task name.
    """
    task = config['task']

    if task == "imputation":
        dataset_cls = partial(ImputationDataset,
                              mean_mask_length=config['mean_mask_length'],
                              masking_ratio=config['masking_ratio'],
                              mode=config['mask_mode'],
                              distribution=config['mask_distribution'],
                              exclude_feats=config['exclude_feats'])
        return dataset_cls, collate_unsuperv, UnsupervisedRunner
    if task == "transduction":
        dataset_cls = partial(TransductionDataset,
                              mask_feats=config['mask_feats'],
                              start_hint=config['start_hint'],
                              end_hint=config['end_hint'])
        return dataset_cls, collate_unsuperv, UnsupervisedRunner
    if task in ("classification", "regression"):
        return ClassiregressionDataset, collate_superv, SupervisedRunner
    raise NotImplementedError("Task '{}' not implemented".format(task))
def setup(args):
    """Prepare training session: read configuration from file (takes precedence), create directories.

    Input:
        args: arguments object from argparse
    Returns:
        config: configuration dictionary
    """
    config = args.__dict__  # configuration dictionary

    if args.config_filepath is not None:
        logger.info("Reading configuration ...")
        try:  # dictionary containing the entire configuration settings in a hierarchical fashion
            config.update(utils.load_config(args.config_filepath))
        # FIX: a bare `except:` also swallows SystemExit/KeyboardInterrupt;
        # catch Exception so only genuine errors are logged and turned into exit(1).
        except Exception:
            logger.critical("Failed to load configuration file. Check JSON syntax and verify that files exist")
            traceback.print_exc()
            sys.exit(1)

    # Create output directory
    initial_timestamp = datetime.now()
    output_dir = config['output_dir']
    if not os.path.isdir(output_dir):
        raise IOError(
            "Root directory '{}', where the directory of the experiments will be created, must exist".format(output_dir))

    output_dir = os.path.join(output_dir, config['experiment_name'])

    formatted_timestamp = initial_timestamp.strftime("%Y-%m-%d_%H-%M-%S")
    config['initial_timestamp'] = formatted_timestamp
    # Append a timestamp plus a short random suffix unless disabled; always
    # append when no experiment name was given, to avoid directory collisions.
    if (not config['no_timestamp']) or (len(config['experiment_name']) == 0):
        rand_suffix = "".join(random.choices(string.ascii_letters + string.digits, k=3))
        output_dir += "_" + formatted_timestamp + "_" + rand_suffix
    config['output_dir'] = output_dir
    config['save_dir'] = os.path.join(output_dir, 'checkpoints')
    config['pred_dir'] = os.path.join(output_dir, 'predictions')
    config['tensorboard_dir'] = os.path.join(output_dir, 'tb_summaries')
    utils.create_dirs([config['save_dir'], config['pred_dir'], config['tensorboard_dir']])

    # Save configuration as a (pretty) json file
    with open(os.path.join(output_dir, 'configuration.json'), 'w') as fp:
        json.dump(config, fp, indent=4, sort_keys=True)

    logger.info("Stored configuration file in '{}'".format(output_dir))

    return config
def convert_metrics_per_batch_to_per_sample(metrics, target_masks):
    """
    Redistribute per-active-element metric rows back onto the samples they came from.

    Args:
        metrics: list of len(num_batches), each element: list of len(num_metrics),
            each element: (num_active_in_batch,) metric per active element
        target_masks: list of len(num_batches), each element:
            (batch_size, seq_len, feat_dim) boolean mask: 1s active, 0s ignore
    Returns:
        list of len(num_batches), each element: (batch_size, num_metrics) metric per sample
    """
    per_sample_arrays = []
    for batch_metrics, batch_masks in zip(metrics, target_masks):
        # Number of active (unmasked) elements each sample contributed.
        active_counts = np.sum(batch_masks, axis=(1, 2))
        stacked = np.stack(batch_metrics, axis=1)  # (num_active_in_batch, num_metrics)
        sample_totals = np.zeros((len(active_counts), stacked.shape[1]))  # (batch_size, num_metrics)
        offset = 0
        for row, count in enumerate(active_counts):
            # Consecutive rows of `stacked` belong to consecutive samples.
            sample_totals[row, :] = np.sum(stacked[offset:offset + count, :], axis=0)
            offset += count
        per_sample_arrays.append(sample_totals)
    return per_sample_arrays
def evaluate(evaluator):
    """Perform a single, one-off evaluation on an evaluator object (initialized with a dataset)"""
    started = time.time()
    with torch.no_grad():
        aggr_metrics, per_batch = evaluator.evaluate(epoch_num=None, keep_all=True)
    elapsed = time.time() - started

    print()
    # Assemble one log line of "name: value | " pairs, skipping unset metrics.
    summary = 'Evaluation Summary: ' + ''.join(
        '{}: {:8f} | '.format(name, value)
        for name, value in aggr_metrics.items() if value is not None)
    logger.info(summary)
    logger.info("Evaluation runtime: {} hours, {} minutes, {} seconds\n".format(*utils.readable_time(elapsed)))
    return aggr_metrics, per_batch
def validate(val_evaluator, tensorboard_writer, config, best_metrics, best_value, epoch):
    """Run an evaluation on the validation set while logging metrics, and handle outcome

    Args:
        val_evaluator: runner wrapping the model and the validation dataloader
        tensorboard_writer: writer object exposing `add_scalar(tag, value, step)`
        config: configuration dict ('key_metric', 'save_dir', 'pred_dir', ...)
        best_metrics: metrics dict from the best epoch seen so far
        best_value: best value of config['key_metric'] seen so far
        epoch: current epoch number (used for logging and checkpointing)
    Returns:
        (aggr_metrics, best_metrics, best_value), the latter two updated when
        this epoch improved on the key metric.
    """
    logger.info("Evaluating on validation set ...")
    eval_start_time = time.time()
    with torch.no_grad():
        aggr_metrics, per_batch = val_evaluator.evaluate(epoch, keep_all=True)
    eval_runtime = time.time() - eval_start_time
    logger.info("Validation runtime: {} hours, {} minutes, {} seconds\n".format(*utils.readable_time(eval_runtime)))
    # Accumulate into the module-level dict so averages cover every validation
    # run of the process, not just this call.
    global val_times
    val_times["total_time"] += eval_runtime
    val_times["count"] += 1
    avg_val_time = val_times["total_time"] / val_times["count"]
    avg_val_batch_time = avg_val_time / len(val_evaluator.dataloader)
    avg_val_sample_time = avg_val_time / len(val_evaluator.dataloader.dataset)
    logger.info("Avg val. time: {} hours, {} minutes, {} seconds".format(*utils.readable_time(avg_val_time)))
    logger.info("Avg batch val. time: {} seconds".format(avg_val_batch_time))
    logger.info("Avg sample val. time: {} seconds".format(avg_val_sample_time))
    print()
    print_str = 'Epoch {} Validation Summary: '.format(epoch)
    for k, v in aggr_metrics.items():
        tensorboard_writer.add_scalar('{}/val'.format(k), v, epoch)
        print_str += '{}: {:8f} | '.format(k, v)
    logger.info(print_str)
    # Metrics in NEG_METRICS (e.g. 'loss') improve downward; all others upward.
    if config['key_metric'] in NEG_METRICS:
        condition = (aggr_metrics[config['key_metric']] < best_value)
    else:
        condition = (aggr_metrics[config['key_metric']] > best_value)
    if condition:
        best_value = aggr_metrics[config['key_metric']]
        utils.save_model(os.path.join(config['save_dir'], 'model_best.pth'), epoch, val_evaluator.model)
        best_metrics = aggr_metrics.copy()
        pred_filepath = os.path.join(config['pred_dir'], 'best_predictions')
        # NOTE(review): assumes `per_batch` is a dict of array-likes suitable as
        # np.savez keyword arguments — confirm against the evaluator.
        np.savez(pred_filepath, **per_batch)
    return aggr_metrics, best_metrics, best_value
| [
11748,
18931,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
12854,
1891,
198,
11748,
33918,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
4731,
198,
11748,
4738,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
640,
19... | 2.612669 | 2,889 |
import logging
import math
import os
import shutil
import time
import cv2
import imageio
import numpy as np
import scipy.sparse
import tensorflow as tf
import utils
from lib import graph, mesh_renderer
from lib.mesh_io import write_obj
logger = logging.getLogger('x')
class BaseModel():
"""
Mesh Convolutional Autoencoder which uses the Chebyshev approximation.
"""
    def build_graph(self):
        """Build the computational graph of the model.

        Constructs, in order: the tf.data input pipeline over training images,
        placeholders for test-time inputs, the pretrained face-recognition and
        FaceNet embedding branches, the inference pass for train and test
        inputs, the losses, optional GAN losses, and the train ops, variable
        initializer, summaries and saver.
        """
        # self.graph = tf.Graph()
        # with self.graph.as_default():
        # Inputs.
        with tf.name_scope('inputs'):
            data_idxs = [x for x in range(len(self.train_image_paths))]
            image_dataset = tf.data.Dataset.from_tensor_slices(data_idxs)
            # image_dataset = image_dataset.map(
            #     lambda start_idx: tf.py_func(self.load_image_bin, [start_idx], [tf.float32, tf.float32]))
            image_dataset = image_dataset.map(
                lambda start_idx: tf.py_func(self.load_image_bin, [start_idx], tf.float32))
            image_dataset = image_dataset.shuffle(buffer_size=self.buffer_size)
            image_dataset = image_dataset.batch(self.batch_size)
            image_dataset = image_dataset.repeat()
            image_iterator = image_dataset.make_one_shot_iterator()
            # self.train_rgbas, self.train_2dlms = image_iterator.get_next()
            self.train_rgbas = image_iterator.get_next()
            self.train_rgbas.set_shape([self.batch_size, self.img_size, self.img_size, 4])
            # (x + 1) * 127.5 maps [-1, 1] data to [0, 255] — assumes loader output is in [-1, 1].
            self.train_images = (self.train_rgbas[..., :3] + 1) * 127.5
            # self.train_2dlms.set_shape([self.batch_size, len(self.lm_3d_idx), 2])
        self.refer_faces = [
            tf.convert_to_tensor(x['faces'], dtype=tf.int32, name='refer_faces_{}'.format(i))
            for i, x in enumerate(self.refer_meshes)
        ]
        self.ph_rgbas = tf.placeholder(tf.float32, (self.batch_size, self.img_size, self.img_size, 4),
                                       'input_rgbas')
        self.input_images = (self.ph_rgbas[..., :3] + 1) * 127.5
        # self.input_images = tf.floor((self.ph_rgbas[..., 2::-1] + 1) * 127.5)
        self.ph_2dlms = tf.placeholder(tf.float32, (self.batch_size, len(self.lm_3d_idx), 2),
                                       'input_2dlm')
        self.ph_ren_lambda = tf.placeholder(tf.float32, (), 'render_lambda')
        self.ph_ref_lambda = tf.placeholder(tf.float32, (), 'refine_lambda')
        # self.ph_adv_lambda = tf.placeholder(tf.float32, (), 'adv_lambda')
        # NOTE(review): the parsed GraphDefs below are not imported here —
        # presumably get_emb_coeff/get_img_feat wire them in; confirm.
        with tf.gfile.GFile(os.path.join(self.root_dir, 'data/FaceReconModel.pb'), 'rb') as f:
            face_rec_graph_def = tf.GraphDef()
            face_rec_graph_def.ParseFromString(f.read())
        image_emb, self.coeff = get_emb_coeff('facerec', self.train_images)
        image_emb_test, self.coeff_test = get_emb_coeff('facerec_test', self.input_images)
        with tf.gfile.GFile(os.path.join(self.root_dir, 'data/FaceNetModel.pb'), 'rb') as f:
            face_net_graph_def = tf.GraphDef()
            face_net_graph_def.ParseFromString(f.read())
        image_feat = get_img_feat('facenet', self.train_images)
        image_feat_test = get_img_feat('facenet_test', self.input_images)
        self.image_emb = tf.concat([image_emb, image_feat], axis=-1)
        self.image_emb_test = tf.concat([image_emb_test, image_feat_test], axis=-1)
        # Training-branch inference and its outputs.
        pred_results = self.inference(self.train_rgbas, self.coeff, self.image_emb)
        self.vert_pred = pred_results['vertice']
        self.pca_text_pred = pred_results['pca_texture']
        self.gcn_text_pred = pred_results['gcn_texture']
        self.pca_color_pred = pred_results['pca_color']
        self.gcn_color_pred = pred_results['gcn_color']
        self.proj_color_pred = pred_results['proj_color']
        self.pca_render_pred = pred_results['pca_render_color']
        self.gcn_render_pred = pred_results['gcn_render_color']
        self.lm_proj_pred = pred_results['lm_project']
        # render_mask = self._erosion2d(self.train_rgbas[..., 3:])
        render_mask = self.pca_render_pred[..., 3:] * self.train_rgbas[..., 3:]
        gcn_render_image = (self.gcn_render_pred[..., :3] + 1) * 127.5
        # Composite the rendered face over the input image using the alpha mask.
        self.gcn_overlay = gcn_render_image[..., :3] * render_mask +\
                           self.train_images[..., :3] * (1 - render_mask)
        gcn_image_feat = get_img_feat('facenet_gcn', self.gcn_overlay)
        self.all_loss, self.pca_loss, self.gcn_loss, self.proj_loss, self.refine_loss, self.perc_loss, self.var_loss, self.sym_loss = self.compute_loss(
            self.train_rgbas, self.pca_render_pred, self.gcn_render_pred, self.pca_text_pred,
            self.gcn_text_pred, self.proj_color_pred, self.pca_color_pred, self.gcn_color_pred,
            image_feat, gcn_image_feat, self.regularization)
        # Test branch: same network with shared weights (reuse=True), no training ops.
        test_results = self.inference(self.ph_rgbas, self.coeff_test, self.image_emb_test,
                                      is_training=False, reuse=True, get_inter=True)
        self.vert_test = test_results['vertice']
        self.norm_test = test_results['normal']
        self.pca_text_test = test_results['pca_texture']
        self.gcn_text_test = test_results['gcn_texture']
        self.pca_color_test = test_results['pca_color']
        self.gcn_color_test = test_results['gcn_color']
        self.proj_color_test = test_results['proj_color']
        self.pca_ren_tex_test = test_results['pca_render_text']
        self.gcn_ren_tex_test = test_results['gcn_render_text']
        self.pca_ren_clr_test = test_results['pca_render_color']
        self.gcn_ren_clr_test = test_results['gcn_render_color']
        self.lm_proj_test = test_results['lm_project']
        # render_mask_test = self._erosion2d(self.ph_rgbas[..., 3:])
        render_mask_test = self.pca_ren_clr_test[..., 3:] * self.ph_rgbas[..., 3:]
        gcn_ren_image_test = (self.gcn_ren_clr_test[..., :3] + 1) * 127.5
        self.gcn_over_test = gcn_ren_image_test[..., :3] * render_mask_test +\
                             self.input_images[..., :3] * (1 - render_mask_test)
        gcn_image_feat_test = get_img_feat('facenet_gcn_test', self.gcn_over_test)
        self.test_all_loss, self.test_pca_loss, self.test_gcn_loss, self.test_proj_loss, self.test_refine_loss, self.test_perc_loss, _, _ = self.compute_loss(
            self.ph_rgbas, self.pca_ren_clr_test, self.gcn_ren_clr_test, self.pca_text_test,
            self.gcn_text_test, self.proj_color_test, self.pca_color_test, self.gcn_color_test,
            image_feat_test, gcn_image_feat_test, self.regularization, True)
        # Optional adversarial branch: adds generator loss to both train/test totals.
        self.d_loss = None
        if self.gan:
            real_image = self.train_rgbas[..., :3]
            fake_image = self.gcn_overlay / 127.5 - 1.0
            self.g_loss, self.d_loss = self.compute_gan_loss(real_image, fake_image)
            self.all_loss = self.all_loss + self.g_loss
            real_img_test = self.ph_rgbas[..., :3]
            fake_img_test = self.gcn_over_test / 127.5 - 1.0
            self.test_g_loss, self.test_d_loss = self.compute_gan_loss(real_img_test, fake_img_test,
                                                                       reuse=True)
            self.test_all_loss = self.test_all_loss + self.test_g_loss
        self.gen_train, self.dis_train = self.training(self.all_loss, self.d_loss)
        # self.op_encoder = self.encoder(self.ph_data, reuse=True)
        # self.op_decoder = self.decoder(self.ph_z, reuse=True)
        # Initialize variables, i.e. weights and biases.
        self.op_init = tf.global_variables_initializer()
        # Summaries for TensorBoard and Save for model parameters.
        self.op_summary = tf.summary.merge_all()
        var_all = tf.global_variables()
        trainable_vars = tf.trainable_variables()
        # Persist trainables plus BatchNorm moving statistics and 'training'-scoped
        # bookkeeping variables (e.g. the global step).
        bn_vars = [x for x in var_all if 'BatchNorm/moving' in x.name]
        global_vars = [x for x in var_all if 'training' in x.name]
        vars_to_save = trainable_vars + bn_vars + global_vars
        self.op_saver = tf.train.Saver(var_list=vars_to_save, max_to_keep=3)
        logger.info('Successfully Build Graph')
    def compute_loss(self, input_image, pca_render, gcn_render, pca_texture, gcn_texture, proj_color,
                     pca_color, gcn_color, input_feat, gcn_feat, regularization, get_inter=False):
        """Adds to the inference model the layers required to generate loss.

        Returns:
            (total loss incl. regularization, pca render loss, gcn render loss,
             projection loss, refine loss, perception loss, texture-variance
             loss, symmetry loss)

        NOTE(review): `get_inter` is only referenced in commented-out code and
        is currently unused by the active path.
        """
        with tf.name_scope('loss'):
            with tf.name_scope('data_loss'):
                skin_mask = self._erosion2d(input_image[..., 3:])
                gcn_render_mask = tf.round(gcn_render[..., 3:]) * skin_mask
                # Masked L1 photometric losses, normalized by the number of
                # active mask pixels (SUM reduction divided by mask sum).
                # pca_render_loss = tf.losses.mean_squared_error(
                pca_render_loss = tf.losses.absolute_difference(
                    predictions=pca_render[..., :3] * gcn_render_mask, labels=input_image[..., :3] *
                    gcn_render_mask, reduction=tf.losses.Reduction.SUM) / tf.reduce_sum(gcn_render_mask)
                # gcn_render_loss = tf.losses.mean_squared_error(
                gcn_render_loss = tf.losses.absolute_difference(
                    predictions=gcn_render[..., :3] * gcn_render_mask, labels=input_image[..., :3] *
                    gcn_render_mask, reduction=tf.losses.Reduction.SUM) / tf.reduce_sum(gcn_render_mask)
                # Projection loss: match projected colors where proj_color's alpha
                # is set, fall back to the PCA colors elsewhere.
                # project_loss_image = tf.losses.mean_squared_error(
                project_loss_image = tf.losses.absolute_difference(
                    predictions=gcn_color * proj_color[..., 3:],
                    labels=proj_color[..., :3] * proj_color[..., 3:], reduction=tf.losses.Reduction.MEAN)
                # project_loss_pca = tf.losses.mean_squared_error(
                project_loss_pca = tf.losses.absolute_difference(
                    predictions=gcn_color * (1 - proj_color[..., 3:]),
                    labels=pca_color * (1 - proj_color[..., 3:]), reduction=tf.losses.Reduction.MEAN)
                project_loss = project_loss_image + 0.3 * project_loss_pca
                # refine_loss = tf.losses.mean_squared_error(
                refine_loss = tf.losses.absolute_difference(predictions=gcn_texture, labels=pca_texture,
                                                            reduction=tf.losses.Reduction.MEAN)
                # Perceptual loss: 1 - cosine similarity between image features.
                perception_loss = 1 - tf.reduce_mean(utils.cosine(input_feat, gcn_feat))
                # Penalize per-channel color variance over the skin region.
                var_losses = []
                gcn_skin_texture = tf.gather(gcn_texture, self.bfm.skin_index, axis=1)
                for i in range(3):
                    _, variance = tf.nn.moments(gcn_skin_texture[..., i], axes=1)
                    var_losses.append(variance)
                var_loss = tf.reduce_mean(var_losses)
                # Left/right facial symmetry penalty (L2 distance per vertex pair;
                # 1e-16 guards the sqrt gradient at zero).
                sym_diff = tf.gather(gcn_texture, self.bfm.left_index, axis=1) - tf.gather(
                    gcn_texture, self.bfm.right_index, axis=1)
                sym_loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(sym_diff) + 1e-16, axis=-1)))
                # adj_tensor = tf.constant(self.adjacent.reshape(
                #     [1, self.num_vert, self.num_vert, 1]),
                #                          dtype=tf.int32,
                #                          shape=[1, self.num_vert, self.num_vert, 1])
                # coo = self.adjacent.tocoo()
                # indices = np.mat([0, self.adjacent.row, self.adjacent.col, 0]).transpose()
                # values = np.ones_like(self.adjacent.data, np.float32)
                # adj_tensor = tf.SparseTensor(indices, values, self.adjacent.shape)
                # # adj_tensor = tf.SparseTensor(self.adjacent.indices,
                # #                              np.clip(self.adjacent.data, 0, 1),
                # #                              self.adjacent.shape)
                # expand = tf.ones([1, self.num_vert, self.num_vert, 3], dtype=tf.float32)
                # expand = expand * tf.expand_dims(gcn_texture, axis=1)
                # exp_trans = tf.transpose(expand, [0, 2, 1, 3])
                # # vertical = tf.ones([self.num_vert, self.num_vert, 3], dtype=tf.float32)
                # # vertical = vertical * tf.expand_dims(gcn_texture, axis=2)
                # smooth_loss = tf.abs((expand - exp_trans) * adj_tensor)
                # test = tf.sparse_to_dense(smooth_loss.indices, )
                #TODO: need attention
                # data_loss = self.ph_ref_lambda * refine_loss + self.ph_ren_lambda * (
                #     gcn_render_loss + 0.2 * project_loss +
                #     0.2 * perception_loss) + 0.1 * sym_loss
                # Weighted sum; the two lambda placeholders let the caller anneal
                # the refine vs. render terms per run.
                data_loss = self.ph_ref_lambda * refine_loss + self.ph_ren_lambda * (
                    project_loss + 0.2 * perception_loss + 0.5 * sym_loss + 0.01 * var_loss)
                # if not get_inter:
                #     self.skin_mask = skin_mask
                #     self.gcn_render_mask = gcn_render_mask
                #     self.gcn_render_image = gcn_render[..., :3]
                #     self.input_image_rgb = input_image[..., :3]
                #     self.pca_render_image = pca_render[..., :3]
            with tf.name_scope('regularization'):
                regularization *= tf.add_n(self.regularizers)
            loss = data_loss + regularization
            tf.summary.scalar('loss/data_loss', data_loss)
            tf.summary.scalar('loss/pca_render_loss', pca_render_loss)
            tf.summary.scalar('loss/gcn_render_loss', gcn_render_loss)
            tf.summary.scalar('loss/project_loss', project_loss)
            tf.summary.scalar('loss/refine_loss', refine_loss)
            tf.summary.scalar('loss/perception_loss', perception_loss)
            tf.summary.scalar('loss/var_loss', var_loss)
            tf.summary.scalar('loss/sym_loss', sym_loss)
            tf.summary.scalar('loss/regularization', regularization)
        logger.info('Successfully Computed Losses')
        return loss, pca_render_loss, gcn_render_loss, project_loss, refine_loss, perception_loss, var_loss, sym_loss
    def training(self, g_loss, d_loss=None, decay_rate=0.98):
        """Build the training ops that compute and apply gradients.

        Args:
            g_loss: scalar generator loss tensor.
            d_loss: optional scalar discriminator loss tensor; when ``None``
                no discriminator train op is built.
            decay_rate: exponential learning-rate decay factor; ``1`` disables
                decay entirely.

        Returns:
            Tuple ``(gen_train, dis_train)``: ops that return the current
            learning rate and, through control dependencies, perform one
            optimization step (plus any batch-norm updates). ``dis_train`` is
            ``None`` when ``d_loss`` is ``None``.
        """
        with tf.name_scope('training'):
            # Learning rate.
            global_step = tf.Variable(0, name='global_step', trainable=False)
            if decay_rate != 1:
                learning_rate = tf.train.exponential_decay(self.learning_rate, global_step,
                                                           self.decay_steps, decay_rate, staircase=True)
            else:
                learning_rate = self.learning_rate
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            # NOTE(review): check_grads is never populated, so the
            # tf.control_dependencies(check_grads) below is a no-op; presumably
            # gradient-check ops were meant to be appended here — TODO confirm.
            check_grads = []
            all_vars = tf.trainable_variables()
            # Split variables by name prefix: generator vs. image discriminator.
            mesh_gen_vars = [x for x in all_vars if x.name.startswith('mesh_generator')]
            g_grads = optimizer.compute_gradients(g_loss, var_list=mesh_gen_vars)
            check_gradients(g_grads)
            if d_loss is not None:
                image_dis_vars = [x for x in all_vars if x.name.startswith('image_disc')]
                d_grads = optimizer.compute_gradients(d_loss, var_list=image_dis_vars)
                check_gradients(d_grads)
            with tf.control_dependencies(check_grads):
                op_g_grad = optimizer.apply_gradients(g_grads, global_step=global_step)
                if d_loss is not None:
                    op_d_grad = optimizer.apply_gradients(d_grads, global_step=global_step)
            # The op return the learning rate.
            # Batch-norm moving-average updates must run with every train step.
            update_bn_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies([op_g_grad] + update_bn_ops):
                gen_train = tf.identity(learning_rate, name='control')
            dis_train = None
            if d_loss is not None:
                with tf.control_dependencies([op_d_grad] + update_bn_ops):
                    dis_train = tf.identity(learning_rate, name='control')
            logger.info('Successfully Build Training Optimizer')
            return gen_train, dis_train
    def b1relu(self, inputs):
        """Bias and ELU activation; one learned bias per filter.

        Note: despite the historical name, this applies ``tf.nn.elu`` — the
        original ReLU call is kept commented out below for reference.
        """
        # inputs is (N, M, F) per the commented shape hint; the single bias
        # is broadcast over samples and vertices.
        # N, M, F = x.get_shape()
        _, _, F = inputs.get_shape()
        b = self._bias_variable([1, 1, int(F)], regularization=False)
        # ReLU was replaced with ELU:
        # return tf.nn.relu(inputs + b)
        return tf.nn.elu(inputs + b)
    def b2relu(self, inputs):
        """Bias and ReLU; one learned bias per vertex per filter."""
        # inputs is (N, M, F) per the commented shape hint: N samples,
        # M vertices, F filters.
        # N, M, F = x.get_shape()
        _, M, F = inputs.get_shape()
        # Separate bias for every (vertex, filter) pair, broadcast over samples.
        b = self._bias_variable([1, int(M), int(F)], regularization=False)
        return tf.nn.relu(inputs + b)
def fc(self, inputs, Mout, relu=True):
"""Fully connected layer with Mout features."""
# N, Min = x.get_shape()
_, Min = inputs.get_shape()
W = self._weight_variable([int(Min), Mout], regularization=True)
b = self._bias_variable([Mout], regularization=True)
x = tf.matmul(inputs, W) + b
return tf.nn.relu(x) if relu else x
| [
11748,
18931,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
640,
198,
198,
11748,
269,
85,
17,
198,
11748,
2939,
952,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
82,
29572,
198,
11748,
... | 2.202368 | 7,096 |
"""Functions for copying resources *between* filesystem.
"""
from __future__ import print_function, unicode_literals
import typing
from .errors import FSError
from .opener import manage_fs
from .path import abspath, combine, frombase, normpath
from .tools import is_thread_safe
from .walk import Walker
if False: # typing.TYPE_CHECKING
from typing import Callable, Optional, Text, Union
from .base import FS
from .walk import Walker
_OnCopy = Callable[[FS, Text, FS, Text], object]
def copy_fs(
    src_fs,  # type: Union[FS, Text]
    dst_fs,  # type: Union[FS, Text]
    walker=None,  # type: Optional[Walker]
    on_copy=None,  # type: Optional[_OnCopy]
    workers=0,  # type: int
):
    # type: (...) -> None
    """Copy the entire contents of one filesystem onto another.

    Equivalent to ``copy_dir`` from root to root.

    Arguments:
        src_fs (FS or str): source filesystem (URL or instance).
        dst_fs (FS or str): destination filesystem (URL or instance).
        walker (~fs.walk.Walker, optional): restricts which resources of
            ``src_fs`` are considered; by default everything is copied.
        on_copy (callable, optional): callback with signature
            ``(src_fs, src_path, dst_fs, dst_path)`` invoked after each
            individual file copy.
        workers (int): number of worker threads; ``0`` (default) copies
            single-threaded.
    """
    return copy_dir(
        src_fs,
        "/",
        dst_fs,
        "/",
        walker=walker,
        on_copy=on_copy,
        workers=workers,
    )
def copy_fs_if_newer(
    src_fs,  # type: Union[FS, Text]
    dst_fs,  # type: Union[FS, Text]
    walker=None,  # type: Optional[Walker]
    on_copy=None,  # type: Optional[_OnCopy]
    workers=0,  # type: int
):
    # type: (...) -> None
    """Copy one filesystem onto another, skipping up-to-date files.

    A file is copied only when the destination is missing or older than the
    source; when modification times cannot be determined the copy always
    happens. Equivalent to ``copy_dir_if_newer`` from root to root.

    Arguments:
        src_fs (FS or str): source filesystem (URL or instance).
        dst_fs (FS or str): destination filesystem (URL or instance).
        walker (~fs.walk.Walker, optional): restricts which resources of
            ``src_fs`` are considered; by default everything is copied.
        on_copy (callable, optional): callback with signature
            ``(src_fs, src_path, dst_fs, dst_path)`` invoked after each
            individual file copy.
        workers (int): number of worker threads; ``0`` (default) copies
            single-threaded.
    """
    return copy_dir_if_newer(
        src_fs,
        "/",
        dst_fs,
        "/",
        walker=walker,
        on_copy=on_copy,
        workers=workers,
    )
def _source_is_newer(src_fs, src_path, dst_fs, dst_path):
    # type: (FS, Text, FS, Text) -> bool
    """Tell whether the source file should overwrite the destination.

    Arguments:
        src_fs (FS): source filesystem.
        src_path (str): file path on the source filesystem.
        dst_fs (FS): destination filesystem.
        dst_path (str): file path on the destination filesystem.

    Returns:
        bool: `True` when the destination does not exist, when the source is
        strictly newer, or when modification times cannot be determined
        (including filesystem errors); `False` otherwise.
    """
    namespace = ("details", "modified")
    try:
        if not dst_fs.exists(dst_path):
            return True
        src_modified = src_fs.getinfo(src_path, namespace).modified
        if src_modified is None:
            # Source mtime unknown: err on the side of copying.
            return True
        dst_modified = dst_fs.getinfo(dst_path, namespace).modified
        return dst_modified is None or src_modified > dst_modified
    except FSError:  # pragma: no cover
        # todo: should log something here
        return True
def copy_file(
    src_fs,  # type: Union[FS, Text]
    src_path,  # type: Text
    dst_fs,  # type: Union[FS, Text]
    dst_path,  # type: Text
):
    # type: (...) -> None
    """Copy a single file between filesystems.

    An existing destination file is truncated before writing.

    Arguments:
        src_fs (FS or str): source filesystem (instance or URL).
        src_path (str): file path on the source filesystem.
        dst_fs (FS or str): destination filesystem (instance or URL).
        dst_path (str): file path on the destination filesystem.
    """
    with manage_fs(src_fs, writeable=False) as _src_fs:
        with manage_fs(dst_fs, create=True) as _dst_fs:
            if _src_fs is _dst_fs:
                # Copying within a single filesystem may be natively optimized.
                _src_fs.copy(src_path, dst_path, overwrite=True)
                return
            # Standard cross-filesystem copy under both locks.
            with _src_fs.lock(), _dst_fs.lock():
                if _dst_fs.hassyspath(dst_path):
                    # Destination has a real OS path: stream straight into it.
                    with _dst_fs.openbin(dst_path, "w") as write_file:
                        _src_fs.getfile(src_path, write_file)
                else:
                    with _src_fs.openbin(src_path) as read_file:
                        _dst_fs.setbinfile(dst_path, read_file)
def copy_file_internal(
    src_fs,  # type: FS
    src_path,  # type: Text
    dst_fs,  # type: FS
    dst_path,  # type: Text
):
    # type: (...) -> None
    """Low-level single-file copy that skips `manage_fs` and locking.

    Exists to optimize copying in loops where the caller already manages
    filesystems and locks; prefer `copy_file` otherwise. An existing
    destination file is truncated.

    Arguments:
        src_fs (FS): source filesystem.
        src_path (str): file path on the source filesystem.
        dst_fs (FS): destination filesystem.
        dst_path (str): file path on the destination filesystem.
    """
    if src_fs is dst_fs:
        # Same object: delegate to the filesystem's own (possibly optimized) copy.
        src_fs.copy(src_path, dst_path, overwrite=True)
        return
    if dst_fs.hassyspath(dst_path):
        # Destination has a real OS path: stream straight into it.
        with dst_fs.openbin(dst_path, "w") as write_file:
            src_fs.getfile(src_path, write_file)
    else:
        with src_fs.openbin(src_path) as read_file:
            dst_fs.setbinfile(dst_path, read_file)
def copy_file_if_newer(
    src_fs,  # type: Union[FS, Text]
    src_path,  # type: Text
    dst_fs,  # type: Union[FS, Text]
    dst_path,  # type: Text
):
    # type: (...) -> bool
    """Copy a single file between filesystems, skipping up-to-date targets.

    The copy runs only when the destination is missing, older than the
    source, or when modification times cannot be determined. An existing
    destination file is truncated before writing.

    Arguments:
        src_fs (FS or str): source filesystem (instance or URL).
        src_path (str): file path on the source filesystem.
        dst_fs (FS or str): destination filesystem (instance or URL).
        dst_path (str): file path on the destination filesystem.

    Returns:
        bool: `True` if the file was copied, `False` otherwise.
    """
    with manage_fs(src_fs, writeable=False) as _src_fs:
        with manage_fs(dst_fs, create=True) as _dst_fs:
            if _src_fs is _dst_fs:
                # Same filesystem: use its native copy when an update is needed.
                if not _source_is_newer(_src_fs, src_path, _dst_fs, dst_path):
                    return False
                _src_fs.copy(src_path, dst_path, overwrite=True)
                return True
            # Standard cross-filesystem copy; check freshness under both locks.
            with _src_fs.lock(), _dst_fs.lock():
                if not _source_is_newer(_src_fs, src_path, _dst_fs, dst_path):
                    return False
                copy_file_internal(_src_fs, src_path, _dst_fs, dst_path)
                return True
def copy_structure(
    src_fs,  # type: Union[FS, Text]
    dst_fs,  # type: Union[FS, Text]
    walker=None,  # type: Optional[Walker]
):
    # type: (...) -> None
    """Recreate the directory tree of ``src_fs`` on ``dst_fs`` (no files).

    Arguments:
        src_fs (FS or str): source filesystem (instance or URL).
        dst_fs (FS or str): destination filesystem (instance or URL).
        walker (~fs.walk.Walker, optional): restricts which directories of
            ``src_fs`` are considered; by default all are recreated.
    """
    if walker is None:
        walker = Walker()
    with manage_fs(src_fs) as _src_fs:
        with manage_fs(dst_fs, create=True) as _dst_fs:
            with _src_fs.lock(), _dst_fs.lock():
                for dir_path in walker.dirs(_src_fs):
                    _dst_fs.makedir(dir_path, recreate=True)
def copy_dir(
    src_fs,  # type: Union[FS, Text]
    src_path,  # type: Text
    dst_fs,  # type: Union[FS, Text]
    dst_path,  # type: Text
    walker=None,  # type: Optional[Walker]
    on_copy=None,  # type: Optional[_OnCopy]
    workers=0,  # type: int
):
    # type: (...) -> None
    """Copy a directory from one filesystem to another.

    Arguments:
        src_fs (FS or str): source filesystem (instance or URL).
        src_path (str): directory path on the source filesystem.
        dst_fs (FS or str): destination filesystem (instance or URL).
        dst_path (str): directory path on the destination filesystem.
        walker (~fs.walk.Walker, optional): restricts which resources of
            ``src_fs`` are considered; by default everything is copied.
        on_copy (callable, optional): callback with signature
            ``(src_fs, src_path, dst_fs, dst_path)`` invoked after each
            individual file copy.
        workers (int): number of worker threads; ``0`` (default) copies
            single-threaded.
    """
    on_copy = on_copy or (lambda *args: None)
    walker = walker or Walker()
    _src_path = abspath(normpath(src_path))
    _dst_path = abspath(normpath(dst_path))

    from ._bulk import Copier

    # Bug fix: the previous code did ``with src() as _src_fs, dst() as
    # _dst_fs`` but no names ``src``/``dst`` exist in this module (NameError).
    # Open the filesystems with ``manage_fs`` as the sibling helpers do.
    with manage_fs(src_fs, writeable=False) as _src_fs:
        with manage_fs(dst_fs, create=True) as _dst_fs:
            with _src_fs.lock(), _dst_fs.lock():
                # Parallel copying is only safe when both ends are thread-safe.
                _thread_safe = is_thread_safe(_src_fs, _dst_fs)
                with Copier(num_workers=workers if _thread_safe else 0) as copier:
                    _dst_fs.makedir(_dst_path, recreate=True)
                    for dir_path, dirs, files in walker.walk(_src_fs, _src_path):
                        # Mirror the current directory under the destination root.
                        copy_path = combine(_dst_path, frombase(_src_path, dir_path))
                        for info in dirs:
                            _dst_fs.makedir(info.make_path(copy_path), recreate=True)
                        for info in files:
                            # Renamed from src_path/dst_path to avoid shadowing
                            # the function parameters.
                            src_file_path = info.make_path(dir_path)
                            dst_file_path = info.make_path(copy_path)
                            copier.copy(_src_fs, src_file_path, _dst_fs, dst_file_path)
                            on_copy(_src_fs, src_file_path, _dst_fs, dst_file_path)
def copy_dir_if_newer(
    src_fs,  # type: Union[FS, Text]
    src_path,  # type: Text
    dst_fs,  # type: Union[FS, Text]
    dst_path,  # type: Text
    walker=None,  # type: Optional[Walker]
    on_copy=None,  # type: Optional[_OnCopy]
    workers=0,  # type: int
):
    # type: (...) -> None
    """Copy a directory between filesystems, skipping up-to-date files.

    A file is copied only when the destination is missing, older than the
    source, or when either modification time is unavailable.

    Arguments:
        src_fs (FS or str): source filesystem (instance or URL).
        src_path (str): directory path on the source filesystem.
        dst_fs (FS or str): destination filesystem (instance or URL).
        dst_path (str): directory path on the destination filesystem.
        walker (~fs.walk.Walker, optional): restricts which resources of
            ``src_fs`` are considered; by default everything is copied.
        on_copy (callable, optional): callback with signature
            ``(src_fs, src_path, dst_fs, dst_path)`` invoked after each
            individual file copy.
        workers (int): number of worker threads; ``0`` (default) copies
            single-threaded.
    """
    on_copy = on_copy or (lambda *args: None)
    walker = walker or Walker()
    _src_path = abspath(normpath(src_path))
    _dst_path = abspath(normpath(dst_path))

    from ._bulk import Copier

    # Bug fix: the previous code did ``with src() as _src_fs, dst() as
    # _dst_fs`` but no names ``src``/``dst`` exist in this module (NameError).
    # Open the filesystems with ``manage_fs`` as the sibling helpers do.
    with manage_fs(src_fs, writeable=False) as _src_fs:
        with manage_fs(dst_fs, create=True) as _dst_fs:
            with _src_fs.lock(), _dst_fs.lock():
                _thread_safe = is_thread_safe(_src_fs, _dst_fs)
                with Copier(num_workers=workers if _thread_safe else 0) as copier:
                    _dst_fs.makedir(_dst_path, recreate=True)
                    namespace = ("details", "modified")
                    # Snapshot of existing destination files, keyed by path.
                    # NOTE(review): these keys are destination paths while the
                    # lookup below uses source paths; they only align when
                    # src_path == dst_path — TODO confirm intended behavior.
                    dst_state = {
                        path: info
                        for path, info in walker.info(_dst_fs, _dst_path, namespace)
                        if info.is_file
                    }
                    src_state = list(walker.info(_src_fs, _src_path, namespace))
                    for dir_path, copy_info in src_state:
                        copy_path = combine(_dst_path, frombase(_src_path, dir_path))
                        if copy_info.is_dir:
                            _dst_fs.makedir(copy_path, recreate=True)
                        elif copy_info.is_file:
                            try:
                                src_modified = copy_info.modified
                                dst_modified = dst_state[dir_path].modified
                            except KeyError:
                                # No destination counterpart: always copy.
                                do_copy = True
                            else:
                                # Copy when either mtime is unknown or the
                                # source is strictly newer.
                                do_copy = (
                                    src_modified is None
                                    or dst_modified is None
                                    or src_modified > dst_modified
                                )
                            if do_copy:
                                copier.copy(_src_fs, dir_path, _dst_fs, copy_path)
                                on_copy(_src_fs, dir_path, _dst_fs, copy_path)
| [
37811,
24629,
2733,
329,
23345,
4133,
1635,
23395,
9,
29905,
13,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
19720,
198,
198,
6738,
764,
48277,
1330,
376,
5188,
... | 2.232611 | 6,642 |
import numpy as np
import pandas as pd
import sys
import minimize as mini
import math
# f(x, y) = 100(y-x²)² + (1-x)²  (the Rosenbrock function)
# NOTE(review): f, grad_f and hess_f are referenced below but not defined in
# this chunk — presumably defined elsewhere in the file; confirm.
mims = []
methods = ["newton", "BFGS", "gradient"]
colors = ["darkblue", "yellow", "purple"]
# Run each optimization method from the same starting point (0, 0).
for i in range(len(methods)):
    mims.append(mini.Minimizer(f, 2, np.array([0, 0])))
    mims[i].f_grad = grad_f
    mims[i].f_hess = hess_f
    # NOTE(review): iterate() runs twice (first with logging, then without) —
    # verify the double run is intentional.
    mims[i].iterate(method=methods[i], log=True)
    mims[i].iterate(method=methods[i], log=False)

# Vectorized wrapper so the drawer can evaluate f over a grid.
vet_f = np.vectorize(lambda in1, in2: f((in1, in2)))

drawer = mini.Drawer()
drawer.draw_f(vet_f, mims[0])
# Overlay each method's optimization path in its own color.
for i in range(len(methods)):
    drawer.draw_path(vet_f, mims[i], mims[i].x, color=colors[i], density=20)
drawer.show()
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
25064,
198,
11748,
17775,
355,
9927,
198,
11748,
10688,
198,
198,
2,
277,
7,
87,
11,
331,
8,
796,
1802,
7,
88,
12,
87,
31185,
8,
31185,
1343,
357,
16... | 2.196774 | 310 |
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import defaultdict
# Note the name of the file is based on this URL:
# https://www.hackerrank.com/challenges/torque-and-development/problem
# The problem name is "Roads and Libraries"
if __name__ == '__main__':
    # HackerRank harness: results are written to the path in OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    # Each query: one line of counts/costs followed by one line per road edge.
    num_queries = int(input())
    for _ in range(num_queries):
        num_cities, num_roads, cost_lib, cost_road = map(int, input().split())
        roads = [list(map(int, input().rstrip().split())) for _ in range(num_roads)]
        fptr.write(str(roadsAndLibraries(num_cities, cost_lib, cost_road, roads)) + '\n')

    fptr.close()
| [
198,
2,
48443,
8800,
14,
29412,
18,
198,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
25064,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
2,
5740,
262,
1438,
286,
262,
2393,
318,
1912,
319,
42... | 2.495208 | 313 |
import aiohttp
from enum import Enum
from models.user import User
| [
11748,
257,
952,
4023,
198,
6738,
33829,
1330,
2039,
388,
198,
198,
6738,
4981,
13,
7220,
1330,
11787,
198
] | 3.526316 | 19 |
#!/usr/bin/env python
# coding: utf-8
import argparse
import logging
from multiprocessing import Pool
import pandas as pd
import papermill as pm
from pathlib import Path
from tqdm import tqdm
import warnings
# Silence pandas FutureWarnings and quiet down chatty third-party loggers.
warnings.simplefilter(action='ignore', category=FutureWarning)
for lib in ['blib2to3', 'papermill']:
    logger = logging.getLogger(lib)
    logger.setLevel(logging.WARNING)

from niddk_covid_sicr import get_data_prefix, get_ending, list_rois

notebook_path = Path(__file__).parent.parent / 'notebooks'

# Parse all the command-line arguments
parser = argparse.ArgumentParser(description=('Executes all of the analysis '
                                              'notebooks'))
parser.add_argument('model_name',
                    help='Name of the Stan model file (without extension)')
parser.add_argument('-dp', '--data_path', default='./data',
                    help='Path to directory containing the data files')
parser.add_argument('-fp', '--fits_path', default='./fits',
                    help='Path to directory containing pickled fit files')
parser.add_argument('-rp', '--results_path', default='./results/vis-notebooks',
                    help=('Path to directory where resulting notebooks '
                          'will be stored'))
parser.add_argument('-mp', '--models_path', default='./models',
                    help='Path to directory containing .stan files')
parser.add_argument('-r', '--rois', default=[], nargs='+',
                    help='Space separated list of ROIs')
# Bug fix: n_threads is a single integer. The original declaration used
# ``nargs='+'`` which made an explicit ``-n`` value arrive as a *list*,
# breaking ``min(args.n_threads, len(args.rois))`` below.
parser.add_argument('-n', '--n_threads', type=int, default=16,
                    help='Number of threads to use for analysis')
parser.add_argument('-f', '--fit_format', type=int, default=1,
                    help='Version of fit format')
parser.add_argument('-v', '--verbose', type=int, default=0,
                    help='Verbose error reporting')
args = parser.parse_args()

# Every *_path argument, except the (created below) results path, must exist.
for key, value in args.__dict__.items():
    if '_path' in key and 'results' not in key:
        assert Path(value).is_dir(),\
            "%s is not a directory" % Path(value).resolve()

# pathlibify some paths
data_path = Path(args.data_path)
fits_path = Path(args.fits_path)
models_path = Path(args.models_path)
results_path = Path(args.results_path)
results_path.mkdir(parents=True, exist_ok=True)

# Sanity-check that the input directories contain the expected file types.
assert any([x.name.endswith('.csv') for x in data_path.iterdir()]),\
    "No .csv files found in data_path %s" % (data_path.resolve())
assert any([x.name.endswith('.stan') for x in models_path.iterdir()]),\
    "No .stan files found in models_path %s" % (models_path.resolve())
assert any([x.name.endswith('.pkl') or x.name.endswith('.csv')
            for x in fits_path.iterdir()]),\
    "No .pkl or .csv files found in fits_path %s" % (fits_path.resolve())

ending = get_ending(args.fit_format)

# Default to every ROI that has both data and a finished model fit.
if not args.rois:
    data_rois = list_rois(data_path, get_data_prefix(), '.csv')
    fit_rois = list_rois(fits_path, args.model_name, ending)
    args.rois = list(set(data_rois).intersection(fit_rois))
args.n_threads = min(args.n_threads, len(args.rois))

print("Running visualization notebook for %d rois on model '%s'" %
      (len(args.rois), args.model_name))

# Make sure all ROI pickle files exist
for roi in args.rois:
    file = fits_path / ('%s_%s%s' % (args.model_name, roi, ending))
    assert file.is_file(), "No such %s file: %s" % (ending, file.resolve())

# Top progress bar (how many ROIs have finished)
pbar = tqdm(total=len(args.rois), desc="All notebooks", leave=True)

# Execute up to n_threads ROI notebooks at once.
# NOTE(review): `execute` and `update` are referenced here but not defined in
# this chunk — presumably defined elsewhere in the file; confirm.
pool = Pool(processes=args.n_threads)
jobs = {roi: pool.apply_async(execute,
                              [args.model_name, roi, data_path, fits_path,
                               models_path, notebook_path, results_path,
                               args.fit_format],
                              {'verbose': args.verbose},
                              callback=update)
        for roi in args.rois}
pool.close()
pool.join()
print('\n')

# Collect per-ROI error strings and report any that are not 'None'.
error_table = pd.Series({roi: job.get() for roi, job in jobs.items()})
error_table = error_table[error_table != 'None']
if len(error_table):
    print("Errors:")
    print(error_table)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
1822,
29572,
198,
11748,
18931,
198,
6738,
18540,
305,
919,
278,
1330,
19850,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
3348,
... | 2.381921 | 1,770 |
#!/usr/bin/env python3
"""
Library providing convenient classes and methods for writing data to files.
"""
import logging
import json
import pickle
try:
import yaml
except ImportError:
yaml = None
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Serializer():
    """Abstract parent for the concrete serializers below.

    Subclasses set the file extension and open-mode strings and override
    `marshal`/`unmarshal`.
    """
    ext = ""  # filename extension (without dot) used for serialized files
    woptions = ""  # file mode used when writing (e.g. "w" or "wb")
    roptions = ""  # file mode used when reading (e.g. "r" or "rb")

    @classmethod
    def marshal(cls, input_data):
        """ Override for marshalling """
        raise NotImplementedError()

    @classmethod
    def unmarshal(cls, input_string):
        """ Override for unmarshalling """
        raise NotImplementedError()
class YAMLSerializer(Serializer):
    """YAML serializer (requires PyYAML; ``yaml`` may be None when missing)."""
    ext = "yml"
    woptions = "w"
    roptions = "r"

    # Bug fix: the two @classmethod decorators had no function bodies attached
    # (a SyntaxError). The marshal/unmarshal implementations are restored
    # below — confirm they match the original intent.
    @classmethod
    def marshal(cls, input_data):
        """ Serialize ``input_data`` to a YAML string """
        return yaml.dump(input_data, default_flow_style=False)

    @classmethod
    def unmarshal(cls, input_string):
        """ Parse a YAML string into Python objects (safe loader) """
        return yaml.safe_load(input_string)
class JSONSerializer(Serializer):
    """ JSON Serializer """
    ext = "json"
    woptions = "w"
    roptions = "r"

    # Bug fix: the two @classmethod decorators had no function bodies attached
    # (a SyntaxError). The marshal/unmarshal implementations are restored
    # below — confirm they match the original intent.
    @classmethod
    def marshal(cls, input_data):
        """ Serialize ``input_data`` to a JSON string """
        return json.dumps(input_data)

    @classmethod
    def unmarshal(cls, input_string):
        """ Parse a JSON string into Python objects """
        return json.loads(input_string)
class PickleSerializer(Serializer):
    """ Pickle Serializer (binary file modes) """
    ext = "p"
    woptions = "wb"
    roptions = "rb"

    # Bug fix: the two @classmethod decorators had no function bodies attached
    # (a SyntaxError). The marshal/unmarshal implementations are restored
    # below — confirm they match the original intent.
    # SECURITY NOTE: pickle.loads must never be used on untrusted input.
    @classmethod
    def marshal(cls, input_data):
        """ Serialize ``input_data`` to pickle bytes """
        return pickle.dumps(input_data)

    @classmethod
    def unmarshal(cls, input_string):
        """ Restore Python objects from pickle bytes """
        return pickle.loads(input_string)
def get_serializer(serializer):
    """Map a serializer name ('json', 'pickle', 'yaml') to its class.

    Returns `None` for unrecognized names. Requesting "yaml" without PyYAML
    installed logs a warning and falls back to JSON.
    """
    if serializer == "json":
        return JSONSerializer
    if serializer == "pickle":
        return PickleSerializer
    if serializer == "yaml":
        if yaml is not None:
            return YAMLSerializer
        logger.warning("You must have PyYAML installed to use YAML as the serializer."
                       "Switching to JSON as the serializer.")
        return JSONSerializer
    return None
def get_serializer_from_ext(ext):
    """Map a filename extension (with leading dot) to a serializer class.

    Returns `None` for unknown extensions. YAML extensions fall back to JSON
    (with a warning) when PyYAML is unavailable.
    """
    if ext == ".json":
        return JSONSerializer
    if ext == ".p":
        return PickleSerializer
    if ext in (".yaml", ".yml"):
        if yaml is not None:
            return YAMLSerializer
        logger.warning("You must have PyYAML installed to use YAML as the serializer.\n"
                       "Switching to JSON as the serializer.")
        return JSONSerializer
    return None
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
23377,
4955,
11282,
6097,
290,
5050,
329,
3597,
1366,
284,
3696,
13,
198,
37811,
198,
11748,
18931,
198,
11748,
33918,
198,
11748,
2298,
293,
198,
198,
28311,
25,
198,
... | 2.572816 | 824 |
from bisect import bisect
from datetime import datetime
#Adapted from here https://stackoverflow.com/questions/33415475/how-to-get-current-date-and-time-from-gps-unsegment-time-in-python
_LEAP_DATES = ((1981, 6, 30), (1982, 6, 30), (1983, 6, 30),
               (1985, 6, 30), (1987, 12, 31), (1989, 12, 31),
               (1990, 12, 31), (1992, 6, 30), (1993, 6, 30),
               (1994, 6, 30), (1995, 12, 31), (1997, 6, 30),
               (1998, 12, 31), (2005, 12, 31), (2008, 12, 31),
               (2012, 6, 30), (2015, 6, 30), (2016, 12, 31))

# Last second (23:59:59 UTC) of each day on which a leap second was inserted.
# go here to check if any new leap seconds are added:
# ftp://hpiers.obspm.fr/iers/bul/bulc
LEAP_DATES = tuple(datetime(year, month, day, 23, 59, 59)
                   for year, month, day in _LEAP_DATES)


def utcleap(gps_sec):
    """Convert a GPS-style second count to UTC by subtracting the number of
    leap seconds inserted before the given moment."""
    moment = datetime.utcfromtimestamp(gps_sec)
    return gps_sec - bisect(LEAP_DATES, moment)
| [
6738,
47457,
478,
1330,
47457,
478,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
2,
48003,
276,
422,
994,
3740,
1378,
25558,
2502,
11125,
13,
785,
14,
6138,
507,
14,
31380,
21526,
2425,
14,
4919,
12,
1462,
12,
1136,
12,
14421,
... | 2.133641 | 434 |
from typer import Typer
from ...impl.utils import STORE, echo
# Typer sub-application grouping the config commands.
app = Typer()

# Keys that are allowed to be set/reset through the commands below.
ALLOWED_KEYS = {"shell", "index"}

# NOTE(review): the decorated command functions are missing below these
# decorators (the source appears truncated here); as written this is a
# SyntaxError — the "set" and "reset" command bodies need to be restored.
@app.command("set")
@app.command("reset")
| [
6738,
1259,
525,
1330,
7039,
525,
198,
198,
6738,
2644,
23928,
13,
26791,
1330,
3563,
6965,
11,
9809,
198,
198,
1324,
796,
7039,
525,
3419,
628,
198,
7036,
3913,
1961,
62,
7336,
16309,
796,
19779,
29149,
1600,
366,
9630,
20662,
628,
1... | 2.711864 | 59 |
from __future__ import print_function
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
import pickle
from torchvision import transforms
# import matplotlib.pyplot as plt
# x = x.view(x.shape[1], x.shape[2])
# plt.imshow(x)
# plt.show()
# temp = transforms.ToPILImage()(x)
# temp = transforms.ToTensor()(temp)
#
#
#
# temp = temp.view(temp.shape[1],temp.shape[2])
# plt.imshow(temp)
# plt.show()
import pandas as pd
# x, y -> train data; indicates how well the anchor separates p and n. Since
# the p/n performance is what gets reported, should the quality of the
# underlying data itself also be taken into account??
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
1822,
29572,
198,
11748,
28034,
198,
11748,
28034,
13,
1891,
2412,
13,
66,
463,
20471,
355,
269,
463,
20471,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
... | 1.919822 | 449 |
#!/usr/bin/env python3
"""settings.py: detection module settings file """
__author__ = "Ahmed Hermas"
__copyright__ = "Copyright 2022, ยฉ UOL "
__license__ = "MIT"
__version__ = "0.1.0"
__email__ = "a7medhermas@gmail.com"
# Per-detector configuration: maps a model name to the (private) loader and
# detector method names that the detection module resolves dynamically.
MODELS_CONFIG = {
    name: {'loader': loader, 'detector': detector}
    for name, loader, detector in (
        ('hog', '_hog_loader', '_getface_hog'),
        ('mmod', '_mmod_loader', '_getface__mmod'),  # note the double underscore
        ('retina', '_retina_loader', '_getface_retina'),
        ('cascade', '_cascade_loader', '_getface_cascade'),
        ('mediapipe', '_mediapipe_loader', '_getface_mediapipe'),
    )
}
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
33692,
13,
9078,
25,
13326,
8265,
6460,
2393,
37227,
198,
198,
834,
9800,
834,
796,
366,
10910,
1150,
2332,
5356,
1,
198,
834,
22163,
4766,
834,
796,
366,
15269,
33160,... | 2.029326 | 341 |
# -*- coding: utf-8 -*-
from ... import arquivos
from . import blocos
from . import registros
from .blocos import Bloco0
from .blocos import Bloco1
from .blocos import Bloco9
from .blocos import BlocoA
from .blocos import BlocoC
from .blocos import BlocoD
from .blocos import BlocoF
from .blocos import BlocoI
from .blocos import BlocoM
from .blocos import BlocoP
from .registros import Registro0000
from .registros import Registro9999
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
2644,
1330,
610,
421,
452,
418,
198,
6738,
764,
1330,
24003,
418,
198,
6738,
764,
1330,
4214,
4951,
198,
6738,
764,
2436,
420,
418,
1330,
1086,
25634,
15,
1... | 2.825806 | 155 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'register_dia.ui'
#
# Created by: PyQt5 UI code generator 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from background import *
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
30238,
62,
67,
544,
13,
9019,
6,
201,
198,
2,
201,
198,
2,
15622,
416,
25,
9485,
48,
83,
20,
1... | 2.701923 | 104 |
import os,sys,time,json
import win32api,win32con
import config
# Directory where Steam stores screenshots, read from the project config.
steam_screenshot_dir = config.get_config('steam_screenshot_dir')

if __name__ == '__main__':
    # Give the user time to focus the target window before capturing.
    time.sleep(3)
    # NOTE(review): snapscreen() and get_raw_location() are not defined in
    # this chunk — presumably defined elsewhere in the file; confirm.
    snapscreen()
    # Short settle delay — presumably waits for the capture to complete
    # before reading the location (TODO confirm).
    time.sleep(0.5)
    print(get_raw_location())
| [
11748,
28686,
11,
17597,
11,
2435,
11,
17752,
198,
11748,
1592,
2624,
15042,
11,
5404,
2624,
1102,
198,
11748,
4566,
198,
198,
21465,
62,
1416,
26892,
62,
15908,
796,
4566,
13,
1136,
62,
11250,
10786,
21465,
62,
1416,
26892,
62,
15908,
... | 2.520833 | 96 |
# individual network settings for each actor + critic pair
# see networkforall for details
from networkforall import Network
from utilities import hard_update, gumbel_softmax, onehot_from_logits
from torch.optim import Adam
import torch
import numpy as np
# add OU noise for exploration
from OUNoise import OUNoise
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = 'cpu'
| [
2,
1981,
3127,
6460,
329,
1123,
8674,
1343,
4014,
5166,
198,
2,
766,
3127,
1640,
439,
329,
3307,
198,
198,
6738,
3127,
1640,
439,
1330,
7311,
198,
6738,
20081,
1330,
1327,
62,
19119,
11,
308,
2178,
417,
62,
4215,
9806,
11,
530,
8940... | 3.561404 | 114 |
"""This module is intended to safety import Windows-specific features
from `ctypes` stdlib module on non-windows platform -- they are
replaced by mock objects. Despite the code which uses that features
becomes partially unoperable in this case, we can import it and
generate documentation for instance
"""
import warnings
from ctypes import * # noqa
from unittest.mock import Mock
try:
    from ctypes import WinDLL  # noqa
except ImportError:
    # On non-Windows platforms ctypes has no WinDLL; substitute a Mock so the
    # module can still be imported (e.g. for documentation builds), while
    # warning the user that the code is not functional here.
    warnings.warn(
        'ctypes.WinDLL is not available on non-Windows system. The code is not functional on '
        'current platform, but in order to be able import it we mock WinDLL with '
        'unittest.mock.Mock object'
    )
    WinDLL = Mock()
| [
37811,
1212,
8265,
318,
5292,
284,
3747,
1330,
3964,
12,
11423,
3033,
198,
6738,
4600,
310,
9497,
63,
14367,
8019,
8265,
319,
1729,
12,
28457,
3859,
1377,
484,
389,
198,
260,
21820,
416,
15290,
5563,
13,
7945,
262,
2438,
543,
3544,
32... | 3.263889 | 216 |
from metaflow.decorators import StepDecorator
| [
6738,
1138,
1878,
9319,
13,
12501,
273,
2024,
1330,
5012,
10707,
273,
1352,
628
] | 3.357143 | 14 |
import matplotlib.pyplot as plt
import json
import os
from collections import Counter
import pandas as pd
# Location of the simulation results for the "combfreq" experiment.
DATASET_DIR = 'datasets'
EXPERIMENT_DIR = 'combfreq'
RESULTS = os.path.join(DATASET_DIR, EXPERIMENT_DIR, 'simulation_results_scaled_tf.json')
RESULTS_EXTENDED = os.path.join(DATASET_DIR, EXPERIMENT_DIR, 'simulation_results_scaled_tf_extended.json')

# Only the "_extended" results file is analyzed below.
with open(RESULTS_EXTENDED, "r") as file:
    simulation_results = json.load(file)

# Collect every game's score (stringified so scores become categorical labels).
results = simulation_results["games"]
scores = []
for result in results:
    scores.append(str(results[result]["score"]))

# Frequency of each distinct score, as a one-column DataFrame sorted by label.
count = Counter(scores)
df = pd.DataFrame.from_dict(count, orient='index').sort_index(axis = 0)
print(df)
# Bar chart of the score distribution, saved to the graphs directory.
df.plot(xlabel='Scores', ylabel='Number of games', title='Score distribution', kind='bar')
# plt.show()
plt.savefig(os.path.join('graphs', 'analysis_unlimited.png'))
# plt.savefig(os.path.join('graphs', 'analysis.png'))
# print(df)
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
17268,
1330,
15034,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
35,
1404,
1921,
2767,
62,
34720,
796,
705,
19608,
292,
... | 2.706587 | 334 |
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from oslo_versionedobjects import fields
# NOTE(review): both registry decorators below have no class definitions
# attached (the decorated classes appear to have been stripped from this
# chunk); as written this is a SyntaxError — the decorated versioned-object
# classes need to be restored.
@base.CinderObjectRegistry.register
@base.CinderObjectRegistry.register
| [
2,
220,
220,
220,
15069,
1853,
8180,
10501,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846... | 3.464 | 250 |
# stdlib
import argparse
import logging
import logging.config
import signal
import sys
import os
# third party lib
import requests
# Internal lib
from client.lib import shared
from lib import settings
import worker
from worker import factory
from lib.logging_config import return_client_config
# Routes
# External address and port of the HTTP server the client talks to.
server_host = settings.ServerHTTP.external_host
server_port = settings.ServerHTTP.port

if __name__ == '__main__':
    # NOTE(review): main() is not defined in this chunk — presumably defined
    # elsewhere in the file; confirm.
    main()
| [
2,
14367,
8019,
198,
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
18931,
13,
11250,
198,
11748,
6737,
198,
11748,
25064,
198,
11748,
28686,
198,
198,
2,
2368,
2151,
9195,
198,
11748,
7007,
198,
198,
2,
18628,
9195,
198,
6738,
5456... | 3.528 | 125 |
from .cdn import return_cdn_avatar
# Set interaction opcodes
DISPATCH = 0
HEARTBEAT = 1
IDENTIFY = 2
RESUME = 6
RECONNECT = 7
INVALID_SESSION = 9
HELLO = 10
HEARTBEAT_ACK = 11
# Set application command types
SLASH = 2
USER = 2
MESSAGE = 3
# Set message response types
CHANNEL_WITH_SOURCE = 4
DEFERRED_CHANNEL_WITH_SOURCE = 5
DEFERRED_UPDATE_MESSAGE = 6
UPDATE_MESSAGE = 7
AUTOCOMPLETE_RESULT = 8
__version__ = '1.1.6' | [
6738,
764,
32341,
1330,
1441,
62,
32341,
62,
615,
9459,
198,
198,
2,
5345,
10375,
1034,
40148,
198,
26288,
47,
11417,
796,
657,
198,
13909,
7227,
12473,
1404,
796,
352,
198,
25256,
5064,
56,
796,
362,
198,
19535,
38340,
796,
718,
198,... | 2.433526 | 173 |