content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Baker, Aiden
# 2/10/2021
# DoNow1.2
print("Sensei, I am ready for the next stage in my training!") | [
2,
14372,
11,
317,
14029,
198,
2,
362,
14,
940,
14,
1238,
2481,
198,
2,
2141,
3844,
16,
13,
17,
198,
198,
4798,
7203,
41166,
72,
11,
314,
716,
3492,
329,
262,
1306,
3800,
287,
616,
3047,
2474,
8
] | 2.589744 | 39 |
"""Importing routines for tif data."""
# Copyright 2019 CSIRO (Data61)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List, Optional, Tuple, TypeVar
import numpy as np
import tables
from landshark.basetypes import (
ArraySource,
CategoricalArraySource,
ContinuousArraySource,
CoordinateArraySource,
IdWorker,
Worker,
)
from landshark.category import CategoryMapper
from landshark.image import ImageSpec
from landshark.iteration import batch_slices, with_slices
from landshark.metadata import (
CategoricalFeatureSet,
CategoricalTarget,
ContinuousFeatureSet,
ContinuousTarget,
FeatureSet,
Target,
)
from landshark.multiproc import task_list
from landshark.normalise import Normaliser
log = logging.getLogger(__name__)
T = TypeVar("T")
| [
37811,
20939,
278,
31878,
329,
256,
361,
1366,
526,
15931,
198,
198,
2,
15069,
13130,
9429,
43708,
357,
6601,
5333,
8,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,... | 3.283619 | 409 |
'''
https://community.topcoder.com/stat?c=problem_statement&pm=13458
'''
| [
7061,
6,
198,
5450,
1378,
28158,
13,
4852,
66,
12342,
13,
785,
14,
14269,
30,
66,
28,
45573,
62,
26090,
5,
4426,
28,
1485,
29334,
198,
7061,
6,
198
] | 2.517241 | 29 |
T = int(input())
lst = []
while T>0:
l = [0,0]
a,b = input().split()
Pc, Pr = int(a), int(b)
Pcs = str(Pc)
Prs = str(Pr)
if len(Pcs)==1:
cdigit = 1
else:
if Pc%9==0:
cdigit = Pc//9
else:
cdigit = Pc//9 +1
if len(Prs) == 1:
rdigit = 1
else:
if Pr%9==0:
rdigit = Pr//9
else:
rdigit = Pr//9 +1
if cdigit == rdigit:
l[0]=1
l[1]=cdigit
elif cdigit > rdigit:
l[1]=rdigit
l[0]=1
else:
l[0]=0
l[1]=cdigit
lst.append(l)
T = T-1
for items in lst:
for i in range(len(items)):
if len(items)-1 == i:
print(items[i],end="\n")
else:
print(items[i],end=" ")
| [
51,
796,
493,
7,
15414,
28955,
198,
75,
301,
796,
17635,
198,
4514,
309,
29,
15,
25,
198,
220,
220,
220,
300,
796,
685,
15,
11,
15,
60,
198,
220,
220,
220,
257,
11,
65,
796,
5128,
22446,
35312,
3419,
198,
220,
220,
220,
350,
6... | 1.580321 | 498 |
# Copyright 2017 The TensorFlow Lattice Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CalibratedEtl tests."""
# Dependency imports
import numpy as np
from tensorflow_lattice.python.estimators import calibrated_etl
from tensorflow_lattice.python.estimators import hparams as tfl_hparams
from tensorflow_lattice.python.lib import keypoints_initialization
from tensorflow_lattice.python.lib import test_data
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column_lib
from tensorflow.python.platform import test
_NUM_KEYPOINTS = 50
if __name__ == '__main__':
test.main()
| [
2,
15069,
2177,
383,
309,
22854,
37535,
406,
1078,
501,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 3.602339 | 342 |
import sys
if __name__ == "__main__":
sol()
| [
11748,
25064,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1540,
3419,
198
] | 2.318182 | 22 |
#!/usr/bin/python3
"""
Post processes the line and toggle coverage reports from verilator into
something which is easier to read and review.
"""
import os
import re
import sys
import yaml
import argparse
import jinja2
class AnnotatedFile(object):
"""
Describes a single annotated file.
"""
def __init__(self, in_file):
"""
Create a new annotated source file represetnation
"""
self.lines = []
self.splitlines = []
self.filename = os.path.basename(in_file.name)
self.lines = [l.replace("\t"," ") for l in in_file.readlines()]
matcher = re.compile(" [0-9].*")
# Very crudely parse the annotated source file into a list of
# tuples. First item of tuple is None, or integer for number of
# times the line is hit. The second item is the line itself.
for line in self.lines:
check = matcher.match(line)
if(line[0]=="%"):
spl = line.partition(" ")
self.splitlines.append((0,spl[2][:-1]))
elif(check):
spl = line[1:].partition(" ")
self.splitlines.append((int(spl[0][1:]),spl[2][:-1]))
else:
self.splitlines.append(("",line[:-1]))
def getscore(self):
"""
Return the number of covered lines as a percentage.
"""
num_hits = 0.0
num_miss = 0.0
for line in self.splitlines:
if(line[0] == 0):
num_miss += 1.0
if(line[0] != None):
num_hits += 1.0
total = num_hits + num_miss
return (num_hits / total) * 100.0
def writeout(self, to_file):
"""
Write out the annotated file with a rendered jinja template to
the specified file path.
"""
ld = jinja2.FileSystemLoader(
os.path.expandvars("$RVM_HOME/verif/coverage"))
env = jinja2.Environment(loader = ld)
template = env.get_template("report-template.html")
result = template.render(lines = self.splitlines,
filename= self.filename)
with open(to_file, "w") as fh:
fh.write(result)
def parseargs():
"""
Parses and returns all command line arguments to the program.
"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-o","--output", type=str,
help="output directory path.",
default="./work/cov-rpt")
parser.add_argument("-i","--input", nargs='+',
type=argparse.FileType('r'),
help="List of input files", default=[])
args = parser.parse_args()
return args
def writeOverview(scores, path):
"""
Writes an overview of the coverage scores for each file.
"""
ld = jinja2.FileSystemLoader(
os.path.expandvars("$RVM_HOME/verif/coverage"))
env = jinja2.Environment(loader = ld)
template = env.get_template("overview-template.html")
result = template.render(scores = scores)
with open(path, "w") as fh:
fh.write(result)
def main():
"""
Main entry point for using the script
"""
args = parseargs()
scores = []
for inputfile in args.input:
print("Parsing %s" % inputfile.name)
af = AnnotatedFile(inputfile)
name = os.path.basename(inputfile.name)+".html"
outputfile = os.path.join(args.output,name)
print("Writing to %s" % outputfile)
af.writeout(outputfile)
scores.append((af.filename,af.getscore()))
writeOverview(scores,os.path.join(args.output,"overview.html"))
sys.exit(0)
if(__name__=="__main__"):
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
37811,
198,
6307,
7767,
262,
1627,
290,
19846,
5197,
3136,
422,
3326,
346,
1352,
656,
198,
18927,
543,
318,
4577,
284,
1100,
290,
2423,
13,
198,
37811,
198,
198,
11748,
28686,
198,
... | 2.178344 | 1,727 |
import torch
import warnings
from typing import Callable
# Typing hints: a Point is a tensor with some shape, and a scalar is, well, a single-element tensor
Point = torch.Tensor
Scalar = torch.Tensor
class Manifold(object):
"""Base class for Manifolds with an associated length(). In other words, a Length Space
"""
def project(self, pt: Point) -> Point:
"""Project a point from the ambient space onto the manifold.
"""
return self._project(pt) if not self.contains(pt) else pt
def _project(self, pt: Point) -> Point:
"""Project without first checking contains()
"""
# Default behavior (to be overridden by subclasses): just return the point. Equivalent to saying the manifold
# is identical to the ambient space.
return pt
def contains(self, pt: Point, atol: float = 1e-6) -> bool:
"""Check whether the given point is within 'atol' tolerance of the manifold.
Manifold.contains checks match to ambient shape only. Further specialization done by subclasses.
"""
return pt.size()[-self.ambient:] == self.shape
def _has_implemented_closed_form_geodesic(self) -> bool:
"""Check if there is an implementation of a closed-form geodesic for this space.
"""
return False
class HyperSphere(Manifold):
"""Manifold of points on the surface of a dim-dimensional sphere (ambient dim+1).
"""
class VectorSpace(Manifold):
"""VectorSpace is a manifold of arbitrarily-sized vectors with the default metric being Euclidean.
"""
class Matrix(VectorSpace):
"""Manifold of Matrices of size (rows, cols)
"""
class SymmetricMatrix(Matrix):
"""Manifold of Symmetric Matrices of size (rows, rows)
"""
class SPDMatrix(SymmetricMatrix):
"""Manifold of Symmetric Positive (Semi-)Definite Matrices
"""
class DistMatrix(SymmetricMatrix):
"""Manifold of Pairwise-distance matrices, i.e. a SymmetricMatrix with zero diagonals and non-negative off-diagonals
"""
__all__ = ['Point', 'Scalar', 'Manifold', 'HyperSphere', 'Matrix', 'SPDMatrix', 'DistMatrix']
| [
11748,
28034,
198,
11748,
14601,
198,
6738,
19720,
1330,
4889,
540,
628,
198,
2,
17134,
278,
20269,
25,
257,
6252,
318,
257,
11192,
273,
351,
617,
5485,
11,
290,
257,
16578,
283,
318,
11,
880,
11,
257,
2060,
12,
30854,
11192,
273,
1... | 2.942308 | 728 |
#!/usr/bin/env python
from os.path import join
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import netCDF4 as nc4
from e3sm_case_output import day_str
REF_CASE_NAME = "timestep_ctrl"
TEST_CASE_NAME = "timestep_all_10s"
OUTPUT_DIR = "/p/lustre2/santos36/timestep_precip/"
LAND_TROPICS = True
TROPICS_ONLY = False
if LAND_TROPICS:
TROPICS_ONLY = True
START_YEAR = 3
START_MONTH = 3
END_YEAR = 4
END_MONTH = 2
suffix = '_y{}m{}-y{}m{}'.format(day_str(START_YEAR),
day_str(START_MONTH),
day_str(END_YEAR),
day_str(END_MONTH))
if TROPICS_ONLY:
if LAND_TROPICS:
suffix += '_lndtropics'
else:
suffix += '_tropics'
log_file = open("plot_precip_log{}.txt".format(suffix), 'w')
nmonths = (END_YEAR - START_YEAR) * 12 - (START_MONTH - 1) + END_MONTH
imonths = list(range(nmonths))
curr_month = START_MONTH
curr_year = START_YEAR
months = []
years = []
for i in range(nmonths):
months.append(curr_month)
years.append(curr_year)
curr_month += 1
if curr_month > 12:
curr_month = 1
curr_year += 1
out_file_template = "{}.freq.{}-{}.nc"
first_file_name = out_file_template.format(REF_CASE_NAME, "00"+day_str(START_YEAR),
day_str(START_MONTH))
first_file = nc4.Dataset(join(OUTPUT_DIR, first_file_name), 'r')
ncol = len(first_file.dimensions['ncol'])
nbins = len(first_file.dimensions['nbins'])
bin_lower_bounds = first_file['bin_lower_bounds'][:]
bin_width = np.log(bin_lower_bounds[2] / bin_lower_bounds[1])
lat = first_file['lat'][:]
lon = first_file['lon'][:]
area = first_file['area'][:]
# For tropics_only cases, just use a weight of 0 for all other columns.
if TROPICS_ONLY:
if LAND_TROPICS:
# Just pick a random file with the same grid as the run.
landfrac_file_name = '/p/lustre2/santos36/timestep_monthly_avgs/timestep_ctrl.0001-01.nc'
landfrac_file = nc4.Dataset(landfrac_file_name, 'r')
landfrac = landfrac_file['LANDFRAC'][0,:]
for i in range(ncol):
if np.abs(lat[i]) > 30.:
area[i] = 0.
else:
area[i] *= landfrac[i]
landfrac_file.close()
else:
for i in range(ncol):
if np.abs(lat[i]) > 30.:
area[i] = 0.
area_sum = area.sum()
weights = area/area_sum
first_file.close()
ref_sample_num_total = 0
test_sample_num_total = 0
prec_vars = ("PRECC", "PRECL", "PRECT")
ref_num_avgs = {}
ref_amount_avgs = {}
for var in prec_vars:
ref_num_avgs[var] = np.zeros((nbins,))
ref_amount_avgs[var] = np.zeros((nbins,))
test_num_avgs = {}
test_amount_avgs = {}
for var in prec_vars:
test_num_avgs[var] = np.zeros((nbins,))
test_amount_avgs[var] = np.zeros((nbins,))
for i in range(nmonths):
year = years[i]
year_string = "00" + day_str(year)
month = months[i]
month_string = day_str(month)
print("On year {}, month {}.".format(year, month))
out_file_name = out_file_template.format(REF_CASE_NAME, year_string, month_string)
out_file = nc4.Dataset(join(OUTPUT_DIR, out_file_name), 'r')
ref_sample_num_total += out_file.sample_num
for var in prec_vars:
num_name = "{}_num".format(var)
amount_name = "{}_amount".format(var)
for j in range(ncol):
ref_num_avgs[var] += out_file[num_name][j,:] * weights[j]
for j in range(ncol):
ref_amount_avgs[var] += out_file[amount_name][j,:] * weights[j]
out_file_name = out_file_template.format(TEST_CASE_NAME, year_string, month_string)
out_file = nc4.Dataset(join(OUTPUT_DIR, out_file_name), 'r')
test_sample_num_total += out_file.sample_num
for var in prec_vars:
num_name = "{}_num".format(var)
amount_name = "{}_amount".format(var)
for j in range(ncol):
test_num_avgs[var] += out_file[num_name][j,:] * weights[j]
for j in range(ncol):
test_amount_avgs[var] += out_file[amount_name][j,:] * weights[j]
for var in prec_vars:
ref_num_avgs[var] /= ref_sample_num_total
ref_amount_avgs[var] /= ref_sample_num_total
test_num_avgs[var] /= test_sample_num_total
test_amount_avgs[var] /= test_sample_num_total
# Threshold for precipitation to be considered "extreme", in mm/day.
PRECE_THRESHOLD = 97.
ibinthresh = -1
for i in range(nbins):
if bin_lower_bounds[i] > PRECE_THRESHOLD:
ibinthresh = i
break
if ibinthresh == -1:
print("Warning: extreme precip threshold greater than largest bin bound.")
for var in prec_vars:
# Leave out zero bin from loglog plot.
plt.loglog(bin_lower_bounds[1:], ref_num_avgs[var][1:], 'k')
plt.loglog(bin_lower_bounds[1:], test_num_avgs[var][1:], 'r')
plt.title("Frequency distribution of precipitation ({}/{}-{}/{})".format(
day_str(START_MONTH), day_str(START_YEAR),
day_str(END_MONTH), day_str(END_YEAR)))
plt.xlabel("Precipitation intensity (mm/day)")
plt.ylabel("fraction")
plt.savefig("{}_freq{}.png".format(var, suffix))
plt.close()
plt.semilogx(bin_lower_bounds[1:], ref_amount_avgs[var][1:] / bin_width, 'k')
plt.semilogx(bin_lower_bounds[1:], test_amount_avgs[var][1:] / bin_width, 'r')
if var == "PRECT":
print("Extreme precipitation rate for reference: ",
ref_amount_avgs[var][ibinthresh:].sum(),
file=log_file)
print("Extreme precipitation rate for test: ",
test_amount_avgs[var][ibinthresh:].sum(), "(Diff = ",
test_amount_avgs[var][ibinthresh:].sum() - ref_amount_avgs[var][ibinthresh:].sum(), ")",
file=log_file)
plt.title("Amounts of precipitation ({}/{}-{}/{})".format(
day_str(START_MONTH), day_str(START_YEAR),
day_str(END_MONTH), day_str(END_YEAR)))
plt.xlabel("Precipitation intensity (mm/day)")
plt.ylabel("Average precipitation amount (mm/day)")
plt.savefig("{}_amount{}.png".format(var, suffix))
plt.close()
log_file.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
46384,
11537,
198,
11748,
2603,
2... | 2.100275 | 2,912 |
# coding=utf-8
from django.db import models
from django.utils.translation import ugettext_lazy as _
from positions.fields import PositionField
from .utils import slugify
STATUS = (
(0, _(u'Active')),
(1, _(u'Editing')),
(127, _(u'Deleted')),
)
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
198,
198,
6738,
6116,
13,
25747,
1330,
23158,
15878,... | 2.538462 | 104 |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of generic interfaces for MONAI transforms.
"""
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, Generator, Hashable, Iterable, List, Optional, Tuple
import numpy as np
import torch
from monai import transforms
from monai.config import KeysCollection
from monai.utils import MAX_SEED, ensure_tuple
__all__ = ["apply_transform", "Randomizable", "RandomizableTransform", "Transform", "MapTransform"]
def apply_transform(transform: Callable, data, map_items: bool = True):
"""
Transform `data` with `transform`.
If `data` is a list or tuple and `map_data` is True, each item of `data` will be transformed
and this method returns a list of outcomes.
otherwise transform will be applied once with `data` as the argument.
Args:
transform: a callable to be used to transform `data`
data: an object to be transformed.
map_items: whether to apply transform to each item in `data`,
if `data` is a list or tuple. Defaults to True.
Raises:
Exception: When ``transform`` raises an exception.
"""
try:
if isinstance(data, (list, tuple)) and map_items:
return [transform(item) for item in data]
return transform(data)
except Exception as e:
if not isinstance(transform, transforms.compose.Compose):
# log the input data information of exact transform in the transform chain
datastats = transforms.utility.array.DataStats(data_shape=False, value_range=False)
datastats._logger.info("input data information of the runtime error transform:")
if isinstance(data, (list, tuple)):
data = data[0]
if isinstance(data, dict):
for k, v in data.items():
_log_stats(data=v, prefix=k)
else:
_log_stats(data=data)
raise RuntimeError(f"applying transform {transform}") from e
class Randomizable(ABC):
"""
An interface for handling random state locally, currently based on a class variable `R`,
which is an instance of `np.random.RandomState`.
"""
R: np.random.RandomState = np.random.RandomState()
def set_random_state(
self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
) -> "Randomizable":
"""
Set the random state locally, to control the randomness, the derived
classes should use :py:attr:`self.R` instead of `np.random` to introduce random
factors.
Args:
seed: set the random state with an integer seed.
state: set the random state with a `np.random.RandomState` object.
Raises:
TypeError: When ``state`` is not an ``Optional[np.random.RandomState]``.
Returns:
a Randomizable instance.
"""
if seed is not None:
_seed = id(seed) if not isinstance(seed, (int, np.integer)) else seed
_seed = _seed % MAX_SEED
self.R = np.random.RandomState(_seed)
return self
if state is not None:
if not isinstance(state, np.random.RandomState):
raise TypeError(f"state must be None or a np.random.RandomState but is {type(state).__name__}.")
self.R = state
return self
self.R = np.random.RandomState()
return self
def randomize(self, data: Any) -> None:
"""
Within this method, :py:attr:`self.R` should be used, instead of `np.random`, to introduce random factors.
all :py:attr:`self.R` calls happen here so that we have a better chance to
identify errors of sync the random state.
This method can generate the random factors based on properties of the input data.
Raises:
NotImplementedError: When the subclass does not override this method.
"""
raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
class Transform(ABC):
"""
An abstract class of a ``Transform``.
A transform is callable that processes ``data``.
It could be stateful and may modify ``data`` in place,
the implementation should be aware of:
#. thread safety when mutating its own states.
When used from a multi-process context, transform's instance variables are read-only.
#. ``data`` content unused by this transform may still be used in the
subsequent transforms in a composed transform.
#. storing too much information in ``data`` may not scale.
See Also
:py:class:`monai.transforms.Compose`
"""
@abstractmethod
def __call__(self, data: Any):
"""
``data`` is an element which often comes from an iteration over an
iterable, such as :py:class:`torch.utils.data.Dataset`. This method should
return an updated version of ``data``.
To simplify the input validations, most of the transforms assume that
- ``data`` is a Numpy ndarray, PyTorch Tensor or string
- the data shape can be:
#. string data without shape, `LoadImage` transform expects file paths
#. most of the pre-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``,
except that `AddChannel` expects (spatial_dim_1[, spatial_dim_2, ...]) and
`AsChannelFirst` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels)
#. most of the post-processing transforms expect
``(batch_size, num_channels, spatial_dim_1[, spatial_dim_2, ...])``
- the channel dimension is not omitted even if number of channels is one
This method can optionally take additional arguments to help execute transformation operation.
Raises:
NotImplementedError: When the subclass does not override this method.
"""
raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
class RandomizableTransform(Randomizable, Transform):
"""
An interface for handling random state locally, currently based on a class variable `R`,
which is an instance of `np.random.RandomState`.
This is mainly for randomized data augmentation transforms. For example::
class RandShiftIntensity(RandomizableTransform):
def randomize():
self._offset = self.R.uniform(low=0, high=100)
def __call__(self, img):
self.randomize()
return img + self._offset
transform = RandShiftIntensity()
transform.set_random_state(seed=0)
"""
def randomize(self, data: Any) -> None:
"""
Within this method, :py:attr:`self.R` should be used, instead of `np.random`, to introduce random factors.
all :py:attr:`self.R` calls happen here so that we have a better chance to
identify errors of sync the random state.
This method can generate the random factors based on properties of the input data.
"""
self._do_transform = self.R.rand() < self.prob
class MapTransform(Transform):
"""
A subclass of :py:class:`monai.transforms.Transform` with an assumption
that the ``data`` input of ``self.__call__`` is a MutableMapping such as ``dict``.
The ``keys`` parameter will be used to get and set the actual data
item to transform. That is, the callable of this transform should
follow the pattern:
.. code-block:: python
def __call__(self, data):
for key in self.keys:
if key in data:
# update output data with some_transform_function(data[key]).
else:
# raise exception unless allow_missing_keys==True.
return data
Raises:
ValueError: When ``keys`` is an empty iterable.
TypeError: When ``keys`` type is not in ``Union[Hashable, Iterable[Hashable]]``.
"""
@abstractmethod
def __call__(self, data):
"""
``data`` often comes from an iteration over an iterable,
such as :py:class:`torch.utils.data.Dataset`.
To simplify the input validations, this method assumes:
- ``data`` is a Python dictionary
- ``data[key]`` is a Numpy ndarray, PyTorch Tensor or string, where ``key`` is an element
of ``self.keys``, the data shape can be:
#. string data without shape, `LoadImaged` transform expects file paths
#. most of the pre-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``,
except that `AddChanneld` expects (spatial_dim_1[, spatial_dim_2, ...]) and
`AsChannelFirstd` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels)
#. most of the post-processing transforms expect
``(batch_size, num_channels, spatial_dim_1[, spatial_dim_2, ...])``
- the channel dimension is not omitted even if number of channels is one
Raises:
NotImplementedError: When the subclass does not override this method.
returns:
An updated dictionary version of ``data`` by applying the transform.
"""
raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
def key_iterator(
self,
data: Dict[Hashable, Any],
*extra_iterables: Optional[Iterable],
) -> Generator:
"""
Iterate across keys and optionally extra iterables. If key is missing, exception is raised if
`allow_missing_keys==False` (default). If `allow_missing_keys==True`, key is skipped.
Args:
data: data that the transform will be applied to
extra_iterables: anything else to be iterated through
"""
# if no extra iterables given, create a dummy list of Nones
ex_iters = extra_iterables if extra_iterables else [[None] * len(self.keys)]
# loop over keys and any extra iterables
_ex_iters: List[Any]
for key, *_ex_iters in zip(self.keys, *ex_iters):
# all normal, yield (what we yield depends on whether extra iterables were given)
if key in data.keys():
yield (key,) + tuple(_ex_iters) if extra_iterables else key
# if missing keys not allowed, raise
elif not self.allow_missing_keys:
raise KeyError(f"Key was missing ({key}) and allow_missing_keys==False")
| [
2,
15069,
12131,
532,
33448,
25000,
20185,
42727,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921... | 2.659042 | 4,197 |
TRAINING_DATA_FILE = "gold_entities.jsonl"
KB_FILE = "kb"
KB_MODEL_DIR = "nlp_kb"
OUTPUT_MODEL_DIR = "nlp"
PRIOR_PROB_PATH = "prior_prob.csv"
ENTITY_DEFS_PATH = "entity_defs.csv"
ENTITY_FREQ_PATH = "entity_freq.csv"
ENTITY_DESCR_PATH = "entity_descriptions.csv"
LOG_FORMAT = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
| [
51,
3861,
1268,
2751,
62,
26947,
62,
25664,
796,
366,
24267,
62,
298,
871,
13,
17752,
75,
1,
198,
22764,
62,
25664,
796,
366,
32812,
1,
198,
22764,
62,
33365,
3698,
62,
34720,
796,
366,
21283,
79,
62,
32812,
1,
198,
2606,
7250,
38... | 2.049383 | 162 |
#Guru
#takes fasta alignments, a distance metric and builds neighbor joining trees
import os, sys
from galaxy import eggs
from galaxy.tools.util import hyphy_util
#Retrieve hyphy path, this will need to be the same across the cluster
tool_data = sys.argv.pop()
HYPHY_PATH = os.path.join( tool_data, "HYPHY" )
HYPHY_EXECUTABLE = os.path.join( HYPHY_PATH, "HYPHY" )
#Read command line arguments
input_filename = os.path.abspath(sys.argv[1].strip())
output_filename = os.path.abspath(sys.argv[2].strip())
tree_contents = sys.argv[3].strip()
nuc_model = sys.argv[4].strip()
analysis = sys.argv[5].strip()
if tree_contents == "":
print >> sys.stderr, "Please specify a valid tree definition."
sys.exit()
tree_filename = hyphy_util.get_filled_temp_filename(tree_contents)
if analysis == "local":
fitter_filename = hyphy_util.get_filled_temp_filename(hyphy_util.SimpleLocalFitter)
else:
fitter_filename = hyphy_util.get_filled_temp_filename(hyphy_util.SimpleGlobalFitter)
tabwriter_filename = hyphy_util.get_filled_temp_filename(hyphy_util.TabWriter)
FastaReader_filename = hyphy_util.get_filled_temp_filename(hyphy_util.FastaReader)
#setup Config file
config_filename = hyphy_util.get_dnds_config_filename(fitter_filename, tabwriter_filename, "Universal", tree_filename, input_filename, nuc_model, output_filename, FastaReader_filename)
#Run Hyphy
hyphy_cmd = "%s BASEPATH=%s USEPATH=/dev/null %s" % (HYPHY_EXECUTABLE, HYPHY_PATH, config_filename)
hyphy = os.popen(hyphy_cmd, 'r')
#print hyphy.read()
hyphy.close()
#remove temporary files
os.unlink(fitter_filename)
os.unlink(tabwriter_filename)
os.unlink(tree_filename)
os.unlink(FastaReader_filename)
os.unlink(config_filename)
if nuc_model == "000000":
model = "F81"
elif nuc_model == "010010":
model = "HKY85"
else:
model = "REV"
print "Analysis: %s; Model: %s; Tree: %s" %(analysis, model, tree_contents)
| [
2,
38,
14717,
201,
198,
2,
83,
1124,
3049,
64,
10548,
902,
11,
257,
5253,
18663,
290,
12188,
4780,
9679,
7150,
201,
198,
11748,
28686,
11,
25064,
201,
198,
6738,
16161,
1330,
9653,
201,
198,
6738,
16161,
13,
31391,
13,
22602,
1330,
... | 2.547464 | 769 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NFSN DNS Driver
"""
import re
from libcloud.common.exceptions import BaseHTTPError
from libcloud.common.nfsn import NFSNConnection
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.types import RecordAlreadyExistsError
from libcloud.dns.types import Provider, RecordType
from libcloud.utils.py3 import httplib
__all__ = [
"NFSNDNSDriver",
]
# The NFSN API does not return any internal "ID" strings for any DNS records.
# This means that we must set all returned Record objects' id properties to
# None. It also means that we cannot implement libcloud APIs that rely on
# record_id, such as get_record(). Instead, the NFSN-specific
# ex_get_records_by() method will return the desired Record objects.
#
# Additionally, the NFSN API does not provide ways to create, delete, or list
# all zones, so create_zone(), delete_zone(), and list_zones() are not
# implemented.
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
393,
517,
198,
2,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
9387,
351,
198,
2,
428,
670,
329,
3224,
1321,
5115,
6634,
9238,
13,
198,
2,
383,
7054,
37,
... | 3.757511 | 466 |
#!/usr/bin/env python3
'''
Copyright 2018, VDMS
Licensed under the terms of the BSD 2-clause license. See LICENSE file for terms.
/collated endpoints. Designed to return info about audit by audit, pop & srvtype.
Accepts a regex filter for the main name
```swagger-yaml
/collated/{collatedType}/ :
get:
description: |
Returns data that get's collated in the collate module. Can get results deliminated by audit for audits
( yes you can get audits by audit ), pops & srvtypes.
responses:
200:
description: OK
tags:
- audits
parameters:
- name: collatedType
in: path
description: |
The type of collated value that you wish to see. Initially can only be pop,
srvtype or acoll (For audit). In the future may include more collations if
additional collations are added.
schema:
type: string
enum: [pop, srvtype, acoll]
required: true
- name: typefilter
in: query
description: |
A regex to match for the collated type (pop, srvtype or audit). [PCRE](https://mariadb.com/kb/en/mariadb/regexp/) type
regular expressions are accepted. Matched on the $collatedType column of the table in question. Should be encased in
parenthesis as it's evaluated by [ast.literal_eval](https://docs.python.org/3.6/library/ast.html) on the backend as
part of it's sanitization.
schema:
type: string
required: false
- name: auditID
in: query
description: |
An audit ID to check against. Will filter results to just the auditID that you're interested in. For example, specifying
7 with a collatedType of "pop" will lead to all of the pops returning their pass/fail/exempt amounts for auditID #7.
schema:
type: string
required: false
```
'''
import json
import ast
import time
import os
import hashlib
from flask import current_app, Blueprint, g, request, jsonify, send_from_directory, abort
import manoward
collated = Blueprint('api2_collated', __name__)
@collated.route("/collated/", methods=['GET'])
@collated.route("/collated/<collatedType>", methods=['GET'])
@collated.route("/collated/<collatedType>/", methods=['GET'])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
7061,
6,
198,
15269,
2864,
11,
569,
35,
5653,
198,
26656,
15385,
739,
262,
2846,
286,
262,
347,
10305,
362,
12,
565,
682,
5964,
13,
4091,
38559,
24290,
2393,
329,
2846,
13,
... | 2.650519 | 867 |
# add.py
# The CIL add instruction
# Copyright 2010 Marty Dill - see LICENSE for details
from Instruction import Instruction
import unittest
from Stack import StackStateException
from Instructions.Instruction import register
from Variable import Variable
register('add', add)
| [
2,
751,
13,
9078,
198,
2,
383,
327,
4146,
751,
12064,
198,
2,
15069,
3050,
29876,
44322,
532,
766,
38559,
24290,
329,
3307,
198,
198,
6738,
46486,
1330,
46486,
198,
11748,
555,
715,
395,
198,
6738,
23881,
1330,
23881,
9012,
16922,
198... | 4.444444 | 63 |
#!/Users/denisroldan/Projects/talentum-2015-examples/python/demo-django/env/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
2,
48443,
14490,
14,
6559,
271,
305,
335,
272,
14,
16775,
82,
14,
39240,
298,
388,
12,
4626,
12,
1069,
12629,
14,
29412,
14,
9536,
78,
12,
28241,
14208,
14,
24330,
14,
8800,
14,
29412,
198,
6738,
42625,
14208,
13,
7295,
1330,
4542,
... | 2.757143 | 70 |
# this file contains only those settings which are used in developing phase
# and extends base.py settings, which has all the default settings
from django_reusable.settings.base import *
from django_reusable.settings.third_party import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
| [
2,
428,
2393,
4909,
691,
883,
6460,
543,
389,
973,
287,
5922,
7108,
198,
2,
290,
14582,
2779,
13,
9078,
6460,
11,
543,
468,
477,
262,
4277,
6460,
198,
198,
6738,
42625,
14208,
62,
260,
31979,
13,
33692,
13,
8692,
1330,
1635,
198,
... | 2.809045 | 199 |
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lingvo gpipe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import py_utils
from lingvo.core import test_utils
from lingvo.core import tshape
from lingvo.core.gpipe import FeatureExtractionLayer
from lingvo.core.gpipe import PartitionSequentialLayers
from lingvo.core.gpipe import PipeliningLayer
from lingvo.core.layers import Conv2DLayerNoPadding
from lingvo.core.layers import FetchLayer
from six.moves import range
class _TimestepAccumulator(base_layer.Accumulator):
"""Simple accumulator for counting timesteps in the pipeline."""
class _SimpyLayer(base_layer.BaseLayer):
"""Simpy Layer with accumulator that counts time step."""
@classmethod
@base_layer.initializer
@classmethod
def _BuildDummyPipelineCnn(num_splits=4,
num_micro_batches=8,
micro_batch_size=None):
"""Construct a dummy layer that consist of 16 3x3 conv layers.
In addition, each conv layer increments a count every time step.
Args:
num_splits: number of cells for pipeline cnn
num_micro_batches: number of time steps.
micro_batch_size: Size of a micro batch.
Returns:
A PipeliningLayer layer.
"""
assert num_splits in [1, 2, 4, 8, 16]
num_layers = 16
layers = []
for i in range(num_layers):
layers.append(_SimpyLayer.Params().Set(name='layer_{}'.format(i)))
if num_splits == 1:
p = FeatureExtractionLayer.Params().Set(name='seq', sub=layers)
else:
cell_tpl = []
layers_per_split = num_layers // num_splits
num_act_outputs = 0
num_act_inputs = 0
act_fetch_layers = None
for split in range(num_splits):
sub = layers[split * layers_per_split:(split + 1) * layers_per_split]
if split == 0:
sub.append(FetchLayer.Params().Set(name='fetch'))
num_act_outputs = 1
act_fetch_layers = ['fetch']
else:
num_act_inputs = 1
act_fetch_layers = []
split_layer = FeatureExtractionLayer.Params().Set(
name='split_{}'.format(split),
sub=sub,
act_fetch_layers=act_fetch_layers,
num_act_inputs=num_act_inputs,
num_act_outputs=num_act_outputs)
cell_tpl.append(split_layer)
p = PipeliningLayer.Params().Set(
name='pipeline',
num_micro_batches=num_micro_batches,
micro_batch_size=micro_batch_size,
cell_tpl=cell_tpl,
before_tpl=[])
layer = p.Instantiate()
return layer
if __name__ == '__main__':
tf.test.main()
| [
2,
406,
600,
355,
25,
21015,
17,
11,
21015,
18,
198,
2,
15069,
13130,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
1... | 2.716828 | 1,236 |
"""Manager Messages."""
from enum import Enum
from pathlib import Path
from typing import Dict, Optional
from pydantic import BaseModel
from astoria import __version__
from astoria.common.code_status import CodeStatus
from astoria.common.disks import DiskInfo, DiskTypeCalculator, DiskUUID
from astoria.common.metadata import Metadata
class ManagerMessage(BaseModel):
"""Common data that all manager messages output."""
class Status(Enum):
"""Running Status of the manager daemon."""
STOPPED = "STOPPED"
RUNNING = "RUNNING"
status: Status
astoria_version: str = __version__
class ProcessManagerMessage(ManagerMessage):
"""
Status message for Process Manager.
Published to astoria/astprocd
"""
code_status: Optional[CodeStatus]
disk_info: Optional[DiskInfo]
class MetadataManagerMessage(ManagerMessage):
"""
Status message for Metadata Manager.
Published to /astoria/astmetad
"""
metadata: Metadata
class DiskManagerMessage(ManagerMessage):
"""
Status message for Disk Manager.
Published to /astoria/astdiskd
"""
disks: Dict[DiskUUID, Path]
def calculate_disk_info(
self,
default_usercode_entrypoint: str,
) -> Dict[DiskUUID, DiskInfo]:
"""
Calculate the disk info of the disks in the message.
As astdiskd only gives us information about the path of each disk,
we need to calculate the type of each disk in the message.
:param default_usercode_entrypoint: default entrypoint from astoria config
:returns: A dictionary of disk UUIDs and disk information.
"""
disk_type_calculator = DiskTypeCalculator(default_usercode_entrypoint)
return {
uuid: DiskInfo(
uuid=uuid,
mount_path=path,
disk_type=disk_type_calculator.calculate(path),
)
for uuid, path in self.disks.items()
}
| [
37811,
13511,
43534,
526,
15931,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
360,
713,
11,
32233,
198,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
198,
198,
6738,
6468,
7661,
1330,
1... | 2.673854 | 742 |
size = int(input())
matrix = []
for _ in range(size):
matrix.append([int(x) for x in input().split()])
primary_diagonal_sum = 0
secondary_diagonal_sum = 0
for i in range(len(matrix)):
primary_diagonal_sum += matrix[i][i]
secondary_diagonal_sum += matrix[i][size - i - 1]
total = abs(primary_diagonal_sum - secondary_diagonal_sum)
print(total)
| [
7857,
796,
493,
7,
15414,
28955,
198,
198,
6759,
8609,
796,
17635,
198,
198,
1640,
4808,
287,
2837,
7,
7857,
2599,
198,
220,
220,
220,
17593,
13,
33295,
26933,
600,
7,
87,
8,
329,
2124,
287,
5128,
22446,
35312,
3419,
12962,
198,
198... | 2.615942 | 138 |
# -*- coding: utf-8 -*-
from zvt.recorders.eastmoney.trading.holder_trading_recorder import *
from zvt.recorders.eastmoney.trading.manager_trading_recorder import *
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
1976,
36540,
13,
8344,
6361,
13,
23316,
26316,
13,
2213,
4980,
13,
13829,
62,
2213,
4980,
62,
8344,
2875,
1330,
1635,
198,
6738,
1976,
36540,
13,
8344,
6361,
13,
... | 2.75 | 60 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from dataset import load_data
from vae import VAE
from conv_vae import ConvVAE
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
# Define the VAE network architecture
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, default='digit_model_all', help='Name of model to train')
parser.add_argument('--seed', type=int, default='0', help='Sets the random seed for both numpy and tf')
parser.add_argument('--dataset', type=str, default='mnist', help='Name of dataset to load')
parser.add_argument('--vae_type', type=str, default='vae', help='Either a standard VAE (vae) or a convolutational VAE (conv)')
parser.add_argument('--batch_size', type=int, default='100', help='Sets the batch size')
parser.add_argument('--learn_rate', type=float, default='1e-5', help='Sets the learning rate')
parser.add_argument('--n_epochs', type=int, default='50', help='Number of training epochs')
parser.add_argument('--latent_dim', type=int, default='2', help='Latent dimensionality of the VAE')
parser.add_argument('--keep_prob', type=float, default='1.0', help='Sets the dropout rate')
parser.add_argument('--gpu_device', type=int, default=0, help='Specifying which GPU device to use')
parser.add_argument('--log_device_placement', type=bool, default=False, help='Logs the devices that operations and tensors are assigned to')
parser.add_argument('--gpu_memory_frac', type=float, default=0.8, help='Specifying what fraction of your GPU memory to occupy')
parser.add_argument('--display_step', type=int, default='5', help='Display step during training')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
1822,
29572,
198,
11748,
25064,
198,
198,
11748,
11192,
273,
11125,
355,
48700,... | 3.072347 | 622 |
"""public facing api."""
from typing import Dict, Tuple
import os
import json
import base64
from . import version as app_version
from cogeo_mosaic.utils import get_hash, _aws_put_data
from lambda_proxy.proxy import API
app = API(name="cogeo-watchbot-api", debug=True)
@app.route("/upload", methods=["POST"], cors=True)
@app.pass_event
def upload_job_spec(event: Dict, body: str) -> Tuple[str, str, str]:
"""Send Job definition to process."""
if event.get("isBase64Encoded"):
body = base64.b64decode(body).decode()
jobid = get_hash(body=body, version=app_version)
body = json.loads(body)
# Check if we are not overwriding a mosaic
mosaicid = body.get("mosaicid", jobid)
# TODO
# Validate schema
key = f"jobs/{jobid}.json"
bucket = os.environ["MOSAIC_BUCKET"]
_aws_put_data(key, bucket, json.dumps(body).encode("utf-8"))
return ("OK", "text/plain", mosaicid)
| [
37811,
11377,
6476,
40391,
526,
15931,
198,
198,
6738,
19720,
1330,
360,
713,
11,
309,
29291,
198,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
2779,
2414,
198,
198,
6738,
764,
1330,
2196,
355,
598,
62,
9641,
198,
6738,
763,
469,
... | 2.571031 | 359 |
import os
from .base import *
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = True
# SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
# ------------------------------------------------------------------------------
SECRET_KEY = env('DJANGO_SECRET_KEY')
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
# ------------------------------------------------------------------------------
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': 'localhost',
'PORT': 5432,
'NAME': env('POSTGRES_DBNAME'),
'USER': env('POSTGRES_USER'),
'PASSWORD': env('POSTGRES_PASSWORD'),
}
}
# EMAIL CONFIGURATION
# Set MailHog as the local mail server
# ------------------------------------------------------------------------------
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
| [
11748,
28686,
198,
6738,
764,
8692,
1330,
1635,
198,
198,
2,
16959,
198,
2,
16529,
26171,
198,
30531,
796,
6407,
198,
198,
2,
10729,
26087,
25626,
4261,
6234,
198,
2,
4091,
25,
3740,
1378,
31628,
13,
28241,
648,
404,
305,
752,
13,
7... | 3.247024 | 336 |
"""
All of pandas' ExtensionArrays.
See :ref:`extending.extension-types` for more.
"""
from pandas.core.arrays import (
Categorical, DatetimeArray, IntegerArray, IntervalArray, PandasArray,
PeriodArray, SparseArray, TimedeltaArray)
__all__ = [
'Categorical',
'DatetimeArray',
'IntegerArray',
'IntervalArray',
'PandasArray',
'PeriodArray',
'SparseArray',
'TimedeltaArray',
]
| [
37811,
198,
3237,
286,
19798,
292,
6,
27995,
3163,
20477,
13,
198,
198,
6214,
1058,
5420,
25,
63,
2302,
1571,
13,
2302,
3004,
12,
19199,
63,
329,
517,
13,
198,
37811,
198,
6738,
19798,
292,
13,
7295,
13,
3258,
592,
1330,
357,
198,
... | 2.552147 | 163 |
import mmcv
import numpy as np
from mmdet3d.core.points import BasePoints, get_points_type
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import LoadAnnotations
@PIPELINES.register_module()
class MyResize(object):
    """Resize images & bbox & mask.
    This transform resizes the input image to some scale. Bboxes and masks are
    then resized with the same scale factor. If the input dict contains the key
    "scale", then the scale in the input dict is used, otherwise the specified
    scale in the init method is used. If the input dict contains the key
    "scale_factor" (if MultiScaleFlipAug does not give img_scale but
    scale_factor), the actual scale will be computed by image shape and
    scale_factor.
    `img_scale` can either be a tuple (single-scale) or a list of tuple
    (multi-scale). There are 3 multiscale modes:
    - ``ratio_range is not None``: randomly sample a ratio from the ratio \
    range and multiply it with the image scale.
    - ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \
    sample a scale from the multiscale range.
    - ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \
    sample a scale from multiple scales.
    Args:
        img_scale (tuple or list[tuple]): Images scales for resizing.
        multiscale_mode (str): Either "range" or "value".
        ratio_range (tuple[float]): (min_ratio, max_ratio)
        keep_ratio (bool): Whether to keep the aspect ratio when resizing the
            image.
        bbox_clip_border (bool, optional): Whether clip the objects outside
            the border of the image. Defaults to True.
        backend (str): Image resize backend, choices are 'cv2' and 'pillow'.
            These two backends generates slightly different results. Defaults
            to 'cv2'.
        override (bool, optional): Whether to override `scale` and
            `scale_factor` so as to call resize twice. Default False. If True,
            after the first resizing, the existed `scale` and `scale_factor`
            will be ignored so the second resizing can be allowed.
            This option is a work-around for multiple times of resize in DETR.
            Defaults to False.
    """

    # NOTE(review): no __init__ is visible in this chunk, yet the methods read
    # self.img_scale, self.ratio_range, self.multiscale_mode, self.keep_ratio,
    # self.bbox_clip_border, self.backend and self.override. These are
    # presumably supplied by an initializer defined elsewhere — TODO confirm.

    @staticmethod
    def random_select(img_scales):
        """Randomly select an img_scale from given candidates.
        Args:
            img_scales (list[tuple]): Images scales for selection.
        Returns:
            (tuple, int): Returns a tuple ``(img_scale, scale_idx)``, \
                where ``img_scale`` is the selected image scale and \
                ``scale_idx`` is the selected index in the given candidates.
        """
        assert mmcv.is_list_of(img_scales, tuple)
        scale_idx = np.random.randint(len(img_scales))
        img_scale = img_scales[scale_idx]
        return img_scale, scale_idx

    @staticmethod
    def random_sample(img_scales):
        """Randomly sample an img_scale when ``multiscale_mode=='range'``.
        Args:
            img_scales (list[tuple]): Images scale range for sampling.
                There must be two tuples in img_scales, which specify the lower
                and upper bound of image scales.
        Returns:
            (tuple, None): Returns a tuple ``(img_scale, None)``, where \
                ``img_scale`` is sampled scale and None is just a placeholder \
                to be consistent with :func:`random_select`.
        """
        assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
        # Sample the long and short edges independently, each within the
        # [min, max] interval spanned by the two candidate scales.
        img_scale_long = [max(s) for s in img_scales]
        img_scale_short = [min(s) for s in img_scales]
        long_edge = np.random.randint(
            min(img_scale_long),
            max(img_scale_long) + 1)
        short_edge = np.random.randint(
            min(img_scale_short),
            max(img_scale_short) + 1)
        img_scale = (long_edge, short_edge)
        return img_scale, None

    @staticmethod
    def random_sample_ratio(img_scale, ratio_range):
        """Randomly sample an img_scale when ``ratio_range`` is specified.
        A ratio will be randomly sampled from the range specified by
        ``ratio_range``. Then it would be multiplied with ``img_scale`` to
        generate sampled scale.
        Args:
            img_scale (tuple): Images scale base to multiply with ratio.
            ratio_range (tuple[float]): The minimum and maximum ratio to scale
                the ``img_scale``.
        Returns:
            (tuple, None): Returns a tuple ``(scale, None)``, where \
                ``scale`` is sampled ratio multiplied with ``img_scale`` and \
                None is just a placeholder to be consistent with \
                :func:`random_select`.
        """
        assert isinstance(img_scale, tuple) and len(img_scale) == 2
        min_ratio, max_ratio = ratio_range
        assert min_ratio <= max_ratio
        # Uniform sample in [min_ratio, max_ratio), then scale both edges.
        ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
        scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
        return scale, None

    def _random_scale(self, results):
        """Randomly sample an img_scale according to ``ratio_range`` and
        ``multiscale_mode``.
        If ``ratio_range`` is specified, a ratio will be sampled and be
        multiplied with ``img_scale``.
        If multiple scales are specified by ``img_scale``, a scale will be
        sampled according to ``multiscale_mode``.
        Otherwise, single scale will be used.
        Args:
            results (dict): Result dict from :obj:`dataset`.
        Returns:
            dict: Two new keys 'scale` and 'scale_idx` are added into \
                ``results``, which would be used by subsequent pipelines.
        """
        # Priority: explicit ratio_range > single scale > multiscale mode.
        if self.ratio_range is not None:
            scale, scale_idx = self.random_sample_ratio(
                self.img_scale[0], self.ratio_range)
        elif len(self.img_scale) == 1:
            scale, scale_idx = self.img_scale[0], 0
        elif self.multiscale_mode == 'range':
            scale, scale_idx = self.random_sample(self.img_scale)
        elif self.multiscale_mode == 'value':
            scale, scale_idx = self.random_select(self.img_scale)
        else:
            raise NotImplementedError
        results['scale'] = scale
        results['scale_idx'] = scale_idx

    def _resize_img(self, results):
        """Resize images with ``results['scale']``."""
        # results['img'] holds a list of (multi-view) images; rebuild it as a
        # fresh list so the per-index writes below do not mutate the original.
        imgs = results['img']
        results['img'] = [imgs[i] for i in range(len(imgs))]
        for key in results.get('img_fields', ['img']):
            for idx in range(len(results['img'])):
                if self.keep_ratio:
                    img, scale_factor = mmcv.imrescale(
                        results[key][idx],
                        results['scale'],
                        return_scale=True,
                        backend=self.backend)
                    # the w_scale and h_scale has minor difference
                    # a real fix should be done in the mmcv.imrescale in the future
                    new_h, new_w = img.shape[:2]
                    h, w = results[key][idx].shape[:2]
                    w_scale = new_w / w
                    h_scale = new_h / h
                else:
                    img, w_scale, h_scale = mmcv.imresize(
                        results[key][idx],
                        results['scale'],
                        return_scale=True,
                        backend=self.backend)
                results[key][idx] = img
            # (w, h, w, h) factor, built from the LAST image processed — all
            # views are presumably the same size; TODO confirm.
            scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
                                    dtype=np.float32)
        # Shape metadata also comes from the last resized image.
        results['img_shape'] = img.shape
        # in case that there is no padding
        results['pad_shape'] = img.shape
        results['scale_factor'] = scale_factor
        results['keep_ratio'] = self.keep_ratio

    def _resize_bboxes(self, results):
        """Resize bounding boxes with ``results['scale_factor']``."""
        for key in results.get('bbox_fields', []):
            bboxes = results[key] * results['scale_factor']
            if self.bbox_clip_border:
                # Clip x coords to image width, y coords to image height.
                img_shape = results['img_shape']
                bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
                bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
            results[key] = bboxes

    def _resize_masks(self, results):
        """Resize masks with ``results['scale']``"""
        for key in results.get('mask_fields', []):
            if results[key] is None:
                continue
            if self.keep_ratio:
                results[key] = results[key].rescale(results['scale'])
            else:
                results[key] = results[key].resize(results['img_shape'][:2])

    def _resize_seg(self, results):
        """Resize semantic segmentation map with ``results['scale']``."""
        for key in results.get('seg_fields', []):
            if self.keep_ratio:
                gt_seg = mmcv.imrescale(
                    results[key],
                    results['scale'],
                    interpolation='nearest',
                    backend=self.backend)
            else:
                gt_seg = mmcv.imresize(
                    results[key],
                    results['scale'],
                    interpolation='nearest',
                    backend=self.backend)
            results['gt_semantic_seg'] = gt_seg

    def __call__(self, results):
        """Call function to resize images, bounding boxes, masks, semantic
        segmentation map.
        Args:
            results (dict): Result dict from loading pipeline.
        Returns:
            dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \
                'keep_ratio' keys are added into result dict.
        """
        if 'scale' not in results:
            if 'scale_factor' in results:
                # Derive an absolute (w, h) 'scale' from a scalar
                # 'scale_factor' and the first image's (h, w) shape.
                img_shape = results['img'][0].shape[:2]
                scale_factor = results['scale_factor']
                assert isinstance(scale_factor, float)
                results['scale'] = tuple(
                    [int(x * scale_factor) for x in img_shape][::-1])
            else:
                self._random_scale(results)
        else:
            if not self.override:
                assert 'scale_factor' not in results, (
                    'scale and scale_factor cannot be both set.')
            else:
                # override=True: drop previous scale info and re-sample,
                # allowing a second resize pass (DETR work-around).
                results.pop('scale')
                if 'scale_factor' in results:
                    results.pop('scale_factor')
                self._random_scale(results)
        self._resize_img(results)
        self._resize_bboxes(results)
        self._resize_masks(results)
        self._resize_seg(results)
        return results
@PIPELINES.register_module()
class MyNormalize(object):
    """Normalize every image in the multi-view image list.

    Adds the key "img_norm_cfg" recording the normalization applied.

    Args:
        mean (sequence): Mean values of 3 channels.
        std (sequence): Std values of 3 channels.
        to_rgb (bool): Whether to convert the image from BGR to RGB,
            default is true.
    """

    def __call__(self, results):
        """Normalize each image in ``results`` in place.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Normalized results; the 'img_norm_cfg' key is added.
        """
        # The image count is taken from 'img' for every field, mirroring the
        # original behaviour exactly.
        num_imgs = len(results['img'])
        for key in results.get('img_fields', ['img']):
            for idx in range(num_imgs):
                results[key][idx] = mmcv.imnormalize(
                    results[key][idx], self.mean, self.std, self.to_rgb)
        results['img_norm_cfg'] = dict(
            mean=self.mean, std=self.std, to_rgb=self.to_rgb)
        return results
@PIPELINES.register_module()
class MyPad(object):
    """Pad the image & mask.
    There are two padding modes: (1) pad to a fixed size and (2) pad to the
    minimum size that is divisible by some number.
    Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor",
    Args:
        size (tuple, optional): Fixed padding size.
        size_divisor (int, optional): The divisor of padded size.
        pad_val (float, optional): Padding value, 0 by default.
    """

    def _pad_img(self, results):
        """Pad every image according to ``self.size`` or ``self.size_divisor``.

        Bug fix: the fixed-size branch previously passed the whole *list* of
        images to ``mmcv.impad`` and discarded the result; it now pads each
        image individually and writes it back, matching the divisor branch.
        """
        for key in results.get('img_fields', ['img']):
            if self.size is not None:
                for idx in range(len(results[key])):
                    padded_img = mmcv.impad(
                        results[key][idx], shape=self.size, pad_val=self.pad_val)
                    results[key][idx] = padded_img
            elif self.size_divisor is not None:
                for idx in range(len(results[key])):
                    padded_img = mmcv.impad_to_multiple(
                        results[key][idx], self.size_divisor, pad_val=self.pad_val)
                    results[key][idx] = padded_img
        # Shape metadata comes from the last padded image; all views are
        # presumably the same size — TODO confirm.
        results['pad_shape'] = padded_img.shape
        results['pad_fixed_size'] = self.size
        results['pad_size_divisor'] = self.size_divisor

    def _pad_masks(self, results):
        """Pad masks according to ``results['pad_shape']``."""
        pad_shape = results['pad_shape'][:2]
        for key in results.get('mask_fields', []):
            results[key] = results[key].pad(pad_shape, pad_val=self.pad_val)

    def _pad_seg(self, results):
        """Pad semantic segmentation map according to
        ``results['pad_shape']``."""
        for key in results.get('seg_fields', []):
            results[key] = mmcv.impad(
                results[key], shape=results['pad_shape'][:2])

    def __call__(self, results):
        """Call function to pad images, masks, semantic segmentation maps.
        Args:
            results (dict): Result dict from loading pipeline.
        Returns:
            dict: Updated result dict.
        """
        self._pad_img(results)
        self._pad_masks(results)
        self._pad_seg(results)
        return results
@PIPELINES.register_module()
class LoadMultiViewImageFromFiles(object):
    """Load multi channel images from a list of separate channel files.
    Expects results['img_filename'] to be a list of filenames.
    Args:
        to_float32 (bool): Whether to convert the img to float32.
            Defaults to False.
        color_type (str): Color type of the file. Defaults to 'unchanged'.
    """

    # NOTE(review): __call__ also reads self.img_scale and calls self.pad,
    # neither of which is documented in Args, and no __init__ is visible in
    # this chunk — presumably set by an initializer elsewhere; confirm.

    def __call__(self, results):
        """Call function to load multi-view image from files.
        Args:
            results (dict): Result dict containing multi-view image filenames.
        Returns:
            dict: The result dict containing the multi-view image data. \
                Added keys and values are described below.
                - filename (str): Multi-view image filenames.
                - img (np.ndarray): Multi-view image arrays.
                - img_shape (tuple[int]): Shape of multi-view image arrays.
                - ori_shape (tuple[int]): Shape of original image arrays.
                - pad_shape (tuple[int]): Shape of padded image arrays.
                - scale_factor (float): Scale factor.
                - img_norm_cfg (dict): Normalization configuration of images.
        """
        filename = results['img_filename']
        # Stack all views along a new trailing axis; when img_scale is set,
        # each image is first passed through self.pad.
        if self.img_scale is None:
            img = np.stack(
                [mmcv.imread(name, self.color_type) for name in filename], axis=-1)
        else:
            img = np.stack(
                [self.pad(mmcv.imread(name, self.color_type)) for name in filename], axis=-1)
        if self.to_float32:
            img = img.astype(np.float32)
        results['filename'] = filename
        # unravel to list, see `DefaultFormatBundle` in formating.py
        # which will transpose each image separately and then stack into array
        results['img'] = [img[..., i] for i in range(img.shape[-1])]
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        # Set initial values for default meta_keys
        results['pad_shape'] = img.shape
        # results['scale_factor'] = [1.0, 1.0]
        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
        # Identity normalization config; actual normalization (if any) is
        # applied by a later pipeline stage.
        results['img_norm_cfg'] = dict(
            mean=np.zeros(num_channels, dtype=np.float32),
            std=np.ones(num_channels, dtype=np.float32),
            to_rgb=False)
        results['img_fields'] = ['img']
        return results

    def __repr__(self):
        """str: Return a string that describes the module."""
        return "{} (to_float32={}, color_type='{}')".format(
            self.__class__.__name__, self.to_float32, self.color_type)
@PIPELINES.register_module()
class LoadPointsFromMultiSweeps(object):
    """Load points from multiple sweeps.
    This is usually used for nuScenes dataset to utilize previous sweeps.
    Args:
        sweeps_num (int): Number of sweeps. Defaults to 10.
        load_dim (int): Dimension number of the loaded points. Defaults to 5.
        use_dim (list[int]): Which dimension to use. Defaults to [0, 1, 2, 4].
        file_client_args (dict): Config dict of file clients, refer to
            https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py
            for more details. Defaults to dict(backend='disk').
        pad_empty_sweeps (bool): Whether to repeat keyframe when
            sweeps is empty. Defaults to False.
        remove_close (bool): Whether to remove close points.
            Defaults to False.
        test_mode (bool): If test_model=True used for testing, it will not
            randomly sample sweeps but select the nearest N frames.
            Defaults to False.
    """

    def _load_points(self, pts_filename):
        """Private function to load point clouds data.
        Args:
            pts_filename (str): Filename of point clouds data.
        Returns:
            np.ndarray: An array containing point clouds data.
        """
        if self.file_client is None:
            self.file_client = mmcv.FileClient(**self.file_client_args)
        try:
            pts_bytes = self.file_client.get(pts_filename)
            points = np.frombuffer(pts_bytes, dtype=np.float32)
        except ConnectionError:
            # Fall back to direct filesystem access if the file client
            # backend is unreachable.
            mmcv.check_file_exist(pts_filename)
            if pts_filename.endswith('.npy'):
                points = np.load(pts_filename)
            else:
                points = np.fromfile(pts_filename, dtype=np.float32)
        return points

    def _remove_close(self, points, radius=1.0):
        """Removes point too close within a certain radius from origin.
        Args:
            points (np.ndarray): Sweep points.
            radius (float): Radius below which points are removed.
                Defaults to 1.0.
        Returns:
            np.ndarray: Points after removing.
        """
        if isinstance(points, np.ndarray):
            points_numpy = points
        elif isinstance(points, BasePoints):
            points_numpy = points.tensor.numpy()
        else:
            raise NotImplementedError
        # Keep a point unless BOTH |x| and |y| are inside the radius box.
        x_filt = np.abs(points_numpy[:, 0]) < radius
        y_filt = np.abs(points_numpy[:, 1]) < radius
        not_close = np.logical_not(np.logical_and(x_filt, y_filt))
        return points[not_close]

    def __call__(self, results):
        """Call function to load multi-sweep point clouds from files.
        Args:
            results (dict): Result dict containing multi-sweep point cloud \
                filenames.
        Returns:
            dict: The result dict containing the multi-sweep points data. \
                Added key and value are described below.
                - points (np.ndarray): Multi-sweep point cloud arrays.
        """
        points = results['points']
        # Column 4 is the time-lag channel; zero it for the key frame.
        points.tensor[:, 4] = 0
        sweep_points_list = [points]
        ts = results['timestamp']
        if self.pad_empty_sweeps and len(results['sweeps']) == 0:
            # No previous sweeps: pad by repeating the key frame.
            for _ in range(self.sweeps_num):
                if self.remove_close:
                    sweep_points_list.append(self._remove_close(points))
                else:
                    sweep_points_list.append(points)
        else:
            if len(results['sweeps']) <= self.sweeps_num:
                choices = np.arange(len(results['sweeps']))
            elif self.test_mode:
                choices = np.arange(self.sweeps_num)
            else:
                choices = np.random.choice(
                    len(results['sweeps']), self.sweeps_num, replace=False)
            for idx in choices:
                sweep = results['sweeps'][idx]
                points_sweep = self._load_points(sweep['data_path'])
                points_sweep = np.copy(points_sweep).reshape(-1, self.load_dim)
                if self.remove_close:
                    points_sweep = self._remove_close(points_sweep)
                # Sweep timestamps are in microseconds — presumably; the key
                # frame 'timestamp' is already in seconds. TODO confirm units.
                sweep_ts = sweep['timestamp'] / 1e6
                # Transform sweep points into the key frame's lidar frame.
                points_sweep[:, :3] = points_sweep[:, :3] @ sweep[
                    'sensor2lidar_rotation'].T
                points_sweep[:, :3] += sweep['sensor2lidar_translation']
                points_sweep[:, 4] = ts - sweep_ts
                points_sweep = points.new_point(points_sweep)
                sweep_points_list.append(points_sweep)
        # Bug fix: concatenation previously happened only inside the else
        # branch, so the sweeps appended by pad_empty_sweeps were built and
        # then silently discarded. Concatenate for both branches (matches
        # upstream mmdet3d behaviour).
        points = points.cat(sweep_points_list)
        points = points[:, self.use_dim]
        results['points'] = points
        return results

    def __repr__(self):
        """str: Return a string that describes the module."""
        return f'{self.__class__.__name__}(sweeps_num={self.sweeps_num})'
@PIPELINES.register_module()
class PointSegClassMapping(object):
    """Map original semantic class to valid category ids.

    Valid classes are remapped to ``0 .. len(valid_cat_ids) - 1``; every
    other class is mapped to ``len(valid_cat_ids)``.

    Args:
        valid_cat_ids (tuple[int]): A tuple of valid category.
    """

    def __call__(self, results):
        """Remap the point semantic mask to valid category ids.

        Args:
            results (dict): Result dict containing point semantic masks.

        Returns:
            dict: Result dict with remapped 'pts_semantic_mask'.
        """
        assert 'pts_semantic_mask' in results
        mask = results['pts_semantic_mask']
        # Id assigned to every class not listed in valid_cat_ids.
        invalid_id = len(self.valid_cat_ids)
        for pos in range(mask.shape[0]):
            label = mask[pos]
            mask[pos] = (self.valid_cat_ids.index(label)
                         if label in self.valid_cat_ids else invalid_id)
        results['pts_semantic_mask'] = mask
        return results

    def __repr__(self):
        """str: Return a string that describes the module."""
        return '{}(valid_cat_ids={})'.format(self.__class__.__name__,
                                             self.valid_cat_ids)
@PIPELINES.register_module()
class NormalizePointsColor(object):
    """Normalize color of points.

    Args:
        color_mean (list[float]): Mean color of the point cloud.
    """

    def __call__(self, results):
        """Normalize the color channels (columns 3:6) of the points.

        Args:
            results (dict): Result dict containing point clouds data.

        Returns:
            dict: Result dict with color-normalized 'points'.
        """
        points = results['points']
        assert points.shape[1] >= 6, \
            f'Expect points have channel >=6, got {points.shape[1]}'
        # Operator precedence in the original: the mean is divided by 256
        # first, then subtracted — reproduced exactly here.
        scaled_mean = np.array(self.color_mean) / 256.0
        points[:, 3:6] = points[:, 3:6] - scaled_mean
        results['points'] = points
        return results

    def __repr__(self):
        """str: Return a string that describes the module."""
        return '{}(color_mean={})'.format(self.__class__.__name__,
                                          self.color_mean)
@PIPELINES.register_module()
class LoadPointsFromFile(object):
    """Load Points From File.

    Load sunrgbd and scannet points from file.

    Args:
        load_dim (int): The dimension of the loaded points.
            Defaults to 6.
        coord_type (str): The type of coordinates of points cloud.
            Available options includes:
            - 'LIDAR': Points in LiDAR coordinates.
            - 'DEPTH': Points in depth coordinates, usually for indoor dataset.
            - 'CAMERA': Points in camera coordinates.
        use_dim (list[int]): Which dimensions of the points to be used.
            Defaults to [0, 1, 2]. For KITTI dataset, set use_dim=4
            or use_dim=[0, 1, 2, 3] to use the intensity dimension.
        shift_height (bool): Whether to use shifted height. Defaults to False.
        file_client_args (dict): Config dict of file clients, refer to
            https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py
            for more details. Defaults to dict(backend='disk').
    """

    def _load_points(self, pts_filename):
        """Private function to load point clouds data.

        Tries the configured file client first and falls back to reading
        straight from disk when the client cannot be reached.

        Args:
            pts_filename (str): Filename of point clouds data.

        Returns:
            np.ndarray: A flat float32 array containing point clouds data.
        """
        if self.file_client is None:
            # Created lazily on first use.
            self.file_client = mmcv.FileClient(**self.file_client_args)
        try:
            pts_bytes = self.file_client.get(pts_filename)
            points = np.frombuffer(pts_bytes, dtype=np.float32)
        except ConnectionError:
            mmcv.check_file_exist(pts_filename)
            if pts_filename.endswith('.npy'):
                points = np.load(pts_filename)
            else:
                points = np.fromfile(pts_filename, dtype=np.float32)
        return points

    def __call__(self, results):
        """Call function to load points data from file.

        Args:
            results (dict): Result dict containing point clouds data.

        Returns:
            dict: The result dict containing the point clouds data.
            Added key and value are described below.

            - points (np.ndarray): Point clouds data.
        """
        pts_filename = results['pts_filename']
        points = self._load_points(pts_filename)
        points = points.reshape(-1, self.load_dim)
        points = points[:, self.use_dim]
        attribute_dims = None
        if self.shift_height:
            # NOTE(review): np.percentile's q is in the 0-100 range, so 0.99
            # is the 0.99th percentile (near the minimum z), not the 99th --
            # this matches common indoor-detection preprocessing, but confirm.
            floor_height = np.percentile(points[:, 2], 0.99)
            height = points[:, 2] - floor_height
            points = np.concatenate([points, np.expand_dims(height, 1)], 1)
            attribute_dims = dict(height=3)
        points_class = get_points_type(self.coord_type)
        points = points_class(
            points, points_dim=points.shape[-1], attribute_dims=attribute_dims)
        results['points'] = points
        return results

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__ + '('
        repr_str += 'shift_height={}, '.format(self.shift_height)
        # Fixed: the closing parenthesis used to be emitted here
        # ('file_client_args={}), ') which produced a malformed repr like
        # 'LoadPointsFromFile(..., file_client_args=...), load_dim=..., use_dim=...)'.
        repr_str += 'file_client_args={}, '.format(self.file_client_args)
        repr_str += 'load_dim={}, '.format(self.load_dim)
        repr_str += 'use_dim={})'.format(self.use_dim)
        return repr_str
@PIPELINES.register_module()
class LoadAnnotations3D(LoadAnnotations):
    """Load Annotations3D.
    Load instance mask and semantic mask of points and
    encapsulate the items into related fields.
    Args:
        with_bbox_3d (bool, optional): Whether to load 3D boxes.
            Defaults to True.
        with_label_3d (bool, optional): Whether to load 3D labels.
            Defaults to True.
        with_mask_3d (bool, optional): Whether to load 3D instance masks
            for points. Defaults to False.
        with_seg_3d (bool, optional): Whether to load 3D semantic masks
            for points. Defaults to False.
        with_bbox (bool, optional): Whether to load 2D boxes.
            Defaults to False.
        with_label (bool, optional): Whether to load 2D labels.
            Defaults to False.
        with_mask (bool, optional): Whether to load 2D instance masks.
            Defaults to False.
        with_seg (bool, optional): Whether to load 2D semantic masks.
            Defaults to False.
        poly2mask (bool, optional): Whether to convert polygon annotations
            to bitmasks. Defaults to True.
        seg_3d_dtype (dtype, optional): Dtype of 3D semantic masks.
            Defaults to int64
        file_client_args (dict): Config dict of file clients, refer to
            https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py
            for more details.
    """
    def _load_bboxes_3d(self, results):
        """Private function to load 3D bounding box annotations.
        Args:
            results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
        Returns:
            dict: The dict containing loaded 3D bounding box annotations.
        """
        # Copies the boxes prepared by the dataset into the result dict and
        # registers the key so downstream transforms can find all 3D boxes.
        results['gt_bboxes_3d'] = results['ann_info']['gt_bboxes_3d']
        results['bbox3d_fields'].append('gt_bboxes_3d')
        return results
    def _load_labels_3d(self, results):
        """Private function to load label annotations.
        Args:
            results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
        Returns:
            dict: The dict containing loaded label annotations.
        """
        results['gt_labels_3d'] = results['ann_info']['gt_labels_3d']
        return results
    def _load_masks_3d(self, results):
        """Private function to load 3D mask annotations.
        Args:
            results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
        Returns:
            dict: The dict containing loaded 3D mask annotations.
        """
        pts_instance_mask_path = results['ann_info']['pts_instance_mask_path']
        # The file client is created lazily on first use.
        if self.file_client is None:
            self.file_client = mmcv.FileClient(**self.file_client_args)
        try:
            mask_bytes = self.file_client.get(pts_instance_mask_path)
            # NOTE(review): np.int is a deprecated alias (removed in
            # NumPy >= 1.24); np.int64/np.int32 would be explicit -- confirm
            # the target NumPy version before changing the dtype.
            pts_instance_mask = np.frombuffer(mask_bytes, dtype=np.int)
        except ConnectionError:
            # Fall back to reading directly from local disk.
            mmcv.check_file_exist(pts_instance_mask_path)
            # NOTE(review): np.long is likewise a deprecated alias.
            pts_instance_mask = np.fromfile(
                pts_instance_mask_path, dtype=np.long)
        results['pts_instance_mask'] = pts_instance_mask
        results['pts_mask_fields'].append('pts_instance_mask')
        return results
    def _load_semantic_seg_3d(self, results):
        """Private function to load 3D semantic segmentation annotations.
        Args:
            results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
        Returns:
            dict: The dict containing the semantic segmentation annotations.
        """
        pts_semantic_mask_path = results['ann_info']['pts_semantic_mask_path']
        if self.file_client is None:
            self.file_client = mmcv.FileClient(**self.file_client_args)
        try:
            mask_bytes = self.file_client.get(pts_semantic_mask_path)
            # add .copy() to fix read-only bug
            pts_semantic_mask = np.frombuffer(
                mask_bytes, dtype=self.seg_3d_dtype).copy()
        except ConnectionError:
            mmcv.check_file_exist(pts_semantic_mask_path)
            # NOTE(review): np.long is a deprecated NumPy alias -- see above.
            pts_semantic_mask = np.fromfile(
                pts_semantic_mask_path, dtype=np.long)
        results['pts_semantic_mask'] = pts_semantic_mask
        results['pts_seg_fields'].append('pts_semantic_mask')
        return results
    def __call__(self, results):
        """Call function to load multiple types annotations.
        Args:
            results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
        Returns:
            dict: The dict containing loaded 3D bounding box, label, mask and
                semantic segmentation annotations.
        """
        # Load the 2D annotations first (handled by the parent class), then
        # the 3D ones selected by the with_* flags.
        results = super().__call__(results)
        if self.with_bbox_3d:
            results = self._load_bboxes_3d(results)
            # Only the 3D-box loader may abort the pipeline.
            if results is None:
                return None
        if self.with_label_3d:
            results = self._load_labels_3d(results)
        if self.with_mask_3d:
            results = self._load_masks_3d(results)
        if self.with_seg_3d:
            results = self._load_semantic_seg_3d(results)
        return results
    def __repr__(self):
        """str: Return a string that describes the module."""
        indent_str = '    '
        repr_str = self.__class__.__name__ + '(\n'
        repr_str += f'{indent_str}with_bbox_3d={self.with_bbox_3d}, '
        repr_str += f'{indent_str}with_label_3d={self.with_label_3d}, '
        repr_str += f'{indent_str}with_mask_3d={self.with_mask_3d}, '
        repr_str += f'{indent_str}with_seg_3d={self.with_seg_3d}, '
        repr_str += f'{indent_str}with_bbox={self.with_bbox}, '
        repr_str += f'{indent_str}with_label={self.with_label}, '
        repr_str += f'{indent_str}with_mask={self.with_mask}, '
        repr_str += f'{indent_str}with_seg={self.with_seg}, '
        repr_str += f'{indent_str}poly2mask={self.poly2mask})'
        return repr_str
| [
11748,
8085,
33967,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
8085,
15255,
18,
67,
13,
7295,
13,
13033,
1330,
7308,
40710,
11,
651,
62,
13033,
62,
4906,
198,
6738,
8085,
15255,
13,
19608,
292,
1039,
13,
38272,
1330,
350,
406... | 2.200555 | 15,143 |
import os
from multiprocessing.dummy import Pool as ThreadPool
import requests
import xlsxwriter
from pyslurpers import JsonSlurper
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
if __name__ == "__main__":
    # Builds a per-ticket time-spent report from GitLab and writes it to an
    # .xlsx workbook, then cross-checks commits against recorded time.
    # NOTE(review): dateToInt, loadTicketsFromCommits, loadIssueInfo and
    # format_date are not defined in this chunk -- presumably defined earlier
    # in the file; total_seconds is only initialised and printed here, so it
    # is presumably accumulated inside loadIssueInfo via a global -- confirm.
    total_seconds = 0
    config = JsonSlurper.create(
        file_name="local.settings.json" if os.path.exists("local.settings.json") else "settings.json"
    )
    # Note: both dates are inclusive
    sinceDateInt = dateToInt(config.since_date)
    untilDateInt = dateToInt(config.until_date)
    workitems = []
    s = requests.Session()
    # Sometimes Gitlab is unstable, and also has load limits
    retries = Retry(total=8,
                    backoff_factor=0.4,
                    status_forcelist=[500, 502, 503, 504, 429])
    s.mount('http://', HTTPAdapter(max_retries=retries))
    s.mount('https://', HTTPAdapter(max_retries=retries))
    tickets = loadTicketsFromCommits(config.since_date, config.until_date)
    # Fetch issue info concurrently (network-bound, so threads are fine).
    pool = ThreadPool(config.threads)
    results = pool.map(loadIssueInfo, tickets.values())
    pool.close()
    pool.join()
    workitems.sort(key=lambda wi: wi['issue'])
    print('-' * 80)
    for workitem in workitems:
        ticketNumber = workitem['issue']
        print('{}\t{}\t{}'.format(ticketNumber, format_date(workitem['created']), workitem['time']))
    print('-' * 80)
    print('{} total spent {} h'.format(config.user_name, total_seconds / 3600))
    # Write the spreadsheet: header row, one row per work item, totals row.
    filename = 'timespent_%s_%s.xlsx' % (config.since_date, config.until_date)
    workbook = xlsxwriter.Workbook(filename)
    worksheet = workbook.add_worksheet()
    hours_format = workbook.add_format({'num_format': '#,##0.000'})
    bold_format = workbook.add_format({'bold': True})
    worksheet.write(0, 0, 'Time spent since %s till %s' % (config.since_date, config.until_date), bold_format)
    row = 3
    worksheet.write(2, 0, 'Ticket', bold_format)
    worksheet.write(2, 1, 'Date', bold_format)
    worksheet.write(2, 2, 'Time Spent', bold_format)
    for workitem in workitems:
        ticketNumber = workitem['issue']
        spentAt = format_date(workitem['created'])
        amountOfTime = workitem['time']
        worksheet.write(row, 0, ticketNumber)
        worksheet.write(row, 1, spentAt)
        worksheet.write(row, 2, amountOfTime, hours_format)
        row = row + 1
    worksheet.write(row, 0, 'Total:', bold_format)
    worksheet.write(row, 2, '=SUM(C{}:C{})'.format(4, row), hours_format)
    worksheet.set_column(0, 0, 10)
    worksheet.set_column(1, 1, 20)
    worksheet.set_column(2, 2, 10)
    workbook.close()
    # 'start' is a Windows shell command -- this script presumably targets
    # Windows only.
    os.system('start %s' % filename)
    # check: there are commits on this day but no time spent
    for workitem in workitems:
        spentAt = dateToInt(workitem['created'])
        ticketNumber = workitem['issue']
        if spentAt in tickets[ticketNumber].dates:
            tickets[ticketNumber].dates.remove(spentAt)
        else:
            print("Warn: Ticket {}: time spent but no commits: {}".format(ticketNumber, spentAt))
    # Any dates left over had commits without matching time entries.
    for ticket in tickets:
        dates_without_spent_time = tickets[ticket].dates
        if len(dates_without_spent_time) > 0:
            print("Warn: Ticket: {}: there are commits but no time spent: {}".format(ticket, dates_without_spent_time))
    print("Note: Tickets that don't have merged MRs in specified period are not counted")
| [
11748,
28686,
198,
6738,
18540,
305,
919,
278,
13,
67,
13513,
1330,
19850,
355,
14122,
27201,
198,
198,
11748,
7007,
198,
11748,
2124,
7278,
87,
16002,
198,
6738,
279,
893,
75,
333,
19276,
1330,
449,
1559,
11122,
333,
525,
198,
6738,
... | 2.537594 | 1,330 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import parametrize
from ducktape.mark.resource import cluster
from ducktape.tests.test import Test
from ducktape.utils.util import wait_until
from kafkatest.services.kafka import KafkaService
from kafkatest.services.streams import StreamsBrokerCompatibilityService
from kafkatest.services.verifiable_consumer import VerifiableConsumer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.version import LATEST_0_11_0, LATEST_0_10_2, LATEST_0_10_1, LATEST_0_10_0, LATEST_1_0, LATEST_1_1, \
LATEST_2_0, LATEST_2_1, LATEST_2_2, LATEST_2_3, LATEST_2_4, LATEST_2_5, LATEST_2_6, LATEST_2_7, LATEST_2_8, \
KafkaVersion
class StreamsBrokerCompatibility(Test):
"""
These tests validates that
- Streams works for older brokers 0.11 (or newer)
- Streams w/ EOS-alpha works for older brokers 0.11 (or newer)
- Streams w/ EOS-v2 works for older brokers 2.5 (or newer)
- Streams fails fast for older brokers 0.10.0, 0.10.2, and 0.10.1
- Streams w/ EOS-v2 fails fast for older brokers 2.4 or older
"""
input = "brokerCompatibilitySourceTopic"
output = "brokerCompatibilitySinkTopic"
@cluster(num_nodes=4)
@parametrize(broker_version=str(LATEST_2_4))
@parametrize(broker_version=str(LATEST_2_3))
@parametrize(broker_version=str(LATEST_2_2))
@parametrize(broker_version=str(LATEST_2_1))
@parametrize(broker_version=str(LATEST_2_0))
@parametrize(broker_version=str(LATEST_1_1))
@parametrize(broker_version=str(LATEST_1_0))
@parametrize(broker_version=str(LATEST_0_11_0))
@cluster(num_nodes=4)
@parametrize(broker_version=str(LATEST_2_6))
@parametrize(broker_version=str(LATEST_2_5))
@parametrize(broker_version=str(LATEST_2_4))
@parametrize(broker_version=str(LATEST_2_3))
@parametrize(broker_version=str(LATEST_2_2))
@parametrize(broker_version=str(LATEST_2_1))
@parametrize(broker_version=str(LATEST_2_0))
@parametrize(broker_version=str(LATEST_1_1))
@parametrize(broker_version=str(LATEST_1_0))
@parametrize(broker_version=str(LATEST_0_11_0))
@cluster(num_nodes=4)
@parametrize(broker_version=str(LATEST_2_8))
@parametrize(broker_version=str(LATEST_2_7))
@parametrize(broker_version=str(LATEST_2_6))
@parametrize(broker_version=str(LATEST_2_5))
@cluster(num_nodes=4)
@parametrize(broker_version=str(LATEST_0_10_2))
@parametrize(broker_version=str(LATEST_0_10_1))
@parametrize(broker_version=str(LATEST_0_10_0))
@cluster(num_nodes=4)
@parametrize(broker_version=str(LATEST_2_4))
@parametrize(broker_version=str(LATEST_2_3))
@parametrize(broker_version=str(LATEST_2_2))
@parametrize(broker_version=str(LATEST_2_1))
@parametrize(broker_version=str(LATEST_2_0))
@parametrize(broker_version=str(LATEST_1_1))
@parametrize(broker_version=str(LATEST_1_0))
@parametrize(broker_version=str(LATEST_0_11_0))
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
393,
517,
198,
2,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
9387,
351,
198,
2,
428,
670,
329,
3224,
1321,
5115,
6634,
9238,
13,
198,
2,
383,
7054,
37,
... | 2.423253 | 1,531 |
import json
import os
import time
import unittest
from contextlib import closing
import requests
from arch.api.utils import file_utils
from fate_flow.settings import HTTP_PORT, API_VERSION
if __name__ == '__main__':
    # Discover and run every unittest.TestCase defined in this module.
    unittest.main()
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
555,
715,
395,
198,
6738,
4732,
8019,
1330,
9605,
198,
198,
11748,
7007,
198,
6738,
3934,
13,
15042,
13,
26791,
1330,
2393,
62,
26791,
198,
198,
6738,
10030,
62,
11125,
13,
... | 3.24 | 75 |
from django.conf.urls import patterns, url
from projects.views import ProjectsView
# Routes the app root to the project list view.
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10 -- this module targets an older Django; a plain list of
# url()/path() entries is the modern equivalent.
urlpatterns = patterns('',
    url(r'^$', ProjectsView.as_view(), name='list'),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
19016,
198,
6738,
4493,
13,
33571,
1330,
29898,
7680,
198,
198,
6371,
33279,
82,
796,
7572,
10786,
3256,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
3,
3256,
29898,
7680,
13,... | 3.074074 | 54 |
# dataframe: a data-frame implementation using method piping
#
# Copyright (C) 2016 Simon Dirmeier
#
# This file is part of dataframe.
#
# dataframe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dataframe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dataframe. If not, see <http://www.gnu.org/licenses/>.
#
#
# @author = 'Simon Dirmeier'
# @email = 'mail@simon-dirmeier.net'
class DataFrameColumn:
    """A single named column of a dataframe.

    The column's values and name are stored by the (not shown) initialiser
    in the private ``__vals`` / ``__colname`` attributes.
    """

    def size(self):
        """Return the number of items in the column.

        :return: the item count
        """
        return len(self.values)

    @property
    def values(self):
        """The values stored in the column.

        :return: the column values
        """
        return self.__vals

    @property
    def colname(self):
        """The name of the column.

        :return: the column name
        """
        return self.__colname
| [
2,
1366,
14535,
25,
257,
1366,
12,
14535,
7822,
1262,
2446,
48426,
198,
2,
198,
2,
15069,
357,
34,
8,
1584,
11288,
36202,
49468,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
1366,
14535,
13,
198,
2,
198,
2,
1366,
14535,
318,
1479,
... | 2.933063 | 493 |
my_list = ['Monty', 'Python']
delimiter = ' '
# str.join concatenates the sequence with `delimiter` between the elements.
output = str.join(delimiter, my_list)
print(output)
1820,
62,
4868,
796,
37250,
26031,
88,
3256,
705,
37906,
20520,
198,
12381,
320,
2676,
796,
705,
705,
198,
22915,
796,
46728,
2676,
13,
22179,
7,
1820,
62,
4868,
8,
198,
4798,
7,
22915,
8
] | 2.628571 | 35 |
# TODO(colin): fix these lint errors (http://pep8.readthedocs.io/en/release-1.7.x/intro.html#error-codes)
# pep8-disable:E128
"""Tests for compile_topic_icons.py"""
from __future__ import absolute_import
import cStringIO
import json
import os
import shutil
import PIL.Image
from shared.testutil import testsize
from kake import make
from kake.lib import compile_rule
import kake.lib.testutil
from topic_icons import icon_util
# Fixed git commit sha used as a deterministic fixture value.
FAKE_HEAD = 'a4fe8ce585e1ddfdf179a77641fa6a2709eff22b'
# Test fixture mimicking the manifest produced by compile_topic_icons.py:
# rendered sizes/formats, icon-to-file mappings, per-file md5s and the CDN
# base URL.
icon_manifest = {
    'sizes': ['128c', '416', '608', '800', '1200'],
    'formats': {
        'png': ['png'],
        'jpg': ['jpeg'],
        'jpeg': ['jpeg'],
    },
    'inherited_icons': {
        'a': 'foo.png',
        'c': 'baz.jpg',
        'b': 'bar.png',
        'd': 'foo.png',
    },
    'non_inherited_icons': {
        'e': 'wop.png',
    },
    'md5sums': {
        'foo.png': '3f5aaa',
        'baz.jpg': '43e0e1',
        'wop.png': '19e37f',
        'bar.png': '31a6b7',
    },
    'base_url': 'https://cdn.kastatic.org/genfiles/topic-icons/icons/',
    'webapp_commit': FAKE_HEAD,
}
# Largest supported render width, and the matching height derived from the
# configured aspect ratio.
_MAX_WIDTH = max([w for (w, _) in icon_util.SUPPORTED_SIZE_CONFIGS])
_MAX_HEIGHT = ((_MAX_WIDTH * icon_util.ASPECT_RATIO[1]) /
               icon_util.ASPECT_RATIO[0])
class TopicIconsTest(kake.lib.testutil.KakeTestBase):
    """Sets up the filesystem."""
    # NOTE(review): no setUp/helpers are visible here -- the dump appears to
    # have stripped method bodies; confirm against the original file.
@testsize.medium
class CompileTopicIconsTest(TopicIconsTest):
    """Sets up the filesystem."""
    def _build_icon_with_name(self, filename, size_config=(_MAX_WIDTH, False),
                              format='png'):
        """Build an icon through Kake and return the resulting image."""
        # `format` shadows the builtin of the same name -- kept for interface
        # compatibility.
        size_config_str = icon_util.serialize_size_config(size_config)
        outfile = 'genfiles/topic-icons/icons-src/%s.%s.%s' % (
            filename, size_config_str, format)
        make.build(outfile)
        return PIL.Image.open(self._abspath(outfile))
@testsize.medium
class CompileTopicIconManifestTest(TopicIconsTest):
    """Sets up the filesystem."""
| [
2,
16926,
46,
7,
4033,
259,
2599,
4259,
777,
300,
600,
8563,
357,
4023,
1378,
431,
79,
23,
13,
961,
83,
704,
420,
82,
13,
952,
14,
268,
14,
20979,
12,
16,
13,
22,
13,
87,
14,
600,
305,
13,
6494,
2,
18224,
12,
40148,
8,
198,
... | 2.160215 | 930 |
#!/usr/bin/env python3
import sys
from common import eprint
from pathlib import Path
import argparse
import pathlib
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this chunk -- presumably defined
    # earlier in the file; confirm before relying on this entry point.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
25064,
198,
6738,
2219,
1330,
304,
4798,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
1822,
29572,
198,
11748,
3108,
8019,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
... | 3.018868 | 53 |
# coding=utf-8
from test_base import BaseTestCase
if __name__ == '__main__':
    # NOTE(review): only BaseTestCase is imported above, but `TestCase` is
    # referenced here -- as written this looks like it raises NameError;
    # confirm whether unittest.main() (or BaseTestCase) was intended.
    TestCase.main()
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
6738,
1332,
62,
8692,
1330,
7308,
14402,
20448,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
6208,
20448,
13,
12417,
3419,
198
] | 2.589744 | 39 |
import cv2
import numpy as np
import matplotlib.pyplot as plt
def preprocess(image, erodeK, blurK, blurSigma, lowT, upT):
    """Erode, blur and Canny-edge-detect an image.

    Args
    - image: numpy nd-array of dim (m, n, c)
    - erodeK: size of kernel for the erode operation ((erodeK, erodeK))
    - blurK: size of kernel for the blur operation ((blurK, blurK))
    - blurSigma: sigma used when applying the blur
    - lowT: low threshold value for the Canny operator
    - upT: high threshold value for the Canny operator

    Returns
    - edge-image: numpy nd-array of dim (m, n)
    """
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    kernel = np.ones((erodeK, erodeK), np.uint8)
    # NOTE(review): the third positional argument of cv2.erode/cv2.blur is
    # `dst`, not `iterations`/`sigma` -- confirm these calls behave as the
    # docstring claims (kept as-is to preserve behavior).
    eroded = cv2.erode(grayscale, kernel, 1)
    blurred = cv2.blur(eroded, (blurK, blurK), blurSigma)
    return cv2.Canny(blurred, lowT, upT)
def hough_peaks(H, numpeaks=1, threshold=100, nhood_size=5):
    """
    Returns the top numpeaks from the accumulator H.

    Args
    - H: Hough Space (Voting Accumulator)
    - numpeaks: Number of peaks to return
    - threshold: Minimum number of votes to get considered for picked
    - nhood_size: neighborhood size. Only one peak will be chosen from
      any neighborhood.

    Returns
    - peak coordinates: numpy nd-array of dim (numpeaks, 2), in (row, col)
      order (fewer rows if fewer peaks exceed the threshold).
    """
    peaks = np.zeros((numpeaks,2), dtype=np.uint64)
    temp_H = H.copy()
    for i in range(numpeaks):
        _,max_val,_,max_loc = cv2.minMaxLoc(temp_H) # find maximum peak
        if max_val > threshold:
            # max_loc from cv2.minMaxLoc is (x, y), i.e. (col, row); stored
            # as-is and reversed on return.
            peaks[i] = max_loc
            (c,r) = max_loc
            # `//` with a float operand yields a float half-width.
            t = nhood_size//2.0
            # Zero out the neighborhood so the next iteration finds a
            # different peak.
            # NOTE(review): clip() is not defined in this chunk -- presumably
            # a helper that clamps negative slice bounds to 0; confirm.
            temp_H[clip(r-t):int(r+t+1), clip(c-t):int(c+t+1)] = 0
        else:
            # No remaining cell exceeds the threshold: truncate and stop.
            peaks = peaks[:i]
            break
    return peaks[:,::-1]
def hough_lines_draw(img, outfile, peaks, rhos, thetas):
    """Draw the detected Hough lines on ``img`` and save the result.

    Args
    - img: Image on which lines will be drawn
    - outfile: The output file. The file will be saved.
    - peaks: peaks returned by hough_peaks
    - rhos: array of rhos used in Hough Space
    - thetas: array of thetas used in Hough Space

    Returns
    - img: after drawing lines on it.
    """
    for rho_idx, theta_idx in peaks:
        rho = rhos[rho_idx]
        theta = thetas[theta_idx] * np.pi / 180.0
        cos_t = np.cos(theta)
        sin_t = np.sin(theta)
        # Point on the line closest to the origin.
        x0 = cos_t * rho
        y0 = sin_t * rho
        # Two far-apart points along the line direction (-sin, cos).
        pt1 = (int(x0 + 1000 * (-sin_t)), int(y0 + 1000 * (cos_t)))
        pt2 = (int(x0 - 1000 * (-sin_t)), int(y0 - 1000 * (cos_t)))
        cv2.line(img, pt1, pt2, (0, 0, 255), 2)
    cv2.imwrite(outfile, img)
    return img
def hough_circles_draw(img, outfile, peaks, radius):
    """Draw the detected Hough circles on ``img`` and save the result.

    Args
    - img: Image on which circles will be drawn
    - outfile: The output file. The file will be saved.
    - peaks: peaks returned by hough_peaks; contains (y, x) coordinates.
    - radius: radius of the circle

    Returns
    - img: after drawing circles on it.
    """
    green = (0, 255, 0)
    for center_yx in peaks:
        # hough_peaks yields (y, x) but cv2 expects (x, y) -- hence reversed.
        cv2.circle(img, tuple(center_yx[::-1]), radius, green, 2)
    cv2.imwrite(outfile, img)
    return img
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
4299,
662,
14681,
7,
9060,
11,
1931,
1098,
42,
11,
23671,
42,
11,
23671,
50,
13495,
11,
1877,
51,
11,
... | 2.1804 | 1,602 |
#=========================================================================
# __init__
#=========================================================================
from ValRdyBundle import InValRdyBundle, OutValRdyBundle
from ParentChildBundle import ParentReqRespBundle, ChildReqRespBundle
from NetMsg import NetMsg
from MemMsg import MemMsg, MemReqMsg, MemRespMsg
from MemMsg import MemMsg4B, MemReqMsg4B, MemRespMsg4B
from MemMsg import MemMsg16B, MemReqMsg16B, MemRespMsg16B
from XcelMsg import XcelReqMsg, XcelRespMsg
from CP2Msg import CP2Msg
from valrdy import valrdy_to_str
| [
2,
23926,
2559,
28,
198,
2,
11593,
15003,
834,
198,
2,
23926,
2559,
28,
198,
198,
6738,
3254,
49,
9892,
33,
31249,
220,
220,
220,
220,
220,
220,
220,
1330,
554,
7762,
49,
9892,
33,
31249,
11,
3806,
7762,
49,
9892,
33,
31249,
198,
... | 2.540441 | 272 |
# Beginner exercise: build a dinner-guest list, grow it, then shrink it back
# down to an empty list, printing each step.
guest = ['Alberto', 'Miguel', 'Jose', 'Luis']

# First round of invitations.
for invitado in guest:
    print('Hola ' + invitado + ', te gustaría cenar esta noche?')
print()

# Miguel cannot make it; replace him with Eduardo.
print(guest[1])
guest[1] = 'Eduardo'
print()

# Invite the updated group again.
for invitado in guest:
    print('Hola ' + invitado + ', te gustaría cenar esta noche?')
print()

print('Hey he encontrado una mesa mas grande')
guest.insert(0, 'Rocio')
guest.insert(2, 'Alejandra')
guest.insert(5, 'Irma')
print()

# Greet the enlarged table.
# NOTE(review): only the first six guests are greeted although the list now
# holds seven -- this mirrors the original script exactly.
for invitado in guest[:6]:
    print('Hola ' + invitado + ', te gustaría cenar esta noche?')
print()

print('Una gran disculpa, pero lamentablemente solo podre invitar a dos personas :(')
print()

# Un-invite guests from the end of the list, one at a time.
popped_guest = guest.pop()
print('Lo siento ' + popped_guest + ' pero no puedo invitarte a cenar')
print()

for _ in range(4):
    popped_guest = guest.pop()
    print('Lo siento ' + popped_guest + ' pero no puedo invitarte a cenar')
print()

# The two remaining guests keep their invitation, then the list is emptied.
print('Hola ' + guest[0] + ', aun eres un invitado a la cena')
print('Hola ' + guest[1] + ', aun eres un invitado a la cena')
del guest[1]
del guest[0]
print()
print(guest)
| [
5162,
395,
28,
17816,
2348,
32371,
41707,
44,
328,
2731,
41707,
23409,
41707,
25596,
271,
20520,
198,
4798,
10786,
39,
5708,
705,
10,
5162,
395,
58,
15,
48688,
3256,
573,
35253,
283,
29690,
269,
268,
283,
1556,
64,
299,
30848,
8348,
8... | 2.27673 | 795 |
""" This file defines utilities used to enforce user roles. """
from flask import current_user
from functools import wraps
from enum import Enum
| [
37811,
770,
2393,
15738,
20081,
973,
284,
4605,
2836,
9176,
13,
37227,
198,
6738,
42903,
1330,
1459,
62,
7220,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
33829,
1330,
2039,
388,
198
] | 4.393939 | 33 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018 MIT Probabilistic Computing Project.
# Released under Apache 2.0; refer to LICENSE.txt.
from collections import Counter
import numpy as np
from cgpm.utils.general import get_prng
from cgpm2.crp import CRP
from cgpm2.normal import Normal
from cgpm2.flexible_rowmix import FlexibleRowMixture
from cgpm2.transition_hypers import transition_hyper_grids
from cgpm2.transition_hypers import transition_hypers
from cgpm2.transition_rows import transition_rows
from cgpm2.walks import get_cgpms_by_output_index
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
66,
8,
2864,
17168,
30873,
14991,
2569,
38589,
4935,
13,
198,
2,
28728,
739,
24843,
362,
13,
15,
26,
3522,
284,
38559,
24290,
13,
14116,
13,
198,
... | 2.941799 | 189 |
import logging
import time
from sys import stderr, stdout, exit
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace
from signal import SIGINT, SIGTERM, Signals, sigwait
from typing import Any, Dict, Optional
from daemon.daemon import DaemonContext # type: ignore[reportMissingTypeStubs]
from pid import PidFile # type: ignore[reportMissingTypeStubs]
__version__ = "1.1.0"
LOGGER = logging.getLogger(__name__)
| [
11748,
18931,
198,
11748,
640,
198,
6738,
25064,
1330,
336,
1082,
81,
11,
14367,
448,
11,
8420,
198,
6738,
1822,
29572,
1330,
45751,
7469,
13185,
22087,
8479,
1436,
11,
45751,
46677,
11,
28531,
10223,
198,
6738,
6737,
1330,
33993,
12394,
... | 3.360902 | 133 |
# -*- coding: utf-8 -*-
"""
Utility/common code of library.
"""
__author__ = 'Grzegorz Latuszek, Marcin Usielski, Michal Ernst, Tomasz Krol'
__copyright__ = 'Copyright (C) 2018-2021, Nokia'
__email__ = 'grzegorz.latuszek@nokia.com, marcin.usielski@nokia.com, michal.ernst@nokia.com, tomasz.krol@nokia.com'
import logging
import time
import gc
import pprint
import os
import signal
import traceback
from functools import partial
from functools import wraps
from types import FunctionType, MethodType
from moler.connection_observer import ConnectionObserver
from moler.exceptions import MolerException
from moler.exceptions import ExecutionException
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
18274,
879,
14,
11321,
2438,
286,
5888,
13,
198,
37811,
198,
198,
834,
9800,
834,
796,
705,
8642,
89,
1533,
273,
89,
5476,
385,
43130,
11,
13067,
259,
4021... | 2.990826 | 218 |
import time
import socket
import re
import select
import errno
import os
import sys
import platform
import subprocess as ssubprocess
import sshuttle.helpers as helpers
from sshuttle.helpers import log, debug1, debug2, debug3
# Re-poll interval for host discovery (seconds) and the shorter netstat poll.
POLL_TIME = 60 * 15
NETSTAT_POLL_TIME = 30
# On-disk cache of previously discovered hostname mappings.
CACHEFILE = os.path.expanduser('~/.sshuttle.hosts')
# NOTE(review): presumably cleared elsewhere in the file once the
# corresponding nmb/smb lookup tool turns out to be unusable -- confirm.
_nmb_ok = True
_smb_ok = True
hostnames = {}
queue = {}
try:
    null = open('/dev/null', 'wb')
except IOError:
    _, e = sys.exc_info()[:2]
    log('warning: %s\n' % e)
    # Fallback sink: a shell loop that reads and discards everything written
    # to it, emulating /dev/null where it cannot be opened.
    null = os.popen("sh -c 'while read x; do :; done'", 'wb', 4096)
| [
11748,
640,
198,
11748,
17802,
198,
11748,
302,
198,
11748,
2922,
198,
11748,
11454,
3919,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
3859,
198,
198,
11748,
850,
14681,
355,
264,
7266,
14681,
198,
11748,
26678,
315,
7100,
13,
16794... | 2.546256 | 227 |
# Library imports
import sys
import numpy as np
# User defined library imports
from file_read import read_shp, get_values
from grid import create_grid
from a_star import search
from prompt_read import read_search_prompt, read_grid_prompt
# Locate the crime-data shapefile (loaded without its extension).
path = "shape/crime_dt"
# Read the shapefile into a data frame plus its bounding box.
df, bbox = read_shp(path)
# Convert the frame into a float32 numpy array of point coordinates.
values = get_values(df, np.float32)
# Get grid configuration from user input
grid_size, threshold = read_grid_prompt()
# Create grid with crime points
grid, fig, ax = create_grid(values, bbox, threshold=threshold, grid_size=grid_size, plot=True)
# Get search start end points from user input
start, end = read_search_prompt(grid)
# Movement costs passed to the A* search (presumably straight / diagonal /
# blocked-adjacent step costs -- confirm against a_star.search).
cost = [1, 1.3, 1.5]
# Search for the optimal path.
# NOTE(review): `path` is rebound here, shadowing the shapefile path above.
path = search(grid, cost, start, end, [fig, ax])
| [
2,
10074,
17944,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
11787,
5447,
5888,
17944,
198,
6738,
2393,
62,
961,
1330,
1100,
62,
1477,
79,
11,
651,
62,
27160,
198,
6738,
10706,
1330,
2251,
62,
25928,
198,
6738,... | 3.165385 | 260 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
628
] | 3.333333 | 6 |
import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
import pickle
device = "cuda" if torch.cuda.is_available() else "cpu"
# returns two zero tensors for the initial state of the lstm
| [
11748,
28034,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2298,
293,
198,
198,
25202,
796,
366,
66,
15339,
1,
611,
28034,
13,
66,
15339,
13,
271,... | 3.140845 | 71 |
import discord
from discord.ext import commands
| [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198
] | 4.8 | 10 |
import os
import logging
| [
11748,
28686,
198,
11748,
18931,
628
] | 4.333333 | 6 |
from django.db import models
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628
] | 3.75 | 8 |
import skopt
import os
import numpy as np
import shutil
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from framework import lib
from framework import model_neural_trad
from framework import evaluation
from framework import data
from framework import config
########################################################################################
########################################################################################
########################################################################################
########################################################################################
if len(sys.argv) == 1:
corpora = 'lm1b,mscoco,flickr8k'.split(',')
else:
corpora = sys.argv[1].split(',')
datasources = data.load_datasources(config.langmodtrans_capgen_dataset)
capgen_size = datasources['train'].size
capgen_test = data.load_datasources('mscoco')['test'].shuffle(0).take(datasources['test'].num_groups, whole_groups=True) #MSCOCO test is never used in langmodtrans experiments so we can validate on it
del datasources
lib.create_dir(config.hyperpar_dir+'/langmodtrans')
for corpus in corpora:
lib.create_dir(config.hyperpar_dir+'/langmodtrans/'+corpus)
print('='*100)
print(lib.formatted_clock())
print(corpus, '1 (language model)')
print()
if lib.file_exists(config.hyperpar_dir+'/langmodtrans/'+corpus+'/2_best.txt'):
print('Found ready')
print()
continue
print(
'#',
'init_method',
'max_init_weight',
'embed_size',
'rnn_size',
'post_image_size',
'pre_output_size',
'post_image_activation',
'rnn_type',
'optimizer',
'learning_rate',
'normalize_image',
'weights_reg_weight',
'image_dropout_prob',
'post_image_dropout_prob',
'embedding_dropout_prob',
'rnn_dropout_prob',
'max_gradient_norm',
'minibatch_size',
'beam_width',
'geomeanpplx',
'duration',
sep='\t'
)
datasources = data.load_datasources(corpus)
datasources['train'] = datasources['train'].shuffle(0).take(capgen_size)
vocab = datasources['train'].tokenize_sents().text_sents.get_vocab(config.min_token_freq)
dataset = data.Dataset(
vocab = vocab,
train_datasource = datasources['train'],
val_datasource = datasources['val'],
test_datasource = capgen_test,
)
dataset.compile_sents()
test_index_sents = dataset.test.index_sents
if not lib.file_exists(config.hyperpar_dir+'/langmodtrans/'+corpus+'/1_search.txt'):
with open(config.hyperpar_dir+'/langmodtrans/'+corpus+'/1_search.txt', 'w', encoding='utf-8') as f:
print(
'#',
'init_method',
'max_init_weight',
'embed_size',
'rnn_size',
'post_image_size',
'pre_output_size',
'post_image_activation',
'rnn_type',
'optimizer',
'learning_rate',
'normalize_image',
'weights_reg_weight',
'image_dropout_prob',
'post_image_dropout_prob',
'embedding_dropout_prob',
'rnn_dropout_prob',
'max_gradient_norm',
'minibatch_size',
'beam_width',
'geomeanpplx',
'duration',
sep='\t', file=f
)
if not lib.file_exists(config.hyperpar_dir+'/langmodtrans/'+corpus+'/1_search_errors.txt'):
with open(config.hyperpar_dir+'/langmodtrans/'+corpus+'/1_search_errors.txt', 'w', encoding='utf-8') as f:
print(
'#',
'init_method',
'max_init_weight',
'embed_size',
'rnn_size',
'post_image_size',
'pre_output_size',
'post_image_activation',
'rnn_type',
'optimizer',
'learning_rate',
'normalize_image',
'weights_reg_weight',
'image_dropout_prob',
'post_image_dropout_prob',
'embedding_dropout_prob',
'rnn_dropout_prob',
'max_gradient_norm',
'minibatch_size',
'beam_width',
'error',
'duration',
sep='\t', file=f
)
opt = skopt.Optimizer(
[
skopt.space.Categorical(config.hyperpar_space['init_method'], name='init_method'),
skopt.space.Real(*config.hyperpar_space['max_init_weight'], 'log-uniform', name='max_init_weight'),
skopt.space.Integer(*config.hyperpar_space['embed_size'], name='embed_size'),
skopt.space.Integer(*config.hyperpar_space['rnn_size'], name='rnn_size'),
skopt.space.Categorical([None], name='post_image_size'),
skopt.space.Categorical([None], name='pre_output_size'),
skopt.space.Categorical(['none'], name='post_image_activation'),
skopt.space.Categorical(config.hyperpar_space['rnn_type'], name='rnn_type'),
skopt.space.Categorical(config.hyperpar_space['optimizer'], name='optimizer'),
skopt.space.Real(*config.hyperpar_space['learning_rate'], 'log-uniform', name='learning_rate'),
skopt.space.Categorical([False], name='normalize_image'),
skopt.space.Real(*config.hyperpar_space['weights_reg_weight'], 'log-uniform', name='weights_reg_weight'),
skopt.space.Categorical([0.0], name='image_dropout_prob'),
skopt.space.Categorical([0.0], name='post_image_dropout_prob'),
skopt.space.Real(*config.hyperpar_space['embedding_dropout_prob'], 'uniform', name='embedding_dropout_prob'),
skopt.space.Real(*config.hyperpar_space['rnn_dropout_prob'], 'uniform', name='rnn_dropout_prob'),
skopt.space.Real(*config.hyperpar_space['max_gradient_norm'], 'log-uniform', name='max_gradient_norm'),
skopt.space.Integer(*config.hyperpar_space['minibatch_size'], name='minibatch_size'),
skopt.space.Categorical([1], name='beam_width'),
],
n_initial_points=config.hyperpar_num_random_evals,
base_estimator='RF',
acq_func='EI',
acq_optimizer='auto',
random_state=0,
)
i = 0
already_seen = set()
best_hyperpar = None
best_cost = None
with open(config.hyperpar_dir+'/langmodtrans/'+corpus+'/1_search.txt', 'r', encoding='utf-8') as f:
for line in f.read().strip().split('\n')[1:]:
i += 1
[
entry_num,
init_method,
max_init_weight,
embed_size,
rnn_size,
post_image_size,
pre_output_size,
post_image_activation,
rnn_type,
optimizer,
learning_rate,
normalize_image,
weights_reg_weight,
image_dropout_prob,
post_image_dropout_prob,
embedding_dropout_prob,
rnn_dropout_prob,
max_gradient_norm,
minibatch_size,
beam_width,
cost,
duration,
] = line.split('\t')
next_hyperpar = [
init_method,
float(max_init_weight),
int(embed_size),
int(rnn_size),
int(post_image_size) if post_image_size != 'None' else None,
int(pre_output_size) if pre_output_size != 'None' else None,
post_image_activation,
rnn_type,
optimizer,
float(learning_rate),
normalize_image == 'True',
float(weights_reg_weight),
float(image_dropout_prob),
float(post_image_dropout_prob),
float(embedding_dropout_prob),
float(rnn_dropout_prob),
float(max_gradient_norm),
int(minibatch_size),
int(beam_width),
]
cost = float(cost)
duration = int(duration)
if i < config.hyperpar_num_random_evals + config.hyperpar_num_evals:
num_hyperpars = 1
while standardize_hyperpar(opt.ask(num_hyperpars)[-1]) != next_hyperpar:
print(i, '<<FOUND HYPERPARAMS THAT RESULTED IN ERRORS LAST TIME>>')
num_hyperpars += 1
opt.tell(prepare_hyperpar_for_tell(next_hyperpar), cost)
if best_cost is None or cost < best_cost:
best_hyperpar = next_hyperpar
best_cost = cost
already_seen.add(tuple(next_hyperpar))
print(i, *next_hyperpar, cost, lib.format_duration(duration), '******' if cost == best_cost else '', sep='\t')
for _ in range(i, config.hyperpar_num_random_evals + config.hyperpar_num_evals):
i += 1
num_hyperpars = 1
while True:
t = lib.Timer()
next_hyperpar = standardize_hyperpar(opt.ask(num_hyperpars)[-1]) #This allows us to get different hyperparameters every time the previous hyperparameters resulted in <<SEEN>>, <<NAN>>, or <<EMPTY>>
num_hyperpars += 1
print(i, *next_hyperpar, sep='\t', end='\t')
if tuple(next_hyperpar) in already_seen:
duration = t.get_duration()
print('<<SEEN>>', lib.format_duration(duration), sep='\t')
continue
try:
cost = objective(next_hyperpar)
except model_neural_trad.NotANumberError:
duration = t.get_duration()
print('<<NAN>>', lib.format_duration(duration), sep='\t')
with open(config.hyperpar_dir+'/langmodtrans/'+corpus+'/1_search_errors.txt', 'a', encoding='utf-8') as f:
print(i, *next_hyperpar, 'nan', duration, sep='\t', file=f)
continue
except model_neural_trad.EmptyBeamError:
duration = t.get_duration()
print('<<EMPTY>>', lib.format_duration(duration), sep='\t')
with open(config.hyperpar_dir+'/langmodtrans/'+corpus+'/1_search_errors.txt', 'a', encoding='utf-8') as f:
print(i, *next_hyperpar, 'empty', duration, sep='\t', file=f)
continue
except InfinitePerplexityError:
duration = t.get_duration()
print('<<INFPPLX>>', lib.format_duration(duration), sep='\t')
with open(config.hyperpar_dir+'/langmodtrans/'+corpus+'/1_search_errors.txt', 'a', encoding='utf-8') as f:
print(i, *next_hyperpar, 'infpplx', duration, sep='\t', file=f)
continue
break
duration = t.get_duration()
opt.tell(prepare_hyperpar_for_tell(next_hyperpar), cost)
if best_cost is None or cost < best_cost:
best_hyperpar = next_hyperpar
best_cost = cost
shutil.copyfile(config.hyperpar_dir+'/langmodtrans/'+corpus+'/1_model.hdf5', config.hyperpar_dir+'/langmodtrans/'+corpus+'/1_model_best.hdf5')
already_seen.add(tuple(next_hyperpar))
print(cost, lib.format_duration(duration), '******' if cost == best_cost else '', sep='\t')
with open(config.hyperpar_dir+'/langmodtrans/'+corpus+'/1_search.txt', 'a', encoding='utf-8') as f:
print(i, *next_hyperpar, cost, duration, sep='\t', file=f)
print('-'*100)
print(lib.formatted_clock())
print('best found:')
print('', *best_hyperpar, best_cost, sep='\t')
print()
with open(config.hyperpar_dir+'/langmodtrans/'+corpus+'/1_best.txt', 'w', encoding='utf-8') as f:
print('loggeomeanpplx', best_cost, sep='\t', file=f)
print('init_method', best_hyperpar[0], sep='\t', file=f)
print('max_init_weight', best_hyperpar[1], sep='\t', file=f)
print('embed_size', best_hyperpar[2], sep='\t', file=f)
print('rnn_size', best_hyperpar[3], sep='\t', file=f)
print('post_image_size', best_hyperpar[4], sep='\t', file=f)
print('pre_output_size', best_hyperpar[5], sep='\t', file=f)
print('post_image_activation', best_hyperpar[6], sep='\t', file=f)
print('rnn_type', best_hyperpar[7], sep='\t', file=f)
print('optimizer', best_hyperpar[8], sep='\t', file=f)
print('learning_rate', best_hyperpar[9], sep='\t', file=f)
print('normalize_image', best_hyperpar[10], sep='\t', file=f)
print('weights_reg_weight', best_hyperpar[11], sep='\t', file=f)
print('image_dropout_prob', best_hyperpar[12], sep='\t', file=f)
print('post_image_dropout_prob', best_hyperpar[13], sep='\t', file=f)
print('embedding_dropout_prob', best_hyperpar[14], sep='\t', file=f)
print('rnn_dropout_prob', best_hyperpar[15], sep='\t', file=f)
print('max_gradient_norm', best_hyperpar[16], sep='\t', file=f)
print('minibatch_size', best_hyperpar[17], sep='\t', file=f)
print('beam_width', best_hyperpar[18], sep='\t', file=f)
best_prefix_params = model_neural_trad.TradNeuralModel.get_saved_prefix_params(vocab, config.hyperpar_dir+'/langmodtrans/'+corpus+'/1_model_best.hdf5')
langmod_embed_size = best_hyperpar[2]
langmod_rnn_size = best_hyperpar[3]
langmod_rnn_type = best_hyperpar[7]
langmod_embedding_dropout_prob = best_hyperpar[14]
########################################################################################
print('-'*100)
print(lib.formatted_clock())
print(corpus, '2 (caption generator)')
print()
print(
'#',
'init_method',
'max_init_weight',
'embed_size',
'rnn_size',
'post_image_size',
'pre_output_size',
'rnn_type',
'post_image_activation',
'optimizer',
'learning_rate',
'normalize_image',
'weights_reg_weight',
'image_dropout_prob',
'post_image_dropout_prob',
'embedding_dropout_prob',
'rnn_dropout_prob',
'max_gradient_norm',
'minibatch_size',
'beam_width',
'WMD',
'duration',
sep='\t'
)
datasources = data.load_datasources(config.langmodtrans_capgen_dataset)
vocab = datasources['train'].tokenize_sents().text_sents.get_vocab(config.min_token_freq).intersection(best_prefix_params.vocab)
dataset = data.Dataset(
vocab = vocab,
train_datasource = datasources['train'],
val_datasource = datasources['val'],
test_datasource = data.load_datasources('mscoco')['val'].shuffle(0).take(datasources['test'].num_groups, whole_groups=True),
)
dataset.compile_sents()
test_images = dataset.test.get_images()
test_sents = dataset.test.get_text_sent_groups()
best_prefix_params = best_prefix_params.convert_to_new_vocabulary(vocab)
if not lib.file_exists(config.hyperpar_dir+'/langmodtrans/'+corpus+'/2_search.txt'):
with open(config.hyperpar_dir+'/langmodtrans/'+corpus+'/2_search.txt', 'w', encoding='utf-8') as f:
print(
'#',
'init_method',
'max_init_weight',
'embed_size',
'rnn_size',
'post_image_size',
'pre_output_size',
'post_image_activation',
'rnn_type',
'optimizer',
'learning_rate',
'normalize_image',
'weights_reg_weight',
'image_dropout_prob',
'post_image_dropout_prob',
'embedding_dropout_prob',
'rnn_dropout_prob',
'max_gradient_norm',
'minibatch_size',
'beam_width',
'WMD',
'duration',
sep='\t', file=f
)
if not lib.file_exists(config.hyperpar_dir+'/langmodtrans/'+corpus+'/2_search_errors.txt'):
with open(config.hyperpar_dir+'/langmodtrans/'+corpus+'/2_search_errors.txt', 'w', encoding='utf-8') as f:
print(
'#',
'init_method',
'max_init_weight',
'embed_size',
'rnn_size',
'post_image_size',
'pre_output_size',
'post_image_activation',
'rnn_type',
'optimizer',
'learning_rate',
'normalize_image',
'weights_reg_weight',
'image_dropout_prob',
'post_image_dropout_prob',
'embedding_dropout_prob',
'rnn_dropout_prob',
'max_gradient_norm',
'minibatch_size',
'beam_width',
'error',
'duration',
sep='\t', file=f
)
opt = skopt.Optimizer(
[
skopt.space.Categorical(config.hyperpar_space['init_method'], name='init_method'),
skopt.space.Real(*config.hyperpar_space['max_init_weight'], 'log-uniform', name='max_init_weight'),
skopt.space.Categorical([langmod_embed_size], name='embed_size'),
skopt.space.Categorical([langmod_rnn_size], name='rnn_size'),
skopt.space.Integer(*config.hyperpar_space['post_image_size'], name='post_image_size'),
skopt.space.Categorical([None], name='pre_output_size'),
skopt.space.Categorical(config.hyperpar_space['post_image_activation'], name='post_image_activation'),
skopt.space.Categorical([langmod_rnn_type], name='rnn_type'),
skopt.space.Categorical(config.hyperpar_space['optimizer'], name='optimizer'),
skopt.space.Real(*config.hyperpar_space['learning_rate'], 'log-uniform', name='learning_rate'),
skopt.space.Categorical(config.hyperpar_space['normalize_image'], name='normalize_image'),
skopt.space.Real(*config.hyperpar_space['weights_reg_weight'], 'log-uniform', name='weights_reg_weight'),
skopt.space.Real(*config.hyperpar_space['image_dropout_prob'], 'uniform', name='image_dropout_prob'),
skopt.space.Real(*config.hyperpar_space['post_image_dropout_prob'], 'uniform', name='post_image_dropout_prob'),
skopt.space.Categorical([langmod_embedding_dropout_prob], name='embedding_dropout_prob'),
skopt.space.Real(*config.hyperpar_space['rnn_dropout_prob'], 'uniform', name='rnn_dropout_prob'),
skopt.space.Real(*config.hyperpar_space['max_gradient_norm'], 'log-uniform', name='max_gradient_norm'),
skopt.space.Integer(*config.hyperpar_space['minibatch_size'], name='minibatch_size'),
skopt.space.Integer(*config.hyperpar_space['beam_width'], name='beam_width'),
],
n_initial_points=config.hyperpar_num_random_evals,
base_estimator='RF',
acq_func='EI',
acq_optimizer='auto',
random_state=0,
)
i = 0
already_seen = set()
best_hyperpar = None
best_cost = None
with open(config.hyperpar_dir+'/langmodtrans/'+corpus+'/2_search.txt', 'r', encoding='utf-8') as f:
for line in f.read().strip().split('\n')[1:]:
i += 1
[
entry_num,
init_method,
max_init_weight,
embed_size,
rnn_size,
post_image_size,
pre_output_size,
post_image_activation,
rnn_type,
optimizer,
learning_rate,
normalize_image,
weights_reg_weight,
image_dropout_prob,
post_image_dropout_prob,
embedding_dropout_prob,
rnn_dropout_prob,
max_gradient_norm,
minibatch_size,
beam_width,
cost,
duration,
] = line.split('\t')
next_hyperpar = [
init_method,
float(max_init_weight),
int(embed_size),
int(rnn_size),
int(post_image_size),
int(pre_output_size) if pre_output_size != 'None' else None,
post_image_activation,
rnn_type,
optimizer,
float(learning_rate),
normalize_image == 'True',
float(weights_reg_weight),
float(image_dropout_prob),
float(post_image_dropout_prob),
float(embedding_dropout_prob),
float(rnn_dropout_prob),
float(max_gradient_norm),
int(minibatch_size),
int(beam_width),
]
cost = -float(cost)
duration = int(duration)
if i < config.hyperpar_num_random_evals + config.hyperpar_num_evals:
num_hyperpars = 1
while standardize_hyperpar(opt.ask(num_hyperpars)[-1]) != next_hyperpar:
print(i, '<<FOUND HYPERPARAMS THAT RESULTED IN ERRORS LAST TIME>>')
num_hyperpars += 1
opt.tell(prepare_hyperpar_for_tell(next_hyperpar), cost)
if best_cost is None or cost < best_cost:
best_hyperpar = next_hyperpar
best_cost = cost
already_seen.add(tuple(next_hyperpar))
print(i, *next_hyperpar, -cost, lib.format_duration(duration), '******' if cost == best_cost else '', sep='\t')
for _ in range(i, config.hyperpar_num_random_evals + config.hyperpar_num_evals):
i += 1
num_hyperpars = 1
while True:
t = lib.Timer()
next_hyperpar = standardize_hyperpar(opt.ask(num_hyperpars)[-1]) #This allows us to get different hyperparameters every time the previous hyperparameters resulted in <<SEEN>>, <<NAN>>, or <<EMPTY>>
num_hyperpars += 1
print(i, *next_hyperpar, sep='\t', end='\t')
if tuple(next_hyperpar) in already_seen:
duration = t.get_duration()
print('<<SEEN>>', lib.format_duration(duration), sep='\t')
continue
try:
cost = objective(next_hyperpar)
except model_neural_trad.NotANumberError:
duration = t.get_duration()
print('<<NAN>>', lib.format_duration(duration), sep='\t')
with open(config.hyperpar_dir+'/langmodtrans/'+corpus+'/2_search_errors.txt', 'a', encoding='utf-8') as f:
print(i, *next_hyperpar, 'nan', duration, sep='\t', file=f)
continue
except model_neural_trad.EmptyBeamError:
duration = t.get_duration()
print('<<EMPTY>>', lib.format_duration(duration), sep='\t')
with open(config.hyperpar_dir+'/langmodtrans/'+corpus+'/2_search_errors.txt', 'a', encoding='utf-8') as f:
print(i, *next_hyperpar, 'empty', duration, sep='\t', file=f)
continue
break
duration = t.get_duration()
opt.tell(prepare_hyperpar_for_tell(next_hyperpar), cost)
if best_cost is None or cost < best_cost:
best_hyperpar = next_hyperpar
best_cost = cost
already_seen.add(tuple(next_hyperpar))
print(-cost, lib.format_duration(duration), '******' if cost == best_cost else '', sep='\t')
with open(config.hyperpar_dir+'/langmodtrans/'+corpus+'/2_search.txt', 'a', encoding='utf-8') as f:
print(i, *next_hyperpar, -cost, duration, sep='\t', file=f)
print('-'*100)
print(lib.formatted_clock())
print('best found:')
print('', *best_hyperpar, -best_cost, sep='\t')
print()
with open(config.hyperpar_dir+'/langmodtrans/'+corpus+'/2_best.txt', 'w', encoding='utf-8') as f:
print('WMD', -best_cost, sep='\t', file=f)
print('init_method', best_hyperpar[0], sep='\t', file=f)
print('max_init_weight', best_hyperpar[1], sep='\t', file=f)
print('embed_size', best_hyperpar[2], sep='\t', file=f)
print('rnn_size', best_hyperpar[3], sep='\t', file=f)
print('post_image_size', best_hyperpar[4], sep='\t', file=f)
print('pre_output_size', best_hyperpar[5], sep='\t', file=f)
print('post_image_activation', best_hyperpar[6], sep='\t', file=f)
print('rnn_type', best_hyperpar[7], sep='\t', file=f)
print('optimizer', best_hyperpar[8], sep='\t', file=f)
print('learning_rate', best_hyperpar[9], sep='\t', file=f)
print('normalize_image', best_hyperpar[10], sep='\t', file=f)
print('weights_reg_weight', best_hyperpar[11], sep='\t', file=f)
print('image_dropout_prob', best_hyperpar[12], sep='\t', file=f)
print('post_image_dropout_prob', best_hyperpar[13], sep='\t', file=f)
print('embedding_dropout_prob', best_hyperpar[14], sep='\t', file=f)
print('rnn_dropout_prob', best_hyperpar[15], sep='\t', file=f)
print('max_gradient_norm', best_hyperpar[16], sep='\t', file=f)
print('minibatch_size', best_hyperpar[17], sep='\t', file=f)
print('beam_width', best_hyperpar[18], sep='\t', file=f)
| [
11748,
1341,
8738,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4423,
346,
198,
11748,
25064,
198,
418,
13,
268,
2268,
17816,
10234,
62,
8697,
47,
62,
23678,
62,
25294,
62,
2538,
18697,
20520,
796,
705,
17,
6,
19... | 1.882542 | 14,320 |
from __future__ import print_function
#
# I should write a decent test of the python binding...
#
import sys, os, DLFCN
sys.setdlopenflags(DLFCN.RTLD_GLOBAL+DLFCN.RTLD_LAZY)
from pluginCondDBPyInterface import *
a = FWIncantation()
os.putenv("CORAL_AUTH_PATH","/afs/cern.ch/cms/DB/conddb")
rdbms = RDBMS()
dbName = "sqlite_file:testExample.db"
# dbName = "oracle://cms_orcoff_prod/CMS_COND_20X_ECAL"
#logName = "oracle://cms_orcoff_prod/CMS_COND_21X_POPCONLOG"
#rdbms.setLogger(logName)
from CondCore.Utilities import iovInspector as inspect
dumpSummaries(dbName)
dumpContents(dbName)
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
2,
198,
2,
314,
815,
3551,
257,
7709,
1332,
286,
262,
21015,
12765,
986,
198,
2,
628,
198,
198,
11748,
25064,
11,
28686,
11,
23641,
4851,
45,
198,
17597,
13,
2617,
25404,
9654,
331... | 2.385542 | 249 |
from emannotationschemas.models import make_dataset_models, InvalidSchemaField
from emannotationschemas.models import make_annotation_model_from_schema
from emannotationschemas.models import Base
import pytest
import marshmallow as mm
| [
6738,
795,
34574,
602,
2395,
5356,
13,
27530,
1330,
787,
62,
19608,
292,
316,
62,
27530,
11,
17665,
27054,
2611,
15878,
198,
6738,
795,
34574,
602,
2395,
5356,
13,
27530,
1330,
787,
62,
1236,
14221,
62,
19849,
62,
6738,
62,
15952,
261... | 3.646154 | 65 |
"""
The canonical example of a function that can't be
learned with a simple linear model is XOR
"""
import numpy as np
from joelnet.train import train
from joelnet.nn import NeuralNet
from joelnet.layers import Linear, Tanh
inputs = np.array([
[0, 0],
[1, 0],
[0, 1],
[1, 1]
])
targets = np.array([
[1, 0],
[0, 1],
[0, 1],
[1, 0]
])
net = NeuralNet([
Linear(input_size=2, output_size=2),
Tanh(),
Linear(input_size=2, output_size=2)
])
train(net, inputs, targets)
for x, y in zip(inputs, targets):
predicted = net.forward(x)
print(x, predicted, y)
| [
37811,
198,
464,
40091,
1672,
286,
257,
2163,
326,
460,
470,
307,
198,
35720,
276,
351,
257,
2829,
14174,
2746,
318,
1395,
1581,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
2525,
417,
3262,
13,
27432,
1330,
4512,
1... | 2.326923 | 260 |
import sys, uuid
from collections import namedtuple
import seqtools.structure.transcript
from seqtools.range import GenomicRange
Bed12Options = namedtuple('Bed12Options',
['sequence',
'ref',
'gene_name',
'payload'])
Bed12Fields = namedtuple('Bed12Fields',
[
'chrom',
'chromStart',
'chromEnd',
'name',
'score',
'strand',
'thickStart',
'thickEnd',
'itemRgb',
'blockCount',
'blockSizes',
'blockStarts'])
class Bed12(seqtools.structure.transcript.Transcript):
""" Bed format with 9 optional fields
:param bed_line: one line of a bed file
:type bed_line: string
"""
@property
@staticmethod
def Options(**kwargs):
""" A method for declaring options for the class"""
construct = Bed12Options #IMPORTANT! Set this
names = construct._fields
d = {}
for name in names: d[name] = None #default values
"""set defaults here"""
for k,v in kwargs.iteritems():
if k in names: d[k] = v
else: raise ValueError('Error '+k+' is not an options property')
"""Create a set of options based on the inputs"""
return construct(**d)
def _line_to_entry(self,line):
"""parse the line into entries and keys"""
f = line.rstrip().split("\t")
"""
'chrom'
'chromStart'
'chromEnd'
'name'
'score'
'strand'
'thickStart'
'thickEnd'
'itemRgb'
'blockCount'
'blockSizes'
'blockStarts'
"""
return Bed12Fields(
f[0],
int(f[1]),
int(f[2]),
f[3],
int(f[4]),
f[5],
int(f[6]),
int(f[7]),
[int(x) for x in f[8].rstrip(',').split(',')],
int(f[9]),
[int(x) for x in f[10].rstrip(',').split(',')],
[int(x) for x in f[11].rstrip(',').split(',')])
| [
11748,
25064,
11,
334,
27112,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
11748,
33756,
31391,
13,
301,
5620,
13,
7645,
6519,
198,
6738,
33756,
31391,
13,
9521,
1330,
5215,
10179,
17257,
198,
198,
45896,
1065,
29046,
796,
3706,
83,
29... | 2.235802 | 810 |
#! /usr/bin/python
import sys, struct
import xml.dom.minidom
from lmcp import LMCPObject
## ===============================================================================
## Authors: AFRL/RQQA
## Organization: Air Force Research Laboratory, Aerospace Systems Directorate, Power and Control Division
##
## Copyright (c) 2017 Government of the United State of America, as represented by
## the Secretary of the Air Force. No copyright is claimed in the United States under
## Title 17, U.S. Code. All Other Rights Reserved.
## ===============================================================================
## This file was auto-created by LmcpGen. Modifications will be overwritten.
from afrl.cmasi import Task
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
25064,
11,
2878,
198,
11748,
35555,
13,
3438,
13,
1084,
312,
296,
198,
6738,
300,
76,
13155,
1330,
37125,
8697,
10267,
198,
198,
2235,
38093,
25609,
855,
198,
2235,
46665,
25,
... | 4.25731 | 171 |
import machine
import ubinascii
# These defaults are overwritten with the contents of /config.json by
# load_config()
CONFIG = {
"broker": "192.168.1.19",
"sensor_pin": 0,
"led_pin": 2,
"client_id": b"esp8266_" + ubinascii.hexlify(machine.unique_id()),
"topic": b"home",
"sleep_seconds": 60,
"wireless_networks": {
"wifi1": "passwork1",
"wifi2": "password2"
}
}
| [
11748,
4572,
198,
11748,
334,
8800,
292,
979,
72,
198,
198,
2,
2312,
26235,
389,
6993,
9108,
351,
262,
10154,
286,
1220,
11250,
13,
17752,
416,
198,
2,
3440,
62,
11250,
3419,
198,
10943,
16254,
796,
1391,
198,
220,
220,
220,
366,
79... | 2.185185 | 189 |
import time
import redis
import csv
import sys
import os
conn = redis.Redis()
filename = sys.argv[1]
lines = []
read = csv.reader(open(filename))
lines = [l for l in read]
nice = os.path.splitext(os.path.basename(filename))[0]
# We are doing True so that we'll have a continual flow of data from
# our sample data.
while True:
try:
conn.sadd('ecgs-keys', nice)
for row in lines:
time.sleep(0.007)
# JavaScript time is in ms, so we need to increase Pythons time
# to match JavaScript.
data = [str(time.time() * 1000)] + row[1:]
conn.publish('ecg-%s' % nice, '|'.join(data))
except KeyboardInterrupt:
conn.srem('ecgs-keys', nice)
raise
| [
11748,
640,
198,
11748,
2266,
271,
198,
11748,
269,
21370,
198,
11748,
25064,
198,
11748,
28686,
198,
198,
37043,
796,
2266,
271,
13,
7738,
271,
3419,
198,
198,
34345,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
6615,
796,
17635,
198,
... | 2.316614 | 319 |
from gcsa.gadget import Gadget
from .base_serializer import BaseSerializer
| [
6738,
308,
6359,
64,
13,
70,
324,
1136,
1330,
39266,
198,
6738,
764,
8692,
62,
46911,
7509,
1330,
7308,
32634,
7509,
628
] | 3.454545 | 22 |
from django import forms | [
6738,
42625,
14208,
1330,
5107
] | 4.8 | 5 |
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_serialization import jsonutils
import requests
from six.moves import http_client as http
from glance.tests.functional.v2 import metadef_base
| [
2,
15069,
357,
66,
8,
1946,
30446,
15503,
12,
11869,
446,
7712,
5834,
11,
406,
13,
47,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
239... | 3.726829 | 205 |
import magma as m
from riscv_mini.cache import make_CacheIO
from riscv_mini.data_path import Datapath, make_HostIO
from riscv_mini.control import Control
| [
11748,
2153,
2611,
355,
285,
198,
198,
6738,
374,
2304,
85,
62,
45313,
13,
23870,
1330,
787,
62,
30562,
9399,
198,
6738,
374,
2304,
85,
62,
45313,
13,
7890,
62,
6978,
1330,
16092,
499,
776,
11,
787,
62,
17932,
9399,
198,
6738,
374,
... | 3 | 52 |
# main script
# read in list of gendered words from genderwords
# switch them through an intermediary step
import csv
import re
with open ('./data/genderwords.csv') as words:
reader = csv.reader(words)
mydict = {row[0]: row[1] for row in reader}
with open('./data/Harry.txt', 'r') as f1:
with open('./data/Harriet.txt', 'w') as f2:
for line in f1:
x = line
# iterate through the dictionary on that one line making any changes necessary for individual words
for oldword, newword in mydict.items():
regex_match = r'\b(%s)+\b'
reg = regex_match % oldword
x = re.sub(reg, oldword+'silly', x)
f2.write(x)
f2.close()
f1.close()
with open('./data/Harriet.txt', 'r') as f4:
with open('./data/final.txt', 'w') as f3:
for line2 in f4:
x2 = line2
for oldword, newword in mydict.items():
regex_match = r'\b(%s)+\b'
sil = oldword+'silly'
reg = regex_match % sil
x2 = re.sub(reg, newword, x2)
f3.write(x2)
f3.close()
f4.close()
words.close()
# her can correspond with him or his.
# same with his and her or hers.
# need to figure out by part of speech
# x = line.replace(" "+oldword+" ", " "+oldword+"silly")
# then f2.write(line.replace(" "+oldword+"silly", " "+neword+" ")
'''
# script to read in file, replace words, and output new file
import re
import pandas as pd
# read in file
gdf = pd.read_csv('./data/genderwords.csv') # import list of genderedword pairs as a dataframe
print(gdf)
df = gdf.loc[:, ('froms', 'tos')]
print(df)
genderword_list = []
future_genderword_list = []
genderword_list = genderword_list.append(df.froms.values.to_list())
future_genderword_list = future_genderword_list.append(df.tos.values.to_list())
with open('./data/Harriet.txt', 'r') as f:
content = f.read()
for word in range(len(genderword_list)):
print(word)
newtxt = re.sub(word, sword+'ooyt ', content)
content = newtxt
print(content[:550])
# output the new text into a new file
'''
| [
2,
1388,
4226,
198,
2,
1100,
287,
1351,
286,
308,
30398,
2456,
422,
5279,
10879,
198,
2,
5078,
606,
832,
281,
45193,
2239,
198,
198,
11748,
269,
21370,
198,
11748,
302,
198,
198,
4480,
1280,
357,
4458,
14,
7890,
14,
8388,
10879,
13,... | 2.111111 | 1,080 |
import os, sys
PROJECT_DIR = '/www/slicknot'
activate_this = os.path.join(PROJECT_DIR, 'bin', 'activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
sys.path.append(PROJECT_DIR)
from slicknot import app as application
| [
11748,
28686,
11,
25064,
198,
198,
31190,
23680,
62,
34720,
796,
31051,
2503,
14,
6649,
624,
1662,
6,
198,
198,
39022,
62,
5661,
796,
28686,
13,
6978,
13,
22179,
7,
31190,
23680,
62,
34720,
11,
705,
8800,
3256,
705,
39022,
62,
5661,
... | 2.781609 | 87 |
#%%
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv("data_eda.csv")
# choose the relevant columns
#%%
df.columns
#%%
df_model = df[[
"avg_salary",
"Rating",
"Size",
"Type of ownership",
"Industry",
"Sector",
"Revenue",
"num_comp",
"hourly",
"employer_provided",
"job_state",
"same_state",
"age",
"python_yn",
"spark",
"aws",
"excel",
"job_simp",
"seniority",
"desc_len",
]]
#%%
# get dummy data
df_dum = pd.get_dummies(df_model)
df_dum
#%%
# train test split
from sklearn.model_selection import train_test_split
X = df_dum.drop("avg_salary", axis=1)
y = df_dum.avg_salary.values
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.2,
random_state=42)
# multiple linear regression
#%%
import statsmodels.api as sm
X_sm = X = sm.add_constant(X)
model = sm.OLS(y, X_sm)
#%%
model.fit().summary()
#%%
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.model_selection import cross_val_score
lm = LinearRegression()
lm.fit(X_train, y_train)
#%%
# np.mean(
# cross_val_score(lm,
# X_train,
# y_train,
# scoring="neg_mean_absolute_error",
# cv=3))
cross_val_score(lm, X_train, y_train, scoring="neg_mean_absolute_error", cv=3)
# lasso regression
#%%
# THIS IS THE CODE THAT WAS USED ON TRIAL BASIS FOR THE LASSO REGRESSION MODEL
# lm_1 = Lasso(alpha=0.13)
# lm_1 = Lasso(alpha=0.13)
# lm_1.fit(X_train, y_test)
# np.mean(
# cross_val_score(lm_1,
# X_train,
# y_train,
# scoring="neg_mean_absolute_error",
# cv=3))
# alpha = []
# error = []
# for i in range(1, 100):
# alpha.append(i / 100)
# lm_1 = Lasso(alpha=(i / 100))
# error.append(
# np.mean(
# cross_val_score(lm_1,
# X_train,
# y_train,
# scoring="neg_mean_absolute_error",
# cv=3)))
# plt.plot(alpha, error)
# LASSO REGRESSION USED:
lm_l = Lasso(alpha=0.13)
lm_l.fit(X_train, y_train)
np.mean(
cross_val_score(lm_l,
X_train,
y_train,
scoring="neg_mean_absolute_error",
cv=3))
alpha = []
error = []
for i in range(1, 100):
alpha.append(i / 100)
lml = Lasso(alpha=(i / 100))
error.append(
np.mean(
cross_val_score(lml,
X_train,
y_train,
scoring="neg_mean_absolute_error",
cv=3)))
plt.plot(alpha, error)
#%%
err = tuple(zip(alpha, error))
df_err = pd.DataFrame(err, columns=["alpha", "error"])
df_err[df_err.error == max(df_err.error)]
#%%
# random forest
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor()
np.mean(
cross_val_score(rf,
X_train,
y_train,
scoring="neg_mean_absolute_error",
cv=3))
# tune models GridSearchCV
# this is the tuning part, using grid search as mentioned above
#%%
from sklearn.model_selection import GridSearchCV
parameters = {
"n_estimators": range(10, 300, 10),
"criterion": (
"mse",
"mae",
),
"max_features": ("auto", "sqrt", "log2"),
}
#%%
gs = GridSearchCV(rf, parameters, scoring="neg_mean_absolute_error", cv=3)
gs.fit(X_train, y_train)
#%%
gs.best_score_
#%%
gs.best_estimator_
# test end samples
# %%
# tpred_lm = lm.predict(X_train, y_train)
# tpred_lm_1 = lm_1.predict(X_train, y_train)
tpred_lm = lm.predict(X_test)
tpred_lml = lm_l.predict(X_test)
tpred_rf = gs.best_estimator_.predict(X_test)
# %%
from sklearn.metrics import mean_absolute_error
mean_absolute_error(y_test, tpred_lm)
#%%
mean_absolute_error(y_test, tpred_lml)
#%%
mean_absolute_error(y_test, tpred_rf)
#%%
mean_absolute_error(y_test, (tpred_lm + tpred_rf) / 2)
# %%
((tpred_lm + tpred_rf) / 2)
#%%
# pickling the model
import pickle
pickl = {"model": gs.best_estimator_}
pickle.dump(pickl, open("model_file" + ".p", "wb"))
# %%
file_name = "model_file.p"
with open(file_name, "rb") as pickled:
data = pickle.load(pickled)
model = data["model"]
#%%
model.predict(X_test.iloc[1, :].values.reshape(1, -1))
#%%
# X_test.iloc[1, :].values
# %%
| [
2,
16626,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
198,
7568,
796,
279,
67,
13,
961,
62,
40664,
7203,
7890,
62,
18082,
13,
40664,
49... | 1.86258 | 2,496 |
from moonfire_tokenomics.data_types import Allocation, AllocationRecord, Blockchain, Category, CommonType, Sector, Token
bond = Token(
name="BOND",
project="Barnbridge",
sector=Sector.DEFI,
blockchain=[Blockchain.ETH],
category=[Category.GOV],
capped=True,
allocations=[
Allocation(
month=0,
records=[
AllocationRecord(type="Yield Farming", common_type=CommonType.ECOSYSTEM, share=0.08),
AllocationRecord(type="Uniswap LP Rewards", common_type=CommonType.ECOSYSTEM, share=0.2),
AllocationRecord(type="Staking Rewards", common_type=CommonType.ECOSYSTEM, share=0.048),
AllocationRecord(type="Var Pool Incentives", common_type=CommonType.ECOSYSTEM, share=0.175),
AllocationRecord(type="Community Reserve", common_type=CommonType.ECOSYSTEM, share=0.177),
AllocationRecord(type="Core Team", common_type=CommonType.TEAM, share=0.125),
AllocationRecord(type="DAO Treasury", common_type=CommonType.TREASURY, share=0.1),
AllocationRecord(type="Investors", common_type=CommonType.INVESTORS, share=0.075),
AllocationRecord(type="Advisors", common_type=CommonType.ADVISORS, share=0.02),
],
),
],
sources=[
"https://barnbridge.com/token-bond/",
],
year=2020,
)
| [
6738,
8824,
6495,
62,
30001,
31994,
13,
7890,
62,
19199,
1330,
1439,
5040,
11,
1439,
5040,
23739,
11,
29724,
11,
21743,
11,
8070,
6030,
11,
25892,
11,
29130,
198,
198,
65,
623,
796,
29130,
7,
198,
220,
220,
220,
1438,
2625,
33,
1867... | 2.293924 | 609 |
from bisect import insort
from bsddb3 import db
from builtins import object
from builtins import range
from collections import Counter
from collections import MutableMapping
from collections import defaultdict
from copy import copy
from copy import deepcopy
from distutils.spawn import find_executable
from functools import reduce
from future import standard_library
from future.utils import with_metaclass
from io import StringIO
from math import acos
from math import atan2
from math import ceil
from math import log10
from math import pi
from math import pow
from math import sin
from math import sqrt
from math import tanh
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_pdf import FigureCanvasPdf
from matplotlib.figure import Figure
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.mplot3d import Axes3D
from multiprocessing import Array
from multiprocessing import Pool
from numpy import arange
from numpy import array
from operator import itemgetter
from optparse import OptionParser
from os import path
from os import remove
from os import system
from os import unlink
from past.utils import old_div
from pickle import dump
from pickle import dumps
from pickle import load
from pickle import loads
from pprint import pprint
from pylab import colorbar
from pylab import figure
from pylab import scatter
from pylab import show
from pylab import subplot
from queue import Queue
from random import choice
from random import uniform
from scipy import ndimage
from scipy import optimize
from scipy import sparse
from scipy.interpolate import interp1d
from scipy.optimize import minimize
from scipy.signal import argrelextrema
from scipy.signal import find_peaks_cwt
from scipy.stats import norm
from shutil import copy2
from shutil import copyfile
from sklearn import linear_model
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from struct import pack
from struct import unpack
from sys import argv
from sys import exit
from sys import stdout
from time import ctime
from time import gmtime
from time import localtime
from time import mktime
from time import sleep
from time import strftime
from time import strptime
from time import time
from weakref import WeakKeyDictionary
from xml.sax.handler import ContentHandler
from zlib import compress
from zlib import decompress
import _thread
import abc
import argparse
import atexit
import base64
import collections
import colorsys
import copy
import datetime
import fnmatch
import gc
import getpass
import glob
import importlib
import itertools
import json
import math
import matplotlib
import matplotlib.cm
import matplotlib.mlab
import matplotlib.pyplot
import multiprocessing
import numpy
import numpy.linalg
import operator
import os
import os.path
import pickle
import platform
import pprint
import pylab
import queue
import random
import re
import scipy
import scipy.ndimage
import scipy.spatial.distance
import select
import shelve
import shutil
import signal
import sklearn.decomposition
import socket
import string
import struct
import subprocess
import sys
import tempfile
import tensorflow
import tensorflow.keras.models
import threading
import time
import traceback
import warnings
import weakref
import webbrowser
import xml.etree.ElementTree
import xml.sax
import zlib
| [
6738,
47457,
478,
1330,
1035,
419,
198,
6738,
275,
82,
1860,
65,
18,
1330,
20613,
198,
6738,
3170,
1040,
1330,
2134,
198,
6738,
3170,
1040,
1330,
2837,
198,
6738,
17268,
1330,
15034,
198,
6738,
17268,
1330,
13859,
540,
44,
5912,
198,
... | 3.712206 | 893 |
class Base(object):
"""
A class which ensures all subclasses have a basic override of the to string method, and a toLily method
"""
def toLily(self):
'''
Method which in any sub classes produces a string, which is a line of lilypond scripting representing the class
and its variables.
:return: None, but would normally return str.
'''
raise(NotImplementedError)
| [
198,
4871,
7308,
7,
15252,
2599,
628,
220,
220,
220,
37227,
198,
220,
220,
220,
317,
1398,
543,
19047,
477,
850,
37724,
423,
257,
4096,
20957,
286,
262,
284,
4731,
2446,
11,
290,
257,
284,
43,
813,
2446,
198,
220,
220,
220,
37227,
... | 2.810458 | 153 |
import warnings
from abc import ABC
from typing import List
from requests import Response
from pybokio._routers.account_routers import AccountIsAuthenticatedRouter, AccountLoginRouter, AccountLogoutRouter
from pybokio.client._base_client import BaseClient
from pybokio.exceptions import AuthenticationError
from pybokio.options import ConnectionMethod
| [
11748,
14601,
198,
6738,
450,
66,
1330,
9738,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
7007,
1330,
18261,
198,
198,
6738,
12972,
65,
482,
952,
13557,
472,
1010,
13,
23317,
62,
472,
1010,
1330,
10781,
3792,
47649,
3474,
49,
39605,
... | 3.901099 | 91 |
"""
Unit and regression test for the molssi_2019 package.
"""
# Import package, test suite, and other packages as needed
import molssi_2019
import pytest
import sys
def test_molssi_2019_imported():
"""Sample test, will always pass so long as import statement worked"""
assert "molssi_2019" in sys.modules
| [
37811,
198,
26453,
290,
20683,
1332,
329,
262,
18605,
824,
72,
62,
23344,
5301,
13,
198,
37811,
198,
198,
2,
17267,
5301,
11,
1332,
18389,
11,
290,
584,
10392,
355,
2622,
198,
11748,
18605,
824,
72,
62,
23344,
198,
11748,
12972,
9288,... | 3.351064 | 94 |
from django.conf import settings
from rest_framework.routers import DefaultRouter, SimpleRouter
from bmh_lims.users.api.views import UserViewSet
from bmh_lims.database.api.views import SampleViewSet, WorkflowBatchViewSet, LabViewSet, ProjectViewSet, \
WorkflowSampleViewSet, WorkflowDefinitionViewSet, WorkflowSampleBatchCreateViewSet
if settings.DEBUG:
router = DefaultRouter()
else:
router = SimpleRouter()
router.register("users", UserViewSet)
router.register("samples", SampleViewSet, basename='samples')
router.register("workflow_samples", WorkflowSampleViewSet, basename='workflow_samples')
router.register("workflow_batches", WorkflowBatchViewSet, basename='workflow_batches')
router.register("workflow_samplebatch_create", WorkflowSampleBatchCreateViewSet, basename='workflow_samplebatch_create')
router.register("workflow_definitions", WorkflowDefinitionViewSet, basename='workflow_definitions')
router.register("labs", LabViewSet, basename='labs')
router.register("projects", ProjectViewSet, basename='projects')
app_name = "api"
urlpatterns = router.urls
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
1334,
62,
30604,
13,
472,
1010,
1330,
15161,
49,
39605,
11,
17427,
49,
39605,
198,
198,
6738,
275,
76,
71,
62,
2475,
82,
13,
18417,
13,
15042,
13,
33571,
1330,
11787,
7680,
7248,
... | 3.239521 | 334 |
# Classes are templates, objects are instances of that class.
# I'll use coins as examples of classes and objects :
coin1 = Penny() # Instantiating the coin class to this coin1 object.
print(type()) # Should print <class'__main__Penny'>
coin1.value # Should print out 1.0 | [
2,
38884,
389,
24019,
11,
5563,
389,
10245,
286,
326,
1398,
13,
198,
198,
2,
314,
1183,
779,
10796,
355,
6096,
286,
6097,
290,
5563,
1058,
198,
198,
3630,
16,
796,
25965,
3419,
1303,
2262,
17096,
803,
262,
10752,
1398,
284,
428,
107... | 3.571429 | 77 |
import cv2
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import numpy
import math
img = cv2.imread('thinning.png',0)
nodes = []
nodeMap = np.empty([512, 512], dtype=int)
edgesOfNodes = []
edges = []
#I need list of all edges with duplicates
#[start id] [end id]
#each edge can have connections to multiple other edges...
# ok
def neighbours(x,y,image):
"Return 8-neighbours of image point P1(x,y), in a clockwise order"
img = image
x_1, y_1, x1, y1 = x-1, y-1, x+1, y+1
return [
(x_1, y),
(x_1, y1),
(x, y1),
(x1, y1), # P2,P3,P4,P5
(x1, y),
(x1, y_1),
(x, y_1),
(x_1, y_1)
] # P6,P7,P8,P9
for y in range(0, 512):
for x in range(0, 512):
# threshold the pixel
pixel = img[y,x]
if pixel == 255:
curNodeId = len(nodes)
nodeMap[x,y] = curNodeId
nodes.append( (x,y) )
edgesOfNodes.append( [] )
for curNodeId, node in enumerate(nodes):
x = node[0]
y = node[1]
#print("NODE NUMBER " + str(curNodeId))
for nb in neighbours(x,y,img):
if(img[nb[1], nb[0]] == 255):
#print("found neigbour of " + str(x) + " " + str(y) + " : " + str(nb))
theOtherNode = nodeMap[nb[0], nb[1]]
#print("coords of theOtherNode:" + str(nodes[theOtherNode]))
edge = [curNodeId,theOtherNode];
#print("adding edge" + str(edge))
edges.append(edge)
#print("added edge count of edges: " + str(len(edges) - 1))
#if not curNodeId in edgesOfNodes:
# edgesOfNodes[curNodeId] = []
edgesOfNodes[curNodeId].append(len(edges) - 1)
#if(curNodeId > 200):
#print(edgesOfNodes)
#exit()
#print("appending edge " + str(nodes[theOtherNode]) + " - " + str(node) )
#smarterEdges.append( [curNodeId,theOtherNode] )
#print("ids are " + str([curNodeId,theOtherNode]) )
foundFaces = []
currentFaceBeingBuilt = []
visitedEdges = {}
for curEdgeId, curEdge in enumerate(edges):
startingNodeId = curEdge[0]
startingPoint = nodes[startingNodeId]
#print("starting point: " + str(startingPoint))
if(curEdgeId in visitedEdges): continue
print("first node coordinates " + str(nodes[curEdge[0]]))
currentFaceBeingBuilt.append(curEdge[0])
visitedEdges[curEdgeId] = True
#print(curEdge)
nextNode = curEdge[1]
#print("next node has id " + str(curEdge[1]))
nextNodeCoords = nodes[curEdge[1]]
#print("looks like " + str(nextNodeCoords))
#print(nextNode)
prevNode = startingPoint
#print("new cycle")
for k in range(1000):
#print("K " + str(k))
if(nextNode == startingNodeId):
#print("found start")
foundFaces.append(currentFaceBeingBuilt)
currentFaceBeingBuilt = []
break
print("next node coordinates " + str(nodes[nextNode]))
currentFaceBeingBuilt.append(nextNode)
node1 = nodes[nextNode]
#print(node1)
nextNodeIds = edgesOfNodes[nextNode]
#print("that is connected to nodes with ids " + str(nextNodeIds))
largestAngle = None
largestEdgeId = None
for potentialNextEdgeId in nextNodeIds:
if(potentialNextEdgeId in visitedEdges): continue
v1 = numpy.subtract(node1, prevNode)
edge2 = edges[potentialNextEdgeId]
node2 = nodes[edge2[1]]
v2 = numpy.subtract(node2, node1)
dot = v1[0]*v2[0] + v1[1]*v2[1] # dot product
det = v1[0]*v2[1] - v1[1]*v2[0] # determinant
angle = math.atan2(det, dot) # atan2(y, x) or atan2(sin, cos)
#print("round angle " + str(round(angle,5)))
if(round(angle,5) == round(math.pi,5)):
continue
#print("wtf")
#print("s1 " + str(startingPoint))
#print("dot " + str(dot))
#print("det " + str(det))
#print("v1 " + str(v1))
#print("n1 " + str(node1))
#print("n2 " + str(node2))
#print("v2 " + str(v2))
#print("angle " + str(angle))
if(angle > largestAngle):
#print(prevNode)
#print(node1)
#print(node2)
#print("largest " + str(largestAngle))
#print("edge we are checking out " + str((nodes[edge2[0]], nodes[edge2[1]])))
#print("s1 " + str(startingPoint))
#print("v1 " + str(v1))
#print("n1 " + str(node1))
#print("n2 " + str(node2))
#print("v2 " + str(v2))
#print("angle " + str(angle))
largestAngle = angle
largestEdgeId = potentialNextEdgeId
nextNode = edge2[1]
#print("s1 " + str(startingPoint))
#print("v1 " + str(v1))
#print("n1 " + str(node1))
#print("n2 " + str(node2))
#print("v2 " + str(v2))
#print("angle " + str(angle))
visitedEdges[largestEdgeId] = True
prevNode = node1
#print("k is " + str(k))
#print("started from " + str(startingNodeId))
#print("next node: " + str(nextNode))
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
fig, ax = plt.subplots()
import array
for face in foundFaces:
#print("face iteration")
#print(numpy.asarray(nodes[node]))
l = []
for nodeId in face:
node = nodes[nodeId]
#print("coords of node " + str(node))
l.append( [ node[0] , node[1]] )
#print(nodes[node])
if(len(face) == 3):
area = 0.5 * ((l[1][0]-l[0][0])*(l[2][1]-l[0][1])-(l[2][0]-l[0][0])*(l[1][1]-l[0][1]))
#print("triangle area " + str(area))
if area == 0.5: continue
xs, ys = zip(*l)
ax.fill(xs,ys,edgecolor='black')
#print(__list)
#polygon = Polygon(__list, True)
#patches.append(polygon)
#print face
plt.gca().invert_yaxis()
#plt.show()
import osgeo.ogr, osgeo.osr #we will need some packages
from osgeo import ogr #and one more for the creation of a new field
spatialReference = osgeo.osr.SpatialReference() #will create a spatial reference locally to tell the system what the reference will be
spatialReference.ImportFromEPSG(4326) #here we define this reference to be wgs84..
driver = osgeo.ogr.GetDriverByName('ESRI Shapefile') # will select the driver foir our shp-file creation.
shapeData = driver.CreateDataSource('/usr/local/Cellar/nginx/1.15.2/html/n/result.shp') #so there we will store our data
layer = shapeData.CreateLayer('Example', spatialReference, osgeo.ogr.wkbPoint) #this will create a corresponding layer for our data with given spatial information.
layer_defn = layer.GetLayerDefn()
layer = shapeData.CreateLayer('layer', spatialReference, osgeo.ogr.wkbPoint) #this will create a corresponding layer for our data with given spatial information.
layer_defn = layer.GetLayerDefn() # gets parameters of the current shapefile
index = 0
readerDict = csv.DictReader(csvfile, delimiter=delimiter)
for field in ['foo', 'bar']:
new_field = ogr.FieldDefn(field, ogr.OFTString) #we will create a new field with the content of our header
layer.CreateField(new_field)
row = {'LAT': 20, 'LON': 30, 'foo': 'lol', 'bar': 'hah' }
print(row['LAT'], row['LON'])
point = osgeo.ogr.Geometry(osgeo.ogr.wkbPoint)
point.AddPoint(float(row['LON']), float(row['LAT'])) #we do have LATs and LONs as Strings, so we convert them
feature = osgeo.ogr.Feature(layer_defn)
feature.SetGeometry(point) #set the coordinates
feature.SetFID(index)
for field in readerDict.fieldnames:
i = feature.GetFieldIndex(field)
feature.SetField(i, row[field])
layer.CreateFeature(feature)
#index += 1
shapeData.Destroy() #lets close the shapefile
#for i in range(num_polygons):
# polygon = Polygon(np.random.rand(num_sides ,2), True)
# patches.append(polygon)
#p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4)
#colors = 100*np.random.rand(len(patches))
#p.set_array(np.array(colors))
#ax.add_collection(p)
#plt.show()
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
198,
11748,
10688,
198,
198,
9600,
796,
269,
85,
17,
13,
320,... | 2.111194 | 4,011 |
from lecturelists import *
import requests
import re
import schedule
import time
with open('token.txt', 'r') as f:
bot_token = f.read().replace('\n','')
with open('secret.txt', 'r') as f:
err_bot_token, admin_no = f.read().replace('\n','').split(',')
handler()
schedule.every(1).minutes.do(handler)
while True:
schedule.run_pending()
time.sleep(1)
| [
6738,
19143,
20713,
1330,
1635,
198,
11748,
7007,
198,
11748,
302,
198,
11748,
7269,
198,
11748,
640,
198,
198,
4480,
1280,
10786,
30001,
13,
14116,
3256,
705,
81,
11537,
355,
277,
25,
198,
197,
13645,
62,
30001,
796,
277,
13,
961,
22... | 2.671642 | 134 |
#! /usr/bin/python3
# A Motor class used for abstraction purposes
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
18,
198,
198,
2,
317,
12533,
1398,
973,
329,
34651,
4959,
198
] | 3.35 | 20 |
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.datasets import make_classification
from .scorer import Scorer
def test_classification(models, one_hot=False):
'''
'''
for name, model in models.items():
X, y = make_classification(
n_samples=10000,
n_features=10,
n_redundant=4
)
for train_i, test_i in StratifiedShuffleSplit().split(X, y):
X_train, y_train = X[train_i], y[train_i]
X_test, y_test = X[test_i], y[test_i]
model.fit(X_train, y_train)
predicted = model.predict(X_test)
scorer = Scorer(predicted, y_test)
print(f'Model {name}')
print('')
print(f'F1 score: {scorer.f1_score()}')
print(f'Accuracy: {scorer.accuracy()}')
print(f'Precision: {scorer.precision()}')
print(f'Recall: {scorer.recall()}')
print('')
| [
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
29186,
1431,
2484,
18137,
41205,
198,
6738,
1341,
35720,
13,
19608,
292,
1039,
1330,
787,
62,
4871,
2649,
198,
6738,
764,
1416,
11934,
1330,
1446,
11934,
628,
198,
4299,
1332,
62,
4871,
264... | 2.051111 | 450 |
#!/usr/bin/env python
# tests.memtest
# Short script to exercise the corpus reader memory usage.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Tue Apr 19 16:38:28 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: memtest.py [0753dd0] benjamin@bengfort.com $
"""
Short script to exercise the corpus reader memory usage. Basically the script
runs through every document in the corpus and prints information about the
document to see how much memory is being used.
"""
##########################################################################
## Imports
##########################################################################
import os
import time
import argparse
from nltk import FreqDist
from functools import partial
from minke.corpus import BaleenCorpusReader
##########################################################################
## Static Variables
##########################################################################
PROJECT = os.path.join(os.path.dirname(__file__), "..")
CORPUS = os.path.join(PROJECT, "fixtures", "corpus")
def main(args):
"""
Runs a sequential scan over the corpus, for the given method, allowing
you to check Activity Monitor to ensure that memory isn't being overused.
"""
# Construct the corpus and fetch reader method
corpus = BaleenCorpusReader(args.corpus)
# If describe, print describe and exit
if args.describe:
print(corpus.describes(categories=args.categories))
return
# Find the method and set up counting
method = getattr(corpus, args.method)
counts = FreqDist()
start = time.time()
# Create the partial closure for the fields
if args.method == 'fields':
method = partial(method, (args.fields))
# Begin the sequential scan
for idx, item in enumerate(method(categories=args.categories)):
if args.limit is not None and idx >= args.limit:
break
try:
if args.method == 'docs':
if not args.quiet:
print(u"{: >7}: {}".format(idx+1, item['title']))
elif args.method == 'fields':
if not args.quiet:
print(u"{: >7}: {}".format(idx+1, item))
elif args.method == 'html':
if not args.quiet:
print(u"{}\n\n".format(item))
elif args.method == 'sizes':
print(u"{},{}".format(*item))
elif args.method in ('words',):
counts[item] += 1
if args.method in ('paras', 'sents', 'words'):
if not args.quiet:
if (idx + 1) % 1000 == 0:
print(u"{} {} scanned".format(idx+1, args.method))
except KeyboardInterrupt:
break
# Print out the time and effort
print(u"Scanned {} {} in {:0.3f} seconds.".format(
idx, args.method, time.time() - start
))
# Print out counts if we're doing that
if args.method == 'words':
for item in counts.most_common(100):
print(u" {}: {}".format(*item))
if __name__ == '__main__':
# Command line arguments
args = {
('--describe',): {
'action': 'store_true',
'help': 'describe the corpus and exit',
},
('-Q', '--quiet'): {
'action':'store_true',
'help': 'limit the amount of output being printed'
},
('-c', '--categories'): {
'default': None,
'nargs': '*',
'metavar': 'CAT',
'help': 'specify the categories to stream over',
},
('-f', '--fields'): {
'default': None,
'nargs': '*',
'metavar': 'FIELD',
'help': 'for the fields method, specify the desired fields'
},
('-n', '--limit'): {
'type': int,
'default': None,
'metavar': 'N',
'help': 'limit the number of rows scanned in the corpus'
},
('-d', '--corpus'): {
'type': str,
'default': CORPUS,
'help': 'change the place to look for the corpus root'
},
('method',): {
'nargs': '?',
'type': str,
'default': 'docs',
'choices': ['docs', 'fields', 'html', 'paras', 'sents', 'words', 'sizes'],
'help': 'specify the scanning method to use',
}
}
# Create the parser
parser = argparse.ArgumentParser()
for pargs, kwargs in args.items():
parser.add_argument(*pargs, **kwargs)
# Parse the arguments and execute main
options = parser.parse_args()
main(options)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
5254,
13,
11883,
9288,
198,
2,
10073,
4226,
284,
5517,
262,
35789,
9173,
4088,
8748,
13,
198,
2,
198,
2,
6434,
25,
220,
220,
14533,
14964,
3319,
1279,
11848,
1516,
3319,
31,
17080... | 2.339391 | 2,036 |
import pytest
from lxml.etree import XPathEvalError
from pat import pat
def test_valid_xpath(html_blob):
"""Ensures a valid XPath produces results
"""
# extract only the first table
xpath = '/descendant::table[1]'
results = pat(html_blob, xpath_query=xpath)
assert len(results) == 1
def test_invalid_xpath(html_blob):
"""Ensures an invalid XPath halts execution
"""
# invalid xpath
invalid_xpath = '/descendant:table[1]'
with pytest.raises(XPathEvalError):
pat(html_blob, xpath_query=invalid_xpath)
| [
11748,
12972,
9288,
198,
198,
6738,
300,
19875,
13,
316,
631,
1330,
11961,
776,
36,
2100,
12331,
198,
198,
6738,
1458,
1330,
1458,
628,
198,
4299,
1332,
62,
12102,
62,
87,
6978,
7,
6494,
62,
2436,
672,
2599,
198,
220,
220,
220,
3722... | 2.561644 | 219 |
# Generated by Django 2.2.6 on 2019-10-14 02:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
21,
319,
13130,
12,
940,
12,
1415,
7816,
25,
2857,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
142... | 3.019231 | 52 |
#Programs that calculate the averages of numbers.
#call the program from main.
#The first program is for a set amount of three numbers.
#The second program is more flexible for a user defined amount of numbers.
main()
| [
2,
15167,
82,
326,
15284,
262,
25694,
286,
3146,
13,
201,
198,
201,
198,
201,
198,
2,
13345,
262,
1430,
422,
1388,
13,
201,
198,
201,
198,
2,
464,
717,
1430,
318,
329,
257,
900,
2033,
286,
1115,
3146,
13,
201,
198,
201,
198,
2,
... | 3.338028 | 71 |
import datetime
import platform
import dico
import dico_command
import psutil
from dico.utils import rgb
from . import __version__
from .utils import parse_bytesize, parse_second
try:
from dico_interaction import __version__ as __inter_version__
except ImportError:
__inter_version__ = None
privileged = ["GUILD_MEMBERS", "GUILD_PRESENCES"]
| [
11748,
4818,
8079,
198,
11748,
3859,
198,
198,
11748,
288,
3713,
198,
11748,
288,
3713,
62,
21812,
198,
11748,
26692,
22602,
198,
6738,
288,
3713,
13,
26791,
1330,
46140,
198,
198,
6738,
764,
1330,
11593,
9641,
834,
198,
6738,
764,
2679... | 3.094828 | 116 |
from sys import stdout
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
from sklearn.cross_decomposition import PLSRegression
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import mean_squared_error, r2_score
# dataset
data = pd.read_csv("../../../_data/peach_spectra_brix.csv")
data.head()
y_train = data['Brix'].values
x_train = data.values[:, 1:]
# Plot the mses
# Plot the data
wl = np.arange(1100, 2300, 2)
X2 = savgol_filter(x_train, 17, polyorder=2, deriv=2)
r2s = []
mses = []
rpds = []
xticks = np.arange(1, 41)
for n_comp in xticks:
y_cv, r2, mse, rpd = optimise_pls_cv(X2, y_train, n_comp)
r2s.append(r2)
mses.append(mse)
rpds.append(rpd)
# plot_metrics(mses, 'MSE', 'min')
# plot_metrics(rpds, 'RPD', 'max')
# plot_metrics(r2s, 'R2', 'max')
y_cv, r2, mse, rpd = optimise_pls_cv(X2, y_train, 7)
print('R2: %0.4f, MSE: %0.4f, RPD: %0.4f' %(r2, mse, rpd))
plt.figure(figsize=(6, 6))
with plt.style.context('ggplot'):
plt.scatter(y_train, y_cv, color='red')
plt.plot(y_train, y_train, '-g', label='Expected regression line')
z = np.polyfit(y_train, y_cv, 1)
plt.plot(np.polyval(z, y_train), y_train, color='blue', label='Predicted regression line')
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.legend()
plt.plot()
plt.show()
| [
6738,
25064,
1330,
14367,
448,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
6738,
629,
541,
88,
13,
12683,
282,
1330,
6799,
70,
349,
... | 2.170047 | 641 |
# -*- coding: utf-8 -*-
# @Time : 2019/5/11 15:12
# @Author : LegenDong
# @User : legendong
# @File : __init__.py.py
# @Software: PyCharm
from .channel_attention_layer import *
from .nan_attention_layer import *
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
13130,
14,
20,
14,
1157,
1315,
25,
1065,
198,
2,
2488,
13838,
220,
1058,
3564,
268,
35,
506,
198,
2,
2488,
12982,
220,
220,
220,
... | 2.322917 | 96 |
import os
from distutils.util import strtobool
DEBUG = strtobool(os.environ.get('DEBUG', 'yes'))
SECRET_KEY = os.environ.get('SECRET', 'defaultsecret')
APPLICATION_ROOT = '/api/assistant'
API_URL = 'http://localhost/api/accounts'
| [
11748,
28686,
198,
6738,
1233,
26791,
13,
22602,
1330,
965,
83,
672,
970,
198,
198,
30531,
796,
965,
83,
672,
970,
7,
418,
13,
268,
2268,
13,
1136,
10786,
30531,
3256,
705,
8505,
6,
4008,
198,
23683,
26087,
62,
20373,
796,
28686,
13... | 2.697674 | 86 |
import math
from metrics.Metric import Metric
class TPRNormDiff(Metric):
"""
This metric calculates the normalized TPR difference.
Multiple protected classes are treated as one large group, so that this compares the privileged
class to all non-privileged classes as a group.
"""
class TNRNormDiff(TPRNormDiff):
"""This metric calculates the TNR ratio."""
def calc_tpr_protected(actual, predicted, sensitive, unprotected_vals, positive_pred, is_tnr=False):
"""
Returns P(C=YES|Y=YES, sensitive=privileged) and P(C=YES|Y=YES, sensitive=not privileged)
in that order where C is the predicited classification and where all not privileged values are
considered equivalent. Assumes that predicted and sensitive have the same lengths.
If `is_tnr` is true, this actually computes the TNRs instead of the TPRs.
"""
unprotected_true_pos = 0.0
unprotected_pos_label = 0.0
protected_true_pos = 0.0
protected_pos_label = 0.0
for protected_val, predicted_val, label in zip(sensitive, predicted, actual):
if not is_tnr: # do TPR in this case
criterion = str(label) == str(positive_pred) # prediction should have been positive
else:
criterion = str(label) != str(positive_pred) # prediction should have been negative
if criterion:
if str(predicted_val) == str(label): # prediction was correct
if protected_val in unprotected_vals:
unprotected_true_pos += 1
else:
protected_true_pos += 1
if protected_val in unprotected_vals:
unprotected_pos_label += 1
else:
protected_pos_label += 1
unprotected_tpr = 0.0
if unprotected_pos_label > 0:
unprotected_tpr = unprotected_true_pos / unprotected_pos_label
protected_tpr = 0.0
if protected_pos_label > 0:
protected_tpr = protected_true_pos / protected_pos_label
return unprotected_tpr, protected_tpr
| [
11748,
10688,
198,
198,
6738,
20731,
13,
9171,
1173,
1330,
3395,
1173,
628,
198,
4871,
309,
4805,
35393,
28813,
7,
9171,
1173,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
770,
18663,
43707,
262,
39279,
309,
4805,
3580,
13,
62... | 2.682058 | 758 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D alter op and legalize functions for x86"""
import logging
import re
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from .conv2d import _get_default_config
from .conv2d_int8 import is_int8_hw_support, _get_default_config_int8
from ..utils import get_const_tuple
from ..nn import conv2d_legalize, conv2d_alter_layout
from ..nn.utils import get_pad_tuple
# Module-level logger for the x86 ("topi") conv2d schedules.
logger = logging.getLogger("topi")
# Match packed layout strings such as "NCHW4c" (data) and "OIHW4i4o" (kernel).
_NCHWc_matcher = re.compile("^NCHW[0-9]+c$")
_OIHWio_matcher = re.compile("^OIHW[0-9]+i[0-9]+o$")
@conv2d_alter_layout.register("cpu")
@conv2d_legalize.register("cpu")
def _conv2d_legalize(attrs, inputs, arg_types):
    """Legalizes Conv2D op for x86.

    Rewrites int8 convolutions so they can use fast Intel int8 (VNNI)
    instructions: shifts int8 data into the uint8 range, and pads input /
    output channels up to the multiples those instructions require.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    arg_types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr, or None when no legalization applies.
    """
    # Dilation not supported yet. Return None if dilation is not (1, 1)
    dilation = attrs.get_int_tuple("dilation")
    if not (dilation[0] == 1 and dilation[1] == 1):
        return None
    # No legalization for depthwise convolutions yet.
    groups = attrs.get_int("groups")
    if groups != 1:
        return None
    # Collect the input tensors.
    data_tensor, kernel_tensor = arg_types[0], arg_types[1]
    data_dtype = data_tensor.dtype
    kernel_dtype = kernel_tensor.dtype
    # Collect the output tensor.
    output_tensor = arg_types[2]
    # Collect the input exprs.
    data, kernel = inputs
    # Get the conv attrs (as a plain mutable dict copy).
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    is_int8_inputs = False
    # If both the inputs are int8, we can add 128 to make the input dtype uint8, and then adjust the
    # output. This will help picking up Intel VNNI instructions.
    # Original --> C = A (conv) B
    # A and B are int8
    #   C = (A + 128 - 128) (conv) B
    #   C = (A' conv B) - 128 (conv) B
    # where A' = A + 128
    # and 128 (conv) B is basically a reduce on CRS axis for weights.
    if data_tensor.dtype == "int8" and kernel_tensor.dtype == "int8":
        is_int8_inputs = True
        padding = attrs.get_int_tuple("padding")
        kh, kw = attrs.get_int_tuple("kernel_size")
        # (pad-top, pad-left, pad-bottom, pad-right)
        pt, pl, pb, pr = get_pad_tuple(padding, (kh, kw))
        if attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
            # Per-output-channel kernel sum: the "128 (conv) B" correction term.
            adjust_shift = relay.sum(relay.cast(kernel, dtype="int32"), axis=(0, 1, 2))
            pad_width = ((0, 0), (pt, pb), (pl, pr), (0, 0))
        elif attrs["data_layout"] == "NCHW" and attrs["kernel_layout"] == "OIHW":
            pad_width = ((0, 0), (0, 0), (pt, pb), (pl, pr))
            adjust_shift = relay.sum(relay.cast(kernel, dtype="int32"), axis=(1, 2, 3))
            # Broadcast the per-O vector across the spatial output axes.
            adjust_shift = relay.expand_dims(adjust_shift, axis=1, num_newaxis=2)
        else:
            return None
        # Shift data from int8 into the uint8 range: A' = A + 128.
        data = relay.cast(data, "int32")
        data = relay.add(data, relay.const(128, "int32"))
        data = relay.cast(data, "uint8")
        # Do external padding as pad value has to be 128.
        if any(padding):
            data = relay.nn.pad(data, pad_width=pad_width, pad_value=128)
        new_attrs["padding"] = (0, 0)
        # The data type is now shifted to uint8
        data_dtype = "uint8"
        # Multiply 128 to adjust shift.
        adjust_shift = relay.multiply(adjust_shift, relay.const(128, "int32"))
    # Legalize if the datatypes are suitable for fast Int8 instructions. Int8 instructions require
    # input channel to be a multiple of 4 and output channels to be a multiple of 16. For input
    # channels, we pad both the inputs and weights input channels. For output channels, we pad the
    # weight and stride_slice the output.
    if is_int8_hw_support(data_dtype, kernel_dtype):
        # Flags to remember if the expr is modified
        ic_modified = False
        oc_modified = False
        # Find the value of input and output channel.
        in_channel = -1
        out_channel = -1
        if attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
            in_channel = data_tensor.shape[3].value
            out_channel = kernel_tensor.shape[3].value
        elif attrs["data_layout"] == "NCHW" and attrs["kernel_layout"] == "OIHW":
            in_channel = data_tensor.shape[1].value
            out_channel = kernel_tensor.shape[0].value
        else:
            return None
        if in_channel % 4 != 0:
            # Round the input channel count up to the next multiple of 4.
            new_in_channel = ((in_channel + 4) // 4) * 4
            diff = new_in_channel - in_channel
            if attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
                data = relay.nn.pad(data, pad_width=((0, 0), (0, 0), (0, 0), (0, diff)))
                kernel = relay.nn.pad(kernel, pad_width=((0, 0), (0, 0), (0, diff), (0, 0)))
                ic_modified = True
            elif attrs["data_layout"] == "NCHW" and attrs["kernel_layout"] == "OIHW":
                # Axis 1 is C for NCHW data and I for OIHW kernels, so the
                # same pad_width works for both tensors.
                pad_width = ((0, 0), (0, diff), (0, 0), (0, 0))
                data = relay.nn.pad(data, pad_width=pad_width)
                kernel = relay.nn.pad(kernel, pad_width=pad_width)
                ic_modified = True
            else:
                return None
        new_out_channel = out_channel
        if out_channel % 16 != 0:
            # Round the output channel count up to the next multiple of 16.
            new_out_channel = ((out_channel + 16) // 16) * 16
            diff = new_out_channel - out_channel
            if attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
                kernel = relay.nn.pad(kernel, pad_width=((0, 0), (0, 0), (0, 0), (0, diff)))
                oc_modified = True
            elif attrs["data_layout"] == "NCHW" and attrs["kernel_layout"] == "OIHW":
                kernel = relay.nn.pad(kernel, pad_width=((0, diff), (0, 0), (0, 0), (0, 0)))
                oc_modified = True
            else:
                return None
        if oc_modified:
            # Extra output channels were added: convolve at the padded
            # width, then slice the result back to the original shape.
            new_attrs["channels"] = new_out_channel
            out = tvm.relay.nn.conv2d(data, kernel, **new_attrs)
            original_out_shape = [x.value for x in output_tensor.shape]
            out = relay.strided_slice(out, begin=[0, 0, 0, 0], end=original_out_shape)
        else:
            out = relay.nn.conv2d(data, kernel, **new_attrs)
        if is_int8_inputs:
            # Undo the +128 data shift: subtract 128 * (kernel summed over CRS).
            out = relay.subtract(out, adjust_shift)
        return out
    return None
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
383,
7054,... | 2.329943 | 3,146 |
import sys
import os
from BWSTableEditors import *
# returns value and modify_mode
# Script entry point. `main` is presumably provided by the
# `BWSTableEditors` star-import above — TODO confirm it is exported there.
if __name__ == '__main__':
    main()
| [
11748,
25064,
198,
11748,
28686,
198,
6738,
37869,
2257,
540,
18378,
669,
1330,
1635,
628,
198,
2,
5860,
1988,
290,
13096,
62,
14171,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
13... | 2.931818 | 44 |
from django.conf import settings
from django.conf.urls import patterns, url
from mapentity.registry import MapEntityOptions
from geotrek.altimetry.views import (ElevationProfile, ElevationChart,
ElevationArea, serve_elevation_chart)
# Serve pre-rendered elevation profile charts at
#   <MEDIA_URL>/profiles/<model_name>-<pk>.png
# NOTE(review): the "." before "png" is an unescaped regex dot (matches any
# character) — presumably harmless, but confirm it is intentional.
urlpatterns = patterns(
    '',
    url(r'^%s/profiles/(?P<model_name>.+)-(?P<pk>\d+).png$' % settings.MEDIA_URL.strip('/'), serve_elevation_chart),
)
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
19016,
198,
6738,
3975,
26858,
13,
2301,
4592,
1330,
9347,
32398,
29046,
198,
198,
6738,
4903,
313,
37818,
13,
2501,
320,
11973... | 2.38764 | 178 |
import datetime
import exceptions
import string
from dateutil import parser
SCHEDULE_FIELDS = {'A': ['back_reference_sched_name', 'back_reference_tran_id_number', 'conduit_city', 'conduit_name', 'conduit_state', 'conduit_street1', 'conduit_street2', 'conduit_zip', 'contribution_aggregate', 'contribution_amount', 'contribution_date', 'contribution_date_formatted', 'contribution_purpose_code', 'contribution_purpose_descrip', 'contributor_city', 'contributor_employer', 'contributor_first_name', 'contributor_last_name', 'contributor_middle_name', 'contributor_name', 'contributor_occupation', 'contributor_organization_name', 'contributor_prefix', 'contributor_state', 'contributor_street_1', 'contributor_street_2', 'contributor_suffix', 'contributor_zip', 'donor_candidate_district', 'donor_candidate_fec_id', 'donor_candidate_first_name', 'donor_candidate_last_name', 'donor_candidate_middle_name', 'donor_candidate_name', 'donor_candidate_office', 'donor_candidate_prefix', 'donor_candidate_state', 'donor_candidate_suffix', 'donor_committee_fec_id', 'donor_committee_name', 'election_code', 'election_other_description', 'entity_type', 'filer_committee_id_number', 'filing_number', 'form_type', 'line_sequence', 'memo_code', 'memo_text_description', 'reference_code', 'superseded_by_amendment', 'transaction_id'], 'B': ['back_reference_sched_name', 'back_reference_tran_id_number', 'beneficiary_candidate_district', 'beneficiary_candidate_fec_id', 'beneficiary_candidate_first_name', 'beneficiary_candidate_last_name', 'beneficiary_candidate_middle_name', 'beneficiary_candidate_name', 'beneficiary_candidate_office', 'beneficiary_candidate_prefix', 'beneficiary_candidate_state', 'beneficiary_candidate_suffix', 'beneficiary_committee_fec_id', 'beneficiary_committee_name', 'category_code', 'communication_date', 'conduit_city', 'conduit_name', 'conduit_state', 'conduit_street_1', 'conduit_street_2', 'conduit_zip', 'election_code', 'election_other_description', 'entity_type', 'expenditure_amount', 'expenditure_date', 'expenditure_date_formatted', 
'expenditure_purpose_code', 'expenditure_purpose_descrip', 'filer_committee_id_number', 'filing_number', 'form_type', 'line_sequence', 'memo_code', 'memo_text_description', 'payee_city', 'payee_first_name', 'payee_last_name', 'payee_middle_name', 'payee_name', 'payee_organization_name', 'payee_prefix', 'payee_state', 'payee_street_1', 'payee_street_2', 'payee_suffix', 'payee_zip', 'ref_to_sys_code_ids_acct', 'refund_or_disposal_of_excess', 'semi_annual_refunded_bundled_amt', 'superseded_by_amendment', 'transaction_id'], 'E': ['back_reference_sched_name', 'back_reference_tran_id_number', 'calendar_y_t_d_per_election_office', 'candidate_district', 'candidate_first_name', 'candidate_id_number', 'candidate_last_name', 'candidate_middle_name', 'candidate_name', 'candidate_office', 'candidate_prefix', 'candidate_state', 'candidate_suffix', 'category_code', 'completing_first_name', 'completing_last_name', 'completing_middle_name', 'completing_prefix', 'completing_suffix', 'date_signed', 'date_signed_formatted', 'effective_date', 'election_code', 'election_other_description', 'entity_type', 'expenditure_amount', 'expenditure_date', 'expenditure_date_formatted', 'dissemination_date','dissemination_date_formatted', 'expenditure_purpose_code', 'expenditure_purpose_descrip', 'filer_committee_id_number', 'filing_number', 'form_type', 'line_sequence', 'memo_code', 'memo_text_description', 'payee_city', 'payee_cmtte_fec_id_number', 'payee_first_name', 'payee_last_name', 'payee_middle_name', 'payee_name', 'payee_organization_name', 'payee_prefix', 'payee_state', 'payee_street_1', 'payee_street_2', 'payee_suffix', 'payee_zip', 'superseded_by_amendment', 'support_oppose_code', 'transaction_id'], 'O': ['filer_committee_id_number', 'filing_number', 'form_parser', 'form_type', 'line_sequence', 'line_dict', 'superseded_by_amendment', 'transaction_id']}
# These candidates / committees have submitted what appear to be fictitious
# filings. There's not really a procedure for FEC to deal with them, so the
# filings are received and eventually marked as F99's instead of F3's —
# but that happens farther down the line, hence this local blacklist.
BLACKLISTED_CANDIDATES = ['C00507947', 'C00428599']
BLACKLISTED_COMMITTEES = ['P20003851', 'P80003205']
def recode_to_utf8(text):
    """Re-encode raw filing bytes from cp1252 into UTF-8 bytes.

    The FEC spec allows ascii 9,10,11,13,32-126,128-156,160-168,173.
    """
    decoded = text.decode('cp1252')
    return decoded.encode('utf8')
# *** NOTE THIS IS RUN ON EVERY SINGLE ENTRY ***
# Optimize whenever possible.
##
## CYCLE UTILITIES
##
def get_cycle(year):
    """Round a year up to its even-numbered election cycle.

    Takes a four-digit year string or integer; odd years are bumped up by
    one, because election cycles fall on even-numbered years.

    Returns:
        The cycle as a string (callers compare and concatenate strings),
        or None when ``year`` cannot be parsed as an integer.
    """
    try:
        this_year = int(year)
    except (TypeError, ValueError):
        # ValueError: non-numeric strings. TypeError: non-numeric types
        # such as None — previously this leaked a TypeError even though
        # the contract promises None for unparsable input.
        return None
    if this_year % 2 != 0:
        this_year = this_year + 1
    return str(this_year)
def is_valid_cycle(string_cycle):
    """Report whether ``string_cycle`` names a plausible election cycle.

    Valid values parse as an even year between 2000 and six years past
    the current cycle (inclusive). For example, in 2014: 2001 -> False,
    2000 -> True, 2020 -> True, 2086 -> False.

    NOTE(review): the original doctest here called a non-existent
    ``is_valid_four_digit_string_cycle`` and its expected output depends
    on the current date, so it was rewritten as plain prose.
    """
    # Figure out this year; pad by 1 if it's an odd-numbered
    # year because election cycles are (helpfully) on even-
    # numbered years.
    this_year = int(get_cycle(datetime.date.today().year))
    # If you're not passing something that can be an integer,
    # I am not even interested in helping you.
    try:
        # The only hard-coded date is a six-year horizon in the future:
        # range(..., this_year + 8, 2) tops out at this_year + 6.
        # Assumption is that three cycles (two Presidential) is the
        # furthest in the future we care about.
        if int(string_cycle) in range(2000, this_year + 8, 2):
            return True
    except ValueError:
        pass
    return False
def get_four_digit_year(two_digit_string):
    """Expand a two-digit year into a four-digit year string.

    Anything up to ten years past the current year is assumed to be in
    the 2000s; everything else lands in the 1900s. E.g. 99 -> '1999',
    14 -> '2014'.

    Returns:
        The four-digit year as a string, or None when the input cannot
        be parsed as an integer (mirrors ``get_cycle``).
    """
    try:
        two_digit_year = int(two_digit_string)
    except (TypeError, ValueError):
        # TypeError: non-numeric types such as None; ValueError: junk
        # strings. Both count as "unparsable" -> None.
        return None
    # Last two digits of the current year, e.g. 2025 -> 25.
    this_year = int(str(datetime.date.today().year)[2:4])
    if two_digit_year <= this_year + 10:
        four_digit_year = 2000 + two_digit_year
    else:
        four_digit_year = 1900 + two_digit_year
    return str(four_digit_year)
##
## SCHEDULE PARSERS
##
def skeda_from_skedadict(line_dict, filing_number, line_sequence, is_amended):
    """Annotate a parsed Schedule A row with filing-level metadata.

    Truncates the transaction id to the 20-character legal maximum and
    stamps the row with its filing number, line sequence and amendment
    status. Mutates and returns ``line_dict``.
    """
    # Some filers submit illegally long transaction ids; clip to 20 chars.
    line_dict['transaction_id'] = line_dict['transaction_id'][:20]
    line_dict.update(
        line_sequence=line_sequence,
        superseded_by_amendment=is_amended,
        filing_number=filing_number,
    )
    raw_date = line_dict['contribution_date']
    if raw_date:
        try:
            line_dict['contribution_date_formatted'] = parser.parse(raw_date)
        except ValueError:
            # Unparseable date: leave the formatted field unset.
            pass
    return line_dict
def skeda_from_f65(line_dict, filing_number, line_sequence, is_amended):
    """Treat an F65 48-hour contribution notice as a Schedule A row.

    These notices are later superseded by the candidate's periodic F3
    report; the caller tracks that via ``is_amended``. Mutates and
    returns ``line_dict``.
    """
    # Transaction ids are capped at 20 characters; clip overruns.
    line_dict['transaction_id'] = line_dict['transaction_id'][:20]
    metadata = {
        'superseded_by_amendment': is_amended,
        'line_sequence': line_sequence,
        'filing_number': filing_number,
    }
    line_dict.update(metadata)
    contribution_date = line_dict['contribution_date']
    if contribution_date:
        try:
            line_dict['contribution_date_formatted'] = parser.parse(contribution_date)
        except ValueError:
            pass  # Unparseable dates simply skip the formatted field.
    return line_dict
def skeda_from_f56(line_dict, filing_number, line_sequence, is_amended):
    """Fold a disclosed donation to a non-committee filer (F56) into Schedule A.

    Example: See filing ID 847857.

    Truncates the transaction id to the 20-character legal maximum and
    stamps the row with filing number, line sequence and amendment
    status. Mutates and returns ``line_dict``.
    """
    line_dict['transaction_id'] = line_dict['transaction_id'][:20]
    line_dict['superseded_by_amendment'] = is_amended
    line_dict['line_sequence'] = line_sequence
    line_dict['filing_number'] = filing_number
    if line_dict['contribution_date']:
        try:
            line_dict['contribution_date_formatted'] = parser.parse(line_dict['contribution_date'])
        except ValueError:
            # Unparseable date: leave contribution_date_formatted unset.
            pass
    # (Removed a no-op self-assignment of contribution_amount that was
    # left over from an earlier type-conversion step.)
    return line_dict
def skeda_from_f92(line_dict, filing_number, line_sequence, is_amended):
    """Fold an electioneering-communication donation (F92) into Schedule A.

    Mutates and returns ``line_dict`` with filing-level bookkeeping added.
    """
    # Enforce the 20-character transaction-id limit.
    line_dict['transaction_id'] = line_dict['transaction_id'][:20]
    line_dict['superseded_by_amendment'] = is_amended
    line_dict['line_sequence'] = line_sequence
    line_dict['filing_number'] = filing_number
    when = line_dict['contribution_date']
    if when:
        try:
            parsed = parser.parse(when)
        except ValueError:
            pass  # Bad date string: no formatted value is stored.
        else:
            line_dict['contribution_date_formatted'] = parsed
    return line_dict
def skeda_from_f132(line_dict, filing_number, line_sequence, is_amended):
    """Fold an inaugural-committee donation (F132) into Schedule A.

    Mutates and returns ``line_dict``.
    """
    # Transaction ids longer than 20 characters are illegal; truncate.
    line_dict['transaction_id'] = line_dict['transaction_id'][:20]
    line_dict.update({
        'superseded_by_amendment': is_amended,
        'line_sequence': line_sequence,
        'filing_number': filing_number,
    })
    date_string = line_dict['contribution_date']
    if date_string:
        try:
            line_dict['contribution_date_formatted'] = parser.parse(date_string)
        except ValueError:
            # Keep going without the formatted date.
            pass
    return line_dict
def skeda_from_f133(line_dict, filing_number, line_sequence, is_amended):
    """Fold an inaugural donor refund (F133) into Schedule A.

    Refunds are stored as *negative* contributions: the refund amount and
    date keys are renamed to their contribution equivalents, and a
    positive amount has its sign flipped. Mutates and returns
    ``line_dict``.
    """
    line_dict['transaction_id'] = line_dict['transaction_id'][:20]
    line_dict['line_sequence'] = line_sequence
    line_dict['superseded_by_amendment'] = is_amended
    line_dict['filing_number'] = filing_number
    # Map refund fields onto the contribution fields used everywhere else.
    line_dict['contribution_amount'] = line_dict.pop('refund_amount')
    line_dict['contribution_date'] = line_dict.pop('refund_date')
    # Flip signs if this number is positive, so the refund nets out of
    # totals. (A redundant self-assignment was removed here.)
    if line_dict['contribution_amount'] > 0:
        line_dict['contribution_amount'] = 0 - line_dict['contribution_amount']
    if line_dict['contribution_date']:
        try:
            line_dict['contribution_date_formatted'] = parser.parse(line_dict['contribution_date'])
        except ValueError:
            # Unparseable date: leave the formatted field unset.
            pass
    return line_dict
def otherline_from_line(line_dict, filing_number, line_sequence, is_amended, filer_id):
    """Stamp a miscellaneous ("other") filing line with filing metadata.

    The whole row stays one flat dict because it is destined for an
    hstore column:
    http://initd.org/psycopg/docs/extras.html#hstore-data-type
    """
    # Summary rows (e.g. F3S) legitimately carry no transaction id, so
    # only truncate when one is present; clip to the 20-character max.
    if 'transaction_id' in line_dict:
        line_dict['transaction_id'] = line_dict['transaction_id'][:20]
    line_dict['superseded_by_amendment'] = is_amended
    line_dict['line_sequence'] = line_sequence
    line_dict['filing_number'] = filing_number
    line_dict['filer_committee_id_number'] = filer_id
    # Text records carry rec_type instead of a form type; mirror it across.
    if 'rec_type' in line_dict:
        line_dict['form_type'] = line_dict['rec_type']
    return line_dict
def transform_line(line_dict, flat_filing):
    """Route one parsed filing line to its schedule-specific transformer.

    Returns a tuple: ('skedletter', datadict) — the letter names the
    destination schedule ('A', 'B', 'E', 'A3L', or 'O' for everything
    else).

    NOTE(review): skedb_from_skedbdict, skede_from_skededict and
    skede_from_f57 are referenced here but not defined in this module's
    visible scope — confirm they are provided elsewhere in the package.
    """
    filing_id = flat_filing['filing_id']
    line_sequence = line_dict['line_sequence']
    # Lines default to not-amended unless the caller marked them.
    is_amended = line_dict.get('is_amended', False)
    filer_id = flat_filing['fec_id']
    if line_dict['form_parser'] == 'SchA':
        return ('A', skeda_from_skedadict(line_dict, filing_id, line_sequence, is_amended))
    elif line_dict['form_parser'] == 'SchB':
        return ('B', skedb_from_skedbdict(line_dict, filing_id, line_sequence, is_amended))
    elif line_dict['form_parser'] == 'SchE':
        return ('E', skede_from_skededict(line_dict, filing_id, line_sequence, is_amended))
    elif line_dict['form_parser'] == 'SchA3L':
        # These look very similar to sked A's, so reuse that transformer.
        return ('A3L', skeda_from_skedadict(line_dict, filing_id, line_sequence, is_amended))
    # Treat 48-hour contribution notices like sked A.
    # Requires special handling for amendment, since these are superceded by regular F3 forms.
    elif line_dict['form_parser'] == 'F65':
        return ('A', skeda_from_f65(line_dict, filing_id, line_sequence, is_amended))
    # Disclosed donor to non-committee. Rare.
    elif line_dict['form_parser'] == 'F56':
        return ('A', skeda_from_f56(line_dict, filing_id, line_sequence, is_amended))
    # Disclosed electioneering donor.
    elif line_dict['form_parser'] == 'F92':
        return ('A', skeda_from_f92(line_dict, filing_id, line_sequence, is_amended))
    # Inaugural donors.
    elif line_dict['form_parser'] == 'F132':
        return ('A', skeda_from_f132(line_dict, filing_id, line_sequence, is_amended))
    # Inaugural refunds.
    elif line_dict['form_parser'] == 'F133':
        return ('A', skeda_from_f133(line_dict, filing_id, line_sequence, is_amended))
    # IE's disclosed by non-committees. Note that they use this for * both * quarterly and 24-hour notices.
    # There's not much consistency with this -- be careful with superceding stuff.
    elif line_dict['form_parser'] == 'F57':
        return ('E', skede_from_f57(line_dict, filing_id, line_sequence, is_amended))
    # If this is some other kind of line, just dump it in `other lines.`
    else:
        return ('O', otherline_from_line(line_dict, filing_id, line_sequence, is_amended, filer_id))
| [
11748,
4818,
8079,
198,
11748,
13269,
198,
11748,
4731,
198,
198,
6738,
3128,
22602,
1330,
30751,
198,
198,
50,
3398,
1961,
24212,
62,
11674,
3698,
5258,
796,
1391,
6,
32,
10354,
37250,
1891,
62,
35790,
62,
1416,
704,
62,
3672,
3256,
... | 2.617647 | 5,236 |
# Equation (c) Baltasar 2019 MIT License <baltasarq@gmail.com>
from equations.Equation import Equation
from equations.Component import create
from solver.solve import solve
from parser import parser
# Script entry point: parse a small system of linear equations, echo
# each parsed equation, then print the solver's result.
if __name__ == "__main__":
    eqs = parser("x = 5 + 3\n2 = y + 3\nz = x + y")
    print("\n".join([str(eq) for eq in eqs]))
    print(solve(eqs))
| [
2,
7889,
341,
357,
66,
8,
9035,
42391,
13130,
17168,
13789,
1279,
65,
2501,
42391,
80,
31,
14816,
13,
785,
29,
198,
198,
6738,
27490,
13,
23588,
341,
1330,
7889,
341,
198,
6738,
27490,
13,
21950,
1330,
2251,
198,
6738,
1540,
332,
13... | 2.740157 | 127 |
from aiortc_media_proxy.server import init
# Start the aiortc media proxy server.
init()
| [
6738,
257,
72,
419,
66,
62,
11431,
62,
36436,
13,
15388,
1330,
2315,
198,
198,
15003,
3419,
198
] | 2.833333 | 18 |
import json
import logging
import os
import sys
from argparse import ArgumentParser
import re
import numpy as np
import pandas as pd
import torch
from transformers import GPT2Tokenizer
from src.data.cleaning import mask_not_na, inds_unique, mask_long_enough
from src.data.nli import TransformersSeqPairDataset
from src.models.pg_trainer import AutoregressivePGTrainer
# Command-line interface: experiment bookkeeping, data/model locations
# and the usual optimization hyperparameters.
parser = ArgumentParser()
parser.add_argument("--experiment_dir", type=str, default="debug")
parser.add_argument("--paraphrase_path", type=str,
                    default="/home/matej/Documents/paraphrase-nli/experiments/SciTail_NLI/PARAPHRASE_IDENTIFICATION/id-scitail-roberta-base-argmax/all_para_id.csv")
parser.add_argument("--pretrained_name_or_path", type=str, default="gpt2")
parser.add_argument("--model_type", type=str, default="gpt2",
                    choices=["gpt2"])
parser.add_argument("--num_epochs", type=int, default=10)
parser.add_argument("--max_seq_len", type=int, default=79)
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--learning_rate", type=float, default=2e-5)
parser.add_argument("--early_stopping_rounds", type=int, default=5)
parser.add_argument("--validate_every_n_examples", type=int, default=5000)
parser.add_argument("--random_seed", type=int, default=17)
parser.add_argument("--use_cpu", action="store_true")
# Fine-tune GPT-2 as an autoregressive paraphrase generator on mined
# paraphrase pairs ("seq1 <PARA> seq2 <EOS>"), then decode the dev/test
# splits with several generation strategies.
if __name__ == "__main__":
    args = parser.parse_args()
    # NOTE(review): DEVICE is computed but never used below — the trainer
    # receives a device *string* instead; confirm this is intentional.
    DEVICE = torch.device("cpu") if args.use_cpu else torch.device("cuda")
    if not os.path.exists(args.experiment_dir):
        os.makedirs(args.experiment_dir)
    if args.random_seed is not None:
        np.random.seed(args.random_seed)
        torch.manual_seed(args.random_seed)
    # Persist the exact run configuration for reproducibility.
    with open(os.path.join(args.experiment_dir, "experiment_config.json"), "w") as f:
        json.dump(vars(args), fp=f, indent=4)
    # Set up logging to file and stdout
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    for curr_handler in [logging.StreamHandler(sys.stdout),
                         logging.FileHandler(os.path.join(args.experiment_dir, "experiment.log"))]:
        curr_handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s"))
        logger.addHandler(curr_handler)
    # Log each CLI argument, truncating long values (e.g. paths) to 50 chars.
    for k, v in vars(args).items():
        v_str = str(v)
        v_str = f"...{v_str[-(50 - 3):]}" if len(v_str) > 50 else v_str
        logging.info(f"|{k:30s}|{v_str:50s}|")
    # No AutoTokenizerFast at the moment?
    if args.model_type == "gpt2":
        tokenizer_cls = GPT2Tokenizer
    else:
        raise NotImplementedError(f"Model_type '{args.model_type}' is not supported")
    tokenizer = tokenizer_cls.from_pretrained(args.pretrained_name_or_path)
    # GPT-2 has no pad token by default; also add the <PARA> separator
    # that splits the prompt from the paraphrase continuation.
    tokenizer.add_special_tokens({
        "eos_token": "<EOS>",
        "pad_token": "<PAD>",
        "additional_special_tokens": ["<PARA>"]
    })
    tokenizer.save_pretrained(args.experiment_dir)
    SEPARATOR_ID = int(tokenizer.encode("<PARA>", add_special_tokens=False)[0])
    df = pd.read_csv(args.paraphrase_path)
    # Basic data cleaning - remove NAs (?), duplicate pairs, pairs with one sequence very short
    df = df.loc[mask_not_na(df["sequence1"], df["sequence2"])]
    df = df.iloc[inds_unique(df["sequence1"], df["sequence2"])]
    df = df.loc[mask_long_enough(df["sequence1"], df["sequence2"])]
    # Keep only positive (actual paraphrase) pairs.
    df = df.loc[df["label"] == 1].reset_index(drop=True)
    df["formatted"] = list(map(
        lambda pair: f"{pair[0]} <PARA> {pair[1]} {tokenizer.eos_token}",
        zip(df["sequence1"].tolist(), df["sequence2"].tolist())
    ))
    # Random 70/15/15 train/dev/test split.
    num_ex = df.shape[0]
    indices = np.random.permutation(num_ex)
    train_df = df.iloc[indices[:int(0.7 * num_ex)]]
    dev_df = df.iloc[indices[int(0.7 * num_ex): int(0.85 * num_ex)]]
    test_df = df.iloc[indices[int(0.85 * num_ex):]]
    train_df.drop("formatted", axis=1).to_csv(os.path.join(args.experiment_dir, "train.csv"), sep=",", index=False)
    dev_df.drop("formatted", axis=1).to_csv(os.path.join(args.experiment_dir, "dev.csv"), sep=",", index=False)
    test_df.drop("formatted", axis=1).to_csv(os.path.join(args.experiment_dir, "test.csv"), sep=",", index=False)
    _encoded_train = tokenizer.batch_encode_plus(
        train_df["formatted"].tolist(),
        max_length=args.max_seq_len, padding="max_length", truncation="longest_first", return_tensors="pt"
    )
    # Mask everything up to and including the <PARA> separator with -100
    # (the ignore index of torch cross-entropy) so the loss is computed
    # only on the paraphrase continuation.
    # NOTE(review): padding positions after <EOS> keep their token ids as
    # labels — confirm the trainer masks pads, otherwise the model is
    # also trained to predict <PAD>.
    _train_labels = _encoded_train["input_ids"].clone()
    for idx_ex in range(_train_labels.shape[0]):
        for idx_token in range(args.max_seq_len):
            _train_labels[idx_ex, idx_token] = -100
            if _encoded_train["input_ids"][idx_ex, idx_token] == SEPARATOR_ID:
                break
    _encoded_train["labels"] = _train_labels
    _encoded_dev = tokenizer.batch_encode_plus(
        dev_df["formatted"].tolist(),
        max_length=args.max_seq_len, padding="max_length", truncation="longest_first", return_tensors="pt"
    )
    # Same prompt-masking scheme for the dev split.
    _dev_labels = _encoded_dev["input_ids"].clone()
    for idx_ex in range(_dev_labels.shape[0]):
        for idx_token in range(args.max_seq_len):
            _dev_labels[idx_ex, idx_token] = -100
            if _encoded_dev["input_ids"][idx_ex, idx_token] == SEPARATOR_ID:
                break
    _encoded_dev["labels"] = _dev_labels
    _encoded_test = tokenizer.batch_encode_plus(
        test_df["formatted"].tolist(),
        max_length=args.max_seq_len, padding="max_length", truncation="longest_first", return_tensors="pt"
    )
    # Same prompt-masking scheme for the test split.
    _test_labels = _encoded_test["input_ids"].clone()
    for idx_ex in range(_test_labels.shape[0]):
        for idx_token in range(args.max_seq_len):
            _test_labels[idx_ex, idx_token] = -100
            if _encoded_test["input_ids"][idx_ex, idx_token] == SEPARATOR_ID:
                break
    _encoded_test["labels"] = _test_labels
    train_set = TransformersSeqPairDataset(**_encoded_train)
    dev_set = TransformersSeqPairDataset(**_encoded_dev)
    test_set = TransformersSeqPairDataset(**_encoded_test)
    logging.info(f"Loaded {len(train_set)} training examples, {len(dev_set)} dev examples and "
                 f"{len(test_set)} test examples")
    pg_trainer = AutoregressivePGTrainer(args.experiment_dir,
                                         pretrained_model_name_or_path=args.pretrained_name_or_path,
                                         tokenizer_path=args.experiment_dir,
                                         batch_size=args.batch_size,
                                         learning_rate=args.learning_rate,
                                         validate_every_n_steps=args.validate_every_n_examples,
                                         early_stopping_tol=args.early_stopping_rounds,
                                         device=("cuda" if not args.use_cpu else "cpu"))
    pg_trainer.run(train_dataset=train_set, val_dataset=dev_set, num_epochs=args.num_epochs)
    # Reload best model
    pg_trainer = AutoregressivePGTrainer.from_pretrained(args.experiment_dir)
    # Decoding prompts end right after the separator token.
    dev_prompts = dev_df["sequence1"].apply(lambda s: f"{s} <PARA>")
    test_prompts = test_df["sequence1"].apply(lambda s: f"{s} <PARA>")
    # Reference targets and copied inputs for downstream metric scripts.
    dev_df["sequence2"].to_csv(os.path.join(args.experiment_dir, "dev_ref.txt"), sep=",", index=False, header=False)
    test_df["sequence2"].to_csv(os.path.join(args.experiment_dir, "test_ref.txt"), sep=",", index=False, header=False)
    dev_df["sequence1"].to_csv(os.path.join(args.experiment_dir, "dev_input_copy.txt"), sep=",", index=False, header=False)
    test_df["sequence1"].to_csv(os.path.join(args.experiment_dir, "test_input_copy.txt"), sep=",", index=False, header=False)
    # Generation strategies to compare (kwargs forwarded to the trainer).
    strategies = {
        "greedy": {},
        "beam": {"num_beams": 5, "early_stopping": True},
        "top_p": {"do_sample": True, "top_p": 0.9, "top_k": 0},
        "top_k": {"do_sample": True, "top_k": 10}
    }
    for curr_strat, strat_kwargs in strategies.items():
        dev_pred_para = pg_trainer.generate(dev_prompts.tolist(), max_seq_len=args.max_seq_len, strategy=strat_kwargs)
        with open(os.path.join(args.experiment_dir, f"dev_{curr_strat}_hyp.txt"), "w", encoding="utf-8") as f:
            for _txt in dev_pred_para:
                # Collapse newlines so each hypothesis occupies one line.
                print(re.sub(r"(\n)+", " ", _txt.strip()), file=f)
        test_pred_para = pg_trainer.generate(test_prompts.tolist(), max_seq_len=args.max_seq_len, strategy=strat_kwargs)
        with open(os.path.join(args.experiment_dir, f"test_{curr_strat}_hyp.txt"), "w", encoding="utf-8") as f:
            for _txt in test_pred_para:
                print(re.sub(r"(\n)+", " ", _txt.strip()), file=f)
| [
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
11748,
302,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28034,
198,
673... | 2.410448 | 3,216 |
import os
import platform
import shutil
import subprocess
# Shared-library name differs per platform (.dll on Windows, .so elsewhere).
lib_filename = 'libterraform.dll' if platform.system() == 'Windows' else 'libterraform.so'
# C header emitted next to the shared library by the c-shared build
# (removed again during cleanup).
header_filename = 'libterraform.h'
# Go entry-point file compiled into the shared library.
tf_filename = 'libterraform.go'
root = os.path.dirname(os.path.abspath(__file__))
terraform_dirname = os.path.join(root, 'terraform')
tf_path = os.path.join(root, tf_filename)
tf_package_name = 'github.com/hashicorp/terraform'
plugin_patch_filename = 'plugin_patch.go'
plugin_dirname = os.path.join(root, 'go-plugin')
plugin_patch_path = os.path.join(root, plugin_patch_filename)
plugin_package_name = 'github.com/hashicorp/go-plugin'
def build(setup_kwargs):
    """
    This function is mandatory in order to build the extensions.

    Patches the vendored go-plugin submodule into terraform's go.mod,
    builds libterraform as a c-shared library, moves it into the
    ``libterraform`` package, then restores both submodules to a clean
    state (even on failure).

    Raises:
        RuntimeError: when either git submodule is missing/uninitialized.
        subprocess.CalledProcessError: when the `go build` step fails.
    """
    # NOTE(review): the original code raised `BuildError`, which is not
    # defined or imported anywhere in this file and therefore surfaced as
    # a NameError; RuntimeError keeps the intended failure message.
    if not os.path.exists(os.path.join(terraform_dirname, '.git')):
        raise RuntimeError(f'The directory {terraform_dirname} not exists or init. '
                           f'Please execute `git submodule init && git submodule update` to init it.')
    if not os.path.exists(os.path.join(plugin_dirname, '.git')):
        raise RuntimeError(f'The directory {plugin_dirname} not exists or init. '
                           f'Please execute `git submodule init && git submodule update` to init it.')
    target_plugin_patch_path = os.path.join(plugin_dirname, plugin_patch_filename)
    target_tf_path = os.path.join(terraform_dirname, tf_filename)
    target_tf_mod_path = os.path.join(terraform_dirname, 'go.mod')
    lib_path = os.path.join(terraform_dirname, lib_filename)
    header_path = os.path.join(terraform_dirname, header_filename)
    # Patch go-plugin: copy the patch file into the submodule and point
    # terraform's go.mod at the local checkout.
    print(' - Patching go-plugin package')
    shutil.copyfile(plugin_patch_path, target_plugin_patch_path)
    with open(target_tf_mod_path) as f:
        mod_content = f.read()
    with open(target_tf_mod_path, 'w') as f:
        modified_mod_content = f'{mod_content}\n' \
                               f'replace github.com/hashicorp/go-plugin v1.4.3 => ../go-plugin'
        f.write(modified_mod_content)
    # Build libterraform
    shutil.copyfile(tf_path, target_tf_path)
    try:
        print(' - Building libterraform')
        subprocess.check_call(
            ['go', 'build', '-buildmode=c-shared', f'-o={lib_filename}', tf_package_name],
            cwd=terraform_dirname
        )
        shutil.move(lib_path, os.path.join(root, 'libterraform', lib_filename))
    finally:
        # Remove files we injected so the submodules stay pristine even
        # when the build fails.
        for path in (target_plugin_patch_path, target_tf_path, header_path, lib_path):
            if os.path.exists(path):
                os.remove(path)
        # Recover go.mod
        with open(target_tf_mod_path, 'w') as f:
            f.write(mod_content)
    return setup_kwargs
# Allow running the build directly (outside the packaging tool), with no
# extra setup kwargs.
if __name__ == '__main__':
    build({})
| [
11748,
28686,
198,
11748,
3859,
198,
11748,
4423,
346,
198,
11748,
850,
14681,
198,
198,
8019,
62,
34345,
796,
705,
8019,
353,
430,
687,
13,
12736,
6,
611,
3859,
13,
10057,
3419,
6624,
705,
11209,
6,
2073,
705,
8019,
353,
430,
687,
... | 2.352089 | 1,173 |
import torch
from torch.utils.data import DataLoader
from baseline_module.baseline_model_builder import BaselineModelBuilder
from config import BaselineConfig
from data import AGNEWS_Dataset
from tools import logging
# Script entry point: evaluate a previously trained LSTM baseline
# classifier on the AG-News test split.
if __name__ == "__main__":
    # is_load=True reloads the saved baseline model weights.
    baseline_model_builder = BaselineModelBuilder('AGNEWS',
                                                  'LSTM',
                                                  BaselineConfig.train_device,
                                                  is_load=True)
    # Build the test split using the model's own vocabulary.
    test_dataset_orig = AGNEWS_Dataset(
        train_data=False,
        attack_vocab=baseline_model_builder.vocab,
        debug_mode=False)
    test_data = DataLoader(test_dataset_orig,
                           batch_size=BaselineConfig.batch_size,
                           shuffle=False,
                           num_workers=4)
    # NOTE(review): eval_bert_baseline_Classification is not imported in
    # this file and will raise NameError at runtime — confirm which
    # module should provide it.
    logging(
        eval_bert_baseline_Classification(baseline_model_builder.net,
                                          test_data))
| [
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
6738,
14805,
62,
21412,
13,
12093,
4470,
62,
19849,
62,
38272,
1330,
6455,
4470,
17633,
32875,
198,
6738,
4566,
1330,
6455,
4470,
16934,
198,
6738,
1366,
1330,
... | 1.879541 | 523 |
from __future__ import absolute_import, division, print_function
from gltbx.images import img_data
inspector_img = img_data(width=32, height=32, mask=-1, encoded_data = """\
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\
ffffffffffffffffffffffffffffffffffffecececd4d4d4d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9\
d9d9d9d9d9d9d9d9d9dbdbd8deded8dbdbd8d2d2dac8c8dcc2c2ddc1c2ddc7c7dcd1d1dadadad8\
deded8dcdcd8d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d5d5d5e8e8e8e4e4e4\
dcdcdce6e6e6e5e5e5e5e5e5e5e5e5e5e5e5e5e5e5e9e9e5e8e8e5d0d1e9a9abf1888bf77478fb\
6b6efd676bfe676bfe6a6efd7276fb8588f8a4a6f2cccdeae7e7e5e9e9e4e5e5e5e5e5e5e5e5e5\
e5e5e5e5e5e5e5e5e5dfdfdfdfdfdfe5e5e5dddddde7e7e7e6e6e6e6e6e6e6e6e6e7e7e6eaeae5\
cbcceb8f91f7696dfe6064ff6266ff6468ff6569ff656aff666aff6569ff6468ff6266ff6065ff\
676bff898cf8c5c6eceae9e5e8e8e6e6e6e6e6e6e6e6e6e6e6e6e6e0e0e0e0e0e0e5e5e5dddddd\
e7e7e7e6e6e6e6e6e6e9e9e5e3e3e7a1a3f3686cfe6165ff6569ff666aff666aff666aff666aff\
666aff666aff666aff666aff666aff666aff6569ff6266ff6569ff989af5dfdfe7e9e9e5e6e6e6\
e6e6e6e6e6e6e0e0e0e0e0e0e5e5e5dddddde7e7e7e6e6e6e9e9e5dbdbe88689f96064ff6569ff\
666aff666aff666aff666aff666aff666aff666aff666aff666aff666aff6569ff666aff666aff\
666aff6569ff6064ff7d81fad4d5e9eaeae5e6e6e6e6e6e6e0e0e0e0e0e0e5e5e5dddddde7e7e7\
e8e8e6dcdce87f82fa6165ff666aff666aff666aff666aff666aff666aff666aff666aff666aff\
666aff6468ff6468ff696cff6367ff666aff666aff666aff666aff6166ff767afbd4d5e9e9e9e5\
e6e6e6e0e0e0e0e0e0e5e5e5dddddde8e8e7e5e5e6898cf86065ff666aff666aff666aff666aff\
666aff666aff666aff666aff666aff666aff6569ff6f72ffcacbffe4e4ff9496ff6266ff666aff\
666aff666aff666aff6165ff7f82fae0e0e7e8e8e6e0e0e0e0e0e0e5e5e5ddddddecece6a9abf2\
6064ff666aff666aff666aff666aff666aff666aff666aff666aff666aff666aff666aff6064ff\
b5b6ffffffffffffffd3d4ff6165ff666aff666aff666aff666aff666aff6064ff9b9ef4ebeae5\
e0e0e0e0e0e0e5e5e5e0e0ddd5d6ea6d70fd6569ff666aff666aff666aff666aff666aff666aff\
666aff666aff666aff666aff666aff6165ffd3d4ffffffffffffffb4b6ff6065ff666aff666aff\
666aff666aff666aff6569ff676bffcbcbece4e4dfe0e0e0e5e5e5e3e2dc9fa2f56065ff666aff\
666aff666aff666aff666aff666aff666aff666aff666aff666aff666aff666aff6266ff979aff\
e8e8ffcfd0ff6f73ff6569ff666aff666aff666aff666aff666aff666aff6165ff9194f7e4e4df\
e0e0e0e6e6e5d6d6df7579fd6468ff666aff666aff666aff666aff666aff666aff666aff666aff\
666aff666aff666aff666aff666aff6266ff686cff6569ff6468ff666aff666aff666aff666aff\
666aff666aff666aff6569ff6d71fed0d1e3e2e2e0e9e9e4bbbce46468ff666aff666aff666aff\
666aff666aff666aff666aff666aff666aff666aff666aff666aff666aff6165ff696dff777bff\
676bff666aff666aff666aff666aff666aff666aff666aff666aff666aff6266ffb1b2e9e5e5df\
eaeae4a1a3e96165ff666aff666aff666aff666aff666aff666aff666aff666aff666aff666aff\
666aff6468ff6367ff979affe0e1fff7f7ff7f83ff6367ff666aff666aff666aff666aff666aff\
666aff666aff666aff6165ff9598efe5e5dfeaeae48f91ed6266ff666aff666aff666aff666aff\
666aff666aff666aff666aff666aff666aff6166ff7074ffc6c7fffffffffffffff2f2ff7276ff\
6468ff666aff666aff666aff666aff666aff666aff666aff666aff6266ff8487f2e3e3e0e9e8e4\
8789ee6266ff666aff666aff666aff666aff666aff666aff666aff666aff666aff6468ff8689ff\
e5e6ffffffffffffffffffffc9caff6165ff666aff666aff666aff666aff666aff666aff666aff\
666aff666aff6367ff7d80f3e1e1e0e9e9e4878aee6266ff666aff666aff666aff666aff666aff\
666aff666aff666aff666aff6266ffadb0ffa2a4ffd0d1ffffffffffffff9598ff6165ff666aff\
666aff666aff666aff666aff666aff666aff666aff666aff6367ff7e81f3e1e1e0eaeae49194ec\
6166ff666aff666aff666aff666aff666aff666aff666aff666aff666aff666aff6266ff696dff\
ebebffffffffedeeff6e72ff6569ff666aff666aff666aff666aff666aff666aff666aff666aff\
666aff6266ff8789f2e3e3e0eaeae4a5a7e86165ff666aff666aff666aff666aff666aff666aff\
666aff666aff666aff666aff6065ff9598ffffffffffffffc0c2ff6064ff666aff666aff666aff\
666aff666aff666aff666aff666aff666aff666aff6165ff9a9ceee6e5dfe8e8e4c1c2e3666aff\
666aff666aff666aff666aff666aff666aff666aff666aff666aff666aff6166ffccceffffffff\
fefeff8d90ff6165ff666aff666aff666aff666aff666aff666aff666aff666aff666aff666aff\
6266ffb7b8e8e5e5dfe6e6e5dadade7b7efc6367ff666aff666aff666aff666aff666aff666aff\
666aff666aff6468ff787bfff6f6ffffffffe7e7ff676bff6064ff666aff666aff666aff666aff\
666aff666aff666aff666aff666aff6468ff7175fdd6d6e2e2e2e0e5e5e5e3e3dcaaacf26065ff\
666aff666aff666aff666aff666aff666aff666aff666aff6064ffa7a9ffffffffffffffb6b7ff\
7e81ffaeb0ff666aff666aff666aff666aff666aff666aff666aff666aff666aff6065ff9c9ef5\
e5e5dfe0e0e0e5e5e5dfdfdddddde97478fc6368ff666aff666aff666aff666aff666aff666aff\
666aff6469ffd9daffffffffffffffe8e9ffebebffa0a3ff6468ff666aff666aff666aff666aff\
666aff666aff666aff6569ff6c70fed4d4eae3e3e0e0e0e0e5e5e5ddddddecebe6b7b8ef6266ff\
666aff666aff666aff666aff666aff666aff6367ff7d80fffafaffffffffffffffe2e3ff8689ff\
6064ff666aff666aff666aff666aff666aff666aff666aff666aff6065ffaaacf2ebebe5e0e0e0\
e0e0e0e5e5e5dddddde7e7e7e9e9e69a9cf56064ff666aff666aff666aff666aff666aff6266ff\
8d8ffffafaffe7e8ffaaacff6b6fff6266ff666aff666aff666aff666aff666aff666aff666aff\
666aff6065ff8d90f7e6e6e6e7e7e6e0e0e0e0e0e0e5e5e5dddddde7e7e7e7e7e6e4e4e69093f7\
6064ff666aff666aff666aff666aff666aff696dff7c7fff6d71ff6064ff6569ff666aff666aff\
666aff666aff666aff666aff666aff666aff6064ff8589f9dfdfe7e8e8e6e6e6e6e0e0e0e0e0e0\
e5e5e5dddddde7e7e7e6e6e6e8e8e6e4e4e69a9cf56266ff6468ff666aff666aff666aff666aff\
6367ff6569ff666aff666aff666aff666aff666aff666aff666aff666aff6468ff6165ff9092f7\
e0e0e7e9e9e5e6e6e6e6e6e6e0e0e0e0e0e0e5e5e5dddddde7e7e7e6e6e6e6e6e6e7e7e6e9e9e5\
b7b8ef7376fc6064ff6468ff666aff666aff666aff666aff666aff666aff666aff666aff666aff\
666aff6468ff6064ff6e72fdaeb0f1e7e7e6e8e8e6e6e6e6e6e6e6e6e6e6e0e0e0e0e0e0e5e5e5\
dddddde7e7e7e6e6e6e6e6e6e6e6e6e6e6e6ebebe5dbdce8a6a8f27679fc6367ff6165ff6266ff\
6468ff6468ff6468ff6468ff6267ff6165ff6266ff7376fc9fa1f4d7d7e9ebebe5e7e7e6e6e6e6\
e6e6e6e6e6e6e6e6e6e0e0e0e0e0e0e4e4e4d9d9d9e4e4e4e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3\
e4e4e2e8e8e2dcdce4bdbeea9c9ef08486f57679f87074f97074f97579f88285f6989bf1b9baeb\
d9dae4e8e8e2e5e5e2e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3e3dcdcdcdededef0f0f0dadada\
dddddddddddddddddddddddddddddddddddddddddddddddddededde1e1dce2e2dcdfdfddd9d9de\
d5d5ded5d5dfd9d9dedededde2e2dce2e1dcdededddddddddddddddddddddddddddddddddddddd\
dddddddddddddadadaecececffffffffffffffffffffffffffffffffffffffffffffffffffffff\
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\
""")
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
6738,
1278,
83,
65,
87,
13,
17566,
1330,
33705,
62,
7890,
198,
198,
1040,
806,
273,
62,
9600,
796,
33705,
62,
7890,
7,
10394,
28,
2624,
11,
6001,
2... | 1.837823 | 3,527 |
from solution import valid
from solution import valid2
with open('input.txt', 'r') as f:
passphrases = [p.split() for p in f]
assert valid(['aa bb cc dd ee'.split()]) == 1
assert valid(['aa bb cc dd aa'.split()]) == 0
assert valid(['aa bb cc dd aaa'.split()]) == 1
print(valid(passphrases))
assert valid2(['abcde fghij'.split()]) == 1
assert valid2(['abcde xyz ecdab'.split()]) == 0
assert valid2(['a ab abc abd abf abj'.split()]) == 1
assert valid2(['iiii oiii ooii oooi oooo'.split()]) == 1
assert valid2(['oiii ioii iioi iiio'.split()]) == 0
print(valid2(passphrases))
| [
6738,
4610,
1330,
4938,
198,
6738,
4610,
1330,
4938,
17,
198,
198,
4480,
1280,
10786,
15414,
13,
14116,
3256,
705,
81,
11537,
355,
277,
25,
198,
220,
220,
220,
1208,
746,
81,
1386,
796,
685,
79,
13,
35312,
3419,
329,
279,
287,
277,
... | 2.559471 | 227 |
# Python Web Scraping
# Title : requests
# Date : 2020-08-10
# Creator : tunealog
import requests
res = requests.get("https://google.com")
res_err = requests.get("https://tunealog.tistory.com")
# Response Code : 200 - No Problem
print("Response Code : ", res.status_code)
# Response Code : 403 - Problem
print("Response Code : ", res_err.status_code)
if res_err.status_code == requests.codes.ok:
print("No Problem")
else:
print("Problem [Error Code ", res_err.status_code, "]")
# Start Web Scraping
res.raise_for_status()
print("Start Web Scraping")
# Error Log
# res_err.raise_for_status()
# print("Start Web Scraping")
# 14110
print(len(res.text))
with open("mygoogle.html", "w", encoding="utf-8") as f:
f.write(res.text)
| [
2,
11361,
5313,
1446,
2416,
278,
198,
198,
2,
11851,
1058,
7007,
198,
2,
7536,
1058,
12131,
12,
2919,
12,
940,
198,
2,
21038,
1058,
14009,
11794,
198,
198,
11748,
7007,
198,
411,
796,
7007,
13,
1136,
7203,
5450,
1378,
13297,
13,
785... | 2.811321 | 265 |
import pyautogui
import time
import pyperclip
# 打开审查元素位置 921.6
# 2022/01/15
urls = ["tomford-product.html?productId=400463&goodsId=524636&warehouseId=10","tomford-product.html?productId=413813&goodsId=537981&warehouseId=10","tomford-product.html?productId=438131&goodsId=562140&warehouseId=10","tomford-product.html?productId=424801&goodsId=548831&warehouseId=10","tomford-product.html?productId=416242&goodsId=540402&warehouseId=10","tomford-product.html?productId=419681&goodsId=543818&warehouseId=10","tomford-product.html?productId=413809&goodsId=537977&warehouseId=10","tomford-product.html?productId=400469&goodsId=524642&warehouseId=10","tomford-product.html?productId=404550&goodsId=528722&warehouseId=10",
"tomford-product.html?productId=426234&goodsId=550259&warehouseId=10","tomford-product.html?productId=425596&goodsId=549626&warehouseId=10","tomford-product.html?productId=413811&goodsId=537979&warehouseId=10","tomford-product.html?productId=414252&goodsId=538420&warehouseId=10","tomford-product.html?productId=414248&goodsId=538416&warehouseId=10","tomford-product.html?productId=412492&goodsId=536660&warehouseId=10","tomford-product.html?productId=407520&goodsId=531688&warehouseId=10","tomford-product.html?productId=400467&goodsId=524640&warehouseId=10","tomford-product.html?productId=407976&goodsId=532144&warehouseId=10",
"tomford-product.html?productId=406764&goodsId=530934&warehouseId=10","tomford-product.html?productId=406182&goodsId=530353&warehouseId=10","tomford-product.html?productId=404556&goodsId=528728&warehouseId=10","tomford-product.html?productId=400367&goodsId=524540&warehouseId=10","tomford-product.html?productId=400437&goodsId=524610&warehouseId=10"]
for u in urls:
print(u)
page={
"x":608,
"y":65,
"sleep":3,
"name":"url_paste",
"content":"http://www.cdfgsanya.com/%s"%(u),
"action_name":"访问链接",
}
pyautogui_action(page)
action_item_click_list = [
{
"x": 1207,
"y": 176,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "清空console",
},
{
"x": 1376,
"y": 997,
"sleep": 0.5,
"name": "select_all_and_copy_and_paste",
"content":
'''
result=[]
result.push(document.getElementsByClassName("detail-box-title")[0].innerText)
result.push(document.getElementsByClassName("product-name")[0].innerText)
result.push(document.getElementsByClassName("product-code-value")[0].innerText)
result.push(document.getElementsByClassName("price-now")[0].innerText)
cxs=document.getElementsByClassName("promotion-item")
cxs_info = []
for (i=0;i<cxs.length;i++){
cxs_info.push(cxs[i].innerText)
}
ths=document.getElementsByClassName("property-item-title")
tds=document.getElementsByClassName("property-item-value")
kv={}
for (i=0;i<ths.length;i++){
kv[ths[i].innerText]=tds[i].innerText
}
result_info = {
"detail-box-title":result[0],
"product-name":result[1],
"product-code-value":result[2],
"price-now":result[3],
"promotion-item":cxs_info,
"property-item":kv,
}
''',
"action_name": "get店铺信息",
},
{
"x": 1376,
"y": 997,
"sleep": 0.5,
"name": "select_all_and_copy_and_paste",
"content":
"""
dom=document.createElement("div")
dom.id="wlb_cover"
dom.style.position="fixed"
dom.style.top="0px"
dom.style.right="0px"
dom.style.zIndex=9999999999999999999
""",
"action_name": "写入文本框textarea",
},
{
"x": 1376,
"y": 997,
"sleep": 0.5,
"name": "select_all_and_copy_and_paste",
"content": rf'dom.innerHTML="<textarea id=\"wlb_cover_textarea\">"+JSON.stringify(result_info)+"</textarea>"',
"action_name": "文本框展示",
},
{
"x": 1376,
"y": 997,
"sleep": 0.5,
"name": "select_all_and_copy_and_paste",
"content": 'document.body.append(dom)',
"action_name": "文本框展示",
},
{
"x": 1026,
"y": 149,
"sleep": 0.5,
"name": "select_all_and_copy",
"content": "",
"action_name": "copy"
},
{
"x": 457,
"y": 23,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "点击选项卡_pages",
},
{
"x": 445,
"y": 232,
"sleep": 0.5,
"name": "select_all_and_paste",
"content": "",
"action_name": "提交",
},
{
"x": 586,
"y": 244,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "submit",
},
{
"x": 137,
"y": 24,
"sleep": 0.5,
"name": "move_to_click",
"content": "",
"action_name": "切换pgy页面",
},
]
for action_item_click in action_item_click_list:
pyautogui_action(action_item_click)
'''
result=[]
result.push(document.getElementsByClassName("shop-name")[0].innerText.split("\n")[0])
result.push(document.getElementsByClassName("brief-info")[0].getElementsByTagName("span")[0].getAttribute("class").split("mid-str")[1])
result.push(document.getElementsByClassName("brief-info")[0].getElementsByTagName("span")[1].innerText)
result.push(document.getElementsByClassName("brief-info")[0].getElementsByTagName("span")[2].innerText)
result.push(document.getElementsByClassName("tel")[0].innerText)
result.push(document.getElementsByClassName("address")[0].innerText)
result_info = {
"shop-name":result[0],
"star":result[1]*0.1,
"comment":result[2],
"consume":result[3],
"tel":result[4],
"address":result[5]
}
dom=document.createElement("div")
dom.id="wlb_cover"
dom.style.position="fixed"
dom.style.top="0px"
dom.style.right="0px"
dom.style.zIndex=9999999999999999999
dom.innerHTML="<textarea id=\"wlb_cover_textarea\">"+JSON.stringify(result_info)+"</textarea>"
document.body.append(dom)
shop-name = document.getElementsByClassName("shop-name")[0].innerText.split("\n")[0]
star = document.getElementsByClassName("brief-info")[0].getElementsByTagName("span")[0].getAttribute("class").split("mid-str")[1]
comment = document.getElementsByClassName("brief-info")[0].getElementsByTagName("span")[1].innerText
consume = document.getElementsByClassName("brief-info")[0].getElementsByTagName("span")[2].innerText
tel = document.getElementsByClassName("tel")[0].innerText
address = document.getElementsByClassName("address")[0].innerText
'''
| [
11748,
12972,
2306,
519,
9019,
198,
11748,
640,
198,
11748,
12972,
525,
15036,
198,
198,
2,
10545,
231,
241,
28156,
222,
22522,
94,
162,
253,
98,
17739,
225,
163,
112,
254,
19526,
235,
163,
121,
106,
860,
2481,
13,
21,
198,
2,
33160... | 2.008175 | 3,425 |
__author__ = ["Francisco Clavero"]
__email__ = ["fcoclavero32@gmail.com"]
__status__ = "Prototype"
""" Ignite trainer for a Bimodal GAN architecture. """
from abc import ABC
from typing import Callable
from overrides import overrides
from torch.nn import BCEWithLogitsLoss
from vscvs.decorators import kwargs_parameter_dict
from vscvs.loss_functions import ContrastiveLoss
from vscvs.metrics import AverageDistancesMultimodalSiamesePairs
from vscvs.metrics import LossBimodalSiamesePairs
from vscvs.metrics import LossMultimodalGAN
from vscvs.models import InterModalDiscriminator
from vscvs.models import MultimodalEncoder
from vscvs.models import ResNextNormalized
from ..engines.gan import create_multimodal_gan_evaluator
from ..engines.gan import create_multimodal_gan_siamese_evaluator
from ..engines.gan import create_multimodal_gan_siamese_trainer
from ..engines.gan import create_multimodal_gan_trainer
from ..engines.gan import prepare_bimodal_batch_variables
from .gan import AbstractGANTrainer
class AbstractBiModalGANTrainer(AbstractGANTrainer, ABC):
"""Abstract class for creating Trainer classes with the common options needed for a bi-modal GAN architecture."""
def __init__(self, *args, mode_embedding_networks=None, loss_weight=None, **kwargs):
"""
:param args: AbstractGANTrainer arguments
:type: Tuple
:param mode_embedding_networks: the embedding networks for each mode. They will be used as generators for the
generative adversarial formulation.
:type: List[torch.nn.Module]
:param loss_weight: manual rescaling weight given to the loss of each batch element. If given, has to be a
Tensor of size `batch_size`.
:type: torch.Tensor
:param kwargs: AbstractGANTrainer keyword arguments
:type: Dict
"""
self.loss_weight = loss_weight
super().__init__(*args, generator_network=MultimodalEncoder(*mode_embedding_networks), **kwargs)
@property
@overrides
def loss(self):
""" """
return BCEWithLogitsLoss(reduction=self.loss_reduction, weight=self.loss_weight)
@overrides
def _create_evaluator_engine(self):
""" """
loss = LossMultimodalGAN(self.loss)
return create_multimodal_gan_evaluator(
*self.model,
device=self.device,
metrics={"generator_loss": loss[0], "discriminator_loss": loss[1]},
prepare_batch_variables=prepare_bimodal_batch_variables
)
@overrides
def _create_trainer_engine(self):
""" """
return create_multimodal_gan_trainer(
*self.model,
*self.optimizer,
self.loss,
device=self.device,
prepare_batch_variables=prepare_bimodal_batch_variables
)
class AbstractBiModalGANSiameseTrainer(AbstractBiModalGANTrainer, ABC):
"""Abstract class for creating Trainer classes with the common options needed for a bi-modal GAN architecture with
the addition of a contrastive term in the loss functions.
Args:
Returns:
"""
def __init__(self, *args, margin=0.2, **kwargs):
"""
:param args: AbstractBiModalGANTrainer arguments
:type: Tuple
:param margin: parameter for the contrastive loss, defining the acceptable threshold for considering the
embeddings of two examples as dissimilar. Dissimilar image pairs will be pushed apart unless their distance
is already greater than the margin. Similar sketch–image pairs will be pulled together in the feature space.
:type: float
:param kwargs: AbstractBiModalGANTrainer keyword arguments
:type: Dict
"""
self.margin = margin
super().__init__(*args, **kwargs)
@property
@overrides
def loss(self):
""" """
return (
BCEWithLogitsLoss(reduction=self.loss_reduction, weight=self.loss_weight),
ContrastiveLoss(margin=self.margin, reduction=self.loss_reduction),
)
@overrides
def _create_evaluator_engine(self):
""" """
average_distances = AverageDistancesMultimodalSiamesePairs()
loss = LossBimodalSiamesePairs(self.loss)
return create_multimodal_gan_siamese_evaluator(
*self.model,
device=self.device,
prepare_batch_variables=prepare_bimodal_batch_variables,
metrics={
"Average Distance/positive": average_distances[0],
"Average Distance/negative": average_distances[1],
"Loss/generator": loss[0],
"Loss/discriminator": loss[1],
}
)
@overrides
def _create_trainer_engine(self):
""" """
return create_multimodal_gan_siamese_trainer(
*self.model,
*self.optimizer,
*self.loss,
device=self.device,
prepare_batch_variables=prepare_bimodal_batch_variables
)
@kwargs_parameter_dict
def train_gan_bimodal(*args, optimizer_mixin=None, **kwargs):
"""Train a bimodal GAN.
Args:
args: BiModalGANTrainer arguments
optimizer_mixin: Trainer mixin for creating Trainer classes that override the `AbstractTrainer`'s
`optimizer` property with a specific optimizer. (Default value = None)
kwargs: BiModalGANTrainer keyword arguments
*args:
**kwargs:
Returns:
"""
class BiModalGANTrainer(optimizer_mixin, AbstractBiModalGANTrainer):
""" """
_optimizer: Callable # type hinting `_optimizer` defined in `optimizer_mixin`, but is not recognized by PyCharm
trainer = BiModalGANTrainer(
*args,
discriminator_network=InterModalDiscriminator(input_dimension=250),
mode_embedding_networks=[
ResNextNormalized(out_features=250, pretrained=True),
ResNextNormalized(out_features=250, pretrained=True),
],
**kwargs
)
trainer.run()
@kwargs_parameter_dict
def train_gan_bimodal_siamese(*args, optimizer_mixin=None, **kwargs):
"""Train a bimodal GAN.
Args:
args: BiModalGANSiameseTrainer arguments
optimizer_mixin: Trainer mixin for creating Trainer classes that override the `AbstractTrainer`'s
`optimizer` property with a specific optimizer. (Default value = None)
kwargs: BiModalGANSiameseTrainer keyword arguments
*args:
**kwargs:
Returns:
"""
class BiModalGANSiameseTrainer(optimizer_mixin, AbstractBiModalGANSiameseTrainer):
""" """
_optimizer: Callable # type hinting `_optimizer` defined in `optimizer_mixin`, but is not recognized by PyCharm
trainer = BiModalGANSiameseTrainer(
*args,
discriminator_network=InterModalDiscriminator(input_dimension=250),
mode_embedding_networks=[
ResNextNormalized(out_features=250, pretrained=True),
ResNextNormalized(out_features=250, pretrained=True),
],
**kwargs
)
trainer.run()
| [
834,
9800,
834,
796,
14631,
42885,
4861,
1012,
8770,
78,
8973,
198,
834,
12888,
834,
796,
14631,
16072,
420,
5031,
332,
78,
2624,
31,
14816,
13,
785,
8973,
198,
834,
13376,
834,
796,
366,
19703,
8690,
1,
628,
198,
37811,
16583,
578,
... | 2.455242 | 2,871 |
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"generate_translator": "00_data.ipynb",
"default_translator_path": "00_data.ipynb",
"strip_html": "00_data.ipynb",
"OriTraTranslation": "00_data.ipynb",
"create_ori_trans_dataframe": "00_data.ipynb",
"print_dataframe_table": "00_data.ipynb",
"plot_text_length": "00_data.ipynb",
"PrettyDataset": "00_data.ipynb",
"ForwardHook": "01_model.ipynb",
"get_linear_layer_activations_states": "01_model.ipynb"}
modules = ["data.py",
"model.py"]
doc_url = "https://pandya6988.github.io/diin_text/"
git_url = "https://github.com/pandya6988/diin_text/tree/master/"
| [
2,
47044,
7730,
1677,
1137,
11617,
11050,
41354,
39345,
0,
8410,
5626,
48483,
0,
198,
198,
834,
439,
834,
796,
14631,
9630,
1600,
366,
18170,
1600,
366,
23144,
62,
15390,
62,
28751,
1600,
366,
18300,
62,
6371,
8973,
198,
198,
9630,
79... | 2.084932 | 365 |