content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from collections import deque
import numpy as np | [
6738,
17268,
1330,
390,
4188,
198,
11748,
299,
32152,
355,
45941
] | 4.363636 | 11 |
from typing import Dict, Callable, Any, Optional
from airflow.sensors.sql_sensor import SqlSensor
from airflow.utils.decorators import apply_defaults
from airflow_clickhouse_plugin.hooks.clickhouse_hook import ClickHouseHook
| [
6738,
19720,
1330,
360,
713,
11,
4889,
540,
11,
4377,
11,
32233,
198,
198,
6738,
45771,
13,
82,
641,
669,
13,
25410,
62,
82,
22854,
1330,
311,
13976,
47864,
198,
6738,
45771,
13,
26791,
13,
12501,
273,
2024,
1330,
4174,
62,
12286,
8... | 3.492308 | 65 |
from enum import Enum
from typing import Any, Dict, Optional
import numpy as np
from .basemodels import ProtoModel
# Encoders, to be deprecated
ndarray_encoder = {np.ndarray: lambda v: v.flatten().tolist()}
class ComputeError(ProtoModel):
"""The type of error message raised"""
error_type: str # Error enumeration not yet strict
error_message: str
extras: Optional[Dict[str, Any]] = None
qcschema_input_default = "qcschema_input"
qcschema_output_default = "qcschema_output"
qcschema_optimization_input_default = "qcschema_optimization_input"
qcschema_optimization_output_default = "qcschema_optimization_output"
qcschema_molecule_default = "qcschema_molecule"
| [
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
32233,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
764,
12093,
368,
375,
1424,
1330,
45783,
17633,
198,
198,
2,
14711,
375,
364,
11,
284,
307,
... | 2.796748 | 246 |
import json
from collections import namedtuple
from dataclasses import dataclass
from enum import Enum
from ..utils.mime_types import GMimeTypes
MSGStatus = namedtuple('MSGStatus', ['value', 'color'])
@dataclass
@dataclass
@dataclass
@dataclass
@dataclass
| [
11748,
33918,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
33829,
1330,
2039,
388,
198,
198,
6738,
11485,
26791,
13,
76,
524,
62,
19199,
1330,
6951,
524,
31431,
198,
198,
5653,... | 2.967033 | 91 |
# 引入库文件,基于telethon
from telethon import events
# 从上级目录引入 jdbot,chat_id变量
from .. import jdbot, chat_id
# 格式基本固定,本例子表示从chat_id处接收到包含hello消息后,要做的事情
@jdbot.on(events.NewMessage(chats=chat_id, pattern='hello'))
# 定义自己的函数名称
| [
2,
10263,
120,
243,
17739,
98,
41753,
241,
23877,
229,
20015,
114,
171,
120,
234,
161,
253,
118,
12859,
236,
46813,
400,
261,
198,
6738,
5735,
400,
261,
1330,
2995,
198,
2,
220,
20015,
236,
41468,
163,
118,
100,
33566,
106,
37605,
2... | 1.169312 | 189 |
# coding: utf-8
from .client import APNSClient
from .response import Response
from .notification import Notification, Payload, PayloadAlert, PRIORITY_LOW, PRIORITY_HIGH
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
764,
16366,
1330,
3486,
8035,
11792,
198,
6738,
764,
26209,
1330,
18261,
198,
6738,
764,
1662,
2649,
1330,
42808,
11,
7119,
2220,
11,
7119,
2220,
36420,
11,
4810,
41254,
9050,
62,
43,
... | 3.333333 | 51 |
import ansiblemetrics.utils as utils
from ansiblemetrics.ansible_metric import AnsibleMetric
class NumImportedPlaybooks(AnsibleMetric):
""" This class measures the number of imported playbooks in a playbook.
"""
def count(self):
"""Return the number of imported playbooks.
Example
-------
.. highlight:: python
.. code-block:: python
from ansiblemetrics.general.num_imported_playbooks import NumImportedPlaybooks
playbook = '''
- name: Include a play after another play
import_playbook: otherplays.yml
- name: This fails because I'm inside a play already
import_playbook: stuff.yaml
'''
NumImportedPlaybooks(playbook).count()
>> 2
Returns
-------
int
number of imported playbooks
"""
script = self.playbook
keys = utils.all_keys(script)
return sum(1 for i in keys if i == 'import_playbook')
| [
11748,
9093,
856,
4164,
10466,
13,
26791,
355,
3384,
4487,
198,
6738,
9093,
856,
4164,
10466,
13,
504,
856,
62,
4164,
1173,
1330,
28038,
856,
9171,
1173,
628,
198,
4871,
31835,
3546,
9213,
11002,
12106,
7,
2025,
82,
856,
9171,
1173,
2... | 2.355353 | 439 |
"""Helper script for building and installing Biopython on Python 3.
Note that we can't just use distutils.command.build_py function build_py_2to3
in setup.py since (as far as I can see) that does not allow us to alter the
2to3 options. In particular, we need to turn off the long fixer for some of
our files.
This code is intended to be called from setup.py automatically under Python 3,
and is not intended for end users. The basic idea follows the approach taken
by NumPy with their setup.py file calling tools/py3tool.py to do the 2to3
conversion automatically.
This calls the lib2to3 library functions to convert the Biopython source code
from Python 2 to Python 3, tracking changes to files so that unchanged files
need not be reconverted making development much easier (i.e. if you edit one
source file, doing 'python setup.py install' will only reconvert the one file).
This is done by the last modified date stamps (which will be updated by git if
you switch branches).
NOTE - This is intended to be run under Python 3 (not under Python 2), but
care has been taken to make it run under Python 2 enough to give a clear error
message. In particular, this meant avoiding with statements etc.
"""
import sys
if sys.version_info[0] < 3:
sys.stderr.write("Please run this under Python 3\n")
sys.exit(1)
import shutil
import os
import lib2to3.main
from io import StringIO
if __name__ == "__main__":
python2_source = "."
python3_source = "build/py%i.%i" % sys.version_info[:2]
main(python2_source, python3_source)
| [
37811,
47429,
4226,
329,
2615,
290,
15975,
8436,
404,
7535,
319,
11361,
513,
13,
198,
198,
6425,
326,
356,
460,
470,
655,
779,
1233,
26791,
13,
21812,
13,
11249,
62,
9078,
2163,
1382,
62,
9078,
62,
17,
1462,
18,
198,
259,
9058,
13,
... | 3.44469 | 452 |
#!/usr/bin/env python3
import sys
ADIRS = [(x,y) for x in [-1,0,1] for y in [-1,0,1] if not x == y == 0]
if __name__ == '__main__':
main(sys.argv)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
198,
2885,
4663,
50,
796,
47527,
87,
11,
88,
8,
329,
2124,
287,
25915,
16,
11,
15,
11,
16,
60,
329,
331,
287,
25915,
16,
11,
15,
11,
16,
60,
611,
40... | 2.039474 | 76 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_flask_echelon
----------------------------------
Tests for `flask_echelon` module.
"""
import pytest
from flask import Flask, _request_ctx_stack
from flask_login import AnonymousUserMixin, LoginManager, UserMixin
from pymongo import MongoClient
from flask_echelon import AccessCheckFailed, EchelonManager, MemberTypes
from flask_echelon.helpers import has_access, require_echelon
# only use one MongoClient instance
DB = MongoClient().test_flask_echelon
class User(UserMixin):
"""Mocks Flask-Login User"""
@property
class AnonUser(AnonymousUserMixin):
"""Mocks Flask-Login Anon User"""
@pytest.fixture
def test_000_init():
"""Can be initialized as a Flask plugin or standalone"""
EchelonManager()
with pytest.raises(Exception):
EchelonManager(app=Flask(__name__))
EchelonManager(app=Flask(__name__), database=DB)
EchelonManager(database=DB)
def test_001_define_echelon():
"""Can define interactions successfully"""
manager = EchelonManager(database=DB)
echelon = "foo::bar::baz"
manager.define_echelon(echelon, name="I test things", help="It's a test, ok?")
def test_003_update_echelon(foobarbaz):
"""Can update a given interaction in place"""
manager, echelon = foobarbaz
manager.define_echelon(echelon, name="I just changed", help="Me too!")
assert 'just changed' in manager.get_echelon(echelon)['name']
assert len(manager.all_echelons.values()) == 1
if __name__ == "__main__":
pytest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
9288,
62,
2704,
2093,
62,
721,
2978,
261,
198,
3880,
438,
198,
198,
51,
3558,
329,
4600,
2704,
2093,... | 2.813953 | 559 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import proxy2
from openstack.workflow.v2 import execution as _execution
from openstack.workflow.v2 import workflow as _workflow
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 3.812155 | 181 |
#!/usr/bin/env python
import sys
if len(sys.argv) < 2:
sys.exit('[usage] python %s <fastqc_path1> [fastqc_path2] [fastqc_path3] ... ')
from bs4 import BeautifulSoup
import pandas as pd
import os
import glob
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
1279,
362,
25,
198,
220,
220,
220,
25064,
13,
37023,
10786,
58,
26060,
60,
21015,
4064,
82,
1279,
7217,
80,
66,
62,
69... | 2.361111 | 108 |
"""
Prelim script for looking at netcdf files and producing some trends
These estimates can also be used for P03 climate estimation
"""
#==============================================================================
__title__ = "Global Climate Trends"
__author__ = "Arden Burrell"
__version__ = "v1.0(13.02.2019)"
__email__ = "arden.burrell@gmail.com"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
import argparse
import datetime as dt
from collections import OrderedDict
import warnings as warn
from netCDF4 import Dataset, num2date, date2num
from scipy import stats
import xarray as xr
from numba import jit
import bottleneck as bn
import scipy as sp
from scipy import stats
import statsmodels.stats.multitest as smsM
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
import seaborn as sns
import cartopy.crs as ccrs
import cartopy.feature as cpf
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# Import debugging packages
import ipdb
print("numpy version : ", np.__version__)
print("pandas version : ", pd.__version__)
print("xarray version : ", xr.__version__)
#==============================================================================
#==============================================================================
# ============================= Primary functions =============================
#==============================================================================
def RollingWindow(
fname, var, method, window, period, gridres, region,
yr_start=1982, yr_end = 2015, force = False, plot=True):
"""Function to perform a rolling window smoothing on the precipitation and climate data
args
fname: String
string of the netcdf to be opened
var: string
string of the variable name within the netcdf
window: int
the number of time periods to be used
period: str
description of the accumulation period
gridres: str
description of the resolution of the grid
region: str
descript of the data region
yr_start
the first year to be included in trend analysis
yr_end
the last year to be included in trend analysis
force: bool
force the creation of new netcdf files
plot: bool
true makes plots
"""
# ========== Open the dataset ==========
ds = xr.open_dataset(fname)
print("Starting rolling window calculations for %s" % var)
# ========== build an output file name ==========
fout = (
'./results/netcdf/TerraClimate_%s_RollingMean_%s_%sto%d_%s%s.nc' % (
period, var, method, yr_end, region, gridres))
# ========== Test if a file alread exists ==========
if all([os.path.isfile(fout), not force]):
warn.warn("Loading existing file, force is needed to overwrite")
ds_trend = xr.open_dataset(fout)
kys = [n for n in ds_trend.data_vars]
else:
# ========== Create the global attributes ==========
global_attrs = GlobalAttributes(ds, var)
# ========== Create the rolling window means ==========
results = []
years = []
# ========== Pull out the data seasonality ==========
annual = ds[var]
# ========== Loop over each of the mooving windows ==========
for win in window:
rmean = annual.rolling(time=win).mean()
dst = rmean.sel(time=slice('%d-01-01' % yr_start, '%d-12-31' % yr_end))
# ========== Get the trend ==========
trends, kys = _fitvals(dst, method=method)
# ========== add a correction for multiple comparisons ==========
if "pvalue" in kys:
trends, kys = MultipleComparisons(trends, kys, aplha = 0.10, MCmethod="fdr_bh")
results.append(trends)
years.append(yr_start-win)
# ========== convert data to netcdf format ==========
layers, encoding = dsmaker(ds, var, results, kys, years, method)
ds_trend = xr.Dataset(layers, attrs= global_attrs)
try:
print("Starting write of data")
ds_trend.to_netcdf(fout,
format = 'NETCDF4',
encoding = encoding,
unlimited_dims = ["time"])
print(".nc file created")
ipdb.set_trace()
except Exception as e:
print(e)
warn.warn(" \n something went wrong with the save, going interactive")
ipdb.set_trace()
#
if plot:
warn.warn("plotting has not been implemented in this function yet. Going interactive")
ipdb.set_trace()
#==============================================================================
# ========================= Netcdf Creation Functions =========================
#==============================================================================
def GlobalAttributes(ds, var):
"""
Creates the global attributes for the netcdf file that is being written
these attributes come from :
https://www.unidata.ucar.edu/software/thredds/current/netcdf-java/metadata/DataDiscoveryAttConvention.html
args
ds: xarray ds
Dataset containing the infomation im intepereting
var: str
name of the variable
returns:
attributes Ordered Dictionary cantaining the attribute infomation
"""
# ========== Create the ordered dictionary ==========
attr = OrderedDict()
# fetch the references for my publications
# pubs = puplications()
# ========== Fill the Dictionary ==========
# ++++++++++ Highly recomended ++++++++++
attr["title"] = "Trend in Climate (%s)" % (var)
attr["summary"] = "Annual and season trends in %s" % var
attr["Conventions"] = "CF-1.7"
# ++++++++++ Data Provinance ++++++++++
attr["history"] = "%s: Netcdf file created using %s (%s):%s by %s" % (
str(pd.Timestamp.now()), __title__, __file__, __version__, __author__)
attr["history"] += ds.history
attr["creator_name"] = __author__
attr["creator_url"] = "ardenburrell.com"
attr["creator_email"] = __email__
attr["institution"] = "University of Leicester"
attr["date_created"] = str(pd.Timestamp.now())
# ++++++++++ Netcdf Summary infomation ++++++++++
attr["time_coverage_start"] = str(dt.datetime(ds['time.year'].min(), 1, 1))
attr["time_coverage_end"] = str(dt.datetime(ds['time.year'].max() , 12, 31))
return attr
def dsmaker(ds, var, results, keys, start_years, method):
"""
Build a summary of relevant paramters
args
ds: xarray ds
Dataset containing the infomation im intepereting
var: str
name of the variable
return
ds xarray dataset
"""
# sys.exit()
# date = [dt.datetime(ds['time.year'].max() , 12, 31)]
times = OrderedDict()
tm = [dt.datetime(yr , 12, 31) for yr in start_years]
times["time"] = pd.to_datetime(tm)
times["calendar"] = 'standard'
times["units"] = 'days since 1900-01-01 00:00'
times["CFTime"] = date2num(
tm, calendar=times["calendar"], units=times["units"])
dates = times["CFTime"]
try:
lat = ds.lat.values
lon = ds.lon.values
except AttributeError:
lat = ds.latitude.values
lon = ds.longitude.values
# dates = [dt.datetime(yr , 12, 31) for yr in start_years]
# ipdb.set_trace()
# ========== Start making the netcdf ==========
layers = OrderedDict()
encoding = OrderedDict()
# ========== loop over the keys ==========
try:
for pos in range(0, len(keys)):
# ipdb.set_trace()
if type(results[0]) == np.ndarray:
Val = results[pos][np.newaxis,:, :]
else:
# multiple variables
Val = np.stack([res[pos] for res in results])
ky = keys[pos]
# build xarray dataset
DA=xr.DataArray(Val,
dims = ['time', 'latitude', 'longitude'],
coords = {'time': dates,'latitude': lat, 'longitude': lon},
attrs = ({
'_FillValue':9.96921e+36,
'units' :"1",
'standard_name':ky,
'long_name':"%s %s" % (method, ky)
}),
)
DA.longitude.attrs['units'] = 'degrees_east'
DA.latitude.attrs['units'] = 'degrees_north'
DA.time.attrs["calendar"] = times["calendar"]
DA.time.attrs["units"] = times["units"]
layers[ky] = DA
encoding[ky] = ({'shuffle':True,
# 'chunksizes':[1, ensinfo.lats.shape[0], 100],
'zlib':True,
'complevel':5})
return layers, encoding
except Exception as e:
warn.warn("Code failed with: \n %s \n Going Interactive" % e)
ipdb.set_trace()
raise e
#===============================================================================
# ============================= Internal Functions =============================
#===============================================================================
def MultipleComparisons(trends, kys, aplha = 0.10, MCmethod="fdr_by"):
"""
Takes the results of an existing trend detection aproach and modifies them to
account for multiple comparisons.
args
trends: list
list of numpy arrays containing results of trend analysis
kys: list
list of what is in results
years:
years of accumulation
"""
if MCmethod == "fdr_by":
print("Adjusting for multiple comparisons using Benjamini/Yekutieli")
elif MCmethod == "fdr_bh":
print("Adjusting for multiple comparisons using Benjamini/Hochberg")
else:
warn.warn("unknown MultipleComparisons method, Going Interactive")
ipdb.set_trace()
# ========== Locate the p values and reshape them into a 1d array ==========
# ++++++++++ Find the pvalues ++++++++++
index = kys.index("pvalue")
pvalue = trends[index]
isnan = np.isnan(pvalue)
# ++++++++++ pull out the non nan pvalus ++++++++++
# pvalue1d = pvalue.flatten()
pvalue1d = pvalue[~isnan]
# isnan1d = isnan.flatten()
# =========== Perform the MC correction ===========
pvalue_adj = smsM.multipletests(pvalue1d, method=MCmethod, alpha=0.10)
# ++++++++++ reformat the data into array ++++++++++
MCR = ["Significant", "pvalue_adj"]
for nm in MCR:
# make an empty array
re = np.zeros(pvalue.shape)
re[:] = np.NAN
if nm == "Significant":
re[~isnan] = pvalue_adj[MCR.index(nm)].astype(int).astype(float)
else:
re[~isnan] = pvalue_adj[MCR.index(nm)]
# +++++ add the significant and adjusted pvalues to trends+++++
trends.append(re)
kys.append(nm)
return trends, kys
def cbvals(var, ky):
"""Function to store all the colorbar infomation i need """
cmap = None
vmin = None
vmax = None
if ky == "slope":
if var == "tmean":
vmax = 0.07
vmin = -0.07
cmap = mpc.ListedColormap(palettable.cmocean.diverging.Balance_20.mpl_colors)
elif var =="ppt":
vmin = -3.0
vmax = 3.0
cmap = mpc.ListedColormap(palettable.cmocean.diverging.Curl_20_r.mpl_colors)
elif ky == "pvalue":
cmap = mpc.ListedColormap(palettable.matplotlib.Inferno_20.hex_colors)
vmin = 0.0
vmax = 1.0
elif ky == "rsquared":
cmap = mpc.ListedColormap(palettable.matplotlib.Viridis_20.hex_colors)
vmin = 0.0
vmax = 1.0
# cmap =
elif ky == "intercept":
cmap = mpc.ListedColormap(palettable.cmocean.sequential.Ice_20_r.mpl_colors)
if var == "tmean":
# vmax = 0.07
# vmin = -0.07
# cmap = mpc.ListedColormap(palettable.cmocean.diverging.Balance_20.mpl_colors)
# ipdb.set_trace()
pass
elif var =="ppt":
vmin = 0
vmax = 1000
# cmap = mpc.ListedColormap(palettable.cmocean.diverging.Curl_20_r.mpl_colors)
return cmap, vmin, vmax
# @jit
def _fitvals(dvt, method="polyfit"):
"""
Takes the ds[var] and performs some form of regression on it
"""
vals = dvt.values
try:
years = pd.to_datetime(dvt.time.values).year
t0 = pd.Timestamp.now()
print("testing with %s from %d to %d starting at: %s" % (
method, pd.to_datetime(dvt.time.values).year.min(),
pd.to_datetime(dvt.time.values).year.max(), str(t0)))
except AttributeError:
years = pd.to_datetime(dvt.year.values).year
t0 = pd.Timestamp.now()
print("testing with %s from %d to %d starting at: %s" % (
method, pd.to_datetime(dvt.year.values).year.min(),
pd.to_datetime(dvt.year.values).year.max(), str(t0)))
vals2 = vals.reshape(len(years), -1)
if method=="polyfit":
# Do a first-degree polyfit
vals2[np.isnan(vals2)] = 0
regressions = np.polyfit(years, vals2, 1)
regressions[regressions== 0] = np.NAN
trends = [regressions[0,:].reshape(vals.shape[1], vals.shape[2])]
kys = ["slope"]
elif method == "theilsen":
regressions = alongaxFAST(vals2, scipyTheilSen)
trds = regressions.reshape(4, vals.shape[1], vals.shape[2])
trends = []
for n in range(0, trds.shape[0]):
trends.append(trds[n, :, :])
kys = ["slope", "intercept", "rho", "pvalue"]
elif method == "scipyols":
# regressions = alongax(vals2, scipyols)
regressions = alongaxFAST(vals2, scipyols)
trds = regressions.reshape(4, vals.shape[1], vals.shape[2])
trends = []
for n in range(0, trds.shape[0]):
trends.append(trds[n, :, :])
kys = ["slope", "intercept", "rsquared", "pvalue"]
tdelta = pd.Timestamp.now() - t0
print("\n Time taken to get regression coefficients using %s: %s" % (method, str(tdelta)))
# ipdb.set_trace()
return trends, kys
# @jit
# @jit
def alongaxFAST(array, myfunc, t0=pd.Timestamp.now(), lineflick=10000):
""" Fastest wave i've yet found to loop over an entire netcdf file
array 2d numpy array
myfunc function i want to apply
lineflick frequency that i want to see the lines, increasing this number
increases speed
returns
res 2d array with the results
"""
# build an empyt array to hold the result
# res = np.zeros((array.shape[1], 4))
res = np.zeros((4, array.shape[1]))
res[:] = np.NAN
# locate and remove any nan rows
ana = ~bn.anynan(array, axis=0)
array2 = array[:, ana]
# build a holder
vals = np.zeros((4, array2.shape[1]))
for line in range(0, array2.shape[1]):
_lnflick(line, array2.shape[1], t0, lineflick=lineflick)
out = myfunc(array2[:, line])
# vals.append(out)
vals[:, line] = out
res[:, ana] = vals
return res
# @jit
def scipyTheilSen(array):
"""
Function for rapid TheilSen slop estimation with time.
the regression is done with an independent variable
rangeing from 0 to array.shape to make the intercept
the start which simplifies calculation
args:
array np : numpy array of annual max VI over time
return
result np : slope, intercept
"""
try:
# if bn.allnan(array):
# return np.array([np.NAN, np.NAN, np.NAN, np.NAN])
slope, intercept, _, _ = stats.mstats.theilslopes(
array, np.arange(array.shape[0]))
rho, pval = stats.spearmanr(
array, np.arange(array.shape[0]))
# change = (slope*array.shape[0])
return np.array([slope, intercept, rho, pval])
except Exception as e:
print(e)
warn.warn("unhandeled Error has occured")
ipdb.set_trace()
return np.array([np.NAN, np.NAN, np.NAN, np.NAN])
# @jit
def scipyols(array):
"""
Function for rapid OLS with time. the regression is done with
an independent variable rangeing from 0 to array.shape to make
the intercept the start which simplifies calculation
args:
array np : numpy array of annual max VI over time
return
result np : change(total change between start and end)
slopem intercept, rsquared, pvalue, std_error
"""
# +++++ Get the OLS +++++
try:
# if bn.allnan(array):
# return np.array([np.NAN, np.NAN, np.NAN, np.NAN])
slope, intercept, r_value, p_value, std_err = stats.linregress(np.arange(array.shape[0]), array)
# +++++ calculate the total change +++++
# change = (slope*array.shape[0])
# +++++ return the results +++++
return np.array([slope, intercept, r_value**2, p_value])
except Exception as e:
# print(e)
# warn.warn("unhandeled Error has occured")
# ipdb.set_trace()
return np.array([np.NAN, np.NAN, np.NAN, np.NAN])
#==============================================================================
if __name__ == '__main__':
main() | [
37811,
198,
47,
2411,
320,
4226,
329,
2045,
379,
2010,
66,
7568,
3696,
290,
9194,
617,
11257,
198,
198,
4711,
7746,
460,
635,
307,
973,
329,
350,
3070,
4258,
31850,
198,
198,
37811,
198,
2,
23926,
25609,
855,
198,
198,
834,
7839,
83... | 2.631148 | 6,100 |
from django.contrib import admin
from .models import ToiletLecture, Toilet
@admin.register(Toilet)
@admin.register(ToiletLecture)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
1675,
41550,
43,
478,
495,
11,
1675,
41550,
628,
198,
31,
28482,
13,
30238,
7,
2514,
41550,
8,
628,
198,
31,
28482,
13,
30238,
7,
2514,
41550,
43,
478... | 2.934783 | 46 |
say = 'всем привет!'
print(say)
#print(dir(say))
print(say.islower())
print(say.isupper())
say_2 = say.upper()
print(say_2)
# say_3 = say_2.capitalize()
say_3 = say.capitalize()
print(say_3)
if say.endswith('!')
print('Эмоционально сказано') | [
16706,
796,
705,
38857,
21727,
16843,
43108,
12466,
123,
21169,
18849,
38857,
16843,
20375,
13679,
198,
4798,
7,
16706,
8,
198,
2,
4798,
7,
15908,
7,
16706,
4008,
198,
4798,
7,
16706,
13,
3044,
789,
28955,
198,
4798,
7,
16706,
13,
271... | 1.864662 | 133 |
"""Module to provide generic utilities for other accelerometer modules."""
from collections import OrderedDict
import datetime
import json
import math
import os
import pandas as pd
import re
DAYS = ['mon', 'tue', 'wed', 'thur', 'fri', 'sat', 'sun']
TIME_SERIES_COL = 'time'
def formatNum(num, decimalPlaces):
"""return str of number formatted to number of decimalPlaces
When writing out 10,000's of files, it is useful to format the output to n
decimal places as a space saving measure.
:param float num: Float number to be formatted.
:param int decimalPlaces: Number of decimal places for output format
:return: Number formatted to number of decimalPlaces
:rtype: str
:Example:
>>> import accUtils
>>> accUtils.formatNum(2.567, 2)
2.57
"""
fmt = '%.' + str(decimalPlaces) + 'f'
return float(fmt % num)
def meanSDstr(mean, std, numDecimalPlaces):
"""return str of mean and stdev numbers formatted to number of decimalPlaces
:param float mean: Mean number to be formatted.
:param float std: Standard deviation number to be formatted.
:param int decimalPlaces: Number of decimal places for output format
:return: String formatted to number of decimalPlaces
:rtype: str
:Example:
>>> import accUtils
>>> accUtils.meanSDstr(2.567, 0.089, 2)
2.57 (0.09)
"""
outStr = str(formatNum(mean, numDecimalPlaces))
outStr += ' ('
outStr += str(formatNum(std, numDecimalPlaces))
outStr += ')'
return outStr
def meanCIstr(mean, std, n, numDecimalPlaces):
"""return str of mean and 95% confidence interval numbers formatted
:param float mean: Mean number to be formatted.
:param float std: Standard deviation number to be formatted.
:param int n: Number of observations
:param int decimalPlaces: Number of decimal places for output format
:return: String formatted to number of decimalPlaces
:rtype: str
:Example:
>>> import accUtils
>>> accUtils.meanSDstr(2.567, 0.089, 2)
2.57 (0.09)
"""
stdErr = std / math.sqrt(n)
lowerCI = mean - 1.96 * stdErr
upperCI = mean + 1.96 * stdErr
outStr = str(formatNum(mean, numDecimalPlaces))
outStr += ' ('
outStr += str(formatNum(lowerCI, numDecimalPlaces))
outStr += ' - '
outStr += str(formatNum(upperCI, numDecimalPlaces))
outStr += ')'
return outStr
def toScreen(msg):
"""Print msg str prepended with current time
:param str mgs: Message to be printed to screen
:return: Print msg str prepended with current time
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.toScreen("hello")
2018-11-28 10:53:18 hello
"""
timeFormat = '%Y-%m-%d %H:%M:%S'
print(f"\n{datetime.datetime.now().strftime(timeFormat)}\t{msg}")
def writeCmds(accDir, outDir, cmdsFile='processCmds.txt', accExt="cwa", cmdOptions="", filesCSV=None):
"""Generate a text file listing processing commands for files found under accDir/
:param str accDir: Directory with accelerometer files to process
:param str outDir: Output directory to be created containing the processing results
:param str cmdsFile: Output .txt file listing all processing commands
:param str accExt: Acc file type e.g. cwa, CWA, bin, BIN, gt3x...
:param str cmdOptions: String of processing options e.g. "--epochPeriod 10"
Type 'python3 accProccess.py -h' for full list of options
:return: New file written to <cmdsFile>
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.writeProcessingCommands("myAccDir/", "myResults/", "myProcessCmds.txt")
<cmd options written to "myProcessCmds.txt">
"""
# Use filesCSV if provided, else retrieve all accel files under accDir/
if filesCSV in os.listdir(accDir):
filesCSV = pd.read_csv(os.path.join(accDir, filesCSV), index_col="fileName")
filesCSV.index = accDir.rstrip("/") + "/" + filesCSV.index.astype('str')
filePaths = filesCSV.index.to_numpy()
else:
filesCSV = None
# List all accelerometer files under accDir/
filePaths = []
accExt = accExt.lower()
for root, dirs, files in os.walk(accDir):
for file in files:
if file.lower().endswith((accExt,
accExt + ".gz",
accExt + ".zip",
accExt + ".bz2",
accExt + ".xz")):
filePaths.append(os.path.join(root, file))
with open(cmdsFile, 'w') as f:
for filePath in filePaths:
# Use the file name as the output folder name for the process,
# keeping the same directory structure of accDir/
# Example: If filePath is {accDir}/group0/subject123.cwa then
# outputFolder will be {outDir}/group0/subject123/
outputFolder = filePath.replace(accDir.rstrip("/"), outDir.rstrip("/")).split(".")[0]
cmd = f"accProcess {filePath} --outputFolder {outputFolder} {cmdOptions}"
if filesCSV is not None:
# Grab additional options provided in filesCSV (e.g. calibration params)
cmdOptionsCSV = ' '.join(['--{} {}'.format(col, filesCSV.loc[filePath, col])
for col in filesCSV.columns])
cmd += " " + cmdOptionsCSV
f.write(cmd)
f.write('\n')
print('Processing list written to ', cmdsFile)
def collateSummary(resultsDir, outputCsvFile="all-summary.csv"):
"""Read all *-summary.json files under <resultsDir> and merge into one CSV file
Each json file represents summary data for one participant.
Therefore output CSV file contains summary for all participants.
:param str resultsDir: Directory containing JSON files
:param str outputCsvFile: Output CSV filename
:return: New file written to <outputCsvFile>
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.collateSummary("data/", "data/all-summary.csv")
<summary CSV of all participants/files written to "data/all-summary.csv">
"""
# Load all *-summary.json files under resultsDir/
jdicts = []
for root, dirs, files in os.walk(resultsDir):
for file in files:
if file.lower().endswith("-summary.json"):
with open(os.path.join(root, file), 'r') as f:
jdicts.append(json.load(f, object_pairs_hook=OrderedDict))
summary = pd.DataFrame.from_dict(jdicts) # merge to a dataframe
refColumnOrder = next((item for item in jdicts if item['quality-goodWearTime'] == 1), None)
summary = summary[list(refColumnOrder.keys())] # maintain intended column ordering
summary['eid'] = summary['file-name'].str.split('/').str[-1].str.split('.').str[0] # infer ID from filename
summary.to_csv(outputCsvFile, index=False)
print('Summary of', str(len(summary)), 'participants written to:', outputCsvFile)
def identifyUnprocessedFiles(filesCsv, summaryCsv, outputFilesCsv):
"""identify files that have not been processed
Look through all processed accelerometer files, and find participants who do
not have records in the summary csv file. This indicates there was a problem
in processing their data. Therefore, output will be a new .csv file to
support reprocessing of these files
:param str filesCsv: CSV listing acc files in study directory
:param str summaryCsv: Summary CSV of processed dataset
:param str outputFilesCsv: Output csv listing files to be reprocessed
:return: New file written to <outputCsvFile>
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.identifyUnprocessedFiles("study/files.csv", study/summary-all-files.csv",
"study/files-reprocess.csv")
<Output csv listing files to be reprocessed written to "study/files-reprocess.csv">
"""
fileList = pd.read_csv(filesCsv)
summary = pd.read_csv(summaryCsv)
output = fileList[~fileList['fileName'].isin(list(summary['file-name']))]
output = output.rename(columns={'Unnamed: 1': ''})
output.to_csv(outputFilesCsv, index=False)
print('Reprocessing for ', len(output), 'participants written to:',
outputFilesCsv)
def updateCalibrationCoefs(inputCsvFile, outputCsvFile):
"""read summary .csv file and update coefs for those with poor calibration
Look through all processed accelerometer files, and find participants that
did not have good calibration data. Then assigns the calibration coefs from
previous good use of a given device. Output will be a new .csv file to
support reprocessing of uncalibrated files with new pre-specified calibration coefs.
:param str inputCsvFile: Summary CSV of processed dataset
:param str outputCsvFile: Output CSV of files to be reprocessed with new
calibration info
:return: New file written to <outputCsvFile>
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.updateCalibrationCoefs("data/summary-all-files.csv", "study/files-recalibration.csv")
<CSV of files to be reprocessed written to "study/files-recalibration.csv">
"""
d = pd.read_csv(inputCsvFile)
# select participants with good spread of stationary values for calibration
goodCal = d.loc[(d['quality-calibratedOnOwnData'] == 1) & (d['quality-goodCalibration'] == 1)]
# now only select participants whose data was NOT calibrated on a good spread of stationary values
badCal = d.loc[(d['quality-calibratedOnOwnData'] == 1) & (d['quality-goodCalibration'] == 0)]
# sort files by start time, which makes selection of most recent value easier
goodCal = goodCal.sort_values(['file-startTime'])
badCal = badCal.sort_values(['file-startTime'])
calCols = ['calibration-xOffset(g)', 'calibration-yOffset(g)', 'calibration-zOffset(g)',
'calibration-xSlope(g)', 'calibration-ySlope(g)', 'calibration-zSlope(g)',
'calibration-xTemp(C)', 'calibration-yTemp(C)', 'calibration-zTemp(C)',
'calibration-meanDeviceTemp(C)']
# print output CSV file with suggested calibration parameters
noOtherUses = 0
nextUses = 0
previousUses = 0
f = open(outputCsvFile, 'w')
f.write('fileName,calOffset,calSlope,calTemp,meanTemp\n')
for ix, row in badCal.iterrows():
# first get current 'bad' file
participant, device, startTime = row[['file-name', 'file-deviceID', 'file-startTime']]
device = int(device)
# get calibration values from most recent previous use of this device
# (when it had a 'good' calibration)
prevUse = goodCal[calCols][(goodCal['file-deviceID'] == device) &
(goodCal['file-startTime'] < startTime)].tail(1)
try:
ofX, ofY, ofZ, slpX, slpY, slpZ, tmpX, tmpY, tmpZ, calTempAvg = prevUse.iloc[0]
previousUses += 1
except Exception:
nextUse = goodCal[calCols][(goodCal['file-deviceID'] == device) &
(goodCal['file-startTime'] > startTime)].head(1)
if len(nextUse) < 1:
print('no other uses for this device at all: ', str(device),
str(participant))
noOtherUses += 1
continue
nextUses += 1
ofX, ofY, ofZ, slpX, slpY, slpZ, tmpX, tmpY, tmpZ, calTempAvg = nextUse.iloc[0]
# now construct output
out = participant + ','
out += str(ofX) + ' ' + str(ofY) + ' ' + str(ofZ) + ','
out += str(slpX) + ' ' + str(slpY) + ' ' + str(slpZ) + ','
out += str(tmpX) + ' ' + str(tmpY) + ' ' + str(tmpZ) + ','
out += str(calTempAvg)
f.write(out + '\n')
f.close()
print('previousUses', previousUses)
print('nextUses', nextUses)
print('noOtherUses', noOtherUses)
print('Reprocessing for ', str(previousUses + nextUses),
'participants written to:', outputCsvFile)
def writeFilesWithCalibrationCoefs(inputCsvFile, outputCsvFile):
"""read summary .csv file and write files.csv with calibration coefs
Look through all processed accelerometer files, and write a new .csv file to
support reprocessing of files with pre-specified calibration coefs.
:param str inputCsvFile: Summary CSV of processed dataset
:param str outputCsvFile: Output CSV of files to process with calibration info
:return: New file written to <outputCsvFile>
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.writeFilesWithCalibrationCoefs("data/summary-all-files.csv",
>>> "study/files-calibrated.csv")
<CSV of files to be reprocessed written to "study/files-calibrated.csv">
"""
d = pd.read_csv(inputCsvFile)
calCols = ['calibration-xOffset(g)', 'calibration-yOffset(g)', 'calibration-zOffset(g)',
'calibration-xSlope(g)', 'calibration-ySlope(g)', 'calibration-zSlope(g)',
'calibration-xTemp(C)', 'calibration-yTemp(C)', 'calibration-zTemp(C)',
'calibration-meanDeviceTemp(C)']
# print output CSV file with suggested calibration parameters
f = open(outputCsvFile, 'w')
f.write('fileName,calOffset,calSlope,calTemp,meanTemp\n')
for ix, row in d.iterrows():
# first get current file information
participant = str(row['file-name'])
ofX, ofY, ofZ, slpX, slpY, slpZ, tmpX, tmpY, tmpZ, calTempAvg = row[calCols]
# now construct output
out = participant + ','
out += str(ofX) + ' ' + str(ofY) + ' ' + str(ofZ) + ','
out += str(slpX) + ' ' + str(slpY) + ' ' + str(slpZ) + ','
out += str(tmpX) + ' ' + str(tmpY) + ' ' + str(tmpZ) + ','
out += str(calTempAvg)
f.write(out + '\n')
f.close()
print('Files with calibration coefficients for ', str(len(d)),
'participants written to:', outputCsvFile)
def date_parser(t):
'''
Parse date a date string of the form e.g.
2020-06-14 19:01:15.123+0100 [Europe/London]
'''
tz = re.search(r'(?<=\[).+?(?=\])', t)
if tz is not None:
tz = tz.group()
t = re.sub(r'\[(.*?)\]', '', t)
return pd.to_datetime(t, utc=True).tz_convert(tz)
def date_strftime(t):
'''
Convert to time format of the form e.g.
2020-06-14 19:01:15.123+0100 [Europe/London]
'''
tz = t.tz
return t.strftime(f'%Y-%m-%d %H:%M:%S.%f%z [{tz}]')
def writeTimeSeries(e, labels, tsFile):
""" Write activity timeseries file
:param pandas.DataFrame e: Pandas dataframe of epoch data. Must contain
activity classification columns with missing rows imputed.
:param list(str) labels: Activity state labels
:param dict tsFile: output CSV filename
:return: None
:rtype: void
"""
cols = ['acc'] + labels
if 'MET' in e.columns:
cols.append('MET')
if 'imputed' in e.columns:
cols.append('imputed')
e = e[cols]
# make output time format contain timezone
# e.g. 2020-06-14 19:01:15.123000+0100 [Europe/London]
e.index = e.index.to_series().apply(date_strftime)
e.to_csv(tsFile, compression='gzip')
| [
37811,
26796,
284,
2148,
14276,
20081,
329,
584,
8320,
15635,
13103,
526,
15931,
198,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
4818,
8079,
198,
11748,
33918,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
19798,
292,
3... | 2.488527 | 6,145 |
import datetime
import arrow
from django.conf import settings
from django.utils import timezone
#: Django datetime formatting string for ``YYYY-MM-DD hh:mm``.
ISODATETIME_DJANGOFORMAT = 'Y-m-d H:i'
ARROW_ISOFORMAT_NOSECONDS = 'YYYY-MM-DD HH:mm'
ARROW_ISOFORMAT_WITHSECONDS = 'YYYY-MM-DD HH:mm:ss'
def get_current_datetime():
"""
Get the current datetime as a ``datetime.datetime`` object.
We use this because it is easier to mock in unit tests than a built-in
or third party implementation.
"""
return timezone.now()
def default_timezone_datetime(*args, **kwargs):
"""
Create a timezone-aware ``datetime.datetime`` object.
The parameters are the same as for ``datetime.datetime``.
"""
datetimeobject = datetime.datetime(*args, **kwargs)
return make_timezone_aware_in_default_timezone(datetimeobject)
def isoformat_noseconds(datetimeobject):
"""
Format the given ``datetime.datetime`` object as ``YYYY-MM-DD hh:mm``.
"""
return arrow.get(datetimeobject).format(ARROW_ISOFORMAT_NOSECONDS)
def isoformat_withseconds(datetimeobject):
"""
Format the given ``datetime.datetime`` object as ``YYYY-MM-DD hh:mm``.
"""
return arrow.get(datetimeobject).format(ARROW_ISOFORMAT_WITHSECONDS)
def datetime_with_same_day_of_week_and_time(weekdayandtimesource_datetime, target_datetime):
"""
Returns a new datetime object with the same time and day of week as
the given ``target_datetime``, with the day of week moved forward
to match the ``weekdayandtimesource_datetime``, and the time matching the
``weekdayandtimesource_datetime``.
This means that if you send in a ``weekdayandtimesource_datetime`` with tuesday
as the weekday, the return value will be a datetime object with the day set
to the next tuesday unless the current day is monday or tuesday.
"""
weekdayandtimesource_datetime = timezone.localtime(weekdayandtimesource_datetime)
target_datetime = timezone.localtime(target_datetime)
weekdayandtimesource_weekday = weekdayandtimesource_datetime.isoweekday()
target_weekday = target_datetime.isoweekday()
if weekdayandtimesource_weekday > target_weekday:
added_days = weekdayandtimesource_weekday - target_weekday
else:
added_days = 7 - target_weekday + weekdayandtimesource_weekday
new_datetimeobject = target_datetime + datetime.timedelta(days=added_days)
new_datetimeobject = new_datetimeobject.replace(hour=weekdayandtimesource_datetime.hour,
minute=weekdayandtimesource_datetime.minute,
second=weekdayandtimesource_datetime.second,
microsecond=weekdayandtimesource_datetime.microsecond)
return new_datetimeobject
URL_DATETIME_FORMAT = 'X'
def datetime_to_url_string(datetime_obj):
"""
Converts datetime object to URL-friendly string.
Args:
datetime_obj (``django.utils.timezone``): ``datetime`` obj to convert.
Returns:
(str): Datetime as string specified by :attr:`.URL_DATETIME_FORMAT`.
"""
return arrow.get(datetime_obj).format(URL_DATETIME_FORMAT)
def datetime_url_string_to_datetime(datetime_string):
"""
Convert URL-friendly string to ``django.utils.timezone`` datetime object.
Args:
datetime_string (str): String to convert.
Returns:
(``django.utils.timezone``): Converted datetime object from string.
"""
return arrow.get(datetime_string, URL_DATETIME_FORMAT).datetime
| [
11748,
4818,
8079,
198,
198,
11748,
15452,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
198,
2,
25,
37770,
4818,
8079,
33313,
4731,
329,
7559,
26314,
26314,
12,
12038,
1... | 2.592672 | 1,392 |
# Copyright 2019, Offchain Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eth_abi
import eth_utils
from ..std import sized_byterange, stack
REVERT_CODE = 0
INVALID_CODE = 1
RETURN_CODE = 2
STOP_CODE = 3
INVALID_SEQUENCE_CODE = 4
EVM_OUTPUT_TYPES = {
RETURN_CODE: EVMCall,
REVERT_CODE: EVMRevert,
INVALID_CODE: EVMInvalid,
INVALID_SEQUENCE_CODE: EVMInvalidSequence,
STOP_CODE: EVMStop,
}
| [
2,
15069,
13130,
11,
3242,
7983,
23500,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
1... | 2.916149 | 322 |
# -*- coding: utf-8 -*-
import os
from copy import deepcopy
from uuid import uuid4
from openprocurement.api.utils import get_now
from openprocurement.tender.cfaua.tests.base import (
BaseTenderWebTest, test_tender_data, test_lots
)
from tests.base.test import DumpsWebTestApp, MockWebTestMixin
from tests.base.constants import DOCS_URL
TARGET_DIR = 'docs/source/agreementcfaua/tutorial/'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
334,
27112,
1330,
334,
27112,
19,
198,
198,
6738,
1280,
36942,
495,
434,
13,
15042,
13,
26791,
1330,
651,
62... | 2.769231 | 143 |
# Copyright 2012 VPAC, http://www.vpac.org
# Copyright 2013-2016 Marcus Furlong <furlongm@gmail.com>
#
# This file is part of Patchman.
#
# Patchman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 only.
#
# Patchman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchman. If not, see <http://www.gnu.org/licenses/>
from __future__ import unicode_literals
from django.shortcuts import get_object_or_404, render
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from rest_framework import viewsets, permissions
from util.filterspecs import Filter, FilterBar
from packages.models import PackageName, Package, PackageUpdate, \
Erratum, ErratumReference
from arch.models import PackageArchitecture
from packages.serializers import PackageNameSerializer, \
PackageSerializer, PackageUpdateSerializer, ErratumSerializer, \
ErratumReferenceSerializer
@login_required
@login_required
class PackageNameViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows package names to be viewed or edited.
"""
queryset = PackageName.objects.all()
serializer_class = PackageNameSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class PackageViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows packages to be viewed or edited.
"""
queryset = Package.objects.all()
serializer_class = PackageSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class PackageUpdateViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows packages updates to be viewed or edited.
"""
queryset = PackageUpdate.objects.all()
serializer_class = PackageUpdateSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class ErratumViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows errata to be viewed or edited.
"""
queryset = Erratum.objects.all()
serializer_class = ErratumSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class ErratumReferenceViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows erratum references to be viewed or edited.
"""
queryset = ErratumReference.objects.all()
serializer_class = ErratumReferenceSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
| [
2,
15069,
2321,
23342,
2246,
11,
2638,
1378,
2503,
13,
85,
33587,
13,
2398,
198,
2,
15069,
2211,
12,
5304,
17068,
376,
6371,
506,
1279,
69,
6371,
506,
76,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
17106,
... | 3.371257 | 835 |
import math
# -*- coding:utf-8 -*-
if __name__ == '__main__':
length = float(input("length: "))
print("squares:", length ** 2)
print("cubes:", length ** 3)
print("circles:", math.pi * length ** 2)
print("squares:", 4.0 / 3.0 * length ** 3 * math.pi)
| [
11748,
10688,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
4129,
796,
12178,
7,
15414,
7203,
13664,
25,
366,
4008,
198,
220,
220,... | 2.365217 | 115 |
import pytest
import damast
import flask
import json
from database.testdata import place_table
from conftest import get_headers
_places = [ (p.id, True) for p in place_table ]
_places += [ (i, False) for i in (0, 224, 6214, 36) ]
@pytest.fixture(params=_places)
@pytest.mark.parametrize('ids', [False, True])
def test_get_place_evidence(client_ro, minimal_testuser, place, ids):
'''test getting a list of evidence for a place'''
user,password = minimal_testuser
place_id, exists = place
with flask.current_app.pg.get_cursor(readonly=True) as c:
headers = get_headers(client_ro, user.name, password)
rt = F'/rest/place/{place_id}/evidence'
if ids:
rt += '-ids'
rv = client_ro.get(rt, headers=headers)
if 'user' in user.roles and 'readdb' in user.roles:
if exists:
evidence_data = json.loads(rv.data)
assert rv.status_code == 200
count = c.one('''
SELECT count(*)
FROM evidence E
JOIN place_instance PI
ON E.place_instance_id = PI.id
JOIN place P
ON PI.place_id = P.id
WHERE P.id = %s
;''', (place_id,))
if ids:
assert type(evidence_data) == list
assert len(evidence_data) == count
else:
assert type(evidence_data) == dict
assert 'evidence' in evidence_data
assert len(evidence_data['evidence']) == count
else:
assert rv.status_code == 404
else:
assert rv.status_code == 403
_nouser_routes = [
'/rest/place/{}/evidence',
'/rest/place/{}/evidence-ids',
]
_nouser_methods = [ 'GET' ]
@pytest.mark.parametrize('route', _nouser_routes)
@pytest.mark.parametrize('method', _nouser_methods)
| [
11748,
12972,
9288,
198,
11748,
1801,
459,
198,
11748,
42903,
198,
11748,
33918,
198,
6738,
6831,
13,
9288,
7890,
1330,
1295,
62,
11487,
198,
198,
6738,
369,
701,
395,
1330,
651,
62,
50145,
198,
198,
62,
23625,
796,
685,
357,
79,
13,
... | 1.951076 | 1,022 |
"""NWCC info model."""
from sqlalchemy import (
Column,
Integer,
Text,
Date
)
from .meta import Base
class MyModel(Base):
"""."""
__tablename__ = 'models'
id = Column(Integer, primary_key=True)
page = Column(Text)
category = Column(Text)
subcategory = Column(Text)
title = Column(Text)
img = Column(Text)
imgsrc = Column(Text)
markdown = Column(Text)
extra = Column(Text)
date = Column(Date)
# Index('my_index', MyModel.name, unique=True, mysql_length=255)
| [
37811,
27605,
4093,
7508,
2746,
526,
15931,
198,
198,
6738,
44161,
282,
26599,
1330,
357,
198,
220,
220,
220,
29201,
11,
198,
220,
220,
220,
34142,
11,
198,
220,
220,
220,
8255,
11,
198,
220,
220,
220,
7536,
198,
8,
198,
6738,
764,
... | 2.533654 | 208 |
#import you graphing library
import matplotlib.pyplot as plt
#open you log file (saved as .txt)
log = open("canlog.txt", "r")
#read the contents of you log file
log = log.read()
#removes everything before the first T in tempurate (removes T aswell)
log = log.split("T", 2)
log[1].replace("----------------------------------------------", "")
log = log[2].rsplit("-", 2)[0]
log = log.rsplit("m", 1)[0]
log = log.split(",")
#index = time in seconds
index = 0
for i in log:
#remove all unessary characters
i = i.replace("emperature = ", "")
i = i.replace("T", "")
i = i.replace("C", "")
i = i.replace("m", "")
i = i.replace("\n", "")
i = i.replace("Pressure = ", "")
i = i.replace("Altitude = ", "")
i = i.replace(" ", "")
#change all numbers to floats
log[index] = float(i)
#increase time
index += 1
#declare all different values lists
temps = []
alti = []
pressure = []
index2 = 0
#split all numbers into their respective lists
for i in range(0, len(log), 3):
temps.append(log[i])
pressure.append(log[i+1])
alti.append(log[i+2])
#very important to add all of your labels
plt.title("Cansat Pressure")
plt.xlabel("time(sec)")
plt.ylabel("pressure(millibars)")
#plot your desired values
plt.plot(pressure)
plt.show()
| [
2,
11748,
345,
23360,
722,
5888,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
2,
9654,
345,
2604,
2393,
357,
82,
9586,
355,
764,
14116,
8,
201,
198,
6404,
796,
1280,
7203,
5171,
6404,
13,
14116,
1600,... | 2.476636 | 535 |
"""Init."""
from . import types, validation
| [
37811,
31768,
526,
15931,
198,
198,
6738,
764,
1330,
3858,
11,
21201,
198
] | 3.461538 | 13 |
# 这次不折腾啥面向对象了
# 简单好用直接上函数式
import os
from collections import Counter
import json
# 获取语言文件,处理得到一个 dict
if __name__ == '__main__':
key_total = [] # 存储所有语言文件 key
en_dict_total = {} # 存储所有的英文语言文件键值对
zh_dict_total = {} # 存储所有的中文语言文件键值对
key_duplicate = [] # 存储重复的语言文件 key
dict_total = {} # 最后的输出列表
for modid in file_finder('../project/assets'):
en_dict = lang_to_dict('../project/assets/{}/lang/en_us.lang'.format(modid))
zh_dict = lang_to_dict('../project/assets/{}/lang/zh_cn.lang'.format(modid))
en_dict_total[modid] = en_dict
zh_dict_total[modid] = zh_dict
key_total.extend(list(en_dict.keys()))
_dict = Counter(key_total)
for _key in _dict:
if _dict[_key] > 1:
key_duplicate.append(_key)
for i in key_duplicate:
dict_one = {}
list_en = []
list_zh = []
for j in en_dict_total.keys():
if i in en_dict_total[j].keys() and i in zh_dict_total[j].keys():
dict_one[j] = {en_dict_total[j][i]: zh_dict_total[j][i]}
list_en.append(en_dict_total[j][i])
list_zh.append(zh_dict_total[j][i])
if len(dict_one) > 1:
if len(set(list_en)) > 1 or len(set(list_zh)) > 1:
dict_total[i] = dict_one
print(json.dumps(dict_total, ensure_ascii=False))
print(len(dict_total))
| [
2,
5525,
123,
247,
162,
105,
94,
38834,
162,
232,
246,
164,
227,
122,
161,
243,
98,
165,
251,
95,
28938,
239,
43380,
117,
164,
109,
94,
12859,
228,
198,
2,
13328,
106,
222,
39355,
243,
25001,
121,
18796,
101,
33566,
112,
162,
236,... | 1.651712 | 847 |
from itertools import islice
import tqdm
from uvcgan.config import Args
from uvcgan.data import get_data
from uvcgan.torch.funcs import get_torch_device_smart, seed_everything
from uvcgan.cgan import construct_model
from uvcgan.utils.log import setup_logging
from .metrics import LossMetrics
from .callbacks import TrainingHistory
from .transfer import transfer
| [
6738,
340,
861,
10141,
1330,
318,
75,
501,
198,
11748,
256,
80,
36020,
198,
198,
6738,
334,
28435,
1030,
13,
11250,
220,
220,
220,
220,
220,
1330,
943,
14542,
198,
6738,
334,
28435,
1030,
13,
7890,
220,
220,
220,
220,
220,
220,
220,... | 2.902985 | 134 |
#!/usr/bin/env python
errno = 50159747054
name = 'Bob'
print('Hello, %s there is a 0x%x error' % (name, errno))
str_ = 'Hello, %(name)s there is a 0x%(errno)x error' % {'name': name, 'errno': errno}
print('Dictionary in old style')
print(str_)
print('New style')
str_ = 'Hey {name}, there is a 0x{errno:x} error'.format(name=name, errno=errno)
print(str_)
print('Literal String Interpolation')
str_ = f'Hello, {name}!'
print(str_)
a, b = 5, 10
str_ = f'Five plus ten is {a+b}, not {2*(a+b)}'
print(str_)
print(greet('Bob', 'going'))
str_ = f'Hey {name}, there is a {errno:#x} error!'
print(str_)
print('Template')
from string import Template
t = Template('Hey, $name!')
t.substitute(name=name)
print(t.substitute(name=name))
t = Template('Hey, $name! There is a $errno error!')
print(t.substitute(name=name, errno=hex(errno)))
SECRET = 'this-is-a-secret'
err = Error()
user_input = '{error}.__init__()'
user_input = '{error.__init__.__globals__[SECRET]}'
# print(err.__init__.__globals__)
print(user_input.format(error=err))
user_input = Template('${error.__init__.__globals__[SECRET]}')
print(user_input.substitute(error=err))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
8056,
3919,
796,
24555,
3270,
4524,
2154,
4051,
198,
3672,
796,
705,
18861,
6,
198,
198,
4798,
10786,
15496,
11,
4064,
82,
612,
318,
257,
657,
87,
4,
87,
4049,
6,
4064,
357,
3... | 2.401261 | 476 |
#!/usr/bin/env python3
from le_utils.constants.languages import getlang
from html2text import html2text
from le_utils.constants import licenses
from ricecooker.chefs import SushiChef
from ricecooker.classes.nodes import (ChannelNode, ExerciseNode, VideoNode, TopicNode)
from ricecooker.classes.questions import PerseusQuestion
from ricecooker.classes.files import VideoFile, YouTubeSubtitleFile
import subprocess
import re
import os
import pickle
import requests
import copy
FILE_URL_REGEX = re.compile('[\\\]*/content[\\\]*/assessment[\\\]*/khan[\\\]*/(?P<build_path>\w+)[\\\]*/(?P<filename>\w+)\.?(?P<ext>\w+)?', flags=re.IGNORECASE)
REPLACE_STRING = "/content/assessment/khan"
cwd = os.getcwd()
IMAGE_DL_LOCATION = 'file://' + cwd + '/build'
# recursive function to traverse tree and return parent node
# utility function to remove topic nodes with no content under them
if __name__ == "__main__":
"""
This code will run when the sushi chef is called from the command line.
"""
chef = KASushiChef()
chef.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
443,
62,
26791,
13,
9979,
1187,
13,
75,
33213,
1330,
651,
17204,
198,
6738,
27711,
17,
5239,
1330,
27711,
17,
5239,
198,
6738,
443,
62,
26791,
13,
9979,
1187,
1330,
16625,
... | 2.977143 | 350 |
import tensorflow as tf
import numpy as np
import os
import sys
sys.path.append('./utils')
sys.path.append('./models')
import dataset as dataset
import model_shape as model
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', './train_shape',
"""Directory where to write summaries and checkpoint.""")
tf.app.flags.DEFINE_string('base_dir', './data/ShapeNetCore_im2avatar',
"""The path containing all the samples.""")
tf.app.flags.DEFINE_string('cat_id', '02958343',
"""The category id for each category: 02958343, 03001627, 03467517, 04379243""")
tf.app.flags.DEFINE_string('data_list_path', './data_list',
"""The path containing data lists.""")
tf.app.flags.DEFINE_integer('train_epochs', 501, """Training epochs.""")
tf.app.flags.DEFINE_integer('batch_size', 30, """Batch size.""")
tf.app.flags.DEFINE_integer('gpu', 0, """""")
tf.app.flags.DEFINE_float('learning_rate', 0.0003, """""")
tf.app.flags.DEFINE_float('wd', 0.00001, """""")
tf.app.flags.DEFINE_integer('epochs_to_save',20, """""")
tf.app.flags.DEFINE_integer('decay_step',20000, """for lr""")
tf.app.flags.DEFINE_float('decay_rate', 0.7, """for lr""")
IM_DIM = 128
VOL_DIM = 64
BATCH_SIZE = FLAGS.batch_size
TRAIN_EPOCHS = FLAGS.train_epochs
GPU_INDEX = FLAGS.gpu
BASE_LEARNING_RATE = FLAGS.learning_rate
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
os.environ["CUDA_VISIBLE_DEVICES"] = str(GPU_INDEX)
TRAIN_DIR = os.path.join(FLAGS.train_dir, FLAGS.cat_id)
if not os.path.exists(TRAIN_DIR):
os.makedirs(TRAIN_DIR)
LOG_FOUT = open(os.path.join(TRAIN_DIR, 'log_train.txt'), 'w')
#LOG_FOUT.write(str(tf.flags._global_parser.parse_args())+'\n')
if __name__ == '__main__':
main()
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7,
4458,
14,
26791,
11537,
198,
17597,
13,
6978,
13,
33295,
7,
4458,
14,
27530,
11537,
198,... | 2.245305 | 852 |
'''
1. Write a Python program to count the frequency of words in a file.
2. Write a Python program to get the file size of a plain file.
3. Write a Python program to write a list to a file.
'''
| [
7061,
6,
198,
16,
13,
19430,
257,
11361,
1430,
284,
954,
262,
8373,
286,
2456,
287,
257,
2393,
13,
198,
198,
17,
13,
19430,
257,
11361,
1430,
284,
651,
262,
2393,
2546,
286,
257,
8631,
2393,
13,
198,
198,
18,
13,
19430,
257,
11361... | 3.438596 | 57 |
# schemas of datastructures commonly used in cooltools
# including description DataFrame dtypes/columns definitions
diag_expected_dtypes = {
"region1": "string",
"region2": "string",
"dist": "Int64",
"n_valid": "Int64",
}
block_expected_dtypes = {
"region1": "string",
"region2": "string",
"n_valid": "Int64",
}
# cooler weight names that are potentially divisive
# cooltools supports only multiplicative weight for now
DIVISIVE_WEIGHTS_4DN = ["KR", "VC", "VC_SQRT"]
| [
2,
3897,
5356,
286,
4818,
459,
1356,
942,
8811,
973,
287,
3608,
31391,
198,
2,
1390,
6764,
6060,
19778,
288,
19199,
14,
28665,
82,
17336,
198,
10989,
363,
62,
40319,
62,
67,
19199,
796,
1391,
198,
220,
220,
220,
366,
36996,
16,
1298... | 2.823864 | 176 |
import logging
from typing import Optional
import numpy as np
from cfod.analysis import waterfall
logging.basicConfig(format="%(levelname)s:%(message)s")
log = logging.getLogger(__name__)
| [
11748,
18931,
198,
6738,
19720,
1330,
32233,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
30218,
375,
13,
20930,
1330,
42467,
198,
198,
6404,
2667,
13,
35487,
16934,
7,
18982,
2625,
4,
7,
5715,
3672,
8,
82,
25,
4,
7,
205... | 3.2 | 60 |
from .network import Network
from .vae_class import VAE
from . import activations
from . import losses | [
6738,
764,
27349,
1330,
7311,
201,
198,
6738,
764,
33353,
62,
4871,
1330,
13753,
36,
201,
198,
6738,
764,
1330,
1753,
602,
201,
198,
6738,
764,
1330,
9089
] | 3.75 | 28 |
import base64
import random
import sys
| [
11748,
2779,
2414,
198,
11748,
4738,
198,
11748,
25064,
628
] | 4 | 10 |
from torch_rl.envs import GoalEnv, GoalEnvLogger, EnvLogger, RunningMeanStdNormalize
import gym
import roboschool
import torch_rl.envs
from torch_rl import config
from torch_rl.utils import logger
from tqdm import tqdm
import numpy as np
import argparse
from torch_rl.utils import *
addarg('--env', type=bool, default='OsimArm2D-v1')
p = parser.parse_args()
env_name = p.env
if 'Osim' in env_name:
env = make_osim(env_name)
else:
env = gym.make(env_name)
steps = 600
target_indices = [0,1]
curr_indices = [1,2]
# Configure logging, all data will automatically be saved to root_dir in the TRL_DATA_PATH
root_dir = 'random_' + str(env_name[:-3]).lower()
config.set_root(root_dir, force=True)
config.configure_logging(clear=False, output_formats=['tensorboard', 'json'])
env = GoalEnv(GoalEnvLogger(EnvLogger(env), target_indices=target_indices, curr_indices=curr_indices, precision=1e-2), target_indices=target_indices, curr_indices=curr_indices, precision=1e-2, sparse=True)
env.reset()
np.random.seed(666)
# Do 50000 random environment steps
for i in tqdm(range(steps)):
done = False
while not done:
_,_,done,_ = env.step(env.action_space.sample())
env.reset()
logger.dumpkvs()
| [
6738,
28034,
62,
45895,
13,
268,
14259,
1330,
25376,
4834,
85,
11,
25376,
4834,
85,
11187,
1362,
11,
2039,
85,
11187,
1362,
11,
18162,
5308,
272,
1273,
67,
26447,
1096,
198,
11748,
11550,
198,
11748,
3857,
418,
1251,
198,
11748,
28034,
... | 2.580169 | 474 |
# -*- coding: utf-8 -*-
from openid import message
from openid import oidutil
from openid.extensions import sreg
import urllib.request
import urllib.parse
import urllib.error
import unittest
if __name__ == '__main__':
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
1280,
312,
1330,
3275,
198,
6738,
1280,
312,
1330,
267,
312,
22602,
198,
6738,
1280,
312,
13,
2302,
5736,
1330,
264,
2301,
198,
198,
11748,
2956,
297,
571,
13,
2... | 2.725275 | 91 |
from flibia_aac.utils import pretty_datetime
| [
6738,
781,
41145,
62,
64,
330,
13,
26791,
1330,
2495,
62,
19608,
8079,
198
] | 3.214286 | 14 |
# Generated by Django 2.1.1 on 2018-11-05 01:27
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
16,
319,
2864,
12,
1157,
12,
2713,
5534,
25,
1983,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#!/usr/bin/python
"""
Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import sys
import json
from pybvc.controller.controller import Controller
from pybvc.netconfdev.vrouter.vrouter5600 import VRouter5600
from pybvc.common.status import STATUS
from pybvc.common.utils import load_dict_from_file
if __name__ == "__main__":
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) == False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
nodeIpAddr = d['nodeIpAddr']
nodePortNum = d['nodePortNum']
nodeUname = d['nodeUname']
nodePswd = d['nodePswd']
except:
print ("Failed to get Controller device attributes")
exit(0)
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
vrouter = VRouter5600(ctrl, nodeName, nodeIpAddr, nodePortNum, nodeUname, nodePswd)
print ("<<< 'Controller': %s, '%s': %s" % (ctrlIpAddr, nodeName, nodeIpAddr))
result = vrouter.get_loopback_interfaces_cfg()
status = result.get_status()
if(status.eq(STATUS.OK) == True):
print "Loopback interfaces config:"
dpIfCfg = result.get_data()
print json.dumps(dpIfCfg, indent=4)
else:
print ("\n")
print ("!!!Failed, reason: %s" % status.brief().lower())
print ("%s" % status.detailed())
sys.exit(0)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
37811,
198,
15269,
357,
66,
8,
1853,
11,
220,
11177,
4503,
19266,
48811,
2149,
18421,
36230,
50,
11,
19387,
198,
198,
3237,
2489,
10395,
13,
198,
198,
7738,
396,
3890,
290,
779,
287,
2... | 2.752022 | 1,113 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 16 11:11:39 2021
@author: alef
"""
import os
import sys
import platform
def uid():
"""
uid() -> retorna a identificação do usuário
corrente ou None se não for possível identificar
"""
# Variáveis de ambiente para cada
# Sistema operacional
us = {'Windows': 'USERNAME',
'Linux': 'USER'}
u = us.get(platform.system())
return os.environ.get(u)
print('Usuário: ', uid())
print('Plataforma: ', platform.platform())
print('Diretório corrente: ', os.path.abspath(os.curdir))
exep, exef = os.path.split(sys.executable)
print('Executável: ', exef)
print('Diretório do executável: ', exep) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
3158,
1467,
1367,
25,
1157,
25,
2670,
33448,
198,
198,
31,
9800,
25,
31341,
69,
19... | 2.294498 | 309 |
from flask import Flask, render_template, request
from flask_socketio import SocketIO, emit, disconnect
from streamer import Stream, auth, preprocessor, clf_path
app = Flask(__name__)
socketio = SocketIO(app)
@app.route('/', methods=['GET'])
@socketio.on('connect', namespace='/topic')
@socketio.on('disconnect', namespace='/topic')
if __name__ == '__main__':
# Create stream object with given credentials
global stream
stream = Stream(auth, preprocessor, clf_path, socketio)
# Streaming filter
stream_thread = stream.filter(
track=["jakarta"],
threaded=True
)
socketio.run(app, host='0.0.0.0', port=8000)
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
198,
6738,
42903,
62,
44971,
952,
1330,
47068,
9399,
11,
27588,
11,
22837,
198,
6738,
4269,
263,
1330,
13860,
11,
6284,
11,
662,
41341,
11,
537,
69,
62,
6978,
198,
198,
1324,
... | 2.852174 | 230 |
# 실수 2개 입력받아 나눈 결과 계산하기
a, b = map(float, input().split())
print(format(a/b, ".3f")) | [
2,
23821,
233,
97,
168,
230,
246,
362,
166,
108,
250,
23821,
252,
227,
167,
254,
98,
167,
108,
249,
168,
243,
226,
31619,
224,
246,
167,
230,
230,
220,
166,
110,
108,
166,
111,
120,
220,
166,
111,
226,
168,
224,
108,
47991,
246,... | 1.135135 | 74 |
from __future__ import division
# Enter your code here. Read input from STDIN. Print output to STDOUT
N = int(raw_input())
X = map(float, raw_input().strip().split(' '))
Y = map(float, raw_input().strip().split(' '))
rankX = rank(X)
rankY = rank(Y)
di = [(rX - rY) ** 2 for rX, rY in zip(rankX, rankY)]
ans = 1 - (6 * sum(di)) / (N * ((N ** 2) -1))
print round(ans, 3)
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
2,
6062,
534,
2438,
994,
13,
4149,
5128,
422,
48571,
1268,
13,
12578,
5072,
284,
48571,
12425,
198,
45,
796,
493,
7,
1831,
62,
15414,
28955,
198,
55,
796,
3975,
7,
22468,
11,
8246,
62,
15414... | 2.558621 | 145 |
from ...types import serializable
from ...util import none_or
from ..errors import MalformedXML
from .revision import Revision
from .redirect import Redirect
class Page(serializable.Type):
"""
Page meta data and a :class:`~mw.xml_dump.Revision` iterator. Instances of
this class can be called as iterators directly. E.g.
.. code-block:: python
page = mw.xml_dump.Page( ... )
for revision in page:
print("{0} {1}".format(revision.id, page_id))
"""
__slots__ = (
'id',
'title',
'namespace',
'redirect',
'restrictions',
'revisions'
)
@classmethod
@classmethod
| [
6738,
2644,
19199,
1330,
11389,
13821,
198,
6738,
2644,
22602,
1330,
4844,
62,
273,
198,
198,
6738,
11485,
48277,
1330,
4434,
12214,
55,
5805,
198,
6738,
764,
260,
10178,
1330,
46604,
198,
6738,
764,
445,
1060,
1330,
2297,
1060,
628,
19... | 2.385965 | 285 |
#!/usr/bin/python3.8
# Copyright 2020 Aragubas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# -- Imports --
import System.Core as Core
import pygame
import numpy as np
import os, time, numpy, math
from Library import CoreUtils as UTILS
from Library import CorePrimitives as Shape
from Library import CorePaths
print("Taiyou ContentManager version " + Core.Get_ContentManagerVersion())
DefaultImage = None
FontRenderingDisabled = False
ImageRenderingDisabled = False
RectangleRenderingDisabled = False
ImageTransparency = False
SoundDisabled = False
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
13,
23,
198,
2,
220,
220,
15069,
12131,
943,
363,
549,
292,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
22... | 3.466238 | 311 |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
SECRET_KEY = 's5df46sf46s46dfgvfbfbfhGFGFGFB56BF'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, '..', 'rating.db')
| [
11748,
28686,
198,
198,
3106,
343,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198,
198,
23683,
26087,
62,
20373,
796,
705,
82,
20,
7568,
3510,
28202,
3510,
82,
3510,
7568,
... | 2.073684 | 95 |
import http.client
import urllib
| [
11748,
2638,
13,
16366,
198,
11748,
2956,
297,
571,
628
] | 3.4 | 10 |
# -*- coding: utf-8 -*-
import os
import time
import requests
import datetime
import webbrowser
# Device creation and authorization
# Extracting code from a string
if __name__ == '__main__':
print("Now the site will open in your browser, after authorization, paste the entire line.")
time.sleep(3)
webbrowser.open(endpoints.redirect, new=2)
line = input("Line: ")
code = line_processing(line)
if code != False:
authorization = main(code)
if authorization != False:
print("Information:\n\nAccount name: %s\nAccount id: %s\nSecret: %s\nDevice id: %s" % (authorization[0], authorization[1], authorization[2], authorization[3]))
else:
print("An error occurred while creating the device!")
else:
print("The line from the website was inserted incorrectly! Try again!")
input('Press any key...')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
7007,
198,
11748,
4818,
8079,
198,
11748,
3992,
40259,
198,
198,
2,
16232,
6282,
290,
19601,
198,
198,
2,
29677,
278,
243... | 3.1341 | 261 |
#
# Example file for working with classes
#
if __name__ == "__main__":
main()
| [
2,
198,
2,
17934,
2393,
329,
1762,
351,
6097,
198,
2,
198,
220,
220,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
1388,
3419,
198
] | 2.766667 | 30 |
# All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at code bellow!
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "Sep 18, 2017 2:32:42 PM"
from config import CONFIG
from libs.core.logger import getLogger
from .base.uisettings import UISettings
from .base.constants import DEVICE_SETTINGS
from libs.device.ui.base.exceptions import SetupWizardError
| [
2,
1439,
2489,
10395,
416,
8222,
25607,
13,
198,
2,
921,
2314,
13096,
393,
2648,
1997,
1231,
11728,
13,
198,
2,
1002,
345,
836,
470,
4236,
11,
1394,
9480,
290,
836,
470,
804,
379,
2438,
8966,
322,
0,
198,
198,
834,
9800,
834,
796,... | 3.345324 | 139 |
import random
from graphbrain.cli import wrapper
from graphbrain.parsers import create_parser
if __name__ == '__main__':
wrapper(extract_sentences, text='extract sentences')
| [
11748,
4738,
198,
198,
6738,
4823,
27825,
13,
44506,
1330,
29908,
198,
6738,
4823,
27825,
13,
79,
945,
364,
1330,
2251,
62,
48610,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
29908,
7,
... | 3.309091 | 55 |
import param
import plotly.graph_objs as go
from ...core import util
from ...operation import interpolate_curve
from .element import ElementPlot, ColorbarPlot
| [
11748,
5772,
198,
11748,
7110,
306,
13,
34960,
62,
672,
8457,
355,
467,
198,
198,
6738,
2644,
7295,
1330,
7736,
198,
6738,
2644,
27184,
1330,
39555,
378,
62,
22019,
303,
198,
6738,
764,
30854,
1330,
11703,
43328,
11,
5315,
5657,
43328,
... | 3.652174 | 46 |
import threading
import time
import logging
from fastapi import APIRouter
from pydantic import BaseModel
from starlette.requests import Request
from . import nexus, commons, view_model, data_updater
logger = logging.getLogger(__name__)
router = APIRouter()
@router.get("/search/{search_string}")
# debugging only
@router.get("/next/{stop_code}")
# curl -X POST http://localhost:8000/nexus/start-client -d {\"code\":\"vic\"}
@router.post("/start-client")
| [
11748,
4704,
278,
198,
11748,
640,
198,
11748,
18931,
198,
198,
6738,
3049,
15042,
1330,
3486,
4663,
39605,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
198,
6738,
3491,
21348,
13,
8897,
3558,
1330,
19390,
198,
198,
6738,
764,
1330,
4... | 2.961783 | 157 |
from pathlib import Path
import json
from pprint import pprint
import os
import base64
from time import sleep
from datetime import datetime, timedelta
# Pip installed Modules
from impgrpc import IMPClient
from protobuf_to_dict import protobuf_to_dict
node_ip = "127.0.0.1"
imp = IMPClient(
f"{node_ip}:8881"
)
node_ip = "127.0.0.1"
imp = IMPClient(
f"{node_ip}:9991"
)
for i in imp.subscribe():
print(f"Amount Spent: {i.amount} \n\n")
pprint(protobuf_to_dict(i.data))
| [
6738,
3108,
8019,
1330,
10644,
198,
11748,
33918,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
11748,
28686,
198,
11748,
2779,
2414,
198,
6738,
640,
1330,
3993,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
198,
2,
2514... | 2.550265 | 189 |
import os
import errno
import fire
import json
import yaml
import shutil
import urllib
from time import sleep
import logging
from boto3 import session
from botocore.exceptions import ClientError
logging.basicConfig(
format='%(asctime)s|%(name).10s|%(levelname).5s: %(message)s',
level=logging.WARNING)
log = logging.getLogger('greengo')
log.setLevel(logging.DEBUG)
DEFINITION_FILE = 'greengo.yaml'
MAGIC_DIR = '.gg'
STATE_FILE = os.path.join(MAGIC_DIR, 'gg_state.json')
ROOT_CA_URL = "https://www.symantec.com/content/en/us/enterprise/verisign/roots/VeriSign-Class%203-Public-Primary-Certification-Authority-G5.pem"
DEPLOY_TIMEOUT = 90 # Timeout, seconds
# TODO: Refactor.
# Connectors, Loggers, and Subscription code are all the same, exactly.
###############################################################################
# UTILITY FUNCTIONS
def pretty(d):
"""Pretty object as YAML."""
return yaml.safe_dump(d, default_flow_style=False)
if __name__ == '__main__':
main()
| [
11748,
28686,
198,
11748,
11454,
3919,
198,
11748,
2046,
198,
11748,
33918,
198,
11748,
331,
43695,
198,
11748,
4423,
346,
198,
11748,
2956,
297,
571,
198,
6738,
640,
1330,
3993,
198,
11748,
18931,
198,
6738,
275,
2069,
18,
1330,
6246,
... | 2.812672 | 363 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Fast Marching test using a two-layer model."""
import time
from math import asin, tan
import numpy as np
import matplotlib.pyplot as plt
import pygimli as pg
from pygimli.mplviewer import drawMesh, drawField, drawStreamLines
from pygimli.physics.traveltime import fastMarch
r"""
Solve the Hamilton-Jacobi (HJ) equation, known as the Eikonal equation
.. math::
|\grad u(x)| & = f(x) \\
||\grad t||_2 &= s
\grad t^2 &= s^2~\cite{SunFomel2000}
where :math:`t` denote traveltime for a spatial distributed slowness :math:`s`
In case f(x) = 1, the solution gives the distance from the boundary
"""
if __name__ == '__main__':
xmin, xmax, zlay = -20., 150., 20.
# create PLC (piece-wise linear complex) of two layers
plc = pg.Mesh(2)
nodes = []
# 0-----------1
# | |
# 5-----------2
# | |
# 4-----------3
nodes.append(plc.createNode(xmin, 0., 0.))
nodes.append(plc.createNode(xmax, 0., 0.))
nodes.append(plc.createNode(xmax, -zlay, 0.))
nodes.append(plc.createNode(xmax, -zlay * 2, 0.))
nodes.append(plc.createNode(xmin, -zlay * 2, 0.))
nodes.append(plc.createNode(xmin, -zlay, 0.))
# connect the nodes
for i in range(5):
plc.createEdge(nodes[i], nodes[i + 1])
plc.createEdge(nodes[5], nodes[0])
plc.createEdge(nodes[5], nodes[2])
# insert region markers into the two layers and make mesh
tri = pg.TriangleWrapper(plc)
plc.addRegionMarker(pg.RVector3(0., -zlay + .1), 0, 3.) # 10m^2 max area
plc.addRegionMarker(pg.RVector3(0., -zlay - .1), 1, 10.)
tri.setSwitches('-pzeAfaq34.6')
mesh = pg.Mesh(2)
tri.generate(mesh)
mesh.createNeighbourInfos()
print(mesh)
# make velocity model
v = [1000., 3000.]
slomap = pg.stdMapF_F() # map for mapping real slowness values
for i, vi in enumerate(v):
slomap.insert(i, 1. / vi)
mesh.mapCellAttributes(slomap) # map values to attributes using map
# initialize source position and trvel time vector
source = pg.RVector3(0., 0.)
times = pg.RVector(mesh.nodeCount(), 0.)
# initialize sets and tags
upwind, downwind = set(), set()
upTags, downTags = np.zeros(mesh.nodeCount()), np.zeros(mesh.nodeCount())
# define initial condition
cell = mesh.findCell(source)
for i, n in enumerate(cell.nodes()):
times[n.id()] = cell.attribute() * n.pos().distance(source)
upTags[n.id()] = 1
for i, n in enumerate(cell.nodes()):
tmpNodes = pg.commonNodes(n.cellSet())
for nn in tmpNodes:
if not upTags[nn.id()] and not downTags[nn.id()]:
downwind.add(nn)
downTags[nn.id()] = 1
# start fast marching
tic = time.time()
while len(downwind) > 0:
fastMarch(mesh, downwind, times, upTags, downTags)
print(time.time() - tic, "s")
# compare with analytical solution along the x axis
x = np.arange(0., 100., 0.5)
t = pg.interpolate(mesh, times, x, x * 0., x * 0.)
tdirect = x / v[0] # direct wave
alfa = asin(v[0] / v[1]) # critically refracted wave angle
xreflec = tan(alfa) * zlay * 2. # first critically refracted
trefrac = (x - xreflec) / v[1] + xreflec * v[1] / v[0]**2
tana = np.where(trefrac < tdirect, trefrac, tdirect) # minimum of both
print("min(dt)=",
min(t - tana) * 1000,
"ms max(dt)=",
max(t - tana) * 1000,
"ms")
# plot traveltime field, a few lines
fig = plt.figure()
a = fig.add_subplot(211)
drawMesh(a, mesh)
drawField(a, mesh, times, True, 'Spectral')
drawStreamLines(a, mesh, times, nx=50, ny=50)
# plot calculated and measured travel times
a2 = fig.add_subplot(212)
plt.plot(x, t, 'b.-', x, tdirect, 'r-', x, trefrac, 'g-')
plt.show()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
22968,
2805,
278,
1332,
1262,
257,
734,
12,
29289,
2746,
526,
15931,
198,
11748,
640,
198,
6738,
10688,
1330,
35... | 2.257858 | 1,718 |
import os
import datetime
# To download the docx file, run a command along the following lines:
#
# curl https://www.mass.gov/doc/covid-19-cases-in-massachusetts-as-of-march-27-2020-accessible/download > 3-27-20.docx
#
# For some reason, docx files were not available for some dates. Then, run the following command to get the PDF instead:
#
# curl https://www.mass.gov/doc/covid-19-cases-in-massachusetts-as-of-march-27-2020/download > 3-27-20.pdf
if __name__ == '__main__':
day = datetime.datetime.today().day
month = datetime.datetime.today().month
year = datetime.datetime.today().year - 2000
args = {'day': day, 'month': month, 'year': year}
run_OH(args)
| [
11748,
28686,
198,
11748,
4818,
8079,
198,
198,
2,
1675,
4321,
262,
2205,
87,
2393,
11,
1057,
257,
3141,
1863,
262,
1708,
3951,
25,
198,
2,
198,
2,
29249,
3740,
1378,
2503,
13,
22208,
13,
9567,
14,
15390,
14,
66,
709,
312,
12,
112... | 2.783673 | 245 |
from .theme import Theme
DEFAULT = Theme(
(255, 255, 255),
(0, 0, 0),
[
(0, 0, 0),
(128, 0, 0),
(0, 128, 0),
(128, 128, 0),
(0, 0, 128),
(128, 0, 128),
(0, 128, 128),
(192, 192, 192),
(128, 128, 128),
(255, 0, 0),
(0, 255, 0),
(255, 255, 0),
(0, 0, 255),
(255, 0, 255),
(0, 255, 255),
(255, 255, 255),
],
)
| [
6738,
764,
43810,
1330,
26729,
198,
198,
7206,
38865,
796,
26729,
7,
198,
220,
220,
220,
357,
13381,
11,
14280,
11,
14280,
828,
198,
220,
220,
220,
357,
15,
11,
657,
11,
657,
828,
198,
220,
220,
220,
685,
198,
220,
220,
220,
220,
... | 1.574913 | 287 |
"""
Project creator.
"""
import os
import shutil
import datetime
from yozuch import config, logger
from yozuch.utils import makedirs
| [
37811,
198,
16775,
13172,
13,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
4818,
8079,
198,
6738,
331,
8590,
794,
1330,
4566,
11,
49706,
198,
6738,
331,
8590,
794,
13,
26791,
1330,
285,
4335,
17062,
628
] | 3.375 | 40 |
import torch
import numpy as np
import torch.nn as nn
from model_base import *
from net_deblur import *
from net_pwc import*
from reblur_package import *
from flow_utils import *
from image_proc import *
from losses import *
| [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
6738,
2746,
62,
8692,
1330,
1635,
198,
6738,
2010,
62,
11275,
75,
333,
1330,
1635,
198,
6738,
2010,
62,
79,
86,
66,
1330,
9,
198... | 3.183099 | 71 |
import unittest
from typing import Optional
from ...errors import (
PetroniaSerializationFormatError
)
from ..serial.converter import (
serialize_to_json,
deserialize_from_json,
create_instance,
)
from ...internal_.bus_events import (
RegisterEventEvent,
EVENT_ID_REGISTER_EVENT,
)
from ...internal_.bus_constants import (
QUEUE_EVENT_NORMAL,
GLOBAL_EVENT_PROTECTION,
)
| [
198,
11748,
555,
715,
395,
198,
6738,
19720,
1330,
32233,
198,
6738,
2644,
48277,
1330,
357,
198,
220,
220,
220,
4767,
1313,
544,
32634,
1634,
26227,
12331,
198,
8,
198,
6738,
11485,
46911,
13,
1102,
332,
353,
1330,
357,
198,
220,
220... | 2.701987 | 151 |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/thori/ccccc/src/vrpn_client_ros/include;/opt/ros/melodic/include".split(';') if "/home/thori/ccccc/src/vrpn_client_ros/include;/opt/ros/melodic/include" != "" else []
PROJECT_CATKIN_DEPENDS = "geometry_msgs;tf2_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lvrpn_client_ros;/opt/ros/melodic/lib/libvrpn.a;/opt/ros/melodic/lib/libquat.a;-lm;-lpthread".split(';') if "-lvrpn_client_ros;/opt/ros/melodic/lib/libvrpn.a;/opt/ros/melodic/lib/libquat.a;-lm;-lpthread" != "" else []
PROJECT_NAME = "vrpn_client_ros"
PROJECT_SPACE_DIR = "/home/thori/ccccc/devel"
PROJECT_VERSION = "0.2.2"
| [
2,
7560,
422,
3797,
5116,
14,
11215,
539,
14,
28243,
14,
35339,
13,
22866,
13,
14751,
13,
259,
198,
34,
1404,
42,
1268,
62,
47,
8120,
11879,
62,
47,
31688,
10426,
796,
13538,
198,
31190,
23680,
62,
40492,
38,
62,
10943,
16254,
62,
... | 2.178788 | 330 |
import numpy as np
from numba import jit
@jit
@jit
@jit
@jit
def inject_source(up, wit, isx, isy, ne, vel2, dt2):
"""
wit: source wavelet, single time sample
dt2: dt**2
"""
up[isx + ne, isy + ne] += vel2[isx + ne, isy + ne] * dt2 * wit
@jit
def inject_sources(up, src, isy, ne, vel2, dt2):
"""
wit: source wavelet, single time sample
dt2: dt**2
"""
up[ne:-ne, isy + ne] += vel2[ne:-ne, isy + ne] * dt2 * src[:]
# 2nd order Keys boundary condition
CT1 = np.cos(np.pi / 6.)
CT2 = np.cos(np.pi / 12.)
CM12 = CT1 * CT2
CP12 = CT1 + CT2
@jit
@jit
| [
11748,
299,
32152,
355,
45941,
198,
6738,
997,
7012,
1330,
474,
270,
628,
198,
31,
45051,
628,
198,
31,
45051,
628,
198,
31,
45051,
628,
198,
31,
45051,
198,
4299,
8677,
62,
10459,
7,
929,
11,
20868,
11,
318,
87,
11,
318,
88,
11,
... | 2.090909 | 286 |
"""
RP Distributor
"""
from __future__ import print_function
import re
import os
import sys
import shutil
from os.path import isfile, isdir, join, dirname, realpath, relpath
base_dir = realpath(dirname(__file__))
rp_dir = realpath(join(base_dir, "../../"))
os.chdir(base_dir)
sys.path.insert(0, rp_dir)
from rplibs.six.moves import input
ignores = [
# data
"skybox-blend.zip",
"skybox.jpg",
"skybox-2.jpg",
"default_cubemap/source",
"default_cubemap/source_2",
"default_cubemap/filter.compute.glsl",
"default_cubemap/filter.py",
"data/generate_txo_files.py",
"README.md",
"environment_brdf/generate_reference.py",
"run_mitsuba.bat",
".mip",
".xml",
".exr",
".psd",
".diff",
".pyc",
".pdb",
"environment_brdf/res/",
"film_grain/generate.py",
"film_grain/grain.compute.glsl",
"ies_profiles/PREVIEWS.jpg",
# rpcore
"native/scripts",
"native/source",
"native/win_amd",
"native/win_i386",
"native/.gitignore",
"native/build.py",
"native/CMakeLists",
"native/update_module_builder.py",
"native/config.ini",
# rpplugins
".ffxml",
"bloom/resources/SOURCE.txt",
"bloom/resources/lens_dirt.png",
"clouds/resources/generate_",
"clouds/resources/noise.inc",
"clouds/resources/precompute.py",
"color_correction/resources/film_luts_raw",
"color_correction/resources/generate_",
"plugin_prefab",
"scattering/resources/hosek_wilkie_scattering",
# Avoid infinite recursion
"rp_distributor",
# toolkit
"ui/res",
"compile_ui.bat",
".ui",
".qrc",
"pathtracing_reference",
"poisson_disk_generator",
"render_service/resources",
]
if __name__ == "__main__":
distribute()
| [
37811,
201,
198,
201,
198,
20031,
46567,
273,
201,
198,
201,
198,
37811,
201,
198,
201,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
201,
198,
201,
198,
201,
198,
11748,
302,
201,
198,
11748,
28686,
201,
198,
11748,
25064,
201,... | 2.077348 | 905 |
import os
from django.db import models
from django.conf import settings
from filebrowser.base import FileObject
from uploadfield.conf import TEMP_DIR
from uploadfield.utils import check_existing, makedir, move_file, delete_file
| [
11748,
28686,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6738,
2393,
40259,
13,
8692,
1330,
9220,
10267,
198,
198,
6738,
9516,
3245,
13,
10414,
1330,
309,
39494,
62,
34720,
1... | 3.666667 | 63 |
from pathlib import Path
COMP_CHECK_CV_REPO_DIR = Path.home() / ".comp-check-cvs-cache"
COMP_CHECK_CV_CACHE_DIR = (COMP_CHECK_CV_REPO_DIR / "master" / "pyessv-archive-eg-cvs").as_posix()
EG_DATA_DIR = "tests/example_data"
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
9858,
47,
62,
50084,
62,
33538,
62,
2200,
16402,
62,
34720,
796,
10644,
13,
11195,
3419,
1220,
27071,
5589,
12,
9122,
12,
66,
14259,
12,
23870,
1,
198,
9858,
47,
62,
50084,
62,
33538,
62,
34,... | 2.309278 | 97 |
#coding:utf8
'''
Created on 2011-3-23
@author: sean_lan
'''
from app.share.dbopear import dbuser
INITTOWN = 1700
from twisted.python import log
class User:
'''用户类'''
def __init__(self, account,password,dynamicId = -1):
'''
@param id: int 用户的id
@param name: str用户的名称
@param password: str 用户的密码
@param pid: int 邀请者的id
@param dynamicId: str 登录时客户端的动态ID
@param characterId: dict 用户的角色
@param isEffective: bool 是否是有效的
'''
self.id = 0
self.account = account;
self.name = ""
self.password = password
self.pid = 0
self.dynamicId = dynamicId
self.isEffective = True
self.characterId = 0
self.shape = 0;
self.node = "";#game node
self.Ischaracterlogined = False;
self.scene_node = "";#scene node
self.Ischaracterlocked = False;#only used when client disconnected
self.characterInfo = {}
self.initUser();
def initUser(self):
'''初始化用户类'''
data = dbuser.getUserInfoByUsername(self.account,self.password)
if not data:
self.isEffective = False
return
if not data['enable']:
self.isEffective = False
self.id = data.get('id',0)
self.pid = data.get('pid',0)
self.characterId = data.get('characterId',0)
def getNickName(self):
'''获取账号名
'''
return self.name
def getNode(self):
'''返回角色所在的节点服务ID'''
return self.node
def setNode(self,node):
'''设置角色的节点服务ID
@param node: int 节点的id
'''
self.node = node
return
def CheckEffective(self):
'''检测账号是否有效'''
return self.isEffective
def checkClient(self,dynamicId):
'''检测客户端ID是否匹配'''
return self.dynamicId == dynamicId
def getCharacterInfo(self):
'''获取角色的信息'''
if not self.characterInfo:
self.characterInfo = dbuser.getUserCharacterTotalInfo(self.characterId)
#log.msg('User getCharacterInfo ',self.characterId,self.characterInfo)
self.name = self.characterInfo['nickname'];
self.shape = self.characterInfo['figure'];
return self.characterInfo
def setDynamicId(self,dynamicId):
'''设置动态ID
@param dynamicId: int 客户端动态ID
'''
self.dynamicId = dynamicId
def getDynamicId(self):
'''获取用户动态ID'''
return self.dynamicId
def creatNewCharacter(self ,nickname ,profession,shape,tm,scene,x,y):
'''创建新角色
'''
if len(nickname)<2 or len(nickname)>20:
return False
if self.characterId:
return False
if not dbuser.checkCharacterName(nickname):
return False
characterId = dbuser.creatNewCharacter(nickname, profession,shape, self.id,1,tm,scene,x,y)
if characterId:
self.characterId = characterId
self.name = nickname;
self.shape = shape;
return True;
return False;
| [
2,
66,
7656,
25,
40477,
23,
198,
7061,
6,
198,
41972,
319,
2813,
12,
18,
12,
1954,
198,
198,
31,
9800,
25,
384,
272,
62,
9620,
198,
7061,
6,
198,
6738,
598,
13,
20077,
13,
9945,
404,
451,
1330,
20613,
7220,
198,
198,
1268,
22470... | 1.823837 | 1,720 |
# --------------------------------------------------------------------------
# Example demonstrates how to export OneDrive files into local file system
# --------------------------------------------------------------------------
import os
import tempfile
from examples import acquire_token_client_credentials
from office365.graph_client import GraphClient
from tests import test_user_principal_name
client = GraphClient(acquire_token_client_credentials)
drive = client.users[test_user_principal_name].drive # get user's drive
with tempfile.TemporaryDirectory() as local_path:
drive_items = drive.root.children.get().execute_query()
file_items = [item for item in drive_items if not item.file.is_server_object_null] # files only
for drive_item in file_items:
with open(os.path.join(local_path, drive_item.name), 'wb') as local_file:
drive_item.download(local_file).execute_query() # download file content
print("File '{0}' has been downloaded".format(local_file.name))
| [
2,
16529,
35937,
198,
2,
17934,
15687,
703,
284,
10784,
1881,
24825,
3696,
656,
1957,
2393,
1080,
198,
2,
16529,
35937,
198,
198,
11748,
28686,
198,
11748,
20218,
7753,
198,
198,
6738,
6096,
1330,
12831,
62,
30001,
62,
16366,
62,
66,
... | 3.547038 | 287 |
"""
Copyright 2020 Zakru
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from urllib.error import HTTPError
from .requests import request_list, request_object, ObjectFromDict
def get_recipe_ids():
"""Gets a :class:`list` of :class:`str` objects containing all
recipe IDs from the Wynncraft API.
:returns: A list of all recipeIDs as :class:`str`
:rtype: :class:`list`
"""
return request_list('https://api.wynncraft.com/v2/recipe/list')
def get_recipe(id):
"""Gets a Recipe as an
:class:`ObjectFromDict <wynn.requests.ObjectFromDict>` object from
the Wynncraft API.
Format: https://docs.wynncraft.com/Recipe-API/#recipe-object
:param name: The ID of the Recipe
:type name: :class:`str`
:returns: The Recipe returned by the API or ``None`` if not found
:rtype: :class:`ObjectFromDict <wynn.requests.ObjectFromDict>`
"""
try:
response = request_list(
'https://api.wynncraft.com/v2/recipe/get/{0}',
id
)
if not response:
return None
return ObjectFromDict(response[0])
except HTTPError as e:
if e.code == 400:
return None
raise
def search_recipes(query, args):
"""Searches for recipes from the Wynncraft API. See
https://docs.wynncraft.com/Recipe-API/#search for query
format.
:param query: See above link
:type query: :class:`str`
:param args: See above link
:type args: :class:`str`
:returns: A list of recipes as
:class:`ObjectFromDict <wynn.requests.ObjectFromDict>`. Empty if
the query failed.
:rtype: :class:`list`
"""
try:
return list(map(ObjectFromDict, request_list(
'https://api.wynncraft.com/v2/recipe/search/{0}/{1}',
query, args,
)))
except HTTPError as e:
if e.code == 400:
return []
raise e
| [
37811,
198,
15269,
12131,
32605,
622,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
198,
30073,
286,
428,
3788,
290,
3917,
10314,
3696,
198,
7,
1169,
366,
25423,
12340,
284,
1730,
287,
26... | 2.701493 | 1,072 |
import unittest
import os
import sys
root = os.path.abspath(os.path.dirname(os.path.dirname(os.path.abspath(
__file__))))
sys.path.append(root)
from i_jss import _load_json_schema
from i_jss import compare
class IJssTest(unittest.TestCase):
    """
    Tests the compare() function from i_jss with fixed schemas found at '/schemas/'.

    Each test writes the comparison outcome to 'schemas/result.json' and then
    reloads that file to check the reported relation between the two schemas.
    """
    def test_equiv(self):
        """ Tests equivalence by reflexivity: a schema compared to itself is equivalent. """
        result = compare('schemas/s1.json', 'schemas/s1.json', 'schemas/result.json')
        self.assertEqual(result, "Success.")
        result = _load_json_schema('schemas/result.json')
        self.assertEqual(result["result"], "equivalent")
    def test_superset(self):
        """ Tests superset with s1 being a superset of s2 """
        result = compare('schemas/s1.json', 'schemas/s2.json', 'schemas/result.json')
        self.assertEqual(result, "Success.")
        result = _load_json_schema('schemas/result.json')
        self.assertEqual(result["result"], "superset")
    def test_subset(self):
        """ Tests subset with s2 being a subset of s1 """
        result = compare('schemas/s2.json', 'schemas/s1.json', 'schemas/result.json')
        self.assertEqual(result, "Success.")
        result = _load_json_schema('schemas/result.json')
        self.assertEqual(result["result"], "subset")
    def test_difference(self):
        """ Tests that unrelated schemas (s3 vs s1/s2) are reported as incomparable """
        result = compare('schemas/s1.json', 'schemas/s3.json', 'schemas/result.json')
        self.assertEqual(result, "Success.")
        result = _load_json_schema('schemas/result.json')
        self.assertEqual(result["result"], "incomparable")
        result = compare('schemas/s2.json', 'schemas/s3.json', 'schemas/result.json')
        self.assertEqual(result, "Success.")
        result = _load_json_schema('schemas/result.json')
        self.assertEqual(result["result"], "incomparable")
if __name__ == '__main__':
    unittest.main()  # discover and run the test cases defined above
| [
11748,
555,
715,
395,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
15763,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
... | 2.385542 | 830 |
import os
import numpy as np
import argparse
from sklearn import metrics
from random import shuffle, sample, seed
import tensorflow as tf
from tensorflow import keras
from tensorflow.random import set_seed
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, TimeDistributed, LSTM, Input, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.applications.mobilenet import MobileNet
from tensorflow.keras.applications.mobilenet import preprocess_input as preprocess_input_v1
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input as preprocess_input_v2
from tensorflow.keras.applications.efficientnet import *
if __name__ == '__main__':
    # Train a CNN+LSTM video classifier: a frozen ImageNet backbone extracts
    # per-frame features, an LSTM aggregates them over time, and two dense
    # layers classify the clip. Relies on create_sets() defined elsewhere in
    # this file to load and preprocess the dataset.

    # parsing arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--model",
                        type=str,
                        default='mobilenet',
                        nargs="?",
                        help="Model: mobilenet or efficientnet.")
    parser.add_argument("--model_version",
                        type=str,
                        default='V1',
                        nargs="?",
                        help="Mobile net version: V1 or V2. Efficient net scaling: B0, B1, B2, B3, B4, B5, B6 or B7.")
    parser.add_argument("--dataset_path",
                        type=str,
                        default='/lus_stratification/generate_model/croppedi2p0/',
                        nargs="?",
                        help="Dataset's absolute path")
    parser.add_argument("--results_path",
                        type=str,
                        default='/lus_stratification/generate_model/results/',
                        nargs="?",
                        help="Results's absolute path")
    parser.add_argument("--train_test_divide",
                        type=float,
                        default=0.75,
                        nargs="?",
                        help="Train test divide value between 0.0 and 1.0")
    parser.add_argument("--epochs",
                        type=int,
                        default=10,
                        nargs="?",
                        help="Epochs value between 1 and infinite")
    parser.add_argument("--batch_size",
                        type=int,
                        default=32,
                        nargs="?",
                        help="Batch size value")
    parser.add_argument("--steps_per_epoch",
                        type=int,
                        default=44,
                        nargs="?",
                        help="Steps per epoch value")
    parser.add_argument("--use_steps_per_epoch",
                        type=int,
                        default=0,
                        nargs="?",
                        help="Use steps per epoch value: 1 use, other not use. Default 0.")
    parser.add_argument("--optimizer",
                        type=str,
                        default='adam',
                        nargs="?",
                        help="Optimizer")
    parser.add_argument("--loss",
                        type=str,
                        default='binary_crossentropy',
                        nargs="?",
                        help="Loss")
    parser.add_argument("--label_dataset_zero",
                        type=str,
                        default='N0',
                        nargs="?",
                        help="Label dataset 0: N0, B0, M0, S0, C0, P0.")
    parser.add_argument("--label_dataset_one",
                        type=str,
                        default='N1',
                        nargs="?",
                        help="Label dataset 1: N1, B1, M1, S1, C1, P1.")
    parser.add_argument("--strategy",
                        type=str,
                        default='combined',
                        nargs="?",
                        help="Create sets strategy: combined or by_patients.")
    parser.add_argument("--random_seed",
                        type=int,
                        default=12345,
                        nargs="?",
                        help="Random seed for reproducible results")
    args = parser.parse_args()

    # reproducible results: fix every RNG the pipeline touches
    os.environ['PYTHONHASHSEED'] = '0'
    np.random.seed(args.random_seed)
    seed(args.random_seed)
    set_seed(args.random_seed)

    # images params
    MAX_FRAMES = 5
    IMG_SIZE = 224
    CHANNELS = 3

    # Map (model, version) to the matching Keras application constructor.
    # The dict dispatch replaces the original if-cascade and fails fast with a
    # clear error instead of a later NameError when the combination is unknown.
    backbones = {
        ('mobilenet', 'V1'): MobileNet,
        ('mobilenet', 'V2'): MobileNetV2,
        ('efficientnet', 'B0'): EfficientNetB0,
        ('efficientnet', 'B1'): EfficientNetB1,
        ('efficientnet', 'B2'): EfficientNetB2,
        ('efficientnet', 'B3'): EfficientNetB3,
        ('efficientnet', 'B4'): EfficientNetB4,
        ('efficientnet', 'B5'): EfficientNetB5,
        ('efficientnet', 'B6'): EfficientNetB6,
        ('efficientnet', 'B7'): EfficientNetB7,
    }
    try:
        backbone = backbones[(args.model, args.model_version)]
    except KeyError:
        raise ValueError("Unsupported model/version combination: %s %s"
                         % (args.model, args.model_version))
    # get the model without the denses (no classification head)
    base_model = backbone(weights='imagenet', include_top=False)

    # collapse the spatial feature map of the backbone into one feature vector
    last_layer = base_model.layers[-1]
    new_top_layer_global_avg_pooling = GlobalAveragePooling2D()(last_layer.output)
    cnn = Model(base_model.input, new_top_layer_global_avg_pooling)

    # we will only train the LSTM and the new denses: freeze the backbone
    for layer in base_model.layers:
        layer.trainable = False

    # see CNN model structure
    cnn.summary()

    # input several frames for the LSTM
    inputs = Input(shape=(MAX_FRAMES, IMG_SIZE, IMG_SIZE, CHANNELS))

    # add recurrency: run the CNN on every frame, then aggregate with an LSTM
    encoded_frames = TimeDistributed(cnn)(inputs)
    encoded_sequence = LSTM(256)(encoded_frames)

    # classification layers
    new_dense = Dense(1024, activation='relu')(encoded_sequence)
    predictions = Dense(2, activation='softmax')(new_dense)
    model = Model(inputs=[inputs], outputs=predictions)

    # compile model
    model.compile(optimizer=args.optimizer, loss=args.loss, metrics=["accuracy"])

    # get the data
    print('***** Load files...')
    X_train, y_train, X_train_names, X_val, y_val, X_val_names = create_sets(args.dataset_path,
                                                                             args.label_dataset_zero,
                                                                             args.label_dataset_one,
                                                                             args.model,
                                                                             args.model_version,
                                                                             IMG_SIZE,
                                                                             MAX_FRAMES,
                                                                             CHANNELS,
                                                                             args.train_test_divide)

    # see final model structure (CNN + LSTM)
    model.summary()

    # input shape
    print("Input shape")
    print(X_train.shape)

    # input sample shape
    print("Input sample shape")
    print(X_train[0].shape)
    print(X_train[1].shape)
    print(X_train[15].shape)
    print(X_train[42].shape)

    # fit model, optionally bounding the number of batches per epoch
    if (args.use_steps_per_epoch == 1):
        results = model.fit(X_train, y_train, epochs=args.epochs, steps_per_epoch=args.steps_per_epoch, batch_size=args.batch_size, validation_data=(X_val, y_val))
    else:
        results = model.fit(X_train, y_train, epochs=args.epochs, batch_size=args.batch_size, validation_data=(X_val, y_val))
    print('#' * 40)
    print("Finished! Saving model")

    # save model
    model.save(args.results_path + 'covid19_model_temporal_'
               + args.model + args.model_version + "_for_" + args.label_dataset_zero + "_" + args.label_dataset_one)
    print('#' * 40)
    print("Model saved!")
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1822,
29572,
198,
198,
6738,
1341,
35720,
1330,
20731,
198,
6738,
4738,
1330,
36273,
11,
6291,
11,
9403,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
... | 2.01905 | 3,937 |
from multiprocessing import Pool
if __name__ == '__main__':
    numbers = range(10)
    # The context manager guarantees the worker processes are cleaned up even
    # if map() raises; the original close()/join() pair leaked the pool on error.
    with Pool() as pool:
        result = pool.map(cube, numbers)
    print(result)
6738,
18540,
305,
919,
278,
1330,
19850,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
3146,
796,
2837,
7,
940,
8,
198,
220,
220,
220,
5933,
796,
19850,
3419,
628,
220,
220,
220,
1255,
796,
... | 2.4625 | 80 |
from __future__ import division
import configparser
import logging
import os
import queue
import re
import threading
import time
import docker
from zabbixdocker.lib.zabbix import ZabbixMetric, ZabbixSender
class DockerContainersService(threading.Thread):
    """ This class implements the service which sends containers metrics """

    def __init__(self, config: configparser.ConfigParser, stop_event: threading.Event, docker_client: docker.APIClient,
                 zabbix_sender: ZabbixSender):
        """
        Initialize the instance

        :param config: the configuration parser
        :param stop_event: the event to stop execution
        :param docker_client: the docker client
        :param zabbix_sender: the zabbix sender
        """
        super().__init__()
        self._logger = logging.getLogger(self.__class__.__name__)
        self._config = config
        self._stop_event = stop_event
        self._docker_client = docker_client
        self._zabbix_sender = zabbix_sender
        self._queue = queue.Queue()
        self._workers = []

    def run(self):
        """
        Execute the thread
        """
        # a single daemon worker drains the queue and pushes metrics to zabbix
        containers_worker = DockerContainersWorker(self._config, self._docker_client, self._zabbix_sender,
                                                   self._queue)
        containers_worker.setDaemon(True)
        self._workers.append(containers_worker)

        self._logger.info("service started")

        # optional grace period before the first collection cycle
        startup_delay = self._config.getint("containers", "startup")
        if startup_delay > 0:
            self._stop_event.wait(startup_delay)

        for containers_worker in self._workers:
            containers_worker.start()

        # collect first, then sleep; leave as soon as the stop event fires
        while True:
            self._execute()

            if self._stop_event.wait(self._config.getint("containers", "interval")):
                break

        self._logger.info("service stopped")

    def _execute(self):
        """
        Execute the metrics sending
        """
        self._logger.debug("requesting containers metrics")
        # wake the worker up; the string is only a task token
        self._queue.put("metrics")
class DockerContainersWorker(threading.Thread):
    """ This class implements a containers worker thread """

    def __init__(self, config: configparser.ConfigParser, docker_client: docker.APIClient, zabbix_sender: ZabbixSender,
                 containers_queue: queue.Queue):
        """
        Initialize the instance

        :param config: the configuration parser
        :param docker_client: the docker client
        :param zabbix_sender: the zabbix sender
        :param containers_queue: the containers queue
        """
        super().__init__()
        self._logger = logging.getLogger(self.__class__.__name__)
        self._config = config
        self._docker_client = docker_client
        self._zabbix_sender = zabbix_sender
        self._containers_queue = containers_queue

    def run(self):
        """
        Execute the thread
        """
        while True:
            self._logger.debug("waiting execution queue")
            task = self._containers_queue.get()

            # None is the shutdown sentinel
            if task is None:
                break

            self._logger.info("sending containers metrics")

            try:
                hostname = self._config.get("zabbix", "hostname")
                containers = self._docker_client.containers(all=True)
                metrics = []
                running = created = stopped = healthy = unhealthy = 0

                # one status metric per container, plus a health metric when the
                # container defines a healthcheck
                for container in containers:
                    container_name = container["Names"][0][1:]
                    status = container["Status"]

                    if status.startswith("Up"):
                        running += 1
                        metrics.append(ZabbixMetric(hostname,
                                                    "docker.containers.status[%s]" % container_name,
                                                    "%d" % 1))

                        if status.find("(healthy)") != -1:
                            healthy += 1
                            metrics.append(ZabbixMetric(hostname,
                                                        "docker.containers.health[%s]" % container_name,
                                                        "%d" % 1))
                        elif status.find("(unhealthy)") != -1:
                            unhealthy += 1
                            metrics.append(ZabbixMetric(hostname,
                                                        "docker.containers.health[%s]" % container_name,
                                                        "%d" % 0))
                    elif status == "Created":
                        created += 1
                        metrics.append(ZabbixMetric(hostname,
                                                    "docker.containers.status[%s]" % container_name,
                                                    "%d" % 0))
                    else:
                        stopped += 1
                        metrics.append(ZabbixMetric(hostname,
                                                    "docker.containers.status[%s]" % container_name,
                                                    "%d" % 0))

                # aggregated counters, emitted in a fixed order
                for key, value in (
                        ("docker.containers.total", len(containers)),
                        ("docker.containers.running", running),
                        ("docker.containers.created", created),
                        ("docker.containers.stopped", stopped),
                        ("docker.containers.healthy", healthy),
                        ("docker.containers.unhealthy", unhealthy)):
                    metrics.append(ZabbixMetric(hostname, key, "%d" % value))

                if len(metrics) > 0:
                    self._logger.debug("sending %d metrics" % len(metrics))
                    self._zabbix_sender.send(metrics)
            except (IOError, OSError, LookupError, ValueError):
                self._logger.error("failed to send containers metrics")
class DockerContainersStatsService(threading.Thread):
    """ The class implements the service which sends containers statistics metrics """

    # per-container memory_stats["stats"] fields, in the original emission order
    _MEMORY_STATS_FIELDS = (
        "active_anon", "active_file", "cache", "dirty", "hierarchical_memory_limit",
        "hierarchical_memsw_limit", "inactive_anon", "inactive_file", "mapped_file",
        "pgfault", "pgmajfault", "pgpgin", "pgpgout", "rss", "rss_huge", "swap",
        "unevictable", "writeback",
    )
    # host-wide totals, only emitted while exactly one running container has
    # been processed (original behavior preserved)
    _MEMORY_TOTAL_FIELDS = (
        "total_active_anon", "total_active_file", "total_cache", "total_dirty",
        "total_inactive_anon", "total_inactive_file", "total_mapped_file",
        "total_pgfault", "total_pgmajfault", "total_pgpgout", "total_pgpgin",
        "total_rss", "total_rss_huge", "total_swap", "total_unevictable",
        "total_writeback",
    )
    # fields that may be absent depending on kernel/cgroup configuration
    _MEMORY_OPTIONAL_FIELDS = frozenset((
        "dirty", "hierarchical_memsw_limit", "swap", "writeback",
        "total_dirty", "total_swap", "total_writeback",
    ))
    # per-interface network counters
    _NETWORK_FIELDS = ("rx_bytes", "rx_dropped", "rx_errors", "rx_packets",
                       "tx_bytes", "tx_dropped", "tx_errors", "tx_packets")

    def __init__(self, config: configparser.ConfigParser, stop_event: threading.Event, docker_client: docker.APIClient,
                 zabbix_sender: ZabbixSender):
        """
        Initialize the instance

        :param config: the configuration parser
        :param stop_event: the event to stop execution
        :param docker_client: the docker client
        :param zabbix_sender: the zabbix sender
        """
        super(DockerContainersStatsService, self).__init__()
        self._logger = logging.getLogger(self.__class__.__name__)
        self._queue = queue.Queue()
        self._workers = []
        self._config = config
        self._stop_event = stop_event
        self._docker_client = docker_client
        self._zabbix_sender = zabbix_sender
        self._containers_stats = {}

    def run(self):
        """
        Execute the thread
        """
        # the workers asynchronously sample docker stats into _containers_stats
        for _ in (range(self._config.getint("containers_stats", "workers"))):
            worker = DockerContainersStatsWorker(self._config, self._docker_client, self._queue,
                                                 self._containers_stats)
            worker.setDaemon(True)
            self._workers.append(worker)

        self._logger.info("service started")

        if self._config.getint("containers_stats", "startup") > 0:
            self._stop_event.wait(self._config.getint("containers_stats", "startup"))

        for worker in self._workers:
            worker.start()

        while True:
            self._execute()

            if self._stop_event.wait(self._config.getint("containers_stats", "interval")):
                break

        self._logger.info("service stopped")

    def _execute(self):
        """
        Build and send the metrics for every running container from the
        statistics samples cached by the worker threads
        """
        self._logger.info("sending available containers statistics metrics")

        try:
            metrics = []
            hostname = self._config.get("zabbix", "hostname")
            containers = self._docker_client.containers(all=True)

            # drop cached samples of containers that no longer exist
            for container_id in set(self._containers_stats) - set(map(lambda c: c["Id"], containers)):
                del self._containers_stats[container_id]

            containers_running = 0

            for container in containers:
                container_name = container["Names"][0][1:]

                if not container["Status"].startswith("Up"):
                    continue

                # schedule a fresh sample for the next cycle
                self._queue.put(container)
                containers_running += 1

                if container["Id"] not in self._containers_stats:
                    continue  # no sample collected yet for this container

                container_stats = self._containers_stats[container["Id"]]["data"]
                clock = self._containers_stats[container["Id"]]["clock"]

                self._append_cpu_metrics(metrics, hostname, container_name, container_stats, clock)

                # overall memory usage percentage and process count
                memory = \
                    container_stats["memory_stats"]["usage"] / container_stats["memory_stats"]["limit"] * 100
                metrics.append(
                    ZabbixMetric(hostname,
                                 "docker.containers.stats.memory[%s]" % container_name,
                                 "%.2f" % memory, clock))
                metrics.append(
                    ZabbixMetric(hostname,
                                 "docker.containers.stats.proc[%s]" % container_name,
                                 "%d" % container_stats["pids_stats"]["current"], clock))

                self._append_percpu_metrics(metrics, hostname, container_name, container_stats, clock)
                self._append_memory_metrics(metrics, hostname, container_name, container_stats, clock,
                                            containers_running == 1)
                self._append_network_metrics(metrics, hostname, container_name, container_stats, clock)
                self._append_blkio_metrics(metrics, hostname, container_name, container_stats, clock)

            if len(metrics) > 0:
                self._logger.debug("sending %d metrics" % len(metrics))
                self._zabbix_sender.send(metrics)
        except (IOError, OSError, LookupError, ValueError):
            self._logger.error("failed to send containers statistics metrics")

    def _append_cpu_metrics(self, metrics, hostname, container_name, container_stats, clock):
        """ Append CPU usage and throttling metrics derived from two consecutive samples """
        cpu_stats = container_stats["cpu_stats"]
        precpu_stats = container_stats["precpu_stats"]
        system_delta = cpu_stats["system_cpu_usage"] - precpu_stats["system_cpu_usage"]

        cpu = \
            (cpu_stats["cpu_usage"]["total_usage"] -
             precpu_stats["cpu_usage"]["total_usage"]) / system_delta * 100
        cpu_system = \
            (cpu_stats["cpu_usage"]["usage_in_kernelmode"] -
             precpu_stats["cpu_usage"]["usage_in_kernelmode"]) / system_delta * 100
        cpu_user = \
            (cpu_stats["cpu_usage"]["usage_in_usermode"] -
             precpu_stats["cpu_usage"]["usage_in_usermode"]) / system_delta * 100
        cpu_periods = \
            cpu_stats["throttling_data"]["periods"] - \
            precpu_stats["throttling_data"]["periods"]
        cpu_throttled_periods = \
            cpu_stats["throttling_data"]["throttled_periods"] - \
            precpu_stats["throttling_data"]["throttled_periods"]
        cpu_throttled_time = \
            cpu_stats["throttling_data"]["throttled_time"] - \
            precpu_stats["throttling_data"]["throttled_time"]

        metrics.append(
            ZabbixMetric(hostname,
                         "docker.containers.stats.cpu[%s]" % container_name,
                         "%.2f" % cpu, clock))
        metrics.append(
            ZabbixMetric(hostname,
                         "docker.containers.stats.cpu_system[%s]" % container_name,
                         "%.2f" % cpu_system, clock))
        metrics.append(
            ZabbixMetric(hostname,
                         "docker.containers.stats.cpu_user[%s]" % container_name,
                         "%.2f" % cpu_user, clock))
        metrics.append(
            ZabbixMetric(hostname,
                         "docker.containers.stats.cpu_periods[%s]" % container_name,
                         "%d" % cpu_periods, clock))
        metrics.append(
            ZabbixMetric(hostname,
                         "docker.containers.stats.cpu_throttled_periods[%s]" % container_name,
                         "%d" % cpu_throttled_periods, clock))
        metrics.append(
            ZabbixMetric(hostname,
                         "docker.containers.stats.cpu_throttled_time[%s]" % container_name,
                         "%d" % cpu_throttled_time, clock))

    def _append_percpu_metrics(self, metrics, hostname, container_name, container_stats, clock):
        """ Append per-CPU usage metrics

        BUGFIX: the previous code tested ``isinstance(percpu_usage, int)``,
        which is never true for the list the Docker API returns, so these
        metrics were silently never emitted; the check now tests for a list.
        """
        cpu_stats = container_stats.get("cpu_stats", {})
        percpu_usage = cpu_stats.get("cpu_usage", {}).get("percpu_usage")

        if not isinstance(percpu_usage, list):
            return

        precpu_usage = container_stats["precpu_stats"]["cpu_usage"]["percpu_usage"]
        system_delta = cpu_stats["system_cpu_usage"] - \
            container_stats["precpu_stats"]["system_cpu_usage"]

        for index in range(len(percpu_usage)):
            percpu = (percpu_usage[index] - precpu_usage[index]) / system_delta * 100
            metrics.append(
                ZabbixMetric(hostname,
                             "docker.containers.stats.percpu[%s,%d]" % (container_name, index),
                             "%.2f" % percpu, clock))

    def _append_memory_metrics(self, metrics, hostname, container_name, container_stats, clock,
                               include_totals):
        """ Append the detailed memory statistics; totals only when include_totals is true """
        if "memory_stats" not in container_stats or "stats" not in container_stats["memory_stats"]:
            return

        memory_stats = container_stats["memory_stats"]
        stats = memory_stats["stats"]

        for field in self._MEMORY_STATS_FIELDS:
            if field in self._MEMORY_OPTIONAL_FIELDS and field not in stats:
                continue
            metrics.append(
                ZabbixMetric(hostname,
                             "docker.containers.stats.memory_stats.stats_%s[%s]" % (field, container_name),
                             "%d" % stats[field], clock))

        metrics.append(
            ZabbixMetric(hostname,
                         "docker.containers.stats.memory_stats.max_usage[%s]" % container_name,
                         "%d" % memory_stats["max_usage"], clock))
        metrics.append(
            ZabbixMetric(hostname,
                         "docker.containers.stats.memory_stats.usage[%s]" % container_name,
                         "%d" % memory_stats["usage"], clock))

        if "failcnt" in memory_stats:
            metrics.append(
                ZabbixMetric(hostname,
                             "docker.containers.stats.memory_stats.failcnt[%s]" % container_name,
                             "%d" % memory_stats["failcnt"], clock))

        metrics.append(
            ZabbixMetric(hostname,
                         "docker.containers.stats.memory_stats.limit[%s]" % container_name,
                         "%d" % memory_stats["limit"], clock))

        if include_totals:
            for field in self._MEMORY_TOTAL_FIELDS:
                if field in self._MEMORY_OPTIONAL_FIELDS and field not in stats:
                    continue
                metrics.append(
                    ZabbixMetric(hostname,
                                 "docker.containers.stats.memory_stats.stats_%s" % field,
                                 "%d" % stats[field], clock))

    def _append_network_metrics(self, metrics, hostname, container_name, container_stats, clock):
        """ Append the per-interface network counters """
        if 'networks' not in container_stats:
            return

        for ifname in list(container_stats["networks"].keys()):
            interface_stats = container_stats["networks"][ifname]
            for field in self._NETWORK_FIELDS:
                metrics.append(
                    ZabbixMetric(hostname,
                                 "docker.containers.stats.networks.%s[%s,%s]" % (
                                     field, container_name, ifname),
                                 "%d" % interface_stats[field], clock))

    def _append_blkio_metrics(self, metrics, hostname, container_name, container_stats, clock):
        """ Append the block I/O counters

        BUGFIX: the previous code tested ``isinstance(..., int)`` on the
        recursive entry lists, which is never true, so these metrics were
        silently never emitted; the check now tests for a list.
        """
        blkio_stats = container_stats.get("blkio_stats", {})

        for key, item in (("io_serviced_recursive", "io"),
                          ("io_service_bytes_recursive", "io_bytes")):
            entries = blkio_stats.get(key)
            if not isinstance(entries, list):
                continue
            for entry in entries:
                metrics.append(
                    ZabbixMetric(hostname,
                                 "docker.containers.stats.blkio_stats.%s[%s,%d,%d,%s]" % (
                                     item, container_name, entry["major"], entry["minor"], entry["op"]),
                                 "%d" % entry["value"], clock))
class DockerContainersStatsWorker(threading.Thread):
    """ This class implements a containers stats worker thread """

    def __init__(self, config: configparser.ConfigParser, docker_client: docker.APIClient,
                 containers_stats_queue: queue.Queue, containers_stats: dict):
        """
        Initialize the instance

        :param config: the configuration parser
        :param docker_client: the docker client
        :param containers_stats_queue: the container stats queue
        :param containers_stats: the container stats data
        """
        super().__init__()
        self._logger = logging.getLogger(self.__class__.__name__)
        self._config = config
        self._docker_client = docker_client
        self._containers_stats_queue = containers_stats_queue
        self._containers_stats = containers_stats

    def run(self):
        """
        Execute the thread
        """
        while True:
            self._logger.debug("waiting execution queue")
            container = self._containers_stats_queue.get()

            # None is the shutdown sentinel
            if container is None:
                break

            self._logger.info("querying statistics metrics for container %s" % container["Id"])

            try:
                # one-shot, non-streaming sample; record when it was taken
                sample = self._docker_client.stats(container, decode=False, stream=False)
                self._containers_stats[container["Id"]] = {"data": sample, "clock": int(time.time())}
            except (IOError, OSError, LookupError, ValueError):
                self._logger.error("failed to get statistics metrics for container %s" % container["Id"])
class DockerContainersTopService(threading.Thread):
    """ This class implements a service which sends containers top metrics """

    def __init__(self, config: configparser.ConfigParser, stop_event: threading.Event, docker_client: docker.APIClient,
                 zabbix_sender: ZabbixSender):
        """
        Initialize the instance

        :param config: the configuration parser
        :param stop_event: the event to stop execution
        :param docker_client: the docker client
        :param zabbix_sender: the zabbix sender
        """
        super(DockerContainersTopService, self).__init__()
        self._logger = logging.getLogger(self.__class__.__name__)
        self._workers = []
        self._queue = queue.Queue()
        self._config = config
        self._stop_event = stop_event
        self._docker_client = docker_client
        self._zabbix_sender = zabbix_sender
        self._containers_top = {}

    def run(self):
        """
        Execute the thread
        """
        # Spawn the configured number of daemon workers sharing one queue.
        for _ in (range(self._config.getint("containers_top", "workers"))):
            worker = DockerContainersTopWorker(self._config, self._docker_client, self._queue, self._containers_top)
            worker.setDaemon(True)
            self._workers.append(worker)
        self._logger.info("service started")
        # Optional startup delay before the first collection cycle.
        if self._config.getint("containers_top", "startup") > 0:
            self._stop_event.wait(self._config.getint("containers_top", "startup"))
        for worker in self._workers:
            worker.start()
        while True:
            self._execute()
            # wait() returns True when the stop event is set -> shut down.
            if self._stop_event.wait(self._config.getint("containers_top", "interval")):
                break
        self._logger.info("service stopped")

    def _execute(self):
        """
        Send the cached per-process (top) metrics of all running containers
        to Zabbix and enqueue refresh requests for the workers.
        """
        self._logger.info("sending available containers top metrics")
        try:
            metrics = []
            containers = self._docker_client.containers()
            # Drop cached data of containers that no longer exist.
            for container_id in set(self._containers_top) - set(map(lambda c: c["Id"], containers)):
                del self._containers_top[container_id]
            for container in containers:
                container_name = container["Names"][0][1:]
                if container["Status"].startswith("Up"):
                    self._queue.put(container)
                if container["Id"] not in self._containers_top:
                    continue
                container_top = self._containers_top[container["Id"]]["data"]
                clock = self._containers_top[container["Id"]]["clock"]
                # BUGFIX: the original test was `not isinstance(..., int)`,
                # which skipped every valid (list) payload and would have
                # raised TypeError on `len()` for integer ones. A usable
                # "Processes" entry must be a list of rows.
                if not isinstance(container_top["Processes"], list):
                    continue
                # Rows come from `docker top <id> aux`:
                # [USER, PID, %CPU, %MEM, VSZ, RSS, ...]
                for i in range(len(container_top["Processes"])):
                    metrics.append(
                        ZabbixMetric(
                            self._config.get("zabbix", "hostname"),
                            "docker.containers.top.cpu[%s,%s]" % (
                                container_name,
                                container_top["Processes"][i][1]),
                            "%s" % (container_top["Processes"][i][2]),
                            clock))
                    metrics.append(
                        ZabbixMetric(
                            self._config.get("zabbix", "hostname"),
                            "docker.containers.top.mem[%s,%s]" % (
                                container_name,
                                container_top["Processes"][i][1]),
                            "%s" % (container_top["Processes"][i][3]),
                            clock))
                    metrics.append(
                        ZabbixMetric(
                            self._config.get("zabbix", "hostname"),
                            "docker.containers.top.vsz[%s,%s]" % (
                                container_name,
                                container_top["Processes"][i][1]),
                            "%s" % (container_top["Processes"][i][4]),
                            clock))
                    metrics.append(
                        ZabbixMetric(
                            self._config.get("zabbix", "hostname"),
                            "docker.containers.top.rss[%s,%s]" % (
                                container_name,
                                container_top["Processes"][i][1]),
                            "%s" % (container_top["Processes"][i][5]),
                            clock))
            if len(metrics) > 0:
                self._logger.debug("sending %d metrics" % len(metrics))
                self._zabbix_sender.send(metrics)
        except (IOError, OSError, LookupError, ValueError):
            self._logger.error("failed to send containers top metrics")
class DockerContainersTopWorker(threading.Thread):
    """Worker thread that refreshes per-container ``top`` snapshots."""

    def __init__(self, config: configparser.ConfigParser, docker_client: docker.APIClient,
                 containers_top_queue: queue.Queue, containers_top: dict):
        """
        Initialize the instance

        :param config: the configuration parser
        :param docker_client: the docker client
        :param containers_top_queue: the containers top queue
        :param containers_top: the containers top data
        """
        super().__init__()
        self._logger = logging.getLogger(self.__class__.__name__)
        self._config = config
        self._docker_client = docker_client
        self._containers_top_queue = containers_top_queue
        self._containers_top = containers_top

    def run(self):
        """
        Execute the thread
        """
        # Keep serving queued containers until the ``None`` sentinel arrives.
        while True:
            self._logger.debug("waiting execution queue")
            container = self._containers_top_queue.get()
            if container is None:
                break
            container_id = container["Id"]
            self._logger.info("querying top metrics for container %s" % container_id)
            try:
                top_data = self._docker_client.top(container, "aux")
            except (IOError, OSError, LookupError, ValueError):
                self._logger.error("failed to get top metrics for container %s" % container_id)
            else:
                # Cache the snapshot together with its collection time.
                self._containers_top[container_id] = {
                    "data": top_data,
                    "clock": int(time.time()),
                }
class DockerContainersRemoteService(threading.Thread):
    """ This class implements a service which execute remote commands to send custom containers metrics """

    def __init__(self, config: configparser.ConfigParser, stop_event: threading.Event, docker_client: docker.APIClient,
                 zabbix_sender: ZabbixSender):
        """
        Initialize the instance

        :param config: the configuration parser
        :param stop_event: the event to stop execution
        :param docker_client: the docker client
        :param zabbix_sender: the zabbix sender
        """
        super(DockerContainersRemoteService, self).__init__()
        self._logger = logging.getLogger(self.__class__.__name__)
        self._workers = []
        self._queue = queue.Queue()
        self._config = config
        self._stop_event = stop_event
        self._docker_client = docker_client
        self._zabbix_sender = zabbix_sender
        self._containers_outputs = {}
        self._counter = 0
        self._lock = threading.Lock()

    def counter(self):
        """
        Return the current execution cycle counter

        :return: the number of completed execution cycles
        """
        # BUGFIX: this accessor was missing although
        # DockerContainersRemoteWorker calls
        # ``containers_remote_service.counter()`` to schedule per-path
        # delays, which raised AttributeError at runtime.
        with self._lock:
            return self._counter

    def run(self):
        """
        Execute the thread
        """
        # Spawn the configured number of daemon workers sharing one queue.
        for _ in (range(self._config.getint("containers_remote", "workers"))):
            worker = DockerContainersRemoteWorker(self._config, self._docker_client, self, self._queue,
                                                  self._containers_outputs)
            worker.setDaemon(True)
            self._workers.append(worker)
        self._logger.info("service started")
        # Optional startup delay before the first collection cycle.
        if self._config.getint("containers_remote", "startup") > 0:
            self._stop_event.wait(self._config.getint("containers_remote", "startup"))
        for worker in self._workers:
            worker.start()
        while True:
            self._execute()
            # wait() returns True when the stop event is set -> shut down.
            if self._stop_event.wait(self._config.getint("containers_remote", "interval")):
                break
        self._logger.info("service stopped")

    def _execute(self):
        """
        Parse the cached remote-command outputs as Zabbix trapper lines and
        send them; enqueue refresh requests for the workers.
        """
        # Advance the cycle counter (wrapping after one full interval).
        with self._lock:
            if self._counter > self._config.getint("containers_remote", "interval"):
                self._counter = 0
            self._counter += 1
        self._logger.info("sending available containers trappers metrics")
        try:
            metrics = []
            containers = self._docker_client.containers()
            # Drop cached outputs of containers that no longer exist.
            for container_id in set(self._containers_outputs) - set(map(lambda c: c["Id"], containers)):
                del self._containers_outputs[container_id]
            for container in containers:
                if container["Status"].startswith("Up"):
                    self._queue.put(container)
                if container["Id"] not in self._containers_outputs:
                    continue
                container_output = self._containers_outputs[container["Id"]]["data"]
                clock = self._containers_outputs[container["Id"]]["clock"]
                if self._config.getboolean("containers_remote", "trappers") is False:
                    continue
                # Each output line follows the zabbix_sender input format:
                #   <host> <key> [<timestamp>] <value>
                for line in container_output.splitlines():
                    if self._config.getboolean("containers_remote", "trappers_timestamp"):
                        m = re.match(r'^([^\s]+) (([^\s\[]+)(?:\[([^\s]+)\])?) '
                                     r'(\d+) (?:"?((?:\\.|[^"])+)"?)$', line)
                        if m is None:
                            continue
                        # "-" means "use the locally configured hostname".
                        hostname = self._config.get("zabbix", "hostname") if m.group(1) == "-" \
                            else m.group(1)
                        key = m.group(2)
                        # BUGFIX: the original compared str to int
                        # (`m.group(5) == int(m.group(5))`), which is always
                        # False, so the parsed timestamp was never used.
                        timestamp = int(m.group(5)) if m.group(5).isdigit() else clock
                        # Unescape backslash-escaped characters in the value.
                        value = re.sub(r'\\(.)', "\\1", m.group(6))
                        metrics.append(ZabbixMetric(hostname, key, value, timestamp))
                    else:
                        m = re.match(r'^([^\s]+) (([^\s\[]+)(?:\[([^\s]+)\])?) '
                                     r'(?:"?((?:\\.|[^"])+)"?)$', line)
                        if m is None:
                            continue
                        hostname = self._config.get("zabbix", "hostname") if m.group(1) == "-" \
                            else m.group(1)
                        key = m.group(2)
                        timestamp = clock
                        value = re.sub(r'\\(.)', "\\1", m.group(5))
                        metrics.append(ZabbixMetric(hostname, key, value, timestamp))
            if len(metrics) > 0:
                self._logger.debug("sending %d metrics" % len(metrics))
                self._zabbix_sender.send(metrics)
        except (IOError, OSError, LookupError, ValueError):
            self._logger.error("failed to send containers trappers metrics")
class DockerContainersRemoteWorker(threading.Thread):
    """ This class implements a containers remote worker thread """

    def __init__(self, config: configparser.ConfigParser, docker_client: docker.APIClient,
                 containers_remote_service: DockerContainersRemoteService, containers_remote_queue: queue.Queue,
                 containers_outputs: dict):
        """
        Initialize the instance

        :param config: the configuration parser
        :param docker_client: the docker client
        :param containers_remote_service: the containers remote service
        :param containers_remote_queue: the containers remote queue
        :param containers_outputs: the containers outputs data
        """
        super(DockerContainersRemoteWorker, self).__init__()
        self._logger = logging.getLogger(self.__class__.__name__)
        self._config = config
        self._docker_client = docker_client
        self._containers_remote_service = containers_remote_service
        self._containers_remote_queue = containers_remote_queue
        self._containers_outputs = containers_outputs

    def run(self):
        """
        Execute the thread
        """
        # Consume containers from the queue until the ``None`` sentinel arrives.
        while True:
            self._logger.debug("waiting execution queue")
            container = self._containers_remote_queue.get()
            if container is None:
                break
            self._logger.info("executing remote command(s) in container %s" % container["Id"])
            # "path" and "delay" are parallel os.pathsep-separated lists: each
            # configured path has a matching per-path delay (in service cycles).
            paths = self._config.get("containers_remote", "path").split(os.pathsep)
            delays = self._config.get("containers_remote", "delay").split(os.pathsep)
            for index, path in enumerate(paths):
                # Missing or non-positive delays default to 1; the effective
                # delay is clamped to the service interval.
                delay = min(int(delays[index]) if ((len(delays) > index) and (int(delays[index]) > 0)) else 1,
                            int(self._config.get("containers_remote", "interval")))
                # NOTE(review): relies on the remote service exposing a
                # ``counter()`` accessor — confirm it is defined there.
                if self._containers_remote_service.counter() % delay != 0:
                    self._logger.debug("command is delayed to next execution")
                    continue
                try:
                    # Run every regular file with owner rwx permission found at
                    # the top level of ``path`` inside the container; the
                    # ``|| /bin/true`` keeps a missing path from being treated
                    # as a command failure.
                    execution = self._docker_client.exec_create(
                        container,
                        "/bin/sh -c \"stat %s >/dev/null 2>&1 && /usr/bin/find %s -type f -maxdepth 1 -perm /700"
                        " -exec {} \\; || /bin/true\"" % (path, path),
                        stderr=True,
                        tty=True, user=self._config.get("containers_remote", "user"))
                    data = self._docker_client.exec_start(execution["Id"])
                    inspect = self._docker_client.exec_inspect(execution["Id"])
                    # Cache the decoded output only when the command succeeded.
                    if inspect["ExitCode"] == 0:
                        self._containers_outputs[container["Id"]] = {
                            "data": str(data, 'utf-8'),
                            "clock": int(time.time())
                        }
                except (IOError, OSError, LookupError, ValueError):
                    self._logger.error("failed to execute remote command in container %s" % container["Id"])
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
11748,
4566,
48610,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
16834,
198,
11748,
302,
198,
11748,
4704,
278,
198,
11748,
640,
198,
198,
11748,
36253,
198,
198,
6738,
1976,
6485,
844,
... | 1.708835 | 33,335 |
# -*- coding: utf-8 -*-
from pylab import imshow,cm,colorbar,hot,show,xlabel,ylabel,connect, plot, figure, draw, axis, gcf,legend
from numpy import ones, sum, arange, transpose, log
import matplotlib.colors as colors
from matplotlib.widgets import RectangleSelector
from colormap import change_colormap
from matplotlib.axis import XAxis, YAxis
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as Canvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg
from matplotlib.font_manager import fontManager, FontProperties
import wx
from matplotlib.image import FigureImage
from matplotlib.figure import Figure
from matplotlib.pyplot import figure, get_fignums
from zoom_colorbar import zoom_colorbar
from osrefl.loaders.reduction.cmapmenu import CMapMenu
from matplotlib.cm import get_cmap
import matplotlib.cbook as cbook
import matplotlib
#from binned_data_class3 import plottable_2d_data
#from wxrebin3 import rebinned_data
#import __main__
class MyNavigationToolbar(NavigationToolbar2WxAgg):
    """
    Extend the default wx toolbar with your own event handlers
    """
    SET_SLICEMODE = wx.NewId()

    def set_slicemode(self, *args):
        """Toggle slice-to-rectangle mode on the attached data frame."""
        # Flip the flag first, then notify the data frame accordingly.
        self._slicemode = not self._slicemode
        if self._slicemode:
            self.data_frame_instance.sliceplots_on()
        else:
            self.data_frame_instance.sliceplots_off()
class plot_2d_data(wx.Frame):
    """Generic 2d plotting window.

    Inputs:
     - data (2d array of values),
     - x and y extent of the data,
     - title of graph, and
     - pixel mask to be used during summation - must have same dimensions as
       data (only data entries corresponding to nonzero values in pixel_mask
       will be summed)
     - plot_title, x_label and y_label are added to the 2d-plot as you might
       expect
    """
def toggle_sliceplots(self):
"""switch between views with and without slice plots"""
if self.show_sliceplots == True:
self.sliceplots_off()
else: # self.show_sliceplots == False
self.sliceplots_on()
    def show_slice_overlay(self, x_range, y_range, x, slice_y_data, y, slice_x_data):
        """sum along x and z within the box defined by qX- and qZrange.
        sum along qx is plotted to the right of the data,
        sum along qz is plotted below the data.
        Transparent white rectangle is overlaid on data to show summing region

        :param x_range: (min, max) of the summed region along x
        :param y_range: (min, max) of the summed region along y
        :param x: x-axis values for the y-summed slice
        :param slice_y_data: data summed along y (plotted in ``fig.sz``)
        :param y: y-axis values for the x-summed slice
        :param slice_x_data: data summed along x (plotted in ``fig.sx``)
        """
        # Local import keeps matplotlib.ticker off the module import path.
        from matplotlib.ticker import FormatStrFormatter, ScalarFormatter

        # NOTE(review): ``self.fig is None`` would be the idiomatic test here.
        if self.fig == None:
            print('No figure for this dataset is available')
            return

        fig = self.fig
        ax = fig.ax
        extent = fig.im.get_extent()

        # Draw the translucent white rectangle marking the summing region, or
        # just move the existing one on subsequent calls.
        if fig.slice_overlay == None:
            fig.slice_overlay = ax.fill([x_range[0],x_range[1],x_range[1],x_range[0]],[y_range[0],y_range[0],y_range[1],y_range[1]],fc='white', alpha=0.3)
            fig.ax.set_ylim(extent[2],extent[3])
        else:
            fig.slice_overlay[0].xy = [(x_range[0],y_range[0]), (x_range[1],y_range[0]), (x_range[1],y_range[1]), (x_range[0],y_range[1])]
        fig.sz.clear()
        # Scientific notation outside the 10^-2 .. 10^4 range.
        default_fmt = ScalarFormatter(useMathText=True)
        default_fmt.set_powerlimits((-2,4))
        fig.sz.xaxis.set_major_formatter(default_fmt)
        fig.sz.yaxis.set_major_formatter(default_fmt)
        # NOTE(review): this immediately replaces the ScalarFormatter set on
        # the x axis two lines above — confirm which formatter is intended.
        fig.sz.xaxis.set_major_formatter(FormatStrFormatter('%.2g'))
        fig.sz.set_xlim(x[0], x[-1])
        fig.sz.plot(x, slice_y_data)

        fig.sx.clear()
        fig.sx.yaxis.set_major_formatter(default_fmt)
        fig.sx.xaxis.set_major_formatter(default_fmt)
        fig.sx.yaxis.set_ticks_position('right')
        fig.sx.yaxis.set_major_formatter(FormatStrFormatter('%.2g'))
        fig.sx.set_ylim(y[0], y[-1])
        fig.sx.plot(slice_x_data, y)

        # Restore the image extent (fill() may have altered the data limits).
        fig.im.set_extent(extent)
        fig.canvas.draw()
def sliceplot(self, xy_range, ax = None):
"""sum along x and z within the box defined by qX- and qZrange.
sum along qx is plotted to the right of the data,
sum along qz is plotted below the data.
Transparent white rectangle is overlaid on data to show summing region"""
self.sliceplots_on()
x_range, y_range = xy_range
x, slice_y_data, y, slice_x_data = self.do_xy_slice(x_range, y_range)
self.x = x
self.slice_y_data = slice_y_data
self.y = y
self.slice_x_data = slice_x_data
self.slice_xy_range = xy_range
self.show_slice_overlay(x_range, y_range, x, slice_y_data, y, slice_x_data)
def do_xy_slice(self, x_range, y_range):
""" slice up the data, once along x and once along z.
returns 4 arrays: a y-axis for the x data,
an x-axis for the y data."""
#params = self.params
print 'doing xy slice'
data = self.data
pixels = self.pixel_mask
# zero out any pixels in the sum that have zero in the pixel count:
data[pixels == 0] = 0
normalization_matrix = ones(data.shape)
normalization_matrix[pixels == 0] = 0
x_min = min(x_range)
x_max = max(x_range)
y_min = min(y_range)
y_max = max(y_range)
x_size,y_size = data.shape
global_x_range = (self.x_max - self.x_min)
global_y_range = (self.y_max - self.y_min)
x_pixel_min = round( (x_min - self.x_min) / global_x_range * x_size )
x_pixel_max = round( (x_max - self.x_min) / global_x_range * x_size )
y_pixel_min = round( (y_min - self.y_min) / global_y_range * y_size )
y_pixel_max = round( (y_max - self.y_min) / global_y_range * y_size )
#correct any sign switches:
if (x_pixel_min > x_pixel_max):
new_min = x_pixel_max
x_pixel_max = x_pixel_min
x_pixel_min = new_min
if (y_pixel_min > y_pixel_max):
new_min = y_pixel_max
y_pixel_max = y_pixel_min
y_pixel_min = new_min
new_x_min = x_pixel_min / x_size * global_x_range + self.x_min
new_x_max = x_pixel_max / x_size * global_x_range + self.x_min
new_y_min = y_pixel_min / y_size * global_y_range + self.y_min
new_y_max = y_pixel_max / y_size * global_y_range + self.y_min
x_pixel_min = int(x_pixel_min)
x_pixel_max = int(x_pixel_max)
y_pixel_min = int(y_pixel_min)
y_pixel_max = int(y_pixel_max)
y_norm_factor = sum(normalization_matrix[x_pixel_min:x_pixel_max,y_pixel_min:y_pixel_max], axis=1)
x_norm_factor = sum(normalization_matrix[x_pixel_min:x_pixel_max,y_pixel_min:y_pixel_max], axis=0)
# make sure the normalization has a minimum value of 1 everywhere,
# to avoid divide by zero errors:
y_norm_factor[y_norm_factor == 0] = 1
x_norm_factor[x_norm_factor == 0] = 1
slice_y_data = sum(data[x_pixel_min:x_pixel_max,y_pixel_min:y_pixel_max], axis=1) / y_norm_factor
slice_x_data = sum(data[x_pixel_min:x_pixel_max,y_pixel_min:y_pixel_max], axis=0) / x_norm_factor
#slice_y_data = slice_y_data
#slice_x_data = slice_x_data
x_vals = arange(slice_y_data.shape[0], dtype = 'float') / slice_y_data.shape[0] * (new_x_max - new_x_min) + new_x_min
y_vals = arange(slice_x_data.shape[0], dtype = 'float') / slice_x_data.shape[0] * (new_y_max - new_y_min) + new_y_min
return x_vals, slice_y_data, y_vals, slice_x_data
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
279,
2645,
397,
1330,
545,
12860,
11,
11215,
11,
8043,
5657,
11,
8940,
11,
12860,
11,
87,
18242,
11,
2645,
9608,
11,
8443,
11,
7110,
11,
3785,
11,
3197,
11,
16... | 2.204606 | 3,387 |
import unittest
from storages import webcontent
| [
11748,
555,
715,
395,
198,
198,
6738,
336,
273,
1095,
1330,
3992,
11299,
628,
628,
628
] | 3.375 | 16 |
# Basic arithmetic operations as named functions (lambda assignments are
# discouraged by PEP 8 / flake8 E731).
def soma(x, y):
    """Return the sum of x and y."""
    return x + y


def div(x, y):
    """Return the quotient of x and y."""
    return x / y


def sub(x, y):
    """Return the difference of x and y."""
    return x - y


def mult(x, y):
    """Return the product of x and y."""
    return x * y


# Read the two operands, show the menu, then dispatch on the chosen option.
a, b = int(input('Número: ')), int(input('Número: '))
print(''' 1 - Soma
 2 - Subtração
 3 - Divisão
 4 - Multiplicação
''')
resp = int(input('Digite um operação: '))
if resp == 1:
    print(f'Soma: {soma(a, b)}')
elif resp == 2:
    print(f'Subtração: {sub(a, b)}')
elif resp == 3:
    # BUGFIX: guard against division by zero instead of crashing with an
    # unhandled ZeroDivisionError.
    if b == 0:
        print('Divisão inválida: divisor igual a zero!')
    else:
        print(f'Divisão: {div(a, b):.1f}')
elif resp == 4:
    print(f'Multiplicação: {mult(a, b)}')
else:
    print('Operação inválida!')
| [
82,
6086,
796,
37456,
2124,
11,
331,
25,
2124,
1343,
331,
198,
7146,
796,
37456,
2124,
11,
331,
25,
2124,
1220,
331,
198,
7266,
796,
37456,
2124,
11,
331,
25,
2124,
532,
331,
198,
16680,
796,
37456,
2124,
11,
331,
25,
2124,
1635,
... | 2.035294 | 255 |
# Smoke test for the compiled extension: import ``hello`` from the local
# Cython-built module and invoke it once.
from hello import hello
hello("Cython")
| [
6738,
23748,
1330,
23748,
198,
198,
31373,
7203,
34,
7535,
4943,
198
] | 3.416667 | 12 |
# Generated by Django 3.1.5 on 2021-05-01 02:54
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
20,
319,
33448,
12,
2713,
12,
486,
7816,
25,
4051,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import math | [
11748,
10688
] | 5.5 | 2 |
# -*- coding: utf-8 -*-
"""hypertext transfer protocol (HTTP/2)
:mod:`pcapkit.protocols.application.httpv2` contains
:class:`~pcapkit.protocols.application.httpv2.HTTPv2`
only, which implements extractor for Hypertext Transfer
Protocol (HTTP/2) [*]_, whose structure is described as
below:
======= ========= ===================== ==========================
Octets Bits Name Description
======= ========= ===================== ==========================
0 0 ``http.length`` Length
3 24 ``http.type`` Type
4 32 ``http.flags`` Flags
5 40 Reserved
5 41 ``http.sid`` Stream Identifier
9 72 ``http.payload`` Frame Payload
======= ========= ===================== ==========================
.. [*] https://en.wikipedia.org/wiki/HTTP/2
"""
import collections
from typing import TYPE_CHECKING
from pcapkit.const.http.error_code import ErrorCode as RegType_ErrorCode
from pcapkit.const.http.frame import Frame as RegType_Frame
from pcapkit.const.http.setting import Setting as RegType_Setting
from pcapkit.corekit.multidict import OrderedMultiDict
from pcapkit.protocols.application.http import HTTP
from pcapkit.protocols.data.application.httpv2 import HTTP as DataType_HTTP
from pcapkit.protocols.data.application.httpv2 import \
ContinuationFrame as DataType_ContinuationFrame
from pcapkit.protocols.data.application.httpv2 import \
ContinuationFrameFlags as DataType_ContinuationFrameFlags
from pcapkit.protocols.data.application.httpv2 import DataFrame as DataType_DataFrame
from pcapkit.protocols.data.application.httpv2 import DataFrameFlags as DataType_DataFrameFlags
from pcapkit.protocols.data.application.httpv2 import GoawayFrame as DataType_GoawayFrame
from pcapkit.protocols.data.application.httpv2 import HeadersFrame as DataType_HeadersFrame
from pcapkit.protocols.data.application.httpv2 import \
HeadersFrameFlags as DataType_HeadersFrameFlags
from pcapkit.protocols.data.application.httpv2 import PingFrame as DataType_PingFrame
from pcapkit.protocols.data.application.httpv2 import PingFrameFlags as DataType_PingFrameFlags
from pcapkit.protocols.data.application.httpv2 import PriorityFrame as DataType_PriorityFrame
from pcapkit.protocols.data.application.httpv2 import PushPromiseFrame as DataType_PushPromiseFrame
from pcapkit.protocols.data.application.httpv2 import \
PushPromiseFrameFlags as DataType_PushPromiseFrameFlags
from pcapkit.protocols.data.application.httpv2 import RstStreamFrame as DataType_RstStreamFrame
from pcapkit.protocols.data.application.httpv2 import SettingsFrame as DataType_SettingsFrame
from pcapkit.protocols.data.application.httpv2 import \
SettingsFrameFlags as DataType_SettingsFrameFlags
from pcapkit.protocols.data.application.httpv2 import UnassignedFrame as DataType_UnassignedFrame
from pcapkit.protocols.data.application.httpv2 import \
WindowUpdateFrame as DataType_WindowUpdateFrame
from pcapkit.utilities.exceptions import ProtocolError
if TYPE_CHECKING:
from typing import Any, Callable, DefaultDict, NoReturn, Optional
from typing_extensions import Literal
FrameParser = Callable[['HTTPv2', RegType_Frame, int, str, int], DataType_HTTP]
__all__ = ['HTTPv2']
class HTTPv2(HTTP):
    """This class implements Hypertext Transfer Protocol (HTTP/2)."""

    #: Parsed packet data.
    _info: 'DataType_HTTP'

    ##########################################################################
    # Defaults.
    ##########################################################################

    #: DefaultDict[RegType_Frame, str | FrameParser]: Frame code to method
    #: mapping, c.f. :meth:`read`. Method names are expected to be referred to
    #: the class by ``_read_http_${name}``, and if such name not found, the
    #: value should then be a method that can parse the frame by itself.
    __frame__ = collections.defaultdict(
        # Unknown/unassigned frame types fall back to ``_read_http_none``.
        lambda: 'none',
        {
            RegType_Frame.DATA: 'data',                      # DATA
            RegType_Frame.HEADERS: 'headers',                # HEADERS
            RegType_Frame.PRIORITY: 'priority',              # PRIORITY
            RegType_Frame.RST_STREAM: 'rst_stream',          # RST_STREAM
            RegType_Frame.SETTINGS: 'settings',              # SETTINGS
            RegType_Frame.PUSH_PROMISE: 'push_promise',      # PUSH_PROMISE
            RegType_Frame.PING: 'ping',                      # PING
            RegType_Frame.GOAWAY: 'goaway',                  # GOAWAY
            RegType_Frame.WINDOW_UPDATE: 'window_update',    # WINDOW_UPDATE
            RegType_Frame.CONTINUATION: 'continuation',      # CONTINUATION
        },
    )  # type: DefaultDict[int, str | FrameParser]
##########################################################################
# Properties.
##########################################################################
@property
def alias(self) -> 'Literal["HTTP/2"]':
"""Acronym of current protocol."""
return 'HTTP/2'
@property
def length(self) -> 'Literal[9]':
"""Header length of current protocol."""
return 9
##########################################################################
# Methods.
##########################################################################
    def read(self, length: 'Optional[int]' = None, **kwargs: 'Any') -> 'DataType_HTTP':
        """Read Hypertext Transfer Protocol (HTTP/2).

        Structure of HTTP/2 packet [:rfc:`7540`]:

        .. code-block:: text

            +-----------------------------------------------+
            |                Length (24)                    |
            +---------------+---------------+---------------+
            |   Type (8)    |   Flags (8)   |
            +-+-------------+---------------+-------------------------------+
            |R|                 Stream Identifier (31)                      |
            +=+=============================================================+
            |                   Frame Payload (0...)                      ...
            +---------------------------------------------------------------+

        Args:
            length: Length of packet data.

        Keyword Args:
            **kwargs: Arbitrary keyword arguments.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        if length is None:
            length = len(self)

        # Every HTTP/2 frame starts with a 9-octet fixed header.
        if length < 9:
            raise ProtocolError('HTTP/2: invalid format', quiet=True)

        _tlen = self._read_unpack(3)   # 24-bit Length field
        _type = self._read_unpack(1)   # 8-bit Type field
        _flag = self._read_binary(1)   # 8-bit Flags field, as a bit string
        _rsid = self._read_binary(4)   # reserved bit + 31-bit stream id

        # NOTE(review): RFC 7540 defines ``Length`` as the payload length
        # excluding the 9-octet header, yet it is compared against the full
        # captured length here — confirm the semantics of ``len(self)``.
        if _tlen != length:
            raise ProtocolError(f'HTTP/2: [Type {_type}] invalid format', quiet=True)

        http_type = RegType_Frame.get(_type)
        # Drop the reserved high bit; the low 31 bits are the stream id.
        http_sid = int(_rsid[1:], base=2)

        # SETTINGS and PING frames apply to the connection as a whole and
        # therefore must use stream identifier 0.
        if http_type in (RegType_Frame.SETTINGS, RegType_Frame.PING) and http_sid != 0:
            raise ProtocolError(f'HTTP/2: [Type {_type}] invalid format', quiet=True)

        # Dispatch on the frame type: either a ``_read_http_*`` method name
        # or a directly registered callable (see :meth:`register_frame`).
        name = self.__frame__[http_type]  # type: str | FrameParser
        if isinstance(name, str):
            meth_name = f'_read_http_{name}'
            meth = getattr(self, meth_name, self._read_http_none)  # type: Callable[[RegType_Frame, int, str, int], DataType_HTTP]
            http = meth(http_type, length, _flag, http_sid)
        else:
            http = name(self, http_type, length, _flag, http_sid)

        return http
def make(self, **kwargs: 'Any') -> 'NoReturn':
"""Make (construct) packet data.
Keyword Args:
**kwargs: Arbitrary keyword arguments.
Returns:
Constructed packet data.
"""
raise NotImplementedError
@classmethod
def id(cls) -> 'tuple[Literal["HTTPv2"]]': # type: ignore[override]
"""Index ID of the protocol.
Returns:
Index ID of the protocol.
"""
return (cls.__name__,) # type: ignore[return-value]
@classmethod
def register_frame(cls, code: 'RegType_Frame', meth: 'str | FrameParser') -> 'None':
"""Register a frame parser.
Args:
code: HTTP frame type code.
meth: Method name or callable to parse the frame.
"""
cls.__frame__[code] = meth
##########################################################################
# Data models.
##########################################################################
def __length_hint__(self) -> 'Literal[9]':
"""Total length of corresponding protocol."""
return 9
##########################################################################
# Utilities.
##########################################################################
def _read_http_none(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_UnassignedFrame':
"""Read HTTP packet with unassigned type.
Args:
frame: Frame type.
length: Length of packet data.
flags: Flags of the frame.
sid: Stream ID.
Returns:
Parsed packet data.
Raises:
ProtocolError: If the packet is malformed.
"""
if any((int(bit, base=2) for bit in flags)):
raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)
data = DataType_UnassignedFrame(
length=length,
type=frame,
flags=None,
sid=sid,
data=self._read_fileng(length - 9) or None,
)
return data
    def _read_http_data(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_DataFrame':
        """Read HTTP/2 ``DATA`` frames.

        Structure of HTTP/2 ``DATA`` frame [:rfc:`7540`]:

        .. code-block:: text

            +-----------------------------------------------+
            |                Length (24)                    |
            +---------------+---------------+---------------+
            |   Type (8)    |   Flags (8)   |
            +-+-------------+---------------+-------------------------------+
            |R|                 Stream Identifier (31)                      |
            +---------------+-----------------------------------------------+
            |Pad Length? (8)|
            +---------------+-----------------------------------------------+
            |                            Data (*)                         ...
            +---------------------------------------------------------------+
            |                           Padding (*)                       ...
            +---------------------------------------------------------------+

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame.
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        # Only END_STREAM (bit 0) and PADDED (bit 3) are defined for DATA.
        _flag = DataType_DataFrameFlags(
            END_STREAM=bool(int(flags[0], base=2)),  # bit 0
            PADDED=bool(int(flags[3], base=2)),      # bit 3
        )

        # The one-octet Pad Length field is present only when PADDED is set.
        if _flag.PADDED:
            _plen = self._read_unpack(1)
        else:
            _plen = 0

        # NOTE(review): unlike ``_read_http_none`` (which uses ``length - 9``),
        # the bounds check and data-length arithmetic below do not subtract the
        # 9-octet frame header — confirm whether ``length`` includes it here.
        if _plen > length - 10:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        if _flag.PADDED:
            _dlen = length - _plen - 1
        else:
            _dlen = length - _plen

        if _dlen < 0:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        _data = self._read_fileng(_dlen)
        # Padding octets are consumed to advance the cursor but discarded.
        _pads = self._read_binary(_plen)

        data = DataType_DataFrame(
            length=length,
            type=frame,
            flags=_flag,
            pad_len=_plen,
            sid=sid,
            data=_data,
        )

        return data
    def _read_http_headers(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_HeadersFrame':
        """Read HTTP/2 ``HEADERS`` frames.

        Structure of HTTP/2 ``HEADERS`` frame [:rfc:`7540`]:

        .. code-block:: text

            +-----------------------------------------------+
            |                Length (24)                    |
            +---------------+---------------+---------------+
            |   Type (8)    |   Flags (8)   |
            +-+-------------+---------------+-------------------------------+
            |R|                 Stream Identifier (31)                      |
            +---------------+-----------------------------------------------+
            |Pad Length? (8)|
            +-+-------------+-----------------------------------------------+
            |E|                 Stream Dependency? (31)                     |
            +-+-------------+-----------------------------------------------+
            |  Weight? (8)  |
            +-+-------------+-----------------------------------------------+
            |                   Header Block Fragment (*)                 ...
            +---------------------------------------------------------------+
            |                           Padding (*)                       ...
            +---------------------------------------------------------------+

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame.
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        _flag = DataType_HeadersFrameFlags(
            END_STREAM=bool(int(flags[0], base=2)),   # bit 0
            END_HEADERS=bool(int(flags[2], base=2)),  # bit 2
            PADDED=bool(int(flags[3], base=2)),       # bit 3
            PRIORITY=bool(int(flags[5], base=2)),     # bit 5
        )

        # The 5-octet priority fields (E + 31-bit dependency, 1-octet weight)
        # are present only when the PRIORITY flag is set; ``_elen`` tracks how
        # many octets they consumed.
        if _flag.PRIORITY:
            _edep = self._read_binary(4)
            _wght = self._read_unpack(1)
            _elen = 5
            _excl = bool(int(_edep[0], base=2))
            _deps = int(_edep[1:], base=2)
        else:
            _edep = _wght = _excl = _deps = None  # type: ignore[assignment]
            _elen = 0

        # Optional Pad Length field (1 octet) when PADDED is set.
        # NOTE(review): as in ``_read_http_data``, the arithmetic below does
        # not subtract the 9-octet frame header — confirm the ``length``
        # convention in use.
        if _flag.PADDED:
            _plen = self._read_unpack(1)
            _dlen = length - _plen - _elen - 1
        else:
            _plen = 0
            _dlen = length - _plen - _elen

        if _dlen < 0:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        # The remaining octets (minus padding) are the HPACK header block.
        _frag = self._read_fileng(_dlen) or None
        # Padding octets are consumed to advance the cursor but discarded.
        _pads = self._read_binary(_plen)

        data = DataType_HeadersFrame(
            length=length,
            type=frame,
            flags=_flag,
            pad_len=_plen,
            sid=sid,
            excl_dependency=_excl,
            stream_dependency=_deps,
            weight=_wght,
            fragment=_frag,
        )

        return data
    def _read_http_priority(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_PriorityFrame':
        """Read HTTP/2 ``PRIORITY`` frames.

        Structure of HTTP/2 ``PRIORITY`` frame [:rfc:`7540`]:

        .. code-block:: text

            +-----------------------------------------------+
            |                Length (24)                    |
            +---------------+---------------+---------------+
            |   Type (8)    |   Flags (8)   |
            +-+-------------+---------------+-------------------------------+
            |R|                 Stream Identifier (31)                      |
            +-+-------------------------------------------------------------+
            |E|                  Stream Dependency (31)                     |
            +-+-------------+-----------------------------------------------+
            |   Weight (8)  |
            +-+-------------+

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame.
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        # NOTE(review): a PRIORITY frame has a fixed 5-octet payload, so with
        # the 9-octet header included the total would be 14 — this check
        # against 9 looks inconsistent with ``_read_http_none``'s
        # ``length - 9``; confirm which length convention applies.
        if length != 9:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)

        _edep = self._read_binary(4)  # exclusive bit + 31-bit dependency
        _wght = self._read_unpack(1)  # weight, as encoded on the wire

        data = DataType_PriorityFrame(
            length=length,
            type=frame,
            flags=None,
            sid=sid,
            excl_dependency=bool(int(_edep[0], base=2)),
            stream_dependency=int(_edep[1:], base=2),
            # RFC 7540 §6.3: an implicit 1 is added to the wire value.
            weight=_wght + 1,
        )

        return data
def _read_http_rst_stream(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_RstStreamFrame':
"""Read HTTP/2 ``RST_STREAM`` frames.
Structure of HTTP/2 ``RST_STREAM`` frame [:rfc:`7540`]:
.. code-block:: text
+-----------------------------------------------+
| Length (24) |
+---------------+---------------+---------------+
| Type (8) | Flags (8) |
+-+-------------+---------------+-------------------------------+
|R| Stream Identifier (31) |
+---------------------------------------------------------------+
| Error Code (32) |
+---------------------------------------------------------------+
Args:
frame: Frame type.
length: Length of packet data.
flags: Flags of the frame.
sid: Stream ID.
Returns:
Parsed packet data.
Raises:
ProtocolError: If the packet is malformed.
"""
if length != 4:
raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)
_code = self._read_unpack(4)
data = DataType_RstStreamFrame(
length=length,
type=frame,
flags=None,
sid=sid,
error=RegType_ErrorCode.get(_code, _code),
)
return data
    def _read_http_settings(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_SettingsFrame':
        """Read HTTP/2 ``SETTINGS`` frames.

        Structure of HTTP/2 ``SETTINGS`` frame [:rfc:`7540`]:

        .. code-block:: text

           +-----------------------------------------------+
           |                 Length (24)                   |
           +---------------+---------------+---------------+
           |   Type (8)    |   Flags (8)   |
           +-+-------------+---------------+-------------------------------+
           |R|                 Stream Identifier (31)                      |
           +---------------------------------------------------------------+
           |       Identifier (16)         |
           +-------------------------------+-------------------------------+
           |                        Value (32)                             |
           +---------------------------------------------------------------+
           |                          ......                               |

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame.
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        # RFC 7540 §6.5: SETTINGS applies to the connection as a whole, so it
        # must arrive on stream 0, and its payload is a sequence of 6-octet
        # (identifier, value) pairs.
        if length % 6 != 0 or sid != 0:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)
        _flag = DataType_SettingsFrameFlags(
            ACK=bool(int(flags[0], base=2)),  # bit 0
        )
        # A SETTINGS acknowledgement must carry an empty payload.
        if _flag.ACK and length != 0:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)
        # Multi-dict because the same setting identifier may appear more than
        # once in a single frame.
        _sets = OrderedMultiDict()  # type: OrderedMultiDict[RegType_Setting, int]
        for _ in range(length // 6):
            _stid = self._read_unpack(2)  # 16-bit setting identifier
            _pval = self._read_unpack(4)  # 32-bit setting value
            _pkey = RegType_Setting.get(_stid)
            _sets.add(_pkey, _pval)
        data = DataType_SettingsFrame(
            length=length,
            type=frame,
            flags=_flag,
            sid=sid,
            settings=_sets,
        )
        return data
    def _read_http_push_promise(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_PushPromiseFrame':
        """Read HTTP/2 ``PUSH_PROMISE`` frames.

        Structure of HTTP/2 ``PUSH_PROMISE`` frame [:rfc:`7540`]:

        .. code-block:: text

           +-----------------------------------------------+
           |                 Length (24)                   |
           +---------------+---------------+---------------+
           |   Type (8)    |   Flags (8)   |
           +-+-------------+---------------+-------------------------------+
           |R|                 Stream Identifier (31)                      |
           +---------------+-----------------------------------------------+
           |Pad Length? (8)|
           +-+-------------+-----------------------------------------------+
           |R|                  Promised Stream ID (31)                    |
           +-+-----------------------------+-------------------------------+
           |                   Header Block Fragment (*)                 ...
           +---------------------------------------------------------------+
           |                           Padding (*)                       ...
           +---------------------------------------------------------------+

        Args:
            frame: Frame type.
            length: Length of packet data.
            flags: Flags of the frame.
            sid: Stream ID.

        Returns:
            Parsed packet data.

        Raises:
            ProtocolError: If the packet is malformed.

        """
        # Payload must at least hold the 4-octet promised stream ID.
        if length < 4:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)
        _flag = DataType_PushPromiseFrameFlags(
            END_HEADERS=bool(int(flags[2], base=2)),  # bit 2
            PADDED=bool(int(flags[3], base=2)),       # bit 3
        )
        # Fragment length: payload minus padding, minus the promised stream
        # ID (4 octets) and, when PADDED, the pad-length octet itself.
        if _flag.PADDED:
            _plen = self._read_unpack(1)
            _dlen = length - _plen - 5
        else:
            _plen = 0
            _dlen = length - _plen - 4
        # Padding longer than the remaining payload is a protocol error.
        if _dlen < 0:
            raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)
        _rpid = self._read_binary(4)
        _frag = self._read_fileng(_dlen) or None
        # Consume (and discard) the trailing padding octets.
        _pads = self._read_binary(_plen)
        data = DataType_PushPromiseFrame(
            length=length,
            type=frame,
            flags=_flag,
            sid=sid,
            pad_len=_plen,
            promised_sid=int(_rpid[1:], base=2),  # skip the reserved 'R' bit
            fragment=_frag,
        )
        return data
def _read_http_ping(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_PingFrame':
"""Read HTTP/2 ``PING`` frames.
Structure of HTTP/2 ``PING`` frame [:rfc:`7540`]:
.. code-block:: text
+-----------------------------------------------+
| Length (24) |
+---------------+---------------+---------------+
| Type (8) | Flags (8) |
+-+-------------+---------------+-------------------------------+
|R| Stream Identifier (31) |
+---------------------------------------------------------------+
| |
| Opaque Data (64) |
| |
+---------------------------------------------------------------+
Args:
frame: Frame type.
length: Length of packet data.
flags: Flags of the frame.
sid: Stream ID.
Returns:
Parsed packet data.
Raises:
ProtocolError: If the packet is malformed.
"""
if length != 8:
raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)
_flag = DataType_PingFrameFlags(
ACK=bool(int(flags[0], base=2)), # bit 0
)
_data = self._read_fileng(8)
data = DataType_PingFrame(
length=length,
type=frame,
flags=_flag,
sid=sid,
data=_data,
)
return data
def _read_http_goaway(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_GoawayFrame':
"""Read HTTP/2 ``GOAWAY`` frames.
Structure of HTTP/2 ``GOAWAY`` frame [:rfc:`7540`]:
.. code-block:: text
+-----------------------------------------------+
| Length (24) |
+---------------+---------------+---------------+
| Type (8) | Flags (8) |
+-+-------------+---------------+-------------------------------+
|R| Stream Identifier (31) |
+-+-------------+---------------+-------------------------------+
|R| Last-Stream-ID (31) |
+-+-------------------------------------------------------------+
| Error Code (32) |
+---------------------------------------------------------------+
| Additional Debug Data (*) |
+---------------------------------------------------------------+
Args:
frame: Frame type.
length: Length of packet data.
flags: Flags of the frame.
sid: Stream ID.
Returns:
Parsed packet data.
Raises:
ProtocolError: If the packet is malformed.
"""
_dlen = length - 8
if _dlen < 0:
raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)
_rsid = self._read_binary(4)
_code = self._read_unpack(4)
_data = self._read_fileng(_dlen) or None
data = DataType_GoawayFrame(
length=length,
type=frame,
flags=None,
sid=sid,
last_sid=int(_rsid[1:], base=2),
error=RegType_ErrorCode.get(_code),
debug_data=_data,
)
return data
def _read_http_window_update(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_WindowUpdateFrame':
"""Read HTTP/2 ``WINDOW_UPDATE`` frames.
Structure of HTTP/2 ``WINDOW_UPDATE`` frame [:rfc:`7540`]:
.. code-block:: text
+-----------------------------------------------+
| Length (24) |
+---------------+---------------+---------------+
| Type (8) | Flags (8) |
+-+-------------+---------------+-------------------------------+
|R| Stream Identifier (31) |
+-+-------------+---------------+-------------------------------+
|R| Window Size Increment (31) |
+-+-------------------------------------------------------------+
Args:
frame: Frame type.
length: Length of packet data.
flags: Flags of the frame.
sid: Stream ID.
Returns:
Parsed packet data.
Raises:
ProtocolError: If the packet is malformed.
"""
if length != 4:
raise ProtocolError(f'HTTP/2: [Type {frame}] invalid format', quiet=True)
_size = self._read_binary(4)
data = DataType_WindowUpdateFrame(
length=length,
type=frame,
flags=None,
sid=sid,
increment=int(_size[1:], base=2),
)
return data
def _read_http_continuation(self, frame: 'RegType_Frame', length: 'int', flags: 'str', sid: 'int') -> 'DataType_ContinuationFrame':
"""Read HTTP/2 ``CONTINUATION`` frames.
Structure of HTTP/2 ``CONTINUATION`` frame [:rfc:`7540`]:
.. code-block:: text
+-----------------------------------------------+
| Length (24) |
+---------------+---------------+---------------+
| Type (8) | Flags (8) |
+-+-------------+---------------+-------------------------------+
|R| Stream Identifier (31) |
+---------------------------------------------------------------+
| Header Block Fragment (*) ...
+---------------------------------------------------------------+
Args:
frame: Frame type.
length: Length of packet data.
flags: Flags of the frame.
sid: Stream ID.
Returns:
Parsed packet data.
Raises:
ProtocolError: If the packet is malformed.
"""
_flag = DataType_ContinuationFrameFlags(
END_HEADERS=bool(int(flags[2], base=2)), # bit 2
)
_frag = self._read_fileng(length) or None
data = DataType_ContinuationFrame(
length=length,
type=frame,
flags=_flag,
sid=sid,
fragment=_frag,
)
return data
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
49229,
5239,
4351,
8435,
357,
40717,
14,
17,
8,
198,
198,
25,
4666,
25,
63,
79,
11128,
15813,
13,
11235,
4668,
82,
13,
31438,
13,
4023,
85,
17,
63,
4909,
198,... | 2.148029 | 14,031 |
import numpy as np
from scipy import linalg
from scipy.sparse import csr_matrix
from scipy.linalg import svd, eigvals
from pyamg.util.linalg import approximate_spectral_radius,\
infinity_norm, norm, condest, cond,\
ishermitian, pinv_array
from pyamg import gallery
from numpy.testing import TestCase, assert_almost_equal, assert_equal,\
assert_array_almost_equal
| [
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
1330,
300,
1292,
70,
198,
6738,
629,
541,
88,
13,
82,
29572,
1330,
269,
27891,
62,
6759,
8609,
198,
6738,
629,
541,
88,
13,
75,
1292,
70,
1330,
264,
20306,
11,
304,
328,
1278... | 2.773723 | 137 |
# -*- coding: UTF8 -*-
"""
TODO:
- Fix arguments of find_contact()
- Implement PING protocol
"""
import socket
import random
import logging
from unittest import mock
from typing import Tuple, Optional
from json.decoder import JSONDecodeError
from .node import Node
from .config import Config
from .request import Request
from .requests import Requests
from .encryption import Encryption
from .message import Message, OwnMessage
from .contact import Contact, OwnContact, Beacon
from .utils import decode_json, get_primary_local_ip_address, get_timestamp
from .validation import is_valid_received_message, is_valid_request, is_valid_contact, verify_received_aes_key
| [
2,
532,
9,
12,
19617,
25,
41002,
23,
532,
9,
12,
201,
198,
201,
198,
37811,
201,
198,
201,
198,
51,
3727,
46,
25,
201,
198,
12,
13268,
7159,
286,
1064,
62,
32057,
3419,
201,
198,
12,
48282,
350,
2751,
8435,
201,
198,
201,
198,
... | 3.221198 | 217 |
import json
from .exceptions import ParseError
import six
class JSONParser(Parser):
    """Parser for JSON-serialized request bodies."""

    media_type = 'application/json'

    def parse(self, data, media_type=None, parser_context=None):
        """Decode ``data`` as JSON and return the resulting Python object.

        Returns ``None`` for an empty payload; raises :class:`ParseError`
        when the payload is not valid JSON (or not decodable text).
        """
        context = parser_context or {}
        encoding = context.get('encoding', "utf-8")

        if not data:
            return

        try:
            # UnicodeDecodeError is a ValueError subclass, so a bad byte
            # sequence is reported the same way as malformed JSON.
            return json.loads(data.decode(encoding))
        except ValueError as exc:
            raise ParseError('JSON parse error - %s' % six.text_type(exc))
| [
11748,
33918,
198,
6738,
764,
1069,
11755,
1330,
2547,
325,
12331,
198,
11748,
2237,
628,
198,
198,
4871,
19449,
46677,
7,
46677,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
23042,
274,
19449,
12,
46911,
1143,
1366,
13,
198,
... | 2.390244 | 287 |
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class WatermarkRequest:
    """Request model for applying a watermark to a media file.

    Auto-generated Huawei Cloud SDK model.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # Attribute names whose values must be masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'input': 'ObsObjInfo',
        'template_id': 'str',
        'text_context': 'str',
        'image_watermark': 'ImageWatermark',
        'text_watermark': 'TextWatermark'
    }

    attribute_map = {
        'input': 'input',
        'template_id': 'template_id',
        'text_context': 'text_context',
        'image_watermark': 'image_watermark',
        'text_watermark': 'text_watermark'
    }

    def __init__(self, input=None, template_id=None, text_context=None, image_watermark=None, text_watermark=None):
        """WatermarkRequest - a model defined in huaweicloud sdk"""
        self._input = None
        self._template_id = None
        self._text_context = None
        self._image_watermark = None
        self._text_watermark = None
        self.discriminator = None
        # Only assign attributes the caller explicitly provided, so that the
        # serialized request omits unset fields.
        if input is not None:
            self.input = input
        if template_id is not None:
            self.template_id = template_id
        if text_context is not None:
            self.text_context = text_context
        if image_watermark is not None:
            self.image_watermark = image_watermark
        if text_watermark is not None:
            self.text_watermark = text_watermark

    @property
    def input(self):
        """Gets the input of this WatermarkRequest.

        :return: The input of this WatermarkRequest.
        :rtype: ObsObjInfo
        """
        return self._input

    @input.setter
    def input(self, input):
        """Sets the input of this WatermarkRequest.

        :param input: The input of this WatermarkRequest.
        :type: ObsObjInfo
        """
        self._input = input

    @property
    def template_id(self):
        """Gets the template_id of this WatermarkRequest.

        Watermark template ID. A watermark template can be created through
        the create-watermark-template API.

        :return: The template_id of this WatermarkRequest.
        :rtype: str
        """
        return self._template_id

    @template_id.setter
    def template_id(self, template_id):
        """Sets the template_id of this WatermarkRequest.

        Watermark template ID. A watermark template can be created through
        the create-watermark-template API.

        :param template_id: The template_id of this WatermarkRequest.
        :type: str
        """
        self._template_id = template_id

    @property
    def text_context(self):
        """Gets the text_context of this WatermarkRequest.

        Text watermark content, Base64-encoded. Must be non-empty when the
        watermark type is text (``type`` field is ``Text``). Example: to add
        the text watermark "测试文字水印", set this value to
        ``5rWL6K+V5paH5a2X5rC05Y2w``.

        :return: The text_context of this WatermarkRequest.
        :rtype: str
        """
        return self._text_context

    @text_context.setter
    def text_context(self, text_context):
        """Sets the text_context of this WatermarkRequest.

        Text watermark content, Base64-encoded. Must be non-empty when the
        watermark type is text (``type`` field is ``Text``). Example: to add
        the text watermark "测试文字水印", set this value to
        ``5rWL6K+V5paH5a2X5rC05Y2w``.

        :param text_context: The text_context of this WatermarkRequest.
        :type: str
        """
        self._text_context = text_context

    @property
    def image_watermark(self):
        """Gets the image_watermark of this WatermarkRequest.

        :return: The image_watermark of this WatermarkRequest.
        :rtype: ImageWatermark
        """
        return self._image_watermark

    @image_watermark.setter
    def image_watermark(self, image_watermark):
        """Sets the image_watermark of this WatermarkRequest.

        :param image_watermark: The image_watermark of this WatermarkRequest.
        :type: ImageWatermark
        """
        self._image_watermark = image_watermark

    @property
    def text_watermark(self):
        """Gets the text_watermark of this WatermarkRequest.

        :return: The text_watermark of this WatermarkRequest.
        :rtype: TextWatermark
        """
        return self._text_watermark

    @text_watermark.setter
    def text_watermark(self, text_watermark):
        """Sets the text_watermark of this WatermarkRequest.

        :param text_watermark: The text_watermark of this WatermarkRequest.
        :type: TextWatermark
        """
        self._text_watermark = text_watermark

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Sensitive attributes are masked in the generic dump.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 default-encoding workaround for non-ASCII payloads.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, WatermarkRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
302,
198,
11748,
2237,
628,
198,
198,
6738,
289,
84,
707,
68,
291,
75,
2778,
21282,
74,
7295,
13,
26791,
13,
4023,
62,
26791,
1330,
5336,
270,
1096,
62,
1640,
62,
46911,
1634,
628,
... | 1.97476 | 3,130 |
"""
Max Sum Contiguous Subarray
Find the contiguous subarray within an array (containing at least one number) which has the largest sum.
For example:
Given the array [-2,1,-3,4,-1,2,1,-5,4],
the contiguous subarray [4,-1,2,1] has the largest sum = 6.
For this problem, return the maximum sum.
"""
# @param A : tuple of integers
# @return an integer
"""
Testing Code
"""
s = Solution()
arr = [-120, -202, -293, -60, -261, -67, 10, 82, -334, -393, -428, -182, -138, -167, -465, -347, -39, -51, -61, -491,
-216, -36, -281, -361, -271, -368, -122, -114, -53, -488, -327, -182, -221, -381, -431, -161, -59, -494, -406,
-298, -268, -425, -88, -320, -371, -5, 36, 89, -194, -140, -278, -65, -38, -144, -407, -235, -426, -219, 62,
-299, 1, -454, -247, -146, 24, 2, -59, -389, -77, -19, -311, 18, -442, -186, -334, 41, -84, 21, -100, 65, -491,
94, -346, -412, -371, 89, -56, -365, -249, -454, -226, -473, 91, -412, -30, -248, -36, -95, -395, -74, -432, 47,
-259, -474, -409, -429, -215, -102, -63, 80, 65, 63, -452, -462, -449, 87, -319, -156, -82, 30, -102, 68, -472,
-463, -212, -267, -302, -471, -245, -165, 43, -288, -379, -243, 35, -288, 62, 23, -444, -91, -24, -110, -28,
-305, -81, -169, -348, -184, 79, -262, 13, -459, -345, 70, -24, -343, -308, -123, -310, -239, 83, -127, -482,
-179, -11, -60, 35, -107, -389, -427, -210, -238, -184, 90, -211, -250, -147, -272, 43, -99, 87, -267, -270,
-432, -272, -26, -327, -409, -353, -475, -210, -14, -145, -164, -300, -327, -138, -408, -421, -26, -375, -263, 7,
-201, -22, -402, -241, 67, -334, -452, -367, -284, -95, -122, -444, -456, -152, 25, 21, 61, -320, -87, 98, 16,
-124, -299, -415, -273, -200, -146, -437, -457, 75, 84, -233, -54, -292, -319, -99, -28, -97, -435, -479, -255,
-234, -447, -157, 82, -450, 86, -478, -58, 9, -500, -87, 29, -286, -378, -466, 88, -366, -425, -38, -134, -184,
32, -13, -263, -371, -246, 33, -41, -192, -14, -311, -478, -374, -186, -353, -334, -265, -169, -418, 63, 77, 77,
-197, -211, -276, -190, -68, -184, -185, -235, -31, -465, -297, -277, -456, -181, -219, -329, 40, -341, -476, 28,
-313, -78, -165, -310, -496, -450, -318, -483, -22, -84, 83, -185, -140, -62, -114, -141, -189, -395, -63, -359,
26, -318, 86, -449, -419, -2, 81, -326, -339, -56, -123, 10, -463, 41, -458, -409, -314, -125, -495, -256, -388,
75, 40, -37, -449, -485, -487, -376, -262, 57, -321, -364, -246, -330, -36, -473, -482, -94, -63, -414, -159,
-200, -13, -405, -268, -455, -293, -298, -416, -222, -207, -473, -377, -167, 56, -488, -447, -206, -215, -176,
76, -304, -163, -28, -210, -18, -484, 45, 10, 79, -441, -197, -16, -145, -422, -124, 79, -464, -60, -214, -457,
-400, -36, 47, 8, -151, -489, -327, 85, -297, -395, -258, -31, -56, -500, -61, -18, -474, -426, -162, -79, 25,
-361, -88, -241, -225, -367, -440, -200, 38, -248, -429, -284, -23, 19, -220, -105, -81, -269, -488, -204, -28,
-138, 39, -389, 40, -263, -297, -400, -158, -310, -270, -107, -336, -164, 36, 11, -192, -359, -136, -230, -410,
-66, 67, -396, -146, -158, -264, -13, -15, -425, 58, -25, -241, 85, -82, -49, -150, -37, -493, -284, -107, 93,
-183, -60, -261, -310, -380]
print(s.maxSubArray(arr))
## OutPut === 217
| [
37811,
198,
11518,
5060,
2345,
29709,
3834,
18747,
198,
198,
16742,
262,
48627,
850,
18747,
1626,
281,
7177,
357,
38301,
379,
1551,
530,
1271,
8,
543,
468,
262,
4387,
2160,
13,
198,
198,
1890,
1672,
25,
198,
198,
15056,
262,
7177,
259... | 1.936047 | 1,720 |
from app.room import Room | [
6738,
598,
13,
3823,
1330,
10096
] | 4.166667 | 6 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 09:02:05 2020
@author: Sven Serneels, Ponalytics.
Code for radial transform functions largely adapted from
R code by Jakob Raymaekers
"""
import numpy as np
def quad(dd, p, n):
    """Quadratic radial transform.

    Distances up to the h-th order statistic (h = floor((n + p + 1) / 2))
    keep weight 1; larger distances are downweighted by (cutoff / d)**2.

    args:
        dd: column vector of distances, shape (n, 1)
        p: number of variables in original data
        n: number of rows in original data

    returns:
        xi: radial function, shape (n, 1)
    """
    h = int(np.floor((n + p + 1) / 2)) - 1
    cutoff = np.sort(dd, axis=0)[h][0]
    flagged = np.where(dd > cutoff)[0]
    weights = np.ones((n, 1))
    weights[flagged] = (1 / np.square(dd[flagged])) * (cutoff**2)
    return weights
def ss(dd, p, *args, prec=1e-10):
    """Spatial sign radial transform: element-wise 1 / max(d, prec).

    args:
        dd: vector of distances
        p: dimension of original data
        *args: accepts (and ignores) n, so the signature matches the other
            radial functions
        prec: floor applied to the distances to avoid division by zero

    returns:
        xi: radial function
    """
    return 1 / np.maximum(dd, prec)
def winsor(dd, p, n):
    """Winsor radial transform.

    Distances up to the h-th order statistic (h = floor((n + p + 1) / 2))
    keep weight 1; larger distances are downweighted by cutoff / d.

    args:
        dd: column vector of distances, shape (n, 1)
        p: number of variables in original data
        n: number of rows in original data

    returns:
        xi: radial function, shape (n, 1)
    """
    h = int(np.floor((n + p + 1) / 2)) - 1
    cutoff = np.sort(dd, axis=0)[h][0]
    flagged = np.where(dd > cutoff)[0]
    weights = np.ones((n, 1))
    weights[flagged] = (1 / dd[flagged]) * cutoff
    return weights
def ball(dd, p, n):
    """Ball radial transform: hard rejection of large distances.

    The cutoff is the h-th order statistic (h = floor((n + p + 1) / 2))
    computed on the Wilson–Hilferty scale d**(2/3) and mapped back; points
    beyond it get weight 0, all others weight 1.

    args:
        dd: column vector of distances, shape (n, 1)
        p: number of variables in original data
        n: number of rows in original data

    returns:
        xi: radial function, shape (n, 1)
    """
    transformed = np.power(dd, 2 / 3)
    h = int(np.floor((n + p + 1) / 2)) - 1
    med_t = np.sort(transformed, axis=0)[h][0]
    cutoff = np.power(med_t, 3 / 2)
    weights = np.ones((n, 1))
    weights[np.where(dd > cutoff)[0]] = 0
    return weights
def shell(dd, p, n):
    """Shell radial transform: keep only distances inside an annulus.

    On the Wilson–Hilferty scale d**(2/3), a robust centre (h-th order
    statistic) and spread (h-th order statistic of absolute deviations) are
    computed; points whose distance falls outside
    [(med - mad)**(3/2), (med + mad)**(3/2)] get weight 0.

    args:
        dd: column vector of distances, shape (n, 1)
        p: number of variables in original data
        n: number of rows in original data

    returns:
        xi: radial function, shape (n, 1)
    """
    transformed = np.power(dd, 2 / 3)
    h = int(np.floor((n + p + 1) / 2)) - 1
    med_t = np.sort(transformed, axis=0)[h][0]
    mad_t = np.sort(np.abs(transformed - med_t), axis=0)[h][0]
    lower = np.power(np.maximum(0, med_t - mad_t), 3 / 2)
    upper = np.power(med_t + mad_t, 3 / 2)
    weights = np.ones((n, 1))
    weights[np.where(dd < lower)[0]] = 0
    weights[np.where(dd > upper)[0]] = 0
    return weights
def linear_redescending(dd, p, n):
    """Linear redescending radial transform.

    Weight 1 up to a robust centre, then a linear descent to 0 at
    (med + 1.4826 * mad)**(3/2) (both statistics taken on the
    Wilson–Hilferty scale d**(2/3)), and 0 beyond.

    args:
        dd: column vector of distances, shape (n, 1)
        p: number of variables in original data
        n: number of rows in original data

    returns:
        xi: radial function, shape (n, 1)
    """
    transformed = np.power(dd, 2 / 3)
    h = int(np.floor((n + p + 1) / 2)) - 1
    med_t = np.sort(transformed, axis=0)[h][0]
    mad_t = np.sort(np.abs(transformed - med_t), axis=0)[h][0]
    knee = med_t**(3 / 2)
    # 1.4826 is the usual MAD consistency factor at the normal model.
    cutoff = (med_t + 1.4826 * mad_t)**(3 / 2)
    mid = np.where(np.logical_and(dd > knee, dd <= cutoff))[0]
    high = np.where(dd > cutoff)[0]
    weights = np.ones((n, 1))
    weights[mid] = 1 - (dd[mid, :] - knee) / (cutoff - knee)
    weights[high] = 0
    return weights
def _norms(X,**kwargs):
"""
Casewise norms of a matrix
"""
return(np.linalg.norm(X,axis=1,keepdims=True,**kwargs))
def _gsspp(X, p, n, fun=ss):
    """Generalized spatial sign pre-processing for centred data.

    Each row of ``X`` is rescaled by the radial weight that ``fun`` assigns
    to its casewise norm.
    """
    weights = fun(_norms(X), p, n)
    return np.multiply(X, weights)
def _spatial_sign(X,**kwargs):
"""
Spatial Sign Pre-Processing for Centred Data
"""
return(X/_norms(X))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
1526,
1679,
7769,
25,
2999,
25,
2713,
12131,
198,
198,
31,
9800,
25,
44611,
2930,
7... | 2.0624 | 1,875 |
#!/usr/bin/env python
"""Crude wall-clock benchmark: run the command given on the command line
five times via ``os.spawnvp`` and report per-run, all, min, max and spread
timings (uses ``mitogen.core.now`` as the clock)."""

import sys
import os
import time

import mitogen.core

times = []
for x in range(5):
    t0 = mitogen.core.now()
    # Execute argv[1:] as a child process and block until it exits.
    os.spawnvp(os.P_WAIT, sys.argv[1], sys.argv[1:])
    t = mitogen.core.now() - t0
    times.append(t)
    print('+++', t)

print('all:', times)
print('min %s max %s diff %s' % (min(times), max(times), (max(times) - min(times))))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
640,
198,
198,
11748,
10255,
6644,
13,
7295,
628,
198,
22355,
796,
17635,
198,
1640,
2124,
287,
2837,
7,
20,
2599,
198,
220,
220,
220,... | 2.277778 | 162 |
# Build a CharRNN Relay module and print its normalized main function.
from tvm.relay import op, var, Var, Function, Clause, PatternConstructor, PatternVar, Match, const
from tvm.relay import TupleGetItem, Tuple, TensorType, TupleType, If
from network import Network
from common import Linear
import numpy as np
from tvm.relay.quantize import quantize

# NOTE(review): CharRNNGen20 is presumably defined in the local `network`
# module — it is not visible here; confirm the constructor arguments.
x = CharRNNGen20(input_size=22, hidden_size=23, output_size=34)

from tvm.relay.transform import ToANormalForm, PartialEvaluate, ToGraphNormalForm, Sequential

# Partially evaluate in A-normal form, then convert back to graph form.
p = Sequential([ToANormalForm(), PartialEvaluate(), ToGraphNormalForm()])
x.mod = p(x.mod)
# Expose the generated function as the module entry point.
x.mod["main"] = x.mod["f_0"]
#rnn = quantize(x.mod["main"], x.mod)
print(x.mod["main"])
| [
6738,
256,
14761,
13,
2411,
323,
1330,
1034,
11,
1401,
11,
12372,
11,
15553,
11,
28081,
11,
23939,
42316,
273,
11,
23939,
19852,
11,
13225,
11,
1500,
198,
6738,
256,
14761,
13,
2411,
323,
1330,
309,
29291,
3855,
7449,
11,
309,
29291,
... | 2.875 | 216 |
#!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime
import endpoints
import time
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.ext import ndb
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import TeeShirtSize
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import BooleanMessage
from models import ConflictException
from models import StringMessage
from models import Session
from models import SessionForm
from models import SessionForms
from settings import WEB_CLIENT_ID
from utils import getUserId
# OAuth scope and client IDs accepted by the API.
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID

# Default field values applied when a new Conference omits them.
DEFAULTS = {
    "city": "Default City",
    "maxAttendees": 0,
    "seatsAvailable": 0,
    "topics": [ "Default", "Topic" ],
}

# Maps query-form operator names to ndb filter operators.
OPERATORS = {
            'EQ':   '=',
            'GT':   '>',
            'GTEQ': '>=',
            'LT':   '<',
            'LTEQ': '<=',
            'NE':   '!='
            }

# Maps query-form field names to Conference property names.
FIELDS =    {
            'CITY': 'city',
            'TOPIC': 'topics',
            'MONTH': 'month',
            'MAX_ATTENDEES': 'maxAttendees',
            }

# Request containers combining a message body with URL/query parameters.
CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
)

SESSION_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    session=messages.StringField(1),
)

SESSION_POST_REQUEST = endpoints.ResourceContainer(
    SessionForm,
    websafeConferenceKey=messages.StringField(1),
)

SESS_BY_TYPE_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
    type=messages.StringField(2),
)

SESS_BY_SPEAKER_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    speaker=messages.StringField(1),
)

SESS_BY_TIME_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    startTime=messages.IntegerField(1, variant=messages.Variant.INT32),
)

CONF_BY_CITY_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    city=messages.StringField(1),
)

# Memcache keys used by the announcement / featured-speaker tasks.
MEMCACHE_ANNOUNCEMENTS_KEY = "Recent Announcements"
MEMCACHE_FEATURED_SPEAKER_KEY = "Featured Speaker"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api( name='conference',
version='v1',
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
pf = ProfileForm()
# Loop through all fields and copy data to form
for field in pf.all_fields():
if hasattr(prof, field.name):
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
# use endpoint method to get user info
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# if no profile exists, create one
if not profile:
profile = Profile(
key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
prof = self._getProfileFromUser()
# save new data
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
prof.put()
return self._copyProfileToForm(prof)
    @endpoints.method(message_types.VoidMessage, ProfileForm,
            path='profile', http_method='GET', name='getProfile')
    def getProfile(self, request):
        """Return user profile (created on first access)."""
        return self._doProfile()
    @endpoints.method(ProfileMiniForm, ProfileForm,
            path='profile', http_method='POST', name='saveProfile')
    def saveProfile(self, request):
        """Update & return user profile."""
        return self._doProfile(request)
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# turn date to string
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
    def _createConferenceObject(self, request):
        """Create or update Conference object, returning ConferenceForm/request.

        Raises:
            endpoints.UnauthorizedException: if no user is signed in.
            endpoints.BadRequestException: if the conference name is missing.
        """
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        if not request.name:
            raise endpoints.BadRequestException("Conference 'name' field required")
        # build out conference object from the request message
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        # these two are derived/display-only and not stored on the entity
        del data['websafeKey']
        del data['organizerDisplayName']
        # set defaults if needed (mirrored back onto the request, which is
        # also the response message)
        for df in DEFAULTS:
            if data[df] in (None, []):
                data[df] = DEFAULTS[df]
                setattr(request, df, DEFAULTS[df])
        # format dates: incoming values are ISO-ish strings, first 10 chars
        # hold YYYY-MM-DD; month is denormalized for querying
        if data['startDate']:
            data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
            data['month'] = data['startDate'].month
        else:
            data['month'] = 0
        if data['endDate']:
            data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
        # initially all seats are available
        if data["maxAttendees"] > 0:
            data["seatsAvailable"] = data["maxAttendees"]
            setattr(request, "seatsAvailable", data["maxAttendees"])
        # allocate an ID with the creator's Profile as ancestor
        p_key = ndb.Key(Profile, user_id)
        c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
        c_key = ndb.Key(Conference, c_id, parent=p_key)
        data['key'] = c_key
        data['organizerUserId'] = request.organizerUserId = user_id
        # create conference and create send email task
        Conference(**data).put()
        taskqueue.add(params={'email': user.email(),
            'conferenceInfo': repr(request)},
            url='/tasks/send_confirmation_email'
        )
        return request
    @endpoints.method(ConferenceForm, ConferenceForm, path='conference',
            http_method='POST', name='createConference')
    def createConference(self, request):
        """Create new conference owned by the current user."""
        return self._createConferenceObject(request)
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences',
http_method='POST',
name='queryConferences')
def queryConferences(self, request):
"""Query for conferences."""
conferences = self._getQuery(request)
# return conferences within query params
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "")
for conf in conferences]
)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='getConferencesCreated',
http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
"""Return conferences created by user."""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
p_key = ndb.Key(Profile, getUserId(user))
conferences = Conference.query(ancestor=p_key)
prof = p_key.get()
displayName = getattr(prof, 'displayName')
return ConferenceForms(
items=[self._copyConferenceToForm(conf, displayName) for conf in conferences]
)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='GET', name='getConference')
def getConference(self, request):
"""Return requested conference (by websafeConferenceKey)."""
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
prof = conf.key.parent().get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(message_types.VoidMessage, SessionForms,
path='filterPlayground',
http_method='GET', name='filterPlayground')
def filterPlayground(self, request):
"""Filter Experimentation"""
q = Session.query()
q = q.order(Session.startTime)
q = q.filter(Session.startTime < 1900)
# create empty list for session not matching type of session criteria
sessionsTypeExcluded = []
for i in q:
if i.typeOfSession != "workshop":
sessionsTypeExcluded.append(i)
return SessionForms(
items=[self._copySessionToForm(sesh) for sesh in sessionsTypeExcluded]
)
@endpoints.method(SESS_BY_TIME_REQUEST, SessionForms,
path='filterPlayground2',
http_method='GET', name='filterPlayground2')
def filterPlayground2(self, request):
"""Filter Experimentation #2 - Get Sessions in 4 hour window"""
start = request.startTime
end = request.startTime + 400
q = Session.query()
q = q.order(Session.startTime)
q = q.filter(Session.startTime >= start)
q = q.filter(Session.startTime <= end)
return SessionForms(
items=[self._copySessionToForm(sesh) for sesh in q]
)
@endpoints.method(CONF_BY_CITY_REQUEST, ConferenceForms,
path='filterPlayground3',
http_method='GET', name='filterPlayground3')
def filterPlayground3(self, request):
"""Filter Experimentation #3 - Get Conferences in certain city"""
q = Conference.query()
q = q.order(Conference.city)
q = q.filter(Conference.city == request.city)
return ConferenceForms(
items=[self._copyConferenceToForm(i, "") for i in q]
)
    def _getQuery(self, request):
        """Return formatted query from the submitted filters.

        Builds a Conference query from the parsed filters, ordering by the
        inequality-filtered property first (a Datastore requirement) and by
        name second.
        """
        q = Conference.query()
        inequality_filter, filters = self._formatFilters(request.filters)
        # If exists, sort on inequality filter first
        if not inequality_filter:
            q = q.order(Conference.name)
        else:
            q = q.order(ndb.GenericProperty(inequality_filter))
            q = q.order(Conference.name)
        for filtr in filters:
            # these two properties are stored as integers; filter values
            # arrive as strings and must be converted before comparison
            if filtr["field"] in ["month", "maxAttendees"]:
                filtr["value"] = int(filtr["value"])
            formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
            q = q.filter(formatted_query)
        return q
    def _formatFilters(self, filters):
        """Parse, check validity and format user supplied filters.

        Returns:
            (inequality_field, formatted_filters): the single field name (or
            None) that carries a non-equality operator, and the list of
            filter dicts with field/operator names mapped for Datastore.

        Raises:
            endpoints.BadRequestException: unknown field/operator, or
            inequality filters on more than one field (a Datastore limit).
        """
        formatted_filters = []
        inequality_field = None
        for f in filters:
            filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
            try:
                # map the message enum names onto Datastore property names
                # and comparison operators
                filtr["field"] = FIELDS[filtr["field"]]
                filtr["operator"] = OPERATORS[filtr["operator"]]
            except KeyError:
                raise endpoints.BadRequestException("Filter contains invalid field or operator.")
            # Every operation except "=" is an inequality
            if filtr["operator"] != "=":
                # check if inequality operation has been used in previous filters
                # disallow the filter if inequality was performed on a different field before
                # track the field on which the inequality operation is performed
                if inequality_field and inequality_field != filtr["field"]:
                    raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
                else:
                    inequality_field = filtr["field"]
            formatted_filters.append(filtr)
        return (inequality_field, formatted_filters)
# - - - Registration - - - - - - - - - - - - - - - - - - - -
    @ndb.transactional(xg=True)
    def _conferenceRegistration(self, request, reg=True):
        """Register or unregister user for selected conference.

        Runs as a cross-group transaction so the Profile and the Conference
        seat count are updated atomically.

        Args:
            reg: True to register, False to unregister.

        Returns:
            BooleanMessage -- True on success; False only when unregistering
            from a conference the user was not registered for.

        Raises:
            endpoints.NotFoundException: unknown conference key.
            ConflictException: already registered, or no seats available.
        """
        retval = None
        prof = self._getProfileFromUser()
        wsck = request.websafeConferenceKey
        conf = ndb.Key(urlsafe=wsck).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % wsck)
        if reg:
            if wsck in prof.conferenceKeysToAttend:
                raise ConflictException(
                    "You have already registered for this conference")
            if conf.seatsAvailable <= 0:
                raise ConflictException(
                    "There are no seats available.")
            prof.conferenceKeysToAttend.append(wsck)
            conf.seatsAvailable -= 1
            retval = True
        else:
            if wsck in prof.conferenceKeysToAttend:
                prof.conferenceKeysToAttend.remove(wsck)
                conf.seatsAvailable += 1
                retval = True
            else:
                retval = False
        # write both entities back inside the same transaction
        prof.put()
        conf.put()
        return BooleanMessage(data=retval)
    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
            path='conference/{websafeConferenceKey}',
            http_method='POST', name='registerForConference')
    def registerForConference(self, request):
        """Register user for selected conference."""
        # thin endpoint wrapper; all logic lives in _conferenceRegistration
        return self._conferenceRegistration(request)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
prof = self._getProfileFromUser()
keys = prof.conferenceKeysToAttend
listofkeys = []
for i in keys:
confFromNdb = ndb.Key(urlsafe=i)
listofkeys.append(confFromNdb)
conferences = ndb.get_multi(listofkeys)
return ConferenceForms(items=[self._copyConferenceToForm(conf, "")\
for conf in conferences]
)
    @staticmethod
    def _cacheAnnouncement():
        """Create Announcement & assign to memcache; used by
        memcache cron job & putAnnouncement().

        Caches a "nearly sold out" notice for conferences with 1-5 seats
        left; clears the cache entry when none qualify. Returns the
        announcement text (empty string when cleared).
        """
        confs = Conference.query(ndb.AND(
            Conference.seatsAvailable <= 5,
            Conference.seatsAvailable > 0)
        ).fetch(projection=[Conference.name])
        if confs:
            announcement = '%s %s' % (
                'Last chance to attend! The following conferences '
                'are nearly sold out:',
                ', '.join(conf.name for conf in confs))
            memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
        else:
            # no candidates: remove any stale announcement from the cache
            announcement = ""
            memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
        return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get',
http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
announcement = ""
if memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY):
announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY)
return StringMessage(data=announcement)
    @endpoints.method(CONF_GET_REQUEST, SessionForms,
            path='conference/{websafeConferenceKey}/session',
            http_method='GET', name='getConferenceSession')
    def getConferenceSession(self, request):
        """Return all sessions within a conference"""
        # NOTE(review): the websafe string is used here as a raw Conference
        # id, while getConference decodes it with ndb.Key(urlsafe=...).
        # Session creation (_createSessionObject) builds the parent the same
        # way as here, so the session paths are mutually consistent -- but
        # confirm the asymmetry with getConference is intentional.
        p_key = ndb.Key(Conference, request.websafeConferenceKey)
        sessions = Session.query(ancestor=p_key)
        # NOTE(review): this checks the Query object, not its results; it
        # looks like the NotFound branch can never fire -- verify.
        if not sessions:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % request.websafeConferenceKey)
        return SessionForms(
            items=[self._copySessionToForm(sesh) for sesh in sessions]
        )
    @endpoints.method(SESSION_POST_REQUEST, SessionForm, path='session',
            http_method='POST', name='createSession')
    def createSession(self, request):
        """Create new session under a conference."""
        # thin endpoint wrapper; all logic lives in _createSessionObject
        return self._createSessionObject(request)
    def _createSessionObject(self, request):
        """Create or update Session object, returning SessionForm/request."""
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        # inbound date is a "YYYY-MM-DD..." string
        if data['date']:
            data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date()
        # NOTE(review): the websafe string is used as a raw Conference id
        # for the parent key; this matches the session query methods but
        # differs from getConference, which decodes it with
        # ndb.Key(urlsafe=...) -- confirm this is intentional.
        p_key = ndb.Key(Conference, request.websafeConferenceKey)
        s_id = Session.allocate_ids(size=1, parent=p_key)[0]
        s_key = ndb.Key(Session, s_id, parent=p_key)
        data['key'] = s_key
        del data['websafeConferenceKey']
        # create session and create task for assigning featured speaker to memcache
        Session(**data).put()
        if data['speaker']:
            taskqueue.add(url='/tasks/assign_featured_speaker', params={'speaker': data['speaker']})
        return self._copySessionToForm(request)
@endpoints.method(SESS_BY_TYPE_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/session/{type}', http_method='GET', name='conferenceSessionsByType')
def getConferenceSessionsByType(self, request):
"""Get all session with a specified type"""
p_key = ndb.Key(Conference, request.websafeConferenceKey)
sessionType = request.type
q = Session.query(ancestor=p_key)
q = q.filter(Session.typeOfSession == sessionType)
return SessionForms(
items=[self._copySessionToForm(sesh) for sesh in q]
)
@endpoints.method(SESS_BY_SPEAKER_REQUEST, SessionForms,
path='sessions/{speaker}', http_method='GET', name='conferenceSessionsBySpeaker')
def getConferenceSessionsBySpeaker(self, request):
"""Get all sessions with specified speaker"""
sessionSpeaker = request.speaker
q = Session.query()
q = q.filter(Session.speaker == sessionSpeaker)
return SessionForms(
items=[self._copySessionToForm(sesh) for sesh in q]
)
@endpoints.method(SESSION_GET_REQUEST, BooleanMessage,
path='sessions/{session}',
http_method='POST', name='addSessionToWishlist')
def addSessionToWishlist(self, request):
"""Add session to user wishlist"""
prof = self._getProfileFromUser()
prof.sessionWishList.append(request.session)
prof.put()
return BooleanMessage(data=True)
@endpoints.method(message_types.VoidMessage, SessionForms,
path='conference/{websafeConferenceKey}/mysessions', http_method='GET', name='getSessionsInWishlist')
def getSessionsInWishlist(self, request):
"""Get all sessions in logged in user's wishlist"""
# get user
prof = self._getProfileFromUser()
# find sessions in conf
q = Session.query()
sessionsInWishlist = []
for i in q:
key = i.key.urlsafe()
if key in prof.sessionWishList:
sessionsInWishlist.append(i)
return SessionForms(
items=[self._copySessionToForm(sesh) for sesh in sessionsInWishlist]
)
@endpoints.method(SESSION_GET_REQUEST, BooleanMessage,
path='sessions/{session}/delete', http_method="POST", name='deleteSessionInWishlist')
def deleteSessionInWishlist(self, request):
"""Delete specified session from wishlist"""
prof = self._getProfileFromUser()
if request.session in prof.sessionWishList:
wishlist = prof.sessionWishList
index = wishlist.index(request.session)
del prof.sessionWishList[index]
prof.put()
print prof.sessionWishList
return BooleanMessage(data=True)
@staticmethod
def _cacheFeaturedSpeaker(speaker):
"""Create Featured Speaker announcement & assign to memcache"""
featspeak = speaker
if featspeak:
announcement = '%s %s' % ('The featured speaker is', featspeak)
memcache.set(MEMCACHE_FEATURED_SPEAKER_KEY, announcement)
else:
announcement = "Nothing now."
return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
path='getFeaturedSpeaker', http_method='GET', name='getFeaturedSpeaker')
def getFeaturedSpeaker(self, request):
"""Get featured speaker from memcache"""
# key = self.request.get('key')
announcement = memcache.get(MEMCACHE_FEATURED_SPEAKER_KEY)
return StringMessage(data=announcement)
api = endpoints.api_server([ConferenceApi])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
41124,
13,
9078,
1377,
35774,
4355,
4495,
4382,
12,
1589,
11361,
2034,
7117,
7824,
26,
198,
220,
220,
220,
3544,
3012,
10130,
5268,
13033,
198,
198,
3,
7390,
25,
4495,... | 2.29641 | 10,138 |
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'weightedMean' function below.
#
# The function accepts following parameters:
#  1. INTEGER_ARRAY X
#  2. INTEGER_ARRAY W
#

def weightedMean(n, X, W):
    """Print and return the weighted mean of X with weights W, 1-dp rounded.

    Args:
        n: element count supplied by the input format (not needed; the
           lists themselves are iterated).
        X: the integer values.
        W: the corresponding integer weights.
    """
    # BUG FIX: the stub below called weightedMean but the function was
    # never defined, so the script always crashed with NameError.
    result = round(sum(x * w for x, w in zip(X, W)) / float(sum(W)), 1)
    print(result)
    return result


if __name__ == '__main__':
    n = int(input().strip())
    vals = list(map(int, input().rstrip().split()))
    weights = list(map(int, input().rstrip().split()))
    weightedMean(n, vals, weights)
| [
2,
48443,
8800,
14,
29412,
18,
201,
198,
201,
198,
11748,
10688,
201,
198,
11748,
28686,
201,
198,
11748,
4738,
201,
198,
11748,
302,
201,
198,
11748,
25064,
201,
198,
201,
198,
2,
201,
198,
2,
13248,
262,
705,
6551,
276,
5308,
272,... | 2.455056 | 178 |
'''
SYNBIOCHEM (c) University of Manchester 2018
SYNBIOCHEM is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
import re
import sys
from urllib.request import urlopen
import xml.sax
_RE = r'(\d+)\.\.(\d+)'
class EnaHandler(xml.sax.ContentHandler):
    '''Handler for ENA XML files.

    Collects a feature location: start, end and whether the span is on the
    complement strand.
    '''

    def __init__(self):
        xml.sax.ContentHandler.__init__(self)
        # BUG FIX: no __init__ existed, so the getters below raised
        # AttributeError whenever parsing set nothing. Defaults make the
        # handler safe to query.
        # NOTE(review): no startElement/characters override is visible in
        # this chunk, so nothing here ever updates these fields -- confirm
        # the parsing callbacks were not lost.
        self.__start = None
        self.__end = None
        self.__complement = False

    def get_start(self):
        '''Get start.'''
        return self.__start

    def get_end(self):
        '''Get end.'''
        return self.__end

    def is_complement(self):
        '''Get complement flag.'''
        return self.__complement
def get_start_end_comp(ena_id):
    '''Fetch an ENA record by id and return (start, end, is_complement).'''
    url = ('https://www.ebi.ac.uk/ena/data/view/'
           '%s&display=xml&download=xml' % ena_id)
    return _parse(urlopen(url))
def parse(filename):
    '''Parse a local ENA XML file and return (start, end, is_complement).'''
    with open(filename) as handle:
        return _parse(handle)
def _parse(src):
    '''Run the SAX parser over src and return what the handler collected.'''
    handler = EnaHandler()
    sax_parser = xml.sax.make_parser()
    sax_parser.setContentHandler(handler)
    sax_parser.parse(src)
    return (handler.get_start(), handler.get_end(),
            handler.is_complement())
def main(args):
    '''Entry point: parse the file named by args[0] and print the result.'''
    result = parse(args[0])
    print(result)
if __name__ == '__main__':
    # Expects a single command-line argument: path to an ENA XML file.
    main(sys.argv[1:])
| [
7061,
6,
198,
23060,
45,
3483,
46,
3398,
3620,
357,
66,
8,
2059,
286,
9502,
2864,
198,
198,
23060,
45,
3483,
46,
3398,
3620,
318,
11971,
739,
262,
17168,
13789,
13,
198,
198,
2514,
1570,
257,
4866,
286,
428,
5964,
11,
3187,
1279,
... | 2.40884 | 543 |
from arrays import DynamicArray
import fileinput
import random
class WordController:
    '''
    Reads and updates a user's word-progress file.

    File layout (inferred from the methods below -- confirm): a leading
    integer count of learned words, then one
    "word,translation,definition" line per word; '===' lines separate
    sections and blank lines are ignored.
    '''

    def __init__(self, file):
        '''Remember the path of the progress file to operate on.'''
        # BUG FIX: the original class had no __init__, so
        # WordController("...") raised TypeError and self._file (read by
        # every method) was never set.
        self._file = file

    def read_from_file(self):
        '''
        Return (records, words): DynamicArrays of the full
        [word, translation, definition] rows and of the words alone.
        '''
        learned_words_full = DynamicArray()
        learned_words = DynamicArray()
        # with-block replaces the original's never-closed file handle
        with open(self._file, "r") as fl:
            for line in fl:
                line = line.strip()
                if '===' not in line and line != '' and line[0] not in '123456789':
                    line = line.split(',')
                    learned_words_full.append(line)
                    learned_words.append(line[0])
        return learned_words_full, learned_words

    def write_to_file(self):
        '''
        Copy the next word from translations.txt into the progress file,
        increment the stored counter, and return a printable summary
        (None when translations.txt has no next entry; the original
        raised UnboundLocalError in that case).
        '''
        info = None
        with open(self._file, "r") as progress:
            word_num = int(progress.readline())
        with open('translations.txt', 'r') as translations:
            for row in translations:
                row = row.strip().split(",")
                if row[0] == str(word_num + 1):
                    info = ("Word: " + row[1] + "\nTranslation: " + row[3] +
                            "\nDefinition: " + row[4])
                    with open(self._file, "a") as progress:
                        progress.write(row[1] + "," + row[3] + "," + row[4] + "\n")
                    break
        # Bump the counter by rewriting the file in place.
        # NOTE(review): like the original, this replaces the old number
        # anywhere it appears on any line, not only on the counter line.
        old, new = str(word_num), str(word_num + 1)
        for line in fileinput.FileInput(self._file, inplace=1):
            # BUG FIX: the original printed the line with its newline
            # intact, so print() inserted a blank line after every line on
            # each rewrite; strip it and let print() add the newline.
            print(line.replace(old, new).rstrip('\n'))
        return info

    def word_string(self):
        '''Return all learned-word lines joined into one printable string.'''
        words = ''
        with open(self._file, 'r') as fl:
            for line in fl:
                line = line.strip()
                if line not in '123456789' and '===' not in line and line != '':
                    words += line + '\n'
        return words
class User:
    '''
    Menu-level actions for one user.

    NOTE(review): the methods read self.word_controller, but no __init__
    assigns it in the visible code -- confirm it is set elsewhere.
    '''
    def choose_section(self, inpt):
        '''
        Dispatch a menu choice: "1" learn a word, "2" run a quiz,
        "3" show the word list; anything else returns "Invalid input".
        '''
        if inpt == "1" or inpt == "Learn new word":
            return self.learn_new_word()
        if inpt == '2' or inpt == "Test yourself":
            return self.test_yourself( 2)
        if inpt == '3' or inpt == "Wordlist":
            # NOTE(review): returns the result of print(), i.e. None,
            # unlike the other branches which return values -- verify.
            return print(self.see_word_list())
        else:
            return "Invalid input"
    def learn_new_word(self):
        '''
        Pull the next word via the controller, print its summary and
        return it.
        '''
        info = self.word_controller.write_to_file()
        print(info)
        return info
    def test_yourself(self, number):
        '''
        Build one multiple-choice quiz item.

        Returns False when fewer than two words are learned; otherwise a
        (correct_translation, sorted_options, word_to_check) tuple.
        '''
        learned_words_full, learned_words = self.word_controller.read_from_file()
        if len(learned_words) < 2:
            return False
        else:
            check_word = random.choice(learned_words)
            index = learned_words.index(check_word)
            true_trans = learned_words_full[index][1]
            # NOTE(review): each pass overwrites word1..word3, so only the
            # final iteration's distractors survive, and distractors may
            # duplicate each other or the true answer. Looks unintended --
            # verify before changing.
            for time in range(number):
                word1 = random.choice(learned_words_full)
                trans1 = word1[1]
                word2 = random.choice(learned_words_full)
                trans2 = word2[1]
                word3 = random.choice(learned_words_full)
                trans3 = word3[1]
                false_answ = [trans1, trans2, trans3, true_trans]
                false_answ.sort()
            return (true_trans, false_answ, check_word)
    def see_word_list(self):
        '''Return the learned words as one printable string.'''
        words = self.word_controller.word_string()
        return words
return words
def check_answer(answer, true_trans):
    '''Return True when the chosen answer equals the correct translation.'''
    # Direct comparison replaces the redundant if/else around a boolean.
    return answer == true_trans
if __name__ == "__main__":
a = WordController("julia_poch.txt")
print(a.write_to_file()) | [
6738,
26515,
1330,
26977,
19182,
198,
11748,
2393,
15414,
198,
11748,
4738,
198,
198,
4871,
9678,
22130,
25,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
705,
7061,
628,
220,
220,
220,
825,
1100,
62,
6738,
62,
7753,
7,
944,
25... | 1.924192 | 1,794 |
from flask_sqlalchemy import SQLAlchemy

# Shared SQLAlchemy extension instance; bind it to an application
# elsewhere (e.g. db.init_app(app)) before use.
db = SQLAlchemy()
| [
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
198,
9945,
796,
16363,
2348,
26599,
3419,
198
] | 3.105263 | 19 |
num = raw_input()
cnt = 0
for c in num:
if c != '4' and c != '7': continue
cnt += 1
ans = 'YES'
for c in str(cnt):
if c != '4' and c != '7': ans = 'NO'
print ans | [
22510,
796,
8246,
62,
15414,
3419,
198,
198,
66,
429,
796,
657,
198,
1640,
269,
287,
997,
25,
198,
220,
220,
220,
611,
269,
14512,
705,
19,
6,
290,
269,
14512,
705,
22,
10354,
2555,
198,
220,
220,
220,
269,
429,
15853,
352,
198,
... | 2.108434 | 83 |
from dataclasses import dataclass, asdict
import numpy as np
@dataclass
class Example:
    """Example of a molecule: string representations, model output and
    layout metadata for one example."""
    #: SMILES string for molecule
    smiles: str
    #: SELFIES for molecule, as output from :func:`selfies.encoder`
    selfies: str
    #: Tanimoto similarity relative to base
    similarity: float
    #: Output of model function
    yhat: float
    #: Index relative to other examples
    index: int
    #: PCA projected position from similarity (None until assigned)
    position: np.ndarray = None
    #: True if base
    is_origin: bool = False
    #: Index of cluster, can be -1 for no cluster
    cluster: int = 0
    #: Label for this example (None when unlabeled)
    label: str = None
# to make it look nicer
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
355,
11600,
198,
11748,
299,
32152,
355,
45941,
628,
198,
31,
19608,
330,
31172,
198,
4871,
17934,
25,
198,
220,
220,
220,
37227,
16281,
286,
257,
27756,
37811,
628,
220,
220,
220,
1... | 2.885246 | 244 |
from src.neural_networks.art_fuzzy import ARTFUZZY
from src.utils.functions import *
import numpy as np
import pytest
@pytest.fixture()
| [
6738,
12351,
13,
710,
1523,
62,
3262,
5225,
13,
433,
62,
69,
4715,
88,
1330,
5923,
10234,
52,
30148,
56,
198,
6738,
12351,
13,
26791,
13,
12543,
2733,
1330,
1635,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
628,
198,
... | 2.62963 | 54 |
import os
import numpy as np
import time
import tensorflow as tf
import sklearn.model_selection
from khan.data.dataset import RawDataset
from khan.training.trainer_multi_tower import TrainerMultiTower, flatten_results, initialize_module
from data_utils import HARTREE_TO_KCAL_PER_MOL
from data_loaders import DataLoader
from concurrent.futures import ThreadPoolExecutor
from multiprocessing.dummy import Pool as ThreadPool
import multiprocessing
import argparse
if __name__ == "__main__":
main()
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
1341,
35720,
13,
19849,
62,
49283,
198,
198,
6738,
479,
7637,
13,
7890,
13,
19608,
292,
316,
1330,
16089,
27354,
2... | 3.217391 | 161 |
from __future__ import division
import random
import time
import pdb
import torch
import torch.nn.functional as F
from torch_geometric.utils import to_dense_adj
from torch import tensor
from torch.optim import Adam
from train_edges import train_edges
from test_edges import test_edges
from negative_sampling import negative_sampling
from torch_geometric.utils import (remove_self_loops, add_self_loops)
from sklearn.metrics import roc_auc_score, precision_recall_fscore_support, confusion_matrix
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
from train_eval import run_cs, run_
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
11748,
4738,
198,
11748,
640,
198,
11748,
279,
9945,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
62,
469,
16996,
13,
26791,
1330,
284,
62,
67,
1072,
... | 3.348066 | 181 |