content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""A TaskRecord backend using mongodb
Authors:
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
from pymongo import Connection
from bson import Binary
from IPython.utils.traitlets import Dict, List, Unicode, Instance
from .dictdb import BaseDB
#-----------------------------------------------------------------------------
# MongoDB class
#-----------------------------------------------------------------------------
class MongoDB(BaseDB):
    """MongoDB TaskRecord backend.

    Persists task records (dicts keyed by ``msg_id``) in a MongoDB
    collection.  The pymongo connection (``self._connection``) and the
    records collection (``self._records``), as well as the
    ``_binary_buffers`` helper, are initialized outside this chunk --
    presumably in ``__init__`` -- TODO confirm.
    """

    connection_args = List(config=True,
        help="""Positional arguments to be passed to pymongo.Connection. Only
        necessary if the default mongodb configuration does not point to your
        mongod instance.""")

    connection_kwargs = Dict(config=True,
        help="""Keyword arguments to be passed to pymongo.Connection. Only
        necessary if the default mongodb configuration does not point to your
        mongod instance."""
    )

    database = Unicode(config=True,
        help="""The MongoDB database name to use for storing tasks for this session. If unspecified,
        a new database will be created with the Hub's IDENT. Specifying the database will result
        in tasks from previous sessions being available via Clients' db_query and
        get_result methods.""")

    _connection = Instance(Connection)  # pymongo connection

    def add_record(self, msg_id, rec):
        """Add a new Task Record, by msg_id."""
        # Buffers must be wrapped as bson.Binary before insertion;
        # _binary_buffers is defined outside this chunk -- TODO confirm.
        rec = self._binary_buffers(rec)
        self._records.insert(rec)

    def get_record(self, msg_id):
        """Get a specific Task Record, by msg_id.

        Raises
        ------
        KeyError
            If no record matches ``msg_id`` (find_one returns a falsy
            value when nothing is found).
        """
        r = self._records.find_one({'msg_id': msg_id})
        if not r:
            raise KeyError(msg_id)
        return r

    def update_record(self, msg_id, rec):
        """Update the data in an existing record."""
        rec = self._binary_buffers(rec)
        self._records.update({'msg_id': msg_id}, {'$set': rec})

    def drop_matching_records(self, check):
        """Remove all records matching the query dict `check`."""
        self._records.remove(check)

    def drop_record(self, msg_id):
        """Remove a single record from the DB, by msg_id."""
        self._records.remove({'msg_id': msg_id})

    def find_records(self, check, keys=None):
        """Find records matching a query dict, optionally extracting subset of keys.

        Returns list of matching records.

        Parameters
        ----------
        check: dict
            mongodb-style query argument
        keys: list of strs [optional]
            if specified, the subset of keys to extract. msg_id will *always* be
            included.
        """
        if keys and 'msg_id' not in keys:
            # Build a new list instead of appending in place: the original
            # code mutated the caller's `keys` argument as a side effect.
            keys = keys + ['msg_id']
        matches = list(self._records.find(check, keys))
        for rec in matches:
            # strip mongo's internal ObjectId from the results
            rec.pop('_id')
        return matches

    def get_history(self):
        """get all msg_ids, ordered by time submitted."""
        cursor = self._records.find({}, {'msg_id': 1}).sort('submitted')
        return [rec['msg_id'] for rec in cursor]
| [
37811,
32,
15941,
23739,
30203,
1262,
285,
506,
375,
65,
198,
198,
30515,
669,
25,
198,
198,
9,
1855,
371,
42,
198,
37811,
198,
2,
10097,
32501,
198,
2,
220,
15069,
357,
34,
8,
3050,
12,
9804,
220,
383,
6101,
7535,
7712,
4816,
198... | 2.703588 | 1,282 |
# -*- coding: utf-8 -*-
#
from __future__ import division
import sympy
from .helpers import untangle2
class WilliamsShunnJameson(object):
    """
    D.M. Williams, L. Shunn, A. Jameson,
    Symmetric quadrature rules for simplexes based on sphere close packed
    lattice arrangements,
    Journal of Computational and Applied Mathematics,
    266 (2014) 18–38,
    <https://doi.org/10.1016/j.cam.2014.01.007>.
    Abstract:
    Sphere close packed (SCP) lattice arrangements of points are well-suited
    for formulating symmetric quadrature rules on simplexes, as they are
    symmetric under affine transformations of the simplex unto itself in 2D and
    3D. As a result, SCP lattice arrangements have been utilized to formulate
    symmetric quadrature rules with Np = 1, 4, 10, 20, 35, and 56 points on the
    3-simplex (Shunn and Ham, 2012). In what follows, the work on the 3-simplex
    is extended, and SCP lattices are employed to identify symmetric quadrature
    rules with Np = 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, and 66 points on the
    2-simplex and Np = 84 points on the 3-simplex. These rules are found to be
    capable of exactly integrating polynomials of up to degree 17 in 2D and up
    to degree 9 in 3D.
    """
    # NOTE(review): only the citation docstring of this class is visible in
    # this chunk; the quadrature point/weight data (and the use of the
    # imported `untangle2`/`sympy`) presumably live outside the visible
    # region -- confirm before relying on this class.
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
11748,
10558,
88,
198,
198,
6738,
764,
16794,
364,
1330,
1418,
9248,
17,
628,
198,
4871,
6484,
2484,
20935,
14731,... | 3.024272 | 412 |
"""
Unittests for asa plugin
Uses the mock_device.py script to test the plugin.
"""
__author__ = "Dave Wapstra <dwapstra@cisco.com>"
import os
import yaml
import unittest
import unicon
from unicon import Connection
from unicon.core.errors import SubCommandFailure
from unicon.mock.mock_device import mockdata_path
# Load the recorded ASA device dialog that backs the mock device.
# mockdata_path is unicon's bundled mock-data directory; the file is opened
# in binary mode and the raw bytes are handed to the YAML parser.
with open(os.path.join(mockdata_path, 'asa/asa_mock_data.yaml'), 'rb') as data:
    mock_data = yaml.safe_load(data.read())
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| [
37811,
198,
3118,
715,
3558,
329,
355,
64,
13877,
198,
198,
5842,
274,
262,
15290,
62,
25202,
13,
9078,
4226,
284,
1332,
262,
13877,
13,
198,
198,
37811,
198,
198,
834,
9800,
834,
796,
366,
27984,
370,
499,
12044,
1279,
67,
86,
499,... | 2.725275 | 182 |
import json
from grafana_backup.dashboardApi import create_folder
| [
11748,
33918,
198,
6738,
7933,
69,
2271,
62,
1891,
929,
13,
42460,
3526,
32,
14415,
1330,
2251,
62,
43551,
628
] | 3.35 | 20 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# Meta-info
Author: Nelson Brochado
Created: 27/02/2016
Updated: 19/09/2017
# Description
Towers of Hanoi is a mathematical game. It consists of 3 rods, and a number of
disks of different sizes, which can slide onto any rod. The game starts with the
disks in a neat stack in ascending order of size on one rod, the smallest at the
top, thus making a conical shape.
The objective of the game is to move the entire stack to another rod, obeying
the following rules:
1. Only 1 disk can be moved at a time.
2. Each move consists of taking the upper disk from one of the stacks and
placing it on top of another stack, i.e. a disk can only be moved if it is
the uppermost disk on its stack.
3. No disk may be placed on top of a smaller disk.
With 3 disks, the game can be solved with at least 7 moves (best case).
The minimum number of moves required to solve a tower of hanoi game
is 2^n - 1, where n is the number of disks.
For simplicity, in the following algorithm
the source (='A'), auxiliary (='B') and destination (='C') rodes are fixed, and
therefore the algorithm always shows the steps to go from 'A' to 'C'.
# References
- https://en.wikipedia.org/wiki/Tower_of_Hanoi
- http://www.cut-the-knot.org/recurrence/hanoi.shtml
- http://stackoverflow.com/questions/105838/real-world-examples-of-recursion
"""
__all__ = ["hanoi"]
def _hanoi(n: int, ls: list, src='A', aux='B', dst='C') -> list:
"""Recursively solve the Towers of Hanoi game for n disks.
The smallest disk, which is the topmost one at the beginning, is called 1,
and the largest one is called n.
src is the start rod where all disks are set in a neat stack in ascending
order.
aux is the third rod.
dst is similarly the destination rod."""
if n > 0:
_hanoi(n - 1, ls, src, dst, aux)
ls.append((n, src, dst))
_hanoi(n - 1, ls, aux, src, dst)
return ls
def hanoi(n: int) -> list:
    """Returns a list L of tuples each of them representing a move to be done,
    for n number of disks and 3 rods.

    L[i] must be done before L[i + 1], for all i.
    L[i][0] := the disk number (or id); numbers start from 1 and go up to n.
    L[i][1] := the source rod from which to move L[i][0].
    L[i][2] := the destination rod to which to move L[i][0].

    The disk with the smallest radius (at the top) is disk number 1, its
    successor in terms of radius' size is disk number 2, and so on, so the
    largest disk is disk number n."""
    assert n >= 0

    def _solve(k: int, src: str, aux: str, dst: str, moves: list) -> list:
        # Classic recursion: shift k-1 disks aside, move disk k, restack.
        if k > 0:
            _solve(k - 1, src, dst, aux, moves)
            moves.append((k, src, dst))
            _solve(k - 1, aux, src, dst, moves)
        return moves

    # Rods are fixed: start 'A', spare 'B', destination 'C'.
    return _solve(n, 'A', 'B', 'C', [])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
2,
30277,
12,
10951,
198,
198,
13838,
25,
12996,
2806,
354,
4533,
198,
198,
41972,
25,
2681,
14,... | 2.922385 | 889 |
# from .trie import Trie
# from .trie import merge
from .trie_advanced import Trie
from .trie_advanced import merge | [
2,
422,
764,
83,
5034,
1330,
309,
5034,
198,
2,
422,
764,
83,
5034,
1330,
20121,
198,
6738,
764,
83,
5034,
62,
32225,
2903,
1330,
309,
5034,
198,
6738,
764,
83,
5034,
62,
32225,
2903,
1330,
20121
] | 3.108108 | 37 |
from pyspark import SparkConf, SparkContext
from google.cloud import storage
import json
from datetime import datetime
SAVE_DIR = 'categories'  # subdirectory name for the output CSV
GCP_BUCKET = 'gs://big_data_econ'
sc = SparkContext.getOrCreate()
# Read in all json files into an RDD
# Use 'wholeTextFiles' to prevent fragmenting of json objects
months = sc.wholeTextFiles(GCP_BUCKET + '/articles_subset/*.json')
# Jsonnify each text string into a dictionary
# (wholeTextFiles yields (path, content) pairs, hence x[1])
months = months.map(lambda x: json.loads(x[1]))
# Each parsed file is a list of articles; flatten to one RDD of articles
articles = months.flatMap(lambda x: x)
# Map each article through get_year_categories
# NOTE(review): get_year_categories is not defined or imported in this
# chunk -- confirm it is defined elsewhere before running.
categories = articles.map(lambda article: get_year_categories(article))
# Count occurrences of each key emitted above (word-count pattern).
# NOTE(review): the original comment claimed "average article wordcount",
# which does not match this computation -- it produces counts.
year_categories = categories.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
# Flatten ((year, category), count) into a 3-column DataFrame
df = year_categories.map(lambda x: (x[0][0], x[0][1], x[1])).toDF()
df = df.selectExpr('_1 as year', '_2 as category', '_3 as count')
# Save data to Google Cloud Bucket as a single CSV part file
df.coalesce(1).write.format('csv').save('gs://big_data_econ/csvs/' + SAVE_DIR)
6738,
279,
893,
20928,
1330,
17732,
18546,
11,
17732,
21947,
198,
6738,
23645,
13,
17721,
1330,
6143,
198,
11748,
33918,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
4090,
6089,
62,
34720,
796,
705,
66,
26129,
6,
198,
38,
8697,
... | 2.927746 | 346 |
import unittest
from .. import *
| [
11748,
555,
715,
395,
198,
6738,
11485,
1330,
1635,
628
] | 3.4 | 10 |
from django.urls import path
from . import views
app_name = 'posts'  # URL namespace: reverse these as 'posts:<name>'
urlpatterns = [
    # list all posts
    path('', views.post_list, name='post-list'),
    # like a single post, identified by its primary key
    path('<int:pk>/like/', views.post_like, name='post-like'),
    # create a new post
    path('create/', views.post_create, name='post-create'),
    # create a comment on the post identified by post_pk
    path('<int:post_pk>/comments/create/', views.comment_create, name='comment-create'),
]
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
1330,
5009,
198,
198,
1324,
62,
3672,
796,
705,
24875,
6,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
5009,
13,
7353,
62,
4868,
11,
1438,
... | 2.656489 | 131 |
from django.contrib import admin
from .models import Product, UserProduct, Recipe, RecipeIngredient, UserShoppingList
# Register the app's models with the Django admin site, each with a custom
# ModelAdmin class.
# NOTE(review): the *Admin classes referenced below (RecipeAdmin, etc.) are
# neither defined nor imported in this chunk -- confirm they exist elsewhere,
# otherwise these lines raise NameError at import time.
admin.site.register(Recipe, RecipeAdmin)
admin.site.register(UserProduct, UserProductAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(RecipeIngredient, RecipeIngredientAdmin)
admin.site.register(UserShoppingList, UserShoppingListAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
8721,
11,
11787,
15667,
11,
26694,
11,
26694,
27682,
445,
1153,
11,
11787,
2484,
33307,
8053,
628,
628,
628,
198,
198,
28482,
13,
15654,
13,
30238,
7,
3... | 3.513761 | 109 |
import requests
from elitedata.fixtures.fixture_fixer import fix_all
__author__ = 'Jingyu_Yao'
# eddb.io data locations
# URLs of the eddb.io v3 archive dumps to ingest.
commodities = "http://eddb.io/archive/v3/commodities.json"
systems = "http://eddb.io/archive/v3/systems.json"
stations_lite = "http://eddb.io/archive/v3/stations_lite.json"
# Local directory where downloaded fixtures are written.
fixture_directory = "elitedata/fixtures/"
if __name__ == "__main__":
    # NOTE(review): ingest() is not defined or imported in this chunk --
    # confirm it exists elsewhere; as written this raises NameError.
    ingest()
11748,
7007,
198,
198,
6738,
1288,
863,
1045,
13,
69,
25506,
13,
69,
9602,
62,
13049,
263,
1330,
4259,
62,
439,
198,
198,
834,
9800,
834,
796,
705,
41,
278,
24767,
62,
56,
5488,
6,
198,
198,
2,
1225,
9945,
13,
952,
1366,
7064,
1... | 2.550336 | 149 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 15:41:33 2019
@author: nooteboom
"""
import numpy as np
import matplotlib.pylab as plt
from netCDF4 import Dataset
import matplotlib
import cartopy.crs as ccrs
import seaborn as sns
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import matplotlib.ticker as mticker
import math
from numba import jit
import cartopy.mpl.ticker as cticker
from matplotlib.lines import Line2D
sns.set_style("whitegrid")
sns.set_context("paper")
fs = 17
font = {'size' : fs}
matplotlib.rc('font', **font)
#variables
sp = 6
dd = 10
projection = ccrs.PlateCarree(180)
exte = [1, 360, -75, 72]
exte2 = [-179, 181, -75, 72]
Cs = 2.0
ddeg = 1
cmap2 = 'coolwarm' # For the surface area
cmap3 = 'hot'# For the average travel distance
vssurf = [0,17]
#%%
@jit(nopython=True)
@jit(nopython=True)
@jit(nopython=True)
@jit(nopython=True)
@jit(nopython=True)
#%%
maxminlat = -76
minmaxlat = 71
minmaxlon = 359 - 180
maxminlon = 0 - 180
avgd, surf, Lons, Lats = calc_fields(name = '/Volumes/HardDisk/POP/output/highres/timeseries/timeseries_per_location_ddeg1_sp6_dd10_tempresmonmean.nc')
idxlat = np.logical_and(Lats>=maxminlat, Lats<=minmaxlat)
idxlon = np.logical_and(Lons>=maxminlon, Lons<=minmaxlon)
assert (idxlon==True).all()
surf = np.flip(surf[idxlat],0)
land = np.full(surf.shape, np.nan); land[surf==0] = 1;
surf[surf==0] = np.nan
surf_temp = np.nanmean(surf) / 10**5.
#%%
avgd, surf, Lons, Lats = calc_fields(name = '/Volumes/HardDisk/POP/output/highres/timeseries/timeseries_per_location_ddeg1_sp25_dd10_tempresmonmean.nc')
idxlat = np.logical_and(Lats>=maxminlat, Lats<=minmaxlat)
idxlon = np.logical_and(Lons>=maxminlon, Lons<=minmaxlon)
assert (idxlon==True).all()
surf = np.flip(surf[idxlat],0)
surf[surf==0] = np.nan
surf_temp2 = np.nanmean(surf) / 10**5.
avgd50, surf50hr, Lons, Lats = calc_fields(name = '/Volumes/HardDisk/POP/output/highres/timeseries/timeseries_per_location_ddeg1_sp25_dd10_tempres5.nc')
surf50hr[surf50hr==0] = np.nan
surf50mean = np.nanmean(surf50hr) / 10**5.
avgd, surf, Lons, Lats = calc_fields(name = '/Volumes/HardDisk/POP/output/highres/timeseries/timeseries_per_location_ddeg%d_sp%d_dd%d_tempres5_ds2.nc'%(ddeg,sp,dd))
idxlat = np.logical_and(Lats>=maxminlat, Lats<=minmaxlat)
idxlon = np.logical_and(Lons>=maxminlon, Lons<=minmaxlon)
assert (idxlon==True).all()
surf_highres = np.flip(surf[idxlat],0)
highres_surf = surf_highres.copy()
highres_surf[highres_surf==0] = np.nan
highres_surf = np.nanmean(highres_surf) / 10**5.
print('highres_surf: ',highres_surf)
avgd, surf, Lons, Lats = calc_fields(name = '/Volumes/HardDisk/POP/output/lowres/timeseries/timeseries_per_location_smagorinksi_wn_Cs%.1f_ddeg%d_sp%d_dd%d.nc'%(0.0,ddeg,sp,dd))
idxlat = np.logical_and(Lats>=maxminlat, Lats<=minmaxlat)
idxlon = np.logical_and(Lons>=maxminlon, Lons<=minmaxlon)
assert (idxlon==True).all()
surf_lr = np.flip(surf[idxlat],0)
avgd, surf, Lons, Lats = calc_fields(name = '/Volumes/HardDisk/POP/output/lowres/timeseries/timeseries_per_location_smagorinksi_wn_Cs%.1f_ddeg%d_sp%d_dd%d.nc'%(Cs,ddeg,sp,dd))
idxlat = np.logical_and(Lats>=maxminlat, Lats<=minmaxlat)
idxlon = np.logical_and(Lons>=maxminlon, Lons<=minmaxlon)
assert (idxlon==True).all()
surf_lr2 = np.flip(surf[idxlat],0)
sns.set_style("darkgrid")
sns.set_context("paper")
fs = 14 # fontsize
si = 141
lw = 2 # linewidth
sp1 = 6
sp2 = 25
color1 = 'k'
color2 = 'red'
color3 = 'k'
#% Load the data
CS = np.array([0., 0.25, 0.5, 1.0, 2.0, 5.0])
CS50 = np.array([0., 0.25, 0.5, 1.0, 2.0, 5.0])
cs = Cs
sur = np.zeros(len(CS))
sur50 = np.zeros(len(CS50))
surgm = np.zeros(len(CS))
sur50gm = np.zeros(len(CS50))
for j in range(len(CS)):
if(CS[j]!=0.25):
avgd, surf, Lons, Lats = calc_fields(name = '/Volumes/HardDisk/POP/output/lowres/timeseries/timeseries_per_location_smagorinksi_wn_Cs%.1f_ddeg1_sp%d_dd10.nc'%(CS[j],sp1))
else:
avgd, surf, Lons, Lats = calc_fields(name = '/Volumes/HardDisk/POP/output/lowres/timeseries/timeseries_per_location_smagorinksi_wn_Cs%.2f_ddeg1_sp%d_dd10.nc'%(CS[j],sp1))
idxlat = np.logical_and(Lats>=maxminlat, Lats<=minmaxlat)
idxlon = np.logical_and(Lons>=maxminlon, Lons<=minmaxlon)
assert (idxlon==True).all()
if(CS[j]==cs):
surf_cs = np.flip(surf[idxlat],0)
surf = surf[idxlat]
surf[surf==0] = np.nan
sur[j] = np.nanmean(surf) / 10**5.
if(CS[j]!=0.25):
avgd, surf, Lons, Lats = calc_fields(name = '/Volumes/HardDisk/POP/output/lowres/timeseries/timeseries_per_location_smagorinksi_wn_gm_Cs%.1f_ddeg1_sp%d_dd10.nc'%(CS[j],sp1))
else:
avgd, surf, Lons, Lats = calc_fields(name = '/Volumes/HardDisk/POP/output/lowres/timeseries/timeseries_per_location_smagorinksi_wn_gm_Cs%.2f_ddeg1_sp%d_dd10.nc'%(CS[j],sp1))
idxlat = np.logical_and(Lats>=maxminlat, Lats<=minmaxlat)
idxlon = np.logical_and(Lons>=maxminlon, Lons<=minmaxlon)
assert (idxlon==True).all()
if(CS[j]==cs):
surf_gm = np.flip(surf[idxlat],0)
surf = surf[idxlat]
surf[surf==0] = np.nan
surgm[j] = np.nanmean(surf) / 10**5.
for j in range(len(CS50)):
if(CS50[j] != 0.25):
avgd, surf, Lons, Lats = calc_fields(name = '/Volumes/HardDisk/POP/output/lowres/timeseries/timeseries_per_location_smagorinksi_wn_Cs%.1f_ddeg1_sp%d_dd10.nc'%(CS50[j],sp2))
else:
avgd, surf, Lons, Lats = calc_fields(name = '/Volumes/HardDisk/POP/output/lowres/timeseries/timeseries_per_location_smagorinksi_wn_Cs%.2f_ddeg1_sp%d_dd10.nc'%(CS50[j],sp2))
idxlat = np.logical_and(Lats>=maxminlat, Lats<=minmaxlat)
idxlon = np.logical_and(Lons>=maxminlon, Lons<=minmaxlon)
assert (idxlon==True).all()
surf = surf[idxlat]
surf[surf==0] = np.nan
sur50[j] = np.nanmean(surf) / 10**5.
if(CS[j]!=0.25):
avgd, surf, Lons, Lats = calc_fields(name = '/Volumes/HardDisk/POP/output/lowres/timeseries/timeseries_per_location_smagorinksi_wn_gm_Cs%.1f_ddeg1_sp%d_dd10.nc'%(CS50[j],sp2))
else:
avgd, surf, Lons, Lats = calc_fields(name = '/Volumes/HardDisk/POP/output/lowres/timeseries/timeseries_per_location_smagorinksi_wn_gm_Cs%.2f_ddeg1_sp%d_dd10.nc'%(CS50[j],sp2))
idxlat = np.logical_and(Lats>=maxminlat, Lats<=minmaxlat)
idxlon = np.logical_and(Lons>=maxminlon, Lons<=minmaxlon)
assert (idxlon==True).all()
surf = surf[idxlat]
surf[surf==0] = np.nan
sur50gm[j] = np.nanmean(surf) / 10**5.
plt.plot(CS, sur)
plt.plot(CS50, sur50)
plt.plot(CS, surgm, '--')
plt.plot(CS50, sur50gm, '--')
plt.scatter([0], [surf_temp])
plt.show()
#%% start figure
fig = plt.figure(figsize=(19,15))
grid = plt.GridSpec(3, 24, wspace=0., hspace=0.4)
#% subplot (a)
ax = plt.subplot(grid[0, :12], projection=projection)#plt.subplot(2,2,1, projection=projection)
plt.title('(a) $R_{0.1}$', fontsize=fs)
g = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5, linestyle='--')
g.xlocator = mticker.FixedLocator([-180,-90, -0, 90, 180])
g.xlabels_top = False
g.ylabels_right = False
g.xlabels_bottom = False
g.xlabel_style = {'fontsize': fs}
g.ylabel_style = {'fontsize': fs}
g.xformatter = LONGITUDE_FORMATTER
g.yformatter = LATITUDE_FORMATTER
g.ylocator = mticker.FixedLocator([-75,-50,-25, 0, 25, 50, 75, 100])
ax.set_extent(exte, ccrs.PlateCarree())
plt.imshow(surf_highres/10.**5, vmin=vssurf[0], vmax=vssurf[1], extent = exte2, transform=ccrs.PlateCarree(),
cmap=cmap3, zorder = 0)
#land = np.full(avgd.shape, np.nan); land[surf==0] = 1;
plt.imshow(land, vmin=0, vmax=1.6, extent = exte2, transform=ccrs.PlateCarree(), cmap='binary', zorder = 0)
#% subplot (b)
ax = plt.subplot(grid[0, 12:], projection=projection)#plt.subplot(2,2,2, projection=projection)
plt.title('(b) $R_{1m}$', fontsize=fs)
g = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5, linestyle='--')
g.xlabels_top = False
g.ylabels_right = False
g.ylabels_left = False
g.xlabels_bottom = False
g.xlabel_style = {'fontsize': fs}
g.ylabel_style = {'fontsize': fs}
g.xformatter = LONGITUDE_FORMATTER
g.yformatter = LATITUDE_FORMATTER
g.xlocator = mticker.FixedLocator([-180,-90, -0, 90, 180])
g.ylocator = mticker.FixedLocator([-75,-50,-25, 0, 25, 50, 75, 100])
ax.set_extent(exte, ccrs.PlateCarree())
#ax.set_xticks([0., 90., 180., 270., 360.], crs=ccrs.PlateCarree())
#ax.set_xticklabels([0., 90., 180., 270., 360.], fontsize=fs)
lon_formatter = cticker.LongitudeFormatter()
lat_formatter = cticker.LatitudeFormatter()
ax.xaxis.set_major_formatter(lon_formatter)
ax.yaxis.set_major_formatter(lat_formatter)
ax.grid(linewidth=2, color='black', alpha=0., linestyle='--')
plt.imshow(surf_lr/10**5., vmin=vssurf[0], vmax=vssurf[1], extent = exte2, transform=ccrs.PlateCarree(),
cmap=cmap3, zorder = 0)
#land = np.full(avgd.shape, np.nan); land[surf==0] = 1;
plt.imshow(land, vmin=0, vmax=1.6, extent = exte2, transform=ccrs.PlateCarree(), cmap='binary', zorder = 0)
#% subplot (c)
ax = plt.subplot(grid[1, :12], projection=projection)#plt.subplot(2,2,3, projection=projection)
plt.title('(c) $R_{1md}$, $c_s$=%.1f'%(Cs), fontsize=fs)
g = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5, linestyle='--')
g.xlabels_top = False
g.ylabels_right = False
g.xlabels_bottom = False
g.xlabel_style = {'fontsize': fs}
g.ylabel_style = {'fontsize': fs}
g.xformatter = LONGITUDE_FORMATTER
g.yformatter = LATITUDE_FORMATTER
g.xlocator = mticker.FixedLocator([-180,-90, -0, 90, 180])
g.ylocator = mticker.FixedLocator([-75,-50,-25, 0, 25, 50, 75, 100])
ax.set_extent(exte, ccrs.PlateCarree())
ax.set_xticks([0., 90., 180., 270., 360.], crs=ccrs.PlateCarree())
ax.set_xticklabels([0., 90., 180., 270., 360.], fontsize=fs)
lon_formatter = cticker.LongitudeFormatter()
lat_formatter = cticker.LatitudeFormatter()
ax.xaxis.set_major_formatter(lon_formatter)
ax.yaxis.set_major_formatter(lat_formatter)
ax.grid(linewidth=2, color='black', alpha=0., linestyle='--')
im2 = plt.imshow(surf_lr2/10**5., vmin=vssurf[0], vmax=vssurf[1], extent = exte2, transform=ccrs.PlateCarree(),
cmap=cmap3, zorder = 0)
#land = np.full(avgd.shape, np.nan); land[surf==0] = 1;
plt.imshow(land, vmin=0, vmax=1.6, extent = exte2, transform=ccrs.PlateCarree(), cmap='binary', zorder = 0)
#% subplot (d)
ax = plt.subplot(grid[1, 12:], projection=projection)#plt.subplot(2,2,3, projection=projection)
plt.title('(d) $R_{1mdb}$, $c_s$=%.1f'%(Cs), fontsize=fs)
g = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5, linestyle='--')
g.xlabels_top = False
g.ylabels_right = False
g.ylabels_left = False
g.xlabels_bottom = False
g.xlabel_style = {'fontsize': fs}
g.ylabel_style = {'fontsize': fs}
g.xformatter = LONGITUDE_FORMATTER
g.yformatter = LATITUDE_FORMATTER
g.xlocator = mticker.FixedLocator([-180,-90, -0, 90, 180])
g.ylocator = mticker.FixedLocator([-75,-50,-25, 0, 25, 50, 75, 100])
ax.set_extent(exte, ccrs.PlateCarree())
ax.set_xticks([0., 90., 180., 270., 360.], crs=ccrs.PlateCarree())
ax.set_xticklabels([0., 90., 180., 270., 360.], fontsize=fs)
lon_formatter = cticker.LongitudeFormatter()
lat_formatter = cticker.LatitudeFormatter()
ax.xaxis.set_major_formatter(lon_formatter)
ax.yaxis.set_major_formatter(lat_formatter)
ax.grid(linewidth=2, color='black', alpha=0., linestyle='--')
im2 = plt.imshow(surf_gm/10**5., vmin=vssurf[0], vmax=vssurf[1], extent = exte2, transform=ccrs.PlateCarree(),
cmap=cmap3, zorder = 0)
#land = np.full(avgd.shape, np.nan); land[surf==0] = 1;
plt.imshow(land, vmin=0, vmax=1.6, extent = exte2, transform=ccrs.PlateCarree(), cmap='binary', zorder = 0)
#%
dsWD = [4,7,4,7] # the line dash of the first configuration
dsWD2 = [4,7,4,7] # the line dash of the bolus configuration
ax = plt.subplot(grid[2, 12:-1])
plt.title('(e)', fontsize=fs)
plt.xlabel('$c_s$', fontsize=fs)
a0 = sns.lineplot(x=CS, y=np.full(len(CS),highres_surf), linewidth=lw,
color=color1, zorder=1)
a0.lines[0].set_dashes(dsWD)
a0 = sns.lineplot(x=CS, y=np.full(len(CS),surf_temp), linewidth=lw,
color=color1, zorder=1)
a0.lines[1].set_linestyle(":")
a0 = sns.lineplot(x=CS, y=np.full(len(CS),surf50mean), linewidth=lw,
color=color2, zorder=1)
a0.lines[2].set_dashes(dsWD)
a0 = sns.lineplot(x=CS, y=np.full(len(CS),surf_temp2), linewidth=lw,
color=color2, zorder=1)
a0.lines[3].set_linestyle(":")
sns.lineplot(x=CS, y=sur, color=color1, linewidth=lw, zorder=10)
sns.scatterplot(x=CS, y=sur, color=color1, s=si, zorder=11)
sns.lineplot(x=CS,y=surgm, linewidth = lw, ax=ax, color=color1,
zorder=9)
sns.scatterplot(x=CS,y=surgm, ax=ax, color=color1, s=si, zorder=12,
legend=False, marker="^")
sns.lineplot(x=CS50, y=sur50, color=color2, linewidth=lw, zorder=10)
sns.scatterplot(x=CS50, y=sur50, color=color2, s=si, zorder=11)
sns.lineplot(x=CS50,y=sur50gm, linewidth = lw, ax=ax, color=color2,
zorder=9)
sns.scatterplot(x=CS50,y=sur50gm, ax=ax, color=color2, s=si, zorder=12,
legend=False, marker="^")
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(fs)
ax.set_ylim(0,7.5)
ax.set_ylabel('surface area (10$^5$ km$^2$)', fontsize=fs, color = color1)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(fs)
lw = 2
colo = 'k'
legend_el = [Line2D([0], [0], dashes=dsWD, color=colo, lw=lw, label='$R_{0.1}$'),
Line2D([0], [0], linestyle=':', color=colo, lw=lw, label='$R_{0.1m}$'),
Line2D([0], [0], linestyle='-', marker='o', markersize=8, color=colo, lw=lw, label='$W_d(R_{0.1}$, $R_{1m}$/ $R_{1md}$)'),
Line2D([0], [0], linestyle='-', marker='^', markersize=8, color=colo, lw=lw, label='$W_d(R_{0.1}$, $R_{1mb}$/ $R_{1mdb}$)')]
#first_legend = plt.legend(handles=legend_el, title='Configuration',loc=4, fontsize=fs, bbox_to_anchor=(0., .01, 1., .022))
first_legend = ax.legend(handles=legend_el, title='Configuration', fontsize=fs, loc='center right', bbox_to_anchor=(-0.1, 0.2))
ax2 = plt.gca().add_artist(first_legend)
legend_el = [Line2D([0], [0], linestyle='solid', color=color1, lw=lw, label='$w_f=6$'),
Line2D([0], [0], linestyle='solid', color=color2, lw=lw, label='$w_f=25$')]
#plt.legend(handles=legend_el, title='Sinking speed (m/day)',loc=4, fontsize=fs, bbox_to_anchor=(0., .52, 1., .102))
ax.legend(handles=legend_el, title='Sinking speed (m/day)', fontsize=fs, loc='center right', bbox_to_anchor=(-0.1, 0.65))
#% final
#fig.subplots_adjust(bottom=0.17)
#cbar_ax = fig.add_axes([0.11, 0.05, 0.35, 0.07])
#cbar_ax.set_visible(False)
#cbar = fig.colorbar(im2, ax=cbar_ax, orientation = 'horizontal', fraction = 1.2)
#cbar.ax.xaxis.set_label_position('bottom')
#cbar.ax.set_xlabel('$10^5$ km$^2$', fontsize=fs)
#cbar.ax.tick_params(labelsize=fs)
#cbar.set_ticklabels([1,2,3,4])
#fig.subplots_adjust(bottom=0.17)
cbar_ax = fig.add_axes([0.135, 0.285, 0.35, 0.07])
cbar_ax.set_visible(False)
cbar = fig.colorbar(im2, ax=cbar_ax, orientation = 'horizontal', fraction = 1.2,
aspect=18)
cbar.ax.xaxis.set_label_position('bottom')
cbar.ax.set_xlabel('$10^5$ km$^2$', fontsize=fs)
cbar.ax.tick_params(labelsize=fs)
plt.savefig('figure3_withandwithoutbolus.pdf',bbox_inches='tight',pad_inches=0)
plt.show() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
5979,
1467,
1315,
25,
3901,
25,
2091,
13130,
198,
198,
31,
9800,
25,
645,
1258,
21... | 2.082483 | 7,444 |
# Copyright 2021 Spencer Phillip Young
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides the clipboard functionality for Linux via ``xclip``
"""
import warnings
from .base import ClipboardBase, ClipboardSetupException, ClipboardException
from typing import Union
import shutil
import subprocess
| [
2,
220,
15069,
33448,
15971,
29470,
6960,
198,
2,
198,
2,
220,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
220,
220,
220,
345,
743,
407,
779,
428,
2393,
2845,
28... | 3.556962 | 237 |
import autode as ade
import numpy as np
from autode.methods import ORCA
from autode.mol_graphs import make_graph
from autode.pes.pes_2d import PES2d
ade.Config.n_cores = 24
n_points = 10
if __name__ == '__main__':
    # Load reactant/product geometries from xyz files; overall charge -1
    reac = ade.Reactant('sn2_init.xyz', charge=-1)
    make_graph(reac)
    prod = ade.Product('sn2_final.xyz', charge=-1)
    make_graph(prod)
    # 2D PES scan over the forming F-C and breaking C-Cl distances
    # (n_points x n_points grid; distances in the units autode expects,
    # presumably angstroms -- TODO confirm)
    pes = PES2d(reac, prod,
                r1s=np.linspace(3.4, 1.3, n_points),
                r1_idxs=(0, 2),  # F-C
                r2s=np.linspace(1.7, 2.9, n_points),
                r2_idxs=(2, 1))  # C-Cl
    # Run constrained optimisations at every grid point with ORCA
    pes.calculate(name='orca_sn2_surface',
                  method=ORCA(),
                  keywords=ade.OptKeywords(['PBE0', 'ma-def2-SVP', 'LooseOpt']))
    # Collect the grid of energies and save as a plain-text matrix
    energies = np.zeros(shape=(n_points, n_points))
    for i in range(n_points):
        for j in range(n_points):
            energies[i, j] = pes.species[i, j].energy
    np.savetxt('orca_sn2_surface.txt', energies)
| [
11748,
1960,
1098,
355,
512,
68,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1960,
1098,
13,
24396,
82,
1330,
6375,
8141,
198,
6738,
1960,
1098,
13,
43132,
62,
34960,
82,
1330,
787,
62,
34960,
198,
6738,
1960,
1098,
13,
12272,
13,
... | 1.850277 | 541 |
from __future__ import unicode_literals
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from djblets.auth.views import register
from djblets.configforms.views import ConfigPagesView
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.decorators import augment_method_from
from reviewboard.accounts.backends import get_enabled_auth_backends
from reviewboard.accounts.forms.registration import RegistrationForm
from reviewboard.accounts.pages import get_page_classes
@csrf_protect
def account_register(request, next_url='dashboard'):
    """Route the user to the appropriate registration flow.

    When the primary authentication backend supports registration and
    the site has registration enabled, render the standard registration
    form; otherwise redirect the user to the login page.
    """
    config = SiteConfiguration.objects.get_current()
    backends = get_enabled_auth_backends()

    registration_open = (backends[0].supports_registration and
                         config.get("auth_enable_registration"))

    if not registration_open:
        # Registration is unavailable for this authentication setup.
        return HttpResponseRedirect(reverse("login"))

    return register(request, next_page=reverse(next_url),
                    form_class=RegistrationForm)
class MyAccountView(ConfigPagesView):
"""Displays the My Account page containing user preferences.
The page will be built based on registered pages and forms. This makes
it easy to plug in new bits of UI for the page, which is handy for
extensions that want to offer customization for users.
"""
title = _('My Account')
css_bundle_names = [
'account-page',
]
js_bundle_names = [
'3rdparty-jsonlint',
'config-forms',
'account-page',
]
@method_decorator(login_required)
@augment_method_from(ConfigPagesView)
@property
@property
@cached_property
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
... | 3.036603 | 683 |
import logging
from typing import List
from pythoncommons.file_utils import FileUtils
from pythoncommons.zip_utils import ZipFileUtils
from yarndevtools.common.shared_command_utils import CommandType
from yarndevtools.constants import (
LATEST_DATA_ZIP_LINK_NAME,
)
LOG = logging.getLogger(__name__)
| [
11748,
18931,
198,
6738,
19720,
1330,
7343,
198,
6738,
21015,
9503,
684,
13,
7753,
62,
26791,
1330,
9220,
18274,
4487,
198,
6738,
21015,
9503,
684,
13,
13344,
62,
26791,
1330,
38636,
8979,
18274,
4487,
198,
198,
6738,
331,
283,
358,
199... | 3.08 | 100 |
from flask import jsonify, request
from markote.api.api_blueprint import api_blueprint
from markote.oauth import oauth
@api_blueprint.route('/notebooks/<notebook_id>/sections', methods=['POST'])
@api_blueprint.route('/sections/<section_id>/pages', methods=['GET'])
| [
6738,
42903,
1330,
33918,
1958,
11,
2581,
198,
6738,
1317,
1258,
13,
15042,
13,
15042,
62,
17585,
4798,
1330,
40391,
62,
17585,
4798,
198,
6738,
1317,
1258,
13,
12162,
1071,
1330,
267,
18439,
628,
198,
31,
15042,
62,
17585,
4798,
13,
... | 2.988889 | 90 |
import unicode_tex
| [
11748,
28000,
1098,
62,
16886,
198
] | 3.166667 | 6 |
# Copyright SYS113 2019. MIT license , see README.md file.
# import libraries
from re import search
from traceback import format_exc
from tzlocal import get_localzone
from datetime import datetime
from platform import system, release, machine
from getpass import getuser
from os.path import isfile
from inspect import getframeinfo, stack
from negar.countriesWithTheirCapital import countries
# helper function for country capital
# helper function for negar module errors printing
# helper function for justify text center with fixed length
# helper function for create header row
# helper function for create each log row
# create text log function ...
# create error log function ...
| [
2,
15069,
311,
16309,
16616,
13130,
13,
17168,
5964,
837,
766,
20832,
11682,
13,
9132,
2393,
13,
198,
198,
2,
1330,
12782,
220,
198,
6738,
302,
1330,
2989,
198,
6738,
12854,
1891,
1330,
5794,
62,
41194,
198,
6738,
256,
89,
12001,
1330... | 4.254545 | 165 |
import sys
import getopt
import numpy as np
from kid_readout.utils import acquire
if __name__ == '__main__':
# Defaults
f_initial = np.load('/data/readout/resonances/current.npy')
shift_ppm = 0
suffix = "temperature"
# Add option?
attenuation_list = [41, 38, 35, 32, 29, 26, 23]
try:
opts, args = getopt.getopt(sys.argv[1:], "f:s:x:", ("initial=", "shift_ppm=", "suffix="))
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-f", "--initial"):
f_off = np.load(arg)
elif opt in ("-s", "--shift_ppm"):
shift_ppm = float(arg)
elif opt in ("-x", "--suffix"):
suffix = arg
f_initial *= 1 - 1e-6 * shift_ppm
acquire.sweeps_and_streams(f_initial, attenuation_list, suffix=suffix)
| [
11748,
25064,
198,
11748,
651,
8738,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
5141,
62,
961,
448,
13,
26791,
1330,
12831,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
220,
220,
220,
1303,
2896,
13185,
... | 2.105528 | 398 |
# Definition for singly-linked list.
| [
2,
30396,
329,
1702,
306,
12,
25614,
1351,
13,
198,
220,
220,
220,
220,
220,
220,
220,
220
] | 2.5 | 18 |
# ==========================
# general python modules
# ==========================
import feedparser
import os
import numpy as np
from functools import wraps
from threading import Thread
import sys
# ==========================
# python-temegam-bot modules
# ==========================
from telegram.ext import Updater, CommandHandler
import telegram as telegram
# ===============================
# create necessary folders
# ===============================
if not os.path.exists('users'):
os.makedirs('users')
# ===============================
# admin list
# ===============================
fid = open('./admin_only/admin_list.txt', 'r')
LIST_OF_ADMINS = [int(adm) for adm in fid.readline().split()]
fid.close()
# ==========================
# The following function reads the TOKEN from a file.
# This file is not incuded in the github-repo for obvious reasons
# ==========================
# ==============================================================
# function to get current release released in Kantjer's web site
# ==============================================================
# ===============================================
# assign the latest release to a global variable
# ===============================================
LatestABC, LatestMsg = get_current_release()
# ==========================
# restriction decorator
# ==========================
# ==========================
# start - welcome message
# ==========================
# ==========================
# help - short guide
# ==========================
# =====================================================
# check the current release and send message to users
# if an update is fount
# =====================================================
# =====================================================
# notify to all users the current release
# =====================================================
@restricted
# =====================================================
# send message to all active users
# =====================================================
@restricted
# =========================================
# bot - main
# =========================================
if __name__ == '__main__':
main()
| [
2,
36658,
2559,
28,
198,
2,
220,
2276,
21015,
13103,
198,
2,
36658,
2559,
28,
198,
11748,
3745,
48610,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
4704,
278,
1330,
14122,
... | 4.854945 | 455 |
import gym
import numpy as np
from actor_critic_traces import ActorCriticEligibilityTraces
from continuous_actor_critic_tile_coding import ContinuousActorCriticTileCoding
NUM_TILINGS = 8
if __name__ == "__main__":
do_demo()
| [
11748,
11550,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
8674,
62,
22213,
291,
62,
2213,
2114,
1330,
27274,
18559,
291,
36,
4604,
2247,
2898,
2114,
198,
6738,
12948,
62,
11218,
62,
22213,
291,
62,
40927,
62,
66,
7656,
1330,
4... | 2.936709 | 79 |
import numpy as np
from keras import backend as K
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
import csv
from sklearn.neighbors import KDTree
import matplotlib.pyplot as plt
from model.config import *
from tensorflow.python.ops import *
import seaborn as sns
import pandas as pd
# acc: p=1(MSE): 0.74 p=2: 0.80,0.74,0.78 p=3: 0.75,0.78,0.78,0.75 p=4: 0.76 p=5: 0.75 p=10: 0.69
| [
11748,
299,
32152,
355,
45941,
198,
6738,
41927,
292,
1330,
30203,
355,
509,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
1855,
11518,
3351,
36213,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
269,
21370,
198,
6738,
1341,
3572... | 2.409836 | 183 |
# coding: utf-8
'''
Created on 14 февр. 2018 г.
@author: keen
'''
import pandas
import numpy
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
import math
from scipy import sparse
from sklearn.preprocessing import normalize
from sklearn.impute import SimpleImputer
import CO2_tree as co2t
import CO2_forest as co2f
from numpy import random
from random import randint
from sklearn.metrics import log_loss
from sklearn.model_selection import KFold
kf = KFold(n_splits=5)
#from memory_profiler import profile
#@profile
#tbl=pandas.read_csv("BNP/train.csv",sep=',')
#mtx = tbl.as_matrix()
#x_mtx = mtx[:,2:]
#y_mtx = mtx[:,1]
#y = numpy.asarray(y_mtx,dtype=int)
#for i in xrange(y.shape[0]):
# y[i] = y[i] + 1
#res_arr = my_func(x_mtx)
#res_arr = Imputer(strategy='median',copy=False,axis=0).fit_transform(res_arr)
#x = normalize(res_arr,axis=0)
#numpy.save("BNP_X",x.data)
#numpy.save("BNP_IndX",x.indices)
#numpy.save("BNP_PtrX",x.indptr)
#numpy.save("BNP_DataY",y)
test()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
7061,
6,
198,
41972,
319,
1478,
220,
141,
226,
16843,
38857,
21169,
13,
2864,
12466,
111,
13,
198,
198,
31,
9800,
25,
13795,
198,
7061,
6,
198,
198,
11748,
19798,
292,
198,
11748,
299,
321... | 2.322222 | 450 |
import torch
import numpy as np
import torch.nn as nn
| [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
13,
20471,
355,
299,
77,
628,
198
] | 3.111111 | 18 |
# Generated by Django 2.0.7 on 2018-07-14 12:01
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
22,
319,
2864,
12,
2998,
12,
1415,
1105,
25,
486,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
6... | 2.818182 | 44 |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy, Model
from config import config
# base class shared by all models. Needed to instantiate SQLAlchemy object.
# globally accessible database connection
db = SQLAlchemy(model_class=BaseModel)
from models import User, Role, Project, ProjectType, Location
| [
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
11,
9104,
198,
198,
6738,
4566,
1330,
4566,
198,
198,
2,
2779,
1398,
4888,
416,
477,
4981,
13,
10664,
276,
284,
9113,
9386,
16363,
2348,
26599,... | 4.025 | 80 |
from add_nums_with_return import add_nums
total = add_nums(1, 2, 3, 4, 5)
print(total) | [
6738,
751,
62,
77,
5700,
62,
4480,
62,
7783,
1330,
751,
62,
77,
5700,
198,
198,
23350,
796,
751,
62,
77,
5700,
7,
16,
11,
362,
11,
513,
11,
604,
11,
642,
8,
198,
4798,
7,
23350,
8
] | 2.289474 | 38 |
import torch
if __name__ == '__main__':
print(get_all_gpu_names()) | [
11748,
28034,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
4798,
7,
1136,
62,
439,
62,
46999,
62,
14933,
28955
] | 2.615385 | 26 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from gammapy.modeling import Parameter, Parameters
@pytest.mark.parametrize(
"method,value,factor,scale",
[
# Check method="scale10" in detail
("scale10", 2e-10, 2, 1e-10),
("scale10", 2e10, 2, 1e10),
("scale10", -2e-10, -2, 1e-10),
("scale10", -2e10, -2, 1e10),
# Check that results are OK for very large numbers
# Regression test for https://github.com/gammapy/gammapy/issues/1883
("scale10", 9e35, 9, 1e35),
# Checks for the simpler method="factor1"
("factor1", 2e10, 1, 2e10),
("factor1", -2e10, 1, -2e10),
],
)
@pytest.fixture()
| [
2,
49962,
739,
257,
513,
12,
565,
682,
347,
10305,
3918,
5964,
532,
766,
38559,
24290,
13,
81,
301,
198,
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
439,
19836,
198,
6738,
... | 2.221289 | 357 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import tempfile
import http
import infra.network
import infra.path
import infra.proc
import infra.net
import infra.e2e_args
import suite.test_requirements as reqs
APP_SCRIPT = """
return {
["POST text"] = [[
export default function(request)
{
if (request.headers['content-type'] !== 'text/plain')
throw new Error('unexpected content-type: ' + request.headers['content-type']);
const text = request.body.text();
if (text !== 'text')
throw new Error('unexpected body: ' + text);
return { body: 'text' };
}
]],
["POST json"] = [[
export default function(request)
{
if (request.headers['content-type'] !== 'application/json')
throw new Error('unexpected content type: ' + request.headers['content-type']);
const obj = request.body.json();
if (obj.foo !== 'bar')
throw new Error('unexpected body: ' + obj);
return { body: { foo: 'bar' } };
}
]],
["POST binary"] = [[
export default function(request)
{
if (request.headers['content-type'] !== 'application/octet-stream')
throw new Error('unexpected content type: ' + request.headers['content-type']);
const buf = request.body.arrayBuffer();
if (buf.byteLength !== 42)
throw new Error(`unexpected body size: ${buf.byteLength}`);
return { body: new ArrayBuffer(42) };
}
]],
["POST custom"] = [[
export default function(request)
{
if (request.headers['content-type'] !== 'foo/bar')
throw new Error('unexpected content type: ' + request.headers['content-type']);
const text = request.body.text();
if (text !== 'text')
throw new Error('unexpected body: ' + text);
return { body: 'text' };
}
]]
}
"""
@reqs.description("Test content types")
if __name__ == "__main__":
args = infra.e2e_args.cli_args()
args.package = "libjs_generic"
run(args)
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
24843,
362,
13,
15,
13789,
13,
198,
11748,
20218,
7753,
198,
11748,
2638,
198,
11748,
1167,
430,
13,
27349,
198,
11748,
1167,
430,
13,
6978,
198,... | 2.652687 | 763 |
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
from sympy import oo
from indeterminatebeam.indeterminatebeam import (
Support,
Beam,
PointTorque,
PointLoad,
PointLoadV,
PointLoadH,
DistributedLoad,
DistributedLoadV,
DistributedLoadH,
TrapezoidalLoad,
TrapezoidalLoadV,
TrapezoidalLoadH,
)
import unittest
##The unit testing refers to example 1 as described in the full documentation.
##In future more complex indeterminate beams should be added to ensure the validity of the program.
##In future more attention should be paid to raising error based on incorrect user values.
if __name__ == '__main__':
unittest.main(verbosity=2) | [
11748,
28686,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
6978,
13,
397,
2777,
776,
10786,
40720,
6,
4008,
198,
198,
6738,
10558,
88,
1330,
267,
78,
198,
6738,
773,
13221,
378,
40045,
13,
521,
13221,
37... | 2.861111 | 252 |
#!/usr/bin/env python
from typing import Optional
from fastapi import FastAPI, HTTPException
import uvicorn
import sys
from os import path
import nest_asyncio
nest_asyncio.apply()
SRC_PATH = './src'
sys.path.insert(0, SRC_PATH)
app = FastAPI()
from hugging_classifier import HuggingClassifier, model_param_bert_pt, NEWS_OM_MODEL, logger
clf = HuggingClassifier(modelParam=model_param_bert_pt, train_mode=False)
clf.load_prediction_model(model_dir=NEWS_OM_MODEL, num_categories=3, labels=['-1','0','1'])
@app.post("/clf_info")
@app.post("/clf_predict/")
try:
logger.info("Start News OM Classification API Server....")
port_num = 9090
uvicorn.run(app, host='0.0.0.0', port=port_num, log_level='info')
except Exception as ex:
logger.error("Can't load Server : ", ex) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
19720,
1330,
32233,
198,
6738,
3049,
15042,
1330,
12549,
17614,
11,
14626,
16922,
198,
11748,
334,
25531,
1211,
198,
11748,
25064,
198,
6738,
28686,
1330,
3108,
198,
11748,
16343,
62... | 2.670068 | 294 |
#!/usr/bin/env python
#import freenect
import rospy
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import cv2
import time
from matplotlib import pyplot as plt
import copy
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
# calibrate kinect to world
# https://github.com/amiller/libfreenect-goodies/blob/master/calibkinect.py
# also check out mouse_and_match
if __name__ == "__main__":
# cv2.VideoCapture.grab()
# cv2.VideoCapture.retrieve()
#
# cv2.VideoCapture.read()
x = Pong_Vision()
x.main() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
220,
198,
198,
2,
11748,
277,
1361,
478,
198,
11748,
686,
2777,
88,
198,
6738,
269,
85,
62,
9458,
1330,
327,
85,
37385,
11,
327,
85,
37385,
12331,
198,
11748,
299,
32152,
355,
45941,
198,... | 2.53304 | 227 |
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_devices
~~~~~~~~~~~
Test the device handling code.
"""
import logging
import mock
import sys
import uuid
from contextlib import nested
from netaddr import IPAddress
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import calico.felix.devices as devices
import calico.felix.futils as futils
import calico.felix.test.stub_utils as stub_utils
# Logger
log = logging.getLogger(__name__)
# Canned mock calls representing clean entry to/exit from a context manager.
M_ENTER = mock.call().__enter__()
M_CLEAN_EXIT = mock.call().__exit__(None, None, None)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
1946,
12,
5304,
17030,
64,
11,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1... | 3.233766 | 385 |
import re
from .base import first_element, BaseResponseMixin, BaseElementWrapper
from lxml import etree
class ProductError(ValueError, BaseElementWrapper):
"""
Error wrapper for any error returned back for any call to the Products api.
"""
namespaces = {
'a': 'http://mws.amazonservices.com/schema/Products/2011-10-01',
'b': 'http://mws.amazonservices.com/schema/Products/2011-10-01/default.xsd'
}
@property
@first_element
@property
@first_element
@property
@first_element
| [
11748,
302,
198,
6738,
764,
8692,
1330,
717,
62,
30854,
11,
7308,
31077,
35608,
259,
11,
7308,
20180,
36918,
2848,
198,
6738,
300,
19875,
1330,
2123,
631,
628,
198,
198,
4871,
8721,
12331,
7,
11395,
12331,
11,
7308,
20180,
36918,
2848,
... | 2.668317 | 202 |
import numpy as np
rgb1 = np.array(np.arange(8 * 8 * 3).reshape((8, 8, 3)), dtype='uint8')
toGray(rgb1)
rgb2 = np.array(
[[[ 0, 0, 2],
[ 2, 3, 5],
[ 6, 7, 9],
[ 9, 10, 12],
[ 11, 12, 14],
[ 14, 15, 17],
[ 17, 18, 20],
[ 20, 21, 23]],
[[ 23, 24, 26],
[ 26, 27, 29],
[ 29, 30, 32],
[ 32, 33, 35],
[ 34, 35, 37],
[ 37, 38, 40],
[ 41, 42, 44],
[ 44, 45, 47]],
[[ 49, 50, 52],
[ 51, 52, 54],
[ 55, 56, 58],
[ 58, 59, 61],
[ 60, 61, 63],
[ 63, 64, 66],
[ 67, 68, 70],
[ 70, 71, 73]],
[[ 71, 72, 74],
[ 74, 75, 77],
[ 78, 79, 81],
[ 81, 82, 84],
[ 83, 84, 86],
[ 86, 87, 89],
[ 90, 91, 93],
[ 92, 93, 95]],
[[ 96, 97, 99],
[ 98, 99, 101],
[102, 103, 105],
[105, 106, 108],
[107, 108, 110],
[110, 111, 113],
[114, 115, 117],
[117, 118, 120]],
[[118, 119, 121],
[121, 122, 124],
[125, 126, 128],
[128, 129, 131],
[130, 131, 133],
[133, 134, 136],
[137, 138, 140],
[139, 140, 142]],
[[144, 145, 147],
[147, 148, 150],
[151, 152, 154],
[154, 155, 157],
[156, 157, 159],
[159, 160, 162],
[162, 163, 165],
[165, 166, 168]],
[[168, 169, 171],
[171, 172, 174],
[174, 175, 177],
[177, 178, 180],
[179, 180, 182],
[182, 183, 185],
[186, 187, 189],
[189, 190, 192]]])
toGray(rgb2)
# rgb1
# 0 3 6 9 12 15 18 21
# 24 27 30 33 36 39 42 45
# 48 51 54 57 60 63 66 69
# 72 75 78 81 84 87 90 93
# 96 99 102 105 108 111 114 117
# 120 123 126 129 132 135 138 141
# 144 147 150 153 156 159 162 165
# 168 171 174 177 180 183 186 189
# rgb2
# 0 2 6 9 11 14 17 20
# 23 26 29 32 34 37 41 44
# 49 51 55 58 60 63 67 70
# 71 74 78 81 83 86 90 92
# 96 98 102 105 107 110 114 117
# 118 121 125 128 130 133 137 139
# 144 147 151 154 156 159 162 165
# 168 171 174 177 179 182 186 189
| [
11748,
299,
32152,
355,
45941,
628,
198,
81,
22296,
16,
796,
45941,
13,
18747,
7,
37659,
13,
283,
858,
7,
23,
1635,
807,
1635,
513,
737,
3447,
1758,
19510,
23,
11,
807,
11,
513,
36911,
288,
4906,
11639,
28611,
23,
11537,
198,
1462,
... | 1.87876 | 1,097 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding: utf-8
# Copyright 2014 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| [
2,
43907,
25,
7400,
11338,
28,
19,
6482,
10394,
28,
19,
2705,
8658,
11338,
28,
19,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
2,
15069,
1946,
11942,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,... | 3.627119 | 177 |
from asciimatics.widgets import Frame, ListBox, Layout, Divider, Text, \
Button, TextBox, Widget
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.exceptions import ResizeScreenError, NextScene, StopApplication
import data
import sys
import sqlite3
last_scene = None
while True:
try:
Screen.wrapper(demo, catch_interrupt=True, arguments=[last_scene])
sys.exit(0)
except ResizeScreenError as e:
last_scene = e.scene | [
6738,
355,
979,
320,
23372,
13,
28029,
11407,
1330,
25184,
11,
7343,
14253,
11,
47639,
11,
4777,
1304,
11,
8255,
11,
3467,
198,
220,
220,
220,
20969,
11,
8255,
14253,
11,
370,
17484,
198,
6738,
355,
979,
320,
23372,
13,
29734,
1330,
... | 2.894118 | 170 |
from datetime import datetime, timedelta
from test.adr_event_generator import AdrEvent, AdrEventStatus, generate_payload
from unittest import mock
import pytest
from freezegun import freeze_time
from oadr2 import controller, event
from oadr2.poll import OpenADR2
from oadr2.schemas import NS_A
TEST_DB_ADDR = "%s/test2.db"
responseCode = 'pyld:eiCreatedEvent/ei:eiResponse/ei:responseCode'
requestID = 'pyld:eiCreatedEvent/ei:eventResponses/ei:eventResponse/pyld:requestID'
optType = 'pyld:eiCreatedEvent/ei:eventResponses/ei:eventResponse/ei:optType'
venID = 'pyld:eiCreatedEvent/ei:venID'
eventResponse = "pyld:eiCreatedEvent/ei:eventResponses/ei:eventResponse"
def test_6_test_event(tmpdir):
"""
VEN, EiEvent Service, oadrDistributeEvent Payload
The presence of any string except “false” in the oadrDisributeEvent
testEvent element is treated as a trigger for a test event.
"""
test_event = AdrEvent(
id="FooEvent",
start=datetime.utcnow()-timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
status=AdrEventStatus.ACTIVE, test_event=True
)
event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
event_controller = controller.EventController(event_handler)
event_handler.handle_payload(generate_payload([test_event]))
signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([test_event.to_obj()])
assert (signal_level, evt_id, remove_events) == (0, None, [])
active_event = event_handler.get_active_events()[0]
expected_event = test_event.to_obj()
assert active_event == expected_event
@pytest.mark.parametrize(
"response_required",
[
pytest.param(
False,
id="response required"
),
pytest.param(
True,
id="response not required"
),
]
)
def test_12_response_required(response_required, tmpdir):
"""
VEN, EiEvent Service, oadrCreatedEvent Payload
The VEN must respond to an event in oadrDistributeEvent based upon the
value in each event’s oadrResponseRequired element as follows:
Always – The VEN shall respond to the event with an oadrCreatedEvent
eventResponse . This includes unchanged, new, changed, and cancelled
events
Never – The VEN shall not respond to the event with a oadrCreatedEvent
eventResponse
Note that oadrCreatedEvent event responses SHOULD be returned in one
message, but CAN be returned in separate messages.
"""
test_event = AdrEvent(
id="FooEvent",
start=datetime.utcnow() - timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
status=AdrEventStatus.ACTIVE, response_required=response_required
)
event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
reply = event_handler.handle_payload(generate_payload([test_event]))
assert bool(reply) == response_required
def test_18_overlaping_events(tmpdir):
"""
VEN/VTN, EiEvent Service
The VEN/VTN must honor the following rules with regards to overlapping
active periods...
DR events with overlapping active periods may be issued, but only if they
are from different marketContexts and only if the programs have a priority
associated with them. DR events for programs with higher priorities
supersede the events of programs with lower priorities. If two programs with
overlapping events have the same priority then the program whose event
was activated first takes priority.
The behavior of a VEN is undefined with respect to the receipt on an
overlapping event in the same market context. The VTN shall not send
overlapping events in the same market context, including events that could
potentially overlap a randomized event cancellation. Nothing in this rule
should preclude a VEN from opting into overlapping events in different
market contexts.
"""
expected_events = [
AdrEvent(
id="FooEvent1",
start=datetime.utcnow() - timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
status=AdrEventStatus.ACTIVE, market_context="context1", priority=1
),
AdrEvent(
id="FooEvent2",
start=datetime.utcnow() - timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(minutes=10), level=2.0)],
status=AdrEventStatus.ACTIVE, market_context="context2", priority=2
),
]
event_handler = event.EventHandler(
"VEN_ID",
db_path=TEST_DB_ADDR % tmpdir,
vtn_ids="TH_VTN",
market_contexts="context1,context2"
)
event_controller = controller.EventController(event_handler)
event_handler.handle_payload(generate_payload(expected_events))
active_events = event_handler.get_active_events()
signal_level, evt_id, remove_events = event_controller._calculate_current_event_status(active_events)
assert (signal_level, evt_id, remove_events) == (2.0, "FooEvent2", [])
def test_19_valid_invalid_events(tmpdir):
"""
VEN, EiEvent Service, oadrDistributeEvent Payload
If an oadrDistributeEvent payload has as mix of valid and invalid events,
the implementation shall only respond to the relevant valid events and not
reject the entire message.
"""
expected_events = [
AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
status=AdrEventStatus.PENDING
),
AdrEvent(
id="FooFailed",
start=datetime.utcnow() + timedelta(seconds=160),
signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
status=AdrEventStatus.PENDING, ven_ids=["Wrong_Ven"]
),
AdrEvent(
id="AnotherFooEvent",
start=datetime.utcnow() + timedelta(seconds=260),
signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
status=AdrEventStatus.PENDING
)
]
db_mock = mock.MagicMock()
event_handler = event.EventHandler(
"VEN_ID",
db_path=TEST_DB_ADDR % tmpdir,
vtn_ids="TH_VTN"
)
event_handler.db.update_event = db_mock
reply = event_handler.handle_payload(generate_payload(expected_events))
assert reply.findtext(venID, namespaces=NS_A) == "VEN_ID"
assert reply.findtext(responseCode, namespaces=NS_A) == "200"
for event_reply in reply.iterfind(eventResponse, namespaces=NS_A):
event_id = event_reply.findtext("ei:qualifiedEventID/ei:eventID", namespaces=NS_A)
assert reply.findtext(requestID, namespaces=NS_A) == "OadrDisReq092520_152645_178"
if event_id == "FooFailed":
assert event_reply.findtext("ei:responseCode", namespaces=NS_A) == "403"
assert event_reply.findtext("ei:optType", namespaces=NS_A) == "optOut"
else:
assert event_reply.findtext("ei:responseCode", namespaces=NS_A) == "200"
assert event_reply.findtext("ei:optType", namespaces=NS_A) == "optIn"
def test_21a_ven_id_validation(tmpdir):
"""
VEN/VTN, EiEvent Service, oadrDistributeEvent Payload
If venID, vtnID, or EventID is included in payloads, the receiving entity must
validate the ID values are as expected and generate an error if no ID is
present or an unexpected value is received.
Exception: A VEN shall not generate an error upon receipt of a cancelled
event whose eventID is not previously known.
"""
expected_event = AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
status=AdrEventStatus.PENDING, ven_ids=["Wrong_Ven"]
)
db_mock = mock.MagicMock()
event_handler = event.EventHandler(
"VEN_ID",
db_path=TEST_DB_ADDR % tmpdir,
vtn_ids="TH_VTN"
)
event_handler.db.update_event = db_mock
reply = event_handler.handle_payload(generate_payload([expected_event]))
assert reply.findtext(responseCode, namespaces=NS_A) == "200"
assert reply.findtext(requestID, namespaces=NS_A) == "OadrDisReq092520_152645_178"
assert reply.findtext(optType, namespaces=NS_A) == "optOut"
assert reply.findtext(venID, namespaces=NS_A) == "VEN_ID"
@pytest.mark.parametrize(
"expected_event",
[
pytest.param(
AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
status=AdrEventStatus.PENDING, resource_ids=["resource_id"], ven_ids=[]
),
id="resource_id"
),
pytest.param(
AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
status=AdrEventStatus.PENDING, party_ids=["party_id"], ven_ids=[]
),
id="party_id"
),
pytest.param(
AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
status=AdrEventStatus.PENDING, group_ids=["group_id"], ven_ids=[]
),
id="group_id"
),
pytest.param(
AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
status=AdrEventStatus.PENDING
),
id="ven_id"
),
]
)
def test_22_target_validation(expected_event, tmpdir):
"""
VEN, EiEvent Service, oadrDistributeEvent Payload
If no sub elements are present in oadrDistributeEvent eiTarget, the
presumption is that the recipient is the intended target of the event. If
multiple criteria are present in eiTarget subelements, the values are OR’d
togther to determine whether the VEN is a target for the event. However,
the VENs behavior with respect to responding to an event when it matches
one of the eiTarget criteria is implementation dependent.
"""
db_mock = mock.MagicMock()
event_handler = event.EventHandler(
"VEN_ID",
db_path=TEST_DB_ADDR % tmpdir,
resource_id="resource_id",
party_id="party_id",
group_id="group_id"
)
event_handler.db.update_event = db_mock
reply = event_handler.handle_payload(generate_payload([expected_event]))
assert reply.findtext(responseCode, namespaces=NS_A) == "200"
assert reply.findtext(requestID, namespaces=NS_A) == "OadrDisReq092520_152645_178"
assert reply.findtext(optType, namespaces=NS_A) == "optIn"
assert reply.findtext(venID, namespaces=NS_A) == "VEN_ID"
@pytest.mark.skip(reason="No need to test")
def test_23_oadrRequestEvent():
"""
VEN/VTN, EiEvent Service, oadrRequestEvent Payload
oadrRequestEvent many only be sent in the VEN to VTN direction
"""
assert False
@pytest.mark.skip(reason="Covered in other tests")
def test_25_error_reporting():
"""
VEN/VTN, EiEvent Service
VTN and VEN: The following rules must be followed with respect to
application level responses with respect to multiple events:
1)If the Response indicates success, there is no need to examine each
element in the Responses.
2)If some elements fail and other succeed, the Response will indicate the
error, and the recipient should evaluate each element in Responses to
discover which components of the operation failed.
Exception: For oadrCreatedEvent, the presence of a failure indication in
eventResponse:responseCode shall not force a failure indication in
eiResponse:responseCode. Typical behavior would be for the VEN to report
a success indication in eiResponse:responseCode and indicate any event
specific errors in eventResponse:responseCode. The
"""
assert False
def test_30_start_time_randomization(tmpdir):
"""
VEN, EiEvent Service, oadrDistributeEvent Payload
The VEN must randomize the dtstart time of the event if a value is present
in the startafter element. Event completion times are determined by adding
the event duration to the randomized dtstart time. Modifications to an event
should maintain the same random offset, unless the startafter element itself
is modified.
"""
test_event = AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(minutes=10),
signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
status=AdrEventStatus.PENDING, start_after=timedelta(minutes=2)
)
event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
event_handler.handle_payload(generate_payload([test_event]))
active_event = event_handler.get_active_events()[0]
expected_event = test_event.to_obj()
assert active_event.start != expected_event.start
assert (active_event.start - expected_event.start) < timedelta(minutes=2)
@pytest.mark.skip(reason="Covered in other tests")
def test_31_active_period_subelements():
"""
# VEN, EiEvent Service, oadrDistributeEvent Payload
# The VEN must recognize and act upon values specified in the subelements
# of activePeriod including:
# dtStart
# duration
# tolerence
# x-eiRampUp (positive and negative)
# x-eiRecovery (positive and negative)
# Note: x-eiRampup and x-eiRecovery are not testable requirements
"""
assert False
@pytest.mark.skip(reason="Covered in other tests")
def test_32_intervals_subelements():
"""
VEN/VTN, EiEvent Service, oadrDistributeEvent Payload
The VEN must recognize and act upon values specified in the subelements
of intervals including:
duration
signalPayload
"""
assert False
@pytest.mark.skip(reason="Covered in other tests")
def test_31_event_error_indication():
"""
VEN/VTN
The implementation must provide an application layer error indication as a
result of the following conditions:
Schema does not validate
Missing expected information
Payload not of expected type
ID not as expected
Illogical request – Old date on new event, durations don’t add up
correctly, etc.
Etc.
"""
assert False
def test_35_response_created_event(tmpdir):
"""
VEN, EiEvent Service, oadrCreatedEvent Payload
The eiResponses element in oadrCreatedEvent is mandatory, except when
an error condition is reported in eiResponse.
"""
test_event = AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(minutes=10),
signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
status=AdrEventStatus.PENDING
)
event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
reply = event_handler.handle_payload(generate_payload([test_event]))
assert bool(reply.find("pyld:eiCreatedEvent/ei:eventResponses", namespaces=NS_A))
def test_36_cancellation_acknowledgement(tmpdir):
"""
VEN, EiEvent Service, oadrCreatedEvent Payload
An event cancellation received by the VEN must be acknowledged with an
oadrCreatedEvent with the optType element set as follows, unless the
oadrResponseRequired is set to ‘never”:
optIn = Confirm to cancellation
optOut = Cannot cancel
Note: Once an event cancellation is acknowledged by the VEN, the event
shall not be included in subsequent oadrCreatedEvent payloads unless the
VTN includes this event in a subsequent oadrDistributeEvent payload.
"""
test_event = AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(minutes=10),
signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
status=AdrEventStatus.CANCELLED
)
db_mock = mock.MagicMock()
event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
event_handler.db.update_event = db_mock
reply = event_handler.handle_payload(generate_payload([test_event]))
assert reply.findtext(responseCode, namespaces=NS_A) == "200"
assert reply.findtext(requestID, namespaces=NS_A) == "OadrDisReq092520_152645_178"
assert reply.findtext(optType, namespaces=NS_A) == "optIn"
assert reply.findtext(venID, namespaces=NS_A) == "VEN_ID"
db_mock.assert_not_called()
@pytest.mark.skip(reason="No need to test")
def test_37_push_pull_model():
"""
VEN
A VEN Implementation must support pull model and can optionally also
support push
"""
assert False
@pytest.mark.skip(reason="Covered in other tests")
def test_41_request_id():
"""
VEN/VTN, EiEvent Service, oadrDistributeEvent Payload
The VTN must send a requestID value as part of the oadrDistributeEvent payload.
Note: The requestID value is not required to be unique, and in fact may be the
same for all oadrDistributeEvent payloads. That there are two requestID fields
in oadrDistributeEvent. The feild that must be populated with a requestID is
located at oadrDistributeEvent:requestID
"""
assert False
def test_42_request_id(tmpdir):
"""
VEN, EiEvent Service, oadrCreatedEvent Payload
A VEN receiving an oadrDistributeEvent eiEvent must use the received requestID
value in the EiCreatedEvent eventResponse when responding to the event. This
includes any and all subsequent EiCreatedEvent messages that may be sent to
change the opt status of the VEN.
The eiResponse:requestID in oadrCreatedEvent shall be left empty if the
payload contains eventResponses. The VTN shall
look inside each
eventResponse for the relevant requestID
"""
test_event = AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(minutes=10),
signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
status=AdrEventStatus.PENDING, start_after=timedelta(minutes=2)
)
db_mock = mock.MagicMock()
event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
event_handler.db.update_event = db_mock
reply = event_handler.handle_payload(generate_payload([test_event]))
assert reply.findtext(
'pyld:eiCreatedEvent/ei:eventResponses/ei:eventResponse/pyld:requestID',
namespaces=NS_A
) == "OadrDisReq092520_152645_178"
@pytest.mark.skip(reason="No need to test")
def test_43_request_id_uniqueness():
"""
VEN, EiEvent Service, oadrDistributeEvent Payload
The VEN must make no assumptions regarding the uniqueness of requestID values
received from the VTN in the oadrDistributePayload
"""
assert False
@pytest.mark.skip(reason="No need to test")
def test_44_empty_request_id():
"""
VEN/VTN
With the exception of oadrDistributeEvent and oadrCreatedEvent payloads,
requestID may be an empty element in other payloads and if a requestID value is
present, it may be ignored
"""
assert False
@pytest.mark.skip(reason="No need to test")
def test_45_schema_location():
"""
VEN/VTN
Messages sent between VENs and VTNs shall
*not* include a
schemaLocation attribute
"""
assert False
@pytest.mark.skip(reason="Covered in other tests")
def test_46_optional_elements():
"""
VEN/VTN
Optional elements do not need to be included in outbound payloads, but if
they are, the VEN or VTN receiving the payload must understand and act
upon those optional elements
"""
assert False
def test_47_unending_event(tmpdir):
"""
VEN/VTN, EiEvent Service, oadrDistributeEvent Payload
An event with an overall duration of 0 indicates an event with no defined
end time and will remain active until explicitly cancelled.
"""
test_event = AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(minutes=0), level=1.0)],
status=AdrEventStatus.ACTIVE
)
event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
event_controller = controller.EventController(event_handler)
event_handler.handle_payload(generate_payload([test_event]))
active_event = event_handler.get_active_events()[0]
signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([active_event])
assert (signal_level, evt_id, remove_events) == (0, None, [])
with freeze_time(datetime.utcnow() + timedelta(seconds=70)):
signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([active_event])
assert (signal_level, evt_id, remove_events) == (1.0, "FooEvent", [])
with freeze_time(datetime.utcnow() + timedelta(minutes=70)):
signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([active_event])
assert (signal_level, evt_id, remove_events) == (1.0, "FooEvent", [])
with freeze_time(datetime.utcnow() + timedelta(hours=70)):
signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([active_event])
assert (signal_level, evt_id, remove_events) == (1.0, "FooEvent", [])
test_event.status = AdrEventStatus.CANCELLED
test_event.mod_number += 1
event_handler.handle_payload(generate_payload([test_event]))
active_event = event_handler.get_active_events()[0]
signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([active_event])
assert (signal_level, evt_id, remove_events) == (0, None, ["FooEvent"])
@pytest.mark.parametrize(
"expected_event",
[
pytest.param(
AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
status=AdrEventStatus.PENDING, market_context="http://bad.context"
),
id="market_context"
),
pytest.param(
AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
status=AdrEventStatus.PENDING, signal_name="bad"
),
id="signal_name"
),
]
)
def test_48_payload_error_indication(expected_event, tmpdir):
"""
When a VTN or VEN receives schema compliant oadr payload that has
logical errors, the receiving device must provide an application layer error
indication of 4xx. The detailed error message number is informational and
not a requirement for response to a specific scenario. If the error is in an
event contained in an oadrDistributeEvent payload, it should be reported in
the eventResponse element of oadrCreatedEvent. The following logical
errors must be detected by implementations:
VEN receives non-matching market context
VEN receives non-matching eiTarget
VEN receives unsupported signalName
VTN receives non-matching eventID in oadrCreatedEvent Response
VTN receives mismatched modificationNumber in oadrCreatedEvent
"""
db_mock = mock.MagicMock()
event_handler = event.EventHandler(
"VEN_ID",
market_contexts="http://market.context",
db_path=TEST_DB_ADDR % tmpdir,
resource_id="resource_id",
party_id="party_id",
group_id="group_id"
)
event_handler.db.update_event = db_mock
reply = event_handler.handle_payload(generate_payload([expected_event]))
assert reply.findtext(responseCode, namespaces=NS_A) == "200"
assert reply.findtext(requestID, namespaces=NS_A) == "OadrDisReq092520_152645_178"
assert reply.findtext(optType, namespaces=NS_A) == "optOut"
assert reply.findtext(venID, namespaces=NS_A) == "VEN_ID"
@pytest.mark.skip(reason="No need to test")
def test_50_distributed_event():
"""
VEN/VTN, EiEvent Service, oadrDistributeEvent Payload
In both the push and pull model, oadrDistributeEvent MUST contain all
existing events which have the eventStatus element set to either FAR,
NEAR, or ACTIVE. Events with an eventStatus of cancelled MUST be
included in the payload upon change to the modificationNumber and MAY
be included in subsequent payloads.
"""
assert False
@pytest.mark.skip(reason="No need to test")
def test_52_cancellation_acknowledgment():
"""
VTN, EiEvent Service, oadrDistributeEvent Payload
If a VTN requests acknowledgment of a cancelled event with
oadrResponserequired of always, the VTN shall continue to send the
cancelled event to the VEN until the event is acknowledged, eventStatus
transitions to the complete state, or some well defined number of retries is
attempted
"""
assert False
@pytest.mark.skip(reason="No need to test")
def test_53_http_transport():
"""
VEN/VTN
Shall implement the simple http transport. Including support for the
following mandatory http headers:
Host
Content-Length
Content-Type of application/xml
"""
assert False
@pytest.mark.skip(reason="No need to test")
def test_54_polling_frequency():
"""
VEN
HTTP PULL VEN’s MUST be able to guarantee worst case latencies for the
delivery of information from the VTN by using deterministic and well defined
polling frequencies. The VEN SHOULD support the ability for its polling
frequency to be configured to support varying latency requirements. If the
VEN intends to poll for information at varying frequencies based upon
attributes of the information being exchanged (e.g. market context) then the
VEN MUST support the configuration of polling frequencies on a per
attribute basis.
"""
assert False
def test_55_max_polling_frequency():
"""
VEN
HTTP PULL VEN’s MUST NOT poll the VTN on average greater than some
well defined and deterministic frequency. THE VEN SHOULD support the
ability for the maximum polling frequency to be configured.
"""
with pytest.raises(AssertionError):
OpenADR2(
event_config=dict(
ven_id="TH_VEN"
),
vtn_base_uri="",
vtn_poll_interval=9,
start_thread=False,
)
def test_56_new_event(tmpdir):
"""
VEN, EiEvent Service, oadrDistributeEvent Payload
If the VTN sends an oadrEvent with an eventID that the VEN is not aware
then it should process the event and add it to its list of known events
"""
test_event = AdrEvent(
id="FooEvent",
start=datetime.utcnow()+timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
status=AdrEventStatus.PENDING
)
event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
event_controller = controller.EventController(event_handler)
event_handler.handle_payload(generate_payload([test_event]))
signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([test_event.to_obj()])
assert (signal_level, evt_id, remove_events) == (0, None, [])
active_event = event_handler.get_active_events()[0]
expected_event = test_event.to_obj()
assert active_event == expected_event
with freeze_time(datetime.utcnow()+timedelta(seconds=70)):
signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([test_event.to_obj()])
assert (signal_level, evt_id, remove_events) == (1.0, "FooEvent", [])
def test_57_modified_event(tmpdir):
"""
VEN/VTN, EiEvent Service, oadrDistributeEvent Payload
If the VTN sends an oadrEvent with an eventID that the VEN is already
aware of, but with a higher modification number then the VEN should
replace the previous event with the new one In its list of known events.
"""
test_event = AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
status=AdrEventStatus.PENDING
)
event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
event_handler.handle_payload(generate_payload([test_event]))
active_event = event_handler.get_active_events()[0]
expected_event = test_event.to_obj()
assert active_event == expected_event
test_event.mod_number = 1
test_event.status = AdrEventStatus.ACTIVE
event_handler.handle_payload(generate_payload([test_event]))
active_event = event_handler.get_active_events()[0]
expected_event = test_event.to_obj()
assert active_event == expected_event
def test_58_modified_event_error(tmpdir):
"""
VEN, EiEvent Service, oadrDistributeEvent Payload
If the VTN sends an oadrEvent with an eventID that the VEN is already
aware of, but which has a lower modification number than one in which the
VEN is already aware then this is an ERROR and the VEN should respond
with the appropriate error code. Note that this is true regardless of the
event state including cancelled.
"""
test_event1 = AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
status=AdrEventStatus.PENDING, mod_number=5
)
test_event2 = AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
status=AdrEventStatus.PENDING, mod_number=3
)
event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
event_handler.handle_payload(generate_payload([test_event1]))
active_event = event_handler.get_active_events()[0]
expected_event = test_event1.to_obj()
assert active_event == expected_event
event_handler.handle_payload(generate_payload([test_event2]))
active_event = event_handler.get_active_events()[0]
assert active_event == expected_event
def test_59_event_cancellation(tmpdir):
"""
VEN, EiEvent Service, oadrDistributeEvent Payload
If the VTN sends an oadrEvent with the eventStatus set to cancelled and
has an eventID that the VEN is aware of then the VEN should cancel the
existing event and delete it from its list of known events.
"""
test_event = AdrEvent(
id="FooEvent",
start=datetime.utcnow() + timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
status=AdrEventStatus.PENDING, mod_number=1
)
event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
event_controller = controller.EventController(event_handler)
event_handler.handle_payload(generate_payload([test_event]))
active_event = event_handler.get_active_events()[0]
assert active_event == test_event.to_obj()
with freeze_time():
test_event.status = AdrEventStatus.CANCELLED
test_event.mod_number += 1
test_event.end = datetime.utcnow()
event_handler.handle_payload(generate_payload([test_event]))
active_event = event_handler.get_active_events()[0]
assert active_event == test_event.to_obj()
signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([test_event.to_obj()])
assert (signal_level, evt_id, remove_events) == (0, None, ["FooEvent"])
def test_60_new_cancelled_event(tmpdir):
"""
VEN, EiEvent Service, oadrDistributeEvent, oadrCreatedEvent Payload
If the VTN sends an oadrEvent with the eventStatus set to cancelled and
has an eventID that the VEN is not aware of then the VEN should ignore
the event since it is not currently in its list of known events, but still must
respond with the createdEvent if required to do so by oadrResponseRequired
"""
test_event = AdrEvent(
id="FooEvent",
start=datetime.utcnow() - timedelta(seconds=60),
signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
status=AdrEventStatus.CANCELLED, mod_number=1
)
event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
event_controller = controller.EventController(event_handler)
reply = event_handler.handle_payload(generate_payload([test_event]))
assert reply.findtext(
responseCode,
namespaces=NS_A
) == "200"
assert reply.findtext(
optType,
namespaces=NS_A
) == "optIn"
active_event = event_handler.get_active_events()[0]
signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([active_event])
assert (signal_level, evt_id, remove_events) == (0, None, ["FooEvent"])
@pytest.mark.skip(reason="Covered in other tests")
def test_61_implied_cancellation():
"""
VEN, EiEvent Service, oadrDistributeEvent Payload
If the VTN sends the oadrDistributeEvent payload and it does not contain
an event for which the VEN is aware (i.e. in its list of known events) then
the VEN must delete it from its list of known event (i.e. implied cancel).
Exception: A VEN that has an active event that cannot be immediately
stopped for operational reasons, may leave the event in its data store until
the event expires or the event can be stopped.
"""
assert False
@pytest.mark.skip(reason="Covered in other tests")
def test_62_response():
"""
VEN, EiEvent Service, oadrDistributeEvent, oadrCreatedEvent Payload
The VEN must process EVERY oadrEvent event message (new, modified,
cancelled, etc.) that it receives from the VTN in an oadrDistributeEvent
payload and it MUST reply with a createdEvent message for every EIEvent
message in which the responseRequired is set to always. Furthermore if
the responseRequired is set to never, the VEN MUST NOT respond with a
createdEvent message. It is at the complete discretion of the VTN as to
whether responses are required from the VEN. Note that this rule is
universal and applies to all scenarios including the following:
The event is one in which the VEN is already aware.
The event is being cancelled and the VEN did not even know it existed
It does not matter how the EIEvent payloads were delivered, i.e.
PUSH, PULL or as the result of being delivered in an ALL payload
"""
assert False
@pytest.mark.skip(reason="Covered in other tests")
def test_64_polling_cycle():
"""
VEN, EiEvent Service
A pull VEN shall respond to all received events before initiating another
polling cycle.
"""
assert False
def test_65_cancellation_time_randomization(tmpdir):
"""
VEN, EiEvent Service, oadrDistributeEvent, oadrCreatedEvent Payload
When an event containing a randomization value in the startafter element is
cancelled, either explicitly or implicitly, the VEN MUST randomize its
termination of the event. The randomization window should be between 0
and a duration equal to the value specified in startafter.
"""
test_event = AdrEvent(
id="FooEvent",
start=datetime.utcnow() - timedelta(minutes=5),
signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
status=AdrEventStatus.ACTIVE, start_after=timedelta(minutes=2)
)
event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
event_controller = controller.EventController(event_handler)
event_handler.handle_payload(generate_payload([test_event]))
with freeze_time():
test_event.mod_number += 1
test_event.status = AdrEventStatus.CANCELLED
event_handler.handle_payload(generate_payload([test_event]))
active_event = event_handler.get_active_events()[0]
assert active_event.end != datetime.utcnow()
assert (active_event.start - datetime.utcnow()) < timedelta(minutes=2)
@pytest.mark.skip(reason="No need to test")
def test_66_cancelled_event_handling():
"""
VEN/VTN, EiEvent Service, oadrDistributeEvent, Payload
If a VTN sends an oadrDistributeEvent payload containing an event with a
startafter element with a value greater than zero, the VTN must continue to
include the event in oadrDistributeEvent payloads, even if the event is
complete, until current time is equal to dtStart plus duration plus startafter.
The receipt of an eventStatus equal to completed shall not cause the VEN
to change its operational status with respect to executing the event.
"""
assert False
@pytest.mark.skip(reason="Cant test here")
def test_67_tls_support():
"""
VEN/VTN
VTN and VEN shall support TLS 1.0 and may support higher versions of
TLS provided that they can still interoperate with TLS 1.0 implementations.
The default cipher suite selection shall be as follows:
The VEN client shall offer at least at least one of the default cipher
suites listed below
The VEN server shall must support at least one of the default cipher
suites listed below and must select one of the default cipher suites
regardless of other cipher suites that may be offered by the VTN
client
The VTN client must offer both the default cipher suites listed
below.
The VTN server must support both of the default cipher suites listed
below and must select one of listed the default cipher suites
regardless of other ciphers that may be offered by the VEN client
Default cipher suites:
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
TLS_RSA_WITH_AES_128_CBC_SHA
Note that a VTN or VEN may be configured to support any TLS version and
cipher suite combination based on the needs of a specific deployment.
However in the absence of changes to the default configuration of the VTN
or VEN, the behavior of the devices shall be as noted above.
"""
assert False
@pytest.mark.skip(reason="Cant test here")
def test_68_cert_support():
"""
VEN/VTN
Both VTNs and VENs shall support client and server X.509v3 certificates. A
VTN must support both an ECC and RSA certificate. A VEN must support
either an RSA or ECC certificate and may support both. RSA certificates
must be signed with a minimum key length of 2048 bits. ECC certificates
must be signed with a minimum key length of 224 bits. ECC Hybrid
certificates must be signed with a 256 bit key signed with a RSA 2048 bit
key.
"""
assert False
| [
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
1332,
13,
41909,
62,
15596,
62,
8612,
1352,
1330,
1215,
81,
9237,
11,
1215,
81,
9237,
19580,
11,
7716,
62,
15577,
2220,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
... | 2.675777 | 14,672 |
# Uses python3
import sys
if __name__ == '__main__':
input = sys.stdin.read();
n, m = map(int, input.split())
print(get_fibonacci_huge_naive(n, m)) | [
2,
36965,
21015,
18,
198,
11748,
25064,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
5128,
796,
25064,
13,
19282,
259,
13,
961,
9783,
198,
220,
220,
220,
299,
11,
285,
796,
3975,
7,
600,
... | 2.3 | 70 |
import cv2
import os
import random
from deeplab.utils.picture_utils import *
# png_file_path = r'E:\leftImg8bit_demoVideo\leftImg8bit\demoVideo\stuttgart_01\stuttgart_01_000000_000001_leftImg8bit.png'
output_video_path = '/media/xzq/DA18EBFA09C1B27D/exp/train_on_train_set/video/stuttgart_01.mp4'
files = os.listdir('/media/xzq/DA18EBFA09C1B27D/exp/train_on_train_set/test')
out_num = len(files)
png_file_path = '/media/xzq/DA18EBFA09C1B27D/exp/train_on_train_set/test/0.png'
img = cv2.imread(png_file_path) # 读取第一张图片
# # print(img)
fps = 25
imgInfo = img.shape
size = (imgInfo[1], imgInfo[0]) # 获取图片宽高度信息
# print(size)
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
videoWrite = cv2.VideoWriter(output_video_path, fourcc, fps, size)# 根据图片的大小,创建写入对象 (文件名,支持的编码器,5帧,视频大小(图片大小))
#videoWrite = cv2.VideoWriter('0.mp4',fourcc,fps,(1920,1080))
#
print(out_num)
fileDir = '/media/xzq/DA18EBFA09C1B27D/exp/train_on_train_set/test'
for i in range(0, out_num):
fileName = fileDir + '/%d.png' % i #循环读取所有的图片,假设以数字顺序命名
print(fileName)
# print(i)
img = cv2.imread(fileName)
videoWrite.write(img)# 将图片写入所创建的视频对象
(parent_path, file_name) = os.path.split(output_video_path)
output_video_path = parent_path + "\\" + "segment_" + file_name
print(output_video_path)
| [
11748,
269,
85,
17,
198,
11748,
28686,
198,
11748,
4738,
198,
6738,
390,
68,
489,
397,
13,
26791,
13,
34053,
62,
26791,
1330,
1635,
628,
198,
198,
2,
279,
782,
62,
7753,
62,
6978,
796,
374,
6,
36,
7479,
9464,
3546,
70,
23,
2545,
... | 1.749656 | 727 |
# urls.py
from django.conf.urls import url
from apps.todo.views import TaskCreate
from apps.todo.views import TaskList
from apps.todo.views import TaskRetrieve
from apps.todo.views import TaskUpdate
from apps.todo.views import TaskDestroy
from apps.todo.views import TaskIndex
urlpatterns = [
url(r'new-task/$', TaskCreate.as_view(), name='new-task'),
url(r'(?P<pk>[0-9]+)/update$', TaskUpdate.as_view(), name='task-update'),
url(r'(?P<pk>[0-9]+)/destroy$',
TaskDestroy.as_view(), name='task-destroy'),
url(r'(?P<pk>[0-9]+)/$', TaskRetrieve.as_view(), name='task'),
url(r'list/$', TaskList.as_view(), name='tasks'),
url(r'$', TaskIndex.as_view(), name='tasks'),
]
| [
2,
2956,
7278,
13,
9078,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
6725,
13,
83,
24313,
13,
33571,
1330,
15941,
16447,
198,
6738,
6725,
13,
83,
24313,
13,
33571,
1330,
15941,
8053,
198,
6738,
6725,
13,
... | 2.510791 | 278 |
# MissingInteger - Find the smallest positive integer that does not occur in a given sequence.
# Given an array A of N integers, returns the smallest positive integer (greater than 0)
# that does not occur in A.
# For example, given A = [1, 3, 6, 4, 1, 2], the function should return 5.
# A = [1, 2, 3], the function should return 4.
# A = [−1, −3], the function should return 1.
# Important
# N is an integer within the range [1..100,000];
# each element of array A is an integer within the range [−1,000,000..1,000,000].
# Testing
A = [1,3,6,4,1,2] # result 5
print(solution(A))
# Detected time complexity: O(N) or O(N * log(N))
| [
2,
25639,
46541,
532,
9938,
262,
18197,
3967,
18253,
326,
857,
407,
3051,
287,
257,
1813,
8379,
13,
220,
198,
2,
11259,
281,
7177,
317,
286,
399,
37014,
11,
5860,
262,
18197,
3967,
18253,
357,
18223,
263,
621,
657,
8,
220,
198,
2,
... | 2.710843 | 249 |
import click
@click.group('create')
@command_group.command()
| [
11748,
3904,
628,
198,
31,
12976,
13,
8094,
10786,
17953,
11537,
628,
198,
31,
21812,
62,
8094,
13,
21812,
3419,
198
] | 3.095238 | 21 |
from datetime import datetime
from pathlib import Path
from tempfile import TemporaryDirectory
from fetchmesh.bgp import Collector, RISCollector, RouteViewsCollector
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
20218,
7753,
1330,
46042,
43055,
198,
198,
6738,
21207,
76,
5069,
13,
65,
31197,
1330,
17573,
11,
45698,
31337,
273,
11,
18956,
7680,
82,
31337,
273,
628... | 3.976744 | 43 |
from .basefile import BaseAnVILFile
from .basefolder import BaseAnVILFolder
from .google import GoogleAnVILFile
import gs_chunked_io as gscio
| [
6738,
764,
8692,
7753,
1330,
7308,
2025,
53,
4146,
8979,
198,
6738,
764,
8692,
43551,
1330,
7308,
2025,
53,
4146,
41092,
198,
6738,
764,
13297,
1330,
3012,
2025,
53,
4146,
8979,
198,
198,
11748,
308,
82,
62,
354,
2954,
276,
62,
952,
... | 3.041667 | 48 |
# (C) Copyright 2021 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
class StringExpression:
"""This class represents a string constant expression, e.g. 'Hello, world!'"""
| [
2,
357,
34,
8,
15069,
33448,
13182,
14326,
37,
13,
198,
2,
198,
2,
770,
3788,
318,
11971,
739,
262,
2846,
286,
262,
24843,
10483,
594,
10628,
362,
13,
15,
198,
2,
543,
460,
307,
6492,
379,
2638,
1378,
2503,
13,
43073,
13,
2398,
... | 3.69697 | 132 |
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db import connection
from pierre.site_search.settings import SORT_MAPPINGS
# Adapted from http://www.djangosnippets.org/snippets/1328/
class IndexField (models.Field):
"""
Field type used by Postgres for full-text indexing
Uses the tsvector object, which is built into Postgres 8.3.
Users of earlier versions can get the tsearch2 package here:
www.sai.msu.su/~meg.../V2
"""
| [
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
13,
27530,
1330,
14041,
6030,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
9945,
1330,
4637,
198,
198,
6738,
17748,
260,
13,
15654,
62,
12947,
13,
33692... | 2.994012 | 167 |
import rethinkdb
from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase,CreateError
from django.utils import timezone
import time
##push defaults
SESSION_RETHINK_HOST = getattr(settings, 'SESSION_RETHINK_HOST', 'localhost')
SESSION_RETHINK_PORT = getattr(settings, 'SESSION_RETHINK_PORT', '28015')
SESSION_RETHINK_DB = getattr(settings, 'SESSION_RETHINK_DB', 'test')
SESSION_RETHINK_TABLE = getattr(settings, 'SESSION_RETHINK_TABLE', 'django_sessions')
SESSION_RETHINK_AUTH = getattr(settings, 'SESSION_RETHINK_AUTH', '')
##
| [
11748,
36437,
9945,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
82,
6202,
13,
1891,
2412,
13,
8692,
1330,
23575,
14881,
11,
16447,
12331,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
113... | 2.772947 | 207 |
import pathlib
from roo.files.rprofile import RProfile
import textwrap
from roo.files.rprofile import _find_rprofile_marker_zone
| [
11748,
3108,
8019,
198,
198,
6738,
686,
78,
13,
16624,
13,
81,
13317,
1330,
371,
37046,
198,
11748,
2420,
37150,
198,
198,
6738,
686,
78,
13,
16624,
13,
81,
13317,
1330,
4808,
19796,
62,
81,
13317,
62,
4102,
263,
62,
11340,
628,
628... | 3.044444 | 45 |
# Generated by Django 2.0.2 on 2018-02-10 10:22
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
17,
319,
2864,
12,
2999,
12,
940,
838,
25,
1828,
198,
198,
11748,
42625,
14208,
13,
7295,
13,
12102,
2024,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
... | 2.951613 | 62 |
from casbin import persist
from casbin import model
from .file_adapter import FileAdapter
import os | [
6738,
6124,
8800,
1330,
21160,
198,
6738,
6124,
8800,
1330,
2746,
198,
6738,
764,
7753,
62,
324,
3429,
1330,
9220,
47307,
198,
11748,
28686
] | 4.125 | 24 |
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .dummy_launcher import DummyLauncher
from .launcher import Launcher, create_launcher, unsupported_launcher
try:
from .caffe_launcher import CaffeLauncher
except ImportError:
CaffeLauncher = unsupported_launcher('caffe', "Caffe isn't installed. Please, install it before using.")
try:
from .dlsdk_launcher import DLSDKLauncher
except ImportError:
DLSDKLauncher = unsupported_launcher('dlsdk', "Inference Engine Python isn't installed."
" Please, install it before using.")
__all__ = ['create_launcher', 'Launcher', 'CaffeLauncher', 'DLSDKLauncher', 'DummyLauncher']
| [
37811,
198,
15269,
357,
66,
8,
2864,
8180,
10501,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 3.172775 | 382 |
from utils.mixins import CustomModelForm
from .models import Card
from utils.models import Configuration
from django import forms
from utils.custom_form_widgets import MonthYearWidget
| [
6738,
3384,
4487,
13,
19816,
1040,
1330,
8562,
17633,
8479,
198,
6738,
764,
27530,
1330,
5172,
198,
6738,
3384,
4487,
13,
27530,
1330,
28373,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
3384,
4487,
13,
23144,
62,
687,
62,
28029,
114... | 4 | 46 |
######################################################################
######################################################################
# Copyright Tsung-Hsien Wen, Cambridge Dialogue Systems Group, 2017 #
######################################################################
######################################################################
import operator
import sys
import os
import json
import random
week = ['monday','tuesday','wednesday','thursday','friday','saturday','sunday']
| [
29113,
29113,
4242,
2235,
198,
29113,
29113,
4242,
2235,
198,
2,
220,
15069,
309,
9854,
12,
39,
82,
2013,
31164,
11,
14457,
34709,
11998,
4912,
11,
2177,
1303,
198,
29113,
29113,
4242,
2235,
198,
29113,
29113,
4242,
2235,
198,
11748,
10... | 5.597826 | 92 |
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
import torchvision.transforms as transforms
import torchvision.utils as vutils
import os
from tqdm import tqdm
from discriminator import Discriminator
from generator import Generator
from utils import custom_init, compute_acc, to_device, get_default_device, denorm, show_images
from config import *
dataset = CIFAR10(
root=data_dir, download=True,
transform=transforms.Compose([
transforms.Scale((32, 32)),
transforms.ToTensor(),
transforms.Normalize(mean, std) # (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
])
)
dataloader = DataLoader(dataset, batch_size=batch_size)
device = get_default_device() # check gpu if available else cpu
# instantiate generator
netG = Generator(noise_dim).to(device) # hidden latent vector length
netG.apply(custom_init) # apply custom intitialization to generator
print(netG)
# instantiate discriminator
netD = Discriminator(in_channels=3)
netD = to_device(netD, device)
print(netD)
# defining Optimizer
optimD = optim.Adam(netD.parameters(), lr)
optimG = optim.Adam(netG.parameters(), lr)
# defining Loss
disc_criterion = nn.BCELoss()
aux_criterion = nn.NLLLoss()
# noise for evaluation
eval_noise = torch.FloatTensor(batch_size, noise_dim, 1, 1).normal_(0, 1)
eval_noise_ = np.random.normal(0, 1, (batch_size, noise_dim))
eval_label = np.random.randint(0, num_classes, batch_size)
eval_onehot = np.zeros((batch_size, num_classes))
eval_onehot[np.arange(batch_size), eval_label] = 1
eval_noise_[np.arange(batch_size), :num_classes] = eval_onehot[np.arange(batch_size)]
eval_noise_ = (torch.from_numpy(eval_noise_))
eval_noise.data.copy_(eval_noise_.view(batch_size, noise_dim, 1, 1))
eval_noise.to(device)
# create directory to save images
os.makedirs(save_dir, exist_ok=True)
# Training
for epoch in range(epochs):
with tqdm(dataloader, unit="batch") as tepoch:
for i, data in enumerate(tepoch):
tepoch.set_description(f"Epoch--[ {epoch}/{epochs}]")
image, label = to_device(data[0], device), to_device(data[1], device)
# First train discriminator
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# zero gradient of optimizer in every epoch
optimD.zero_grad()
# feed the batch of real image into the discriminator
disc_output, aux_output = netD(image)
disc_error_real = disc_criterion(disc_output, torch.ones_like(disc_output))
aux_error_real = aux_criterion(aux_output, label)
total_error_real = disc_error_real + aux_error_real
D_x = disc_output.data.mean()
# get the current classification accuracy
accuracy = compute_acc(aux_output, label)
# generating noise by random sampling
noise = torch.normal(0, 1, (batch_size, noise_dim), dtype=torch.float).to(device)
# generating label for entire batch
fake_label = torch.randint(0, 10, (batch_size,), dtype=torch.long).to(
device) # num of classes in CIFAR10 is 10
fake_image = netG(noise) # generator generate fake image
# passing fake image to the discriminator
disc_output_fake, aux_output_fake = netD(fake_image.detach()) # we will be using this tensor later on
disc_error_fake = disc_criterion(disc_output_fake, torch.zeros_like(
disc_output_fake)) # Train discriminator that it is fake image
aux_error_fake = aux_criterion(aux_output_fake, fake_label)
total_error_fake = disc_error_fake + aux_error_fake
total_error = total_error_fake + total_error_real
total_error.backward()
optimD.step()
# Now we train the generator as we have finished updating weights of the discriminator
optimG.zero_grad()
disc_output_fake, aux_output_fake = netD(fake_image)
disc_error_fake = disc_criterion(disc_output_fake, torch.ones_like(disc_output_fake)) # Fool the discriminator that it is real
aux_error_fake = aux_criterion(aux_output_fake, fake_label)
total_error_gen = disc_error_fake + aux_error_fake
total_error_gen.backward()
optimG.step()
tepoch.set_postfix(Loss_Discriminator =total_error_fake.item(), Loss_Generator=total_error_gen.item(), Accuracy=accuracy)
# if i % 100 == 0:
# print(
# "Epoch--[{} / {}], Loss_Discriminator--[{}], Loss_Generator--[{}],Accuracy--[{}]".format(epoch,
# epochs,
# total_error_fake,
# total_error_gen,
# accuracy))
# save generated samples at each epoch
save_samples(epoch, eval_noise)
| [
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
6738,
28034,
10178,
13,
19608,
292,
1039,
1330,
327,
5064,
1503,
940,
198,
11748,
28034,
1017... | 2.173241 | 2,459 |
#!/usr/bin/env python3
# MIT License
#
# Copyright (C) 2019-2020, Entynetproject. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import json
from urllib.parse import urlencode
from bs4 import BeautifulSoup
from lib.output import *
from lib.request import send
from config import *
from selenium import webdriver
browser = None
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
17168,
13789,
198,
2,
198,
2,
15069,
357,
34,
8,
13130,
12,
42334,
11,
7232,
2047,
316,
16302,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
... | 3.727763 | 371 |
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5 import uic
app = QtWidgets.QApplication(sys.argv)
window = MainWindow()
window.show()
app.exec_() | [
11748,
25064,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
14055,
11,
33734,
8205,
72,
11,
33734,
54,
312,
11407,
198,
6738,
9485,
48,
83,
20,
1330,
334,
291,
628,
198,
198,
1324,
796,
33734,
54,
312,
11407,
13,
48,
23416,
7,
17597,
... | 2.578125 | 64 |
"""
Support for file variables.
"""
import sys
import copy
import os
import shutil
from six import iteritems
#Public Symbols
__all__ = ['FileRef']
_file_meta = {
'binary': bool,
}
class FileRef(object):
"""
A reference to a file on disk. As well as containing metadata information,
it supports :meth:`open` to read and write the file's contents.
"""
def open(self, mode):
""" Open file for reading or writing. """
if self.meta.get('binary') and 'b' not in mode:
mode += 'b'
return open(self._abspath(), mode)
def _abspath(self):
""" Return absolute path to file. """
if os.path.isabs(self.fname):
return self.fname
else:
return os.path.join(self.parent_dir, self.fname)
def validate(self, src_fref):
"""
validate() is called on a target `FileRef` to ensure that the source
is a `FileRef` and that it has matching metadata. Currently, the only
metadata is a binary flag. Other metadata may be added in the future.
If the metadata does not match, an exception will be raised.
Args
----
src_fref : `FileRef`
Source `FileRef` object.
"""
if not isinstance(src_fref, FileRef):
raise TypeError("Source for FileRef '%s' is not a FileRef." %
self.fname)
for name, typ in iteritems(_file_meta):
if name in self.meta or name in src_fref.meta:
tgtval = typ(self.meta.get(name))
srcval = typ(src_fref.meta.get(name))
if tgtval != srcval:
raise ValueError("Source FileRef has (%s=%s) and dest has (%s=%s)."%
(name, srcval, name, tgtval))
def _same_file(self, fref):
"""Returns True if this FileRef and the given FileRef refer to the
same file.
"""
# TODO: check here if we're on the same host
return self._abspath() == fref._abspath()
def _assign_to(self, src_fref):
"""Called by the framework during data passing when a target FileRef
is connected to a source FileRef. Validation is performed and the
source file will be copied over to the destination path if it differs
from the path of the source.
"""
self.validate(src_fref)
# If we refer to the same file as the source, do nothing
if self._same_file(src_fref):
return
with src_fref.open("r") as src, self.open("w") as dst:
shutil.copyfileobj(src, dst)
| [
37811,
198,
15514,
329,
2393,
9633,
13,
198,
37811,
198,
198,
11748,
25064,
198,
11748,
4866,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
6738,
2237,
1330,
11629,
23814,
198,
198,
2,
15202,
41327,
10220,
198,
834,
439,
834,
796,
3725... | 2.287456 | 1,148 |
##@package producer
#@author Sebastien MATHIEU
import os,shutil, csv
from .agent.stateAgent import StateAgent
from .fsu import FSU
from .fsp import FSP
from .brp import BRP
from . import options,tools
from .spbid import SPBid, SPObligationBid
## Producer agent.
| [
2235,
31,
26495,
9920,
201,
198,
2,
31,
9800,
22787,
2013,
337,
12599,
10008,
52,
201,
198,
201,
198,
11748,
28686,
11,
1477,
22602,
11,
269,
21370,
201,
198,
201,
198,
6738,
764,
25781,
13,
5219,
36772,
1330,
1812,
36772,
201,
198,
... | 2.708738 | 103 |
# Generated by Django 2.2.3 on 2019-07-16 18:58
from django.db import migrations, models
import rooms.models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
18,
319,
13130,
12,
2998,
12,
1433,
1248,
25,
3365,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
9519,
13,
27530,
628
] | 3 | 37 |
# Generated by Django 3.1.5 on 2021-01-22 13:52
from django.db import migrations, models
import safe_transaction_service.contracts.models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
20,
319,
33448,
12,
486,
12,
1828,
1511,
25,
4309,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
198,
11748,
3338,
62,
7645,
2673,
62,
15271,
13,
28484,
82,
13,... | 3.065217 | 46 |
import re
from decimal import Decimal
import math
import pandas as pd
import numpy as np
from scipy.optimize import curve_fit
from scipy.interpolate import UnivariateSpline
from scipy.special import lambertw
from lmfit import Model, Parameters
from uncertainties import ufloat
def logWithZeros(x):
'''
return log10 of array that may contain zeros
'''
out = []
if len(x) > 0:
for xi in x:
if xi == 0.:
out.append(0.)
else:
out.append(np.log10(xi))
return np.array(out)
def johnson(x, ksp, kcat):
'''
implementation of the modified form of the Michaelis-Menten equation presented in Johnson AJ, Beilstein J Org Chem 2019.
'''
return (ksp*x) / (1 + (ksp*x)/kcat)
def SM(x, km, vmax):
'''
implementation of the Schnell-Mendoza equation using the scipy lambertw function
'''
t = x[0]
so = x[1]
z = so / km * np.exp(so / km - vmax / km * t)
return km * lambertw(z)
def linear(x, m, b):
'''
straight line
'''
return m*x + b
def logarithmic(x, yo, b, to):
'''
logarithmic equation from Lu & Fei et. al, 2003
'''
return yo + b*np.log(1 + x*to)
def mmfit(x, km, vmax):
'''
Michaelis Menten equation
'''
return vmax * x / (km + x)
def icfit(x, bottom, top, slope, p50):
'''
IC50 equation
'''
return bottom + (top-bottom)/(1+10**((-p50-x)*slope))
| [
11748,
302,
198,
6738,
32465,
1330,
4280,
4402,
198,
11748,
10688,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
40085,
1096,
1330,
12133,
62,
11147,
198,
6738,
629,
541,
88,
13... | 2.235294 | 646 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import json
import six
from novaclient import client as nc
from novaclient import exceptions
from novaclient import shell as novashell
from bilean.common import exception
from bilean.common.i18n import _
from bilean.common.i18n import _LW
from bilean.engine.clients import client_plugin
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
| [
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
... | 3.307958 | 289 |
from assertpy import assert_that
from src.utils.strings.streams import indent_lines, truncate_lines
| [
6738,
6818,
9078,
1330,
6818,
62,
5562,
198,
198,
6738,
12351,
13,
26791,
13,
37336,
13,
5532,
82,
1330,
33793,
62,
6615,
11,
40122,
378,
62,
6615,
628,
198
] | 3.551724 | 29 |
import copy
import matplotlib.pyplot as plt
import numpy
import numpy as np
import pandas as pd
from tqdm import tqdm
from .utils import _find_cols, _update_feature_name
from .utils import ohe_to_ord as alibi_ohe_to_ord
from .utils import ord_to_ohe as alibi_ord_to_ohe
np.random.seed(555)
| [
11748,
4866,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
19... | 2.596639 | 119 |
//Problem - https://www.codechef.com/MAY21B/problems/MODEQ
for _ in range(int(input())):
n,m = list(map(int,input().split()))
count = 0
mod = [1]*(n+1)
for i in range(2,n+1):
x = m % i
count += mod[x]
for j in range(x,n+1,i):
mod[j] += 1
print(count) | [
1003,
40781,
532,
3740,
1378,
2503,
13,
19815,
721,
258,
69,
13,
785,
14,
44,
4792,
2481,
33,
14,
1676,
22143,
14,
49058,
48,
198,
198,
1640,
4808,
287,
2837,
7,
600,
7,
15414,
28955,
2599,
198,
197,
77,
11,
76,
796,
1351,
7,
88... | 2.081967 | 122 |
#!/usr/bin/python
# coding:utf-8
import pymysql
from managehtml import *
from md5 import *
import os
sqlservername='localhost'
sqluser='simpledrive'
sqlpasswd='simpledrive'
sqldatabase='simpledrive'
| [
2,
48443,
14629,
14,
8800,
14,
29412,
201,
198,
2,
19617,
25,
40477,
12,
23,
201,
198,
201,
198,
11748,
279,
4948,
893,
13976,
201,
198,
6738,
6687,
6494,
1330,
1635,
201,
198,
6738,
45243,
20,
1330,
1635,
201,
198,
11748,
28686,
20... | 2.52381 | 84 |
import matrices_new_extended as mne
import numpy as np
import sympy as sp
from equality_check import Point
x, y, z = sp.symbols("x y z")
Point.base_point = np.array([x, y, z, 1])
| [
11748,
2603,
45977,
62,
3605,
62,
2302,
1631,
355,
285,
710,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10558,
88,
355,
599,
198,
6738,
10537,
62,
9122,
1330,
6252,
198,
198,
87,
11,
331,
11,
1976,
796,
599,
13,
1837,
2022,
102... | 2.661765 | 68 |
"""
Wrap plotting functionality
"""
from bokeh.embed import components
from bokeh.plotting import figure
from bokeh.resources import INLINE
from bokeh.util.string import encode_utf8
from bokeh.charts import Bar
| [
37811,
198,
220,
220,
220,
41028,
29353,
11244,
198,
37811,
198,
198,
6738,
1489,
365,
71,
13,
20521,
1330,
6805,
198,
6738,
1489,
365,
71,
13,
29487,
889,
1330,
3785,
198,
6738,
1489,
365,
71,
13,
37540,
1330,
3268,
24027,
198,
6738,... | 3.323077 | 65 |
import datetime
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from core.models import MatierePremiere, Biocarburant, Pays, Entity, ProductionSite, Depot
from certificates.models import ISCCCertificate, DBSCertificate
from api.v3.common.urls import urlpatterns
from django_otp.plugins.otp_email.models import EmailDevice
| [
11748,
4818,
8079,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
198,
6738,
4755,
13,
2... | 3.272727 | 121 |
from database_connector import connect, postgresql_to_dataframe
import pandas as pd
from pandas.api.types import CategoricalDtype
import datetime
import textdistance
import difflib
import hashlib
import pickle
from dil_preprocess import get_url_data, basic_pruning
from dil_predict import init, predict_trees, reduce_leaky_endpoints
from dil_postprocess import get_working_incs, get_dyn_urls, get_working_urls_channels, get_dyn_results
def get_crawl_data():
"""Return the data from node_crawler site table."""
conn = connect()
column_names = ["job_id", "site_id", "site", "cookies", "counter", "crawl_status", "crawler"]
df = postgresql_to_dataframe(conn, "select * from sites", column_names)
conn.close()
return df
def get_pipeline_overview():
"""Return the data from the complete pipeline."""
# Connect to the database
conn = connect()
column_names = ["id", "site", "login", "cookies", "cookie_end", "num_urls",
"num_basic_pruning", "num_input_rows", "crawl_end", "dyn_conf_urls",
"dyn_conf_firefox", "dyn_conf_chrome", "dyn_end",
"dyn_conf_retest_urls", "dyn_conf_retest_firefox", "dyn_conf_retest_chrome", "dyn_retest_end",
"confirmed_urls", "confirmed_urls_firefox", "confirmed_urls_chrome",
"count", "tranco_rank", "confirmed_leak_urls", "confirmed_df_dict",
]
non_cat = ["login", "dyn_conf_urls", "dyn_conf_retest_urls", "confirmed_urls", "cookies", "confirmed_leak_urls", "confirmed_df_dict"]
# Execute the "SELECT *" query
site_results = postgresql_to_dataframe(conn, "select * from db_site_results", column_names, non_cat=non_cat)
conn.close()
return site_results
def get_leak_data():
"""Return the data from dbcon_leakresult."""
conn = connect()
column_names = ["id", "loading_time", "timed_out", "apg_url", "complete_time",
"retest_num", "cookies", "site", "browser_id", "events_id", "global_properties_id",
"object_properties_id", "test_id", "window_properties_id",
]
non_cat = ["cookies"]
# Execute the "SELECT *" query
leak_results = postgresql_to_dataframe(conn, "select * from dbcon_leakresult", column_names, non_cat=non_cat)
conn.close()
return leak_results
def get_isotime(iso):
"""Converts a isostr to datetime or returns None."""
try:
return datetime.datetime.fromisoformat(iso)
except ValueError:
None
# return datetime.datetime.fromordinal(datetime.date(year=1980, month=1, day=1).toordinal()
def calc_diff(time1, time2):
"""Returns the difference between two time objects or returns None."""
try:
return time1 - time2
except TypeError:
return None
def get_time(row):
"""Calculate the timing of a row."""
start = get_isotime(row["cookie_end"])
end_crawl = get_isotime(row["crawl_end"])
end_dyn = get_isotime(row["dyn_end"])
end_final = get_isotime(row["dyn_retest_end"])
return (row["site"], row["tranco_rank"], calc_diff(end_crawl, start), calc_diff(end_dyn, end_crawl), calc_diff(end_final, end_dyn))
def display_timing(df):
"""Calculate and display information on timimg."""
time_crawl = df.loc[df["crawl_end"] != ""].apply(get_time, axis=1, result_type="expand")
time_crawl = time_crawl.rename(columns={0: "site", 1: "tranco_rank", 2: "crawling time", 3: "dynamic confirmation time", 4: "dynamic reconfirmation time"})
display(time_crawl) # if time is over 9 hours, this could be because of a bug in our pipeline: e.g., ning, chess and vimeo
display(time_crawl.agg(["min", "max", "mean", "std"]))
def get_cookie_stats(row):
"""Row has a column cookies with a list of cookie dicts.
Every entry in the list will get transformed to one row in a df that is returned.
"""
try:
cookies = row.iloc[0]["cookies"]
except IndexError:
return None
if type(cookies) == list:
cookie_count = len(cookies)
row["name"] = "Not set"
row["value"] = "Not set"
row["secure"] = "Not set"
row["httpOnly"] = "Not set"
row["sameSite"] = "Not set"
row = row.loc[row.index.repeat(cookie_count)]
for count, cookie in enumerate(cookies):
row["name"].iloc[count] = cookie["name"]
row["value"].iloc[count] = cookie["value"]
row["secure"].iloc[count] = cookie.get("secure", "Not set")
row["httpOnly"].iloc[count] = cookie.get("httpOnly", "Not set")
row["sameSite"].iloc[count] = cookie.get("sameSite", "Not set")
# Collect stats for each cookie, guess if session cookie (regex on Name + nature of value?), record security attributes (how many use sameSite, etc)
# Later see if there is a relation between vulnerable sites and the cookie settings of these sites?!
# print(cookie["name"], cookie["value"], cookie.get("secure", "Not set"), cookie.get("httpOnly", "Not set"), cookie.get("sameSite", "Not set"))
return row
def show_only_first(df1, df2, info, head=3):
"""Show all rows only existing in the first df, both frames have a column: id."""
c = df1.merge(df2, on="id")
res = df1.loc[~df1.id.isin(c.id)]
if len(res) > 0:
print(f"{info} for {len(res)} sites")
with pd.option_context("max_columns", None):
display(res.head(head))
return res
def get_pipeline_stats(df, log=True):
"""Df is a (sub)frame of db_site_results.
Get info of how many sites went missing in the various steps.
"""
cookies_found = df.loc[df["cookies"] != {}]
pipeline_started = df.loc[df["login"].str.contains(r"pipepline|actual site")]
started_cookie_hunter = df.loc[df["login"].str.contains("pipepline")]
started_manual = df.loc[df["login"].str.contains("actual site")]
# Add the ones that failed in the unpruned run ("Bug": we update the wrong cookiehunter entries for the unpruned runs, so we need to do this)
pipeline_started = pipeline_started.append(df.loc[df["site"].isin(["bravenet.com", "amazon.in", "faucetcrypto.com", "bshare.cn"])])
cookies_found = cookies_found.append(df.loc[df["site"].isin(["bravenet.com", "amazon.in", "faucetcrypto.com", "bshare.cn"])])
started_cookie_hunter = started_cookie_hunter.append(df.loc[df["site"].isin(["bravenet.com", "amazon.in", "faucetcrypto.com", "bshare.cn"])])
crawled = df.loc[df["crawl_end"] != ""]
crawled_min = df.loc[df["num_urls"] >= 1]
crawled_success = df.loc[df["num_urls"] >= 3]
pruned = df.loc[df["num_basic_pruning"] > 0]
num_input_rows = df.loc[df["num_input_rows"] > 0]
pot_ft = df.loc[df["dyn_conf_firefox"] > 0]
pot_ct = df.loc[df["dyn_conf_chrome"] > 0]
pot = df.loc[df["id"].isin(list(set(pot_ft["id"].values.tolist()) | set(pot_ct["id"].values.tolist())))]
pot_both = df.loc[df["id"].isin(list(set(pot_ft["id"].values.tolist()) & set(pot_ct["id"].values.tolist())))]
pot_fr = df.loc[df["dyn_conf_retest_firefox"] > 0]
pot_cr = df.loc[df["dyn_conf_retest_chrome"] > 0]
pot_r = df.loc[df["id"].isin(list(set(pot_fr["id"].values.tolist()) | set(pot_cr["id"].values.tolist())))]
pot_r_both = df.loc[df["id"].isin(list(set(pot_fr["id"].values.tolist()) & set(pot_cr["id"].values.tolist())))]
conf_f = df.loc[df["confirmed_urls_firefox"] > 0]
conf_c = df.loc[df["confirmed_urls_chrome"] > 0]
conf = df.loc[df["id"].isin(list(set(conf_f["id"].values.tolist()) | set(conf_c["id"].values.tolist())))]
conf_both = df.loc[df["id"].isin(list(set(conf_f["id"].values.tolist()) & set(conf_c["id"].values.tolist())))]
info_text = (
f"Cookiehunter:\n"
f"Total sites attempted: {len(df)}, some success (cookies collected): {len(cookies_found)}, full success (pipeline started): {len(pipeline_started)}\n"
f"Pipeline started cookiehunter: {len(started_cookie_hunter)}, started selenium login replay: {len(started_manual)}\n"
f"\nCrawling:\n"
f"Crawl started: {len(crawled)}, at least one URL crawled: {len(crawled_min)}, at least three URLs crawled: {len(crawled_success)}\n"
f"\nPruning:\n"
f"At least one URL remains after basic pruninng: {len(pruned)}, at least one input row for trees: {len(num_input_rows)}\n"
f"Trees:\n"
f"At least one potential vulnerable firefox: {len(pot_ft)}, at least one potential vulnerable chrome: {len(pot_ct)}\n"
f"At least one potential vulnerable either: {len(pot)}, at least one potential vulnerable both: {len(pot_both)}\n"
f"\nSingle confirmation:\n"
f"At least one different observation firefox: {len(pot_fr)}, at least one different observation chrome: {len(pot_cr)}\n"
f"At least one different observation either: {len(pot_r)}, at least one different observation both: {len(pot_r_both)}\n"
f"\nDouble confirmation:\n"
f"At least one vulnerable firefox: {len(conf_f)}, at least one vulnerable chrome: {len(conf_c)}\n"
f"At least one vulnerable either: {len(conf)}, at least one vulnerable both: {len(conf_both)}\n"
)
if log:
print(info_text)
# Sanity checks, should not occur
show_only_first(pipeline_started, cookies_found, "Started without cookies")
show_only_first(pipeline_started, crawled, "Started but not crawled")
show_only_first(crawled_min, crawled, "Crawl check")
show_only_first(crawled_success, crawled_min, "Crawl check")
show_only_first(pruned, num_input_rows, "No input rows after pruning")
if log:
print("For some sites our testing infrastructure was partially down during testing (67 sites), after the infrastructure was ready again. We retested but for 21 the login failed (e.g., google SSO changed behavior in between and does not allow selenium anymore). We remove these from the following test")
cookie_hunter_second_failed = show_only_first(crawled, pipeline_started, "Crawled without started", 21)
# Remove the sites that failed a second login, and did never got tested properly
df = df.loc[~df.index.isin(cookie_hunter_second_failed.index)]
# Interesting cases
if log:
show_only_first(crawled, crawled_min, "Not crawled properly (e.g., cert error)")
show_only_first(pot, crawled_success, "Potential vulnerable with less than 3 URLs crawled")
show_only_first(crawled_min, pruned, "Crawled but excluded after basic pruning")
show_only_first(num_input_rows, pot, "No potential leaks after tree pruning")
show_only_first(pot, pot_r, "No observed difference in potential URLs")
show_only_first(pot_r, conf, "No confirmed URLs after retesting")
show_only_first(conf_f, conf_c, "Only in firefox confirmed")
show_only_first(conf_c, conf_f, "Only in chrome confirmed")
return df, conf_both, conf
sec_rel_headers = [
"content-type",
"x-frame-options",
"content-disposition",
"cross-origin-opener-policy",
"x-content-type-options",
"cross-origin-resource-policy",
"content-security-policy",
"location",
]
to_test = sec_rel_headers + ["code"]
acc = {}
def process_responses(row):
"""Get only the relevant data from the crawl."""
global acc
headers = row["resp_headers"] # All headers in the db are saved as lowercase
sec_df = {}
sec_df["url"] = row["req_url"]
sec_df["site"] = row["site"]
sec_df["real_site"] = row["real_site"]
sec_df["cookies"] = row["cookies"]
sec_df["code"] = row["resp_code"]
sec_df["body"] = row["resp_body_hash"]
headers_basic_pruned = {}
for header in sec_rel_headers:
header_val = headers.get(header, "Empty")
# Remove some info from headers here to deduplicate (e.g., filename in content-disposition?)
if header == "content-disposition":
header_val = header_val.split(";")[0]
# Add post-processing for CSP
sec_df[header] = header_val
if not header == "content-security-policy":
headers_basic_pruned[header] = header_val
for header in headers:
count = acc.get(header, 0)
acc[header] = count + 1
# Calculate hashes of the responses, either hash everything, remove some headers including randomness or only keep the tree headers (basic pruning)
hash_all = [sec_df["url"], sec_df["site"], sec_df["code"], headers, sec_df["body"]]
headers_min_pruned = headers.copy()
for header in ["date", "server", "cache-control", "last-modified", "etag", "vary", "expires", "age"]:
headers_min_pruned.pop(header, None)
hash_min_pruned = [sec_df["url"], sec_df["site"], sec_df["code"], headers_min_pruned, sec_df["body"]]
hash_basic_pruned = [sec_df["url"], sec_df["site"], sec_df["code"], headers_basic_pruned, sec_df["body"]]
sec_df["hash_all"] = hashlib.sha1(pickle.dumps(hash_all)).hexdigest()
sec_df["hash_min_pruned"] = hashlib.sha1(pickle.dumps(hash_min_pruned)).hexdigest()
sec_df["hash_basic_pruned"] = hashlib.sha1(pickle.dumps(hash_basic_pruned)).hexdigest()
return sec_df
def display_response_summary(df, index="cookies", check=None):
"""Display response groups."""
if check is None:
global to_test
to_check = to_test.copy()
to_check.remove("content-security-policy")
else:
to_check = check
table_dict = {}
with pd.option_context("max_columns", 200):
display(df.groupby(index).nunique())
for prop in to_check:
pivot = df.pivot_table(index=index, columns=prop, aggfunc="size", fill_value=0)
pivot.loc["Total"] = pivot.sum()
res = pivot.loc[:, pivot.max().sort_values(ascending=False).index]
display(res)
table_dict[prop] = res
# display(df[prop].value_counts().to_frame())
pivot = df.pivot_table(index=index, columns=to_check, aggfunc="size", fill_value=0)
pivot.loc["Total"] = pivot.sum()
res = pivot.loc[:, pivot.max().sort_values(ascending=False).index]
res
display(res)
table_dict["total"] = res
return table_dict
def display_changed(df):
"""Display rows where different headers/status-code are observed for cookies/no-cookies"""
# Drop the ones with only one or more than two observations
count_urls = df.groupby(["url", "site", "real_site"])["cookies"].count()
display(count_urls.value_counts())
count_index = count_urls[count_urls == 2].index
df = df.set_index(["url", "site", "real_site"])
df = df.loc[count_index]
df = df.reset_index()
print(df.info())
# Drop the ones that are the same for cookies/no-cookies
df = df.drop_duplicates(subset=to_test + ["url", "site", "real_site"], keep=False)
# Display remaining ones
display(df.sort_values(["site", "real_site", "url", "cookies"]))
def parse_apg_url(apg_url):
"""Return the method, url and browser from an apg_url."""
method = apg_url.split("/apg/")[1].split("/?url=")[0]
url = apg_url.split("/?url=")[1].split("&browser")[0]
try:
browser = apg_url.split("&browser=")[1].split("&")[0]
except IndexError:
browser = None
return method, url, browser
def parse_method_url(row, col, acc):
    """Extract browser/method/URL triples from an attack-page-URL dict.

    Parameters
    ----------
    row : mapping (e.g. a DataFrame row)
        Must provide ``row[col]`` (dict mapping browser name -> list of apg
        URLs) and ``row["site"]``.
    col : str
        Column/key holding the apg-URL dict.
    acc : list
        Accumulator; one ``{"site", "browser", "method", "url"}`` dict is
        appended per apg URL (side effect).

    Returns
    -------
    list or None
        ``[browser_list, method_list, url_list]`` if ``row[col]`` is a dict,
        otherwise None (implicitly).
    """
    row_dict = row[col]
    site = row["site"]
    # Fix: use isinstance instead of `type(...) == dict` (idiomatic, and
    # accepts dict subclasses); also dropped the unused local list `l`.
    if isinstance(row_dict, dict):
        browser_l = []
        method_l = []
        url_l = []
        for browser in row_dict:
            for apg_url in row_dict[browser]:
                method = apg_url.split("/apg/")[1].split("/?url=")[0]
                url = apg_url.split("/?url=")[1]
                browser_l.append(browser)
                method_l.append(method)
                url_l.append(url)
                acc.append({"site": site, "browser": browser, "method": method, "url": url})
        return [browser_l, method_l, url_l]
def get_query(string, pos=1):
    """Return piece *pos* of *string* split on "?".

    For the default ``pos=1`` this is the query string ("" when absent);
    for other positions the whole input is returned when the piece is
    missing.
    """
    pieces = string.split("?")
    try:
        return pieces[pos]
    except IndexError:
        return "" if pos == 1 else string
def row_sym(row):
    """Jaro similarity between the cookie and no-cookie response values."""
    with_cookies = row["value_cookies"]
    without_cookies = row["value_no_cookies"]
    return textdistance.jaro.normalized_similarity(with_cookies, without_cookies)
def get_distances(df):
    """Print the character-level edits between the two postMessage values.

    Only rows with method "gp_window_postMessage" are considered; for each
    one the ndiff delete/add operations are printed (output only, no return
    value).
    """
    post_rows = df.loc[df["method"] == "gp_window_postMessage"]
    for _, row in post_rows.iterrows():
        a = row["value_cookies"]
        b = row["value_no_cookies"]
        print('{} => {}'.format(a,b))
        for i,s in enumerate(difflib.ndiff(a, b)):
            op = s[0]
            if op == '-':
                print(u'Delete "{}" from position {}'.format(s[-1],i))
            elif op == '+':
                print(u'Add "{}" to position {}'.format(s[-1],i))
        print()
def get_conf_dfs(df, log=False):
    """Collect every per-site ``confirmed_df_dict`` into one DataFrame.

    Parameters
    ----------
    df : pandas.DataFrame
        Info frame with one row per site; needs "site" and
        "confirmed_df_dict" columns.
    log : bool
        If True, print per-site counts of unique/confirmed URLs.

    Returns
    -------
    pandas.DataFrame
        Concatenation of all per-site frames, extended with "site",
        URL-length/query helper columns and the Jaro similarity ("jaro").
    """
    df_all = pd.DataFrame()
    for _, row in df.iterrows():
        site = row["site"]
        try:
            df_frame = pd.DataFrame(row["confirmed_df_dict"])
            # Fix old data, that has no confirmed_df_dict
            if len(df_frame) == 0:
                print(site) # technologyreview is not vulnerable according to our new definition of "same"
                df_frame, _, _ = get_working_urls_channels(get_dyn_results(site))
            df_frame["site"] = site
            df_frame["url_len"] = df_frame["url"].str.len()
            df_frame["url_query"] = df_frame["url"].apply(get_query)
            df_frame["url_base"] = df_frame["url"].apply(get_query, pos=0) # Only the base of the URL without query parameters (maybe the same URL was found vulnerable several times with different query parameters)
            df_frame["url_query_len"] = df_frame["url_query"].str.len()
            df_frame["jaro"] = df_frame.apply(row_sym, axis=1)
            # display(df_frame.sort_values(["url_len", "url", "inc_method", "method", "browser"]).head())
            df_chrome = df_frame.loc[df_frame["browser"] == "chrome"]
            df_firefox = df_frame.loc[df_frame["browser"] == "firefox"]
            # NOTE(review): DataFrame.append was removed in pandas >= 2.0;
            # migrating this file would require pd.concat here.
            df_all = df_all.append(df_frame)
            if log:
                print(f"{df_frame['url'].nunique()} unique URLs, total vuln: {len(df_frame)}, chrome vuln: {len(df_chrome)}, firefox vuln: {len(df_firefox)}")
        except KeyError as e:
            print(f"Error: {e}")
            display(site)
    return df_all
def get_info_frames(df_all, leak_set=None, leave=(1, 2), conv_method=False):
    """Get the most important results in two info frames.

    Parameters
    ----------
    df_all : pandas.DataFrame
        Concatenated per-site confirmation frames (see ``get_conf_dfs``).
    leak_set : set or None
        Tuples of tested leak URLs passed to ``remove_leak_urls``; when
        None, no filtering is applied.
    leave : iterable of int
        Which tagged rows to keep: ``(2,)`` only channels tested in both
        browsers, ``(1,)`` only single-browser channels, ``(1, 2)`` all.
        (Default changed from a mutable list to an equivalent tuple to
        avoid the shared-mutable-default pitfall; ``isin`` accepts both.)
    conv_method : bool
        If True, convert the "method" column to an ordered categorical.

    Returns
    -------
    tuple of (pandas.DataFrame, pandas.DataFrame)
        ``(info_frame, info_frame_new)`` filled in by ``info_grouping``
        for every grouping below.
    """
    # Work on a copy so the caller's frame is not mutated.
    df_all = df_all.copy()
    if leak_set is not None:
        df_all["in"] = df_all.apply(remove_leak_urls, dyn_conf_data=leak_set, axis=1)
        df_all = df_all.loc[df_all["in"].isin(leave)]  # Only leave leak channels that were tested in both browsers ([2]), in only one browser ([1]) or do nothing ([1, 2])
    # Convert leak method to category
    if conv_method:
        # Remove the ones that are pruned in the attack page already?
        method_cats = CategoricalDtype(categories=["event_set", "event_list", "load_count", "gp_download_bar_height", "gp_securitypolicyviolation", "gp_window_getComputedStyle", "gp_window_hasOwnProperty", "gp_window_onblur", "gp_window_onerror", "op_el_buffered", "op_el_contentDocument", "op_el_duration", "op_el_height", "op_el_media_error", "op_el_naturalHeight", "op_el_naturalWidth", "op_el_networkState", "op_el_paused", "op_el_readyState", "op_el_seekable", "op_el_sheet", "op_el_videoHeight", "op_el_videoWidth", "op_el_width", "op_frame_count", "op_win_CSS2Properties", "op_win_history_length", "op_win_opener", "op_win_origin", "op_win_window"], ordered=True)
        df_all["method"] = df_all["method"].astype(method_cats)
    inc_methods = df_all.groupby("inc_method")
    leak_methods = df_all.groupby("method")
    # Pseudo-column so a "grouping over everything" can reuse info_grouping.
    df_all["group_key_fake"] = "browsers"
    browsers = df_all.groupby("group_key_fake")
    leak_channels = df_all.groupby(["inc_method", "method"])
    sites = df_all.groupby("site")
    inc_sites = df_all.groupby(["site", "inc_method"])
    info_frame = pd.DataFrame(columns=["type", "subtype", "leak urls", "chrome_channels", "firefox_channels", "chrome_sites", "firefox_sites", "sites"])
    info_frame_new = pd.DataFrame(columns=["type", "subtype", "confirmed leak URLs any browser", "confirmed leak URLs both browsers", "confirmed leak URLs only one browser", "confirmed leak URLs firefox", "confirmed leak URLs chrome", "confirmed URLs any browser", "confirmed URLs both browsers",
                                           "confirmed URLs only one browser", "confirmed URLs firefox", "confirmed URLs chrome",
                                           "confirmed base URLs browser", "confirmed base URLs both browsers",
                                           "confirmed base URLs only one browser", "confirmed base URLs firefox", "confirmed base URLs chrome",
                                           "confirmed sites any browser", "confirmed sites both browsers", "confirmed sites only one browser",
                                           "confirmed sites firefox", "confirmed sites chrome",
                                           "confirmed channels any browser", "confirmed channels both browser", "confirmed channels only one browser", "confirmed channels firefox", "confirmed channels chrome"])
    # BUG FIX: four of these results were previously assigned to a
    # misspelled name (`info_fame`), silently discarding the updates to
    # `info_frame` for the leak_methods/leak_channels/sites/inc_sites
    # groupings.
    info_frame, info_frame_new = info_grouping(browsers, "browsers", info_frame, info_frame_new)
    info_frame, info_frame_new = info_grouping(inc_methods, "inc_methods", info_frame, info_frame_new)
    info_frame, info_frame_new = info_grouping(leak_methods, "leak_methods", info_frame, info_frame_new)
    info_frame, info_frame_new = info_grouping(leak_channels, "leak_channels", info_frame, info_frame_new)
    info_frame, info_frame_new = info_grouping(sites, "sites", info_frame, info_frame_new)
    info_frame, info_frame_new = info_grouping(inc_sites, "inc_sites", info_frame, info_frame_new)
    return info_frame, info_frame_new
def get_only_both(df_dict, keys=("chrome", "firefox"), log=False):
    """Get info on entries only in one, in both and combined.

    Parameters
    ----------
    df_dict : dict
        Maps each key in ``keys`` to a DataFrame; a missing key is treated
        as an empty set of rows.
    keys : tuple of str
        The two dict keys to compare (default chrome/firefox).
    log : bool
        If True, print a human-readable summary.

    Returns
    -------
    tuple of int
        ``(first, second, combined, both, only_first, only_second)`` —
        row counts in the first/second frame, their union, their
        intersection, and the rows exclusive to each side.
    """
    try:
        c_set = set(df_dict[keys[0]].itertuples(index=False, name=None))
    except KeyError:
        c_set = set()
    try:
        f_set = set(df_dict[keys[1]].itertuples(index=False, name=None))
    except KeyError:
        f_set = set()
    both_rows = c_set & f_set
    combined_rows = c_set | f_set
    only_one = c_set ^ f_set
    only = {keys[0]: [], keys[1]: []}
    for entry in only_one:
        # Entry is exclusive to one side; membership decides which.
        key = keys[0] if entry in c_set else keys[1]
        only[key].append(entry)
    first = len(c_set)
    second = len(f_set)
    combined = len(combined_rows)
    both = len(both_rows)
    only_first = len(only[keys[0]])
    only_second = len(only[keys[1]])
    if log:
        print()
        print(f"{keys[0]}: {first}, {keys[1]}: {second}")
        print(f"Combined: {combined}")
        print(f"Both: {both}")
        #display(both)
        print(f"Only in one: {len(only_one)}, {keys[0]}: {only_first}, {keys[1]}: {only_second}")
        # display(only)
    # BUG FIX: a leftover debugging `return df0.append(df1)` made the
    # documented tuple return unreachable and broke callers (get_stats
    # unpacks six values); the debug DataFrame construction was removed.
    return first, second, combined, both, only_first, only_second
def url_list_to_tuples(l, sites, site_cat=False):
    """Convert a list of leak url dicts to a sorted DataFrame of tuples.

    Parameters
    ----------
    l : list of (dict or None)
        One apg-URL dict per site (browser -> list of apg URLs); None
        entries are skipped.
    sites : list of str
        Site names, parallel to ``l``.
    site_cat : bool
        If True, additionally cast "site" to a fixed ordered categorical.

    Returns
    -------
    pandas.DataFrame
        Columns (method, url, browser, site, nogroup); "method" and
        "browser" are ordered categoricals.
    """
    df_list = []
    for apg_dict, site in zip(l, sites):
        if apg_dict is None:
            continue
        for browser in apg_dict:
            for url in apg_dict[browser]:
                # NOTE: `url` is deliberately rebound to the parsed target
                # URL extracted from the apg URL.
                method, url, _ = parse_apg_url(url)
                # df_list.append({"method": method, "url": url, "browser": browser})
                df_list.append((method, url, browser, site, "nogroup"))
    # df = pd.DataFrame(df_list)
    # print(df_list[:5])
    df = pd.DataFrame(df_list, columns=["method", "url", "browser", "site", "nogroup"]).sort_values(["browser", "method", "site", "url"])
    method_cats = CategoricalDtype(categories=['audio', 'embed', 'embed-img', 'iframe', 'iframe-csp', 'img', 'link-prefetch', 'link-stylesheet', 'object', 'script', 'video', 'window.open'], ordered=True)
    if site_cat:
        site_cats = CategoricalDtype(categories=['pier1.com-unpruned', 'chartink.com-unpruned', 'pdffiller.com-unpruned', 'staples.ca-unpruned', 'freelogodesign.org-unpruned', 'duplichecker.com-unpruned', 'miro.com-unpruned', 'mnml.la-unpruned', 'redtube.com-unpruned', 'whatfontis.com-unpruned', 'glosbe.com-unpruned', 'wideads.com-unpruned', 'standardmedia.co.ke-unpruned', 'gyazo.com-unpruned', 'megogo.net-unpruned', 'zennioptical.com-unpruned', 'powtoon.com-unpruned', 'italki.com-unpruned', 'themehorse.com-unpruned', 'versobooks.com-unpruned', 'yourstory.com-unpruned', 'korrespondent.net-unpruned', 'transifex.com-unpruned', 'ankiweb.net-unpruned', 'iplocation.net-unpruned', 'youporn.com-unpruned', 'tmj4.com-unpruned', 'nimbusweb.me-unpruned', 'classifiedads.com-unpruned', 'myvidster.com-unpruned', 'cafepress.com-unpruned', 'pakwheels.com-unpruned', 'idntimes.com-unpruned', 'mhthemes.com-unpruned', 'universe.com-unpruned', 'aboutus.com-unpruned'], ordered=True)
        df["site"] = df["site"].astype(site_cats)
    browser_cats = CategoricalDtype(categories=["firefox", "chrome"], ordered=True)
    df["method"] = df["method"].astype(method_cats)
    df["browser"] = df["browser"].astype(browser_cats)
    return df
def get_predictions_retroactive(df, methods="limited"):
    """Return the decision-tree leak predictions for every site in *df*.

    For each site the stored URL data is loaded, basic pruning is applied
    and the trees predict which endpoints leak; the resulting attack-page
    URL dicts are collected in input order.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a "site" column.
    methods : str
        Method set passed to ``init`` (default "limited").

    Returns
    -------
    list of dict
        One (possibly empty) dict of predicted leak URLs per site.
    """
    init(methods)
    predicted_leak_urls = []
    for site in df["site"].tolist():
        dat = get_url_data(site)
        af, d, poss, results = basic_pruning(dat)
        if af is None:
            # Nothing left after pruning: no predictions for this site.
            urls = {}
        else:
            leaky_endpoints = predict_trees(af)
            if leaky_endpoints == {}:
                urls = {}
            else:
                leaks = reduce_leaky_endpoints(leaky_endpoints)
                incs = get_working_incs(leaks)
                urls = get_dyn_urls(leaks, incs, d, poss)
        predicted_leak_urls.append(urls)
    return predicted_leak_urls
def get_basic_pruning_reduction(row):
    """Fraction of URLs removed by basic pruning (None when no URLs)."""
    total = row["num_urls"]
    removed = total - row["num_basic_pruning"]
    return save_div(removed, total, ret=None)
def save_div(a, b, ret=0):
    """Divide *a* by *b*; return *ret* instead of raising when *b* is 0."""
    return ret if b == 0 else a / b
def get_stats(ground_truth, predicted_trees, all_combinations, info):
    """Calculate and display the pruning false negative data.

    Parameters
    ----------
    ground_truth : pandas.DataFrame
        Confirmed leak channels (method, url, browser, site, nogroup).
    predicted_trees : pandas.DataFrame
        Channels predicted as leaky by the trees (same columns).
    all_combinations : pandas.DataFrame
        Every tested channel combination (same columns).
    info : str
        Label printed above each result table.

    Returns
    -------
    dict
        Maps ``str(group_key)`` to a DataFrame with
        gt/all_comb/pred/size_red/fnr/fpr/tp/fn/fp/tn columns.
    """
    res = {}
    for group_key in [["nogroup"], ["method"], ["browser"], ["site"]]: #, ["browser", "method"]]: # Not working as not every group exist
        try:
            gts = ground_truth.groupby(group_key)
            preds = predicted_trees.groupby(group_key)
            all_combs = all_combinations.groupby(group_key)
            df = pd.DataFrame()
            # NOTE(review): the zip assumes all three groupbys yield the
            # same group names in the same order — verify for sparse groups.
            for (name, gt), (_, pred), (_, all_comb) in zip(gts, preds, all_combs):
                gt_len, pred_len, _, tp_len, fn_len, fp_len = get_only_both({"ground_truth": gt, "predicted_trees": pred}, ("ground_truth", "predicted_trees"))
                all_comb_len = all_comb.drop_duplicates().shape[0]
                gn_len = all_comb_len - gt_len  # ground-truth negatives
                size_red = save_div(all_comb_len, pred_len)  # size-reduction factor
                fnr = save_div(fn_len, gt_len)  # false-negative rate
                fpr = save_div(fp_len, gn_len)  # false-positive rate
                tn_len = all_comb_len - pred_len - fn_len
                res_line = [(name, gt_len, all_comb_len, pred_len, size_red, fnr, fpr, tp_len, fn_len, fp_len, tn_len)]
                columns = ["grouping", "gt", "all_comb", "pred", "size_red", "fnr", "fpr", "tp", "fn", "fp", "tn"]
                df = df.append(pd.DataFrame(res_line, columns=columns))
            if len(df) > 1:
                pass
                # df.loc["Mean"] = df.mean()
            res[str(group_key)] = df
        except KeyError as e:
            print(e)
    # Get size difference in all_combinations/predicted_trees/predicted_trees_all
    for entry in res:
        print(info)
        with pd.option_context("max_columns", None):
            print(entry)
            display(res[entry])
            # display(res[entry].describe())
    return res
def calc_info_frames(site_results_filtered, remove_multiple=None):
    """Return the info frames for the input.

    Parameters
    ----------
    site_results_filtered : pandas.DataFrame
        Filtered per-site pipeline results.
    remove_multiple : str or None
        When set to "method" or "inc_method", drop URLs that leak through
        more than one distinct value of that column per browser.

    Returns
    -------
    tuple
        Three ``(info_frame, info_frame_new)`` pairs: for all data, for
        leak URLs tested in both browsers only, and for leak URLs tested
        in a single browser only.
    """
    dat, conf_both, conf_any = get_pipeline_stats(site_results_filtered, log=False)
    df_all = get_conf_dfs(conf_any)
    if remove_multiple:
        url_by_leak = df_all.groupby(["browser", "url"])[["method", "inc_method"]].nunique()
        only_one_inc = set(url_by_leak.loc[url_by_leak[remove_multiple] == 1].reset_index()[["browser", "url"]].itertuples(name=None, index=False))
        df_all = df_all.loc[df_all[["browser", "url"]].apply(lambda x: (x["browser"], x["url"]) in only_one_inc, axis=1)]
    sites = dat["site"].tolist()
    leak_urls = url_list_to_tuples(dat["dyn_conf_urls"].tolist(), sites)
    leak_url_set = set(list(leak_urls.itertuples(name=None, index=None)))
    # Complete frame
    info_frame, info_frame_new = get_info_frames(df_all, None)
    # Prune all leak URLs only tested in one browser
    info_frame_both, info_frame_new_both = get_info_frames(df_all, leak_url_set, leave=[2])
    # Prune all leak URLs tested in both browsers
    info_frame_only, info_frame_new_only = get_info_frames(df_all, leak_url_set, leave=[1])
    return (info_frame, info_frame_new), (info_frame_both, info_frame_new_both), (info_frame_only, info_frame_new_only)
6738,
6831,
62,
8443,
273,
1330,
2018,
11,
1281,
34239,
13976,
62,
1462,
62,
7890,
14535,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
19798,
292,
13,
15042,
13,
19199,
1330,
327,
2397,
12409,
35,
4906,
198,
11748,
4818,
8079,
198... | 2.33203 | 13,047 |
#!/usr/bin/python3
"""
definition file for 2 dc motors on an adafruit DC and stepper motor HAT, 'left' and 'right'.
The motors can have rotary encoders and can use a speed mapping table to provide something approaching a linear response.
The configuration is defined by a list of motors. Each entry in the list defines a single motor. The full list defines a motorset's motors.
A single motor definition is defined by a dict, for a full specification see the individual motor class' documentation.
There are currently 2 similar classes that can be used:
motor in module dcmotorbasic
    motoranalyse in module motoranalyser
motoranalyse inherits from dcmotorbasic and provides additional methods to test and log the motor's performance. There are
associated jupyter notebooks that analyse the logs.
Both these classes have the same configuration definitions.
The motor and motoranalyse classes:
className : The name of the class to instantiate for this motor. See className in the details below.
name      : The name of the motor. Used in all further access to the motor within the motorset.
mdrive : The class that takes care of the low level interface to the motor - typically defined by the hardware in use, and the way in which
it is connected (direct gpio, through a HAT accessed through I2C, ...)
rotationsense : The class that tracks the motor's movement, it provides methods to detect the angle through which the motor has turned.
speedmapinfo : The class that takes a requested speed and turns it into the values used by the mdrive class to run the motor. For brushed dc
motors here, that is the frequency at which the motor is turned off and on, and the duty cycle that is applied.
logtypes : This is a list of the logging that is to be printed / recorded to file.
Standard parameters:
className: These strings identify a class, typically as <modulename>.<classname>. The class constructor is then called
using everything else in the dict as keyword parameters. Other parameters can be supplied by position or keyword.
"""
# Motor definitions consumed by the motorset: one dict per motor.
# Each entry names the motor, selects its driver class and wiring, and
# configures logging (see the module docstring for the full contract).
motordef = (
    {
        # Swap in 'motoranalyser.motoranalyse' for test/analysis logging.
        'className': 'dcmotorbasic.motor',
        'name': 'left',
        'mdrive': {'className': 'dc_adafruit_dchat.dc_m_hat', 'motorno': 4},
        'logtypes': (('phys', {'filename': 'leftlog.txt', 'format': '{setting} is {newval}.'}),),
    },
    {
        # Swap in 'motoranalyser.motoranalyse' for test/analysis logging.
        'className': 'dcmotorbasic.motor',
        'name': 'right',
        # 'invert' flips the drive direction for the mirrored right motor.
        'mdrive': {'className': 'dc_adafruit_dchat.dc_m_hat', 'motorno': 3, 'invert': True},
        'logtypes': (('phys', {'filename': 'rightlog.txt', 'format': '{setting} is {newval}.'}),),
    },
)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
37811,
220,
198,
46758,
2393,
329,
362,
30736,
24699,
319,
281,
512,
1878,
4872,
6257,
290,
2876,
2848,
5584,
367,
1404,
11,
705,
9464,
6,
290,
705,
3506,
4458,
198,
198,
464,
24699,
46... | 2.959835 | 971 |
import socket
import struct
import textwrap
# Indent prefixes for pretty-printing nested protocol fields (1-4 levels).
TAB_1 = '\t - '
TAB_2 = '\t\t - '
TAB_3 = '\t\t\t - '
TAB_4 = '\t\t\t\t - '
# Matching prefixes used when dumping raw payload data.
DATA_TAB_1 = '\t - '
DATA_TAB_2 = '\t\t - '
DATA_TAB_3 = '\t\t\t - '
DATA_TAB_4 = '\t\t\t\t - '
# unpack ethernet frame
# Translate MAC address
# unpack IPv4 packet
# Translate IPv4 address
# unpack ICMP packet
icmp_type, code, checksum = struct.unpack('! B B H', data[:4])
return icmp_type, code, checksum, data[4:]
# unpack TCP segment
return '\n'.join([prefix + line for line in textwrap(string, size)])
main()
| [
11748,
17802,
201,
198,
11748,
2878,
201,
198,
11748,
2420,
37150,
201,
198,
201,
198,
5603,
33,
62,
16,
796,
705,
59,
83,
532,
705,
201,
198,
5603,
33,
62,
17,
796,
705,
59,
83,
59,
83,
532,
705,
201,
198,
5603,
33,
62,
18,
7... | 1.996644 | 298 |
#!/usr/bin/python
from os.path import join,exists,dirname
import numpy as np
import pickle
from sklearn.datasets import load_svmlight_file, dump_svmlight_file
from sklearn.metrics import f1_score
from uda_common import zero_pivot_columns, zero_nonpivot_columns, read_pivots, evaluate_and_print_scores, align_test_X_train, get_f1, find_best_c, read_feature_groups, read_feature_lookup
import os
import scipy.sparse
import sys
from sklearn import svm
from sklearn.feature_selection import chi2
## This script gets a baseline for domain adaptation based on a combined training
## set containing source and target training data. This is a better ceiling for
## adaptation performance than source-source or target-target evaluations.
## This way, if guidelines are different, the discriminating line p(y|x) will
## be different, and performance will be lower than target-target.
## Since we have been using target _trainign_ set for testing (for greater power)
## we have a problem adding it because we can't have it be part of training and
## test set. So what I do is basically 2-fold experiments where half the target
## training data is added to the source, test on the other half, and then reverse
## and calculate again.
# Script entry point: forward the command-line arguments (minus the
# program name) to ``main``, which is defined above (not in this excerpt).
if __name__ == "__main__":
    main(sys.argv[1:])
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
6738,
28686,
13,
6978,
1330,
4654,
11,
1069,
1023,
11,
15908,
3672,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2298,
293,
198,
6738,
1341,
35720,
13,
19608,
292,
1039,
1330,
3440,
62,
82... | 3.548747 | 359 |
#!/usr/bin/env python
# coding=utf-8
import inkex
# ---------------------------------
# UTILITIES
# Common standards
# Braille cells are Unicode code points in the U+2800 block; every table in
# this module maps an input character to one such cell (or a tuple of them).
UPPERCASE_PREFIXES = {
    chr(15): 0x2828, # uppercase prefix: https://codepoints.net/U+000F
}
LOUIS_BRAILLE_NUMBERS_PREFIX = 0x283c # Louis Braille's numbers prefix
LOUIS_BRAILLE_NUMBERS = { # Louis Braille's original numbers codification
    "0": 0x281a,
    "1": 0x2801,
    "2": 0x2803,
    "3": 0x2809,
    "4": 0x2819,
    "5": 0x2811,
    "6": 0x280B,
    "7": 0x281b,
    "8": 0x2813,
    "9": 0x280a,
}
# ---------------------
# English based locales
EN_ASCII = " A1B'K2L@CIF/MSP\"E3H9O6R^DJG>NTQ,*5<-U8V.%[$+X!&;:4\\0Z7(_?W]#Y)="
# Spanish based locales
ES_LETTERS = {
"A": 0x2801,
"B": 0x2803,
"C": 0x2809,
"D": 0x2819,
"E": 0x2811,
"F": 0x280B,
"G": 0x281b,
"H": 0x2813,
"I": 0x280a,
"J": 0x281a,
"K": 0x2805,
"L": 0x2807,
"M": 0x280d,
"N": 0x281d,
"Ñ": 0x283b,
"O": 0x2815,
"P": 0x280f,
"Q": 0x281f,
"R": 0x2817,
"S": 0x280e,
"T": 0x281e,
"U": 0x2825,
"V": 0x2827,
"W": 0x283a,
"X": 0x282d,
"Y": 0x283d,
"Z": 0x2835,
}
ES_SIGNS = {
" ": 0x2800, # braille space
"ª": 0x2801, # ordinal (feminine) -> same as A
"º": 0x2815, # ordinal (masculine) -> same as O
"&": 0x282f,
".": 0x2804,
",": 0x2802,
":": 0x2812,
";": 0x2806,
"¿": 0x2822,
"?": 0x2822,
"¡": 0x2816,
"!": 0x2816,
'"': 0x2826,
"(": 0x2823,
")": 0x281c,
# "[": 0x2837, collides with "Á" (Spanish and Catalan)
# "]": 0x283e, collides with "Ú" (Spanish and Catalan)
"*": 0x2814,
# math
"-": 0x2824,
"=": 0x2836,
"×": 0x2826, # multiplication
"÷": 0x2832, # division
"+": 0x2816,
"@": 0x2810,
}
ES_ACCENT_MARKS = {
"Á": 0x2837,
"É": 0x282e,
"Í": 0x280c,
"Ó": 0x282c,
"Ú": 0x283e,
"Ü": 0x2833,
}
# Multi-cell sequences for Spanish-based locales: each entry maps one
# character to a tuple of braille cells emitted in order.
ES_COMBINATIONS = {
    # signs
    "%": (0x2838, 0x2834),
    "‰": (0x2838, 0x2834, 0x2834), # per mile
    "/": (0x2820, 0x2802),
    "\\": (0x2810, 0x2804),
    "<": (0x2810, 0x2805),
    ">": (0x2828, 0x2802),
    "|": (0x2838, 0x2807),
    "{": (0x2810, 0x2807),
    "}": (0x2838, 0x2802),
    "–": (0x2824, 0x2824), # two different unicode dashes
    "—": (0x2824, 0x2824),
    "…": (0x2804, 0x2804, 0x2804),
    # legal
    "©": (0x2823, 0x2828, 0x2809, 0x281c), # copyright
    "®": (0x2823, 0x2828, 0x2817, 0x281c), # registered
    "℗": (0x2823, 0x2828, 0x280f, 0x281c),
    "🄯": (0x2823, 0x2828, 0x2807, 0x281c),
    # currencies
    "€": (0x2838, 0x2811),
    "$": (0x2838, 0x280e),
    "¢": (0x2818, 0x2809),
    "£": (0x2810, 0x282e),
    "¥": (0x2838, 0x283d),
    # NOTE(review): the next entry repeats a yen-like key with the same
    # value; if both are the same code point the earlier one is redundant
    # (the later literal wins) — confirm the intended code points
    # (e.g. U+00A5 vs U+FFE5).
    "¥": (0x2838, 0x283d),
}
CA_ACCENT_MARKS = {
"É": 0x283f,
"Í": 0x280c,
"Ó": 0x282a,
"Ú": 0x283e,
"À": 0x2837,
"È": 0x282e,
"Ò": 0x282c,
"Ï": 0x283b,
"Ü": 0x2833,
"Ç": 0x282f,
}
# French based locales
FR_LETTERS = {
"A": 0x2801,
"B": 0x2803,
"C": 0x2809,
"D": 0x2819,
"E": 0x2811,
"F": 0x280b,
"G": 0x281b,
"H": 0x2813,
"I": 0x280a,
"J": 0x281a,
"K": 0x2805,
"L": 0x2807,
"M": 0x280d,
"N": 0x281d,
"O": 0x2815,
"P": 0x280f,
"Q": 0x281f,
"R": 0x2817,
"S": 0x280e,
"T": 0x281e,
"U": 0x2825,
"V": 0x2827,
"W": 0x283a,
"X": 0x282d,
"Y": 0x283d,
"Z": 0x2835,
}
FR_ACCENT_MARKS = {
"É": 0x283f,
"À": 0x2837,
"È": 0x282e,
"Ù": 0x283e,
"Â": 0x2821,
"Ê": 0x2823,
"Î": 0x2829,
"Ô": 0x2839,
"Û": 0x2831,
"Ë": 0x282b,
"Ï": 0x283b,
"Ü": 0x2833,
"Ç": 0x282f,
"Œ": 0x282a, # oe ligature
}
FR_SIGNS = {
" ": 0x2800, # braille space
",": 0x2802,
";": 0x2806,
":": 0x2812,
".": 0x2832,
"?": 0x2822,
"!": 0x2816,
"«": 0x2836,
"»": 0x2836,
"“": 0x2836,
"”": 0x2836,
'"': 0x2836,
"‘": 0x2836,
"’": 0x2836,
"(": 0x2826,
")": 0x2834,
"'": 0x2804,
"'": 0x2804,
"/": 0x280c,
"@": 0x281c,
"^": 0x2808, # elevation exponent
"-": 0x2824,
"+": 0x2816,
"×": 0x2814, # multiplication
"÷": 0x2812, # division
"=": 0x2836,
}
FR_COMBINATIONS = {
"↔": (0x282a, 0x2812, 0x2815), # bidirectional arrow
"←": (0x282a, 0x2812, 0x2812), # left arrow
"→": (0x2812, 0x2812, 0x2815), # right arrow
"…": (0x2832, 0x2832, 0x2832), # unicode ellipsis
"–": (0x2824, 0x2824),
"—": (0x2824, 0x2824),
"_": (0x2810, 0x2824),
"[": (0x2818, 0x2826),
"]": (0x2834, 0x2803),
"°": (0x2810, 0x2815), # degrees
"§": (0x2810, 0x280f), # paragraph/section symbol
"&": (0x2810, 0x283f),
"\\": (0x2810, 0x280c),
"#": (0x2810, 0x283c),
"{": (0x2820, 0x2820, 0x2826),
"}": (0x2834, 0x2804, 0x2804),
# math
"µ": (0x2818, 0x280d), # micron
"π": (0x2818, 0x280f),
"≤": (0x2818, 0x2823),
"≥": (0x2818, 0x281c),
"<": (0x2810, 0x2823),
">": (0x2810, 0x281c),
"~": (0x2810, 0x2822),
"*": (0x2810, 0x2814),
"%": (0x2810, 0x282c),
"‰": (0x2810, 0x282c, 0x282c), # per mile
# legal
"©": (0x2810, 0x2809), # copyright
"®": (0x2810, 0x2817), # registered
"™": (0x2810, 0x281e), # trademark
# currencies
"¢": (0x2818, 0x2809),
"€": (0x2818, 0x2811),
"£": (0x2818, 0x2807),
"$": (0x2818, 0x280e),
"¥": (0x2818, 0x283d),
"¥": (0x2818, 0x283d),
}
# German based locales
DE_ACCENT_MARKS = {
"Ä": 0x281c,
"Ö": 0x282a,
"Ü": 0x2833,
}
DE_SIGNS = {
" ": 0x2800, # braille space
",": 0x2802,
";": 0x2806,
":": 0x2812,
"?": 0x2822,
"!": 0x2816,
"„": 0x2826,
"“": 0x2834,
"§": 0x282c,
".": 0x2804,
"–": 0x2824,
"‚": 0x2820,
}
# Multi-cell sequences for German: each entry maps one character to a tuple
# of braille cells emitted in order.
DE_COMBINATIONS = {
    # signs
    "ß": (0x282e,), # converted to 'SS' if uppercased, so defined in combinations
    "|": (0x2810, 0x2824),
    "[": (0x2818, 0x2837),
    "]": (0x2818, 0x283e),
    # NOTE(review): "/" appears twice in this literal with DIFFERENT
    # values; Python keeps only the last one, so this first mapping
    # (0x2818, 0x280c) is dead — confirm which sequence German braille
    # actually specifies and delete the wrong entry.
    "/": (0x2818, 0x280c),
    "`": (0x2820, 0x2826),
    "´": (0x2820, 0x2834),
    "/": (0x2810, 0x2802),
    "&": (0x2810, 0x2825),
    "*": (0x2820, 0x2814),
    "→": (0x2812, 0x2812, 0x2815),
    "←": (0x282a, 0x2812, 0x2812),
    "↔": (0x282a, 0x2812, 0x2812, 0x2815),
    "%": (0x283c, 0x281a, 0x2834),
    "‰": (0x283c, 0x281a, 0x2834, 0x2834),
    "°": (0x2808, 0x2834),
    "′": (0x2808, 0x2814),
    "″": (0x2808, 0x2814, 0x2814),
    "@": (0x2808, 0x281c),
    "_": (0x2808, 0x2838),
    "#": (0x2808, 0x283c),
    # currencies
    "€": (0x2808, 0x2811),
    "$": (0x2808, 0x280e),
    "¢": (0x2808, 0x2809),
    "£": (0x2808, 0x2807),
    # legal
    "©": (0x2836, 0x2818, 0x2809, 0x2836),
    "®": (0x2836, 0x2818, 0x2817, 0x2836),
}
# END: UTILITIES
# ---------------------------------
# LOCALE FUNCTIONS
def en_char_map(char):
    """English chars mapper.
    Source: https://en.wikipedia.org/wiki/Braille_ASCII#Braille_ASCII_values
    """
    position = EN_ASCII.find(char.upper())
    if position == -1:
        # No Braille-ASCII slot for this character: pass it through.
        return char
    return chr(position + 0x2800)
def numbers_singleuppers_combinations_factory(
    numbers_map,
    singleuppers_map,
    combinations_map, # also individual characters that are modified if uppercased
    number_prefix,
    uppercase_prefix,
):
    """Wrapper for various character mappers implementations.

    Parameters: ``numbers_map`` (digit -> braille cell), ``singleuppers_map``
    (single characters, letters and signs), ``combinations_map``
    (character -> tuple of braille cells), plus the number and uppercase
    prefix cells.

    NOTE(review): ``char_mapper`` is returned but never defined anywhere in
    this excerpt — as shown, calling this factory raises ``NameError``. The
    inner closure appears to have been lost; restore it from the full
    source before shipping.
    """
    return char_mapper
def es_char_map_loader():
    """Spanish/Galician chars mappers.
    Source: https://sid.usal.es/idocs/F8/FDO12069/signografiabasica.pdf
    """
    single_cells = {}
    single_cells.update(ES_LETTERS)
    single_cells.update(ES_ACCENT_MARKS)
    single_cells.update(ES_SIGNS)
    single_cells.update(UPPERCASE_PREFIXES)
    return numbers_singleuppers_combinations_factory(
        LOUIS_BRAILLE_NUMBERS,
        single_cells,
        ES_COMBINATIONS,
        0x283c,
        0x2828,
    )
def eu_char_map_loader():
    """Euskera chars mapper.
    Same tables as the Spanish mapper, minus the accent marks.
    Source: https://sid.usal.es/idocs/F8/FDO12069/signografiabasica.pdf
    """
    single_cells = {}
    single_cells.update(ES_LETTERS)
    single_cells.update(ES_SIGNS)
    single_cells.update(UPPERCASE_PREFIXES)
    return numbers_singleuppers_combinations_factory(
        LOUIS_BRAILLE_NUMBERS,
        single_cells,
        ES_COMBINATIONS,
        0x283c,
        0x2828,
    )
def ca_char_map_loader():
    """Catalan/Valencian chars mappers.
    Same tables as the Spanish mapper but with Catalan accent marks.
    Source: https://sid.usal.es/idocs/F8/FDO12069/signografiabasica.pdf
    """
    single_cells = {}
    single_cells.update(ES_LETTERS)
    single_cells.update(CA_ACCENT_MARKS)
    single_cells.update(ES_SIGNS)
    single_cells.update(UPPERCASE_PREFIXES)
    return numbers_singleuppers_combinations_factory(
        LOUIS_BRAILLE_NUMBERS,
        single_cells,
        ES_COMBINATIONS,
        0x283c,
        0x2828,
    )
def fr_char_map_loader():
    """French chars mapper.
    Source: https://sid.usal.es/idocs/F8/FDO12069/signografiabasica.pdf
    """
    single_cells = {}
    single_cells.update(FR_LETTERS)
    single_cells.update(FR_ACCENT_MARKS)
    single_cells.update(FR_SIGNS)
    single_cells.update(UPPERCASE_PREFIXES)
    return numbers_singleuppers_combinations_factory(
        LOUIS_BRAILLE_NUMBERS,
        single_cells,
        FR_COMBINATIONS,
        0x283c,
        0x2828,
    )
def de_char_map_loader():
    """German chars mapper.
    - For letters, uses the same dictionary as French implementation.
    Source: http://bskdl.org/textschrift.html
    """
    single_cells = {}
    single_cells.update(FR_LETTERS)  # German letters match the French table
    single_cells.update(DE_ACCENT_MARKS)
    single_cells.update(DE_SIGNS)
    single_cells.update(UPPERCASE_PREFIXES)
    return numbers_singleuppers_combinations_factory(
        LOUIS_BRAILLE_NUMBERS,
        single_cells,
        DE_COMBINATIONS,
        0x283c,
        0x2828,
    )
# END: LOCALE FUNCTIONS
# Maps ISO 639-1 locale codes either to a character mapper directly ("en")
# or to a loader factory whose __name__ contains "loader"; the factory is
# resolved lazily in BrailleL18n.process_chardata.
LOCALE_CHARMAPS = {
    "en": en_char_map, # English
    "es": es_char_map_loader, # Spanish
    "fr": fr_char_map_loader, # French
    "de": de_char_map_loader, # German
    "gl": es_char_map_loader, # Galician
    "eu": eu_char_map_loader, # Euskera
    "ca": ca_char_map_loader, # Catalan/Valencian
}
# ---------------------------------
# EXTENSION
class BrailleL18n(inkex.TextExtension):
    """Convert to Braille giving a localized map of replacements."""
    def process_chardata(self, text):
        """Map every character of *text* through the configured locale."""
        mapper = LOCALE_CHARMAPS[self.options.locale]
        # Entries whose function name contains "loader" are factories and
        # must be called once to obtain the actual character mapper.
        if "loader" in mapper.__name__:
            mapper = mapper()
        translated = [mapper(character) for character in text]
        return ''.join(translated)
if __name__ == '__main__':
BrailleL18n().run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
28,
40477,
12,
23,
198,
198,
11748,
287,
365,
87,
198,
198,
2,
20368,
12,
198,
198,
2,
19255,
4146,
30383,
198,
198,
2,
8070,
5423,
198,
198,
8577,
18973,
34,
11159,
62,
... | 1.759798 | 6,124 |
"""Neuron simulation functions and NetworkBuilder class."""
# Authors: Mainak Jas <mjas@mgh.harvard.edu>
# Sam Neymotin <samnemo@gmail.com>
# Blake Caldwell <blake_caldwell@brown.edu>
from copy import deepcopy
import numpy as np
from neuron import h
# This is due to: https://github.com/neuronsimulator/nrn/pull/746
from neuron import __version__
if int(__version__[0]) >= 8:
h.nrnunit_use_legacy(1)
from .cell import _ArtificialCell
from .params import _long_name, _short_name
from .extracellular import _ExtracellularArrayBuilder
from .network import pick_connection
# a few globals
_PC = None
_CVODE = None
# We need to maintain a reference to the last
# NetworkBuilder instance that ran pc.gid_clear(). Even if
# pc is global, if pc.gid_clear() is called within a new
# NetworkBuilder, it will seg fault.
_LAST_NETWORK = None
def _simulate_single_trial(net, tstop, dt, trial_idx):
    """Simulate one trial including building the network
    This is used by both backends. MPIBackend calls this in mpi_child.py, once
    for each trial (blocking), and JoblibBackend calls this for each trial
    (non-blocking)

    Parameters
    ----------
    net : Network
        The network specification to instantiate in NEURON.
    tstop : float
        Simulation stop time (ms).
    dt : float
        Integration time step (ms).
    trial_idx : int
        Index of the trial (selects the per-trial event statistics).

    Returns
    -------
    dict
        Dipole data, spike times/gids, gid ranges, somatic voltages and
        currents, extracellular recordings and the time vector.
    """
    neuron_net = NetworkBuilder(net, trial_idx=trial_idx)
    global _PC, _CVODE
    h.load_file("stdrun.hoc")
    rank = _get_rank()
    nhosts = _get_nhosts()
    # Now let's simulate the dipole
    _PC.barrier()  # sync for output to screen
    if rank == 0:
        print("running trial %d on %d cores" %
              (trial_idx + 1, nhosts))
    # Set tstop before instantiating any classes
    h.tstop = tstop
    h.dt = dt  # simulation duration and time-step
    h.celsius = net._params['celsius']  # 37.0 - set temperature
    times = h.Vector().record(h._ref_t)
    # sets the default max solver step in ms (purposefully large)
    _PC.set_maxstep(10)
    # initialize cells to -65 mV, after all the NetCon
    # delays have been specified
    h.finitialize()
    if rank == 0:
        # NOTE(review): `simulation_time` is not defined in this excerpt;
        # presumably a progress-printing callback defined elsewhere in the
        # module — confirm against the full source.
        for tt in range(0, int(h.tstop), 10):
            _CVODE.event(tt, simulation_time)
    h.fcurrent()
    # initialization complete, but wait for all procs to start the solver
    _PC.barrier()
    # actual simulation - run the solver
    _PC.psolve(h.tstop)
    _PC.barrier()
    # these calls aggregate data across procs/nodes
    neuron_net.aggregate_data()
    # now convert data from Neuron into Python
    vsoma_py = dict()
    for gid, rec_v in neuron_net._vsoma.items():
        vsoma_py[gid] = rec_v.to_python()
    isoma_py = dict()
    for gid, rec_i in neuron_net._isoma.items():
        isoma_py[gid] = {key: rec_i.to_python()
                         for key, rec_i in rec_i.items()}
    # Dipole columns: aggregate (L2 + L5), then each layer separately.
    dpl_data = np.c_[
        neuron_net._nrn_dipoles['L2_pyramidal'].as_numpy() +
        neuron_net._nrn_dipoles['L5_pyramidal'].as_numpy(),
        neuron_net._nrn_dipoles['L2_pyramidal'].as_numpy(),
        neuron_net._nrn_dipoles['L5_pyramidal'].as_numpy()
    ]
    rec_arr_py = dict()
    rec_times_py = dict()
    for arr_name, nrn_arr in neuron_net._nrn_rec_arrays.items():
        rec_arr_py.update({arr_name: nrn_arr._get_nrn_voltages()})
        rec_times_py.update({arr_name: nrn_arr._get_nrn_times()})
    data = {'dpl_data': dpl_data,
            'spike_times': neuron_net._all_spike_times.to_python(),
            'spike_gids': neuron_net._all_spike_gids.to_python(),
            'gid_ranges': net.gid_ranges,
            'vsoma': vsoma_py,
            'isoma': isoma_py,
            'rec_data': rec_arr_py,
            'rec_times': rec_times_py,
            'times': times.to_python()}
    return data
def _get_nhosts():
    """Return the number of processors used by ParallelContext

    Returns
    -------
    nhosts: int
        Value from pc.nhost(), or 1 when no ParallelContext exists yet.
    """
    if _PC is None:
        return 1
    return int(_PC.nhost())
def _get_rank():
    """Return the MPI rank from ParallelContext

    Returns
    -------
    rank: int
        Value from pc.id(), or 0 when no ParallelContext exists yet.
    """
    if _PC is None:
        return 0
    return int(_PC.id())
def _create_parallel_context(n_cores=None, expose_imem=False):
    """Create parallel context.

    Initializes (or reuses) the module-level ``_PC``/``_CVODE`` globals.

    Parameters
    ----------
    n_cores: int | None
        Number of processors to use for a simulation. A value of None will
        allow NEURON to use all available processors.
    expose_imem : bool
        If True, sets _CVODE.use_fast_imem(1) (default: False)
    """
    global _CVODE, _PC
    if _PC is None:
        if n_cores is None:
            # MPI: Initialize the ParallelContext class
            _PC = h.ParallelContext()
        else:
            _PC = h.ParallelContext(n_cores)
        _CVODE = h.CVode()
        # use cache_efficient mode for allocating elements in contiguous order
        # cvode.cache_efficient(1)
    else:
        # ParallelContext() has already been called. Don't start more workers.
        # Just tell old nrniv workers to quit.
        _PC.done()
    # be explicit about using fixed step integration
    _CVODE.active(0)
    # note that CVode seems to forget this setting in either parallel backend
    if expose_imem:
        _CVODE.use_fast_imem(1)
class NetworkBuilder(object):
    """The NetworkBuilder class.
    Parameters
    ----------
    net : Network object
        The instance of Network to instantiate in NEURON-Python
    trial_idx : int (optional)
        Index number of the trial being processed (different event statistics).
        Defaults to 0.
    Attributes
    ----------
    trial_idx : int
        The index number of the current trial of a simulation.
    ncs : dict of list
        A dictionary with key describing the types of cell objects connected
        and contains a list of NetCon objects.
    Notes
    -----
    NetworkBuilder is not a pickleable class because it contains many NEURON
    objects once it has been instantiated. This is important for the Joblib
    backend that passes a pickled Network object to each forked process (job)
    and only instantiates NetworkBuilder after the fork.
    The `_build` routine can be called again to run more simulations without
    creating new `nrniv` processes. Instead, the NEURON objects are recreated
    and gids are reassigned according to the specifications in
    `self.net._params` and the network is ready for another simulation.
    """
    # NOTE(review): instance attributes referenced below (self.net,
    # self._gid_list, self._cells, self._drive_cells, self.ncs,
    # self._nrn_dipoles, self._nrn_rec_arrays, self._expose_imem,
    # self.trial_idx, self._vsoma, self._isoma) are presumably initialized
    # in an __init__ that is not part of this chunk -- confirm against the
    # full file.  _record_extracellular is also called but not defined here.
    def _build(self):
        """Building the network in NEURON."""
        global _CVODE, _PC
        _create_parallel_context(expose_imem=self._expose_imem)
        self._rank = _get_rank()
        # load mechanisms needs ParallelContext for get_rank
        load_custom_mechanisms()
        if self._rank == 0:
            print('Building the NEURON model')
        self._clear_last_network_objects()
        # per-cell-type dipole accumulators, summed in aggregate_data()
        self._nrn_dipoles['L5_pyramidal'] = h.Vector()
        self._nrn_dipoles['L2_pyramidal'] = h.Vector()
        self._gid_assign()
        record_vsoma = self.net._params['record_vsoma']
        record_isoma = self.net._params['record_isoma']
        self._create_cells_and_drives(threshold=self.net._params['threshold'],
                                      record_vsoma=record_vsoma,
                                      record_isoma=record_isoma)
        self.state_init()
        # set to record spikes, somatic voltages, and extracellular potentials
        self._spike_times = h.Vector()
        self._spike_gids = h.Vector()
        # used by rank 0 for spikes across all procs (MPI)
        self._all_spike_times = h.Vector()
        self._all_spike_gids = h.Vector()
        self._record_spikes()
        self._connect_celltypes()
        if len(self.net.rec_arrays) > 0:
            self._record_extracellular()
        if self._rank == 0:
            print('[Done]')
    def _gid_assign(self, rank=None, n_hosts=None):
        """Assign cell IDs to this node
        Parameters
        ----------
        rank : int | None
            If not None, override the rank set
            automatically using Neuron. Used for testing.
        n_hosts : int | None
            If not None, override the number of hosts set
            automatically using Neuron. Used for testing.
        """
        if rank is not None:
            self._rank = rank
        if n_hosts is None:
            n_hosts = _get_nhosts()
        # round robin assignment of cell gids
        for gid in range(self._rank, self.net._n_cells, n_hosts):
            self._gid_list.append(gid)
        for drive in self.net.external_drives.values():
            if drive['cell_specific']:
                # only assign drive gids that have a target cell gid already
                # assigned to this rank
                for src_gid in self.net.gid_ranges[drive['name']]:
                    conn_idxs = pick_connection(self.net, src_gids=src_gid)
                    target_gids = list()
                    for conn_idx in conn_idxs:
                        gid_pairs = self.net.connectivity[
                            conn_idx]['gid_pairs']
                        if src_gid in gid_pairs:
                            target_gids += (self.net.connectivity[conn_idx]
                                            ['gid_pairs'][src_gid])
                    # add the drive gid at most once per rank
                    for target_gid in set(target_gids):
                        if (target_gid in self._gid_list and
                                src_gid not in self._gid_list):
                            self._gid_list.append(src_gid)
            else:
                # round robin assignment of drive gids
                src_gids = list(self.net.gid_ranges[drive['name']])
                for gid_idx in range(self._rank, len(src_gids), n_hosts):
                    self._gid_list.append(src_gids[gid_idx])
        # extremely important to get the gids in the right order
        self._gid_list.sort()
    def _create_cells_and_drives(self, threshold, record_vsoma=False,
                                 record_isoma=False):
        """Parallel create cells AND external drives
        NB: _Cell.__init__ calls h.Section -> non-picklable!
        NB: _ArtificialCell.__init__ calls h.*** -> non-picklable!
        These drives are spike SOURCES but cells are also targets.
        External inputs are not targets.
        """
        # register every local gid with this rank before instantiating
        for gid in self._gid_list:
            _PC.set_gid2node(gid, self._rank)
        # loop through ALL gids
        # have to loop over self._gid_list, since this is what we got
        # on this rank (MPI)
        for gid in self._gid_list:
            src_type = self.net.gid_to_type(gid)
            gid_idx = gid - self.net.gid_ranges[src_type][0]
            if src_type in self.net.cell_types:
                # copy cell object from template cell type in Network
                cell = self.net.cell_types[src_type].copy()
                cell.gid = gid
                cell.pos = self.net.pos_dict[src_type][gid_idx]
                # instantiate NEURON object
                if src_type in ('L2_pyramidal', 'L5_pyramidal'):
                    cell.build(sec_name_apical='apical_trunk')
                else:
                    cell.build()
                # add tonic biases
                if ('tonic' in self.net.external_biases and
                        src_type in self.net.external_biases['tonic']):
                    cell.create_tonic_bias(**self.net.external_biases
                                           ['tonic'][src_type])
                cell.record_soma(record_vsoma, record_isoma)
                # this call could belong in init of a _Cell (with threshold)?
                nrn_netcon = cell.setup_source_netcon(threshold)
                assert cell.gid in self._gid_list
                _PC.cell(cell.gid, nrn_netcon)
                self._cells.append(cell)
            # external driving inputs are special types of artificial-cells
            else:
                event_times = self.net.external_drives[
                    src_type]['events'][self.trial_idx][gid_idx]
                drive_cell = _ArtificialCell(event_times, threshold, gid=gid)
                _PC.cell(drive_cell.gid, drive_cell.nrn_netcon)
                self._drive_cells.append(drive_cell)
    # connections:
    # this NODE is aware of its cells as targets
    # for each syn, return list of source GIDs.
    # for each item in the list, do a:
    # nc = pc.gid_connect(source_gid, target_syn), weight,delay
    # Both for synapses AND for external inputs
    def _connect_celltypes(self):
        """Connect two cell types for a particular receptor."""
        net = self.net
        connectivity = self.net.connectivity
        assert len(self._cells) == len(self._gid_list) - len(self._drive_cells)
        for conn in connectivity:
            loc, receptor = conn['loc'], conn['receptor']
            nc_dict = deepcopy(conn['nc_dict'])
            # Gather indices of targets on current node
            valid_targets = set()
            for src_gid, target_gids in conn['gid_pairs'].items():
                filtered_targets = list()
                for target_gid in target_gids:
                    # only keep targets instantiated on this rank
                    if _PC.gid_exists(target_gid):
                        filtered_targets.append(target_gid)
                        valid_targets.add(target_gid)
                conn['gid_pairs'][src_gid] = filtered_targets
            # map local target gid -> index into self._cells
            target_filter = dict()
            for idx in range(len(self._cells)):
                gid = self._gid_list[idx]
                if gid in valid_targets:
                    target_filter[gid] = idx
            # Iterate over src/target pairs and connect cells
            for src_gid, target_gids in conn['gid_pairs'].items():
                for target_gid in target_gids:
                    src_type = self.net.gid_to_type(src_gid)
                    target_type = self.net.gid_to_type(target_gid)
                    target_cell = self._cells[target_filter[target_gid]]
                    connection_name = f'{_short_name(src_type)}_'\
                        f'{_short_name(target_type)}_{receptor}'
                    if connection_name not in self.ncs:
                        self.ncs[connection_name] = list()
                    pos_idx = src_gid - net.gid_ranges[_long_name(src_type)][0]
                    # NB pos_dict for this drive must include ALL cell types!
                    nc_dict['pos_src'] = net.pos_dict[
                        _long_name(src_type)][pos_idx]
                    # get synapse locations
                    syn_keys = list()
                    if loc in ['proximal', 'distal']:
                        for sect in target_cell.sect_loc[loc]:
                            syn_keys.append(f'{sect}_{receptor}')
                    else:
                        syn_keys = [f'{loc}_{receptor}']
                    for syn_key in syn_keys:
                        nc = target_cell.parconnect_from_src(
                            src_gid, deepcopy(nc_dict),
                            target_cell._nrn_synapses[syn_key],
                            net._inplane_distance)
                        self.ncs[connection_name].append(nc)
    def _record_spikes(self):
        """Setup spike recording for this node"""
        # iterate through gids on this node and
        # set to record spikes in spike time vec and id vec
        # agnostic to type of source, will sort that out later
        for gid in self._gid_list:
            if _PC.gid_exists(gid):
                _PC.spike_record(gid, self._spike_times, self._spike_gids)
    def aggregate_data(self):
        """Aggregate somatic currents, voltages, and dipoles."""
        for cell in self._cells:
            if cell.name in ('L5Pyr', 'L2Pyr'):
                nrn_dpl = self._nrn_dipoles[_long_name(cell.name)]
                # dipoles are initialized as empty h.Vector() containers
                # the first cell is "appended", setting the
                # length of the vector, after which cell data are added (sum)
                if nrn_dpl.size() > 0:
                    nrn_dpl.add(cell.dipole)
                else:
                    nrn_dpl.append(cell.dipole)
            self._vsoma[cell.gid] = cell.rec_v
            self._isoma[cell.gid] = cell.rec_i
        # combine dipole contributions across ranks (the second argument
        # presumably selects summation -- confirm against NEURON docs)
        _PC.allreduce(self._nrn_dipoles['L5_pyramidal'], 1)
        _PC.allreduce(self._nrn_dipoles['L2_pyramidal'], 1)
        for nrn_arr in self._nrn_rec_arrays.values():
            _PC.allreduce(nrn_arr._nrn_voltages, 1)
        # aggregate the currents and voltages independently on each proc
        vsoma_list = _PC.py_gather(self._vsoma, 0)
        isoma_list = _PC.py_gather(self._isoma, 0)
        # combine spiking data from each proc
        spike_times_list = _PC.py_gather(self._spike_times, 0)
        spike_gids_list = _PC.py_gather(self._spike_gids, 0)
        # only rank 0's lists are complete
        if _get_rank() == 0:
            for spike_vec in spike_times_list:
                self._all_spike_times.append(spike_vec)
            for spike_vec in spike_gids_list:
                self._all_spike_gids.append(spike_vec)
            for vsoma in vsoma_list:
                self._vsoma.update(vsoma)
            for isoma in isoma_list:
                self._isoma.update(isoma)
        _PC.barrier()  # get all nodes to this place before continuing
    def state_init(self):
        """Initializes the state closer to baseline."""
        # Hard-coded resting potentials per cell type / section name.
        for cell in self._cells:
            seclist = h.SectionList()
            seclist.wholetree(sec=cell._nrn_sections['soma'])
            for sect in seclist:
                for seg in sect:
                    if cell.name == 'L2Pyr':
                        seg.v = -71.46
                    elif cell.name == 'L5Pyr':
                        if sect.name() == 'L5Pyr_apical_1':
                            seg.v = -71.32
                        elif sect.name() == 'L5Pyr_apical_2':
                            seg.v = -69.08
                        elif sect.name() == 'L5Pyr_apical_tuft':
                            seg.v = -67.30
                        else:
                            seg.v = -72.
                    elif cell.name == 'L2Basket':
                        seg.v = -64.9737
                    elif cell.name == 'L5Basket':
                        seg.v = -64.9737
    def _clear_neuron_objects(self):
        """Clear up NEURON internal gid and reference information.
        Note: This function must be called from the context of the
        Network instance that ran `_build`. This is a bug or
        peculiarity of NEURON. If this function is called from a different
        context, then the next simulation will run very slow because nrniv
        workers are still going for the old simulation. If pc.gid_clear is
        called from the right context, then those workers can exit.
        """
        _PC.gid_clear()
        # dereference cell and NetConn objects
        for gid, cell in zip(self._gid_list, self._cells):
            # only work on cells on this node
            if _PC.gid_exists(gid):
                for nc_key in self.ncs:
                    for nc in self.ncs[nc_key]:
                        if nc.valid():
                            # delete NEURON cell object
                            cell_obj1 = nc.precell(gid)
                            if cell_obj1 is not None:
                                del cell_obj1
                            cell_obj2 = nc.postcell(gid)
                            if cell_obj2 is not None:
                                del cell_obj2
                        del nc
        self._gid_list = list()
        self._cells = list()
        self._drive_cells = list()
        # NB needed if multiple simulations are run in same python proc.
        # removes callbacks used to gather transmembrane currents
        for nrn_arr in self._nrn_rec_arrays.values():
            if nrn_arr._recording_callback is not None:
                _CVODE.extra_scatter_gather_remove(nrn_arr._recording_callback)
    def _clear_last_network_objects(self):
        """Clears NEURON objects and saves the current Network instance"""
        global _LAST_NETWORK
        # the previously-built network must release its NEURON gids first
        if _LAST_NETWORK is not None:
            _LAST_NETWORK._clear_neuron_objects()
        self._clear_neuron_objects()
        _LAST_NETWORK = self
| [
37811,
8199,
44372,
18640,
5499,
290,
7311,
32875,
1398,
526,
15931,
198,
198,
2,
46665,
25,
8774,
461,
21961,
1279,
76,
28121,
31,
76,
456,
13,
9869,
10187,
13,
15532,
29,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
3409,
... | 2.041335 | 9,919 |
import requests
import time

url = 'https://notify-api.line.me/api/notify'  # LINE Notify API endpoint
# NOTE(review): hard-coded credential -- should be moved to an environment
# variable or config file before publishing this script.
token = '2RNdAKwlaj69HK0KlEdMX1y575gDWNKrPpggFcLnh82'
ms = "新たなソフトを開くと負担が過剰にかかってしまいます。"  # notification message body

# Poll the CPU temperature once per second and send a LINE notification the
# first time it reaches 80 degrees C.
#
# BUG FIX: the original loop was broken in several ways -- `now=dt.('cpu_temps')`
# was a SyntaxError, `if print(cpu_temp) == "print >= 80"` compared the None
# returned by print() against a string (never true), and `line(...)` was an
# undefined name.  Rewritten to do what the comments describe.
# NOTE(review): getCpuTempFromFile and data_file are assumed to be defined
# earlier in the full script -- confirm.
while True:
    cpu_temp = getCpuTempFromFile(data_file)  # read current CPU temperature
    print(cpu_temp)
    if cpu_temp >= 80:
        # Notify once via LINE, then stop polling.
        requests.post(url,
                      headers={'Authorization': 'Bearer ' + token},
                      data={'message': ms})
        break
    time.sleep(1)
| [
11748,
7007,
198,
198,
6371,
796,
705,
5450,
1378,
1662,
1958,
12,
15042,
13,
1370,
13,
1326,
14,
15042,
14,
1662,
1958,
6,
2,
24027,
1892,
1958,
5641,
17614,
5641,
21886,
198,
30001,
796,
705,
17,
42336,
67,
10206,
40989,
1228,
3388,... | 1.578073 | 301 |
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
"""
from flask import Flask, render_template, request
# Initialize the Flask application
app = Flask(__name__)
# Default route, print user's IP
@app.route('/')
def index():
    """Handle '/' by returning the requesting client's IP address.

    BUG FIX: the decorator was previously applied to nothing (it sat
    directly above the ``__main__`` guard), which is a SyntaxError.  The
    header comment says the default route should print the user's IP, so
    echo ``request.remote_addr`` back to the client.
    """
    return request.remote_addr


if __name__ == '__main__':
    app.run()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
9800,
25,
55,
84,
44,
278,
171,
120,
230,
87,
12595,
21,
1731,
31,
38227,
13,
785,
8,
198,
31,
11213,
25,
220,
198,
37811,
198,
198,
6738,
42903,
1... | 2.572727 | 110 |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 5 22:01:45 2020
@author: PRAFULL
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
2447,
220,
642,
2534,
25,
486,
25,
2231,
12131,
198,
198,
31,
9800,
25,
350,
3861,
37,
9994,
198,
37811,
628
] | 2.175 | 40 |
#!/usr/bin/env python
# Part 5: Measure Twice, Test Once
# Jason Meil Attempt 2 DS3 Unit 3 Sprint 1 060219
import unittest
from acme_JM import Product
from acme_report import generate_products, ADJECTIVES, NOUNS
class AcmeProductTests(unittest.TestCase):
    """Sanity checks for the default Acme Product behaviour."""
    def test_default_product_price(self):
        """A product created with only a name costs 10."""
        item = Product('Testing Product')
        self.assertEqual(item.price, 10)
    def test_default_product_weight(self):
        """A product created with only a name weighs 20."""
        item = Product('Testing Product')
        self.assertEqual(item.weight, 20)
    def test_stealability(self):
        """The default product is reported as 'Kinda stealable.'."""
        item = Product('Testing Product')
        self.assertEqual(item.stealability(), 'Kinda stealable.')
    def test_explode(self):
        """explode() on the default product returns '...boom!'."""
        item = Product('Testing Product')
        self.assertEqual(item.explode(), '...boom!')
class AcmeReportTests(unittest.TestCase):
    """Checks on the batch of products returned by generate_products()."""
    def test_legal_names(self):
        """Each generated name must pair a known adjective with a known noun."""
        # valid vocabularies for the two halves of a product name
        allowed_adjectives = set(['Awesome', 'Shiny', 'Impressive', 'Portable',
                                  'Improved'])
        allowed_nouns = set(['Anvil', 'Catapult', 'Disguise', 'Mousetrap',
                             '???'])
        products = generate_products()
        # collect every word that falls outside its vocabulary
        illegal_adjectives = []
        illegal_nouns = []
        for item in products:
            first_word = item.name.split()[0]
            second_word = item.name.split()[1]
            if first_word not in allowed_adjectives:
                illegal_adjectives.append(first_word)
            if second_word not in allowed_nouns:
                illegal_nouns.append(second_word)
        self.assertEqual(len(illegal_adjectives), 0)
        self.assertEqual(len(illegal_nouns), 0)
if __name__ == '__main__':
    # Run all test cases when this module is executed directly.
    unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
201,
198,
2,
2142,
642,
25,
24291,
41217,
11,
6208,
4874,
201,
198,
2,
8982,
2185,
346,
25770,
362,
17400,
18,
11801,
513,
23178,
352,
657,
1899,
28896,
201,
198,
201,
198,
1174... | 2.357562 | 853 |
#!/usr/bin/env python3
import argparse
import subprocess
from os.path import isfile
import os
import sys
import time
import signal
from datetime import datetime
stopping=False  # module-level flag; presumably toggled by a signal handler elsewhere -- TODO confirm
# NOTE(review): `main` is not defined in this part of the file; it is
# presumably defined elsewhere in the full script before this guard runs.
if __name__=="__main__":
    ret = main(sys.argv)
    sys.exit(ret)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
1822,
29572,
198,
11748,
850,
14681,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
6737,
198,
6738,
4818... | 2.850575 | 87 |
from datetime import datetime
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
from backend import db, ma
# TODO: Implement Schema for each of our tables
# Marshmallow is used for serialization/deserialization of Python data types for API calls
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
663,
38537,
516,
1330,
5045,
276,
40386,
13908,
11712,
1300,
32634,
7509,
355,
23283,
7509,
198,
6738,
42903,
1330,
1459,
62,
1324,
198,
6738,
30203,
1330,
20613,
11,
17266,
628,
198,
198,
... | 3.949367 | 79 |
from __future__ import print_function
from colorama import Fore
import os
import sys
from plugin import plugin
@plugin('file organise')
class File_Organise():
    """Organise the files of a selected folder by file extension.

    Invoked by typing ``file organise``; follow the on-screen instructions
    to pick the folder to organise.
    """
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
3124,
1689,
1330,
4558,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
13877,
1330,
13877,
628,
198,
31,
33803,
10786,
7753,
42386,
11537,
198,
4871,
9220,
62,
26121,
786,
33529,... | 3.68 | 75 |
import grelok_subroutines as gs
# TODO: FSM seems like the best approach for this RPG
# TODO: Use JSON format for storing text and FSM
# NOTE(review): routine semantics live in grelok_subroutines; the comments
# below describe only what is visible here.
gs.routine_010()
key = gs.routine_100()
# map the raw key to either a message (str) or a numeric state code
gs_100_mapped = gs.routine_100_map(key)
if isinstance(gs_100_mapped, str):
    # string result: show it to the player directly
    print(gs_100_mapped)
else:
    # numeric result: code 17 advances to routine_101; other codes are
    # silently ignored here -- presumably handled later in the full script.
    if gs_100_mapped == 17:
        key = gs.routine_101()
| [
11748,
308,
2411,
482,
62,
7266,
81,
448,
1127,
355,
308,
82,
198,
198,
2,
16926,
46,
25,
376,
12310,
2331,
588,
262,
1266,
3164,
329,
428,
12909,
198,
2,
16926,
46,
25,
5765,
19449,
5794,
329,
23069,
2420,
290,
376,
12310,
198,
1... | 2.324324 | 148 |
from typing import List
from pddl.domain_duration import DomainDuration
from pddl.domain_formula import DomainFormula
from pddl.domain_assignment import DomainAssignment
from pddl.domain_inequality import DomainInequality
from pddl.probabilistic_effect import ProbabilisticEffect
from pddl.domain_effect import Effect, TimedEffect
from pddl.domain_condition import GoalDescriptor
class DomainOperator:
    """
    A class used to represent an operator (action) in a PDDL domain.

    Judging from this module's imports, an operator presumably aggregates a
    duration, parameters, conditions and (possibly probabilistic/timed)
    effects -- confirm against the rest of the class definition.
    """
| [
6738,
19720,
1330,
7343,
198,
6738,
279,
1860,
75,
13,
27830,
62,
32257,
1330,
20021,
26054,
198,
6738,
279,
1860,
75,
13,
27830,
62,
687,
4712,
1330,
20021,
8479,
4712,
198,
6738,
279,
1860,
75,
13,
27830,
62,
562,
16747,
1330,
20021... | 3.361111 | 144 |
""" An AppProvider Service Provider """
from config import application, middleware, storage
from masonite.autoload import Autoload
from masonite.commands import (AuthCommand, CommandCommand, ControllerCommand,
InfoCommand, InstallCommand, JobCommand,
KeyCommand, MakeMigrationCommand,
MigrateCommand, MigrateRefreshCommand,
MigrateResetCommand, MigrateRollbackCommand,
ModelCommand, ModelDocstringCommand, ProviderCommand, RoutesCommand,
SeedCommand, SeedRunCommand, ServeCommand, QueueWorkCommand,
TinkerCommand, ViewCommand, ValidatorCommand)
from masonite.exception_handler import ExceptionHandler
from masonite.helpers.routes import flatten_routes
from masonite.hook import Hook
from masonite.provider import ServiceProvider
from masonite.request import Request
from masonite.routes import Route
from routes import api, web
| [
37811,
1052,
2034,
29495,
4809,
32549,
37227,
198,
198,
6738,
4566,
1330,
3586,
11,
3504,
1574,
11,
6143,
198,
198,
6738,
285,
888,
578,
13,
2306,
349,
1170,
1330,
5231,
349,
1170,
198,
6738,
285,
888,
578,
13,
9503,
1746,
1330,
357,
... | 2.450935 | 428 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc. and Epidemico Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import json
import os
import datetime
from django.test import TestCase
import dataqs
from dataqs.aqicn.aqicn import AQICNProcessor
import httpretty
from mock import patch
# Directory containing this test module; fixtures live under resources/.
script_dir = os.path.dirname(os.path.realpath(__file__))
# Scratch JSON file written by mock_saveData in place of database storage.
tmpfile = os.path.join(script_dir, 'test_city.json')
def get_mock_response(filename):
    """
    Return the canned HTML fixture stored under resources/<filename>.
    """
    fixture_path = os.path.join(script_dir,
                                'resources/{}'.format(filename))
    with open(fixture_path) as fixture:
        return fixture.read()
def mock_saveData(self, city):
    """
    Write the city dict to a JSON file instead of the database.
    """
    # datetimes are not JSON-serializable: flatten them in place so the
    # caller sees the same converted dict the file contains
    for field in city.keys():
        value = city[field]
        if isinstance(value, datetime.datetime):
            city[field] = value.strftime('%Y-%m-%d')
    with open(tmpfile, 'w') as handle:
        handle.write(json.dumps(city))
class AQICNTest(TestCase):
    """
    Tests the dataqs.aqicn module. Since each processor is highly
    dependent on a running GeoNode instance for most functions, only
    independent functions are tested here.
    """
    # NOTE(review): self.processor is presumably created in a setUp method
    # that is not part of this chunk, and mock_worker_init (used in the
    # @patch decorators below) is not defined here either -- confirm
    # against the full file.
    def test_download(self):
        """
        Verify that the master url is retrieved.
        """
        # stub out the network so download() receives the canned city list
        httpretty.register_uri(
            httpretty.GET,
            self.processor.base_url,
            body=get_mock_response('test_aqicn_cities.html'),
            content_type='text/html')
        content = self.processor.download()
        self.assertIn(
            '<title>Air Pollution in the World - aqicn.org</title>', content)
    def test_getCities(self):
        """
        Verify that the processor creates a correct cities dictionary structure
        """
        self.processor.getCities()
        cities = self.processor.cities
        self.assertIsNotNone(cities)
        # every city entry must carry a name, a country and a detail url
        for city in cities:
            self.assertIsNotNone(city['city'], city)
            self.assertIsNotNone(city['country'], city)
            self.assertIsNotNone(city['url'], city)
    @patch('dataqs.aqicn.aqicn.AQICNWorker.__init__', mock_worker_init)
    @patch('dataqs.aqicn.aqicn.AQICNWorker.save_data', mock_saveData)
    def test_handleCity(self):
        """
        Verify that the correct AQI for a city is returned.
        """
        boston = u'http://aqicn.org/city/boston/'
        # serve the canned Boston page for the worker to scrape
        httpretty.register_uri(
            httpretty.GET,
            boston,
            body=get_mock_response('test_aqicn_boston.html'),
            content_type='text/html')
        cities = [{'city': u'Boston', 'country': u'USA', 'url': boston}]
        worker = dataqs.aqicn.aqicn.AQICNWorker('aqicn', cities)
        worker.handle_city(0, cities[0])
        # save_data is patched to dump JSON to tmpfile; read it back and
        # check the scraped pollutant readings
        with open(tmpfile) as jsonfile:
            city_json = json.load(jsonfile)
            self.assertEquals(city_json['data']['cur_aqi'], u'25')
            self.assertEquals(city_json['data']['cur_pm25'], u'25')
            self.assertEquals(city_json['data']['cur_o3'], u'11')
            self.assertEquals(city_json['data']['cur_so2'], u'2')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
29113,
29113,
7804,
4242,
21017,
198,
2,
220,
15069,
10897,
1574,
3457,
13,
290,
38437,
3713,
3457,
13,
198,
2,
... | 2.482827 | 1,514 |
import matplotlib.pyplot as plt
import numpy as np
import tikzplotlib
# Load the recorded per-epoch sigma values from the SET training run.
# NOTE(review): delimiter='' is unusual for np.genfromtxt -- confirm it
# parses this CSV as intended (delimiter=None splits on whitespace).
read_dataset_Set_sd = np.genfromtxt('results/set_epochs_200_recording_dis_sd/SET__fashion_mnist_for_200_epochs_20210603-164315_num_sd_None_sd_lap__sd_dis_.csv',delimiter='')
# percentage change of sigma between consecutive epochs
perc_change_sd = np.diff(read_dataset_Set_sd) / read_dataset_Set_sd[:-1] * 100
plt.plot(perc_change_sd)
# plt.legend()
plt.ylabel("$\sigma$ change")
plt.xlabel("Epoch[#]")
plt.title("$\sigma$ change between epochs")
plt.show()
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
256,
1134,
89,
29487,
8019,
628,
198,
961,
62,
19608,
292,
316,
62,
7248,
62,
21282,
796,
45941,
13,
5235,
6738,
14116,
10786,
43... | 2.230047 | 213 |
'''
Set caseleds led strip through 'f' (frame) command.
A hifive1 running hifive1-argb-fxl must be connected through USB.
'''
import math
import serial
from pyutil.delayedkeyboardinterrupt import DelayedKeyboardInterrupt
# Serial device node of the connected hifive1 board (stable by-id path).
UART='/dev/serial/by-id/usb-FTDI_Dual_RS232-HS-if01-port0'
baudrate = 115200  # serial line speed; must match the board firmware
| [
7061,
6,
198,
7248,
6124,
417,
5379,
2957,
10283,
832,
705,
69,
6,
357,
14535,
8,
3141,
13,
198,
32,
289,
361,
425,
16,
2491,
289,
361,
425,
16,
12,
853,
65,
12,
21373,
75,
1276,
307,
5884,
832,
8450,
13,
198,
7061,
6,
198,
11... | 2.752294 | 109 |
import komand
from .schema import SchedReportInput, SchedReportOutput
# Custom imports below
| [
11748,
479,
296,
392,
198,
6738,
764,
15952,
2611,
1330,
27774,
19100,
20560,
11,
27774,
19100,
26410,
198,
198,
2,
8562,
17944,
2174,
628
] | 3.958333 | 24 |
'''
给出两个 非空 的链表用来表示两个非负的整数。其中,它们各自的位数是按照 逆序 的方式存储的,并且它们的每个节点只能存储 一位 数字。
如果,我们将这两个数相加起来,则会返回一个新的链表来表示它们的和。
您可以假设除了数字 0 之外,这两个数都不会以 0 开头。
示例:
输入:(2 -> 4 -> 3) + (5 -> 6 -> 4)
输出:7 -> 0 -> 8
原因:342 + 465 = 807
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
| [
7061,
6,
198,
163,
119,
247,
49035,
118,
10310,
97,
10310,
103,
1849,
165,
251,
252,
163,
102,
118,
13328,
248,
226,
165,
241,
122,
26193,
101,
18796,
101,
30266,
98,
26193,
101,
163,
97,
118,
10310,
97,
10310,
103,
165,
251,
252,
... | 0.901299 | 385 |
# 2 tea shops sell tea at the price of 15 and 30 rupees per cup. Input the number of cups a person buys from the first shop, then input the number of cups a person buys from the second shop and tell the customer the total bill.
price_first_shop = 15   # rupees per cup at the first shop
price_second_shop = 30  # rupees per cup at the second shop

items_first_shop = int(input("How many tea cups will you buy from the first shop?: "))
items_second_shop = int(input("How many tea cups will you buy from the second shop?: "))

if items_first_shop > 100 or items_second_shop > 100:
    print("that's too much tea, don't you think")
elif items_first_shop < 0 or items_second_shop < 0:
    # BUG FIX: the original condition tested items_second_shop twice, so a
    # negative first-shop count was never caught.  Also, the fallback
    # quantities (2 cups + 1 cup) were assigned unconditionally, which
    # discarded the user's input for every order; apply them only when the
    # input is invalid.
    print("no")
    items_first_shop = 2
    items_second_shop = 1

bill = (items_first_shop * price_first_shop) + (items_second_shop * price_second_shop)
print("......................................................\n" "Your bill is: ", bill)
| [
2,
362,
8887,
12437,
3677,
8887,
379,
262,
2756,
286,
1315,
290,
1542,
7422,
42623,
583,
6508,
13,
23412,
262,
1271,
286,
14180,
257,
1048,
24779,
422,
262,
717,
6128,
11,
788,
5128,
262,
1271,
286,
14180,
257,
1048,
24779,
422,
262,
... | 3.341176 | 255 |
import multiprocessing as mp
import os
import shutil
import sys
import time
import numpy as np
# from tool_packages.magphase import libutils as lu
# from tool_packages.magphase import magphase as mp
from util import file_util, log_util, system_cmd_util
log = log_util.get_logger("extract vocoder features")
# FFT size to use for each supported sample rate
fs_nFFT_dict = {16000: 1024,
                22050: 1024,
                44100: 2048,
                48000: 2048}
# frequency-warping coefficient (alpha) per sample rate -- presumably the
# usual mel-cepstral warping values for these rates; confirm if changed
fs_alpha_dict = {16000: 0.58,
                 22050: 0.65,
                 44100: 0.76,
                 48000: 0.77}
# intermediate (raw audio / spectral) working directories
raw_dir = "/home/top/workspace/tts/data/CarNum/raw"
sp_dir = "/home/top/workspace/tts/data/CarNum/sp"
ap_dir = "/home/top/workspace/tts/data/CarNum/ap"
f0_dir = "/home/top/workspace/tts/data/CarNum/f0"
# output feature dir
lf0_dir = "/home/top/workspace/tts/data/CarNum/lf0"
mgc_dir = "/home/top/workspace/tts/data/CarNum/mgc"
bap_dir = "/home/top/workspace/tts/data/CarNum/bap"
# out_feat_dir must contain all of above feature name
feat_dir = ["raw", "sp", "mgc", "bap", "ap", "f0", "lf0"]
# locations of the external analysis/synthesis tool binaries shipped with merlin
merlin_dir = "/home/top/workspace/tts/merlin-tf-slim"
straight = os.path.join(merlin_dir, "tools/bin/straight")
world = os.path.join(merlin_dir, "tools/bin/WORLD")
worldv2 = os.path.join(merlin_dir, "tools/bin/WORLD")
sptk = os.path.join(merlin_dir, "tools/bin/SPTK-3.9")
reaper = os.path.join(merlin_dir, "tools/bin/REAPER")
magphase = os.path.join(merlin_dir, 'tools', 'magphase', 'src')
def extract_vocoder_feats_for_merlin(merlin_path, vocoder_type, wav_dir, out_dir, sample_rate):
    '''
    extract vocoder features for merlin with the chosen vocoder type
    :param merlin_path: root dir of merlin (NOTE(review): currently unused;
        the module-level ``merlin_dir`` tool paths are used instead)
    :param vocoder_type: one of "magphase", "straight", "world", "worldv2"
    :param wav_dir: directory containing the wav files to analyse
    :param out_dir: root dir to save extracted features
    :param sample_rate: sample rate of audio, e.g. 16000, 44100, 48000
    :return: None
    '''
    import functools  # local import keeps this fix self-contained

    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    # BUG FIX: os.listdir() was previously called with the ``feat_dir``
    # *list* (a TypeError).  Create one output sub-directory per feature name.
    path_list = [os.path.join(out_dir, feat_name) for feat_name in feat_dir]
    file_util.create_path_list(path_list)
    print("--- Feature extraction started ---")
    start_time = time.time()
    # get wav files list
    wav_files = file_util.read_file_list_from_path(wav_dir, ".wav", True)
    if vocoder_type == "magphase":
        # magphase processes the whole directory itself (no per-file pool);
        # previously this branch left process/params as None and crashed in
        # pool.map(None, None).
        sys.path.append(os.path.realpath(magphase))
        extract_feats_by_magphase(magphase, wav_dir, out_dir)
    else:
        # BUG FIX: pool.map(process, params) previously iterated over the
        # *argument list* (calling e.g. process(straight), process(sptk), ...)
        # instead of running the per-file worker once per wav file.  Bind the
        # fixed arguments with functools.partial (picklable, unlike a lambda)
        # and map over wav_files.
        if vocoder_type == "straight":
            process = functools.partial(extract_feats_by_straight, straight,
                                        sample_rate=sample_rate)
        elif vocoder_type == "world":
            process = functools.partial(extract_feat_by_world,
                                        sample_rate=sample_rate)
        elif vocoder_type == "worldv2":
            process = functools.partial(extract_feat_by_worldv2,
                                        sample_rate=sample_rate)
        else:
            log.error("current vocoder not supported " + vocoder_type)
            return
        # do multi-processing, one worker per core
        pool = mp.Pool(mp.cpu_count())
        pool.map(process, wav_files)
    # clean temporal files
    shutil.rmtree(raw_dir, ignore_errors=True)
    shutil.rmtree(sp_dir, ignore_errors=True)
    shutil.rmtree(f0_dir, ignore_errors=True)
    shutil.rmtree(ap_dir, ignore_errors=True)
    print("You should have your features ready in: " + out_dir)
    (m, s) = divmod(int(time.time() - start_time), 60)
    print(("--- Feature extraction completion time: %d min. %d sec ---" % (m, s)))
'''
DESCRIPTION:
This script extracts low-dimensional acoustic features from a batch of wav files intended for using with the Merlin toolkit.
It runs the extraction in parallel mode, using all the cores available in the system.
The acoustic features extracted and used by Merlin are:
- '<file>.mag' : Mel-scaled Log-Mag (dim=nbins_mel, usually 60).
- '<file>.real' : Mel-scaled real (dim=nbins_phase, usually 45).
- '<file>.imag' : Mel-scaled imag (dim=nbins_phase, usually 45).
- '<file>.lf0' : Log-F0 (dim=1).
Also, this script extracts the additional files:
- '<file>.est' : File generated by REAPER containing epoch locations and voi/unvoi decisions (remove them if wanted).
- '<file>.shift': File that contains the shifts (hop-sizes) for each extracted frame (variable frame rate).
It is used to modify the label files in Merlin. Se .... for more information.
INSTRUCTIONS:
This demo should work out of the box. Just run it by typing: python <script name>
If wanted, you can modify the input options (directories, input files, etc.) See the main function below for details.
'''
def extract_feats_by_magphase(magphase, wav_dir, out_dir):
    '''
    extract vocoder features for every wav file in wav_dir using magphase
    :param magphase: path to the magphase source directory (added to sys.path)
    :param wav_dir: directory containing the wav files to analyse
    :param out_dir: directory to write the extracted features into
    :return: None
    '''
    # NOTE(review): `lu` (magphase libutils) and `feat_extraction` are not in
    # scope here -- the `from tool_packages.magphase import libutils as lu`
    # import at the top of this file is commented out, so this function would
    # raise NameError as written.  Confirm/restore the import before use.
    sys.path.append(os.path.realpath(magphase))
    lu.mkdir(out_dir)
    l_wavfiles = file_util.read_file_list_from_path(wav_dir, file_type=".wav", if_recursive=True)
    # MULTIPROCESSING EXTRACTION
    lu.run_multithreaded(feat_extraction, l_wavfiles, out_dir)
def extract_feats_by_straight(straight, wav_file, sample_rate):
    '''
    extract vocoder features (lf0/mgc/bap) for one wav file using STRAIGHT
    :param straight: path to the STRAIGHT binaries directory
    :param wav_file: path of the wav file to analyse
    :param sample_rate: audio sample rate (key into fs_nFFT_dict/fs_alpha_dict)
    :return: None; feature files are written into the module-level *_dir paths
    '''
    file_id = os.path.basename(wav_file).split(".")[0]
    print(file_id)
    nFFT = fs_nFFT_dict[sample_rate]
    alpha = fs_alpha_dict[sample_rate]
    # analysis orders / frame shift (ms); values mirror typical merlin recipes
    mcsize = 59
    order = 24
    fshift = 5
    # convert wav to headerless 16-bit mono raw, as STRAIGHT expects
    sox_wav_2_raw_cmd = 'sox %s -b 16 -c 1 -r %s -t raw %s' % (wav_file, \
                                                               sample_rate, \
                                                               os.path.join(raw_dir, file_id + '.raw'))
    os.system(sox_wav_2_raw_cmd)
    ### STRAIGHT ANALYSIS -- extract vocoder parameters ###
    ### extract f0, sp, ap ###
    raw_file = os.path.join(raw_dir, file_id + '.raw')
    f0_file = os.path.join(f0_dir, file_id + '.f0')
    ap_file = os.path.join(ap_dir, file_id + '.ap')
    sp_file = os.path.join(sp_dir, file_id + '.sp')
    bap_file = os.path.join(bap_dir, file_id + '.bap')
    mgc_file = os.path.join(mgc_dir, file_id + '.mgc')
    lf0_file = os.path.join(lf0_dir, file_id + '.lf0')
    system_cmd_util.straight_f0_analysis(straight, fshift, sample_rate, raw_file, f0_file)
    system_cmd_util.straight_ap_analysis(straight, sample_rate, nFFT, fshift, f0_file, raw_file, ap_file)
    system_cmd_util.straight_sp_analysis(straight, sample_rate, nFFT, fshift, mcsize, f0_file, raw_file, sp_file)
    ### convert f0 to lf0 ###
    system_cmd_util.sptk_f0_to_lf0(sptk, f0_file, lf0_file)
    ### convert sp to mgc ###
    system_cmd_util.sptk_mcep_cmd(sptk, 3, alpha, mcsize, nFFT, sp_file, mgc_file)
    ### convert ap to bap ###
    system_cmd_util.sptk_mcep_cmd(sptk, 1, alpha, order, nFFT, ap_file, bap_file)
def extract_feat_by_world(wav_file, sample_rate, b_use_reaper=True):
    '''
    extract vocoder features (lf0/mgc/bap) for one wav file using WORLD
    :param wav_file: path of the wav file to analyse
    :param sample_rate: audio sample rate (key into fs_nFFT_dict/fs_alpha_dict)
    :param b_use_reaper: if True, re-extract f0 with REAPER after WORLD analysis
    :return: None; feature files are written into the module-level *_dir paths
    '''
    nFFTHalf = fs_nFFT_dict[sample_rate]
    alpha = fs_alpha_dict[sample_rate]
    mcsize = 59
    file_id = os.path.basename(wav_file).split(".")[0]
    print('\n' + file_id)
    ### WORLD ANALYSIS -- extract vocoder parameters ###
    ### extract sp, ap ###
    f0_file = os.path.join(f0_dir, file_id + '.f0')
    f0_world_file = f0_file
    if b_use_reaper:
        # keep WORLD's own f0 under a '_world' suffix so REAPER's output
        # can take the canonical .f0 name
        f0_world_file = f0_file + "_world"
    f0_file = os.path.join(f0_dir, file_id + '.f0')
    sp_file = os.path.join(sp_dir, file_id + '.sp')
    bapd_file = os.path.join(bap_dir, file_id + '.bapd')
    # NOTE(review): world_analysis is given f0_file here, not f0_world_file,
    # so the '_world' file is never actually written when b_use_reaper is
    # True -- confirm whether f0_world_file was intended instead.
    system_cmd_util.world_analysis(world, wav_file, f0_file, sp_file, bapd_file)
    ### Extract f0 using reaper ###
    if b_use_reaper:
        reaper_f0_extract(wav_file, f0_world_file, f0_file)
    ### convert f0 to lf0 ###
    f0_file = os.path.join(f0_dir, file_id + '.f0')
    lf0_file = os.path.join(lf0_dir, file_id + '.lf0')
    system_cmd_util.sptk_f0_to_lf0(sptk, f0_file, lf0_file)
    ### convert sp to mgc ###
    sp_file = os.path.join(sp_dir, file_id + '.sp')
    mgc_file = os.path.join(mgc_dir, file_id + '.mgc')
    system_cmd_util.sptk_sp_to_mgc(sptk, sp_file, mgc_file, alpha, mcsize, nFFTHalf)
    ### convert bapd to bap ###
    # x2x +df: convert the double-precision band aperiodicity to float
    sptk_x2x_df_cmd2 = "%s +df %s > %s " % (os.path.join(sptk, "x2x"), \
                                            os.path.join(bap_dir, file_id + ".bapd"), \
                                            os.path.join(bap_dir, file_id + '.bap'))
    os.system(sptk_x2x_df_cmd2)
def extract_feat_by_worldv2(wav_file, sample_rate):
'''
:param wav_file:
:param sample_rate:
:return:
'''
nFFTHalf = fs_nFFT_dict[sample_rate]
alpha = fs_alpha_dict[sample_rate]
mcsize = 59
order = 4
file_id = os.path.basename(wav_file).split(".")[0]
print('\n' + file_id)
f0_file = os.path.join(f0_dir, file_id + '.f0')
sp_file = os.path.join(sp_dir, file_id + '.sp')
ap_file = os.path.join(ap_dir, file_id + '.ap')
system_cmd_util.world_analysis(world, wav_file, f0_file, sp_file, ap_file)
### convert f0 to lf0 ###
f0_file = os.path.join(f0_dir, file_id + '.f0')
lf0_file = os.path.join(lf0_dir, file_id + '.lf0')
system_cmd_util.sptk_f0_to_lf0(sptk, f0_file, lf0_file)
### convert sp to mgc ###
mgc_file = os.path.join(mgc_dir, file_id + '.mgc')
system_cmd_util.sptk_sp_to_mgc(sptk, sp_file, mgc_file, alpha, mcsize, nFFTHalf)
### convert ap to bap ###
sptk_x2x_df_cmd2 = "%s +df %s | %s | %s >%s" % (os.path.join(sptk, 'x2x'), \
ap_file, \
os.path.join(sptk, 'sopr') + ' -R -m 32768.0', \
os.path.join(sptk, 'mcep') + ' -a ' + str(alpha) + ' -m ' + str(
order) + ' -l ' + str(
nFFTHalf) + ' -e 1.0E-8 -j 0 -f 0.0 -q 3 ', \
os.path.join(mgc_dir, file_id + '.bap'))
os.system(sptk_x2x_df_cmd2)
def synthesis_by_straight(lf0, mgc, bap, synth_dir, sample_rate):
'''
:param lf0:
:param mgc:
:param bap:
:param synth_dir:
:return:
'''
mcsize = 59
order = 24
nFFT = fs_nFFT_dict[sample_rate]
alpha = fs_alpha_dict[sample_rate]
nFFTHalf = (1 + nFFT / 2)
fshift = 5
file_id = os.path.basename(lf0).split(".")[0]
### convert lf0 to f0 ###
f0_file = os.path.join(synth_dir, file_id + ".f0")
system_cmd_util.sptk_lf0_to_f0(sptk, lf0, f0_file)
# lf0_f0_cmd = "sptk/sopr -magic -1.0E+10 -EXP -MAGIC 0.0 %s | %s +fa > %s" % \
# (os.path.join(sptk, "sopr"), lf0, os.path.join(sptk, "x2x"), f0_file)
#
# os.system(lf0_f0_cmd)
### convert mgc to sp ###
sp_file = os.path.join(synth_dir, file_id + ".sp")
system_cmd_util.straight_mgc2apsp(sptk, alpha, mcsize, nFFT, mgc, 2, sp_file)
### convert bap to ap ###
ap_file = os.path.join(synth_dir, file_id + ".ap")
system_cmd_util.straight_mgc2apsp(sptk, alpha, order, nFFT, bap, 0, ap_file)
## synthesis
wav_file = os.path.join(synth_dir, file_id + ".wav")
system_cmd_util.straight_synth(straight, sample_rate, nFFT, fshift, ap_file, f0_file, sp_file, wav_file)
log.info("synthesized speech in " + wav_file)
def synthesis_by_worldv2(lf0, mgc, synth_dir, sample_rate):
'''
synthesis speech by world v2
:param lf0: one lf0 file path
:param mgc: one mgc file path
:param bap: one bap file path
:param synth_dir: where should the synthesized speech should be saved into
:param sample_rate:
:return:
'''
mcsize = 59
order = 4
nFFT = fs_nFFT_dict[sample_rate]
alpha = fs_alpha_dict[sample_rate]
file_id = os.path.basename(lf0).split(".")[0]
f0a = os.path.join(synth_dir, file_id + ".f0a")
f0 = os.path.join(synth_dir, file_id + ".f0")
system_cmd_util.sptk_lf0_to_f0(sptk, lf0, f0)
sp = os.path.join(synth_dir, file_id + ".sp")
ap = os.path.join(synth_dir, file_id + ".ap")
wav_file = os.path.join(synth_dir, file_id + ".wav")
system_cmd_util.sptk_mgc_to_apsp(sptk, alpha, mcsize, nFFT, mgc, sp)
system_cmd_util.sptk_mgc_to_apsp(sptk, alpha, order, nFFT, sp, ap)
system_cmd_util.world_synth(world, nFFT, sample_rate, f0, sp, ap, wav_file)
log.info("synthesize speech in " + wav_file)
def synthesis_by_world(lf0, mgc, bap, synth_dir, sample_rate):
'''
synthesize speech by world
:param lf0:
:param mgc:
:param bap:
:param synth_dir:
:param sample_rate:
:return:
'''
mcsize = 59
# set to True if synthesizing generated files
post_filtering = False
# this coefficient depends on voice
pf_coef = 1.07
alpha = fs_alpha_dict[sample_rate]
nFFTHalf = fs_nFFT_dict[sample_rate]
file_id = os.path.basename(lf0).split(".")[0]
f0a = os.path.join(synth_dir, file_id + ".f0a")
f0 = os.path.join(synth_dir, file_id + ".f0")
system_cmd_util.sptk_lf0_to_f0(sptk, lf0, f0)
if post_filtering:
### post-filtering mgc ###
mgcp = os.path.join(synth_dir, file_id + ".mgc_p")
system_cmd_util.sptk_mcpf_post_filtering_mgc(sptk, mcsize, pf_coef, mgc, mgcp)
### convert mgc to sp ###
sp_file = os.path.join(synth_dir, file_id + ".sp")
system_cmd_util.sptk_mgc_to_apsp(sptk, alpha, mcsize, nFFTHalf, mgc, sp_file)
### convert bap to bapd ###
bapd = os.path.join(synth_dir, file_id + ".bapd")
system_cmd_util.sptk_x2x_bap2bapd(sptk, bap, bapd)
# Final synthesis using WORLD
wav_file = os.path.join(synth_dir, file_id + ".wav")
system_cmd_util.world_synth(world, nFFTHalf, sample_rate, f0, sp_file, bapd, wav_file)
#########used for world vocoder #######
def read_reaper_f0_file(est_file, skiprows=7):
'''
Reads f0 track into numpy array from EST file generated by REAPER.
'''
v_f0 = np.loadtxt(est_file, skiprows=skiprows, usecols=[2])
v_f0[v_f0 < 0] = 0
return v_f0
def reaper_f0_extract(in_wavfile, f0_file_ref, f0_file_out, frame_shift_ms=5.0):
'''
Extracts f0 track using REAPER.
To keep consistency with the vocoder, it also fixes for the difference in number
of frames between the REAPER f0 track and the acoustic parameters extracted by the vocoder.
f0_file_ref: f0 extracted by the vocoder. It is used as a reference to fix the number of frames, as explained.
'''
# Run REAPER:
log.debug("Running REAPER f0 extraction...")
out_reaper = f0_file_out + "_reaper"
system_cmd_util.reaper_extract_f0(reaper, frame_shift_ms / 1000.0, in_wavfile, out_reaper)
# Protection - number of frames:
v_f0_ref = file_util.read_binfile(f0_file_ref, dim=1)
v_f0 = read_reaper_f0_file(out_reaper)
frm_diff = v_f0.size - v_f0_ref.size
if frm_diff < 0:
v_f0 = np.r_[v_f0, np.zeros(-frm_diff) + v_f0[-1]]
if frm_diff > 0:
v_f0 = v_f0[:-frm_diff]
# Save f0 file:
file_util.write_binfile(v_f0, f0_file_out)
return
wav_file = "/home/top/workspace/tts/data/CarNum/wav/N_10.wav"
sample_rate = 16000
#extract_feat_by_worldv2(wav_file, sample_rate)
lf0="/home/top/workspace/tts/data/CarNum/lf0/N_10.lf0"
mgc = "/home/top/workspace/tts/data/CarNum/mgc/N_10.mgc"
bap = "/home/top/workspace/tts/data/CarNum/bap/N_10.bap"
synth_dir = "/home/top/workspace/tts/data/CarNum/synth/"
synthesis_by_worldv2(lf0, mgc, synth_dir, sample_rate) | [
11748,
18540,
305,
919,
278,
355,
29034,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
25064,
198,
11748,
640,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
422,
2891,
62,
43789,
13,
19726,
40715,
1330,
9195,
26791,
355,... | 2.104977 | 7,354 |
from ukpocopy import postcodes
from ukpocopy import validators
from ukpocopy import exceptions
| [
6738,
334,
74,
79,
420,
11081,
1330,
1281,
40148,
198,
6738,
334,
74,
79,
420,
11081,
1330,
4938,
2024,
198,
6738,
334,
74,
79,
420,
11081,
1330,
13269,
198
] | 3.275862 | 29 |
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
from typing import Callable
import torch
from .torch_const import TorchOpClassType
_TORCH_OP_ATTR_MAP = {} #Dict(str, TorchOpAttr)
| [
198,
198,
2,
198,
2,
15069,
13130,
1395,
346,
28413,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,... | 3.478673 | 211 |
"""
Having a registry of all available classes is convenient for retrieving an instance
based on a configuration at run-time.
"""
import logging
from collections import OrderedDict
from plato.config import Config
if hasattr(Config().trainer, 'use_mindspore'):
from plato.datasources.mindspore import (
mnist as mnist_mindspore, )
registered_datasources = OrderedDict([
('MNIST', mnist_mindspore),
])
registered_partitioned_datasources = OrderedDict()
elif hasattr(Config().trainer, 'use_tensorflow'):
from plato.datasources.tensorflow import (
mnist as mnist_tensorflow,
fashion_mnist as fashion_mnist_tensorflow,
)
registered_datasources = OrderedDict([('MNIST', mnist_tensorflow),
('FashionMNIST',
fashion_mnist_tensorflow)])
elif hasattr(Config.data, 'use_multimodal'):
from plato.datasources.multimodal import kinetics, gym, flickr30k_entities, referitgame
registered_datasources = OrderedDict([
('kinetics700', kinetics),
('kinetics400', kinetics),
('Gym', gym),
('Flickr30E', flickr30k_entities),
('Referitgame', referitgame),
])
registered_partitioned_datasources = OrderedDict()
else:
from plato.datasources import (
mnist,
fashion_mnist,
cifar10,
cinic10,
huggingface,
pascal_voc,
tiny_imagenet,
femnist,
feature,
)
registered_datasources = OrderedDict([('MNIST', mnist),
('FashionMNIST', fashion_mnist),
('CIFAR10', cifar10),
('CINIC10', cinic10),
('HuggingFace', huggingface),
('PASCAL_VOC', pascal_voc),
('TinyImageNet', tiny_imagenet),
('Feature', feature)])
registered_partitioned_datasources = OrderedDict([('FEMNIST', femnist)])
def get(client_id=0):
"""Get the data source with the provided name."""
datasource_name = Config().data.datasource
logging.info("Data source: %s", Config().data.datasource)
if Config().data.datasource == 'YOLO':
from plato.datasources import yolo
return yolo.DataSource()
elif datasource_name in registered_datasources:
dataset = registered_datasources[datasource_name].DataSource()
elif datasource_name in registered_partitioned_datasources:
dataset = registered_partitioned_datasources[
datasource_name].DataSource(client_id)
else:
raise ValueError('No such data source: {}'.format(datasource_name))
return dataset
def get_input_shape():
"""Get the input shape of data source with the provided name."""
datasource_name = Config().data.datasource
logging.info("Data source: %s", Config().data.datasource)
if Config().data.datasource == 'YOLO':
from plato.datasources import yolo
return yolo.DataSource.input_shape()
elif datasource_name in registered_datasources:
input_shape = registered_datasources[
datasource_name].DataSource.input_shape()
elif datasource_name in registered_partitioned_datasources:
input_shape = registered_partitioned_datasources[
datasource_name].DataSource.input_shape()
else:
raise ValueError('No such data source: {}'.format(datasource_name))
return input_shape
| [
37811,
198,
14698,
257,
20478,
286,
477,
1695,
6097,
318,
11282,
329,
50122,
281,
4554,
198,
3106,
319,
257,
8398,
379,
1057,
12,
2435,
13,
198,
37811,
198,
198,
11748,
18931,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
67... | 2.192005 | 1,651 |
'''
This project is derived from the course work
and is an extension of the course work. Due
to the source of the dataset itself, the
dataset needs to be pre-processed before it
can be called.
Author: Bruce Hou, Email: ecstayalive@163.com
'''
import scipy.io as scio
import matplotlib.pyplot as plt
import numpy as np
import os
class PreProcess:
'''
加载数据并保存成npy格式
'''
def run(self):
'''
调用接口
:return 采样数据,数据对应的标签
'''
data = self.load()
# 判断文件是否存在
files_exist = os.path.isfile('./dataset/train.npy')
if not files_exist:
print('files does not exist')
# 数据集大小
train_data = np.empty((1600, 2048))
label = np.empty((1600,))
# 转换数据格式并保存
train_data, label = self.transform(data, train_data, label)
return train_data, label
else:
print('files exist, now load')
train_data = np.load('./dataset/train.npy')
label = np.load('./dataset/label.npy')
return train_data, label
def load(self):
'''
加载数据
:return 加载的数据
'''
dataset = scio.loadmat("./dataset/lecture_data.mat")
return dataset
def transform(self, data, train_data, label):
'''
改变格式,生成数据集并保存
:param data 加载的mat数据
:param train_data 需要的数据格式和形状
:param label 数据对应的标签
:return train_data, label
'''
temp1 = np.empty((8, 4096, 80))
temp2 = np.empty((1, 4096, 160))
temp = np.empty((320, 2048))
temp1[0] = data['class0_train_normal']
temp1[1] = data['class1_train_inner']
temp1[2] = data['class2_train_outer']
temp1[3] = data['class3_train_roller']
temp1[4] = data['class4_train_crack']
temp1[5] = data['class5_train_pitting']
temp1[6] = data['class6_train_broken_tooth']
temp1[7] = data['class7_train_missing_tooth']
temp2[0] = data['test_data']
temp3 = np.load('./dataset/result.npy')
# 生成train_data和label数据集
for i in range(8):
for j in range(80):
train_data[i * 160 + 2 * j, :] = temp1[i, 0:2048, j]
train_data[i * 160 + 2 * j + 1, :] = temp1[i, 2048:4096, j]
label[i * 160 + 2 * j:i * 160 + 2 * j + 2] = i
#
for i in range(160):
temp[2 * i, :] = temp2[0, 0:2048, i]
temp[2 * i + 1, :] = temp2[0, 2048:4096, i]
for i in range(1280, 1600):
train_data[i, :] = temp[i - 1280, :]
label[i] = temp3[(i - 1280) // 2]
# 打乱训练集和标签
permutation = np.random.permutation(label.shape[0])
print(permutation)
train_data = train_data[permutation, :]
label = label[permutation]
np.save('./dataset/or_train.npy', train_data)
np.save('./dataset/or_label.npy', label)
# 对每一段序列添加噪声
for i in range(train_data.shape[0]):
train_noise = self.gen_gaussian_noise(train_data[i, :], 1)
train_data[i, :] = train_data[i, :] + train_noise
# 保存数据
np.save('./dataset/train.npy', train_data)
np.save('./dataset/label.npy', label)
return train_data, label
def gen_gaussian_noise(self, signal, SNR):
"""
:param signal: 原始信号
:param SNR: 添加噪声的信噪比
:return: 生成的噪声
"""
noise = np.random.randn(*signal.shape) # *signal.shape 获取样本序列的尺寸
# print(signal.shape)
noise = noise - np.mean(noise) # np.mean 求均值
signal_power = (1 / signal.shape[0]) * np.sum(np.power(signal, 2))
noise_variance = signal_power / np.power(10, (SNR / 10))
noise = (np.sqrt(noise_variance) / np.std(noise)) * noise
return noise
if __name__ == '__main__':
# User's code here
f = 125600
load = PreProcess()
train, label = load.run()
or_train, or_label = np.load('./dataset/or_train.npy'), np.load('./dataset/or_label.npy')
# 选取6个数据进行绘图
# 第一幅图为加入噪声后的数据
plt.figure(1)
for i in range(0, 6):
ax = plt.subplot(3, 2, i + 1)
ax.set_title(str(label[i]))
plt.plot(np.arange(2048), train[i, :])
# 第二幅图为没有加入噪声的数据
plt.figure(2)
for i in range(0, 6):
ax = plt.subplot(3, 2, i + 1)
ax.set_title(str(or_label[i]))
plt.plot(np.arange(2048), or_train[i, :])
plt.show()
| [
7061,
6,
198,
1212,
1628,
318,
10944,
422,
262,
1781,
670,
198,
392,
318,
281,
7552,
286,
262,
1781,
670,
13,
14444,
198,
1462,
262,
2723,
286,
262,
27039,
2346,
11,
262,
198,
19608,
292,
316,
2476,
284,
307,
662,
12,
14681,
276,
... | 1.701607 | 2,614 |